Diffstat (limited to 'target')
-rw-r--r--  target/alpha/Makefile.objs | 4
-rw-r--r--  target/alpha/STATUS | 28
-rw-r--r--  target/alpha/cpu-qom.h | 52
-rw-r--r--  target/alpha/cpu.c | 342
-rw-r--r--  target/alpha/cpu.h | 518
-rw-r--r--  target/alpha/fpu_helper.c | 553
-rw-r--r--  target/alpha/gdbstub.c | 94
-rw-r--r--  target/alpha/helper.c | 490
-rw-r--r--  target/alpha/helper.h | 104
-rw-r--r--  target/alpha/int_helper.c | 280
-rw-r--r--  target/alpha/machine.c | 91
-rw-r--r--  target/alpha/mem_helper.c | 89
-rw-r--r--  target/alpha/sys_helper.c | 91
-rw-r--r--  target/alpha/translate.c | 3010
-rw-r--r--  target/alpha/vax_helper.c | 355
-rw-r--r--  target/arm/Makefile.objs | 12
-rw-r--r--  target/arm/arch_dump.c | 337
-rw-r--r--  target/arm/arm-powerctl.c | 228
-rw-r--r--  target/arm/arm-powerctl.h | 75
-rw-r--r--  target/arm/arm-semi.c | 656
-rw-r--r--  target/arm/arm_ldst.h | 49
-rw-r--r--  target/arm/cpu-qom.h | 90
-rw-r--r--  target/arm/cpu.c | 1622
-rw-r--r--  target/arm/cpu.h | 2466
-rw-r--r--  target/arm/cpu64.c | 353
-rw-r--r--  target/arm/crypto_helper.c | 465
-rw-r--r--  target/arm/gdbstub.c | 103
-rw-r--r--  target/arm/gdbstub64.c | 72
-rw-r--r--  target/arm/helper-a64.c | 559
-rw-r--r--  target/arm/helper-a64.h | 50
-rw-r--r--  target/arm/helper.c | 9623
-rw-r--r--  target/arm/helper.h | 542
-rw-r--r--  target/arm/internals.h | 489
-rw-r--r--  target/arm/iwmmxt_helper.c | 671
-rw-r--r--  target/arm/kvm-consts.h | 185
-rw-r--r--  target/arm/kvm-stub.c | 25
-rw-r--r--  target/arm/kvm.c | 640
-rw-r--r--  target/arm/kvm32.c | 529
-rw-r--r--  target/arm/kvm64.c | 982
-rw-r--r--  target/arm/kvm_arm.h | 291
-rw-r--r--  target/arm/machine.c | 333
-rw-r--r--  target/arm/monitor.c | 83
-rw-r--r--  target/arm/neon_helper.c | 2242
-rw-r--r--  target/arm/op_addsub.h | 103
-rw-r--r--  target/arm/op_helper.c | 1335
-rw-r--r--  target/arm/psci.c | 202
-rw-r--r--  target/arm/trace-events | 10
-rw-r--r--  target/arm/translate-a64.c | 11430
-rw-r--r--  target/arm/translate.c | 12055
-rw-r--r--  target/arm/translate.h | 155
-rw-r--r--  target/cris/Makefile.objs | 3
-rw-r--r--  target/cris/cpu-qom.h | 55
-rw-r--r--  target/cris/cpu.c | 357
-rw-r--r--  target/cris/cpu.h | 309
-rw-r--r--  target/cris/crisv10-decode.h | 108
-rw-r--r--  target/cris/crisv32-decode.h | 133
-rw-r--r--  target/cris/gdbstub.c | 131
-rw-r--r--  target/cris/helper.c | 319
-rw-r--r--  target/cris/helper.h | 24
-rw-r--r--  target/cris/machine.c | 95
-rw-r--r--  target/cris/mmu.c | 362
-rw-r--r--  target/cris/mmu.h | 17
-rw-r--r--  target/cris/op_helper.c | 639
-rw-r--r--  target/cris/opcode-cris.h | 355
-rw-r--r--  target/cris/translate.c | 3413
-rw-r--r--  target/cris/translate_v10.c | 1315
-rw-r--r--  target/i386/Makefile.objs | 7
-rw-r--r--  target/i386/TODO | 31
-rw-r--r--  target/i386/arch_dump.c | 453
-rw-r--r--  target/i386/arch_memory_mapping.c | 281
-rw-r--r--  target/i386/bpt_helper.c | 328
-rw-r--r--  target/i386/cc_helper.c | 385
-rw-r--r--  target/i386/cc_helper_template.h | 242
-rw-r--r--  target/i386/cpu-qom.h | 77
-rw-r--r--  target/i386/cpu.c | 3755
-rw-r--r--  target/i386/cpu.h | 1656
-rw-r--r--  target/i386/excp_helper.c | 137
-rw-r--r--  target/i386/fpu_helper.c | 1625
-rw-r--r--  target/i386/gdbstub.c | 234
-rw-r--r--  target/i386/helper.c | 1446
-rw-r--r--  target/i386/helper.h | 230
-rw-r--r--  target/i386/hyperv.c | 140
-rw-r--r--  target/i386/hyperv.h | 42
-rw-r--r--  target/i386/int_helper.c | 483
-rw-r--r--  target/i386/kvm-stub.c | 42
-rw-r--r--  target/i386/kvm.c | 3536
-rw-r--r--  target/i386/kvm_i386.h | 48
-rw-r--r--  target/i386/machine.c | 1047
-rw-r--r--  target/i386/mem_helper.c | 215
-rw-r--r--  target/i386/misc_helper.c | 639
-rw-r--r--  target/i386/monitor.c | 513
-rw-r--r--  target/i386/mpx_helper.c | 168
-rw-r--r--  target/i386/ops_sse.h | 2296
-rw-r--r--  target/i386/ops_sse_header.h | 360
-rw-r--r--  target/i386/seg_helper.c | 2568
-rw-r--r--  target/i386/shift_helper_template.h | 108
-rw-r--r--  target/i386/smm_helper.c | 342
-rw-r--r--  target/i386/svm.h | 222
-rw-r--r--  target/i386/svm_helper.c | 774
-rw-r--r--  target/i386/trace-events | 7
-rw-r--r--  target/i386/translate.c | 8502
-rw-r--r--  target/lm32/Makefile.objs | 4
-rw-r--r--  target/lm32/README | 45
-rw-r--r--  target/lm32/TODO | 1
-rw-r--r--  target/lm32/cpu-qom.h | 52
-rw-r--r--  target/lm32/cpu.c | 328
-rw-r--r--  target/lm32/cpu.h | 274
-rw-r--r--  target/lm32/gdbstub.c | 93
-rw-r--r--  target/lm32/helper.c | 237
-rw-r--r--  target/lm32/helper.h | 14
-rw-r--r--  target/lm32/lm32-semi.c | 212
-rw-r--r--  target/lm32/machine.c | 36
-rw-r--r--  target/lm32/op_helper.c | 162
-rw-r--r--  target/lm32/translate.c | 1254
-rw-r--r--  target/m68k/Makefile.objs | 3
-rw-r--r--  target/m68k/cpu-qom.h | 52
-rw-r--r--  target/m68k/cpu.c | 324
-rw-r--r--  target/m68k/cpu.h | 308
-rw-r--r--  target/m68k/gdbstub.c | 76
-rw-r--r--  target/m68k/helper.c | 810
-rw-r--r--  target/m68k/helper.h | 49
-rw-r--r--  target/m68k/m68k-semi.c | 462
-rw-r--r--  target/m68k/op_helper.c | 229
-rw-r--r--  target/m68k/qregs.def | 13
-rw-r--r--  target/m68k/translate.c | 3595
-rw-r--r--  target/microblaze/Makefile.objs | 3
-rw-r--r--  target/microblaze/cpu-qom.h | 52
-rw-r--r--  target/microblaze/cpu.c | 292
-rw-r--r--  target/microblaze/cpu.h | 381
-rw-r--r--  target/microblaze/gdbstub.c | 57
-rw-r--r--  target/microblaze/helper.c | 307
-rw-r--r--  target/microblaze/helper.h | 37
-rw-r--r--  target/microblaze/microblaze-decode.h | 55
-rw-r--r--  target/microblaze/mmu.c | 303
-rw-r--r--  target/microblaze/mmu.h | 90
-rw-r--r--  target/microblaze/op_helper.c | 523
-rw-r--r--  target/microblaze/translate.c | 1872
-rw-r--r--  target/mips/Makefile.objs | 4
-rw-r--r--  target/mips/TODO | 51
-rw-r--r--  target/mips/cpu-qom.h | 56
-rw-r--r--  target/mips/cpu.c | 203
-rw-r--r--  target/mips/cpu.h | 1069
-rw-r--r--  target/mips/dsp_helper.c | 3762
-rw-r--r--  target/mips/gdbstub.c | 149
-rw-r--r--  target/mips/helper.c | 969
-rw-r--r--  target/mips/helper.h | 962
-rw-r--r--  target/mips/kvm.c | 1060
-rw-r--r--  target/mips/kvm_mips.h | 26
-rw-r--r--  target/mips/lmi_helper.c | 745
-rw-r--r--  target/mips/machine.c | 302
-rw-r--r--  target/mips/mips-defs.h | 91
-rw-r--r--  target/mips/mips-semi.c | 374
-rw-r--r--  target/mips/msa_helper.c | 3453
-rw-r--r--  target/mips/op_helper.c | 4196
-rw-r--r--  target/mips/translate.c | 20423
-rw-r--r--  target/mips/translate_init.c | 944
-rw-r--r--  target/moxie/Makefile.objs | 2
-rw-r--r--  target/moxie/cpu.c | 192
-rw-r--r--  target/moxie/cpu.h | 143
-rw-r--r--  target/moxie/helper.c | 162
-rw-r--r--  target/moxie/helper.h | 5
-rw-r--r--  target/moxie/machine.c | 22
-rw-r--r--  target/moxie/machine.h | 1
-rw-r--r--  target/moxie/mmu.c | 33
-rw-r--r--  target/moxie/mmu.h | 14
-rw-r--r--  target/moxie/translate.c | 908
-rw-r--r--  target/openrisc/Makefile.objs | 5
-rw-r--r--  target/openrisc/cpu.c | 278
-rw-r--r--  target/openrisc/cpu.h | 411
-rw-r--r--  target/openrisc/exception.c | 31
-rw-r--r--  target/openrisc/exception.h | 28
-rw-r--r--  target/openrisc/exception_helper.c | 30
-rw-r--r--  target/openrisc/fpu_helper.c | 301
-rw-r--r--  target/openrisc/gdbstub.c | 84
-rw-r--r--  target/openrisc/helper.h | 66
-rw-r--r--  target/openrisc/int_helper.c | 80
-rw-r--r--  target/openrisc/interrupt.c | 87
-rw-r--r--  target/openrisc/interrupt_helper.c | 60
-rw-r--r--  target/openrisc/machine.c | 54
-rw-r--r--  target/openrisc/mmu.c | 238
-rw-r--r--  target/openrisc/mmu_helper.c | 44
-rw-r--r--  target/openrisc/sys_helper.c | 288
-rw-r--r--  target/openrisc/translate.c | 1783
-rw-r--r--  target/ppc/Makefile.objs | 17
-rw-r--r--  target/ppc/STATUS | 550
-rw-r--r--  target/ppc/arch_dump.c | 286
-rw-r--r--  target/ppc/cpu-models.c | 1426
-rw-r--r--  target/ppc/cpu-models.h | 754
-rw-r--r--  target/ppc/cpu-qom.h | 219
-rw-r--r--  target/ppc/cpu.h | 2454
-rw-r--r--  target/ppc/dfp_helper.c | 1331
-rw-r--r--  target/ppc/excp_helper.c | 1142
-rw-r--r--  target/ppc/fpu_helper.c | 2789
-rw-r--r--  target/ppc/gdbstub.c | 321
-rw-r--r--  target/ppc/helper.h | 742
-rw-r--r--  target/ppc/helper_regs.h | 189
-rw-r--r--  target/ppc/int_helper.c | 3126
-rw-r--r--  target/ppc/internal.h | 50
-rw-r--r--  target/ppc/kvm-stub.c | 20
-rw-r--r--  target/ppc/kvm.c | 2674
-rw-r--r--  target/ppc/kvm_ppc.h | 330
-rw-r--r--  target/ppc/machine.c | 615
-rw-r--r--  target/ppc/mem_helper.c | 310
-rw-r--r--  target/ppc/mfrom_table.c | 79
-rw-r--r--  target/ppc/mfrom_table_gen.c | 32
-rw-r--r--  target/ppc/misc_helper.c | 210
-rw-r--r--  target/ppc/mmu-hash32.c | 568
-rw-r--r--  target/ppc/mmu-hash32.h | 112
-rw-r--r--  target/ppc/mmu-hash64.c | 1059
-rw-r--r--  target/ppc/mmu-hash64.h | 136
-rw-r--r--  target/ppc/mmu_helper.c | 2907
-rw-r--r--  target/ppc/monitor.c | 147
-rw-r--r--  target/ppc/timebase_helper.c | 176
-rw-r--r--  target/ppc/trace-events | 5
-rw-r--r--  target/ppc/translate.c | 7200
-rw-r--r--  target/ppc/translate/dfp-impl.inc.c | 232
-rw-r--r--  target/ppc/translate/dfp-ops.inc.c | 165
-rw-r--r--  target/ppc/translate/fp-impl.inc.c | 1070
-rw-r--r--  target/ppc/translate/fp-ops.inc.c | 111
-rw-r--r--  target/ppc/translate/spe-impl.inc.c | 1229
-rw-r--r--  target/ppc/translate/spe-ops.inc.c | 105
-rw-r--r--  target/ppc/translate/vmx-impl.inc.c | 1113
-rw-r--r--  target/ppc/translate/vmx-ops.inc.c | 294
-rw-r--r--  target/ppc/translate/vsx-impl.inc.c | 1009
-rw-r--r--  target/ppc/translate/vsx-ops.inc.c | 300
-rw-r--r--  target/ppc/translate_init.c | 10601
-rw-r--r--  target/ppc/user_only_helper.c | 47
-rw-r--r--  target/s390x/Makefile.objs | 25
-rw-r--r--  target/s390x/arch_dump.c | 249
-rw-r--r--  target/s390x/cc_helper.c | 570
-rw-r--r--  target/s390x/cpu-qom.h | 66
-rw-r--r--  target/s390x/cpu.c | 462
-rw-r--r--  target/s390x/cpu.h | 1296
-rw-r--r--  target/s390x/cpu_features.c | 404
-rw-r--r--  target/s390x/cpu_features.h | 93
-rw-r--r--  target/s390x/cpu_features_def.h | 231
-rw-r--r--  target/s390x/cpu_models.c | 1100
-rw-r--r--  target/s390x/cpu_models.h | 119
-rw-r--r--  target/s390x/fpu_helper.c | 749
-rw-r--r--  target/s390x/gdbstub.c | 313
-rw-r--r--  target/s390x/gen-features.c | 592
-rw-r--r--  target/s390x/helper.c | 721
-rw-r--r--  target/s390x/helper.h | 133
-rw-r--r--  target/s390x/insn-data.def | 931
-rw-r--r--  target/s390x/insn-format.def | 55
-rw-r--r--  target/s390x/int_helper.c | 156
-rw-r--r--  target/s390x/interrupt.c | 120
-rw-r--r--  target/s390x/ioinst.c | 834
-rw-r--r--  target/s390x/kvm.c | 2653
-rw-r--r--  target/s390x/machine.c | 193
-rw-r--r--  target/s390x/mem_helper.c | 1202
-rw-r--r--  target/s390x/misc_helper.c | 653
-rw-r--r--  target/s390x/mmu_helper.c | 499
-rw-r--r--  target/s390x/trace-events | 22
-rw-r--r--  target/s390x/translate.c | 5452
-rw-r--r--  target/sh4/Makefile.objs | 3
-rw-r--r--  target/sh4/README.sh4 | 150
-rw-r--r--  target/sh4/cpu-qom.h | 65
-rw-r--r--  target/sh4/cpu.c | 332
-rw-r--r--  target/sh4/cpu.h | 391
-rw-r--r--  target/sh4/gdbstub.c | 147
-rw-r--r--  target/sh4/helper.c | 872
-rw-r--r--  target/sh4/helper.h | 45
-rw-r--r--  target/sh4/monitor.c | 53
-rw-r--r--  target/sh4/op_helper.c | 498
-rw-r--r--  target/sh4/translate.c | 1944
-rw-r--r--  target/sparc/Makefile.objs | 7
-rw-r--r--  target/sparc/TODO | 88
-rw-r--r--  target/sparc/asi.h | 311
-rw-r--r--  target/sparc/cc_helper.c | 471
-rw-r--r--  target/sparc/cpu-qom.h | 56
-rw-r--r--  target/sparc/cpu.c | 895
-rw-r--r--  target/sparc/cpu.h | 779
-rw-r--r--  target/sparc/fop_helper.c | 400
-rw-r--r--  target/sparc/gdbstub.c | 209
-rw-r--r--  target/sparc/helper.c | 257
-rw-r--r--  target/sparc/helper.h | 168
-rw-r--r--  target/sparc/int32_helper.c | 175
-rw-r--r--  target/sparc/int64_helper.c | 205
-rw-r--r--  target/sparc/ldst_helper.c | 1709
-rw-r--r--  target/sparc/machine.c | 194
-rw-r--r--  target/sparc/mmu_helper.c | 880
-rw-r--r--  target/sparc/monitor.c | 159
-rw-r--r--  target/sparc/trace-events | 28
-rw-r--r--  target/sparc/translate.c | 5924
-rw-r--r--  target/sparc/vis_helper.c | 490
-rw-r--r--  target/sparc/win_helper.c | 400
-rw-r--r--  target/tilegx/Makefile.objs | 1
-rw-r--r--  target/tilegx/cpu.c | 187
-rw-r--r--  target/tilegx/cpu.h | 178
-rw-r--r--  target/tilegx/helper.c | 163
-rw-r--r--  target/tilegx/helper.h | 26
-rw-r--r--  target/tilegx/opcode_tilegx.h | 1406
-rw-r--r--  target/tilegx/simd_helper.c | 166
-rw-r--r--  target/tilegx/spr_def_64.h | 216
-rw-r--r--  target/tilegx/translate.c | 2457
-rw-r--r--  target/tricore/Makefile.objs | 1
-rw-r--r--  target/tricore/cpu-qom.h | 44
-rw-r--r--  target/tricore/cpu.c | 220
-rw-r--r--  target/tricore/cpu.h | 424
-rw-r--r--  target/tricore/csfr.def | 124
-rw-r--r--  target/tricore/fpu_helper.c | 217
-rw-r--r--  target/tricore/helper.c | 146
-rw-r--r--  target/tricore/helper.h | 151
-rw-r--r--  target/tricore/op_helper.c | 2842
-rw-r--r--  target/tricore/translate.c | 8869
-rw-r--r--  target/tricore/tricore-defs.h | 28
-rw-r--r--  target/tricore/tricore-opcodes.h | 1463
-rw-r--r--  target/unicore32/Makefile.objs | 4
-rw-r--r--  target/unicore32/cpu-qom.h | 41
-rw-r--r--  target/unicore32/cpu.c | 204
-rw-r--r--  target/unicore32/cpu.h | 188
-rw-r--r--  target/unicore32/helper.c | 263
-rw-r--r--  target/unicore32/helper.h | 65
-rw-r--r--  target/unicore32/op_helper.c | 261
-rw-r--r--  target/unicore32/softmmu.c | 278
-rw-r--r--  target/unicore32/translate.c | 2111
-rw-r--r--  target/unicore32/ucf64_helper.c | 325
-rw-r--r--  target/xtensa/Makefile.objs | 7
-rw-r--r--  target/xtensa/core-dc232b.c | 50
-rw-r--r--  target/xtensa/core-dc232b/core-isa.h | 422
-rw-r--r--  target/xtensa/core-dc232b/gdb-config.c | 261
-rw-r--r--  target/xtensa/core-dc233c.c | 51
-rw-r--r--  target/xtensa/core-dc233c/core-isa.h | 473
-rw-r--r--  target/xtensa/core-dc233c/gdb-config.c | 145
-rw-r--r--  target/xtensa/core-fsf.c | 49
-rw-r--r--  target/xtensa/core-fsf/core-isa.h | 360
-rw-r--r--  target/xtensa/cpu-qom.h | 66
-rw-r--r--  target/xtensa/cpu.c | 184
-rw-r--r--  target/xtensa/cpu.h | 587
-rw-r--r--  target/xtensa/gdbstub.c | 128
-rw-r--r--  target/xtensa/helper.c | 730
-rw-r--r--  target/xtensa/helper.h | 58
-rwxr-xr-x  target/xtensa/import_core.sh | 51
-rw-r--r--  target/xtensa/monitor.c | 35
-rw-r--r--  target/xtensa/op_helper.c | 984
-rw-r--r--  target/xtensa/overlay_tool.h | 602
-rw-r--r--  target/xtensa/translate.c | 3225
-rw-r--r--  target/xtensa/xtensa-semi.c | 318
339 files changed, 270836 insertions, 0 deletions
diff --git a/target/alpha/Makefile.objs b/target/alpha/Makefile.objs
new file mode 100644
index 0000000000..63664629f6
--- /dev/null
+++ b/target/alpha/Makefile.objs
@@ -0,0 +1,4 @@
+obj-$(CONFIG_SOFTMMU) += machine.o
+obj-y += translate.o helper.o cpu.o
+obj-y += int_helper.o fpu_helper.o vax_helper.o sys_helper.o mem_helper.o
+obj-y += gdbstub.o
diff --git a/target/alpha/STATUS b/target/alpha/STATUS
new file mode 100644
index 0000000000..6c9744569e
--- /dev/null
+++ b/target/alpha/STATUS
@@ -0,0 +1,28 @@
+(to be completed)
+
+Alpha emulation structure:
+cpu.h : CPU definitions globally exported
+exec.h : CPU definitions used only for translated code execution
+helper.c : helpers that can be called either by the translated code
+ or the QEMU core, including the exception handler.
+op_helper.c : helpers that can be called only from TCG
+helper.h : TCG helpers prototypes
+translate.c : Alpha instructions to micro-operations translator
+
+Code translator status:
+The Alpha CPU instruction emulation should be quite complete with the
+limitation that the VAX floating-point loads and stores are not tested.
+The 4 MMU modes are implemented.
+
+Linux user mode emulation status:
+A few programs start to run. Most crash at a certain point, dereferencing a
+NULL pointer. It seems that the UNIQUE register is not initialized properly.
+Old executables that do not rely on TLS support may appear to run, but
+this remains to be verified...
+
+Full system emulation status:
+* Alpha PALCode emulation is in a very early stage and is not sufficient
+ to run any real OS. The alpha-softmmu target is not enabled for now.
+* no hardware platform description is implemented
+* there might be problems in the Alpha PALCode dedicated instructions
+ that would prevent the use of a native PALCode image.
diff --git a/target/alpha/cpu-qom.h b/target/alpha/cpu-qom.h
new file mode 100644
index 0000000000..bae4945344
--- /dev/null
+++ b/target/alpha/cpu-qom.h
@@ -0,0 +1,52 @@
+/*
+ * QEMU Alpha CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+#ifndef QEMU_ALPHA_CPU_QOM_H
+#define QEMU_ALPHA_CPU_QOM_H
+
+#include "qom/cpu.h"
+
+#define TYPE_ALPHA_CPU "alpha-cpu"
+
+#define ALPHA_CPU_CLASS(klass) \
+ OBJECT_CLASS_CHECK(AlphaCPUClass, (klass), TYPE_ALPHA_CPU)
+#define ALPHA_CPU(obj) \
+ OBJECT_CHECK(AlphaCPU, (obj), TYPE_ALPHA_CPU)
+#define ALPHA_CPU_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(AlphaCPUClass, (obj), TYPE_ALPHA_CPU)
+
+/**
+ * AlphaCPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ *
+ * An Alpha CPU model.
+ */
+typedef struct AlphaCPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+ void (*parent_reset)(CPUState *cpu);
+} AlphaCPUClass;
+
+typedef struct AlphaCPU AlphaCPU;
+
+#endif
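
As an aside (not part of this patch), a minimal sketch of how the checked-cast
macros above are typically used; example_get_pc() is a made-up name and assumes
the full AlphaCPU definition from cpu.h is visible:

    /* Sketch only: ALPHA_CPU() performs a checked downcast from the generic
       CPUState, ALPHA_CPU_GET_CLASS() fetches the class for hook chaining. */
    static uint64_t example_get_pc(CPUState *cs)
    {
        AlphaCPU *cpu = ALPHA_CPU(cs);
        AlphaCPUClass *acc = ALPHA_CPU_GET_CLASS(cpu);

        (void)acc;           /* cpu.c chains acc->parent_realize the same way */
        return cpu->env.pc;  /* 'env' comes from the full AlphaCPU in cpu.h */
    }
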
diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c
new file mode 100644
index 0000000000..30d77ce71c
--- /dev/null
+++ b/target/alpha/cpu.c
@@ -0,0 +1,342 @@
+/*
+ * QEMU Alpha CPU
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "qemu-common.h"
+#include "migration/vmstate.h"
+#include "exec/exec-all.h"
+
+
+static void alpha_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ AlphaCPU *cpu = ALPHA_CPU(cs);
+
+ cpu->env.pc = value;
+}
+
+static bool alpha_cpu_has_work(CPUState *cs)
+{
+ /* Here we are checking to see if the CPU should wake up from HALT.
+ We will have gotten into this state only for WTINT from PALmode. */
+ /* ??? I'm not sure how the IPL state works with WTINT to keep a CPU
+ asleep even if (some) interrupts have been asserted. For now,
+ assume that if a CPU really wants to stay asleep, it will mask
+ interrupts at the chipset level, which will prevent these bits
+ from being set in the first place. */
+ return cs->interrupt_request & (CPU_INTERRUPT_HARD
+ | CPU_INTERRUPT_TIMER
+ | CPU_INTERRUPT_SMP
+ | CPU_INTERRUPT_MCHK);
+}
+
+static void alpha_cpu_disas_set_info(CPUState *cpu, disassemble_info *info)
+{
+ info->mach = bfd_mach_alpha_ev6;
+ info->print_insn = print_insn_alpha;
+}
+
+static void alpha_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ AlphaCPUClass *acc = ALPHA_CPU_GET_CLASS(dev);
+ Error *local_err = NULL;
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ qemu_init_vcpu(cs);
+
+ acc->parent_realize(dev, errp);
+}
+
+/* Sort alphabetically by type name. */
+static gint alpha_cpu_list_compare(gconstpointer a, gconstpointer b)
+{
+ ObjectClass *class_a = (ObjectClass *)a;
+ ObjectClass *class_b = (ObjectClass *)b;
+ const char *name_a, *name_b;
+
+ name_a = object_class_get_name(class_a);
+ name_b = object_class_get_name(class_b);
+ return strcmp(name_a, name_b);
+}
+
+static void alpha_cpu_list_entry(gpointer data, gpointer user_data)
+{
+ ObjectClass *oc = data;
+ CPUListState *s = user_data;
+
+ (*s->cpu_fprintf)(s->file, " %s\n",
+ object_class_get_name(oc));
+}
+
+void alpha_cpu_list(FILE *f, fprintf_function cpu_fprintf)
+{
+ CPUListState s = {
+ .file = f,
+ .cpu_fprintf = cpu_fprintf,
+ };
+ GSList *list;
+
+ list = object_class_get_list(TYPE_ALPHA_CPU, false);
+ list = g_slist_sort(list, alpha_cpu_list_compare);
+ (*cpu_fprintf)(f, "Available CPUs:\n");
+ g_slist_foreach(list, alpha_cpu_list_entry, &s);
+ g_slist_free(list);
+}
+
+/* Models */
+
+#define TYPE(model) model "-" TYPE_ALPHA_CPU
+
+typedef struct AlphaCPUAlias {
+ const char *alias;
+ const char *typename;
+} AlphaCPUAlias;
+
+static const AlphaCPUAlias alpha_cpu_aliases[] = {
+ { "21064", TYPE("ev4") },
+ { "21164", TYPE("ev5") },
+ { "21164a", TYPE("ev56") },
+ { "21164pc", TYPE("pca56") },
+ { "21264", TYPE("ev6") },
+ { "21264a", TYPE("ev67") },
+};
+
+static ObjectClass *alpha_cpu_class_by_name(const char *cpu_model)
+{
+ ObjectClass *oc = NULL;
+ char *typename;
+ int i;
+
+ if (cpu_model == NULL) {
+ return NULL;
+ }
+
+ oc = object_class_by_name(cpu_model);
+ if (oc != NULL && object_class_dynamic_cast(oc, TYPE_ALPHA_CPU) != NULL &&
+ !object_class_is_abstract(oc)) {
+ return oc;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(alpha_cpu_aliases); i++) {
+ if (strcmp(cpu_model, alpha_cpu_aliases[i].alias) == 0) {
+ oc = object_class_by_name(alpha_cpu_aliases[i].typename);
+ assert(oc != NULL && !object_class_is_abstract(oc));
+ return oc;
+ }
+ }
+
+ typename = g_strdup_printf("%s-" TYPE_ALPHA_CPU, cpu_model);
+ oc = object_class_by_name(typename);
+ g_free(typename);
+ if (oc != NULL && object_class_is_abstract(oc)) {
+ oc = NULL;
+ }
+ return oc;
+}
+
+AlphaCPU *cpu_alpha_init(const char *cpu_model)
+{
+ AlphaCPU *cpu;
+ ObjectClass *cpu_class;
+
+ cpu_class = alpha_cpu_class_by_name(cpu_model);
+ if (cpu_class == NULL) {
+ /* Default to ev67; no reason not to emulate insns by default. */
+ cpu_class = object_class_by_name(TYPE("ev67"));
+ }
+ cpu = ALPHA_CPU(object_new(object_class_get_name(cpu_class)));
+
+ object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
+
+ return cpu;
+}
+
+static void ev4_cpu_initfn(Object *obj)
+{
+ AlphaCPU *cpu = ALPHA_CPU(obj);
+ CPUAlphaState *env = &cpu->env;
+
+ env->implver = IMPLVER_2106x;
+}
+
+static const TypeInfo ev4_cpu_type_info = {
+ .name = TYPE("ev4"),
+ .parent = TYPE_ALPHA_CPU,
+ .instance_init = ev4_cpu_initfn,
+};
+
+static void ev5_cpu_initfn(Object *obj)
+{
+ AlphaCPU *cpu = ALPHA_CPU(obj);
+ CPUAlphaState *env = &cpu->env;
+
+ env->implver = IMPLVER_21164;
+}
+
+static const TypeInfo ev5_cpu_type_info = {
+ .name = TYPE("ev5"),
+ .parent = TYPE_ALPHA_CPU,
+ .instance_init = ev5_cpu_initfn,
+};
+
+static void ev56_cpu_initfn(Object *obj)
+{
+ AlphaCPU *cpu = ALPHA_CPU(obj);
+ CPUAlphaState *env = &cpu->env;
+
+ env->amask |= AMASK_BWX;
+}
+
+static const TypeInfo ev56_cpu_type_info = {
+ .name = TYPE("ev56"),
+ .parent = TYPE("ev5"),
+ .instance_init = ev56_cpu_initfn,
+};
+
+static void pca56_cpu_initfn(Object *obj)
+{
+ AlphaCPU *cpu = ALPHA_CPU(obj);
+ CPUAlphaState *env = &cpu->env;
+
+ env->amask |= AMASK_MVI;
+}
+
+static const TypeInfo pca56_cpu_type_info = {
+ .name = TYPE("pca56"),
+ .parent = TYPE("ev56"),
+ .instance_init = pca56_cpu_initfn,
+};
+
+static void ev6_cpu_initfn(Object *obj)
+{
+ AlphaCPU *cpu = ALPHA_CPU(obj);
+ CPUAlphaState *env = &cpu->env;
+
+ env->implver = IMPLVER_21264;
+ env->amask = AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP;
+}
+
+static const TypeInfo ev6_cpu_type_info = {
+ .name = TYPE("ev6"),
+ .parent = TYPE_ALPHA_CPU,
+ .instance_init = ev6_cpu_initfn,
+};
+
+static void ev67_cpu_initfn(Object *obj)
+{
+ AlphaCPU *cpu = ALPHA_CPU(obj);
+ CPUAlphaState *env = &cpu->env;
+
+ env->amask |= AMASK_CIX | AMASK_PREFETCH;
+}
+
+static const TypeInfo ev67_cpu_type_info = {
+ .name = TYPE("ev67"),
+ .parent = TYPE("ev6"),
+ .instance_init = ev67_cpu_initfn,
+};
+
+static const TypeInfo ev68_cpu_type_info = {
+ .name = TYPE("ev68"),
+ .parent = TYPE("ev67"),
+};
+
+static void alpha_cpu_initfn(Object *obj)
+{
+ CPUState *cs = CPU(obj);
+ AlphaCPU *cpu = ALPHA_CPU(obj);
+ CPUAlphaState *env = &cpu->env;
+
+ cs->env_ptr = env;
+ tlb_flush(cs, 1);
+
+ alpha_translate_init();
+
+#if defined(CONFIG_USER_ONLY)
+ env->ps = PS_USER_MODE;
+ cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
+ | FPCR_UNFD | FPCR_INED | FPCR_DNOD
+ | FPCR_DYN_NORMAL));
+#endif
+ env->lock_addr = -1;
+ env->fen = 1;
+}
+
+static void alpha_cpu_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(oc);
+ AlphaCPUClass *acc = ALPHA_CPU_CLASS(oc);
+
+ acc->parent_realize = dc->realize;
+ dc->realize = alpha_cpu_realizefn;
+
+ cc->class_by_name = alpha_cpu_class_by_name;
+ cc->has_work = alpha_cpu_has_work;
+ cc->do_interrupt = alpha_cpu_do_interrupt;
+ cc->cpu_exec_interrupt = alpha_cpu_exec_interrupt;
+ cc->dump_state = alpha_cpu_dump_state;
+ cc->set_pc = alpha_cpu_set_pc;
+ cc->gdb_read_register = alpha_cpu_gdb_read_register;
+ cc->gdb_write_register = alpha_cpu_gdb_write_register;
+#ifdef CONFIG_USER_ONLY
+ cc->handle_mmu_fault = alpha_cpu_handle_mmu_fault;
+#else
+ cc->do_unassigned_access = alpha_cpu_unassigned_access;
+ cc->do_unaligned_access = alpha_cpu_do_unaligned_access;
+ cc->get_phys_page_debug = alpha_cpu_get_phys_page_debug;
+ dc->vmsd = &vmstate_alpha_cpu;
+#endif
+ cc->disas_set_info = alpha_cpu_disas_set_info;
+
+ cc->gdb_num_core_regs = 67;
+}
+
+static const TypeInfo alpha_cpu_type_info = {
+ .name = TYPE_ALPHA_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(AlphaCPU),
+ .instance_init = alpha_cpu_initfn,
+ .abstract = true,
+ .class_size = sizeof(AlphaCPUClass),
+ .class_init = alpha_cpu_class_init,
+};
+
+static void alpha_cpu_register_types(void)
+{
+ type_register_static(&alpha_cpu_type_info);
+ type_register_static(&ev4_cpu_type_info);
+ type_register_static(&ev5_cpu_type_info);
+ type_register_static(&ev56_cpu_type_info);
+ type_register_static(&pca56_cpu_type_info);
+ type_register_static(&ev6_cpu_type_info);
+ type_register_static(&ev67_cpu_type_info);
+ type_register_static(&ev68_cpu_type_info);
+}
+
+type_init(alpha_cpu_register_types)
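
As an aside (not part of this patch), the model lookup above accepts either the
marketing name or the core name; a hedged usage sketch:

    /* Sketch only: both spellings resolve to the same QOM type, and an
       unknown model string falls back to ev67 rather than failing. */
    AlphaCPU *a = cpu_alpha_init("21264");   /* alias table -> "ev6-alpha-cpu"   */
    AlphaCPU *b = cpu_alpha_init("ev6");     /* "%s-alpha-cpu" -> "ev6-alpha-cpu" */
    AlphaCPU *c = cpu_alpha_init("unknown"); /* class_by_name fails -> ev67       */
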
diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h
new file mode 100644
index 0000000000..b08d1601d1
--- /dev/null
+++ b/target/alpha/cpu.h
@@ -0,0 +1,518 @@
+/*
+ * Alpha emulation cpu definitions for qemu.
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef ALPHA_CPU_H
+#define ALPHA_CPU_H
+
+#include "qemu-common.h"
+#include "cpu-qom.h"
+
+#define TARGET_LONG_BITS 64
+#define ALIGNED_ONLY
+
+#define CPUArchState struct CPUAlphaState
+
+#include "exec/cpu-defs.h"
+
+#include "fpu/softfloat.h"
+
+#define ICACHE_LINE_SIZE 32
+#define DCACHE_LINE_SIZE 32
+
+#define TARGET_PAGE_BITS 13
+
+#ifdef CONFIG_USER_ONLY
+/* ??? The kernel likes to give addresses in high memory. If the host has
+ more virtual address space than the guest, this can lead to impossible
+ allocations. Honor the long-standing assumption that only kernel addrs
+ are negative, but otherwise allow allocations anywhere. This could lead
+ to tricky emulation problems for programs doing tagged addressing, but
+ far fewer programs do that than encounter the impossible allocation problem. */
+#define TARGET_PHYS_ADDR_SPACE_BITS 63
+#define TARGET_VIRT_ADDR_SPACE_BITS 63
+#else
+/* ??? EV4 has 34 phys addr bits, EV5 has 40, EV6 has 44. */
+#define TARGET_PHYS_ADDR_SPACE_BITS 44
+#define TARGET_VIRT_ADDR_SPACE_BITS (30 + TARGET_PAGE_BITS)
+#endif
+
+/* Alpha major type */
+enum {
+ ALPHA_EV3 = 1,
+ ALPHA_EV4 = 2,
+ ALPHA_SIM = 3,
+ ALPHA_LCA = 4,
+ ALPHA_EV5 = 5, /* 21164 */
+ ALPHA_EV45 = 6, /* 21064A */
+ ALPHA_EV56 = 7, /* 21164A */
+};
+
+/* EV4 minor type */
+enum {
+ ALPHA_EV4_2 = 0,
+ ALPHA_EV4_3 = 1,
+};
+
+/* LCA minor type */
+enum {
+ ALPHA_LCA_1 = 1, /* 21066 */
+ ALPHA_LCA_2 = 2, /* 20166 */
+ ALPHA_LCA_3 = 3, /* 21068 */
+ ALPHA_LCA_4 = 4, /* 21068 */
+ ALPHA_LCA_5 = 5, /* 21066A */
+ ALPHA_LCA_6 = 6, /* 21068A */
+};
+
+/* EV5 minor type */
+enum {
+ ALPHA_EV5_1 = 1, /* Rev BA, CA */
+ ALPHA_EV5_2 = 2, /* Rev DA, EA */
+ ALPHA_EV5_3 = 3, /* Pass 3 */
+ ALPHA_EV5_4 = 4, /* Pass 3.2 */
+ ALPHA_EV5_5 = 5, /* Pass 4 */
+};
+
+/* EV45 minor type */
+enum {
+ ALPHA_EV45_1 = 1, /* Pass 1 */
+ ALPHA_EV45_2 = 2, /* Pass 1.1 */
+ ALPHA_EV45_3 = 3, /* Pass 2 */
+};
+
+/* EV56 minor type */
+enum {
+ ALPHA_EV56_1 = 1, /* Pass 1 */
+ ALPHA_EV56_2 = 2, /* Pass 2 */
+};
+
+enum {
+ IMPLVER_2106x = 0, /* EV4, EV45 & LCA45 */
+ IMPLVER_21164 = 1, /* EV5, EV56 & PCA45 */
+ IMPLVER_21264 = 2, /* EV6, EV67 & EV68x */
+ IMPLVER_21364 = 3, /* EV7 & EV79 */
+};
+
+enum {
+ AMASK_BWX = 0x00000001,
+ AMASK_FIX = 0x00000002,
+ AMASK_CIX = 0x00000004,
+ AMASK_MVI = 0x00000100,
+ AMASK_TRAP = 0x00000200,
+ AMASK_PREFETCH = 0x00001000,
+};
+
+enum {
+ VAX_ROUND_NORMAL = 0,
+ VAX_ROUND_CHOPPED,
+};
+
+enum {
+ IEEE_ROUND_NORMAL = 0,
+ IEEE_ROUND_DYNAMIC,
+ IEEE_ROUND_PLUS,
+ IEEE_ROUND_MINUS,
+ IEEE_ROUND_CHOPPED,
+};
+
+/* IEEE floating-point operations encoding */
+/* Trap mode */
+enum {
+ FP_TRAP_I = 0x0,
+ FP_TRAP_U = 0x1,
+ FP_TRAP_S = 0x4,
+ FP_TRAP_SU = 0x5,
+ FP_TRAP_SUI = 0x7,
+};
+
+/* Rounding mode */
+enum {
+ FP_ROUND_CHOPPED = 0x0,
+ FP_ROUND_MINUS = 0x1,
+ FP_ROUND_NORMAL = 0x2,
+ FP_ROUND_DYNAMIC = 0x3,
+};
+
+/* FPCR bits -- right-shifted 32 so we can use a uint32_t. */
+#define FPCR_SUM (1U << (63 - 32))
+#define FPCR_INED (1U << (62 - 32))
+#define FPCR_UNFD (1U << (61 - 32))
+#define FPCR_UNDZ (1U << (60 - 32))
+#define FPCR_DYN_SHIFT (58 - 32)
+#define FPCR_DYN_CHOPPED (0U << FPCR_DYN_SHIFT)
+#define FPCR_DYN_MINUS (1U << FPCR_DYN_SHIFT)
+#define FPCR_DYN_NORMAL (2U << FPCR_DYN_SHIFT)
+#define FPCR_DYN_PLUS (3U << FPCR_DYN_SHIFT)
+#define FPCR_DYN_MASK (3U << FPCR_DYN_SHIFT)
+#define FPCR_IOV (1U << (57 - 32))
+#define FPCR_INE (1U << (56 - 32))
+#define FPCR_UNF (1U << (55 - 32))
+#define FPCR_OVF (1U << (54 - 32))
+#define FPCR_DZE (1U << (53 - 32))
+#define FPCR_INV (1U << (52 - 32))
+#define FPCR_OVFD (1U << (51 - 32))
+#define FPCR_DZED (1U << (50 - 32))
+#define FPCR_INVD (1U << (49 - 32))
+#define FPCR_DNZ (1U << (48 - 32))
+#define FPCR_DNOD (1U << (47 - 32))
+#define FPCR_STATUS_MASK (FPCR_IOV | FPCR_INE | FPCR_UNF \
+ | FPCR_OVF | FPCR_DZE | FPCR_INV)
+
+/* The silly software trap enables implemented by the kernel emulation.
+ These are more or less architecturally required, since the real hardware
+ has read-as-zero bits in the FPCR when the features aren't implemented.
+ For the purposes of QEMU, we pretend the FPCR can hold everything. */
+#define SWCR_TRAP_ENABLE_INV (1U << 1)
+#define SWCR_TRAP_ENABLE_DZE (1U << 2)
+#define SWCR_TRAP_ENABLE_OVF (1U << 3)
+#define SWCR_TRAP_ENABLE_UNF (1U << 4)
+#define SWCR_TRAP_ENABLE_INE (1U << 5)
+#define SWCR_TRAP_ENABLE_DNO (1U << 6)
+#define SWCR_TRAP_ENABLE_MASK ((1U << 7) - (1U << 1))
+
+#define SWCR_MAP_DMZ (1U << 12)
+#define SWCR_MAP_UMZ (1U << 13)
+#define SWCR_MAP_MASK (SWCR_MAP_DMZ | SWCR_MAP_UMZ)
+
+#define SWCR_STATUS_INV (1U << 17)
+#define SWCR_STATUS_DZE (1U << 18)
+#define SWCR_STATUS_OVF (1U << 19)
+#define SWCR_STATUS_UNF (1U << 20)
+#define SWCR_STATUS_INE (1U << 21)
+#define SWCR_STATUS_DNO (1U << 22)
+#define SWCR_STATUS_MASK ((1U << 23) - (1U << 17))
+
+#define SWCR_MASK (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK | SWCR_STATUS_MASK)
+
+/* MMU modes definitions */
+
+/* Alpha has 5 MMU modes: PALcode, Kernel, Executive, Supervisor, and User.
+ The Unix PALcode only exposes the kernel and user modes; presumably
+ executive and supervisor are used by VMS.
+
+ PALcode itself uses physical mode for code and kernel mode for data;
+ there are PALmode instructions that can access data via physical mode
+ or via an os-installed "alternate mode", which is one of the 4 above.
+
+ That said, we're only emulating Unix PALcode, and not attempting VMS,
+ so we don't need to implement Executive and Supervisor. QEMU's own
+ PALcode cheats and uses the KSEG mapping for its code+data rather than
+ physical addresses. */
+
+#define NB_MMU_MODES 3
+
+#define MMU_MODE0_SUFFIX _kernel
+#define MMU_MODE1_SUFFIX _user
+#define MMU_KERNEL_IDX 0
+#define MMU_USER_IDX 1
+#define MMU_PHYS_IDX 2
+
+typedef struct CPUAlphaState CPUAlphaState;
+
+struct CPUAlphaState {
+ uint64_t ir[31];
+ float64 fir[31];
+ uint64_t pc;
+ uint64_t unique;
+ uint64_t lock_addr;
+ uint64_t lock_value;
+
+ /* The FPCR, and disassembled portions thereof. */
+ uint32_t fpcr;
+ uint32_t fpcr_exc_enable;
+ float_status fp_status;
+ uint8_t fpcr_dyn_round;
+ uint8_t fpcr_flush_to_zero;
+
+ /* The Internal Processor Registers. Some of these we assume always
+ exist for use in user-mode. */
+ uint8_t ps;
+ uint8_t intr_flag;
+ uint8_t pal_mode;
+ uint8_t fen;
+
+ uint32_t pcc_ofs;
+
+ /* These pass data from the exception logic in the translator and
+ helpers to the OS entry point. This is used for both system
+ emulation and user-mode. */
+ uint64_t trap_arg0;
+ uint64_t trap_arg1;
+ uint64_t trap_arg2;
+
+#if !defined(CONFIG_USER_ONLY)
+ /* The internal data required by our emulation of the Unix PALcode. */
+ uint64_t exc_addr;
+ uint64_t palbr;
+ uint64_t ptbr;
+ uint64_t vptptr;
+ uint64_t sysval;
+ uint64_t usp;
+ uint64_t shadow[8];
+ uint64_t scratch[24];
+#endif
+
+ /* This alarm doesn't exist in real hardware; we wish it did. */
+ uint64_t alarm_expire;
+
+ /* Those resources are used only in QEMU core */
+ CPU_COMMON
+
+ int error_code;
+
+ uint32_t features;
+ uint32_t amask;
+ int implver;
+};
+
+/**
+ * AlphaCPU:
+ * @env: #CPUAlphaState
+ *
+ * An Alpha CPU.
+ */
+struct AlphaCPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPUAlphaState env;
+
+ /* This alarm doesn't exist in real hardware; we wish it did. */
+ QEMUTimer *alarm_timer;
+};
+
+static inline AlphaCPU *alpha_env_get_cpu(CPUAlphaState *env)
+{
+ return container_of(env, AlphaCPU, env);
+}
+
+#define ENV_GET_CPU(e) CPU(alpha_env_get_cpu(e))
+
+#define ENV_OFFSET offsetof(AlphaCPU, env)
+
+#ifndef CONFIG_USER_ONLY
+extern const struct VMStateDescription vmstate_alpha_cpu;
+#endif
+
+void alpha_cpu_do_interrupt(CPUState *cpu);
+bool alpha_cpu_exec_interrupt(CPUState *cpu, int int_req);
+void alpha_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
+ int flags);
+hwaddr alpha_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+int alpha_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
+int alpha_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+void alpha_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr);
+
+#define cpu_list alpha_cpu_list
+#define cpu_signal_handler cpu_alpha_signal_handler
+
+#include "exec/cpu-all.h"
+
+enum {
+ FEATURE_ASN = 0x00000001,
+ FEATURE_SPS = 0x00000002,
+ FEATURE_VIRBND = 0x00000004,
+ FEATURE_TBCHK = 0x00000008,
+};
+
+enum {
+ EXCP_RESET,
+ EXCP_MCHK,
+ EXCP_SMP_INTERRUPT,
+ EXCP_CLK_INTERRUPT,
+ EXCP_DEV_INTERRUPT,
+ EXCP_MMFAULT,
+ EXCP_UNALIGN,
+ EXCP_OPCDEC,
+ EXCP_ARITH,
+ EXCP_FEN,
+ EXCP_CALL_PAL,
+};
+
+/* Alpha-specific interrupt pending bits. */
+#define CPU_INTERRUPT_TIMER CPU_INTERRUPT_TGT_EXT_0
+#define CPU_INTERRUPT_SMP CPU_INTERRUPT_TGT_EXT_1
+#define CPU_INTERRUPT_MCHK CPU_INTERRUPT_TGT_EXT_2
+
+/* OSF/1 Page table bits. */
+enum {
+ PTE_VALID = 0x0001,
+ PTE_FOR = 0x0002, /* used for page protection (fault on read) */
+ PTE_FOW = 0x0004, /* used for page protection (fault on write) */
+ PTE_FOE = 0x0008, /* used for page protection (fault on exec) */
+ PTE_ASM = 0x0010,
+ PTE_KRE = 0x0100,
+ PTE_URE = 0x0200,
+ PTE_KWE = 0x1000,
+ PTE_UWE = 0x2000
+};
+
+/* Hardware interrupt (entInt) constants. */
+enum {
+ INT_K_IP,
+ INT_K_CLK,
+ INT_K_MCHK,
+ INT_K_DEV,
+ INT_K_PERF,
+};
+
+/* Memory management (entMM) constants. */
+enum {
+ MM_K_TNV,
+ MM_K_ACV,
+ MM_K_FOR,
+ MM_K_FOE,
+ MM_K_FOW
+};
+
+/* Arithmetic exception (entArith) constants. */
+enum {
+ EXC_M_SWC = 1, /* Software completion */
+ EXC_M_INV = 2, /* Invalid operation */
+ EXC_M_DZE = 4, /* Division by zero */
+ EXC_M_FOV = 8, /* Overflow */
+ EXC_M_UNF = 16, /* Underflow */
+ EXC_M_INE = 32, /* Inexact result */
+ EXC_M_IOV = 64 /* Integer Overflow */
+};
+
+/* Processor status constants. */
+enum {
+ /* Low 3 bits are interrupt mask level. */
+ PS_INT_MASK = 7,
+
+ /* Bits 4 and 5 are the mmu mode. The VMS PALcode uses all 4 modes;
+ The Unix PALcode only uses bit 4. */
+ PS_USER_MODE = 8
+};
+
+static inline int cpu_mmu_index(CPUAlphaState *env, bool ifetch)
+{
+ if (env->pal_mode) {
+ return MMU_KERNEL_IDX;
+ } else if (env->ps & PS_USER_MODE) {
+ return MMU_USER_IDX;
+ } else {
+ return MMU_KERNEL_IDX;
+ }
+}
+
+enum {
+ IR_V0 = 0,
+ IR_T0 = 1,
+ IR_T1 = 2,
+ IR_T2 = 3,
+ IR_T3 = 4,
+ IR_T4 = 5,
+ IR_T5 = 6,
+ IR_T6 = 7,
+ IR_T7 = 8,
+ IR_S0 = 9,
+ IR_S1 = 10,
+ IR_S2 = 11,
+ IR_S3 = 12,
+ IR_S4 = 13,
+ IR_S5 = 14,
+ IR_S6 = 15,
+ IR_FP = IR_S6,
+ IR_A0 = 16,
+ IR_A1 = 17,
+ IR_A2 = 18,
+ IR_A3 = 19,
+ IR_A4 = 20,
+ IR_A5 = 21,
+ IR_T8 = 22,
+ IR_T9 = 23,
+ IR_T10 = 24,
+ IR_T11 = 25,
+ IR_RA = 26,
+ IR_T12 = 27,
+ IR_PV = IR_T12,
+ IR_AT = 28,
+ IR_GP = 29,
+ IR_SP = 30,
+ IR_ZERO = 31,
+};
+
+void alpha_translate_init(void);
+
+AlphaCPU *cpu_alpha_init(const char *cpu_model);
+
+#define cpu_init(cpu_model) CPU(cpu_alpha_init(cpu_model))
+
+void alpha_cpu_list(FILE *f, fprintf_function cpu_fprintf);
+/* you can call this signal handler from your SIGBUS and SIGSEGV
+ signal handlers to inform the virtual CPU of exceptions. non zero
+ is returned if the signal was handled by the virtual CPU. */
+int cpu_alpha_signal_handler(int host_signum, void *pinfo,
+ void *puc);
+int alpha_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
+ int mmu_idx);
+void QEMU_NORETURN dynamic_excp(CPUAlphaState *, uintptr_t, int, int);
+void QEMU_NORETURN arith_excp(CPUAlphaState *, uintptr_t, int, uint64_t);
+
+uint64_t cpu_alpha_load_fpcr (CPUAlphaState *env);
+void cpu_alpha_store_fpcr (CPUAlphaState *env, uint64_t val);
+uint64_t cpu_alpha_load_gr(CPUAlphaState *env, unsigned reg);
+void cpu_alpha_store_gr(CPUAlphaState *env, unsigned reg, uint64_t val);
+#ifndef CONFIG_USER_ONLY
+QEMU_NORETURN void alpha_cpu_unassigned_access(CPUState *cpu, hwaddr addr,
+ bool is_write, bool is_exec,
+ int unused, unsigned size);
+#endif
+
+/* Bits in TB->FLAGS that control how translation is processed. */
+enum {
+ TB_FLAGS_PAL_MODE = 1,
+ TB_FLAGS_FEN = 2,
+ TB_FLAGS_USER_MODE = 8,
+
+ TB_FLAGS_AMASK_SHIFT = 4,
+ TB_FLAGS_AMASK_BWX = AMASK_BWX << TB_FLAGS_AMASK_SHIFT,
+ TB_FLAGS_AMASK_FIX = AMASK_FIX << TB_FLAGS_AMASK_SHIFT,
+ TB_FLAGS_AMASK_CIX = AMASK_CIX << TB_FLAGS_AMASK_SHIFT,
+ TB_FLAGS_AMASK_MVI = AMASK_MVI << TB_FLAGS_AMASK_SHIFT,
+ TB_FLAGS_AMASK_TRAP = AMASK_TRAP << TB_FLAGS_AMASK_SHIFT,
+ TB_FLAGS_AMASK_PREFETCH = AMASK_PREFETCH << TB_FLAGS_AMASK_SHIFT,
+};
+
+static inline void cpu_get_tb_cpu_state(CPUAlphaState *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *pflags)
+{
+ int flags = 0;
+
+ *pc = env->pc;
+ *cs_base = 0;
+
+ if (env->pal_mode) {
+ flags = TB_FLAGS_PAL_MODE;
+ } else {
+ flags = env->ps & PS_USER_MODE;
+ }
+ if (env->fen) {
+ flags |= TB_FLAGS_FEN;
+ }
+ flags |= env->amask << TB_FLAGS_AMASK_SHIFT;
+
+ *pflags = flags;
+}
+
+#endif /* ALPHA_CPU_H */
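
As an aside (not part of this patch), a worked example of the shifted FPCR
encoding defined above:

    /* Sketch only: FPCR_DYN_SHIFT is 58 - 32 = 26, so the dynamic rounding
       field value 2 ("normal", i.e. round-to-nearest) is stored in the 32-bit
       env->fpcr as FPCR_DYN_NORMAL == 2u << 26 == 0x08000000.  Widened back
       by 32 bits it lands in bits 59:58 of the architectural 64-bit FPCR. */
    uint32_t soft = FPCR_DYN_NORMAL | FPCR_INVD | FPCR_DZED;
    uint64_t arch = (uint64_t)soft << 32;   /* what cpu_alpha_load_fpcr() in
                                               helper.c would hand back to the
                                               guest if env->fpcr held 'soft' */
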
diff --git a/target/alpha/fpu_helper.c b/target/alpha/fpu_helper.c
new file mode 100644
index 0000000000..9645978aaa
--- /dev/null
+++ b/target/alpha/fpu_helper.c
@@ -0,0 +1,553 @@
+/*
+ * Helpers for floating point instructions.
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "fpu/softfloat.h"
+
+#define FP_STATUS (env->fp_status)
+
+
+void helper_setroundmode(CPUAlphaState *env, uint32_t val)
+{
+ set_float_rounding_mode(val, &FP_STATUS);
+}
+
+void helper_setflushzero(CPUAlphaState *env, uint32_t val)
+{
+ set_flush_to_zero(val, &FP_STATUS);
+}
+
+#define CONVERT_BIT(X, SRC, DST) \
+ (SRC > DST ? (X) / (SRC / DST) & (DST) : ((X) & SRC) * (DST / SRC))
+
+static uint32_t soft_to_fpcr_exc(CPUAlphaState *env)
+{
+ uint8_t exc = get_float_exception_flags(&FP_STATUS);
+ uint32_t ret = 0;
+
+ if (unlikely(exc)) {
+ set_float_exception_flags(0, &FP_STATUS);
+ ret |= CONVERT_BIT(exc, float_flag_invalid, FPCR_INV);
+ ret |= CONVERT_BIT(exc, float_flag_divbyzero, FPCR_DZE);
+ ret |= CONVERT_BIT(exc, float_flag_overflow, FPCR_OVF);
+ ret |= CONVERT_BIT(exc, float_flag_underflow, FPCR_UNF);
+ ret |= CONVERT_BIT(exc, float_flag_inexact, FPCR_INE);
+ }
+
+ return ret;
+}
+
+static void fp_exc_raise1(CPUAlphaState *env, uintptr_t retaddr,
+ uint32_t exc, uint32_t regno, uint32_t hw_exc)
+{
+ hw_exc |= CONVERT_BIT(exc, FPCR_INV, EXC_M_INV);
+ hw_exc |= CONVERT_BIT(exc, FPCR_DZE, EXC_M_DZE);
+ hw_exc |= CONVERT_BIT(exc, FPCR_OVF, EXC_M_FOV);
+ hw_exc |= CONVERT_BIT(exc, FPCR_UNF, EXC_M_UNF);
+ hw_exc |= CONVERT_BIT(exc, FPCR_INE, EXC_M_INE);
+ hw_exc |= CONVERT_BIT(exc, FPCR_IOV, EXC_M_IOV);
+
+ arith_excp(env, retaddr, hw_exc, 1ull << regno);
+}
+
+/* Raise exceptions for ieee fp insns without software completion.
+ In that case there are no exceptions that don't trap; the mask
+ doesn't apply. */
+void helper_fp_exc_raise(CPUAlphaState *env, uint32_t ignore, uint32_t regno)
+{
+ uint32_t exc = env->error_code;
+ if (exc) {
+ env->fpcr |= exc;
+ exc &= ~ignore;
+ if (exc) {
+ fp_exc_raise1(env, GETPC(), exc, regno, 0);
+ }
+ }
+}
+
+/* Raise exceptions for ieee fp insns with software completion. */
+void helper_fp_exc_raise_s(CPUAlphaState *env, uint32_t ignore, uint32_t regno)
+{
+ uint32_t exc = env->error_code & ~ignore;
+ if (exc) {
+ env->fpcr |= exc;
+ exc &= ~ignore;
+ if (exc) {
+ exc &= env->fpcr_exc_enable;
+ fp_exc_raise1(env, GETPC(), exc, regno, EXC_M_SWC);
+ }
+ }
+}
+
+/* Input handling without software completion. Trap for all
+ non-finite numbers. */
+void helper_ieee_input(CPUAlphaState *env, uint64_t val)
+{
+ uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
+ uint64_t frac = val & 0xfffffffffffffull;
+
+ if (exp == 0) {
+ /* Denormals without /S raise an exception. */
+ if (frac != 0) {
+ arith_excp(env, GETPC(), EXC_M_INV, 0);
+ }
+ } else if (exp == 0x7ff) {
+ /* Infinity or NaN. */
+ env->fpcr |= FPCR_INV;
+ arith_excp(env, GETPC(), EXC_M_INV, 0);
+ }
+}
+
+/* Similar, but does not trap for infinities. Used for comparisons. */
+void helper_ieee_input_cmp(CPUAlphaState *env, uint64_t val)
+{
+ uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
+ uint64_t frac = val & 0xfffffffffffffull;
+
+ if (exp == 0) {
+ /* Denormals without /S raise an exception. */
+ if (frac != 0) {
+ arith_excp(env, GETPC(), EXC_M_INV, 0);
+ }
+ } else if (exp == 0x7ff && frac) {
+ /* NaN. */
+ env->fpcr |= FPCR_INV;
+ arith_excp(env, GETPC(), EXC_M_INV, 0);
+ }
+}
+
+/* Input handling with software completion. Trap for denorms, unless DNZ
+ is set. If we try to support DNOD (which none of the produced hardware
+ did, AFAICS), we'll need to suppress the trap when FPCR.DNOD is set;
+ then the code downstream of that will need to cope with denorms sans
+ flush_input_to_zero. Most of it should work sanely, but there's
+ nothing to compare with. */
+void helper_ieee_input_s(CPUAlphaState *env, uint64_t val)
+{
+ if (unlikely(2 * val - 1 < 0x1fffffffffffffull)
+ && !env->fp_status.flush_inputs_to_zero) {
+ arith_excp(env, GETPC(), EXC_M_INV | EXC_M_SWC, 0);
+ }
+}
+
+/* S floating (single) */
+
+/* Taken from linux/arch/alpha/kernel/traps.c, s_mem_to_reg. */
+static inline uint64_t float32_to_s_int(uint32_t fi)
+{
+ uint32_t frac = fi & 0x7fffff;
+ uint32_t sign = fi >> 31;
+ uint32_t exp_msb = (fi >> 30) & 1;
+ uint32_t exp_low = (fi >> 23) & 0x7f;
+ uint32_t exp;
+
+ exp = (exp_msb << 10) | exp_low;
+ if (exp_msb) {
+ if (exp_low == 0x7f) {
+ exp = 0x7ff;
+ }
+ } else {
+ if (exp_low != 0x00) {
+ exp |= 0x380;
+ }
+ }
+
+ return (((uint64_t)sign << 63)
+ | ((uint64_t)exp << 52)
+ | ((uint64_t)frac << 29));
+}
+
+static inline uint64_t float32_to_s(float32 fa)
+{
+ CPU_FloatU a;
+ a.f = fa;
+ return float32_to_s_int(a.l);
+}
+
+static inline uint32_t s_to_float32_int(uint64_t a)
+{
+ return ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
+}
+
+static inline float32 s_to_float32(uint64_t a)
+{
+ CPU_FloatU r;
+ r.l = s_to_float32_int(a);
+ return r.f;
+}
+
+uint32_t helper_s_to_memory(uint64_t a)
+{
+ return s_to_float32_int(a);
+}
+
+uint64_t helper_memory_to_s(uint32_t a)
+{
+ return float32_to_s_int(a);
+}
+
+uint64_t helper_adds(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float32 fa, fb, fr;
+
+ fa = s_to_float32(a);
+ fb = s_to_float32(b);
+ fr = float32_add(fa, fb, &FP_STATUS);
+ env->error_code = soft_to_fpcr_exc(env);
+
+ return float32_to_s(fr);
+}
+
+uint64_t helper_subs(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float32 fa, fb, fr;
+
+ fa = s_to_float32(a);
+ fb = s_to_float32(b);
+ fr = float32_sub(fa, fb, &FP_STATUS);
+ env->error_code = soft_to_fpcr_exc(env);
+
+ return float32_to_s(fr);
+}
+
+uint64_t helper_muls(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float32 fa, fb, fr;
+
+ fa = s_to_float32(a);
+ fb = s_to_float32(b);
+ fr = float32_mul(fa, fb, &FP_STATUS);
+ env->error_code = soft_to_fpcr_exc(env);
+
+ return float32_to_s(fr);
+}
+
+uint64_t helper_divs(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float32 fa, fb, fr;
+
+ fa = s_to_float32(a);
+ fb = s_to_float32(b);
+ fr = float32_div(fa, fb, &FP_STATUS);
+ env->error_code = soft_to_fpcr_exc(env);
+
+ return float32_to_s(fr);
+}
+
+uint64_t helper_sqrts(CPUAlphaState *env, uint64_t a)
+{
+ float32 fa, fr;
+
+ fa = s_to_float32(a);
+ fr = float32_sqrt(fa, &FP_STATUS);
+ env->error_code = soft_to_fpcr_exc(env);
+
+ return float32_to_s(fr);
+}
+
+
+/* T floating (double) */
+static inline float64 t_to_float64(uint64_t a)
+{
+ /* Memory format is the same as float64 */
+ CPU_DoubleU r;
+ r.ll = a;
+ return r.d;
+}
+
+static inline uint64_t float64_to_t(float64 fa)
+{
+ /* Memory format is the same as float64 */
+ CPU_DoubleU r;
+ r.d = fa;
+ return r.ll;
+}
+
+uint64_t helper_addt(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb, fr;
+
+ fa = t_to_float64(a);
+ fb = t_to_float64(b);
+ fr = float64_add(fa, fb, &FP_STATUS);
+ env->error_code = soft_to_fpcr_exc(env);
+
+ return float64_to_t(fr);
+}
+
+uint64_t helper_subt(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb, fr;
+
+ fa = t_to_float64(a);
+ fb = t_to_float64(b);
+ fr = float64_sub(fa, fb, &FP_STATUS);
+ env->error_code = soft_to_fpcr_exc(env);
+
+ return float64_to_t(fr);
+}
+
+uint64_t helper_mult(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb, fr;
+
+ fa = t_to_float64(a);
+ fb = t_to_float64(b);
+ fr = float64_mul(fa, fb, &FP_STATUS);
+ env->error_code = soft_to_fpcr_exc(env);
+
+ return float64_to_t(fr);
+}
+
+uint64_t helper_divt(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb, fr;
+
+ fa = t_to_float64(a);
+ fb = t_to_float64(b);
+ fr = float64_div(fa, fb, &FP_STATUS);
+ env->error_code = soft_to_fpcr_exc(env);
+
+ return float64_to_t(fr);
+}
+
+uint64_t helper_sqrtt(CPUAlphaState *env, uint64_t a)
+{
+ float64 fa, fr;
+
+ fa = t_to_float64(a);
+ fr = float64_sqrt(fa, &FP_STATUS);
+ env->error_code = soft_to_fpcr_exc(env);
+
+ return float64_to_t(fr);
+}
+
+/* Comparisons */
+uint64_t helper_cmptun(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb;
+ uint64_t ret = 0;
+
+ fa = t_to_float64(a);
+ fb = t_to_float64(b);
+
+ if (float64_unordered_quiet(fa, fb, &FP_STATUS)) {
+ ret = 0x4000000000000000ULL;
+ }
+ env->error_code = soft_to_fpcr_exc(env);
+
+ return ret;
+}
+
+uint64_t helper_cmpteq(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb;
+ uint64_t ret = 0;
+
+ fa = t_to_float64(a);
+ fb = t_to_float64(b);
+
+ if (float64_eq_quiet(fa, fb, &FP_STATUS)) {
+ ret = 0x4000000000000000ULL;
+ }
+ env->error_code = soft_to_fpcr_exc(env);
+
+ return ret;
+}
+
+uint64_t helper_cmptle(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb;
+ uint64_t ret = 0;
+
+ fa = t_to_float64(a);
+ fb = t_to_float64(b);
+
+ if (float64_le(fa, fb, &FP_STATUS)) {
+ ret = 0x4000000000000000ULL;
+ }
+ env->error_code = soft_to_fpcr_exc(env);
+
+ return ret;
+}
+
+uint64_t helper_cmptlt(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb;
+ uint64_t ret = 0;
+
+ fa = t_to_float64(a);
+ fb = t_to_float64(b);
+
+ if (float64_lt(fa, fb, &FP_STATUS)) {
+ ret = 0x4000000000000000ULL;
+ }
+ env->error_code = soft_to_fpcr_exc(env);
+
+ return ret;
+}
+
+/* Floating point format conversion */
+uint64_t helper_cvtts(CPUAlphaState *env, uint64_t a)
+{
+ float64 fa;
+ float32 fr;
+
+ fa = t_to_float64(a);
+ fr = float64_to_float32(fa, &FP_STATUS);
+ env->error_code = soft_to_fpcr_exc(env);
+
+ return float32_to_s(fr);
+}
+
+uint64_t helper_cvtst(CPUAlphaState *env, uint64_t a)
+{
+ float32 fa;
+ float64 fr;
+
+ fa = s_to_float32(a);
+ fr = float32_to_float64(fa, &FP_STATUS);
+ env->error_code = soft_to_fpcr_exc(env);
+
+ return float64_to_t(fr);
+}
+
+uint64_t helper_cvtqs(CPUAlphaState *env, uint64_t a)
+{
+ float32 fr = int64_to_float32(a, &FP_STATUS);
+ env->error_code = soft_to_fpcr_exc(env);
+
+ return float32_to_s(fr);
+}
+
+/* Implement float64 to uint64_t conversion without saturation -- we must
+ supply the truncated result. This behaviour is used by the compiler
+ to get unsigned conversion for free with the same instruction. */
+
+static uint64_t do_cvttq(CPUAlphaState *env, uint64_t a, int roundmode)
+{
+ uint64_t frac, ret = 0;
+ uint32_t exp, sign, exc = 0;
+ int shift;
+
+ sign = (a >> 63);
+ exp = (uint32_t)(a >> 52) & 0x7ff;
+ frac = a & 0xfffffffffffffull;
+
+ if (exp == 0) {
+ if (unlikely(frac != 0) && !env->fp_status.flush_inputs_to_zero) {
+ goto do_underflow;
+ }
+ } else if (exp == 0x7ff) {
+ exc = FPCR_INV;
+ } else {
+ /* Restore implicit bit. */
+ frac |= 0x10000000000000ull;
+
+ shift = exp - 1023 - 52;
+ if (shift >= 0) {
+ /* In this case the number is so large that we must shift
+ the fraction left. There is no rounding to do. */
+ if (shift < 64) {
+ ret = frac << shift;
+ }
+ /* Check for overflow. Note the special case of -0x1p63. */
+ if (shift >= 11 && a != 0xC3E0000000000000ull) {
+ exc = FPCR_IOV | FPCR_INE;
+ }
+ } else {
+ uint64_t round;
+
+ /* In this case the number is smaller than the fraction as
+ represented by the 52 bit number. Here we must think
+ about rounding the result. Handle this by shifting the
+ fractional part of the number into the high bits of ROUND.
+ This will let us efficiently handle round-to-nearest. */
+ shift = -shift;
+ if (shift < 63) {
+ ret = frac >> shift;
+ round = frac << (64 - shift);
+ } else {
+ /* The exponent is so small we shift out everything.
+ Leave a sticky bit for proper rounding below. */
+ do_underflow:
+ round = 1;
+ }
+
+ if (round) {
+ exc = FPCR_INE;
+ switch (roundmode) {
+ case float_round_nearest_even:
+ if (round == (1ull << 63)) {
+ /* Fraction is exactly 0.5; round to even. */
+ ret += (ret & 1);
+ } else if (round > (1ull << 63)) {
+ ret += 1;
+ }
+ break;
+ case float_round_to_zero:
+ break;
+ case float_round_up:
+ ret += 1 - sign;
+ break;
+ case float_round_down:
+ ret += sign;
+ break;
+ }
+ }
+ }
+ if (sign) {
+ ret = -ret;
+ }
+ }
+ env->error_code = exc;
+
+ return ret;
+}
+
+uint64_t helper_cvttq(CPUAlphaState *env, uint64_t a)
+{
+ return do_cvttq(env, a, FP_STATUS.float_rounding_mode);
+}
+
+uint64_t helper_cvttq_c(CPUAlphaState *env, uint64_t a)
+{
+ return do_cvttq(env, a, float_round_to_zero);
+}
+
+uint64_t helper_cvtqt(CPUAlphaState *env, uint64_t a)
+{
+ float64 fr = int64_to_float64(a, &FP_STATUS);
+ env->error_code = soft_to_fpcr_exc(env);
+ return float64_to_t(fr);
+}
+
+uint64_t helper_cvtql(CPUAlphaState *env, uint64_t val)
+{
+ uint32_t exc = 0;
+ if (val != (int32_t)val) {
+ exc = FPCR_IOV | FPCR_INE;
+ }
+ env->error_code = exc;
+
+ return ((val & 0xc0000000) << 32) | ((val & 0x3fffffff) << 29);
+}
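
As an aside (not part of this patch), a worked example of the S-format register
image used by the helpers above:

    /* Sketch only: single-precision values are kept in the FPRs widened to
       T-format, so 1.0f round-trips through the two memory helpers, and the
       register image of 1.0f equals the bit pattern of (double)1.0. */
    uint64_t reg = helper_memory_to_s(0x3f800000u);  /* -> 0x3ff0000000000000 */
    uint32_t mem = helper_s_to_memory(reg);          /* -> 0x3f800000 again   */
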
diff --git a/target/alpha/gdbstub.c b/target/alpha/gdbstub.c
new file mode 100644
index 0000000000..d64bcccfa0
--- /dev/null
+++ b/target/alpha/gdbstub.c
@@ -0,0 +1,94 @@
+/*
+ * Alpha gdb server stub
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+
+int alpha_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ AlphaCPU *cpu = ALPHA_CPU(cs);
+ CPUAlphaState *env = &cpu->env;
+ uint64_t val;
+ CPU_DoubleU d;
+
+ switch (n) {
+ case 0 ... 30:
+ val = cpu_alpha_load_gr(env, n);
+ break;
+ case 32 ... 62:
+ d.d = env->fir[n - 32];
+ val = d.ll;
+ break;
+ case 63:
+ val = cpu_alpha_load_fpcr(env);
+ break;
+ case 64:
+ val = env->pc;
+ break;
+ case 66:
+ val = env->unique;
+ break;
+ case 31:
+ case 65:
+ /* 31 really is the zero register; 65 is unassigned in the
+ gdb protocol, but is still required to occupy 8 bytes. */
+ val = 0;
+ break;
+ default:
+ return 0;
+ }
+ return gdb_get_regl(mem_buf, val);
+}
+
+int alpha_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ AlphaCPU *cpu = ALPHA_CPU(cs);
+ CPUAlphaState *env = &cpu->env;
+ target_ulong tmp = ldtul_p(mem_buf);
+ CPU_DoubleU d;
+
+ switch (n) {
+ case 0 ... 30:
+ cpu_alpha_store_gr(env, n, tmp);
+ break;
+ case 32 ... 62:
+ d.ll = tmp;
+ env->fir[n - 32] = d.d;
+ break;
+ case 63:
+ cpu_alpha_store_fpcr(env, tmp);
+ break;
+ case 64:
+ env->pc = tmp;
+ break;
+ case 66:
+ env->unique = tmp;
+ break;
+ case 31:
+ case 65:
+ /* 31 really is the zero register; 65 is unassigned in the
+ gdb protocol, but is still required to occupy 8 bytes. */
+ break;
+ default:
+ return 0;
+ }
+ return 8;
+}
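
As an aside (not part of this patch), the register numbering assumed by the two
functions above, matching the 67 core registers advertised in cpu.c; 'cs' is
assumed to be a CPUState * for an Alpha CPU:

    /* Sketch only: every defined register reads back as 8 bytes.
         0..30   integer registers         31   architectural zero register
        32..62   floating-point registers  63   FPCR
        64       PC                        65   unassigned, reads as zero
        66       unique (per-thread value) */
    uint8_t buf[8];
    for (int n = 0; n < 67; n++) {
        int len = alpha_cpu_gdb_read_register(cs, buf, n);
        (void)len;   /* 8 for every register number up to 66 */
    }
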
diff --git a/target/alpha/helper.c b/target/alpha/helper.c
new file mode 100644
index 0000000000..a5c308859b
--- /dev/null
+++ b/target/alpha/helper.c
@@ -0,0 +1,490 @@
+/*
+ * Alpha emulation cpu helpers for qemu.
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "fpu/softfloat.h"
+#include "exec/helper-proto.h"
+
+
+#define CONVERT_BIT(X, SRC, DST) \
+ (SRC > DST ? (X) / (SRC / DST) & (DST) : ((X) & SRC) * (DST / SRC))
+
+uint64_t cpu_alpha_load_fpcr (CPUAlphaState *env)
+{
+ return (uint64_t)env->fpcr << 32;
+}
+
+void cpu_alpha_store_fpcr (CPUAlphaState *env, uint64_t val)
+{
+ uint32_t fpcr = val >> 32;
+ uint32_t t = 0;
+
+ t |= CONVERT_BIT(fpcr, FPCR_INED, FPCR_INE);
+ t |= CONVERT_BIT(fpcr, FPCR_UNFD, FPCR_UNF);
+ t |= CONVERT_BIT(fpcr, FPCR_OVFD, FPCR_OVF);
+ t |= CONVERT_BIT(fpcr, FPCR_DZED, FPCR_DZE);
+ t |= CONVERT_BIT(fpcr, FPCR_INVD, FPCR_INV);
+
+ env->fpcr = fpcr;
+ env->fpcr_exc_enable = ~t & FPCR_STATUS_MASK;
+
+ switch (fpcr & FPCR_DYN_MASK) {
+ case FPCR_DYN_NORMAL:
+ default:
+ t = float_round_nearest_even;
+ break;
+ case FPCR_DYN_CHOPPED:
+ t = float_round_to_zero;
+ break;
+ case FPCR_DYN_MINUS:
+ t = float_round_down;
+ break;
+ case FPCR_DYN_PLUS:
+ t = float_round_up;
+ break;
+ }
+ env->fpcr_dyn_round = t;
+
+ env->fpcr_flush_to_zero = (fpcr & FPCR_UNFD) && (fpcr & FPCR_UNDZ);
+ env->fp_status.flush_inputs_to_zero = (fpcr & FPCR_DNZ) != 0;
+}
+
+uint64_t helper_load_fpcr(CPUAlphaState *env)
+{
+ return cpu_alpha_load_fpcr(env);
+}
+
+void helper_store_fpcr(CPUAlphaState *env, uint64_t val)
+{
+ cpu_alpha_store_fpcr(env, val);
+}
+
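+/* Return the canonical storage location of general register REG, taking
+ the PALmode shadow registers into account. */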
+static uint64_t *cpu_alpha_addr_gr(CPUAlphaState *env, unsigned reg)
+{
+#ifndef CONFIG_USER_ONLY
+ if (env->pal_mode) {
+ if (reg >= 8 && reg <= 14) {
+ return &env->shadow[reg - 8];
+ } else if (reg == 25) {
+ return &env->shadow[7];
+ }
+ }
+#endif
+ return &env->ir[reg];
+}
+
+uint64_t cpu_alpha_load_gr(CPUAlphaState *env, unsigned reg)
+{
+ return *cpu_alpha_addr_gr(env, reg);
+}
+
+void cpu_alpha_store_gr(CPUAlphaState *env, unsigned reg, uint64_t val)
+{
+ *cpu_alpha_addr_gr(env, reg) = val;
+}
+
+#if defined(CONFIG_USER_ONLY)
+int alpha_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
+ int rw, int mmu_idx)
+{
+ AlphaCPU *cpu = ALPHA_CPU(cs);
+
+ cs->exception_index = EXCP_MMFAULT;
+ cpu->env.trap_arg0 = address;
+ return 1;
+}
+#else
+/* Returns the OSF/1 entMM failure indication, or -1 on success. */
+static int get_physical_address(CPUAlphaState *env, target_ulong addr,
+ int prot_need, int mmu_idx,
+ target_ulong *pphys, int *pprot)
+{
+ CPUState *cs = CPU(alpha_env_get_cpu(env));
+ target_long saddr = addr;
+ target_ulong phys = 0;
+ target_ulong L1pte, L2pte, L3pte;
+ target_ulong pt, index;
+ int prot = 0;
+ int ret = MM_K_ACV;
+
+ /* Handle physical accesses. */
+ if (mmu_idx == MMU_PHYS_IDX) {
+ phys = addr;
+ prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ ret = -1;
+ goto exit;
+ }
+
+ /* Ensure that the virtual address is properly sign-extended from
+ the last implemented virtual address bit. */
+ if (saddr >> TARGET_VIRT_ADDR_SPACE_BITS != saddr >> 63) {
+ goto exit;
+ }
+
+ /* Translate the superpage. */
+ /* ??? When we do more than emulate Unix PALcode, we'll need to
+ determine which KSEG is actually active. */
+ if (saddr < 0 && ((saddr >> 41) & 3) == 2) {
+ /* User-space cannot access KSEG addresses. */
+ if (mmu_idx != MMU_KERNEL_IDX) {
+ goto exit;
+ }
+
+ /* For the benefit of the Typhoon chipset, move bit 40 to bit 43.
+ We would not do this if the 48-bit KSEG is enabled. */
+ phys = saddr & ((1ull << 40) - 1);
+ phys |= (saddr & (1ull << 40)) << 3;
+
+ prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ ret = -1;
+ goto exit;
+ }
+
+ /* Interpret the page table exactly like PALcode does. */
+
+ pt = env->ptbr;
+
+ /* L1 page table read. */
+ index = (addr >> (TARGET_PAGE_BITS + 20)) & 0x3ff;
+ L1pte = ldq_phys(cs->as, pt + index*8);
+
+ if (unlikely((L1pte & PTE_VALID) == 0)) {
+ ret = MM_K_TNV;
+ goto exit;
+ }
+ if (unlikely((L1pte & PTE_KRE) == 0)) {
+ goto exit;
+ }
+ pt = L1pte >> 32 << TARGET_PAGE_BITS;
+
+ /* L2 page table read. */
+ index = (addr >> (TARGET_PAGE_BITS + 10)) & 0x3ff;
+ L2pte = ldq_phys(cs->as, pt + index*8);
+
+ if (unlikely((L2pte & PTE_VALID) == 0)) {
+ ret = MM_K_TNV;
+ goto exit;
+ }
+ if (unlikely((L2pte & PTE_KRE) == 0)) {
+ goto exit;
+ }
+ pt = L2pte >> 32 << TARGET_PAGE_BITS;
+
+ /* L3 page table read. */
+ index = (addr >> TARGET_PAGE_BITS) & 0x3ff;
+ L3pte = ldq_phys(cs->as, pt + index*8);
+
+ phys = L3pte >> 32 << TARGET_PAGE_BITS;
+ if (unlikely((L3pte & PTE_VALID) == 0)) {
+ ret = MM_K_TNV;
+ goto exit;
+ }
+
+#if PAGE_READ != 1 || PAGE_WRITE != 2 || PAGE_EXEC != 4
+# error page bits out of date
+#endif
+
+ /* Check access violations. */
+ if (L3pte & (PTE_KRE << mmu_idx)) {
+ prot |= PAGE_READ | PAGE_EXEC;
+ }
+ if (L3pte & (PTE_KWE << mmu_idx)) {
+ prot |= PAGE_WRITE;
+ }
+ if (unlikely((prot & prot_need) == 0 && prot_need)) {
+ goto exit;
+ }
+
+ /* Check fault-on-operation violations. */
+ prot &= ~(L3pte >> 1);
+ ret = -1;
+ if (unlikely((prot & prot_need) == 0)) {
+ ret = (prot_need & PAGE_EXEC ? MM_K_FOE :
+ prot_need & PAGE_WRITE ? MM_K_FOW :
+ prot_need & PAGE_READ ? MM_K_FOR : -1);
+ }
+
+ exit:
+ *pphys = phys;
+ *pprot = prot;
+ return ret;
+}
+
+hwaddr alpha_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+ AlphaCPU *cpu = ALPHA_CPU(cs);
+ target_ulong phys;
+ int prot, fail;
+
+ fail = get_physical_address(&cpu->env, addr, 0, 0, &phys, &prot);
+ return (fail >= 0 ? -1 : phys);
+}
+
+int alpha_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, int rw,
+ int mmu_idx)
+{
+ AlphaCPU *cpu = ALPHA_CPU(cs);
+ CPUAlphaState *env = &cpu->env;
+ target_ulong phys;
+ int prot, fail;
+
+ fail = get_physical_address(env, addr, 1 << rw, mmu_idx, &phys, &prot);
+ if (unlikely(fail >= 0)) {
+ cs->exception_index = EXCP_MMFAULT;
+ env->trap_arg0 = addr;
+ env->trap_arg1 = fail;
+ env->trap_arg2 = (rw == 2 ? -1 : rw);
+ return 1;
+ }
+
+ tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
+ prot, mmu_idx, TARGET_PAGE_SIZE);
+ return 0;
+}
+#endif /* USER_ONLY */
+
+void alpha_cpu_do_interrupt(CPUState *cs)
+{
+ AlphaCPU *cpu = ALPHA_CPU(cs);
+ CPUAlphaState *env = &cpu->env;
+ int i = cs->exception_index;
+
+ if (qemu_loglevel_mask(CPU_LOG_INT)) {
+ static int count;
+ const char *name = "<unknown>";
+
+ switch (i) {
+ case EXCP_RESET:
+ name = "reset";
+ break;
+ case EXCP_MCHK:
+ name = "mchk";
+ break;
+ case EXCP_SMP_INTERRUPT:
+ name = "smp_interrupt";
+ break;
+ case EXCP_CLK_INTERRUPT:
+ name = "clk_interrupt";
+ break;
+ case EXCP_DEV_INTERRUPT:
+ name = "dev_interrupt";
+ break;
+ case EXCP_MMFAULT:
+ name = "mmfault";
+ break;
+ case EXCP_UNALIGN:
+ name = "unalign";
+ break;
+ case EXCP_OPCDEC:
+ name = "opcdec";
+ break;
+ case EXCP_ARITH:
+ name = "arith";
+ break;
+ case EXCP_FEN:
+ name = "fen";
+ break;
+ case EXCP_CALL_PAL:
+ name = "call_pal";
+ break;
+ }
+ qemu_log("INT %6d: %s(%#x) cpu=%d pc=%016"
+ PRIx64 " sp=%016" PRIx64 "\n",
+ ++count, name, env->error_code, cs->cpu_index,
+ env->pc, env->ir[IR_SP]);
+ }
+
+ cs->exception_index = -1;
+
+#if !defined(CONFIG_USER_ONLY)
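+ /* Map the exception to its offset among the PALcode entry points. */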
+ switch (i) {
+ case EXCP_RESET:
+ i = 0x0000;
+ break;
+ case EXCP_MCHK:
+ i = 0x0080;
+ break;
+ case EXCP_SMP_INTERRUPT:
+ i = 0x0100;
+ break;
+ case EXCP_CLK_INTERRUPT:
+ i = 0x0180;
+ break;
+ case EXCP_DEV_INTERRUPT:
+ i = 0x0200;
+ break;
+ case EXCP_MMFAULT:
+ i = 0x0280;
+ break;
+ case EXCP_UNALIGN:
+ i = 0x0300;
+ break;
+ case EXCP_OPCDEC:
+ i = 0x0380;
+ break;
+ case EXCP_ARITH:
+ i = 0x0400;
+ break;
+ case EXCP_FEN:
+ i = 0x0480;
+ break;
+ case EXCP_CALL_PAL:
+ i = env->error_code;
+ /* There are 64 entry points for both privileged and unprivileged,
+ with bit 0x80 indicating unprivileged. Each entry point gets
+ 64 bytes to do its job. */
+ if (i & 0x80) {
+ i = 0x2000 + (i - 0x80) * 64;
+ } else {
+ i = 0x1000 + i * 64;
+ }
+ break;
+ default:
+ cpu_abort(cs, "Unhandled CPU exception");
+ }
+
+ /* Remember where the exception happened. Emulate real hardware in
+ that the low bit of the PC indicates PALmode. */
+ env->exc_addr = env->pc | env->pal_mode;
+
+ /* Continue execution at the PALcode entry point. */
+ env->pc = env->palbr + i;
+
+ /* Switch to PALmode. */
+ env->pal_mode = 1;
+#endif /* !USER_ONLY */
+}
+
+bool alpha_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ AlphaCPU *cpu = ALPHA_CPU(cs);
+ CPUAlphaState *env = &cpu->env;
+ int idx = -1;
+
+ /* We never take interrupts while in PALmode. */
+ if (env->pal_mode) {
+ return false;
+ }
+
+ /* Fall through the switch, collecting the highest priority
+ interrupt that isn't masked by the processor status IPL. */
+ /* ??? This hard-codes the OSF/1 interrupt levels. */
+ switch (env->ps & PS_INT_MASK) {
+ case 0 ... 3:
+ if (interrupt_request & CPU_INTERRUPT_HARD) {
+ idx = EXCP_DEV_INTERRUPT;
+ }
+ /* FALLTHRU */
+ case 4:
+ if (interrupt_request & CPU_INTERRUPT_TIMER) {
+ idx = EXCP_CLK_INTERRUPT;
+ }
+ /* FALLTHRU */
+ case 5:
+ if (interrupt_request & CPU_INTERRUPT_SMP) {
+ idx = EXCP_SMP_INTERRUPT;
+ }
+ /* FALLTHRU */
+ case 6:
+ if (interrupt_request & CPU_INTERRUPT_MCHK) {
+ idx = EXCP_MCHK;
+ }
+ }
+ if (idx >= 0) {
+ cs->exception_index = idx;
+ env->error_code = 0;
+ alpha_cpu_do_interrupt(cs);
+ return true;
+ }
+ return false;
+}
+
+void alpha_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
+ int flags)
+{
+ static const char *linux_reg_names[] = {
+ "v0 ", "t0 ", "t1 ", "t2 ", "t3 ", "t4 ", "t5 ", "t6 ",
+ "t7 ", "s0 ", "s1 ", "s2 ", "s3 ", "s4 ", "s5 ", "fp ",
+ "a0 ", "a1 ", "a2 ", "a3 ", "a4 ", "a5 ", "t8 ", "t9 ",
+ "t10", "t11", "ra ", "t12", "at ", "gp ", "sp ", "zero",
+ };
+ AlphaCPU *cpu = ALPHA_CPU(cs);
+ CPUAlphaState *env = &cpu->env;
+ int i;
+
+ cpu_fprintf(f, " PC " TARGET_FMT_lx " PS %02x\n",
+ env->pc, env->ps);
+ for (i = 0; i < 31; i++) {
+ cpu_fprintf(f, "IR%02d %s " TARGET_FMT_lx " ", i,
+ linux_reg_names[i], cpu_alpha_load_gr(env, i));
+ if ((i % 3) == 2) {
+ cpu_fprintf(f, "\n");
+ }
+ }
+
+ cpu_fprintf(f, "lock_a " TARGET_FMT_lx " lock_v " TARGET_FMT_lx "\n",
+ env->lock_addr, env->lock_value);
+
+ for (i = 0; i < 31; i++) {
+ cpu_fprintf(f, "FIR%02d " TARGET_FMT_lx " ", i,
+ *((uint64_t *)(&env->fir[i])));
+ if ((i % 3) == 2) {
+ cpu_fprintf(f, "\n");
+ }
+ }
+ cpu_fprintf(f, "\n");
+}
+
+/* This should only be called from translate, via gen_excp.
+ We expect that ENV->PC has already been updated. */
+void QEMU_NORETURN helper_excp(CPUAlphaState *env, int excp, int error)
+{
+ AlphaCPU *cpu = alpha_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
+ cs->exception_index = excp;
+ env->error_code = error;
+ cpu_loop_exit(cs);
+}
+
+/* This may be called from any of the helpers to set up EXCEPTION_INDEX. */
+void QEMU_NORETURN dynamic_excp(CPUAlphaState *env, uintptr_t retaddr,
+ int excp, int error)
+{
+ AlphaCPU *cpu = alpha_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
+ cs->exception_index = excp;
+ env->error_code = error;
+ if (retaddr) {
+ cpu_restore_state(cs, retaddr);
+ /* Floating-point exceptions (our only users) point to the next PC. */
+ env->pc += 4;
+ }
+ cpu_loop_exit(cs);
+}
+
+void QEMU_NORETURN arith_excp(CPUAlphaState *env, uintptr_t retaddr,
+ int exc, uint64_t mask)
+{
+ env->trap_arg0 = exc;
+ env->trap_arg1 = mask;
+ dynamic_excp(env, retaddr, EXCP_ARITH, 0);
+}
diff --git a/target/alpha/helper.h b/target/alpha/helper.h
new file mode 100644
index 0000000000..004221df8c
--- /dev/null
+++ b/target/alpha/helper.h
@@ -0,0 +1,104 @@
+DEF_HELPER_3(excp, noreturn, env, int, int)
+DEF_HELPER_FLAGS_1(load_pcc, TCG_CALL_NO_RWG_SE, i64, env)
+
+DEF_HELPER_FLAGS_3(check_overflow, TCG_CALL_NO_WG, void, env, i64, i64)
+
+DEF_HELPER_FLAGS_1(ctpop, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(ctlz, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(cttz, TCG_CALL_NO_RWG_SE, i64, i64)
+
+DEF_HELPER_FLAGS_2(zap, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(zapnot, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+
+DEF_HELPER_FLAGS_1(cmpbe0, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_2(cmpbge, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+
+DEF_HELPER_FLAGS_2(minub8, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(minsb8, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(minuw4, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(minsw4, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(maxub8, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(maxsb8, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(maxuw4, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(maxsw4, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(perr, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_1(pklb, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(pkwb, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(unpkbl, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(unpkbw, TCG_CALL_NO_RWG_SE, i64, i64)
+
+DEF_HELPER_FLAGS_1(load_fpcr, TCG_CALL_NO_RWG_SE, i64, env)
+DEF_HELPER_FLAGS_2(store_fpcr, TCG_CALL_NO_RWG, void, env, i64)
+
+DEF_HELPER_FLAGS_1(f_to_memory, TCG_CALL_NO_RWG_SE, i32, i64)
+DEF_HELPER_FLAGS_1(memory_to_f, TCG_CALL_NO_RWG_SE, i64, i32)
+DEF_HELPER_FLAGS_3(addf, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(subf, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(mulf, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(divf, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_2(sqrtf, TCG_CALL_NO_RWG, i64, env, i64)
+
+DEF_HELPER_FLAGS_1(g_to_memory, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(memory_to_g, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_3(addg, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(subg, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(mulg, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(divg, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_2(sqrtg, TCG_CALL_NO_RWG, i64, env, i64)
+
+DEF_HELPER_FLAGS_1(s_to_memory, TCG_CALL_NO_RWG_SE, i32, i64)
+DEF_HELPER_FLAGS_1(memory_to_s, TCG_CALL_NO_RWG_SE, i64, i32)
+DEF_HELPER_FLAGS_3(adds, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(subs, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(muls, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(divs, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_2(sqrts, TCG_CALL_NO_RWG, i64, env, i64)
+
+DEF_HELPER_FLAGS_3(addt, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(subt, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(mult, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(divt, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_2(sqrtt, TCG_CALL_NO_RWG, i64, env, i64)
+
+DEF_HELPER_FLAGS_3(cmptun, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(cmpteq, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(cmptle, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(cmptlt, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(cmpgeq, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(cmpgle, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(cmpglt, TCG_CALL_NO_RWG, i64, env, i64, i64)
+
+DEF_HELPER_FLAGS_2(cvtts, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(cvtst, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(cvtqs, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(cvtqt, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(cvtqf, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(cvtgf, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(cvtgq, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(cvtqg, TCG_CALL_NO_RWG, i64, env, i64)
+
+DEF_HELPER_FLAGS_2(cvttq, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(cvttq_c, TCG_CALL_NO_RWG, i64, env, i64)
+
+DEF_HELPER_FLAGS_2(cvtql, TCG_CALL_NO_RWG, i64, env, i64)
+
+DEF_HELPER_FLAGS_2(setroundmode, TCG_CALL_NO_RWG, void, env, i32)
+DEF_HELPER_FLAGS_2(setflushzero, TCG_CALL_NO_RWG, void, env, i32)
+DEF_HELPER_FLAGS_3(fp_exc_raise, TCG_CALL_NO_WG, void, env, i32, i32)
+DEF_HELPER_FLAGS_3(fp_exc_raise_s, TCG_CALL_NO_WG, void, env, i32, i32)
+
+DEF_HELPER_FLAGS_2(ieee_input, TCG_CALL_NO_WG, void, env, i64)
+DEF_HELPER_FLAGS_2(ieee_input_cmp, TCG_CALL_NO_WG, void, env, i64)
+DEF_HELPER_FLAGS_2(ieee_input_s, TCG_CALL_NO_WG, void, env, i64)
+
+#if !defined(CONFIG_USER_ONLY)
+DEF_HELPER_FLAGS_1(tbia, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_FLAGS_2(tbis, TCG_CALL_NO_RWG, void, env, i64)
+DEF_HELPER_FLAGS_1(tb_flush, TCG_CALL_NO_RWG, void, env)
+
+DEF_HELPER_1(halt, void, i64)
+
+DEF_HELPER_FLAGS_0(get_vmtime, TCG_CALL_NO_RWG, i64)
+DEF_HELPER_FLAGS_0(get_walltime, TCG_CALL_NO_RWG, i64)
+DEF_HELPER_FLAGS_2(set_alarm, TCG_CALL_NO_RWG, void, env, i64)
+#endif
diff --git a/target/alpha/int_helper.c b/target/alpha/int_helper.c
new file mode 100644
index 0000000000..19bebfe742
--- /dev/null
+++ b/target/alpha/int_helper.c
@@ -0,0 +1,280 @@
+/*
+ * Helpers for integer and multimedia instructions.
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "qemu/host-utils.h"
+
+
+uint64_t helper_ctpop(uint64_t arg)
+{
+ return ctpop64(arg);
+}
+
+uint64_t helper_ctlz(uint64_t arg)
+{
+ return clz64(arg);
+}
+
+uint64_t helper_cttz(uint64_t arg)
+{
+ return ctz64(arg);
+}
+
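+/* Expand the low 8 bits of MSKB into a byte-lane mask and keep the
+ selected bytes of VAL. */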
+uint64_t helper_zapnot(uint64_t val, uint64_t mskb)
+{
+ uint64_t mask;
+
+ mask = -(mskb & 0x01) & 0x00000000000000ffull;
+ mask |= -(mskb & 0x02) & 0x000000000000ff00ull;
+ mask |= -(mskb & 0x04) & 0x0000000000ff0000ull;
+ mask |= -(mskb & 0x08) & 0x00000000ff000000ull;
+ mask |= -(mskb & 0x10) & 0x000000ff00000000ull;
+ mask |= -(mskb & 0x20) & 0x0000ff0000000000ull;
+ mask |= -(mskb & 0x40) & 0x00ff000000000000ull;
+ mask |= -(mskb & 0x80) & 0xff00000000000000ull;
+
+ return val & mask;
+}
+
+uint64_t helper_zap(uint64_t val, uint64_t mask)
+{
+ return helper_zapnot(val, ~mask);
+}
+
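+/* Set bit i of the result iff byte i of A is zero (CMPBGE against zero). */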
+uint64_t helper_cmpbe0(uint64_t a)
+{
+ uint64_t m = 0x7f7f7f7f7f7f7f7fULL;
+ uint64_t c = ~(((a & m) + m) | a | m);
+ /* a.......b.......c.......d.......e.......f.......g.......h....... */
+ c |= c << 7;
+ /* ab......bc......cd......de......ef......fg......gh......h....... */
+ c |= c << 14;
+ /* abcd....bcde....cdef....defg....efgh....fgh.....gh......h....... */
+ c |= c << 28;
+ /* abcdefghbcdefgh.cdefgh..defgh...efgh....fgh.....gh......h....... */
+ return c >> 56;
+}
+
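+/* Set bit i of the result iff byte i of A is >= byte i of B, unsigned. */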
+uint64_t helper_cmpbge(uint64_t a, uint64_t b)
+{
+ uint64_t mask = 0x00ff00ff00ff00ffULL;
+ uint64_t test = 0x0100010001000100ULL;
+ uint64_t al, ah, bl, bh, cl, ch;
+
+ /* Separate the bytes to avoid false positives. */
+ al = a & mask;
+ bl = b & mask;
+ ah = (a >> 8) & mask;
+ bh = (b >> 8) & mask;
+
+ /* "Compare". If a byte in B is greater than a byte in A,
+ it will clear the test bit. */
+ cl = ((al | test) - bl) & test;
+ ch = ((ah | test) - bh) & test;
+
+ /* Fold all of the test bits into a contiguous set. */
+ /* ch=.......a...............c...............e...............g........ */
+ /* cl=.......b...............d...............f...............h........ */
+ cl += ch << 1;
+ /* cl=......ab..............cd..............ef..............gh........ */
+ cl |= cl << 14;
+ /* cl=......abcd............cdef............efgh............gh........ */
+ cl |= cl << 28;
+ /* cl=......abcdefgh........cdefgh..........efgh............gh........ */
+ return cl >> 50;
+}
+
+uint64_t helper_minub8(uint64_t op1, uint64_t op2)
+{
+ uint64_t res = 0;
+ uint8_t opa, opb, opr;
+ int i;
+
+ for (i = 0; i < 8; ++i) {
+ opa = op1 >> (i * 8);
+ opb = op2 >> (i * 8);
+ opr = opa < opb ? opa : opb;
+ res |= (uint64_t)opr << (i * 8);
+ }
+ return res;
+}
+
+uint64_t helper_minsb8(uint64_t op1, uint64_t op2)
+{
+ uint64_t res = 0;
+ int8_t opa, opb;
+ uint8_t opr;
+ int i;
+
+ for (i = 0; i < 8; ++i) {
+ opa = op1 >> (i * 8);
+ opb = op2 >> (i * 8);
+ opr = opa < opb ? opa : opb;
+ res |= (uint64_t)opr << (i * 8);
+ }
+ return res;
+}
+
+uint64_t helper_minuw4(uint64_t op1, uint64_t op2)
+{
+ uint64_t res = 0;
+ uint16_t opa, opb, opr;
+ int i;
+
+ for (i = 0; i < 4; ++i) {
+ opa = op1 >> (i * 16);
+ opb = op2 >> (i * 16);
+ opr = opa < opb ? opa : opb;
+ res |= (uint64_t)opr << (i * 16);
+ }
+ return res;
+}
+
+uint64_t helper_minsw4(uint64_t op1, uint64_t op2)
+{
+ uint64_t res = 0;
+ int16_t opa, opb;
+ uint16_t opr;
+ int i;
+
+ for (i = 0; i < 4; ++i) {
+ opa = op1 >> (i * 16);
+ opb = op2 >> (i * 16);
+ opr = opa < opb ? opa : opb;
+ res |= (uint64_t)opr << (i * 16);
+ }
+ return res;
+}
+
+uint64_t helper_maxub8(uint64_t op1, uint64_t op2)
+{
+ uint64_t res = 0;
+ uint8_t opa, opb, opr;
+ int i;
+
+ for (i = 0; i < 8; ++i) {
+ opa = op1 >> (i * 8);
+ opb = op2 >> (i * 8);
+ opr = opa > opb ? opa : opb;
+ res |= (uint64_t)opr << (i * 8);
+ }
+ return res;
+}
+
+uint64_t helper_maxsb8(uint64_t op1, uint64_t op2)
+{
+ uint64_t res = 0;
+ int8_t opa, opb;
+ uint8_t opr;
+ int i;
+
+ for (i = 0; i < 8; ++i) {
+ opa = op1 >> (i * 8);
+ opb = op2 >> (i * 8);
+ opr = opa > opb ? opa : opb;
+ res |= (uint64_t)opr << (i * 8);
+ }
+ return res;
+}
+
+uint64_t helper_maxuw4(uint64_t op1, uint64_t op2)
+{
+ uint64_t res = 0;
+ uint16_t opa, opb, opr;
+ int i;
+
+ for (i = 0; i < 4; ++i) {
+ opa = op1 >> (i * 16);
+ opb = op2 >> (i * 16);
+ opr = opa > opb ? opa : opb;
+ res |= (uint64_t)opr << (i * 16);
+ }
+ return res;
+}
+
+uint64_t helper_maxsw4(uint64_t op1, uint64_t op2)
+{
+ uint64_t res = 0;
+ int16_t opa, opb;
+ uint16_t opr;
+ int i;
+
+ for (i = 0; i < 4; ++i) {
+ opa = op1 >> (i * 16);
+ opb = op2 >> (i * 16);
+ opr = opa > opb ? opa : opb;
+ res |= (uint64_t)opr << (i * 16);
+ }
+ return res;
+}
+
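+/* PERR: sum of absolute differences of the eight unsigned byte pairs. */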
+uint64_t helper_perr(uint64_t op1, uint64_t op2)
+{
+ uint64_t res = 0;
+ uint8_t opa, opb, opr;
+ int i;
+
+ for (i = 0; i < 8; ++i) {
+ opa = op1 >> (i * 8);
+ opb = op2 >> (i * 8);
+ if (opa >= opb) {
+ opr = opa - opb;
+ } else {
+ opr = opb - opa;
+ }
+ res += opr;
+ }
+ return res;
+}
+
+uint64_t helper_pklb(uint64_t op1)
+{
+ return (op1 & 0xff) | ((op1 >> 24) & 0xff00);
+}
+
+uint64_t helper_pkwb(uint64_t op1)
+{
+ return ((op1 & 0xff)
+ | ((op1 >> 8) & 0xff00)
+ | ((op1 >> 16) & 0xff0000)
+ | ((op1 >> 24) & 0xff000000));
+}
+
+uint64_t helper_unpkbl(uint64_t op1)
+{
+ return (op1 & 0xff) | ((op1 & 0xff00) << 24);
+}
+
+uint64_t helper_unpkbw(uint64_t op1)
+{
+ return ((op1 & 0xff)
+ | ((op1 & 0xff00) << 8)
+ | ((op1 & 0xff0000) << 16)
+ | ((op1 & 0xff000000) << 24));
+}
+
+void helper_check_overflow(CPUAlphaState *env, uint64_t op1, uint64_t op2)
+{
+ if (unlikely(op1 != op2)) {
+ arith_excp(env, GETPC(), EXC_M_IOV, 0);
+ }
+}
diff --git a/target/alpha/machine.c b/target/alpha/machine.c
new file mode 100644
index 0000000000..b99a123a39
--- /dev/null
+++ b/target/alpha/machine.c
@@ -0,0 +1,91 @@
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "hw/hw.h"
+#include "hw/boards.h"
+#include "migration/cpu.h"
+
+static int get_fpcr(QEMUFile *f, void *opaque, size_t size)
+{
+ CPUAlphaState *env = opaque;
+ cpu_alpha_store_fpcr(env, qemu_get_be64(f));
+ return 0;
+}
+
+static void put_fpcr(QEMUFile *f, void *opaque, size_t size)
+{
+ CPUAlphaState *env = opaque;
+ qemu_put_be64(f, cpu_alpha_load_fpcr(env));
+}
+
+static const VMStateInfo vmstate_fpcr = {
+ .name = "fpcr",
+ .get = get_fpcr,
+ .put = put_fpcr,
+};
+
+static VMStateField vmstate_env_fields[] = {
+ VMSTATE_UINTTL_ARRAY(ir, CPUAlphaState, 31),
+ VMSTATE_UINTTL_ARRAY(fir, CPUAlphaState, 31),
+ /* Save the architecture value of the fpcr, not the internally
+ expanded version. Since this architecture value does not
+ exist in memory to be stored, this requires a bit of hoop
+ jumping. We want OFFSET=0 so that we effectively pass ENV
+ to the helper functions, and we need to fill in the name by
+ hand since there's no field of that name. */
+ {
+ .name = "fpcr",
+ .version_id = 0,
+ .size = sizeof(uint64_t),
+ .info = &vmstate_fpcr,
+ .flags = VMS_SINGLE,
+ .offset = 0
+ },
+ VMSTATE_UINTTL(pc, CPUAlphaState),
+ VMSTATE_UINTTL(unique, CPUAlphaState),
+ VMSTATE_UINTTL(lock_addr, CPUAlphaState),
+ VMSTATE_UINTTL(lock_value, CPUAlphaState),
+
+ VMSTATE_UINT8(ps, CPUAlphaState),
+ VMSTATE_UINT8(intr_flag, CPUAlphaState),
+ VMSTATE_UINT8(pal_mode, CPUAlphaState),
+ VMSTATE_UINT8(fen, CPUAlphaState),
+
+ VMSTATE_UINT32(pcc_ofs, CPUAlphaState),
+
+ VMSTATE_UINTTL(trap_arg0, CPUAlphaState),
+ VMSTATE_UINTTL(trap_arg1, CPUAlphaState),
+ VMSTATE_UINTTL(trap_arg2, CPUAlphaState),
+
+ VMSTATE_UINTTL(exc_addr, CPUAlphaState),
+ VMSTATE_UINTTL(palbr, CPUAlphaState),
+ VMSTATE_UINTTL(ptbr, CPUAlphaState),
+ VMSTATE_UINTTL(vptptr, CPUAlphaState),
+ VMSTATE_UINTTL(sysval, CPUAlphaState),
+ VMSTATE_UINTTL(usp, CPUAlphaState),
+
+ VMSTATE_UINTTL_ARRAY(shadow, CPUAlphaState, 8),
+ VMSTATE_UINTTL_ARRAY(scratch, CPUAlphaState, 24),
+
+ VMSTATE_END_OF_LIST()
+};
+
+static const VMStateDescription vmstate_env = {
+ .name = "env",
+ .version_id = 2,
+ .minimum_version_id = 2,
+ .fields = vmstate_env_fields,
+};
+
+static VMStateField vmstate_cpu_fields[] = {
+ VMSTATE_CPU(),
+ VMSTATE_STRUCT(env, AlphaCPU, 1, vmstate_env, CPUAlphaState),
+ VMSTATE_END_OF_LIST()
+};
+
+const VMStateDescription vmstate_alpha_cpu = {
+ .name = "cpu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = vmstate_cpu_fields,
+};
diff --git a/target/alpha/mem_helper.c b/target/alpha/mem_helper.c
new file mode 100644
index 0000000000..78a7d45590
--- /dev/null
+++ b/target/alpha/mem_helper.c
@@ -0,0 +1,89 @@
+/*
+ * Helpers for loads and stores
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+
+/* Softmmu support */
+#ifndef CONFIG_USER_ONLY
+void alpha_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ AlphaCPU *cpu = ALPHA_CPU(cs);
+ CPUAlphaState *env = &cpu->env;
+ uint64_t pc;
+ uint32_t insn;
+
+ if (retaddr) {
+ cpu_restore_state(cs, retaddr);
+ }
+
+ pc = env->pc;
+ insn = cpu_ldl_code(env, pc);
+
+ env->trap_arg0 = addr;
+ env->trap_arg1 = insn >> 26; /* opcode */
+ env->trap_arg2 = (insn >> 21) & 31; /* dest regno */
+ cs->exception_index = EXCP_UNALIGN;
+ env->error_code = 0;
+ cpu_loop_exit(cs);
+}
+
+void alpha_cpu_unassigned_access(CPUState *cs, hwaddr addr,
+ bool is_write, bool is_exec, int unused,
+ unsigned size)
+{
+ AlphaCPU *cpu = ALPHA_CPU(cs);
+ CPUAlphaState *env = &cpu->env;
+
+ env->trap_arg0 = addr;
+ env->trap_arg1 = is_write ? 1 : 0;
+ cs->exception_index = EXCP_MCHK;
+ env->error_code = 0;
+
+ /* ??? We should cpu_restore_state to the faulting insn, but this hook
+ does not have access to the retaddr value from the original helper.
+ It's all moot until the QEMU PALcode grows an MCHK handler. */
+
+ cpu_loop_exit(cs);
+}
+
+/* Try to fill the TLB and raise an exception on error.  If retaddr is
+ NULL, the function was called from C code (i.e. not from generated
+ code or from helper.c). */
+/* XXX: fix it to restore all registers */
+void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ int ret;
+
+ ret = alpha_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
+ if (unlikely(ret != 0)) {
+ if (retaddr) {
+ cpu_restore_state(cs, retaddr);
+ }
+ /* Exception index and error code are already set */
+ cpu_loop_exit(cs);
+ }
+}
+#endif /* CONFIG_USER_ONLY */
diff --git a/target/alpha/sys_helper.c b/target/alpha/sys_helper.c
new file mode 100644
index 0000000000..bec1e178be
--- /dev/null
+++ b/target/alpha/sys_helper.c
@@ -0,0 +1,91 @@
+/*
+ * Helpers for system instructions.
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "sysemu/sysemu.h"
+#include "qemu/timer.h"
+
+
+uint64_t helper_load_pcc(CPUAlphaState *env)
+{
+#ifndef CONFIG_USER_ONLY
+ /* In system mode we have access to a decent high-resolution clock.
+ In order to make OS-level time accounting work with the RPCC,
+ present it with a well-timed clock fixed at 250MHz. */
+ return (((uint64_t)env->pcc_ofs << 32)
+ | (uint32_t)(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) >> 2));
+#else
+ /* In user-mode, QEMU_CLOCK_VIRTUAL doesn't exist. Just pass through
+ the host cpu clock ticks. Also, don't bother taking PCC_OFS into
+ account. */
+ return (uint32_t)cpu_get_host_ticks();
+#endif
+}
+
+/* PALcode support special instructions */
+#ifndef CONFIG_USER_ONLY
+void helper_tbia(CPUAlphaState *env)
+{
+ tlb_flush(CPU(alpha_env_get_cpu(env)), 1);
+}
+
+void helper_tbis(CPUAlphaState *env, uint64_t p)
+{
+ tlb_flush_page(CPU(alpha_env_get_cpu(env)), p);
+}
+
+void helper_tb_flush(CPUAlphaState *env)
+{
+ tb_flush(CPU(alpha_env_get_cpu(env)));
+}
+
+void helper_halt(uint64_t restart)
+{
+ if (restart) {
+ qemu_system_reset_request();
+ } else {
+ qemu_system_shutdown_request();
+ }
+}
+
+uint64_t helper_get_vmtime(void)
+{
+ return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+}
+
+uint64_t helper_get_walltime(void)
+{
+ return qemu_clock_get_ns(rtc_clock);
+}
+
+void helper_set_alarm(CPUAlphaState *env, uint64_t expire)
+{
+ AlphaCPU *cpu = alpha_env_get_cpu(env);
+
+ if (expire) {
+ env->alarm_expire = expire;
+ timer_mod(cpu->alarm_timer, expire);
+ } else {
+ timer_del(cpu->alarm_timer);
+ }
+}
+
+#endif /* CONFIG_USER_ONLY */
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
new file mode 100644
index 0000000000..114927b751
--- /dev/null
+++ b/target/alpha/translate.c
@@ -0,0 +1,3010 @@
+/*
+ * Alpha emulation cpu translation for qemu.
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "disas/disas.h"
+#include "qemu/host-utils.h"
+#include "exec/exec-all.h"
+#include "tcg-op.h"
+#include "exec/cpu_ldst.h"
+
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+
+#include "trace-tcg.h"
+#include "exec/log.h"
+
+
+#undef ALPHA_DEBUG_DISAS
+#define CONFIG_SOFTFLOAT_INLINE
+
+#ifdef ALPHA_DEBUG_DISAS
+# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
+#else
+# define LOG_DISAS(...) do { } while (0)
+#endif
+
+typedef struct DisasContext DisasContext;
+struct DisasContext {
+ struct TranslationBlock *tb;
+ uint64_t pc;
+#ifndef CONFIG_USER_ONLY
+ uint64_t palbr;
+#endif
+ int mem_idx;
+
+ /* Current rounding mode for this TB. */
+ int tb_rm;
+ /* Current flush-to-zero setting for this TB. */
+ int tb_ftz;
+
+ /* implver value for this CPU. */
+ int implver;
+
+ /* The set of registers active in the current context. */
+ TCGv *ir;
+
+ /* Temporaries for $31 and $f31 as source and destination. */
+ TCGv zero;
+ TCGv sink;
+ /* Temporary for immediate constants. */
+ TCGv lit;
+
+ bool singlestep_enabled;
+};
+
+/* Return values from translate_one, indicating the state of the TB.
+ Note that zero indicates that we are not exiting the TB. */
+
+typedef enum {
+ NO_EXIT,
+
+ /* We have emitted one or more goto_tb. No fixup required. */
+ EXIT_GOTO_TB,
+
+ /* We are not using a goto_tb (for whatever reason), but have updated
+ the PC (for whatever reason), so there's no need to do it again on
+ exiting the TB. */
+ EXIT_PC_UPDATED,
+
+ /* We are exiting the TB, but have neither emitted a goto_tb, nor
+ updated the PC for the next instruction to be executed. */
+ EXIT_PC_STALE,
+
+ /* We are ending the TB with a noreturn function call, e.g. longjmp.
+ No following code will be executed. */
+ EXIT_NORETURN,
+} ExitStatus;
+
+/* global register indexes */
+static TCGv_env cpu_env;
+static TCGv cpu_std_ir[31];
+static TCGv cpu_fir[31];
+static TCGv cpu_pc;
+static TCGv cpu_lock_addr;
+static TCGv cpu_lock_value;
+
+#ifndef CONFIG_USER_ONLY
+static TCGv cpu_pal_ir[31];
+#endif
+
+#include "exec/gen-icount.h"
+
+void alpha_translate_init(void)
+{
+#define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUAlphaState, V) }
+
+ typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
+ static const GlobalVar vars[] = {
+ DEF_VAR(pc),
+ DEF_VAR(lock_addr),
+ DEF_VAR(lock_value),
+ };
+
+#undef DEF_VAR
+
+ /* Use the symbolic register names that match the disassembler. */
+ static const char greg_names[31][4] = {
+ "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
+ "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
+ "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
+ "t10", "t11", "ra", "t12", "at", "gp", "sp"
+ };
+ static const char freg_names[31][4] = {
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
+ "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
+ "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
+ "f24", "f25", "f26", "f27", "f28", "f29", "f30"
+ };
+#ifndef CONFIG_USER_ONLY
+ static const char shadow_names[8][8] = {
+ "pal_t7", "pal_s0", "pal_s1", "pal_s2",
+ "pal_s3", "pal_s4", "pal_s5", "pal_t11"
+ };
+#endif
+
+ static bool done_init = false;
+ int i;
+
+ if (done_init) {
+ return;
+ }
+ done_init = true;
+
+ cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+ tcg_ctx.tcg_env = cpu_env;
+
+ for (i = 0; i < 31; i++) {
+ cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUAlphaState, ir[i]),
+ greg_names[i]);
+ }
+
+ for (i = 0; i < 31; i++) {
+ cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUAlphaState, fir[i]),
+ freg_names[i]);
+ }
+
+#ifndef CONFIG_USER_ONLY
+ memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
+ for (i = 0; i < 8; i++) {
+ int r = (i == 7 ? 25 : i + 8);
+ cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUAlphaState,
+ shadow[i]),
+ shadow_names[i]);
+ }
+#endif
+
+ for (i = 0; i < ARRAY_SIZE(vars); ++i) {
+ const GlobalVar *v = &vars[i];
+ *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
+ }
+}
+
+static TCGv load_zero(DisasContext *ctx)
+{
+ if (TCGV_IS_UNUSED_I64(ctx->zero)) {
+ ctx->zero = tcg_const_i64(0);
+ }
+ return ctx->zero;
+}
+
+static TCGv dest_sink(DisasContext *ctx)
+{
+ if (TCGV_IS_UNUSED_I64(ctx->sink)) {
+ ctx->sink = tcg_temp_new();
+ }
+ return ctx->sink;
+}
+
+static void free_context_temps(DisasContext *ctx)
+{
+ if (!TCGV_IS_UNUSED_I64(ctx->sink)) {
+ tcg_gen_discard_i64(ctx->sink);
+ tcg_temp_free(ctx->sink);
+ TCGV_UNUSED_I64(ctx->sink);
+ }
+ if (!TCGV_IS_UNUSED_I64(ctx->zero)) {
+ tcg_temp_free(ctx->zero);
+ TCGV_UNUSED_I64(ctx->zero);
+ }
+ if (!TCGV_IS_UNUSED_I64(ctx->lit)) {
+ tcg_temp_free(ctx->lit);
+ TCGV_UNUSED_I64(ctx->lit);
+ }
+}
+
+static TCGv load_gpr(DisasContext *ctx, unsigned reg)
+{
+ if (likely(reg < 31)) {
+ return ctx->ir[reg];
+ } else {
+ return load_zero(ctx);
+ }
+}
+
+static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
+ uint8_t lit, bool islit)
+{
+ if (islit) {
+ ctx->lit = tcg_const_i64(lit);
+ return ctx->lit;
+ } else if (likely(reg < 31)) {
+ return ctx->ir[reg];
+ } else {
+ return load_zero(ctx);
+ }
+}
+
+static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
+{
+ if (likely(reg < 31)) {
+ return ctx->ir[reg];
+ } else {
+ return dest_sink(ctx);
+ }
+}
+
+static TCGv load_fpr(DisasContext *ctx, unsigned reg)
+{
+ if (likely(reg < 31)) {
+ return cpu_fir[reg];
+ } else {
+ return load_zero(ctx);
+ }
+}
+
+static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
+{
+ if (likely(reg < 31)) {
+ return cpu_fir[reg];
+ } else {
+ return dest_sink(ctx);
+ }
+}
+
+static void gen_excp_1(int exception, int error_code)
+{
+ TCGv_i32 tmp1, tmp2;
+
+ tmp1 = tcg_const_i32(exception);
+ tmp2 = tcg_const_i32(error_code);
+ gen_helper_excp(cpu_env, tmp1, tmp2);
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp1);
+}
+
+static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
+{
+ tcg_gen_movi_i64(cpu_pc, ctx->pc);
+ gen_excp_1(exception, error_code);
+ return EXIT_NORETURN;
+}
+
+static inline ExitStatus gen_invalid(DisasContext *ctx)
+{
+ return gen_excp(ctx, EXCP_OPCDEC, 0);
+}
+
+static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
+{
+ TCGv_i32 tmp32 = tcg_temp_new_i32();
+ tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
+ gen_helper_memory_to_f(t0, tmp32);
+ tcg_temp_free_i32(tmp32);
+}
+
+static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
+{
+ TCGv tmp = tcg_temp_new();
+ tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
+ gen_helper_memory_to_g(t0, tmp);
+ tcg_temp_free(tmp);
+}
+
+static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
+{
+ TCGv_i32 tmp32 = tcg_temp_new_i32();
+ tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
+ gen_helper_memory_to_s(t0, tmp32);
+ tcg_temp_free_i32(tmp32);
+}
+
+static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
+{
+ tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
+ tcg_gen_mov_i64(cpu_lock_addr, t1);
+ tcg_gen_mov_i64(cpu_lock_value, t0);
+}
+
+static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
+{
+ tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
+ tcg_gen_mov_i64(cpu_lock_addr, t1);
+ tcg_gen_mov_i64(cpu_lock_value, t0);
+}
+
+static inline void gen_load_mem(DisasContext *ctx,
+ void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
+ int flags),
+ int ra, int rb, int32_t disp16, bool fp,
+ bool clear)
+{
+ TCGv tmp, addr, va;
+
+ /* LDQ_U with ra $31 is UNOP. Various other loads are forms of
+ prefetches, which we can treat as nops. No worries about
+ missed exceptions here. */
+ if (unlikely(ra == 31)) {
+ return;
+ }
+
+ tmp = tcg_temp_new();
+ addr = load_gpr(ctx, rb);
+
+ if (disp16) {
+ tcg_gen_addi_i64(tmp, addr, disp16);
+ addr = tmp;
+ }
+ if (clear) {
+ tcg_gen_andi_i64(tmp, addr, ~0x7);
+ addr = tmp;
+ }
+
+ va = (fp ? cpu_fir[ra] : ctx->ir[ra]);
+ tcg_gen_qemu_load(va, addr, ctx->mem_idx);
+
+ tcg_temp_free(tmp);
+}
+
+static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
+{
+ TCGv_i32 tmp32 = tcg_temp_new_i32();
+ gen_helper_f_to_memory(tmp32, t0);
+ tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
+ tcg_temp_free_i32(tmp32);
+}
+
+static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
+{
+ TCGv tmp = tcg_temp_new();
+ gen_helper_g_to_memory(tmp, t0);
+ tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
+ tcg_temp_free(tmp);
+}
+
+static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
+{
+ TCGv_i32 tmp32 = tcg_temp_new_i32();
+ gen_helper_s_to_memory(tmp32, t0);
+ tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
+ tcg_temp_free_i32(tmp32);
+}
+
+static inline void gen_store_mem(DisasContext *ctx,
+ void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
+ int flags),
+ int ra, int rb, int32_t disp16, bool fp,
+ bool clear)
+{
+ TCGv tmp, addr, va;
+
+ tmp = tcg_temp_new();
+ addr = load_gpr(ctx, rb);
+
+ if (disp16) {
+ tcg_gen_addi_i64(tmp, addr, disp16);
+ addr = tmp;
+ }
+ if (clear) {
+ tcg_gen_andi_i64(tmp, addr, ~0x7);
+ addr = tmp;
+ }
+
+ va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
+ tcg_gen_qemu_store(va, addr, ctx->mem_idx);
+
+ tcg_temp_free(tmp);
+}
+
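+/* Emulate STx_C with a compare-and-swap: succeed only if the address still
+ matches the lock address and memory still holds the value observed by the
+ load-locked; RA receives 1 on success, 0 on failure. */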
+static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
+ int32_t disp16, int mem_idx,
+ TCGMemOp op)
+{
+ TCGLabel *lab_fail, *lab_done;
+ TCGv addr, val;
+
+ addr = tcg_temp_new_i64();
+ tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
+ free_context_temps(ctx);
+
+ lab_fail = gen_new_label();
+ lab_done = gen_new_label();
+ tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
+ tcg_temp_free_i64(addr);
+
+ val = tcg_temp_new_i64();
+ tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value,
+ load_gpr(ctx, ra), mem_idx, op);
+ free_context_temps(ctx);
+
+ if (ra != 31) {
+ tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value);
+ }
+ tcg_temp_free_i64(val);
+ tcg_gen_br(lab_done);
+
+ gen_set_label(lab_fail);
+ if (ra != 31) {
+ tcg_gen_movi_i64(ctx->ir[ra], 0);
+ }
+
+ gen_set_label(lab_done);
+ tcg_gen_movi_i64(cpu_lock_addr, -1);
+ return NO_EXIT;
+}
+
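+/* Return true if translation is not in user mode and ADDR lies in the
+ KSEG superpage. */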
+static bool in_superpage(DisasContext *ctx, int64_t addr)
+{
+#ifndef CONFIG_USER_ONLY
+ return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
+ && addr >> TARGET_VIRT_ADDR_SPACE_BITS == -1
+ && ((addr >> 41) & 3) == 2);
+#else
+ return false;
+#endif
+}
+
+static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
+{
+ /* Suppress goto_tb in the case of single-stepping and IO. */
+ if ((ctx->tb->cflags & CF_LAST_IO)
+ || ctx->singlestep_enabled || singlestep) {
+ return false;
+ }
+#ifndef CONFIG_USER_ONLY
+ /* If the destination is in the superpage, the page perms can't change. */
+ if (in_superpage(ctx, dest)) {
+ return true;
+ }
+ /* Check for the dest on the same page as the start of the TB. */
+ return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
+#else
+ return true;
+#endif
+}
+
+static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
+{
+ uint64_t dest = ctx->pc + (disp << 2);
+
+ if (ra != 31) {
+ tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
+ }
+
+ /* Notice branch-to-next; used to initialize RA with the PC. */
+ if (disp == 0) {
+ return 0;
+ } else if (use_goto_tb(ctx, dest)) {
+ tcg_gen_goto_tb(0);
+ tcg_gen_movi_i64(cpu_pc, dest);
+ tcg_gen_exit_tb((uintptr_t)ctx->tb);
+ return EXIT_GOTO_TB;
+ } else {
+ tcg_gen_movi_i64(cpu_pc, dest);
+ return EXIT_PC_UPDATED;
+ }
+}
+
+static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
+ TCGv cmp, int32_t disp)
+{
+ uint64_t dest = ctx->pc + (disp << 2);
+ TCGLabel *lab_true = gen_new_label();
+
+ if (use_goto_tb(ctx, dest)) {
+ tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
+
+ tcg_gen_goto_tb(0);
+ tcg_gen_movi_i64(cpu_pc, ctx->pc);
+ tcg_gen_exit_tb((uintptr_t)ctx->tb);
+
+ gen_set_label(lab_true);
+ tcg_gen_goto_tb(1);
+ tcg_gen_movi_i64(cpu_pc, dest);
+ tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);
+
+ return EXIT_GOTO_TB;
+ } else {
+ TCGv_i64 z = tcg_const_i64(0);
+ TCGv_i64 d = tcg_const_i64(dest);
+ TCGv_i64 p = tcg_const_i64(ctx->pc);
+
+ tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);
+
+ tcg_temp_free_i64(z);
+ tcg_temp_free_i64(d);
+ tcg_temp_free_i64(p);
+ return EXIT_PC_UPDATED;
+ }
+}
+
+static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
+ int32_t disp, int mask)
+{
+ TCGv cmp_tmp;
+
+ if (mask) {
+ cmp_tmp = tcg_temp_new();
+ tcg_gen_andi_i64(cmp_tmp, load_gpr(ctx, ra), 1);
+ } else {
+ cmp_tmp = load_gpr(ctx, ra);
+ }
+
+ return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
+}
+
+/* Fold -0.0 for comparison with COND. */
+
+static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
+{
+ uint64_t mzero = 1ull << 63;
+
+ switch (cond) {
+ case TCG_COND_LE:
+ case TCG_COND_GT:
+ /* For <= or >, the -0.0 value directly compares the way we want. */
+ tcg_gen_mov_i64(dest, src);
+ break;
+
+ case TCG_COND_EQ:
+ case TCG_COND_NE:
+ /* For == or !=, we can simply mask off the sign bit and compare. */
+ tcg_gen_andi_i64(dest, src, mzero - 1);
+ break;
+
+ case TCG_COND_GE:
+ case TCG_COND_LT:
+ /* For >= or <, map -0.0 to +0.0 via comparison and mask. */
+ tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
+ tcg_gen_neg_i64(dest, dest);
+ tcg_gen_and_i64(dest, dest, src);
+ break;
+
+ default:
+ abort();
+ }
+}
+
+static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
+ int32_t disp)
+{
+ TCGv cmp_tmp = tcg_temp_new();
+ gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
+ return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
+}
+
+static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
+{
+ TCGv_i64 va, vb, z;
+
+ z = load_zero(ctx);
+ vb = load_fpr(ctx, rb);
+ va = tcg_temp_new();
+ gen_fold_mzero(cond, va, load_fpr(ctx, ra));
+
+ tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));
+
+ tcg_temp_free(va);
+}
+
+#define QUAL_RM_N 0x080 /* Round mode nearest even */
+#define QUAL_RM_C 0x000 /* Round mode chopped */
+#define QUAL_RM_M 0x040 /* Round mode minus infinity */
+#define QUAL_RM_D 0x0c0 /* Round mode dynamic */
+#define QUAL_RM_MASK 0x0c0
+
+#define QUAL_U 0x100 /* Underflow enable (fp output) */
+#define QUAL_V 0x100 /* Overflow enable (int output) */
+#define QUAL_S 0x400 /* Software completion enable */
+#define QUAL_I 0x200 /* Inexact detection enable */
+
+static void gen_qual_roundmode(DisasContext *ctx, int fn11)
+{
+ TCGv_i32 tmp;
+
+ fn11 &= QUAL_RM_MASK;
+ if (fn11 == ctx->tb_rm) {
+ return;
+ }
+ ctx->tb_rm = fn11;
+
+ tmp = tcg_temp_new_i32();
+ switch (fn11) {
+ case QUAL_RM_N:
+ tcg_gen_movi_i32(tmp, float_round_nearest_even);
+ break;
+ case QUAL_RM_C:
+ tcg_gen_movi_i32(tmp, float_round_to_zero);
+ break;
+ case QUAL_RM_M:
+ tcg_gen_movi_i32(tmp, float_round_down);
+ break;
+ case QUAL_RM_D:
+ tcg_gen_ld8u_i32(tmp, cpu_env,
+ offsetof(CPUAlphaState, fpcr_dyn_round));
+ break;
+ }
+
+#if defined(CONFIG_SOFTFLOAT_INLINE)
+ /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
+ With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
+ sets the one field. */
+ tcg_gen_st8_i32(tmp, cpu_env,
+ offsetof(CPUAlphaState, fp_status.float_rounding_mode));
+#else
+ gen_helper_setroundmode(tmp);
+#endif
+
+ tcg_temp_free_i32(tmp);
+}
+
+static void gen_qual_flushzero(DisasContext *ctx, int fn11)
+{
+ TCGv_i32 tmp;
+
+ fn11 &= QUAL_U;
+ if (fn11 == ctx->tb_ftz) {
+ return;
+ }
+ ctx->tb_ftz = fn11;
+
+ tmp = tcg_temp_new_i32();
+ if (fn11) {
+ /* Underflow is enabled, use the FPCR setting. */
+ tcg_gen_ld8u_i32(tmp, cpu_env,
+ offsetof(CPUAlphaState, fpcr_flush_to_zero));
+ } else {
+ /* Underflow is disabled, force flush-to-zero. */
+ tcg_gen_movi_i32(tmp, 1);
+ }
+
+#if defined(CONFIG_SOFTFLOAT_INLINE)
+ tcg_gen_st8_i32(tmp, cpu_env,
+ offsetof(CPUAlphaState, fp_status.flush_to_zero));
+#else
+ gen_helper_setflushzero(tmp);
+#endif
+
+ tcg_temp_free_i32(tmp);
+}
+
+static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
+{
+ TCGv val;
+
+ if (unlikely(reg == 31)) {
+ val = load_zero(ctx);
+ } else {
+ val = cpu_fir[reg];
+ if ((fn11 & QUAL_S) == 0) {
+ if (is_cmp) {
+ gen_helper_ieee_input_cmp(cpu_env, val);
+ } else {
+ gen_helper_ieee_input(cpu_env, val);
+ }
+ } else {
+#ifndef CONFIG_USER_ONLY
+ /* In system mode, raise exceptions for denormals like real
+ hardware. In user mode, proceed as if the OS completion
+ handler is handling the denormal as per spec. */
+ gen_helper_ieee_input_s(cpu_env, val);
+#endif
+ }
+ }
+ return val;
+}
+
+static void gen_fp_exc_raise(int rc, int fn11)
+{
+ /* ??? We ought to be able to do something with imprecise exceptions.
+ E.g. notice we're still in the trap shadow of something within the
+ TB and do not generate the code to signal the exception; end the TB
+ when an exception is forced to arrive, either by consumption of a
+ register value or TRAPB or EXCB. */
+ TCGv_i32 reg, ign;
+ uint32_t ignore = 0;
+
+ if (!(fn11 & QUAL_U)) {
+ /* Note that QUAL_U == QUAL_V, so ignore either. */
+ ignore |= FPCR_UNF | FPCR_IOV;
+ }
+ if (!(fn11 & QUAL_I)) {
+ ignore |= FPCR_INE;
+ }
+ ign = tcg_const_i32(ignore);
+
+ /* ??? Pass in the regno of the destination so that the helper can
+ set EXC_MASK, which contains a bitmask of destination registers
+ that have caused arithmetic traps. A simple userspace emulation
+ does not require this. We do need it for a guest kernel's entArith,
+ or if we were to do something clever with imprecise exceptions. */
+ reg = tcg_const_i32(rc + 32);
+ if (fn11 & QUAL_S) {
+ gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
+ } else {
+ gen_helper_fp_exc_raise(cpu_env, ign, reg);
+ }
+
+ tcg_temp_free_i32(reg);
+ tcg_temp_free_i32(ign);
+}
+
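+/* CVTLQ: extract the 32-bit value from its longword position within the
+ FP register (bits <63:62> and <58:29>) and sign-extend it to 64 bits. */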
+static void gen_cvtlq(TCGv vc, TCGv vb)
+{
+ TCGv tmp = tcg_temp_new();
+
+ /* The arithmetic right shift here, plus the sign-extended mask below
+ yields a sign-extended result without an explicit ext32s_i64. */
+ tcg_gen_sari_i64(tmp, vb, 32);
+ tcg_gen_shri_i64(vc, vb, 29);
+ tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
+ tcg_gen_andi_i64(vc, vc, 0x3fffffff);
+ tcg_gen_or_i64(vc, vc, tmp);
+
+ tcg_temp_free(tmp);
+}
+
+static void gen_ieee_arith2(DisasContext *ctx,
+ void (*helper)(TCGv, TCGv_ptr, TCGv),
+ int rb, int rc, int fn11)
+{
+ TCGv vb;
+
+ gen_qual_roundmode(ctx, fn11);
+ gen_qual_flushzero(ctx, fn11);
+
+ vb = gen_ieee_input(ctx, rb, fn11, 0);
+ helper(dest_fpr(ctx, rc), cpu_env, vb);
+
+ gen_fp_exc_raise(rc, fn11);
+}
+
+#define IEEE_ARITH2(name) \
+static inline void glue(gen_, name)(DisasContext *ctx, \
+ int rb, int rc, int fn11) \
+{ \
+ gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \
+}
+IEEE_ARITH2(sqrts)
+IEEE_ARITH2(sqrtt)
+IEEE_ARITH2(cvtst)
+IEEE_ARITH2(cvtts)
+
+static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
+{
+ TCGv vb, vc;
+
+ /* No need to set flushzero, since we have an integer output. */
+ vb = gen_ieee_input(ctx, rb, fn11, 0);
+ vc = dest_fpr(ctx, rc);
+
+ /* Almost all integer conversions use cropped rounding;
+ special case that. */
+ if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
+ gen_helper_cvttq_c(vc, cpu_env, vb);
+ } else {
+ gen_qual_roundmode(ctx, fn11);
+ gen_helper_cvttq(vc, cpu_env, vb);
+ }
+ gen_fp_exc_raise(rc, fn11);
+}
+
+static void gen_ieee_intcvt(DisasContext *ctx,
+ void (*helper)(TCGv, TCGv_ptr, TCGv),
+ int rb, int rc, int fn11)
+{
+ TCGv vb, vc;
+
+ gen_qual_roundmode(ctx, fn11);
+ vb = load_fpr(ctx, rb);
+ vc = dest_fpr(ctx, rc);
+
+ /* The only exception that can be raised by integer conversion
+ is inexact. Thus we only need to worry about exceptions when
+ inexact handling is requested. */
+ if (fn11 & QUAL_I) {
+ helper(vc, cpu_env, vb);
+ gen_fp_exc_raise(rc, fn11);
+ } else {
+ helper(vc, cpu_env, vb);
+ }
+}
+
+#define IEEE_INTCVT(name) \
+static inline void glue(gen_, name)(DisasContext *ctx, \
+ int rb, int rc, int fn11) \
+{ \
+ gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \
+}
+IEEE_INTCVT(cvtqs)
+IEEE_INTCVT(cvtqt)
+
+static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
+{
+ TCGv vmask = tcg_const_i64(mask);
+ TCGv tmp = tcg_temp_new_i64();
+
+ if (inv_a) {
+ tcg_gen_andc_i64(tmp, vmask, va);
+ } else {
+ tcg_gen_and_i64(tmp, va, vmask);
+ }
+
+ tcg_gen_andc_i64(vc, vb, vmask);
+ tcg_gen_or_i64(vc, vc, tmp);
+
+ tcg_temp_free(vmask);
+ tcg_temp_free(tmp);
+}
+
+static void gen_ieee_arith3(DisasContext *ctx,
+ void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
+ int ra, int rb, int rc, int fn11)
+{
+ TCGv va, vb, vc;
+
+ gen_qual_roundmode(ctx, fn11);
+ gen_qual_flushzero(ctx, fn11);
+
+ va = gen_ieee_input(ctx, ra, fn11, 0);
+ vb = gen_ieee_input(ctx, rb, fn11, 0);
+ vc = dest_fpr(ctx, rc);
+ helper(vc, cpu_env, va, vb);
+
+ gen_fp_exc_raise(rc, fn11);
+}
+
+#define IEEE_ARITH3(name) \
+static inline void glue(gen_, name)(DisasContext *ctx, \
+ int ra, int rb, int rc, int fn11) \
+{ \
+ gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
+}
+IEEE_ARITH3(adds)
+IEEE_ARITH3(subs)
+IEEE_ARITH3(muls)
+IEEE_ARITH3(divs)
+IEEE_ARITH3(addt)
+IEEE_ARITH3(subt)
+IEEE_ARITH3(mult)
+IEEE_ARITH3(divt)
+
+static void gen_ieee_compare(DisasContext *ctx,
+ void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
+ int ra, int rb, int rc, int fn11)
+{
+ TCGv va, vb, vc;
+
+ va = gen_ieee_input(ctx, ra, fn11, 1);
+ vb = gen_ieee_input(ctx, rb, fn11, 1);
+ vc = dest_fpr(ctx, rc);
+ helper(vc, cpu_env, va, vb);
+
+ gen_fp_exc_raise(rc, fn11);
+}
+
+#define IEEE_CMP3(name) \
+static inline void glue(gen_, name)(DisasContext *ctx, \
+ int ra, int rb, int rc, int fn11) \
+{ \
+ gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \
+}
+IEEE_CMP3(cmptun)
+IEEE_CMP3(cmpteq)
+IEEE_CMP3(cmptlt)
+IEEE_CMP3(cmptle)
+
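+/* Expand the 8-bit byte-select LIT into a 64-bit mask of 0x00/0xff bytes. */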
+static inline uint64_t zapnot_mask(uint8_t lit)
+{
+ uint64_t mask = 0;
+ int i;
+
+ for (i = 0; i < 8; ++i) {
+ if ((lit >> i) & 1) {
+ mask |= 0xffull << (i * 8);
+ }
+ }
+ return mask;
+}
+
+/* Implement zapnot with an immediate operand, which expands to some
+ form of immediate AND. This is a basic building block in the
+ definition of many of the other byte manipulation instructions. */
+static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
+{
+ switch (lit) {
+ case 0x00:
+ tcg_gen_movi_i64(dest, 0);
+ break;
+ case 0x01:
+ tcg_gen_ext8u_i64(dest, src);
+ break;
+ case 0x03:
+ tcg_gen_ext16u_i64(dest, src);
+ break;
+ case 0x0f:
+ tcg_gen_ext32u_i64(dest, src);
+ break;
+ case 0xff:
+ tcg_gen_mov_i64(dest, src);
+ break;
+ default:
+ tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
+ break;
+ }
+}
+
+/* EXTWH, EXTLH, EXTQH */
+static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
+ uint8_t lit, uint8_t byte_mask)
+{
+ if (islit) {
+ tcg_gen_shli_i64(vc, va, (64 - lit * 8) & 0x3f);
+ } else {
+ TCGv tmp = tcg_temp_new();
+ tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
+ tcg_gen_neg_i64(tmp, tmp);
+ tcg_gen_andi_i64(tmp, tmp, 0x3f);
+ tcg_gen_shl_i64(vc, va, tmp);
+ tcg_temp_free(tmp);
+ }
+ gen_zapnoti(vc, vc, byte_mask);
+}
+
+/* EXTBL, EXTWL, EXTLL, EXTQL */
+static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
+ uint8_t lit, uint8_t byte_mask)
+{
+ if (islit) {
+ tcg_gen_shri_i64(vc, va, (lit & 7) * 8);
+ } else {
+ TCGv tmp = tcg_temp_new();
+ tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
+ tcg_gen_shli_i64(tmp, tmp, 3);
+ tcg_gen_shr_i64(vc, va, tmp);
+ tcg_temp_free(tmp);
+ }
+ gen_zapnoti(vc, vc, byte_mask);
+}
+
+/* INSWH, INSLH, INSQH */
+static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
+ uint8_t lit, uint8_t byte_mask)
+{
+ TCGv tmp = tcg_temp_new();
+
+ /* The instruction description has us left-shift the byte mask and extract
+ bits <15:8> and apply that zap at the end. This is equivalent to simply
+ performing the zap first and shifting afterward. */
+ gen_zapnoti(tmp, va, byte_mask);
+
+ if (islit) {
+ lit &= 7;
+ if (unlikely(lit == 0)) {
+ tcg_gen_movi_i64(vc, 0);
+ } else {
+ tcg_gen_shri_i64(vc, tmp, 64 - lit * 8);
+ }
+ } else {
+ TCGv shift = tcg_temp_new();
+
+ /* If (B & 7) == 0, we need to shift by 64 and leave a zero. Do this
+ portably by splitting the shift into two parts: shift_count-1 and 1.
+ Arrange for the -1 by using ones-complement instead of
+ twos-complement in the negation: ~(B * 8) & 63. */
+
+ tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
+ tcg_gen_not_i64(shift, shift);
+ tcg_gen_andi_i64(shift, shift, 0x3f);
+
+ tcg_gen_shr_i64(vc, tmp, shift);
+ tcg_gen_shri_i64(vc, vc, 1);
+ tcg_temp_free(shift);
+ }
+ tcg_temp_free(tmp);
+}
+
+/* INSBL, INSWL, INSLL, INSQL */
+static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
+ uint8_t lit, uint8_t byte_mask)
+{
+ TCGv tmp = tcg_temp_new();
+
+ /* The instruction description has us left-shift the byte mask
+ the same number of byte slots as the data and apply the zap
+ at the end. This is equivalent to simply performing the zap
+ first and shifting afterward. */
+ gen_zapnoti(tmp, va, byte_mask);
+
+ if (islit) {
+ tcg_gen_shli_i64(vc, tmp, (lit & 7) * 8);
+ } else {
+ TCGv shift = tcg_temp_new();
+ tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
+ tcg_gen_shli_i64(shift, shift, 3);
+ tcg_gen_shl_i64(vc, tmp, shift);
+ tcg_temp_free(shift);
+ }
+ tcg_temp_free(tmp);
+}
+
+/* MSKWH, MSKLH, MSKQH */
+static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
+ uint8_t lit, uint8_t byte_mask)
+{
+ if (islit) {
+ gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
+ } else {
+ TCGv shift = tcg_temp_new();
+ TCGv mask = tcg_temp_new();
+
+ /* The instruction description is as above, where the byte_mask
+ is shifted left, and then we extract bits <15:8>. This can be
+ emulated with a right-shift on the expanded byte mask. This
+ requires extra care because for an input <2:0> == 0 we need a
+ shift of 64 bits in order to generate a zero. This is done by
+ splitting the shift into two parts, the variable shift - 1
+ followed by a constant 1 shift. The code we expand below is
+ equivalent to ~(B * 8) & 63. */
+
+ tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
+ tcg_gen_not_i64(shift, shift);
+ tcg_gen_andi_i64(shift, shift, 0x3f);
+        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
+ tcg_gen_shr_i64(mask, mask, shift);
+ tcg_gen_shri_i64(mask, mask, 1);
+
+ tcg_gen_andc_i64(vc, va, mask);
+
+ tcg_temp_free(mask);
+ tcg_temp_free(shift);
+ }
+}
+
+/* MSKBL, MSKWL, MSKLL, MSKQL */
+static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
+ uint8_t lit, uint8_t byte_mask)
+{
+ if (islit) {
+ gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
+ } else {
+ TCGv shift = tcg_temp_new();
+ TCGv mask = tcg_temp_new();
+
+ tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
+ tcg_gen_shli_i64(shift, shift, 3);
+ tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
+ tcg_gen_shl_i64(mask, mask, shift);
+
+ tcg_gen_andc_i64(vc, va, mask);
+
+ tcg_temp_free(mask);
+ tcg_temp_free(shift);
+ }
+}
+
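+/* RC, RS: read the interrupt flag into RA, then clear or set it. */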
+static void gen_rx(DisasContext *ctx, int ra, int set)
+{
+ TCGv_i32 tmp;
+
+ if (ra != 31) {
+ tcg_gen_ld8u_i64(ctx->ir[ra], cpu_env,
+ offsetof(CPUAlphaState, intr_flag));
+ }
+
+ tmp = tcg_const_i32(set);
+ tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
+ tcg_temp_free_i32(tmp);
+}
+
+static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
+{
+    /* We're emulating OSF/1 PALcode. Many of these are trivial accesses
+ to internal cpu registers. */
+
+ /* Unprivileged PAL call */
+ if (palcode >= 0x80 && palcode < 0xC0) {
+ switch (palcode) {
+ case 0x86:
+ /* IMB */
+ /* No-op inside QEMU. */
+ break;
+ case 0x9E:
+ /* RDUNIQUE */
+ tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
+ offsetof(CPUAlphaState, unique));
+ break;
+ case 0x9F:
+ /* WRUNIQUE */
+ tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
+ offsetof(CPUAlphaState, unique));
+ break;
+ default:
+ palcode &= 0xbf;
+ goto do_call_pal;
+ }
+ return NO_EXIT;
+ }
+
+#ifndef CONFIG_USER_ONLY
+ /* Privileged PAL code */
+ if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
+ switch (palcode) {
+ case 0x01:
+ /* CFLUSH */
+ /* No-op inside QEMU. */
+ break;
+ case 0x02:
+ /* DRAINA */
+ /* No-op inside QEMU. */
+ break;
+ case 0x2D:
+ /* WRVPTPTR */
+ tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
+ offsetof(CPUAlphaState, vptptr));
+ break;
+ case 0x31:
+ /* WRVAL */
+ tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
+ offsetof(CPUAlphaState, sysval));
+ break;
+ case 0x32:
+ /* RDVAL */
+ tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
+ offsetof(CPUAlphaState, sysval));
+ break;
+
+ case 0x35: {
+ /* SWPIPL */
+ TCGv tmp;
+
+ /* Note that we already know we're in kernel mode, so we know
+ that PS only contains the 3 IPL bits. */
+ tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env,
+ offsetof(CPUAlphaState, ps));
+
+            /* But make sure to store only the 3 IPL bits from the user. */
+ tmp = tcg_temp_new();
+ tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
+ tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
+ tcg_temp_free(tmp);
+ break;
+ }
+
+ case 0x36:
+ /* RDPS */
+ tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env,
+ offsetof(CPUAlphaState, ps));
+ break;
+ case 0x38:
+ /* WRUSP */
+ tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
+ offsetof(CPUAlphaState, usp));
+ break;
+ case 0x3A:
+ /* RDUSP */
+ tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
+ offsetof(CPUAlphaState, usp));
+ break;
+ case 0x3C:
+ /* WHAMI */
+ tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
+ -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
+ break;
+
+ default:
+ palcode &= 0x3f;
+ goto do_call_pal;
+ }
+ return NO_EXIT;
+ }
+#endif
+ return gen_invalid(ctx);
+
+ do_call_pal:
+#ifdef CONFIG_USER_ONLY
+ return gen_excp(ctx, EXCP_CALL_PAL, palcode);
+#else
+ {
+ TCGv tmp = tcg_temp_new();
+ uint64_t exc_addr = ctx->pc;
+ uint64_t entry = ctx->palbr;
+
+ if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
+ exc_addr |= 1;
+ } else {
+ tcg_gen_movi_i64(tmp, 1);
+ tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, pal_mode));
+ }
+
+ tcg_gen_movi_i64(tmp, exc_addr);
+ tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
+ tcg_temp_free(tmp);
+
+ entry += (palcode & 0x80
+ ? 0x2000 + (palcode - 0x80) * 64
+ : 0x1000 + palcode * 64);
+
+ /* Since the destination is running in PALmode, we don't really
+ need the page permissions check. We'll see the existence of
+ the page when we create the TB, and we'll flush all TBs if
+ we change the PAL base register. */
+ if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) {
+ tcg_gen_goto_tb(0);
+ tcg_gen_movi_i64(cpu_pc, entry);
+ tcg_gen_exit_tb((uintptr_t)ctx->tb);
+ return EXIT_GOTO_TB;
+ } else {
+ tcg_gen_movi_i64(cpu_pc, entry);
+ return EXIT_PC_UPDATED;
+ }
+ }
+#endif
+}
+
+#ifndef CONFIG_USER_ONLY
+
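+/* Size tags or'd into the offsets returned by cpu_pr_data. */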
+#define PR_BYTE 0x100000
+#define PR_LONG 0x200000
+
+static int cpu_pr_data(int pr)
+{
+ switch (pr) {
+ case 0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
+ case 1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
+ case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
+ case 3: return offsetof(CPUAlphaState, trap_arg0);
+ case 4: return offsetof(CPUAlphaState, trap_arg1);
+ case 5: return offsetof(CPUAlphaState, trap_arg2);
+ case 6: return offsetof(CPUAlphaState, exc_addr);
+ case 7: return offsetof(CPUAlphaState, palbr);
+ case 8: return offsetof(CPUAlphaState, ptbr);
+ case 9: return offsetof(CPUAlphaState, vptptr);
+ case 10: return offsetof(CPUAlphaState, unique);
+ case 11: return offsetof(CPUAlphaState, sysval);
+ case 12: return offsetof(CPUAlphaState, usp);
+
+ case 40 ... 63:
+ return offsetof(CPUAlphaState, scratch[pr - 40]);
+
+ case 251:
+ return offsetof(CPUAlphaState, alarm_expire);
+ }
+ return 0;
+}
+
+static ExitStatus gen_mfpr(DisasContext *ctx, TCGv va, int regno)
+{
+ void (*helper)(TCGv);
+ int data;
+
+ switch (regno) {
+ case 32 ... 39:
+ /* Accessing the "non-shadow" general registers. */
+ regno = regno == 39 ? 25 : regno - 32 + 8;
+ tcg_gen_mov_i64(va, cpu_std_ir[regno]);
+ break;
+
+ case 250: /* WALLTIME */
+ helper = gen_helper_get_walltime;
+ goto do_helper;
+ case 249: /* VMTIME */
+ helper = gen_helper_get_vmtime;
+ do_helper:
+ if (use_icount) {
+ gen_io_start();
+ helper(va);
+ gen_io_end();
+ return EXIT_PC_STALE;
+ } else {
+ helper(va);
+ }
+ break;
+
+ default:
+ /* The basic registers are data only, and unknown registers
+ are read-zero, write-ignore. */
+ data = cpu_pr_data(regno);
+ if (data == 0) {
+ tcg_gen_movi_i64(va, 0);
+ } else if (data & PR_BYTE) {
+ tcg_gen_ld8u_i64(va, cpu_env, data & ~PR_BYTE);
+ } else if (data & PR_LONG) {
+ tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
+ } else {
+ tcg_gen_ld_i64(va, cpu_env, data);
+ }
+ break;
+ }
+
+ return NO_EXIT;
+}
+
+static ExitStatus gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
+{
+ TCGv tmp;
+ int data;
+
+ switch (regno) {
+ case 255:
+ /* TBIA */
+ gen_helper_tbia(cpu_env);
+ break;
+
+ case 254:
+ /* TBIS */
+ gen_helper_tbis(cpu_env, vb);
+ break;
+
+ case 253:
+ /* WAIT */
+ tmp = tcg_const_i64(1);
+ tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
+ offsetof(CPUState, halted));
+ return gen_excp(ctx, EXCP_HLT, 0);
+
+ case 252:
+ /* HALT */
+ gen_helper_halt(vb);
+ return EXIT_PC_STALE;
+
+ case 251:
+ /* ALARM */
+ gen_helper_set_alarm(cpu_env, vb);
+ break;
+
+ case 7:
+ /* PALBR */
+ tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
+ /* Changing the PAL base register implies un-chaining all of the TBs
+ that ended with a CALL_PAL. Since the base register usually only
+ changes during boot, flushing everything works well. */
+ gen_helper_tb_flush(cpu_env);
+ return EXIT_PC_STALE;
+
+ case 32 ... 39:
+ /* Accessing the "non-shadow" general registers. */
+ regno = regno == 39 ? 25 : regno - 32 + 8;
+ tcg_gen_mov_i64(cpu_std_ir[regno], vb);
+ break;
+
+ default:
+ /* The basic registers are data only, and unknown registers
+ are read-zero, write-ignore. */
+ data = cpu_pr_data(regno);
+ if (data != 0) {
+ if (data & PR_BYTE) {
+ tcg_gen_st8_i64(vb, cpu_env, data & ~PR_BYTE);
+ } else if (data & PR_LONG) {
+ tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
+ } else {
+ tcg_gen_st_i64(vb, cpu_env, data);
+ }
+ }
+ break;
+ }
+
+ return NO_EXIT;
+}
+#endif /* !CONFIG_USER_ONLY */
+
+#define REQUIRE_NO_LIT \
+ do { \
+ if (real_islit) { \
+ goto invalid_opc; \
+ } \
+ } while (0)
+
+#define REQUIRE_TB_FLAG(FLAG) \
+ do { \
+ if ((ctx->tb->flags & (FLAG)) == 0) { \
+ goto invalid_opc; \
+ } \
+ } while (0)
+
+#define REQUIRE_REG_31(WHICH) \
+ do { \
+ if (WHICH != 31) { \
+ goto invalid_opc; \
+ } \
+ } while (0)
+
+static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
+{
+ int32_t disp21, disp16, disp12 __attribute__((unused));
+ uint16_t fn11;
+ uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
+ bool islit, real_islit;
+ TCGv va, vb, vc, tmp, tmp2;
+ TCGv_i32 t32;
+ ExitStatus ret;
+
+ /* Decode all instruction fields */
+ opc = extract32(insn, 26, 6);
+ ra = extract32(insn, 21, 5);
+ rb = extract32(insn, 16, 5);
+ rc = extract32(insn, 0, 5);
+ real_islit = islit = extract32(insn, 12, 1);
+ lit = extract32(insn, 13, 8);
+
+ disp21 = sextract32(insn, 0, 21);
+ disp16 = sextract32(insn, 0, 16);
+ disp12 = sextract32(insn, 0, 12);
+
+ fn11 = extract32(insn, 5, 11);
+ fpfn = extract32(insn, 5, 6);
+ fn7 = extract32(insn, 5, 7);
+
+ if (rb == 31 && !islit) {
+ islit = true;
+ lit = 0;
+ }
+
+ ret = NO_EXIT;
+ switch (opc) {
+ case 0x00:
+ /* CALL_PAL */
+ ret = gen_call_pal(ctx, insn & 0x03ffffff);
+ break;
+ case 0x01:
+ /* OPC01 */
+ goto invalid_opc;
+ case 0x02:
+ /* OPC02 */
+ goto invalid_opc;
+ case 0x03:
+ /* OPC03 */
+ goto invalid_opc;
+ case 0x04:
+ /* OPC04 */
+ goto invalid_opc;
+ case 0x05:
+ /* OPC05 */
+ goto invalid_opc;
+ case 0x06:
+ /* OPC06 */
+ goto invalid_opc;
+ case 0x07:
+ /* OPC07 */
+ goto invalid_opc;
+
+ case 0x09:
+ /* LDAH */
+ disp16 = (uint32_t)disp16 << 16;
+ /* fall through */
+ case 0x08:
+ /* LDA */
+ va = dest_gpr(ctx, ra);
+ /* It's worth special-casing immediate loads. */
+ if (rb == 31) {
+ tcg_gen_movi_i64(va, disp16);
+ } else {
+ tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
+ }
+ break;
+
+ case 0x0A:
+ /* LDBU */
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
+ gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
+ break;
+ case 0x0B:
+ /* LDQ_U */
+ gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
+ break;
+ case 0x0C:
+ /* LDWU */
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
+ gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
+ break;
+ case 0x0D:
+ /* STW */
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
+ gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
+ break;
+ case 0x0E:
+ /* STB */
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
+ gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
+ break;
+ case 0x0F:
+ /* STQ_U */
+ gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
+ break;
+
+ case 0x10:
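+        /* Integer arithmetic: add, subtract, scaled add/sub, and compares */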
+ vc = dest_gpr(ctx, rc);
+ vb = load_gpr_lit(ctx, rb, lit, islit);
+
+ if (ra == 31) {
+ if (fn7 == 0x00) {
+ /* Special case ADDL as SEXTL. */
+ tcg_gen_ext32s_i64(vc, vb);
+ break;
+ }
+ if (fn7 == 0x29) {
+ /* Special case SUBQ as NEGQ. */
+ tcg_gen_neg_i64(vc, vb);
+ break;
+ }
+ }
+
+ va = load_gpr(ctx, ra);
+ switch (fn7) {
+ case 0x00:
+ /* ADDL */
+ tcg_gen_add_i64(vc, va, vb);
+ tcg_gen_ext32s_i64(vc, vc);
+ break;
+ case 0x02:
+ /* S4ADDL */
+ tmp = tcg_temp_new();
+ tcg_gen_shli_i64(tmp, va, 2);
+ tcg_gen_add_i64(tmp, tmp, vb);
+ tcg_gen_ext32s_i64(vc, tmp);
+ tcg_temp_free(tmp);
+ break;
+ case 0x09:
+ /* SUBL */
+ tcg_gen_sub_i64(vc, va, vb);
+ tcg_gen_ext32s_i64(vc, vc);
+ break;
+ case 0x0B:
+ /* S4SUBL */
+ tmp = tcg_temp_new();
+ tcg_gen_shli_i64(tmp, va, 2);
+ tcg_gen_sub_i64(tmp, tmp, vb);
+ tcg_gen_ext32s_i64(vc, tmp);
+ tcg_temp_free(tmp);
+ break;
+ case 0x0F:
+ /* CMPBGE */
+ if (ra == 31) {
+ /* Special case 0 >= X as X == 0. */
+ gen_helper_cmpbe0(vc, vb);
+ } else {
+ gen_helper_cmpbge(vc, va, vb);
+ }
+ break;
+ case 0x12:
+ /* S8ADDL */
+ tmp = tcg_temp_new();
+ tcg_gen_shli_i64(tmp, va, 3);
+ tcg_gen_add_i64(tmp, tmp, vb);
+ tcg_gen_ext32s_i64(vc, tmp);
+ tcg_temp_free(tmp);
+ break;
+ case 0x1B:
+ /* S8SUBL */
+ tmp = tcg_temp_new();
+ tcg_gen_shli_i64(tmp, va, 3);
+ tcg_gen_sub_i64(tmp, tmp, vb);
+ tcg_gen_ext32s_i64(vc, tmp);
+ tcg_temp_free(tmp);
+ break;
+ case 0x1D:
+ /* CMPULT */
+ tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
+ break;
+ case 0x20:
+ /* ADDQ */
+ tcg_gen_add_i64(vc, va, vb);
+ break;
+ case 0x22:
+ /* S4ADDQ */
+ tmp = tcg_temp_new();
+ tcg_gen_shli_i64(tmp, va, 2);
+ tcg_gen_add_i64(vc, tmp, vb);
+ tcg_temp_free(tmp);
+ break;
+ case 0x29:
+ /* SUBQ */
+ tcg_gen_sub_i64(vc, va, vb);
+ break;
+ case 0x2B:
+ /* S4SUBQ */
+ tmp = tcg_temp_new();
+ tcg_gen_shli_i64(tmp, va, 2);
+ tcg_gen_sub_i64(vc, tmp, vb);
+ tcg_temp_free(tmp);
+ break;
+ case 0x2D:
+ /* CMPEQ */
+ tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
+ break;
+ case 0x32:
+ /* S8ADDQ */
+ tmp = tcg_temp_new();
+ tcg_gen_shli_i64(tmp, va, 3);
+ tcg_gen_add_i64(vc, tmp, vb);
+ tcg_temp_free(tmp);
+ break;
+ case 0x3B:
+ /* S8SUBQ */
+ tmp = tcg_temp_new();
+ tcg_gen_shli_i64(tmp, va, 3);
+ tcg_gen_sub_i64(vc, tmp, vb);
+ tcg_temp_free(tmp);
+ break;
+ case 0x3D:
+ /* CMPULE */
+ tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
+ break;
+ case 0x40:
+ /* ADDL/V */
+ tmp = tcg_temp_new();
+ tcg_gen_ext32s_i64(tmp, va);
+ tcg_gen_ext32s_i64(vc, vb);
+ tcg_gen_add_i64(tmp, tmp, vc);
+ tcg_gen_ext32s_i64(vc, tmp);
+ gen_helper_check_overflow(cpu_env, vc, tmp);
+ tcg_temp_free(tmp);
+ break;
+ case 0x49:
+ /* SUBL/V */
+ tmp = tcg_temp_new();
+ tcg_gen_ext32s_i64(tmp, va);
+ tcg_gen_ext32s_i64(vc, vb);
+ tcg_gen_sub_i64(tmp, tmp, vc);
+ tcg_gen_ext32s_i64(vc, tmp);
+ gen_helper_check_overflow(cpu_env, vc, tmp);
+ tcg_temp_free(tmp);
+ break;
+ case 0x4D:
+ /* CMPLT */
+ tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
+ break;
+ case 0x60:
+ /* ADDQ/V */
+ tmp = tcg_temp_new();
+ tmp2 = tcg_temp_new();
+ tcg_gen_eqv_i64(tmp, va, vb);
+ tcg_gen_mov_i64(tmp2, va);
+ tcg_gen_add_i64(vc, va, vb);
+ tcg_gen_xor_i64(tmp2, tmp2, vc);
+ tcg_gen_and_i64(tmp, tmp, tmp2);
+ tcg_gen_shri_i64(tmp, tmp, 63);
+ tcg_gen_movi_i64(tmp2, 0);
+ gen_helper_check_overflow(cpu_env, tmp, tmp2);
+ tcg_temp_free(tmp);
+ tcg_temp_free(tmp2);
+ break;
+ case 0x69:
+ /* SUBQ/V */
+ tmp = tcg_temp_new();
+ tmp2 = tcg_temp_new();
+ tcg_gen_xor_i64(tmp, va, vb);
+ tcg_gen_mov_i64(tmp2, va);
+ tcg_gen_sub_i64(vc, va, vb);
+ tcg_gen_xor_i64(tmp2, tmp2, vc);
+ tcg_gen_and_i64(tmp, tmp, tmp2);
+ tcg_gen_shri_i64(tmp, tmp, 63);
+ tcg_gen_movi_i64(tmp2, 0);
+ gen_helper_check_overflow(cpu_env, tmp, tmp2);
+ tcg_temp_free(tmp);
+ tcg_temp_free(tmp2);
+ break;
+ case 0x6D:
+ /* CMPLE */
+ tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
+ break;
+ default:
+ goto invalid_opc;
+ }
+ break;
+
+ case 0x11:
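+        /* Integer logical operations, conditional moves, AMASK and IMPLVER */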
+ if (fn7 == 0x20) {
+ if (rc == 31) {
+ /* Special case BIS as NOP. */
+ break;
+ }
+ if (ra == 31) {
+ /* Special case BIS as MOV. */
+ vc = dest_gpr(ctx, rc);
+ if (islit) {
+ tcg_gen_movi_i64(vc, lit);
+ } else {
+ tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
+ }
+ break;
+ }
+ }
+
+ vc = dest_gpr(ctx, rc);
+ vb = load_gpr_lit(ctx, rb, lit, islit);
+
+ if (fn7 == 0x28 && ra == 31) {
+ /* Special case ORNOT as NOT. */
+ tcg_gen_not_i64(vc, vb);
+ break;
+ }
+
+ va = load_gpr(ctx, ra);
+ switch (fn7) {
+ case 0x00:
+ /* AND */
+ tcg_gen_and_i64(vc, va, vb);
+ break;
+ case 0x08:
+ /* BIC */
+ tcg_gen_andc_i64(vc, va, vb);
+ break;
+ case 0x14:
+ /* CMOVLBS */
+ tmp = tcg_temp_new();
+ tcg_gen_andi_i64(tmp, va, 1);
+ tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
+ vb, load_gpr(ctx, rc));
+ tcg_temp_free(tmp);
+ break;
+ case 0x16:
+ /* CMOVLBC */
+ tmp = tcg_temp_new();
+ tcg_gen_andi_i64(tmp, va, 1);
+ tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
+ vb, load_gpr(ctx, rc));
+ tcg_temp_free(tmp);
+ break;
+ case 0x20:
+ /* BIS */
+ tcg_gen_or_i64(vc, va, vb);
+ break;
+ case 0x24:
+ /* CMOVEQ */
+ tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
+ vb, load_gpr(ctx, rc));
+ break;
+ case 0x26:
+ /* CMOVNE */
+ tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
+ vb, load_gpr(ctx, rc));
+ break;
+ case 0x28:
+ /* ORNOT */
+ tcg_gen_orc_i64(vc, va, vb);
+ break;
+ case 0x40:
+ /* XOR */
+ tcg_gen_xor_i64(vc, va, vb);
+ break;
+ case 0x44:
+ /* CMOVLT */
+ tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
+ vb, load_gpr(ctx, rc));
+ break;
+ case 0x46:
+ /* CMOVGE */
+ tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
+ vb, load_gpr(ctx, rc));
+ break;
+ case 0x48:
+ /* EQV */
+ tcg_gen_eqv_i64(vc, va, vb);
+ break;
+ case 0x61:
+ /* AMASK */
+ REQUIRE_REG_31(ra);
+ {
+ uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
+ tcg_gen_andi_i64(vc, vb, ~amask);
+ }
+ break;
+ case 0x64:
+ /* CMOVLE */
+ tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
+ vb, load_gpr(ctx, rc));
+ break;
+ case 0x66:
+ /* CMOVGT */
+ tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
+ vb, load_gpr(ctx, rc));
+ break;
+ case 0x6C:
+ /* IMPLVER */
+ REQUIRE_REG_31(ra);
+ tcg_gen_movi_i64(vc, ctx->implver);
+ break;
+ default:
+ goto invalid_opc;
+ }
+ break;
+
+ case 0x12:
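+        /* Shift and byte-manipulation operations */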
+ vc = dest_gpr(ctx, rc);
+ va = load_gpr(ctx, ra);
+ switch (fn7) {
+ case 0x02:
+ /* MSKBL */
+ gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
+ break;
+ case 0x06:
+ /* EXTBL */
+ gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
+ break;
+ case 0x0B:
+ /* INSBL */
+ gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
+ break;
+ case 0x12:
+ /* MSKWL */
+ gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
+ break;
+ case 0x16:
+ /* EXTWL */
+ gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
+ break;
+ case 0x1B:
+ /* INSWL */
+ gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
+ break;
+ case 0x22:
+ /* MSKLL */
+ gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
+ break;
+ case 0x26:
+ /* EXTLL */
+ gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
+ break;
+ case 0x2B:
+ /* INSLL */
+ gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
+ break;
+ case 0x30:
+ /* ZAP */
+ if (islit) {
+ gen_zapnoti(vc, va, ~lit);
+ } else {
+ gen_helper_zap(vc, va, load_gpr(ctx, rb));
+ }
+ break;
+ case 0x31:
+ /* ZAPNOT */
+ if (islit) {
+ gen_zapnoti(vc, va, lit);
+ } else {
+ gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
+ }
+ break;
+ case 0x32:
+ /* MSKQL */
+ gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
+ break;
+ case 0x34:
+ /* SRL */
+ if (islit) {
+ tcg_gen_shri_i64(vc, va, lit & 0x3f);
+ } else {
+ tmp = tcg_temp_new();
+ vb = load_gpr(ctx, rb);
+ tcg_gen_andi_i64(tmp, vb, 0x3f);
+ tcg_gen_shr_i64(vc, va, tmp);
+ tcg_temp_free(tmp);
+ }
+ break;
+ case 0x36:
+ /* EXTQL */
+ gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
+ break;
+ case 0x39:
+ /* SLL */
+ if (islit) {
+ tcg_gen_shli_i64(vc, va, lit & 0x3f);
+ } else {
+ tmp = tcg_temp_new();
+ vb = load_gpr(ctx, rb);
+ tcg_gen_andi_i64(tmp, vb, 0x3f);
+ tcg_gen_shl_i64(vc, va, tmp);
+ tcg_temp_free(tmp);
+ }
+ break;
+ case 0x3B:
+ /* INSQL */
+ gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
+ break;
+ case 0x3C:
+ /* SRA */
+ if (islit) {
+ tcg_gen_sari_i64(vc, va, lit & 0x3f);
+ } else {
+ tmp = tcg_temp_new();
+ vb = load_gpr(ctx, rb);
+ tcg_gen_andi_i64(tmp, vb, 0x3f);
+ tcg_gen_sar_i64(vc, va, tmp);
+ tcg_temp_free(tmp);
+ }
+ break;
+ case 0x52:
+ /* MSKWH */
+ gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
+ break;
+ case 0x57:
+ /* INSWH */
+ gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
+ break;
+ case 0x5A:
+ /* EXTWH */
+ gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
+ break;
+ case 0x62:
+ /* MSKLH */
+ gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
+ break;
+ case 0x67:
+ /* INSLH */
+ gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
+ break;
+ case 0x6A:
+ /* EXTLH */
+ gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
+ break;
+ case 0x72:
+ /* MSKQH */
+ gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
+ break;
+ case 0x77:
+ /* INSQH */
+ gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
+ break;
+ case 0x7A:
+ /* EXTQH */
+ gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
+ break;
+ default:
+ goto invalid_opc;
+ }
+ break;
+
+ case 0x13:
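+        /* Integer multiply operations */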
+ vc = dest_gpr(ctx, rc);
+ vb = load_gpr_lit(ctx, rb, lit, islit);
+ va = load_gpr(ctx, ra);
+ switch (fn7) {
+ case 0x00:
+ /* MULL */
+ tcg_gen_mul_i64(vc, va, vb);
+ tcg_gen_ext32s_i64(vc, vc);
+ break;
+ case 0x20:
+ /* MULQ */
+ tcg_gen_mul_i64(vc, va, vb);
+ break;
+ case 0x30:
+ /* UMULH */
+ tmp = tcg_temp_new();
+ tcg_gen_mulu2_i64(tmp, vc, va, vb);
+ tcg_temp_free(tmp);
+ break;
+ case 0x40:
+ /* MULL/V */
+ tmp = tcg_temp_new();
+ tcg_gen_ext32s_i64(tmp, va);
+ tcg_gen_ext32s_i64(vc, vb);
+ tcg_gen_mul_i64(tmp, tmp, vc);
+ tcg_gen_ext32s_i64(vc, tmp);
+ gen_helper_check_overflow(cpu_env, vc, tmp);
+ tcg_temp_free(tmp);
+ break;
+ case 0x60:
+ /* MULQ/V */
+ tmp = tcg_temp_new();
+ tmp2 = tcg_temp_new();
+ tcg_gen_muls2_i64(vc, tmp, va, vb);
+ tcg_gen_sari_i64(tmp2, vc, 63);
+ gen_helper_check_overflow(cpu_env, tmp, tmp2);
+ tcg_temp_free(tmp);
+ tcg_temp_free(tmp2);
+ break;
+ default:
+ goto invalid_opc;
+ }
+ break;
+
+ case 0x14:
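+        /* Integer-to-FP register moves and square roots */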
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
+ vc = dest_fpr(ctx, rc);
+ switch (fpfn) { /* fn11 & 0x3F */
+ case 0x04:
+ /* ITOFS */
+ REQUIRE_REG_31(rb);
+ t32 = tcg_temp_new_i32();
+ va = load_gpr(ctx, ra);
+ tcg_gen_extrl_i64_i32(t32, va);
+ gen_helper_memory_to_s(vc, t32);
+ tcg_temp_free_i32(t32);
+ break;
+ case 0x0A:
+ /* SQRTF */
+ REQUIRE_REG_31(ra);
+ vb = load_fpr(ctx, rb);
+ gen_helper_sqrtf(vc, cpu_env, vb);
+ break;
+ case 0x0B:
+ /* SQRTS */
+ REQUIRE_REG_31(ra);
+ gen_sqrts(ctx, rb, rc, fn11);
+ break;
+ case 0x14:
+ /* ITOFF */
+ REQUIRE_REG_31(rb);
+ t32 = tcg_temp_new_i32();
+ va = load_gpr(ctx, ra);
+ tcg_gen_extrl_i64_i32(t32, va);
+ gen_helper_memory_to_f(vc, t32);
+ tcg_temp_free_i32(t32);
+ break;
+ case 0x24:
+ /* ITOFT */
+ REQUIRE_REG_31(rb);
+ va = load_gpr(ctx, ra);
+ tcg_gen_mov_i64(vc, va);
+ break;
+ case 0x2A:
+ /* SQRTG */
+ REQUIRE_REG_31(ra);
+ vb = load_fpr(ctx, rb);
+ gen_helper_sqrtg(vc, cpu_env, vb);
+ break;
+ case 0x02B:
+ /* SQRTT */
+ REQUIRE_REG_31(ra);
+ gen_sqrtt(ctx, rb, rc, fn11);
+ break;
+ default:
+ goto invalid_opc;
+ }
+ break;
+
+ case 0x15:
+ /* VAX floating point */
+ /* XXX: rounding mode and trap are ignored (!) */
+ vc = dest_fpr(ctx, rc);
+ vb = load_fpr(ctx, rb);
+ va = load_fpr(ctx, ra);
+ switch (fpfn) { /* fn11 & 0x3F */
+ case 0x00:
+ /* ADDF */
+ gen_helper_addf(vc, cpu_env, va, vb);
+ break;
+ case 0x01:
+ /* SUBF */
+ gen_helper_subf(vc, cpu_env, va, vb);
+ break;
+ case 0x02:
+ /* MULF */
+ gen_helper_mulf(vc, cpu_env, va, vb);
+ break;
+ case 0x03:
+ /* DIVF */
+ gen_helper_divf(vc, cpu_env, va, vb);
+ break;
+ case 0x1E:
+ /* CVTDG -- TODO */
+ REQUIRE_REG_31(ra);
+ goto invalid_opc;
+ case 0x20:
+ /* ADDG */
+ gen_helper_addg(vc, cpu_env, va, vb);
+ break;
+ case 0x21:
+ /* SUBG */
+ gen_helper_subg(vc, cpu_env, va, vb);
+ break;
+ case 0x22:
+ /* MULG */
+ gen_helper_mulg(vc, cpu_env, va, vb);
+ break;
+ case 0x23:
+ /* DIVG */
+ gen_helper_divg(vc, cpu_env, va, vb);
+ break;
+ case 0x25:
+ /* CMPGEQ */
+ gen_helper_cmpgeq(vc, cpu_env, va, vb);
+ break;
+ case 0x26:
+ /* CMPGLT */
+ gen_helper_cmpglt(vc, cpu_env, va, vb);
+ break;
+ case 0x27:
+ /* CMPGLE */
+ gen_helper_cmpgle(vc, cpu_env, va, vb);
+ break;
+ case 0x2C:
+ /* CVTGF */
+ REQUIRE_REG_31(ra);
+ gen_helper_cvtgf(vc, cpu_env, vb);
+ break;
+ case 0x2D:
+ /* CVTGD -- TODO */
+ REQUIRE_REG_31(ra);
+ goto invalid_opc;
+ case 0x2F:
+ /* CVTGQ */
+ REQUIRE_REG_31(ra);
+ gen_helper_cvtgq(vc, cpu_env, vb);
+ break;
+ case 0x3C:
+ /* CVTQF */
+ REQUIRE_REG_31(ra);
+ gen_helper_cvtqf(vc, cpu_env, vb);
+ break;
+ case 0x3E:
+ /* CVTQG */
+ REQUIRE_REG_31(ra);
+ gen_helper_cvtqg(vc, cpu_env, vb);
+ break;
+ default:
+ goto invalid_opc;
+ }
+ break;
+
+ case 0x16:
+ /* IEEE floating-point */
+ switch (fpfn) { /* fn11 & 0x3F */
+ case 0x00:
+ /* ADDS */
+ gen_adds(ctx, ra, rb, rc, fn11);
+ break;
+ case 0x01:
+ /* SUBS */
+ gen_subs(ctx, ra, rb, rc, fn11);
+ break;
+ case 0x02:
+ /* MULS */
+ gen_muls(ctx, ra, rb, rc, fn11);
+ break;
+ case 0x03:
+ /* DIVS */
+ gen_divs(ctx, ra, rb, rc, fn11);
+ break;
+ case 0x20:
+ /* ADDT */
+ gen_addt(ctx, ra, rb, rc, fn11);
+ break;
+ case 0x21:
+ /* SUBT */
+ gen_subt(ctx, ra, rb, rc, fn11);
+ break;
+ case 0x22:
+ /* MULT */
+ gen_mult(ctx, ra, rb, rc, fn11);
+ break;
+ case 0x23:
+ /* DIVT */
+ gen_divt(ctx, ra, rb, rc, fn11);
+ break;
+ case 0x24:
+ /* CMPTUN */
+ gen_cmptun(ctx, ra, rb, rc, fn11);
+ break;
+ case 0x25:
+ /* CMPTEQ */
+ gen_cmpteq(ctx, ra, rb, rc, fn11);
+ break;
+ case 0x26:
+ /* CMPTLT */
+ gen_cmptlt(ctx, ra, rb, rc, fn11);
+ break;
+ case 0x27:
+ /* CMPTLE */
+ gen_cmptle(ctx, ra, rb, rc, fn11);
+ break;
+ case 0x2C:
+ REQUIRE_REG_31(ra);
+ if (fn11 == 0x2AC || fn11 == 0x6AC) {
+ /* CVTST */
+ gen_cvtst(ctx, rb, rc, fn11);
+ } else {
+ /* CVTTS */
+ gen_cvtts(ctx, rb, rc, fn11);
+ }
+ break;
+ case 0x2F:
+ /* CVTTQ */
+ REQUIRE_REG_31(ra);
+ gen_cvttq(ctx, rb, rc, fn11);
+ break;
+ case 0x3C:
+ /* CVTQS */
+ REQUIRE_REG_31(ra);
+ gen_cvtqs(ctx, rb, rc, fn11);
+ break;
+ case 0x3E:
+ /* CVTQT */
+ REQUIRE_REG_31(ra);
+ gen_cvtqt(ctx, rb, rc, fn11);
+ break;
+ default:
+ goto invalid_opc;
+ }
+ break;
+
+ case 0x17:
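+        /* FP copy-sign, FPCR access, conditional moves, and converts */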
+ switch (fn11) {
+ case 0x010:
+ /* CVTLQ */
+ REQUIRE_REG_31(ra);
+ vc = dest_fpr(ctx, rc);
+ vb = load_fpr(ctx, rb);
+ gen_cvtlq(vc, vb);
+ break;
+ case 0x020:
+ /* CPYS */
+ if (rc == 31) {
+ /* Special case CPYS as FNOP. */
+ } else {
+ vc = dest_fpr(ctx, rc);
+ va = load_fpr(ctx, ra);
+ if (ra == rb) {
+ /* Special case CPYS as FMOV. */
+ tcg_gen_mov_i64(vc, va);
+ } else {
+ vb = load_fpr(ctx, rb);
+ gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
+ }
+ }
+ break;
+ case 0x021:
+ /* CPYSN */
+ vc = dest_fpr(ctx, rc);
+ vb = load_fpr(ctx, rb);
+ va = load_fpr(ctx, ra);
+ gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
+ break;
+ case 0x022:
+ /* CPYSE */
+ vc = dest_fpr(ctx, rc);
+ vb = load_fpr(ctx, rb);
+ va = load_fpr(ctx, ra);
+ gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
+ break;
+ case 0x024:
+ /* MT_FPCR */
+ va = load_fpr(ctx, ra);
+ gen_helper_store_fpcr(cpu_env, va);
+ if (ctx->tb_rm == QUAL_RM_D) {
+ /* Re-do the copy of the rounding mode to fp_status
+ the next time we use dynamic rounding. */
+ ctx->tb_rm = -1;
+ }
+ break;
+ case 0x025:
+ /* MF_FPCR */
+ va = dest_fpr(ctx, ra);
+ gen_helper_load_fpcr(va, cpu_env);
+ break;
+ case 0x02A:
+ /* FCMOVEQ */
+ gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
+ break;
+ case 0x02B:
+ /* FCMOVNE */
+ gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
+ break;
+ case 0x02C:
+ /* FCMOVLT */
+ gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
+ break;
+ case 0x02D:
+ /* FCMOVGE */
+ gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
+ break;
+ case 0x02E:
+ /* FCMOVLE */
+ gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
+ break;
+ case 0x02F:
+ /* FCMOVGT */
+ gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
+ break;
+ case 0x030: /* CVTQL */
+ case 0x130: /* CVTQL/V */
+ case 0x530: /* CVTQL/SV */
+ REQUIRE_REG_31(ra);
+ vc = dest_fpr(ctx, rc);
+ vb = load_fpr(ctx, rb);
+ gen_helper_cvtql(vc, cpu_env, vb);
+ gen_fp_exc_raise(rc, fn11);
+ break;
+ default:
+ goto invalid_opc;
+ }
+ break;
+
+ case 0x18:
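+        /* Miscellaneous: trap/memory barriers, RPCC, RC/RS, prefetch hints */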
+ switch ((uint16_t)disp16) {
+ case 0x0000:
+ /* TRAPB */
+ /* No-op. */
+ break;
+ case 0x0400:
+ /* EXCB */
+ /* No-op. */
+ break;
+ case 0x4000:
+ /* MB */
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
+ break;
+ case 0x4400:
+ /* WMB */
+ tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
+ break;
+ case 0x8000:
+ /* FETCH */
+ /* No-op */
+ break;
+ case 0xA000:
+ /* FETCH_M */
+ /* No-op */
+ break;
+ case 0xC000:
+ /* RPCC */
+ va = dest_gpr(ctx, ra);
+ if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_start();
+ gen_helper_load_pcc(va, cpu_env);
+ gen_io_end();
+ ret = EXIT_PC_STALE;
+ } else {
+ gen_helper_load_pcc(va, cpu_env);
+ }
+ break;
+ case 0xE000:
+ /* RC */
+ gen_rx(ctx, ra, 0);
+ break;
+ case 0xE800:
+ /* ECB */
+ break;
+ case 0xF000:
+ /* RS */
+ gen_rx(ctx, ra, 1);
+ break;
+ case 0xF800:
+ /* WH64 */
+ /* No-op */
+ break;
+ case 0xFC00:
+ /* WH64EN */
+ /* No-op */
+ break;
+ default:
+ goto invalid_opc;
+ }
+ break;
+
+ case 0x19:
+ /* HW_MFPR (PALcode) */
+#ifndef CONFIG_USER_ONLY
+ REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
+ va = dest_gpr(ctx, ra);
+ ret = gen_mfpr(ctx, va, insn & 0xffff);
+ break;
+#else
+ goto invalid_opc;
+#endif
+
+ case 0x1A:
+ /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
+ prediction stack action, which of course we don't implement. */
+ vb = load_gpr(ctx, rb);
+ tcg_gen_andi_i64(cpu_pc, vb, ~3);
+ if (ra != 31) {
+ tcg_gen_movi_i64(ctx->ir[ra], ctx->pc);
+ }
+ ret = EXIT_PC_UPDATED;
+ break;
+
+ case 0x1B:
+ /* HW_LD (PALcode) */
+#ifndef CONFIG_USER_ONLY
+ REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
+ {
+ TCGv addr = tcg_temp_new();
+ vb = load_gpr(ctx, rb);
+ va = dest_gpr(ctx, ra);
+
+ tcg_gen_addi_i64(addr, vb, disp12);
+ switch ((insn >> 12) & 0xF) {
+ case 0x0:
+ /* Longword physical access (hw_ldl/p) */
+ tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
+ break;
+ case 0x1:
+ /* Quadword physical access (hw_ldq/p) */
+ tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEQ);
+ break;
+ case 0x2:
+ /* Longword physical access with lock (hw_ldl_l/p) */
+ gen_qemu_ldl_l(va, addr, MMU_PHYS_IDX);
+ break;
+ case 0x3:
+ /* Quadword physical access with lock (hw_ldq_l/p) */
+ gen_qemu_ldq_l(va, addr, MMU_PHYS_IDX);
+ break;
+ case 0x4:
+ /* Longword virtual PTE fetch (hw_ldl/v) */
+ goto invalid_opc;
+ case 0x5:
+ /* Quadword virtual PTE fetch (hw_ldq/v) */
+ goto invalid_opc;
+ break;
+ case 0x6:
+ /* Invalid */
+ goto invalid_opc;
+ case 0x7:
+                /* Invalid */
+ goto invalid_opc;
+ case 0x8:
+ /* Longword virtual access (hw_ldl) */
+ goto invalid_opc;
+ case 0x9:
+ /* Quadword virtual access (hw_ldq) */
+ goto invalid_opc;
+ case 0xA:
+ /* Longword virtual access with protection check (hw_ldl/w) */
+ tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
+ break;
+ case 0xB:
+ /* Quadword virtual access with protection check (hw_ldq/w) */
+ tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
+ break;
+ case 0xC:
+ /* Longword virtual access with alt access mode (hw_ldl/a)*/
+ goto invalid_opc;
+ case 0xD:
+ /* Quadword virtual access with alt access mode (hw_ldq/a) */
+ goto invalid_opc;
+ case 0xE:
+ /* Longword virtual access with alternate access mode and
+ protection checks (hw_ldl/wa) */
+ tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
+ break;
+ case 0xF:
+ /* Quadword virtual access with alternate access mode and
+ protection checks (hw_ldq/wa) */
+ tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
+ break;
+ }
+ tcg_temp_free(addr);
+ break;
+ }
+#else
+ goto invalid_opc;
+#endif
+
+ case 0x1C:
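+        /* FP-to-integer moves and the BWX/CIX/MVI extension operations */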
+ vc = dest_gpr(ctx, rc);
+ if (fn7 == 0x70) {
+ /* FTOIT */
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
+ REQUIRE_REG_31(rb);
+ va = load_fpr(ctx, ra);
+ tcg_gen_mov_i64(vc, va);
+ break;
+ } else if (fn7 == 0x78) {
+ /* FTOIS */
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
+ REQUIRE_REG_31(rb);
+ t32 = tcg_temp_new_i32();
+ va = load_fpr(ctx, ra);
+ gen_helper_s_to_memory(t32, va);
+ tcg_gen_ext_i32_i64(vc, t32);
+ tcg_temp_free_i32(t32);
+ break;
+ }
+
+ vb = load_gpr_lit(ctx, rb, lit, islit);
+ switch (fn7) {
+ case 0x00:
+ /* SEXTB */
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
+ REQUIRE_REG_31(ra);
+ tcg_gen_ext8s_i64(vc, vb);
+ break;
+ case 0x01:
+ /* SEXTW */
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
+ REQUIRE_REG_31(ra);
+ tcg_gen_ext16s_i64(vc, vb);
+ break;
+ case 0x30:
+ /* CTPOP */
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
+ REQUIRE_REG_31(ra);
+ REQUIRE_NO_LIT;
+ gen_helper_ctpop(vc, vb);
+ break;
+ case 0x31:
+ /* PERR */
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+ REQUIRE_NO_LIT;
+ va = load_gpr(ctx, ra);
+ gen_helper_perr(vc, va, vb);
+ break;
+ case 0x32:
+ /* CTLZ */
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
+ REQUIRE_REG_31(ra);
+ REQUIRE_NO_LIT;
+ gen_helper_ctlz(vc, vb);
+ break;
+ case 0x33:
+ /* CTTZ */
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX);
+ REQUIRE_REG_31(ra);
+ REQUIRE_NO_LIT;
+ gen_helper_cttz(vc, vb);
+ break;
+ case 0x34:
+ /* UNPKBW */
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+ REQUIRE_REG_31(ra);
+ REQUIRE_NO_LIT;
+ gen_helper_unpkbw(vc, vb);
+ break;
+ case 0x35:
+ /* UNPKBL */
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+ REQUIRE_REG_31(ra);
+ REQUIRE_NO_LIT;
+ gen_helper_unpkbl(vc, vb);
+ break;
+ case 0x36:
+ /* PKWB */
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+ REQUIRE_REG_31(ra);
+ REQUIRE_NO_LIT;
+ gen_helper_pkwb(vc, vb);
+ break;
+ case 0x37:
+ /* PKLB */
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+ REQUIRE_REG_31(ra);
+ REQUIRE_NO_LIT;
+ gen_helper_pklb(vc, vb);
+ break;
+ case 0x38:
+ /* MINSB8 */
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+ va = load_gpr(ctx, ra);
+ gen_helper_minsb8(vc, va, vb);
+ break;
+ case 0x39:
+ /* MINSW4 */
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+ va = load_gpr(ctx, ra);
+ gen_helper_minsw4(vc, va, vb);
+ break;
+ case 0x3A:
+ /* MINUB8 */
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+ va = load_gpr(ctx, ra);
+ gen_helper_minub8(vc, va, vb);
+ break;
+ case 0x3B:
+ /* MINUW4 */
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+ va = load_gpr(ctx, ra);
+ gen_helper_minuw4(vc, va, vb);
+ break;
+ case 0x3C:
+ /* MAXUB8 */
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+ va = load_gpr(ctx, ra);
+ gen_helper_maxub8(vc, va, vb);
+ break;
+ case 0x3D:
+ /* MAXUW4 */
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+ va = load_gpr(ctx, ra);
+ gen_helper_maxuw4(vc, va, vb);
+ break;
+ case 0x3E:
+ /* MAXSB8 */
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+ va = load_gpr(ctx, ra);
+ gen_helper_maxsb8(vc, va, vb);
+ break;
+ case 0x3F:
+ /* MAXSW4 */
+ REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI);
+ va = load_gpr(ctx, ra);
+ gen_helper_maxsw4(vc, va, vb);
+ break;
+ default:
+ goto invalid_opc;
+ }
+ break;
+
+ case 0x1D:
+ /* HW_MTPR (PALcode) */
+#ifndef CONFIG_USER_ONLY
+ REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
+ vb = load_gpr(ctx, rb);
+ ret = gen_mtpr(ctx, vb, insn & 0xffff);
+ break;
+#else
+ goto invalid_opc;
+#endif
+
+ case 0x1E:
+ /* HW_RET (PALcode) */
+#ifndef CONFIG_USER_ONLY
+ REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
+ if (rb == 31) {
+ /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
+ address from EXC_ADDR. This turns out to be useful for our
+ emulation PALcode, so continue to accept it. */
+ ctx->lit = vb = tcg_temp_new();
+ tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));
+ } else {
+ vb = load_gpr(ctx, rb);
+ }
+ tmp = tcg_temp_new();
+ tcg_gen_movi_i64(tmp, 0);
+ tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
+ tcg_gen_movi_i64(cpu_lock_addr, -1);
+ tcg_gen_andi_i64(tmp, vb, 1);
+ tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, pal_mode));
+ tcg_gen_andi_i64(cpu_pc, vb, ~3);
+ ret = EXIT_PC_UPDATED;
+ break;
+#else
+ goto invalid_opc;
+#endif
+
+ case 0x1F:
+ /* HW_ST (PALcode) */
+#ifndef CONFIG_USER_ONLY
+ REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
+ {
+ switch ((insn >> 12) & 0xF) {
+ case 0x0:
+ /* Longword physical access */
+ va = load_gpr(ctx, ra);
+ vb = load_gpr(ctx, rb);
+ tmp = tcg_temp_new();
+ tcg_gen_addi_i64(tmp, vb, disp12);
+ tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL);
+ tcg_temp_free(tmp);
+ break;
+ case 0x1:
+ /* Quadword physical access */
+ va = load_gpr(ctx, ra);
+ vb = load_gpr(ctx, rb);
+ tmp = tcg_temp_new();
+ tcg_gen_addi_i64(tmp, vb, disp12);
+ tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEQ);
+ tcg_temp_free(tmp);
+ break;
+ case 0x2:
+ /* Longword physical access with lock */
+ ret = gen_store_conditional(ctx, ra, rb, disp12,
+ MMU_PHYS_IDX, MO_LESL);
+ break;
+ case 0x3:
+ /* Quadword physical access with lock */
+ ret = gen_store_conditional(ctx, ra, rb, disp12,
+ MMU_PHYS_IDX, MO_LEQ);
+ break;
+ case 0x4:
+ /* Longword virtual access */
+ goto invalid_opc;
+ case 0x5:
+ /* Quadword virtual access */
+ goto invalid_opc;
+ case 0x6:
+ /* Invalid */
+ goto invalid_opc;
+ case 0x7:
+ /* Invalid */
+ goto invalid_opc;
+ case 0x8:
+ /* Invalid */
+ goto invalid_opc;
+ case 0x9:
+ /* Invalid */
+ goto invalid_opc;
+ case 0xA:
+ /* Invalid */
+ goto invalid_opc;
+ case 0xB:
+ /* Invalid */
+ goto invalid_opc;
+ case 0xC:
+ /* Longword virtual access with alternate access mode */
+ goto invalid_opc;
+ case 0xD:
+ /* Quadword virtual access with alternate access mode */
+ goto invalid_opc;
+ case 0xE:
+ /* Invalid */
+ goto invalid_opc;
+ case 0xF:
+ /* Invalid */
+ goto invalid_opc;
+ }
+ break;
+ }
+#else
+ goto invalid_opc;
+#endif
+ case 0x20:
+ /* LDF */
+ gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
+ break;
+ case 0x21:
+ /* LDG */
+ gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
+ break;
+ case 0x22:
+ /* LDS */
+ gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
+ break;
+ case 0x23:
+ /* LDT */
+ gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
+ break;
+ case 0x24:
+ /* STF */
+ gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
+ break;
+ case 0x25:
+ /* STG */
+ gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
+ break;
+ case 0x26:
+ /* STS */
+ gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
+ break;
+ case 0x27:
+ /* STT */
+ gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
+ break;
+ case 0x28:
+ /* LDL */
+ gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
+ break;
+ case 0x29:
+ /* LDQ */
+ gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
+ break;
+ case 0x2A:
+ /* LDL_L */
+ gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
+ break;
+ case 0x2B:
+ /* LDQ_L */
+ gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
+ break;
+ case 0x2C:
+ /* STL */
+ gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
+ break;
+ case 0x2D:
+ /* STQ */
+ gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
+ break;
+ case 0x2E:
+ /* STL_C */
+ ret = gen_store_conditional(ctx, ra, rb, disp16,
+ ctx->mem_idx, MO_LESL);
+ break;
+ case 0x2F:
+ /* STQ_C */
+ ret = gen_store_conditional(ctx, ra, rb, disp16,
+ ctx->mem_idx, MO_LEQ);
+ break;
+ case 0x30:
+ /* BR */
+ ret = gen_bdirect(ctx, ra, disp21);
+ break;
+ case 0x31: /* FBEQ */
+ ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
+ break;
+ case 0x32: /* FBLT */
+ ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
+ break;
+ case 0x33: /* FBLE */
+ ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
+ break;
+ case 0x34:
+ /* BSR */
+ ret = gen_bdirect(ctx, ra, disp21);
+ break;
+ case 0x35: /* FBNE */
+ ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
+ break;
+ case 0x36: /* FBGE */
+ ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
+ break;
+ case 0x37: /* FBGT */
+ ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
+ break;
+ case 0x38:
+ /* BLBC */
+ ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
+ break;
+ case 0x39:
+ /* BEQ */
+ ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
+ break;
+ case 0x3A:
+ /* BLT */
+ ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
+ break;
+ case 0x3B:
+ /* BLE */
+ ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
+ break;
+ case 0x3C:
+ /* BLBS */
+ ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
+ break;
+ case 0x3D:
+ /* BNE */
+ ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
+ break;
+ case 0x3E:
+ /* BGE */
+ ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
+ break;
+ case 0x3F:
+ /* BGT */
+ ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
+ break;
+ invalid_opc:
+ ret = gen_invalid(ctx);
+ break;
+ }
+
+ return ret;
+}
+
+void gen_intermediate_code(CPUAlphaState *env, struct TranslationBlock *tb)
+{
+ AlphaCPU *cpu = alpha_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ DisasContext ctx, *ctxp = &ctx;
+ target_ulong pc_start;
+ target_ulong pc_mask;
+ uint32_t insn;
+ ExitStatus ret;
+ int num_insns;
+ int max_insns;
+
+ pc_start = tb->pc;
+
+ ctx.tb = tb;
+ ctx.pc = pc_start;
+ ctx.mem_idx = cpu_mmu_index(env, false);
+ ctx.implver = env->implver;
+ ctx.singlestep_enabled = cs->singlestep_enabled;
+
+#ifdef CONFIG_USER_ONLY
+ ctx.ir = cpu_std_ir;
+#else
+ ctx.palbr = env->palbr;
+ ctx.ir = (tb->flags & TB_FLAGS_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
+#endif
+
+ /* ??? Every TB begins with unset rounding mode, to be initialized on
+       the first fp insn of the TB. Alternatively we could define a proper
+       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
+       to reset the FP_STATUS to that default at the end of any TB that
+       changes the default. We could even (gasp) dynamically figure out
+ what default would be most efficient given the running program. */
+ ctx.tb_rm = -1;
+ /* Similarly for flush-to-zero. */
+ ctx.tb_ftz = -1;
+
+ TCGV_UNUSED_I64(ctx.zero);
+ TCGV_UNUSED_I64(ctx.sink);
+ TCGV_UNUSED_I64(ctx.lit);
+
+ num_insns = 0;
+ max_insns = tb->cflags & CF_COUNT_MASK;
+ if (max_insns == 0) {
+ max_insns = CF_COUNT_MASK;
+ }
+ if (max_insns > TCG_MAX_INSNS) {
+ max_insns = TCG_MAX_INSNS;
+ }
+
+ if (in_superpage(&ctx, pc_start)) {
+ pc_mask = (1ULL << 41) - 1;
+ } else {
+ pc_mask = ~TARGET_PAGE_MASK;
+ }
+
+ gen_tb_start(tb);
+ do {
+ tcg_gen_insn_start(ctx.pc);
+ num_insns++;
+
+ if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
+ ret = gen_excp(&ctx, EXCP_DEBUG, 0);
+ /* The address covered by the breakpoint must be included in
+               [tb->pc, tb->pc + tb->size) in order for it to be
+ properly cleared -- thus we increment the PC here so that
+ the logic setting tb->size below does the right thing. */
+ ctx.pc += 4;
+ break;
+ }
+ if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ gen_io_start();
+ }
+ insn = cpu_ldl_code(env, ctx.pc);
+
+ ctx.pc += 4;
+ ret = translate_one(ctxp, insn);
+ free_context_temps(ctxp);
+
+ /* If we reach a page boundary, are single stepping,
+ or exhaust instruction count, stop generation. */
+ if (ret == NO_EXIT
+ && ((ctx.pc & pc_mask) == 0
+ || tcg_op_buf_full()
+ || num_insns >= max_insns
+ || singlestep
+ || ctx.singlestep_enabled)) {
+ ret = EXIT_PC_STALE;
+ }
+ } while (ret == NO_EXIT);
+
+ if (tb->cflags & CF_LAST_IO) {
+ gen_io_end();
+ }
+
+ switch (ret) {
+ case EXIT_GOTO_TB:
+ case EXIT_NORETURN:
+ break;
+ case EXIT_PC_STALE:
+ tcg_gen_movi_i64(cpu_pc, ctx.pc);
+ /* FALLTHRU */
+ case EXIT_PC_UPDATED:
+ if (ctx.singlestep_enabled) {
+ gen_excp_1(EXCP_DEBUG, 0);
+ } else {
+ tcg_gen_exit_tb(0);
+ }
+ break;
+ default:
+ abort();
+ }
+
+ gen_tb_end(tb, num_insns);
+
+ tb->size = ctx.pc - pc_start;
+ tb->icount = num_insns;
+
+#ifdef DEBUG_DISAS
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
+ && qemu_log_in_addr_range(pc_start)) {
+ qemu_log_lock();
+ qemu_log("IN: %s\n", lookup_symbol(pc_start));
+ log_target_disas(cs, pc_start, ctx.pc - pc_start, 1);
+ qemu_log("\n");
+ qemu_log_unlock();
+ }
+#endif
+}
+
+void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ env->pc = data[0];
+}
diff --git a/target/alpha/vax_helper.c b/target/alpha/vax_helper.c
new file mode 100644
index 0000000000..2b0c178274
--- /dev/null
+++ b/target/alpha/vax_helper.c
@@ -0,0 +1,355 @@
+/*
+ * Helpers for vax floating point instructions.
+ *
+ * Copyright (c) 2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "fpu/softfloat.h"
+
+#define FP_STATUS (env->fp_status)
+
+
+/* F floating (VAX) */
+static uint64_t float32_to_f(float32 fa)
+{
+ uint64_t r, exp, mant, sig;
+ CPU_FloatU a;
+
+ a.f = fa;
+ sig = ((uint64_t)a.l & 0x80000000) << 32;
+ exp = (a.l >> 23) & 0xff;
+ mant = ((uint64_t)a.l & 0x007fffff) << 29;
+
+ if (exp == 255) {
+ /* NaN or infinity */
+ r = 1; /* VAX dirty zero */
+ } else if (exp == 0) {
+ if (mant == 0) {
+ /* Zero */
+ r = 0;
+ } else {
+ /* Denormalized */
+ r = sig | ((exp + 1) << 52) | mant;
+ }
+ } else {
+ if (exp >= 253) {
+ /* Overflow */
+ r = 1; /* VAX dirty zero */
+ } else {
+ r = sig | ((exp + 2) << 52);
+ }
+ }
+
+ return r;
+}
+
+static float32 f_to_float32(CPUAlphaState *env, uintptr_t retaddr, uint64_t a)
+{
+ uint32_t exp, mant_sig;
+ CPU_FloatU r;
+
+ exp = ((a >> 55) & 0x80) | ((a >> 52) & 0x7f);
+ mant_sig = ((a >> 32) & 0x80000000) | ((a >> 29) & 0x007fffff);
+
+ if (unlikely(!exp && mant_sig)) {
+ /* Reserved operands / Dirty zero */
+ dynamic_excp(env, retaddr, EXCP_OPCDEC, 0);
+ }
+
+ if (exp < 3) {
+ /* Underflow */
+ r.l = 0;
+ } else {
+ r.l = ((exp - 2) << 23) | mant_sig;
+ }
+
+ return r.f;
+}
+
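+/* Convert F-floats between the 64-bit register format and the
+   32-bit memory format. */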
+uint32_t helper_f_to_memory(uint64_t a)
+{
+ uint32_t r;
+ r = (a & 0x00001fffe0000000ull) >> 13;
+ r |= (a & 0x07ffe00000000000ull) >> 45;
+ r |= (a & 0xc000000000000000ull) >> 48;
+ return r;
+}
+
+uint64_t helper_memory_to_f(uint32_t a)
+{
+ uint64_t r;
+ r = ((uint64_t)(a & 0x0000c000)) << 48;
+ r |= ((uint64_t)(a & 0x003fffff)) << 45;
+ r |= ((uint64_t)(a & 0xffff0000)) << 13;
+ if (!(a & 0x00004000)) {
+ r |= 0x7ll << 59;
+ }
+ return r;
+}
+
+/* ??? Emulating VAX arithmetic with IEEE arithmetic is wrong. We should
+ either implement VAX arithmetic properly or just signal invalid opcode. */
+
+uint64_t helper_addf(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float32 fa, fb, fr;
+
+ fa = f_to_float32(env, GETPC(), a);
+ fb = f_to_float32(env, GETPC(), b);
+ fr = float32_add(fa, fb, &FP_STATUS);
+ return float32_to_f(fr);
+}
+
+uint64_t helper_subf(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float32 fa, fb, fr;
+
+ fa = f_to_float32(env, GETPC(), a);
+ fb = f_to_float32(env, GETPC(), b);
+ fr = float32_sub(fa, fb, &FP_STATUS);
+ return float32_to_f(fr);
+}
+
+uint64_t helper_mulf(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float32 fa, fb, fr;
+
+ fa = f_to_float32(env, GETPC(), a);
+ fb = f_to_float32(env, GETPC(), b);
+ fr = float32_mul(fa, fb, &FP_STATUS);
+ return float32_to_f(fr);
+}
+
+uint64_t helper_divf(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float32 fa, fb, fr;
+
+ fa = f_to_float32(env, GETPC(), a);
+ fb = f_to_float32(env, GETPC(), b);
+ fr = float32_div(fa, fb, &FP_STATUS);
+ return float32_to_f(fr);
+}
+
+uint64_t helper_sqrtf(CPUAlphaState *env, uint64_t t)
+{
+ float32 ft, fr;
+
+ ft = f_to_float32(env, GETPC(), t);
+ fr = float32_sqrt(ft, &FP_STATUS);
+ return float32_to_f(fr);
+}
+
+
+/* G floating (VAX) */
+static uint64_t float64_to_g(float64 fa)
+{
+ uint64_t r, exp, mant, sig;
+ CPU_DoubleU a;
+
+ a.d = fa;
+ sig = a.ll & 0x8000000000000000ull;
+ exp = (a.ll >> 52) & 0x7ff;
+ mant = a.ll & 0x000fffffffffffffull;
+
+ if (exp == 2047) {
+ /* NaN or infinity */
+ r = 1; /* VAX dirty zero */
+ } else if (exp == 0) {
+ if (mant == 0) {
+ /* Zero */
+ r = 0;
+ } else {
+ /* Denormalized */
+ r = sig | ((exp + 1) << 52) | mant;
+ }
+ } else {
+ if (exp >= 2045) {
+ /* Overflow */
+ r = 1; /* VAX dirty zero */
+ } else {
+ r = sig | ((exp + 2) << 52);
+ }
+ }
+
+ return r;
+}
+
+static float64 g_to_float64(CPUAlphaState *env, uintptr_t retaddr, uint64_t a)
+{
+ uint64_t exp, mant_sig;
+ CPU_DoubleU r;
+
+ exp = (a >> 52) & 0x7ff;
+ mant_sig = a & 0x800fffffffffffffull;
+
+ if (!exp && mant_sig) {
+ /* Reserved operands / Dirty zero */
+ dynamic_excp(env, retaddr, EXCP_OPCDEC, 0);
+ }
+
+ if (exp < 3) {
+ /* Underflow */
+ r.ll = 0;
+ } else {
+ r.ll = ((exp - 2) << 52) | mant_sig;
+ }
+
+ return r.d;
+}
+
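+/* Convert G-floats between register and memory formats by swapping
+   the four 16-bit words. */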
+uint64_t helper_g_to_memory(uint64_t a)
+{
+ uint64_t r;
+ r = (a & 0x000000000000ffffull) << 48;
+ r |= (a & 0x00000000ffff0000ull) << 16;
+ r |= (a & 0x0000ffff00000000ull) >> 16;
+ r |= (a & 0xffff000000000000ull) >> 48;
+ return r;
+}
+
+uint64_t helper_memory_to_g(uint64_t a)
+{
+ uint64_t r;
+ r = (a & 0x000000000000ffffull) << 48;
+ r |= (a & 0x00000000ffff0000ull) << 16;
+ r |= (a & 0x0000ffff00000000ull) >> 16;
+ r |= (a & 0xffff000000000000ull) >> 48;
+ return r;
+}
+
+uint64_t helper_addg(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb, fr;
+
+ fa = g_to_float64(env, GETPC(), a);
+ fb = g_to_float64(env, GETPC(), b);
+ fr = float64_add(fa, fb, &FP_STATUS);
+ return float64_to_g(fr);
+}
+
+uint64_t helper_subg(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb, fr;
+
+ fa = g_to_float64(env, GETPC(), a);
+ fb = g_to_float64(env, GETPC(), b);
+ fr = float64_sub(fa, fb, &FP_STATUS);
+ return float64_to_g(fr);
+}
+
+uint64_t helper_mulg(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb, fr;
+
+ fa = g_to_float64(env, GETPC(), a);
+ fb = g_to_float64(env, GETPC(), b);
+ fr = float64_mul(fa, fb, &FP_STATUS);
+ return float64_to_g(fr);
+}
+
+uint64_t helper_divg(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb, fr;
+
+ fa = g_to_float64(env, GETPC(), a);
+ fb = g_to_float64(env, GETPC(), b);
+ fr = float64_div(fa, fb, &FP_STATUS);
+ return float64_to_g(fr);
+}
+
+uint64_t helper_sqrtg(CPUAlphaState *env, uint64_t a)
+{
+ float64 fa, fr;
+
+ fa = g_to_float64(env, GETPC(), a);
+ fr = float64_sqrt(fa, &FP_STATUS);
+ return float64_to_g(fr);
+}
+
+uint64_t helper_cmpgeq(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb;
+
+ fa = g_to_float64(env, GETPC(), a);
+ fb = g_to_float64(env, GETPC(), b);
+
+ if (float64_eq_quiet(fa, fb, &FP_STATUS)) {
+ return 0x4000000000000000ULL;
+ } else {
+ return 0;
+ }
+}
+
+uint64_t helper_cmpgle(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb;
+
+ fa = g_to_float64(env, GETPC(), a);
+ fb = g_to_float64(env, GETPC(), b);
+
+ if (float64_le(fa, fb, &FP_STATUS)) {
+ return 0x4000000000000000ULL;
+ } else {
+ return 0;
+ }
+}
+
+uint64_t helper_cmpglt(CPUAlphaState *env, uint64_t a, uint64_t b)
+{
+ float64 fa, fb;
+
+ fa = g_to_float64(env, GETPC(), a);
+ fb = g_to_float64(env, GETPC(), b);
+
+ if (float64_lt(fa, fb, &FP_STATUS)) {
+ return 0x4000000000000000ULL;
+ } else {
+ return 0;
+ }
+}
+
+uint64_t helper_cvtqf(CPUAlphaState *env, uint64_t a)
+{
+ float32 fr = int64_to_float32(a, &FP_STATUS);
+ return float32_to_f(fr);
+}
+
+uint64_t helper_cvtgf(CPUAlphaState *env, uint64_t a)
+{
+ float64 fa;
+ float32 fr;
+
+ fa = g_to_float64(env, GETPC(), a);
+ fr = float64_to_float32(fa, &FP_STATUS);
+ return float32_to_f(fr);
+}
+
+uint64_t helper_cvtgq(CPUAlphaState *env, uint64_t a)
+{
+ float64 fa = g_to_float64(env, GETPC(), a);
+ return float64_to_int64_round_to_zero(fa, &FP_STATUS);
+}
+
+uint64_t helper_cvtqg(CPUAlphaState *env, uint64_t a)
+{
+ float64 fr;
+ fr = int64_to_float64(a, &FP_STATUS);
+ return float64_to_g(fr);
+}
diff --git a/target/arm/Makefile.objs b/target/arm/Makefile.objs
new file mode 100644
index 0000000000..847fb52ee0
--- /dev/null
+++ b/target/arm/Makefile.objs
@@ -0,0 +1,12 @@
+obj-y += arm-semi.o
+obj-$(CONFIG_SOFTMMU) += machine.o psci.o arch_dump.o monitor.o
+obj-$(CONFIG_KVM) += kvm.o
+obj-$(call land,$(CONFIG_KVM),$(call lnot,$(TARGET_AARCH64))) += kvm32.o
+obj-$(call land,$(CONFIG_KVM),$(TARGET_AARCH64)) += kvm64.o
+obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o
+obj-y += translate.o op_helper.o helper.o cpu.o
+obj-y += neon_helper.o iwmmxt_helper.o
+obj-y += gdbstub.o
+obj-$(TARGET_AARCH64) += cpu64.o translate-a64.o helper-a64.o gdbstub64.o
+obj-y += crypto_helper.o
+obj-$(CONFIG_SOFTMMU) += arm-powerctl.o
diff --git a/target/arm/arch_dump.c b/target/arm/arch_dump.c
new file mode 100644
index 0000000000..1a9861f69b
--- /dev/null
+++ b/target/arm/arch_dump.c
@@ -0,0 +1,337 @@
+/* Support for writing ELF notes for ARM architectures
+ *
+ * Copyright (C) 2015 Red Hat Inc.
+ *
+ * Author: Andrew Jones <drjones@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "elf.h"
+#include "sysemu/dump.h"
+
+/* struct user_pt_regs from arch/arm64/include/uapi/asm/ptrace.h */
+struct aarch64_user_regs {
+ uint64_t regs[31];
+ uint64_t sp;
+ uint64_t pc;
+ uint64_t pstate;
+} QEMU_PACKED;
+
+QEMU_BUILD_BUG_ON(sizeof(struct aarch64_user_regs) != 272);
+
+/* struct elf_prstatus from include/uapi/linux/elfcore.h */
+struct aarch64_elf_prstatus {
+ char pad1[32]; /* 32 == offsetof(struct elf_prstatus, pr_pid) */
+ uint32_t pr_pid;
+ char pad2[76]; /* 76 == offsetof(struct elf_prstatus, pr_reg) -
+ offsetof(struct elf_prstatus, pr_ppid) */
+ struct aarch64_user_regs pr_reg;
+ uint32_t pr_fpvalid;
+ char pad3[4];
+} QEMU_PACKED;
+
+QEMU_BUILD_BUG_ON(sizeof(struct aarch64_elf_prstatus) != 392);
+
+/* struct user_fpsimd_state from arch/arm64/include/uapi/asm/ptrace.h
+ *
+ * While the vregs member of user_fpsimd_state is of type __uint128_t,
+ * QEMU uses an array of uint64_t, where the high half of the 128-bit
+ * value is always in the 2n+1'th index. Thus we also break the 128-
+ * bit values into two halves in this reproduction of user_fpsimd_state.
+ */
+struct aarch64_user_vfp_state {
+ uint64_t vregs[64];
+ uint32_t fpsr;
+ uint32_t fpcr;
+ char pad[8];
+} QEMU_PACKED;
+
+QEMU_BUILD_BUG_ON(sizeof(struct aarch64_user_vfp_state) != 528);
+
+struct aarch64_note {
+ Elf64_Nhdr hdr;
+ char name[8]; /* align_up(sizeof("CORE"), 4) */
+ union {
+ struct aarch64_elf_prstatus prstatus;
+ struct aarch64_user_vfp_state vfp;
+ };
+} QEMU_PACKED;
+
+#define AARCH64_NOTE_HEADER_SIZE offsetof(struct aarch64_note, prstatus)
+#define AARCH64_PRSTATUS_NOTE_SIZE \
+ (AARCH64_NOTE_HEADER_SIZE + sizeof(struct aarch64_elf_prstatus))
+#define AARCH64_PRFPREG_NOTE_SIZE \
+ (AARCH64_NOTE_HEADER_SIZE + sizeof(struct aarch64_user_vfp_state))
+
+static void aarch64_note_init(struct aarch64_note *note, DumpState *s,
+ const char *name, Elf64_Word namesz,
+ Elf64_Word type, Elf64_Word descsz)
+{
+ memset(note, 0, sizeof(*note));
+
+ note->hdr.n_namesz = cpu_to_dump32(s, namesz);
+ note->hdr.n_descsz = cpu_to_dump32(s, descsz);
+ note->hdr.n_type = cpu_to_dump32(s, type);
+
+ memcpy(note->name, name, namesz);
+}
+
+static int aarch64_write_elf64_prfpreg(WriteCoreDumpFunction f,
+ CPUARMState *env, int cpuid,
+ DumpState *s)
+{
+ struct aarch64_note note;
+ int ret, i;
+
+ aarch64_note_init(&note, s, "CORE", 5, NT_PRFPREG, sizeof(note.vfp));
+
+ for (i = 0; i < 64; ++i) {
+ note.vfp.vregs[i] = cpu_to_dump64(s, float64_val(env->vfp.regs[i]));
+ }
+
+ if (s->dump_info.d_endian == ELFDATA2MSB) {
+ /* For AArch64 we must always swap the vfp.regs's 2n and 2n+1
+ * entries when generating BE notes, because even big endian
+ * hosts use 2n+1 for the high half.
+ */
+ for (i = 0; i < 32; ++i) {
+ uint64_t tmp = note.vfp.vregs[2*i];
+ note.vfp.vregs[2*i] = note.vfp.vregs[2*i+1];
+ note.vfp.vregs[2*i+1] = tmp;
+ }
+ }
+
+ note.vfp.fpsr = cpu_to_dump32(s, vfp_get_fpsr(env));
+ note.vfp.fpcr = cpu_to_dump32(s, vfp_get_fpcr(env));
+
+ ret = f(&note, AARCH64_PRFPREG_NOTE_SIZE, s);
+ if (ret < 0) {
+ return -1;
+ }
+
+ return 0;
+}
+
+int arm_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
+ int cpuid, void *opaque)
+{
+ struct aarch64_note note;
+ CPUARMState *env = &ARM_CPU(cs)->env;
+ DumpState *s = opaque;
+ uint64_t pstate, sp;
+ int ret, i;
+
+ aarch64_note_init(&note, s, "CORE", 5, NT_PRSTATUS, sizeof(note.prstatus));
+
+ note.prstatus.pr_pid = cpu_to_dump32(s, cpuid);
+ note.prstatus.pr_fpvalid = cpu_to_dump32(s, 1);
+
+ if (!is_a64(env)) {
+ aarch64_sync_32_to_64(env);
+ pstate = cpsr_read(env);
+ sp = 0;
+ } else {
+ pstate = pstate_read(env);
+ sp = env->xregs[31];
+ }
+
+ for (i = 0; i < 31; ++i) {
+ note.prstatus.pr_reg.regs[i] = cpu_to_dump64(s, env->xregs[i]);
+ }
+ note.prstatus.pr_reg.sp = cpu_to_dump64(s, sp);
+ note.prstatus.pr_reg.pc = cpu_to_dump64(s, env->pc);
+ note.prstatus.pr_reg.pstate = cpu_to_dump64(s, pstate);
+
+ ret = f(&note, AARCH64_PRSTATUS_NOTE_SIZE, s);
+ if (ret < 0) {
+ return -1;
+ }
+
+ return aarch64_write_elf64_prfpreg(f, env, cpuid, s);
+}
+
+/* struct pt_regs from arch/arm/include/asm/ptrace.h */
+struct arm_user_regs {
+ uint32_t regs[17];
+ char pad[4];
+} QEMU_PACKED;
+
+QEMU_BUILD_BUG_ON(sizeof(struct arm_user_regs) != 72);
+
+/* struct elf_prstatus from include/uapi/linux/elfcore.h */
+struct arm_elf_prstatus {
+ char pad1[24]; /* 24 == offsetof(struct elf_prstatus, pr_pid) */
+ uint32_t pr_pid;
+ char pad2[44]; /* 44 == offsetof(struct elf_prstatus, pr_reg) -
+ offsetof(struct elf_prstatus, pr_ppid) */
+ struct arm_user_regs pr_reg;
+ uint32_t pr_fpvalid;
+} QEMU_PACKED;
+
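+/* 148 == 24 (pad1) + 4 (pr_pid) + 44 (pad2) + 72 (pr_reg) + 4 (pr_fpvalid) */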
+QEMU_BUILD_BUG_ON(sizeof(struct arm_elf_prstatus) != 148);
+
+/* struct user_vfp from arch/arm/include/asm/user.h */
+struct arm_user_vfp_state {
+ uint64_t vregs[32];
+ uint32_t fpscr;
+} QEMU_PACKED;
+
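+/* 260 == 32 * 8 (vregs) + 4 (fpscr) */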
+QEMU_BUILD_BUG_ON(sizeof(struct arm_user_vfp_state) != 260);
+
+struct arm_note {
+ Elf32_Nhdr hdr;
+ char name[8]; /* align_up(sizeof("LINUX"), 4) */
+ union {
+ struct arm_elf_prstatus prstatus;
+ struct arm_user_vfp_state vfp;
+ };
+} QEMU_PACKED;
+
+#define ARM_NOTE_HEADER_SIZE offsetof(struct arm_note, prstatus)
+#define ARM_PRSTATUS_NOTE_SIZE \
+ (ARM_NOTE_HEADER_SIZE + sizeof(struct arm_elf_prstatus))
+#define ARM_VFP_NOTE_SIZE \
+ (ARM_NOTE_HEADER_SIZE + sizeof(struct arm_user_vfp_state))
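+/*
+ * Elf32_Nhdr is also 12 bytes, so the 32-bit note header is likewise 20
+ * bytes: ARM_PRSTATUS_NOTE_SIZE is 20 + 148 = 168 and ARM_VFP_NOTE_SIZE
+ * is 20 + 260 = 280.
+ */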
+
+static void arm_note_init(struct arm_note *note, DumpState *s,
+ const char *name, Elf32_Word namesz,
+ Elf32_Word type, Elf32_Word descsz)
+{
+ memset(note, 0, sizeof(*note));
+
+ note->hdr.n_namesz = cpu_to_dump32(s, namesz);
+ note->hdr.n_descsz = cpu_to_dump32(s, descsz);
+ note->hdr.n_type = cpu_to_dump32(s, type);
+
+ memcpy(note->name, name, namesz);
+}
+
+static int arm_write_elf32_vfp(WriteCoreDumpFunction f, CPUARMState *env,
+ int cpuid, DumpState *s)
+{
+ struct arm_note note;
+ int ret, i;
+
+ arm_note_init(&note, s, "LINUX", 6, NT_ARM_VFP, sizeof(note.vfp));
+
+ for (i = 0; i < 32; ++i) {
+ note.vfp.vregs[i] = cpu_to_dump64(s, float64_val(env->vfp.regs[i]));
+ }
+
+ note.vfp.fpscr = cpu_to_dump32(s, vfp_get_fpscr(env));
+
+ ret = f(&note, ARM_VFP_NOTE_SIZE, s);
+ if (ret < 0) {
+ return -1;
+ }
+
+ return 0;
+}
+
+int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
+ int cpuid, void *opaque)
+{
+ struct arm_note note;
+ CPUARMState *env = &ARM_CPU(cs)->env;
+ DumpState *s = opaque;
+ int ret, i, fpvalid = !!arm_feature(env, ARM_FEATURE_VFP);
+
+ arm_note_init(&note, s, "CORE", 5, NT_PRSTATUS, sizeof(note.prstatus));
+
+ note.prstatus.pr_pid = cpu_to_dump32(s, cpuid);
+ note.prstatus.pr_fpvalid = cpu_to_dump32(s, fpvalid);
+
+ for (i = 0; i < 16; ++i) {
+ note.prstatus.pr_reg.regs[i] = cpu_to_dump32(s, env->regs[i]);
+ }
+ note.prstatus.pr_reg.regs[16] = cpu_to_dump32(s, cpsr_read(env));
+
+ ret = f(&note, ARM_PRSTATUS_NOTE_SIZE, s);
+ if (ret < 0) {
+ return -1;
+ } else if (fpvalid) {
+ return arm_write_elf32_vfp(f, env, cpuid, s);
+ }
+
+ return 0;
+}
+
+int cpu_get_dump_info(ArchDumpInfo *info,
+ const GuestPhysBlockList *guest_phys_blocks)
+{
+ ARMCPU *cpu = ARM_CPU(first_cpu);
+ CPUARMState *env = &cpu->env;
+ GuestPhysBlock *block;
+ hwaddr lowest_addr = ULLONG_MAX;
+
+ /* Take a best guess at the phys_base. If we get it wrong then the crash
+ * utility will need '--machdep phys_offset=<phys-offset>' added to its
+ * command line, which is no worse than silently assuming a base of zero
+ * and being wrong. This is the same algorithm the crash utility uses when
+ * attempting to guess as it loads non-dumpfile formatted files.
+ */
+ QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) {
+ if (block->target_start < lowest_addr) {
+ lowest_addr = block->target_start;
+ }
+ }
+
+ if (arm_feature(env, ARM_FEATURE_AARCH64)) {
+ info->d_machine = EM_AARCH64;
+ info->d_class = ELFCLASS64;
+ info->page_size = (1 << 16); /* aarch64 max pagesize */
+ if (lowest_addr != ULLONG_MAX) {
+ info->phys_base = lowest_addr;
+ }
+ } else {
+ info->d_machine = EM_ARM;
+ info->d_class = ELFCLASS32;
+ info->page_size = (1 << 12);
+ if (lowest_addr < UINT_MAX) {
+ info->phys_base = lowest_addr;
+ }
+ }
+
+ /* We assume the relevant endianness is that of EL1; this is right
+ * for kernels, but might give the wrong answer if you're trying to
+ * dump a hypervisor that happens to be running an opposite-endian
+ * kernel.
+ */
+ info->d_endian = (env->cp15.sctlr_el[1] & SCTLR_EE) != 0
+ ? ELFDATA2MSB : ELFDATA2LSB;
+
+ return 0;
+}
+
+ssize_t cpu_get_note_size(int class, int machine, int nr_cpus)
+{
+ ARMCPU *cpu = ARM_CPU(first_cpu);
+ CPUARMState *env = &cpu->env;
+ size_t note_size;
+
+ if (class == ELFCLASS64) {
+ note_size = AARCH64_PRSTATUS_NOTE_SIZE;
+ note_size += AARCH64_PRFPREG_NOTE_SIZE;
+ } else {
+ note_size = ARM_PRSTATUS_NOTE_SIZE;
+ if (arm_feature(env, ARM_FEATURE_VFP)) {
+ note_size += ARM_VFP_NOTE_SIZE;
+ }
+ }
+
+ return note_size * nr_cpus;
+}
diff --git a/target/arm/arm-powerctl.c b/target/arm/arm-powerctl.c
new file mode 100644
index 0000000000..fbb7a15daa
--- /dev/null
+++ b/target/arm/arm-powerctl.c
@@ -0,0 +1,228 @@
+/*
+ * QEMU support -- ARM Power Control specific functions.
+ *
+ * Copyright (c) 2016 Jean-Christophe Dubois
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "cpu-qom.h"
+#include "internals.h"
+#include "arm-powerctl.h"
+#include "qemu/log.h"
+#include "exec/exec-all.h"
+
+#ifndef DEBUG_ARM_POWERCTL
+#define DEBUG_ARM_POWERCTL 0
+#endif
+
+#define DPRINTF(fmt, args...) \
+ do { \
+ if (DEBUG_ARM_POWERCTL) { \
+ fprintf(stderr, "[ARM]%s: " fmt , __func__, ##args); \
+ } \
+ } while (0)
+
+CPUState *arm_get_cpu_by_id(uint64_t id)
+{
+ CPUState *cpu;
+
+ DPRINTF("cpu %" PRId64 "\n", id);
+
+ CPU_FOREACH(cpu) {
+ ARMCPU *armcpu = ARM_CPU(cpu);
+
+ if (armcpu->mp_affinity == id) {
+ return cpu;
+ }
+ }
+
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "[ARM]%s: Requesting unknown CPU %" PRId64 "\n",
+ __func__, id);
+
+ return NULL;
+}
+
+int arm_set_cpu_on(uint64_t cpuid, uint64_t entry, uint64_t context_id,
+ uint32_t target_el, bool target_aa64)
+{
+ CPUState *target_cpu_state;
+ ARMCPU *target_cpu;
+
+ DPRINTF("cpu %" PRId64 " (EL %d, %s) @ 0x%" PRIx64 " with R0 = 0x%" PRIx64
+ "\n", cpuid, target_el, target_aa64 ? "aarch64" : "aarch32", entry,
+ context_id);
+
+ /* The requested EL must be in the range 1 to 3 */
+ assert((target_el > 0) && (target_el < 4));
+
+ if (target_aa64 && (entry & 3)) {
+ /*
+ * If we are booting in AArch64 mode then "entry" must be 4-byte
+ * aligned.
+ */
+ return QEMU_ARM_POWERCTL_INVALID_PARAM;
+ }
+
+ /* Retrieve the cpu we are powering up */
+ target_cpu_state = arm_get_cpu_by_id(cpuid);
+ if (!target_cpu_state) {
+ /* The cpu was not found */
+ return QEMU_ARM_POWERCTL_INVALID_PARAM;
+ }
+
+ target_cpu = ARM_CPU(target_cpu_state);
+ if (!target_cpu->powered_off) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "[ARM]%s: CPU %" PRId64 " is already on\n",
+ __func__, cpuid);
+ return QEMU_ARM_POWERCTL_ALREADY_ON;
+ }
+
+ /*
+ * The newly brought CPU is requested to enter the exception level
+ * "target_el" and be in the requested mode (AArch64 or AArch32).
+ */
+
+ if (((target_el == 3) && !arm_feature(&target_cpu->env, ARM_FEATURE_EL3)) ||
+ ((target_el == 2) && !arm_feature(&target_cpu->env, ARM_FEATURE_EL2))) {
+ /*
+ * The CPU does not support requested level
+ */
+ return QEMU_ARM_POWERCTL_INVALID_PARAM;
+ }
+
+ if (!target_aa64 && arm_feature(&target_cpu->env, ARM_FEATURE_AARCH64)) {
+ /*
+ * For now we don't support booting an AArch64 CPU in AArch32 mode
+ * TODO: We should add this support later
+ */
+ qemu_log_mask(LOG_UNIMP,
+ "[ARM]%s: Starting AArch64 CPU %" PRId64
+ " in AArch32 mode is not supported yet\n",
+ __func__, cpuid);
+ return QEMU_ARM_POWERCTL_INVALID_PARAM;
+ }
+
+ /* Initialize the cpu we are turning on */
+ cpu_reset(target_cpu_state);
+ target_cpu->powered_off = false;
+ target_cpu_state->halted = 0;
+
+ if (target_aa64) {
+ if ((target_el < 3) && arm_feature(&target_cpu->env, ARM_FEATURE_EL3)) {
+ /*
+ * The target mode is AArch64, so exception levels below EL3
+ * must also run in AArch64; set SCR_EL3.RW to allow this.
+ */
+ target_cpu->env.cp15.scr_el3 |= SCR_RW;
+ }
+
+ if ((target_el < 2) && arm_feature(&target_cpu->env, ARM_FEATURE_EL2)) {
+ /*
+ * The target mode is AArch64, so exception levels below EL2
+ * must also run in AArch64; set HCR_EL2.RW to allow this.
+ */
+ target_cpu->env.cp15.hcr_el2 |= HCR_RW;
+ }
+
+ target_cpu->env.pstate = aarch64_pstate_mode(target_el, true);
+ } else {
+ /* We are requested to boot in AArch32 mode */
+ static uint32_t mode_for_el[] = { 0,
+ ARM_CPU_MODE_SVC,
+ ARM_CPU_MODE_HYP,
+ ARM_CPU_MODE_SVC };
+
+ cpsr_write(&target_cpu->env, mode_for_el[target_el], CPSR_M,
+ CPSRWriteRaw);
+ }
+
+ if (target_el == 3) {
+ /* Processor is in secure mode */
+ target_cpu->env.cp15.scr_el3 &= ~SCR_NS;
+ } else {
+ /* Processor is not in secure mode */
+ target_cpu->env.cp15.scr_el3 |= SCR_NS;
+ }
+
+ /* We check if the started CPU is now at the correct level */
+ assert(target_el == arm_current_el(&target_cpu->env));
+
+ if (target_aa64) {
+ target_cpu->env.xregs[0] = context_id;
+ target_cpu->env.thumb = false;
+ } else {
+ target_cpu->env.regs[0] = context_id;
+ target_cpu->env.thumb = entry & 1;
+ entry &= 0xfffffffe;
+ }
+
+ /* Start the new CPU at the requested address */
+ cpu_set_pc(target_cpu_state, entry);
+
+ qemu_cpu_kick(target_cpu_state);
+
+ /* We are good to go */
+ return QEMU_ARM_POWERCTL_RET_SUCCESS;
+}
+
+int arm_set_cpu_off(uint64_t cpuid)
+{
+ CPUState *target_cpu_state;
+ ARMCPU *target_cpu;
+
+ DPRINTF("cpu %" PRId64 "\n", cpuid);
+
+ /* Retrieve the cpu we are powering down */
+ target_cpu_state = arm_get_cpu_by_id(cpuid);
+ if (!target_cpu_state) {
+ return QEMU_ARM_POWERCTL_INVALID_PARAM;
+ }
+ target_cpu = ARM_CPU(target_cpu_state);
+ if (target_cpu->powered_off) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "[ARM]%s: CPU %" PRId64 " is already off\n",
+ __func__, cpuid);
+ return QEMU_ARM_POWERCTL_IS_OFF;
+ }
+
+ target_cpu->powered_off = true;
+ target_cpu_state->halted = 1;
+ target_cpu_state->exception_index = EXCP_HLT;
+ cpu_loop_exit(target_cpu_state);
+ /* notreached */
+
+ return QEMU_ARM_POWERCTL_RET_SUCCESS;
+}
+
+int arm_reset_cpu(uint64_t cpuid)
+{
+ CPUState *target_cpu_state;
+ ARMCPU *target_cpu;
+
+ DPRINTF("cpu %" PRId64 "\n", cpuid);
+
+ /* Retrieve the cpu we are resetting */
+ target_cpu_state = arm_get_cpu_by_id(cpuid);
+ if (!target_cpu_state) {
+ return QEMU_ARM_POWERCTL_INVALID_PARAM;
+ }
+ target_cpu = ARM_CPU(target_cpu_state);
+ if (target_cpu->powered_off) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "[ARM]%s: CPU %" PRId64 " is off\n",
+ __func__, cpuid);
+ return QEMU_ARM_POWERCTL_IS_OFF;
+ }
+
+ /* Reset the cpu */
+ cpu_reset(target_cpu_state);
+
+ return QEMU_ARM_POWERCTL_RET_SUCCESS;
+}
diff --git a/target/arm/arm-powerctl.h b/target/arm/arm-powerctl.h
new file mode 100644
index 0000000000..98ee04989b
--- /dev/null
+++ b/target/arm/arm-powerctl.h
@@ -0,0 +1,75 @@
+/*
+ * QEMU support -- ARM Power Control specific functions.
+ *
+ * Copyright (c) 2016 Jean-Christophe Dubois
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_ARM_POWERCTL_H
+#define QEMU_ARM_POWERCTL_H
+
+#include "kvm-consts.h"
+
+#define QEMU_ARM_POWERCTL_RET_SUCCESS QEMU_PSCI_RET_SUCCESS
+#define QEMU_ARM_POWERCTL_INVALID_PARAM QEMU_PSCI_RET_INVALID_PARAMS
+#define QEMU_ARM_POWERCTL_ALREADY_ON QEMU_PSCI_RET_ALREADY_ON
+#define QEMU_ARM_POWERCTL_IS_OFF QEMU_PSCI_RET_DENIED
+
+/*
+ * arm_get_cpu_by_id:
+ * @cpuid: the id of the CPU we want to retrieve the state
+ *
+ * Retrieve a CPUState object from its CPU ID provided in @cpuid.
+ *
+ * Returns: a pointer to the CPUState structure of the requested CPU.
+ */
+CPUState *arm_get_cpu_by_id(uint64_t cpuid);
+
+/*
+ * arm_set_cpu_on:
+ * @cpuid: the id of the CPU we want to start/wake up.
+ * @entry: the address the CPU shall start from.
+ * @context_id: the value to put in r0/x0.
+ * @target_el: The desired exception level.
+ * @target_aa64: 1 if the requested mode is AArch64. 0 otherwise.
+ *
+ * Start the cpu designated by @cpuid in @target_el exception level. The mode
+ * shall be AArch64 if @target_aa64 is set to 1. Otherwise the mode is
+ * AArch32. The CPU shall start at @entry with @context_id in r0/x0.
+ *
+ * Returns: QEMU_ARM_POWERCTL_RET_SUCCESS on success.
+ * QEMU_ARM_POWERCTL_INVALID_PARAM if bad parameters are provided.
+ * QEMU_ARM_POWERCTL_ALREADY_ON if the CPU was already started.
+ */
+int arm_set_cpu_on(uint64_t cpuid, uint64_t entry, uint64_t context_id,
+ uint32_t target_el, bool target_aa64);
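+/*
+ * For instance, a PSCI CPU_ON implementation might invoke this as
+ *   arm_set_cpu_on(mpidr, entry_point, context_id, 1, true);
+ * to start the target CPU at EL1 in AArch64 state (illustrative values
+ * only; the real arguments come from the guest's PSCI call).
+ */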
+
+/*
+ * arm_set_cpu_off:
+ * @cpuid: the id of the CPU we want to stop/shut down.
+ *
+ * Stop the cpu designated by @cpuid.
+ *
+ * Returns: QEMU_ARM_POWERCTL_RET_SUCCESS on success.
+ * QEMU_ARM_POWERCTL_INVALID_PARAM if bad parameters are provided.
+ * QEMU_ARM_POWERCTL_IS_OFF if CPU is already off
+ */
+int arm_set_cpu_off(uint64_t cpuid);
+
+/*
+ * arm_reset_cpu:
+ * @cpuid: the id of the CPU we want to reset.
+ *
+ * Reset the cpu designated by @cpuid.
+ *
+ * Returns: QEMU_ARM_POWERCTL_RET_SUCCESS on success.
+ * QEMU_ARM_POWERCTL_INVALID_PARAM if bad parameters are provided.
+ * QEMU_ARM_POWERCTL_IS_OFF if CPU is off
+ */
+int arm_reset_cpu(uint64_t cpuid);
+
+#endif
diff --git a/target/arm/arm-semi.c b/target/arm/arm-semi.c
new file mode 100644
index 0000000000..7cac8734c7
--- /dev/null
+++ b/target/arm/arm-semi.c
@@ -0,0 +1,656 @@
+/*
+ * Arm "Angel" semihosting syscalls
+ *
+ * Copyright (c) 2005, 2007 CodeSourcery.
+ * Written by Paul Brook.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "exec/semihost.h"
+#ifdef CONFIG_USER_ONLY
+#include "qemu.h"
+
+#define ARM_ANGEL_HEAP_SIZE (128 * 1024 * 1024)
+#else
+#include "qemu-common.h"
+#include "exec/gdbstub.h"
+#include "hw/arm/arm.h"
+#include "qemu/cutils.h"
+#endif
+
+#define TARGET_SYS_OPEN 0x01
+#define TARGET_SYS_CLOSE 0x02
+#define TARGET_SYS_WRITEC 0x03
+#define TARGET_SYS_WRITE0 0x04
+#define TARGET_SYS_WRITE 0x05
+#define TARGET_SYS_READ 0x06
+#define TARGET_SYS_READC 0x07
+#define TARGET_SYS_ISTTY 0x09
+#define TARGET_SYS_SEEK 0x0a
+#define TARGET_SYS_FLEN 0x0c
+#define TARGET_SYS_TMPNAM 0x0d
+#define TARGET_SYS_REMOVE 0x0e
+#define TARGET_SYS_RENAME 0x0f
+#define TARGET_SYS_CLOCK 0x10
+#define TARGET_SYS_TIME 0x11
+#define TARGET_SYS_SYSTEM 0x12
+#define TARGET_SYS_ERRNO 0x13
+#define TARGET_SYS_GET_CMDLINE 0x15
+#define TARGET_SYS_HEAPINFO 0x16
+#define TARGET_SYS_EXIT 0x18
+#define TARGET_SYS_SYNCCACHE 0x19
+
+/* ADP_Stopped_ApplicationExit is used for exit(0),
+ * anything else is implemented as exit(1) */
+#define ADP_Stopped_ApplicationExit (0x20026)
+
+#ifndef O_BINARY
+#define O_BINARY 0
+#endif
+
+#define GDB_O_RDONLY 0x000
+#define GDB_O_WRONLY 0x001
+#define GDB_O_RDWR 0x002
+#define GDB_O_APPEND 0x008
+#define GDB_O_CREAT 0x200
+#define GDB_O_TRUNC 0x400
+#define GDB_O_BINARY 0
+
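+/*
+ * The index into the mode tables below is the SYS_OPEN "mode" argument;
+ * the ARM semihosting spec defines it as the twelve ISO C fopen() modes
+ * in order: "r", "rb", "r+", "r+b", "w", "wb", "w+", "w+b", "a", "ab",
+ * "a+", "a+b".
+ */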
+static int gdb_open_modeflags[12] = {
+ GDB_O_RDONLY,
+ GDB_O_RDONLY | GDB_O_BINARY,
+ GDB_O_RDWR,
+ GDB_O_RDWR | GDB_O_BINARY,
+ GDB_O_WRONLY | GDB_O_CREAT | GDB_O_TRUNC,
+ GDB_O_WRONLY | GDB_O_CREAT | GDB_O_TRUNC | GDB_O_BINARY,
+ GDB_O_RDWR | GDB_O_CREAT | GDB_O_TRUNC,
+ GDB_O_RDWR | GDB_O_CREAT | GDB_O_TRUNC | GDB_O_BINARY,
+ GDB_O_WRONLY | GDB_O_CREAT | GDB_O_APPEND,
+ GDB_O_WRONLY | GDB_O_CREAT | GDB_O_APPEND | GDB_O_BINARY,
+ GDB_O_RDWR | GDB_O_CREAT | GDB_O_APPEND,
+ GDB_O_RDWR | GDB_O_CREAT | GDB_O_APPEND | GDB_O_BINARY
+};
+
+static int open_modeflags[12] = {
+ O_RDONLY,
+ O_RDONLY | O_BINARY,
+ O_RDWR,
+ O_RDWR | O_BINARY,
+ O_WRONLY | O_CREAT | O_TRUNC,
+ O_WRONLY | O_CREAT | O_TRUNC | O_BINARY,
+ O_RDWR | O_CREAT | O_TRUNC,
+ O_RDWR | O_CREAT | O_TRUNC | O_BINARY,
+ O_WRONLY | O_CREAT | O_APPEND,
+ O_WRONLY | O_CREAT | O_APPEND | O_BINARY,
+ O_RDWR | O_CREAT | O_APPEND,
+ O_RDWR | O_CREAT | O_APPEND | O_BINARY
+};
+
+#ifdef CONFIG_USER_ONLY
+static inline uint32_t set_swi_errno(TaskState *ts, uint32_t code)
+{
+ if (code == (uint32_t)-1)
+ ts->swi_errno = errno;
+ return code;
+}
+#else
+static inline uint32_t set_swi_errno(CPUARMState *env, uint32_t code)
+{
+ return code;
+}
+
+#include "exec/softmmu-semi.h"
+#endif
+
+static target_ulong arm_semi_syscall_len;
+
+#if !defined(CONFIG_USER_ONLY)
+static target_ulong syscall_err;
+#endif
+
+static void arm_semi_cb(CPUState *cs, target_ulong ret, target_ulong err)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+#ifdef CONFIG_USER_ONLY
+ TaskState *ts = cs->opaque;
+#endif
+ target_ulong reg0 = is_a64(env) ? env->xregs[0] : env->regs[0];
+
+ if (ret == (target_ulong)-1) {
+#ifdef CONFIG_USER_ONLY
+ ts->swi_errno = err;
+#else
+ syscall_err = err;
+#endif
+ reg0 = ret;
+ } else {
+ /* Fix up syscalls that use nonstandard return conventions. */
+ switch (reg0) {
+ case TARGET_SYS_WRITE:
+ case TARGET_SYS_READ:
+ reg0 = arm_semi_syscall_len - ret;
+ break;
+ case TARGET_SYS_SEEK:
+ reg0 = 0;
+ break;
+ default:
+ reg0 = ret;
+ break;
+ }
+ }
+ if (is_a64(env)) {
+ env->xregs[0] = reg0;
+ } else {
+ env->regs[0] = reg0;
+ }
+}
+
+static target_ulong arm_flen_buf(ARMCPU *cpu)
+{
+ /* Return an address in target memory of 64 bytes where the remote
+ * gdb should write its stat struct. (The format of this structure
+ * is defined by GDB's remote protocol and is not target-specific.)
+ * We put this on the guest's stack just below SP.
+ */
+ CPUARMState *env = &cpu->env;
+ target_ulong sp;
+
+ if (is_a64(env)) {
+ sp = env->xregs[31];
+ } else {
+ sp = env->regs[13];
+ }
+
+ return sp - 64;
+}
+
+static void arm_semi_flen_cb(CPUState *cs, target_ulong ret, target_ulong err)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ /* The size is always stored in big-endian order; extract
+ the value. We assume the size always fits in 32 bits. */
+ uint32_t size;
+ cpu_memory_rw_debug(cs, arm_flen_buf(cpu) + 32, (uint8_t *)&size, 4, 0);
+ size = be32_to_cpu(size);
+ if (is_a64(env)) {
+ env->xregs[0] = size;
+ } else {
+ env->regs[0] = size;
+ }
+#ifdef CONFIG_USER_ONLY
+ ((TaskState *)cs->opaque)->swi_errno = err;
+#else
+ syscall_err = err;
+#endif
+}
+
+static target_ulong arm_gdb_syscall(ARMCPU *cpu, gdb_syscall_complete_cb cb,
+ const char *fmt, ...)
+{
+ va_list va;
+ CPUARMState *env = &cpu->env;
+
+ va_start(va, fmt);
+ gdb_do_syscallv(cb, fmt, va);
+ va_end(va);
+
+ /* FIXME: we are implicitly relying on the syscall completing
+ * before this point, which is not guaranteed. We should
+ * put in an explicit synchronization between this and
+ * the callback function.
+ */
+
+ return is_a64(env) ? env->xregs[0] : env->regs[0];
+}
+
+/* Read the input value from the argument block; fail the semihosting
+ * call if the memory read fails.
+ */
+#define GET_ARG(n) do { \
+ if (is_a64(env)) { \
+ if (get_user_u64(arg ## n, args + (n) * 8)) { \
+ return -1; \
+ } \
+ } else { \
+ if (get_user_u32(arg ## n, args + (n) * 4)) { \
+ return -1; \
+ } \
+ } \
+} while (0)
+
+#define SET_ARG(n, val) \
+ (is_a64(env) ? \
+ put_user_u64(val, args + (n) * 8) : \
+ put_user_u32(val, args + (n) * 4))
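+/*
+ * For example, GET_ARG(2) loads the third entry of the parameter block at
+ * "args" (a 64-bit value in AArch64 state, 32-bit otherwise) into the
+ * local variable arg2, and fails the call with -1 if the read faults.
+ */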
+
+target_ulong do_arm_semihosting(CPUARMState *env)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ target_ulong args;
+ target_ulong arg0, arg1, arg2, arg3;
+ char *s;
+ int nr;
+ uint32_t ret;
+ uint32_t len;
+#ifdef CONFIG_USER_ONLY
+ TaskState *ts = cs->opaque;
+#else
+ CPUARMState *ts = env;
+#endif
+
+ if (is_a64(env)) {
+ /* Note that the syscall number is in W0, not X0 */
+ nr = env->xregs[0] & 0xffffffffU;
+ args = env->xregs[1];
+ } else {
+ nr = env->regs[0];
+ args = env->regs[1];
+ }
+
+ switch (nr) {
+ case TARGET_SYS_OPEN:
+ GET_ARG(0);
+ GET_ARG(1);
+ GET_ARG(2);
+ s = lock_user_string(arg0);
+ if (!s) {
+ /* FIXME - should this error code be -TARGET_EFAULT ? */
+ return (uint32_t)-1;
+ }
+ if (arg1 >= 12) {
+ unlock_user(s, arg0, 0);
+ return (uint32_t)-1;
+ }
+ if (strcmp(s, ":tt") == 0) {
+ int result_fileno = arg1 < 4 ? STDIN_FILENO : STDOUT_FILENO;
+ unlock_user(s, arg0, 0);
+ return result_fileno;
+ }
+ if (use_gdb_syscalls()) {
+ ret = arm_gdb_syscall(cpu, arm_semi_cb, "open,%s,%x,1a4", arg0,
+ (int)arg2+1, gdb_open_modeflags[arg1]);
+ } else {
+ ret = set_swi_errno(ts, open(s, open_modeflags[arg1], 0644));
+ }
+ unlock_user(s, arg0, 0);
+ return ret;
+ case TARGET_SYS_CLOSE:
+ GET_ARG(0);
+ if (use_gdb_syscalls()) {
+ return arm_gdb_syscall(cpu, arm_semi_cb, "close,%x", arg0);
+ } else {
+ return set_swi_errno(ts, close(arg0));
+ }
+ case TARGET_SYS_WRITEC:
+ {
+ char c;
+
+ if (get_user_u8(c, args))
+ /* FIXME - should this error code be -TARGET_EFAULT ? */
+ return (uint32_t)-1;
+ /* Write to debug console. stderr is near enough. */
+ if (use_gdb_syscalls()) {
+ return arm_gdb_syscall(cpu, arm_semi_cb, "write,2,%x,1", args);
+ } else {
+ return write(STDERR_FILENO, &c, 1);
+ }
+ }
+ case TARGET_SYS_WRITE0:
+ if (!(s = lock_user_string(args)))
+ /* FIXME - should this error code be -TARGET_EFAULT ? */
+ return (uint32_t)-1;
+ len = strlen(s);
+ if (use_gdb_syscalls()) {
+ return arm_gdb_syscall(cpu, arm_semi_cb, "write,2,%x,%x",
+ args, len);
+ } else {
+ ret = write(STDERR_FILENO, s, len);
+ }
+ unlock_user(s, args, 0);
+ return ret;
+ case TARGET_SYS_WRITE:
+ GET_ARG(0);
+ GET_ARG(1);
+ GET_ARG(2);
+ len = arg2;
+ if (use_gdb_syscalls()) {
+ arm_semi_syscall_len = len;
+ return arm_gdb_syscall(cpu, arm_semi_cb, "write,%x,%x,%x",
+ arg0, arg1, len);
+ } else {
+ s = lock_user(VERIFY_READ, arg1, len, 1);
+ if (!s) {
+ /* FIXME - should this error code be -TARGET_EFAULT ? */
+ return (uint32_t)-1;
+ }
+ ret = set_swi_errno(ts, write(arg0, s, len));
+ unlock_user(s, arg1, 0);
+ if (ret == (uint32_t)-1)
+ return -1;
+ return len - ret;
+ }
+ case TARGET_SYS_READ:
+ GET_ARG(0);
+ GET_ARG(1);
+ GET_ARG(2);
+ len = arg2;
+ if (use_gdb_syscalls()) {
+ arm_semi_syscall_len = len;
+ return arm_gdb_syscall(cpu, arm_semi_cb, "read,%x,%x,%x",
+ arg0, arg1, len);
+ } else {
+ s = lock_user(VERIFY_WRITE, arg1, len, 0);
+ if (!s) {
+ /* FIXME - should this error code be -TARGET_EFAULT ? */
+ return (uint32_t)-1;
+ }
+ do {
+ ret = set_swi_errno(ts, read(arg0, s, len));
+ } while (ret == -1 && errno == EINTR);
+ unlock_user(s, arg1, len);
+ if (ret == (uint32_t)-1)
+ return -1;
+ return len - ret;
+ }
+ case TARGET_SYS_READC:
+ /* XXX: Read from debug console. Not implemented. */
+ return 0;
+ case TARGET_SYS_ISTTY:
+ GET_ARG(0);
+ if (use_gdb_syscalls()) {
+ return arm_gdb_syscall(cpu, arm_semi_cb, "isatty,%x", arg0);
+ } else {
+ return isatty(arg0);
+ }
+ case TARGET_SYS_SEEK:
+ GET_ARG(0);
+ GET_ARG(1);
+ if (use_gdb_syscalls()) {
+ return arm_gdb_syscall(cpu, arm_semi_cb, "lseek,%x,%x,0",
+ arg0, arg1);
+ } else {
+ ret = set_swi_errno(ts, lseek(arg0, arg1, SEEK_SET));
+ if (ret == (uint32_t)-1)
+ return -1;
+ return 0;
+ }
+ case TARGET_SYS_FLEN:
+ GET_ARG(0);
+ if (use_gdb_syscalls()) {
+ return arm_gdb_syscall(cpu, arm_semi_flen_cb, "fstat,%x,%x",
+ arg0, arm_flen_buf(cpu));
+ } else {
+ struct stat buf;
+ ret = set_swi_errno(ts, fstat(arg0, &buf));
+ if (ret == (uint32_t)-1)
+ return -1;
+ return buf.st_size;
+ }
+ case TARGET_SYS_TMPNAM:
+ /* XXX: Not implemented. */
+ return -1;
+ case TARGET_SYS_REMOVE:
+ GET_ARG(0);
+ GET_ARG(1);
+ if (use_gdb_syscalls()) {
+ ret = arm_gdb_syscall(cpu, arm_semi_cb, "unlink,%s",
+ arg0, (int)arg1+1);
+ } else {
+ s = lock_user_string(arg0);
+ if (!s) {
+ /* FIXME - should this error code be -TARGET_EFAULT ? */
+ return (uint32_t)-1;
+ }
+ ret = set_swi_errno(ts, remove(s));
+ unlock_user(s, arg0, 0);
+ }
+ return ret;
+ case TARGET_SYS_RENAME:
+ GET_ARG(0);
+ GET_ARG(1);
+ GET_ARG(2);
+ GET_ARG(3);
+ if (use_gdb_syscalls()) {
+ return arm_gdb_syscall(cpu, arm_semi_cb, "rename,%s,%s",
+ arg0, (int)arg1+1, arg2, (int)arg3+1);
+ } else {
+ char *s2;
+ s = lock_user_string(arg0);
+ s2 = lock_user_string(arg2);
+ if (!s || !s2)
+ /* FIXME - should this error code be -TARGET_EFAULT ? */
+ ret = (uint32_t)-1;
+ else
+ ret = set_swi_errno(ts, rename(s, s2));
+ if (s2)
+ unlock_user(s2, arg2, 0);
+ if (s)
+ unlock_user(s, arg0, 0);
+ return ret;
+ }
+ case TARGET_SYS_CLOCK:
+ return clock() / (CLOCKS_PER_SEC / 100);
+ case TARGET_SYS_TIME:
+ return set_swi_errno(ts, time(NULL));
+ case TARGET_SYS_SYSTEM:
+ GET_ARG(0);
+ GET_ARG(1);
+ if (use_gdb_syscalls()) {
+ return arm_gdb_syscall(cpu, arm_semi_cb, "system,%s",
+ arg0, (int)arg1+1);
+ } else {
+ s = lock_user_string(arg0);
+ if (!s) {
+ /* FIXME - should this error code be -TARGET_EFAULT ? */
+ return (uint32_t)-1;
+ }
+ ret = set_swi_errno(ts, system(s));
+ unlock_user(s, arg0, 0);
+ return ret;
+ }
+ case TARGET_SYS_ERRNO:
+#ifdef CONFIG_USER_ONLY
+ return ts->swi_errno;
+#else
+ return syscall_err;
+#endif
+ case TARGET_SYS_GET_CMDLINE:
+ {
+ /* Build a command-line from the original argv.
+ *
+ * The inputs are:
+ * * arg0, pointer to a buffer of at least the size
+ * specified in arg1.
+ * * arg1, size of the buffer pointed to by arg0 in
+ * bytes.
+ *
+ * The outputs are:
+ * * arg0, pointer to null-terminated string of the
+ * command line.
+ * * arg1, length of the string pointed to by arg0.
+ */
+
+ char *output_buffer;
+ size_t input_size;
+ size_t output_size;
+ int status = 0;
+#if !defined(CONFIG_USER_ONLY)
+ const char *cmdline;
+#endif
+ GET_ARG(0);
+ GET_ARG(1);
+ input_size = arg1;
+ /* Compute the size of the output string. */
+#if !defined(CONFIG_USER_ONLY)
+ cmdline = semihosting_get_cmdline();
+ if (cmdline == NULL) {
+ cmdline = ""; /* Default to an empty line. */
+ }
+ output_size = strlen(cmdline) + 1; /* Count terminating 0. */
+#else
+ unsigned int i;
+
+ output_size = ts->info->arg_end - ts->info->arg_start;
+ if (!output_size) {
+ /* We special-case the "empty command line" case (argc==0).
+ Just provide the terminating 0. */
+ output_size = 1;
+ }
+#endif
+
+ if (output_size > input_size) {
+ /* Not enough space to store command-line arguments. */
+ return -1;
+ }
+
+ /* Adjust the command-line length. */
+ if (SET_ARG(1, output_size - 1)) {
+ /* Couldn't write back to argument block */
+ return -1;
+ }
+
+ /* Lock the buffer on the ARM side. */
+ output_buffer = lock_user(VERIFY_WRITE, arg0, output_size, 0);
+ if (!output_buffer) {
+ return -1;
+ }
+
+ /* Copy the command-line arguments. */
+#if !defined(CONFIG_USER_ONLY)
+ pstrcpy(output_buffer, output_size, cmdline);
+#else
+ if (output_size == 1) {
+ /* Empty command-line. */
+ output_buffer[0] = '\0';
+ goto out;
+ }
+
+ if (copy_from_user(output_buffer, ts->info->arg_start,
+ output_size)) {
+ status = -1;
+ goto out;
+ }
+
+ /* Separate arguments by white spaces. */
+ for (i = 0; i < output_size - 1; i++) {
+ if (output_buffer[i] == 0) {
+ output_buffer[i] = ' ';
+ }
+ }
+ out:
+#endif
+ /* Unlock the buffer on the ARM side. */
+ unlock_user(output_buffer, arg0, output_size);
+
+ return status;
+ }
+ case TARGET_SYS_HEAPINFO:
+ {
+ target_ulong retvals[4];
+ target_ulong limit;
+ int i;
+
+ GET_ARG(0);
+
+#ifdef CONFIG_USER_ONLY
+ /* Some C libraries assume the heap immediately follows .bss, so
+ allocate it using sbrk. */
+ if (!ts->heap_limit) {
+ abi_ulong ret;
+
+ ts->heap_base = do_brk(0);
+ limit = ts->heap_base + ARM_ANGEL_HEAP_SIZE;
+ /* Try a big heap, and reduce the size if that fails. */
+ for (;;) {
+ ret = do_brk(limit);
+ if (ret >= limit) {
+ break;
+ }
+ limit = (ts->heap_base >> 1) + (limit >> 1);
+ }
+ ts->heap_limit = limit;
+ }
+
+ retvals[0] = ts->heap_base;
+ retvals[1] = ts->heap_limit;
+ retvals[2] = ts->stack_base;
+ retvals[3] = 0; /* Stack limit. */
+#else
+ limit = ram_size;
+ /* TODO: Make this use the limit of the loaded application. */
+ retvals[0] = limit / 2;
+ retvals[1] = limit;
+ retvals[2] = limit; /* Stack base */
+ retvals[3] = 0; /* Stack limit. */
+#endif
+
+ for (i = 0; i < ARRAY_SIZE(retvals); i++) {
+ bool fail;
+
+ if (is_a64(env)) {
+ fail = put_user_u64(retvals[i], arg0 + i * 8);
+ } else {
+ fail = put_user_u32(retvals[i], arg0 + i * 4);
+ }
+
+ if (fail) {
+ /* Couldn't write back to argument block */
+ return -1;
+ }
+ }
+ return 0;
+ }
+ case TARGET_SYS_EXIT:
+ if (is_a64(env)) {
+ /* The A64 version of this call takes a parameter block,
+ * so the application-exit type can return a subcode which
+ * is the exit status code from the application.
+ */
+ GET_ARG(0);
+ GET_ARG(1);
+
+ if (arg0 == ADP_Stopped_ApplicationExit) {
+ ret = arg1;
+ } else {
+ ret = 1;
+ }
+ } else {
+ /* ARM specifies only Stopped_ApplicationExit as normal
+ * exit, everything else is considered an error */
+ ret = (args == ADP_Stopped_ApplicationExit) ? 0 : 1;
+ }
+ gdb_exit(env, ret);
+ exit(ret);
+ case TARGET_SYS_SYNCCACHE:
+ /* Clean the D-cache and invalidate the I-cache for the specified
+ * virtual address range. This is a nop for us since we don't
+ * implement caches. This is only present on A64.
+ */
+ if (is_a64(env)) {
+ return 0;
+ }
+ /* fall through -- invalid for A32/T32 */
+ default:
+ fprintf(stderr, "qemu: Unsupported SemiHosting SWI 0x%02x\n", nr);
+ cpu_dump_state(cs, stderr, fprintf, 0);
+ abort();
+ }
+}
diff --git a/target/arm/arm_ldst.h b/target/arm/arm_ldst.h
new file mode 100644
index 0000000000..a76d89f62c
--- /dev/null
+++ b/target/arm/arm_ldst.h
@@ -0,0 +1,49 @@
+/*
+ * ARM load/store instructions for code (armeb-user support)
+ *
+ * Copyright (c) 2012 CodeSourcery, LLC
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef ARM_LDST_H
+#define ARM_LDST_H
+
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "qemu/bswap.h"
+
+/* Load an instruction and return it in the standard little-endian order */
+static inline uint32_t arm_ldl_code(CPUARMState *env, target_ulong addr,
+ bool sctlr_b)
+{
+ uint32_t insn = cpu_ldl_code(env, addr);
+ if (bswap_code(sctlr_b)) {
+ return bswap32(insn);
+ }
+ return insn;
+}
+
+/* Ditto, for a halfword (Thumb) instruction */
+static inline uint16_t arm_lduw_code(CPUARMState *env, target_ulong addr,
+ bool sctlr_b)
+{
+ uint16_t insn = cpu_lduw_code(env, addr);
+ if (bswap_code(sctlr_b)) {
+ return bswap16(insn);
+ }
+ return insn;
+}
+
+#endif
diff --git a/target/arm/cpu-qom.h b/target/arm/cpu-qom.h
new file mode 100644
index 0000000000..a42495bac9
--- /dev/null
+++ b/target/arm/cpu-qom.h
@@ -0,0 +1,90 @@
+/*
+ * QEMU ARM CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ */
+#ifndef QEMU_ARM_CPU_QOM_H
+#define QEMU_ARM_CPU_QOM_H
+
+#include "qom/cpu.h"
+
+struct arm_boot_info;
+
+#define TYPE_ARM_CPU "arm-cpu"
+
+#define ARM_CPU_CLASS(klass) \
+ OBJECT_CLASS_CHECK(ARMCPUClass, (klass), TYPE_ARM_CPU)
+#define ARM_CPU(obj) \
+ OBJECT_CHECK(ARMCPU, (obj), TYPE_ARM_CPU)
+#define ARM_CPU_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(ARMCPUClass, (obj), TYPE_ARM_CPU)
+
+/**
+ * ARMCPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ *
+ * An ARM CPU model.
+ */
+typedef struct ARMCPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+ void (*parent_reset)(CPUState *cpu);
+} ARMCPUClass;
+
+typedef struct ARMCPU ARMCPU;
+
+#define TYPE_AARCH64_CPU "aarch64-cpu"
+#define AARCH64_CPU_CLASS(klass) \
+ OBJECT_CLASS_CHECK(AArch64CPUClass, (klass), TYPE_AARCH64_CPU)
+#define AARCH64_CPU_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(AArch64CPUClass, (obj), TYPE_AARCH64_CPU)
+
+typedef struct AArch64CPUClass {
+ /*< private >*/
+ ARMCPUClass parent_class;
+ /*< public >*/
+} AArch64CPUClass;
+
+void register_cp_regs_for_features(ARMCPU *cpu);
+void init_cpreg_list(ARMCPU *cpu);
+
+/* Callback functions for the generic timer's timers. */
+void arm_gt_ptimer_cb(void *opaque);
+void arm_gt_vtimer_cb(void *opaque);
+void arm_gt_htimer_cb(void *opaque);
+void arm_gt_stimer_cb(void *opaque);
+
+#define ARM_AFF0_SHIFT 0
+#define ARM_AFF0_MASK (0xFFULL << ARM_AFF0_SHIFT)
+#define ARM_AFF1_SHIFT 8
+#define ARM_AFF1_MASK (0xFFULL << ARM_AFF1_SHIFT)
+#define ARM_AFF2_SHIFT 16
+#define ARM_AFF2_MASK (0xFFULL << ARM_AFF2_SHIFT)
+#define ARM_AFF3_SHIFT 32
+#define ARM_AFF3_MASK (0xFFULL << ARM_AFF3_SHIFT)
+#define ARM_DEFAULT_CPUS_PER_CLUSTER 8
+
+#define ARM32_AFFINITY_MASK (ARM_AFF0_MASK|ARM_AFF1_MASK|ARM_AFF2_MASK)
+#define ARM64_AFFINITY_MASK \
+ (ARM_AFF0_MASK|ARM_AFF1_MASK|ARM_AFF2_MASK|ARM_AFF3_MASK)
+#define ARM64_AFFINITY_INVALID (~ARM64_AFFINITY_MASK)
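+/*
+ * For example, a CPU with Aff1 == 1 and Aff0 == 3 has the affinity value
+ * (1 << ARM_AFF1_SHIFT) | 3 == 0x103.
+ */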
+
+#endif
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
new file mode 100644
index 0000000000..99f0dbebb9
--- /dev/null
+++ b/target/arm/cpu.c
@@ -0,0 +1,1622 @@
+/*
+ * QEMU ARM CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "internals.h"
+#include "qemu-common.h"
+#include "exec/exec-all.h"
+#include "hw/qdev-properties.h"
+#if !defined(CONFIG_USER_ONLY)
+#include "hw/loader.h"
+#endif
+#include "hw/arm/arm.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/kvm.h"
+#include "kvm_arm.h"
+
+static void arm_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+
+ cpu->env.regs[15] = value;
+}
+
+static bool arm_cpu_has_work(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+
+ return !cpu->powered_off
+ && cs->interrupt_request &
+ (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
+ | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ
+ | CPU_INTERRUPT_EXITTB);
+}
+
+void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHook *hook,
+ void *opaque)
+{
+ /* We currently only support registering a single hook function */
+ assert(!cpu->el_change_hook);
+ cpu->el_change_hook = hook;
+ cpu->el_change_hook_opaque = opaque;
+}
+
+static void cp_reg_reset(gpointer key, gpointer value, gpointer opaque)
+{
+ /* Reset a single ARMCPRegInfo register */
+ ARMCPRegInfo *ri = value;
+ ARMCPU *cpu = opaque;
+
+ if (ri->type & (ARM_CP_SPECIAL | ARM_CP_ALIAS)) {
+ return;
+ }
+
+ if (ri->resetfn) {
+ ri->resetfn(&cpu->env, ri);
+ return;
+ }
+
+ /* A zero offset is never possible as it would be regs[0]
+ * so we use it to indicate that reset is being handled elsewhere.
+ * This is basically only used for fields in non-core coprocessors
+ * (like the pxa2xx ones).
+ */
+ if (!ri->fieldoffset) {
+ return;
+ }
+
+ if (cpreg_field_is_64bit(ri)) {
+ CPREG_FIELD64(&cpu->env, ri) = ri->resetvalue;
+ } else {
+ CPREG_FIELD32(&cpu->env, ri) = ri->resetvalue;
+ }
+}
+
+static void cp_reg_check_reset(gpointer key, gpointer value, gpointer opaque)
+{
+ /* Purely an assertion check: we've already done reset once,
+ * so now check that running the reset for the cpreg doesn't
+ * change its value. This traps bugs where two different cpregs
+ * both try to reset the same state field but to different values.
+ */
+ ARMCPRegInfo *ri = value;
+ ARMCPU *cpu = opaque;
+ uint64_t oldvalue, newvalue;
+
+ if (ri->type & (ARM_CP_SPECIAL | ARM_CP_ALIAS | ARM_CP_NO_RAW)) {
+ return;
+ }
+
+ oldvalue = read_raw_cp_reg(&cpu->env, ri);
+ cp_reg_reset(key, value, opaque);
+ newvalue = read_raw_cp_reg(&cpu->env, ri);
+ assert(oldvalue == newvalue);
+}
+
+/* CPUClass::reset() */
+static void arm_cpu_reset(CPUState *s)
+{
+ ARMCPU *cpu = ARM_CPU(s);
+ ARMCPUClass *acc = ARM_CPU_GET_CLASS(cpu);
+ CPUARMState *env = &cpu->env;
+
+ acc->parent_reset(s);
+
+ memset(env, 0, offsetof(CPUARMState, features));
+ g_hash_table_foreach(cpu->cp_regs, cp_reg_reset, cpu);
+ g_hash_table_foreach(cpu->cp_regs, cp_reg_check_reset, cpu);
+
+ env->vfp.xregs[ARM_VFP_FPSID] = cpu->reset_fpsid;
+ env->vfp.xregs[ARM_VFP_MVFR0] = cpu->mvfr0;
+ env->vfp.xregs[ARM_VFP_MVFR1] = cpu->mvfr1;
+ env->vfp.xregs[ARM_VFP_MVFR2] = cpu->mvfr2;
+
+ cpu->powered_off = cpu->start_powered_off;
+ s->halted = cpu->start_powered_off;
+
+ if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
+ env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
+ }
+
+ if (arm_feature(env, ARM_FEATURE_AARCH64)) {
+ /* 64 bit CPUs always start in 64 bit mode */
+ env->aarch64 = 1;
+#if defined(CONFIG_USER_ONLY)
+ env->pstate = PSTATE_MODE_EL0t;
+ /* Userspace expects access to DC ZVA, CTL_EL0 and the cache ops */
+ env->cp15.sctlr_el[1] |= SCTLR_UCT | SCTLR_UCI | SCTLR_DZE;
+ /* and to the FP/Neon instructions */
+ env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 20, 2, 3);
+#else
+ /* Reset into the highest available EL */
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ env->pstate = PSTATE_MODE_EL3h;
+ } else if (arm_feature(env, ARM_FEATURE_EL2)) {
+ env->pstate = PSTATE_MODE_EL2h;
+ } else {
+ env->pstate = PSTATE_MODE_EL1h;
+ }
+ env->pc = cpu->rvbar;
+#endif
+ } else {
+#if defined(CONFIG_USER_ONLY)
+ /* Userspace expects access to cp10 and cp11 for FP/Neon */
+ env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 20, 4, 0xf);
+#endif
+ }
+
+#if defined(CONFIG_USER_ONLY)
+ env->uncached_cpsr = ARM_CPU_MODE_USR;
+ /* For user mode we must enable access to coprocessors */
+ env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
+ if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
+ env->cp15.c15_cpar = 3;
+ } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
+ env->cp15.c15_cpar = 1;
+ }
+#else
+ /* SVC mode with interrupts disabled. */
+ env->uncached_cpsr = ARM_CPU_MODE_SVC;
+ env->daif = PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F;
+ /* On ARMv7-M the CPSR_I is the value of the PRIMASK register, and is
+ * clear at reset. Initial SP and PC are loaded from ROM.
+ */
+ if (IS_M(env)) {
+ uint32_t initial_msp; /* Loaded from 0x0 */
+ uint32_t initial_pc; /* Loaded from 0x4 */
+ uint8_t *rom;
+
+ env->daif &= ~PSTATE_I;
+ rom = rom_ptr(0);
+ if (rom) {
+ /* Address zero is covered by ROM which hasn't yet been
+ * copied into physical memory.
+ */
+ initial_msp = ldl_p(rom);
+ initial_pc = ldl_p(rom + 4);
+ } else {
+ /* Address zero not covered by a ROM blob, or the ROM blob
+ * is in non-modifiable memory and this is a second reset after
+ * it got copied into memory. In the latter case, rom_ptr
+ * will return a NULL pointer and we should use ldl_phys instead.
+ */
+ initial_msp = ldl_phys(s->as, 0);
+ initial_pc = ldl_phys(s->as, 4);
+ }
+
+ env->regs[13] = initial_msp & 0xFFFFFFFC;
+ env->regs[15] = initial_pc & ~1;
+ env->thumb = initial_pc & 1;
+ }
+
+ /* AArch32 has a hard highvec setting of 0xFFFF0000. If we are currently
+ * executing as AArch32 then check if highvecs are enabled and
+ * adjust the PC accordingly.
+ */
+ if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
+ env->regs[15] = 0xFFFF0000;
+ }
+
+ env->vfp.xregs[ARM_VFP_FPEXC] = 0;
+#endif
+ set_flush_to_zero(1, &env->vfp.standard_fp_status);
+ set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status);
+ set_default_nan_mode(1, &env->vfp.standard_fp_status);
+ set_float_detect_tininess(float_tininess_before_rounding,
+ &env->vfp.fp_status);
+ set_float_detect_tininess(float_tininess_before_rounding,
+ &env->vfp.standard_fp_status);
+ tlb_flush(s, 1);
+
+#ifndef CONFIG_USER_ONLY
+ if (kvm_enabled()) {
+ kvm_arm_reset_vcpu(cpu);
+ }
+#endif
+
+ hw_breakpoint_update_all(cpu);
+ hw_watchpoint_update_all(cpu);
+}
+
+bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ CPUClass *cc = CPU_GET_CLASS(cs);
+ CPUARMState *env = cs->env_ptr;
+ uint32_t cur_el = arm_current_el(env);
+ bool secure = arm_is_secure(env);
+ uint32_t target_el;
+ uint32_t excp_idx;
+ bool ret = false;
+
+ if (interrupt_request & CPU_INTERRUPT_FIQ) {
+ excp_idx = EXCP_FIQ;
+ target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
+ if (arm_excp_unmasked(cs, excp_idx, target_el)) {
+ cs->exception_index = excp_idx;
+ env->exception.target_el = target_el;
+ cc->do_interrupt(cs);
+ ret = true;
+ }
+ }
+ if (interrupt_request & CPU_INTERRUPT_HARD) {
+ excp_idx = EXCP_IRQ;
+ target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
+ if (arm_excp_unmasked(cs, excp_idx, target_el)) {
+ cs->exception_index = excp_idx;
+ env->exception.target_el = target_el;
+ cc->do_interrupt(cs);
+ ret = true;
+ }
+ }
+ if (interrupt_request & CPU_INTERRUPT_VIRQ) {
+ excp_idx = EXCP_VIRQ;
+ target_el = 1;
+ if (arm_excp_unmasked(cs, excp_idx, target_el)) {
+ cs->exception_index = excp_idx;
+ env->exception.target_el = target_el;
+ cc->do_interrupt(cs);
+ ret = true;
+ }
+ }
+ if (interrupt_request & CPU_INTERRUPT_VFIQ) {
+ excp_idx = EXCP_VFIQ;
+ target_el = 1;
+ if (arm_excp_unmasked(cs, excp_idx, target_el)) {
+ cs->exception_index = excp_idx;
+ env->exception.target_el = target_el;
+ cc->do_interrupt(cs);
+ ret = true;
+ }
+ }
+
+ return ret;
+}
+
+#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
+static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ CPUClass *cc = CPU_GET_CLASS(cs);
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ bool ret = false;
+
+ if (interrupt_request & CPU_INTERRUPT_FIQ
+ && !(env->daif & PSTATE_F)) {
+ cs->exception_index = EXCP_FIQ;
+ cc->do_interrupt(cs);
+ ret = true;
+ }
+ /* ARMv7-M interrupt return works by loading a magic value
+ * into the PC. On real hardware the load causes the
+ * return to occur. The qemu implementation performs the
+ * jump normally, then does the exception return when the
+ * CPU tries to execute code at the magic address.
+ * This will cause the magic PC value to be pushed to
+ * the stack if an interrupt occurred at the wrong time.
+ * We avoid this by disabling interrupts when
+ * pc contains a magic address.
+ */
+ if (interrupt_request & CPU_INTERRUPT_HARD
+ && !(env->daif & PSTATE_I)
+ && (env->regs[15] < 0xfffffff0)) {
+ cs->exception_index = EXCP_IRQ;
+ cc->do_interrupt(cs);
+ ret = true;
+ }
+ return ret;
+}
+#endif
+
+#ifndef CONFIG_USER_ONLY
+static void arm_cpu_set_irq(void *opaque, int irq, int level)
+{
+ ARMCPU *cpu = opaque;
+ CPUARMState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+ static const int mask[] = {
+ [ARM_CPU_IRQ] = CPU_INTERRUPT_HARD,
+ [ARM_CPU_FIQ] = CPU_INTERRUPT_FIQ,
+ [ARM_CPU_VIRQ] = CPU_INTERRUPT_VIRQ,
+ [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ
+ };
+
+ switch (irq) {
+ case ARM_CPU_VIRQ:
+ case ARM_CPU_VFIQ:
+ assert(arm_feature(env, ARM_FEATURE_EL2));
+ /* fall through */
+ case ARM_CPU_IRQ:
+ case ARM_CPU_FIQ:
+ if (level) {
+ cpu_interrupt(cs, mask[irq]);
+ } else {
+ cpu_reset_interrupt(cs, mask[irq]);
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void arm_cpu_kvm_set_irq(void *opaque, int irq, int level)
+{
+#ifdef CONFIG_KVM
+ ARMCPU *cpu = opaque;
+ CPUState *cs = CPU(cpu);
+ int kvm_irq = KVM_ARM_IRQ_TYPE_CPU << KVM_ARM_IRQ_TYPE_SHIFT;
+
+ switch (irq) {
+ case ARM_CPU_IRQ:
+ kvm_irq |= KVM_ARM_IRQ_CPU_IRQ;
+ break;
+ case ARM_CPU_FIQ:
+ kvm_irq |= KVM_ARM_IRQ_CPU_FIQ;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ kvm_irq |= cs->cpu_index << KVM_ARM_IRQ_VCPU_SHIFT;
+ kvm_set_irq(kvm_state, kvm_irq, level ? 1 : 0);
+#endif
+}
+
+static bool arm_cpu_virtio_is_big_endian(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ cpu_synchronize_state(cs);
+ return arm_cpu_data_is_big_endian(env);
+}
+
+#endif
+
+static inline void set_feature(CPUARMState *env, int feature)
+{
+ env->features |= 1ULL << feature;
+}
+
+static inline void unset_feature(CPUARMState *env, int feature)
+{
+ env->features &= ~(1ULL << feature);
+}
+
+static int
+print_insn_thumb1(bfd_vma pc, disassemble_info *info)
+{
+ return print_insn_arm(pc | 1, info);
+}
+
+static void arm_disas_set_info(CPUState *cpu, disassemble_info *info)
+{
+ ARMCPU *ac = ARM_CPU(cpu);
+ CPUARMState *env = &ac->env;
+
+ if (is_a64(env)) {
+ /* We might not be compiled with the A64 disassembler
+ * because it needs a C++ compiler. Leave print_insn
+ * unset in this case to use the caller default behaviour.
+ */
+#if defined(CONFIG_ARM_A64_DIS)
+ info->print_insn = print_insn_arm_a64;
+#endif
+ } else if (env->thumb) {
+ info->print_insn = print_insn_thumb1;
+ } else {
+ info->print_insn = print_insn_arm;
+ }
+ if (bswap_code(arm_sctlr_b(env))) {
+#ifdef TARGET_WORDS_BIGENDIAN
+ info->endian = BFD_ENDIAN_LITTLE;
+#else
+ info->endian = BFD_ENDIAN_BIG;
+#endif
+ }
+}
+
+static void arm_cpu_initfn(Object *obj)
+{
+ CPUState *cs = CPU(obj);
+ ARMCPU *cpu = ARM_CPU(obj);
+ static bool inited;
+
+ cs->env_ptr = &cpu->env;
+ cpu->cp_regs = g_hash_table_new_full(g_int_hash, g_int_equal,
+ g_free, g_free);
+
+#ifndef CONFIG_USER_ONLY
+ /* Our inbound IRQ and FIQ lines */
+ if (kvm_enabled()) {
+ /* VIRQ and VFIQ are unused with KVM but we add them to maintain
+ * the same interface as non-KVM CPUs.
+ */
+ qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 4);
+ } else {
+ qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 4);
+ }
+
+ cpu->gt_timer[GTIMER_PHYS] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE,
+ arm_gt_ptimer_cb, cpu);
+ cpu->gt_timer[GTIMER_VIRT] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE,
+ arm_gt_vtimer_cb, cpu);
+ cpu->gt_timer[GTIMER_HYP] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE,
+ arm_gt_htimer_cb, cpu);
+ cpu->gt_timer[GTIMER_SEC] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE,
+ arm_gt_stimer_cb, cpu);
+ qdev_init_gpio_out(DEVICE(cpu), cpu->gt_timer_outputs,
+ ARRAY_SIZE(cpu->gt_timer_outputs));
+#endif
+
+ /* DTB consumers generally don't in fact care what the 'compatible'
+ * string is, so always provide some string and trust that a hypothetical
+ * picky DTB consumer will also provide a helpful error message.
+ */
+ cpu->dtb_compatible = "qemu,unknown";
+ cpu->psci_version = 1; /* By default assume PSCI v0.1 */
+ cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE;
+
+ if (tcg_enabled()) {
+ cpu->psci_version = 2; /* TCG implements PSCI 0.2 */
+ if (!inited) {
+ inited = true;
+ arm_translate_init();
+ }
+ }
+}
+
+static Property arm_cpu_reset_cbar_property =
+ DEFINE_PROP_UINT64("reset-cbar", ARMCPU, reset_cbar, 0);
+
+static Property arm_cpu_reset_hivecs_property =
+ DEFINE_PROP_BOOL("reset-hivecs", ARMCPU, reset_hivecs, false);
+
+static Property arm_cpu_rvbar_property =
+ DEFINE_PROP_UINT64("rvbar", ARMCPU, rvbar, 0);
+
+static Property arm_cpu_has_el3_property =
+ DEFINE_PROP_BOOL("has_el3", ARMCPU, has_el3, true);
+
+/* use property name "pmu" to match other archs and virt tools */
+static Property arm_cpu_has_pmu_property =
+ DEFINE_PROP_BOOL("pmu", ARMCPU, has_pmu, true);
+
+static Property arm_cpu_has_mpu_property =
+ DEFINE_PROP_BOOL("has-mpu", ARMCPU, has_mpu, true);
+
+static Property arm_cpu_pmsav7_dregion_property =
+ DEFINE_PROP_UINT32("pmsav7-dregion", ARMCPU, pmsav7_dregion, 16);
+
+static void arm_cpu_post_init(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ if (arm_feature(&cpu->env, ARM_FEATURE_CBAR) ||
+ arm_feature(&cpu->env, ARM_FEATURE_CBAR_RO)) {
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_cbar_property,
+ &error_abort);
+ }
+
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M)) {
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_hivecs_property,
+ &error_abort);
+ }
+
+ if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_rvbar_property,
+ &error_abort);
+ }
+
+ if (arm_feature(&cpu->env, ARM_FEATURE_EL3)) {
+ /* Add the has_el3 state CPU property only if EL3 is allowed. This will
+ * prevent "has_el3" from existing on CPUs which cannot support EL3.
+ */
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_has_el3_property,
+ &error_abort);
+
+#ifndef CONFIG_USER_ONLY
+ object_property_add_link(obj, "secure-memory",
+ TYPE_MEMORY_REGION,
+ (Object **)&cpu->secure_memory,
+ qdev_prop_allow_set_link_before_realize,
+ OBJ_PROP_LINK_UNREF_ON_RELEASE,
+ &error_abort);
+#endif
+ }
+
+ if (arm_feature(&cpu->env, ARM_FEATURE_PMU)) {
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_has_pmu_property,
+ &error_abort);
+ }
+
+ if (arm_feature(&cpu->env, ARM_FEATURE_MPU)) {
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_has_mpu_property,
+ &error_abort);
+ if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
+ qdev_property_add_static(DEVICE(obj),
+ &arm_cpu_pmsav7_dregion_property,
+ &error_abort);
+ }
+ }
+}
+
+static void arm_cpu_finalizefn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+ g_hash_table_destroy(cpu->cp_regs);
+}
+
+static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ ARMCPU *cpu = ARM_CPU(dev);
+ ARMCPUClass *acc = ARM_CPU_GET_CLASS(dev);
+ CPUARMState *env = &cpu->env;
+ int pagebits;
+ Error *local_err = NULL;
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ /* Some features automatically imply others: */
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ set_feature(env, ARM_FEATURE_V7);
+ set_feature(env, ARM_FEATURE_ARM_DIV);
+ set_feature(env, ARM_FEATURE_LPAE);
+ }
+ if (arm_feature(env, ARM_FEATURE_V7)) {
+ set_feature(env, ARM_FEATURE_VAPA);
+ set_feature(env, ARM_FEATURE_THUMB2);
+ set_feature(env, ARM_FEATURE_MPIDR);
+ if (!arm_feature(env, ARM_FEATURE_M)) {
+ set_feature(env, ARM_FEATURE_V6K);
+ } else {
+ set_feature(env, ARM_FEATURE_V6);
+ }
+ }
+ if (arm_feature(env, ARM_FEATURE_V6K)) {
+ set_feature(env, ARM_FEATURE_V6);
+ set_feature(env, ARM_FEATURE_MVFR);
+ }
+ if (arm_feature(env, ARM_FEATURE_V6)) {
+ set_feature(env, ARM_FEATURE_V5);
+ if (!arm_feature(env, ARM_FEATURE_M)) {
+ set_feature(env, ARM_FEATURE_AUXCR);
+ }
+ }
+ if (arm_feature(env, ARM_FEATURE_V5)) {
+ set_feature(env, ARM_FEATURE_V4T);
+ }
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ set_feature(env, ARM_FEATURE_THUMB_DIV);
+ }
+ if (arm_feature(env, ARM_FEATURE_ARM_DIV)) {
+ set_feature(env, ARM_FEATURE_THUMB_DIV);
+ }
+ if (arm_feature(env, ARM_FEATURE_VFP4)) {
+ set_feature(env, ARM_FEATURE_VFP3);
+ set_feature(env, ARM_FEATURE_VFP_FP16);
+ }
+ if (arm_feature(env, ARM_FEATURE_VFP3)) {
+ set_feature(env, ARM_FEATURE_VFP);
+ }
+ if (arm_feature(env, ARM_FEATURE_LPAE)) {
+ set_feature(env, ARM_FEATURE_V7MP);
+ set_feature(env, ARM_FEATURE_PXN);
+ }
+ if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
+ set_feature(env, ARM_FEATURE_CBAR);
+ }
+ if (arm_feature(env, ARM_FEATURE_THUMB2) &&
+ !arm_feature(env, ARM_FEATURE_M)) {
+ set_feature(env, ARM_FEATURE_THUMB_DSP);
+ }
+
+ if (arm_feature(env, ARM_FEATURE_V7) &&
+ !arm_feature(env, ARM_FEATURE_M) &&
+ !arm_feature(env, ARM_FEATURE_MPU)) {
+ /* v7VMSA drops support for the old ARMv5 tiny pages, so we
+ * can use 4K pages.
+ */
+ pagebits = 12;
+ } else {
+ /* For CPUs which might have tiny 1K pages, or which have an
+ * MPU and might have small region sizes, stick with 1K pages.
+ */
+ pagebits = 10;
+ }
+ if (!set_preferred_target_page_bits(pagebits)) {
+ /* This can only ever happen for hotplugging a CPU, or if
+ * the board code incorrectly creates a CPU which it has
+ * promised via minimum_page_size that it will not.
+ */
+ error_setg(errp, "This CPU requires a smaller page size than the "
+ "system is using");
+ return;
+ }
+
+ /* This cpu-id-to-MPIDR affinity is used only for TCG; KVM will override it.
+ * We don't support setting cluster ID ([16..23]) (known as Aff2
+ * in later ARM ARM versions), or any of the higher affinity level fields,
+ * so these bits always RAZ.
+ */
+ if (cpu->mp_affinity == ARM64_AFFINITY_INVALID) {
+ uint32_t Aff1 = cs->cpu_index / ARM_DEFAULT_CPUS_PER_CLUSTER;
+ uint32_t Aff0 = cs->cpu_index % ARM_DEFAULT_CPUS_PER_CLUSTER;
+ cpu->mp_affinity = (Aff1 << ARM_AFF1_SHIFT) | Aff0;
+ }
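+ /* For example, with the default of 8 CPUs per cluster, cpu_index 10
+ * yields Aff1 == 1 and Aff0 == 2, i.e. an affinity value of 0x102.
+ */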
+
+ if (cpu->reset_hivecs) {
+ cpu->reset_sctlr |= (1 << 13);
+ }
+
+ if (!cpu->has_el3) {
+ /* If the has_el3 CPU property is disabled then we need to disable the
+ * feature.
+ */
+ unset_feature(env, ARM_FEATURE_EL3);
+
+ /* Disable the security extension feature bits in the processor feature
+ * registers as well. These are id_pfr1[7:4] and id_aa64pfr0[15:12].
+ */
+ cpu->id_pfr1 &= ~0xf0;
+ cpu->id_aa64pfr0 &= ~0xf000;
+ }
+
+ if (!cpu->has_pmu || !kvm_enabled()) {
+ cpu->has_pmu = false;
+ unset_feature(env, ARM_FEATURE_PMU);
+ }
+
+ if (!arm_feature(env, ARM_FEATURE_EL2)) {
+ /* Disable the hypervisor feature bits in the processor feature
+ * registers if we don't have EL2. These are id_pfr1[15:12] and
+ * id_aa64pfr0_el1[11:8].
+ */
+ cpu->id_aa64pfr0 &= ~0xf00;
+ cpu->id_pfr1 &= ~0xf000;
+ }
+
+ if (!cpu->has_mpu) {
+ unset_feature(env, ARM_FEATURE_MPU);
+ }
+
+ if (arm_feature(env, ARM_FEATURE_MPU) &&
+ arm_feature(env, ARM_FEATURE_V7)) {
+ uint32_t nr = cpu->pmsav7_dregion;
+
+ if (nr > 0xff) {
+ error_setg(errp, "PMSAv7 MPU #regions invalid %" PRIu32, nr);
+ return;
+ }
+
+ if (nr) {
+ env->pmsav7.drbar = g_new0(uint32_t, nr);
+ env->pmsav7.drsr = g_new0(uint32_t, nr);
+ env->pmsav7.dracr = g_new0(uint32_t, nr);
+ }
+ }
+
+ register_cp_regs_for_features(cpu);
+ arm_cpu_register_gdb_regs_for_features(cpu);
+
+ init_cpreg_list(cpu);
+
+#ifndef CONFIG_USER_ONLY
+ if (cpu->has_el3) {
+ cs->num_ases = 2;
+ } else {
+ cs->num_ases = 1;
+ }
+
+ if (cpu->has_el3) {
+ AddressSpace *as;
+
+ if (!cpu->secure_memory) {
+ cpu->secure_memory = cs->memory;
+ }
+ as = address_space_init_shareable(cpu->secure_memory,
+ "cpu-secure-memory");
+ cpu_address_space_init(cs, as, ARMASIdx_S);
+ }
+ cpu_address_space_init(cs,
+ address_space_init_shareable(cs->memory,
+ "cpu-memory"),
+ ARMASIdx_NS);
+#endif
+
+ qemu_init_vcpu(cs);
+ cpu_reset(cs);
+
+ acc->parent_realize(dev, errp);
+}
+
+static ObjectClass *arm_cpu_class_by_name(const char *cpu_model)
+{
+ ObjectClass *oc;
+ char *typename;
+ char **cpuname;
+
+ if (!cpu_model) {
+ return NULL;
+ }
+
+ cpuname = g_strsplit(cpu_model, ",", 1);
+ typename = g_strdup_printf("%s-" TYPE_ARM_CPU, cpuname[0]);
+ oc = object_class_by_name(typename);
+ g_strfreev(cpuname);
+ g_free(typename);
+ if (!oc || !object_class_dynamic_cast(oc, TYPE_ARM_CPU) ||
+ object_class_is_abstract(oc)) {
+ return NULL;
+ }
+ return oc;
+}
+
+/* CPU models. These are not needed for the AArch64 linux-user build. */
+#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
+
+static void arm926_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "arm,arm926";
+ set_feature(&cpu->env, ARM_FEATURE_V5);
+ set_feature(&cpu->env, ARM_FEATURE_VFP);
+ set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
+ set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN);
+ cpu->midr = 0x41069265;
+ cpu->reset_fpsid = 0x41011090;
+ cpu->ctr = 0x1dd20d2;
+ cpu->reset_sctlr = 0x00090078;
+}
+
+static void arm946_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "arm,arm946";
+ set_feature(&cpu->env, ARM_FEATURE_V5);
+ set_feature(&cpu->env, ARM_FEATURE_MPU);
+ set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
+ cpu->midr = 0x41059461;
+ cpu->ctr = 0x0f004006;
+ cpu->reset_sctlr = 0x00000078;
+}
+
+static void arm1026_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "arm,arm1026";
+ set_feature(&cpu->env, ARM_FEATURE_V5);
+ set_feature(&cpu->env, ARM_FEATURE_VFP);
+ set_feature(&cpu->env, ARM_FEATURE_AUXCR);
+ set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
+ set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN);
+ cpu->midr = 0x4106a262;
+ cpu->reset_fpsid = 0x410110a0;
+ cpu->ctr = 0x1dd20d2;
+ cpu->reset_sctlr = 0x00090078;
+ cpu->reset_auxcr = 1;
+ {
+ /* The 1026 had an IFAR at c6,c0,0,1 rather than the ARMv6 c6,c0,0,2 */
+ ARMCPRegInfo ifar = {
+ .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
+ .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.ifar_ns),
+ .resetvalue = 0
+ };
+ define_one_arm_cp_reg(cpu, &ifar);
+ }
+}
+
+static void arm1136_r2_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+    /* What QEMU calls "arm1136-r2" is actually the 1136 r0p2, i.e. an
+ * older core than plain "arm1136". In particular this does not
+ * have the v6K features.
+ * These ID register values are correct for 1136 but may be wrong
+ * for 1136_r2 (in particular r0p2 does not actually implement most
+ * of the ID registers).
+ */
+
+ cpu->dtb_compatible = "arm,arm1136";
+ set_feature(&cpu->env, ARM_FEATURE_V6);
+ set_feature(&cpu->env, ARM_FEATURE_VFP);
+ set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
+ set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG);
+ set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS);
+ cpu->midr = 0x4107b362;
+ cpu->reset_fpsid = 0x410120b4;
+ cpu->mvfr0 = 0x11111111;
+ cpu->mvfr1 = 0x00000000;
+ cpu->ctr = 0x1dd20d2;
+ cpu->reset_sctlr = 0x00050078;
+ cpu->id_pfr0 = 0x111;
+ cpu->id_pfr1 = 0x1;
+ cpu->id_dfr0 = 0x2;
+ cpu->id_afr0 = 0x3;
+ cpu->id_mmfr0 = 0x01130003;
+ cpu->id_mmfr1 = 0x10030302;
+ cpu->id_mmfr2 = 0x01222110;
+ cpu->id_isar0 = 0x00140011;
+ cpu->id_isar1 = 0x12002111;
+ cpu->id_isar2 = 0x11231111;
+ cpu->id_isar3 = 0x01102131;
+ cpu->id_isar4 = 0x141;
+ cpu->reset_auxcr = 7;
+}
+
+static void arm1136_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "arm,arm1136";
+ set_feature(&cpu->env, ARM_FEATURE_V6K);
+ set_feature(&cpu->env, ARM_FEATURE_V6);
+ set_feature(&cpu->env, ARM_FEATURE_VFP);
+ set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
+ set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG);
+ set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS);
+ cpu->midr = 0x4117b363;
+ cpu->reset_fpsid = 0x410120b4;
+ cpu->mvfr0 = 0x11111111;
+ cpu->mvfr1 = 0x00000000;
+ cpu->ctr = 0x1dd20d2;
+ cpu->reset_sctlr = 0x00050078;
+ cpu->id_pfr0 = 0x111;
+ cpu->id_pfr1 = 0x1;
+ cpu->id_dfr0 = 0x2;
+ cpu->id_afr0 = 0x3;
+ cpu->id_mmfr0 = 0x01130003;
+ cpu->id_mmfr1 = 0x10030302;
+ cpu->id_mmfr2 = 0x01222110;
+ cpu->id_isar0 = 0x00140011;
+ cpu->id_isar1 = 0x12002111;
+ cpu->id_isar2 = 0x11231111;
+ cpu->id_isar3 = 0x01102131;
+ cpu->id_isar4 = 0x141;
+ cpu->reset_auxcr = 7;
+}
+
+static void arm1176_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "arm,arm1176";
+ set_feature(&cpu->env, ARM_FEATURE_V6K);
+ set_feature(&cpu->env, ARM_FEATURE_VFP);
+ set_feature(&cpu->env, ARM_FEATURE_VAPA);
+ set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
+ set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG);
+ set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS);
+ set_feature(&cpu->env, ARM_FEATURE_EL3);
+ cpu->midr = 0x410fb767;
+ cpu->reset_fpsid = 0x410120b5;
+ cpu->mvfr0 = 0x11111111;
+ cpu->mvfr1 = 0x00000000;
+ cpu->ctr = 0x1dd20d2;
+ cpu->reset_sctlr = 0x00050078;
+ cpu->id_pfr0 = 0x111;
+ cpu->id_pfr1 = 0x11;
+ cpu->id_dfr0 = 0x33;
+ cpu->id_afr0 = 0;
+ cpu->id_mmfr0 = 0x01130003;
+ cpu->id_mmfr1 = 0x10030302;
+ cpu->id_mmfr2 = 0x01222100;
+ cpu->id_isar0 = 0x0140011;
+ cpu->id_isar1 = 0x12002111;
+ cpu->id_isar2 = 0x11231121;
+ cpu->id_isar3 = 0x01102131;
+ cpu->id_isar4 = 0x01141;
+ cpu->reset_auxcr = 7;
+}
+
+static void arm11mpcore_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "arm,arm11mpcore";
+ set_feature(&cpu->env, ARM_FEATURE_V6K);
+ set_feature(&cpu->env, ARM_FEATURE_VFP);
+ set_feature(&cpu->env, ARM_FEATURE_VAPA);
+ set_feature(&cpu->env, ARM_FEATURE_MPIDR);
+ set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
+ cpu->midr = 0x410fb022;
+ cpu->reset_fpsid = 0x410120b4;
+ cpu->mvfr0 = 0x11111111;
+ cpu->mvfr1 = 0x00000000;
+ cpu->ctr = 0x1d192992; /* 32K icache 32K dcache */
+ cpu->id_pfr0 = 0x111;
+ cpu->id_pfr1 = 0x1;
+ cpu->id_dfr0 = 0;
+ cpu->id_afr0 = 0x2;
+ cpu->id_mmfr0 = 0x01100103;
+ cpu->id_mmfr1 = 0x10020302;
+ cpu->id_mmfr2 = 0x01222000;
+ cpu->id_isar0 = 0x00100011;
+ cpu->id_isar1 = 0x12002111;
+ cpu->id_isar2 = 0x11221011;
+ cpu->id_isar3 = 0x01102131;
+ cpu->id_isar4 = 0x141;
+ cpu->reset_auxcr = 1;
+}
+
+static void cortex_m3_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+ set_feature(&cpu->env, ARM_FEATURE_V7);
+ set_feature(&cpu->env, ARM_FEATURE_M);
+ cpu->midr = 0x410fc231;
+}
+
+static void cortex_m4_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ set_feature(&cpu->env, ARM_FEATURE_V7);
+ set_feature(&cpu->env, ARM_FEATURE_M);
+ set_feature(&cpu->env, ARM_FEATURE_THUMB_DSP);
+ cpu->midr = 0x410fc240; /* r0p0 */
+}
+
+static void arm_v7m_class_init(ObjectClass *oc, void *data)
+{
+ CPUClass *cc = CPU_CLASS(oc);
+
+#ifndef CONFIG_USER_ONLY
+ cc->do_interrupt = arm_v7m_cpu_do_interrupt;
+#endif
+
+ cc->cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt;
+}
+
+static const ARMCPRegInfo cortexr5_cp_reginfo[] = {
+ /* Dummy the TCM region regs for the moment */
+ { .name = "ATCM", .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_CONST },
+ { .name = "BTCM", .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
+ .access = PL1_RW, .type = ARM_CP_CONST },
+ REGINFO_SENTINEL
+};
+
+static void cortex_r5_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ set_feature(&cpu->env, ARM_FEATURE_V7);
+ set_feature(&cpu->env, ARM_FEATURE_THUMB_DIV);
+ set_feature(&cpu->env, ARM_FEATURE_ARM_DIV);
+ set_feature(&cpu->env, ARM_FEATURE_V7MP);
+ set_feature(&cpu->env, ARM_FEATURE_MPU);
+ cpu->midr = 0x411fc153; /* r1p3 */
+ cpu->id_pfr0 = 0x0131;
+ cpu->id_pfr1 = 0x001;
+ cpu->id_dfr0 = 0x010400;
+ cpu->id_afr0 = 0x0;
+ cpu->id_mmfr0 = 0x0210030;
+ cpu->id_mmfr1 = 0x00000000;
+ cpu->id_mmfr2 = 0x01200000;
+ cpu->id_mmfr3 = 0x0211;
+ cpu->id_isar0 = 0x2101111;
+ cpu->id_isar1 = 0x13112111;
+ cpu->id_isar2 = 0x21232141;
+ cpu->id_isar3 = 0x01112131;
+ cpu->id_isar4 = 0x0010142;
+ cpu->id_isar5 = 0x0;
+ cpu->mp_is_up = true;
+ define_arm_cp_regs(cpu, cortexr5_cp_reginfo);
+}
+
+static const ARMCPRegInfo cortexa8_cp_reginfo[] = {
+ { .name = "L2LOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "L2AUXCR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 2,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ REGINFO_SENTINEL
+};
+
+static void cortex_a8_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "arm,cortex-a8";
+ set_feature(&cpu->env, ARM_FEATURE_V7);
+ set_feature(&cpu->env, ARM_FEATURE_VFP3);
+ set_feature(&cpu->env, ARM_FEATURE_NEON);
+ set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
+ set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
+ set_feature(&cpu->env, ARM_FEATURE_EL3);
+ cpu->midr = 0x410fc080;
+ cpu->reset_fpsid = 0x410330c0;
+ cpu->mvfr0 = 0x11110222;
+ cpu->mvfr1 = 0x00011100;
+ cpu->ctr = 0x82048004;
+ cpu->reset_sctlr = 0x00c50078;
+ cpu->id_pfr0 = 0x1031;
+ cpu->id_pfr1 = 0x11;
+ cpu->id_dfr0 = 0x400;
+ cpu->id_afr0 = 0;
+ cpu->id_mmfr0 = 0x31100003;
+ cpu->id_mmfr1 = 0x20000000;
+ cpu->id_mmfr2 = 0x01202000;
+ cpu->id_mmfr3 = 0x11;
+ cpu->id_isar0 = 0x00101111;
+ cpu->id_isar1 = 0x12112111;
+ cpu->id_isar2 = 0x21232031;
+ cpu->id_isar3 = 0x11112131;
+ cpu->id_isar4 = 0x00111142;
+ cpu->dbgdidr = 0x15141000;
+ cpu->clidr = (1 << 27) | (2 << 24) | 3;
+ cpu->ccsidr[0] = 0xe007e01a; /* 16k L1 dcache. */
+ cpu->ccsidr[1] = 0x2007e01a; /* 16k L1 icache. */
+ cpu->ccsidr[2] = 0xf0000000; /* No L2 icache. */
+ cpu->reset_auxcr = 2;
+ define_arm_cp_regs(cpu, cortexa8_cp_reginfo);
+}
+
+static const ARMCPRegInfo cortexa9_cp_reginfo[] = {
+ /* power_control should be set to maximum latency. Again,
+ * default to 0 and set by private hook
+ */
+ { .name = "A9_PWRCTL", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.c15_power_control) },
+ { .name = "A9_DIAG", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 1,
+ .access = PL1_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.c15_diagnostic) },
+ { .name = "A9_PWRDIAG", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 2,
+ .access = PL1_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.c15_power_diagnostic) },
+ { .name = "NEONBUSY", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
+ /* TLB lockdown control */
+ { .name = "TLB_LOCKR", .cp = 15, .crn = 15, .crm = 4, .opc1 = 5, .opc2 = 2,
+ .access = PL1_W, .resetvalue = 0, .type = ARM_CP_NOP },
+ { .name = "TLB_LOCKW", .cp = 15, .crn = 15, .crm = 4, .opc1 = 5, .opc2 = 4,
+ .access = PL1_W, .resetvalue = 0, .type = ARM_CP_NOP },
+ { .name = "TLB_VA", .cp = 15, .crn = 15, .crm = 5, .opc1 = 5, .opc2 = 2,
+ .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
+ { .name = "TLB_PA", .cp = 15, .crn = 15, .crm = 6, .opc1 = 5, .opc2 = 2,
+ .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
+ { .name = "TLB_ATTR", .cp = 15, .crn = 15, .crm = 7, .opc1 = 5, .opc2 = 2,
+ .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
+ REGINFO_SENTINEL
+};
+
+static void cortex_a9_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "arm,cortex-a9";
+ set_feature(&cpu->env, ARM_FEATURE_V7);
+ set_feature(&cpu->env, ARM_FEATURE_VFP3);
+ set_feature(&cpu->env, ARM_FEATURE_VFP_FP16);
+ set_feature(&cpu->env, ARM_FEATURE_NEON);
+ set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
+ set_feature(&cpu->env, ARM_FEATURE_EL3);
+ /* Note that A9 supports the MP extensions even for
+ * A9UP and single-core A9MP (which are both different
+ * and valid configurations; we don't model A9UP).
+ */
+ set_feature(&cpu->env, ARM_FEATURE_V7MP);
+ set_feature(&cpu->env, ARM_FEATURE_CBAR);
+ cpu->midr = 0x410fc090;
+ cpu->reset_fpsid = 0x41033090;
+ cpu->mvfr0 = 0x11110222;
+ cpu->mvfr1 = 0x01111111;
+ cpu->ctr = 0x80038003;
+ cpu->reset_sctlr = 0x00c50078;
+ cpu->id_pfr0 = 0x1031;
+ cpu->id_pfr1 = 0x11;
+ cpu->id_dfr0 = 0x000;
+ cpu->id_afr0 = 0;
+ cpu->id_mmfr0 = 0x00100103;
+ cpu->id_mmfr1 = 0x20000000;
+ cpu->id_mmfr2 = 0x01230000;
+ cpu->id_mmfr3 = 0x00002111;
+ cpu->id_isar0 = 0x00101111;
+ cpu->id_isar1 = 0x13112111;
+ cpu->id_isar2 = 0x21232041;
+ cpu->id_isar3 = 0x11112131;
+ cpu->id_isar4 = 0x00111142;
+ cpu->dbgdidr = 0x35141000;
+ cpu->clidr = (1 << 27) | (1 << 24) | 3;
+ cpu->ccsidr[0] = 0xe00fe019; /* 16k L1 dcache. */
+ cpu->ccsidr[1] = 0x200fe019; /* 16k L1 icache. */
+ define_arm_cp_regs(cpu, cortexa9_cp_reginfo);
+}
+
+#ifndef CONFIG_USER_ONLY
+static uint64_t a15_l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ /* Linux wants the number of processors from here.
+ * Might as well set the interrupt-controller bit too.
+ */
+ return ((smp_cpus - 1) << 24) | (1 << 23);
+}
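+/*
+ * For example, with smp_cpus == 4 this reads back as
+ * ((4 - 1) << 24) | (1 << 23) == 0x03800000.
+ */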
+#endif
+
+static const ARMCPRegInfo cortexa15_cp_reginfo[] = {
+#ifndef CONFIG_USER_ONLY
+ { .name = "L2CTLR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 2,
+ .access = PL1_RW, .resetvalue = 0, .readfn = a15_l2ctlr_read,
+ .writefn = arm_cp_write_ignore, },
+#endif
+ { .name = "L2ECTLR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 3,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ REGINFO_SENTINEL
+};
+
+static void cortex_a7_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "arm,cortex-a7";
+ set_feature(&cpu->env, ARM_FEATURE_V7);
+ set_feature(&cpu->env, ARM_FEATURE_VFP4);
+ set_feature(&cpu->env, ARM_FEATURE_NEON);
+ set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
+ set_feature(&cpu->env, ARM_FEATURE_ARM_DIV);
+ set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
+ set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
+ set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
+ set_feature(&cpu->env, ARM_FEATURE_LPAE);
+ set_feature(&cpu->env, ARM_FEATURE_EL3);
+ cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A7;
+ cpu->midr = 0x410fc075;
+ cpu->reset_fpsid = 0x41023075;
+ cpu->mvfr0 = 0x10110222;
+ cpu->mvfr1 = 0x11111111;
+ cpu->ctr = 0x84448003;
+ cpu->reset_sctlr = 0x00c50078;
+ cpu->id_pfr0 = 0x00001131;
+ cpu->id_pfr1 = 0x00011011;
+ cpu->id_dfr0 = 0x02010555;
+ cpu->pmceid0 = 0x00000000;
+ cpu->pmceid1 = 0x00000000;
+ cpu->id_afr0 = 0x00000000;
+ cpu->id_mmfr0 = 0x10101105;
+ cpu->id_mmfr1 = 0x40000000;
+ cpu->id_mmfr2 = 0x01240000;
+ cpu->id_mmfr3 = 0x02102211;
+ cpu->id_isar0 = 0x01101110;
+ cpu->id_isar1 = 0x13112111;
+ cpu->id_isar2 = 0x21232041;
+ cpu->id_isar3 = 0x11112131;
+ cpu->id_isar4 = 0x10011142;
+ cpu->dbgdidr = 0x3515f005;
+ cpu->clidr = 0x0a200023;
+ cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
+ cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */
+ cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */
+ define_arm_cp_regs(cpu, cortexa15_cp_reginfo); /* Same as A15 */
+}
+
+static void cortex_a15_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "arm,cortex-a15";
+ set_feature(&cpu->env, ARM_FEATURE_V7);
+ set_feature(&cpu->env, ARM_FEATURE_VFP4);
+ set_feature(&cpu->env, ARM_FEATURE_NEON);
+ set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
+ set_feature(&cpu->env, ARM_FEATURE_ARM_DIV);
+ set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
+ set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
+ set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
+ set_feature(&cpu->env, ARM_FEATURE_LPAE);
+ set_feature(&cpu->env, ARM_FEATURE_EL3);
+ cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A15;
+ cpu->midr = 0x412fc0f1;
+ cpu->reset_fpsid = 0x410430f0;
+ cpu->mvfr0 = 0x10110222;
+ cpu->mvfr1 = 0x11111111;
+ cpu->ctr = 0x8444c004;
+ cpu->reset_sctlr = 0x00c50078;
+ cpu->id_pfr0 = 0x00001131;
+ cpu->id_pfr1 = 0x00011011;
+ cpu->id_dfr0 = 0x02010555;
+ cpu->pmceid0 = 0x0000000;
+ cpu->pmceid1 = 0x00000000;
+ cpu->id_afr0 = 0x00000000;
+ cpu->id_mmfr0 = 0x10201105;
+ cpu->id_mmfr1 = 0x20000000;
+ cpu->id_mmfr2 = 0x01240000;
+ cpu->id_mmfr3 = 0x02102211;
+ cpu->id_isar0 = 0x02101110;
+ cpu->id_isar1 = 0x13112111;
+ cpu->id_isar2 = 0x21232041;
+ cpu->id_isar3 = 0x11112131;
+ cpu->id_isar4 = 0x10011142;
+ cpu->dbgdidr = 0x3515f021;
+ cpu->clidr = 0x0a200023;
+ cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
+ cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */
+ cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */
+ define_arm_cp_regs(cpu, cortexa15_cp_reginfo);
+}
+
+static void ti925t_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+ set_feature(&cpu->env, ARM_FEATURE_V4T);
+ set_feature(&cpu->env, ARM_FEATURE_OMAPCP);
+ cpu->midr = ARM_CPUID_TI925T;
+ cpu->ctr = 0x5109149;
+ cpu->reset_sctlr = 0x00000070;
+}
+
+static void sa1100_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "intel,sa1100";
+ set_feature(&cpu->env, ARM_FEATURE_STRONGARM);
+ set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
+ cpu->midr = 0x4401A11B;
+ cpu->reset_sctlr = 0x00000070;
+}
+
+static void sa1110_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+ set_feature(&cpu->env, ARM_FEATURE_STRONGARM);
+ set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
+ cpu->midr = 0x6901B119;
+ cpu->reset_sctlr = 0x00000070;
+}
+
+static void pxa250_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "marvell,xscale";
+ set_feature(&cpu->env, ARM_FEATURE_V5);
+ set_feature(&cpu->env, ARM_FEATURE_XSCALE);
+ cpu->midr = 0x69052100;
+ cpu->ctr = 0xd172172;
+ cpu->reset_sctlr = 0x00000078;
+}
+
+static void pxa255_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "marvell,xscale";
+ set_feature(&cpu->env, ARM_FEATURE_V5);
+ set_feature(&cpu->env, ARM_FEATURE_XSCALE);
+ cpu->midr = 0x69052d00;
+ cpu->ctr = 0xd172172;
+ cpu->reset_sctlr = 0x00000078;
+}
+
+static void pxa260_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "marvell,xscale";
+ set_feature(&cpu->env, ARM_FEATURE_V5);
+ set_feature(&cpu->env, ARM_FEATURE_XSCALE);
+ cpu->midr = 0x69052903;
+ cpu->ctr = 0xd172172;
+ cpu->reset_sctlr = 0x00000078;
+}
+
+static void pxa261_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "marvell,xscale";
+ set_feature(&cpu->env, ARM_FEATURE_V5);
+ set_feature(&cpu->env, ARM_FEATURE_XSCALE);
+ cpu->midr = 0x69052d05;
+ cpu->ctr = 0xd172172;
+ cpu->reset_sctlr = 0x00000078;
+}
+
+static void pxa262_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "marvell,xscale";
+ set_feature(&cpu->env, ARM_FEATURE_V5);
+ set_feature(&cpu->env, ARM_FEATURE_XSCALE);
+ cpu->midr = 0x69052d06;
+ cpu->ctr = 0xd172172;
+ cpu->reset_sctlr = 0x00000078;
+}
+
+static void pxa270a0_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "marvell,xscale";
+ set_feature(&cpu->env, ARM_FEATURE_V5);
+ set_feature(&cpu->env, ARM_FEATURE_XSCALE);
+ set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
+ cpu->midr = 0x69054110;
+ cpu->ctr = 0xd172172;
+ cpu->reset_sctlr = 0x00000078;
+}
+
+static void pxa270a1_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "marvell,xscale";
+ set_feature(&cpu->env, ARM_FEATURE_V5);
+ set_feature(&cpu->env, ARM_FEATURE_XSCALE);
+ set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
+ cpu->midr = 0x69054111;
+ cpu->ctr = 0xd172172;
+ cpu->reset_sctlr = 0x00000078;
+}
+
+static void pxa270b0_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "marvell,xscale";
+ set_feature(&cpu->env, ARM_FEATURE_V5);
+ set_feature(&cpu->env, ARM_FEATURE_XSCALE);
+ set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
+ cpu->midr = 0x69054112;
+ cpu->ctr = 0xd172172;
+ cpu->reset_sctlr = 0x00000078;
+}
+
+static void pxa270b1_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "marvell,xscale";
+ set_feature(&cpu->env, ARM_FEATURE_V5);
+ set_feature(&cpu->env, ARM_FEATURE_XSCALE);
+ set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
+ cpu->midr = 0x69054113;
+ cpu->ctr = 0xd172172;
+ cpu->reset_sctlr = 0x00000078;
+}
+
+static void pxa270c0_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "marvell,xscale";
+ set_feature(&cpu->env, ARM_FEATURE_V5);
+ set_feature(&cpu->env, ARM_FEATURE_XSCALE);
+ set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
+ cpu->midr = 0x69054114;
+ cpu->ctr = 0xd172172;
+ cpu->reset_sctlr = 0x00000078;
+}
+
+static void pxa270c5_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "marvell,xscale";
+ set_feature(&cpu->env, ARM_FEATURE_V5);
+ set_feature(&cpu->env, ARM_FEATURE_XSCALE);
+ set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
+ cpu->midr = 0x69054117;
+ cpu->ctr = 0xd172172;
+ cpu->reset_sctlr = 0x00000078;
+}
+
+#ifdef CONFIG_USER_ONLY
+static void arm_any_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+ set_feature(&cpu->env, ARM_FEATURE_V8);
+ set_feature(&cpu->env, ARM_FEATURE_VFP4);
+ set_feature(&cpu->env, ARM_FEATURE_NEON);
+ set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
+ set_feature(&cpu->env, ARM_FEATURE_V8_AES);
+ set_feature(&cpu->env, ARM_FEATURE_V8_SHA1);
+ set_feature(&cpu->env, ARM_FEATURE_V8_SHA256);
+ set_feature(&cpu->env, ARM_FEATURE_V8_PMULL);
+ set_feature(&cpu->env, ARM_FEATURE_CRC);
+ cpu->midr = 0xffffffff;
+}
+#endif
+
+#endif /* !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64) */
+
+typedef struct ARMCPUInfo {
+ const char *name;
+ void (*initfn)(Object *obj);
+ void (*class_init)(ObjectClass *oc, void *data);
+} ARMCPUInfo;
+
+static const ARMCPUInfo arm_cpus[] = {
+#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
+ { .name = "arm926", .initfn = arm926_initfn },
+ { .name = "arm946", .initfn = arm946_initfn },
+ { .name = "arm1026", .initfn = arm1026_initfn },
+ /* What QEMU calls "arm1136-r2" is actually the 1136 r0p2, i.e. an
+ * older core than plain "arm1136". In particular this does not
+ * have the v6K features.
+ */
+ { .name = "arm1136-r2", .initfn = arm1136_r2_initfn },
+ { .name = "arm1136", .initfn = arm1136_initfn },
+ { .name = "arm1176", .initfn = arm1176_initfn },
+ { .name = "arm11mpcore", .initfn = arm11mpcore_initfn },
+ { .name = "cortex-m3", .initfn = cortex_m3_initfn,
+ .class_init = arm_v7m_class_init },
+ { .name = "cortex-m4", .initfn = cortex_m4_initfn,
+ .class_init = arm_v7m_class_init },
+ { .name = "cortex-r5", .initfn = cortex_r5_initfn },
+ { .name = "cortex-a7", .initfn = cortex_a7_initfn },
+ { .name = "cortex-a8", .initfn = cortex_a8_initfn },
+ { .name = "cortex-a9", .initfn = cortex_a9_initfn },
+ { .name = "cortex-a15", .initfn = cortex_a15_initfn },
+ { .name = "ti925t", .initfn = ti925t_initfn },
+ { .name = "sa1100", .initfn = sa1100_initfn },
+ { .name = "sa1110", .initfn = sa1110_initfn },
+ { .name = "pxa250", .initfn = pxa250_initfn },
+ { .name = "pxa255", .initfn = pxa255_initfn },
+ { .name = "pxa260", .initfn = pxa260_initfn },
+ { .name = "pxa261", .initfn = pxa261_initfn },
+ { .name = "pxa262", .initfn = pxa262_initfn },
+ /* "pxa270" is an alias for "pxa270-a0" */
+ { .name = "pxa270", .initfn = pxa270a0_initfn },
+ { .name = "pxa270-a0", .initfn = pxa270a0_initfn },
+ { .name = "pxa270-a1", .initfn = pxa270a1_initfn },
+ { .name = "pxa270-b0", .initfn = pxa270b0_initfn },
+ { .name = "pxa270-b1", .initfn = pxa270b1_initfn },
+ { .name = "pxa270-c0", .initfn = pxa270c0_initfn },
+ { .name = "pxa270-c5", .initfn = pxa270c5_initfn },
+#ifdef CONFIG_USER_ONLY
+ { .name = "any", .initfn = arm_any_initfn },
+#endif
+#endif
+ { .name = NULL }
+};
+
+static Property arm_cpu_properties[] = {
+ DEFINE_PROP_BOOL("start-powered-off", ARMCPU, start_powered_off, false),
+ DEFINE_PROP_UINT32("psci-conduit", ARMCPU, psci_conduit, 0),
+ DEFINE_PROP_UINT32("midr", ARMCPU, midr, 0),
+ DEFINE_PROP_UINT64("mp-affinity", ARMCPU,
+ mp_affinity, ARM64_AFFINITY_INVALID),
+ DEFINE_PROP_END_OF_LIST()
+};
+
+#ifdef CONFIG_USER_ONLY
+static int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
+ int mmu_idx)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ env->exception.vaddress = address;
+ if (rw == 2) {
+ cs->exception_index = EXCP_PREFETCH_ABORT;
+ } else {
+ cs->exception_index = EXCP_DATA_ABORT;
+ }
+ return 1;
+}
+#endif
+
+static gchar *arm_gdb_arch_name(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
+ return g_strdup("iwmmxt");
+ }
+ return g_strdup("arm");
+}
+
+static void arm_cpu_class_init(ObjectClass *oc, void *data)
+{
+ ARMCPUClass *acc = ARM_CPU_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(acc);
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ acc->parent_realize = dc->realize;
+ dc->realize = arm_cpu_realizefn;
+ dc->props = arm_cpu_properties;
+
+ acc->parent_reset = cc->reset;
+ cc->reset = arm_cpu_reset;
+
+ cc->class_by_name = arm_cpu_class_by_name;
+ cc->has_work = arm_cpu_has_work;
+ cc->cpu_exec_interrupt = arm_cpu_exec_interrupt;
+ cc->dump_state = arm_cpu_dump_state;
+ cc->set_pc = arm_cpu_set_pc;
+ cc->gdb_read_register = arm_cpu_gdb_read_register;
+ cc->gdb_write_register = arm_cpu_gdb_write_register;
+#ifdef CONFIG_USER_ONLY
+ cc->handle_mmu_fault = arm_cpu_handle_mmu_fault;
+#else
+ cc->do_interrupt = arm_cpu_do_interrupt;
+ cc->do_unaligned_access = arm_cpu_do_unaligned_access;
+ cc->get_phys_page_attrs_debug = arm_cpu_get_phys_page_attrs_debug;
+ cc->asidx_from_attrs = arm_asidx_from_attrs;
+ cc->vmsd = &vmstate_arm_cpu;
+ cc->virtio_is_big_endian = arm_cpu_virtio_is_big_endian;
+ cc->write_elf64_note = arm_cpu_write_elf64_note;
+ cc->write_elf32_note = arm_cpu_write_elf32_note;
+#endif
+ cc->gdb_num_core_regs = 26;
+ cc->gdb_core_xml_file = "arm-core.xml";
+ cc->gdb_arch_name = arm_gdb_arch_name;
+ cc->gdb_stop_before_watchpoint = true;
+ cc->debug_excp_handler = arm_debug_excp_handler;
+ cc->debug_check_watchpoint = arm_debug_check_watchpoint;
+
+ cc->disas_set_info = arm_disas_set_info;
+}
+
+static void cpu_register(const ARMCPUInfo *info)
+{
+ TypeInfo type_info = {
+ .parent = TYPE_ARM_CPU,
+ .instance_size = sizeof(ARMCPU),
+ .instance_init = info->initfn,
+ .class_size = sizeof(ARMCPUClass),
+ .class_init = info->class_init,
+ };
+
+ type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name);
+ type_register(&type_info);
+ g_free((void *)type_info.name);
+}
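+/*
+ * For example, registering the { .name = "cortex-a9" } entry creates the QOM
+ * type "cortex-a9-" TYPE_ARM_CPU, which is the same name that
+ * arm_cpu_class_by_name() constructs, so "-cpu cortex-a9" resolves to it.
+ */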
+
+static const TypeInfo arm_cpu_type_info = {
+ .name = TYPE_ARM_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(ARMCPU),
+ .instance_init = arm_cpu_initfn,
+ .instance_post_init = arm_cpu_post_init,
+ .instance_finalize = arm_cpu_finalizefn,
+ .abstract = true,
+ .class_size = sizeof(ARMCPUClass),
+ .class_init = arm_cpu_class_init,
+};
+
+static void arm_cpu_register_types(void)
+{
+ const ARMCPUInfo *info = arm_cpus;
+
+ type_register_static(&arm_cpu_type_info);
+
+ while (info->name) {
+ cpu_register(info);
+ info++;
+ }
+}
+
+type_init(arm_cpu_register_types)
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
new file mode 100644
index 0000000000..ca5c849ed6
--- /dev/null
+++ b/target/arm/cpu.h
@@ -0,0 +1,2466 @@
+/*
+ * ARM virtual CPU header
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef ARM_CPU_H
+#define ARM_CPU_H
+
+#include "kvm-consts.h"
+
+#if defined(TARGET_AARCH64)
+ /* AArch64 definitions */
+# define TARGET_LONG_BITS 64
+#else
+# define TARGET_LONG_BITS 32
+#endif
+
+#define CPUArchState struct CPUARMState
+
+#include "qemu-common.h"
+#include "cpu-qom.h"
+#include "exec/cpu-defs.h"
+
+#include "fpu/softfloat.h"
+
+#define EXCP_UDEF 1 /* undefined instruction */
+#define EXCP_SWI 2 /* software interrupt */
+#define EXCP_PREFETCH_ABORT 3
+#define EXCP_DATA_ABORT 4
+#define EXCP_IRQ 5
+#define EXCP_FIQ 6
+#define EXCP_BKPT 7
+#define EXCP_EXCEPTION_EXIT 8 /* Return from v7M exception. */
+#define EXCP_KERNEL_TRAP 9 /* Jumped to kernel code page. */
+#define EXCP_HVC 11 /* HyperVisor Call */
+#define EXCP_HYP_TRAP 12
+#define EXCP_SMC 13 /* Secure Monitor Call */
+#define EXCP_VIRQ 14
+#define EXCP_VFIQ 15
+#define EXCP_SEMIHOST 16 /* semihosting call */
+
+#define ARMV7M_EXCP_RESET 1
+#define ARMV7M_EXCP_NMI 2
+#define ARMV7M_EXCP_HARD 3
+#define ARMV7M_EXCP_MEM 4
+#define ARMV7M_EXCP_BUS 5
+#define ARMV7M_EXCP_USAGE 6
+#define ARMV7M_EXCP_SVC 11
+#define ARMV7M_EXCP_DEBUG 12
+#define ARMV7M_EXCP_PENDSV 14
+#define ARMV7M_EXCP_SYSTICK 15
+
+/* ARM-specific interrupt pending bits. */
+#define CPU_INTERRUPT_FIQ CPU_INTERRUPT_TGT_EXT_1
+#define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_EXT_2
+#define CPU_INTERRUPT_VFIQ CPU_INTERRUPT_TGT_EXT_3
+
+/* The usual mapping for an AArch64 system register to its AArch32
+ * counterpart is for the 32 bit world to have access to the lower
+ * half only (with writes leaving the upper half untouched). It's
+ * therefore useful to be able to pass TCG the offset of the least
+ * significant half of a uint64_t struct member.
+ */
+#ifdef HOST_WORDS_BIGENDIAN
+#define offsetoflow32(S, M) (offsetof(S, M) + sizeof(uint32_t))
+#define offsetofhigh32(S, M) offsetof(S, M)
+#else
+#define offsetoflow32(S, M) offsetof(S, M)
+#define offsetofhigh32(S, M) (offsetof(S, M) + sizeof(uint32_t))
+#endif
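+/*
+ * As an illustration, a 32-bit AArch32 view of a 64-bit system register can
+ * set its fieldoffset to offsetoflow32() of the shared uint64_t state, so
+ * that 32-bit reads and writes only touch the architecturally visible low
+ * half regardless of host endianness.
+ */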
+
+/* Meanings of the ARMCPU object's four inbound GPIO lines */
+#define ARM_CPU_IRQ 0
+#define ARM_CPU_FIQ 1
+#define ARM_CPU_VIRQ 2
+#define ARM_CPU_VFIQ 3
+
+#define NB_MMU_MODES 7
+/* ARM-specific extra insn start words:
+ * 1: Conditional execution bits
+ * 2: Partial exception syndrome for data aborts
+ */
+#define TARGET_INSN_START_EXTRA_WORDS 2
+
+/* The 2nd extra word holding syndrome info for data aborts does not use
+ * the upper 6 bits nor the lower 14 bits. We mask and shift it down to
+ * help the sleb128 encoder do a better job.
+ * When restoring the CPU state, we shift it back up.
+ */
+#define ARM_INSN_START_WORD2_MASK ((1 << 26) - 1)
+#define ARM_INSN_START_WORD2_SHIFT 14
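+/*
+ * In other words, the value placed in the extra insn start word is
+ * (syndrome & ARM_INSN_START_WORD2_MASK) >> ARM_INSN_START_WORD2_SHIFT,
+ * and the restore path shifts it left by ARM_INSN_START_WORD2_SHIFT again
+ * before use.
+ */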
+
+/* We currently assume float and double are IEEE single and double
+ precision respectively.
+ Doing runtime conversions is tricky because VFP registers may contain
+   integer values (e.g. as the result of an FTOSI instruction).
+ s<2n> maps to the least significant half of d<n>
+ s<2n+1> maps to the most significant half of d<n>
+ */
+
+/* CPU state for each instance of a generic timer (in cp15 c14) */
+typedef struct ARMGenericTimer {
+ uint64_t cval; /* Timer CompareValue register */
+ uint64_t ctl; /* Timer Control register */
+} ARMGenericTimer;
+
+#define GTIMER_PHYS 0
+#define GTIMER_VIRT 1
+#define GTIMER_HYP 2
+#define GTIMER_SEC 3
+#define NUM_GTIMERS 4
+
+typedef struct {
+ uint64_t raw_tcr;
+ uint32_t mask;
+ uint32_t base_mask;
+} TCR;
+
+typedef struct CPUARMState {
+ /* Regs for current mode. */
+ uint32_t regs[16];
+
+ /* 32/64 switch only happens when taking and returning from
+ * exceptions so the overlap semantics are taken care of then
+ * instead of having a complicated union.
+ */
+ /* Regs for A64 mode. */
+ uint64_t xregs[32];
+ uint64_t pc;
+ /* PSTATE isn't an architectural register for ARMv8. However, it is
+ * convenient for us to assemble the underlying state into a 32 bit format
+ * identical to the architectural format used for the SPSR. (This is also
+ * what the Linux kernel's 'pstate' field in signal handlers and KVM's
+ * 'pstate' register are.) Of the PSTATE bits:
+ * NZCV are kept in the split out env->CF/VF/NF/ZF, (which have the same
+ * semantics as for AArch32, as described in the comments on each field)
+ * nRW (also known as M[4]) is kept, inverted, in env->aarch64
+ * DAIF (exception masks) are kept in env->daif
+ * all other bits are stored in their correct places in env->pstate
+ */
+ uint32_t pstate;
+ uint32_t aarch64; /* 1 if CPU is in aarch64 state; inverse of PSTATE.nRW */
+
+ /* Frequently accessed CPSR bits are stored separately for efficiency.
+ This contains all the other bits. Use cpsr_{read,write} to access
+ the whole CPSR. */
+ uint32_t uncached_cpsr;
+ uint32_t spsr;
+
+ /* Banked registers. */
+ uint64_t banked_spsr[8];
+ uint32_t banked_r13[8];
+ uint32_t banked_r14[8];
+
+ /* These hold r8-r12. */
+ uint32_t usr_regs[5];
+ uint32_t fiq_regs[5];
+
+ /* cpsr flag cache for faster execution */
+ uint32_t CF; /* 0 or 1 */
+ uint32_t VF; /* V is the bit 31. All other bits are undefined */
+ uint32_t NF; /* N is bit 31. All other bits are undefined. */
+ uint32_t ZF; /* Z set if zero. */
+ uint32_t QF; /* 0 or 1 */
+ uint32_t GE; /* cpsr[19:16] */
+ uint32_t thumb; /* cpsr[5]. 0 = arm mode, 1 = thumb mode. */
+ uint32_t condexec_bits; /* IT bits. cpsr[15:10,26:25]. */
+ uint64_t daif; /* exception masks, in the bits they are in PSTATE */
+
+ uint64_t elr_el[4]; /* AArch64 exception link regs */
+ uint64_t sp_el[4]; /* AArch64 banked stack pointers */
+
+ /* System control coprocessor (cp15) */
+ struct {
+ uint32_t c0_cpuid;
+ union { /* Cache size selection */
+ struct {
+ uint64_t _unused_csselr0;
+ uint64_t csselr_ns;
+ uint64_t _unused_csselr1;
+ uint64_t csselr_s;
+ };
+ uint64_t csselr_el[4];
+ };
+ union { /* System control register. */
+ struct {
+ uint64_t _unused_sctlr;
+ uint64_t sctlr_ns;
+ uint64_t hsctlr;
+ uint64_t sctlr_s;
+ };
+ uint64_t sctlr_el[4];
+ };
+ uint64_t cpacr_el1; /* Architectural feature access control register */
+ uint64_t cptr_el[4]; /* ARMv8 feature trap registers */
+ uint32_t c1_xscaleauxcr; /* XScale auxiliary control register. */
+ uint64_t sder; /* Secure debug enable register. */
+ uint32_t nsacr; /* Non-secure access control register. */
+ union { /* MMU translation table base 0. */
+ struct {
+ uint64_t _unused_ttbr0_0;
+ uint64_t ttbr0_ns;
+ uint64_t _unused_ttbr0_1;
+ uint64_t ttbr0_s;
+ };
+ uint64_t ttbr0_el[4];
+ };
+ union { /* MMU translation table base 1. */
+ struct {
+ uint64_t _unused_ttbr1_0;
+ uint64_t ttbr1_ns;
+ uint64_t _unused_ttbr1_1;
+ uint64_t ttbr1_s;
+ };
+ uint64_t ttbr1_el[4];
+ };
+ uint64_t vttbr_el2; /* Virtualization Translation Table Base. */
+ /* MMU translation table base control. */
+ TCR tcr_el[4];
+ TCR vtcr_el2; /* Virtualization Translation Control. */
+ uint32_t c2_data; /* MPU data cacheable bits. */
+ uint32_t c2_insn; /* MPU instruction cacheable bits. */
+ union { /* MMU domain access control register
+ * MPU write buffer control.
+ */
+ struct {
+ uint64_t dacr_ns;
+ uint64_t dacr_s;
+ };
+ struct {
+ uint64_t dacr32_el2;
+ };
+ };
+ uint32_t pmsav5_data_ap; /* PMSAv5 MPU data access permissions */
+ uint32_t pmsav5_insn_ap; /* PMSAv5 MPU insn access permissions */
+ uint64_t hcr_el2; /* Hypervisor configuration register */
+ uint64_t scr_el3; /* Secure configuration register. */
+ union { /* Fault status registers. */
+ struct {
+ uint64_t ifsr_ns;
+ uint64_t ifsr_s;
+ };
+ struct {
+ uint64_t ifsr32_el2;
+ };
+ };
+ union {
+ struct {
+ uint64_t _unused_dfsr;
+ uint64_t dfsr_ns;
+ uint64_t hsr;
+ uint64_t dfsr_s;
+ };
+ uint64_t esr_el[4];
+ };
+ uint32_t c6_region[8]; /* MPU base/size registers. */
+ union { /* Fault address registers. */
+ struct {
+ uint64_t _unused_far0;
+#ifdef HOST_WORDS_BIGENDIAN
+ uint32_t ifar_ns;
+ uint32_t dfar_ns;
+ uint32_t ifar_s;
+ uint32_t dfar_s;
+#else
+ uint32_t dfar_ns;
+ uint32_t ifar_ns;
+ uint32_t dfar_s;
+ uint32_t ifar_s;
+#endif
+ uint64_t _unused_far3;
+ };
+ uint64_t far_el[4];
+ };
+ uint64_t hpfar_el2;
+ uint64_t hstr_el2;
+ union { /* Translation result. */
+ struct {
+ uint64_t _unused_par_0;
+ uint64_t par_ns;
+ uint64_t _unused_par_1;
+ uint64_t par_s;
+ };
+ uint64_t par_el[4];
+ };
+
+ uint32_t c6_rgnr;
+
+ uint32_t c9_insn; /* Cache lockdown registers. */
+ uint32_t c9_data;
+ uint64_t c9_pmcr; /* performance monitor control register */
+ uint64_t c9_pmcnten; /* perf monitor counter enables */
+ uint32_t c9_pmovsr; /* perf monitor overflow status */
+ uint32_t c9_pmxevtyper; /* perf monitor event type */
+ uint32_t c9_pmuserenr; /* perf monitor user enable */
+ uint32_t c9_pminten; /* perf monitor interrupt enables */
+ union { /* Memory attribute redirection */
+ struct {
+#ifdef HOST_WORDS_BIGENDIAN
+ uint64_t _unused_mair_0;
+ uint32_t mair1_ns;
+ uint32_t mair0_ns;
+ uint64_t _unused_mair_1;
+ uint32_t mair1_s;
+ uint32_t mair0_s;
+#else
+ uint64_t _unused_mair_0;
+ uint32_t mair0_ns;
+ uint32_t mair1_ns;
+ uint64_t _unused_mair_1;
+ uint32_t mair0_s;
+ uint32_t mair1_s;
+#endif
+ };
+ uint64_t mair_el[4];
+ };
+ union { /* vector base address register */
+ struct {
+ uint64_t _unused_vbar;
+ uint64_t vbar_ns;
+ uint64_t hvbar;
+ uint64_t vbar_s;
+ };
+ uint64_t vbar_el[4];
+ };
+ uint32_t mvbar; /* (monitor) vector base address register */
+ struct { /* FCSE PID. */
+ uint32_t fcseidr_ns;
+ uint32_t fcseidr_s;
+ };
+ union { /* Context ID. */
+ struct {
+ uint64_t _unused_contextidr_0;
+ uint64_t contextidr_ns;
+ uint64_t _unused_contextidr_1;
+ uint64_t contextidr_s;
+ };
+ uint64_t contextidr_el[4];
+ };
+ union { /* User RW Thread register. */
+ struct {
+ uint64_t tpidrurw_ns;
+ uint64_t tpidrprw_ns;
+ uint64_t htpidr;
+ uint64_t _tpidr_el3;
+ };
+ uint64_t tpidr_el[4];
+ };
+ /* The secure banks of these registers don't map anywhere */
+ uint64_t tpidrurw_s;
+ uint64_t tpidrprw_s;
+ uint64_t tpidruro_s;
+
+ union { /* User RO Thread register. */
+ uint64_t tpidruro_ns;
+ uint64_t tpidrro_el[1];
+ };
+ uint64_t c14_cntfrq; /* Counter Frequency register */
+ uint64_t c14_cntkctl; /* Timer Control register */
+ uint32_t cnthctl_el2; /* Counter/Timer Hyp Control register */
+ uint64_t cntvoff_el2; /* Counter Virtual Offset register */
+ ARMGenericTimer c14_timer[NUM_GTIMERS];
+ uint32_t c15_cpar; /* XScale Coprocessor Access Register */
+ uint32_t c15_ticonfig; /* TI925T configuration byte. */
+ uint32_t c15_i_max; /* Maximum D-cache dirty line index. */
+ uint32_t c15_i_min; /* Minimum D-cache dirty line index. */
+ uint32_t c15_threadid; /* TI debugger thread-ID. */
+ uint32_t c15_config_base_address; /* SCU base address. */
+ uint32_t c15_diagnostic; /* diagnostic register */
+ uint32_t c15_power_diagnostic;
+ uint32_t c15_power_control; /* power control */
+ uint64_t dbgbvr[16]; /* breakpoint value registers */
+ uint64_t dbgbcr[16]; /* breakpoint control registers */
+ uint64_t dbgwvr[16]; /* watchpoint value registers */
+ uint64_t dbgwcr[16]; /* watchpoint control registers */
+ uint64_t mdscr_el1;
+ uint64_t oslsr_el1; /* OS Lock Status */
+ uint64_t mdcr_el2;
+ uint64_t mdcr_el3;
+ /* If the counter is enabled, this stores the last time the counter
+ * was reset. Otherwise it stores the counter value
+ */
+ uint64_t c15_ccnt;
+ uint64_t pmccfiltr_el0; /* Performance Monitor Filter Register */
+ uint64_t vpidr_el2; /* Virtualization Processor ID Register */
+ uint64_t vmpidr_el2; /* Virtualization Multiprocessor ID Register */
+ } cp15;
+
+ struct {
+ uint32_t other_sp;
+ uint32_t vecbase;
+ uint32_t basepri;
+ uint32_t control;
+ int current_sp;
+ int exception;
+ } v7m;
+
+ /* Information associated with an exception about to be taken:
+ * code which raises an exception must set cs->exception_index and
+ * the relevant parts of this structure; the cpu_do_interrupt function
+ * will then set the guest-visible registers as part of the exception
+ * entry process.
+ */
+ struct {
+ uint32_t syndrome; /* AArch64 format syndrome register */
+ uint32_t fsr; /* AArch32 format fault status register info */
+ uint64_t vaddress; /* virtual addr associated with exception, if any */
+ uint32_t target_el; /* EL the exception should be targeted for */
+ /* If we implement EL2 we will also need to store information
+ * about the intermediate physical address for stage 2 faults.
+ */
+ } exception;
+
+ /* Thumb-2 EE state. */
+ uint32_t teecr;
+ uint32_t teehbr;
+
+ /* VFP coprocessor state. */
+ struct {
+ /* VFP/Neon register state. Note that the mapping between S, D and Q
+ * views of the register bank differs between AArch64 and AArch32:
+ * In AArch32:
+ * Qn = regs[2n+1]:regs[2n]
+ * Dn = regs[n]
+ * Sn = regs[n/2] bits 31..0 for even n, and bits 63..32 for odd n
+ * (and regs[32] to regs[63] are inaccessible)
+ * In AArch64:
+ * Qn = regs[2n+1]:regs[2n]
+ * Dn = regs[2n]
+ * Sn = regs[2n] bits 31..0
+ * This corresponds to the architecturally defined mapping between
+ * the two execution states, and means we do not need to explicitly
+ * map these registers when changing states.
+ */
+ float64 regs[64];
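+        /* For instance, S1 is the high half of regs[0] (aliasing D0) when in
+         * AArch32 state, but the low half of regs[2] (i.e. of D1) when in
+         * AArch64 state, matching the mapping described above.
+         */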
+
+ uint32_t xregs[16];
+ /* We store these fpcsr fields separately for convenience. */
+ int vec_len;
+ int vec_stride;
+
+ /* scratch space when Tn are not sufficient. */
+ uint32_t scratch[8];
+
+ /* fp_status is the "normal" fp status. standard_fp_status retains
+     * values corresponding to the ARM "Standard FPSCR Value", i.e.
+ * default-NaN, flush-to-zero, round-to-nearest and is used by
+ * any operations (generally Neon) which the architecture defines
+ * as controlled by the standard FPSCR value rather than the FPSCR.
+ *
+ * To avoid having to transfer exception bits around, we simply
+ * say that the FPSCR cumulative exception flags are the logical
+ * OR of the flags in the two fp statuses. This relies on the
+ * only thing which needs to read the exception flags being
+ * an explicit FPSCR read.
+ */
+ float_status fp_status;
+ float_status standard_fp_status;
+ } vfp;
+ uint64_t exclusive_addr;
+ uint64_t exclusive_val;
+ uint64_t exclusive_high;
+
+ /* iwMMXt coprocessor state. */
+ struct {
+ uint64_t regs[16];
+ uint64_t val;
+
+ uint32_t cregs[16];
+ } iwmmxt;
+
+#if defined(CONFIG_USER_ONLY)
+ /* For usermode syscall translation. */
+ int eabi;
+#endif
+
+ struct CPUBreakpoint *cpu_breakpoint[16];
+ struct CPUWatchpoint *cpu_watchpoint[16];
+
+ CPU_COMMON
+
+ /* These fields after the common ones so they are preserved on reset. */
+
+ /* Internal CPU feature flags. */
+ uint64_t features;
+
+ /* PMSAv7 MPU */
+ struct {
+ uint32_t *drbar;
+ uint32_t *drsr;
+ uint32_t *dracr;
+ } pmsav7;
+
+ void *nvic;
+ const struct arm_boot_info *boot_info;
+} CPUARMState;
+
+/**
+ * ARMELChangeHook:
+ * type of a function which can be registered via arm_register_el_change_hook()
+ * to get callbacks when the CPU changes its exception level or mode.
+ */
+typedef void ARMELChangeHook(ARMCPU *cpu, void *opaque);
+
+/**
+ * ARMCPU:
+ * @env: #CPUARMState
+ *
+ * An ARM CPU core.
+ */
+struct ARMCPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPUARMState env;
+
+ /* Coprocessor information */
+ GHashTable *cp_regs;
+ /* For marshalling (mostly coprocessor) register state between the
+ * kernel and QEMU (for KVM) and between two QEMUs (for migration),
+ * we use these arrays.
+ */
+ /* List of register indexes managed via these arrays; (full KVM style
+ * 64 bit indexes, not CPRegInfo 32 bit indexes)
+ */
+ uint64_t *cpreg_indexes;
+ /* Values of the registers (cpreg_indexes[i]'s value is cpreg_values[i]) */
+ uint64_t *cpreg_values;
+ /* Length of the indexes, values, reset_values arrays */
+ int32_t cpreg_array_len;
+ /* These are used only for migration: incoming data arrives in
+ * these fields and is sanity checked in post_load before copying
+ * to the working data structures above.
+ */
+ uint64_t *cpreg_vmstate_indexes;
+ uint64_t *cpreg_vmstate_values;
+ int32_t cpreg_vmstate_array_len;
+
+ /* Timers used by the generic (architected) timer */
+ QEMUTimer *gt_timer[NUM_GTIMERS];
+ /* GPIO outputs for generic timer */
+ qemu_irq gt_timer_outputs[NUM_GTIMERS];
+
+ /* MemoryRegion to use for secure physical accesses */
+ MemoryRegion *secure_memory;
+
+ /* 'compatible' string for this CPU for Linux device trees */
+ const char *dtb_compatible;
+
+ /* PSCI version for this CPU
+ * Bits[31:16] = Major Version
+ * Bits[15:0] = Minor Version
+ */
+ uint32_t psci_version;
+
+ /* Should CPU start in PSCI powered-off state? */
+ bool start_powered_off;
+ /* CPU currently in PSCI powered-off state */
+ bool powered_off;
+ /* CPU has security extension */
+ bool has_el3;
+ /* CPU has PMU (Performance Monitor Unit) */
+ bool has_pmu;
+
+ /* CPU has memory protection unit */
+ bool has_mpu;
+ /* PMSAv7 MPU number of supported regions */
+ uint32_t pmsav7_dregion;
+
+ /* PSCI conduit used to invoke PSCI methods
+ * 0 - disabled, 1 - smc, 2 - hvc
+ */
+ uint32_t psci_conduit;
+
+ /* [QEMU_]KVM_ARM_TARGET_* constant for this CPU, or
+ * QEMU_KVM_ARM_TARGET_NONE if the kernel doesn't support this CPU type.
+ */
+ uint32_t kvm_target;
+
+ /* KVM init features for this CPU */
+ uint32_t kvm_init_features[7];
+
+ /* Uniprocessor system with MP extensions */
+ bool mp_is_up;
+
+ /* The instance init functions for implementation-specific subclasses
+ * set these fields to specify the implementation-dependent values of
+ * various constant registers and reset values of non-constant
+ * registers.
+ * Some of these might become QOM properties eventually.
+ * Field names match the official register names as defined in the
+ * ARMv7AR ARM Architecture Reference Manual. A reset_ prefix
+ * is used for reset values of non-constant registers; no reset_
+ * prefix means a constant register.
+ */
+ uint32_t midr;
+ uint32_t revidr;
+ uint32_t reset_fpsid;
+ uint32_t mvfr0;
+ uint32_t mvfr1;
+ uint32_t mvfr2;
+ uint32_t ctr;
+ uint32_t reset_sctlr;
+ uint32_t id_pfr0;
+ uint32_t id_pfr1;
+ uint32_t id_dfr0;
+ uint32_t pmceid0;
+ uint32_t pmceid1;
+ uint32_t id_afr0;
+ uint32_t id_mmfr0;
+ uint32_t id_mmfr1;
+ uint32_t id_mmfr2;
+ uint32_t id_mmfr3;
+ uint32_t id_mmfr4;
+ uint32_t id_isar0;
+ uint32_t id_isar1;
+ uint32_t id_isar2;
+ uint32_t id_isar3;
+ uint32_t id_isar4;
+ uint32_t id_isar5;
+ uint64_t id_aa64pfr0;
+ uint64_t id_aa64pfr1;
+ uint64_t id_aa64dfr0;
+ uint64_t id_aa64dfr1;
+ uint64_t id_aa64afr0;
+ uint64_t id_aa64afr1;
+ uint64_t id_aa64isar0;
+ uint64_t id_aa64isar1;
+ uint64_t id_aa64mmfr0;
+ uint64_t id_aa64mmfr1;
+ uint32_t dbgdidr;
+ uint32_t clidr;
+ uint64_t mp_affinity; /* MP ID without feature bits */
+ /* The elements of this array are the CCSIDR values for each cache,
+ * in the order L1DCache, L1ICache, L2DCache, L2ICache, etc.
+ */
+ uint32_t ccsidr[16];
+ uint64_t reset_cbar;
+ uint32_t reset_auxcr;
+ bool reset_hivecs;
+    /* DCZ blocksize, in log_2(words), i.e. low 4 bits of DCZID_EL0 */
+ uint32_t dcz_blocksize;
+ uint64_t rvbar;
+
+ ARMELChangeHook *el_change_hook;
+ void *el_change_hook_opaque;
+};
+
+static inline ARMCPU *arm_env_get_cpu(CPUARMState *env)
+{
+ return container_of(env, ARMCPU, env);
+}
+
+#define ENV_GET_CPU(e) CPU(arm_env_get_cpu(e))
+
+#define ENV_OFFSET offsetof(ARMCPU, env)
+
+#ifndef CONFIG_USER_ONLY
+extern const struct VMStateDescription vmstate_arm_cpu;
+#endif
+
+void arm_cpu_do_interrupt(CPUState *cpu);
+void arm_v7m_cpu_do_interrupt(CPUState *cpu);
+bool arm_cpu_exec_interrupt(CPUState *cpu, int int_req);
+
+void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
+ int flags);
+
+hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
+ MemTxAttrs *attrs);
+
+int arm_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
+int arm_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+
+int arm_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
+ int cpuid, void *opaque);
+int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
+ int cpuid, void *opaque);
+
+#ifdef TARGET_AARCH64
+int aarch64_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
+int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+#endif
+
+ARMCPU *cpu_arm_init(const char *cpu_model);
+target_ulong do_arm_semihosting(CPUARMState *env);
+void aarch64_sync_32_to_64(CPUARMState *env);
+void aarch64_sync_64_to_32(CPUARMState *env);
+
+static inline bool is_a64(CPUARMState *env)
+{
+ return env->aarch64;
+}
+
+/* You can call this signal handler from your SIGBUS and SIGSEGV
+   signal handlers to inform the virtual CPU of exceptions. Non-zero
+   is returned if the signal was handled by the virtual CPU. */
+int cpu_arm_signal_handler(int host_signum, void *pinfo,
+ void *puc);
+
+/**
+ * pmccntr_sync
+ * @env: CPUARMState
+ *
+ * Synchronises the counter in the PMCCNTR. This must always be called twice,
+ * once before any action that might affect the timer and again afterwards.
+ * The function is used to swap the state of the register if required.
+ * This only happens when not in user mode (!CONFIG_USER_ONLY)
+ */
+void pmccntr_sync(CPUARMState *env);
+
+/* SCTLR bit meanings. Several bits have been reused in newer
+ * versions of the architecture; in that case we define constants
+ * for both old and new bit meanings. Code which tests against those
+ * bits should probably check or otherwise arrange that the CPU
+ * is the architectural version it expects.
+ */
+#define SCTLR_M (1U << 0)
+#define SCTLR_A (1U << 1)
+#define SCTLR_C (1U << 2)
+#define SCTLR_W (1U << 3) /* up to v6; RAO in v7 */
+#define SCTLR_SA (1U << 3)
+#define SCTLR_P (1U << 4) /* up to v5; RAO in v6 and v7 */
+#define SCTLR_SA0 (1U << 4) /* v8 onward, AArch64 only */
+#define SCTLR_D (1U << 5) /* up to v5; RAO in v6 */
+#define SCTLR_CP15BEN (1U << 5) /* v7 onward */
+#define SCTLR_L (1U << 6) /* up to v5; RAO in v6 and v7; RAZ in v8 */
+#define SCTLR_B (1U << 7) /* up to v6; RAZ in v7 */
+#define SCTLR_ITD (1U << 7) /* v8 onward */
+#define SCTLR_S (1U << 8) /* up to v6; RAZ in v7 */
+#define SCTLR_SED (1U << 8) /* v8 onward */
+#define SCTLR_R (1U << 9) /* up to v6; RAZ in v7 */
+#define SCTLR_UMA (1U << 9) /* v8 onward, AArch64 only */
+#define SCTLR_F (1U << 10) /* up to v6 */
+#define SCTLR_SW (1U << 10) /* v7 onward */
+#define SCTLR_Z (1U << 11)
+#define SCTLR_I (1U << 12)
+#define SCTLR_V (1U << 13)
+#define SCTLR_RR (1U << 14) /* up to v7 */
+#define SCTLR_DZE (1U << 14) /* v8 onward, AArch64 only */
+#define SCTLR_L4 (1U << 15) /* up to v6; RAZ in v7 */
+#define SCTLR_UCT (1U << 15) /* v8 onward, AArch64 only */
+#define SCTLR_DT (1U << 16) /* up to ??, RAO in v6 and v7 */
+#define SCTLR_nTWI (1U << 16) /* v8 onward */
+#define SCTLR_HA (1U << 17)
+#define SCTLR_BR (1U << 17) /* PMSA only */
+#define SCTLR_IT (1U << 18) /* up to ??, RAO in v6 and v7 */
+#define SCTLR_nTWE (1U << 18) /* v8 onward */
+#define SCTLR_WXN (1U << 19)
+#define SCTLR_ST (1U << 20) /* up to ??, RAZ in v6 */
+#define SCTLR_UWXN (1U << 20) /* v7 onward */
+#define SCTLR_FI (1U << 21)
+#define SCTLR_U (1U << 22)
+#define SCTLR_XP (1U << 23) /* up to v6; v7 onward RAO */
+#define SCTLR_VE (1U << 24) /* up to v7 */
+#define SCTLR_E0E (1U << 24) /* v8 onward, AArch64 only */
+#define SCTLR_EE (1U << 25)
+#define SCTLR_L2 (1U << 26) /* up to v6, RAZ in v7 */
+#define SCTLR_UCI (1U << 26) /* v8 onward, AArch64 only */
+#define SCTLR_NMFI (1U << 27)
+#define SCTLR_TRE (1U << 28)
+#define SCTLR_AFE (1U << 29)
+#define SCTLR_TE (1U << 30)
+
+#define CPTR_TCPAC (1U << 31)
+#define CPTR_TTA (1U << 20)
+#define CPTR_TFP (1U << 10)
+
+#define MDCR_EPMAD (1U << 21)
+#define MDCR_EDAD (1U << 20)
+#define MDCR_SPME (1U << 17)
+#define MDCR_SDD (1U << 16)
+#define MDCR_SPD (3U << 14)
+#define MDCR_TDRA (1U << 11)
+#define MDCR_TDOSA (1U << 10)
+#define MDCR_TDA (1U << 9)
+#define MDCR_TDE (1U << 8)
+#define MDCR_HPME (1U << 7)
+#define MDCR_TPM (1U << 6)
+#define MDCR_TPMCR (1U << 5)
+
+/* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */
+#define SDCR_VALID_MASK (MDCR_EPMAD | MDCR_EDAD | MDCR_SPME | MDCR_SPD)
+
+#define CPSR_M (0x1fU)
+#define CPSR_T (1U << 5)
+#define CPSR_F (1U << 6)
+#define CPSR_I (1U << 7)
+#define CPSR_A (1U << 8)
+#define CPSR_E (1U << 9)
+#define CPSR_IT_2_7 (0xfc00U)
+#define CPSR_GE (0xfU << 16)
+#define CPSR_IL (1U << 20)
+/* Note that the RESERVED bits include bit 21, which is PSTATE_SS in
+ * an AArch64 SPSR but RES0 in AArch32 SPSR and CPSR. In QEMU we use
+ * env->uncached_cpsr bit 21 to store PSTATE.SS when executing in AArch32,
+ * where it is live state but not accessible to the AArch32 code.
+ */
+#define CPSR_RESERVED (0x7U << 21)
+#define CPSR_J (1U << 24)
+#define CPSR_IT_0_1 (3U << 25)
+#define CPSR_Q (1U << 27)
+#define CPSR_V (1U << 28)
+#define CPSR_C (1U << 29)
+#define CPSR_Z (1U << 30)
+#define CPSR_N (1U << 31)
+#define CPSR_NZCV (CPSR_N | CPSR_Z | CPSR_C | CPSR_V)
+#define CPSR_AIF (CPSR_A | CPSR_I | CPSR_F)
+
+#define CPSR_IT (CPSR_IT_0_1 | CPSR_IT_2_7)
+#define CACHED_CPSR_BITS (CPSR_T | CPSR_AIF | CPSR_GE | CPSR_IT | CPSR_Q \
+ | CPSR_NZCV)
+/* Bits writable in user mode. */
+#define CPSR_USER (CPSR_NZCV | CPSR_Q | CPSR_GE)
+/* Execution state bits. MRS read as zero, MSR writes ignored. */
+#define CPSR_EXEC (CPSR_T | CPSR_IT | CPSR_J | CPSR_IL)
+/* Mask of bits which may be set by exception return copying them from SPSR */
+#define CPSR_ERET_MASK (~CPSR_RESERVED)
+
+#define TTBCR_N (7U << 0) /* TTBCR.EAE==0 */
+#define TTBCR_T0SZ (7U << 0) /* TTBCR.EAE==1 */
+#define TTBCR_PD0 (1U << 4)
+#define TTBCR_PD1 (1U << 5)
+#define TTBCR_EPD0 (1U << 7)
+#define TTBCR_IRGN0 (3U << 8)
+#define TTBCR_ORGN0 (3U << 10)
+#define TTBCR_SH0 (3U << 12)
+#define TTBCR_T1SZ (3U << 16)
+#define TTBCR_A1 (1U << 22)
+#define TTBCR_EPD1 (1U << 23)
+#define TTBCR_IRGN1 (3U << 24)
+#define TTBCR_ORGN1 (3U << 26)
+#define TTBCR_SH1 (1U << 28)
+#define TTBCR_EAE (1U << 31)
+
+/* Bit definitions for ARMv8 SPSR (PSTATE) format.
+ * Only these are valid when in AArch64 mode; in
+ * AArch32 mode SPSRs are basically CPSR-format.
+ */
+#define PSTATE_SP (1U)
+#define PSTATE_M (0xFU)
+#define PSTATE_nRW (1U << 4)
+#define PSTATE_F (1U << 6)
+#define PSTATE_I (1U << 7)
+#define PSTATE_A (1U << 8)
+#define PSTATE_D (1U << 9)
+#define PSTATE_IL (1U << 20)
+#define PSTATE_SS (1U << 21)
+#define PSTATE_V (1U << 28)
+#define PSTATE_C (1U << 29)
+#define PSTATE_Z (1U << 30)
+#define PSTATE_N (1U << 31)
+#define PSTATE_NZCV (PSTATE_N | PSTATE_Z | PSTATE_C | PSTATE_V)
+#define PSTATE_DAIF (PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F)
+#define CACHED_PSTATE_BITS (PSTATE_NZCV | PSTATE_DAIF)
+/* Mode values for AArch64 */
+#define PSTATE_MODE_EL3h 13
+#define PSTATE_MODE_EL3t 12
+#define PSTATE_MODE_EL2h 9
+#define PSTATE_MODE_EL2t 8
+#define PSTATE_MODE_EL1h 5
+#define PSTATE_MODE_EL1t 4
+#define PSTATE_MODE_EL0t 0
+
+/* Map EL and handler into a PSTATE_MODE. */
+static inline unsigned int aarch64_pstate_mode(unsigned int el, bool handler)
+{
+ return (el << 2) | handler;
+}
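+/*
+ * For example, aarch64_pstate_mode(1, true) is (1 << 2) | 1 == 5, i.e.
+ * PSTATE_MODE_EL1h, and aarch64_pstate_mode(0, false) is PSTATE_MODE_EL0t.
+ */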
+
+/* Return the current PSTATE value. For the moment we don't support 32<->64 bit
+ * interprocessing, so we don't attempt to sync with the cpsr state used by
+ * the 32 bit decoder.
+ */
+static inline uint32_t pstate_read(CPUARMState *env)
+{
+ int ZF;
+
+ ZF = (env->ZF == 0);
+ return (env->NF & 0x80000000) | (ZF << 30)
+ | (env->CF << 29) | ((env->VF & 0x80000000) >> 3)
+ | env->pstate | env->daif;
+}
+
+static inline void pstate_write(CPUARMState *env, uint32_t val)
+{
+ env->ZF = (~val) & PSTATE_Z;
+ env->NF = val;
+ env->CF = (val >> 29) & 1;
+ env->VF = (val << 3) & 0x80000000;
+ env->daif = val & PSTATE_DAIF;
+ env->pstate = val & ~CACHED_PSTATE_BITS;
+}
+
+/* Return the current CPSR value. */
+uint32_t cpsr_read(CPUARMState *env);
+
+typedef enum CPSRWriteType {
+ CPSRWriteByInstr = 0, /* from guest MSR or CPS */
+ CPSRWriteExceptionReturn = 1, /* from guest exception return insn */
+ CPSRWriteRaw = 2, /* trust values, do not switch reg banks */
+ CPSRWriteByGDBStub = 3, /* from the GDB stub */
+} CPSRWriteType;
+
+/* Set the CPSR. Note that some bits of mask must be all-set or all-clear.*/
+void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
+ CPSRWriteType write_type);
+
+/* Return the current xPSR value. */
+static inline uint32_t xpsr_read(CPUARMState *env)
+{
+ int ZF;
+ ZF = (env->ZF == 0);
+ return (env->NF & 0x80000000) | (ZF << 30)
+ | (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
+ | (env->thumb << 24) | ((env->condexec_bits & 3) << 25)
+ | ((env->condexec_bits & 0xfc) << 8)
+ | env->v7m.exception;
+}
+
+/* Set the xPSR. Note that some bits of mask must be all-set or all-clear. */
+static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
+{
+ if (mask & CPSR_NZCV) {
+ env->ZF = (~val) & CPSR_Z;
+ env->NF = val;
+ env->CF = (val >> 29) & 1;
+ env->VF = (val << 3) & 0x80000000;
+ }
+ if (mask & CPSR_Q)
+ env->QF = ((val & CPSR_Q) != 0);
+ if (mask & (1 << 24))
+ env->thumb = ((val & (1 << 24)) != 0);
+ if (mask & CPSR_IT_0_1) {
+ env->condexec_bits &= ~3;
+ env->condexec_bits |= (val >> 25) & 3;
+ }
+ if (mask & CPSR_IT_2_7) {
+ env->condexec_bits &= 3;
+ env->condexec_bits |= (val >> 8) & 0xfc;
+ }
+ if (mask & 0x1ff) {
+ env->v7m.exception = val & 0x1ff;
+ }
+}
+
+#define HCR_VM (1ULL << 0)
+#define HCR_SWIO (1ULL << 1)
+#define HCR_PTW (1ULL << 2)
+#define HCR_FMO (1ULL << 3)
+#define HCR_IMO (1ULL << 4)
+#define HCR_AMO (1ULL << 5)
+#define HCR_VF (1ULL << 6)
+#define HCR_VI (1ULL << 7)
+#define HCR_VSE (1ULL << 8)
+#define HCR_FB (1ULL << 9)
+#define HCR_BSU_MASK (3ULL << 10)
+#define HCR_DC (1ULL << 12)
+#define HCR_TWI (1ULL << 13)
+#define HCR_TWE (1ULL << 14)
+#define HCR_TID0 (1ULL << 15)
+#define HCR_TID1 (1ULL << 16)
+#define HCR_TID2 (1ULL << 17)
+#define HCR_TID3 (1ULL << 18)
+#define HCR_TSC (1ULL << 19)
+#define HCR_TIDCP (1ULL << 20)
+#define HCR_TACR (1ULL << 21)
+#define HCR_TSW (1ULL << 22)
+#define HCR_TPC (1ULL << 23)
+#define HCR_TPU (1ULL << 24)
+#define HCR_TTLB (1ULL << 25)
+#define HCR_TVM (1ULL << 26)
+#define HCR_TGE (1ULL << 27)
+#define HCR_TDZ (1ULL << 28)
+#define HCR_HCD (1ULL << 29)
+#define HCR_TRVM (1ULL << 30)
+#define HCR_RW (1ULL << 31)
+#define HCR_CD (1ULL << 32)
+#define HCR_ID (1ULL << 33)
+#define HCR_MASK ((1ULL << 34) - 1)
+
+#define SCR_NS (1U << 0)
+#define SCR_IRQ (1U << 1)
+#define SCR_FIQ (1U << 2)
+#define SCR_EA (1U << 3)
+#define SCR_FW (1U << 4)
+#define SCR_AW (1U << 5)
+#define SCR_NET (1U << 6)
+#define SCR_SMD (1U << 7)
+#define SCR_HCE (1U << 8)
+#define SCR_SIF (1U << 9)
+#define SCR_RW (1U << 10)
+#define SCR_ST (1U << 11)
+#define SCR_TWI (1U << 12)
+#define SCR_TWE (1U << 13)
+#define SCR_AARCH32_MASK (0x3fff & ~(SCR_RW | SCR_ST))
+#define SCR_AARCH64_MASK (0x3fff & ~SCR_NET)
+
+/* Return the current FPSCR value. */
+uint32_t vfp_get_fpscr(CPUARMState *env);
+void vfp_set_fpscr(CPUARMState *env, uint32_t val);
+
+/* For A64 the FPSCR is split into two logically distinct registers,
+ * FPCR and FPSR. However since they still use non-overlapping bits
+ * we store the underlying state in fpscr and just mask on read/write.
+ */
+#define FPSR_MASK 0xf800009f
+#define FPCR_MASK 0x07f79f00
+static inline uint32_t vfp_get_fpsr(CPUARMState *env)
+{
+ return vfp_get_fpscr(env) & FPSR_MASK;
+}
+
+static inline void vfp_set_fpsr(CPUARMState *env, uint32_t val)
+{
+ uint32_t new_fpscr = (vfp_get_fpscr(env) & ~FPSR_MASK) | (val & FPSR_MASK);
+ vfp_set_fpscr(env, new_fpscr);
+}
+
+static inline uint32_t vfp_get_fpcr(CPUARMState *env)
+{
+ return vfp_get_fpscr(env) & FPCR_MASK;
+}
+
+static inline void vfp_set_fpcr(CPUARMState *env, uint32_t val)
+{
+ uint32_t new_fpscr = (vfp_get_fpscr(env) & ~FPCR_MASK) | (val & FPCR_MASK);
+ vfp_set_fpscr(env, new_fpscr);
+}
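+
+/* Sketch of the split: FPSR_MASK and FPCR_MASK do not overlap, so OR-ing
+ * the two views back together yields all of the architecturally defined
+ * FPSCR bits again (hypothetical helper, for illustration only).
+ */
+static inline uint32_t vfp_recombine_fpsr_fpcr_example(CPUARMState *env)
+{
+ return vfp_get_fpsr(env) | vfp_get_fpcr(env);
+}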
+
+enum arm_cpu_mode {
+ ARM_CPU_MODE_USR = 0x10,
+ ARM_CPU_MODE_FIQ = 0x11,
+ ARM_CPU_MODE_IRQ = 0x12,
+ ARM_CPU_MODE_SVC = 0x13,
+ ARM_CPU_MODE_MON = 0x16,
+ ARM_CPU_MODE_ABT = 0x17,
+ ARM_CPU_MODE_HYP = 0x1a,
+ ARM_CPU_MODE_UND = 0x1b,
+ ARM_CPU_MODE_SYS = 0x1f
+};
+
+/* VFP system registers. */
+#define ARM_VFP_FPSID 0
+#define ARM_VFP_FPSCR 1
+#define ARM_VFP_MVFR2 5
+#define ARM_VFP_MVFR1 6
+#define ARM_VFP_MVFR0 7
+#define ARM_VFP_FPEXC 8
+#define ARM_VFP_FPINST 9
+#define ARM_VFP_FPINST2 10
+
+/* iwMMXt coprocessor control registers. */
+#define ARM_IWMMXT_wCID 0
+#define ARM_IWMMXT_wCon 1
+#define ARM_IWMMXT_wCSSF 2
+#define ARM_IWMMXT_wCASF 3
+#define ARM_IWMMXT_wCGR0 8
+#define ARM_IWMMXT_wCGR1 9
+#define ARM_IWMMXT_wCGR2 10
+#define ARM_IWMMXT_wCGR3 11
+
+/* If adding a feature bit which corresponds to a Linux ELF
+ * HWCAP bit, remember to update the feature-bit-to-hwcap
+ * mapping in linux-user/elfload.c:get_elf_hwcap().
+ */
+enum arm_features {
+ ARM_FEATURE_VFP,
+ ARM_FEATURE_AUXCR, /* ARM1026 Auxiliary control register. */
+ ARM_FEATURE_XSCALE, /* Intel XScale extensions. */
+ ARM_FEATURE_IWMMXT, /* Intel iwMMXt extension. */
+ ARM_FEATURE_V6,
+ ARM_FEATURE_V6K,
+ ARM_FEATURE_V7,
+ ARM_FEATURE_THUMB2,
+ ARM_FEATURE_MPU, /* Only has Memory Protection Unit, not full MMU. */
+ ARM_FEATURE_VFP3,
+ ARM_FEATURE_VFP_FP16,
+ ARM_FEATURE_NEON,
+ ARM_FEATURE_THUMB_DIV, /* divide supported in Thumb encoding */
+ ARM_FEATURE_M, /* Microcontroller profile. */
+ ARM_FEATURE_OMAPCP, /* OMAP specific CP15 ops handling. */
+ ARM_FEATURE_THUMB2EE,
+ ARM_FEATURE_V7MP, /* v7 Multiprocessing Extensions */
+ ARM_FEATURE_V4T,
+ ARM_FEATURE_V5,
+ ARM_FEATURE_STRONGARM,
+ ARM_FEATURE_VAPA, /* cp15 VA to PA lookups */
+ ARM_FEATURE_ARM_DIV, /* divide supported in ARM encoding */
+ ARM_FEATURE_VFP4, /* VFPv4 (implies that NEON is v2) */
+ ARM_FEATURE_GENERIC_TIMER,
+ ARM_FEATURE_MVFR, /* Media and VFP Feature Registers 0 and 1 */
+ ARM_FEATURE_DUMMY_C15_REGS, /* RAZ/WI all of cp15 crn=15 */
+ ARM_FEATURE_CACHE_TEST_CLEAN, /* 926/1026 style test-and-clean ops */
+ ARM_FEATURE_CACHE_DIRTY_REG, /* 1136/1176 cache dirty status register */
+ ARM_FEATURE_CACHE_BLOCK_OPS, /* v6 optional cache block operations */
+ ARM_FEATURE_MPIDR, /* has cp15 MPIDR */
+ ARM_FEATURE_PXN, /* has Privileged Execute Never bit */
+ ARM_FEATURE_LPAE, /* has Large Physical Address Extension */
+ ARM_FEATURE_V8,
+ ARM_FEATURE_AARCH64, /* supports 64 bit mode */
+ ARM_FEATURE_V8_AES, /* implements AES part of v8 Crypto Extensions */
+ ARM_FEATURE_CBAR, /* has cp15 CBAR */
+ ARM_FEATURE_CRC, /* ARMv8 CRC instructions */
+ ARM_FEATURE_CBAR_RO, /* has cp15 CBAR and it is read-only */
+ ARM_FEATURE_EL2, /* has EL2 Virtualization support */
+ ARM_FEATURE_EL3, /* has EL3 Secure monitor support */
+ ARM_FEATURE_V8_SHA1, /* implements SHA1 part of v8 Crypto Extensions */
+ ARM_FEATURE_V8_SHA256, /* implements SHA256 part of v8 Crypto Extensions */
+ ARM_FEATURE_V8_PMULL, /* implements PMULL part of v8 Crypto Extensions */
+ ARM_FEATURE_THUMB_DSP, /* DSP insns supported in the Thumb encodings */
+ ARM_FEATURE_PMU, /* has PMU support */
+};
+
+static inline int arm_feature(CPUARMState *env, int feature)
+{
+ return (env->features & (1ULL << feature)) != 0;
+}
+
+#if !defined(CONFIG_USER_ONLY)
+/* Return true if exception levels below EL3 are in secure state,
+ * or would be following an exception return to that level.
+ * Unlike arm_is_secure() (which is always a question about the
+ * _current_ state of the CPU) this doesn't care about the current
+ * EL or mode.
+ */
+static inline bool arm_is_secure_below_el3(CPUARMState *env)
+{
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ return !(env->cp15.scr_el3 & SCR_NS);
+ } else {
+ /* If EL3 is not supported then the secure state is implementation
+ * defined, in which case QEMU defaults to non-secure.
+ */
+ return false;
+ }
+}
+
+/* Return true if the CPU is AArch64 EL3 or AArch32 Mon */
+static inline bool arm_is_el3_or_mon(CPUARMState *env)
+{
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ if (is_a64(env) && extract32(env->pstate, 2, 2) == 3) {
+ /* CPU currently in AArch64 state and EL3 */
+ return true;
+ } else if (!is_a64(env) &&
+ (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
+ /* CPU currently in AArch32 state and monitor mode */
+ return true;
+ }
+ }
+ return false;
+}
+
+/* Return true if the processor is in secure state */
+static inline bool arm_is_secure(CPUARMState *env)
+{
+ if (arm_is_el3_or_mon(env)) {
+ return true;
+ }
+ return arm_is_secure_below_el3(env);
+}
+
+#else
+static inline bool arm_is_secure_below_el3(CPUARMState *env)
+{
+ return false;
+}
+
+static inline bool arm_is_secure(CPUARMState *env)
+{
+ return false;
+}
+#endif
+
+/* Return true if the specified exception level is running in AArch64 state. */
+static inline bool arm_el_is_aa64(CPUARMState *env, int el)
+{
+ /* This isn't valid for EL0 (if we're in EL0, is_a64() is what you want,
+ * and if we're not in EL0 then the state of EL0 isn't well defined.)
+ */
+ assert(el >= 1 && el <= 3);
+ bool aa64 = arm_feature(env, ARM_FEATURE_AARCH64);
+
+ /* The highest exception level is always at the maximum supported
+ * register width, and then lower levels have a register width controlled
+ * by bits in the SCR or HCR registers.
+ */
+ if (el == 3) {
+ return aa64;
+ }
+
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ aa64 = aa64 && (env->cp15.scr_el3 & SCR_RW);
+ }
+
+ if (el == 2) {
+ return aa64;
+ }
+
+ if (arm_feature(env, ARM_FEATURE_EL2) && !arm_is_secure_below_el3(env)) {
+ aa64 = aa64 && (env->cp15.hcr_el2 & HCR_RW);
+ }
+
+ return aa64;
+}
+
+/* Function for determining whether guest cp register reads and writes should
+ * access the secure or non-secure bank of a cp register. When EL3 is
+ * operating in AArch32 state, the NS-bit determines whether the secure
+ * instance of a cp register should be used. When EL3 is AArch64 (or if
+ * it doesn't exist at all) then there is no register banking, and all
+ * accesses are to the non-secure version.
+ */
+static inline bool access_secure_reg(CPUARMState *env)
+{
+ bool ret = (arm_feature(env, ARM_FEATURE_EL3) &&
+ !arm_el_is_aa64(env, 3) &&
+ !(env->cp15.scr_el3 & SCR_NS));
+
+ return ret;
+}
+
+/* Macros for accessing a specified CP register bank */
+#define A32_BANKED_REG_GET(_env, _regname, _secure) \
+ ((_secure) ? (_env)->cp15._regname##_s : (_env)->cp15._regname##_ns)
+
+#define A32_BANKED_REG_SET(_env, _regname, _secure, _val) \
+ do { \
+ if (_secure) { \
+ (_env)->cp15._regname##_s = (_val); \
+ } else { \
+ (_env)->cp15._regname##_ns = (_val); \
+ } \
+ } while (0)
+
+/* Macros for automatically accessing a specific CP register bank depending on
+ * the current secure state of the system. These macros are not intended for
+ * supporting instruction translation reads/writes as these are dependent
+ * solely on the SCR.NS bit and not the mode.
+ */
+#define A32_BANKED_CURRENT_REG_GET(_env, _regname) \
+ A32_BANKED_REG_GET((_env), _regname, \
+ (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)))
+
+#define A32_BANKED_CURRENT_REG_SET(_env, _regname, _val) \
+ A32_BANKED_REG_SET((_env), _regname, \
+ (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)), \
+ (_val))
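+
+/* Usage sketch (hypothetical helper): fetch the SCTLR bank that guest code
+ * in the current security state would see, assuming the banked
+ * cp15.sctlr_s/sctlr_ns fields declared earlier in this file.
+ */
+static inline uint64_t arm_current_sctlr_example(CPUARMState *env)
+{
+ return A32_BANKED_CURRENT_REG_GET(env, sctlr);
+}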
+
+void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf);
+uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
+ uint32_t cur_el, bool secure);
+
+/* Interface between CPU and Interrupt controller. */
+void armv7m_nvic_set_pending(void *opaque, int irq);
+int armv7m_nvic_acknowledge_irq(void *opaque);
+void armv7m_nvic_complete_irq(void *opaque, int irq);
+
+/* Interface for defining coprocessor registers.
+ * Registers are defined in tables of arm_cp_reginfo structs
+ * which are passed to define_arm_cp_regs().
+ */
+
+/* When looking up a coprocessor register we look for it
+ * via an integer which encodes all of:
+ * coprocessor number
+ * Crn, Crm, opc1, opc2 fields
+ * 32 or 64 bit register (ie is it accessed via MRC/MCR
+ * or via MRRC/MCRR?)
+ * non-secure/secure bank (AArch32 only)
+ * We allow 4 bits for opc1 because MRRC/MCRR have a 4 bit field.
+ * (In this case crn and opc2 should be zero.)
+ * For AArch64, there is no 32/64 bit size distinction;
+ * instead all registers have a 2 bit op0, 3 bit op1 and op2,
+ * and 4 bit CRn and CRm. The encoding patterns are chosen
+ * to be easy to convert to and from the KVM encodings, and also
+ * so that the hashtable can contain both AArch32 and AArch64
+ * registers (to allow for interprocessing where we might run
+ * 32 bit code on a 64 bit core).
+ */
+/* This bit is private to our hashtable cpreg; in KVM register
+ * IDs the AArch64/32 distinction is the KVM_REG_ARM/ARM64
+ * in the upper bits of the 64 bit ID.
+ */
+#define CP_REG_AA64_SHIFT 28
+#define CP_REG_AA64_MASK (1 << CP_REG_AA64_SHIFT)
+
+/* To enable banking of coprocessor registers depending on ns-bit we
+ * add a bit to distinguish between secure and non-secure cpregs in the
+ * hashtable.
+ */
+#define CP_REG_NS_SHIFT 29
+#define CP_REG_NS_MASK (1 << CP_REG_NS_SHIFT)
+
+#define ENCODE_CP_REG(cp, is64, ns, crn, crm, opc1, opc2) \
+ ((ns) << CP_REG_NS_SHIFT | ((cp) << 16) | ((is64) << 15) | \
+ ((crn) << 11) | ((crm) << 7) | ((opc1) << 3) | (opc2))
+
+#define ENCODE_AA64_CP_REG(cp, crn, crm, op0, op1, op2) \
+ (CP_REG_AA64_MASK | \
+ ((cp) << CP_REG_ARM_COPROC_SHIFT) | \
+ ((op0) << CP_REG_ARM64_SYSREG_OP0_SHIFT) | \
+ ((op1) << CP_REG_ARM64_SYSREG_OP1_SHIFT) | \
+ ((crn) << CP_REG_ARM64_SYSREG_CRN_SHIFT) | \
+ ((crm) << CP_REG_ARM64_SYSREG_CRM_SHIFT) | \
+ ((op2) << CP_REG_ARM64_SYSREG_OP2_SHIFT))
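+
+/* Worked example: the 32 bit non-secure view of SCTLR (MRC p15, 0, <Rt>,
+ * c1, c0, 0) hashes as ns=1, cp=15, is64=0, crn=1, crm=0, opc1=0, opc2=0,
+ * i.e. 0x200f0800. The define name is purely illustrative.
+ */
+#define CP_REG_SCTLR_NS_EXAMPLE ENCODE_CP_REG(15, 0, 1, 1, 0, 0, 0)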
+
+/* Convert a full 64 bit KVM register ID to the truncated 32 bit
+ * version used as a key for the coprocessor register hashtable
+ */
+static inline uint32_t kvm_to_cpreg_id(uint64_t kvmid)
+{
+ uint32_t cpregid = kvmid;
+ if ((kvmid & CP_REG_ARCH_MASK) == CP_REG_ARM64) {
+ cpregid |= CP_REG_AA64_MASK;
+ } else {
+ if ((kvmid & CP_REG_SIZE_MASK) == CP_REG_SIZE_U64) {
+ cpregid |= (1 << 15);
+ }
+
+ /* KVM is always non-secure so add the NS flag on AArch32 register
+ * entries.
+ */
+ cpregid |= 1 << CP_REG_NS_SHIFT;
+ }
+ return cpregid;
+}
+
+/* Convert a truncated 32 bit hashtable key into the full
+ * 64 bit KVM register ID.
+ */
+static inline uint64_t cpreg_to_kvm_id(uint32_t cpregid)
+{
+ uint64_t kvmid;
+
+ if (cpregid & CP_REG_AA64_MASK) {
+ kvmid = cpregid & ~CP_REG_AA64_MASK;
+ kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM64;
+ } else {
+ kvmid = cpregid & ~(1 << 15);
+ if (cpregid & (1 << 15)) {
+ kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM;
+ } else {
+ kvmid |= CP_REG_SIZE_U32 | CP_REG_ARM;
+ }
+ }
+ return kvmid;
+}
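+
+/* Sketch: for the non-secure AArch32 keys and the AArch64 keys produced by
+ * the ENCODE_*_CP_REG macros above, the two conversions are exact inverses,
+ * which is what makes the truncated form usable as a hash key
+ * (hypothetical helper, for illustration only).
+ */
+static inline bool cpreg_kvm_id_roundtrips_example(uint32_t cpregid)
+{
+ return kvm_to_cpreg_id(cpreg_to_kvm_id(cpregid)) == cpregid;
+}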
+
+/* ARMCPRegInfo type field bits. If the SPECIAL bit is set this is a
+ * special-behaviour cp reg and bits [15..8] indicate what behaviour
+ * it has. Otherwise it is a simple cp reg, where CONST indicates that
+ * TCG can assume the value to be constant (ie load at translate time)
+ * and 64BIT indicates a 64 bit wide coprocessor register. SUPPRESS_TB_END
+ * indicates that the TB should not be ended after a write to this register
+ * (the default is that the TB ends after cp writes). OVERRIDE permits
+ * a register definition to override a previous definition for the
+ * same (cp, is64, crn, crm, opc1, opc2) tuple: either the new or the
+ * old must have the OVERRIDE bit set.
+ * ALIAS indicates that this register is an alias view of some underlying
+ * state which is also visible via another register, and that the other
+ * register is handling migration and reset; registers marked ALIAS will not be
+ * migrated but may have their state set by syncing of register state from KVM.
+ * NO_RAW indicates that this register has no underlying state and does not
+ * support raw access for state saving/loading; it will not be used for either
+ * migration or KVM state synchronization. (Typically this is for "registers"
+ * which are actually used as instructions for cache maintenance and so on.)
+ * IO indicates that this register does I/O and therefore its accesses
+ * need to be surrounded by gen_io_start()/gen_io_end(). In particular,
+ * registers which implement clocks or timers require this.
+ */
+#define ARM_CP_SPECIAL 1
+#define ARM_CP_CONST 2
+#define ARM_CP_64BIT 4
+#define ARM_CP_SUPPRESS_TB_END 8
+#define ARM_CP_OVERRIDE 16
+#define ARM_CP_ALIAS 32
+#define ARM_CP_IO 64
+#define ARM_CP_NO_RAW 128
+#define ARM_CP_NOP (ARM_CP_SPECIAL | (1 << 8))
+#define ARM_CP_WFI (ARM_CP_SPECIAL | (2 << 8))
+#define ARM_CP_NZCV (ARM_CP_SPECIAL | (3 << 8))
+#define ARM_CP_CURRENTEL (ARM_CP_SPECIAL | (4 << 8))
+#define ARM_CP_DC_ZVA (ARM_CP_SPECIAL | (5 << 8))
+#define ARM_LAST_SPECIAL ARM_CP_DC_ZVA
+/* Used only as a terminator for ARMCPRegInfo lists */
+#define ARM_CP_SENTINEL 0xffff
+/* Mask of only the flag bits in a type field */
+#define ARM_CP_FLAG_MASK 0xff
+
+/* Valid values for ARMCPRegInfo state field, indicating which of
+ * the AArch32 and AArch64 execution states this register is visible in.
+ * If the reginfo doesn't explicitly specify then it is AArch32 only.
+ * If the reginfo is declared to be visible in both states then a second
+ * reginfo is synthesised for the AArch32 view of the AArch64 register,
+ * such that the AArch32 view is the lower 32 bits of the AArch64 one.
+ * Note that we rely on the values of these enums as we iterate through
+ * the various states in some places.
+ */
+enum {
+ ARM_CP_STATE_AA32 = 0,
+ ARM_CP_STATE_AA64 = 1,
+ ARM_CP_STATE_BOTH = 2,
+};
+
+/* ARM CP register secure state flags. These flags identify security state
+ * attributes for a given CP register entry.
+ * The existence of both or neither secure and non-secure flags indicates that
+ * the register has both a secure and non-secure hash entry. A single one of
+ * these flags causes the register to only be hashed for the specified
+ * security state.
+ * Although definitions may have any combination of the S/NS bits, each
+ * registered entry will only have one to identify whether the entry is secure
+ * or non-secure.
+ */
+enum {
+ ARM_CP_SECSTATE_S = (1 << 0), /* bit[0]: Secure state register */
+ ARM_CP_SECSTATE_NS = (1 << 1), /* bit[1]: Non-secure state register */
+};
+
+/* Return true if cptype is a valid type field. This is used to try to
+ * catch errors where the sentinel has been accidentally left off the end
+ * of a list of registers.
+ */
+static inline bool cptype_valid(int cptype)
+{
+ return ((cptype & ~ARM_CP_FLAG_MASK) == 0)
+ || ((cptype & ARM_CP_SPECIAL) &&
+ ((cptype & ~ARM_CP_FLAG_MASK) <= ARM_LAST_SPECIAL));
+}
+
+/* Access rights:
+ * We define bits for Read and Write access for what rev C of the v7-AR ARM ARM
+ * defines as PL0 (user), PL1 (fiq/irq/svc/abt/und/sys, ie privileged), and
+ * PL2 (hyp). The other level which has Read and Write bits is Secure PL1
+ * (ie any of the privileged modes in Secure state, or Monitor mode).
+ * If a register is accessible in one privilege level it's always accessible
+ * in higher privilege levels too. Since "Secure PL1" also follows this rule
+ * (ie anything visible in PL2 is visible in S-PL1, some things are only
+ * visible in S-PL1) but "Secure PL1" is a bit of a mouthful, we bend the
+ * terminology a little and call this PL3.
+ * In AArch64 things are somewhat simpler as the PLx bits line up exactly
+ * with the ELx exception levels.
+ *
+ * If access permissions for a register are more complex than can be
+ * described with these bits, then use a laxer set of restrictions, and
+ * do the more restrictive/complex check inside a helper function.
+ */
+#define PL3_R 0x80
+#define PL3_W 0x40
+#define PL2_R (0x20 | PL3_R)
+#define PL2_W (0x10 | PL3_W)
+#define PL1_R (0x08 | PL2_R)
+#define PL1_W (0x04 | PL2_W)
+#define PL0_R (0x02 | PL1_R)
+#define PL0_W (0x01 | PL1_W)
+
+#define PL3_RW (PL3_R | PL3_W)
+#define PL2_RW (PL2_R | PL2_W)
+#define PL1_RW (PL1_R | PL1_W)
+#define PL0_RW (PL0_R | PL0_W)
+
+/* Return the highest implemented Exception Level */
+static inline int arm_highest_el(CPUARMState *env)
+{
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ return 3;
+ }
+ if (arm_feature(env, ARM_FEATURE_EL2)) {
+ return 2;
+ }
+ return 1;
+}
+
+/* Return the current Exception Level (as per ARMv8; note that this differs
+ * from the ARMv7 Privilege Level).
+ */
+static inline int arm_current_el(CPUARMState *env)
+{
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ return !((env->v7m.exception == 0) && (env->v7m.control & 1));
+ }
+
+ if (is_a64(env)) {
+ return extract32(env->pstate, 2, 2);
+ }
+
+ switch (env->uncached_cpsr & 0x1f) {
+ case ARM_CPU_MODE_USR:
+ return 0;
+ case ARM_CPU_MODE_HYP:
+ return 2;
+ case ARM_CPU_MODE_MON:
+ return 3;
+ default:
+ if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
+ /* If EL3 is 32-bit then all secure privileged modes run in
+ * EL3
+ */
+ return 3;
+ }
+
+ return 1;
+ }
+}
+
+typedef struct ARMCPRegInfo ARMCPRegInfo;
+
+typedef enum CPAccessResult {
+ /* Access is permitted */
+ CP_ACCESS_OK = 0,
+ /* Access fails due to a configurable trap or enable which would
+ * result in a categorized exception syndrome giving information about
+ * the failing instruction (ie syndrome category 0x3, 0x4, 0x5, 0x6,
+ * 0xc or 0x18). The exception is taken to the usual target EL (EL1 or
+ * PL1 if in EL0, otherwise to the current EL).
+ */
+ CP_ACCESS_TRAP = 1,
+ /* Access fails and results in an exception syndrome 0x0 ("uncategorized").
+ * Note that this is not a catch-all case -- the set of cases which may
+ * result in this failure is specifically defined by the architecture.
+ */
+ CP_ACCESS_TRAP_UNCATEGORIZED = 2,
+ /* As CP_ACCESS_TRAP, but for traps directly to EL2 or EL3 */
+ CP_ACCESS_TRAP_EL2 = 3,
+ CP_ACCESS_TRAP_EL3 = 4,
+ /* As CP_ACCESS_TRAP_UNCATEGORIZED, but for traps directly to EL2 or EL3 */
+ CP_ACCESS_TRAP_UNCATEGORIZED_EL2 = 5,
+ CP_ACCESS_TRAP_UNCATEGORIZED_EL3 = 6,
+ /* Access fails and results in an exception syndrome for an FP access,
+ * trapped directly to EL2 or EL3
+ */
+ CP_ACCESS_TRAP_FP_EL2 = 7,
+ CP_ACCESS_TRAP_FP_EL3 = 8,
+} CPAccessResult;
+
+/* Access functions for coprocessor registers. These cannot fail and
+ * may not raise exceptions.
+ */
+typedef uint64_t CPReadFn(CPUARMState *env, const ARMCPRegInfo *opaque);
+typedef void CPWriteFn(CPUARMState *env, const ARMCPRegInfo *opaque,
+ uint64_t value);
+/* Access permission check functions for coprocessor registers. */
+typedef CPAccessResult CPAccessFn(CPUARMState *env,
+ const ARMCPRegInfo *opaque,
+ bool isread);
+/* Hook function for register reset */
+typedef void CPResetFn(CPUARMState *env, const ARMCPRegInfo *opaque);
+
+#define CP_ANY 0xff
+
+/* Definition of an ARM coprocessor register */
+struct ARMCPRegInfo {
+ /* Name of register (useful mainly for debugging, need not be unique) */
+ const char *name;
+ /* Location of register: coprocessor number and (crn,crm,opc1,opc2)
+ * tuple. Any of crm, opc1 and opc2 may be CP_ANY to indicate a
+ * 'wildcard' field -- any value of that field in the MRC/MCR insn
+ * will be decoded to this register. The register read and write
+ * callbacks will be passed an ARMCPRegInfo with the crn/crm/opc1/opc2
+ * used by the program, so it is possible to register a wildcard and
+ * then behave differently on read/write if necessary.
+ * For 64 bit registers, only crm and opc1 are relevant; crn and opc2
+ * must both be zero.
+ * For AArch64-visible registers, opc0 is also used.
+ * Since there are no "coprocessors" in AArch64, cp is purely used as a
+ * way to distinguish (for KVM's benefit) guest-visible system registers
+ * from demuxed ones provided to preserve the "no side effects on
+ * KVM register read/write from QEMU" semantics. cp==0x13 is guest
+ * visible (to match KVM's encoding); cp==0 will be converted to
+ * cp==0x13 when the ARMCPRegInfo is registered, for convenience.
+ */
+ uint8_t cp;
+ uint8_t crn;
+ uint8_t crm;
+ uint8_t opc0;
+ uint8_t opc1;
+ uint8_t opc2;
+ /* Execution state in which this register is visible: ARM_CP_STATE_* */
+ int state;
+ /* Register type: ARM_CP_* bits/values */
+ int type;
+ /* Access rights: PL*_[RW] */
+ int access;
+ /* Security state: ARM_CP_SECSTATE_* bits/values */
+ int secure;
+ /* The opaque pointer passed to define_arm_cp_regs_with_opaque() when
+ * this register was defined: can be used to hand data through to the
+ * register read/write functions, since they are passed the ARMCPRegInfo*.
+ */
+ void *opaque;
+ /* Value of this register, if it is ARM_CP_CONST. Otherwise, if
+ * fieldoffset is non-zero, the reset value of the register.
+ */
+ uint64_t resetvalue;
+ /* Offset of the field in CPUARMState for this register.
+ *
+ * This is not needed if either:
+ * 1. type is ARM_CP_CONST or one of the ARM_CP_SPECIALs
+ * 2. both readfn and writefn are specified
+ */
+ ptrdiff_t fieldoffset; /* offsetof(CPUARMState, field) */
+
+ /* Offsets of the secure and non-secure fields in CPUARMState for the
+ * register if it is banked. These fields are only used during the static
+ * registration of a register. During hashing the bank associated
+ * with a given security state is copied to fieldoffset which is used from
+ * there on out.
+ *
+ * Register definitions are expected to use either fieldoffset or
+ * bank_fieldoffsets, but not both. A banked register definition must set
+ * both bank offsets; having both offsets set is what marks the register
+ * as banked.
+ */
+ ptrdiff_t bank_fieldoffsets[2];
+
+ /* Function for making any access checks for this register in addition to
+ * those specified by the 'access' permissions bits. If NULL, no extra
+ * checks required. The access check is performed at runtime, not at
+ * translate time.
+ */
+ CPAccessFn *accessfn;
+ /* Function for handling reads of this register. If NULL, then reads
+ * will be done by loading from the offset into CPUARMState specified
+ * by fieldoffset.
+ */
+ CPReadFn *readfn;
+ /* Function for handling writes of this register. If NULL, then writes
+ * will be done by writing to the offset into CPUARMState specified
+ * by fieldoffset.
+ */
+ CPWriteFn *writefn;
+ /* Function for doing a "raw" read; used when we need to copy
+ * coprocessor state to the kernel for KVM or out for
+ * migration. This only needs to be provided if there is also a
+ * readfn and it has side effects (for instance clear-on-read bits).
+ */
+ CPReadFn *raw_readfn;
+ /* Function for doing a "raw" write; used when we need to copy KVM
+ * kernel coprocessor state into userspace, or for inbound
+ * migration. This only needs to be provided if there is also a
+ * writefn and it masks out "unwritable" bits or has write-one-to-clear
+ * or similar behaviour.
+ */
+ CPWriteFn *raw_writefn;
+ /* Function for resetting the register. If NULL, then reset will be done
+ * by writing resetvalue to the field specified in fieldoffset. If
+ * fieldoffset is 0 then no reset will be done.
+ */
+ CPResetFn *resetfn;
+};
+
+/* Macros which are lvalues for the field in CPUARMState for the
+ * ARMCPRegInfo *ri.
+ */
+#define CPREG_FIELD32(env, ri) \
+ (*(uint32_t *)((char *)(env) + (ri)->fieldoffset))
+#define CPREG_FIELD64(env, ri) \
+ (*(uint64_t *)((char *)(env) + (ri)->fieldoffset))
+
+#define REGINFO_SENTINEL { .type = ARM_CP_SENTINEL }
+
+void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
+ const ARMCPRegInfo *regs, void *opaque);
+void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
+ const ARMCPRegInfo *regs, void *opaque);
+static inline void define_arm_cp_regs(ARMCPU *cpu, const ARMCPRegInfo *regs)
+{
+ define_arm_cp_regs_with_opaque(cpu, regs, 0);
+}
+static inline void define_one_arm_cp_reg(ARMCPU *cpu, const ARMCPRegInfo *regs)
+{
+ define_one_arm_cp_reg_with_opaque(cpu, regs, 0);
+}
+const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp);
+
+/* CPWriteFn that can be used to implement writes-ignored behaviour */
+void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value);
+/* CPReadFn that can be used for read-as-zero behaviour */
+uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri);
+
+/* CPResetFn that does nothing, for use if no reset is required even
+ * if fieldoffset is non zero.
+ */
+void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque);
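+
+/* A hypothetical definition list, purely to illustrate the struct above:
+ * one constant (RAZ/WI) 32 bit register at an arbitrary
+ * implementation-defined encoding, terminated by the sentinel. Real tables
+ * live in helper.c and cpu64.c.
+ */
+static inline void define_example_cp_regs(ARMCPU *cpu)
+{
+ static const ARMCPRegInfo example_cp_reginfo[] = {
+ { .name = "EXAMPLE_RAZ_WI", .cp = 15, .opc1 = 0, .crn = 15, .crm = 0,
+ .opc2 = 0, .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ REGINFO_SENTINEL
+ };
+ define_arm_cp_regs(cpu, example_cp_reginfo);
+}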
+
+/* Return true if this reginfo struct's field in the cpu state struct
+ * is 64 bits wide.
+ */
+static inline bool cpreg_field_is_64bit(const ARMCPRegInfo *ri)
+{
+ return (ri->state == ARM_CP_STATE_AA64) || (ri->type & ARM_CP_64BIT);
+}
+
+static inline bool cp_access_ok(int current_el,
+ const ARMCPRegInfo *ri, int isread)
+{
+ return (ri->access >> ((current_el * 2) + isread)) & 1;
+}
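+
+/* Worked example: a PL1_RW register read from EL1 checks bit
+ * (1 * 2) + 1 == 3, which is PL1_R, so the access is allowed; the same
+ * read from EL0 checks bit 1 (PL0_R), which PL1_RW leaves clear
+ * (hypothetical helper, for illustration only).
+ */
+static inline bool cp_access_ok_example(void)
+{
+ const ARMCPRegInfo ri = { .access = PL1_RW };
+
+ return cp_access_ok(1, &ri, 1) && !cp_access_ok(0, &ri, 1);
+}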
+
+/* Raw read of a coprocessor register (as needed for migration, etc) */
+uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri);
+
+/**
+ * write_list_to_cpustate
+ * @cpu: ARMCPU
+ *
+ * For each register listed in the ARMCPU cpreg_indexes list, write
+ * its value from the cpreg_values list into the ARMCPUState structure.
+ * This updates TCG's working data structures from KVM data or
+ * from incoming migration state.
+ *
+ * Returns: true if all register values were updated correctly,
+ * false if some register was unknown or could not be written.
+ * Note that we do not stop early on failure -- we will attempt
+ * writing all registers in the list.
+ */
+bool write_list_to_cpustate(ARMCPU *cpu);
+
+/**
+ * write_cpustate_to_list:
+ * @cpu: ARMCPU
+ *
+ * For each register listed in the ARMCPU cpreg_indexes list, write
+ * its value from the ARMCPUState structure into the cpreg_values list.
+ * This is used to copy info from TCG's working data structures into
+ * KVM or for outbound migration.
+ *
+ * Returns: true if all register values were read correctly,
+ * false if some register was unknown or could not be read.
+ * Note that we do not stop early on failure -- we will attempt
+ * reading all registers in the list.
+ */
+bool write_cpustate_to_list(ARMCPU *cpu);
+
+/* Does the core conform to the "MicroController" profile (e.g. Cortex-M3)?
+ Note that the M in older cores (e.g. ARM7TDMI) stands for Multiply; those
+ are conventional cores (i.e. Application or Realtime profile). */
+
+#define IS_M(env) arm_feature(env, ARM_FEATURE_M)
+
+#define ARM_CPUID_TI915T 0x54029152
+#define ARM_CPUID_TI925T 0x54029252
+
+#if defined(CONFIG_USER_ONLY)
+#define TARGET_PAGE_BITS 12
+#else
+/* ARMv7 and later CPUs have 4K pages minimum, but ARMv5 and v6
+ * have to support 1K tiny pages.
+ */
+#define TARGET_PAGE_BITS_VARY
+#define TARGET_PAGE_BITS_MIN 10
+#endif
+
+#if defined(TARGET_AARCH64)
+# define TARGET_PHYS_ADDR_SPACE_BITS 48
+# define TARGET_VIRT_ADDR_SPACE_BITS 64
+#else
+# define TARGET_PHYS_ADDR_SPACE_BITS 40
+# define TARGET_VIRT_ADDR_SPACE_BITS 32
+#endif
+
+static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
+ unsigned int target_el)
+{
+ CPUARMState *env = cs->env_ptr;
+ unsigned int cur_el = arm_current_el(env);
+ bool secure = arm_is_secure(env);
+ bool pstate_unmasked;
+ int8_t unmasked = 0;
+
+ /* Don't take exceptions if they target a lower EL.
+ * This check should catch any exceptions that would not be taken but left
+ * pending.
+ */
+ if (cur_el > target_el) {
+ return false;
+ }
+
+ switch (excp_idx) {
+ case EXCP_FIQ:
+ pstate_unmasked = !(env->daif & PSTATE_F);
+ break;
+
+ case EXCP_IRQ:
+ pstate_unmasked = !(env->daif & PSTATE_I);
+ break;
+
+ case EXCP_VFIQ:
+ if (secure || !(env->cp15.hcr_el2 & HCR_FMO)) {
+ /* VFIQs are only taken when HCR.FMO is set and we are non-secure. */
+ return false;
+ }
+ return !(env->daif & PSTATE_F);
+ case EXCP_VIRQ:
+ if (secure || !(env->cp15.hcr_el2 & HCR_IMO)) {
+ /* VIRQs are only taken when HCR.IMO is set and we are non-secure. */
+ return false;
+ }
+ return !(env->daif & PSTATE_I);
+ default:
+ g_assert_not_reached();
+ }
+
+ /* Use the target EL, current execution state and SCR/HCR settings to
+ * determine whether the corresponding CPSR bit is used to mask the
+ * interrupt.
+ */
+ if ((target_el > cur_el) && (target_el != 1)) {
+ /* Exceptions targeting a higher EL may not be maskable */
+ if (arm_feature(env, ARM_FEATURE_AARCH64)) {
+ /* 64-bit masking rules are simple: exceptions to EL3
+ * can't be masked, and exceptions to EL2 can only be
+ * masked from Secure state. The HCR and SCR settings
+ * don't affect the masking logic, only the interrupt routing.
+ */
+ if (target_el == 3 || !secure) {
+ unmasked = 1;
+ }
+ } else {
+ /* The old 32-bit-only environment has a more complicated
+ * masking setup. HCR and SCR bits not only affect interrupt
+ * routing but also change the behaviour of masking.
+ */
+ bool hcr, scr;
+
+ switch (excp_idx) {
+ case EXCP_FIQ:
+ /* If FIQs are routed to EL3 or EL2 then there are cases where we
+ * override CPSR.F when determining whether the exception is masked.
+ * If neither routing bit is set then we fall back to the CPSR.F
+ * setting; otherwise we assess the state further below.
+ */
+ hcr = (env->cp15.hcr_el2 & HCR_FMO);
+ scr = (env->cp15.scr_el3 & SCR_FIQ);
+
+ /* When EL3 is 32-bit, the SCR.FW bit controls whether the
+ * CPSR.F bit masks FIQ interrupts when taken in non-secure
+ * state. If SCR.FW is set then FIQs can be masked by CPSR.F
+ * when non-secure but only when FIQs are only routed to EL3.
+ */
+ scr = scr && !((env->cp15.scr_el3 & SCR_FW) && !hcr);
+ break;
+ case EXCP_IRQ:
+ /* When EL3 execution state is 32-bit, if HCR.IMO is set then
+ * we may override the CPSR.I masking when in non-secure state.
+ * The SCR.IRQ setting has already been taken into consideration
+ * when setting the target EL, so it does not have a further
+ * effect here.
+ */
+ hcr = (env->cp15.hcr_el2 & HCR_IMO);
+ scr = false;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if ((scr || hcr) && !secure) {
+ unmasked = 1;
+ }
+ }
+ }
+
+ /* The PSTATE bits only mask the interrupt if we have not overridden the
+ * ability above.
+ */
+ return unmasked || pstate_unmasked;
+}
+
+#define cpu_init(cpu_model) CPU(cpu_arm_init(cpu_model))
+
+#define cpu_signal_handler cpu_arm_signal_handler
+#define cpu_list arm_cpu_list
+
+/* ARM has the following "translation regimes" (as the ARM ARM calls them):
+ *
+ * If EL3 is 64-bit:
+ * + NonSecure EL1 & 0 stage 1
+ * + NonSecure EL1 & 0 stage 2
+ * + NonSecure EL2
+ * + Secure EL1 & EL0
+ * + Secure EL3
+ * If EL3 is 32-bit:
+ * + NonSecure PL1 & 0 stage 1
+ * + NonSecure PL1 & 0 stage 2
+ * + NonSecure PL2
+ * + Secure PL0 & PL1
+ * (reminder: for 32 bit EL3, Secure PL1 is *EL3*, not EL1.)
+ *
+ * For QEMU, an mmu_idx is not quite the same as a translation regime because:
+ * 1. we need to split the "EL1 & 0" regimes into two mmu_idxes, because they
+ * may differ in access permissions even if the VA->PA map is the same
+ * 2. we want to cache in our TLB the full VA->IPA->PA lookup for a stage 1+2
+ * translation, which means that we have one mmu_idx that deals with two
+ * concatenated translation regimes [this sort of combined s1+2 TLB is
+ * architecturally permitted]
+ * 3. we don't need to allocate an mmu_idx to translations that we won't be
+ * handling via the TLB. The only way to do a stage 1 translation without
+ * the immediate stage 2 translation is via the ATS or AT system insns,
+ * which can be slow-pathed and always do a page table walk.
+ * 4. we can also safely fold together the "32 bit EL3" and "64 bit EL3"
+ * translation regimes, because they map reasonably well to each other
+ * and they can't both be active at the same time.
+ * This gives us the following list of mmu_idx values:
+ *
+ * NS EL0 (aka NS PL0) stage 1+2
+ * NS EL1 (aka NS PL1) stage 1+2
+ * NS EL2 (aka NS PL2)
+ * S EL3 (aka S PL1)
+ * S EL0 (aka S PL0)
+ * S EL1 (not used if EL3 is 32 bit)
+ * NS EL0+1 stage 2
+ *
+ * (The last of these is an mmu_idx because we want to be able to use the TLB
+ * for the accesses done as part of a stage 1 page table walk, rather than
+ * having to walk the stage 2 page table over and over.)
+ *
+ * Our enumeration includes at the end some entries which are not "true"
+ * mmu_idx values in that they don't have corresponding TLBs and are only
+ * valid for doing slow path page table walks.
+ *
+ * The constant names here are patterned after the general style of the names
+ * of the AT/ATS operations.
+ * The values used are carefully arranged to make mmu_idx => EL lookup easy.
+ */
+typedef enum ARMMMUIdx {
+ ARMMMUIdx_S12NSE0 = 0,
+ ARMMMUIdx_S12NSE1 = 1,
+ ARMMMUIdx_S1E2 = 2,
+ ARMMMUIdx_S1E3 = 3,
+ ARMMMUIdx_S1SE0 = 4,
+ ARMMMUIdx_S1SE1 = 5,
+ ARMMMUIdx_S2NS = 6,
+ /* Indexes below here don't have TLBs and are used only for AT system
+ * instructions or for the first stage of an S12 page table walk.
+ */
+ ARMMMUIdx_S1NSE0 = 7,
+ ARMMMUIdx_S1NSE1 = 8,
+} ARMMMUIdx;
+
+#define MMU_USER_IDX 0
+
+/* Return the exception level we're running at if this is our mmu_idx */
+static inline int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
+{
+ assert(mmu_idx < ARMMMUIdx_S2NS);
+ return mmu_idx & 3;
+}
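+
+/* For instance, the non-secure and secure EL1 indexes both map back to EL1,
+ * since only the low two bits encode the exception level (hypothetical
+ * helper, for illustration only).
+ */
+static inline bool arm_mmu_idx_to_el_example(void)
+{
+ return arm_mmu_idx_to_el(ARMMMUIdx_S12NSE1) == 1
+ && arm_mmu_idx_to_el(ARMMMUIdx_S1SE1) == 1;
+}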
+
+/* Determine the current mmu_idx to use for normal loads/stores */
+static inline int cpu_mmu_index(CPUARMState *env, bool ifetch)
+{
+ int el = arm_current_el(env);
+
+ if (el < 2 && arm_is_secure_below_el3(env)) {
+ return ARMMMUIdx_S1SE0 + el;
+ }
+ return el;
+}
+
+/* Indexes used when registering address spaces with cpu_address_space_init */
+typedef enum ARMASIdx {
+ ARMASIdx_NS = 0,
+ ARMASIdx_S = 1,
+} ARMASIdx;
+
+/* Return the Exception Level targeted by debug exceptions. */
+static inline int arm_debug_target_el(CPUARMState *env)
+{
+ bool secure = arm_is_secure(env);
+ bool route_to_el2 = false;
+
+ if (arm_feature(env, ARM_FEATURE_EL2) && !secure) {
+ route_to_el2 = env->cp15.hcr_el2 & HCR_TGE ||
+ env->cp15.mdcr_el2 & (1 << 8);
+ }
+
+ if (route_to_el2) {
+ return 2;
+ } else if (arm_feature(env, ARM_FEATURE_EL3) &&
+ !arm_el_is_aa64(env, 3) && secure) {
+ return 3;
+ } else {
+ return 1;
+ }
+}
+
+static inline bool aa64_generate_debug_exceptions(CPUARMState *env)
+{
+ if (arm_is_secure(env)) {
+ /* MDCR_EL3.SDD disables debug events from Secure state */
+ if (extract32(env->cp15.mdcr_el3, 16, 1) != 0
+ || arm_current_el(env) == 3) {
+ return false;
+ }
+ }
+
+ if (arm_current_el(env) == arm_debug_target_el(env)) {
+ if ((extract32(env->cp15.mdscr_el1, 13, 1) == 0)
+ || (env->daif & PSTATE_D)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+static inline bool aa32_generate_debug_exceptions(CPUARMState *env)
+{
+ int el = arm_current_el(env);
+
+ if (el == 0 && arm_el_is_aa64(env, 1)) {
+ return aa64_generate_debug_exceptions(env);
+ }
+
+ if (arm_is_secure(env)) {
+ int spd;
+
+ if (el == 0 && (env->cp15.sder & 1)) {
+ /* SDER.SUIDEN means debug exceptions from Secure EL0
+ * are always enabled. Otherwise they are controlled by
+ * SDCR.SPD like those from other Secure ELs.
+ */
+ return true;
+ }
+
+ spd = extract32(env->cp15.mdcr_el3, 14, 2);
+ switch (spd) {
+ case 1:
+ /* SPD == 0b01 is reserved, but behaves as 0b00. */
+ case 0:
+ /* For 0b00 we return true if external secure invasive debug
+ * is enabled. On real hardware this is controlled by external
+ * signals to the core. QEMU always permits debug, and behaves
+ * as if DBGEN, SPIDEN, NIDEN and SPNIDEN are all tied high.
+ */
+ return true;
+ case 2:
+ return false;
+ case 3:
+ return true;
+ }
+ }
+
+ return el != 2;
+}
+
+/* Return true if debugging exceptions are currently enabled.
+ * This corresponds to what in ARM ARM pseudocode would be
+ * if UsingAArch32() then
+ * return AArch32.GenerateDebugExceptions()
+ * else
+ * return AArch64.GenerateDebugExceptions()
+ * We choose to push the if() down into this function for clarity,
+ * since the pseudocode has it at all callsites except for the one in
+ * CheckSoftwareStep(), where it is elided because both branches would
+ * always return the same value.
+ *
+ * Parts of the pseudocode relating to EL2 and EL3 are omitted because we
+ * don't yet implement those exception levels or their associated trap bits.
+ */
+static inline bool arm_generate_debug_exceptions(CPUARMState *env)
+{
+ if (env->aarch64) {
+ return aa64_generate_debug_exceptions(env);
+ } else {
+ return aa32_generate_debug_exceptions(env);
+ }
+}
+
+/* Is single-stepping active? (Note that the "is EL_D AArch64?" check
+ * implicitly means this always returns false in pre-v8 CPUs.)
+ */
+static inline bool arm_singlestep_active(CPUARMState *env)
+{
+ return extract32(env->cp15.mdscr_el1, 0, 1)
+ && arm_el_is_aa64(env, arm_debug_target_el(env))
+ && arm_generate_debug_exceptions(env);
+}
+
+static inline bool arm_sctlr_b(CPUARMState *env)
+{
+ return
+ /* We need not implement SCTLR.ITD in user-mode emulation, so
+ * let linux-user ignore the fact that it conflicts with SCTLR_B.
+ * This lets people run BE32 binaries with "-cpu any".
+ */
+#ifndef CONFIG_USER_ONLY
+ !arm_feature(env, ARM_FEATURE_V7) &&
+#endif
+ (env->cp15.sctlr_el[1] & SCTLR_B) != 0;
+}
+
+/* Return true if the processor is in big-endian mode. */
+static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
+{
+ int cur_el;
+
+ /* In 32bit endianness is determined by looking at CPSR's E bit */
+ if (!is_a64(env)) {
+ return
+#ifdef CONFIG_USER_ONLY
+ /* In system mode, BE32 is modelled in line with the
+ * architecture (as word-invariant big-endianness), where loads
+ * and stores are done little endian but from addresses which
+ * are adjusted by XORing with the appropriate constant. So the
+ * endianness to use for the raw data access is not affected by
+ * SCTLR.B.
+ * In user mode, however, we model BE32 as byte-invariant
+ * big-endianness (because user-only code cannot tell the
+ * difference), and so we need to use a data access endianness
+ * that depends on SCTLR.B.
+ */
+ arm_sctlr_b(env) ||
+#endif
+ ((env->uncached_cpsr & CPSR_E) ? 1 : 0);
+ }
+
+ cur_el = arm_current_el(env);
+
+ if (cur_el == 0) {
+ return (env->cp15.sctlr_el[1] & SCTLR_E0E) != 0;
+ }
+
+ return (env->cp15.sctlr_el[cur_el] & SCTLR_EE) != 0;
+}
+
+#include "exec/cpu-all.h"
+
+/* Bit usage in the TB flags field: bit 31 indicates whether we are
+ * in 32 or 64 bit mode. The meaning of the other bits depends on that.
+ * We put flags which are shared between 32 and 64 bit mode at the top
+ * of the word, and flags which apply to only one mode at the bottom.
+ */
+#define ARM_TBFLAG_AARCH64_STATE_SHIFT 31
+#define ARM_TBFLAG_AARCH64_STATE_MASK (1U << ARM_TBFLAG_AARCH64_STATE_SHIFT)
+#define ARM_TBFLAG_MMUIDX_SHIFT 28
+#define ARM_TBFLAG_MMUIDX_MASK (0x7 << ARM_TBFLAG_MMUIDX_SHIFT)
+#define ARM_TBFLAG_SS_ACTIVE_SHIFT 27
+#define ARM_TBFLAG_SS_ACTIVE_MASK (1 << ARM_TBFLAG_SS_ACTIVE_SHIFT)
+#define ARM_TBFLAG_PSTATE_SS_SHIFT 26
+#define ARM_TBFLAG_PSTATE_SS_MASK (1 << ARM_TBFLAG_PSTATE_SS_SHIFT)
+/* Target EL if we take a floating-point-disabled exception */
+#define ARM_TBFLAG_FPEXC_EL_SHIFT 24
+#define ARM_TBFLAG_FPEXC_EL_MASK (0x3 << ARM_TBFLAG_FPEXC_EL_SHIFT)
+
+/* Bit usage when in AArch32 state: */
+#define ARM_TBFLAG_THUMB_SHIFT 0
+#define ARM_TBFLAG_THUMB_MASK (1 << ARM_TBFLAG_THUMB_SHIFT)
+#define ARM_TBFLAG_VECLEN_SHIFT 1
+#define ARM_TBFLAG_VECLEN_MASK (0x7 << ARM_TBFLAG_VECLEN_SHIFT)
+#define ARM_TBFLAG_VECSTRIDE_SHIFT 4
+#define ARM_TBFLAG_VECSTRIDE_MASK (0x3 << ARM_TBFLAG_VECSTRIDE_SHIFT)
+#define ARM_TBFLAG_VFPEN_SHIFT 7
+#define ARM_TBFLAG_VFPEN_MASK (1 << ARM_TBFLAG_VFPEN_SHIFT)
+#define ARM_TBFLAG_CONDEXEC_SHIFT 8
+#define ARM_TBFLAG_CONDEXEC_MASK (0xff << ARM_TBFLAG_CONDEXEC_SHIFT)
+#define ARM_TBFLAG_SCTLR_B_SHIFT 16
+#define ARM_TBFLAG_SCTLR_B_MASK (1 << ARM_TBFLAG_SCTLR_B_SHIFT)
+/* We store the bottom two bits of the CPAR as TB flags and handle
+ * checks on the other bits at runtime
+ */
+#define ARM_TBFLAG_XSCALE_CPAR_SHIFT 17
+#define ARM_TBFLAG_XSCALE_CPAR_MASK (3 << ARM_TBFLAG_XSCALE_CPAR_SHIFT)
+/* Indicates whether cp register reads and writes by guest code should access
+ * the secure or nonsecure bank of banked registers; note that this is not
+ * the same thing as the current security state of the processor!
+ */
+#define ARM_TBFLAG_NS_SHIFT 19
+#define ARM_TBFLAG_NS_MASK (1 << ARM_TBFLAG_NS_SHIFT)
+#define ARM_TBFLAG_BE_DATA_SHIFT 20
+#define ARM_TBFLAG_BE_DATA_MASK (1 << ARM_TBFLAG_BE_DATA_SHIFT)
+
+/* Bit usage when in AArch64 state */
+#define ARM_TBFLAG_TBI0_SHIFT 0 /* TBI0 for EL0/1 or TBI for EL2/3 */
+#define ARM_TBFLAG_TBI0_MASK (0x1ull << ARM_TBFLAG_TBI0_SHIFT)
+#define ARM_TBFLAG_TBI1_SHIFT 1 /* TBI1 for EL0/1 */
+#define ARM_TBFLAG_TBI1_MASK (0x1ull << ARM_TBFLAG_TBI1_SHIFT)
+
+/* some convenience accessor macros */
+#define ARM_TBFLAG_AARCH64_STATE(F) \
+ (((F) & ARM_TBFLAG_AARCH64_STATE_MASK) >> ARM_TBFLAG_AARCH64_STATE_SHIFT)
+#define ARM_TBFLAG_MMUIDX(F) \
+ (((F) & ARM_TBFLAG_MMUIDX_MASK) >> ARM_TBFLAG_MMUIDX_SHIFT)
+#define ARM_TBFLAG_SS_ACTIVE(F) \
+ (((F) & ARM_TBFLAG_SS_ACTIVE_MASK) >> ARM_TBFLAG_SS_ACTIVE_SHIFT)
+#define ARM_TBFLAG_PSTATE_SS(F) \
+ (((F) & ARM_TBFLAG_PSTATE_SS_MASK) >> ARM_TBFLAG_PSTATE_SS_SHIFT)
+#define ARM_TBFLAG_FPEXC_EL(F) \
+ (((F) & ARM_TBFLAG_FPEXC_EL_MASK) >> ARM_TBFLAG_FPEXC_EL_SHIFT)
+#define ARM_TBFLAG_THUMB(F) \
+ (((F) & ARM_TBFLAG_THUMB_MASK) >> ARM_TBFLAG_THUMB_SHIFT)
+#define ARM_TBFLAG_VECLEN(F) \
+ (((F) & ARM_TBFLAG_VECLEN_MASK) >> ARM_TBFLAG_VECLEN_SHIFT)
+#define ARM_TBFLAG_VECSTRIDE(F) \
+ (((F) & ARM_TBFLAG_VECSTRIDE_MASK) >> ARM_TBFLAG_VECSTRIDE_SHIFT)
+#define ARM_TBFLAG_VFPEN(F) \
+ (((F) & ARM_TBFLAG_VFPEN_MASK) >> ARM_TBFLAG_VFPEN_SHIFT)
+#define ARM_TBFLAG_CONDEXEC(F) \
+ (((F) & ARM_TBFLAG_CONDEXEC_MASK) >> ARM_TBFLAG_CONDEXEC_SHIFT)
+#define ARM_TBFLAG_SCTLR_B(F) \
+ (((F) & ARM_TBFLAG_SCTLR_B_MASK) >> ARM_TBFLAG_SCTLR_B_SHIFT)
+#define ARM_TBFLAG_XSCALE_CPAR(F) \
+ (((F) & ARM_TBFLAG_XSCALE_CPAR_MASK) >> ARM_TBFLAG_XSCALE_CPAR_SHIFT)
+#define ARM_TBFLAG_NS(F) \
+ (((F) & ARM_TBFLAG_NS_MASK) >> ARM_TBFLAG_NS_SHIFT)
+#define ARM_TBFLAG_BE_DATA(F) \
+ (((F) & ARM_TBFLAG_BE_DATA_MASK) >> ARM_TBFLAG_BE_DATA_SHIFT)
+#define ARM_TBFLAG_TBI0(F) \
+ (((F) & ARM_TBFLAG_TBI0_MASK) >> ARM_TBFLAG_TBI0_SHIFT)
+#define ARM_TBFLAG_TBI1(F) \
+ (((F) & ARM_TBFLAG_TBI1_MASK) >> ARM_TBFLAG_TBI1_SHIFT)
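+
+/* Usage sketch (hypothetical helper): the translator inspects the top bit
+ * of the flags word built by cpu_get_tb_cpu_state() below to choose between
+ * the AArch64 and AArch32 decoders.
+ */
+static inline bool arm_tbflags_are_aarch64_example(uint32_t tb_flags)
+{
+ return ARM_TBFLAG_AARCH64_STATE(tb_flags) != 0;
+}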
+
+static inline bool bswap_code(bool sctlr_b)
+{
+#ifdef CONFIG_USER_ONLY
+ /* BE8 (SCTLR.B = 0, TARGET_WORDS_BIGENDIAN = 1) is mixed endian.
+ * The invalid combination SCTLR.B=1/CPSR.E=1/TARGET_WORDS_BIGENDIAN=0
+ * would also end up as a mixed-endian mode with BE code, LE data.
+ */
+ return
+#ifdef TARGET_WORDS_BIGENDIAN
+ 1 ^
+#endif
+ sctlr_b;
+#else
+ /* All code access in ARM is little endian, and there are no loaders
+ * doing swaps that need to be reversed
+ */
+ return 0;
+#endif
+}
+
+/* Return the exception level to which FP-disabled exceptions should
+ * be taken, or 0 if FP is enabled.
+ */
+static inline int fp_exception_el(CPUARMState *env)
+{
+ int fpen;
+ int cur_el = arm_current_el(env);
+
+ /* CPACR and the CPTR registers don't exist before v6, so FP is
+ * always accessible
+ */
+ if (!arm_feature(env, ARM_FEATURE_V6)) {
+ return 0;
+ }
+
+ /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
+ * 0, 2 : trap EL0 and EL1/PL1 accesses
+ * 1 : trap only EL0 accesses
+ * 3 : trap no accesses
+ */
+ fpen = extract32(env->cp15.cpacr_el1, 20, 2);
+ switch (fpen) {
+ case 0:
+ case 2:
+ if (cur_el == 0 || cur_el == 1) {
+ /* Trap to PL1, which might be EL1 or EL3 */
+ if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
+ return 3;
+ }
+ return 1;
+ }
+ if (cur_el == 3 && !is_a64(env)) {
+ /* Secure PL1 running at EL3 */
+ return 3;
+ }
+ break;
+ case 1:
+ if (cur_el == 0) {
+ return 1;
+ }
+ break;
+ case 3:
+ break;
+ }
+
+ /* For the CPTR registers we don't need to guard with an ARM_FEATURE
+ * check because zero bits in the registers mean "don't trap".
+ */
+
+ /* CPTR_EL2 : present in v7VE or v8 */
+ if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
+ && !arm_is_secure_below_el3(env)) {
+ /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
+ return 2;
+ }
+
+ /* CPTR_EL3 : present in v8 */
+ if (extract32(env->cp15.cptr_el[3], 10, 1)) {
+ /* Trap all FP ops to EL3 */
+ return 3;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_USER_ONLY
+static inline bool arm_cpu_bswap_data(CPUARMState *env)
+{
+ return
+#ifdef TARGET_WORDS_BIGENDIAN
+ 1 ^
+#endif
+ arm_cpu_data_is_big_endian(env);
+}
+#endif
+
+#ifndef CONFIG_USER_ONLY
+/**
+ * arm_regime_tbi0:
+ * @env: CPUARMState
+ * @mmu_idx: MMU index indicating required translation regime
+ *
+ * Extracts the TBI0 value from the appropriate TCR for the current EL
+ *
+ * Returns: the TBI0 value.
+ */
+uint32_t arm_regime_tbi0(CPUARMState *env, ARMMMUIdx mmu_idx);
+
+/**
+ * arm_regime_tbi1:
+ * @env: CPUARMState
+ * @mmu_idx: MMU index indicating required translation regime
+ *
+ * Extracts the TBI1 value from the appropriate TCR for the current EL
+ *
+ * Returns: the TBI1 value.
+ */
+uint32_t arm_regime_tbi1(CPUARMState *env, ARMMMUIdx mmu_idx);
+#else
+/* We can't handle tagged addresses properly in user-only mode */
+static inline uint32_t arm_regime_tbi0(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+ return 0;
+}
+
+static inline uint32_t arm_regime_tbi1(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+ return 0;
+}
+#endif
+
+static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *flags)
+{
+ ARMMMUIdx mmu_idx = cpu_mmu_index(env, false);
+ if (is_a64(env)) {
+ *pc = env->pc;
+ *flags = ARM_TBFLAG_AARCH64_STATE_MASK;
+ /* Get control bits for tagged addresses */
+ *flags |= (arm_regime_tbi0(env, mmu_idx) << ARM_TBFLAG_TBI0_SHIFT);
+ *flags |= (arm_regime_tbi1(env, mmu_idx) << ARM_TBFLAG_TBI1_SHIFT);
+ } else {
+ *pc = env->regs[15];
+ *flags = (env->thumb << ARM_TBFLAG_THUMB_SHIFT)
+ | (env->vfp.vec_len << ARM_TBFLAG_VECLEN_SHIFT)
+ | (env->vfp.vec_stride << ARM_TBFLAG_VECSTRIDE_SHIFT)
+ | (env->condexec_bits << ARM_TBFLAG_CONDEXEC_SHIFT)
+ | (arm_sctlr_b(env) << ARM_TBFLAG_SCTLR_B_SHIFT);
+ if (!(access_secure_reg(env))) {
+ *flags |= ARM_TBFLAG_NS_MASK;
+ }
+ if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)
+ || arm_el_is_aa64(env, 1)) {
+ *flags |= ARM_TBFLAG_VFPEN_MASK;
+ }
+ *flags |= (extract32(env->cp15.c15_cpar, 0, 2)
+ << ARM_TBFLAG_XSCALE_CPAR_SHIFT);
+ }
+
+ *flags |= (mmu_idx << ARM_TBFLAG_MMUIDX_SHIFT);
+
+ /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
+ * states defined in the ARM ARM for software singlestep:
+ * SS_ACTIVE PSTATE.SS State
+ * 0 x Inactive (the TB flag for SS is always 0)
+ * 1 0 Active-pending
+ * 1 1 Active-not-pending
+ */
+ if (arm_singlestep_active(env)) {
+ *flags |= ARM_TBFLAG_SS_ACTIVE_MASK;
+ if (is_a64(env)) {
+ if (env->pstate & PSTATE_SS) {
+ *flags |= ARM_TBFLAG_PSTATE_SS_MASK;
+ }
+ } else {
+ if (env->uncached_cpsr & PSTATE_SS) {
+ *flags |= ARM_TBFLAG_PSTATE_SS_MASK;
+ }
+ }
+ }
+ if (arm_cpu_data_is_big_endian(env)) {
+ *flags |= ARM_TBFLAG_BE_DATA_MASK;
+ }
+ *flags |= fp_exception_el(env) << ARM_TBFLAG_FPEXC_EL_SHIFT;
+
+ *cs_base = 0;
+}
+
+enum {
+ QEMU_PSCI_CONDUIT_DISABLED = 0,
+ QEMU_PSCI_CONDUIT_SMC = 1,
+ QEMU_PSCI_CONDUIT_HVC = 2,
+};
+
+#ifndef CONFIG_USER_ONLY
+/* Return the address space index to use for a memory access */
+static inline int arm_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs)
+{
+ return attrs.secure ? ARMASIdx_S : ARMASIdx_NS;
+}
+
+/* Return the AddressSpace to use for a memory access
+ * (which depends on whether the access is S or NS, and whether
+ * the board gave us a separate AddressSpace for S accesses).
+ */
+static inline AddressSpace *arm_addressspace(CPUState *cs, MemTxAttrs attrs)
+{
+ return cpu_get_address_space(cs, arm_asidx_from_attrs(cs, attrs));
+}
+#endif
+
+/**
+ * arm_register_el_change_hook:
+ * Register a hook function which will be called back whenever this
+ * CPU changes exception level or mode. The hook function will be
+ * passed a pointer to the ARMCPU and the opaque data pointer passed
+ * to this function when the hook was registered.
+ *
+ * Note that we currently only support registering a single hook function,
+ * and will assert if this function is called twice.
+ * This facility is intended for the use of the GICv3 emulation.
+ */
+void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHook *hook,
+ void *opaque);
+
+/**
+ * arm_get_el_change_hook_opaque:
+ * Return the opaque data that will be used by the el_change_hook
+ * for this CPU.
+ */
+static inline void *arm_get_el_change_hook_opaque(ARMCPU *cpu)
+{
+ return cpu->el_change_hook_opaque;
+}
+
+#endif
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
new file mode 100644
index 0000000000..549cb1ee93
--- /dev/null
+++ b/target/arm/cpu64.c
@@ -0,0 +1,353 @@
+/*
+ * QEMU AArch64 CPU
+ *
+ * Copyright (c) 2013 Linaro Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "qemu-common.h"
+#if !defined(CONFIG_USER_ONLY)
+#include "hw/loader.h"
+#endif
+#include "hw/arm/arm.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/kvm.h"
+
+static inline void set_feature(CPUARMState *env, int feature)
+{
+ env->features |= 1ULL << feature;
+}
+
+static inline void unset_feature(CPUARMState *env, int feature)
+{
+ env->features &= ~(1ULL << feature);
+}
+
+#ifndef CONFIG_USER_ONLY
+static uint64_t a57_a53_l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ /* Number of processors is in [25:24]; otherwise we RAZ */
+ return (smp_cpus - 1) << 24;
+}
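+
+/* For example, booting with "-smp 4" makes this read back (4 - 1) << 24,
+ * i.e. 0x03000000: "four processors" in bits [25:24], zero elsewhere.
+ */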
+#endif
+
+static const ARMCPRegInfo cortex_a57_a53_cp_reginfo[] = {
+#ifndef CONFIG_USER_ONLY
+ { .name = "L2CTLR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 1, .crn = 11, .crm = 0, .opc2 = 2,
+ .access = PL1_RW, .readfn = a57_a53_l2ctlr_read,
+ .writefn = arm_cp_write_ignore },
+ { .name = "L2CTLR",
+ .cp = 15, .opc1 = 1, .crn = 9, .crm = 0, .opc2 = 2,
+ .access = PL1_RW, .readfn = a57_a53_l2ctlr_read,
+ .writefn = arm_cp_write_ignore },
+#endif
+ { .name = "L2ECTLR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 1, .crn = 11, .crm = 0, .opc2 = 3,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "L2ECTLR",
+ .cp = 15, .opc1 = 1, .crn = 9, .crm = 0, .opc2 = 3,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "L2ACTLR", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 0, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "CPUACTLR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "CPUACTLR",
+ .cp = 15, .opc1 = 0, .crm = 15,
+ .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
+ { .name = "CPUECTLR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 1,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "CPUECTLR",
+ .cp = 15, .opc1 = 1, .crm = 15,
+ .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
+ { .name = "CPUMERRSR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 2,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "CPUMERRSR",
+ .cp = 15, .opc1 = 2, .crm = 15,
+ .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
+ { .name = "L2MERRSR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 2, .opc2 = 3,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "L2MERRSR",
+ .cp = 15, .opc1 = 3, .crm = 15,
+ .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
+ REGINFO_SENTINEL
+};
+
+static void aarch64_a57_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "arm,cortex-a57";
+ set_feature(&cpu->env, ARM_FEATURE_V8);
+ set_feature(&cpu->env, ARM_FEATURE_VFP4);
+ set_feature(&cpu->env, ARM_FEATURE_NEON);
+ set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
+ set_feature(&cpu->env, ARM_FEATURE_AARCH64);
+ set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
+ set_feature(&cpu->env, ARM_FEATURE_V8_AES);
+ set_feature(&cpu->env, ARM_FEATURE_V8_SHA1);
+ set_feature(&cpu->env, ARM_FEATURE_V8_SHA256);
+ set_feature(&cpu->env, ARM_FEATURE_V8_PMULL);
+ set_feature(&cpu->env, ARM_FEATURE_CRC);
+ set_feature(&cpu->env, ARM_FEATURE_EL3);
+ set_feature(&cpu->env, ARM_FEATURE_PMU);
+ cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A57;
+ cpu->midr = 0x411fd070;
+ cpu->revidr = 0x00000000;
+ cpu->reset_fpsid = 0x41034070;
+ cpu->mvfr0 = 0x10110222;
+ cpu->mvfr1 = 0x12111111;
+ cpu->mvfr2 = 0x00000043;
+ cpu->ctr = 0x8444c004;
+ cpu->reset_sctlr = 0x00c50838;
+ cpu->id_pfr0 = 0x00000131;
+ cpu->id_pfr1 = 0x00011011;
+ cpu->id_dfr0 = 0x03010066;
+ cpu->id_afr0 = 0x00000000;
+ cpu->id_mmfr0 = 0x10101105;
+ cpu->id_mmfr1 = 0x40000000;
+ cpu->id_mmfr2 = 0x01260000;
+ cpu->id_mmfr3 = 0x02102211;
+ cpu->id_isar0 = 0x02101110;
+ cpu->id_isar1 = 0x13112111;
+ cpu->id_isar2 = 0x21232042;
+ cpu->id_isar3 = 0x01112131;
+ cpu->id_isar4 = 0x00011142;
+ cpu->id_isar5 = 0x00011121;
+ cpu->id_aa64pfr0 = 0x00002222;
+ cpu->id_aa64dfr0 = 0x10305106;
+ cpu->pmceid0 = 0x00000000;
+ cpu->pmceid1 = 0x00000000;
+ cpu->id_aa64isar0 = 0x00011120;
+ cpu->id_aa64mmfr0 = 0x00001124;
+ cpu->dbgdidr = 0x3516d000;
+ cpu->clidr = 0x0a200023;
+ cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
+ cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
+ cpu->ccsidr[2] = 0x70ffe07a; /* 2048KB L2 cache */
+ cpu->dcz_blocksize = 4; /* 64 bytes */
+ define_arm_cp_regs(cpu, cortex_a57_a53_cp_reginfo);
+}
+
+static void aarch64_a53_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ cpu->dtb_compatible = "arm,cortex-a53";
+ set_feature(&cpu->env, ARM_FEATURE_V8);
+ set_feature(&cpu->env, ARM_FEATURE_VFP4);
+ set_feature(&cpu->env, ARM_FEATURE_NEON);
+ set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
+ set_feature(&cpu->env, ARM_FEATURE_AARCH64);
+ set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
+ set_feature(&cpu->env, ARM_FEATURE_V8_AES);
+ set_feature(&cpu->env, ARM_FEATURE_V8_SHA1);
+ set_feature(&cpu->env, ARM_FEATURE_V8_SHA256);
+ set_feature(&cpu->env, ARM_FEATURE_V8_PMULL);
+ set_feature(&cpu->env, ARM_FEATURE_CRC);
+ set_feature(&cpu->env, ARM_FEATURE_EL3);
+ set_feature(&cpu->env, ARM_FEATURE_PMU);
+ cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A53;
+ cpu->midr = 0x410fd034;
+ cpu->revidr = 0x00000000;
+ cpu->reset_fpsid = 0x41034070;
+ cpu->mvfr0 = 0x10110222;
+ cpu->mvfr1 = 0x12111111;
+ cpu->mvfr2 = 0x00000043;
+ cpu->ctr = 0x84448004; /* L1Ip = VIPT */
+ cpu->reset_sctlr = 0x00c50838;
+ cpu->id_pfr0 = 0x00000131;
+ cpu->id_pfr1 = 0x00011011;
+ cpu->id_dfr0 = 0x03010066;
+ cpu->id_afr0 = 0x00000000;
+ cpu->id_mmfr0 = 0x10101105;
+ cpu->id_mmfr1 = 0x40000000;
+ cpu->id_mmfr2 = 0x01260000;
+ cpu->id_mmfr3 = 0x02102211;
+ cpu->id_isar0 = 0x02101110;
+ cpu->id_isar1 = 0x13112111;
+ cpu->id_isar2 = 0x21232042;
+ cpu->id_isar3 = 0x01112131;
+ cpu->id_isar4 = 0x00011142;
+ cpu->id_isar5 = 0x00011121;
+ cpu->id_aa64pfr0 = 0x00002222;
+ cpu->id_aa64dfr0 = 0x10305106;
+ cpu->id_aa64isar0 = 0x00011120;
+ cpu->id_aa64mmfr0 = 0x00001122; /* 40 bit physical addr */
+ cpu->dbgdidr = 0x3516d000;
+ cpu->clidr = 0x0a200023;
+ cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */
+ cpu->ccsidr[1] = 0x201fe00a; /* 32KB L1 icache */
+ cpu->ccsidr[2] = 0x707fe07a; /* 1024KB L2 cache */
+ cpu->dcz_blocksize = 4; /* 64 bytes */
+ define_arm_cp_regs(cpu, cortex_a57_a53_cp_reginfo);
+}
+
+#ifdef CONFIG_USER_ONLY
+static void aarch64_any_initfn(Object *obj)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ set_feature(&cpu->env, ARM_FEATURE_V8);
+ set_feature(&cpu->env, ARM_FEATURE_VFP4);
+ set_feature(&cpu->env, ARM_FEATURE_NEON);
+ set_feature(&cpu->env, ARM_FEATURE_AARCH64);
+ set_feature(&cpu->env, ARM_FEATURE_V8_AES);
+ set_feature(&cpu->env, ARM_FEATURE_V8_SHA1);
+ set_feature(&cpu->env, ARM_FEATURE_V8_SHA256);
+ set_feature(&cpu->env, ARM_FEATURE_V8_PMULL);
+ set_feature(&cpu->env, ARM_FEATURE_CRC);
+ cpu->ctr = 0x80038003; /* 32 byte I and D cacheline size, VIPT icache */
+ cpu->dcz_blocksize = 7; /* 512 bytes */
+}
+#endif
+
+typedef struct ARMCPUInfo {
+ const char *name;
+ void (*initfn)(Object *obj);
+ void (*class_init)(ObjectClass *oc, void *data);
+} ARMCPUInfo;
+
+static const ARMCPUInfo aarch64_cpus[] = {
+ { .name = "cortex-a57", .initfn = aarch64_a57_initfn },
+ { .name = "cortex-a53", .initfn = aarch64_a53_initfn },
+#ifdef CONFIG_USER_ONLY
+ { .name = "any", .initfn = aarch64_any_initfn },
+#endif
+ { .name = NULL }
+};
+
+static bool aarch64_cpu_get_aarch64(Object *obj, Error **errp)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ return arm_feature(&cpu->env, ARM_FEATURE_AARCH64);
+}
+
+static void aarch64_cpu_set_aarch64(Object *obj, bool value, Error **errp)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ /* At this time, this property may only be changed if KVM is enabled. This
+ * restriction allows us to avoid fixing up functionality that assumes a
+ * uniform execution state, such as do_interrupt.
+ */
+ if (!kvm_enabled()) {
+ error_setg(errp, "'aarch64' feature cannot be disabled "
+ "unless KVM is enabled");
+ return;
+ }
+
+ if (value == false) {
+ unset_feature(&cpu->env, ARM_FEATURE_AARCH64);
+ } else {
+ set_feature(&cpu->env, ARM_FEATURE_AARCH64);
+ }
+}
+
+static void aarch64_cpu_initfn(Object *obj)
+{
+ object_property_add_bool(obj, "aarch64", aarch64_cpu_get_aarch64,
+ aarch64_cpu_set_aarch64, NULL);
+ object_property_set_description(obj, "aarch64",
+ "Set on/off to enable/disable aarch64 "
+ "execution state ",
+ NULL);
+}
+
+static void aarch64_cpu_finalizefn(Object *obj)
+{
+}
+
+static void aarch64_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ /* It's OK to look at env for the current mode here, because it's
+ * never possible for an AArch64 TB to chain to an AArch32 TB.
+ * (Otherwise we would need to use synchronize_from_tb instead.)
+ */
+ if (is_a64(&cpu->env)) {
+ cpu->env.pc = value;
+ } else {
+ cpu->env.regs[15] = value;
+ }
+}
+
+static gchar *aarch64_gdb_arch_name(CPUState *cs)
+{
+ return g_strdup("aarch64");
+}
+
+static void aarch64_cpu_class_init(ObjectClass *oc, void *data)
+{
+ CPUClass *cc = CPU_CLASS(oc);
+
+ cc->cpu_exec_interrupt = arm_cpu_exec_interrupt;
+ cc->set_pc = aarch64_cpu_set_pc;
+ cc->gdb_read_register = aarch64_cpu_gdb_read_register;
+ cc->gdb_write_register = aarch64_cpu_gdb_write_register;
+ cc->gdb_num_core_regs = 34;
+ cc->gdb_core_xml_file = "aarch64-core.xml";
+ cc->gdb_arch_name = aarch64_gdb_arch_name;
+}
+
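+/* Register one QOM type per entry in aarch64_cpus[]; the type name is the
+ * model name with "-" TYPE_ARM_CPU appended (see type_info.name below).
+ */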
+static void aarch64_cpu_register(const ARMCPUInfo *info)
+{
+ TypeInfo type_info = {
+ .parent = TYPE_AARCH64_CPU,
+ .instance_size = sizeof(ARMCPU),
+ .instance_init = info->initfn,
+ .class_size = sizeof(ARMCPUClass),
+ .class_init = info->class_init,
+ };
+
+ type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name);
+ type_register(&type_info);
+ g_free((void *)type_info.name);
+}
+
+static const TypeInfo aarch64_cpu_type_info = {
+ .name = TYPE_AARCH64_CPU,
+ .parent = TYPE_ARM_CPU,
+ .instance_size = sizeof(ARMCPU),
+ .instance_init = aarch64_cpu_initfn,
+ .instance_finalize = aarch64_cpu_finalizefn,
+ .abstract = true,
+ .class_size = sizeof(AArch64CPUClass),
+ .class_init = aarch64_cpu_class_init,
+};
+
+static void aarch64_cpu_register_types(void)
+{
+ const ARMCPUInfo *info = aarch64_cpus;
+
+ type_register_static(&aarch64_cpu_type_info);
+
+ while (info->name) {
+ aarch64_cpu_register(info);
+ info++;
+ }
+}
+
+type_init(aarch64_cpu_register_types)
diff --git a/target/arm/crypto_helper.c b/target/arm/crypto_helper.c
new file mode 100644
index 0000000000..3b6df3f41a
--- /dev/null
+++ b/target/arm/crypto_helper.c
@@ -0,0 +1,465 @@
+/*
+ * crypto_helper.c - emulate v8 Crypto Extensions instructions
+ *
+ * Copyright (C) 2013 - 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ */
+
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "crypto/aes.h"
+
+union CRYPTO_STATE {
+ uint8_t bytes[16];
+ uint32_t words[4];
+ uint64_t l[2];
+};
+
+#ifdef HOST_WORDS_BIGENDIAN
+#define CR_ST_BYTE(state, i) (state.bytes[(15 - (i)) ^ 8])
+#define CR_ST_WORD(state, i) (state.words[(3 - (i)) ^ 2])
+#else
+#define CR_ST_BYTE(state, i) (state.bytes[i])
+#define CR_ST_WORD(state, i) (state.words[i])
+#endif
+
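+/* AESE/AESD: XOR the state in Vd with the round key in Vm (AddRoundKey),
+ * then apply a combined ShiftRows and SubBytes step, using the forward or
+ * inverse tables as selected by 'decrypt'.
+ */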
+void HELPER(crypto_aese)(CPUARMState *env, uint32_t rd, uint32_t rm,
+ uint32_t decrypt)
+{
+ static uint8_t const * const sbox[2] = { AES_sbox, AES_isbox };
+ static uint8_t const * const shift[2] = { AES_shifts, AES_ishifts };
+
+ union CRYPTO_STATE rk = { .l = {
+ float64_val(env->vfp.regs[rm]),
+ float64_val(env->vfp.regs[rm + 1])
+ } };
+ union CRYPTO_STATE st = { .l = {
+ float64_val(env->vfp.regs[rd]),
+ float64_val(env->vfp.regs[rd + 1])
+ } };
+ int i;
+
+ assert(decrypt < 2);
+
+ /* xor state vector with round key */
+ rk.l[0] ^= st.l[0];
+ rk.l[1] ^= st.l[1];
+
+ /* combine ShiftRows operation and sbox substitution */
+ for (i = 0; i < 16; i++) {
+ CR_ST_BYTE(st, i) = sbox[decrypt][CR_ST_BYTE(rk, shift[decrypt][i])];
+ }
+
+ env->vfp.regs[rd] = make_float64(st.l[0]);
+ env->vfp.regs[rd + 1] = make_float64(st.l[1]);
+}
+
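+/* AESMC/AESIMC: the (Inverse)MixColumns transform, computed one column at a
+ * time from per-byte lookup tables combined with 8/16/24-bit rotations.
+ */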
+void HELPER(crypto_aesmc)(CPUARMState *env, uint32_t rd, uint32_t rm,
+ uint32_t decrypt)
+{
+ static uint32_t const mc[][256] = { {
+ /* MixColumns lookup table */
+ 0x00000000, 0x03010102, 0x06020204, 0x05030306,
+ 0x0c040408, 0x0f05050a, 0x0a06060c, 0x0907070e,
+ 0x18080810, 0x1b090912, 0x1e0a0a14, 0x1d0b0b16,
+ 0x140c0c18, 0x170d0d1a, 0x120e0e1c, 0x110f0f1e,
+ 0x30101020, 0x33111122, 0x36121224, 0x35131326,
+ 0x3c141428, 0x3f15152a, 0x3a16162c, 0x3917172e,
+ 0x28181830, 0x2b191932, 0x2e1a1a34, 0x2d1b1b36,
+ 0x241c1c38, 0x271d1d3a, 0x221e1e3c, 0x211f1f3e,
+ 0x60202040, 0x63212142, 0x66222244, 0x65232346,
+ 0x6c242448, 0x6f25254a, 0x6a26264c, 0x6927274e,
+ 0x78282850, 0x7b292952, 0x7e2a2a54, 0x7d2b2b56,
+ 0x742c2c58, 0x772d2d5a, 0x722e2e5c, 0x712f2f5e,
+ 0x50303060, 0x53313162, 0x56323264, 0x55333366,
+ 0x5c343468, 0x5f35356a, 0x5a36366c, 0x5937376e,
+ 0x48383870, 0x4b393972, 0x4e3a3a74, 0x4d3b3b76,
+ 0x443c3c78, 0x473d3d7a, 0x423e3e7c, 0x413f3f7e,
+ 0xc0404080, 0xc3414182, 0xc6424284, 0xc5434386,
+ 0xcc444488, 0xcf45458a, 0xca46468c, 0xc947478e,
+ 0xd8484890, 0xdb494992, 0xde4a4a94, 0xdd4b4b96,
+ 0xd44c4c98, 0xd74d4d9a, 0xd24e4e9c, 0xd14f4f9e,
+ 0xf05050a0, 0xf35151a2, 0xf65252a4, 0xf55353a6,
+ 0xfc5454a8, 0xff5555aa, 0xfa5656ac, 0xf95757ae,
+ 0xe85858b0, 0xeb5959b2, 0xee5a5ab4, 0xed5b5bb6,
+ 0xe45c5cb8, 0xe75d5dba, 0xe25e5ebc, 0xe15f5fbe,
+ 0xa06060c0, 0xa36161c2, 0xa66262c4, 0xa56363c6,
+ 0xac6464c8, 0xaf6565ca, 0xaa6666cc, 0xa96767ce,
+ 0xb86868d0, 0xbb6969d2, 0xbe6a6ad4, 0xbd6b6bd6,
+ 0xb46c6cd8, 0xb76d6dda, 0xb26e6edc, 0xb16f6fde,
+ 0x907070e0, 0x937171e2, 0x967272e4, 0x957373e6,
+ 0x9c7474e8, 0x9f7575ea, 0x9a7676ec, 0x997777ee,
+ 0x887878f0, 0x8b7979f2, 0x8e7a7af4, 0x8d7b7bf6,
+ 0x847c7cf8, 0x877d7dfa, 0x827e7efc, 0x817f7ffe,
+ 0x9b80801b, 0x98818119, 0x9d82821f, 0x9e83831d,
+ 0x97848413, 0x94858511, 0x91868617, 0x92878715,
+ 0x8388880b, 0x80898909, 0x858a8a0f, 0x868b8b0d,
+ 0x8f8c8c03, 0x8c8d8d01, 0x898e8e07, 0x8a8f8f05,
+ 0xab90903b, 0xa8919139, 0xad92923f, 0xae93933d,
+ 0xa7949433, 0xa4959531, 0xa1969637, 0xa2979735,
+ 0xb398982b, 0xb0999929, 0xb59a9a2f, 0xb69b9b2d,
+ 0xbf9c9c23, 0xbc9d9d21, 0xb99e9e27, 0xba9f9f25,
+ 0xfba0a05b, 0xf8a1a159, 0xfda2a25f, 0xfea3a35d,
+ 0xf7a4a453, 0xf4a5a551, 0xf1a6a657, 0xf2a7a755,
+ 0xe3a8a84b, 0xe0a9a949, 0xe5aaaa4f, 0xe6abab4d,
+ 0xefacac43, 0xecadad41, 0xe9aeae47, 0xeaafaf45,
+ 0xcbb0b07b, 0xc8b1b179, 0xcdb2b27f, 0xceb3b37d,
+ 0xc7b4b473, 0xc4b5b571, 0xc1b6b677, 0xc2b7b775,
+ 0xd3b8b86b, 0xd0b9b969, 0xd5baba6f, 0xd6bbbb6d,
+ 0xdfbcbc63, 0xdcbdbd61, 0xd9bebe67, 0xdabfbf65,
+ 0x5bc0c09b, 0x58c1c199, 0x5dc2c29f, 0x5ec3c39d,
+ 0x57c4c493, 0x54c5c591, 0x51c6c697, 0x52c7c795,
+ 0x43c8c88b, 0x40c9c989, 0x45caca8f, 0x46cbcb8d,
+ 0x4fcccc83, 0x4ccdcd81, 0x49cece87, 0x4acfcf85,
+ 0x6bd0d0bb, 0x68d1d1b9, 0x6dd2d2bf, 0x6ed3d3bd,
+ 0x67d4d4b3, 0x64d5d5b1, 0x61d6d6b7, 0x62d7d7b5,
+ 0x73d8d8ab, 0x70d9d9a9, 0x75dadaaf, 0x76dbdbad,
+ 0x7fdcdca3, 0x7cdddda1, 0x79dedea7, 0x7adfdfa5,
+ 0x3be0e0db, 0x38e1e1d9, 0x3de2e2df, 0x3ee3e3dd,
+ 0x37e4e4d3, 0x34e5e5d1, 0x31e6e6d7, 0x32e7e7d5,
+ 0x23e8e8cb, 0x20e9e9c9, 0x25eaeacf, 0x26ebebcd,
+ 0x2fececc3, 0x2cededc1, 0x29eeeec7, 0x2aefefc5,
+ 0x0bf0f0fb, 0x08f1f1f9, 0x0df2f2ff, 0x0ef3f3fd,
+ 0x07f4f4f3, 0x04f5f5f1, 0x01f6f6f7, 0x02f7f7f5,
+ 0x13f8f8eb, 0x10f9f9e9, 0x15fafaef, 0x16fbfbed,
+ 0x1ffcfce3, 0x1cfdfde1, 0x19fefee7, 0x1affffe5,
+ }, {
+ /* Inverse MixColumns lookup table */
+ 0x00000000, 0x0b0d090e, 0x161a121c, 0x1d171b12,
+ 0x2c342438, 0x27392d36, 0x3a2e3624, 0x31233f2a,
+ 0x58684870, 0x5365417e, 0x4e725a6c, 0x457f5362,
+ 0x745c6c48, 0x7f516546, 0x62467e54, 0x694b775a,
+ 0xb0d090e0, 0xbbdd99ee, 0xa6ca82fc, 0xadc78bf2,
+ 0x9ce4b4d8, 0x97e9bdd6, 0x8afea6c4, 0x81f3afca,
+ 0xe8b8d890, 0xe3b5d19e, 0xfea2ca8c, 0xf5afc382,
+ 0xc48cfca8, 0xcf81f5a6, 0xd296eeb4, 0xd99be7ba,
+ 0x7bbb3bdb, 0x70b632d5, 0x6da129c7, 0x66ac20c9,
+ 0x578f1fe3, 0x5c8216ed, 0x41950dff, 0x4a9804f1,
+ 0x23d373ab, 0x28de7aa5, 0x35c961b7, 0x3ec468b9,
+ 0x0fe75793, 0x04ea5e9d, 0x19fd458f, 0x12f04c81,
+ 0xcb6bab3b, 0xc066a235, 0xdd71b927, 0xd67cb029,
+ 0xe75f8f03, 0xec52860d, 0xf1459d1f, 0xfa489411,
+ 0x9303e34b, 0x980eea45, 0x8519f157, 0x8e14f859,
+ 0xbf37c773, 0xb43ace7d, 0xa92dd56f, 0xa220dc61,
+ 0xf66d76ad, 0xfd607fa3, 0xe07764b1, 0xeb7a6dbf,
+ 0xda595295, 0xd1545b9b, 0xcc434089, 0xc74e4987,
+ 0xae053edd, 0xa50837d3, 0xb81f2cc1, 0xb31225cf,
+ 0x82311ae5, 0x893c13eb, 0x942b08f9, 0x9f2601f7,
+ 0x46bde64d, 0x4db0ef43, 0x50a7f451, 0x5baafd5f,
+ 0x6a89c275, 0x6184cb7b, 0x7c93d069, 0x779ed967,
+ 0x1ed5ae3d, 0x15d8a733, 0x08cfbc21, 0x03c2b52f,
+ 0x32e18a05, 0x39ec830b, 0x24fb9819, 0x2ff69117,
+ 0x8dd64d76, 0x86db4478, 0x9bcc5f6a, 0x90c15664,
+ 0xa1e2694e, 0xaaef6040, 0xb7f87b52, 0xbcf5725c,
+ 0xd5be0506, 0xdeb30c08, 0xc3a4171a, 0xc8a91e14,
+ 0xf98a213e, 0xf2872830, 0xef903322, 0xe49d3a2c,
+ 0x3d06dd96, 0x360bd498, 0x2b1ccf8a, 0x2011c684,
+ 0x1132f9ae, 0x1a3ff0a0, 0x0728ebb2, 0x0c25e2bc,
+ 0x656e95e6, 0x6e639ce8, 0x737487fa, 0x78798ef4,
+ 0x495ab1de, 0x4257b8d0, 0x5f40a3c2, 0x544daacc,
+ 0xf7daec41, 0xfcd7e54f, 0xe1c0fe5d, 0xeacdf753,
+ 0xdbeec879, 0xd0e3c177, 0xcdf4da65, 0xc6f9d36b,
+ 0xafb2a431, 0xa4bfad3f, 0xb9a8b62d, 0xb2a5bf23,
+ 0x83868009, 0x888b8907, 0x959c9215, 0x9e919b1b,
+ 0x470a7ca1, 0x4c0775af, 0x51106ebd, 0x5a1d67b3,
+ 0x6b3e5899, 0x60335197, 0x7d244a85, 0x7629438b,
+ 0x1f6234d1, 0x146f3ddf, 0x097826cd, 0x02752fc3,
+ 0x335610e9, 0x385b19e7, 0x254c02f5, 0x2e410bfb,
+ 0x8c61d79a, 0x876cde94, 0x9a7bc586, 0x9176cc88,
+ 0xa055f3a2, 0xab58faac, 0xb64fe1be, 0xbd42e8b0,
+ 0xd4099fea, 0xdf0496e4, 0xc2138df6, 0xc91e84f8,
+ 0xf83dbbd2, 0xf330b2dc, 0xee27a9ce, 0xe52aa0c0,
+ 0x3cb1477a, 0x37bc4e74, 0x2aab5566, 0x21a65c68,
+ 0x10856342, 0x1b886a4c, 0x069f715e, 0x0d927850,
+ 0x64d90f0a, 0x6fd40604, 0x72c31d16, 0x79ce1418,
+ 0x48ed2b32, 0x43e0223c, 0x5ef7392e, 0x55fa3020,
+ 0x01b79aec, 0x0aba93e2, 0x17ad88f0, 0x1ca081fe,
+ 0x2d83bed4, 0x268eb7da, 0x3b99acc8, 0x3094a5c6,
+ 0x59dfd29c, 0x52d2db92, 0x4fc5c080, 0x44c8c98e,
+ 0x75ebf6a4, 0x7ee6ffaa, 0x63f1e4b8, 0x68fcedb6,
+ 0xb1670a0c, 0xba6a0302, 0xa77d1810, 0xac70111e,
+ 0x9d532e34, 0x965e273a, 0x8b493c28, 0x80443526,
+ 0xe90f427c, 0xe2024b72, 0xff155060, 0xf418596e,
+ 0xc53b6644, 0xce366f4a, 0xd3217458, 0xd82c7d56,
+ 0x7a0ca137, 0x7101a839, 0x6c16b32b, 0x671bba25,
+ 0x5638850f, 0x5d358c01, 0x40229713, 0x4b2f9e1d,
+ 0x2264e947, 0x2969e049, 0x347efb5b, 0x3f73f255,
+ 0x0e50cd7f, 0x055dc471, 0x184adf63, 0x1347d66d,
+ 0xcadc31d7, 0xc1d138d9, 0xdcc623cb, 0xd7cb2ac5,
+ 0xe6e815ef, 0xede51ce1, 0xf0f207f3, 0xfbff0efd,
+ 0x92b479a7, 0x99b970a9, 0x84ae6bbb, 0x8fa362b5,
+ 0xbe805d9f, 0xb58d5491, 0xa89a4f83, 0xa397468d,
+ } };
+ union CRYPTO_STATE st = { .l = {
+ float64_val(env->vfp.regs[rm]),
+ float64_val(env->vfp.regs[rm + 1])
+ } };
+ int i;
+
+ assert(decrypt < 2);
+
+ for (i = 0; i < 16; i += 4) {
+ CR_ST_WORD(st, i >> 2) =
+ mc[decrypt][CR_ST_BYTE(st, i)] ^
+ rol32(mc[decrypt][CR_ST_BYTE(st, i + 1)], 8) ^
+ rol32(mc[decrypt][CR_ST_BYTE(st, i + 2)], 16) ^
+ rol32(mc[decrypt][CR_ST_BYTE(st, i + 3)], 24);
+ }
+
+ env->vfp.regs[rd] = make_float64(st.l[0]);
+ env->vfp.regs[rd + 1] = make_float64(st.l[1]);
+}
+
+/*
+ * SHA-1 logical functions
+ */
+
+static uint32_t cho(uint32_t x, uint32_t y, uint32_t z)
+{
+ return (x & (y ^ z)) ^ z;
+}
+
+static uint32_t par(uint32_t x, uint32_t y, uint32_t z)
+{
+ return x ^ y ^ z;
+}
+
+static uint32_t maj(uint32_t x, uint32_t y, uint32_t z)
+{
+ return (x & y) | ((x | y) & z);
+}
+
+void HELPER(crypto_sha1_3reg)(CPUARMState *env, uint32_t rd, uint32_t rn,
+ uint32_t rm, uint32_t op)
+{
+ union CRYPTO_STATE d = { .l = {
+ float64_val(env->vfp.regs[rd]),
+ float64_val(env->vfp.regs[rd + 1])
+ } };
+ union CRYPTO_STATE n = { .l = {
+ float64_val(env->vfp.regs[rn]),
+ float64_val(env->vfp.regs[rn + 1])
+ } };
+ union CRYPTO_STATE m = { .l = {
+ float64_val(env->vfp.regs[rm]),
+ float64_val(env->vfp.regs[rm + 1])
+ } };
+
+ if (op == 3) { /* sha1su0 */
+ d.l[0] ^= d.l[1] ^ m.l[0];
+ d.l[1] ^= n.l[0] ^ m.l[1];
+ } else {
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ uint32_t t;
+
+ switch (op) {
+ case 0: /* sha1c */
+ t = cho(CR_ST_WORD(d, 1), CR_ST_WORD(d, 2), CR_ST_WORD(d, 3));
+ break;
+ case 1: /* sha1p */
+ t = par(CR_ST_WORD(d, 1), CR_ST_WORD(d, 2), CR_ST_WORD(d, 3));
+ break;
+ case 2: /* sha1m */
+ t = maj(CR_ST_WORD(d, 1), CR_ST_WORD(d, 2), CR_ST_WORD(d, 3));
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ t += rol32(CR_ST_WORD(d, 0), 5) + CR_ST_WORD(n, 0)
+ + CR_ST_WORD(m, i);
+
+ CR_ST_WORD(n, 0) = CR_ST_WORD(d, 3);
+ CR_ST_WORD(d, 3) = CR_ST_WORD(d, 2);
+ CR_ST_WORD(d, 2) = ror32(CR_ST_WORD(d, 1), 2);
+ CR_ST_WORD(d, 1) = CR_ST_WORD(d, 0);
+ CR_ST_WORD(d, 0) = t;
+ }
+ }
+ env->vfp.regs[rd] = make_float64(d.l[0]);
+ env->vfp.regs[rd + 1] = make_float64(d.l[1]);
+}
+
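+/* SHA1H: rotate the low 32-bit word of Vm right by 2 and clear the
+ * remaining three words.
+ */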
+void HELPER(crypto_sha1h)(CPUARMState *env, uint32_t rd, uint32_t rm)
+{
+ union CRYPTO_STATE m = { .l = {
+ float64_val(env->vfp.regs[rm]),
+ float64_val(env->vfp.regs[rm + 1])
+ } };
+
+ CR_ST_WORD(m, 0) = ror32(CR_ST_WORD(m, 0), 2);
+ CR_ST_WORD(m, 1) = CR_ST_WORD(m, 2) = CR_ST_WORD(m, 3) = 0;
+
+ env->vfp.regs[rd] = make_float64(m.l[0]);
+ env->vfp.regs[rd + 1] = make_float64(m.l[1]);
+}
+
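+/* SHA1SU1: second half of the SHA-1 schedule update: XOR each Vd word with
+ * the Vm word one lane up and rotate left by one; the last lane XORs with
+ * the freshly updated first lane instead.
+ */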
+void HELPER(crypto_sha1su1)(CPUARMState *env, uint32_t rd, uint32_t rm)
+{
+ union CRYPTO_STATE d = { .l = {
+ float64_val(env->vfp.regs[rd]),
+ float64_val(env->vfp.regs[rd + 1])
+ } };
+ union CRYPTO_STATE m = { .l = {
+ float64_val(env->vfp.regs[rm]),
+ float64_val(env->vfp.regs[rm + 1])
+ } };
+
+ CR_ST_WORD(d, 0) = rol32(CR_ST_WORD(d, 0) ^ CR_ST_WORD(m, 1), 1);
+ CR_ST_WORD(d, 1) = rol32(CR_ST_WORD(d, 1) ^ CR_ST_WORD(m, 2), 1);
+ CR_ST_WORD(d, 2) = rol32(CR_ST_WORD(d, 2) ^ CR_ST_WORD(m, 3), 1);
+ CR_ST_WORD(d, 3) = rol32(CR_ST_WORD(d, 3) ^ CR_ST_WORD(d, 0), 1);
+
+ env->vfp.regs[rd] = make_float64(d.l[0]);
+ env->vfp.regs[rd + 1] = make_float64(d.l[1]);
+}
+
+/*
+ * The SHA-256 logical functions, according to
+ * http://csrc.nist.gov/groups/STM/cavp/documents/shs/sha256-384-512.pdf
+ */
+
+static uint32_t S0(uint32_t x)
+{
+ return ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22);
+}
+
+static uint32_t S1(uint32_t x)
+{
+ return ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25);
+}
+
+static uint32_t s0(uint32_t x)
+{
+ return ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3);
+}
+
+static uint32_t s1(uint32_t x)
+{
+ return ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10);
+}
+
+void HELPER(crypto_sha256h)(CPUARMState *env, uint32_t rd, uint32_t rn,
+ uint32_t rm)
+{
+ union CRYPTO_STATE d = { .l = {
+ float64_val(env->vfp.regs[rd]),
+ float64_val(env->vfp.regs[rd + 1])
+ } };
+ union CRYPTO_STATE n = { .l = {
+ float64_val(env->vfp.regs[rn]),
+ float64_val(env->vfp.regs[rn + 1])
+ } };
+ union CRYPTO_STATE m = { .l = {
+ float64_val(env->vfp.regs[rm]),
+ float64_val(env->vfp.regs[rm + 1])
+ } };
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ uint32_t t = cho(CR_ST_WORD(n, 0), CR_ST_WORD(n, 1), CR_ST_WORD(n, 2))
+ + CR_ST_WORD(n, 3) + S1(CR_ST_WORD(n, 0))
+ + CR_ST_WORD(m, i);
+
+ CR_ST_WORD(n, 3) = CR_ST_WORD(n, 2);
+ CR_ST_WORD(n, 2) = CR_ST_WORD(n, 1);
+ CR_ST_WORD(n, 1) = CR_ST_WORD(n, 0);
+ CR_ST_WORD(n, 0) = CR_ST_WORD(d, 3) + t;
+
+ t += maj(CR_ST_WORD(d, 0), CR_ST_WORD(d, 1), CR_ST_WORD(d, 2))
+ + S0(CR_ST_WORD(d, 0));
+
+ CR_ST_WORD(d, 3) = CR_ST_WORD(d, 2);
+ CR_ST_WORD(d, 2) = CR_ST_WORD(d, 1);
+ CR_ST_WORD(d, 1) = CR_ST_WORD(d, 0);
+ CR_ST_WORD(d, 0) = t;
+ }
+
+ env->vfp.regs[rd] = make_float64(d.l[0]);
+ env->vfp.regs[rd + 1] = make_float64(d.l[1]);
+}
+
+void HELPER(crypto_sha256h2)(CPUARMState *env, uint32_t rd, uint32_t rn,
+ uint32_t rm)
+{
+ union CRYPTO_STATE d = { .l = {
+ float64_val(env->vfp.regs[rd]),
+ float64_val(env->vfp.regs[rd + 1])
+ } };
+ union CRYPTO_STATE n = { .l = {
+ float64_val(env->vfp.regs[rn]),
+ float64_val(env->vfp.regs[rn + 1])
+ } };
+ union CRYPTO_STATE m = { .l = {
+ float64_val(env->vfp.regs[rm]),
+ float64_val(env->vfp.regs[rm + 1])
+ } };
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ uint32_t t = cho(CR_ST_WORD(d, 0), CR_ST_WORD(d, 1), CR_ST_WORD(d, 2))
+ + CR_ST_WORD(d, 3) + S1(CR_ST_WORD(d, 0))
+ + CR_ST_WORD(m, i);
+
+ CR_ST_WORD(d, 3) = CR_ST_WORD(d, 2);
+ CR_ST_WORD(d, 2) = CR_ST_WORD(d, 1);
+ CR_ST_WORD(d, 1) = CR_ST_WORD(d, 0);
+ CR_ST_WORD(d, 0) = CR_ST_WORD(n, 3 - i) + t;
+ }
+
+ env->vfp.regs[rd] = make_float64(d.l[0]);
+ env->vfp.regs[rd + 1] = make_float64(d.l[1]);
+}
+
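+/* SHA256SU0: first half of the SHA-256 schedule update: add sigma0 of the
+ * following word into each lane, with the last lane taking its input from
+ * the first word of Vm.
+ */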
+void HELPER(crypto_sha256su0)(CPUARMState *env, uint32_t rd, uint32_t rm)
+{
+ union CRYPTO_STATE d = { .l = {
+ float64_val(env->vfp.regs[rd]),
+ float64_val(env->vfp.regs[rd + 1])
+ } };
+ union CRYPTO_STATE m = { .l = {
+ float64_val(env->vfp.regs[rm]),
+ float64_val(env->vfp.regs[rm + 1])
+ } };
+
+ CR_ST_WORD(d, 0) += s0(CR_ST_WORD(d, 1));
+ CR_ST_WORD(d, 1) += s0(CR_ST_WORD(d, 2));
+ CR_ST_WORD(d, 2) += s0(CR_ST_WORD(d, 3));
+ CR_ST_WORD(d, 3) += s0(CR_ST_WORD(m, 0));
+
+ env->vfp.regs[rd] = make_float64(d.l[0]);
+ env->vfp.regs[rd + 1] = make_float64(d.l[1]);
+}
+
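+/* SHA256SU1: finish the SHA-256 schedule update: each lane adds a sigma1
+ * term (from Vm words 2-3, then the freshly updated lanes 0-1) plus one
+ * word from Vn, with the last lane adding the first Vm word instead.
+ */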
+void HELPER(crypto_sha256su1)(CPUARMState *env, uint32_t rd, uint32_t rn,
+ uint32_t rm)
+{
+ union CRYPTO_STATE d = { .l = {
+ float64_val(env->vfp.regs[rd]),
+ float64_val(env->vfp.regs[rd + 1])
+ } };
+ union CRYPTO_STATE n = { .l = {
+ float64_val(env->vfp.regs[rn]),
+ float64_val(env->vfp.regs[rn + 1])
+ } };
+ union CRYPTO_STATE m = { .l = {
+ float64_val(env->vfp.regs[rm]),
+ float64_val(env->vfp.regs[rm + 1])
+ } };
+
+ CR_ST_WORD(d, 0) += s1(CR_ST_WORD(m, 2)) + CR_ST_WORD(n, 1);
+ CR_ST_WORD(d, 1) += s1(CR_ST_WORD(m, 3)) + CR_ST_WORD(n, 2);
+ CR_ST_WORD(d, 2) += s1(CR_ST_WORD(d, 0)) + CR_ST_WORD(n, 3);
+ CR_ST_WORD(d, 3) += s1(CR_ST_WORD(d, 1)) + CR_ST_WORD(m, 0);
+
+ env->vfp.regs[rd] = make_float64(d.l[0]);
+ env->vfp.regs[rd + 1] = make_float64(d.l[1]);
+}
diff --git a/target/arm/gdbstub.c b/target/arm/gdbstub.c
new file mode 100644
index 0000000000..04c1208d03
--- /dev/null
+++ b/target/arm/gdbstub.c
@@ -0,0 +1,103 @@
+/*
+ * ARM gdb server stub
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+
+/* Old gdb always expects FPA registers. Newer (xml-aware) gdb only expects
+ whatever the target description contains. Due to a historical mishap
+ the FPA registers appear in between the core integer regs and the CPSR.
+ We hack around this by giving the FPA regs zero size when talking to a
+ newer gdb. */
+
+int arm_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ if (n < 16) {
+ /* Core integer register. */
+ return gdb_get_reg32(mem_buf, env->regs[n]);
+ }
+ if (n < 24) {
+ /* FPA registers. */
+ if (gdb_has_xml) {
+ return 0;
+ }
+ memset(mem_buf, 0, 12);
+ return 12;
+ }
+ switch (n) {
+ case 24:
+ /* FPA status register. */
+ if (gdb_has_xml) {
+ return 0;
+ }
+ return gdb_get_reg32(mem_buf, 0);
+ case 25:
+ /* CPSR */
+ return gdb_get_reg32(mem_buf, cpsr_read(env));
+ }
+ /* Unknown register. */
+ return 0;
+}
+
+int arm_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ uint32_t tmp;
+
+ tmp = ldl_p(mem_buf);
+
+ /* Mask out the low bit of the PC to work around gdb bugs. This will
+ probably cause problems if we ever implement the Jazelle DBX extensions. */
+ if (n == 15) {
+ tmp &= ~1;
+ }
+
+ if (n < 16) {
+ /* Core integer register. */
+ env->regs[n] = tmp;
+ return 4;
+ }
+ if (n < 24) { /* 16-23 */
+ /* FPA registers (ignored). */
+ if (gdb_has_xml) {
+ return 0;
+ }
+ return 12;
+ }
+ switch (n) {
+ case 24:
+ /* FPA status register (ignored). */
+ if (gdb_has_xml) {
+ return 0;
+ }
+ return 4;
+ case 25:
+ /* CPSR */
+ cpsr_write(env, tmp, 0xffffffff, CPSRWriteByGDBStub);
+ return 4;
+ }
+ /* Unknown register. */
+ return 0;
+}
diff --git a/target/arm/gdbstub64.c b/target/arm/gdbstub64.c
new file mode 100644
index 0000000000..49bc3fc521
--- /dev/null
+++ b/target/arm/gdbstub64.c
@@ -0,0 +1,72 @@
+/*
+ * ARM gdb server stub: AArch64 specific functions.
+ *
+ * Copyright (c) 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+
+int aarch64_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ if (n < 31) {
+ /* Core integer register. */
+ return gdb_get_reg64(mem_buf, env->xregs[n]);
+ }
+ switch (n) {
+ case 31:
+ return gdb_get_reg64(mem_buf, env->xregs[31]);
+ case 32:
+ return gdb_get_reg64(mem_buf, env->pc);
+ case 33:
+ return gdb_get_reg32(mem_buf, pstate_read(env));
+ }
+ /* Unknown register. */
+ return 0;
+}
+
+int aarch64_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ uint64_t tmp;
+
+ tmp = ldq_p(mem_buf);
+
+ if (n < 31) {
+ /* Core integer register. */
+ env->xregs[n] = tmp;
+ return 8;
+ }
+ switch (n) {
+ case 31:
+ env->xregs[31] = tmp;
+ return 8;
+ case 32:
+ env->pc = tmp;
+ return 8;
+ case 33:
+ /* PSTATE */
+ pstate_write(env, tmp);
+ return 4;
+ }
+ /* Unknown register. */
+ return 0;
+}
diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c
new file mode 100644
index 0000000000..98b97df461
--- /dev/null
+++ b/target/arm/helper-a64.c
@@ -0,0 +1,559 @@
+/*
+ * AArch64 specific helpers
+ *
+ * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+#include "exec/helper-proto.h"
+#include "qemu/host-utils.h"
+#include "qemu/log.h"
+#include "sysemu/sysemu.h"
+#include "qemu/bitops.h"
+#include "internals.h"
+#include "qemu/crc32c.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "qemu/int128.h"
+#include "tcg.h"
+#include <zlib.h> /* For crc32 */
+
+/* C2.4.7 Multiply and divide */
+/* special cases for 0 and LLONG_MIN are mandated by the ARM architecture */
+uint64_t HELPER(udiv64)(uint64_t num, uint64_t den)
+{
+ if (den == 0) {
+ return 0;
+ }
+ return num / den;
+}
+
+int64_t HELPER(sdiv64)(int64_t num, int64_t den)
+{
+ if (den == 0) {
+ return 0;
+ }
+ if (num == LLONG_MIN && den == -1) {
+ return LLONG_MIN;
+ }
+ return num / den;
+}
+
+uint64_t HELPER(clz64)(uint64_t x)
+{
+ return clz64(x);
+}
+
+uint64_t HELPER(cls64)(uint64_t x)
+{
+ return clrsb64(x);
+}
+
+uint32_t HELPER(cls32)(uint32_t x)
+{
+ return clrsb32(x);
+}
+
+uint32_t HELPER(clz32)(uint32_t x)
+{
+ return clz32(x);
+}
+
+uint64_t HELPER(rbit64)(uint64_t x)
+{
+ return revbit64(x);
+}
+
+/* Convert a softfloat float_relation_* result (as returned by
+ * the float*_compare functions) to the corresponding ARM
+ * NZCV flag state.
+ */
+static inline uint32_t float_rel_to_flags(int res)
+{
+ uint64_t flags;
+ switch (res) {
+ case float_relation_equal:
+ flags = PSTATE_Z | PSTATE_C;
+ break;
+ case float_relation_less:
+ flags = PSTATE_N;
+ break;
+ case float_relation_greater:
+ flags = PSTATE_C;
+ break;
+ case float_relation_unordered:
+ default:
+ flags = PSTATE_C | PSTATE_V;
+ break;
+ }
+ return flags;
+}
+
+uint64_t HELPER(vfp_cmps_a64)(float32 x, float32 y, void *fp_status)
+{
+ return float_rel_to_flags(float32_compare_quiet(x, y, fp_status));
+}
+
+uint64_t HELPER(vfp_cmpes_a64)(float32 x, float32 y, void *fp_status)
+{
+ return float_rel_to_flags(float32_compare(x, y, fp_status));
+}
+
+uint64_t HELPER(vfp_cmpd_a64)(float64 x, float64 y, void *fp_status)
+{
+ return float_rel_to_flags(float64_compare_quiet(x, y, fp_status));
+}
+
+uint64_t HELPER(vfp_cmped_a64)(float64 x, float64 y, void *fp_status)
+{
+ return float_rel_to_flags(float64_compare(x, y, fp_status));
+}
+
+float32 HELPER(vfp_mulxs)(float32 a, float32 b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+
+ a = float32_squash_input_denormal(a, fpst);
+ b = float32_squash_input_denormal(b, fpst);
+
+ if ((float32_is_zero(a) && float32_is_infinity(b)) ||
+ (float32_is_infinity(a) && float32_is_zero(b))) {
+ /* 2.0 with the sign bit set to sign(A) XOR sign(B) */
+ return make_float32((1U << 30) |
+ ((float32_val(a) ^ float32_val(b)) & (1U << 31)));
+ }
+ return float32_mul(a, b, fpst);
+}
+
+float64 HELPER(vfp_mulxd)(float64 a, float64 b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+
+ a = float64_squash_input_denormal(a, fpst);
+ b = float64_squash_input_denormal(b, fpst);
+
+ if ((float64_is_zero(a) && float64_is_infinity(b)) ||
+ (float64_is_infinity(a) && float64_is_zero(b))) {
+ /* 2.0 with the sign bit set to sign(A) XOR sign(B) */
+ return make_float64((1ULL << 62) |
+ ((float64_val(a) ^ float64_val(b)) & (1ULL << 63)));
+ }
+ return float64_mul(a, b, fpst);
+}
+
+uint64_t HELPER(simd_tbl)(CPUARMState *env, uint64_t result, uint64_t indices,
+ uint32_t rn, uint32_t numregs)
+{
+ /* Helper function for SIMD TBL and TBX. We have to do the table
+ * lookup part for the 64 bits worth of indices we're passed in.
+ * result is the initial results vector (either zeroes for TBL
+ * or some guest values for TBX), rn the register number where
+ * the table starts, and numregs the number of registers in the table.
+ * We return the results of the lookups.
+ */
+ int shift;
+
+ for (shift = 0; shift < 64; shift += 8) {
+ int index = extract64(indices, shift, 8);
+ if (index < 16 * numregs) {
+ /* Convert index (a byte offset into the virtual table
+ * which is a series of 128-bit vectors concatenated)
+ * into the correct vfp.regs[] element plus a bit offset
+ * into that element, bearing in mind that the table
+ * can wrap around from V31 to V0.
+ */
+ int elt = (rn * 2 + (index >> 3)) % 64;
+ int bitidx = (index & 7) * 8;
+ uint64_t val = extract64(env->vfp.regs[elt], bitidx, 8);
+
+ result = deposit64(result, shift, 8, val);
+ }
+ }
+ return result;
+}
+
+/* 64bit/double versions of the neon float compare functions */
+uint64_t HELPER(neon_ceq_f64)(float64 a, float64 b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ return -float64_eq_quiet(a, b, fpst);
+}
+
+uint64_t HELPER(neon_cge_f64)(float64 a, float64 b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ return -float64_le(b, a, fpst);
+}
+
+uint64_t HELPER(neon_cgt_f64)(float64 a, float64 b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ return -float64_lt(b, a, fpst);
+}
+
+/* Reciprocal step and sqrt step. Note that unlike the A32/T32
+ * versions, these do a fully fused multiply-add or
+ * multiply-add-and-halve.
+ */
+#define float32_two make_float32(0x40000000)
+#define float32_three make_float32(0x40400000)
+#define float32_one_point_five make_float32(0x3fc00000)
+
+#define float64_two make_float64(0x4000000000000000ULL)
+#define float64_three make_float64(0x4008000000000000ULL)
+#define float64_one_point_five make_float64(0x3FF8000000000000ULL)
+
+float32 HELPER(recpsf_f32)(float32 a, float32 b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+
+ a = float32_squash_input_denormal(a, fpst);
+ b = float32_squash_input_denormal(b, fpst);
+
+ a = float32_chs(a);
+ if ((float32_is_infinity(a) && float32_is_zero(b)) ||
+ (float32_is_infinity(b) && float32_is_zero(a))) {
+ return float32_two;
+ }
+ return float32_muladd(a, b, float32_two, 0, fpst);
+}
+
+float64 HELPER(recpsf_f64)(float64 a, float64 b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+
+ a = float64_squash_input_denormal(a, fpst);
+ b = float64_squash_input_denormal(b, fpst);
+
+ a = float64_chs(a);
+ if ((float64_is_infinity(a) && float64_is_zero(b)) ||
+ (float64_is_infinity(b) && float64_is_zero(a))) {
+ return float64_two;
+ }
+ return float64_muladd(a, b, float64_two, 0, fpst);
+}
+
+float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+
+ a = float32_squash_input_denormal(a, fpst);
+ b = float32_squash_input_denormal(b, fpst);
+
+ a = float32_chs(a);
+ if ((float32_is_infinity(a) && float32_is_zero(b)) ||
+ (float32_is_infinity(b) && float32_is_zero(a))) {
+ return float32_one_point_five;
+ }
+ return float32_muladd(a, b, float32_three, float_muladd_halve_result, fpst);
+}
+
+float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+
+ a = float64_squash_input_denormal(a, fpst);
+ b = float64_squash_input_denormal(b, fpst);
+
+ a = float64_chs(a);
+ if ((float64_is_infinity(a) && float64_is_zero(b)) ||
+ (float64_is_infinity(b) && float64_is_zero(a))) {
+ return float64_one_point_five;
+ }
+ return float64_muladd(a, b, float64_three, float_muladd_halve_result, fpst);
+}
+
+/* Pairwise long add: add pairs of adjacent elements into
+ * double-width elements in the result (eg _s8 is an 8x8->16 op)
+ */
+uint64_t HELPER(neon_addlp_s8)(uint64_t a)
+{
+ uint64_t nsignmask = 0x0080008000800080ULL;
+ uint64_t wsignmask = 0x8000800080008000ULL;
+ uint64_t elementmask = 0x00ff00ff00ff00ffULL;
+ uint64_t tmp1, tmp2;
+ uint64_t res, signres;
+
+ /* Extract the elements at even byte positions, sign extending each to a 16 bit field */
+ tmp1 = a & elementmask;
+ tmp1 ^= nsignmask;
+ tmp1 |= wsignmask;
+ tmp1 = (tmp1 - nsignmask) ^ wsignmask;
+ /* Ditto for the elements at odd byte positions */
+ tmp2 = (a >> 8) & elementmask;
+ tmp2 ^= nsignmask;
+ tmp2 |= wsignmask;
+ tmp2 = (tmp2 - nsignmask) ^ wsignmask;
+
+ /* calculate the result by summing bits 0..14, 16..30, etc,
+ * and then adjusting the sign bits 15, 31, etc manually.
+ * This ensures the addition can't overflow the 16 bit field.
+ */
+ signres = (tmp1 ^ tmp2) & wsignmask;
+ res = (tmp1 & ~wsignmask) + (tmp2 & ~wsignmask);
+ res ^= signres;
+
+ return res;
+}
+
+uint64_t HELPER(neon_addlp_u8)(uint64_t a)
+{
+ uint64_t tmp;
+
+ tmp = a & 0x00ff00ff00ff00ffULL;
+ tmp += (a >> 8) & 0x00ff00ff00ff00ffULL;
+ return tmp;
+}
+
+uint64_t HELPER(neon_addlp_s16)(uint64_t a)
+{
+ int32_t reslo, reshi;
+
+ reslo = (int32_t)(int16_t)a + (int32_t)(int16_t)(a >> 16);
+ reshi = (int32_t)(int16_t)(a >> 32) + (int32_t)(int16_t)(a >> 48);
+
+ return (uint32_t)reslo | (((uint64_t)reshi) << 32);
+}
+
+uint64_t HELPER(neon_addlp_u16)(uint64_t a)
+{
+ uint64_t tmp;
+
+ tmp = a & 0x0000ffff0000ffffULL;
+ tmp += (a >> 16) & 0x0000ffff0000ffffULL;
+ return tmp;
+}
+
+/* Floating-point reciprocal exponent - see FPRecpX in the ARM ARM */
+float32 HELPER(frecpx_f32)(float32 a, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ uint32_t val32, sbit;
+ int32_t exp;
+
+ if (float32_is_any_nan(a)) {
+ float32 nan = a;
+ if (float32_is_signaling_nan(a, fpst)) {
+ float_raise(float_flag_invalid, fpst);
+ nan = float32_maybe_silence_nan(a, fpst);
+ }
+ if (fpst->default_nan_mode) {
+ nan = float32_default_nan(fpst);
+ }
+ return nan;
+ }
+
+ val32 = float32_val(a);
+ sbit = 0x80000000ULL & val32;
+ exp = extract32(val32, 23, 8);
+
+ if (exp == 0) {
+ return make_float32(sbit | (0xfe << 23));
+ } else {
+ return make_float32(sbit | (~exp & 0xff) << 23);
+ }
+}
+
+float64 HELPER(frecpx_f64)(float64 a, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ uint64_t val64, sbit;
+ int64_t exp;
+
+ if (float64_is_any_nan(a)) {
+ float64 nan = a;
+ if (float64_is_signaling_nan(a, fpst)) {
+ float_raise(float_flag_invalid, fpst);
+ nan = float64_maybe_silence_nan(a, fpst);
+ }
+ if (fpst->default_nan_mode) {
+ nan = float64_default_nan(fpst);
+ }
+ return nan;
+ }
+
+ val64 = float64_val(a);
+ sbit = 0x8000000000000000ULL & val64;
+ exp = extract64(float64_val(a), 52, 11);
+
+ if (exp == 0) {
+ return make_float64(sbit | (0x7feULL << 52));
+ } else {
+ return make_float64(sbit | (~exp & 0x7ffULL) << 52);
+ }
+}
+
+float32 HELPER(fcvtx_f64_to_f32)(float64 a, CPUARMState *env)
+{
+ /* Von Neumann rounding is implemented by using round-to-zero
+ * and then setting the LSB of the result if Inexact was raised.
+ */
+ float32 r;
+ float_status *fpst = &env->vfp.fp_status;
+ float_status tstat = *fpst;
+ int exflags;
+
+ set_float_rounding_mode(float_round_to_zero, &tstat);
+ set_float_exception_flags(0, &tstat);
+ r = float64_to_float32(a, &tstat);
+ r = float32_maybe_silence_nan(r, &tstat);
+ exflags = get_float_exception_flags(&tstat);
+ if (exflags & float_flag_inexact) {
+ r = make_float32(float32_val(r) | 1);
+ }
+ exflags |= get_float_exception_flags(fpst);
+ set_float_exception_flags(exflags, fpst);
+ return r;
+}
+
+/* 64-bit versions of the CRC helpers. Note that although the operation
+ * (and the prototypes of crc32c() and crc32()) means that only the bottom
+ * 32 bits of the accumulator and result are used, we pass and return
+ * uint64_t for convenience of the generated code. Unlike the 32-bit
+ * instruction set versions, val may genuinely have 64 bits of data in it.
+ * The upper bytes of val (above the number specified by 'bytes') must have
+ * been zeroed out by the caller.
+ */
+uint64_t HELPER(crc32_64)(uint64_t acc, uint64_t val, uint32_t bytes)
+{
+ uint8_t buf[8];
+
+ stq_le_p(buf, val);
+
+ /* zlib crc32 converts the accumulator and output to one's complement. */
+ return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
+}
+
+uint64_t HELPER(crc32c_64)(uint64_t acc, uint64_t val, uint32_t bytes)
+{
+ uint8_t buf[8];
+
+ stq_le_p(buf, val);
+
+ /* Linux crc32c converts the output to one's complement. */
+ return crc32c(acc, buf, bytes) ^ 0xffffffff;
+}
+
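+/* Compare the recorded exclusive value pair (exclusive_val/exclusive_high)
+ * with memory at addr and, if it matches, store the new pair there
+ * (little-endian layout).
+ */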
+/* Returns 0 on success; 1 otherwise. */
+uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr,
+ uint64_t new_lo, uint64_t new_hi)
+{
+ uintptr_t ra = GETPC();
+ Int128 oldv, cmpv, newv;
+ bool success;
+
+ cmpv = int128_make128(env->exclusive_val, env->exclusive_high);
+ newv = int128_make128(new_lo, new_hi);
+
+ if (parallel_cpus) {
+#ifndef CONFIG_ATOMIC128
+ cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
+#else
+ int mem_idx = cpu_mmu_index(env, false);
+ TCGMemOpIdx oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
+ oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra);
+ success = int128_eq(oldv, cmpv);
+#endif
+ } else {
+ uint64_t o0, o1;
+
+#ifdef CONFIG_USER_ONLY
+ /* ??? Enforce alignment. */
+ uint64_t *haddr = g2h(addr);
+ o0 = ldq_le_p(haddr + 0);
+ o1 = ldq_le_p(haddr + 1);
+ oldv = int128_make128(o0, o1);
+
+ success = int128_eq(oldv, cmpv);
+ if (success) {
+ stq_le_p(haddr + 0, int128_getlo(newv));
+ stq_le_p(haddr + 1, int128_gethi(newv));
+ }
+#else
+ int mem_idx = cpu_mmu_index(env, false);
+ TCGMemOpIdx oi0 = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
+ TCGMemOpIdx oi1 = make_memop_idx(MO_LEQ, mem_idx);
+
+ o0 = helper_le_ldq_mmu(env, addr + 0, oi0, ra);
+ o1 = helper_le_ldq_mmu(env, addr + 8, oi1, ra);
+ oldv = int128_make128(o0, o1);
+
+ success = int128_eq(oldv, cmpv);
+ if (success) {
+ helper_le_stq_mmu(env, addr + 0, int128_getlo(newv), oi1, ra);
+ helper_le_stq_mmu(env, addr + 8, int128_gethi(newv), oi1, ra);
+ }
+#endif
+ }
+
+ return !success;
+}
+
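+/* Big-endian variant of the paired compare-and-swap: the two 64-bit halves
+ * are swapped, so the high half lives at the lower address.
+ */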
+uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr,
+ uint64_t new_lo, uint64_t new_hi)
+{
+ uintptr_t ra = GETPC();
+ Int128 oldv, cmpv, newv;
+ bool success;
+
+ cmpv = int128_make128(env->exclusive_val, env->exclusive_high);
+ newv = int128_make128(new_lo, new_hi);
+
+ if (parallel_cpus) {
+#ifndef CONFIG_ATOMIC128
+ cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
+#else
+ int mem_idx = cpu_mmu_index(env, false);
+ TCGMemOpIdx oi = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx);
+ oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
+ success = int128_eq(oldv, cmpv);
+#endif
+ } else {
+ uint64_t o0, o1;
+
+#ifdef CONFIG_USER_ONLY
+ /* ??? Enforce alignment. */
+ uint64_t *haddr = g2h(addr);
+ o1 = ldq_be_p(haddr + 0);
+ o0 = ldq_be_p(haddr + 1);
+ oldv = int128_make128(o0, o1);
+
+ success = int128_eq(oldv, cmpv);
+ if (success) {
+ stq_be_p(haddr + 0, int128_gethi(newv));
+ stq_be_p(haddr + 1, int128_getlo(newv));
+ }
+#else
+ int mem_idx = cpu_mmu_index(env, false);
+ TCGMemOpIdx oi0 = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx);
+ TCGMemOpIdx oi1 = make_memop_idx(MO_BEQ, mem_idx);
+
+ o1 = helper_be_ldq_mmu(env, addr + 0, oi0, ra);
+ o0 = helper_be_ldq_mmu(env, addr + 8, oi1, ra);
+ oldv = int128_make128(o0, o1);
+
+ success = int128_eq(oldv, cmpv);
+ if (success) {
+ helper_be_stq_mmu(env, addr + 0, int128_gethi(newv), oi1, ra);
+ helper_be_stq_mmu(env, addr + 8, int128_getlo(newv), oi1, ra);
+ }
+#endif
+ }
+
+ return !success;
+}
diff --git a/target/arm/helper-a64.h b/target/arm/helper-a64.h
new file mode 100644
index 0000000000..dd32000e63
--- /dev/null
+++ b/target/arm/helper-a64.h
@@ -0,0 +1,50 @@
+/*
+ * AArch64 specific helper definitions
+ *
+ * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+DEF_HELPER_FLAGS_2(udiv64, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(sdiv64, TCG_CALL_NO_RWG_SE, s64, s64, s64)
+DEF_HELPER_FLAGS_1(clz64, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(cls64, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(cls32, TCG_CALL_NO_RWG_SE, i32, i32)
+DEF_HELPER_FLAGS_1(clz32, TCG_CALL_NO_RWG_SE, i32, i32)
+DEF_HELPER_FLAGS_1(rbit64, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_3(vfp_cmps_a64, i64, f32, f32, ptr)
+DEF_HELPER_3(vfp_cmpes_a64, i64, f32, f32, ptr)
+DEF_HELPER_3(vfp_cmpd_a64, i64, f64, f64, ptr)
+DEF_HELPER_3(vfp_cmped_a64, i64, f64, f64, ptr)
+DEF_HELPER_FLAGS_5(simd_tbl, TCG_CALL_NO_RWG_SE, i64, env, i64, i64, i32, i32)
+DEF_HELPER_FLAGS_3(vfp_mulxs, TCG_CALL_NO_RWG, f32, f32, f32, ptr)
+DEF_HELPER_FLAGS_3(vfp_mulxd, TCG_CALL_NO_RWG, f64, f64, f64, ptr)
+DEF_HELPER_FLAGS_3(neon_ceq_f64, TCG_CALL_NO_RWG, i64, i64, i64, ptr)
+DEF_HELPER_FLAGS_3(neon_cge_f64, TCG_CALL_NO_RWG, i64, i64, i64, ptr)
+DEF_HELPER_FLAGS_3(neon_cgt_f64, TCG_CALL_NO_RWG, i64, i64, i64, ptr)
+DEF_HELPER_FLAGS_3(recpsf_f32, TCG_CALL_NO_RWG, f32, f32, f32, ptr)
+DEF_HELPER_FLAGS_3(recpsf_f64, TCG_CALL_NO_RWG, f64, f64, f64, ptr)
+DEF_HELPER_FLAGS_3(rsqrtsf_f32, TCG_CALL_NO_RWG, f32, f32, f32, ptr)
+DEF_HELPER_FLAGS_3(rsqrtsf_f64, TCG_CALL_NO_RWG, f64, f64, f64, ptr)
+DEF_HELPER_FLAGS_1(neon_addlp_s8, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(neon_addlp_u8, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(neon_addlp_s16, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(neon_addlp_u16, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_2(frecpx_f64, TCG_CALL_NO_RWG, f64, f64, ptr)
+DEF_HELPER_FLAGS_2(frecpx_f32, TCG_CALL_NO_RWG, f32, f32, ptr)
+DEF_HELPER_FLAGS_2(fcvtx_f64_to_f32, TCG_CALL_NO_RWG, f32, f64, env)
+DEF_HELPER_FLAGS_3(crc32_64, TCG_CALL_NO_RWG_SE, i64, i64, i64, i32)
+DEF_HELPER_FLAGS_3(crc32c_64, TCG_CALL_NO_RWG_SE, i64, i64, i64, i32)
+DEF_HELPER_FLAGS_4(paired_cmpxchg64_le, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(paired_cmpxchg64_be, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
diff --git a/target/arm/helper.c b/target/arm/helper.c
new file mode 100644
index 0000000000..b5b65caadf
--- /dev/null
+++ b/target/arm/helper.c
@@ -0,0 +1,9623 @@
+#include "qemu/osdep.h"
+#include "trace.h"
+#include "cpu.h"
+#include "internals.h"
+#include "exec/gdbstub.h"
+#include "exec/helper-proto.h"
+#include "qemu/host-utils.h"
+#include "sysemu/arch_init.h"
+#include "sysemu/sysemu.h"
+#include "qemu/bitops.h"
+#include "qemu/crc32c.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "arm_ldst.h"
+#include <zlib.h> /* For crc32 */
+#include "exec/semihost.h"
+#include "sysemu/kvm.h"
+
+#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
+
+#ifndef CONFIG_USER_ONLY
+static bool get_phys_addr(CPUARMState *env, target_ulong address,
+ int access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
+ target_ulong *page_size, uint32_t *fsr,
+ ARMMMUFaultInfo *fi);
+
+static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
+ int access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
+ target_ulong *page_size_ptr, uint32_t *fsr,
+ ARMMMUFaultInfo *fi);
+
+/* Definitions for the PMCCNTR and PMCR registers */
+#define PMCRD 0x8
+#define PMCRC 0x4
+#define PMCRE 0x1
+#endif
+
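+/* gdb coprocessor register layout: the D registers come first, followed
+ * (when Neon is present) by aliases for the Q registers, and finally
+ * FPSID, FPSCR and FPEXC.
+ */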
+static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
+{
+ int nregs;
+
+ /* VFP data registers are always little-endian. */
+ nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
+ if (reg < nregs) {
+ stfq_le_p(buf, env->vfp.regs[reg]);
+ return 8;
+ }
+ if (arm_feature(env, ARM_FEATURE_NEON)) {
+ /* Aliases for Q regs. */
+ nregs += 16;
+ if (reg < nregs) {
+ stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
+ stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
+ return 16;
+ }
+ }
+ switch (reg - nregs) {
+ case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
+ case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
+ case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
+ }
+ return 0;
+}
+
+static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
+{
+ int nregs;
+
+ nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
+ if (reg < nregs) {
+ env->vfp.regs[reg] = ldfq_le_p(buf);
+ return 8;
+ }
+ if (arm_feature(env, ARM_FEATURE_NEON)) {
+ nregs += 16;
+ if (reg < nregs) {
+ env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
+ env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
+ return 16;
+ }
+ }
+ switch (reg - nregs) {
+ case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
+ case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
+ case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4;
+ }
+ return 0;
+}
+
+static int aarch64_fpu_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
+{
+ switch (reg) {
+ case 0 ... 31:
+ /* 128 bit FP register */
+ stfq_le_p(buf, env->vfp.regs[reg * 2]);
+ stfq_le_p(buf + 8, env->vfp.regs[reg * 2 + 1]);
+ return 16;
+ case 32:
+ /* FPSR */
+ stl_p(buf, vfp_get_fpsr(env));
+ return 4;
+ case 33:
+ /* FPCR */
+ stl_p(buf, vfp_get_fpcr(env));
+ return 4;
+ default:
+ return 0;
+ }
+}
+
+static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg)
+{
+ switch (reg) {
+ case 0 ... 31:
+ /* 128 bit FP register */
+ env->vfp.regs[reg * 2] = ldfq_le_p(buf);
+ env->vfp.regs[reg * 2 + 1] = ldfq_le_p(buf + 8);
+ return 16;
+ case 32:
+ /* FPSR */
+ vfp_set_fpsr(env, ldl_p(buf));
+ return 4;
+ case 33:
+ /* FPCR */
+ vfp_set_fpcr(env, ldl_p(buf));
+ return 4;
+ default:
+ return 0;
+ }
+}
+
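+/* Direct load/store of the CPUARMState field named by ri->fieldoffset;
+ * used by the raw (migration) accessors below.
+ */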
+static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ assert(ri->fieldoffset);
+ if (cpreg_field_is_64bit(ri)) {
+ return CPREG_FIELD64(env, ri);
+ } else {
+ return CPREG_FIELD32(env, ri);
+ }
+}
+
+static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ assert(ri->fieldoffset);
+ if (cpreg_field_is_64bit(ri)) {
+ CPREG_FIELD64(env, ri) = value;
+ } else {
+ CPREG_FIELD32(env, ri) = value;
+ }
+}
+
+static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return (char *)env + ri->fieldoffset;
+}
+
+uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ /* Raw read of a coprocessor register (as needed for migration, etc). */
+ if (ri->type & ARM_CP_CONST) {
+ return ri->resetvalue;
+ } else if (ri->raw_readfn) {
+ return ri->raw_readfn(env, ri);
+ } else if (ri->readfn) {
+ return ri->readfn(env, ri);
+ } else {
+ return raw_read(env, ri);
+ }
+}
+
+static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t v)
+{
+ /* Raw write of a coprocessor register (as needed for migration, etc).
+ * Note that constant registers are treated as write-ignored; the
+ * caller should check for success by whether a readback gives the
+ * value written.
+ */
+ if (ri->type & ARM_CP_CONST) {
+ return;
+ } else if (ri->raw_writefn) {
+ ri->raw_writefn(env, ri, v);
+ } else if (ri->writefn) {
+ ri->writefn(env, ri, v);
+ } else {
+ raw_write(env, ri, v);
+ }
+}
+
+static bool raw_accessors_invalid(const ARMCPRegInfo *ri)
+{
+ /* Return true if the regdef would cause an assertion if you called
+ * read_raw_cp_reg() or write_raw_cp_reg() on it (ie if it is a
+ * program bug for it not to have the NO_RAW flag).
+ * NB that returning false here doesn't necessarily mean that calling
+ * read/write_raw_cp_reg() is safe, because we can't distinguish "has
+ * read/write access functions which are safe for raw use" from "has
+ * read/write access functions which have side effects but has forgotten
+ * to provide raw access functions".
+ * The tests here line up with the conditions in read/write_raw_cp_reg()
+ * and assertions in raw_read()/raw_write().
+ */
+ if ((ri->type & ARM_CP_CONST) ||
+ ri->fieldoffset ||
+ ((ri->raw_writefn || ri->writefn) && (ri->raw_readfn || ri->readfn))) {
+ return false;
+ }
+ return true;
+}
+
+bool write_cpustate_to_list(ARMCPU *cpu)
+{
+ /* Write the coprocessor state from cpu->env to the (index,value) list. */
+ int i;
+ bool ok = true;
+
+ for (i = 0; i < cpu->cpreg_array_len; i++) {
+ uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
+ const ARMCPRegInfo *ri;
+
+ ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
+ if (!ri) {
+ ok = false;
+ continue;
+ }
+ if (ri->type & ARM_CP_NO_RAW) {
+ continue;
+ }
+ cpu->cpreg_values[i] = read_raw_cp_reg(&cpu->env, ri);
+ }
+ return ok;
+}
+
+bool write_list_to_cpustate(ARMCPU *cpu)
+{
+ int i;
+ bool ok = true;
+
+ for (i = 0; i < cpu->cpreg_array_len; i++) {
+ uint32_t regidx = kvm_to_cpreg_id(cpu->cpreg_indexes[i]);
+ uint64_t v = cpu->cpreg_values[i];
+ const ARMCPRegInfo *ri;
+
+ ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
+ if (!ri) {
+ ok = false;
+ continue;
+ }
+ if (ri->type & ARM_CP_NO_RAW) {
+ continue;
+ }
+ /* Write value and confirm it reads back as written
+ * (to catch read-only registers and partially read-only
+ * registers where the incoming migration value doesn't match)
+ */
+ write_raw_cp_reg(&cpu->env, ri, v);
+ if (read_raw_cp_reg(&cpu->env, ri) != v) {
+ ok = false;
+ }
+ }
+ return ok;
+}
+
+static void add_cpreg_to_list(gpointer key, gpointer opaque)
+{
+ ARMCPU *cpu = opaque;
+ uint64_t regidx;
+ const ARMCPRegInfo *ri;
+
+ regidx = *(uint32_t *)key;
+ ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
+
+ if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
+ cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
+ /* The value array need not be initialized at this point */
+ cpu->cpreg_array_len++;
+ }
+}
+
+static void count_cpreg(gpointer key, gpointer opaque)
+{
+ ARMCPU *cpu = opaque;
+ uint64_t regidx;
+ const ARMCPRegInfo *ri;
+
+ regidx = *(uint32_t *)key;
+ ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
+
+ if (!(ri->type & (ARM_CP_NO_RAW|ARM_CP_ALIAS))) {
+ cpu->cpreg_array_len++;
+ }
+}
+
+static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
+{
+ uint64_t aidx = cpreg_to_kvm_id(*(uint32_t *)a);
+ uint64_t bidx = cpreg_to_kvm_id(*(uint32_t *)b);
+
+ if (aidx > bidx) {
+ return 1;
+ }
+ if (aidx < bidx) {
+ return -1;
+ }
+ return 0;
+}
+
+void init_cpreg_list(ARMCPU *cpu)
+{
+ /* Initialise the cpreg_indexes[] and cpreg_values[] arrays based on
+ * the cp_regs hash. Note that we require them to be sorted by key ID.
+ */
+ GList *keys;
+ int arraylen;
+
+ keys = g_hash_table_get_keys(cpu->cp_regs);
+ keys = g_list_sort(keys, cpreg_key_compare);
+
+ cpu->cpreg_array_len = 0;
+
+ g_list_foreach(keys, count_cpreg, cpu);
+
+ arraylen = cpu->cpreg_array_len;
+ cpu->cpreg_indexes = g_new(uint64_t, arraylen);
+ cpu->cpreg_values = g_new(uint64_t, arraylen);
+ cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
+ cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
+ cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
+ cpu->cpreg_array_len = 0;
+
+ g_list_foreach(keys, add_cpreg_to_list, cpu);
+
+ assert(cpu->cpreg_array_len == arraylen);
+
+ g_list_free(keys);
+}
+
+/*
+ * Some registers are not accessible if EL3.NS=0 and EL3 is using AArch32 but
+ * they are accessible when EL3 is using AArch64 regardless of EL3.NS.
+ *
+ * access_el3_aa32ns: Used to check AArch32 register views.
+ * access_el3_aa32ns_aa64any: Used to check both AArch32/64 register views.
+ */
+static CPAccessResult access_el3_aa32ns(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ bool isread)
+{
+ bool secure = arm_is_secure_below_el3(env);
+
+ assert(!arm_el_is_aa64(env, 3));
+ if (secure) {
+ return CP_ACCESS_TRAP_UNCATEGORIZED;
+ }
+ return CP_ACCESS_OK;
+}
+
+static CPAccessResult access_el3_aa32ns_aa64any(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (!arm_el_is_aa64(env, 3)) {
+ return access_el3_aa32ns(env, ri, isread);
+ }
+ return CP_ACCESS_OK;
+}
+
+/* Some secure-only AArch32 registers trap to EL3 if used from
+ * Secure EL1 (but are just ordinary UNDEF in other non-EL3 contexts).
+ * Note that an access from Secure EL1 can only happen if EL3 is AArch64.
+ * We assume that the .access field is set to PL1_RW.
+ */
+static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_current_el(env) == 3) {
+ return CP_ACCESS_OK;
+ }
+ if (arm_is_secure_below_el3(env)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ /* This will be EL1 NS and EL2 NS, which just UNDEF */
+ return CP_ACCESS_TRAP_UNCATEGORIZED;
+}
+
+/* Check for traps to "powerdown debug" registers, which are controlled
+ * by MDCR.TDOSA
+ */
+static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ int el = arm_current_el(env);
+
+ if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDOSA)
+ && !arm_is_secure_below_el3(env)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ return CP_ACCESS_OK;
+}
+
+/* Check for traps to "debug ROM" registers, which are controlled
+ * by MDCR_EL2.TDRA for EL2 but by the more general MDCR_EL3.TDA for EL3.
+ */
+static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ int el = arm_current_el(env);
+
+ if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDRA)
+ && !arm_is_secure_below_el3(env)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ return CP_ACCESS_OK;
+}
+
+/* Check for traps to general debug registers, which are controlled
+ * by MDCR_EL2.TDA for EL2 and MDCR_EL3.TDA for EL3.
+ */
+static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ int el = arm_current_el(env);
+
+ if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDA)
+ && !arm_is_secure_below_el3(env)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ return CP_ACCESS_OK;
+}
+
+/* Check for traps to performance monitor registers, which are controlled
+ * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
+ */
+static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ int el = arm_current_el(env);
+
+ if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
+ && !arm_is_secure_below_el3(env)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ return CP_ACCESS_OK;
+}
+
+static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ raw_write(env, ri, value);
+ tlb_flush(CPU(cpu), 1); /* Flush TLB as domain not tracked in TLB */
+}
+
+static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ if (raw_read(env, ri) != value) {
+ /* Unlike real hardware, the QEMU TLB uses virtual addresses,
+ * not modified virtual addresses, so this causes a TLB flush.
+ */
+ tlb_flush(CPU(cpu), 1);
+ raw_write(env, ri, value);
+ }
+}
+
+static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_MPU)
+ && !extended_addresses_enabled(env)) {
+ /* For VMSA (when not using the LPAE long descriptor page table
+ * format) this register includes the ASID, so do a TLB flush.
+ * For PMSA it is purely a process ID and no action is needed.
+ */
+ tlb_flush(CPU(cpu), 1);
+ }
+ raw_write(env, ri, value);
+}
+
+static void tlbiall_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Invalidate all (TLBIALL) */
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ tlb_flush(CPU(cpu), 1);
+}
+
+static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
+}
+
+static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Invalidate by ASID (TLBIASID) */
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ tlb_flush(CPU(cpu), value == 0);
+}
+
+static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
+}
+
+/* IS variants of TLB operations must affect all cores */
+static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *other_cs;
+
+ CPU_FOREACH(other_cs) {
+ tlb_flush(other_cs, 1);
+ }
+}
+
+static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *other_cs;
+
+ CPU_FOREACH(other_cs) {
+ tlb_flush(other_cs, value == 0);
+ }
+}
+
+static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *other_cs;
+
+ CPU_FOREACH(other_cs) {
+ tlb_flush_page(other_cs, value & TARGET_PAGE_MASK);
+ }
+}
+
+static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *other_cs;
+
+ CPU_FOREACH(other_cs) {
+ tlb_flush_page(other_cs, value & TARGET_PAGE_MASK);
+ }
+}
+
+static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
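+    /* TLBIALLNSNH: invalidate all entries for the NS EL1&0 (stage 1+2)
+     * and stage 2 translation regimes.
+     */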
+ CPUState *cs = ENV_GET_CPU(env);
+
+ tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0,
+ ARMMMUIdx_S2NS, -1);
+}
+
+static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *other_cs;
+
+ CPU_FOREACH(other_cs) {
+ tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1,
+ ARMMMUIdx_S12NSE0, ARMMMUIdx_S2NS, -1);
+ }
+}
+
+static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Invalidate by IPA. This has to invalidate any structures that
+ * contain only stage 2 translation information, but does not need
+ * to apply to structures that contain combined stage 1 and stage 2
+ * translation information.
+ * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
+ */
+ CPUState *cs = ENV_GET_CPU(env);
+ uint64_t pageaddr;
+
+ if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
+ return;
+ }
+
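+    /* The IPA to invalidate is in register bits [27:0], i.e. bits [39:12]
+     * of a 40-bit IPA.
+     */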
+ pageaddr = sextract64(value << 12, 0, 40);
+
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S2NS, -1);
+}
+
+static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *other_cs;
+ uint64_t pageaddr;
+
+ if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
+ return;
+ }
+
+ pageaddr = sextract64(value << 12, 0, 40);
+
+ CPU_FOREACH(other_cs) {
+ tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S2NS, -1);
+ }
+}
+
+static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = ENV_GET_CPU(env);
+
+ tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1E2, -1);
+}
+
+static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *other_cs;
+
+ CPU_FOREACH(other_cs) {
+ tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1E2, -1);
+ }
+}
+
+static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *cs = ENV_GET_CPU(env);
+ uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
+
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1E2, -1);
+}
+
+static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *other_cs;
+ uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12);
+
+ CPU_FOREACH(other_cs) {
+ tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1E2, -1);
+ }
+}
+
+static const ARMCPRegInfo cp_reginfo[] = {
+ /* Define the secure and non-secure FCSE identifier CP registers
+ * separately because there is no secure bank in V8 (no _EL3). This allows
+ * the secure register to be properly reset and migrated. There is also no
+ * v8 EL1 version of the register so the non-secure instance stands alone.
+ */
+ { .name = "FCSEIDR(NS)",
+ .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
+ .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
+ .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_ns),
+ .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
+ { .name = "FCSEIDR(S)",
+ .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 0,
+ .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
+ .fieldoffset = offsetof(CPUARMState, cp15.fcseidr_s),
+ .resetvalue = 0, .writefn = fcse_write, .raw_writefn = raw_write, },
+ /* Define the secure and non-secure context identifier CP registers
+ * separately because there is no secure bank in V8 (no _EL3). This allows
+ * the secure register to be properly reset and migrated. In the
+ * non-secure case, the 32-bit register will have reset and migration
+ * disabled during registration as it is handled by the 64-bit instance.
+ */
+ { .name = "CONTEXTIDR_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
+ .access = PL1_RW, .secure = ARM_CP_SECSTATE_NS,
+ .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
+ .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
+ { .name = "CONTEXTIDR(S)", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 0, .crn = 13, .crm = 0, .opc2 = 1,
+ .access = PL1_RW, .secure = ARM_CP_SECSTATE_S,
+ .fieldoffset = offsetof(CPUARMState, cp15.contextidr_s),
+ .resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo not_v8_cp_reginfo[] = {
+ /* NB: Some of these registers exist in v8 but with more precise
+ * definitions that don't use CP_ANY wildcards (mostly in v8_cp_reginfo[]).
+ */
+ /* MMU Domain access control / MPU write buffer control */
+ { .name = "DACR",
+ .cp = 15, .opc1 = CP_ANY, .crn = 3, .crm = CP_ANY, .opc2 = CP_ANY,
+ .access = PL1_RW, .resetvalue = 0,
+ .writefn = dacr_write, .raw_writefn = raw_write,
+ .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
+ offsetoflow32(CPUARMState, cp15.dacr_ns) } },
+ /* ARMv7 allocates a range of implementation defined TLB LOCKDOWN regs.
+ * For v6 and v5, these mappings are overly broad.
+ */
+ { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 0,
+ .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
+ { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 1,
+ .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
+ { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 4,
+ .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
+ { .name = "TLB_LOCKDOWN", .cp = 15, .crn = 10, .crm = 8,
+ .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_NOP },
+ /* Cache maintenance ops; some of this space may be overridden later. */
+ { .name = "CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
+ .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
+ .type = ARM_CP_NOP | ARM_CP_OVERRIDE },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo not_v6_cp_reginfo[] = {
+ /* Not all pre-v6 cores implemented this WFI, so this is slightly
+ * over-broad.
+ */
+ { .name = "WFI_v5", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = 2,
+ .access = PL1_W, .type = ARM_CP_WFI },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo not_v7_cp_reginfo[] = {
+ /* Standard v6 WFI (also used in some pre-v6 cores); not in v7 (which
+ * is UNPREDICTABLE; we choose to NOP as most implementations do).
+ */
+ { .name = "WFI_v6", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
+ .access = PL1_W, .type = ARM_CP_WFI },
+ /* L1 cache lockdown. Not architectural in v6 and earlier but in practice
+ * implemented in 926, 946, 1026, 1136, 1176 and 11MPCore. StrongARM and
+ * OMAPCP will override this space.
+ */
+ { .name = "DLOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_data),
+ .resetvalue = 0 },
+ { .name = "ILOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 0, .opc2 = 1,
+ .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_insn),
+ .resetvalue = 0 },
+ /* v6 doesn't have the cache ID registers but Linux reads them anyway */
+ { .name = "DUMMY", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = CP_ANY,
+ .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
+ .resetvalue = 0 },
+ /* We don't implement pre-v7 debug but most CPUs had at least a DBGDIDR;
+ * implementing it as RAZ means the "debug architecture version" bits
+ * will read as a reserved value, which should cause Linux to not try
+ * to use the debug hardware.
+ */
+ { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
+ .access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+ /* MMU TLB control. Note that the wildcarding means we cover not just
+ * the unified TLB ops but also the dside/iside/inner-shareable variants.
+ */
+ { .name = "TLBIALL", .cp = 15, .crn = 8, .crm = CP_ANY,
+ .opc1 = CP_ANY, .opc2 = 0, .access = PL1_W, .writefn = tlbiall_write,
+ .type = ARM_CP_NO_RAW },
+ { .name = "TLBIMVA", .cp = 15, .crn = 8, .crm = CP_ANY,
+ .opc1 = CP_ANY, .opc2 = 1, .access = PL1_W, .writefn = tlbimva_write,
+ .type = ARM_CP_NO_RAW },
+ { .name = "TLBIASID", .cp = 15, .crn = 8, .crm = CP_ANY,
+ .opc1 = CP_ANY, .opc2 = 2, .access = PL1_W, .writefn = tlbiasid_write,
+ .type = ARM_CP_NO_RAW },
+ { .name = "TLBIMVAA", .cp = 15, .crn = 8, .crm = CP_ANY,
+ .opc1 = CP_ANY, .opc2 = 3, .access = PL1_W, .writefn = tlbimvaa_write,
+ .type = ARM_CP_NO_RAW },
+ { .name = "PRRR", .cp = 15, .crn = 10, .crm = 2,
+ .opc1 = 0, .opc2 = 0, .access = PL1_RW, .type = ARM_CP_NOP },
+ { .name = "NMRR", .cp = 15, .crn = 10, .crm = 2,
+ .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_NOP },
+ REGINFO_SENTINEL
+};
+
+static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ uint32_t mask = 0;
+
+ /* In ARMv8 most bits of CPACR_EL1 are RES0. */
+ if (!arm_feature(env, ARM_FEATURE_V8)) {
+ /* ARMv7 defines bits for unimplemented coprocessors as RAZ/WI.
+ * ASEDIS [31] and D32DIS [30] are both UNK/SBZP without VFP.
+ * TRCDIS [28] is RAZ/WI since we do not implement a trace macrocell.
+ */
+ if (arm_feature(env, ARM_FEATURE_VFP)) {
+ /* VFP coprocessor: cp10 & cp11 [23:20] */
+ mask |= (1 << 31) | (1 << 30) | (0xf << 20);
+
+ if (!arm_feature(env, ARM_FEATURE_NEON)) {
+ /* ASEDIS [31] bit is RAO/WI */
+ value |= (1 << 31);
+ }
+
+ /* VFPv3 and upwards with NEON implement 32 double precision
+ * registers (D0-D31).
+ */
+ if (!arm_feature(env, ARM_FEATURE_NEON) ||
+ !arm_feature(env, ARM_FEATURE_VFP3)) {
+ /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
+ value |= (1 << 30);
+ }
+ }
+ value &= mask;
+ }
+ env->cp15.cpacr_el1 = value;
+}
+
+static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ /* Check if CPACR accesses are to be trapped to EL2 */
+ if (arm_current_el(env) == 1 &&
+ (env->cp15.cptr_el[2] & CPTR_TCPAC) && !arm_is_secure(env)) {
+ return CP_ACCESS_TRAP_EL2;
+ /* Check if CPACR accesses are to be trapped to EL3 */
+ } else if (arm_current_el(env) < 3 &&
+ (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ }
+
+ return CP_ACCESS_OK;
+}
+
+static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ /* Check if CPTR accesses are set to trap to EL3 */
+ if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+
+ return CP_ACCESS_OK;
+}
+
+static const ARMCPRegInfo v6_cp_reginfo[] = {
+ /* prefetch by MVA in v6, NOP in v7 */
+ { .name = "MVA_prefetch",
+ .cp = 15, .crn = 7, .crm = 13, .opc1 = 0, .opc2 = 1,
+ .access = PL1_W, .type = ARM_CP_NOP },
+ /* We need to break the TB after ISB to execute self-modifying code
+ * correctly and also to take any pending interrupts immediately.
+ * So use the arm_cp_write_ignore() function instead of the ARM_CP_NOP flag.
+ */
+ { .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
+ .access = PL0_W, .type = ARM_CP_NO_RAW, .writefn = arm_cp_write_ignore },
+ { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
+ .access = PL0_W, .type = ARM_CP_NOP },
+ { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
+ .access = PL0_W, .type = ARM_CP_NOP },
+ { .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
+ .access = PL1_RW,
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ifar_s),
+ offsetof(CPUARMState, cp15.ifar_ns) },
+ .resetvalue = 0, },
+    /* Watchpoint Fault Address Register: should actually only be present
+ * for 1136, 1176, 11MPCore.
+ */
+ { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
+ { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
+ .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
+ .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
+ .resetvalue = 0, .writefn = cpacr_write },
+ REGINFO_SENTINEL
+};
+
+static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+    /* User accessibility of the performance monitor registers is
+     * controlled by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow
+     * configurable trapping to EL2 or EL3 for other accesses.
+     */
+ int el = arm_current_el(env);
+
+ if (el == 0 && !env->cp15.c9_pmuserenr) {
+ return CP_ACCESS_TRAP;
+ }
+ if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TPM)
+ && !arm_is_secure_below_el3(env)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+
+ return CP_ACCESS_OK;
+}
+
+#ifndef CONFIG_USER_ONLY
+
+static inline bool arm_ccnt_enabled(CPUARMState *env)
+{
+    /* This does not support checking the PMCCFILTR_EL0 register */
+
+ if (!(env->cp15.c9_pmcr & PMCRE)) {
+ return false;
+ }
+
+ return true;
+}
+
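+/* While the cycle counter is enabled, c15_ccnt holds the difference between
+ * the current (scaled) tick count and the guest-visible counter value, so
+ * pmccntr_read() computes ticks - c15_ccnt. pmccntr_sync() flips c15_ccnt
+ * between that delta form and an absolute count; callers bracket changes to
+ * the counter configuration with a pair of sync calls (see pmcr_write and
+ * pmccfiltr_write).
+ */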
+void pmccntr_sync(CPUARMState *env)
+{
+ uint64_t temp_ticks;
+
+ temp_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
+ ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
+
+ if (env->cp15.c9_pmcr & PMCRD) {
+ /* Increment once every 64 processor clock cycles */
+ temp_ticks /= 64;
+ }
+
+ if (arm_ccnt_enabled(env)) {
+ env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
+ }
+}
+
+static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ pmccntr_sync(env);
+
+ if (value & PMCRC) {
+ /* The counter has been reset */
+ env->cp15.c15_ccnt = 0;
+ }
+
+ /* only the DP, X, D and E bits are writable */
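+    /* 0x39 == PMCR.E (bit 0) plus PMCR.D, X and DP (bits [5:3]) */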
+ env->cp15.c9_pmcr &= ~0x39;
+ env->cp15.c9_pmcr |= (value & 0x39);
+
+ pmccntr_sync(env);
+}
+
+static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ uint64_t total_ticks;
+
+ if (!arm_ccnt_enabled(env)) {
+ /* Counter is disabled, do not change value */
+ return env->cp15.c15_ccnt;
+ }
+
+ total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
+ ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
+
+ if (env->cp15.c9_pmcr & PMCRD) {
+ /* Increment once every 64 processor clock cycles */
+ total_ticks /= 64;
+ }
+ return total_ticks - env->cp15.c15_ccnt;
+}
+
+static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ uint64_t total_ticks;
+
+ if (!arm_ccnt_enabled(env)) {
+ /* Counter is disabled, set the absolute value */
+ env->cp15.c15_ccnt = value;
+ return;
+ }
+
+ total_ticks = muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
+ ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
+
+ if (env->cp15.c9_pmcr & PMCRD) {
+ /* Increment once every 64 processor clock cycles */
+ total_ticks /= 64;
+ }
+ env->cp15.c15_ccnt = total_ticks - value;
+}
+
+static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ uint64_t cur_val = pmccntr_read(env, NULL);
+
+ pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
+}
+
+#else /* CONFIG_USER_ONLY */
+
+void pmccntr_sync(CPUARMState *env)
+{
+}
+
+#endif
+
+static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ pmccntr_sync(env);
+ env->cp15.pmccfiltr_el0 = value & 0x7E000000;
+ pmccntr_sync(env);
+}
+
+static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
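+    /* Only the cycle counter enable bit (C, bit 31) is implemented, as we
+     * have no event counters.
+     */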
+ value &= (1 << 31);
+ env->cp15.c9_pmcnten |= value;
+}
+
+static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ value &= (1 << 31);
+ env->cp15.c9_pmcnten &= ~value;
+}
+
+static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ env->cp15.c9_pmovsr &= ~value;
+}
+
+static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ env->cp15.c9_pmxevtyper = value & 0xff;
+}
+
+static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ env->cp15.c9_pmuserenr = value & 1;
+}
+
+static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* We have no event counters so only the C bit can be changed */
+ value &= (1 << 31);
+ env->cp15.c9_pminten |= value;
+}
+
+static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ value &= (1 << 31);
+ env->cp15.c9_pminten &= ~value;
+}
+
+static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+    /* Note that even though the AArch64 view of this register has bits
+     * [10:0] all RES0, we can only mask the bottom 5 bits, to comply with the
+ * architectural requirements for bits which are RES0 only in some
+ * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
+ * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
+ */
+ raw_write(env, ri, value & ~0x1FULL);
+}
+
+static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
+{
+ /* We only mask off bits that are RES0 both for AArch64 and AArch32.
+ * For bits that vary between AArch32/64, code needs to check the
+ * current execution mode before directly using the feature bit.
+ */
+ uint32_t valid_mask = SCR_AARCH64_MASK | SCR_AARCH32_MASK;
+
+ if (!arm_feature(env, ARM_FEATURE_EL2)) {
+ valid_mask &= ~SCR_HCE;
+
+ /* On ARMv7, SMD (or SCD as it is called in v7) is only
+ * supported if EL2 exists. The bit is UNK/SBZP when
+ * EL2 is unavailable. In QEMU ARMv7, we force it to always zero
+ * when EL2 is unavailable.
+ * On ARMv8, this bit is always available.
+ */
+ if (arm_feature(env, ARM_FEATURE_V7) &&
+ !arm_feature(env, ARM_FEATURE_V8)) {
+ valid_mask &= ~SCR_SMD;
+ }
+ }
+
+ /* Clear all-context RES0 bits. */
+ value &= valid_mask;
+ raw_write(env, ri, value);
+}
+
+static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+    /* Acquire the CSSELR index from the bank corresponding to the
+     * security state of the CCSIDR access.
+     */
+ uint32_t index = A32_BANKED_REG_GET(env, csselr,
+ ri->secure & ARM_CP_SECSTATE_S);
+
+ return cpu->ccsidr[index];
+}
+
+static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
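+    /* Only the Level and InD fields (bits [3:0]) are writable */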
+ raw_write(env, ri, value & 0xf);
+}
+
+static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ CPUState *cs = ENV_GET_CPU(env);
+ uint64_t ret = 0;
+
+ if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
+ ret |= CPSR_I;
+ }
+ if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
+ ret |= CPSR_F;
+ }
+ /* External aborts are not possible in QEMU so A bit is always clear */
+ return ret;
+}
+
+static const ARMCPRegInfo v7_cp_reginfo[] = {
+ /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
+ { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
+ .access = PL1_W, .type = ARM_CP_NOP },
+ /* Performance monitors are implementation defined in v7,
+ * but with an ARM recommended set of registers, which we
+ * follow (although we don't actually implement any counters)
+ *
+ * Performance registers fall into three categories:
+ * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
+ * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
+ * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
+ * For the cases controlled by PMUSERENR we must set .access to PL0_RW
+ * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
+ */
+ { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
+ .access = PL0_RW, .type = ARM_CP_ALIAS,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
+ .writefn = pmcntenset_write,
+ .accessfn = pmreg_access,
+ .raw_writefn = raw_write },
+ { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
+ .access = PL0_RW, .accessfn = pmreg_access,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
+ .writefn = pmcntenset_write, .raw_writefn = raw_write },
+ { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
+ .access = PL0_RW,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
+ .accessfn = pmreg_access,
+ .writefn = pmcntenclr_write,
+ .type = ARM_CP_ALIAS },
+ { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
+ .access = PL0_RW, .accessfn = pmreg_access,
+ .type = ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
+ .writefn = pmcntenclr_write },
+ { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
+ .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
+ .accessfn = pmreg_access,
+ .writefn = pmovsr_write,
+ .raw_writefn = raw_write },
+ { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
+ .access = PL0_RW, .accessfn = pmreg_access,
+ .type = ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
+ .writefn = pmovsr_write,
+ .raw_writefn = raw_write },
+ /* Unimplemented so WI. */
+ { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
+ .access = PL0_W, .accessfn = pmreg_access, .type = ARM_CP_NOP },
+ /* Since we don't implement any events, writing to PMSELR is UNPREDICTABLE.
+ * We choose to RAZ/WI.
+ */
+ { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
+ .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
+ .accessfn = pmreg_access },
+#ifndef CONFIG_USER_ONLY
+ { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
+ .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_IO,
+ .readfn = pmccntr_read, .writefn = pmccntr_write32,
+ .accessfn = pmreg_access },
+ { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
+ .access = PL0_RW, .accessfn = pmreg_access,
+ .type = ARM_CP_IO,
+ .readfn = pmccntr_read, .writefn = pmccntr_write, },
+#endif
+ { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
+ .writefn = pmccfiltr_write,
+ .access = PL0_RW, .accessfn = pmreg_access,
+ .type = ARM_CP_IO,
+ .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
+ .resetvalue = 0, },
+ { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
+ .access = PL0_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pmxevtyper),
+ .accessfn = pmreg_access, .writefn = pmxevtyper_write,
+ .raw_writefn = raw_write },
+ /* Unimplemented, RAZ/WI. */
+ { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
+ .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
+ .accessfn = pmreg_access },
+ { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
+ .access = PL0_R | PL1_RW, .accessfn = access_tpm,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
+ .resetvalue = 0,
+ .writefn = pmuserenr_write, .raw_writefn = raw_write },
+ { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
+ .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
+ .resetvalue = 0,
+ .writefn = pmuserenr_write, .raw_writefn = raw_write },
+ { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
+ .access = PL1_RW, .accessfn = access_tpm,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
+ .resetvalue = 0,
+ .writefn = pmintenset_write, .raw_writefn = raw_write },
+ { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
+ .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
+ .writefn = pmintenclr_write, },
+ { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
+ .access = PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
+ .writefn = pmintenclr_write },
+ { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW, .writefn = vbar_write,
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
+ offsetof(CPUARMState, cp15.vbar_ns) },
+ .resetvalue = 0 },
+ { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
+ .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
+ { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
+ .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
+ offsetof(CPUARMState, cp15.csselr_ns) } },
+ /* Auxiliary ID register: this actually has an IMPDEF value but for now
+ * just RAZ for all cores:
+ */
+ { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
+ .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+ /* Auxiliary fault status registers: these also are IMPDEF, and we
+ * choose to RAZ/WI for all cores.
+ */
+ { .name = "AFSR0_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ /* MAIR can just read-as-written because we don't implement caches
+ * and so don't need to care about memory attributes.
+ */
+ { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
+ .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
+ .resetvalue = 0 },
+ { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 2, .opc2 = 0,
+ .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[3]),
+ .resetvalue = 0 },
+ /* For non-long-descriptor page tables these are PRRR and NMRR;
+ * regardless they still act as reads-as-written for QEMU.
+ */
+ /* MAIR0/1 are defined separately from their 64-bit counterpart which
+ * allows them to assign the correct fieldoffset based on the endianness
+ * handled in the field definitions.
+ */
+ { .name = "MAIR0", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair0_s),
+ offsetof(CPUARMState, cp15.mair0_ns) },
+ .resetfn = arm_cp_reset_ignore },
+ { .name = "MAIR1", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.mair1_s),
+ offsetof(CPUARMState, cp15.mair1_ns) },
+ .resetfn = arm_cp_reset_ignore },
+ { .name = "ISR_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 1, .opc2 = 0,
+ .type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
+ /* 32 bit ITLB invalidates */
+ { .name = "ITLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 0,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
+ { .name = "ITLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 1,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
+ { .name = "ITLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 5, .opc2 = 2,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
+ /* 32 bit DTLB invalidates */
+ { .name = "DTLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 0,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
+ { .name = "DTLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 1,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
+ { .name = "DTLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 6, .opc2 = 2,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
+ /* 32 bit TLB invalidates */
+ { .name = "TLBIALL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_write },
+ { .name = "TLBIMVA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
+ { .name = "TLBIASID", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiasid_write },
+ { .name = "TLBIMVAA", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo v7mp_cp_reginfo[] = {
+ /* 32 bit TLB invalidates, Inner Shareable */
+ { .name = "TLBIALLIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbiall_is_write },
+ { .name = "TLBIMVAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
+ { .name = "TLBIASIDIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
+ .type = ARM_CP_NO_RAW, .access = PL1_W,
+ .writefn = tlbiasid_is_write },
+ { .name = "TLBIMVAAIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
+ .type = ARM_CP_NO_RAW, .access = PL1_W,
+ .writefn = tlbimvaa_is_write },
+ REGINFO_SENTINEL
+};
+
+static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ value &= 1;
+ env->teecr = value;
+}
+
+static CPAccessResult teehbr_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
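+    /* EL0 accesses to TEEHBR are trapped when TEECR bit 0 is set */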
+ if (arm_current_el(env) == 0 && (env->teecr & 1)) {
+ return CP_ACCESS_TRAP;
+ }
+ return CP_ACCESS_OK;
+}
+
+static const ARMCPRegInfo t2ee_cp_reginfo[] = {
+ { .name = "TEECR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 6, .opc2 = 0,
+ .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, teecr),
+ .resetvalue = 0,
+ .writefn = teecr_write },
+ { .name = "TEEHBR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 6, .opc2 = 0,
+ .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, teehbr),
+ .accessfn = teehbr_access, .resetvalue = 0 },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo v6k_cp_reginfo[] = {
+ { .name = "TPIDR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .opc2 = 2, .crn = 13, .crm = 0,
+ .access = PL0_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[0]), .resetvalue = 0 },
+ { .name = "TPIDRURW", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 2,
+ .access = PL0_RW,
+ .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrurw_s),
+ offsetoflow32(CPUARMState, cp15.tpidrurw_ns) },
+ .resetfn = arm_cp_reset_ignore },
+ { .name = "TPIDRRO_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .opc2 = 3, .crn = 13, .crm = 0,
+ .access = PL0_R|PL1_W,
+ .fieldoffset = offsetof(CPUARMState, cp15.tpidrro_el[0]),
+ .resetvalue = 0},
+ { .name = "TPIDRURO", .cp = 15, .crn = 13, .crm = 0, .opc1 = 0, .opc2 = 3,
+ .access = PL0_R|PL1_W,
+ .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidruro_s),
+ offsetoflow32(CPUARMState, cp15.tpidruro_ns) },
+ .resetfn = arm_cp_reset_ignore },
+ { .name = "TPIDR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .opc2 = 4, .crn = 13, .crm = 0,
+ .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[1]), .resetvalue = 0 },
+ { .name = "TPIDRPRW", .opc1 = 0, .cp = 15, .crn = 13, .crm = 0, .opc2 = 4,
+ .access = PL1_RW,
+ .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tpidrprw_s),
+ offsetoflow32(CPUARMState, cp15.tpidrprw_ns) },
+ .resetvalue = 0 },
+ REGINFO_SENTINEL
+};
+
+#ifndef CONFIG_USER_ONLY
+
+static CPAccessResult gt_cntfrq_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ /* CNTFRQ: not visible from PL0 if both PL0PCTEN and PL0VCTEN are zero.
+ * Writable only at the highest implemented exception level.
+ */
+ int el = arm_current_el(env);
+
+ switch (el) {
+ case 0:
+ if (!extract32(env->cp15.c14_cntkctl, 0, 2)) {
+ return CP_ACCESS_TRAP;
+ }
+ break;
+ case 1:
+ if (!isread && ri->state == ARM_CP_STATE_AA32 &&
+ arm_is_secure_below_el3(env)) {
+ /* Accesses from 32-bit Secure EL1 UNDEF (*not* trap to EL3!) */
+ return CP_ACCESS_TRAP_UNCATEGORIZED;
+ }
+ break;
+ case 2:
+ case 3:
+ break;
+ }
+
+ if (!isread && el < arm_highest_el(env)) {
+ return CP_ACCESS_TRAP_UNCATEGORIZED;
+ }
+
+ return CP_ACCESS_OK;
+}
+
+static CPAccessResult gt_counter_access(CPUARMState *env, int timeridx,
+ bool isread)
+{
+ unsigned int cur_el = arm_current_el(env);
+ bool secure = arm_is_secure(env);
+
+    /* CNT[PV]CT: not visible from PL0 if EL0[PV]CTEN is zero */
+ if (cur_el == 0 &&
+ !extract32(env->cp15.c14_cntkctl, timeridx, 1)) {
+ return CP_ACCESS_TRAP;
+ }
+
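+    /* CNTHCTL_EL2.EL1PCTEN (bit 0) gates NS EL0/EL1 access to the
+     * physical counter.
+     */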
+ if (arm_feature(env, ARM_FEATURE_EL2) &&
+ timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
+ !extract32(env->cp15.cnthctl_el2, 0, 1)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ return CP_ACCESS_OK;
+}
+
+static CPAccessResult gt_timer_access(CPUARMState *env, int timeridx,
+ bool isread)
+{
+ unsigned int cur_el = arm_current_el(env);
+ bool secure = arm_is_secure(env);
+
+ /* CNT[PV]_CVAL, CNT[PV]_CTL, CNT[PV]_TVAL: not visible from PL0 if
+ * EL0[PV]TEN is zero.
+ */
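+    /* EL0PTEN is CNTKCTL bit 9 and EL0VTEN is bit 8; GTIMER_PHYS is 0 and
+     * GTIMER_VIRT is 1, hence the "9 - timeridx" bit index.
+     */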
+ if (cur_el == 0 &&
+ !extract32(env->cp15.c14_cntkctl, 9 - timeridx, 1)) {
+ return CP_ACCESS_TRAP;
+ }
+
+ if (arm_feature(env, ARM_FEATURE_EL2) &&
+ timeridx == GTIMER_PHYS && !secure && cur_el < 2 &&
+ !extract32(env->cp15.cnthctl_el2, 1, 1)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ return CP_ACCESS_OK;
+}
+
+static CPAccessResult gt_pct_access(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ bool isread)
+{
+ return gt_counter_access(env, GTIMER_PHYS, isread);
+}
+
+static CPAccessResult gt_vct_access(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ bool isread)
+{
+ return gt_counter_access(env, GTIMER_VIRT, isread);
+}
+
+static CPAccessResult gt_ptimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ return gt_timer_access(env, GTIMER_PHYS, isread);
+}
+
+static CPAccessResult gt_vtimer_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ return gt_timer_access(env, GTIMER_VIRT, isread);
+}
+
+static CPAccessResult gt_stimer_access(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ bool isread)
+{
+ /* The AArch64 register view of the secure physical timer is
+ * always accessible from EL3, and configurably accessible from
+ * Secure EL1.
+ */
+ switch (arm_current_el(env)) {
+ case 1:
+ if (!arm_is_secure(env)) {
+ return CP_ACCESS_TRAP;
+ }
+ if (!(env->cp15.scr_el3 & SCR_ST)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ return CP_ACCESS_OK;
+ case 0:
+ case 2:
+ return CP_ACCESS_TRAP;
+ case 3:
+ return CP_ACCESS_OK;
+ default:
+ g_assert_not_reached();
+ }
+}
+
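+/* The system counter runs at QEMU_CLOCK_VIRTUAL (nanosecond) resolution
+ * divided by GTIMER_SCALE; this matches the CNTFRQ_EL0 reset value of
+ * (1000 * 1000 * 1000) / GTIMER_SCALE below.
+ */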
+static uint64_t gt_get_countervalue(CPUARMState *env)
+{
+ return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / GTIMER_SCALE;
+}
+
+static void gt_recalc_timer(ARMCPU *cpu, int timeridx)
+{
+ ARMGenericTimer *gt = &cpu->env.cp15.c14_timer[timeridx];
+
+ if (gt->ctl & 1) {
+ /* Timer enabled: calculate and set current ISTATUS, irq, and
+ * reset timer to when ISTATUS next has to change
+ */
+ uint64_t offset = timeridx == GTIMER_VIRT ?
+ cpu->env.cp15.cntvoff_el2 : 0;
+ uint64_t count = gt_get_countervalue(&cpu->env);
+ /* Note that this must be unsigned 64 bit arithmetic: */
+ int istatus = count - offset >= gt->cval;
+ uint64_t nexttick;
+ int irqstate;
+
+ gt->ctl = deposit32(gt->ctl, 2, 1, istatus);
+
+ irqstate = (istatus && !(gt->ctl & 2));
+ qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
+
+ if (istatus) {
+ /* Next transition is when count rolls back over to zero */
+ nexttick = UINT64_MAX;
+ } else {
+ /* Next transition is when we hit cval */
+ nexttick = gt->cval + offset;
+ }
+ /* Note that the desired next expiry time might be beyond the
+ * signed-64-bit range of a QEMUTimer -- in this case we just
+ * set the timer for as far in the future as possible. When the
+ * timer expires we will reset the timer for any remaining period.
+ */
+ if (nexttick > INT64_MAX / GTIMER_SCALE) {
+ nexttick = INT64_MAX / GTIMER_SCALE;
+ }
+ timer_mod(cpu->gt_timer[timeridx], nexttick);
+ trace_arm_gt_recalc(timeridx, irqstate, nexttick);
+ } else {
+ /* Timer disabled: ISTATUS and timer output always clear */
+ gt->ctl &= ~4;
+ qemu_set_irq(cpu->gt_timer_outputs[timeridx], 0);
+ timer_del(cpu->gt_timer[timeridx]);
+ trace_arm_gt_recalc_disabled(timeridx);
+ }
+}
+
+static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri,
+ int timeridx)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ timer_del(cpu->gt_timer[timeridx]);
+}
+
+static uint64_t gt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return gt_get_countervalue(env);
+}
+
+static uint64_t gt_virt_cnt_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return gt_get_countervalue(env) - env->cp15.cntvoff_el2;
+}
+
+static void gt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ int timeridx,
+ uint64_t value)
+{
+ trace_arm_gt_cval_write(timeridx, value);
+ env->cp15.c14_timer[timeridx].cval = value;
+ gt_recalc_timer(arm_env_get_cpu(env), timeridx);
+}
+
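+/* TVAL is the signed 32-bit downcounter view of the timer:
+ * TVAL = CVAL - (counter - offset). Reads truncate to 32 bits and writes
+ * sign-extend the value back into CVAL.
+ */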
+static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri,
+ int timeridx)
+{
+ uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;
+
+ return (uint32_t)(env->cp15.c14_timer[timeridx].cval -
+ (gt_get_countervalue(env) - offset));
+}
+
+static void gt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ int timeridx,
+ uint64_t value)
+{
+ uint64_t offset = timeridx == GTIMER_VIRT ? env->cp15.cntvoff_el2 : 0;
+
+ trace_arm_gt_tval_write(timeridx, value);
+ env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset +
+ sextract64(value, 0, 32);
+ gt_recalc_timer(arm_env_get_cpu(env), timeridx);
+}
+
+static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ int timeridx,
+ uint64_t value)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ uint32_t oldval = env->cp15.c14_timer[timeridx].ctl;
+
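+    /* Only ENABLE (bit 0) and IMASK (bit 1) are writable; ISTATUS (bit 2)
+     * is recomputed by gt_recalc_timer().
+     */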
+ trace_arm_gt_ctl_write(timeridx, value);
+ env->cp15.c14_timer[timeridx].ctl = deposit64(oldval, 0, 2, value);
+ if ((oldval ^ value) & 1) {
+ /* Enable toggled */
+ gt_recalc_timer(cpu, timeridx);
+ } else if ((oldval ^ value) & 2) {
+ /* IMASK toggled: don't need to recalculate,
+ * just set the interrupt line based on ISTATUS
+ */
+ int irqstate = (oldval & 4) && !(value & 2);
+
+ trace_arm_gt_imask_toggle(timeridx, irqstate);
+ qemu_set_irq(cpu->gt_timer_outputs[timeridx], irqstate);
+ }
+}
+
+static void gt_phys_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ gt_timer_reset(env, ri, GTIMER_PHYS);
+}
+
+static void gt_phys_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_cval_write(env, ri, GTIMER_PHYS, value);
+}
+
+static uint64_t gt_phys_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return gt_tval_read(env, ri, GTIMER_PHYS);
+}
+
+static void gt_phys_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_tval_write(env, ri, GTIMER_PHYS, value);
+}
+
+static void gt_phys_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_ctl_write(env, ri, GTIMER_PHYS, value);
+}
+
+static void gt_virt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ gt_timer_reset(env, ri, GTIMER_VIRT);
+}
+
+static void gt_virt_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_cval_write(env, ri, GTIMER_VIRT, value);
+}
+
+static uint64_t gt_virt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return gt_tval_read(env, ri, GTIMER_VIRT);
+}
+
+static void gt_virt_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_tval_write(env, ri, GTIMER_VIRT, value);
+}
+
+static void gt_virt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_ctl_write(env, ri, GTIMER_VIRT, value);
+}
+
+static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ trace_arm_gt_cntvoff_write(value);
+ raw_write(env, ri, value);
+ gt_recalc_timer(cpu, GTIMER_VIRT);
+}
+
+static void gt_hyp_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ gt_timer_reset(env, ri, GTIMER_HYP);
+}
+
+static void gt_hyp_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_cval_write(env, ri, GTIMER_HYP, value);
+}
+
+static uint64_t gt_hyp_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return gt_tval_read(env, ri, GTIMER_HYP);
+}
+
+static void gt_hyp_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_tval_write(env, ri, GTIMER_HYP, value);
+}
+
+static void gt_hyp_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_ctl_write(env, ri, GTIMER_HYP, value);
+}
+
+static void gt_sec_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ gt_timer_reset(env, ri, GTIMER_SEC);
+}
+
+static void gt_sec_cval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_cval_write(env, ri, GTIMER_SEC, value);
+}
+
+static uint64_t gt_sec_tval_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return gt_tval_read(env, ri, GTIMER_SEC);
+}
+
+static void gt_sec_tval_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_tval_write(env, ri, GTIMER_SEC, value);
+}
+
+static void gt_sec_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ gt_ctl_write(env, ri, GTIMER_SEC, value);
+}
+
+void arm_gt_ptimer_cb(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+
+ gt_recalc_timer(cpu, GTIMER_PHYS);
+}
+
+void arm_gt_vtimer_cb(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+
+ gt_recalc_timer(cpu, GTIMER_VIRT);
+}
+
+void arm_gt_htimer_cb(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+
+ gt_recalc_timer(cpu, GTIMER_HYP);
+}
+
+void arm_gt_stimer_cb(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+
+ gt_recalc_timer(cpu, GTIMER_SEC);
+}
+
+static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
+ /* Note that CNTFRQ is purely reads-as-written for the benefit
+ * of software; writing it doesn't actually change the timer frequency.
+ * Our reset value matches the fixed frequency we implement the timer at.
+ */
+ { .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
+ .type = ARM_CP_ALIAS,
+ .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
+ },
+ { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
+ .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
+ .resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
+ },
+ /* overall control: mostly access permissions */
+ { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
+ .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
+ .resetvalue = 0,
+ },
+ /* per-timer control */
+ { .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
+ .secure = ARM_CP_SECSTATE_NS,
+ .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
+ .accessfn = gt_ptimer_access,
+ .fieldoffset = offsetoflow32(CPUARMState,
+ cp15.c14_timer[GTIMER_PHYS].ctl),
+ .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
+ },
+ { .name = "CNTP_CTL(S)",
+ .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
+ .secure = ARM_CP_SECSTATE_S,
+ .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
+ .accessfn = gt_ptimer_access,
+ .fieldoffset = offsetoflow32(CPUARMState,
+ cp15.c14_timer[GTIMER_SEC].ctl),
+ .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
+ },
+ { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
+ .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
+ .accessfn = gt_ptimer_access,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
+ .resetvalue = 0,
+ .writefn = gt_phys_ctl_write, .raw_writefn = raw_write,
+ },
+ { .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
+ .type = ARM_CP_IO | ARM_CP_ALIAS, .access = PL1_RW | PL0_R,
+ .accessfn = gt_vtimer_access,
+ .fieldoffset = offsetoflow32(CPUARMState,
+ cp15.c14_timer[GTIMER_VIRT].ctl),
+ .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
+ },
+ { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
+ .type = ARM_CP_IO, .access = PL1_RW | PL0_R,
+ .accessfn = gt_vtimer_access,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
+ .resetvalue = 0,
+ .writefn = gt_virt_ctl_write, .raw_writefn = raw_write,
+ },
+ /* TimerValue views: a 32 bit downcounting view of the underlying state */
+ { .name = "CNTP_TVAL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
+ .secure = ARM_CP_SECSTATE_NS,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
+ .accessfn = gt_ptimer_access,
+ .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
+ },
+ { .name = "CNTP_TVAL(S)",
+ .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 0,
+ .secure = ARM_CP_SECSTATE_S,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
+ .accessfn = gt_ptimer_access,
+ .readfn = gt_sec_tval_read, .writefn = gt_sec_tval_write,
+ },
+ { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
+ .accessfn = gt_ptimer_access, .resetfn = gt_phys_timer_reset,
+ .readfn = gt_phys_tval_read, .writefn = gt_phys_tval_write,
+ },
+ { .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
+ .accessfn = gt_vtimer_access,
+ .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
+ },
+ { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW | PL0_R,
+ .accessfn = gt_vtimer_access, .resetfn = gt_virt_timer_reset,
+ .readfn = gt_virt_tval_read, .writefn = gt_virt_tval_write,
+ },
+ /* The counter itself */
+ { .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
+ .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
+ .accessfn = gt_pct_access,
+ .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
+ },
+ { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
+ .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
+ .accessfn = gt_pct_access, .readfn = gt_cnt_read,
+ },
+ { .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
+ .access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_RAW | ARM_CP_IO,
+ .accessfn = gt_vct_access,
+ .readfn = gt_virt_cnt_read, .resetfn = arm_cp_reset_ignore,
+ },
+ { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
+ .access = PL0_R, .type = ARM_CP_NO_RAW | ARM_CP_IO,
+ .accessfn = gt_vct_access, .readfn = gt_virt_cnt_read,
+ },
+ /* Comparison value, indicating when the timer goes off */
+ { .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
+ .secure = ARM_CP_SECSTATE_NS,
+ .access = PL1_RW | PL0_R,
+ .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
+ .accessfn = gt_ptimer_access,
+ .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
+ },
+ { .name = "CNTP_CVAL(S)", .cp = 15, .crm = 14, .opc1 = 2,
+ .secure = ARM_CP_SECSTATE_S,
+ .access = PL1_RW | PL0_R,
+ .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
+ .accessfn = gt_ptimer_access,
+ .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
+ },
+ { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
+ .access = PL1_RW | PL0_R,
+ .type = ARM_CP_IO,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
+ .resetvalue = 0, .accessfn = gt_ptimer_access,
+ .writefn = gt_phys_cval_write, .raw_writefn = raw_write,
+ },
+ { .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
+ .access = PL1_RW | PL0_R,
+ .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
+ .accessfn = gt_vtimer_access,
+ .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
+ },
+ { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
+ .access = PL1_RW | PL0_R,
+ .type = ARM_CP_IO,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
+ .resetvalue = 0, .accessfn = gt_vtimer_access,
+ .writefn = gt_virt_cval_write, .raw_writefn = raw_write,
+ },
+ /* Secure timer -- this is actually restricted to only EL3
+ * and configurably Secure-EL1 via the accessfn.
+ */
+ { .name = "CNTPS_TVAL_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 0,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL1_RW,
+ .accessfn = gt_stimer_access,
+ .readfn = gt_sec_tval_read,
+ .writefn = gt_sec_tval_write,
+ .resetfn = gt_sec_timer_reset,
+ },
+ { .name = "CNTPS_CTL_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 1,
+ .type = ARM_CP_IO, .access = PL1_RW,
+ .accessfn = gt_stimer_access,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].ctl),
+ .resetvalue = 0,
+ .writefn = gt_sec_ctl_write, .raw_writefn = raw_write,
+ },
+ { .name = "CNTPS_CVAL_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 7, .crn = 14, .crm = 2, .opc2 = 2,
+ .type = ARM_CP_IO, .access = PL1_RW,
+ .accessfn = gt_stimer_access,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_SEC].cval),
+ .writefn = gt_sec_cval_write, .raw_writefn = raw_write,
+ },
+ REGINFO_SENTINEL
+};
+
+#else
+/* In user-mode none of the generic timer registers are accessible,
+ * and their implementation depends on QEMU_CLOCK_VIRTUAL and qdev gpio outputs,
+ * so instead just don't register any of them.
+ */
+static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
+ REGINFO_SENTINEL
+};
+
+#endif
+
+static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
+{
+ if (arm_feature(env, ARM_FEATURE_LPAE)) {
+ raw_write(env, ri, value);
+ } else if (arm_feature(env, ARM_FEATURE_V7)) {
+ raw_write(env, ri, value & 0xfffff6ff);
+ } else {
+ raw_write(env, ri, value & 0xfffff1ff);
+ }
+}
+
+#ifndef CONFIG_USER_ONLY
+/* get_phys_addr() isn't present for user-mode-only targets */
+
+static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (ri->opc2 & 4) {
+ /* The ATS12NSO* operations must trap to EL3 if executed in
+ * Secure EL1 (which can only happen if EL3 is AArch64).
+ * They are simply UNDEF if executed from NS EL1.
+ * They function normally from EL2 or EL3.
+ */
+ if (arm_current_el(env) == 1) {
+ if (arm_is_secure_below_el3(env)) {
+ return CP_ACCESS_TRAP_UNCATEGORIZED_EL3;
+ }
+ return CP_ACCESS_TRAP_UNCATEGORIZED;
+ }
+ }
+ return CP_ACCESS_OK;
+}
+
+static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
+ int access_type, ARMMMUIdx mmu_idx)
+{
+ hwaddr phys_addr;
+ target_ulong page_size;
+ int prot;
+ uint32_t fsr;
+ bool ret;
+ uint64_t par64;
+ MemTxAttrs attrs = {};
+ ARMMMUFaultInfo fi = {};
+
+ ret = get_phys_addr(env, value, access_type, mmu_idx,
+ &phys_addr, &attrs, &prot, &page_size, &fsr, &fi);
+ if (extended_addresses_enabled(env)) {
+ /* fsr is a DFSR/IFSR value for the long descriptor
+ * translation table format, but with WnR always clear.
+ * Convert it to a 64-bit PAR.
+ */
+ par64 = (1 << 11); /* LPAE bit always set */
+ if (!ret) {
+ par64 |= phys_addr & ~0xfffULL;
+ if (!attrs.secure) {
+ par64 |= (1 << 9); /* NS */
+ }
+ /* We don't set the ATTR or SH fields in the PAR. */
+ } else {
+ par64 |= 1; /* F */
+ par64 |= (fsr & 0x3f) << 1; /* FS */
+ /* Note that S2WLK and FSTAGE are always zero, because we don't
+ * implement virtualization and therefore there can't be a stage 2
+ * fault.
+ */
+ }
+ } else {
+ /* fsr is a DFSR/IFSR value for the short descriptor
+ * translation table format (with WnR always clear).
+ * Convert it to a 32-bit PAR.
+ */
+ if (!ret) {
+ /* We do not set any attribute bits in the PAR */
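+            /* A 16MB supersection reports PA[31:24] and sets the SS bit
+             * (bit 1) in the 32-bit PAR.
+             */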
+ if (page_size == (1 << 24)
+ && arm_feature(env, ARM_FEATURE_V7)) {
+ par64 = (phys_addr & 0xff000000) | (1 << 1);
+ } else {
+ par64 = phys_addr & 0xfffff000;
+ }
+ if (!attrs.secure) {
+ par64 |= (1 << 9); /* NS */
+ }
+ } else {
+ par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
+ ((fsr & 0xf) << 1) | 1;
+ }
+ }
+ return par64;
+}
+
+static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
+{
+ int access_type = ri->opc2 & 1;
+ uint64_t par64;
+ ARMMMUIdx mmu_idx;
+ int el = arm_current_el(env);
+ bool secure = arm_is_secure_below_el3(env);
+
+ switch (ri->opc2 & 6) {
+ case 0:
+ /* stage 1 current state PL1: ATS1CPR, ATS1CPW */
+ switch (el) {
+ case 3:
+ mmu_idx = ARMMMUIdx_S1E3;
+ break;
+ case 2:
+ mmu_idx = ARMMMUIdx_S1NSE1;
+ break;
+ case 1:
+ mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+ case 2:
+ /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
+ switch (el) {
+ case 3:
+ mmu_idx = ARMMMUIdx_S1SE0;
+ break;
+ case 2:
+ mmu_idx = ARMMMUIdx_S1NSE0;
+ break;
+ case 1:
+ mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+ case 4:
+ /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
+ mmu_idx = ARMMMUIdx_S12NSE1;
+ break;
+ case 6:
+ /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
+ mmu_idx = ARMMMUIdx_S12NSE0;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ par64 = do_ats_write(env, value, access_type, mmu_idx);
+
+ A32_BANKED_CURRENT_REG_SET(env, par, par64);
+}
+
+static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ int access_type = ri->opc2 & 1;
+ uint64_t par64;
+
+ par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S2NS);
+
+ A32_BANKED_CURRENT_REG_SET(env, par, par64);
+}
+
+static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_current_el(env) == 3 && !(env->cp15.scr_el3 & SCR_NS)) {
+ return CP_ACCESS_TRAP;
+ }
+ return CP_ACCESS_OK;
+}
+
+static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ int access_type = ri->opc2 & 1;
+ ARMMMUIdx mmu_idx;
+ int secure = arm_is_secure_below_el3(env);
+
+ switch (ri->opc2 & 6) {
+ case 0:
+ switch (ri->opc1) {
+ case 0: /* AT S1E1R, AT S1E1W */
+ mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S1NSE1;
+ break;
+ case 4: /* AT S1E2R, AT S1E2W */
+ mmu_idx = ARMMMUIdx_S1E2;
+ break;
+ case 6: /* AT S1E3R, AT S1E3W */
+ mmu_idx = ARMMMUIdx_S1E3;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+ case 2: /* AT S1E0R, AT S1E0W */
+ mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S1NSE0;
+ break;
+ case 4: /* AT S12E1R, AT S12E1W */
+ mmu_idx = secure ? ARMMMUIdx_S1SE1 : ARMMMUIdx_S12NSE1;
+ break;
+ case 6: /* AT S12E0R, AT S12E0W */
+ mmu_idx = secure ? ARMMMUIdx_S1SE0 : ARMMMUIdx_S12NSE0;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx);
+}
+#endif
+
+static const ARMCPRegInfo vapa_cp_reginfo[] = {
+ { .name = "PAR", .cp = 15, .crn = 7, .crm = 4, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW, .resetvalue = 0,
+ .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
+ offsetoflow32(CPUARMState, cp15.par_ns) },
+ .writefn = par_write },
+#ifndef CONFIG_USER_ONLY
+ /* This underdecoding is safe because the reginfo is NO_RAW. */
+ { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
+ .access = PL1_W, .accessfn = ats_access,
+ .writefn = ats_write, .type = ARM_CP_NO_RAW },
+#endif
+ REGINFO_SENTINEL
+};
+
+/* Return basic MPU access permission bits. */
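+/* (The extended-format registers keep each region's access permission field
+ * in a 4-bit slot, while the basic format packs two bits per region; this
+ * helper and extended_mpu_ap_bits() below convert between the two layouts,
+ * e.g. basic 0x35 <-> extended 0x311.)
+ */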
+static uint32_t simple_mpu_ap_bits(uint32_t val)
+{
+ uint32_t ret;
+ uint32_t mask;
+ int i;
+ ret = 0;
+ mask = 3;
+ for (i = 0; i < 16; i += 2) {
+ ret |= (val >> i) & mask;
+ mask <<= 2;
+ }
+ return ret;
+}
+
+/* Pad basic MPU access permission bits to extended format. */
+static uint32_t extended_mpu_ap_bits(uint32_t val)
+{
+ uint32_t ret;
+ uint32_t mask;
+ int i;
+ ret = 0;
+ mask = 3;
+ for (i = 0; i < 16; i += 2) {
+ ret |= (val & mask) << i;
+ mask <<= 2;
+ }
+ return ret;
+}
+
+static void pmsav5_data_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ env->cp15.pmsav5_data_ap = extended_mpu_ap_bits(value);
+}
+
+static uint64_t pmsav5_data_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return simple_mpu_ap_bits(env->cp15.pmsav5_data_ap);
+}
+
+static void pmsav5_insn_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ env->cp15.pmsav5_insn_ap = extended_mpu_ap_bits(value);
+}
+
+static uint64_t pmsav5_insn_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return simple_mpu_ap_bits(env->cp15.pmsav5_insn_ap);
+}
+
+static uint64_t pmsav7_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
+
+ if (!u32p) {
+ return 0;
+ }
+
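+ /* RGNR selects which region's word within the array is accessed. */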
+ u32p += env->cp15.c6_rgnr;
+ return *u32p;
+}
+
+static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
+
+ if (!u32p) {
+ return;
+ }
+
+ u32p += env->cp15.c6_rgnr;
+ tlb_flush(CPU(cpu), 1); /* Mappings may have changed - purge! */
+ *u32p = value;
+}
+
+static void pmsav7_reset(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
+
+ if (!u32p) {
+ return;
+ }
+
+ memset(u32p, 0, sizeof(*u32p) * cpu->pmsav7_dregion);
+}
+
+static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ uint32_t nrgs = cpu->pmsav7_dregion;
+
+ if (value >= nrgs) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "PMSAv7 RGNR write >= # supported regions, %" PRIu32
+ " > %" PRIu32 "\n", (uint32_t)value, nrgs);
+ return;
+ }
+
+ raw_write(env, ri, value);
+}
+
+static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
+ { .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_NO_RAW,
+ .fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
+ .readfn = pmsav7_read, .writefn = pmsav7_write, .resetfn = pmsav7_reset },
+ { .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
+ .access = PL1_RW, .type = ARM_CP_NO_RAW,
+ .fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
+ .readfn = pmsav7_read, .writefn = pmsav7_write, .resetfn = pmsav7_reset },
+ { .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
+ .access = PL1_RW, .type = ARM_CP_NO_RAW,
+ .fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
+ .readfn = pmsav7_read, .writefn = pmsav7_write, .resetfn = pmsav7_reset },
+ { .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
+ .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.c6_rgnr),
+ .writefn = pmsav7_rgnr_write },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
+ { .name = "DATA_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
+ .readfn = pmsav5_data_ap_read, .writefn = pmsav5_data_ap_write, },
+ { .name = "INSN_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
+ .access = PL1_RW, .type = ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
+ .readfn = pmsav5_insn_ap_read, .writefn = pmsav5_insn_ap_write, },
+ { .name = "DATA_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 2,
+ .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_data_ap),
+ .resetvalue = 0, },
+ { .name = "INSN_EXT_AP", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 3,
+ .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.pmsav5_insn_ap),
+ .resetvalue = 0, },
+ { .name = "DCACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.c2_data), .resetvalue = 0, },
+ { .name = "ICACHE_CFG", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
+ .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.c2_insn), .resetvalue = 0, },
+ /* Protection region base and size registers */
+ { .name = "946_PRBS0", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0,
+ .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.c6_region[0]) },
+ { .name = "946_PRBS1", .cp = 15, .crn = 6, .crm = 1, .opc1 = 0,
+ .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.c6_region[1]) },
+ { .name = "946_PRBS2", .cp = 15, .crn = 6, .crm = 2, .opc1 = 0,
+ .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.c6_region[2]) },
+ { .name = "946_PRBS3", .cp = 15, .crn = 6, .crm = 3, .opc1 = 0,
+ .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.c6_region[3]) },
+ { .name = "946_PRBS4", .cp = 15, .crn = 6, .crm = 4, .opc1 = 0,
+ .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.c6_region[4]) },
+ { .name = "946_PRBS5", .cp = 15, .crn = 6, .crm = 5, .opc1 = 0,
+ .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.c6_region[5]) },
+ { .name = "946_PRBS6", .cp = 15, .crn = 6, .crm = 6, .opc1 = 0,
+ .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.c6_region[6]) },
+ { .name = "946_PRBS7", .cp = 15, .crn = 6, .crm = 7, .opc1 = 0,
+ .opc2 = CP_ANY, .access = PL1_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.c6_region[7]) },
+ REGINFO_SENTINEL
+};
+
+static void vmsa_ttbcr_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ TCR *tcr = raw_ptr(env, ri);
+ int maskshift = extract32(value, 0, 3);
+
+ if (!arm_feature(env, ARM_FEATURE_V8)) {
+ if (arm_feature(env, ARM_FEATURE_LPAE) && (value & TTBCR_EAE)) {
+ /* Pre-ARMv8, bits [21:19], [15:14] and [6:3] are UNK/SBZP when
+ * using the Long-descriptor translation table format */
+ value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
+ } else if (arm_feature(env, ARM_FEATURE_EL3)) {
+ /* In an implementation that includes the Security Extensions
+ * TTBCR has additional fields PD0 [4] and PD1 [5] for
+ * Short-descriptor translation table format.
+ */
+ value &= TTBCR_PD1 | TTBCR_PD0 | TTBCR_N;
+ } else {
+ value &= TTBCR_N;
+ }
+ }
+
+ /* Update the masks corresponding to the TCR bank being written.
+ * Note that we always calculate mask and base_mask, but
+ * they are only used for short-descriptor tables (i.e. if EAE is 0);
+ * for long-descriptor tables the TCR fields are used differently
+ * and the mask and base_mask values are meaningless.
+ */
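+ /* For example, maskshift == 2 gives mask == 0xc0000000 and
+ * base_mask == 0xfffff000.
+ */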
+ tcr->raw_tcr = value;
+ tcr->mask = ~(((uint32_t)0xffffffffu) >> maskshift);
+ tcr->base_mask = ~((uint32_t)0x3fffu >> maskshift);
+}
+
+static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ if (arm_feature(env, ARM_FEATURE_LPAE)) {
+ /* With LPAE, a write to TTBCR can change the ASID
+ * via the TTBCR.A1 bit, so do a TLB flush.
+ */
+ tlb_flush(CPU(cpu), 1);
+ }
+ vmsa_ttbcr_raw_write(env, ri, value);
+}
+
+static void vmsa_ttbcr_reset(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ TCR *tcr = raw_ptr(env, ri);
+
+ /* Reset both the TCR as well as the masks corresponding to the bank of
+ * the TCR being reset.
+ */
+ tcr->raw_tcr = 0;
+ tcr->mask = 0;
+ tcr->base_mask = 0xffffc000u;
+}
+
+static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ TCR *tcr = raw_ptr(env, ri);
+
+ /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
+ tlb_flush(CPU(cpu), 1);
+ tcr->raw_tcr = value;
+}
+
+static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* 64 bit accesses to the TTBRs can change the ASID and so we
+ * must flush the TLB.
+ */
+ if (cpreg_field_is_64bit(ri)) {
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ tlb_flush(CPU(cpu), 1);
+ }
+ raw_write(env, ri, value);
+}
+
+static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
+ /* Accesses to VTTBR may change the VMID so we must flush the TLB. */
+ if (raw_read(env, ri) != value) {
+ tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0,
+ ARMMMUIdx_S2NS, -1);
+ raw_write(env, ri, value);
+ }
+}
+
+static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
+ { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_ALIAS,
+ .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
+ offsetoflow32(CPUARMState, cp15.dfsr_ns) }, },
+ { .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
+ .access = PL1_RW, .resetvalue = 0,
+ .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
+ offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
+ { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
+ .access = PL1_RW, .resetvalue = 0,
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
+ offsetof(CPUARMState, cp15.dfar_ns) } },
+ { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
+ .resetvalue = 0, },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo vmsa_cp_reginfo[] = {
+ { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
+ { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
+ .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
+ offsetof(CPUARMState, cp15.ttbr0_ns) } },
+ { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 1,
+ .access = PL1_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
+ offsetof(CPUARMState, cp15.ttbr1_ns) } },
+ { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
+ .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
+ .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[1]) },
+ { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
+ .access = PL1_RW, .type = ARM_CP_ALIAS, .writefn = vmsa_ttbcr_write,
+ .raw_writefn = vmsa_ttbcr_raw_write,
+ .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
+ offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
+ REGINFO_SENTINEL
+};
+
+static void omap_ticonfig_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ env->cp15.c15_ticonfig = value & 0xe7;
+ /* The OS_TYPE bit in this register changes the reported CPUID! */
+ env->cp15.c0_cpuid = (value & (1 << 5)) ?
+ ARM_CPUID_TI915T : ARM_CPUID_TI925T;
+}
+
+static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ env->cp15.c15_threadid = value & 0xffff;
+}
+
+static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Wait-for-interrupt (deprecated) */
+ cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT);
+}
+
+static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* On OMAP there are registers indicating the max/min index of dcache lines
+ * containing a dirty line; cache flush operations have to reset these.
+ */
+ env->cp15.c15_i_max = 0x000;
+ env->cp15.c15_i_min = 0xff0;
+}
+
+static const ARMCPRegInfo omap_cp_reginfo[] = {
+ { .name = "DFSR", .cp = 15, .crn = 5, .crm = CP_ANY,
+ .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW, .type = ARM_CP_OVERRIDE,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.esr_el[1]),
+ .resetvalue = 0, },
+ { .name = "", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_NOP },
+ { .name = "TICONFIG", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.c15_ticonfig), .resetvalue = 0,
+ .writefn = omap_ticonfig_write },
+ { .name = "IMAX", .cp = 15, .crn = 15, .crm = 2, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.c15_i_max), .resetvalue = 0, },
+ { .name = "IMIN", .cp = 15, .crn = 15, .crm = 3, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW, .resetvalue = 0xff0,
+ .fieldoffset = offsetof(CPUARMState, cp15.c15_i_min) },
+ { .name = "THREADID", .cp = 15, .crn = 15, .crm = 4, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.c15_threadid), .resetvalue = 0,
+ .writefn = omap_threadid_write },
+ { .name = "TI925T_STATUS", .cp = 15, .crn = 15,
+ .crm = 8, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
+ .type = ARM_CP_NO_RAW,
+ .readfn = arm_cp_read_zero, .writefn = omap_wfi_write, },
+ /* TODO: Peripheral port remap register:
+ * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt controller
+ * base address at $rn & ~0xfff and map size of 0x200 << ($rn & 0xfff),
+ * when MMU is off.
+ */
+ { .name = "OMAP_CACHEMAINT", .cp = 15, .crn = 7, .crm = CP_ANY,
+ .opc1 = 0, .opc2 = CP_ANY, .access = PL1_W,
+ .type = ARM_CP_OVERRIDE | ARM_CP_NO_RAW,
+ .writefn = omap_cachemaint_write },
+ { .name = "C9", .cp = 15, .crn = 9,
+ .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_RW,
+ .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
+ REGINFO_SENTINEL
+};
+
+static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ env->cp15.c15_cpar = value & 0x3fff;
+}
+
+static const ARMCPRegInfo xscale_cp_reginfo[] = {
+ { .name = "XSCALE_CPAR",
+ .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
+ .writefn = xscale_cpar_write, },
+ { .name = "XSCALE_AUXCR",
+ .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
+ .resetvalue = 0, },
+ /* XScale specific cache-lockdown: since we have no cache we NOP these
+ * and hope the guest does not really rely on cache behaviour.
+ */
+ { .name = "XSCALE_LOCK_ICACHE_LINE",
+ .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
+ .access = PL1_W, .type = ARM_CP_NOP },
+ { .name = "XSCALE_UNLOCK_ICACHE",
+ .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
+ .access = PL1_W, .type = ARM_CP_NOP },
+ { .name = "XSCALE_DCACHE_LOCK",
+ .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_NOP },
+ { .name = "XSCALE_UNLOCK_DCACHE",
+ .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
+ .access = PL1_W, .type = ARM_CP_NOP },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
+ /* RAZ/WI the whole crn=15 space, when we don't have a more specific
+ * implementation of this implementation-defined space.
+ * Ideally this should eventually disappear in favour of actually
+ * implementing the correct behaviour for all cores.
+ */
+ { .name = "C15_IMPDEF", .cp = 15, .crn = 15,
+ .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
+ .access = PL1_RW,
+ .type = ARM_CP_CONST | ARM_CP_NO_RAW | ARM_CP_OVERRIDE,
+ .resetvalue = 0 },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo cache_dirty_status_cp_reginfo[] = {
+ /* Cache status: RAZ because we have no cache so it's always clean */
+ { .name = "CDSR", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 6,
+ .access = PL1_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
+ .resetvalue = 0 },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo cache_block_ops_cp_reginfo[] = {
+ /* We never have a block transfer operation in progress */
+ { .name = "BXSR", .cp = 15, .crn = 7, .crm = 12, .opc1 = 0, .opc2 = 4,
+ .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
+ .resetvalue = 0 },
+ /* The cache ops themselves: these all NOP for QEMU */
+ { .name = "IICR", .cp = 15, .crm = 5, .opc1 = 0,
+ .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
+ { .name = "IDCR", .cp = 15, .crm = 6, .opc1 = 0,
+ .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
+ { .name = "CDCR", .cp = 15, .crm = 12, .opc1 = 0,
+ .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
+ { .name = "PIR", .cp = 15, .crm = 12, .opc1 = 1,
+ .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
+ { .name = "PDR", .cp = 15, .crm = 12, .opc1 = 2,
+ .access = PL0_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
+ { .name = "CIDCR", .cp = 15, .crm = 14, .opc1 = 0,
+ .access = PL1_W, .type = ARM_CP_NOP|ARM_CP_64BIT },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo cache_test_clean_cp_reginfo[] = {
+ /* The cache test-and-clean instructions always return (1 << 30)
+ * to indicate that there are no dirty cache lines.
+ */
+ { .name = "TC_DCACHE", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 3,
+ .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
+ .resetvalue = (1 << 30) },
+ { .name = "TCI_DCACHE", .cp = 15, .crn = 7, .crm = 14, .opc1 = 0, .opc2 = 3,
+ .access = PL0_R, .type = ARM_CP_CONST | ARM_CP_NO_RAW,
+ .resetvalue = (1 << 30) },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo strongarm_cp_reginfo[] = {
+ /* Ignore ReadBuffer accesses */
+ { .name = "C9_READBUFFER", .cp = 15, .crn = 9,
+ .crm = CP_ANY, .opc1 = CP_ANY, .opc2 = CP_ANY,
+ .access = PL1_RW, .resetvalue = 0,
+ .type = ARM_CP_CONST | ARM_CP_OVERRIDE | ARM_CP_NO_RAW },
+ REGINFO_SENTINEL
+};
+
+static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ unsigned int cur_el = arm_current_el(env);
+ bool secure = arm_is_secure(env);
+
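+ /* Non-secure EL1 reads of MIDR are virtualized: when EL2 is
+ * implemented they return VPIDR_EL2 rather than the real MIDR.
+ */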
+ if (arm_feature(&cpu->env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
+ return env->cp15.vpidr_el2;
+ }
+ return raw_read(env, ri);
+}
+
+static uint64_t mpidr_read_val(CPUARMState *env)
+{
+ ARMCPU *cpu = ARM_CPU(arm_env_get_cpu(env));
+ uint64_t mpidr = cpu->mp_affinity;
+
+ if (arm_feature(env, ARM_FEATURE_V7MP)) {
+ mpidr |= (1U << 31);
+ /* Cores which are uniprocessor (non-coherent)
+ * but still implement the MP extensions set
+ * bit 30. (For instance, Cortex-R5).
+ */
+ if (cpu->mp_is_up) {
+ mpidr |= (1u << 30);
+ }
+ }
+ return mpidr;
+}
+
+static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ unsigned int cur_el = arm_current_el(env);
+ bool secure = arm_is_secure(env);
+
+ if (arm_feature(env, ARM_FEATURE_EL2) && !secure && cur_el == 1) {
+ return env->cp15.vmpidr_el2;
+ }
+ return mpidr_read_val(env);
+}
+
+static const ARMCPRegInfo mpidr_cp_reginfo[] = {
+ { .name = "MPIDR", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
+ .access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_RAW },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo lpae_cp_reginfo[] = {
+ /* NOP AMAIR0/1 */
+ { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
+ { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
+ .access = PL1_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
+ .access = PL1_RW, .type = ARM_CP_64BIT, .resetvalue = 0,
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.par_s),
+ offsetof(CPUARMState, cp15.par_ns)} },
+ { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
+ .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
+ offsetof(CPUARMState, cp15.ttbr0_ns) },
+ .writefn = vmsa_ttbr_write, },
+ { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
+ .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
+ offsetof(CPUARMState, cp15.ttbr1_ns) },
+ .writefn = vmsa_ttbr_write, },
+ REGINFO_SENTINEL
+};
+
+static uint64_t aa64_fpcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return vfp_get_fpcr(env);
+}
+
+static void aa64_fpcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ vfp_set_fpcr(env, value);
+}
+
+static uint64_t aa64_fpsr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return vfp_get_fpsr(env);
+}
+
+static void aa64_fpsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ vfp_set_fpsr(env, value);
+}
+
+static CPAccessResult aa64_daif_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
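+ /* EL0 accesses to DAIF trap unless SCTLR_EL1.UMA is set. */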
+ if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
+ return CP_ACCESS_TRAP;
+ }
+ return CP_ACCESS_OK;
+}
+
+static void aa64_daif_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ env->daif = value & PSTATE_DAIF;
+}
+
+static CPAccessResult aa64_cacheop_access(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ bool isread)
+{
+ /* Cache invalidate/clean: NOP, but EL0 must UNDEF unless
+ * SCTLR_EL1.UCI is set.
+ */
+ if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCI)) {
+ return CP_ACCESS_TRAP;
+ }
+ return CP_ACCESS_OK;
+}
+
+/* See: D4.7.2 TLB maintenance requirements and the TLB maintenance instructions
+ * Page D4-1736 (DDI0487A.b)
+ */
+
+static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
+ if (arm_is_secure_below_el3(env)) {
+ tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
+ } else {
+ tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, -1);
+ }
+}
+
+static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ bool sec = arm_is_secure_below_el3(env);
+ CPUState *other_cs;
+
+ CPU_FOREACH(other_cs) {
+ if (sec) {
+ tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
+ } else {
+ tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1,
+ ARMMMUIdx_S12NSE0, -1);
+ }
+ }
+}
+
+static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Note that the 'ALL' scope must invalidate both stage 1 and
+ * stage 2 translations, whereas most other scopes only invalidate
+ * stage 1 translations.
+ */
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
+ if (arm_is_secure_below_el3(env)) {
+ tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
+ } else {
+ if (arm_feature(env, ARM_FEATURE_EL2)) {
+ tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0,
+ ARMMMUIdx_S2NS, -1);
+ } else {
+ tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, -1);
+ }
+ }
+}
+
+static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
+ tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1E2, -1);
+}
+
+static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
+ tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1E3, -1);
+}
+
+static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Note that the 'ALL' scope must invalidate both stage 1 and
+ * stage 2 translations, whereas most other scopes only invalidate
+ * stage 1 translations.
+ */
+ bool sec = arm_is_secure_below_el3(env);
+ bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);
+ CPUState *other_cs;
+
+ CPU_FOREACH(other_cs) {
+ if (sec) {
+ tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1);
+ } else if (has_el2) {
+ tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1,
+ ARMMMUIdx_S12NSE0, ARMMMUIdx_S2NS, -1);
+ } else {
+ tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1,
+ ARMMMUIdx_S12NSE0, -1);
+ }
+ }
+}
+
+static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *other_cs;
+
+ CPU_FOREACH(other_cs) {
+ tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1E2, -1);
+ }
+}
+
+static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *other_cs;
+
+ CPU_FOREACH(other_cs) {
+ tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1E3, -1);
+ }
+}
+
+static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Invalidate by VA, EL1&0 (AArch64 version).
+ * Currently handles all of VAE1, VAAE1, VAALE1 and VALE1,
+ * since we don't support flush-for-specific-ASID-only or
+ * flush-last-level-only.
+ */
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
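+ /* The operand's low bits hold VA[55:12]; shifting up by 12 and
+ * sign-extending from bit 55 recovers the page-aligned address.
+ */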
+ uint64_t pageaddr = sextract64(value << 12, 0, 56);
+
+ if (arm_is_secure_below_el3(env)) {
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1SE1,
+ ARMMMUIdx_S1SE0, -1);
+ } else {
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S12NSE1,
+ ARMMMUIdx_S12NSE0, -1);
+ }
+}
+
+static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Invalidate by VA, EL2
+ * Currently handles both VAE2 and VALE2, since we don't support
+ * flush-last-level-only.
+ */
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ uint64_t pageaddr = sextract64(value << 12, 0, 56);
+
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1E2, -1);
+}
+
+static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Invalidate by VA, EL3
+ * Currently handles both VAE3 and VALE3, since we don't support
+ * flush-last-level-only.
+ */
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ uint64_t pageaddr = sextract64(value << 12, 0, 56);
+
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1E3, -1);
+}
+
+static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ bool sec = arm_is_secure_below_el3(env);
+ CPUState *other_cs;
+ uint64_t pageaddr = sextract64(value << 12, 0, 56);
+
+ CPU_FOREACH(other_cs) {
+ if (sec) {
+ tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1SE1,
+ ARMMMUIdx_S1SE0, -1);
+ } else {
+ tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S12NSE1,
+ ARMMMUIdx_S12NSE0, -1);
+ }
+ }
+}
+
+static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *other_cs;
+ uint64_t pageaddr = sextract64(value << 12, 0, 56);
+
+ CPU_FOREACH(other_cs) {
+ tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1E2, -1);
+ }
+}
+
+static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *other_cs;
+ uint64_t pageaddr = sextract64(value << 12, 0, 56);
+
+ CPU_FOREACH(other_cs) {
+ tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1E3, -1);
+ }
+}
+
+static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Invalidate by IPA. This has to invalidate any structures that
+ * contain only stage 2 translation information, but does not need
+ * to apply to structures that contain combined stage 1 and stage 2
+ * translation information.
+ * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero.
+ */
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ uint64_t pageaddr;
+
+ if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
+ return;
+ }
+
+ pageaddr = sextract64(value << 12, 0, 48);
+
+ tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S2NS, -1);
+}
+
+static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ CPUState *other_cs;
+ uint64_t pageaddr;
+
+ if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) {
+ return;
+ }
+
+ pageaddr = sextract64(value << 12, 0, 48);
+
+ CPU_FOREACH(other_cs) {
+ tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S2NS, -1);
+ }
+}
+
+static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ /* We don't implement EL2, so the only control on DC ZVA is the
+ * bit in the SCTLR which can prohibit access for EL0.
+ */
+ if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_DZE)) {
+ return CP_ACCESS_TRAP;
+ }
+ return CP_ACCESS_OK;
+}
+
+static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ int dzp_bit = 1 << 4;
+
+ /* DZP indicates whether DC ZVA access is allowed */
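+ /* DCZID_EL0 layout: BS (log2 of the block size in words) in bits [3:0],
+ * DZP in bit 4.
+ */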
+ if (aa64_zva_access(env, NULL, false) == CP_ACCESS_OK) {
+ dzp_bit = 0;
+ }
+ return cpu->dcz_blocksize | dzp_bit;
+}
+
+static CPAccessResult sp_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (!(env->pstate & PSTATE_SP)) {
+ /* Access to SP_EL0 is undefined if it's being used as
+ * the stack pointer.
+ */
+ return CP_ACCESS_TRAP_UNCATEGORIZED;
+ }
+ return CP_ACCESS_OK;
+}
+
+static uint64_t spsel_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ return env->pstate & PSTATE_SP;
+}
+
+static void spsel_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t val)
+{
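+ /* update_spsel() switches the active stack pointer between SP_EL0 and
+ * SP_ELx and records the new PSTATE.SP value.
+ */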
+ update_spsel(env, val);
+}
+
+static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ if (raw_read(env, ri) == value) {
+ /* Skip the TLB flush if nothing actually changed; Linux likes
+ * to do a lot of pointless SCTLR writes.
+ */
+ return;
+ }
+
+ raw_write(env, ri, value);
+ /* ??? Lots of these bits are not implemented. */
+ /* This may enable/disable the MMU, so do a TLB flush. */
+ tlb_flush(CPU(cpu), 1);
+}
+
+static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
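+ /* FPEXC32_EL2 is an FP register, so accesses honour the CPTR_EL2.TFP
+ * and CPTR_EL3.TFP floating-point trap controls.
+ */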
+ if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
+ return CP_ACCESS_TRAP_FP_EL2;
+ }
+ if (env->cp15.cptr_el[3] & CPTR_TFP) {
+ return CP_ACCESS_TRAP_FP_EL3;
+ }
+ return CP_ACCESS_OK;
+}
+
+static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
+}
+
+static const ARMCPRegInfo v8_cp_reginfo[] = {
+ /* Minimal set of EL0-visible registers. This will need to be expanded
+ * significantly for system emulation of AArch64 CPUs.
+ */
+ { .name = "NZCV", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 2,
+ .access = PL0_RW, .type = ARM_CP_NZCV },
+ { .name = "DAIF", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 2,
+ .type = ARM_CP_NO_RAW,
+ .access = PL0_RW, .accessfn = aa64_daif_access,
+ .fieldoffset = offsetof(CPUARMState, daif),
+ .writefn = aa64_daif_write, .resetfn = arm_cp_reset_ignore },
+ { .name = "FPCR", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .opc2 = 0, .crn = 4, .crm = 4,
+ .access = PL0_RW, .readfn = aa64_fpcr_read, .writefn = aa64_fpcr_write },
+ { .name = "FPSR", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 4, .crm = 4,
+ .access = PL0_RW, .readfn = aa64_fpsr_read, .writefn = aa64_fpsr_write },
+ { .name = "DCZID_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .opc2 = 7, .crn = 0, .crm = 0,
+ .access = PL0_R, .type = ARM_CP_NO_RAW,
+ .readfn = aa64_dczid_read },
+ { .name = "DC_ZVA", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 4, .opc2 = 1,
+ .access = PL0_W, .type = ARM_CP_DC_ZVA,
+#ifndef CONFIG_USER_ONLY
+ /* Avoid overhead of an access check that always passes in user-mode */
+ .accessfn = aa64_zva_access,
+#endif
+ },
+ { .name = "CURRENTEL", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 4, .crm = 2,
+ .access = PL1_R, .type = ARM_CP_CURRENTEL },
+ /* Cache ops: all NOPs since we don't emulate caches */
+ { .name = "IC_IALLUIS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
+ .access = PL1_W, .type = ARM_CP_NOP },
+ { .name = "IC_IALLU", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
+ .access = PL1_W, .type = ARM_CP_NOP },
+ { .name = "IC_IVAU", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 5, .opc2 = 1,
+ .access = PL0_W, .type = ARM_CP_NOP,
+ .accessfn = aa64_cacheop_access },
+ { .name = "DC_IVAC", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
+ .access = PL1_W, .type = ARM_CP_NOP },
+ { .name = "DC_ISW", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
+ .access = PL1_W, .type = ARM_CP_NOP },
+ { .name = "DC_CVAC", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 10, .opc2 = 1,
+ .access = PL0_W, .type = ARM_CP_NOP,
+ .accessfn = aa64_cacheop_access },
+ { .name = "DC_CSW", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
+ .access = PL1_W, .type = ARM_CP_NOP },
+ { .name = "DC_CVAU", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 11, .opc2 = 1,
+ .access = PL0_W, .type = ARM_CP_NOP,
+ .accessfn = aa64_cacheop_access },
+ { .name = "DC_CIVAC", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 14, .opc2 = 1,
+ .access = PL0_W, .type = ARM_CP_NOP,
+ .accessfn = aa64_cacheop_access },
+ { .name = "DC_CISW", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
+ .access = PL1_W, .type = ARM_CP_NOP },
+ /* TLBI operations */
+ { .name = "TLBI_VMALLE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 0,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vmalle1is_write },
+ { .name = "TLBI_VAE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 1,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae1is_write },
+ { .name = "TLBI_ASIDE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 2,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vmalle1is_write },
+ { .name = "TLBI_VAAE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 3,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae1is_write },
+ { .name = "TLBI_VALE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae1is_write },
+ { .name = "TLBI_VAALE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae1is_write },
+ { .name = "TLBI_VMALLE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 0,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vmalle1_write },
+ { .name = "TLBI_VAE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 1,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae1_write },
+ { .name = "TLBI_ASIDE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 2,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vmalle1_write },
+ { .name = "TLBI_VAAE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 3,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae1_write },
+ { .name = "TLBI_VALE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae1_write },
+ { .name = "TLBI_VAALE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae1_write },
+ { .name = "TLBI_IPAS2E1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_ipas2e1is_write },
+ { .name = "TLBI_IPAS2LE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_ipas2e1is_write },
+ { .name = "TLBI_ALLE1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle1is_write },
+ { .name = "TLBI_VMALLS12E1IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 6,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle1is_write },
+ { .name = "TLBI_IPAS2E1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_ipas2e1_write },
+ { .name = "TLBI_IPAS2LE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_ipas2e1_write },
+ { .name = "TLBI_ALLE1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle1_write },
+ { .name = "TLBI_VMALLS12E1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 6,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle1is_write },
+#ifndef CONFIG_USER_ONLY
+ /* 64 bit address translation operations */
+ { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
+ .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
+ .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
+ .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
+ .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
+ .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
+ .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
+ .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
+ .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
+ { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
+ .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
+ .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ { .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_ALIAS,
+ .opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
+ .access = PL1_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
+ .writefn = par_write },
+#endif
+ /* TLB invalidate last level of translation table walk */
+ { .name = "TLBIMVALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 5,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_is_write },
+ { .name = "TLBIMVAALIS", .cp = 15, .opc1 = 0, .crn = 8, .crm = 3, .opc2 = 7,
+ .type = ARM_CP_NO_RAW, .access = PL1_W,
+ .writefn = tlbimvaa_is_write },
+ { .name = "TLBIMVAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 5,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimva_write },
+ { .name = "TLBIMVAAL", .cp = 15, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
+ .type = ARM_CP_NO_RAW, .access = PL1_W, .writefn = tlbimvaa_write },
+ { .name = "TLBIMVALH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbimva_hyp_write },
+ { .name = "TLBIMVALHIS",
+ .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbimva_hyp_is_write },
+ { .name = "TLBIIPAS2",
+ .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 1,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbiipas2_write },
+ { .name = "TLBIIPAS2IS",
+ .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 1,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbiipas2_is_write },
+ { .name = "TLBIIPAS2L",
+ .cp = 15, .opc1 = 4, .crn = 8, .crm = 4, .opc2 = 5,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbiipas2_write },
+ { .name = "TLBIIPAS2LIS",
+ .cp = 15, .opc1 = 4, .crn = 8, .crm = 0, .opc2 = 5,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbiipas2_is_write },
+ /* 32 bit cache operations */
+ { .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
+ .type = ARM_CP_NOP, .access = PL1_W },
+ { .name = "BPIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 6,
+ .type = ARM_CP_NOP, .access = PL1_W },
+ { .name = "ICIALLU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 0,
+ .type = ARM_CP_NOP, .access = PL1_W },
+ { .name = "ICIMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 1,
+ .type = ARM_CP_NOP, .access = PL1_W },
+ { .name = "BPIALL", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 6,
+ .type = ARM_CP_NOP, .access = PL1_W },
+ { .name = "BPIMVA", .cp = 15, .opc1 = 0, .crn = 7, .crm = 5, .opc2 = 7,
+ .type = ARM_CP_NOP, .access = PL1_W },
+ { .name = "DCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 1,
+ .type = ARM_CP_NOP, .access = PL1_W },
+ { .name = "DCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 6, .opc2 = 2,
+ .type = ARM_CP_NOP, .access = PL1_W },
+ { .name = "DCCMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 1,
+ .type = ARM_CP_NOP, .access = PL1_W },
+ { .name = "DCCSW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 10, .opc2 = 2,
+ .type = ARM_CP_NOP, .access = PL1_W },
+ { .name = "DCCMVAU", .cp = 15, .opc1 = 0, .crn = 7, .crm = 11, .opc2 = 1,
+ .type = ARM_CP_NOP, .access = PL1_W },
+ { .name = "DCCIMVAC", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 1,
+ .type = ARM_CP_NOP, .access = PL1_W },
+ { .name = "DCCISW", .cp = 15, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
+ .type = ARM_CP_NOP, .access = PL1_W },
+ /* MMU Domain access control / MPU write buffer control */
+ { .name = "DACR", .cp = 15, .opc1 = 0, .crn = 3, .crm = 0, .opc2 = 0,
+ .access = PL1_RW, .resetvalue = 0,
+ .writefn = dacr_write, .raw_writefn = raw_write,
+ .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dacr_s),
+ offsetoflow32(CPUARMState, cp15.dacr_ns) } },
+ { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_ALIAS,
+ .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
+ .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, elr_el[1]) },
+ { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_ALIAS,
+ .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
+ .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
+ /* We rely on the access checks not allowing the guest to write to the
+ * state field when SPSel indicates that it's being used as the stack
+ * pointer.
+ */
+ { .name = "SP_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 1, .opc2 = 0,
+ .access = PL1_RW, .accessfn = sp_el0_access,
+ .type = ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, sp_el[0]) },
+ { .name = "SP_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 1, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, sp_el[1]) },
+ { .name = "SPSel", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
+ .type = ARM_CP_NO_RAW,
+ .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
+ { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
+ .type = ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
+ .access = PL2_RW, .accessfn = fpexc32_access },
+ { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
+ .access = PL2_RW, .resetvalue = 0,
+ .writefn = dacr_write, .raw_writefn = raw_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
+ { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
+ .access = PL2_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
+ { .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_ALIAS,
+ .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
+ .access = PL2_RW,
+ .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_IRQ]) },
+ { .name = "SPSR_ABT", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_ALIAS,
+ .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 1,
+ .access = PL2_RW,
+ .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_ABT]) },
+ { .name = "SPSR_UND", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_ALIAS,
+ .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 2,
+ .access = PL2_RW,
+ .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_UND]) },
+ { .name = "SPSR_FIQ", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_ALIAS,
+ .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 3,
+ .access = PL2_RW,
+ .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_FIQ]) },
+ { .name = "MDCR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 3, .opc2 = 1,
+ .resetvalue = 0,
+ .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el3) },
+ { .name = "SDCR", .type = ARM_CP_ALIAS,
+ .cp = 15, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 1,
+ .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
+ .writefn = sdcr_write,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
+ REGINFO_SENTINEL
+};
+
+/* Used to describe the behaviour of EL2 regs when EL2 does not exist. */
+static const ARMCPRegInfo el3_no_el2_cp_reginfo[] = {
+ { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
+ .access = PL2_RW,
+ .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
+ { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_NO_RAW,
+ .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
+ .access = PL2_RW,
+ .readfn = arm_cp_read_zero, .writefn = arm_cp_write_ignore },
+ { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
+ .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
+ .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "VTCR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
+ .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 6, .crm = 2,
+ .access = PL2_RW, .accessfn = access_el3_aa32ns,
+ .type = ARM_CP_CONST | ARM_CP_64BIT, .resetvalue = 0 },
+ { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
+ .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
+ .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
+ .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
+ .access = PL2_RW, .accessfn = access_tda,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "HPFAR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
+ .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
+ .access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ REGINFO_SENTINEL
+};
+
+static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ uint64_t valid_mask = HCR_MASK;
+
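+ /* Bits that are RES0 for this configuration are dropped from the valid
+ * mask: HCD when EL3 is implemented, TSC when it is not.
+ */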
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ valid_mask &= ~HCR_HCD;
+ } else {
+ valid_mask &= ~HCR_TSC;
+ }
+
+ /* Clear RES0 bits. */
+ value &= valid_mask;
+
+ /* These bits change the MMU setup:
+ * HCR_VM enables stage 2 translation
+ * HCR_PTW forbids certain page-table setups
+ * HCR_DC Disables stage1 and enables stage2 translation
+ */
+ if ((raw_read(env, ri) ^ value) & (HCR_VM | HCR_PTW | HCR_DC)) {
+ tlb_flush(CPU(cpu), 1);
+ }
+ raw_write(env, ri, value);
+}
+
+static const ARMCPRegInfo el2_cp_reginfo[] = {
+ { .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 0,
+ .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.hcr_el2),
+ .writefn = hcr_write },
+ { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_ALIAS,
+ .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
+ .access = PL2_RW,
+ .fieldoffset = offsetof(CPUARMState, elr_el[2]) },
+ { .name = "ESR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 2, .opc2 = 0,
+ .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[2]) },
+ { .name = "FAR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 0,
+ .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[2]) },
+ { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_ALIAS,
+ .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
+ .access = PL2_RW,
+ .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
+ { .name = "VBAR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
+ .access = PL2_RW, .writefn = vbar_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[2]),
+ .resetvalue = 0 },
+ { .name = "SP_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 1, .opc2 = 0,
+ .access = PL3_RW, .type = ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, sp_el[2]) },
+ { .name = "CPTR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 2,
+ .access = PL2_RW, .accessfn = cptr_access, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[2]) },
+ { .name = "MAIR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 0,
+ .access = PL2_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el[2]),
+ .resetvalue = 0 },
+ { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
+ .opc1 = 4, .crn = 10, .crm = 2, .opc2 = 1,
+ .access = PL2_RW, .type = ARM_CP_ALIAS,
+ .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el[2]) },
+ { .name = "AMAIR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ /* HAMAIR1 is mapped to AMAIR_EL2[63:32] */
+ { .name = "HMAIR1", .state = ARM_CP_STATE_AA32,
+ .opc1 = 4, .crn = 10, .crm = 3, .opc2 = 1,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "AFSR0_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 0,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "AFSR1_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 1, .opc2 = 1,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "TCR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 2,
+ .access = PL2_RW,
+ /* no .writefn needed as this can't cause an ASID change;
+ * no .raw_writefn or .resetfn needed as we never use mask/base_mask
+ */
+ .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[2]) },
+ { .name = "VTCR", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
+ .type = ARM_CP_ALIAS,
+ .access = PL2_RW, .accessfn = access_el3_aa32ns,
+ .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
+ { .name = "VTCR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 2,
+ .access = PL2_RW,
+ /* no .writefn needed as this can't cause an ASID change;
+ * no .raw_writefn or .resetfn needed as we never use mask/base_mask
+ */
+ .fieldoffset = offsetof(CPUARMState, cp15.vtcr_el2) },
+ { .name = "VTTBR", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 6, .crm = 2,
+ .type = ARM_CP_64BIT | ARM_CP_ALIAS,
+ .access = PL2_RW, .accessfn = access_el3_aa32ns,
+ .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2),
+ .writefn = vttbr_write },
+ { .name = "VTTBR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 1, .opc2 = 0,
+ .access = PL2_RW, .writefn = vttbr_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.vttbr_el2) },
+ { .name = "SCTLR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 0,
+ .access = PL2_RW, .raw_writefn = raw_write, .writefn = sctlr_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[2]) },
+ { .name = "TPIDR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 2,
+ .access = PL2_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[2]) },
+ { .name = "TTBR0_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 0, .opc2 = 0,
+ .access = PL2_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
+ { .name = "HTTBR", .cp = 15, .opc1 = 4, .crm = 2,
+ .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
+ .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
+ { .name = "TLBIALLNSNH",
+ .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 4,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbiall_nsnh_write },
+ { .name = "TLBIALLNSNHIS",
+ .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 4,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbiall_nsnh_is_write },
+ { .name = "TLBIALLH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbiall_hyp_write },
+ { .name = "TLBIALLHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbiall_hyp_is_write },
+ { .name = "TLBIMVAH", .cp = 15, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbimva_hyp_write },
+ { .name = "TLBIMVAHIS", .cp = 15, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbimva_hyp_is_write },
+ { .name = "TLBI_ALLE2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 0,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbi_aa64_alle2_write },
+ { .name = "TLBI_VAE2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 1,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbi_aa64_vae2_write },
+ { .name = "TLBI_VALE2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 7, .opc2 = 5,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae2_write },
+ { .name = "TLBI_ALLE2IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 0,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle2is_write },
+ { .name = "TLBI_VAE2IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 1,
+ .type = ARM_CP_NO_RAW, .access = PL2_W,
+ .writefn = tlbi_aa64_vae2is_write },
+ { .name = "TLBI_VALE2IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 3, .opc2 = 5,
+ .access = PL2_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae2is_write },
+#ifndef CONFIG_USER_ONLY
+ /* Unlike the other EL2-related AT operations, these must
+ * UNDEF from EL3 if EL2 is not implemented, which is why we
+ * define them here rather than with the rest of the AT ops.
+ */
+ { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
+ .access = PL2_W, .accessfn = at_s1e2_access,
+ .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
+ .access = PL2_W, .accessfn = at_s1e2_access,
+ .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ /* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
+ * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
+ * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
+ * to behave as if SCR.NS was 1.
+ */
+ { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
+ .access = PL2_W,
+ .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
+ { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
+ .access = PL2_W,
+ .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
+ { .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
+      /* ARMv7 requires bits 0 and 1 to reset to 1. ARMv8 defines the
+ * reset values as IMPDEF. We choose to reset to 3 to comply with
+ * both ARMv7 and ARMv8.
+ */
+ .access = PL2_RW, .resetvalue = 3,
+ .fieldoffset = offsetof(CPUARMState, cp15.cnthctl_el2) },
+ { .name = "CNTVOFF_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 0, .opc2 = 3,
+ .access = PL2_RW, .type = ARM_CP_IO, .resetvalue = 0,
+ .writefn = gt_cntvoff_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
+ { .name = "CNTVOFF", .cp = 15, .opc1 = 4, .crm = 14,
+ .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS | ARM_CP_IO,
+ .writefn = gt_cntvoff_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.cntvoff_el2) },
+ { .name = "CNTHP_CVAL_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 2,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
+ .type = ARM_CP_IO, .access = PL2_RW,
+ .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
+ { .name = "CNTHP_CVAL", .cp = 15, .opc1 = 6, .crm = 14,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].cval),
+ .access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_IO,
+ .writefn = gt_hyp_cval_write, .raw_writefn = raw_write },
+ { .name = "CNTHP_TVAL_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 0,
+ .type = ARM_CP_NO_RAW | ARM_CP_IO, .access = PL2_RW,
+ .resetfn = gt_hyp_timer_reset,
+ .readfn = gt_hyp_tval_read, .writefn = gt_hyp_tval_write },
+ { .name = "CNTHP_CTL_EL2", .state = ARM_CP_STATE_BOTH,
+ .type = ARM_CP_IO,
+ .opc0 = 3, .opc1 = 4, .crn = 14, .crm = 2, .opc2 = 1,
+ .access = PL2_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_HYP].ctl),
+ .resetvalue = 0,
+ .writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
+#endif
+ /* The only field of MDCR_EL2 that has a defined architectural reset value
+ * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N; but we
+     * don't implement any PMU event counters, so using zero as a reset
+     * value for MDCR_EL2 is okay.
+ */
+ { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
+ .access = PL2_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
+ { .name = "HPFAR", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
+ .access = PL2_RW, .accessfn = access_el3_aa32ns,
+ .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
+ { .name = "HPFAR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
+ .access = PL2_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.hpfar_el2) },
+ { .name = "HSTR_EL2", .state = ARM_CP_STATE_BOTH,
+ .cp = 15, .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 3,
+ .access = PL2_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.hstr_el2) },
+ REGINFO_SENTINEL
+};
+
+static CPAccessResult nsacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ /* The NSACR is RW at EL3, and RO for NS EL1 and NS EL2.
+ * At Secure EL1 it traps to EL3.
+ */
+ if (arm_current_el(env) == 3) {
+ return CP_ACCESS_OK;
+ }
+ if (arm_is_secure_below_el3(env)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+    /* Writes from NS EL1 and NS EL2 are UNDEF; reads are allowed. */
+ if (isread) {
+ return CP_ACCESS_OK;
+ }
+ return CP_ACCESS_TRAP_UNCATEGORIZED;
+}
+
+static const ARMCPRegInfo el3_cp_reginfo[] = {
+ { .name = "SCR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
+ .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
+ .resetvalue = 0, .writefn = scr_write },
+ { .name = "SCR", .type = ARM_CP_ALIAS,
+ .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
+ .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
+ .writefn = scr_write },
+ { .name = "SDER32_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 1,
+ .access = PL3_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.sder) },
+ { .name = "SDER",
+ .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 1,
+ .access = PL3_RW, .resetvalue = 0,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.sder) },
+ { .name = "MVBAR", .cp = 15, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
+ .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
+ .writefn = vbar_write, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.mvbar) },
+ { .name = "TTBR0_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 0,
+ .access = PL3_RW, .writefn = vmsa_ttbr_write, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[3]) },
+ { .name = "TCR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 0, .opc2 = 2,
+ .access = PL3_RW,
+ /* no .writefn needed as this can't cause an ASID change;
+ * we must provide a .raw_writefn and .resetfn because we handle
+ * reset and migration for the AArch32 TTBCR(S), which might be
+ * using mask and base_mask.
+ */
+ .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.tcr_el[3]) },
+ { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_ALIAS,
+ .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
+ .access = PL3_RW,
+ .fieldoffset = offsetof(CPUARMState, elr_el[3]) },
+ { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
+ .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.esr_el[3]) },
+ { .name = "FAR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 6, .crm = 0, .opc2 = 0,
+ .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[3]) },
+ { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_ALIAS,
+ .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
+ .access = PL3_RW,
+ .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
+ { .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
+ .access = PL3_RW, .writefn = vbar_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.vbar_el[3]),
+ .resetvalue = 0 },
+ { .name = "CPTR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 2,
+ .access = PL3_RW, .accessfn = cptr_access, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.cptr_el[3]) },
+ { .name = "TPIDR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 13, .crm = 0, .opc2 = 2,
+ .access = PL3_RW, .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.tpidr_el[3]) },
+ { .name = "AMAIR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 10, .crm = 3, .opc2 = 0,
+ .access = PL3_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "AFSR0_EL3", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 0,
+ .access = PL3_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "AFSR1_EL3", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 1, .opc2 = 1,
+ .access = PL3_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "TLBI_ALLE3IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 0,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle3is_write },
+ { .name = "TLBI_VAE3IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 1,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae3is_write },
+ { .name = "TLBI_VALE3IS", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 3, .opc2 = 5,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae3is_write },
+ { .name = "TLBI_ALLE3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 0,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_alle3_write },
+ { .name = "TLBI_VAE3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 1,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae3_write },
+ { .name = "TLBI_VALE3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 7, .opc2 = 5,
+ .access = PL3_W, .type = ARM_CP_NO_RAW,
+ .writefn = tlbi_aa64_vae3_write },
+ REGINFO_SENTINEL
+};
+
+static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ /* Only accessible in EL0 if SCTLR.UCT is set (and only in AArch64,
+ * but the AArch32 CTR has its own reginfo struct)
+ */
+ if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
+ return CP_ACCESS_TRAP;
+ }
+ return CP_ACCESS_OK;
+}
+
+static void oslar_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Writes to OSLAR_EL1 may update the OS lock status, which can be
+ * read via a bit in OSLSR_EL1.
+ */
+ int oslock;
+
+ if (ri->state == ARM_CP_STATE_AA32) {
+ oslock = (value == 0xC5ACCE55);
+ } else {
+ oslock = value & 1;
+ }
+
+ env->cp15.oslsr_el1 = deposit32(env->cp15.oslsr_el1, 1, 1, oslock);
+}
+
+static const ARMCPRegInfo debug_cp_reginfo[] = {
+ /* DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped
+ * debug components. The AArch64 version of DBGDRAR is named MDRAR_EL1;
+ * unlike DBGDRAR it is never accessible from EL0.
+ * DBGDSAR is deprecated and must RAZ from v8 anyway, so it has no AArch64
+ * accessor.
+ */
+ { .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
+ .access = PL0_R, .accessfn = access_tdra,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "MDRAR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
+ .access = PL1_R, .accessfn = access_tdra,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
+ .access = PL0_R, .accessfn = access_tdra,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ /* Monitor debug system control register; the 32-bit alias is DBGDSCRext. */
+ { .name = "MDSCR_EL1", .state = ARM_CP_STATE_BOTH,
+ .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
+ .access = PL1_RW, .accessfn = access_tda,
+ .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1),
+ .resetvalue = 0 },
+ /* MDCCSR_EL0, aka DBGDSCRint. This is a read-only mirror of MDSCR_EL1.
+ * We don't implement the configurable EL0 access.
+ */
+ { .name = "MDCCSR_EL0", .state = ARM_CP_STATE_BOTH,
+ .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
+ .type = ARM_CP_ALIAS,
+ .access = PL1_R, .accessfn = access_tda,
+ .fieldoffset = offsetof(CPUARMState, cp15.mdscr_el1), },
+ { .name = "OSLAR_EL1", .state = ARM_CP_STATE_BOTH,
+ .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
+ .access = PL1_W, .type = ARM_CP_NO_RAW,
+ .accessfn = access_tdosa,
+ .writefn = oslar_write },
+ { .name = "OSLSR_EL1", .state = ARM_CP_STATE_BOTH,
+ .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 4,
+ .access = PL1_R, .resetvalue = 10,
+ .accessfn = access_tdosa,
+ .fieldoffset = offsetof(CPUARMState, cp15.oslsr_el1) },
+ /* Dummy OSDLR_EL1: 32-bit Linux will read this */
+ { .name = "OSDLR_EL1", .state = ARM_CP_STATE_BOTH,
+ .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 3, .opc2 = 4,
+ .access = PL1_RW, .accessfn = access_tdosa,
+ .type = ARM_CP_NOP },
+ /* Dummy DBGVCR: Linux wants to clear this on startup, but we don't
+ * implement vector catch debug events yet.
+ */
+ { .name = "DBGVCR",
+ .cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
+ .access = PL1_RW, .accessfn = access_tda,
+ .type = ARM_CP_NOP },
+ /* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
+ * Channel but Linux may try to access this register. The 32-bit
+ * alias is DBGDCCINT.
+ */
+ { .name = "MDCCINT_EL1", .state = ARM_CP_STATE_BOTH,
+ .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
+ .access = PL1_RW, .accessfn = access_tda,
+ .type = ARM_CP_NOP },
+ REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
+ /* 64 bit access versions of the (dummy) debug registers */
+ { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
+ .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
+ { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
+ .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
+ REGINFO_SENTINEL
+};
+
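+/* Create or remove the QEMU watchpoint for debug watchpoint n, based on
+ * the current DBGWVR<n>/DBGWCR<n> register state.
+ */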
+void hw_watchpoint_update(ARMCPU *cpu, int n)
+{
+ CPUARMState *env = &cpu->env;
+ vaddr len = 0;
+ vaddr wvr = env->cp15.dbgwvr[n];
+ uint64_t wcr = env->cp15.dbgwcr[n];
+ int mask;
+ int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
+
+ if (env->cpu_watchpoint[n]) {
+ cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[n]);
+ env->cpu_watchpoint[n] = NULL;
+ }
+
+ if (!extract64(wcr, 0, 1)) {
+ /* E bit clear : watchpoint disabled */
+ return;
+ }
+
+ switch (extract64(wcr, 3, 2)) {
+ case 0:
+ /* LSC 00 is reserved and must behave as if the wp is disabled */
+ return;
+ case 1:
+ flags |= BP_MEM_READ;
+ break;
+ case 2:
+ flags |= BP_MEM_WRITE;
+ break;
+ case 3:
+ flags |= BP_MEM_ACCESS;
+ break;
+ }
+
+ /* Attempts to use both MASK and BAS fields simultaneously are
+ * CONSTRAINED UNPREDICTABLE; we opt to ignore BAS in this case,
+ * thus generating a watchpoint for every byte in the masked region.
+ */
+ mask = extract64(wcr, 24, 4);
+ if (mask == 1 || mask == 2) {
+ /* Reserved values of MASK; we must act as if the mask value was
+ * some non-reserved value, or as if the watchpoint were disabled.
+ * We choose the latter.
+ */
+ return;
+ } else if (mask) {
+ /* Watchpoint covers an aligned area up to 2GB in size */
+ len = 1ULL << mask;
+ /* If masked bits in WVR are not zero it's CONSTRAINED UNPREDICTABLE
+ * whether the watchpoint fires when the unmasked bits match; we opt
+ * to generate the exceptions.
+ */
+ wvr &= ~(len - 1);
+ } else {
+ /* Watchpoint covers bytes defined by the byte address select bits */
+ int bas = extract64(wcr, 5, 8);
+ int basstart;
+
+ if (bas == 0) {
+ /* This must act as if the watchpoint is disabled */
+ return;
+ }
+
+ if (extract64(wvr, 2, 1)) {
+            /* Deprecated case of an address that is only 4-byte aligned:
+             * BAS[7:4] are ignored, and BAS[3:0] define which bytes to watch.
+             */
+ bas &= 0xf;
+ }
+ /* The BAS bits are supposed to be programmed to indicate a contiguous
+ * range of bytes. Otherwise it is CONSTRAINED UNPREDICTABLE whether
+ * we fire for each byte in the word/doubleword addressed by the WVR.
+ * We choose to ignore any non-zero bits after the first range of 1s.
+ */
+ basstart = ctz32(bas);
+ len = cto32(bas >> basstart);
+ wvr += basstart;
+ }
+
+ cpu_watchpoint_insert(CPU(cpu), wvr, len, flags,
+ &env->cpu_watchpoint[n]);
+}
+
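+/* Recreate all QEMU watchpoints from the architectural watchpoint register
+ * state, used when that state may have changed wholesale (eg after a
+ * migration load).
+ */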
+void hw_watchpoint_update_all(ARMCPU *cpu)
+{
+ int i;
+ CPUARMState *env = &cpu->env;
+
+ /* Completely clear out existing QEMU watchpoints and our array, to
+ * avoid possible stale entries following migration load.
+ */
+ cpu_watchpoint_remove_all(CPU(cpu), BP_CPU);
+ memset(env->cpu_watchpoint, 0, sizeof(env->cpu_watchpoint));
+
+ for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_watchpoint); i++) {
+ hw_watchpoint_update(cpu, i);
+ }
+}
+
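+/* Write handler for DBGWVR<n>: update the stored value and the
+ * corresponding QEMU watchpoint.
+ */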
+static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ int i = ri->crm;
+
+ /* Bits [63:49] are hardwired to the value of bit [48]; that is, the
+ * register reads and behaves as if values written are sign extended.
+ * Bits [1:0] are RES0.
+ */
+ value = sextract64(value, 0, 49) & ~3ULL;
+
+ raw_write(env, ri, value);
+ hw_watchpoint_update(cpu, i);
+}
+
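+/* Write handler for DBGWCR<n>; as for DBGWVR<n>, refresh the QEMU
+ * watchpoint after the raw write.
+ */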
+static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ int i = ri->crm;
+
+ raw_write(env, ri, value);
+ hw_watchpoint_update(cpu, i);
+}
+
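+/* Create or remove the QEMU breakpoint for debug breakpoint n, based on
+ * the current DBGBVR<n>/DBGBCR<n> register state.
+ */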
+void hw_breakpoint_update(ARMCPU *cpu, int n)
+{
+ CPUARMState *env = &cpu->env;
+ uint64_t bvr = env->cp15.dbgbvr[n];
+ uint64_t bcr = env->cp15.dbgbcr[n];
+ vaddr addr;
+ int bt;
+ int flags = BP_CPU;
+
+ if (env->cpu_breakpoint[n]) {
+ cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[n]);
+ env->cpu_breakpoint[n] = NULL;
+ }
+
+ if (!extract64(bcr, 0, 1)) {
+        /* E bit clear : breakpoint disabled */
+ return;
+ }
+
+ bt = extract64(bcr, 20, 4);
+
+ switch (bt) {
+ case 4: /* unlinked address mismatch (reserved if AArch64) */
+ case 5: /* linked address mismatch (reserved if AArch64) */
+ qemu_log_mask(LOG_UNIMP,
+                      "arm: address mismatch breakpoint types not implemented\n");
+ return;
+ case 0: /* unlinked address match */
+ case 1: /* linked address match */
+ {
+ /* Bits [63:49] are hardwired to the value of bit [48]; that is,
+ * we behave as if the register was sign extended. Bits [1:0] are
+ * RES0. The BAS field is used to allow setting breakpoints on 16
+ * bit wide instructions; it is CONSTRAINED UNPREDICTABLE whether
+ * a bp will fire if the addresses covered by the bp and the addresses
+ * covered by the insn overlap but the insn doesn't start at the
+ * start of the bp address range. We choose to require the insn and
+ * the bp to have the same address. The constraints on writing to
+ * BAS enforced in dbgbcr_write mean we have only four cases:
+ * 0b0000 => no breakpoint
+ * 0b0011 => breakpoint on addr
+ * 0b1100 => breakpoint on addr + 2
+ * 0b1111 => breakpoint on addr
+ * See also figure D2-3 in the v8 ARM ARM (DDI0487A.c).
+ */
+ int bas = extract64(bcr, 5, 4);
+ addr = sextract64(bvr, 0, 49) & ~3ULL;
+ if (bas == 0) {
+ return;
+ }
+ if (bas == 0xc) {
+ addr += 2;
+ }
+ break;
+ }
+ case 2: /* unlinked context ID match */
+ case 8: /* unlinked VMID match (reserved if no EL2) */
+ case 10: /* unlinked context ID and VMID match (reserved if no EL2) */
+ qemu_log_mask(LOG_UNIMP,
+                      "arm: unlinked context breakpoint types not implemented\n");
+ return;
+ case 9: /* linked VMID match (reserved if no EL2) */
+ case 11: /* linked context ID and VMID match (reserved if no EL2) */
+ case 3: /* linked context ID match */
+ default:
+ /* We must generate no events for Linked context matches (unless
+ * they are linked to by some other bp/wp, which is handled in
+ * updates for the linking bp/wp). We choose to also generate no events
+ * for reserved values.
+ */
+ return;
+ }
+
+ cpu_breakpoint_insert(CPU(cpu), addr, flags, &env->cpu_breakpoint[n]);
+}
+
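+/* Recreate all QEMU breakpoints from the architectural breakpoint register
+ * state, as hw_watchpoint_update_all() does for watchpoints.
+ */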
+void hw_breakpoint_update_all(ARMCPU *cpu)
+{
+ int i;
+ CPUARMState *env = &cpu->env;
+
+ /* Completely clear out existing QEMU breakpoints and our array, to
+ * avoid possible stale entries following migration load.
+ */
+ cpu_breakpoint_remove_all(CPU(cpu), BP_CPU);
+ memset(env->cpu_breakpoint, 0, sizeof(env->cpu_breakpoint));
+
+ for (i = 0; i < ARRAY_SIZE(cpu->env.cpu_breakpoint); i++) {
+ hw_breakpoint_update(cpu, i);
+ }
+}
+
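+/* Write handler for DBGBVR<n>: update the stored value and the
+ * corresponding QEMU breakpoint.
+ */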
+static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ int i = ri->crm;
+
+ raw_write(env, ri, value);
+ hw_breakpoint_update(cpu, i);
+}
+
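+/* Write handler for DBGBCR<n>: apply the BAS read-only-bit constraints,
+ * then update the stored value and the QEMU breakpoint.
+ */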
+static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ int i = ri->crm;
+
+ /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only
+ * copy of BAS[0].
+ */
+ value = deposit64(value, 6, 1, extract64(value, 5, 1));
+ value = deposit64(value, 8, 1, extract64(value, 7, 1));
+
+ raw_write(env, ri, value);
+ hw_breakpoint_update(cpu, i);
+}
+
+static void define_debug_regs(ARMCPU *cpu)
+{
+ /* Define v7 and v8 architectural debug registers.
+ * These are just dummy implementations for now.
+ */
+ int i;
+ int wrps, brps, ctx_cmps;
+ ARMCPRegInfo dbgdidr = {
+ .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0,
+ .access = PL0_R, .accessfn = access_tda,
+ .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr,
+ };
+
+ /* Note that all these register fields hold "number of Xs minus 1". */
+ brps = extract32(cpu->dbgdidr, 24, 4);
+ wrps = extract32(cpu->dbgdidr, 28, 4);
+ ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
+
+ assert(ctx_cmps <= brps);
+
+    /* The DBGDIDR and ID_AA64DFR0_EL1 registers define various properties
+ * of the debug registers such as number of breakpoints;
+ * check that if they both exist then they agree.
+ */
+ if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
+ assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps);
+ assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps);
+ assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps);
+ }
+
+ define_one_arm_cp_reg(cpu, &dbgdidr);
+ define_arm_cp_regs(cpu, debug_cp_reginfo);
+
+ if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
+ define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
+ }
+
+ for (i = 0; i < brps + 1; i++) {
+ ARMCPRegInfo dbgregs[] = {
+ { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
+ .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
+ .access = PL1_RW, .accessfn = access_tda,
+ .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
+ .writefn = dbgbvr_write, .raw_writefn = raw_write
+ },
+ { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
+ .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
+ .access = PL1_RW, .accessfn = access_tda,
+ .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
+ .writefn = dbgbcr_write, .raw_writefn = raw_write
+ },
+ REGINFO_SENTINEL
+ };
+ define_arm_cp_regs(cpu, dbgregs);
+ }
+
+ for (i = 0; i < wrps + 1; i++) {
+ ARMCPRegInfo dbgregs[] = {
+ { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
+ .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
+ .access = PL1_RW, .accessfn = access_tda,
+ .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
+ .writefn = dbgwvr_write, .raw_writefn = raw_write
+ },
+ { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
+ .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
+ .access = PL1_RW, .accessfn = access_tda,
+ .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
+ .writefn = dbgwcr_write, .raw_writefn = raw_write
+ },
+ REGINFO_SENTINEL
+ };
+ define_arm_cp_regs(cpu, dbgregs);
+ }
+}
+
+void register_cp_regs_for_features(ARMCPU *cpu)
+{
+ /* Register all the coprocessor registers based on feature bits */
+ CPUARMState *env = &cpu->env;
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ /* M profile has no coprocessor registers */
+ return;
+ }
+
+ define_arm_cp_regs(cpu, cp_reginfo);
+ if (!arm_feature(env, ARM_FEATURE_V8)) {
+ /* Must go early as it is full of wildcards that may be
+ * overridden by later definitions.
+ */
+ define_arm_cp_regs(cpu, not_v8_cp_reginfo);
+ }
+
+ if (arm_feature(env, ARM_FEATURE_V6)) {
+ /* The ID registers all have impdef reset values */
+ ARMCPRegInfo v6_idregs[] = {
+ { .name = "ID_PFR0", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->id_pfr0 },
+ { .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->id_pfr1 },
+ { .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->id_dfr0 },
+ { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->id_afr0 },
+ { .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->id_mmfr0 },
+ { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->id_mmfr1 },
+ { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->id_mmfr2 },
+ { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->id_mmfr3 },
+ { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->id_isar0 },
+ { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->id_isar1 },
+ { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->id_isar2 },
+ { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->id_isar3 },
+ { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->id_isar4 },
+ { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->id_isar5 },
+ { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->id_mmfr4 },
+ /* 7 is as yet unallocated and must RAZ */
+ { .name = "ID_ISAR7_RESERVED", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ REGINFO_SENTINEL
+ };
+ define_arm_cp_regs(cpu, v6_idregs);
+ define_arm_cp_regs(cpu, v6_cp_reginfo);
+ } else {
+ define_arm_cp_regs(cpu, not_v6_cp_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_V6K)) {
+ define_arm_cp_regs(cpu, v6k_cp_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_V7MP) &&
+ !arm_feature(env, ARM_FEATURE_MPU)) {
+ define_arm_cp_regs(cpu, v7mp_cp_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_V7)) {
+ /* v7 performance monitor control register: same implementor
+ * field as main ID register, and we implement only the cycle
+ * count register.
+ */
+#ifndef CONFIG_USER_ONLY
+ ARMCPRegInfo pmcr = {
+ .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
+ .access = PL0_RW,
+ .type = ARM_CP_IO | ARM_CP_ALIAS,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
+ .accessfn = pmreg_access, .writefn = pmcr_write,
+ .raw_writefn = raw_write,
+ };
+ ARMCPRegInfo pmcr64 = {
+ .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
+ .access = PL0_RW, .accessfn = pmreg_access,
+ .type = ARM_CP_IO,
+ .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
+ .resetvalue = cpu->midr & 0xff000000,
+ .writefn = pmcr_write, .raw_writefn = raw_write,
+ };
+ define_one_arm_cp_reg(cpu, &pmcr);
+ define_one_arm_cp_reg(cpu, &pmcr64);
+#endif
+ ARMCPRegInfo clidr = {
+ .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
+ .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
+ };
+ define_one_arm_cp_reg(cpu, &clidr);
+ define_arm_cp_regs(cpu, v7_cp_reginfo);
+ define_debug_regs(cpu);
+ } else {
+ define_arm_cp_regs(cpu, not_v7_cp_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ /* AArch64 ID registers, which all have impdef reset values.
+ * Note that within the ID register ranges the unused slots
+ * must all RAZ, not UNDEF; future architecture versions may
+ * define new registers here.
+ */
+ ARMCPRegInfo v8_idregs[] = {
+ { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->id_aa64pfr0 },
+ { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->id_aa64pfr1},
+ { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64PFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64PFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 7,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ /* We mask out the PMUVer field, because we don't currently
+ * implement the PMU. Not advertising it prevents the guest
+ * from trying to use it and getting UNDEFs on registers we
+ * don't implement.
+ */
+ .resetvalue = cpu->id_aa64dfr0 & ~0xf00 },
+ { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->id_aa64dfr1 },
+ { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64DFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 3,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->id_aa64afr0 },
+ { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->id_aa64afr1 },
+ { .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64AFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 7,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->id_aa64isar0 },
+ { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->id_aa64isar1 },
+ { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64ISAR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 4,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64ISAR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 5,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64ISAR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 6,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64ISAR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 7,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->id_aa64mmfr0 },
+ { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->id_aa64mmfr1 },
+ { .name = "ID_AA64MMFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64MMFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 5,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64MMFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 6,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ID_AA64MMFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 7,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "MVFR0_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->mvfr0 },
+ { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->mvfr1 },
+ { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->mvfr2 },
+ { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "MVFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
+ .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+ .resetvalue = cpu->pmceid0 },
+ { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
+ .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+ .resetvalue = cpu->pmceid0 },
+ { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
+ .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+ .resetvalue = cpu->pmceid1 },
+ { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
+ .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
+ .resetvalue = cpu->pmceid1 },
+ REGINFO_SENTINEL
+ };
+ /* RVBAR_EL1 is only implemented if EL1 is the highest EL */
+ if (!arm_feature(env, ARM_FEATURE_EL3) &&
+ !arm_feature(env, ARM_FEATURE_EL2)) {
+ ARMCPRegInfo rvbar = {
+ .name = "RVBAR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 0, .opc2 = 1,
+ .type = ARM_CP_CONST, .access = PL1_R, .resetvalue = cpu->rvbar
+ };
+ define_one_arm_cp_reg(cpu, &rvbar);
+ }
+ define_arm_cp_regs(cpu, v8_idregs);
+ define_arm_cp_regs(cpu, v8_cp_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_EL2)) {
+ uint64_t vmpidr_def = mpidr_read_val(env);
+ ARMCPRegInfo vpidr_regs[] = {
+ { .name = "VPIDR", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
+ .access = PL2_RW, .accessfn = access_el3_aa32ns,
+ .resetvalue = cpu->midr,
+ .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
+ { .name = "VPIDR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
+ .access = PL2_RW, .resetvalue = cpu->midr,
+ .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
+ { .name = "VMPIDR", .state = ARM_CP_STATE_AA32,
+ .cp = 15, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
+ .access = PL2_RW, .accessfn = access_el3_aa32ns,
+ .resetvalue = vmpidr_def,
+ .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
+ { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
+ .access = PL2_RW,
+ .resetvalue = vmpidr_def,
+ .fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
+ REGINFO_SENTINEL
+ };
+ define_arm_cp_regs(cpu, vpidr_regs);
+ define_arm_cp_regs(cpu, el2_cp_reginfo);
+ /* RVBAR_EL2 is only implemented if EL2 is the highest EL */
+ if (!arm_feature(env, ARM_FEATURE_EL3)) {
+ ARMCPRegInfo rvbar = {
+ .name = "RVBAR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 1,
+ .type = ARM_CP_CONST, .access = PL2_R, .resetvalue = cpu->rvbar
+ };
+ define_one_arm_cp_reg(cpu, &rvbar);
+ }
+ } else {
+ /* If EL2 is missing but higher ELs are enabled, we need to
+ * register the no_el2 reginfos.
+ */
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ /* When EL3 exists but not EL2, VPIDR and VMPIDR take the value
+ * of MIDR_EL1 and MPIDR_EL1.
+ */
+ ARMCPRegInfo vpidr_regs[] = {
+ { .name = "VPIDR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 0,
+ .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
+ .type = ARM_CP_CONST, .resetvalue = cpu->midr,
+ .fieldoffset = offsetof(CPUARMState, cp15.vpidr_el2) },
+ { .name = "VMPIDR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 0, .crm = 0, .opc2 = 5,
+ .access = PL2_RW, .accessfn = access_el3_aa32ns_aa64any,
+ .type = ARM_CP_NO_RAW,
+ .writefn = arm_cp_write_ignore, .readfn = mpidr_read },
+ REGINFO_SENTINEL
+ };
+ define_arm_cp_regs(cpu, vpidr_regs);
+ define_arm_cp_regs(cpu, el3_no_el2_cp_reginfo);
+ }
+ }
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ define_arm_cp_regs(cpu, el3_cp_reginfo);
+ ARMCPRegInfo el3_regs[] = {
+ { .name = "RVBAR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 1,
+ .type = ARM_CP_CONST, .access = PL3_R, .resetvalue = cpu->rvbar },
+ { .name = "SCTLR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 0,
+ .access = PL3_RW,
+ .raw_writefn = raw_write, .writefn = sctlr_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.sctlr_el[3]),
+ .resetvalue = cpu->reset_sctlr },
+ REGINFO_SENTINEL
+ };
+
+ define_arm_cp_regs(cpu, el3_regs);
+ }
+ /* The behaviour of NSACR is sufficiently various that we don't
+ * try to describe it in a single reginfo:
+ * if EL3 is 64 bit, then trap to EL3 from S EL1,
+ * reads as constant 0xc00 from NS EL1 and NS EL2
+ * if EL3 is 32 bit, then RW at EL3, RO at NS EL1 and NS EL2
+ * if v7 without EL3, register doesn't exist
+ * if v8 without EL3, reads as constant 0xc00 from NS EL1 and NS EL2
+ */
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ if (arm_feature(env, ARM_FEATURE_AARCH64)) {
+ ARMCPRegInfo nsacr = {
+ .name = "NSACR", .type = ARM_CP_CONST,
+ .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
+ .access = PL1_RW, .accessfn = nsacr_access,
+ .resetvalue = 0xc00
+ };
+ define_one_arm_cp_reg(cpu, &nsacr);
+ } else {
+ ARMCPRegInfo nsacr = {
+ .name = "NSACR",
+ .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
+ .access = PL3_RW | PL1_R,
+ .resetvalue = 0,
+ .fieldoffset = offsetof(CPUARMState, cp15.nsacr)
+ };
+ define_one_arm_cp_reg(cpu, &nsacr);
+ }
+ } else {
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ ARMCPRegInfo nsacr = {
+ .name = "NSACR", .type = ARM_CP_CONST,
+ .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 2,
+ .access = PL1_R,
+ .resetvalue = 0xc00
+ };
+ define_one_arm_cp_reg(cpu, &nsacr);
+ }
+ }
+
+ if (arm_feature(env, ARM_FEATURE_MPU)) {
+ if (arm_feature(env, ARM_FEATURE_V6)) {
+ /* PMSAv6 not implemented */
+ assert(arm_feature(env, ARM_FEATURE_V7));
+ define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
+ define_arm_cp_regs(cpu, pmsav7_cp_reginfo);
+ } else {
+ define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
+ }
+ } else {
+ define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
+ define_arm_cp_regs(cpu, vmsa_cp_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
+ define_arm_cp_regs(cpu, t2ee_cp_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
+ define_arm_cp_regs(cpu, generic_timer_cp_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_VAPA)) {
+ define_arm_cp_regs(cpu, vapa_cp_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_CACHE_TEST_CLEAN)) {
+ define_arm_cp_regs(cpu, cache_test_clean_cp_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_CACHE_DIRTY_REG)) {
+ define_arm_cp_regs(cpu, cache_dirty_status_cp_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_CACHE_BLOCK_OPS)) {
+ define_arm_cp_regs(cpu, cache_block_ops_cp_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
+ define_arm_cp_regs(cpu, omap_cp_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
+ define_arm_cp_regs(cpu, strongarm_cp_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_XSCALE)) {
+ define_arm_cp_regs(cpu, xscale_cp_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
+ define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
+ }
+ if (arm_feature(env, ARM_FEATURE_LPAE)) {
+ define_arm_cp_regs(cpu, lpae_cp_reginfo);
+ }
+ /* Slightly awkwardly, the OMAP and StrongARM cores need all of
+ * cp15 crn=0 to be writes-ignored, whereas for other cores they should
+ * be read-only (ie write causes UNDEF exception).
+ */
+ {
+ ARMCPRegInfo id_pre_v8_midr_cp_reginfo[] = {
+ /* Pre-v8 MIDR space.
+ * Note that the MIDR isn't a simple constant register because
+ * of the TI925 behaviour where writes to another register can
+ * cause the MIDR value to change.
+ *
+ * Unimplemented registers in the c15 0 0 0 space default to
+ * MIDR. Define MIDR first as this entire space, then CTR, TCMTR
+ * and friends override accordingly.
+ */
+ { .name = "MIDR",
+ .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = CP_ANY,
+ .access = PL1_R, .resetvalue = cpu->midr,
+ .writefn = arm_cp_write_ignore, .raw_writefn = raw_write,
+ .readfn = midr_read,
+ .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
+ .type = ARM_CP_OVERRIDE },
+ /* crn = 0 op1 = 0 crm = 3..7 : currently unassigned; we RAZ. */
+ { .name = "DUMMY",
+ .cp = 15, .crn = 0, .crm = 3, .opc1 = 0, .opc2 = CP_ANY,
+ .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "DUMMY",
+ .cp = 15, .crn = 0, .crm = 4, .opc1 = 0, .opc2 = CP_ANY,
+ .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "DUMMY",
+ .cp = 15, .crn = 0, .crm = 5, .opc1 = 0, .opc2 = CP_ANY,
+ .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "DUMMY",
+ .cp = 15, .crn = 0, .crm = 6, .opc1 = 0, .opc2 = CP_ANY,
+ .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "DUMMY",
+ .cp = 15, .crn = 0, .crm = 7, .opc1 = 0, .opc2 = CP_ANY,
+ .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+ REGINFO_SENTINEL
+ };
+ ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
+ { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
+ .access = PL1_R, .type = ARM_CP_NO_RAW, .resetvalue = cpu->midr,
+ .fieldoffset = offsetof(CPUARMState, cp15.c0_cpuid),
+ .readfn = midr_read },
+ /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
+ { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
+ .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
+ .access = PL1_R, .resetvalue = cpu->midr },
+ { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
+ .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
+ .access = PL1_R, .resetvalue = cpu->midr },
+ { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
+ .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
+ REGINFO_SENTINEL
+ };
+ ARMCPRegInfo id_cp_reginfo[] = {
+ /* These are common to v8 and pre-v8 */
+ { .name = "CTR",
+ .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
+ .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
+ { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
+ .access = PL0_R, .accessfn = ctr_el0_access,
+ .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
+ /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
+ { .name = "TCMTR",
+ .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
+ .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+ REGINFO_SENTINEL
+ };
+ /* TLBTR is specific to VMSA */
+ ARMCPRegInfo id_tlbtr_reginfo = {
+ .name = "TLBTR",
+ .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
+ .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0,
+ };
+ /* MPUIR is specific to PMSA V6+ */
+ ARMCPRegInfo id_mpuir_reginfo = {
+ .name = "MPUIR",
+ .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->pmsav7_dregion << 8
+ };
+ ARMCPRegInfo crn0_wi_reginfo = {
+ .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
+ .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
+ .type = ARM_CP_NOP | ARM_CP_OVERRIDE
+ };
+ if (arm_feature(env, ARM_FEATURE_OMAPCP) ||
+ arm_feature(env, ARM_FEATURE_STRONGARM)) {
+ ARMCPRegInfo *r;
+ /* Register the blanket "writes ignored" value first to cover the
+ * whole space. Then update the specific ID registers to allow write
+ * access, so that they ignore writes rather than causing them to
+ * UNDEF.
+ */
+ define_one_arm_cp_reg(cpu, &crn0_wi_reginfo);
+ for (r = id_pre_v8_midr_cp_reginfo;
+ r->type != ARM_CP_SENTINEL; r++) {
+ r->access = PL1_RW;
+ }
+ for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
+ r->access = PL1_RW;
+ }
+        id_tlbtr_reginfo.access = PL1_RW;
+        id_mpuir_reginfo.access = PL1_RW;
+ }
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
+ } else {
+ define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
+ }
+ define_arm_cp_regs(cpu, id_cp_reginfo);
+ if (!arm_feature(env, ARM_FEATURE_MPU)) {
+ define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
+ } else if (arm_feature(env, ARM_FEATURE_V7)) {
+ define_one_arm_cp_reg(cpu, &id_mpuir_reginfo);
+ }
+ }
+
+ if (arm_feature(env, ARM_FEATURE_MPIDR)) {
+ define_arm_cp_regs(cpu, mpidr_cp_reginfo);
+ }
+
+ if (arm_feature(env, ARM_FEATURE_AUXCR)) {
+ ARMCPRegInfo auxcr_reginfo[] = {
+ { .name = "ACTLR_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 1,
+ .access = PL1_RW, .type = ARM_CP_CONST,
+ .resetvalue = cpu->reset_auxcr },
+ { .name = "ACTLR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 1,
+ .access = PL2_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ { .name = "ACTLR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 0, .opc2 = 1,
+ .access = PL3_RW, .type = ARM_CP_CONST,
+ .resetvalue = 0 },
+ REGINFO_SENTINEL
+ };
+ define_arm_cp_regs(cpu, auxcr_reginfo);
+ }
+
+ if (arm_feature(env, ARM_FEATURE_CBAR)) {
+ if (arm_feature(env, ARM_FEATURE_AARCH64)) {
+ /* 32 bit view is [31:18] 0...0 [43:32]. */
+ uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
+ | extract64(cpu->reset_cbar, 32, 12);
+ ARMCPRegInfo cbar_reginfo[] = {
+ { .name = "CBAR",
+ .type = ARM_CP_CONST,
+ .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
+ .access = PL1_R, .resetvalue = cpu->reset_cbar },
+ { .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
+ .type = ARM_CP_CONST,
+ .opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
+ .access = PL1_R, .resetvalue = cbar32 },
+ REGINFO_SENTINEL
+ };
+ /* We don't implement a r/w 64 bit CBAR currently */
+ assert(arm_feature(env, ARM_FEATURE_CBAR_RO));
+ define_arm_cp_regs(cpu, cbar_reginfo);
+ } else {
+ ARMCPRegInfo cbar = {
+ .name = "CBAR",
+ .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
+ .access = PL1_R|PL3_W, .resetvalue = cpu->reset_cbar,
+ .fieldoffset = offsetof(CPUARMState,
+ cp15.c15_config_base_address)
+ };
+ if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
+ cbar.access = PL1_R;
+ cbar.fieldoffset = 0;
+ cbar.type = ARM_CP_CONST;
+ }
+ define_one_arm_cp_reg(cpu, &cbar);
+ }
+ }
+
+ /* Generic registers whose values depend on the implementation */
+ {
+ ARMCPRegInfo sctlr = {
+ .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
+ .access = PL1_RW,
+ .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
+ offsetof(CPUARMState, cp15.sctlr_ns) },
+ .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
+ .raw_writefn = raw_write,
+ };
+ if (arm_feature(env, ARM_FEATURE_XSCALE)) {
+ /* Normally we would always end the TB on an SCTLR write, but Linux
+ * arch/arm/mach-pxa/sleep.S expects two instructions following
+ * an MMU enable to execute from cache. Imitate this behaviour.
+ */
+ sctlr.type |= ARM_CP_SUPPRESS_TB_END;
+ }
+ define_one_arm_cp_reg(cpu, &sctlr);
+ }
+}
+
+ARMCPU *cpu_arm_init(const char *cpu_model)
+{
+ return ARM_CPU(cpu_generic_init(TYPE_ARM_CPU, cpu_model));
+}
+
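+/* Register the FP/Neon register set with the gdbstub, picking the XML
+ * description that matches this CPU's features.
+ */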
+void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ CPUARMState *env = &cpu->env;
+
+ if (arm_feature(env, ARM_FEATURE_AARCH64)) {
+ gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg,
+ aarch64_fpu_gdb_set_reg,
+ 34, "aarch64-fpu.xml", 0);
+ } else if (arm_feature(env, ARM_FEATURE_NEON)) {
+ gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
+ 51, "arm-neon.xml", 0);
+ } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
+ gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
+ 35, "arm-vfp3.xml", 0);
+ } else if (arm_feature(env, ARM_FEATURE_VFP)) {
+ gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg,
+ 19, "arm-vfp.xml", 0);
+ }
+}
+
+/* Sort alphabetically by type name, except for "any". */
+static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b)
+{
+ ObjectClass *class_a = (ObjectClass *)a;
+ ObjectClass *class_b = (ObjectClass *)b;
+ const char *name_a, *name_b;
+
+ name_a = object_class_get_name(class_a);
+ name_b = object_class_get_name(class_b);
+ if (strcmp(name_a, "any-" TYPE_ARM_CPU) == 0) {
+ return 1;
+ } else if (strcmp(name_b, "any-" TYPE_ARM_CPU) == 0) {
+ return -1;
+ } else {
+ return strcmp(name_a, name_b);
+ }
+}
+
+static void arm_cpu_list_entry(gpointer data, gpointer user_data)
+{
+ ObjectClass *oc = data;
+ CPUListState *s = user_data;
+ const char *typename;
+ char *name;
+
+ typename = object_class_get_name(oc);
+ name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_ARM_CPU));
+ (*s->cpu_fprintf)(s->file, " %s\n",
+ name);
+ g_free(name);
+}
+
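+/* Print the list of available CPU type names to the given stream. */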
+void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf)
+{
+ CPUListState s = {
+ .file = f,
+ .cpu_fprintf = cpu_fprintf,
+ };
+ GSList *list;
+
+ list = object_class_get_list(TYPE_ARM_CPU, false);
+ list = g_slist_sort(list, arm_cpu_list_compare);
+ (*cpu_fprintf)(f, "Available CPUs:\n");
+ g_slist_foreach(list, arm_cpu_list_entry, &s);
+ g_slist_free(list);
+#ifdef CONFIG_KVM
+ /* The 'host' CPU type is dynamically registered only if KVM is
+ * enabled, so we have to special-case it here:
+ */
+ (*cpu_fprintf)(f, " host (only available in KVM mode)\n");
+#endif
+}
+
+static void arm_cpu_add_definition(gpointer data, gpointer user_data)
+{
+ ObjectClass *oc = data;
+ CpuDefinitionInfoList **cpu_list = user_data;
+ CpuDefinitionInfoList *entry;
+ CpuDefinitionInfo *info;
+ const char *typename;
+
+ typename = object_class_get_name(oc);
+ info = g_malloc0(sizeof(*info));
+ info->name = g_strndup(typename,
+ strlen(typename) - strlen("-" TYPE_ARM_CPU));
+
+ entry = g_malloc0(sizeof(*entry));
+ entry->value = info;
+ entry->next = *cpu_list;
+ *cpu_list = entry;
+}
+
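+/* Build the list of CPU definitions reported via the QMP
+ * query-cpu-definitions command.
+ */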
+CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
+{
+ CpuDefinitionInfoList *cpu_list = NULL;
+ GSList *list;
+
+ list = object_class_get_list(TYPE_ARM_CPU, false);
+ g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
+ g_slist_free(list);
+
+ return cpu_list;
+}
+
+static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
+ void *opaque, int state, int secstate,
+ int crm, int opc1, int opc2)
+{
+ /* Private utility function for define_one_arm_cp_reg_with_opaque():
+ * add a single reginfo struct to the hash table.
+ */
+ uint32_t *key = g_new(uint32_t, 1);
+ ARMCPRegInfo *r2 = g_memdup(r, sizeof(ARMCPRegInfo));
+ int is64 = (r->type & ARM_CP_64BIT) ? 1 : 0;
+ int ns = (secstate & ARM_CP_SECSTATE_NS) ? 1 : 0;
+
+ /* Reset the secure state to the specific incoming state. This is
+ * necessary as the register may have been defined with both states.
+ */
+ r2->secure = secstate;
+
+ if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
+ /* Register is banked (using both entries in array).
+ * Overwriting fieldoffset as the array is only used to define
+ * banked registers but later only fieldoffset is used.
+ */
+ r2->fieldoffset = r->bank_fieldoffsets[ns];
+ }
+
+ if (state == ARM_CP_STATE_AA32) {
+ if (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]) {
+ /* If the register is banked then we don't need to migrate or
+ * reset the 32-bit instance in certain cases:
+ *
+ * 1) If the register has both 32-bit and 64-bit instances then we
+ * can count on the 64-bit instance taking care of the
+ * non-secure bank.
+ * 2) If ARMv8 is enabled then we can count on a 64-bit version
+ * taking care of the secure bank. This requires that separate
+ * 32 and 64-bit definitions are provided.
+ */
+ if ((r->state == ARM_CP_STATE_BOTH && ns) ||
+ (arm_feature(&cpu->env, ARM_FEATURE_V8) && !ns)) {
+ r2->type |= ARM_CP_ALIAS;
+ }
+ } else if ((secstate != r->secure) && !ns) {
+ /* The register is not banked so we only want to allow migration of
+ * the non-secure instance.
+ */
+ r2->type |= ARM_CP_ALIAS;
+ }
+
+ if (r->state == ARM_CP_STATE_BOTH) {
+ /* We assume it is a cp15 register if the .cp field is left unset.
+ */
+ if (r2->cp == 0) {
+ r2->cp = 15;
+ }
+
+#ifdef HOST_WORDS_BIGENDIAN
+ if (r2->fieldoffset) {
+ r2->fieldoffset += sizeof(uint32_t);
+ }
+#endif
+ }
+ }
+ if (state == ARM_CP_STATE_AA64) {
+ /* To allow abbreviation of ARMCPRegInfo
+ * definitions, we treat cp == 0 as equivalent to
+ * the value for "standard guest-visible sysreg".
+ * STATE_BOTH definitions are also always "standard
+ * sysreg" in their AArch64 view (the .cp value may
+ * be non-zero for the benefit of the AArch32 view).
+ */
+ if (r->cp == 0 || r->state == ARM_CP_STATE_BOTH) {
+ r2->cp = CP_REG_ARM64_SYSREG_CP;
+ }
+ *key = ENCODE_AA64_CP_REG(r2->cp, r2->crn, crm,
+ r2->opc0, opc1, opc2);
+ } else {
+ *key = ENCODE_CP_REG(r2->cp, is64, ns, r2->crn, crm, opc1, opc2);
+ }
+ if (opaque) {
+ r2->opaque = opaque;
+ }
+ /* reginfo passed to helpers is correct for the actual access,
+ * and is never ARM_CP_STATE_BOTH:
+ */
+ r2->state = state;
+ /* Make sure reginfo passed to helpers for wildcarded regs
+ * has the correct crm/opc1/opc2 for this reg, not CP_ANY:
+ */
+ r2->crm = crm;
+ r2->opc1 = opc1;
+ r2->opc2 = opc2;
+ /* By convention, for wildcarded registers only the first
+ * entry is used for migration; the others are marked as
+ * ALIAS so we don't try to transfer the register
+ * multiple times. Special registers (ie NOP/WFI) are
+ * never migratable and not even raw-accessible.
+ */
+ if ((r->type & ARM_CP_SPECIAL)) {
+ r2->type |= ARM_CP_NO_RAW;
+ }
+ if (((r->crm == CP_ANY) && crm != 0) ||
+ ((r->opc1 == CP_ANY) && opc1 != 0) ||
+ ((r->opc2 == CP_ANY) && opc2 != 0)) {
+ r2->type |= ARM_CP_ALIAS;
+ }
+
+ /* Check that raw accesses are either forbidden or handled. Note that
+ * we can't assert this earlier because the setup of fieldoffset for
+ * banked registers has to be done first.
+ */
+ if (!(r2->type & ARM_CP_NO_RAW)) {
+ assert(!raw_accessors_invalid(r2));
+ }
+
+ /* Overriding of an existing definition must be explicitly
+ * requested.
+ */
+ if (!(r->type & ARM_CP_OVERRIDE)) {
+ ARMCPRegInfo *oldreg;
+ oldreg = g_hash_table_lookup(cpu->cp_regs, key);
+ if (oldreg && !(oldreg->type & ARM_CP_OVERRIDE)) {
+ fprintf(stderr, "Register redefined: cp=%d %d bit "
+ "crn=%d crm=%d opc1=%d opc2=%d, "
+ "was %s, now %s\n", r2->cp, 32 + 32 * is64,
+ r2->crn, r2->crm, r2->opc1, r2->opc2,
+ oldreg->name, r2->name);
+ g_assert_not_reached();
+ }
+ }
+ g_hash_table_insert(cpu->cp_regs, key, r2);
+}
+
+
+void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
+ const ARMCPRegInfo *r, void *opaque)
+{
+ /* Define implementations of coprocessor registers.
+ * We store these in a hashtable because typically
+ * there are fewer than 150 registers in a space which
+ * is 16*16*16*8*8 = 262144 in size.
+ * Wildcarding is supported for the crm, opc1 and opc2 fields.
+ * If a register is defined twice then the second definition is
+ * used, so this can be used to define some generic registers and
+ * then override them with implementation specific variations.
+ * At least one of the original and the second definition should
+ * include ARM_CP_OVERRIDE in its type bits -- this is just a guard
+ * against accidental use.
+ *
+ * The state field defines whether the register is to be
+ * visible in the AArch32 or AArch64 execution state. If the
+ * state is set to ARM_CP_STATE_BOTH then we synthesise a
+ * reginfo structure for the AArch32 view, which sees the lower
+ * 32 bits of the 64 bit register.
+ *
+ * Only registers visible in AArch64 may set r->opc0; opc0 cannot
+ * be wildcarded. AArch64 registers are always considered to be 64
+ * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
+ * the register, if any.
+ */
+ int crm, opc1, opc2, state;
+ int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
+ int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
+ int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
+ int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
+ int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
+ int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
+ /* 64 bit registers have only CRm and Opc1 fields */
+ assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
+ /* op0 only exists in the AArch64 encodings */
+ assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
+ /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
+ assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
+ /* The AArch64 pseudocode CheckSystemAccess() specifies that op1
+ * encodes a minimum access level for the register. We roll this
+ * runtime check into our general permission check code, so check
+ * here that the reginfo's specified permissions are strict enough
+ * to encompass the generic architectural permission check.
+ */
+ if (r->state != ARM_CP_STATE_AA32) {
+ int mask = 0;
+ switch (r->opc1) {
+ case 0: case 1: case 2:
+ /* min_EL EL1 */
+ mask = PL1_RW;
+ break;
+ case 3:
+ /* min_EL EL0 */
+ mask = PL0_RW;
+ break;
+ case 4:
+ /* min_EL EL2 */
+ mask = PL2_RW;
+ break;
+ case 5:
+ /* unallocated encoding, so not possible */
+ assert(false);
+ break;
+ case 6:
+ /* min_EL EL3 */
+ mask = PL3_RW;
+ break;
+ case 7:
+ /* min_EL EL1, secure mode only (we don't check the latter) */
+ mask = PL1_RW;
+ break;
+ default:
+ /* broken reginfo with out-of-range opc1 */
+ assert(false);
+ break;
+ }
+ /* assert our permissions are not too lax (stricter is fine) */
+ assert((r->access & ~mask) == 0);
+ }
+
+ /* Check that the register definition has enough info to handle
+ * reads and writes if they are permitted.
+ */
+ if (!(r->type & (ARM_CP_SPECIAL|ARM_CP_CONST))) {
+ if (r->access & PL3_R) {
+ assert((r->fieldoffset ||
+ (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
+ r->readfn);
+ }
+ if (r->access & PL3_W) {
+ assert((r->fieldoffset ||
+ (r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1])) ||
+ r->writefn);
+ }
+ }
+ /* Bad type field probably means missing sentinel at end of reg list */
+ assert(cptype_valid(r->type));
+ for (crm = crmmin; crm <= crmmax; crm++) {
+ for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
+ for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
+ for (state = ARM_CP_STATE_AA32;
+ state <= ARM_CP_STATE_AA64; state++) {
+ if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
+ continue;
+ }
+ if (state == ARM_CP_STATE_AA32) {
+ /* Under AArch32 CP registers can be common
+ * (same for secure and non-secure world) or banked.
+ */
+ switch (r->secure) {
+ case ARM_CP_SECSTATE_S:
+ case ARM_CP_SECSTATE_NS:
+ add_cpreg_to_hashtable(cpu, r, opaque, state,
+ r->secure, crm, opc1, opc2);
+ break;
+ default:
+ add_cpreg_to_hashtable(cpu, r, opaque, state,
+ ARM_CP_SECSTATE_S,
+ crm, opc1, opc2);
+ add_cpreg_to_hashtable(cpu, r, opaque, state,
+ ARM_CP_SECSTATE_NS,
+ crm, opc1, opc2);
+ break;
+ }
+ } else {
+ /* AArch64 registers get mapped to non-secure instance
+ * of AArch32 */
+ add_cpreg_to_hashtable(cpu, r, opaque, state,
+ ARM_CP_SECSTATE_NS,
+ crm, opc1, opc2);
+ }
+ }
+ }
+ }
+ }
+}
+
+void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
+ const ARMCPRegInfo *regs, void *opaque)
+{
+ /* Define a whole list of registers */
+ const ARMCPRegInfo *r;
+ for (r = regs; r->type != ARM_CP_SENTINEL; r++) {
+ define_one_arm_cp_reg_with_opaque(cpu, r, opaque);
+ }
+}
+
+const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp)
+{
+ return g_hash_table_lookup(cpregs, &encoded_cp);
+}
+
+void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* Helper coprocessor write function for write-ignore registers */
+}
+
+uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ /* Helper coprocessor read function for read-as-zero registers */
+ return 0;
+}
+
+void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
+{
+ /* Helper coprocessor reset function for do-nothing-on-reset registers */
+}
+
+static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
+{
+ /* Return true if it is not valid for us to switch to
+ * this CPU mode (ie all the UNPREDICTABLE cases in
+ * the ARM ARM CPSRWriteByInstr pseudocode).
+ */
+
+ /* Changes to or from Hyp via MSR and CPS are illegal. */
+ if (write_type == CPSRWriteByInstr &&
+ ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_HYP ||
+ mode == ARM_CPU_MODE_HYP)) {
+ return 1;
+ }
+
+ switch (mode) {
+ case ARM_CPU_MODE_USR:
+ return 0;
+ case ARM_CPU_MODE_SYS:
+ case ARM_CPU_MODE_SVC:
+ case ARM_CPU_MODE_ABT:
+ case ARM_CPU_MODE_UND:
+ case ARM_CPU_MODE_IRQ:
+ case ARM_CPU_MODE_FIQ:
+ /* Note that we don't implement the IMPDEF NSACR.RFR which in v7
+ * allows FIQ mode to be Secure-only. (In v8 this doesn't exist.)
+ */
+ /* If HCR.TGE is set then changes from Monitor to NS PL1 via MSR
+ * and CPS are treated as illegal mode changes.
+ */
+ if (write_type == CPSRWriteByInstr &&
+ (env->cp15.hcr_el2 & HCR_TGE) &&
+ (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
+ !arm_is_secure_below_el3(env)) {
+ return 1;
+ }
+ return 0;
+ case ARM_CPU_MODE_HYP:
+ return !arm_feature(env, ARM_FEATURE_EL2)
+ || arm_current_el(env) < 2 || arm_is_secure(env);
+ case ARM_CPU_MODE_MON:
+ return arm_current_el(env) < 3;
+ default:
+ return 1;
+ }
+}
+
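+/* Informal sketch of the bit layout reassembled by cpsr_read() below:
+ * N=31, Z=30, C=29, V=28, Q=27, IT[1:0]=26:25, GE[3:0]=19:16,
+ * IT[7:2]=15:10, A/I/F=8:6 (from env->daif), T=5 (from env->thumb),
+ * with M[4:0] and the remaining bits coming from uncached_cpsr.
+ */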
+uint32_t cpsr_read(CPUARMState *env)
+{
+ int ZF;
+ ZF = (env->ZF == 0);
+ return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
+ (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
+ | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
+ | ((env->condexec_bits & 0xfc) << 8)
+ | (env->GE << 16) | (env->daif & CPSR_AIF);
+}
+
+void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
+ CPSRWriteType write_type)
+{
+ uint32_t changed_daif;
+
+ if (mask & CPSR_NZCV) {
+ env->ZF = (~val) & CPSR_Z;
+ env->NF = val;
+ env->CF = (val >> 29) & 1;
+ env->VF = (val << 3) & 0x80000000;
+ }
+ if (mask & CPSR_Q)
+ env->QF = ((val & CPSR_Q) != 0);
+ if (mask & CPSR_T)
+ env->thumb = ((val & CPSR_T) != 0);
+ if (mask & CPSR_IT_0_1) {
+ env->condexec_bits &= ~3;
+ env->condexec_bits |= (val >> 25) & 3;
+ }
+ if (mask & CPSR_IT_2_7) {
+ env->condexec_bits &= 3;
+ env->condexec_bits |= (val >> 8) & 0xfc;
+ }
+ if (mask & CPSR_GE) {
+ env->GE = (val >> 16) & 0xf;
+ }
+
+ /* In a V7 implementation that includes the security extensions but does
+ * not include Virtualization Extensions the SCR.FW and SCR.AW bits control
+ * whether non-secure software is allowed to change the CPSR_F and CPSR_A
+ * bits respectively.
+ *
+ * In a V8 implementation, it is permitted for privileged software to
+ * change the CPSR A/F bits regardless of the SCR.AW/FW bits.
+ */
+ if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
+ arm_feature(env, ARM_FEATURE_EL3) &&
+ !arm_feature(env, ARM_FEATURE_EL2) &&
+ !arm_is_secure(env)) {
+
+ changed_daif = (env->daif ^ val) & mask;
+
+ if (changed_daif & CPSR_A) {
+ /* Check to see if we are allowed to change the masking of async
+ * abort exceptions from a non-secure state.
+ */
+ if (!(env->cp15.scr_el3 & SCR_AW)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Ignoring attempt to switch CPSR_A flag from "
+ "non-secure world with SCR.AW bit clear\n");
+ mask &= ~CPSR_A;
+ }
+ }
+
+ if (changed_daif & CPSR_F) {
+ /* Check to see if we are allowed to change the masking of FIQ
+ * exceptions from a non-secure state.
+ */
+ if (!(env->cp15.scr_el3 & SCR_FW)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Ignoring attempt to switch CPSR_F flag from "
+ "non-secure world with SCR.FW bit clear\n");
+ mask &= ~CPSR_F;
+ }
+
+ /* Check whether non-maskable FIQ (NMFI) support is enabled.
+ * If this bit is set software is not allowed to mask
+ * FIQs, but is allowed to set CPSR_F to 0.
+ */
+ if ((A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_NMFI) &&
+ (val & CPSR_F)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Ignoring attempt to enable CPSR_F flag "
+ "(non-maskable FIQ [NMFI] support enabled)\n");
+ mask &= ~CPSR_F;
+ }
+ }
+ }
+
+ env->daif &= ~(CPSR_AIF & mask);
+ env->daif |= val & CPSR_AIF & mask;
+
+ if (write_type != CPSRWriteRaw &&
+ ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
+ if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR) {
+ /* Note that we can only get here in USR mode if this is a
+ * gdb stub write; for this case we follow the architectural
+ * behaviour for guest writes in USR mode of ignoring an attempt
+ * to switch mode. (Those are caught by translate.c for writes
+ * triggered by guest instructions.)
+ */
+ mask &= ~CPSR_M;
+ } else if (bad_mode_switch(env, val & CPSR_M, write_type)) {
+ /* Attempt to switch to an invalid mode: this is UNPREDICTABLE in
+ * v7, and has defined behaviour in v8:
+ * + leave CPSR.M untouched
+ * + allow changes to the other CPSR fields
+ * + set PSTATE.IL
+ * For user changes via the GDB stub, we don't set PSTATE.IL,
+ * as this would be unnecessarily harsh for a user error.
+ */
+ mask &= ~CPSR_M;
+ if (write_type != CPSRWriteByGDBStub &&
+ arm_feature(env, ARM_FEATURE_V8)) {
+ mask |= CPSR_IL;
+ val |= CPSR_IL;
+ }
+ } else {
+ switch_mode(env, val & CPSR_M);
+ }
+ }
+ mask &= ~CACHED_CPSR_BITS;
+ env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
+}
+
+/* Sign/zero extend */
+uint32_t HELPER(sxtb16)(uint32_t x)
+{
+ uint32_t res;
+ res = (uint16_t)(int8_t)x;
+ res |= (uint32_t)(int8_t)(x >> 16) << 16;
+ return res;
+}
+
+uint32_t HELPER(uxtb16)(uint32_t x)
+{
+ uint32_t res;
+ res = (uint16_t)(uint8_t)x;
+ res |= (uint32_t)(uint8_t)(x >> 16) << 16;
+ return res;
+}
+
+uint32_t HELPER(clz)(uint32_t x)
+{
+ return clz32(x);
+}
+
+int32_t HELPER(sdiv)(int32_t num, int32_t den)
+{
+ if (den == 0)
+ return 0;
+ if (num == INT_MIN && den == -1)
+ return INT_MIN;
+ return num / den;
+}
+
+uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
+{
+ if (den == 0)
+ return 0;
+ return num / den;
+}
+
+uint32_t HELPER(rbit)(uint32_t x)
+{
+ return revbit32(x);
+}
+
+#if defined(CONFIG_USER_ONLY)
+
+/* These should probably raise undefined insn exceptions. */
+void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
+}
+
+uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
+ return 0;
+}
+
+void switch_mode(CPUARMState *env, int mode)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ if (mode != ARM_CPU_MODE_USR) {
+ cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
+ }
+}
+
+uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
+ uint32_t cur_el, bool secure)
+{
+ return 1;
+}
+
+void aarch64_sync_64_to_32(CPUARMState *env)
+{
+ g_assert_not_reached();
+}
+
+#else
+
+void switch_mode(CPUARMState *env, int mode)
+{
+ int old_mode;
+ int i;
+
+ old_mode = env->uncached_cpsr & CPSR_M;
+ if (mode == old_mode)
+ return;
+
+ if (old_mode == ARM_CPU_MODE_FIQ) {
+ memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
+ memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
+ } else if (mode == ARM_CPU_MODE_FIQ) {
+ memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
+ memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
+ }
+
+ i = bank_number(old_mode);
+ env->banked_r13[i] = env->regs[13];
+ env->banked_r14[i] = env->regs[14];
+ env->banked_spsr[i] = env->spsr;
+
+ i = bank_number(mode);
+ env->regs[13] = env->banked_r13[i];
+ env->regs[14] = env->banked_r14[i];
+ env->spsr = env->banked_spsr[i];
+}
+
+/* Physical Interrupt Target EL Lookup Table
+ *
+ * [ From ARM ARM section G1.13.4 (Table G1-15) ]
+ *
+ * The multi-dimensional table below is used to look up the target
+ * exception level given a number of condition criteria. Specifically, the
+ * target EL is based on SCR and HCR routing controls as well as the
+ * currently executing EL and secure state.
+ *
+ * Dimensions:
+ * target_el_table[2][2][2][2][2][4]
+ * | | | | | +--- Current EL
+ * | | | | +------ Non-secure(0)/Secure(1)
+ * | | | +--------- HCR mask override
+ * | | +------------ SCR exec state control
+ * | +--------------- SCR mask override
+ * +------------------ 32-bit(0)/64-bit(1) EL3
+ *
+ * The table values are as such:
+ * 0-3 = EL0-EL3
+ * -1 = Cannot occur
+ *
+ * The ARM ARM target EL table includes entries indicating that an "exception
+ * is not taken". The two cases where this is applicable are:
+ * 1) An exception is taken from EL3 but the SCR does not have the exception
+ * routed to EL3.
+ * 2) An exception is taken from EL2 but the HCR does not have the exception
+ * routed to EL2.
+ * In these two cases, the table below contains a target of EL1. This value is
+ * returned as it is expected that the consumer of the table data will check
+ * for "target EL >= current EL" to ensure the exception is not taken.
+ *
+ * SCR HCR
+ * 64 EA AMO From
+ * BIT IRQ IMO Non-secure Secure
+ * EL3 FIQ RW FMO EL0 EL1 EL2 EL3 EL0 EL1 EL2 EL3
+ */
+static const int8_t target_el_table[2][2][2][2][2][4] = {
+ {{{{/* 0 0 0 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },},
+ {/* 0 0 0 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},
+ {{/* 0 0 1 0 */{ 1, 1, 2, -1 },{ 3, -1, -1, 3 },},
+ {/* 0 0 1 1 */{ 2, 2, 2, -1 },{ 3, -1, -1, 3 },},},},
+ {{{/* 0 1 0 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},
+ {/* 0 1 0 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},
+ {{/* 0 1 1 0 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},
+ {/* 0 1 1 1 */{ 3, 3, 3, -1 },{ 3, -1, -1, 3 },},},},},
+ {{{{/* 1 0 0 0 */{ 1, 1, 2, -1 },{ 1, 1, -1, 1 },},
+ {/* 1 0 0 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},},
+ {{/* 1 0 1 0 */{ 1, 1, 1, -1 },{ 1, 1, -1, 1 },},
+ {/* 1 0 1 1 */{ 2, 2, 2, -1 },{ 1, 1, -1, 1 },},},},
+ {{{/* 1 1 0 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},
+ {/* 1 1 0 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},},
+ {{/* 1 1 1 0 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},
+ {/* 1 1 1 1 */{ 3, 3, 3, -1 },{ 3, 3, -1, 3 },},},},},
+};
+
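+/* Worked example (informal): on a system whose EL3 is AArch64 (is64 = 1)
+ * with SCR.IRQ = 0, SCR.RW = 1, HCR.IMO = 0 and HCR.TGE = 0, an IRQ taken
+ * from non-secure EL0 looks up target_el_table[1][0][1][0][0][0] == 1,
+ * i.e. the exception is taken to EL1.
+ */
+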
+/*
+ * Determine the target EL for physical exceptions
+ */
+uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
+ uint32_t cur_el, bool secure)
+{
+ CPUARMState *env = cs->env_ptr;
+ int rw;
+ int scr;
+ int hcr;
+ int target_el;
+ /* Is the highest EL AArch64? */
+ int is64 = arm_feature(env, ARM_FEATURE_AARCH64);
+
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
+ } else {
+ /* Either EL2 is the highest EL (and so the EL2 register width
+ * is given by is64); or there is no EL2 or EL3, in which case
+ * the value of 'rw' does not affect the table lookup anyway.
+ */
+ rw = is64;
+ }
+
+ switch (excp_idx) {
+ case EXCP_IRQ:
+ scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
+ hcr = ((env->cp15.hcr_el2 & HCR_IMO) == HCR_IMO);
+ break;
+ case EXCP_FIQ:
+ scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
+ hcr = ((env->cp15.hcr_el2 & HCR_FMO) == HCR_FMO);
+ break;
+ default:
+ scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
+ hcr = ((env->cp15.hcr_el2 & HCR_AMO) == HCR_AMO);
+ break;
+ };
+
+ /* If HCR.TGE is set then HCR is treated as being 1 */
+ hcr |= ((env->cp15.hcr_el2 & HCR_TGE) == HCR_TGE);
+
+ /* Perform a table-lookup for the target EL given the current state */
+ target_el = target_el_table[is64][scr][rw][hcr][secure][cur_el];
+
+ assert(target_el > 0);
+
+ return target_el;
+}
+
+static void v7m_push(CPUARMState *env, uint32_t val)
+{
+ CPUState *cs = CPU(arm_env_get_cpu(env));
+
+ env->regs[13] -= 4;
+ stl_phys(cs->as, env->regs[13], val);
+}
+
+static uint32_t v7m_pop(CPUARMState *env)
+{
+ CPUState *cs = CPU(arm_env_get_cpu(env));
+ uint32_t val;
+
+ val = ldl_phys(cs->as, env->regs[13]);
+ env->regs[13] += 4;
+ return val;
+}
+
+/* Switch to V7M main or process stack pointer. */
+static void switch_v7m_sp(CPUARMState *env, int process)
+{
+ uint32_t tmp;
+ if (env->v7m.current_sp != process) {
+ tmp = env->v7m.other_sp;
+ env->v7m.other_sp = env->regs[13];
+ env->regs[13] = tmp;
+ env->v7m.current_sp = process;
+ }
+}
+
+static void do_v7m_exception_exit(CPUARMState *env)
+{
+ uint32_t type;
+ uint32_t xpsr;
+
+ type = env->regs[15];
+ if (env->v7m.exception != 0)
+ armv7m_nvic_complete_irq(env->nvic, env->v7m.exception);
+
+ /* Switch to the target stack. */
+ switch_v7m_sp(env, (type & 4) != 0);
+ /* Pop registers. */
+ env->regs[0] = v7m_pop(env);
+ env->regs[1] = v7m_pop(env);
+ env->regs[2] = v7m_pop(env);
+ env->regs[3] = v7m_pop(env);
+ env->regs[12] = v7m_pop(env);
+ env->regs[14] = v7m_pop(env);
+ env->regs[15] = v7m_pop(env);
+ if (env->regs[15] & 1) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "M profile return from interrupt with misaligned "
+ "PC is UNPREDICTABLE\n");
+ /* Actual hardware seems to ignore the lsbit, and there are several
+ * RTOSes out there which incorrectly assume the r15 in the stack
+ * frame should be a Thumb-style "lsbit indicates ARM/Thumb" value.
+ */
+ env->regs[15] &= ~1U;
+ }
+ xpsr = v7m_pop(env);
+ xpsr_write(env, xpsr, 0xfffffdff);
+ /* Undo the stack alignment recorded in bit 9 of the stacked xPSR. */
+ if (xpsr & 0x200)
+ env->regs[13] |= 4;
+ /* ??? The exception return type specifies Thread/Handler mode. However
+ this is also implied by the xPSR value. Not sure what to do
+ if there is a mismatch. */
+ /* ??? Likewise for mismatches between the CONTROL register and the stack
+ pointer. */
+}
+
+static void arm_log_exception(int idx)
+{
+ if (qemu_loglevel_mask(CPU_LOG_INT)) {
+ const char *exc = NULL;
+
+ if (idx >= 0 && idx < ARRAY_SIZE(excnames)) {
+ exc = excnames[idx];
+ }
+ if (!exc) {
+ exc = "unknown";
+ }
+ qemu_log_mask(CPU_LOG_INT, "Taking exception %d [%s]\n", idx, exc);
+ }
+}
+
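+/* Informal note: the exception entry below builds the v7-M stack frame by
+ * pushing xPSR, PC, LR, r12, r3, r2, r1, r0 (in that order on the
+ * descending stack), so the final SP points at r0 and the frame reads
+ * r0, r1, r2, r3, r12, lr, pc, xpsr at offsets 0x00..0x1c.
+ */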
+void arm_v7m_cpu_do_interrupt(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ uint32_t xpsr = xpsr_read(env);
+ uint32_t lr;
+ uint32_t addr;
+
+ arm_log_exception(cs->exception_index);
+
+ lr = 0xfffffff1;
+ if (env->v7m.current_sp)
+ lr |= 4;
+ if (env->v7m.exception == 0)
+ lr |= 8;
+
+ /* For exceptions we just mark as pending on the NVIC, and let that
+ handle it. */
+ /* TODO: Need to escalate if the current priority is higher than the
+ one we're raising. */
+ switch (cs->exception_index) {
+ case EXCP_UDEF:
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
+ return;
+ case EXCP_SWI:
+ /* The PC already points to the next instruction. */
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC);
+ return;
+ case EXCP_PREFETCH_ABORT:
+ case EXCP_DATA_ABORT:
+ /* TODO: if we implemented the MPU registers, this is where we
+ * should set the MMFAR, etc from exception.fsr and exception.vaddress.
+ */
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
+ return;
+ case EXCP_BKPT:
+ if (semihosting_enabled()) {
+ int nr;
+ nr = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env)) & 0xff;
+ if (nr == 0xab) {
+ env->regs[15] += 2;
+ qemu_log_mask(CPU_LOG_INT,
+ "...handling as semihosting call 0x%x\n",
+ env->regs[0]);
+ env->regs[0] = do_arm_semihosting(env);
+ return;
+ }
+ }
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG);
+ return;
+ case EXCP_IRQ:
+ env->v7m.exception = armv7m_nvic_acknowledge_irq(env->nvic);
+ break;
+ case EXCP_EXCEPTION_EXIT:
+ do_v7m_exception_exit(env);
+ return;
+ default:
+ cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
+ return; /* Never happens. Keep compiler happy. */
+ }
+
+ /* Align stack pointer. */
+ /* ??? Should only do this if Configuration Control Register
+ STACKALIGN bit is set. */
+ if (env->regs[13] & 4) {
+ env->regs[13] -= 4;
+ xpsr |= 0x200;
+ }
+ /* Switch to the handler mode. */
+ v7m_push(env, xpsr);
+ v7m_push(env, env->regs[15]);
+ v7m_push(env, env->regs[14]);
+ v7m_push(env, env->regs[12]);
+ v7m_push(env, env->regs[3]);
+ v7m_push(env, env->regs[2]);
+ v7m_push(env, env->regs[1]);
+ v7m_push(env, env->regs[0]);
+ switch_v7m_sp(env, 0);
+ /* Clear IT bits */
+ env->condexec_bits = 0;
+ env->regs[14] = lr;
+ addr = ldl_phys(cs->as, env->v7m.vecbase + env->v7m.exception * 4);
+ env->regs[15] = addr & 0xfffffffe;
+ env->thumb = addr & 1;
+}
+
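+/* Informal summary of the AArch32 <-> AArch64 register mapping implemented
+ * by the two sync functions below:
+ *   x0-x7   <-> r0-r7
+ *   x8-x12  <-> user r8-r12
+ *   x13,x14 <-> USR/SYS r13,r14
+ *   x15     <-> HYP r13
+ *   x16,x17 <-> IRQ r14,r13     x18,x19 <-> SVC r14,r13
+ *   x20,x21 <-> ABT r14,r13     x22,x23 <-> UND r14,r13
+ *   x24-x30 <-> FIQ r8-r14
+ */
+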
+/* Function used to synchronize QEMU's AArch64 register set with AArch32
+ * register set. This is necessary when switching between AArch32 and AArch64
+ * execution state.
+ */
+void aarch64_sync_32_to_64(CPUARMState *env)
+{
+ int i;
+ uint32_t mode = env->uncached_cpsr & CPSR_M;
+
+ /* We can blanket copy R[0:7] to X[0:7] */
+ for (i = 0; i < 8; i++) {
+ env->xregs[i] = env->regs[i];
+ }
+
+ /* Unless we are in FIQ mode, x8-x12 come from the user registers r8-r12.
+ * Otherwise, they come from the banked user regs.
+ */
+ if (mode == ARM_CPU_MODE_FIQ) {
+ for (i = 8; i < 13; i++) {
+ env->xregs[i] = env->usr_regs[i - 8];
+ }
+ } else {
+ for (i = 8; i < 13; i++) {
+ env->xregs[i] = env->regs[i];
+ }
+ }
+
+ /* Registers x13-x23 are the various mode SP and LR registers. Registers
+ * r13 and r14 are only copied if we are in that mode, otherwise we copy
+ * from the mode banked register.
+ */
+ if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
+ env->xregs[13] = env->regs[13];
+ env->xregs[14] = env->regs[14];
+ } else {
+ env->xregs[13] = env->banked_r13[bank_number(ARM_CPU_MODE_USR)];
+ /* HYP is an exception: it shares the USR r14, so when in HYP mode
+ * the live r14 is copied rather than the banked USR copy. */
+ if (mode == ARM_CPU_MODE_HYP) {
+ env->xregs[14] = env->regs[14];
+ } else {
+ env->xregs[14] = env->banked_r14[bank_number(ARM_CPU_MODE_USR)];
+ }
+ }
+
+ if (mode == ARM_CPU_MODE_HYP) {
+ env->xregs[15] = env->regs[13];
+ } else {
+ env->xregs[15] = env->banked_r13[bank_number(ARM_CPU_MODE_HYP)];
+ }
+
+ if (mode == ARM_CPU_MODE_IRQ) {
+ env->xregs[16] = env->regs[14];
+ env->xregs[17] = env->regs[13];
+ } else {
+ env->xregs[16] = env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)];
+ env->xregs[17] = env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)];
+ }
+
+ if (mode == ARM_CPU_MODE_SVC) {
+ env->xregs[18] = env->regs[14];
+ env->xregs[19] = env->regs[13];
+ } else {
+ env->xregs[18] = env->banked_r14[bank_number(ARM_CPU_MODE_SVC)];
+ env->xregs[19] = env->banked_r13[bank_number(ARM_CPU_MODE_SVC)];
+ }
+
+ if (mode == ARM_CPU_MODE_ABT) {
+ env->xregs[20] = env->regs[14];
+ env->xregs[21] = env->regs[13];
+ } else {
+ env->xregs[20] = env->banked_r14[bank_number(ARM_CPU_MODE_ABT)];
+ env->xregs[21] = env->banked_r13[bank_number(ARM_CPU_MODE_ABT)];
+ }
+
+ if (mode == ARM_CPU_MODE_UND) {
+ env->xregs[22] = env->regs[14];
+ env->xregs[23] = env->regs[13];
+ } else {
+ env->xregs[22] = env->banked_r14[bank_number(ARM_CPU_MODE_UND)];
+ env->xregs[23] = env->banked_r13[bank_number(ARM_CPU_MODE_UND)];
+ }
+
+ /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
+ * mode, then we can copy from r8-r14. Otherwise, we copy from the
+ * FIQ bank for r8-r14.
+ */
+ if (mode == ARM_CPU_MODE_FIQ) {
+ for (i = 24; i < 31; i++) {
+ env->xregs[i] = env->regs[i - 16]; /* X[24:30] <- R[8:14] */
+ }
+ } else {
+ for (i = 24; i < 29; i++) {
+ env->xregs[i] = env->fiq_regs[i - 24];
+ }
+ env->xregs[29] = env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)];
+ env->xregs[30] = env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)];
+ }
+
+ env->pc = env->regs[15];
+}
+
+/* Function used to synchronize QEMU's AArch32 register set with AArch64
+ * register set. This is necessary when switching between AArch32 and AArch64
+ * execution state.
+ */
+void aarch64_sync_64_to_32(CPUARMState *env)
+{
+ int i;
+ uint32_t mode = env->uncached_cpsr & CPSR_M;
+
+ /* We can blanket copy X[0:7] to R[0:7] */
+ for (i = 0; i < 8; i++) {
+ env->regs[i] = env->xregs[i];
+ }
+
+ /* Unless we are in FIQ mode, the live r8-r12 are restored from x8-x12.
+ * Otherwise, we copy x8-x12 into the banked user regs.
+ */
+ if (mode == ARM_CPU_MODE_FIQ) {
+ for (i = 8; i < 13; i++) {
+ env->usr_regs[i - 8] = env->xregs[i];
+ }
+ } else {
+ for (i = 8; i < 13; i++) {
+ env->regs[i] = env->xregs[i];
+ }
+ }
+
+ /* Registers r13 & r14 depend on the current mode.
+ * If we are in a given mode, we copy the corresponding x registers to r13
+ * and r14. Otherwise, we copy the x register to the banked r13 and r14
+ * for the mode.
+ */
+ if (mode == ARM_CPU_MODE_USR || mode == ARM_CPU_MODE_SYS) {
+ env->regs[13] = env->xregs[13];
+ env->regs[14] = env->xregs[14];
+ } else {
+ env->banked_r13[bank_number(ARM_CPU_MODE_USR)] = env->xregs[13];
+
+ /* HYP is an exception in that it does not have its own banked r14 but
+ * shares the USR r14
+ */
+ if (mode == ARM_CPU_MODE_HYP) {
+ env->regs[14] = env->xregs[14];
+ } else {
+ env->banked_r14[bank_number(ARM_CPU_MODE_USR)] = env->xregs[14];
+ }
+ }
+
+ if (mode == ARM_CPU_MODE_HYP) {
+ env->regs[13] = env->xregs[15];
+ } else {
+ env->banked_r13[bank_number(ARM_CPU_MODE_HYP)] = env->xregs[15];
+ }
+
+ if (mode == ARM_CPU_MODE_IRQ) {
+ env->regs[14] = env->xregs[16];
+ env->regs[13] = env->xregs[17];
+ } else {
+ env->banked_r14[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[16];
+ env->banked_r13[bank_number(ARM_CPU_MODE_IRQ)] = env->xregs[17];
+ }
+
+ if (mode == ARM_CPU_MODE_SVC) {
+ env->regs[14] = env->xregs[18];
+ env->regs[13] = env->xregs[19];
+ } else {
+ env->banked_r14[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[18];
+ env->banked_r13[bank_number(ARM_CPU_MODE_SVC)] = env->xregs[19];
+ }
+
+ if (mode == ARM_CPU_MODE_ABT) {
+ env->regs[14] = env->xregs[20];
+ env->regs[13] = env->xregs[21];
+ } else {
+ env->banked_r14[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[20];
+ env->banked_r13[bank_number(ARM_CPU_MODE_ABT)] = env->xregs[21];
+ }
+
+ if (mode == ARM_CPU_MODE_UND) {
+ env->regs[14] = env->xregs[22];
+ env->regs[13] = env->xregs[23];
+ } else {
+ env->banked_r14[bank_number(ARM_CPU_MODE_UND)] = env->xregs[22];
+ env->banked_r13[bank_number(ARM_CPU_MODE_UND)] = env->xregs[23];
+ }
+
+ /* Registers x24-x30 are mapped to r8-r14 in FIQ mode. If we are in FIQ
+ * mode, then we can copy to r8-r14. Otherwise, we copy to the
+ * FIQ bank for r8-r14.
+ */
+ if (mode == ARM_CPU_MODE_FIQ) {
+ for (i = 24; i < 31; i++) {
+ env->regs[i - 16] = env->xregs[i]; /* X[24:30] -> R[8:14] */
+ }
+ } else {
+ for (i = 24; i < 29; i++) {
+ env->fiq_regs[i - 24] = env->xregs[i];
+ }
+ env->banked_r13[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[29];
+ env->banked_r14[bank_number(ARM_CPU_MODE_FIQ)] = env->xregs[30];
+ }
+
+ env->regs[15] = env->pc;
+}
+
+static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ uint32_t addr;
+ uint32_t mask;
+ int new_mode;
+ uint32_t offset;
+ uint32_t moe;
+
+ /* If this is a debug exception we must update the DBGDSCR.MOE bits */
+ switch (env->exception.syndrome >> ARM_EL_EC_SHIFT) {
+ case EC_BREAKPOINT:
+ case EC_BREAKPOINT_SAME_EL:
+ moe = 1;
+ break;
+ case EC_WATCHPOINT:
+ case EC_WATCHPOINT_SAME_EL:
+ moe = 10;
+ break;
+ case EC_AA32_BKPT:
+ moe = 3;
+ break;
+ case EC_VECTORCATCH:
+ moe = 5;
+ break;
+ default:
+ moe = 0;
+ break;
+ }
+
+ if (moe) {
+ env->cp15.mdscr_el1 = deposit64(env->cp15.mdscr_el1, 2, 4, moe);
+ }
+
+ /* TODO: Vectored interrupt controller. */
+ switch (cs->exception_index) {
+ case EXCP_UDEF:
+ new_mode = ARM_CPU_MODE_UND;
+ addr = 0x04;
+ mask = CPSR_I;
+ if (env->thumb)
+ offset = 2;
+ else
+ offset = 4;
+ break;
+ case EXCP_SWI:
+ new_mode = ARM_CPU_MODE_SVC;
+ addr = 0x08;
+ mask = CPSR_I;
+ /* The PC already points to the next instruction. */
+ offset = 0;
+ break;
+ case EXCP_BKPT:
+ env->exception.fsr = 2;
+ /* Fall through to prefetch abort. */
+ case EXCP_PREFETCH_ABORT:
+ A32_BANKED_CURRENT_REG_SET(env, ifsr, env->exception.fsr);
+ A32_BANKED_CURRENT_REG_SET(env, ifar, env->exception.vaddress);
+ qemu_log_mask(CPU_LOG_INT, "...with IFSR 0x%x IFAR 0x%x\n",
+ env->exception.fsr, (uint32_t)env->exception.vaddress);
+ new_mode = ARM_CPU_MODE_ABT;
+ addr = 0x0c;
+ mask = CPSR_A | CPSR_I;
+ offset = 4;
+ break;
+ case EXCP_DATA_ABORT:
+ A32_BANKED_CURRENT_REG_SET(env, dfsr, env->exception.fsr);
+ A32_BANKED_CURRENT_REG_SET(env, dfar, env->exception.vaddress);
+ qemu_log_mask(CPU_LOG_INT, "...with DFSR 0x%x DFAR 0x%x\n",
+ env->exception.fsr,
+ (uint32_t)env->exception.vaddress);
+ new_mode = ARM_CPU_MODE_ABT;
+ addr = 0x10;
+ mask = CPSR_A | CPSR_I;
+ offset = 8;
+ break;
+ case EXCP_IRQ:
+ new_mode = ARM_CPU_MODE_IRQ;
+ addr = 0x18;
+ /* Disable IRQ and imprecise data aborts. */
+ mask = CPSR_A | CPSR_I;
+ offset = 4;
+ if (env->cp15.scr_el3 & SCR_IRQ) {
+ /* IRQ routed to monitor mode */
+ new_mode = ARM_CPU_MODE_MON;
+ mask |= CPSR_F;
+ }
+ break;
+ case EXCP_FIQ:
+ new_mode = ARM_CPU_MODE_FIQ;
+ addr = 0x1c;
+ /* Disable FIQ, IRQ and imprecise data aborts. */
+ mask = CPSR_A | CPSR_I | CPSR_F;
+ if (env->cp15.scr_el3 & SCR_FIQ) {
+ /* FIQ routed to monitor mode */
+ new_mode = ARM_CPU_MODE_MON;
+ }
+ offset = 4;
+ break;
+ case EXCP_SMC:
+ new_mode = ARM_CPU_MODE_MON;
+ addr = 0x08;
+ mask = CPSR_A | CPSR_I | CPSR_F;
+ offset = 0;
+ break;
+ default:
+ cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
+ return; /* Never happens. Keep compiler happy. */
+ }
+
+ if (new_mode == ARM_CPU_MODE_MON) {
+ addr += env->cp15.mvbar;
+ } else if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
+ /* High vectors. When enabled, base address cannot be remapped. */
+ addr += 0xffff0000;
+ } else {
+ /* ARM v7 architectures provide a vector base address register to remap
+ * the interrupt vector table.
+ * This register is only used in non-monitor mode, and is banked.
+ * Note: only bits 31:5 are valid.
+ */
+ addr += A32_BANKED_CURRENT_REG_GET(env, vbar);
+ }
+
+ if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
+ env->cp15.scr_el3 &= ~SCR_NS;
+ }
+
+ switch_mode (env, new_mode);
+ /* For exceptions taken to AArch32 we must clear the SS bit in both
+ * PSTATE and in the old-state value we save to SPSR_<mode>, so zero it now.
+ */
+ env->uncached_cpsr &= ~PSTATE_SS;
+ env->spsr = cpsr_read(env);
+ /* Clear IT bits. */
+ env->condexec_bits = 0;
+ /* Switch to the new mode, and to the correct instruction set. */
+ env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
+ /* Set new mode endianness */
+ env->uncached_cpsr &= ~CPSR_E;
+ if (env->cp15.sctlr_el[arm_current_el(env)] & SCTLR_EE) {
+ env->uncached_cpsr |= CPSR_E;
+ }
+ env->daif |= mask;
+ /* This is a lie, as there was no c1_sys on V4T/V5, but who cares;
+ * we should just guard the Thumb mode on V4 */
+ if (arm_feature(env, ARM_FEATURE_V4T)) {
+ env->thumb = (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_TE) != 0;
+ }
+ env->regs[14] = env->regs[15] + offset;
+ env->regs[15] = addr;
+}
+
+/* Handle exception entry to a target EL which is using AArch64 */
+static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ unsigned int new_el = env->exception.target_el;
+ target_ulong addr = env->cp15.vbar_el[new_el];
+ unsigned int new_mode = aarch64_pstate_mode(new_el, true);
+
+ if (arm_current_el(env) < new_el) {
+ /* Entry vector offset depends on whether the implemented EL
+ * immediately lower than the target level is using AArch32 or AArch64
+ */
+ bool is_aa64;
+
+ switch (new_el) {
+ case 3:
+ is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
+ break;
+ case 2:
+ is_aa64 = (env->cp15.hcr_el2 & HCR_RW) != 0;
+ break;
+ case 1:
+ is_aa64 = is_a64(env);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (is_aa64) {
+ addr += 0x400;
+ } else {
+ addr += 0x600;
+ }
+ } else if (pstate_read(env) & PSTATE_SP) {
+ addr += 0x200;
+ }
+
+ switch (cs->exception_index) {
+ case EXCP_PREFETCH_ABORT:
+ case EXCP_DATA_ABORT:
+ env->cp15.far_el[new_el] = env->exception.vaddress;
+ qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
+ env->cp15.far_el[new_el]);
+ /* fall through */
+ case EXCP_BKPT:
+ case EXCP_UDEF:
+ case EXCP_SWI:
+ case EXCP_HVC:
+ case EXCP_HYP_TRAP:
+ case EXCP_SMC:
+ env->cp15.esr_el[new_el] = env->exception.syndrome;
+ break;
+ case EXCP_IRQ:
+ case EXCP_VIRQ:
+ addr += 0x80;
+ break;
+ case EXCP_FIQ:
+ case EXCP_VFIQ:
+ addr += 0x100;
+ break;
+ case EXCP_SEMIHOST:
+ qemu_log_mask(CPU_LOG_INT,
+ "...handling as semihosting call 0x%" PRIx64 "\n",
+ env->xregs[0]);
+ env->xregs[0] = do_arm_semihosting(env);
+ return;
+ default:
+ cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
+ }
+
+ if (is_a64(env)) {
+ env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
+ aarch64_save_sp(env, arm_current_el(env));
+ env->elr_el[new_el] = env->pc;
+ } else {
+ env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env);
+ env->elr_el[new_el] = env->regs[15];
+
+ aarch64_sync_32_to_64(env);
+
+ env->condexec_bits = 0;
+ }
+ qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
+ env->elr_el[new_el]);
+
+ pstate_write(env, PSTATE_DAIF | new_mode);
+ env->aarch64 = 1;
+ aarch64_restore_sp(env, new_el);
+
+ env->pc = addr;
+
+ qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
+ new_el, env->pc, pstate_read(env));
+}
+
+static inline bool check_for_semihosting(CPUState *cs)
+{
+ /* Check whether this exception is a semihosting call; if so
+ * then handle it and return true; otherwise return false.
+ */
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ if (is_a64(env)) {
+ if (cs->exception_index == EXCP_SEMIHOST) {
+ /* This is always the 64-bit semihosting exception.
+ * The "is this usermode" and "is semihosting enabled"
+ * checks have been done at translate time.
+ */
+ qemu_log_mask(CPU_LOG_INT,
+ "...handling as semihosting call 0x%" PRIx64 "\n",
+ env->xregs[0]);
+ env->xregs[0] = do_arm_semihosting(env);
+ return true;
+ }
+ return false;
+ } else {
+ uint32_t imm;
+
+ /* Only intercept calls from privileged modes, to provide some
+ * semblance of security.
+ */
+ if (cs->exception_index != EXCP_SEMIHOST &&
+ (!semihosting_enabled() ||
+ ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR))) {
+ return false;
+ }
+
+ switch (cs->exception_index) {
+ case EXCP_SEMIHOST:
+ /* This is always a semihosting call; the "is this usermode"
+ * and "is semihosting enabled" checks have been done at
+ * translate time.
+ */
+ break;
+ case EXCP_SWI:
+ /* Check for semihosting interrupt. */
+ if (env->thumb) {
+ imm = arm_lduw_code(env, env->regs[15] - 2, arm_sctlr_b(env))
+ & 0xff;
+ if (imm == 0xab) {
+ break;
+ }
+ } else {
+ imm = arm_ldl_code(env, env->regs[15] - 4, arm_sctlr_b(env))
+ & 0xffffff;
+ if (imm == 0x123456) {
+ break;
+ }
+ }
+ return false;
+ case EXCP_BKPT:
+ /* See if this is a semihosting syscall. */
+ if (env->thumb) {
+ imm = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env))
+ & 0xff;
+ if (imm == 0xab) {
+ env->regs[15] += 2;
+ break;
+ }
+ }
+ return false;
+ default:
+ return false;
+ }
+
+ qemu_log_mask(CPU_LOG_INT,
+ "...handling as semihosting call 0x%x\n",
+ env->regs[0]);
+ env->regs[0] = do_arm_semihosting(env);
+ return true;
+ }
+}
+
+/* Handle a CPU exception for A and R profile CPUs.
+ * Do any appropriate logging, handle PSCI calls, and then hand off
+ * to the AArch64-entry or AArch32-entry function depending on the
+ * target exception level's register width.
+ */
+void arm_cpu_do_interrupt(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ unsigned int new_el = env->exception.target_el;
+
+ assert(!IS_M(env));
+
+ arm_log_exception(cs->exception_index);
+ qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
+ new_el);
+ if (qemu_loglevel_mask(CPU_LOG_INT)
+ && !excp_is_internal(cs->exception_index)) {
+ qemu_log_mask(CPU_LOG_INT, "...with ESR %x/0x%" PRIx32 "\n",
+ env->exception.syndrome >> ARM_EL_EC_SHIFT,
+ env->exception.syndrome);
+ }
+
+ if (arm_is_psci_call(cpu, cs->exception_index)) {
+ arm_handle_psci_call(cpu);
+ qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
+ return;
+ }
+
+ /* Semihosting semantics depend on the register width of the
+ * code that caused the exception, not the target exception level,
+ * so must be handled here.
+ */
+ if (check_for_semihosting(cs)) {
+ return;
+ }
+
+ assert(!excp_is_internal(cs->exception_index));
+ if (arm_el_is_aa64(env, new_el)) {
+ arm_cpu_do_interrupt_aarch64(cs);
+ } else {
+ arm_cpu_do_interrupt_aarch32(cs);
+ }
+
+ arm_call_el_change_hook(cpu);
+
+ if (!kvm_enabled()) {
+ cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
+ }
+}
+
+/* Return the exception level which controls this address translation regime */
+static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+ switch (mmu_idx) {
+ case ARMMMUIdx_S2NS:
+ case ARMMMUIdx_S1E2:
+ return 2;
+ case ARMMMUIdx_S1E3:
+ return 3;
+ case ARMMMUIdx_S1SE0:
+ return arm_el_is_aa64(env, 3) ? 1 : 3;
+ case ARMMMUIdx_S1SE1:
+ case ARMMMUIdx_S1NSE0:
+ case ARMMMUIdx_S1NSE1:
+ return 1;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+/* Return true if this address translation regime is secure */
+static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+ switch (mmu_idx) {
+ case ARMMMUIdx_S12NSE0:
+ case ARMMMUIdx_S12NSE1:
+ case ARMMMUIdx_S1NSE0:
+ case ARMMMUIdx_S1NSE1:
+ case ARMMMUIdx_S1E2:
+ case ARMMMUIdx_S2NS:
+ return false;
+ case ARMMMUIdx_S1E3:
+ case ARMMMUIdx_S1SE0:
+ case ARMMMUIdx_S1SE1:
+ return true;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+/* Return the SCTLR value which controls this address translation regime */
+static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+ return env->cp15.sctlr_el[regime_el(env, mmu_idx)];
+}
+
+/* Return true if the specified stage of address translation is disabled */
+static inline bool regime_translation_disabled(CPUARMState *env,
+ ARMMMUIdx mmu_idx)
+{
+ if (mmu_idx == ARMMMUIdx_S2NS) {
+ return (env->cp15.hcr_el2 & HCR_VM) == 0;
+ }
+ return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
+}
+
+static inline bool regime_translation_big_endian(CPUARMState *env,
+ ARMMMUIdx mmu_idx)
+{
+ return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
+}
+
+/* Return the TCR controlling this translation regime */
+static inline TCR *regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+ if (mmu_idx == ARMMMUIdx_S2NS) {
+ return &env->cp15.vtcr_el2;
+ }
+ return &env->cp15.tcr_el[regime_el(env, mmu_idx)];
+}
+
+/* Returns the TBI0 (top byte ignore) value for the current regime's EL */
+uint32_t arm_regime_tbi0(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+ TCR *tcr;
+ uint32_t el;
+
+ /* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert
+ * a stage 1+2 mmu index into the appropriate stage 1 mmu index.
+ */
+ if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
+ mmu_idx += ARMMMUIdx_S1NSE0;
+ }
+
+ tcr = regime_tcr(env, mmu_idx);
+ el = regime_el(env, mmu_idx);
+
+ if (el > 1) {
+ return extract64(tcr->raw_tcr, 20, 1);
+ } else {
+ return extract64(tcr->raw_tcr, 37, 1);
+ }
+}
+
+/* Returns the TBI1 (top byte ignore) value for the current regime's EL */
+uint32_t arm_regime_tbi1(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+ TCR *tcr;
+ uint32_t el;
+
+ /* For EL0 and EL1, TBI is controlled by stage 1's TCR, so convert
+ * a stage 1+2 mmu index into the appropriate stage 1 mmu index.
+ */
+ if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
+ mmu_idx += ARMMMUIdx_S1NSE0;
+ }
+
+ tcr = regime_tcr(env, mmu_idx);
+ el = regime_el(env, mmu_idx);
+
+ if (el > 1) {
+ return 0;
+ } else {
+ return extract64(tcr->raw_tcr, 38, 1);
+ }
+}
+
+/* Return the TTBR associated with this translation regime */
+static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
+ int ttbrn)
+{
+ if (mmu_idx == ARMMMUIdx_S2NS) {
+ return env->cp15.vttbr_el2;
+ }
+ if (ttbrn == 0) {
+ return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
+ } else {
+ return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
+ }
+}
+
+/* Return true if the translation regime is using LPAE format page tables */
+static inline bool regime_using_lpae_format(CPUARMState *env,
+ ARMMMUIdx mmu_idx)
+{
+ int el = regime_el(env, mmu_idx);
+ if (el == 2 || arm_el_is_aa64(env, el)) {
+ return true;
+ }
+ if (arm_feature(env, ARM_FEATURE_LPAE)
+ && (regime_tcr(env, mmu_idx)->raw_tcr & TTBCR_EAE)) {
+ return true;
+ }
+ return false;
+}
+
+/* Returns true if the stage 1 translation regime is using LPAE format page
+ * tables. Used when raising alignment exceptions, whose FSR changes depending
+ * on whether the long or short descriptor format is in use. */
+bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+ if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
+ mmu_idx += ARMMMUIdx_S1NSE0;
+ }
+
+ return regime_using_lpae_format(env, mmu_idx);
+}
+
+static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+ switch (mmu_idx) {
+ case ARMMMUIdx_S1SE0:
+ case ARMMMUIdx_S1NSE0:
+ return true;
+ default:
+ return false;
+ case ARMMMUIdx_S12NSE0:
+ case ARMMMUIdx_S12NSE1:
+ g_assert_not_reached();
+ }
+}
+
+/* Translate section/page access permissions to page
+ * R/W protection flags
+ *
+ * @env: CPUARMState
+ * @mmu_idx: MMU index indicating required translation regime
+ * @ap: The 3-bit access permissions (AP[2:0])
+ * @domain_prot: The 2-bit domain access permissions
+ */
+static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
+ int ap, int domain_prot)
+{
+ bool is_user = regime_is_user(env, mmu_idx);
+
+ if (domain_prot == 3) {
+ return PAGE_READ | PAGE_WRITE;
+ }
+
+ switch (ap) {
+ case 0:
+ if (arm_feature(env, ARM_FEATURE_V7)) {
+ return 0;
+ }
+ switch (regime_sctlr(env, mmu_idx) & (SCTLR_S | SCTLR_R)) {
+ case SCTLR_S:
+ return is_user ? 0 : PAGE_READ;
+ case SCTLR_R:
+ return PAGE_READ;
+ default:
+ return 0;
+ }
+ case 1:
+ return is_user ? 0 : PAGE_READ | PAGE_WRITE;
+ case 2:
+ if (is_user) {
+ return PAGE_READ;
+ } else {
+ return PAGE_READ | PAGE_WRITE;
+ }
+ case 3:
+ return PAGE_READ | PAGE_WRITE;
+ case 4: /* Reserved. */
+ return 0;
+ case 5:
+ return is_user ? 0 : PAGE_READ;
+ case 6:
+ return PAGE_READ;
+ case 7:
+ if (!arm_feature(env, ARM_FEATURE_V6K)) {
+ return 0;
+ }
+ return PAGE_READ;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+/* Translate section/page access permissions to page
+ * R/W protection flags.
+ *
+ * @ap: The 2-bit simple AP (AP[2:1])
+ * @is_user: TRUE if accessing from PL0
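+ *
+ * For example (informal), AP == 2 (privileged read-only) yields PAGE_READ
+ * at PL1 and no access at PL0.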
+ */
+static inline int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
+{
+ switch (ap) {
+ case 0:
+ return is_user ? 0 : PAGE_READ | PAGE_WRITE;
+ case 1:
+ return PAGE_READ | PAGE_WRITE;
+ case 2:
+ return is_user ? 0 : PAGE_READ;
+ case 3:
+ return PAGE_READ;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static inline int
+simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
+{
+ return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
+}
+
+/* Translate S2 section/page access permissions to protection flags
+ *
+ * @env: CPUARMState
+ * @s2ap: The 2-bit stage2 access permissions (S2AP)
+ * @xn: XN (execute-never) bit
+ */
+static int get_S2prot(CPUARMState *env, int s2ap, int xn)
+{
+ int prot = 0;
+
+ if (s2ap & 1) {
+ prot |= PAGE_READ;
+ }
+ if (s2ap & 2) {
+ prot |= PAGE_WRITE;
+ }
+ if (!xn) {
+ if (arm_el_is_aa64(env, 2) || prot & PAGE_READ) {
+ prot |= PAGE_EXEC;
+ }
+ }
+ return prot;
+}
+
+/* Translate section/page access permissions to protection flags
+ *
+ * @env: CPUARMState
+ * @mmu_idx: MMU index indicating required translation regime
+ * @is_aa64: TRUE if AArch64
+ * @ap: The 2-bit simple AP (AP[2:1])
+ * @ns: NS (non-secure) bit
+ * @xn: XN (execute-never) bit
+ * @pxn: PXN (privileged execute-never) bit
+ */
+static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
+ int ap, int ns, int xn, int pxn)
+{
+ bool is_user = regime_is_user(env, mmu_idx);
+ int prot_rw, user_rw;
+ bool have_wxn;
+ int wxn = 0;
+
+ assert(mmu_idx != ARMMMUIdx_S2NS);
+
+ user_rw = simple_ap_to_rw_prot_is_user(ap, true);
+ if (is_user) {
+ prot_rw = user_rw;
+ } else {
+ prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
+ }
+
+ if (ns && arm_is_secure(env) && (env->cp15.scr_el3 & SCR_SIF)) {
+ return prot_rw;
+ }
+
+ /* TODO have_wxn should be replaced with
+ * ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
+ * when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
+ * compatible processors have EL2, which is required for [U]WXN.
+ */
+ have_wxn = arm_feature(env, ARM_FEATURE_LPAE);
+
+ if (have_wxn) {
+ wxn = regime_sctlr(env, mmu_idx) & SCTLR_WXN;
+ }
+
+ if (is_aa64) {
+ switch (regime_el(env, mmu_idx)) {
+ case 1:
+ if (!is_user) {
+ xn = pxn || (user_rw & PAGE_WRITE);
+ }
+ break;
+ case 2:
+ case 3:
+ break;
+ }
+ } else if (arm_feature(env, ARM_FEATURE_V7)) {
+ switch (regime_el(env, mmu_idx)) {
+ case 1:
+ case 3:
+ if (is_user) {
+ xn = xn || !(user_rw & PAGE_READ);
+ } else {
+ int uwxn = 0;
+ if (have_wxn) {
+ uwxn = regime_sctlr(env, mmu_idx) & SCTLR_UWXN;
+ }
+ xn = xn || !(prot_rw & PAGE_READ) || pxn ||
+ (uwxn && (user_rw & PAGE_WRITE));
+ }
+ break;
+ case 2:
+ break;
+ }
+ } else {
+ xn = wxn = 0;
+ }
+
+ if (xn || (wxn && (prot_rw & PAGE_WRITE))) {
+ return prot_rw;
+ }
+ return prot_rw | PAGE_EXEC;
+}
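+
+/* Note (informal): the WXN handling above means that, when SCTLR.WXN is set,
+ * any page writable at the privilege level in effect is treated as
+ * execute-never, even if its XN bit is clear.
+ */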
+
+static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
+ uint32_t *table, uint32_t address)
+{
+ /* Note that we can only get here for an AArch32 PL0/PL1 lookup */
+ TCR *tcr = regime_tcr(env, mmu_idx);
+
+ if (address & tcr->mask) {
+ if (tcr->raw_tcr & TTBCR_PD1) {
+ /* Translation table walk disabled for TTBR1 */
+ return false;
+ }
+ *table = regime_ttbr(env, mmu_idx, 1) & 0xffffc000;
+ } else {
+ if (tcr->raw_tcr & TTBCR_PD0) {
+ /* Translation table walk disabled for TTBR0 */
+ return false;
+ }
+ *table = regime_ttbr(env, mmu_idx, 0) & tcr->base_mask;
+ }
+ *table |= (address >> 18) & 0x3ffc;
+ return true;
+}
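+
+/* Example (informal): when the TTBR0 walk is selected above, a lookup of
+ * VA 0x12345678 reads the level 1 descriptor at TTBR0_base + (0x123 << 2),
+ * since VA[31:20] indexes the 4-byte L1 entries.
+ */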
+
+/* Translate a S1 pagetable walk through S2 if needed. */
+static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
+ hwaddr addr, MemTxAttrs txattrs,
+ uint32_t *fsr,
+ ARMMMUFaultInfo *fi)
+{
+ if ((mmu_idx == ARMMMUIdx_S1NSE0 || mmu_idx == ARMMMUIdx_S1NSE1) &&
+ !regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
+ target_ulong s2size;
+ hwaddr s2pa;
+ int s2prot;
+ int ret;
+
+ ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
+ &txattrs, &s2prot, &s2size, fsr, fi);
+ if (ret) {
+ fi->s2addr = addr;
+ fi->stage2 = true;
+ fi->s1ptw = true;
+ return ~0;
+ }
+ addr = s2pa;
+ }
+ return addr;
+}
+
+/* All loads done in the course of a page table walk go through here.
+ * TODO: rather than ignoring errors from physical memory reads (which
+ * are external aborts in ARM terminology) we should propagate this
+ * error out so that we can turn it into a Data Abort if this walk
+ * was being done for a CPU load/store or an address translation instruction
+ * (but not if it was for a debug access).
+ */
+static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
+ ARMMMUIdx mmu_idx, uint32_t *fsr,
+ ARMMMUFaultInfo *fi)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ MemTxAttrs attrs = {};
+ AddressSpace *as;
+
+ attrs.secure = is_secure;
+ as = arm_addressspace(cs, attrs);
+ addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fsr, fi);
+ if (fi->s1ptw) {
+ return 0;
+ }
+ if (regime_translation_big_endian(env, mmu_idx)) {
+ return address_space_ldl_be(as, addr, attrs, NULL);
+ } else {
+ return address_space_ldl_le(as, addr, attrs, NULL);
+ }
+}
+
+static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
+ ARMMMUIdx mmu_idx, uint32_t *fsr,
+ ARMMMUFaultInfo *fi)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ MemTxAttrs attrs = {};
+ AddressSpace *as;
+
+ attrs.secure = is_secure;
+ as = arm_addressspace(cs, attrs);
+ addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fsr, fi);
+ if (fi->s1ptw) {
+ return 0;
+ }
+ if (regime_translation_big_endian(env, mmu_idx)) {
+ return address_space_ldq_be(as, addr, attrs, NULL);
+ } else {
+ return address_space_ldq_le(as, addr, attrs, NULL);
+ }
+}
+
+static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
+ int access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, int *prot,
+ target_ulong *page_size, uint32_t *fsr,
+ ARMMMUFaultInfo *fi)
+{
+ CPUState *cs = CPU(arm_env_get_cpu(env));
+ int code;
+ uint32_t table;
+ uint32_t desc;
+ int type;
+ int ap;
+ int domain = 0;
+ int domain_prot;
+ hwaddr phys_addr;
+ uint32_t dacr;
+
+ /* Pagetable walk. */
+ /* Lookup l1 descriptor. */
+ if (!get_level1_table_address(env, mmu_idx, &table, address)) {
+ /* Section translation fault if page walk is disabled by PD0 or PD1 */
+ code = 5;
+ goto do_fault;
+ }
+ desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
+ mmu_idx, fsr, fi);
+ type = (desc & 3);
+ domain = (desc >> 5) & 0x0f;
+ if (regime_el(env, mmu_idx) == 1) {
+ dacr = env->cp15.dacr_ns;
+ } else {
+ dacr = env->cp15.dacr_s;
+ }
+ domain_prot = (dacr >> (domain * 2)) & 3;
+ if (type == 0) {
+ /* Section translation fault. */
+ code = 5;
+ goto do_fault;
+ }
+ if (domain_prot == 0 || domain_prot == 2) {
+ if (type == 2)
+ code = 9; /* Section domain fault. */
+ else
+ code = 11; /* Page domain fault. */
+ goto do_fault;
+ }
+ if (type == 2) {
+ /* 1Mb section. */
+ phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
+ ap = (desc >> 10) & 3;
+ code = 13;
+ *page_size = 1024 * 1024;
+ } else {
+ /* Lookup l2 entry. */
+ if (type == 1) {
+ /* Coarse pagetable. */
+ table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
+ } else {
+ /* Fine pagetable. */
+ table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
+ }
+ desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
+ mmu_idx, fsr, fi);
+ switch (desc & 3) {
+ case 0: /* Page translation fault. */
+ code = 7;
+ goto do_fault;
+ case 1: /* 64k page. */
+ phys_addr = (desc & 0xffff0000) | (address & 0xffff);
+ ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
+ *page_size = 0x10000;
+ break;
+ case 2: /* 4k page. */
+ phys_addr = (desc & 0xfffff000) | (address & 0xfff);
+ ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
+ *page_size = 0x1000;
+ break;
+ case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
+ if (type == 1) {
+ /* ARMv6/XScale extended small page format */
+ if (arm_feature(env, ARM_FEATURE_XSCALE)
+ || arm_feature(env, ARM_FEATURE_V6)) {
+ phys_addr = (desc & 0xfffff000) | (address & 0xfff);
+ *page_size = 0x1000;
+ } else {
+ /* UNPREDICTABLE in ARMv5; we choose to take a
+ * page translation fault.
+ */
+ code = 7;
+ goto do_fault;
+ }
+ } else {
+ phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
+ *page_size = 0x400;
+ }
+ ap = (desc >> 4) & 3;
+ break;
+ default:
+ /* Never happens, but compiler isn't smart enough to tell. */
+ abort();
+ }
+ code = 15;
+ }
+ *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
+ *prot |= *prot ? PAGE_EXEC : 0;
+ if (!(*prot & (1 << access_type))) {
+ /* Access permission fault. */
+ goto do_fault;
+ }
+ *phys_ptr = phys_addr;
+ return false;
+do_fault:
+ *fsr = code | (domain << 4);
+ return true;
+}
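+
+/* Example (informal): a 1MB section descriptor whose base address bits are
+ * 0x20000000 maps VA 0x00123456 to PA 0x20023456: bits [31:20] come from the
+ * descriptor and bits [19:0] from the virtual address.
+ */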
+
+static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
+ int access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
+ target_ulong *page_size, uint32_t *fsr,
+ ARMMMUFaultInfo *fi)
+{
+ CPUState *cs = CPU(arm_env_get_cpu(env));
+ int code;
+ uint32_t table;
+ uint32_t desc;
+ uint32_t xn;
+ uint32_t pxn = 0;
+ int type;
+ int ap;
+ int domain = 0;
+ int domain_prot;
+ hwaddr phys_addr;
+ uint32_t dacr;
+ bool ns;
+
+ /* Pagetable walk. */
+ /* Lookup l1 descriptor. */
+ if (!get_level1_table_address(env, mmu_idx, &table, address)) {
+ /* Section translation fault if page walk is disabled by PD0 or PD1 */
+ code = 5;
+ goto do_fault;
+ }
+ desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
+ mmu_idx, fsr, fi);
+ type = (desc & 3);
+ if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
+ /* Section translation fault, or attempt to use the encoding
+ * which is Reserved on implementations without PXN.
+ */
+ code = 5;
+ goto do_fault;
+ }
+ if ((type == 1) || !(desc & (1 << 18))) {
+ /* Page or Section. */
+ domain = (desc >> 5) & 0x0f;
+ }
+ if (regime_el(env, mmu_idx) == 1) {
+ dacr = env->cp15.dacr_ns;
+ } else {
+ dacr = env->cp15.dacr_s;
+ }
+ domain_prot = (dacr >> (domain * 2)) & 3;
+ if (domain_prot == 0 || domain_prot == 2) {
+ if (type != 1) {
+ code = 9; /* Section domain fault. */
+ } else {
+ code = 11; /* Page domain fault. */
+ }
+ goto do_fault;
+ }
+ if (type != 1) {
+ if (desc & (1 << 18)) {
+ /* Supersection. */
+ phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
+ phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
+ phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
+ *page_size = 0x1000000;
+ } else {
+ /* Section. */
+ phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
+ *page_size = 0x100000;
+ }
+ ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
+ xn = desc & (1 << 4);
+ pxn = desc & 1;
+ code = 13;
+ ns = extract32(desc, 19, 1);
+ } else {
+ if (arm_feature(env, ARM_FEATURE_PXN)) {
+ pxn = (desc >> 2) & 1;
+ }
+ ns = extract32(desc, 3, 1);
+ /* Lookup l2 entry. */
+ table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
+ desc = arm_ldl_ptw(cs, table, regime_is_secure(env, mmu_idx),
+ mmu_idx, fsr, fi);
+ ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
+ switch (desc & 3) {
+ case 0: /* Page translation fault. */
+ code = 7;
+ goto do_fault;
+ case 1: /* 64k page. */
+ phys_addr = (desc & 0xffff0000) | (address & 0xffff);
+ xn = desc & (1 << 15);
+ *page_size = 0x10000;
+ break;
+ case 2: case 3: /* 4k page. */
+ phys_addr = (desc & 0xfffff000) | (address & 0xfff);
+ xn = desc & 1;
+ *page_size = 0x1000;
+ break;
+ default:
+ /* Never happens, but compiler isn't smart enough to tell. */
+ abort();
+ }
+ code = 15;
+ }
+ if (domain_prot == 3) {
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ } else {
+ if (pxn && !regime_is_user(env, mmu_idx)) {
+ xn = 1;
+ }
+        if (xn && access_type == 2) {
+            goto do_fault;
+        }
+
+ if (arm_feature(env, ARM_FEATURE_V6K) &&
+ (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
+ /* The simplified model uses AP[0] as an access control bit. */
+ if ((ap & 1) == 0) {
+ /* Access flag fault. */
+ code = (code == 15) ? 6 : 3;
+ goto do_fault;
+ }
+ *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
+ } else {
+ *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
+ }
+ if (*prot && !xn) {
+ *prot |= PAGE_EXEC;
+ }
+ if (!(*prot & (1 << access_type))) {
+ /* Access permission fault. */
+ goto do_fault;
+ }
+ }
+ if (ns) {
+ /* The NS bit will (as required by the architecture) have no effect if
+ * the CPU doesn't support TZ or this is a non-secure translation
+ * regime, because the attribute will already be non-secure.
+ */
+ attrs->secure = false;
+ }
+ *phys_ptr = phys_addr;
+ return false;
+do_fault:
+ *fsr = code | (domain << 4);
+ return true;
+}
+
+/* Fault type for long-descriptor MMU fault reporting; this corresponds
+ * to bits [5..2] in the STATUS field in long-format DFSR/IFSR.
+ */
+typedef enum {
+ translation_fault = 1,
+ access_fault = 2,
+ permission_fault = 3,
+} MMUFaultType;
+
+/*
+ * check_s2_mmu_setup
+ * @cpu: ARMCPU
+ * @is_aa64: True if the translation regime is in AArch64 state
+ * @startlevel: Suggested starting level
+ * @inputsize: Bitsize of IPAs
+ * @stride: Page-table stride (See the ARM ARM)
+ *
+ * Returns true if the suggested S2 translation parameters are OK and
+ * false otherwise.
+ */
+static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
+ int inputsize, int stride)
+{
+ const int grainsize = stride + 3;
+ int startsizecheck;
+
+ /* Negative levels are never allowed. */
+ if (level < 0) {
+ return false;
+ }
+
+ startsizecheck = inputsize - ((3 - level) * stride + grainsize);
+ if (startsizecheck < 1 || startsizecheck > stride + 4) {
+ return false;
+ }
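+    /* For example: with 4KB granules (stride == 9, grainsize == 12), a
+     * suggested level of 1 and a 40-bit inputsize, startsizecheck is
+     * 40 - (2 * 9 + 12) = 10, which lies inside the permitted
+     * [1, stride + 4] range.
+     */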
+
+ if (is_aa64) {
+ CPUARMState *env = &cpu->env;
+ unsigned int pamax = arm_pamax(cpu);
+
+ switch (stride) {
+ case 13: /* 64KB Pages. */
+ if (level == 0 || (level == 1 && pamax <= 42)) {
+ return false;
+ }
+ break;
+ case 11: /* 16KB Pages. */
+ if (level == 0 || (level == 1 && pamax <= 40)) {
+ return false;
+ }
+ break;
+ case 9: /* 4KB Pages. */
+ if (level == 0 && pamax <= 42) {
+ return false;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ /* Inputsize checks. */
+ if (inputsize > pamax &&
+ (arm_el_is_aa64(env, 1) || inputsize > 40)) {
+ /* This is CONSTRAINED UNPREDICTABLE and we choose to fault. */
+ return false;
+ }
+ } else {
+ /* AArch32 only supports 4KB pages. Assert on that. */
+ assert(stride == 9);
+
+ if (level == 0) {
+ return false;
+ }
+ }
+ return true;
+}
+
+static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
+ int access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
+ target_ulong *page_size_ptr, uint32_t *fsr,
+ ARMMMUFaultInfo *fi)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ /* Read an LPAE long-descriptor translation table. */
+ MMUFaultType fault_type = translation_fault;
+ uint32_t level;
+ uint32_t epd = 0;
+ int32_t t0sz, t1sz;
+ uint32_t tg;
+ uint64_t ttbr;
+ int ttbr_select;
+ hwaddr descaddr, indexmask, indexmask_grainsize;
+ uint32_t tableattrs;
+ target_ulong page_size;
+ uint32_t attrs;
+ int32_t stride = 9;
+ int32_t addrsize;
+ int inputsize;
+ int32_t tbi = 0;
+ TCR *tcr = regime_tcr(env, mmu_idx);
+ int ap, ns, xn, pxn;
+ uint32_t el = regime_el(env, mmu_idx);
+ bool ttbr1_valid = true;
+ uint64_t descaddrmask;
+ bool aarch64 = arm_el_is_aa64(env, el);
+
+ /* TODO:
+ * This code does not handle the different format TCR for VTCR_EL2.
+ * This code also does not support shareability levels.
+ * Attribute and permission bit handling should also be checked when adding
+ * support for those page table walks.
+ */
+ if (aarch64) {
+ level = 0;
+ addrsize = 64;
+ if (el > 1) {
+ if (mmu_idx != ARMMMUIdx_S2NS) {
+ tbi = extract64(tcr->raw_tcr, 20, 1);
+ }
+ } else {
+ if (extract64(address, 55, 1)) {
+ tbi = extract64(tcr->raw_tcr, 38, 1);
+ } else {
+ tbi = extract64(tcr->raw_tcr, 37, 1);
+ }
+ }
+ tbi *= 8;
+
+ /* If we are in 64-bit EL2 or EL3 then there is no TTBR1, so mark it
+ * invalid.
+ */
+ if (el > 1) {
+ ttbr1_valid = false;
+ }
+ } else {
+ level = 1;
+ addrsize = 32;
+ /* There is no TTBR1 for EL2 */
+ if (el == 2) {
+ ttbr1_valid = false;
+ }
+ }
+
+ /* Determine whether this address is in the region controlled by
+ * TTBR0 or TTBR1 (or if it is in neither region and should fault).
+ * This is a Non-secure PL0/1 stage 1 translation, so controlled by
+ * TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32:
+ */
+ if (aarch64) {
+ /* AArch64 translation. */
+ t0sz = extract32(tcr->raw_tcr, 0, 6);
+ t0sz = MIN(t0sz, 39);
+ t0sz = MAX(t0sz, 16);
+ } else if (mmu_idx != ARMMMUIdx_S2NS) {
+ /* AArch32 stage 1 translation. */
+ t0sz = extract32(tcr->raw_tcr, 0, 3);
+ } else {
+ /* AArch32 stage 2 translation. */
+ bool sext = extract32(tcr->raw_tcr, 4, 1);
+ bool sign = extract32(tcr->raw_tcr, 3, 1);
+ /* Address size is 40-bit for a stage 2 translation,
+ * and t0sz can be negative (from -8 to 7),
+ * so we need to adjust it to use the TTBR selecting logic below.
+ */
+ addrsize = 40;
+ t0sz = sextract32(tcr->raw_tcr, 0, 4) + 8;
+
+ /* If the sign-extend bit is not the same as t0sz[3], the result
+ * is unpredictable. Flag this as a guest error. */
+ if (sign != sext) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "AArch32: VTCR.S / VTCR.T0SZ[3] mismatch\n");
+ }
+ }
+ t1sz = extract32(tcr->raw_tcr, 16, 6);
+ if (aarch64) {
+ t1sz = MIN(t1sz, 39);
+ t1sz = MAX(t1sz, 16);
+ }
+ if (t0sz && !extract64(address, addrsize - t0sz, t0sz - tbi)) {
+ /* there is a ttbr0 region and we are in it (high bits all zero) */
+ ttbr_select = 0;
+ } else if (ttbr1_valid && t1sz &&
+ !extract64(~address, addrsize - t1sz, t1sz - tbi)) {
+ /* there is a ttbr1 region and we are in it (high bits all one) */
+ ttbr_select = 1;
+ } else if (!t0sz) {
+ /* ttbr0 region is "everything not in the ttbr1 region" */
+ ttbr_select = 0;
+ } else if (!t1sz && ttbr1_valid) {
+ /* ttbr1 region is "everything not in the ttbr0 region" */
+ ttbr_select = 1;
+ } else {
+ /* in the gap between the two regions, this is a Translation fault */
+ fault_type = translation_fault;
+ goto do_fault;
+ }
+
+ /* Note that QEMU ignores shareability and cacheability attributes,
+ * so we don't need to do anything with the SH, ORGN, IRGN fields
+ * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the
+ * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
+ * implement any ASID-like capability so we can ignore it (instead
+ * we will always flush the TLB any time the ASID is changed).
+ */
+ if (ttbr_select == 0) {
+ ttbr = regime_ttbr(env, mmu_idx, 0);
+ if (el < 2) {
+ epd = extract32(tcr->raw_tcr, 7, 1);
+ }
+ inputsize = addrsize - t0sz;
+
+ tg = extract32(tcr->raw_tcr, 14, 2);
+ if (tg == 1) { /* 64KB pages */
+ stride = 13;
+ }
+ if (tg == 2) { /* 16KB pages */
+ stride = 11;
+ }
+ } else {
+ /* We should only be here if TTBR1 is valid */
+ assert(ttbr1_valid);
+
+ ttbr = regime_ttbr(env, mmu_idx, 1);
+ epd = extract32(tcr->raw_tcr, 23, 1);
+ inputsize = addrsize - t1sz;
+
+ tg = extract32(tcr->raw_tcr, 30, 2);
+ if (tg == 3) { /* 64KB pages */
+ stride = 13;
+ }
+ if (tg == 1) { /* 16KB pages */
+ stride = 11;
+ }
+ }
+
+ /* Here we should have set up all the parameters for the translation:
+ * inputsize, ttbr, epd, stride, tbi
+ */
+
+ if (epd) {
+ /* Translation table walk disabled => Translation fault on TLB miss
+ * Note: This is always 0 on 64-bit EL2 and EL3.
+ */
+ goto do_fault;
+ }
+
+ if (mmu_idx != ARMMMUIdx_S2NS) {
+ /* The starting level depends on the virtual address size (which can
+ * be up to 48 bits) and the translation granule size. It indicates
+ * the number of strides (stride bits at a time) needed to
+ * consume the bits of the input address. In the pseudocode this is:
+ * level = 4 - RoundUp((inputsize - grainsize) / stride)
+ * where their 'inputsize' is our 'inputsize', 'grainsize' is
+ * our 'stride + 3' and 'stride' is our 'stride'.
+ * Applying the usual "rounded up m/n is (m+n-1)/n" and simplifying:
+ * = 4 - (inputsize - stride - 3 + stride - 1) / stride
+ * = 4 - (inputsize - 4) / stride;
+ */
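+        /* For example: with 4KB granules (stride == 9, grainsize == 12) and
+         * a 48-bit inputsize this gives level = 4 - (48 - 4) / 9 = 0,
+         * matching the pseudocode's 4 - RoundUp((48 - 12) / 9).
+         */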
+ level = 4 - (inputsize - 4) / stride;
+ } else {
+ /* For stage 2 translations the starting level is specified by the
+ * VTCR_EL2.SL0 field (whose interpretation depends on the page size)
+ */
+ uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
+ uint32_t startlevel;
+ bool ok;
+
+ if (!aarch64 || stride == 9) {
+ /* AArch32 or 4KB pages */
+ startlevel = 2 - sl0;
+ } else {
+ /* 16KB or 64KB pages */
+ startlevel = 3 - sl0;
+ }
+
+ /* Check that the starting level is valid. */
+ ok = check_s2_mmu_setup(cpu, aarch64, startlevel,
+ inputsize, stride);
+ if (!ok) {
+ fault_type = translation_fault;
+ goto do_fault;
+ }
+ level = startlevel;
+ }
+
+ indexmask_grainsize = (1ULL << (stride + 3)) - 1;
+ indexmask = (1ULL << (inputsize - (stride * (4 - level)))) - 1;
+
+ /* Now we can extract the actual base address from the TTBR */
+ descaddr = extract64(ttbr, 0, 48);
+ descaddr &= ~indexmask;
+
+    /* The address field in the descriptor goes up to bit 39 for ARMv7
+     * but up to bit 47 for ARMv8.  However, we only use a descaddrmask
+     * up to bit 39 for AArch32, because we don't need the other bits in
+     * that case to construct the next descriptor address (they should
+     * all be zero anyway).
+     */
+ descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
+ ~indexmask_grainsize;
+
+ /* Secure accesses start with the page table in secure memory and
+ * can be downgraded to non-secure at any step. Non-secure accesses
+ * remain non-secure. We implement this by just ORing in the NSTable/NS
+ * bits at each step.
+ */
+ tableattrs = regime_is_secure(env, mmu_idx) ? 0 : (1 << 4);
+ for (;;) {
+ uint64_t descriptor;
+ bool nstable;
+
+ descaddr |= (address >> (stride * (4 - level))) & indexmask;
+ descaddr &= ~7ULL;
+ nstable = extract32(tableattrs, 4, 1);
+ descriptor = arm_ldq_ptw(cs, descaddr, !nstable, mmu_idx, fsr, fi);
+ if (fi->s1ptw) {
+ goto do_fault;
+ }
+
+ if (!(descriptor & 1) ||
+ (!(descriptor & 2) && (level == 3))) {
+ /* Invalid, or the Reserved level 3 encoding */
+ goto do_fault;
+ }
+ descaddr = descriptor & descaddrmask;
+
+ if ((descriptor & 2) && (level < 3)) {
+ /* Table entry. The top five bits are attributes which may
+ * propagate down through lower levels of the table (and
+ * which are all arranged so that 0 means "no effect", so
+ * we can gather them up by ORing in the bits at each level).
+ */
+ tableattrs |= extract64(descriptor, 59, 5);
+ level++;
+ indexmask = indexmask_grainsize;
+ continue;
+ }
+ /* Block entry at level 1 or 2, or page entry at level 3.
+ * These are basically the same thing, although the number
+ * of bits we pull in from the vaddr varies.
+ */
+ page_size = (1ULL << ((stride * (4 - level)) + 3));
+ descaddr |= (address & (page_size - 1));
+ /* Extract attributes from the descriptor */
+ attrs = extract64(descriptor, 2, 10)
+ | (extract64(descriptor, 52, 12) << 10);
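+        /* (After this packing, attrs bit 3 is NS, bits 5:4 are AP[2:1],
+         * bit 8 is the Access Flag, bit 11 is PXN and bit 12 is XN, which
+         * is how they are consumed below.)
+         */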
+
+ if (mmu_idx == ARMMMUIdx_S2NS) {
+ /* Stage 2 table descriptors do not include any attribute fields */
+ break;
+ }
+ /* Merge in attributes from table descriptors */
+ attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
+ attrs |= extract32(tableattrs, 3, 1) << 5; /* APTable[1] => AP[2] */
+ /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
+ * means "force PL1 access only", which means forcing AP[1] to 0.
+ */
+ if (extract32(tableattrs, 2, 1)) {
+ attrs &= ~(1 << 4);
+ }
+ attrs |= nstable << 3; /* NS */
+ break;
+ }
+ /* Here descaddr is the final physical address, and attributes
+ * are all in attrs.
+ */
+ fault_type = access_fault;
+ if ((attrs & (1 << 8)) == 0) {
+ /* Access flag */
+ goto do_fault;
+ }
+
+ ap = extract32(attrs, 4, 2);
+ xn = extract32(attrs, 12, 1);
+
+ if (mmu_idx == ARMMMUIdx_S2NS) {
+ ns = true;
+ *prot = get_S2prot(env, ap, xn);
+ } else {
+ ns = extract32(attrs, 3, 1);
+ pxn = extract32(attrs, 11, 1);
+ *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
+ }
+
+ fault_type = permission_fault;
+ if (!(*prot & (1 << access_type))) {
+ goto do_fault;
+ }
+
+ if (ns) {
+ /* The NS bit will (as required by the architecture) have no effect if
+ * the CPU doesn't support TZ or this is a non-secure translation
+ * regime, because the attribute will already be non-secure.
+ */
+ txattrs->secure = false;
+ }
+ *phys_ptr = descaddr;
+ *page_size_ptr = page_size;
+ return false;
+
+do_fault:
+ /* Long-descriptor format IFSR/DFSR value */
+ *fsr = (1 << 9) | (fault_type << 2) | level;
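+    /* For example, a level 2 translation fault is reported as
+     * (1 << 9) | (translation_fault << 2) | 2, i.e. 0x206; bit 9 marks
+     * the long-descriptor FSR format.
+     */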
+ /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */
+ fi->stage2 = fi->s1ptw || (mmu_idx == ARMMMUIdx_S2NS);
+ return true;
+}
+
+static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
+ ARMMMUIdx mmu_idx,
+ int32_t address, int *prot)
+{
+ *prot = PAGE_READ | PAGE_WRITE;
+ switch (address) {
+ case 0xF0000000 ... 0xFFFFFFFF:
+ if (regime_sctlr(env, mmu_idx) & SCTLR_V) { /* hivecs execing is ok */
+ *prot |= PAGE_EXEC;
+ }
+ break;
+ case 0x00000000 ... 0x7FFFFFFF:
+ *prot |= PAGE_EXEC;
+ break;
+ }
+}
+
+static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
+ int access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, int *prot, uint32_t *fsr)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ int n;
+ bool is_user = regime_is_user(env, mmu_idx);
+
+ *phys_ptr = address;
+ *prot = 0;
+
+ if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
+ get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
+ } else { /* MPU enabled */
+ for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
+ /* region search */
+ uint32_t base = env->pmsav7.drbar[n];
+ uint32_t rsize = extract32(env->pmsav7.drsr[n], 1, 5);
+ uint32_t rmask;
+ bool srdis = false;
+
+ if (!(env->pmsav7.drsr[n] & 0x1)) {
+ continue;
+ }
+
+ if (!rsize) {
+                qemu_log_mask(LOG_GUEST_ERROR, "DRSR.Rsize field cannot be 0");
+ continue;
+ }
+ rsize++;
+ rmask = (1ull << rsize) - 1;
+
+ if (base & rmask) {
+ qemu_log_mask(LOG_GUEST_ERROR, "DRBAR %" PRIx32 " misaligned "
+ "to DRSR region size, mask = %" PRIx32,
+ base, rmask);
+ continue;
+ }
+
+ if (address < base || address > base + rmask) {
+ continue;
+ }
+
+ /* Region matched */
+
+ if (rsize >= 8) { /* no subregions for regions < 256 bytes */
+ int i, snd;
+ uint32_t srdis_mask;
+
+ rsize -= 3; /* sub region size (power of 2) */
+ snd = ((address - base) >> rsize) & 0x7;
+ srdis = extract32(env->pmsav7.drsr[n], snd + 8, 1);
+
+ srdis_mask = srdis ? 0x3 : 0x0;
+ for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
+ /* This will check in groups of 2, 4 and then 8, whether
+ * the subregion bits are consistent. rsize is incremented
+ * back up to give the region size, considering consistent
+ * adjacent subregions as one region. Stop testing if rsize
+ * is already big enough for an entire QEMU page.
+ */
+ int snd_rounded = snd & ~(i - 1);
+ uint32_t srdis_multi = extract32(env->pmsav7.drsr[n],
+ snd_rounded + 8, i);
+ if (srdis_mask ^ srdis_multi) {
+ break;
+ }
+ srdis_mask = (srdis_mask << i) | srdis_mask;
+ rsize++;
+ }
+ }
+ if (rsize < TARGET_PAGE_BITS) {
+                qemu_log_mask(LOG_UNIMP, "No support for MPU (sub)region "
+                              "alignment of %" PRIu32 " bits. Minimum is %d\n",
+ rsize, TARGET_PAGE_BITS);
+ continue;
+ }
+ if (srdis) {
+ continue;
+ }
+ break;
+ }
+
+ if (n == -1) { /* no hits */
+ if (cpu->pmsav7_dregion &&
+ (is_user || !(regime_sctlr(env, mmu_idx) & SCTLR_BR))) {
+ /* background fault */
+ *fsr = 0;
+ return true;
+ }
+ get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
+        } else { /* an MPU hit! */
+ uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
+
+ if (is_user) { /* User mode AP bit decoding */
+ switch (ap) {
+ case 0:
+ case 1:
+ case 5:
+ break; /* no access */
+ case 3:
+ *prot |= PAGE_WRITE;
+ /* fall through */
+ case 2:
+ case 6:
+ *prot |= PAGE_READ | PAGE_EXEC;
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Bad value for AP bits in DRACR %"
+ PRIx32 "\n", ap);
+ }
+ } else { /* Priv. mode AP bits decoding */
+ switch (ap) {
+ case 0:
+ break; /* no access */
+ case 1:
+ case 2:
+ case 3:
+ *prot |= PAGE_WRITE;
+ /* fall through */
+ case 5:
+ case 6:
+ *prot |= PAGE_READ | PAGE_EXEC;
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Bad value for AP bits in DRACR %"
+ PRIx32 "\n", ap);
+ }
+ }
+
+ /* execute never */
+ if (env->pmsav7.dracr[n] & (1 << 12)) {
+ *prot &= ~PAGE_EXEC;
+ }
+ }
+ }
+
+ *fsr = 0x00d; /* Permission fault */
+ return !(*prot & (1 << access_type));
+}
+
+static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
+ int access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, int *prot, uint32_t *fsr)
+{
+ int n;
+ uint32_t mask;
+ uint32_t base;
+ bool is_user = regime_is_user(env, mmu_idx);
+
+ *phys_ptr = address;
+ for (n = 7; n >= 0; n--) {
+ base = env->cp15.c6_region[n];
+ if ((base & 1) == 0) {
+ continue;
+ }
+ mask = 1 << ((base >> 1) & 0x1f);
+ /* Keep this shift separate from the above to avoid an
+ (undefined) << 32. */
+ mask = (mask << 1) - 1;
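+        /* For example, the maximum size encoding of 31 yields
+         * mask = ((1u << 31) << 1) - 1 = 0xffffffff, whereas a single
+         * "1 << 32" shift would be undefined behaviour.
+         */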
+ if (((base ^ address) & ~mask) == 0) {
+ break;
+ }
+ }
+ if (n < 0) {
+ *fsr = 2;
+ return true;
+ }
+
+ if (access_type == 2) {
+ mask = env->cp15.pmsav5_insn_ap;
+ } else {
+ mask = env->cp15.pmsav5_data_ap;
+ }
+ mask = (mask >> (n * 4)) & 0xf;
+ switch (mask) {
+ case 0:
+ *fsr = 1;
+ return true;
+ case 1:
+ if (is_user) {
+ *fsr = 1;
+ return true;
+ }
+ *prot = PAGE_READ | PAGE_WRITE;
+ break;
+ case 2:
+ *prot = PAGE_READ;
+ if (!is_user) {
+ *prot |= PAGE_WRITE;
+ }
+ break;
+ case 3:
+ *prot = PAGE_READ | PAGE_WRITE;
+ break;
+ case 5:
+ if (is_user) {
+ *fsr = 1;
+ return true;
+ }
+ *prot = PAGE_READ;
+ break;
+ case 6:
+ *prot = PAGE_READ;
+ break;
+ default:
+ /* Bad permission. */
+ *fsr = 1;
+ return true;
+ }
+ *prot |= PAGE_EXEC;
+ return false;
+}
+
+/* get_phys_addr - get the physical address for this virtual address
+ *
+ * Find the physical address corresponding to the given virtual address,
+ * by doing a translation table walk on MMU based systems or using the
+ * MPU state on MPU based systems.
+ *
+ * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
+ * prot and page_size may not be filled in, and the populated fsr value provides
+ * information on why the translation aborted, in the format of a
+ * DFSR/IFSR fault register, with the following caveats:
+ * * we honour the short vs long DFSR format differences.
+ * * the WnR bit is never set (the caller must do this).
+ *  * for PMSAv5 based systems we don't bother to return a full FSR format
+ * value.
+ *
+ * @env: CPUARMState
+ * @address: virtual address to get physical address for
+ * @access_type: 0 for read, 1 for write, 2 for execute
+ * @mmu_idx: MMU index indicating required translation regime
+ * @phys_ptr: set to the physical address corresponding to the virtual address
+ * @attrs: set to the memory transaction attributes to use
+ * @prot: set to the permissions for the page containing phys_ptr
+ * @page_size: set to the size of the page containing phys_ptr
+ * @fsr: set to the DFSR/IFSR value on failure
+ */
+static bool get_phys_addr(CPUARMState *env, target_ulong address,
+ int access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
+ target_ulong *page_size, uint32_t *fsr,
+ ARMMMUFaultInfo *fi)
+{
+ if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
+ /* Call ourselves recursively to do the stage 1 and then stage 2
+ * translations.
+ */
+ if (arm_feature(env, ARM_FEATURE_EL2)) {
+ hwaddr ipa;
+ int s2_prot;
+ int ret;
+
+ ret = get_phys_addr(env, address, access_type,
+ mmu_idx + ARMMMUIdx_S1NSE0, &ipa, attrs,
+ prot, page_size, fsr, fi);
+
+ /* If S1 fails or S2 is disabled, return early. */
+ if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
+ *phys_ptr = ipa;
+ return ret;
+ }
+
+ /* S1 is done. Now do S2 translation. */
+ ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
+ phys_ptr, attrs, &s2_prot,
+ page_size, fsr, fi);
+ fi->s2addr = ipa;
+ /* Combine the S1 and S2 perms. */
+ *prot &= s2_prot;
+ return ret;
+ } else {
+ /*
+ * For non-EL2 CPUs a stage1+stage2 translation is just stage 1.
+ */
+ mmu_idx += ARMMMUIdx_S1NSE0;
+ }
+ }
+
+ /* The page table entries may downgrade secure to non-secure, but
+     * cannot upgrade a non-secure translation regime's attributes
+ * to secure.
+ */
+ attrs->secure = regime_is_secure(env, mmu_idx);
+ attrs->user = regime_is_user(env, mmu_idx);
+
+ /* Fast Context Switch Extension. This doesn't exist at all in v8.
+ * In v7 and earlier it affects all stage 1 translations.
+ */
+ if (address < 0x02000000 && mmu_idx != ARMMMUIdx_S2NS
+ && !arm_feature(env, ARM_FEATURE_V8)) {
+ if (regime_el(env, mmu_idx) == 3) {
+ address += env->cp15.fcseidr_s;
+ } else {
+ address += env->cp15.fcseidr_ns;
+ }
+ }
+
+ /* pmsav7 has special handling for when MPU is disabled so call it before
+ * the common MMU/MPU disabled check below.
+ */
+ if (arm_feature(env, ARM_FEATURE_MPU) &&
+ arm_feature(env, ARM_FEATURE_V7)) {
+ *page_size = TARGET_PAGE_SIZE;
+ return get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
+ phys_ptr, prot, fsr);
+ }
+
+ if (regime_translation_disabled(env, mmu_idx)) {
+ /* MMU/MPU disabled. */
+ *phys_ptr = address;
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ *page_size = TARGET_PAGE_SIZE;
+        return false;
+ }
+
+ if (arm_feature(env, ARM_FEATURE_MPU)) {
+ /* Pre-v7 MPU */
+ *page_size = TARGET_PAGE_SIZE;
+ return get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
+ phys_ptr, prot, fsr);
+ }
+
+ if (regime_using_lpae_format(env, mmu_idx)) {
+ return get_phys_addr_lpae(env, address, access_type, mmu_idx, phys_ptr,
+ attrs, prot, page_size, fsr, fi);
+ } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
+ return get_phys_addr_v6(env, address, access_type, mmu_idx, phys_ptr,
+ attrs, prot, page_size, fsr, fi);
+ } else {
+ return get_phys_addr_v5(env, address, access_type, mmu_idx, phys_ptr,
+ prot, page_size, fsr, fi);
+ }
+}
+
+/* Walk the page table and (if the mapping exists) add the page
+ * to the TLB. Return false on success, or true on failure. Populate
+ * fsr with ARM DFSR/IFSR fault register format value on failure.
+ */
+bool arm_tlb_fill(CPUState *cs, vaddr address,
+ int access_type, int mmu_idx, uint32_t *fsr,
+ ARMMMUFaultInfo *fi)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ hwaddr phys_addr;
+ target_ulong page_size;
+ int prot;
+ int ret;
+ MemTxAttrs attrs = {};
+
+ ret = get_phys_addr(env, address, access_type, mmu_idx, &phys_addr,
+ &attrs, &prot, &page_size, fsr, fi);
+ if (!ret) {
+ /* Map a single [sub]page. */
+ phys_addr &= TARGET_PAGE_MASK;
+ address &= TARGET_PAGE_MASK;
+ tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
+ prot, mmu_idx, page_size);
+        return false;
+ }
+
+ return ret;
+}
+
+hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
+ MemTxAttrs *attrs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ hwaddr phys_addr;
+ target_ulong page_size;
+ int prot;
+ bool ret;
+ uint32_t fsr;
+ ARMMMUFaultInfo fi = {};
+
+ *attrs = (MemTxAttrs) {};
+
+ ret = get_phys_addr(env, addr, 0, cpu_mmu_index(env, false), &phys_addr,
+ attrs, &prot, &page_size, &fsr, &fi);
+
+ if (ret) {
+ return -1;
+ }
+ return phys_addr;
+}
+
+uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ switch (reg) {
+ case 0: /* APSR */
+ return xpsr_read(env) & 0xf8000000;
+ case 1: /* IAPSR */
+ return xpsr_read(env) & 0xf80001ff;
+ case 2: /* EAPSR */
+ return xpsr_read(env) & 0xff00fc00;
+ case 3: /* xPSR */
+ return xpsr_read(env) & 0xff00fdff;
+ case 5: /* IPSR */
+ return xpsr_read(env) & 0x000001ff;
+ case 6: /* EPSR */
+ return xpsr_read(env) & 0x0700fc00;
+ case 7: /* IEPSR */
+ return xpsr_read(env) & 0x0700edff;
+ case 8: /* MSP */
+ return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13];
+ case 9: /* PSP */
+ return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp;
+ case 16: /* PRIMASK */
+ return (env->daif & PSTATE_I) != 0;
+ case 17: /* BASEPRI */
+ case 18: /* BASEPRI_MAX */
+ return env->v7m.basepri;
+ case 19: /* FAULTMASK */
+ return (env->daif & PSTATE_F) != 0;
+ case 20: /* CONTROL */
+ return env->v7m.control;
+ default:
+ /* ??? For debugging only. */
+ cpu_abort(CPU(cpu), "Unimplemented system register read (%d)\n", reg);
+ return 0;
+ }
+}
+
+void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ switch (reg) {
+ case 0: /* APSR */
+ xpsr_write(env, val, 0xf8000000);
+ break;
+ case 1: /* IAPSR */
+ xpsr_write(env, val, 0xf8000000);
+ break;
+ case 2: /* EAPSR */
+ xpsr_write(env, val, 0xfe00fc00);
+ break;
+ case 3: /* xPSR */
+ xpsr_write(env, val, 0xfe00fc00);
+ break;
+ case 5: /* IPSR */
+ /* IPSR bits are readonly. */
+ break;
+ case 6: /* EPSR */
+ xpsr_write(env, val, 0x0600fc00);
+ break;
+ case 7: /* IEPSR */
+ xpsr_write(env, val, 0x0600fc00);
+ break;
+    case 8: /* MSP */
+        if (env->v7m.current_sp) {
+            env->v7m.other_sp = val;
+        } else {
+            env->regs[13] = val;
+        }
+        break;
+    case 9: /* PSP */
+        if (env->v7m.current_sp) {
+            env->regs[13] = val;
+        } else {
+            env->v7m.other_sp = val;
+        }
+        break;
+ case 16: /* PRIMASK */
+ if (val & 1) {
+ env->daif |= PSTATE_I;
+ } else {
+ env->daif &= ~PSTATE_I;
+ }
+ break;
+ case 17: /* BASEPRI */
+ env->v7m.basepri = val & 0xff;
+ break;
+ case 18: /* BASEPRI_MAX */
+ val &= 0xff;
+        if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0)) {
+            env->v7m.basepri = val;
+        }
+ break;
+ case 19: /* FAULTMASK */
+ if (val & 1) {
+ env->daif |= PSTATE_F;
+ } else {
+ env->daif &= ~PSTATE_F;
+ }
+ break;
+ case 20: /* CONTROL */
+ env->v7m.control = val & 3;
+ switch_v7m_sp(env, (val & 2) != 0);
+ break;
+ default:
+ /* ??? For debugging only. */
+ cpu_abort(CPU(cpu), "Unimplemented system register write (%d)\n", reg);
+ return;
+ }
+}
+
+#endif
+
+void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
+{
+ /* Implement DC ZVA, which zeroes a fixed-length block of memory.
+ * Note that we do not implement the (architecturally mandated)
+ * alignment fault for attempts to use this on Device memory
+ * (which matches the usual QEMU behaviour of not implementing either
+ * alignment faults or any memory attribute handling).
+ */
+
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ uint64_t blocklen = 4 << cpu->dcz_blocksize;
+ uint64_t vaddr = vaddr_in & ~(blocklen - 1);
+
+#ifndef CONFIG_USER_ONLY
+ {
+ /* Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
+ * the block size so we might have to do more than one TLB lookup.
+ * We know that in fact for any v8 CPU the page size is at least 4K
+ * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
+ * 1K as an artefact of legacy v5 subpage support being present in the
+ * same QEMU executable.
+ */
+ int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
+ void *hostaddr[maxidx];
+ int try, i;
+ unsigned mmu_idx = cpu_mmu_index(env, false);
+ TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
+
+ for (try = 0; try < 2; try++) {
+
+ for (i = 0; i < maxidx; i++) {
+ hostaddr[i] = tlb_vaddr_to_host(env,
+ vaddr + TARGET_PAGE_SIZE * i,
+ 1, mmu_idx);
+ if (!hostaddr[i]) {
+ break;
+ }
+ }
+ if (i == maxidx) {
+ /* If it's all in the TLB it's fair game for just writing to;
+ * we know we don't need to update dirty status, etc.
+ */
+ for (i = 0; i < maxidx - 1; i++) {
+ memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
+ }
+ memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
+ return;
+ }
+ /* OK, try a store and see if we can populate the tlb. This
+ * might cause an exception if the memory isn't writable,
+ * in which case we will longjmp out of here. We must for
+ * this purpose use the actual register value passed to us
+ * so that we get the fault address right.
+ */
+ helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC());
+ /* Now we can populate the other TLB entries, if any */
+ for (i = 0; i < maxidx; i++) {
+ uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
+ if (va != (vaddr_in & TARGET_PAGE_MASK)) {
+ helper_ret_stb_mmu(env, va, 0, oi, GETPC());
+ }
+ }
+ }
+
+ /* Slow path (probably attempt to do this to an I/O device or
+ * similar, or clearing of a block of code we have translations
+ * cached for). Just do a series of byte writes as the architecture
+ * demands. It's not worth trying to use a cpu_physical_memory_map(),
+ * memset(), unmap() sequence here because:
+ * + we'd need to account for the blocksize being larger than a page
+ * + the direct-RAM access case is almost always going to be dealt
+ * with in the fastpath code above, so there's no speed benefit
+ * + we would have to deal with the map returning NULL because the
+ * bounce buffer was in use
+ */
+ for (i = 0; i < blocklen; i++) {
+ helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC());
+ }
+ }
+#else
+ memset(g2h(vaddr), 0, blocklen);
+#endif
+}
+
+/* Note that signed overflow is undefined in C. The following routines are
+ careful to use unsigned types where modulo arithmetic is required.
+ Failure to do so _will_ break on newer gcc. */
+
+/* Signed saturating arithmetic. */
+
+/* Perform 16-bit signed saturating addition. */
+static inline uint16_t add16_sat(uint16_t a, uint16_t b)
+{
+ uint16_t res;
+
+ res = a + b;
+ if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
+ if (a & 0x8000)
+ res = 0x8000;
+ else
+ res = 0x7fff;
+ }
+ return res;
+}
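+/* For example, add16_sat(0x7000, 0x2000) would wrap to 0x9000, so the sign
+ * test above detects the overflow and the result saturates to 0x7fff instead.
+ */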
+
+/* Perform 8-bit signed saturating addition. */
+static inline uint8_t add8_sat(uint8_t a, uint8_t b)
+{
+ uint8_t res;
+
+ res = a + b;
+ if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
+ if (a & 0x80)
+ res = 0x80;
+ else
+ res = 0x7f;
+ }
+ return res;
+}
+
+/* Perform 16-bit signed saturating subtraction. */
+static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
+{
+ uint16_t res;
+
+ res = a - b;
+ if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
+ if (a & 0x8000)
+ res = 0x8000;
+ else
+ res = 0x7fff;
+ }
+ return res;
+}
+
+/* Perform 8-bit signed saturating subtraction. */
+static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
+{
+ uint8_t res;
+
+ res = a - b;
+ if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
+ if (a & 0x80)
+ res = 0x80;
+ else
+ res = 0x7f;
+ }
+ return res;
+}
+
+#define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
+#define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
+#define ADD8(a, b, n) RESULT(add8_sat(a, b), n, 8);
+#define SUB8(a, b, n) RESULT(sub8_sat(a, b), n, 8);
+#define PFX q
+
+#include "op_addsub.h"
+
+/* Unsigned saturating arithmetic. */
+static inline uint16_t add16_usat(uint16_t a, uint16_t b)
+{
+ uint16_t res;
+ res = a + b;
+ if (res < a)
+ res = 0xffff;
+ return res;
+}
+
+static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
+{
+ if (a > b)
+ return a - b;
+ else
+ return 0;
+}
+
+static inline uint8_t add8_usat(uint8_t a, uint8_t b)
+{
+ uint8_t res;
+ res = a + b;
+ if (res < a)
+ res = 0xff;
+ return res;
+}
+
+static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
+{
+ if (a > b)
+ return a - b;
+ else
+ return 0;
+}
+
+#define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
+#define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
+#define ADD8(a, b, n) RESULT(add8_usat(a, b), n, 8);
+#define SUB8(a, b, n) RESULT(sub8_usat(a, b), n, 8);
+#define PFX uq
+
+#include "op_addsub.h"
+
+/* Signed modulo arithmetic. */
+#define SARITH16(a, b, n, op) do { \
+ int32_t sum; \
+ sum = (int32_t)(int16_t)(a) op (int32_t)(int16_t)(b); \
+ RESULT(sum, n, 16); \
+ if (sum >= 0) \
+ ge |= 3 << (n * 2); \
+ } while(0)
+
+#define SARITH8(a, b, n, op) do { \
+ int32_t sum; \
+ sum = (int32_t)(int8_t)(a) op (int32_t)(int8_t)(b); \
+ RESULT(sum, n, 8); \
+ if (sum >= 0) \
+ ge |= 1 << n; \
+ } while(0)
+
+
+#define ADD16(a, b, n) SARITH16(a, b, n, +)
+#define SUB16(a, b, n) SARITH16(a, b, n, -)
+#define ADD8(a, b, n) SARITH8(a, b, n, +)
+#define SUB8(a, b, n) SARITH8(a, b, n, -)
+#define PFX s
+#define ARITH_GE
+
+#include "op_addsub.h"
+
+/* Unsigned modulo arithmetic. */
+#define ADD16(a, b, n) do { \
+ uint32_t sum; \
+ sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
+ RESULT(sum, n, 16); \
+ if ((sum >> 16) == 1) \
+ ge |= 3 << (n * 2); \
+ } while(0)
+
+#define ADD8(a, b, n) do { \
+ uint32_t sum; \
+ sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
+ RESULT(sum, n, 8); \
+ if ((sum >> 8) == 1) \
+ ge |= 1 << n; \
+ } while(0)
+
+#define SUB16(a, b, n) do { \
+ uint32_t sum; \
+ sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
+ RESULT(sum, n, 16); \
+ if ((sum >> 16) == 0) \
+ ge |= 3 << (n * 2); \
+ } while(0)
+
+#define SUB8(a, b, n) do { \
+ uint32_t sum; \
+ sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
+ RESULT(sum, n, 8); \
+ if ((sum >> 8) == 0) \
+ ge |= 1 << n; \
+ } while(0)
+
+#define PFX u
+#define ARITH_GE
+
+#include "op_addsub.h"
+
+/* Halved signed arithmetic. */
+#define ADD16(a, b, n) \
+ RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
+#define SUB16(a, b, n) \
+ RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
+#define ADD8(a, b, n) \
+ RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
+#define SUB8(a, b, n) \
+ RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
+#define PFX sh
+
+#include "op_addsub.h"
+
+/* Halved unsigned arithmetic. */
+#define ADD16(a, b, n) \
+ RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
+#define SUB16(a, b, n) \
+ RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
+#define ADD8(a, b, n) \
+ RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
+#define SUB8(a, b, n) \
+ RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
+#define PFX uh
+
+#include "op_addsub.h"
+
+static inline uint8_t do_usad(uint8_t a, uint8_t b)
+{
+ if (a > b)
+ return a - b;
+ else
+ return b - a;
+}
+
+/* Unsigned sum of absolute byte differences. */
+uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
+{
+ uint32_t sum;
+ sum = do_usad(a, b);
+ sum += do_usad(a >> 8, b >> 8);
+    sum += do_usad(a >> 16, b >> 16);
+ sum += do_usad(a >> 24, b >> 24);
+ return sum;
+}
+
+/* For ARMv6 SEL instruction. */
+uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
+{
+ uint32_t mask;
+
+ mask = 0;
+ if (flags & 1)
+ mask |= 0xff;
+ if (flags & 2)
+ mask |= 0xff00;
+ if (flags & 4)
+ mask |= 0xff0000;
+ if (flags & 8)
+ mask |= 0xff000000;
+ return (a & mask) | (b & ~mask);
+}
+
+/* VFP support. We follow the convention used for VFP instructions:
+ Single precision routines have a "s" suffix, double precision a
+ "d" suffix. */
+
+/* Convert host exception flags to vfp form. */
+static inline int vfp_exceptbits_from_host(int host_bits)
+{
+ int target_bits = 0;
+
+ if (host_bits & float_flag_invalid)
+ target_bits |= 1;
+ if (host_bits & float_flag_divbyzero)
+ target_bits |= 2;
+ if (host_bits & float_flag_overflow)
+ target_bits |= 4;
+ if (host_bits & (float_flag_underflow | float_flag_output_denormal))
+ target_bits |= 8;
+ if (host_bits & float_flag_inexact)
+ target_bits |= 0x10;
+ if (host_bits & float_flag_input_denormal)
+ target_bits |= 0x80;
+ return target_bits;
+}
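+/* (The bits set above correspond to the FPSCR cumulative exception flags
+ * IOC, DZC, OFC, UFC, IXC and IDC respectively.)
+ */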
+
+uint32_t HELPER(vfp_get_fpscr)(CPUARMState *env)
+{
+ int i;
+ uint32_t fpscr;
+
+ fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
+ | (env->vfp.vec_len << 16)
+ | (env->vfp.vec_stride << 20);
+ i = get_float_exception_flags(&env->vfp.fp_status);
+ i |= get_float_exception_flags(&env->vfp.standard_fp_status);
+ fpscr |= vfp_exceptbits_from_host(i);
+ return fpscr;
+}
+
+uint32_t vfp_get_fpscr(CPUARMState *env)
+{
+ return HELPER(vfp_get_fpscr)(env);
+}
+
+/* Convert vfp exception flags to target form. */
+static inline int vfp_exceptbits_to_host(int target_bits)
+{
+ int host_bits = 0;
+
+ if (target_bits & 1)
+ host_bits |= float_flag_invalid;
+ if (target_bits & 2)
+ host_bits |= float_flag_divbyzero;
+ if (target_bits & 4)
+ host_bits |= float_flag_overflow;
+ if (target_bits & 8)
+ host_bits |= float_flag_underflow;
+ if (target_bits & 0x10)
+ host_bits |= float_flag_inexact;
+ if (target_bits & 0x80)
+ host_bits |= float_flag_input_denormal;
+ return host_bits;
+}
+
+void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val)
+{
+ int i;
+ uint32_t changed;
+
+ changed = env->vfp.xregs[ARM_VFP_FPSCR];
+ env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
+ env->vfp.vec_len = (val >> 16) & 7;
+ env->vfp.vec_stride = (val >> 20) & 3;
+
+ changed ^= val;
+ if (changed & (3 << 22)) {
+ i = (val >> 22) & 3;
+ switch (i) {
+ case FPROUNDING_TIEEVEN:
+ i = float_round_nearest_even;
+ break;
+ case FPROUNDING_POSINF:
+ i = float_round_up;
+ break;
+ case FPROUNDING_NEGINF:
+ i = float_round_down;
+ break;
+ case FPROUNDING_ZERO:
+ i = float_round_to_zero;
+ break;
+ }
+ set_float_rounding_mode(i, &env->vfp.fp_status);
+ }
+ if (changed & (1 << 24)) {
+ set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
+ set_flush_inputs_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
+ }
+    if (changed & (1 << 25)) {
+        set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status);
+    }
+
+ i = vfp_exceptbits_to_host(val);
+ set_float_exception_flags(i, &env->vfp.fp_status);
+ set_float_exception_flags(0, &env->vfp.standard_fp_status);
+}
+
+void vfp_set_fpscr(CPUARMState *env, uint32_t val)
+{
+ HELPER(vfp_set_fpscr)(env, val);
+}
+
+#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))
+
+#define VFP_BINOP(name) \
+float32 VFP_HELPER(name, s)(float32 a, float32 b, void *fpstp) \
+{ \
+ float_status *fpst = fpstp; \
+ return float32_ ## name(a, b, fpst); \
+} \
+float64 VFP_HELPER(name, d)(float64 a, float64 b, void *fpstp) \
+{ \
+ float_status *fpst = fpstp; \
+ return float64_ ## name(a, b, fpst); \
+}
+VFP_BINOP(add)
+VFP_BINOP(sub)
+VFP_BINOP(mul)
+VFP_BINOP(div)
+VFP_BINOP(min)
+VFP_BINOP(max)
+VFP_BINOP(minnum)
+VFP_BINOP(maxnum)
+#undef VFP_BINOP
+
+float32 VFP_HELPER(neg, s)(float32 a)
+{
+ return float32_chs(a);
+}
+
+float64 VFP_HELPER(neg, d)(float64 a)
+{
+ return float64_chs(a);
+}
+
+float32 VFP_HELPER(abs, s)(float32 a)
+{
+ return float32_abs(a);
+}
+
+float64 VFP_HELPER(abs, d)(float64 a)
+{
+ return float64_abs(a);
+}
+
+float32 VFP_HELPER(sqrt, s)(float32 a, CPUARMState *env)
+{
+ return float32_sqrt(a, &env->vfp.fp_status);
+}
+
+float64 VFP_HELPER(sqrt, d)(float64 a, CPUARMState *env)
+{
+ return float64_sqrt(a, &env->vfp.fp_status);
+}
+
+/* XXX: check quiet/signaling case */
+#define DO_VFP_cmp(p, type) \
+void VFP_HELPER(cmp, p)(type a, type b, CPUARMState *env) \
+{ \
+ uint32_t flags; \
+ switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
+ case 0: flags = 0x6; break; \
+ case -1: flags = 0x8; break; \
+ case 1: flags = 0x2; break; \
+ default: case 2: flags = 0x3; break; \
+ } \
+ env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
+ | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
+} \
+void VFP_HELPER(cmpe, p)(type a, type b, CPUARMState *env) \
+{ \
+ uint32_t flags; \
+ switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
+ case 0: flags = 0x6; break; \
+ case -1: flags = 0x8; break; \
+ case 1: flags = 0x2; break; \
+ default: case 2: flags = 0x3; break; \
+ } \
+ env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
+ | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
+}
+DO_VFP_cmp(s, float32)
+DO_VFP_cmp(d, float64)
+#undef DO_VFP_cmp
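+/* (The flags values above are the NZCV result written to FPSCR[31:28]:
+ * 0x6 (ZC) for "equal", 0x8 (N) for "less than", 0x2 (C) for "greater than"
+ * and 0x3 (CV) for "unordered".)
+ */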
+
+/* Integer to float and float to integer conversions */
+
+#define CONV_ITOF(name, fsz, sign) \
+ float##fsz HELPER(name)(uint32_t x, void *fpstp) \
+{ \
+ float_status *fpst = fpstp; \
+ return sign##int32_to_##float##fsz((sign##int32_t)x, fpst); \
+}
+
+#define CONV_FTOI(name, fsz, sign, round) \
+uint32_t HELPER(name)(float##fsz x, void *fpstp) \
+{ \
+ float_status *fpst = fpstp; \
+ if (float##fsz##_is_any_nan(x)) { \
+ float_raise(float_flag_invalid, fpst); \
+ return 0; \
+ } \
+ return float##fsz##_to_##sign##int32##round(x, fpst); \
+}
+
+#define FLOAT_CONVS(name, p, fsz, sign) \
+CONV_ITOF(vfp_##name##to##p, fsz, sign) \
+CONV_FTOI(vfp_to##name##p, fsz, sign, ) \
+CONV_FTOI(vfp_to##name##z##p, fsz, sign, _round_to_zero)
+
+FLOAT_CONVS(si, s, 32, )
+FLOAT_CONVS(si, d, 64, )
+FLOAT_CONVS(ui, s, 32, u)
+FLOAT_CONVS(ui, d, 64, u)
+
+#undef CONV_ITOF
+#undef CONV_FTOI
+#undef FLOAT_CONVS
+
+/* floating point conversion */
+float64 VFP_HELPER(fcvtd, s)(float32 x, CPUARMState *env)
+{
+ float64 r = float32_to_float64(x, &env->vfp.fp_status);
+ /* ARM requires that S<->D conversion of any kind of NaN generates
+ * a quiet NaN by forcing the most significant frac bit to 1.
+ */
+ return float64_maybe_silence_nan(r, &env->vfp.fp_status);
+}
+
+float32 VFP_HELPER(fcvts, d)(float64 x, CPUARMState *env)
+{
+ float32 r = float64_to_float32(x, &env->vfp.fp_status);
+ /* ARM requires that S<->D conversion of any kind of NaN generates
+ * a quiet NaN by forcing the most significant frac bit to 1.
+ */
+ return float32_maybe_silence_nan(r, &env->vfp.fp_status);
+}
+
+/* VFP3 fixed point conversion. */
+#define VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
+float##fsz HELPER(vfp_##name##to##p)(uint##isz##_t x, uint32_t shift, \
+ void *fpstp) \
+{ \
+ float_status *fpst = fpstp; \
+ float##fsz tmp; \
+ tmp = itype##_to_##float##fsz(x, fpst); \
+ return float##fsz##_scalbn(tmp, -(int)shift, fpst); \
+}
+
+/* Notice that we want only input-denormal exception flags from the
+ * scalbn operation: the other possible flags (overflow+inexact if
+ * we overflow to infinity, output-denormal) aren't correct for the
+ * complete scale-and-convert operation.
+ */
+#define VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, round) \
+uint##isz##_t HELPER(vfp_to##name##p##round)(float##fsz x, \
+ uint32_t shift, \
+ void *fpstp) \
+{ \
+ float_status *fpst = fpstp; \
+ int old_exc_flags = get_float_exception_flags(fpst); \
+ float##fsz tmp; \
+ if (float##fsz##_is_any_nan(x)) { \
+ float_raise(float_flag_invalid, fpst); \
+ return 0; \
+ } \
+ tmp = float##fsz##_scalbn(x, shift, fpst); \
+ old_exc_flags |= get_float_exception_flags(fpst) \
+ & float_flag_input_denormal; \
+ set_float_exception_flags(old_exc_flags, fpst); \
+ return float##fsz##_to_##itype##round(tmp, fpst); \
+}
+
+#define VFP_CONV_FIX(name, p, fsz, isz, itype) \
+VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
+VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, _round_to_zero) \
+VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )
+
+#define VFP_CONV_FIX_A64(name, p, fsz, isz, itype) \
+VFP_CONV_FIX_FLOAT(name, p, fsz, isz, itype) \
+VFP_CONV_FLOAT_FIX_ROUND(name, p, fsz, isz, itype, )
+
+VFP_CONV_FIX(sh, d, 64, 64, int16)
+VFP_CONV_FIX(sl, d, 64, 64, int32)
+VFP_CONV_FIX_A64(sq, d, 64, 64, int64)
+VFP_CONV_FIX(uh, d, 64, 64, uint16)
+VFP_CONV_FIX(ul, d, 64, 64, uint32)
+VFP_CONV_FIX_A64(uq, d, 64, 64, uint64)
+VFP_CONV_FIX(sh, s, 32, 32, int16)
+VFP_CONV_FIX(sl, s, 32, 32, int32)
+VFP_CONV_FIX_A64(sq, s, 32, 64, int64)
+VFP_CONV_FIX(uh, s, 32, 32, uint16)
+VFP_CONV_FIX(ul, s, 32, 32, uint32)
+VFP_CONV_FIX_A64(uq, s, 32, 64, uint64)
+#undef VFP_CONV_FIX
+#undef VFP_CONV_FIX_FLOAT
+#undef VFP_CONV_FLOAT_FIX_ROUND
+
+/* Set the current fp rounding mode and return the old one.
+ * The argument is a softfloat float_round_ value.
+ */
+uint32_t HELPER(set_rmode)(uint32_t rmode, CPUARMState *env)
+{
+ float_status *fp_status = &env->vfp.fp_status;
+
+ uint32_t prev_rmode = get_float_rounding_mode(fp_status);
+ set_float_rounding_mode(rmode, fp_status);
+
+ return prev_rmode;
+}
+
+/* Set the current fp rounding mode in the standard fp status and return
+ * the old one. This is for NEON instructions that need to change the
+ * rounding mode but wish to use the standard FPSCR values for everything
+ * else. Always set the rounding mode back to the correct value after
+ * modifying it.
+ * The argument is a softfloat float_round_ value.
+ */
+uint32_t HELPER(set_neon_rmode)(uint32_t rmode, CPUARMState *env)
+{
+ float_status *fp_status = &env->vfp.standard_fp_status;
+
+ uint32_t prev_rmode = get_float_rounding_mode(fp_status);
+ set_float_rounding_mode(rmode, fp_status);
+
+ return prev_rmode;
+}
+
+/* Half precision conversions. */
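+/* (Bit 26 of the FPSCR is the AHP bit: when it is clear the IEEE
+ * half-precision format is used, otherwise the ARM alternative format.)
+ */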
+static float32 do_fcvt_f16_to_f32(uint32_t a, CPUARMState *env, float_status *s)
+{
+ int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
+ float32 r = float16_to_float32(make_float16(a), ieee, s);
+ if (ieee) {
+ return float32_maybe_silence_nan(r, s);
+ }
+ return r;
+}
+
+static uint32_t do_fcvt_f32_to_f16(float32 a, CPUARMState *env, float_status *s)
+{
+ int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
+ float16 r = float32_to_float16(a, ieee, s);
+ if (ieee) {
+ r = float16_maybe_silence_nan(r, s);
+ }
+ return float16_val(r);
+}
+
+float32 HELPER(neon_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
+{
+ return do_fcvt_f16_to_f32(a, env, &env->vfp.standard_fp_status);
+}
+
+uint32_t HELPER(neon_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
+{
+ return do_fcvt_f32_to_f16(a, env, &env->vfp.standard_fp_status);
+}
+
+float32 HELPER(vfp_fcvt_f16_to_f32)(uint32_t a, CPUARMState *env)
+{
+ return do_fcvt_f16_to_f32(a, env, &env->vfp.fp_status);
+}
+
+uint32_t HELPER(vfp_fcvt_f32_to_f16)(float32 a, CPUARMState *env)
+{
+ return do_fcvt_f32_to_f16(a, env, &env->vfp.fp_status);
+}
+
+float64 HELPER(vfp_fcvt_f16_to_f64)(uint32_t a, CPUARMState *env)
+{
+ int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
+ float64 r = float16_to_float64(make_float16(a), ieee, &env->vfp.fp_status);
+ if (ieee) {
+ return float64_maybe_silence_nan(r, &env->vfp.fp_status);
+ }
+ return r;
+}
+
+uint32_t HELPER(vfp_fcvt_f64_to_f16)(float64 a, CPUARMState *env)
+{
+ int ieee = (env->vfp.xregs[ARM_VFP_FPSCR] & (1 << 26)) == 0;
+ float16 r = float64_to_float16(a, ieee, &env->vfp.fp_status);
+ if (ieee) {
+ r = float16_maybe_silence_nan(r, &env->vfp.fp_status);
+ }
+ return float16_val(r);
+}
+
+#define float32_two make_float32(0x40000000)
+#define float32_three make_float32(0x40400000)
+#define float32_one_point_five make_float32(0x3fc00000)
+
+float32 HELPER(recps_f32)(float32 a, float32 b, CPUARMState *env)
+{
+ float_status *s = &env->vfp.standard_fp_status;
+ if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
+ (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
+ if (!(float32_is_zero(a) || float32_is_zero(b))) {
+ float_raise(float_flag_input_denormal, s);
+ }
+ return float32_two;
+ }
+ return float32_sub(float32_two, float32_mul(a, b, s), s);
+}
+
+float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUARMState *env)
+{
+ float_status *s = &env->vfp.standard_fp_status;
+ float32 product;
+ if ((float32_is_infinity(a) && float32_is_zero_or_denormal(b)) ||
+ (float32_is_infinity(b) && float32_is_zero_or_denormal(a))) {
+ if (!(float32_is_zero(a) || float32_is_zero(b))) {
+ float_raise(float_flag_input_denormal, s);
+ }
+ return float32_one_point_five;
+ }
+ product = float32_mul(a, b, s);
+ return float32_div(float32_sub(float32_three, product, s), float32_two, s);
+}
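+/* (recps_f32 and rsqrts_f32 above implement the VRECPS/VRSQRTS "step"
+ * operations: they return 2 - a * b and (3 - a * b) / 2 respectively, i.e.
+ * the correction factors of the Newton-Raphson iterations
+ * x' = x * (2 - d * x) for 1/d and x' = x * (3 - d * x * x) / 2 for
+ * 1/sqrt(d).)
+ */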
+
+/* NEON helpers. */
+
+/* Constants 256 and 512 are used in some helpers; we avoid relying on
+ * int->float conversions at run-time. */
+#define float64_256 make_float64(0x4070000000000000LL)
+#define float64_512 make_float64(0x4080000000000000LL)
+#define float32_maxnorm make_float32(0x7f7fffff)
+#define float64_maxnorm make_float64(0x7fefffffffffffffLL)
+
+/* Reciprocal functions
+ *
+ * The algorithm that must be used to calculate the estimate
+ * is specified by the ARM ARM, see FPRecipEstimate()
+ */
+
+static float64 recip_estimate(float64 a, float_status *real_fp_status)
+{
+ /* These calculations mustn't set any fp exception flags,
+ * so we use a local copy of the fp_status.
+ */
+ float_status dummy_status = *real_fp_status;
+ float_status *s = &dummy_status;
+ /* q = (int)(a * 512.0) */
+ float64 q = float64_mul(float64_512, a, s);
+ int64_t q_int = float64_to_int64_round_to_zero(q, s);
+
+ /* r = 1.0 / (((double)q + 0.5) / 512.0) */
+ q = int64_to_float64(q_int, s);
+ q = float64_add(q, float64_half, s);
+ q = float64_div(q, float64_512, s);
+ q = float64_div(float64_one, q, s);
+
+ /* s = (int)(256.0 * r + 0.5) */
+ q = float64_mul(q, float64_256, s);
+ q = float64_add(q, float64_half, s);
+ q_int = float64_to_int64_round_to_zero(q, s);
+
+ /* return (double)s / 256.0 */
+ return float64_div(int64_to_float64(q_int, s), float64_256, s);
+}
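+/* For example, for a == 0.5 the code above computes q_int == 256,
+ * r == 512 / 256.5 ~= 1.99610, then q_int == 511, and returns
+ * 511 / 256 == 1.99609375 as the estimate of 1 / 0.5.
+ */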
+
+/* Common wrapper to call recip_estimate */
+static float64 call_recip_estimate(float64 num, int off, float_status *fpst)
+{
+ uint64_t val64 = float64_val(num);
+ uint64_t frac = extract64(val64, 0, 52);
+ int64_t exp = extract64(val64, 52, 11);
+ uint64_t sbit;
+ float64 scaled, estimate;
+
+ /* Generate the scaled number for the estimate function */
+ if (exp == 0) {
+ if (extract64(frac, 51, 1) == 0) {
+ exp = -1;
+ frac = extract64(frac, 0, 50) << 2;
+ } else {
+ frac = extract64(frac, 0, 51) << 1;
+ }
+ }
+
+ /* scaled = '0' : '01111111110' : fraction<51:44> : Zeros(44); */
+ scaled = make_float64((0x3feULL << 52)
+ | extract64(frac, 44, 8) << 44);
+
+ estimate = recip_estimate(scaled, fpst);
+
+ /* Build new result */
+ val64 = float64_val(estimate);
+ sbit = 0x8000000000000000ULL & val64;
+ exp = off - exp;
+ frac = extract64(val64, 0, 52);
+
+ if (exp == 0) {
+ frac = 1ULL << 51 | extract64(frac, 1, 51);
+ } else if (exp == -1) {
+ frac = 1ULL << 50 | extract64(frac, 2, 50);
+ exp = 0;
+ }
+
+ return make_float64(sbit | (exp << 52) | frac);
+}
+
+static bool round_to_inf(float_status *fpst, bool sign_bit)
+{
+ switch (fpst->float_rounding_mode) {
+ case float_round_nearest_even: /* Round to Nearest */
+ return true;
+ case float_round_up: /* Round to +Inf */
+ return !sign_bit;
+ case float_round_down: /* Round to -Inf */
+ return sign_bit;
+ case float_round_to_zero: /* Round to Zero */
+ return false;
+ }
+
+ g_assert_not_reached();
+}
+
+float32 HELPER(recpe_f32)(float32 input, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ float32 f32 = float32_squash_input_denormal(input, fpst);
+ uint32_t f32_val = float32_val(f32);
+ uint32_t f32_sbit = 0x80000000ULL & f32_val;
+ int32_t f32_exp = extract32(f32_val, 23, 8);
+ uint32_t f32_frac = extract32(f32_val, 0, 23);
+ float64 f64, r64;
+ uint64_t r64_val;
+ int64_t r64_exp;
+ uint64_t r64_frac;
+
+ if (float32_is_any_nan(f32)) {
+ float32 nan = f32;
+ if (float32_is_signaling_nan(f32, fpst)) {
+ float_raise(float_flag_invalid, fpst);
+ nan = float32_maybe_silence_nan(f32, fpst);
+ }
+ if (fpst->default_nan_mode) {
+ nan = float32_default_nan(fpst);
+ }
+ return nan;
+ } else if (float32_is_infinity(f32)) {
+ return float32_set_sign(float32_zero, float32_is_neg(f32));
+ } else if (float32_is_zero(f32)) {
+ float_raise(float_flag_divbyzero, fpst);
+ return float32_set_sign(float32_infinity, float32_is_neg(f32));
+ } else if ((f32_val & ~(1ULL << 31)) < (1ULL << 21)) {
+ /* Abs(value) < 2.0^-128 */
+ float_raise(float_flag_overflow | float_flag_inexact, fpst);
+ if (round_to_inf(fpst, f32_sbit)) {
+ return float32_set_sign(float32_infinity, float32_is_neg(f32));
+ } else {
+ return float32_set_sign(float32_maxnorm, float32_is_neg(f32));
+ }
+ } else if (f32_exp >= 253 && fpst->flush_to_zero) {
+ float_raise(float_flag_underflow, fpst);
+ return float32_set_sign(float32_zero, float32_is_neg(f32));
+ }
+
+ f64 = make_float64(((int64_t)(f32_exp) << 52) | (int64_t)(f32_frac) << 29);
+ r64 = call_recip_estimate(f64, 253, fpst);
+ r64_val = float64_val(r64);
+ r64_exp = extract64(r64_val, 52, 11);
+ r64_frac = extract64(r64_val, 0, 52);
+
+ /* result = sign : result_exp<7:0> : fraction<51:29>; */
+ return make_float32(f32_sbit |
+ (r64_exp & 0xff) << 23 |
+ extract64(r64_frac, 29, 24));
+}
+
+float64 HELPER(recpe_f64)(float64 input, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ float64 f64 = float64_squash_input_denormal(input, fpst);
+ uint64_t f64_val = float64_val(f64);
+ uint64_t f64_sbit = 0x8000000000000000ULL & f64_val;
+ int64_t f64_exp = extract64(f64_val, 52, 11);
+ float64 r64;
+ uint64_t r64_val;
+ int64_t r64_exp;
+ uint64_t r64_frac;
+
+ /* Deal with any special cases */
+ if (float64_is_any_nan(f64)) {
+ float64 nan = f64;
+ if (float64_is_signaling_nan(f64, fpst)) {
+ float_raise(float_flag_invalid, fpst);
+ nan = float64_maybe_silence_nan(f64, fpst);
+ }
+ if (fpst->default_nan_mode) {
+ nan = float64_default_nan(fpst);
+ }
+ return nan;
+ } else if (float64_is_infinity(f64)) {
+ return float64_set_sign(float64_zero, float64_is_neg(f64));
+ } else if (float64_is_zero(f64)) {
+ float_raise(float_flag_divbyzero, fpst);
+ return float64_set_sign(float64_infinity, float64_is_neg(f64));
+ } else if ((f64_val & ~(1ULL << 63)) < (1ULL << 50)) {
+ /* Abs(value) < 2.0^-1024 */
+ float_raise(float_flag_overflow | float_flag_inexact, fpst);
+ if (round_to_inf(fpst, f64_sbit)) {
+ return float64_set_sign(float64_infinity, float64_is_neg(f64));
+ } else {
+ return float64_set_sign(float64_maxnorm, float64_is_neg(f64));
+ }
+ } else if (f64_exp >= 2045 && fpst->flush_to_zero) {
+ float_raise(float_flag_underflow, fpst);
+ return float64_set_sign(float64_zero, float64_is_neg(f64));
+ }
+
+ r64 = call_recip_estimate(f64, 2045, fpst);
+ r64_val = float64_val(r64);
+ r64_exp = extract64(r64_val, 52, 11);
+ r64_frac = extract64(r64_val, 0, 52);
+
+ /* result = sign : result_exp<10:0> : fraction<51:0> */
+ return make_float64(f64_sbit |
+ ((r64_exp & 0x7ff) << 52) |
+ r64_frac);
+}
+
+/* The algorithm that must be used to calculate the estimate
+ * is specified by the ARM ARM.
+ */
+static float64 recip_sqrt_estimate(float64 a, float_status *real_fp_status)
+{
+ /* These calculations mustn't set any fp exception flags,
+ * so we use a local copy of the fp_status.
+ */
+ float_status dummy_status = *real_fp_status;
+ float_status *s = &dummy_status;
+ float64 q;
+ int64_t q_int;
+
+ if (float64_lt(a, float64_half, s)) {
+ /* range 0.25 <= a < 0.5 */
+
+ /* a in units of 1/512 rounded down */
+ /* q0 = (int)(a * 512.0); */
+ q = float64_mul(float64_512, a, s);
+ q_int = float64_to_int64_round_to_zero(q, s);
+
+ /* reciprocal root r */
+ /* r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0); */
+ q = int64_to_float64(q_int, s);
+ q = float64_add(q, float64_half, s);
+ q = float64_div(q, float64_512, s);
+ q = float64_sqrt(q, s);
+ q = float64_div(float64_one, q, s);
+ } else {
+ /* range 0.5 <= a < 1.0 */
+
+ /* a in units of 1/256 rounded down */
+ /* q1 = (int)(a * 256.0); */
+ q = float64_mul(float64_256, a, s);
+        q_int = float64_to_int64_round_to_zero(q, s);
+
+ /* reciprocal root r */
+ /* r = 1.0 /sqrt(((double)q1 + 0.5) / 256); */
+ q = int64_to_float64(q_int, s);
+ q = float64_add(q, float64_half, s);
+ q = float64_div(q, float64_256, s);
+ q = float64_sqrt(q, s);
+ q = float64_div(float64_one, q, s);
+ }
+ /* r in units of 1/256 rounded to nearest */
+ /* s = (int)(256.0 * r + 0.5); */
+
+    q = float64_mul(q, float64_256, s);
+ q = float64_add(q, float64_half, s);
+ q_int = float64_to_int64_round_to_zero(q, s);
+
+ /* return (double)s / 256.0;*/
+ return float64_div(int64_to_float64(q_int, s), float64_256, s);
+}
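/* Illustration (not part of the patch): the same quantisation written with
 * plain host doubles, mirroring the pseudo-code comments above. It assumes
 * ordinary IEEE double arithmetic and <math.h> sqrt() on the host;
 * recip_sqrt_estimate_ref is a hypothetical reference only, the helper
 * itself uses softfloat on a scratch float_status so that no guest
 * exception flags are disturbed.
 */
#include <math.h>

static double recip_sqrt_estimate_ref(double a)   /* expects 0.25 <= a < 1.0 */
{
    double r;

    if (a < 0.5) {
        int q0 = (int)(a * 512.0);                  /* units of 1/512 */
        r = 1.0 / sqrt(((double)q0 + 0.5) / 512.0);
    } else {
        int q1 = (int)(a * 256.0);                  /* units of 1/256 */
        r = 1.0 / sqrt(((double)q1 + 0.5) / 256.0);
    }
    return (double)(int)(256.0 * r + 0.5) / 256.0;  /* snap to 1/256 */
}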
+
+float32 HELPER(rsqrte_f32)(float32 input, void *fpstp)
+{
+ float_status *s = fpstp;
+ float32 f32 = float32_squash_input_denormal(input, s);
+ uint32_t val = float32_val(f32);
+ uint32_t f32_sbit = 0x80000000 & val;
+ int32_t f32_exp = extract32(val, 23, 8);
+ uint32_t f32_frac = extract32(val, 0, 23);
+ uint64_t f64_frac;
+ uint64_t val64;
+ int result_exp;
+ float64 f64;
+
+ if (float32_is_any_nan(f32)) {
+ float32 nan = f32;
+ if (float32_is_signaling_nan(f32, s)) {
+ float_raise(float_flag_invalid, s);
+ nan = float32_maybe_silence_nan(f32, s);
+ }
+ if (s->default_nan_mode) {
+ nan = float32_default_nan(s);
+ }
+ return nan;
+ } else if (float32_is_zero(f32)) {
+ float_raise(float_flag_divbyzero, s);
+ return float32_set_sign(float32_infinity, float32_is_neg(f32));
+ } else if (float32_is_neg(f32)) {
+ float_raise(float_flag_invalid, s);
+ return float32_default_nan(s);
+ } else if (float32_is_infinity(f32)) {
+ return float32_zero;
+ }
+
+ /* Scale and normalize to a double-precision value between 0.25 and 1.0,
+ * preserving the parity of the exponent. */
+
+ f64_frac = ((uint64_t) f32_frac) << 29;
+ if (f32_exp == 0) {
+ while (extract64(f64_frac, 51, 1) == 0) {
+ f64_frac = f64_frac << 1;
+ f32_exp = f32_exp - 1;
+ }
+ f64_frac = extract64(f64_frac, 0, 51) << 1;
+ }
+
+ if (extract64(f32_exp, 0, 1) == 0) {
+ f64 = make_float64(((uint64_t) f32_sbit) << 32
+ | (0x3feULL << 52)
+ | f64_frac);
+ } else {
+ f64 = make_float64(((uint64_t) f32_sbit) << 32
+ | (0x3fdULL << 52)
+ | f64_frac);
+ }
+
+ result_exp = (380 - f32_exp) / 2;
+
+ f64 = recip_sqrt_estimate(f64, s);
+
+ val64 = float64_val(f64);
+
+ val = ((result_exp & 0xff) << 23)
+ | ((val64 >> 29) & 0x7fffff);
+ return make_float32(val);
+}
+
+float64 HELPER(rsqrte_f64)(float64 input, void *fpstp)
+{
+ float_status *s = fpstp;
+ float64 f64 = float64_squash_input_denormal(input, s);
+ uint64_t val = float64_val(f64);
+ uint64_t f64_sbit = 0x8000000000000000ULL & val;
+ int64_t f64_exp = extract64(val, 52, 11);
+ uint64_t f64_frac = extract64(val, 0, 52);
+ int64_t result_exp;
+ uint64_t result_frac;
+
+ if (float64_is_any_nan(f64)) {
+ float64 nan = f64;
+ if (float64_is_signaling_nan(f64, s)) {
+ float_raise(float_flag_invalid, s);
+ nan = float64_maybe_silence_nan(f64, s);
+ }
+ if (s->default_nan_mode) {
+ nan = float64_default_nan(s);
+ }
+ return nan;
+ } else if (float64_is_zero(f64)) {
+ float_raise(float_flag_divbyzero, s);
+ return float64_set_sign(float64_infinity, float64_is_neg(f64));
+ } else if (float64_is_neg(f64)) {
+ float_raise(float_flag_invalid, s);
+ return float64_default_nan(s);
+ } else if (float64_is_infinity(f64)) {
+ return float64_zero;
+ }
+
+ /* Scale and normalize to a double-precision value between 0.25 and 1.0,
+ * preserving the parity of the exponent. */
+
+ if (f64_exp == 0) {
+ while (extract64(f64_frac, 51, 1) == 0) {
+ f64_frac = f64_frac << 1;
+ f64_exp = f64_exp - 1;
+ }
+ f64_frac = extract64(f64_frac, 0, 51) << 1;
+ }
+
+ if (extract64(f64_exp, 0, 1) == 0) {
+ f64 = make_float64(f64_sbit
+ | (0x3feULL << 52)
+ | f64_frac);
+ } else {
+ f64 = make_float64(f64_sbit
+ | (0x3fdULL << 52)
+ | f64_frac);
+ }
+
+ result_exp = (3068 - f64_exp) / 2;
+
+ f64 = recip_sqrt_estimate(f64, s);
+
+ result_frac = extract64(float64_val(f64), 0, 52);
+
+ return make_float64(f64_sbit |
+ ((result_exp & 0x7ff) << 52) |
+ result_frac);
+}
+
+uint32_t HELPER(recpe_u32)(uint32_t a, void *fpstp)
+{
+ float_status *s = fpstp;
+ float64 f64;
+
+ if ((a & 0x80000000) == 0) {
+ return 0xffffffff;
+ }
+
+ f64 = make_float64((0x3feULL << 52)
+ | ((int64_t)(a & 0x7fffffff) << 21));
+
+ f64 = recip_estimate(f64, s);
+
+ return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
+}
+
+uint32_t HELPER(rsqrte_u32)(uint32_t a, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ float64 f64;
+
+ if ((a & 0xc0000000) == 0) {
+ return 0xffffffff;
+ }
+
+ if (a & 0x80000000) {
+ f64 = make_float64((0x3feULL << 52)
+ | ((uint64_t)(a & 0x7fffffff) << 21));
+ } else { /* bits 31-30 == '01' */
+ f64 = make_float64((0x3fdULL << 52)
+ | ((uint64_t)(a & 0x3fffffff) << 22));
+ }
+
+ f64 = recip_sqrt_estimate(f64, fpst);
+
+ return 0x80000000 | ((float64_val(f64) >> 21) & 0x7fffffff);
+}
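/* The u32 helpers treat the operand as an unsigned Q0.32 fraction: recpe_u32
 * requires it to be >= 0.5 (bit 31 set) and rsqrte_u32 >= 0.25 (bits 31:30
 * non-zero), otherwise 0xffffffff is returned. A worked example for
 * rsqrte_u32 (the estimate value is approximate):
 *
 *   a   = 0xC0000000                        -> 0.75 in Q0.32
 *   f64 = 0x3fe<<52 | (a & 0x7fffffff)<<21  -> the double 0.75
 *   recip_sqrt_estimate(0.75) ~= 1.152      (exact 1/sqrt(0.75) = 1.1547)
 *   result = 0x80000000 | top 31 fraction bits ~= 0x93800000,
 *            i.e. about 0.577 in Q0.32, which is (1/sqrt(0.75)) / 2.
 */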
+
+/* VFPv4 fused multiply-accumulate */
+float32 VFP_HELPER(muladd, s)(float32 a, float32 b, float32 c, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ return float32_muladd(a, b, c, 0, fpst);
+}
+
+float64 VFP_HELPER(muladd, d)(float64 a, float64 b, float64 c, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ return float64_muladd(a, b, c, 0, fpst);
+}
+
+/* ARMv8 round to integral */
+float32 HELPER(rints_exact)(float32 x, void *fp_status)
+{
+ return float32_round_to_int(x, fp_status);
+}
+
+float64 HELPER(rintd_exact)(float64 x, void *fp_status)
+{
+ return float64_round_to_int(x, fp_status);
+}
+
+float32 HELPER(rints)(float32 x, void *fp_status)
+{
+ int old_flags = get_float_exception_flags(fp_status), new_flags;
+ float32 ret;
+
+ ret = float32_round_to_int(x, fp_status);
+
+ /* Suppress any inexact exceptions the conversion produced */
+ if (!(old_flags & float_flag_inexact)) {
+ new_flags = get_float_exception_flags(fp_status);
+ set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
+ }
+
+ return ret;
+}
+
+float64 HELPER(rintd)(float64 x, void *fp_status)
+{
+ int old_flags = get_float_exception_flags(fp_status), new_flags;
+ float64 ret;
+
+ ret = float64_round_to_int(x, fp_status);
+
+ /* Suppress any inexact exceptions the conversion produced */
+ if (!(old_flags & float_flag_inexact)) {
+ new_flags = get_float_exception_flags(fp_status);
+ set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
+ }
+
+ return ret;
+}
+
+/* Convert ARM rounding mode to softfloat */
+int arm_rmode_to_sf(int rmode)
+{
+ switch (rmode) {
+ case FPROUNDING_TIEAWAY:
+ rmode = float_round_ties_away;
+ break;
+ case FPROUNDING_ODD:
+ /* FIXME: add support for FPROUNDING_ODD (round-to-odd); until then,
+ * log it and fall through to the nearest-even default below. */
+ qemu_log_mask(LOG_UNIMP, "arm: unimplemented rounding mode: %d\n",
+ rmode);
+ case FPROUNDING_TIEEVEN:
+ default:
+ rmode = float_round_nearest_even;
+ break;
+ case FPROUNDING_POSINF:
+ rmode = float_round_up;
+ break;
+ case FPROUNDING_NEGINF:
+ rmode = float_round_down;
+ break;
+ case FPROUNDING_ZERO:
+ rmode = float_round_to_zero;
+ break;
+ }
+ return rmode;
+}
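/* Usage sketch (hypothetical caller, not part of the patch): a one-off
 * conversion with an explicit ARM rounding mode can translate the mode with
 * arm_rmode_to_sf() and swap the softfloat rounding field around the
 * operation; generated code normally does the equivalent through the
 * set_rmode helper declared in helper.h.
 */
static int64_t fcvt_with_rmode(float64 x, int fprounding, float_status *fpst)
{
    int old_mode = get_float_rounding_mode(fpst);
    int64_t ret;

    set_float_rounding_mode(arm_rmode_to_sf(fprounding), fpst);
    ret = float64_to_int64(x, fpst);
    set_float_rounding_mode(old_mode, fpst);
    return ret;
}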
+
+/* CRC helpers.
+ * The upper bytes of val (above the number specified by 'bytes') must have
+ * been zeroed out by the caller.
+ */
+uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
+{
+ uint8_t buf[4];
+
+ stl_le_p(buf, val);
+
+ /* zlib crc32 converts the accumulator and output to one's complement. */
+ return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
+}
+
+uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
+{
+ uint8_t buf[4];
+
+ stl_le_p(buf, val);
+
+ /* Linux crc32c converts the output to one's complement. */
+ return crc32c(acc, buf, bytes) ^ 0xffffffff;
+}
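/* Reference sketch (crc32_arm_ref is hypothetical, not part of the patch).
 * zlib's crc32() inverts the CRC register on entry and exit and crc32c()
 * inverts it on exit, so the explicit ^ 0xffffffff in the helpers cancels
 * those inversions: the ARM instructions perform a raw reflected CRC update.
 * Assuming the standard reflected CRC-32 polynomial 0xEDB88320,
 * HELPER(crc32)(acc, val, bytes) computes the same thing as:
 */
static uint32_t crc32_arm_ref(uint32_t acc, uint32_t val, uint32_t bytes)
{
    uint32_t crc = acc;
    uint32_t i;
    int k;

    for (i = 0; i < bytes; i++) {
        crc ^= (val >> (8 * i)) & 0xff;      /* little-endian byte order */
        for (k = 0; k < 8; k++) {
            crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0);
        }
    }
    return crc;
}
/* HELPER(crc32c) is the same loop with the Castagnoli polynomial 0x82F63B78. */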
diff --git a/target/arm/helper.h b/target/arm/helper.h
new file mode 100644
index 0000000000..84aa637629
--- /dev/null
+++ b/target/arm/helper.h
@@ -0,0 +1,542 @@
+DEF_HELPER_FLAGS_1(clz, TCG_CALL_NO_RWG_SE, i32, i32)
+DEF_HELPER_FLAGS_1(sxtb16, TCG_CALL_NO_RWG_SE, i32, i32)
+DEF_HELPER_FLAGS_1(uxtb16, TCG_CALL_NO_RWG_SE, i32, i32)
+
+DEF_HELPER_3(add_setq, i32, env, i32, i32)
+DEF_HELPER_3(add_saturate, i32, env, i32, i32)
+DEF_HELPER_3(sub_saturate, i32, env, i32, i32)
+DEF_HELPER_3(add_usaturate, i32, env, i32, i32)
+DEF_HELPER_3(sub_usaturate, i32, env, i32, i32)
+DEF_HELPER_2(double_saturate, i32, env, s32)
+DEF_HELPER_FLAGS_2(sdiv, TCG_CALL_NO_RWG_SE, s32, s32, s32)
+DEF_HELPER_FLAGS_2(udiv, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_1(rbit, TCG_CALL_NO_RWG_SE, i32, i32)
+
+#define PAS_OP(pfx) \
+ DEF_HELPER_3(pfx ## add8, i32, i32, i32, ptr) \
+ DEF_HELPER_3(pfx ## sub8, i32, i32, i32, ptr) \
+ DEF_HELPER_3(pfx ## sub16, i32, i32, i32, ptr) \
+ DEF_HELPER_3(pfx ## add16, i32, i32, i32, ptr) \
+ DEF_HELPER_3(pfx ## addsubx, i32, i32, i32, ptr) \
+ DEF_HELPER_3(pfx ## subaddx, i32, i32, i32, ptr)
+
+PAS_OP(s)
+PAS_OP(u)
+#undef PAS_OP
+
+#define PAS_OP(pfx) \
+ DEF_HELPER_2(pfx ## add8, i32, i32, i32) \
+ DEF_HELPER_2(pfx ## sub8, i32, i32, i32) \
+ DEF_HELPER_2(pfx ## sub16, i32, i32, i32) \
+ DEF_HELPER_2(pfx ## add16, i32, i32, i32) \
+ DEF_HELPER_2(pfx ## addsubx, i32, i32, i32) \
+ DEF_HELPER_2(pfx ## subaddx, i32, i32, i32)
+PAS_OP(q)
+PAS_OP(sh)
+PAS_OP(uq)
+PAS_OP(uh)
+#undef PAS_OP
+
+DEF_HELPER_3(ssat, i32, env, i32, i32)
+DEF_HELPER_3(usat, i32, env, i32, i32)
+DEF_HELPER_3(ssat16, i32, env, i32, i32)
+DEF_HELPER_3(usat16, i32, env, i32, i32)
+
+DEF_HELPER_FLAGS_2(usad8, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+
+DEF_HELPER_FLAGS_3(sel_flags, TCG_CALL_NO_RWG_SE,
+ i32, i32, i32, i32)
+DEF_HELPER_2(exception_internal, void, env, i32)
+DEF_HELPER_4(exception_with_syndrome, void, env, i32, i32, i32)
+DEF_HELPER_1(setend, void, env)
+DEF_HELPER_1(wfi, void, env)
+DEF_HELPER_1(wfe, void, env)
+DEF_HELPER_1(yield, void, env)
+DEF_HELPER_1(pre_hvc, void, env)
+DEF_HELPER_2(pre_smc, void, env, i32)
+
+DEF_HELPER_1(check_breakpoints, void, env)
+
+DEF_HELPER_3(cpsr_write, void, env, i32, i32)
+DEF_HELPER_2(cpsr_write_eret, void, env, i32)
+DEF_HELPER_1(cpsr_read, i32, env)
+
+DEF_HELPER_3(v7m_msr, void, env, i32, i32)
+DEF_HELPER_2(v7m_mrs, i32, env, i32)
+
+DEF_HELPER_4(access_check_cp_reg, void, env, ptr, i32, i32)
+DEF_HELPER_3(set_cp_reg, void, env, ptr, i32)
+DEF_HELPER_2(get_cp_reg, i32, env, ptr)
+DEF_HELPER_3(set_cp_reg64, void, env, ptr, i64)
+DEF_HELPER_2(get_cp_reg64, i64, env, ptr)
+
+DEF_HELPER_3(msr_i_pstate, void, env, i32, i32)
+DEF_HELPER_1(clear_pstate_ss, void, env)
+DEF_HELPER_1(exception_return, void, env)
+
+DEF_HELPER_2(get_r13_banked, i32, env, i32)
+DEF_HELPER_3(set_r13_banked, void, env, i32, i32)
+
+DEF_HELPER_3(mrs_banked, i32, env, i32, i32)
+DEF_HELPER_4(msr_banked, void, env, i32, i32, i32)
+
+DEF_HELPER_2(get_user_reg, i32, env, i32)
+DEF_HELPER_3(set_user_reg, void, env, i32, i32)
+
+DEF_HELPER_1(vfp_get_fpscr, i32, env)
+DEF_HELPER_2(vfp_set_fpscr, void, env, i32)
+
+DEF_HELPER_3(vfp_adds, f32, f32, f32, ptr)
+DEF_HELPER_3(vfp_addd, f64, f64, f64, ptr)
+DEF_HELPER_3(vfp_subs, f32, f32, f32, ptr)
+DEF_HELPER_3(vfp_subd, f64, f64, f64, ptr)
+DEF_HELPER_3(vfp_muls, f32, f32, f32, ptr)
+DEF_HELPER_3(vfp_muld, f64, f64, f64, ptr)
+DEF_HELPER_3(vfp_divs, f32, f32, f32, ptr)
+DEF_HELPER_3(vfp_divd, f64, f64, f64, ptr)
+DEF_HELPER_3(vfp_maxs, f32, f32, f32, ptr)
+DEF_HELPER_3(vfp_maxd, f64, f64, f64, ptr)
+DEF_HELPER_3(vfp_mins, f32, f32, f32, ptr)
+DEF_HELPER_3(vfp_mind, f64, f64, f64, ptr)
+DEF_HELPER_3(vfp_maxnums, f32, f32, f32, ptr)
+DEF_HELPER_3(vfp_maxnumd, f64, f64, f64, ptr)
+DEF_HELPER_3(vfp_minnums, f32, f32, f32, ptr)
+DEF_HELPER_3(vfp_minnumd, f64, f64, f64, ptr)
+DEF_HELPER_1(vfp_negs, f32, f32)
+DEF_HELPER_1(vfp_negd, f64, f64)
+DEF_HELPER_1(vfp_abss, f32, f32)
+DEF_HELPER_1(vfp_absd, f64, f64)
+DEF_HELPER_2(vfp_sqrts, f32, f32, env)
+DEF_HELPER_2(vfp_sqrtd, f64, f64, env)
+DEF_HELPER_3(vfp_cmps, void, f32, f32, env)
+DEF_HELPER_3(vfp_cmpd, void, f64, f64, env)
+DEF_HELPER_3(vfp_cmpes, void, f32, f32, env)
+DEF_HELPER_3(vfp_cmped, void, f64, f64, env)
+
+DEF_HELPER_2(vfp_fcvtds, f64, f32, env)
+DEF_HELPER_2(vfp_fcvtsd, f32, f64, env)
+
+DEF_HELPER_2(vfp_uitos, f32, i32, ptr)
+DEF_HELPER_2(vfp_uitod, f64, i32, ptr)
+DEF_HELPER_2(vfp_sitos, f32, i32, ptr)
+DEF_HELPER_2(vfp_sitod, f64, i32, ptr)
+
+DEF_HELPER_2(vfp_touis, i32, f32, ptr)
+DEF_HELPER_2(vfp_touid, i32, f64, ptr)
+DEF_HELPER_2(vfp_touizs, i32, f32, ptr)
+DEF_HELPER_2(vfp_touizd, i32, f64, ptr)
+DEF_HELPER_2(vfp_tosis, i32, f32, ptr)
+DEF_HELPER_2(vfp_tosid, i32, f64, ptr)
+DEF_HELPER_2(vfp_tosizs, i32, f32, ptr)
+DEF_HELPER_2(vfp_tosizd, i32, f64, ptr)
+
+DEF_HELPER_3(vfp_toshs_round_to_zero, i32, f32, i32, ptr)
+DEF_HELPER_3(vfp_tosls_round_to_zero, i32, f32, i32, ptr)
+DEF_HELPER_3(vfp_touhs_round_to_zero, i32, f32, i32, ptr)
+DEF_HELPER_3(vfp_touls_round_to_zero, i32, f32, i32, ptr)
+DEF_HELPER_3(vfp_toshd_round_to_zero, i64, f64, i32, ptr)
+DEF_HELPER_3(vfp_tosld_round_to_zero, i64, f64, i32, ptr)
+DEF_HELPER_3(vfp_touhd_round_to_zero, i64, f64, i32, ptr)
+DEF_HELPER_3(vfp_tould_round_to_zero, i64, f64, i32, ptr)
+DEF_HELPER_3(vfp_toshs, i32, f32, i32, ptr)
+DEF_HELPER_3(vfp_tosls, i32, f32, i32, ptr)
+DEF_HELPER_3(vfp_tosqs, i64, f32, i32, ptr)
+DEF_HELPER_3(vfp_touhs, i32, f32, i32, ptr)
+DEF_HELPER_3(vfp_touls, i32, f32, i32, ptr)
+DEF_HELPER_3(vfp_touqs, i64, f32, i32, ptr)
+DEF_HELPER_3(vfp_toshd, i64, f64, i32, ptr)
+DEF_HELPER_3(vfp_tosld, i64, f64, i32, ptr)
+DEF_HELPER_3(vfp_tosqd, i64, f64, i32, ptr)
+DEF_HELPER_3(vfp_touhd, i64, f64, i32, ptr)
+DEF_HELPER_3(vfp_tould, i64, f64, i32, ptr)
+DEF_HELPER_3(vfp_touqd, i64, f64, i32, ptr)
+DEF_HELPER_3(vfp_shtos, f32, i32, i32, ptr)
+DEF_HELPER_3(vfp_sltos, f32, i32, i32, ptr)
+DEF_HELPER_3(vfp_sqtos, f32, i64, i32, ptr)
+DEF_HELPER_3(vfp_uhtos, f32, i32, i32, ptr)
+DEF_HELPER_3(vfp_ultos, f32, i32, i32, ptr)
+DEF_HELPER_3(vfp_uqtos, f32, i64, i32, ptr)
+DEF_HELPER_3(vfp_shtod, f64, i64, i32, ptr)
+DEF_HELPER_3(vfp_sltod, f64, i64, i32, ptr)
+DEF_HELPER_3(vfp_sqtod, f64, i64, i32, ptr)
+DEF_HELPER_3(vfp_uhtod, f64, i64, i32, ptr)
+DEF_HELPER_3(vfp_ultod, f64, i64, i32, ptr)
+DEF_HELPER_3(vfp_uqtod, f64, i64, i32, ptr)
+
+DEF_HELPER_FLAGS_2(set_rmode, TCG_CALL_NO_RWG, i32, i32, env)
+DEF_HELPER_FLAGS_2(set_neon_rmode, TCG_CALL_NO_RWG, i32, i32, env)
+
+DEF_HELPER_2(vfp_fcvt_f16_to_f32, f32, i32, env)
+DEF_HELPER_2(vfp_fcvt_f32_to_f16, i32, f32, env)
+DEF_HELPER_2(neon_fcvt_f16_to_f32, f32, i32, env)
+DEF_HELPER_2(neon_fcvt_f32_to_f16, i32, f32, env)
+DEF_HELPER_FLAGS_2(vfp_fcvt_f16_to_f64, TCG_CALL_NO_RWG, f64, i32, env)
+DEF_HELPER_FLAGS_2(vfp_fcvt_f64_to_f16, TCG_CALL_NO_RWG, i32, f64, env)
+
+DEF_HELPER_4(vfp_muladdd, f64, f64, f64, f64, ptr)
+DEF_HELPER_4(vfp_muladds, f32, f32, f32, f32, ptr)
+
+DEF_HELPER_3(recps_f32, f32, f32, f32, env)
+DEF_HELPER_3(rsqrts_f32, f32, f32, f32, env)
+DEF_HELPER_FLAGS_2(recpe_f32, TCG_CALL_NO_RWG, f32, f32, ptr)
+DEF_HELPER_FLAGS_2(recpe_f64, TCG_CALL_NO_RWG, f64, f64, ptr)
+DEF_HELPER_FLAGS_2(rsqrte_f32, TCG_CALL_NO_RWG, f32, f32, ptr)
+DEF_HELPER_FLAGS_2(rsqrte_f64, TCG_CALL_NO_RWG, f64, f64, ptr)
+DEF_HELPER_2(recpe_u32, i32, i32, ptr)
+DEF_HELPER_FLAGS_2(rsqrte_u32, TCG_CALL_NO_RWG, i32, i32, ptr)
+DEF_HELPER_5(neon_tbl, i32, env, i32, i32, i32, i32)
+
+DEF_HELPER_3(shl_cc, i32, env, i32, i32)
+DEF_HELPER_3(shr_cc, i32, env, i32, i32)
+DEF_HELPER_3(sar_cc, i32, env, i32, i32)
+DEF_HELPER_3(ror_cc, i32, env, i32, i32)
+
+DEF_HELPER_FLAGS_2(rints_exact, TCG_CALL_NO_RWG, f32, f32, ptr)
+DEF_HELPER_FLAGS_2(rintd_exact, TCG_CALL_NO_RWG, f64, f64, ptr)
+DEF_HELPER_FLAGS_2(rints, TCG_CALL_NO_RWG, f32, f32, ptr)
+DEF_HELPER_FLAGS_2(rintd, TCG_CALL_NO_RWG, f64, f64, ptr)
+
+/* neon_helper.c */
+DEF_HELPER_FLAGS_3(neon_qadd_u8, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_qadd_s8, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_qadd_u16, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_qadd_s16, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_qadd_u32, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_qadd_s32, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_uqadd_s8, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_uqadd_s16, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_uqadd_s32, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_uqadd_s64, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(neon_sqadd_u8, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_sqadd_u16, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_sqadd_u32, TCG_CALL_NO_RWG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(neon_sqadd_u64, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_3(neon_qsub_u8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qsub_s8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qsub_u16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qsub_s16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qsub_u32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qsub_s32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qadd_u64, i64, env, i64, i64)
+DEF_HELPER_3(neon_qadd_s64, i64, env, i64, i64)
+DEF_HELPER_3(neon_qsub_u64, i64, env, i64, i64)
+DEF_HELPER_3(neon_qsub_s64, i64, env, i64, i64)
+
+DEF_HELPER_2(neon_hadd_s8, i32, i32, i32)
+DEF_HELPER_2(neon_hadd_u8, i32, i32, i32)
+DEF_HELPER_2(neon_hadd_s16, i32, i32, i32)
+DEF_HELPER_2(neon_hadd_u16, i32, i32, i32)
+DEF_HELPER_2(neon_hadd_s32, s32, s32, s32)
+DEF_HELPER_2(neon_hadd_u32, i32, i32, i32)
+DEF_HELPER_2(neon_rhadd_s8, i32, i32, i32)
+DEF_HELPER_2(neon_rhadd_u8, i32, i32, i32)
+DEF_HELPER_2(neon_rhadd_s16, i32, i32, i32)
+DEF_HELPER_2(neon_rhadd_u16, i32, i32, i32)
+DEF_HELPER_2(neon_rhadd_s32, s32, s32, s32)
+DEF_HELPER_2(neon_rhadd_u32, i32, i32, i32)
+DEF_HELPER_2(neon_hsub_s8, i32, i32, i32)
+DEF_HELPER_2(neon_hsub_u8, i32, i32, i32)
+DEF_HELPER_2(neon_hsub_s16, i32, i32, i32)
+DEF_HELPER_2(neon_hsub_u16, i32, i32, i32)
+DEF_HELPER_2(neon_hsub_s32, s32, s32, s32)
+DEF_HELPER_2(neon_hsub_u32, i32, i32, i32)
+
+DEF_HELPER_2(neon_cgt_u8, i32, i32, i32)
+DEF_HELPER_2(neon_cgt_s8, i32, i32, i32)
+DEF_HELPER_2(neon_cgt_u16, i32, i32, i32)
+DEF_HELPER_2(neon_cgt_s16, i32, i32, i32)
+DEF_HELPER_2(neon_cgt_u32, i32, i32, i32)
+DEF_HELPER_2(neon_cgt_s32, i32, i32, i32)
+DEF_HELPER_2(neon_cge_u8, i32, i32, i32)
+DEF_HELPER_2(neon_cge_s8, i32, i32, i32)
+DEF_HELPER_2(neon_cge_u16, i32, i32, i32)
+DEF_HELPER_2(neon_cge_s16, i32, i32, i32)
+DEF_HELPER_2(neon_cge_u32, i32, i32, i32)
+DEF_HELPER_2(neon_cge_s32, i32, i32, i32)
+
+DEF_HELPER_2(neon_min_u8, i32, i32, i32)
+DEF_HELPER_2(neon_min_s8, i32, i32, i32)
+DEF_HELPER_2(neon_min_u16, i32, i32, i32)
+DEF_HELPER_2(neon_min_s16, i32, i32, i32)
+DEF_HELPER_2(neon_min_u32, i32, i32, i32)
+DEF_HELPER_2(neon_min_s32, i32, i32, i32)
+DEF_HELPER_2(neon_max_u8, i32, i32, i32)
+DEF_HELPER_2(neon_max_s8, i32, i32, i32)
+DEF_HELPER_2(neon_max_u16, i32, i32, i32)
+DEF_HELPER_2(neon_max_s16, i32, i32, i32)
+DEF_HELPER_2(neon_max_u32, i32, i32, i32)
+DEF_HELPER_2(neon_max_s32, i32, i32, i32)
+DEF_HELPER_2(neon_pmin_u8, i32, i32, i32)
+DEF_HELPER_2(neon_pmin_s8, i32, i32, i32)
+DEF_HELPER_2(neon_pmin_u16, i32, i32, i32)
+DEF_HELPER_2(neon_pmin_s16, i32, i32, i32)
+DEF_HELPER_2(neon_pmax_u8, i32, i32, i32)
+DEF_HELPER_2(neon_pmax_s8, i32, i32, i32)
+DEF_HELPER_2(neon_pmax_u16, i32, i32, i32)
+DEF_HELPER_2(neon_pmax_s16, i32, i32, i32)
+
+DEF_HELPER_2(neon_abd_u8, i32, i32, i32)
+DEF_HELPER_2(neon_abd_s8, i32, i32, i32)
+DEF_HELPER_2(neon_abd_u16, i32, i32, i32)
+DEF_HELPER_2(neon_abd_s16, i32, i32, i32)
+DEF_HELPER_2(neon_abd_u32, i32, i32, i32)
+DEF_HELPER_2(neon_abd_s32, i32, i32, i32)
+
+DEF_HELPER_2(neon_shl_u8, i32, i32, i32)
+DEF_HELPER_2(neon_shl_s8, i32, i32, i32)
+DEF_HELPER_2(neon_shl_u16, i32, i32, i32)
+DEF_HELPER_2(neon_shl_s16, i32, i32, i32)
+DEF_HELPER_2(neon_shl_u32, i32, i32, i32)
+DEF_HELPER_2(neon_shl_s32, i32, i32, i32)
+DEF_HELPER_2(neon_shl_u64, i64, i64, i64)
+DEF_HELPER_2(neon_shl_s64, i64, i64, i64)
+DEF_HELPER_2(neon_rshl_u8, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_s8, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_u16, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_s16, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_u32, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_s32, i32, i32, i32)
+DEF_HELPER_2(neon_rshl_u64, i64, i64, i64)
+DEF_HELPER_2(neon_rshl_s64, i64, i64, i64)
+DEF_HELPER_3(neon_qshl_u8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_s8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_u16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_s16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_u32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_s32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshl_u64, i64, env, i64, i64)
+DEF_HELPER_3(neon_qshl_s64, i64, env, i64, i64)
+DEF_HELPER_3(neon_qshlu_s8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshlu_s16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshlu_s32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qshlu_s64, i64, env, i64, i64)
+DEF_HELPER_3(neon_qrshl_u8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_s8, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_u16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_s16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_u32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_s32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrshl_u64, i64, env, i64, i64)
+DEF_HELPER_3(neon_qrshl_s64, i64, env, i64, i64)
+
+DEF_HELPER_2(neon_add_u8, i32, i32, i32)
+DEF_HELPER_2(neon_add_u16, i32, i32, i32)
+DEF_HELPER_2(neon_padd_u8, i32, i32, i32)
+DEF_HELPER_2(neon_padd_u16, i32, i32, i32)
+DEF_HELPER_2(neon_sub_u8, i32, i32, i32)
+DEF_HELPER_2(neon_sub_u16, i32, i32, i32)
+DEF_HELPER_2(neon_mul_u8, i32, i32, i32)
+DEF_HELPER_2(neon_mul_u16, i32, i32, i32)
+DEF_HELPER_2(neon_mul_p8, i32, i32, i32)
+DEF_HELPER_2(neon_mull_p8, i64, i32, i32)
+
+DEF_HELPER_2(neon_tst_u8, i32, i32, i32)
+DEF_HELPER_2(neon_tst_u16, i32, i32, i32)
+DEF_HELPER_2(neon_tst_u32, i32, i32, i32)
+DEF_HELPER_2(neon_ceq_u8, i32, i32, i32)
+DEF_HELPER_2(neon_ceq_u16, i32, i32, i32)
+DEF_HELPER_2(neon_ceq_u32, i32, i32, i32)
+
+DEF_HELPER_1(neon_abs_s8, i32, i32)
+DEF_HELPER_1(neon_abs_s16, i32, i32)
+DEF_HELPER_1(neon_clz_u8, i32, i32)
+DEF_HELPER_1(neon_clz_u16, i32, i32)
+DEF_HELPER_1(neon_cls_s8, i32, i32)
+DEF_HELPER_1(neon_cls_s16, i32, i32)
+DEF_HELPER_1(neon_cls_s32, i32, i32)
+DEF_HELPER_1(neon_cnt_u8, i32, i32)
+DEF_HELPER_FLAGS_1(neon_rbit_u8, TCG_CALL_NO_RWG_SE, i32, i32)
+
+DEF_HELPER_3(neon_qdmulh_s16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrdmulh_s16, i32, env, i32, i32)
+DEF_HELPER_3(neon_qdmulh_s32, i32, env, i32, i32)
+DEF_HELPER_3(neon_qrdmulh_s32, i32, env, i32, i32)
+
+DEF_HELPER_1(neon_narrow_u8, i32, i64)
+DEF_HELPER_1(neon_narrow_u16, i32, i64)
+DEF_HELPER_2(neon_unarrow_sat8, i32, env, i64)
+DEF_HELPER_2(neon_narrow_sat_u8, i32, env, i64)
+DEF_HELPER_2(neon_narrow_sat_s8, i32, env, i64)
+DEF_HELPER_2(neon_unarrow_sat16, i32, env, i64)
+DEF_HELPER_2(neon_narrow_sat_u16, i32, env, i64)
+DEF_HELPER_2(neon_narrow_sat_s16, i32, env, i64)
+DEF_HELPER_2(neon_unarrow_sat32, i32, env, i64)
+DEF_HELPER_2(neon_narrow_sat_u32, i32, env, i64)
+DEF_HELPER_2(neon_narrow_sat_s32, i32, env, i64)
+DEF_HELPER_1(neon_narrow_high_u8, i32, i64)
+DEF_HELPER_1(neon_narrow_high_u16, i32, i64)
+DEF_HELPER_1(neon_narrow_round_high_u8, i32, i64)
+DEF_HELPER_1(neon_narrow_round_high_u16, i32, i64)
+DEF_HELPER_1(neon_widen_u8, i64, i32)
+DEF_HELPER_1(neon_widen_s8, i64, i32)
+DEF_HELPER_1(neon_widen_u16, i64, i32)
+DEF_HELPER_1(neon_widen_s16, i64, i32)
+
+DEF_HELPER_2(neon_addl_u16, i64, i64, i64)
+DEF_HELPER_2(neon_addl_u32, i64, i64, i64)
+DEF_HELPER_2(neon_paddl_u16, i64, i64, i64)
+DEF_HELPER_2(neon_paddl_u32, i64, i64, i64)
+DEF_HELPER_2(neon_subl_u16, i64, i64, i64)
+DEF_HELPER_2(neon_subl_u32, i64, i64, i64)
+DEF_HELPER_3(neon_addl_saturate_s32, i64, env, i64, i64)
+DEF_HELPER_3(neon_addl_saturate_s64, i64, env, i64, i64)
+DEF_HELPER_2(neon_abdl_u16, i64, i32, i32)
+DEF_HELPER_2(neon_abdl_s16, i64, i32, i32)
+DEF_HELPER_2(neon_abdl_u32, i64, i32, i32)
+DEF_HELPER_2(neon_abdl_s32, i64, i32, i32)
+DEF_HELPER_2(neon_abdl_u64, i64, i32, i32)
+DEF_HELPER_2(neon_abdl_s64, i64, i32, i32)
+DEF_HELPER_2(neon_mull_u8, i64, i32, i32)
+DEF_HELPER_2(neon_mull_s8, i64, i32, i32)
+DEF_HELPER_2(neon_mull_u16, i64, i32, i32)
+DEF_HELPER_2(neon_mull_s16, i64, i32, i32)
+
+DEF_HELPER_1(neon_negl_u16, i64, i64)
+DEF_HELPER_1(neon_negl_u32, i64, i64)
+
+DEF_HELPER_FLAGS_2(neon_qabs_s8, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qabs_s16, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qabs_s32, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qabs_s64, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(neon_qneg_s8, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qneg_s16, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qneg_s32, TCG_CALL_NO_RWG, i32, env, i32)
+DEF_HELPER_FLAGS_2(neon_qneg_s64, TCG_CALL_NO_RWG, i64, env, i64)
+
+DEF_HELPER_3(neon_abd_f32, i32, i32, i32, ptr)
+DEF_HELPER_3(neon_ceq_f32, i32, i32, i32, ptr)
+DEF_HELPER_3(neon_cge_f32, i32, i32, i32, ptr)
+DEF_HELPER_3(neon_cgt_f32, i32, i32, i32, ptr)
+DEF_HELPER_3(neon_acge_f32, i32, i32, i32, ptr)
+DEF_HELPER_3(neon_acgt_f32, i32, i32, i32, ptr)
+DEF_HELPER_3(neon_acge_f64, i64, i64, i64, ptr)
+DEF_HELPER_3(neon_acgt_f64, i64, i64, i64, ptr)
+
+/* iwmmxt_helper.c */
+DEF_HELPER_2(iwmmxt_maddsq, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_madduq, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_sadb, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_sadw, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_mulslw, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_mulshw, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_mululw, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_muluhw, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_macsw, i64, i64, i64)
+DEF_HELPER_2(iwmmxt_macuw, i64, i64, i64)
+DEF_HELPER_1(iwmmxt_setpsr_nz, i32, i64)
+
+#define DEF_IWMMXT_HELPER_SIZE_ENV(name) \
+DEF_HELPER_3(iwmmxt_##name##b, i64, env, i64, i64) \
+DEF_HELPER_3(iwmmxt_##name##w, i64, env, i64, i64) \
+DEF_HELPER_3(iwmmxt_##name##l, i64, env, i64, i64) \
+
+DEF_IWMMXT_HELPER_SIZE_ENV(unpackl)
+DEF_IWMMXT_HELPER_SIZE_ENV(unpackh)
+
+DEF_HELPER_2(iwmmxt_unpacklub, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackluw, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpacklul, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhub, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhuw, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhul, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpacklsb, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpacklsw, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpacklsl, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhsb, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhsw, i64, env, i64)
+DEF_HELPER_2(iwmmxt_unpackhsl, i64, env, i64)
+
+DEF_IWMMXT_HELPER_SIZE_ENV(cmpeq)
+DEF_IWMMXT_HELPER_SIZE_ENV(cmpgtu)
+DEF_IWMMXT_HELPER_SIZE_ENV(cmpgts)
+
+DEF_IWMMXT_HELPER_SIZE_ENV(mins)
+DEF_IWMMXT_HELPER_SIZE_ENV(minu)
+DEF_IWMMXT_HELPER_SIZE_ENV(maxs)
+DEF_IWMMXT_HELPER_SIZE_ENV(maxu)
+
+DEF_IWMMXT_HELPER_SIZE_ENV(subn)
+DEF_IWMMXT_HELPER_SIZE_ENV(addn)
+DEF_IWMMXT_HELPER_SIZE_ENV(subu)
+DEF_IWMMXT_HELPER_SIZE_ENV(addu)
+DEF_IWMMXT_HELPER_SIZE_ENV(subs)
+DEF_IWMMXT_HELPER_SIZE_ENV(adds)
+
+DEF_HELPER_3(iwmmxt_avgb0, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_avgb1, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_avgw0, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_avgw1, i64, env, i64, i64)
+
+DEF_HELPER_3(iwmmxt_align, i64, i64, i64, i32)
+DEF_HELPER_4(iwmmxt_insr, i64, i64, i32, i32, i32)
+
+DEF_HELPER_1(iwmmxt_bcstb, i64, i32)
+DEF_HELPER_1(iwmmxt_bcstw, i64, i32)
+DEF_HELPER_1(iwmmxt_bcstl, i64, i32)
+
+DEF_HELPER_1(iwmmxt_addcb, i64, i64)
+DEF_HELPER_1(iwmmxt_addcw, i64, i64)
+DEF_HELPER_1(iwmmxt_addcl, i64, i64)
+
+DEF_HELPER_1(iwmmxt_msbb, i32, i64)
+DEF_HELPER_1(iwmmxt_msbw, i32, i64)
+DEF_HELPER_1(iwmmxt_msbl, i32, i64)
+
+DEF_HELPER_3(iwmmxt_srlw, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_srll, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_srlq, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_sllw, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_slll, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_sllq, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_sraw, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_sral, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_sraq, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_rorw, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_rorl, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_rorq, i64, env, i64, i32)
+DEF_HELPER_3(iwmmxt_shufh, i64, env, i64, i32)
+
+DEF_HELPER_3(iwmmxt_packuw, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_packul, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_packuq, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_packsw, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_packsl, i64, env, i64, i64)
+DEF_HELPER_3(iwmmxt_packsq, i64, env, i64, i64)
+
+DEF_HELPER_3(iwmmxt_muladdsl, i64, i64, i32, i32)
+DEF_HELPER_3(iwmmxt_muladdsw, i64, i64, i32, i32)
+DEF_HELPER_3(iwmmxt_muladdswl, i64, i64, i32, i32)
+
+DEF_HELPER_3(neon_unzip8, void, env, i32, i32)
+DEF_HELPER_3(neon_unzip16, void, env, i32, i32)
+DEF_HELPER_3(neon_qunzip8, void, env, i32, i32)
+DEF_HELPER_3(neon_qunzip16, void, env, i32, i32)
+DEF_HELPER_3(neon_qunzip32, void, env, i32, i32)
+DEF_HELPER_3(neon_zip8, void, env, i32, i32)
+DEF_HELPER_3(neon_zip16, void, env, i32, i32)
+DEF_HELPER_3(neon_qzip8, void, env, i32, i32)
+DEF_HELPER_3(neon_qzip16, void, env, i32, i32)
+DEF_HELPER_3(neon_qzip32, void, env, i32, i32)
+
+DEF_HELPER_4(crypto_aese, void, env, i32, i32, i32)
+DEF_HELPER_4(crypto_aesmc, void, env, i32, i32, i32)
+
+DEF_HELPER_5(crypto_sha1_3reg, void, env, i32, i32, i32, i32)
+DEF_HELPER_3(crypto_sha1h, void, env, i32, i32)
+DEF_HELPER_3(crypto_sha1su1, void, env, i32, i32)
+
+DEF_HELPER_4(crypto_sha256h, void, env, i32, i32, i32)
+DEF_HELPER_4(crypto_sha256h2, void, env, i32, i32, i32)
+DEF_HELPER_3(crypto_sha256su0, void, env, i32, i32)
+DEF_HELPER_4(crypto_sha256su1, void, env, i32, i32, i32)
+
+DEF_HELPER_FLAGS_3(crc32, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32)
+DEF_HELPER_FLAGS_3(crc32c, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32)
+DEF_HELPER_2(dc_zva, void, env, i64)
+
+DEF_HELPER_FLAGS_2(neon_pmull_64_lo, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(neon_pmull_64_hi, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+
+#ifdef TARGET_AARCH64
+#include "helper-a64.h"
+#endif
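/* For reference (standard QEMU helper machinery, not shown in this patch):
 * each DEF_HELPER_* line expands through exec/helper-proto.h into a C
 * prototype and through exec/helper-gen.h into a TCG call emitter, so that
 * DEF_HELPER_3(vfp_adds, f32, f32, f32, ptr) above provides roughly:
 *
 *   float32 helper_vfp_adds(float32 a, float32 b, void *fpstp);
 *   void gen_helper_vfp_adds(TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b,
 *                            TCGv_ptr fpst);
 *
 * which is how translate.c and translate-a64.c invoke the routines defined
 * in helper.c and neon_helper.c.
 */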
diff --git a/target/arm/internals.h b/target/arm/internals.h
new file mode 100644
index 0000000000..3cae5ff3b5
--- /dev/null
+++ b/target/arm/internals.h
@@ -0,0 +1,489 @@
+/*
+ * QEMU ARM CPU -- internal functions and types
+ *
+ * Copyright (c) 2014 Linaro Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ *
+ * This header defines functions, types, etc which need to be shared
+ * between different source files within target/arm/ but which are
+ * private to it and not required by the rest of QEMU.
+ */
+
+#ifndef TARGET_ARM_INTERNALS_H
+#define TARGET_ARM_INTERNALS_H
+
+/* register banks for CPU modes */
+#define BANK_USRSYS 0
+#define BANK_SVC 1
+#define BANK_ABT 2
+#define BANK_UND 3
+#define BANK_IRQ 4
+#define BANK_FIQ 5
+#define BANK_HYP 6
+#define BANK_MON 7
+
+static inline bool excp_is_internal(int excp)
+{
+ /* Return true if this exception number represents a QEMU-internal
+ * exception that will not be passed to the guest.
+ */
+ return excp == EXCP_INTERRUPT
+ || excp == EXCP_HLT
+ || excp == EXCP_DEBUG
+ || excp == EXCP_HALTED
+ || excp == EXCP_EXCEPTION_EXIT
+ || excp == EXCP_KERNEL_TRAP
+ || excp == EXCP_SEMIHOST;
+}
+
+/* Exception names for debug logging; note that not all of these
+ * precisely correspond to architectural exceptions.
+ */
+static const char * const excnames[] = {
+ [EXCP_UDEF] = "Undefined Instruction",
+ [EXCP_SWI] = "SVC",
+ [EXCP_PREFETCH_ABORT] = "Prefetch Abort",
+ [EXCP_DATA_ABORT] = "Data Abort",
+ [EXCP_IRQ] = "IRQ",
+ [EXCP_FIQ] = "FIQ",
+ [EXCP_BKPT] = "Breakpoint",
+ [EXCP_EXCEPTION_EXIT] = "QEMU v7M exception exit",
+ [EXCP_KERNEL_TRAP] = "QEMU intercept of kernel commpage",
+ [EXCP_HVC] = "Hypervisor Call",
+ [EXCP_HYP_TRAP] = "Hypervisor Trap",
+ [EXCP_SMC] = "Secure Monitor Call",
+ [EXCP_VIRQ] = "Virtual IRQ",
+ [EXCP_VFIQ] = "Virtual FIQ",
+ [EXCP_SEMIHOST] = "Semihosting call",
+};
+
+/* Scale factor for generic timers, i.e. number of ns per tick.
+ * This gives a 62.5MHz timer.
+ */
+#define GTIMER_SCALE 16
+
+/*
+ * For AArch64, map a given EL to an index in the banked_spsr array.
+ * Note that this mapping and the AArch32 mapping defined in bank_number()
+ * must agree such that the AArch64<->AArch32 SPSRs have the architecturally
+ * mandated mapping between each other.
+ */
+static inline unsigned int aarch64_banked_spsr_index(unsigned int el)
+{
+ static const unsigned int map[4] = {
+ [1] = BANK_SVC, /* EL1. */
+ [2] = BANK_HYP, /* EL2. */
+ [3] = BANK_MON, /* EL3. */
+ };
+ assert(el >= 1 && el <= 3);
+ return map[el];
+}
+
+/* Map CPU modes onto saved register banks. */
+static inline int bank_number(int mode)
+{
+ switch (mode) {
+ case ARM_CPU_MODE_USR:
+ case ARM_CPU_MODE_SYS:
+ return BANK_USRSYS;
+ case ARM_CPU_MODE_SVC:
+ return BANK_SVC;
+ case ARM_CPU_MODE_ABT:
+ return BANK_ABT;
+ case ARM_CPU_MODE_UND:
+ return BANK_UND;
+ case ARM_CPU_MODE_IRQ:
+ return BANK_IRQ;
+ case ARM_CPU_MODE_FIQ:
+ return BANK_FIQ;
+ case ARM_CPU_MODE_HYP:
+ return BANK_HYP;
+ case ARM_CPU_MODE_MON:
+ return BANK_MON;
+ }
+ g_assert_not_reached();
+}
+
+void switch_mode(CPUARMState *, int);
+void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
+void arm_translate_init(void);
+
+enum arm_fprounding {
+ FPROUNDING_TIEEVEN,
+ FPROUNDING_POSINF,
+ FPROUNDING_NEGINF,
+ FPROUNDING_ZERO,
+ FPROUNDING_TIEAWAY,
+ FPROUNDING_ODD
+};
+
+int arm_rmode_to_sf(int rmode);
+
+static inline void aarch64_save_sp(CPUARMState *env, int el)
+{
+ if (env->pstate & PSTATE_SP) {
+ env->sp_el[el] = env->xregs[31];
+ } else {
+ env->sp_el[0] = env->xregs[31];
+ }
+}
+
+static inline void aarch64_restore_sp(CPUARMState *env, int el)
+{
+ if (env->pstate & PSTATE_SP) {
+ env->xregs[31] = env->sp_el[el];
+ } else {
+ env->xregs[31] = env->sp_el[0];
+ }
+}
+
+static inline void update_spsel(CPUARMState *env, uint32_t imm)
+{
+ unsigned int cur_el = arm_current_el(env);
+ /* Update PSTATE SPSel bit; this requires us to update the
+ * working stack pointer in xregs[31].
+ */
+ if (!((imm ^ env->pstate) & PSTATE_SP)) {
+ return;
+ }
+ aarch64_save_sp(env, cur_el);
+ env->pstate = deposit32(env->pstate, 0, 1, imm);
+
+ /* We rely on illegal updates to SPsel from EL0 to get trapped
+ * at translation time.
+ */
+ assert(cur_el >= 1 && cur_el <= 3);
+ aarch64_restore_sp(env, cur_el);
+}
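/* Example of the effect: an MSR SPSel, #1 executed at EL1 while PSTATE.SP is
 * 0 saves the current xregs[31] into sp_el[0], sets PSTATE.SP, and reloads
 * xregs[31] from sp_el[1]; writing the value SPSel already holds is a no-op.
 */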
+
+/*
+ * arm_pamax
+ * @cpu: ARMCPU
+ *
+ * Returns the implementation defined bit-width of physical addresses.
+ * The ARMv8 reference manuals refer to this as PAMax().
+ */
+static inline unsigned int arm_pamax(ARMCPU *cpu)
+{
+ static const unsigned int pamax_map[] = {
+ [0] = 32,
+ [1] = 36,
+ [2] = 40,
+ [3] = 42,
+ [4] = 44,
+ [5] = 48,
+ };
+ unsigned int parange = extract32(cpu->id_aa64mmfr0, 0, 4);
+
+ /* id_aa64mmfr0 is a read-only register so values outside of the
+ * supported mappings can be considered an implementation error. */
+ assert(parange < ARRAY_SIZE(pamax_map));
+ return pamax_map[parange];
+}
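/* Usage sketch (pa_mask is a hypothetical local): ID_AA64MMFR0_EL1.PARange
 * == 2 selects a 40-bit physical address space per the table above, so the
 * widest valid physical address would be computed as:
 *
 *   uint64_t pa_mask = (1ULL << arm_pamax(cpu)) - 1;    = 0xffffffffff
 */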
+
+/* Return true if extended addresses are enabled.
+ * This is always the case if our translation regime is 64 bit,
+ * but depends on TTBCR.EAE for 32 bit.
+ */
+static inline bool extended_addresses_enabled(CPUARMState *env)
+{
+ TCR *tcr = &env->cp15.tcr_el[arm_is_secure(env) ? 3 : 1];
+ return arm_el_is_aa64(env, 1) ||
+ (arm_feature(env, ARM_FEATURE_LPAE) && (tcr->raw_tcr & TTBCR_EAE));
+}
+
+/* Valid Syndrome Register EC field values */
+enum arm_exception_class {
+ EC_UNCATEGORIZED = 0x00,
+ EC_WFX_TRAP = 0x01,
+ EC_CP15RTTRAP = 0x03,
+ EC_CP15RRTTRAP = 0x04,
+ EC_CP14RTTRAP = 0x05,
+ EC_CP14DTTRAP = 0x06,
+ EC_ADVSIMDFPACCESSTRAP = 0x07,
+ EC_FPIDTRAP = 0x08,
+ EC_CP14RRTTRAP = 0x0c,
+ EC_ILLEGALSTATE = 0x0e,
+ EC_AA32_SVC = 0x11,
+ EC_AA32_HVC = 0x12,
+ EC_AA32_SMC = 0x13,
+ EC_AA64_SVC = 0x15,
+ EC_AA64_HVC = 0x16,
+ EC_AA64_SMC = 0x17,
+ EC_SYSTEMREGISTERTRAP = 0x18,
+ EC_INSNABORT = 0x20,
+ EC_INSNABORT_SAME_EL = 0x21,
+ EC_PCALIGNMENT = 0x22,
+ EC_DATAABORT = 0x24,
+ EC_DATAABORT_SAME_EL = 0x25,
+ EC_SPALIGNMENT = 0x26,
+ EC_AA32_FPTRAP = 0x28,
+ EC_AA64_FPTRAP = 0x2c,
+ EC_SERROR = 0x2f,
+ EC_BREAKPOINT = 0x30,
+ EC_BREAKPOINT_SAME_EL = 0x31,
+ EC_SOFTWARESTEP = 0x32,
+ EC_SOFTWARESTEP_SAME_EL = 0x33,
+ EC_WATCHPOINT = 0x34,
+ EC_WATCHPOINT_SAME_EL = 0x35,
+ EC_AA32_BKPT = 0x38,
+ EC_VECTORCATCH = 0x3a,
+ EC_AA64_BKPT = 0x3c,
+};
+
+#define ARM_EL_EC_SHIFT 26
+#define ARM_EL_IL_SHIFT 25
+#define ARM_EL_ISV_SHIFT 24
+#define ARM_EL_IL (1 << ARM_EL_IL_SHIFT)
+#define ARM_EL_ISV (1 << ARM_EL_ISV_SHIFT)
+
+/* Utility functions for constructing various kinds of syndrome value.
+ * Note that in general we follow the AArch64 syndrome values; in a
+ * few cases the value in HSR for exceptions taken to AArch32 Hyp
+ * mode differs slightly, so if we ever implemented Hyp mode then the
+ * syndrome value would need some massaging on exception entry.
+ * (One example of this is that AArch64 defaults to IL bit set for
+ * exceptions which don't specifically indicate information about the
+ * trapping instruction, whereas AArch32 defaults to IL bit clear.)
+ */
+static inline uint32_t syn_uncategorized(void)
+{
+ return (EC_UNCATEGORIZED << ARM_EL_EC_SHIFT) | ARM_EL_IL;
+}
+
+static inline uint32_t syn_aa64_svc(uint32_t imm16)
+{
+ return (EC_AA64_SVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
+}
+
+static inline uint32_t syn_aa64_hvc(uint32_t imm16)
+{
+ return (EC_AA64_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
+}
+
+static inline uint32_t syn_aa64_smc(uint32_t imm16)
+{
+ return (EC_AA64_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
+}
+
+static inline uint32_t syn_aa32_svc(uint32_t imm16, bool is_16bit)
+{
+ return (EC_AA32_SVC << ARM_EL_EC_SHIFT) | (imm16 & 0xffff)
+ | (is_16bit ? 0 : ARM_EL_IL);
+}
+
+static inline uint32_t syn_aa32_hvc(uint32_t imm16)
+{
+ return (EC_AA32_HVC << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
+}
+
+static inline uint32_t syn_aa32_smc(void)
+{
+ return (EC_AA32_SMC << ARM_EL_EC_SHIFT) | ARM_EL_IL;
+}
+
+static inline uint32_t syn_aa64_bkpt(uint32_t imm16)
+{
+ return (EC_AA64_BKPT << ARM_EL_EC_SHIFT) | ARM_EL_IL | (imm16 & 0xffff);
+}
+
+static inline uint32_t syn_aa32_bkpt(uint32_t imm16, bool is_16bit)
+{
+ return (EC_AA32_BKPT << ARM_EL_EC_SHIFT) | (imm16 & 0xffff)
+ | (is_16bit ? 0 : ARM_EL_IL);
+}
+
+static inline uint32_t syn_aa64_sysregtrap(int op0, int op1, int op2,
+ int crn, int crm, int rt,
+ int isread)
+{
+ return (EC_SYSTEMREGISTERTRAP << ARM_EL_EC_SHIFT) | ARM_EL_IL
+ | (op0 << 20) | (op2 << 17) | (op1 << 14) | (crn << 10) | (rt << 5)
+ | (crm << 1) | isread;
+}
+
+static inline uint32_t syn_cp14_rt_trap(int cv, int cond, int opc1, int opc2,
+ int crn, int crm, int rt, int isread,
+ bool is_16bit)
+{
+ return (EC_CP14RTTRAP << ARM_EL_EC_SHIFT)
+ | (is_16bit ? 0 : ARM_EL_IL)
+ | (cv << 24) | (cond << 20) | (opc2 << 17) | (opc1 << 14)
+ | (crn << 10) | (rt << 5) | (crm << 1) | isread;
+}
+
+static inline uint32_t syn_cp15_rt_trap(int cv, int cond, int opc1, int opc2,
+ int crn, int crm, int rt, int isread,
+ bool is_16bit)
+{
+ return (EC_CP15RTTRAP << ARM_EL_EC_SHIFT)
+ | (is_16bit ? 0 : ARM_EL_IL)
+ | (cv << 24) | (cond << 20) | (opc2 << 17) | (opc1 << 14)
+ | (crn << 10) | (rt << 5) | (crm << 1) | isread;
+}
+
+static inline uint32_t syn_cp14_rrt_trap(int cv, int cond, int opc1, int crm,
+ int rt, int rt2, int isread,
+ bool is_16bit)
+{
+ return (EC_CP14RRTTRAP << ARM_EL_EC_SHIFT)
+ | (is_16bit ? 0 : ARM_EL_IL)
+ | (cv << 24) | (cond << 20) | (opc1 << 16)
+ | (rt2 << 10) | (rt << 5) | (crm << 1) | isread;
+}
+
+static inline uint32_t syn_cp15_rrt_trap(int cv, int cond, int opc1, int crm,
+ int rt, int rt2, int isread,
+ bool is_16bit)
+{
+ return (EC_CP15RRTTRAP << ARM_EL_EC_SHIFT)
+ | (is_16bit ? 0 : ARM_EL_IL)
+ | (cv << 24) | (cond << 20) | (opc1 << 16)
+ | (rt2 << 10) | (rt << 5) | (crm << 1) | isread;
+}
+
+static inline uint32_t syn_fp_access_trap(int cv, int cond, bool is_16bit)
+{
+ return (EC_ADVSIMDFPACCESSTRAP << ARM_EL_EC_SHIFT)
+ | (is_16bit ? 0 : ARM_EL_IL)
+ | (cv << 24) | (cond << 20);
+}
+
+static inline uint32_t syn_insn_abort(int same_el, int ea, int s1ptw, int fsc)
+{
+ return (EC_INSNABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
+ | ARM_EL_IL | (ea << 9) | (s1ptw << 7) | fsc;
+}
+
+static inline uint32_t syn_data_abort_no_iss(int same_el,
+ int ea, int cm, int s1ptw,
+ int wnr, int fsc)
+{
+ return (EC_DATAABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
+ | ARM_EL_IL
+ | (ea << 9) | (cm << 8) | (s1ptw << 7) | (wnr << 6) | fsc;
+}
+
+static inline uint32_t syn_data_abort_with_iss(int same_el,
+ int sas, int sse, int srt,
+ int sf, int ar,
+ int ea, int cm, int s1ptw,
+ int wnr, int fsc,
+ bool is_16bit)
+{
+ return (EC_DATAABORT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
+ | (is_16bit ? 0 : ARM_EL_IL)
+ | ARM_EL_ISV | (sas << 22) | (sse << 21) | (srt << 16)
+ | (sf << 15) | (ar << 14)
+ | (ea << 9) | (cm << 8) | (s1ptw << 7) | (wnr << 6) | fsc;
+}
+
+static inline uint32_t syn_swstep(int same_el, int isv, int ex)
+{
+ return (EC_SOFTWARESTEP << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
+ | ARM_EL_IL | (isv << 24) | (ex << 6) | 0x22;
+}
+
+static inline uint32_t syn_watchpoint(int same_el, int cm, int wnr)
+{
+ return (EC_WATCHPOINT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
+ | ARM_EL_IL | (cm << 8) | (wnr << 6) | 0x22;
+}
+
+static inline uint32_t syn_breakpoint(int same_el)
+{
+ return (EC_BREAKPOINT << ARM_EL_EC_SHIFT) | (same_el << ARM_EL_EC_SHIFT)
+ | ARM_EL_IL | 0x22;
+}
+
+static inline uint32_t syn_wfx(int cv, int cond, int ti)
+{
+ return (EC_WFX_TRAP << ARM_EL_EC_SHIFT) |
+ (cv << 24) | (cond << 20) | ti;
+}
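/* Worked example of the encoding: syn_aa64_svc(0), i.e. an AArch64 "SVC #0",
 * yields 0x56000000:
 *   EC    = value >> ARM_EL_EC_SHIFT        = 0x15  (EC_AA64_SVC)
 *   IL    = (value >> ARM_EL_IL_SHIFT) & 1  = 1     (32-bit instruction)
 *   imm16 = value & 0xffff                  = 0
 */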
+
+/* Update a QEMU watchpoint based on the information the guest has set in the
+ * DBGWCR<n>_EL1 and DBGWVR<n>_EL1 registers.
+ */
+void hw_watchpoint_update(ARMCPU *cpu, int n);
+/* Update the QEMU watchpoints for every guest watchpoint. This does a
+ * complete delete-and-reinstate of the QEMU watchpoint list and so is
+ * suitable for use after migration or on reset.
+ */
+void hw_watchpoint_update_all(ARMCPU *cpu);
+/* Update a QEMU breakpoint based on the information the guest has set in the
+ * DBGBCR<n>_EL1 and DBGBVR<n>_EL1 registers.
+ */
+void hw_breakpoint_update(ARMCPU *cpu, int n);
+/* Update the QEMU breakpoints for every guest breakpoint. This does a
+ * complete delete-and-reinstate of the QEMU breakpoint list and so is
+ * suitable for use after migration or on reset.
+ */
+void hw_breakpoint_update_all(ARMCPU *cpu);
+
+/* Callback function for checking if a watchpoint should trigger. */
+bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp);
+
+/* Callback function for when a watchpoint or breakpoint triggers. */
+void arm_debug_excp_handler(CPUState *cs);
+
+#ifdef CONFIG_USER_ONLY
+static inline bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
+{
+ return false;
+}
+#else
+/* Return true if the r0/x0 value indicates that this SMC/HVC is a PSCI call. */
+bool arm_is_psci_call(ARMCPU *cpu, int excp_type);
+/* Actually handle a PSCI call */
+void arm_handle_psci_call(ARMCPU *cpu);
+#endif
+
+/**
+ * ARMMMUFaultInfo: Information describing an ARM MMU Fault
+ * @s2addr: Address that caused a fault at stage 2
+ * @stage2: True if we faulted at stage 2
+ * @s1ptw: True if we faulted at stage 2 while doing a stage 1 page-table walk
+ */
+typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
+struct ARMMMUFaultInfo {
+ target_ulong s2addr;
+ bool stage2;
+ bool s1ptw;
+};
+
+/* Do a page table walk and add page to TLB if possible */
+bool arm_tlb_fill(CPUState *cpu, vaddr address, int rw, int mmu_idx,
+ uint32_t *fsr, ARMMMUFaultInfo *fi);
+
+/* Return true if the stage 1 translation regime is using LPAE format page
+ * tables */
+bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx);
+
+/* Raise a data fault alignment exception for the specified virtual address */
+void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr);
+
+/* Call the EL change hook if one has been registered */
+static inline void arm_call_el_change_hook(ARMCPU *cpu)
+{
+ if (cpu->el_change_hook) {
+ cpu->el_change_hook(cpu, cpu->el_change_hook_opaque);
+ }
+}
+
+#endif
diff --git a/target/arm/iwmmxt_helper.c b/target/arm/iwmmxt_helper.c
new file mode 100644
index 0000000000..7d87e1a0a8
--- /dev/null
+++ b/target/arm/iwmmxt_helper.c
@@ -0,0 +1,671 @@
+/*
+ * iwMMXt micro operations for XScale.
+ *
+ * Copyright (c) 2007 OpenedHand, Ltd.
+ * Written by Andrzej Zaborowski <andrew@openedhand.com>
+ * Copyright (c) 2008 CodeSourcery
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+
+/* iwMMXt macros extracted from GNU gdb. */
+
+/* Set the SIMD wCASF flags for 8, 16, 32 or 64-bit operations. */
+#define SIMD8_SET( v, n, b) ((v != 0) << ((((b) + 1) * 4) + (n)))
+#define SIMD16_SET(v, n, h) ((v != 0) << ((((h) + 1) * 8) + (n)))
+#define SIMD32_SET(v, n, w) ((v != 0) << ((((w) + 1) * 16) + (n)))
+#define SIMD64_SET(v, n) ((v != 0) << (32 + (n)))
+/* Flags to pass as "n" above. */
+#define SIMD_NBIT -1
+#define SIMD_ZBIT -2
+#define SIMD_CBIT -3
+#define SIMD_VBIT -4
+/* Various status bit macros. */
+#define NBIT8(x) ((x) & 0x80)
+#define NBIT16(x) ((x) & 0x8000)
+#define NBIT32(x) ((x) & 0x80000000)
+#define NBIT64(x) ((x) & 0x8000000000000000ULL)
+#define ZBIT8(x) (((x) & 0xff) == 0)
+#define ZBIT16(x) (((x) & 0xffff) == 0)
+#define ZBIT32(x) (((x) & 0xffffffff) == 0)
+#define ZBIT64(x) (x == 0)
+/* Sign extension macros. */
+#define EXTEND8H(a) ((uint16_t) (int8_t) (a))
+#define EXTEND8(a) ((uint32_t) (int8_t) (a))
+#define EXTEND16(a) ((uint32_t) (int16_t) (a))
+#define EXTEND16S(a) ((int32_t) (int16_t) (a))
+#define EXTEND32(a) ((uint64_t) (int32_t) (a))
+
+uint64_t HELPER(iwmmxt_maddsq)(uint64_t a, uint64_t b)
+{
+ a = ((
+ EXTEND16S((a >> 0) & 0xffff) * EXTEND16S((b >> 0) & 0xffff) +
+ EXTEND16S((a >> 16) & 0xffff) * EXTEND16S((b >> 16) & 0xffff)
+ ) & 0xffffffff) | ((uint64_t) (
+ EXTEND16S((a >> 32) & 0xffff) * EXTEND16S((b >> 32) & 0xffff) +
+ EXTEND16S((a >> 48) & 0xffff) * EXTEND16S((b >> 48) & 0xffff)
+ ) << 32);
+ return a;
+}
+
+uint64_t HELPER(iwmmxt_madduq)(uint64_t a, uint64_t b)
+{
+ a = ((
+ ((a >> 0) & 0xffff) * ((b >> 0) & 0xffff) +
+ ((a >> 16) & 0xffff) * ((b >> 16) & 0xffff)
+ ) & 0xffffffff) | ((
+ ((a >> 32) & 0xffff) * ((b >> 32) & 0xffff) +
+ ((a >> 48) & 0xffff) * ((b >> 48) & 0xffff)
+ ) << 32);
+ return a;
+}
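/* Lane layout worked example: with
 *   a = 0x0002000300040005   (16-bit lanes 2, 3, 4, 5 from high to low)
 *   b = 0x0001000100010001
 * both maddsq and madduq return 0x0000000500000009: the low 32 bits are
 * 5*1 + 4*1 = 9 and the high 32 bits are 3*1 + 2*1 = 5; the signed variant
 * differs only in sign-extending each 16-bit lane before multiplying.
 */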
+
+uint64_t HELPER(iwmmxt_sadb)(uint64_t a, uint64_t b)
+{
+#define abs(x) (((x) >= 0) ? x : -x)
+#define SADB(SHR) abs((int) ((a >> SHR) & 0xff) - (int) ((b >> SHR) & 0xff))
+ return
+ SADB(0) + SADB(8) + SADB(16) + SADB(24) +
+ SADB(32) + SADB(40) + SADB(48) + SADB(56);
+#undef SADB
+}
+
+uint64_t HELPER(iwmmxt_sadw)(uint64_t a, uint64_t b)
+{
+#define SADW(SHR) \
+ abs((int) ((a >> SHR) & 0xffff) - (int) ((b >> SHR) & 0xffff))
+ return SADW(0) + SADW(16) + SADW(32) + SADW(48);
+#undef SADW
+}
+
+uint64_t HELPER(iwmmxt_mulslw)(uint64_t a, uint64_t b)
+{
+#define MULS(SHR) ((uint64_t) ((( \
+ EXTEND16S((a >> SHR) & 0xffff) * EXTEND16S((b >> SHR) & 0xffff) \
+ ) >> 0) & 0xffff) << SHR)
+ return MULS(0) | MULS(16) | MULS(32) | MULS(48);
+#undef MULS
+}
+
+uint64_t HELPER(iwmmxt_mulshw)(uint64_t a, uint64_t b)
+{
+#define MULS(SHR) ((uint64_t) ((( \
+ EXTEND16S((a >> SHR) & 0xffff) * EXTEND16S((b >> SHR) & 0xffff) \
+ ) >> 16) & 0xffff) << SHR)
+ return MULS(0) | MULS(16) | MULS(32) | MULS(48);
+#undef MULS
+}
+
+uint64_t HELPER(iwmmxt_mululw)(uint64_t a, uint64_t b)
+{
+#define MULU(SHR) ((uint64_t) ((( \
+ ((a >> SHR) & 0xffff) * ((b >> SHR) & 0xffff) \
+ ) >> 0) & 0xffff) << SHR)
+ return MULU(0) | MULU(16) | MULU(32) | MULU(48);
+#undef MULU
+}
+
+uint64_t HELPER(iwmmxt_muluhw)(uint64_t a, uint64_t b)
+{
+#define MULU(SHR) ((uint64_t) ((( \
+ ((a >> SHR) & 0xffff) * ((b >> SHR) & 0xffff) \
+ ) >> 16) & 0xffff) << SHR)
+ return MULU(0) | MULU(16) | MULU(32) | MULU(48);
+#undef MULU
+}
+
+uint64_t HELPER(iwmmxt_macsw)(uint64_t a, uint64_t b)
+{
+#define MACS(SHR) ( \
+ EXTEND16((a >> SHR) & 0xffff) * EXTEND16S((b >> SHR) & 0xffff))
+ return (int64_t) (MACS(0) + MACS(16) + MACS(32) + MACS(48));
+#undef MACS
+}
+
+uint64_t HELPER(iwmmxt_macuw)(uint64_t a, uint64_t b)
+{
+#define MACU(SHR) ( \
+ (uint32_t) ((a >> SHR) & 0xffff) * \
+ (uint32_t) ((b >> SHR) & 0xffff))
+ return MACU(0) + MACU(16) + MACU(32) + MACU(48);
+#undef MACU
+}
+
+#define NZBIT8(x, i) \
+ SIMD8_SET(NBIT8((x) & 0xff), SIMD_NBIT, i) | \
+ SIMD8_SET(ZBIT8((x) & 0xff), SIMD_ZBIT, i)
+#define NZBIT16(x, i) \
+ SIMD16_SET(NBIT16((x) & 0xffff), SIMD_NBIT, i) | \
+ SIMD16_SET(ZBIT16((x) & 0xffff), SIMD_ZBIT, i)
+#define NZBIT32(x, i) \
+ SIMD32_SET(NBIT32((x) & 0xffffffff), SIMD_NBIT, i) | \
+ SIMD32_SET(ZBIT32((x) & 0xffffffff), SIMD_ZBIT, i)
+#define NZBIT64(x) \
+ SIMD64_SET(NBIT64(x), SIMD_NBIT) | \
+ SIMD64_SET(ZBIT64(x), SIMD_ZBIT)
+#define IWMMXT_OP_UNPACK(S, SH0, SH1, SH2, SH3) \
+uint64_t HELPER(glue(iwmmxt_unpack, glue(S, b)))(CPUARMState *env, \
+ uint64_t a, uint64_t b) \
+{ \
+ a = \
+ (((a >> SH0) & 0xff) << 0) | (((b >> SH0) & 0xff) << 8) | \
+ (((a >> SH1) & 0xff) << 16) | (((b >> SH1) & 0xff) << 24) | \
+ (((a >> SH2) & 0xff) << 32) | (((b >> SH2) & 0xff) << 40) | \
+ (((a >> SH3) & 0xff) << 48) | (((b >> SH3) & 0xff) << 56); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | \
+ NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) | \
+ NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | \
+ NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); \
+ return a; \
+} \
+uint64_t HELPER(glue(iwmmxt_unpack, glue(S, w)))(CPUARMState *env, \
+ uint64_t a, uint64_t b) \
+{ \
+ a = \
+ (((a >> SH0) & 0xffff) << 0) | \
+ (((b >> SH0) & 0xffff) << 16) | \
+ (((a >> SH2) & 0xffff) << 32) | \
+ (((b >> SH2) & 0xffff) << 48); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) | \
+ NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3); \
+ return a; \
+} \
+uint64_t HELPER(glue(iwmmxt_unpack, glue(S, l)))(CPUARMState *env, \
+ uint64_t a, uint64_t b) \
+{ \
+ a = \
+ (((a >> SH0) & 0xffffffff) << 0) | \
+ (((b >> SH0) & 0xffffffff) << 32); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); \
+ return a; \
+} \
+uint64_t HELPER(glue(iwmmxt_unpack, glue(S, ub)))(CPUARMState *env, \
+ uint64_t x) \
+{ \
+ x = \
+ (((x >> SH0) & 0xff) << 0) | \
+ (((x >> SH1) & 0xff) << 16) | \
+ (((x >> SH2) & 0xff) << 32) | \
+ (((x >> SH3) & 0xff) << 48); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | \
+ NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); \
+ return x; \
+} \
+uint64_t HELPER(glue(iwmmxt_unpack, glue(S, uw)))(CPUARMState *env, \
+ uint64_t x) \
+{ \
+ x = \
+ (((x >> SH0) & 0xffff) << 0) | \
+ (((x >> SH2) & 0xffff) << 32); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); \
+ return x; \
+} \
+uint64_t HELPER(glue(iwmmxt_unpack, glue(S, ul)))(CPUARMState *env, \
+ uint64_t x) \
+{ \
+ x = (((x >> SH0) & 0xffffffff) << 0); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x >> 0); \
+ return x; \
+} \
+uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sb)))(CPUARMState *env, \
+ uint64_t x) \
+{ \
+ x = \
+ ((uint64_t) EXTEND8H((x >> SH0) & 0xff) << 0) | \
+ ((uint64_t) EXTEND8H((x >> SH1) & 0xff) << 16) | \
+ ((uint64_t) EXTEND8H((x >> SH2) & 0xff) << 32) | \
+ ((uint64_t) EXTEND8H((x >> SH3) & 0xff) << 48); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | \
+ NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); \
+ return x; \
+} \
+uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sw)))(CPUARMState *env, \
+ uint64_t x) \
+{ \
+ x = \
+ ((uint64_t) EXTEND16((x >> SH0) & 0xffff) << 0) | \
+ ((uint64_t) EXTEND16((x >> SH2) & 0xffff) << 32); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); \
+ return x; \
+} \
+uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sl)))(CPUARMState *env, \
+ uint64_t x) \
+{ \
+ x = EXTEND32((x >> SH0) & 0xffffffff); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x >> 0); \
+ return x; \
+}
+IWMMXT_OP_UNPACK(l, 0, 8, 16, 24)
+IWMMXT_OP_UNPACK(h, 32, 40, 48, 56)
+
+#define IWMMXT_OP_CMP(SUFF, Tb, Tw, Tl, O) \
+uint64_t HELPER(glue(iwmmxt_, glue(SUFF, b)))(CPUARMState *env, \
+ uint64_t a, uint64_t b) \
+{ \
+ a = \
+ CMP(0, Tb, O, 0xff) | CMP(8, Tb, O, 0xff) | \
+ CMP(16, Tb, O, 0xff) | CMP(24, Tb, O, 0xff) | \
+ CMP(32, Tb, O, 0xff) | CMP(40, Tb, O, 0xff) | \
+ CMP(48, Tb, O, 0xff) | CMP(56, Tb, O, 0xff); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | \
+ NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) | \
+ NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | \
+ NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); \
+ return a; \
+} \
+uint64_t HELPER(glue(iwmmxt_, glue(SUFF, w)))(CPUARMState *env, \
+ uint64_t a, uint64_t b) \
+{ \
+ a = CMP(0, Tw, O, 0xffff) | CMP(16, Tw, O, 0xffff) | \
+ CMP(32, Tw, O, 0xffff) | CMP(48, Tw, O, 0xffff); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) | \
+ NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3); \
+ return a; \
+} \
+uint64_t HELPER(glue(iwmmxt_, glue(SUFF, l)))(CPUARMState *env, \
+ uint64_t a, uint64_t b) \
+{ \
+ a = CMP(0, Tl, O, 0xffffffff) | \
+ CMP(32, Tl, O, 0xffffffff); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); \
+ return a; \
+}
+#define CMP(SHR, TYPE, OPER, MASK) ((((TYPE) ((a >> SHR) & MASK) OPER \
+ (TYPE) ((b >> SHR) & MASK)) ? (uint64_t) MASK : 0) << SHR)
+IWMMXT_OP_CMP(cmpeq, uint8_t, uint16_t, uint32_t, ==)
+IWMMXT_OP_CMP(cmpgts, int8_t, int16_t, int32_t, >)
+IWMMXT_OP_CMP(cmpgtu, uint8_t, uint16_t, uint32_t, >)
+#undef CMP
+#define CMP(SHR, TYPE, OPER, MASK) ((((TYPE) ((a >> SHR) & MASK) OPER \
+ (TYPE) ((b >> SHR) & MASK)) ? a : b) & ((uint64_t) MASK << SHR))
+IWMMXT_OP_CMP(mins, int8_t, int16_t, int32_t, <)
+IWMMXT_OP_CMP(minu, uint8_t, uint16_t, uint32_t, <)
+IWMMXT_OP_CMP(maxs, int8_t, int16_t, int32_t, >)
+IWMMXT_OP_CMP(maxu, uint8_t, uint16_t, uint32_t, >)
+#undef CMP
+#define CMP(SHR, TYPE, OPER, MASK) ((uint64_t) (((TYPE) ((a >> SHR) & MASK) \
+ OPER (TYPE) ((b >> SHR) & MASK)) & MASK) << SHR)
+IWMMXT_OP_CMP(subn, uint8_t, uint16_t, uint32_t, -)
+IWMMXT_OP_CMP(addn, uint8_t, uint16_t, uint32_t, +)
+#undef CMP
+/* TODO Signed- and Unsigned-Saturation */
+#define CMP(SHR, TYPE, OPER, MASK) ((uint64_t) (((TYPE) ((a >> SHR) & MASK) \
+ OPER (TYPE) ((b >> SHR) & MASK)) & MASK) << SHR)
+IWMMXT_OP_CMP(subu, uint8_t, uint16_t, uint32_t, -)
+IWMMXT_OP_CMP(addu, uint8_t, uint16_t, uint32_t, +)
+IWMMXT_OP_CMP(subs, int8_t, int16_t, int32_t, -)
+IWMMXT_OP_CMP(adds, int8_t, int16_t, int32_t, +)
+#undef CMP
+#undef IWMMXT_OP_CMP
+
+#define AVGB(SHR) ((( \
+ ((a >> SHR) & 0xff) + ((b >> SHR) & 0xff) + round) >> 1) << SHR)
+#define IWMMXT_OP_AVGB(r) \
+uint64_t HELPER(iwmmxt_avgb##r)(CPUARMState *env, uint64_t a, uint64_t b) \
+{ \
+ const int round = r; \
+ a = AVGB(0) | AVGB(8) | AVGB(16) | AVGB(24) | \
+ AVGB(32) | AVGB(40) | AVGB(48) | AVGB(56); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ SIMD8_SET(ZBIT8((a >> 0) & 0xff), SIMD_ZBIT, 0) | \
+ SIMD8_SET(ZBIT8((a >> 8) & 0xff), SIMD_ZBIT, 1) | \
+ SIMD8_SET(ZBIT8((a >> 16) & 0xff), SIMD_ZBIT, 2) | \
+ SIMD8_SET(ZBIT8((a >> 24) & 0xff), SIMD_ZBIT, 3) | \
+ SIMD8_SET(ZBIT8((a >> 32) & 0xff), SIMD_ZBIT, 4) | \
+ SIMD8_SET(ZBIT8((a >> 40) & 0xff), SIMD_ZBIT, 5) | \
+ SIMD8_SET(ZBIT8((a >> 48) & 0xff), SIMD_ZBIT, 6) | \
+ SIMD8_SET(ZBIT8((a >> 56) & 0xff), SIMD_ZBIT, 7); \
+ return a; \
+}
+IWMMXT_OP_AVGB(0)
+IWMMXT_OP_AVGB(1)
+#undef IWMMXT_OP_AVGB
+#undef AVGB
+
+#define AVGW(SHR) ((( \
+ ((a >> SHR) & 0xffff) + ((b >> SHR) & 0xffff) + round) >> 1) << SHR)
+#define IWMMXT_OP_AVGW(r) \
+uint64_t HELPER(iwmmxt_avgw##r)(CPUARMState *env, uint64_t a, uint64_t b) \
+{ \
+ const int round = r; \
+ a = AVGW(0) | AVGW(16) | AVGW(32) | AVGW(48); \
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \
+ SIMD16_SET(ZBIT16((a >> 0) & 0xffff), SIMD_ZBIT, 0) | \
+ SIMD16_SET(ZBIT16((a >> 16) & 0xffff), SIMD_ZBIT, 1) | \
+ SIMD16_SET(ZBIT16((a >> 32) & 0xffff), SIMD_ZBIT, 2) | \
+ SIMD16_SET(ZBIT16((a >> 48) & 0xffff), SIMD_ZBIT, 3); \
+ return a; \
+}
+IWMMXT_OP_AVGW(0)
+IWMMXT_OP_AVGW(1)
+#undef IWMMXT_OP_AVGW
+#undef AVGW
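
Both averaging templates above perform a per-lane halving add, with the r template parameter selecting round-to-nearest. A single-lane model, names hypothetical:

    /* (a + b + round) >> 1 never exceeds 0xff for byte inputs, so lanes
     * cannot spill into their neighbours when recombined. */
    static uint8_t avg_byte(uint8_t a, uint8_t b, int round)
    {
        return (uint8_t)((a + b + round) >> 1);
    }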
+
+uint64_t HELPER(iwmmxt_align)(uint64_t a, uint64_t b, uint32_t n)
+{
+ a >>= n << 3;
+ a |= b << (64 - (n << 3));
+ return a;
+}
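
A worked example of the extraction above (values illustrative, shown for n in the 1..7 range): the result is the 8-byte window starting at byte n of the 16-byte pair {b:a}.

    uint64_t a = 0x8877665544332211ULL;
    uint64_t b = 0xffeeddccbbaa9988ULL;
    uint32_t n = 3;
    uint64_t r = (a >> (n << 3)) | (b << (64 - (n << 3)));
    /* r == 0xaa99888877665544: a's top five bytes, then b's low three. */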
+
+uint64_t HELPER(iwmmxt_insr)(uint64_t x, uint32_t a, uint32_t b, uint32_t n)
+{
+ x &= ~((uint64_t) b << n);
+ x |= (uint64_t) (a & b) << n;
+ return x;
+}
+
+uint32_t HELPER(iwmmxt_setpsr_nz)(uint64_t x)
+{
+ return SIMD64_SET((x == 0), SIMD_ZBIT) |
+ SIMD64_SET((x & (1ULL << 63)), SIMD_NBIT);
+}
+
+uint64_t HELPER(iwmmxt_bcstb)(uint32_t arg)
+{
+ arg &= 0xff;
+ return
+ ((uint64_t) arg << 0 ) | ((uint64_t) arg << 8 ) |
+ ((uint64_t) arg << 16) | ((uint64_t) arg << 24) |
+ ((uint64_t) arg << 32) | ((uint64_t) arg << 40) |
+ ((uint64_t) arg << 48) | ((uint64_t) arg << 56);
+}
+
+uint64_t HELPER(iwmmxt_bcstw)(uint32_t arg)
+{
+ arg &= 0xffff;
+ return
+ ((uint64_t) arg << 0 ) | ((uint64_t) arg << 16) |
+ ((uint64_t) arg << 32) | ((uint64_t) arg << 48);
+}
+
+uint64_t HELPER(iwmmxt_bcstl)(uint32_t arg)
+{
+ return arg | ((uint64_t) arg << 32);
+}
+
+uint64_t HELPER(iwmmxt_addcb)(uint64_t x)
+{
+ return
+ ((x >> 0) & 0xff) + ((x >> 8) & 0xff) +
+ ((x >> 16) & 0xff) + ((x >> 24) & 0xff) +
+ ((x >> 32) & 0xff) + ((x >> 40) & 0xff) +
+ ((x >> 48) & 0xff) + ((x >> 56) & 0xff);
+}
+
+uint64_t HELPER(iwmmxt_addcw)(uint64_t x)
+{
+ return
+ ((x >> 0) & 0xffff) + ((x >> 16) & 0xffff) +
+ ((x >> 32) & 0xffff) + ((x >> 48) & 0xffff);
+}
+
+uint64_t HELPER(iwmmxt_addcl)(uint64_t x)
+{
+ return (x & 0xffffffff) + (x >> 32);
+}
+
+uint32_t HELPER(iwmmxt_msbb)(uint64_t x)
+{
+ return
+ ((x >> 7) & 0x01) | ((x >> 14) & 0x02) |
+ ((x >> 21) & 0x04) | ((x >> 28) & 0x08) |
+ ((x >> 35) & 0x10) | ((x >> 42) & 0x20) |
+ ((x >> 49) & 0x40) | ((x >> 56) & 0x80);
+}
+
+uint32_t HELPER(iwmmxt_msbw)(uint64_t x)
+{
+ return
+ ((x >> 15) & 0x01) | ((x >> 30) & 0x02) |
+ ((x >> 45) & 0x04) | ((x >> 52) & 0x08);
+}
+
+uint32_t HELPER(iwmmxt_msbl)(uint64_t x)
+{
+ return ((x >> 31) & 0x01) | ((x >> 62) & 0x02);
+}
+
+/* FIXME: Split wCASF setting into a separate op to avoid env use. */
+uint64_t HELPER(iwmmxt_srlw)(CPUARMState *env, uint64_t x, uint32_t n)
+{
+ x = (((x & (0xffffll << 0)) >> n) & (0xffffll << 0)) |
+ (((x & (0xffffll << 16)) >> n) & (0xffffll << 16)) |
+ (((x & (0xffffll << 32)) >> n) & (0xffffll << 32)) |
+ (((x & (0xffffll << 48)) >> n) & (0xffffll << 48));
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) |
+ NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3);
+ return x;
+}
+
+uint64_t HELPER(iwmmxt_srll)(CPUARMState *env, uint64_t x, uint32_t n)
+{
+ x = ((x & (0xffffffffll << 0)) >> n) |
+ ((x >> n) & (0xffffffffll << 32));
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1);
+ return x;
+}
+
+uint64_t HELPER(iwmmxt_srlq)(CPUARMState *env, uint64_t x, uint32_t n)
+{
+ x >>= n;
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x);
+ return x;
+}
+
+uint64_t HELPER(iwmmxt_sllw)(CPUARMState *env, uint64_t x, uint32_t n)
+{
+ x = (((x & (0xffffll << 0)) << n) & (0xffffll << 0)) |
+ (((x & (0xffffll << 16)) << n) & (0xffffll << 16)) |
+ (((x & (0xffffll << 32)) << n) & (0xffffll << 32)) |
+ (((x & (0xffffll << 48)) << n) & (0xffffll << 48));
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) |
+ NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3);
+ return x;
+}
+
+uint64_t HELPER(iwmmxt_slll)(CPUARMState *env, uint64_t x, uint32_t n)
+{
+ x = ((x << n) & (0xffffffffll << 0)) |
+ ((x & (0xffffffffll << 32)) << n);
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1);
+ return x;
+}
+
+uint64_t HELPER(iwmmxt_sllq)(CPUARMState *env, uint64_t x, uint32_t n)
+{
+ x <<= n;
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x);
+ return x;
+}
+
+uint64_t HELPER(iwmmxt_sraw)(CPUARMState *env, uint64_t x, uint32_t n)
+{
+ x = ((uint64_t) ((EXTEND16(x >> 0) >> n) & 0xffff) << 0) |
+ ((uint64_t) ((EXTEND16(x >> 16) >> n) & 0xffff) << 16) |
+ ((uint64_t) ((EXTEND16(x >> 32) >> n) & 0xffff) << 32) |
+ ((uint64_t) ((EXTEND16(x >> 48) >> n) & 0xffff) << 48);
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) |
+ NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3);
+ return x;
+}
+
+uint64_t HELPER(iwmmxt_sral)(CPUARMState *env, uint64_t x, uint32_t n)
+{
+ x = (((EXTEND32(x >> 0) >> n) & 0xffffffff) << 0) |
+ (((EXTEND32(x >> 32) >> n) & 0xffffffff) << 32);
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1);
+ return x;
+}
+
+uint64_t HELPER(iwmmxt_sraq)(CPUARMState *env, uint64_t x, uint32_t n)
+{
+ x = (int64_t) x >> n;
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x);
+ return x;
+}
+
+uint64_t HELPER(iwmmxt_rorw)(CPUARMState *env, uint64_t x, uint32_t n)
+{
+ x = ((((x & (0xffffll << 0)) >> n) |
+ ((x & (0xffffll << 0)) << (16 - n))) & (0xffffll << 0)) |
+ ((((x & (0xffffll << 16)) >> n) |
+ ((x & (0xffffll << 16)) << (16 - n))) & (0xffffll << 16)) |
+ ((((x & (0xffffll << 32)) >> n) |
+ ((x & (0xffffll << 32)) << (16 - n))) & (0xffffll << 32)) |
+ ((((x & (0xffffll << 48)) >> n) |
+ ((x & (0xffffll << 48)) << (16 - n))) & (0xffffll << 48));
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) |
+ NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3);
+ return x;
+}
+
+uint64_t HELPER(iwmmxt_rorl)(CPUARMState *env, uint64_t x, uint32_t n)
+{
+ x = ((x & (0xffffffffll << 0)) >> n) |
+ ((x >> n) & (0xffffffffll << 32)) |
+ ((x << (32 - n)) & (0xffffffffll << 0)) |
+ ((x & (0xffffffffll << 32)) << (32 - n));
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1);
+ return x;
+}
+
+uint64_t HELPER(iwmmxt_rorq)(CPUARMState *env, uint64_t x, uint32_t n)
+{
+ x = ror64(x, n);
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x);
+ return x;
+}
+
+uint64_t HELPER(iwmmxt_shufh)(CPUARMState *env, uint64_t x, uint32_t n)
+{
+ x = (((x >> ((n << 4) & 0x30)) & 0xffff) << 0) |
+ (((x >> ((n << 2) & 0x30)) & 0xffff) << 16) |
+ (((x >> ((n << 0) & 0x30)) & 0xffff) << 32) |
+ (((x >> ((n >> 2) & 0x30)) & 0xffff) << 48);
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) |
+ NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3);
+ return x;
+}
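
The selection above behaves like a pshufw-style shuffle: each 2-bit field of n names the source halfword for one result lane. A standalone model of just the data movement (illustrative, without the wCASF update):

    static uint64_t shufh_model(uint64_t x, uint32_t n)
    {
        uint64_t r = 0;
        int lane;

        for (lane = 0; lane < 4; lane++) {
            int src = (n >> (2 * lane)) & 3;
            r |= ((x >> (16 * src)) & 0xffff) << (16 * lane);
        }
        /* e.g. shufh_model(0xDDDDCCCCBBBBAAAAULL, 0x1b) reverses the
         * halfwords, giving 0xAAAABBBBCCCCDDDD. */
        return r;
    }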
+
+/* TODO: Unsigned-Saturation */
+uint64_t HELPER(iwmmxt_packuw)(CPUARMState *env, uint64_t a, uint64_t b)
+{
+ a = (((a >> 0) & 0xff) << 0) | (((a >> 16) & 0xff) << 8) |
+ (((a >> 32) & 0xff) << 16) | (((a >> 48) & 0xff) << 24) |
+ (((b >> 0) & 0xff) << 32) | (((b >> 16) & 0xff) << 40) |
+ (((b >> 32) & 0xff) << 48) | (((b >> 48) & 0xff) << 56);
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) |
+ NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) |
+ NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) |
+ NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7);
+ return a;
+}
+
+uint64_t HELPER(iwmmxt_packul)(CPUARMState *env, uint64_t a, uint64_t b)
+{
+ a = (((a >> 0) & 0xffff) << 0) | (((a >> 32) & 0xffff) << 16) |
+ (((b >> 0) & 0xffff) << 32) | (((b >> 32) & 0xffff) << 48);
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) |
+ NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3);
+ return a;
+}
+
+uint64_t HELPER(iwmmxt_packuq)(CPUARMState *env, uint64_t a, uint64_t b)
+{
+ a = (a & 0xffffffff) | ((b & 0xffffffff) << 32);
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1);
+ return a;
+}
+
+/* TODO: Signed-Saturation */
+uint64_t HELPER(iwmmxt_packsw)(CPUARMState *env, uint64_t a, uint64_t b)
+{
+ a = (((a >> 0) & 0xff) << 0) | (((a >> 16) & 0xff) << 8) |
+ (((a >> 32) & 0xff) << 16) | (((a >> 48) & 0xff) << 24) |
+ (((b >> 0) & 0xff) << 32) | (((b >> 16) & 0xff) << 40) |
+ (((b >> 32) & 0xff) << 48) | (((b >> 48) & 0xff) << 56);
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) |
+ NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) |
+ NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) |
+ NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7);
+ return a;
+}
+
+uint64_t HELPER(iwmmxt_packsl)(CPUARMState *env, uint64_t a, uint64_t b)
+{
+ a = (((a >> 0) & 0xffff) << 0) | (((a >> 32) & 0xffff) << 16) |
+ (((b >> 0) & 0xffff) << 32) | (((b >> 32) & 0xffff) << 48);
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) |
+ NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3);
+ return a;
+}
+
+uint64_t HELPER(iwmmxt_packsq)(CPUARMState *env, uint64_t a, uint64_t b)
+{
+ a = (a & 0xffffffff) | ((b & 0xffffffff) << 32);
+ env->iwmmxt.cregs[ARM_IWMMXT_wCASF] =
+ NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1);
+ return a;
+}
+
+uint64_t HELPER(iwmmxt_muladdsl)(uint64_t c, uint32_t a, uint32_t b)
+{
+ return c + ((int32_t) EXTEND32(a) * (int32_t) EXTEND32(b));
+}
+
+uint64_t HELPER(iwmmxt_muladdsw)(uint64_t c, uint32_t a, uint32_t b)
+{
+ c += EXTEND32(EXTEND16S((a >> 0) & 0xffff) *
+ EXTEND16S((b >> 0) & 0xffff));
+ c += EXTEND32(EXTEND16S((a >> 16) & 0xffff) *
+ EXTEND16S((b >> 16) & 0xffff));
+ return c;
+}
+
+uint64_t HELPER(iwmmxt_muladdswl)(uint64_t c, uint32_t a, uint32_t b)
+{
+ return c + (EXTEND32(EXTEND16S(a & 0xffff) *
+ EXTEND16S(b & 0xffff)));
+}
diff --git a/target/arm/kvm-consts.h b/target/arm/kvm-consts.h
new file mode 100644
index 0000000000..a2c9518592
--- /dev/null
+++ b/target/arm/kvm-consts.h
@@ -0,0 +1,185 @@
+/*
+ * KVM ARM ABI constant definitions
+ *
+ * Copyright (c) 2013 Linaro Limited
+ *
+ * Provide versions of KVM constant defines that can be used even
+ * when CONFIG_KVM is not set and we don't have access to the
+ * KVM headers. If CONFIG_KVM is set, we do a compile-time check
+ * that we haven't got out of sync somehow.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef ARM_KVM_CONSTS_H
+#define ARM_KVM_CONSTS_H
+
+#ifdef CONFIG_KVM
+#include <linux/kvm.h>
+#include <linux/psci.h>
+
+#define MISMATCH_CHECK(X, Y) QEMU_BUILD_BUG_ON(X != Y)
+
+#else
+#define MISMATCH_CHECK(X, Y)
+#endif
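
In other words, each MISMATCH_CHECK below is a compile-time assertion whenever the kernel headers are available and a no-op otherwise; the first check, for example, expands roughly to:

    #ifdef CONFIG_KVM
    QEMU_BUILD_BUG_ON(CP_REG_SIZE_SHIFT != KVM_REG_SIZE_SHIFT);
    #endif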
+
+#define CP_REG_SIZE_SHIFT 52
+#define CP_REG_SIZE_MASK 0x00f0000000000000ULL
+#define CP_REG_SIZE_U32 0x0020000000000000ULL
+#define CP_REG_SIZE_U64 0x0030000000000000ULL
+#define CP_REG_ARM 0x4000000000000000ULL
+#define CP_REG_ARCH_MASK 0xff00000000000000ULL
+
+MISMATCH_CHECK(CP_REG_SIZE_SHIFT, KVM_REG_SIZE_SHIFT)
+MISMATCH_CHECK(CP_REG_SIZE_MASK, KVM_REG_SIZE_MASK)
+MISMATCH_CHECK(CP_REG_SIZE_U32, KVM_REG_SIZE_U32)
+MISMATCH_CHECK(CP_REG_SIZE_U64, KVM_REG_SIZE_U64)
+MISMATCH_CHECK(CP_REG_ARM, KVM_REG_ARM)
+MISMATCH_CHECK(CP_REG_ARCH_MASK, KVM_REG_ARCH_MASK)
+
+#define QEMU_PSCI_0_1_FN_BASE 0x95c1ba5e
+#define QEMU_PSCI_0_1_FN(n) (QEMU_PSCI_0_1_FN_BASE + (n))
+#define QEMU_PSCI_0_1_FN_CPU_SUSPEND QEMU_PSCI_0_1_FN(0)
+#define QEMU_PSCI_0_1_FN_CPU_OFF QEMU_PSCI_0_1_FN(1)
+#define QEMU_PSCI_0_1_FN_CPU_ON QEMU_PSCI_0_1_FN(2)
+#define QEMU_PSCI_0_1_FN_MIGRATE QEMU_PSCI_0_1_FN(3)
+
+MISMATCH_CHECK(QEMU_PSCI_0_1_FN_CPU_SUSPEND, KVM_PSCI_FN_CPU_SUSPEND)
+MISMATCH_CHECK(QEMU_PSCI_0_1_FN_CPU_OFF, KVM_PSCI_FN_CPU_OFF)
+MISMATCH_CHECK(QEMU_PSCI_0_1_FN_CPU_ON, KVM_PSCI_FN_CPU_ON)
+MISMATCH_CHECK(QEMU_PSCI_0_1_FN_MIGRATE, KVM_PSCI_FN_MIGRATE)
+
+#define QEMU_PSCI_0_2_FN_BASE 0x84000000
+#define QEMU_PSCI_0_2_FN(n) (QEMU_PSCI_0_2_FN_BASE + (n))
+
+#define QEMU_PSCI_0_2_64BIT 0x40000000
+#define QEMU_PSCI_0_2_FN64_BASE \
+ (QEMU_PSCI_0_2_FN_BASE + QEMU_PSCI_0_2_64BIT)
+#define QEMU_PSCI_0_2_FN64(n) (QEMU_PSCI_0_2_FN64_BASE + (n))
+
+#define QEMU_PSCI_0_2_FN_PSCI_VERSION QEMU_PSCI_0_2_FN(0)
+#define QEMU_PSCI_0_2_FN_CPU_SUSPEND QEMU_PSCI_0_2_FN(1)
+#define QEMU_PSCI_0_2_FN_CPU_OFF QEMU_PSCI_0_2_FN(2)
+#define QEMU_PSCI_0_2_FN_CPU_ON QEMU_PSCI_0_2_FN(3)
+#define QEMU_PSCI_0_2_FN_AFFINITY_INFO QEMU_PSCI_0_2_FN(4)
+#define QEMU_PSCI_0_2_FN_MIGRATE QEMU_PSCI_0_2_FN(5)
+#define QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE QEMU_PSCI_0_2_FN(6)
+#define QEMU_PSCI_0_2_FN_MIGRATE_INFO_UP_CPU QEMU_PSCI_0_2_FN(7)
+#define QEMU_PSCI_0_2_FN_SYSTEM_OFF QEMU_PSCI_0_2_FN(8)
+#define QEMU_PSCI_0_2_FN_SYSTEM_RESET QEMU_PSCI_0_2_FN(9)
+
+#define QEMU_PSCI_0_2_FN64_CPU_SUSPEND QEMU_PSCI_0_2_FN64(1)
+#define QEMU_PSCI_0_2_FN64_CPU_OFF QEMU_PSCI_0_2_FN64(2)
+#define QEMU_PSCI_0_2_FN64_CPU_ON QEMU_PSCI_0_2_FN64(3)
+#define QEMU_PSCI_0_2_FN64_AFFINITY_INFO QEMU_PSCI_0_2_FN64(4)
+#define QEMU_PSCI_0_2_FN64_MIGRATE QEMU_PSCI_0_2_FN64(5)
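
As a concrete example of how these IDs compose, the SMC64 CPU_ON function number works out to:

    QEMU_PSCI_0_2_FN64_CPU_ON
        = 0x84000000 (FN_BASE) + 0x40000000 (64BIT) + 3
        = 0xC4000003

which is the value the MISMATCH_CHECK below compares against the kernel's PSCI_0_2_FN64_CPU_ON.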
+
+MISMATCH_CHECK(QEMU_PSCI_0_2_FN_CPU_SUSPEND, PSCI_0_2_FN_CPU_SUSPEND)
+MISMATCH_CHECK(QEMU_PSCI_0_2_FN_CPU_OFF, PSCI_0_2_FN_CPU_OFF)
+MISMATCH_CHECK(QEMU_PSCI_0_2_FN_CPU_ON, PSCI_0_2_FN_CPU_ON)
+MISMATCH_CHECK(QEMU_PSCI_0_2_FN_MIGRATE, PSCI_0_2_FN_MIGRATE)
+MISMATCH_CHECK(QEMU_PSCI_0_2_FN64_CPU_SUSPEND, PSCI_0_2_FN64_CPU_SUSPEND)
+MISMATCH_CHECK(QEMU_PSCI_0_2_FN64_CPU_ON, PSCI_0_2_FN64_CPU_ON)
+MISMATCH_CHECK(QEMU_PSCI_0_2_FN64_MIGRATE, PSCI_0_2_FN64_MIGRATE)
+
+/* PSCI v0.2 return values used by TCG emulation of PSCI */
+
+/* No Trusted OS migration to worry about when offlining CPUs */
+#define QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED 2
+
+/* We implement version 0.2 only */
+#define QEMU_PSCI_0_2_RET_VERSION_0_2 2
+
+MISMATCH_CHECK(QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED, PSCI_0_2_TOS_MP)
+MISMATCH_CHECK(QEMU_PSCI_0_2_RET_VERSION_0_2,
+ (PSCI_VERSION_MAJOR(0) | PSCI_VERSION_MINOR(2)))
+
+/* PSCI return values (inclusive of all PSCI versions) */
+#define QEMU_PSCI_RET_SUCCESS 0
+#define QEMU_PSCI_RET_NOT_SUPPORTED -1
+#define QEMU_PSCI_RET_INVALID_PARAMS -2
+#define QEMU_PSCI_RET_DENIED -3
+#define QEMU_PSCI_RET_ALREADY_ON -4
+#define QEMU_PSCI_RET_ON_PENDING -5
+#define QEMU_PSCI_RET_INTERNAL_FAILURE -6
+#define QEMU_PSCI_RET_NOT_PRESENT -7
+#define QEMU_PSCI_RET_DISABLED -8
+
+MISMATCH_CHECK(QEMU_PSCI_RET_SUCCESS, PSCI_RET_SUCCESS)
+MISMATCH_CHECK(QEMU_PSCI_RET_NOT_SUPPORTED, PSCI_RET_NOT_SUPPORTED)
+MISMATCH_CHECK(QEMU_PSCI_RET_INVALID_PARAMS, PSCI_RET_INVALID_PARAMS)
+MISMATCH_CHECK(QEMU_PSCI_RET_DENIED, PSCI_RET_DENIED)
+MISMATCH_CHECK(QEMU_PSCI_RET_ALREADY_ON, PSCI_RET_ALREADY_ON)
+MISMATCH_CHECK(QEMU_PSCI_RET_ON_PENDING, PSCI_RET_ON_PENDING)
+MISMATCH_CHECK(QEMU_PSCI_RET_INTERNAL_FAILURE, PSCI_RET_INTERNAL_FAILURE)
+MISMATCH_CHECK(QEMU_PSCI_RET_NOT_PRESENT, PSCI_RET_NOT_PRESENT)
+MISMATCH_CHECK(QEMU_PSCI_RET_DISABLED, PSCI_RET_DISABLED)
+
+/* Note that KVM uses overlapping values for AArch32 and AArch64
+ * target CPU numbers. AArch32 targets:
+ */
+#define QEMU_KVM_ARM_TARGET_CORTEX_A15 0
+#define QEMU_KVM_ARM_TARGET_CORTEX_A7 1
+
+/* AArch64 targets: */
+#define QEMU_KVM_ARM_TARGET_AEM_V8 0
+#define QEMU_KVM_ARM_TARGET_FOUNDATION_V8 1
+#define QEMU_KVM_ARM_TARGET_CORTEX_A57 2
+#define QEMU_KVM_ARM_TARGET_XGENE_POTENZA 3
+#define QEMU_KVM_ARM_TARGET_CORTEX_A53 4
+
+/* There's no kernel define for this: sentinel value which
+ * matches no KVM target value for either 64 or 32 bit
+ */
+#define QEMU_KVM_ARM_TARGET_NONE UINT_MAX
+
+#ifdef TARGET_AARCH64
+MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_AEM_V8, KVM_ARM_TARGET_AEM_V8)
+MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_FOUNDATION_V8, KVM_ARM_TARGET_FOUNDATION_V8)
+MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_CORTEX_A57, KVM_ARM_TARGET_CORTEX_A57)
+MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_XGENE_POTENZA, KVM_ARM_TARGET_XGENE_POTENZA)
+MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_CORTEX_A53, KVM_ARM_TARGET_CORTEX_A53)
+#else
+MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_CORTEX_A15, KVM_ARM_TARGET_CORTEX_A15)
+MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_CORTEX_A7, KVM_ARM_TARGET_CORTEX_A7)
+#endif
+
+#define CP_REG_ARM64 0x6000000000000000ULL
+#define CP_REG_ARM_COPROC_MASK 0x000000000FFF0000
+#define CP_REG_ARM_COPROC_SHIFT 16
+#define CP_REG_ARM64_SYSREG (0x0013 << CP_REG_ARM_COPROC_SHIFT)
+#define CP_REG_ARM64_SYSREG_OP0_MASK 0x000000000000c000
+#define CP_REG_ARM64_SYSREG_OP0_SHIFT 14
+#define CP_REG_ARM64_SYSREG_OP1_MASK 0x0000000000003800
+#define CP_REG_ARM64_SYSREG_OP1_SHIFT 11
+#define CP_REG_ARM64_SYSREG_CRN_MASK 0x0000000000000780
+#define CP_REG_ARM64_SYSREG_CRN_SHIFT 7
+#define CP_REG_ARM64_SYSREG_CRM_MASK 0x0000000000000078
+#define CP_REG_ARM64_SYSREG_CRM_SHIFT 3
+#define CP_REG_ARM64_SYSREG_OP2_MASK 0x0000000000000007
+#define CP_REG_ARM64_SYSREG_OP2_SHIFT 0
+
+/* No kernel define but it's useful to QEMU */
+#define CP_REG_ARM64_SYSREG_CP (CP_REG_ARM64_SYSREG >> CP_REG_ARM_COPROC_SHIFT)
+
+#ifdef TARGET_AARCH64
+MISMATCH_CHECK(CP_REG_ARM64, KVM_REG_ARM64)
+MISMATCH_CHECK(CP_REG_ARM_COPROC_MASK, KVM_REG_ARM_COPROC_MASK)
+MISMATCH_CHECK(CP_REG_ARM_COPROC_SHIFT, KVM_REG_ARM_COPROC_SHIFT)
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG, KVM_REG_ARM64_SYSREG)
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP0_MASK, KVM_REG_ARM64_SYSREG_OP0_MASK)
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP0_SHIFT, KVM_REG_ARM64_SYSREG_OP0_SHIFT)
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP1_MASK, KVM_REG_ARM64_SYSREG_OP1_MASK)
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP1_SHIFT, KVM_REG_ARM64_SYSREG_OP1_SHIFT)
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_CRN_MASK, KVM_REG_ARM64_SYSREG_CRN_MASK)
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_CRN_SHIFT, KVM_REG_ARM64_SYSREG_CRN_SHIFT)
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_CRM_MASK, KVM_REG_ARM64_SYSREG_CRM_MASK)
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_CRM_SHIFT, KVM_REG_ARM64_SYSREG_CRM_SHIFT)
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP2_MASK, KVM_REG_ARM64_SYSREG_OP2_MASK)
+MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP2_SHIFT, KVM_REG_ARM64_SYSREG_OP2_SHIFT)
+#endif
+
+#undef MISMATCH_CHECK
+
+#endif
diff --git a/target/arm/kvm-stub.c b/target/arm/kvm-stub.c
new file mode 100644
index 0000000000..b2c66df532
--- /dev/null
+++ b/target/arm/kvm-stub.c
@@ -0,0 +1,25 @@
+/*
+ * QEMU KVM ARM specific function stubs
+ *
+ * Copyright Linaro Limited 2013
+ *
+ * Author: Peter Maydell <peter.maydell@linaro.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "kvm_arm.h"
+
+bool write_kvmstate_to_list(ARMCPU *cpu)
+{
+ abort();
+}
+
+bool write_list_to_kvmstate(ARMCPU *cpu, int level)
+{
+ abort();
+}
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
new file mode 100644
index 0000000000..c00b94e42a
--- /dev/null
+++ b/target/arm/kvm.c
@@ -0,0 +1,640 @@
+/*
+ * ARM implementation of KVM hooks
+ *
+ * Copyright Christoffer Dall 2009-2010
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include <sys/ioctl.h>
+
+#include <linux/kvm.h>
+
+#include "qemu-common.h"
+#include "qemu/timer.h"
+#include "qemu/error-report.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/kvm.h"
+#include "kvm_arm.h"
+#include "cpu.h"
+#include "internals.h"
+#include "hw/arm/arm.h"
+#include "exec/memattrs.h"
+#include "exec/address-spaces.h"
+#include "hw/boards.h"
+#include "qemu/log.h"
+
+const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
+ KVM_CAP_LAST_INFO
+};
+
+static bool cap_has_mp_state;
+
+int kvm_arm_vcpu_init(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ struct kvm_vcpu_init init;
+
+ init.target = cpu->kvm_target;
+ memcpy(init.features, cpu->kvm_init_features, sizeof(init.features));
+
+ return kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_INIT, &init);
+}
+
+bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
+ int *fdarray,
+ struct kvm_vcpu_init *init)
+{
+ int ret, kvmfd = -1, vmfd = -1, cpufd = -1;
+
+ kvmfd = qemu_open("/dev/kvm", O_RDWR);
+ if (kvmfd < 0) {
+ goto err;
+ }
+ vmfd = ioctl(kvmfd, KVM_CREATE_VM, 0);
+ if (vmfd < 0) {
+ goto err;
+ }
+ cpufd = ioctl(vmfd, KVM_CREATE_VCPU, 0);
+ if (cpufd < 0) {
+ goto err;
+ }
+
+ if (!init) {
+ /* Caller doesn't want the VCPU to be initialized, so skip it */
+ goto finish;
+ }
+
+ ret = ioctl(vmfd, KVM_ARM_PREFERRED_TARGET, init);
+ if (ret >= 0) {
+ ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, init);
+ if (ret < 0) {
+ goto err;
+ }
+ } else if (cpus_to_try) {
+ /* Old kernel which doesn't know about the
+ * PREFERRED_TARGET ioctl: we know it will only support
+ * creating one kind of guest CPU which is its preferred
+ * CPU type.
+ */
+ while (*cpus_to_try != QEMU_KVM_ARM_TARGET_NONE) {
+ init->target = *cpus_to_try++;
+ memset(init->features, 0, sizeof(init->features));
+ ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, init);
+ if (ret >= 0) {
+ break;
+ }
+ }
+ if (ret < 0) {
+ goto err;
+ }
+ } else {
+ /* Treat a NULL cpus_to_try argument the same as an empty
+ * list, which means we will fail the call since this must
+ * be an old kernel which doesn't support PREFERRED_TARGET.
+ */
+ goto err;
+ }
+
+finish:
+ fdarray[0] = kvmfd;
+ fdarray[1] = vmfd;
+ fdarray[2] = cpufd;
+
+ return true;
+
+err:
+ if (cpufd >= 0) {
+ close(cpufd);
+ }
+ if (vmfd >= 0) {
+ close(vmfd);
+ }
+ if (kvmfd >= 0) {
+ close(kvmfd);
+ }
+
+ return false;
+}
+
+void kvm_arm_destroy_scratch_host_vcpu(int *fdarray)
+{
+ int i;
+
+ for (i = 2; i >= 0; i--) {
+ close(fdarray[i]);
+ }
+}
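
The expected usage of this scratch-VM pair (the real callers are the kvm_arm_get_host_cpu_features() implementations in kvm32.c and kvm64.c) is probe-then-teardown; a minimal sketch, with a hypothetical caller name:

    static bool probe_preferred_target(uint32_t *target)
    {
        int fdarray[3];
        struct kvm_vcpu_init init;

        /* Passing NULL for cpus_to_try relies on the kernel supporting
         * KVM_ARM_PREFERRED_TARGET; older kernels need an explicit list. */
        if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, &init)) {
            return false;
        }
        *target = init.target;
        kvm_arm_destroy_scratch_host_vcpu(fdarray);
        return true;
    }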
+
+static void kvm_arm_host_cpu_class_init(ObjectClass *oc, void *data)
+{
+ ARMHostCPUClass *ahcc = ARM_HOST_CPU_CLASS(oc);
+
+ /* All we really need to set up for the 'host' CPU
+ * is the feature bits -- we rely on the fact that the
+ * various ID register values in ARMCPU are only used for
+ * TCG CPUs.
+ */
+ if (!kvm_arm_get_host_cpu_features(ahcc)) {
+ fprintf(stderr, "Failed to retrieve host CPU features!\n");
+ abort();
+ }
+}
+
+static void kvm_arm_host_cpu_initfn(Object *obj)
+{
+ ARMHostCPUClass *ahcc = ARM_HOST_CPU_GET_CLASS(obj);
+ ARMCPU *cpu = ARM_CPU(obj);
+ CPUARMState *env = &cpu->env;
+
+ cpu->kvm_target = ahcc->target;
+ cpu->dtb_compatible = ahcc->dtb_compatible;
+ env->features = ahcc->features;
+}
+
+static const TypeInfo host_arm_cpu_type_info = {
+ .name = TYPE_ARM_HOST_CPU,
+#ifdef TARGET_AARCH64
+ .parent = TYPE_AARCH64_CPU,
+#else
+ .parent = TYPE_ARM_CPU,
+#endif
+ .instance_init = kvm_arm_host_cpu_initfn,
+ .class_init = kvm_arm_host_cpu_class_init,
+ .class_size = sizeof(ARMHostCPUClass),
+};
+
+int kvm_arch_init(MachineState *ms, KVMState *s)
+{
+ /* For ARM interrupt delivery is always asynchronous,
+ * whether we are using an in-kernel VGIC or not.
+ */
+ kvm_async_interrupts_allowed = true;
+
+ cap_has_mp_state = kvm_check_extension(s, KVM_CAP_MP_STATE);
+
+ type_register_static(&host_arm_cpu_type_info);
+
+ return 0;
+}
+
+unsigned long kvm_arch_vcpu_id(CPUState *cpu)
+{
+ return cpu->cpu_index;
+}
+
+/* We track all the KVM devices which need their memory addresses
+ * passed to the kernel in a list of these structures.
+ * When board init is complete we run through the list and
+ * tell the kernel the base addresses of the memory regions.
+ * We use a MemoryListener to track mapping and unmapping of
+ * the regions during board creation, so the board models don't
+ * need to do anything special for the KVM case.
+ */
+typedef struct KVMDevice {
+ struct kvm_arm_device_addr kda;
+ struct kvm_device_attr kdattr;
+ MemoryRegion *mr;
+ QSLIST_ENTRY(KVMDevice) entries;
+ int dev_fd;
+} KVMDevice;
+
+static QSLIST_HEAD(kvm_devices_head, KVMDevice) kvm_devices_head;
+
+static void kvm_arm_devlistener_add(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ KVMDevice *kd;
+
+ QSLIST_FOREACH(kd, &kvm_devices_head, entries) {
+ if (section->mr == kd->mr) {
+ kd->kda.addr = section->offset_within_address_space;
+ }
+ }
+}
+
+static void kvm_arm_devlistener_del(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ KVMDevice *kd;
+
+ QSLIST_FOREACH(kd, &kvm_devices_head, entries) {
+ if (section->mr == kd->mr) {
+ kd->kda.addr = -1;
+ }
+ }
+}
+
+static MemoryListener devlistener = {
+ .region_add = kvm_arm_devlistener_add,
+ .region_del = kvm_arm_devlistener_del,
+};
+
+static void kvm_arm_set_device_addr(KVMDevice *kd)
+{
+ struct kvm_device_attr *attr = &kd->kdattr;
+ int ret;
+
+ /* If the device control API is available and we have a device fd on the
+ * KVMDevice struct, let's use the newer API
+ */
+ if (kd->dev_fd >= 0) {
+ uint64_t addr = kd->kda.addr;
+ attr->addr = (uintptr_t)&addr;
+ ret = kvm_device_ioctl(kd->dev_fd, KVM_SET_DEVICE_ATTR, attr);
+ } else {
+ ret = kvm_vm_ioctl(kvm_state, KVM_ARM_SET_DEVICE_ADDR, &kd->kda);
+ }
+
+ if (ret < 0) {
+ fprintf(stderr, "Failed to set device address: %s\n",
+ strerror(-ret));
+ abort();
+ }
+}
+
+static void kvm_arm_machine_init_done(Notifier *notifier, void *data)
+{
+ KVMDevice *kd, *tkd;
+
+ memory_listener_unregister(&devlistener);
+ QSLIST_FOREACH_SAFE(kd, &kvm_devices_head, entries, tkd) {
+ if (kd->kda.addr != -1) {
+ kvm_arm_set_device_addr(kd);
+ }
+ memory_region_unref(kd->mr);
+ g_free(kd);
+ }
+}
+
+static Notifier notify = {
+ .notify = kvm_arm_machine_init_done,
+};
+
+void kvm_arm_register_device(MemoryRegion *mr, uint64_t devid, uint64_t group,
+ uint64_t attr, int dev_fd)
+{
+ KVMDevice *kd;
+
+ if (!kvm_irqchip_in_kernel()) {
+ return;
+ }
+
+ if (QSLIST_EMPTY(&kvm_devices_head)) {
+ memory_listener_register(&devlistener, &address_space_memory);
+ qemu_add_machine_init_done_notifier(&notify);
+ }
+ kd = g_new0(KVMDevice, 1);
+ kd->mr = mr;
+ kd->kda.id = devid;
+ kd->kda.addr = -1;
+ kd->kdattr.flags = 0;
+ kd->kdattr.group = group;
+ kd->kdattr.attr = attr;
+ kd->dev_fd = dev_fd;
+ QSLIST_INSERT_HEAD(&kvm_devices_head, kd, entries);
+ memory_region_ref(kd->mr);
+}
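
For context, this is roughly the call shape an in-kernel GIC model uses to register its distributor region; dist_mr and gic_dev_fd are placeholders, and the constants shown are the kernel's VGIC defines:

    kvm_arm_register_device(dist_mr,
                            (KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT)
                            | KVM_VGIC_V2_ADDR_TYPE_DIST,
                            KVM_DEV_ARM_VGIC_GRP_ADDR,
                            KVM_VGIC_V2_ADDR_TYPE_DIST,
                            gic_dev_fd);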
+
+static int compare_u64(const void *a, const void *b)
+{
+ if (*(uint64_t *)a > *(uint64_t *)b) {
+ return 1;
+ }
+ if (*(uint64_t *)a < *(uint64_t *)b) {
+ return -1;
+ }
+ return 0;
+}
+
+/* Initialize the CPUState's cpreg list according to the kernel's
+ * definition of what CPU registers it knows about (and throw away
+ * the previous TCG-created cpreg list).
+ */
+int kvm_arm_init_cpreg_list(ARMCPU *cpu)
+{
+ struct kvm_reg_list rl;
+ struct kvm_reg_list *rlp;
+ int i, ret, arraylen;
+ CPUState *cs = CPU(cpu);
+
+ rl.n = 0;
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, &rl);
+ if (ret != -E2BIG) {
+ return ret;
+ }
+ rlp = g_malloc(sizeof(struct kvm_reg_list) + rl.n * sizeof(uint64_t));
+ rlp->n = rl.n;
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_REG_LIST, rlp);
+ if (ret) {
+ goto out;
+ }
+ /* Sort the list we get back from the kernel, since cpreg_tuples
+ * must be in strictly ascending order.
+ */
+ qsort(&rlp->reg, rlp->n, sizeof(rlp->reg[0]), compare_u64);
+
+ for (i = 0, arraylen = 0; i < rlp->n; i++) {
+ if (!kvm_arm_reg_syncs_via_cpreg_list(rlp->reg[i])) {
+ continue;
+ }
+ switch (rlp->reg[i] & KVM_REG_SIZE_MASK) {
+ case KVM_REG_SIZE_U32:
+ case KVM_REG_SIZE_U64:
+ break;
+ default:
+ fprintf(stderr, "Can't handle size of register in kernel list\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ arraylen++;
+ }
+
+ cpu->cpreg_indexes = g_renew(uint64_t, cpu->cpreg_indexes, arraylen);
+ cpu->cpreg_values = g_renew(uint64_t, cpu->cpreg_values, arraylen);
+ cpu->cpreg_vmstate_indexes = g_renew(uint64_t, cpu->cpreg_vmstate_indexes,
+ arraylen);
+ cpu->cpreg_vmstate_values = g_renew(uint64_t, cpu->cpreg_vmstate_values,
+ arraylen);
+ cpu->cpreg_array_len = arraylen;
+ cpu->cpreg_vmstate_array_len = arraylen;
+
+ for (i = 0, arraylen = 0; i < rlp->n; i++) {
+ uint64_t regidx = rlp->reg[i];
+ if (!kvm_arm_reg_syncs_via_cpreg_list(regidx)) {
+ continue;
+ }
+ cpu->cpreg_indexes[arraylen] = regidx;
+ arraylen++;
+ }
+ assert(cpu->cpreg_array_len == arraylen);
+
+ if (!write_kvmstate_to_list(cpu)) {
+ /* Shouldn't happen unless kernel is inconsistent about
+ * what registers exist.
+ */
+ fprintf(stderr, "Initial read of kernel register state failed\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ g_free(rlp);
+ return ret;
+}
+
+bool write_kvmstate_to_list(ARMCPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ int i;
+ bool ok = true;
+
+ for (i = 0; i < cpu->cpreg_array_len; i++) {
+ struct kvm_one_reg r;
+ uint64_t regidx = cpu->cpreg_indexes[i];
+ uint32_t v32;
+ int ret;
+
+ r.id = regidx;
+
+ switch (regidx & KVM_REG_SIZE_MASK) {
+ case KVM_REG_SIZE_U32:
+ r.addr = (uintptr_t)&v32;
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
+ if (!ret) {
+ cpu->cpreg_values[i] = v32;
+ }
+ break;
+ case KVM_REG_SIZE_U64:
+ r.addr = (uintptr_t)(cpu->cpreg_values + i);
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
+ break;
+ default:
+ abort();
+ }
+ if (ret) {
+ ok = false;
+ }
+ }
+ return ok;
+}
+
+bool write_list_to_kvmstate(ARMCPU *cpu, int level)
+{
+ CPUState *cs = CPU(cpu);
+ int i;
+ bool ok = true;
+
+ for (i = 0; i < cpu->cpreg_array_len; i++) {
+ struct kvm_one_reg r;
+ uint64_t regidx = cpu->cpreg_indexes[i];
+ uint32_t v32;
+ int ret;
+
+ if (kvm_arm_cpreg_level(regidx) > level) {
+ continue;
+ }
+
+ r.id = regidx;
+ switch (regidx & KVM_REG_SIZE_MASK) {
+ case KVM_REG_SIZE_U32:
+ v32 = cpu->cpreg_values[i];
+ r.addr = (uintptr_t)&v32;
+ break;
+ case KVM_REG_SIZE_U64:
+ r.addr = (uintptr_t)(cpu->cpreg_values + i);
+ break;
+ default:
+ abort();
+ }
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
+ if (ret) {
+ /* We might fail for "unknown register" and also for
+ * "you tried to set a register which is constant with
+ * a different value from what it actually contains".
+ */
+ ok = false;
+ }
+ }
+ return ok;
+}
+
+void kvm_arm_reset_vcpu(ARMCPU *cpu)
+{
+ int ret;
+
+ /* Re-init VCPU so that all registers are set to
+ * their respective reset values.
+ */
+ ret = kvm_arm_vcpu_init(CPU(cpu));
+ if (ret < 0) {
+ fprintf(stderr, "kvm_arm_vcpu_init failed: %s\n", strerror(-ret));
+ abort();
+ }
+ if (!write_kvmstate_to_list(cpu)) {
+ fprintf(stderr, "write_kvmstate_to_list failed\n");
+ abort();
+ }
+}
+
+/*
+ * Update KVM's MP_STATE based on what QEMU thinks it is
+ */
+int kvm_arm_sync_mpstate_to_kvm(ARMCPU *cpu)
+{
+ if (cap_has_mp_state) {
+ struct kvm_mp_state mp_state = {
+ .mp_state =
+ cpu->powered_off ? KVM_MP_STATE_STOPPED : KVM_MP_STATE_RUNNABLE
+ };
+ int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
+ if (ret) {
+ fprintf(stderr, "%s: failed to set MP_STATE %d/%s\n",
+ __func__, ret, strerror(-ret));
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Sync the KVM MP_STATE into QEMU
+ */
+int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu)
+{
+ if (cap_has_mp_state) {
+ struct kvm_mp_state mp_state;
+ int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MP_STATE, &mp_state);
+ if (ret) {
+ fprintf(stderr, "%s: failed to get MP_STATE %d/%s\n",
+ __func__, ret, strerror(-ret));
+ abort();
+ }
+ cpu->powered_off = (mp_state.mp_state == KVM_MP_STATE_STOPPED);
+ }
+
+ return 0;
+}
+
+void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
+{
+}
+
+MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
+{
+ return MEMTXATTRS_UNSPECIFIED;
+}
+
+
+int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
+{
+ int ret = 0;
+
+ switch (run->exit_reason) {
+ case KVM_EXIT_DEBUG:
+ if (kvm_arm_handle_debug(cs, &run->debug.arch)) {
+ ret = EXCP_DEBUG;
+ } /* otherwise return to guest */
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP, "%s: un-handled exit reason %d\n",
+ __func__, run->exit_reason);
+ break;
+ }
+ return ret;
+}
+
+bool kvm_arch_stop_on_emulation_error(CPUState *cs)
+{
+ return true;
+}
+
+int kvm_arch_process_async_events(CPUState *cs)
+{
+ return 0;
+}
+
+int kvm_arch_on_sigbus_vcpu(CPUState *cs, int code, void *addr)
+{
+ return 1;
+}
+
+int kvm_arch_on_sigbus(int code, void *addr)
+{
+ return 1;
+}
+
+/* The #ifdef protections are needed until the 32-bit headers are imported
+ * and can be removed once both 32-bit and 64-bit support reach feature parity.
+ */
+void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
+{
+#ifdef KVM_GUESTDBG_USE_SW_BP
+ if (kvm_sw_breakpoints_active(cs)) {
+ dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
+ }
+#endif
+#ifdef KVM_GUESTDBG_USE_HW
+ if (kvm_arm_hw_debug_active(cs)) {
+ dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW;
+ kvm_arm_copy_hw_debug_data(&dbg->arch);
+ }
+#endif
+}
+
+void kvm_arch_init_irq_routing(KVMState *s)
+{
+}
+
+int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
+{
+ if (machine_kernel_irqchip_split(ms)) {
+ perror("-machine kernel_irqchip=split is not supported on ARM.");
+ exit(1);
+ }
+
+ /* If we can create the VGIC using the newer device control API, we
+ * let the device do this when it initializes itself, otherwise we
+ * fall back to the old API */
+ return kvm_check_extension(s, KVM_CAP_DEVICE_CTRL);
+}
+
+int kvm_arm_vgic_probe(void)
+{
+ if (kvm_create_device(kvm_state,
+ KVM_DEV_TYPE_ARM_VGIC_V3, true) == 0) {
+ return 3;
+ } else if (kvm_create_device(kvm_state,
+ KVM_DEV_TYPE_ARM_VGIC_V2, true) == 0) {
+ return 2;
+ } else {
+ return 0;
+ }
+}
+
+int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
+ uint64_t address, uint32_t data, PCIDevice *dev)
+{
+ return 0;
+}
+
+int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
+ int vector, PCIDevice *dev)
+{
+ return 0;
+}
+
+int kvm_arch_release_virq_post(int virq)
+{
+ return 0;
+}
+
+int kvm_arch_msi_data_to_gsi(uint32_t data)
+{
+ return (data - 32) & 0xffff;
+}
diff --git a/target/arm/kvm32.c b/target/arm/kvm32.c
new file mode 100644
index 0000000000..069da0c5fd
--- /dev/null
+++ b/target/arm/kvm32.c
@@ -0,0 +1,529 @@
+/*
+ * ARM implementation of KVM hooks, 32 bit specific code.
+ *
+ * Copyright Christoffer Dall 2009-2010
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include <sys/ioctl.h>
+
+#include <linux/kvm.h>
+
+#include "qemu-common.h"
+#include "cpu.h"
+#include "qemu/timer.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/kvm.h"
+#include "kvm_arm.h"
+#include "internals.h"
+#include "hw/arm/arm.h"
+#include "qemu/log.h"
+
+static inline void set_feature(uint64_t *features, int feature)
+{
+ *features |= 1ULL << feature;
+}
+
+bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
+{
+ /* Identify the feature bits corresponding to the host CPU, and
+ * fill out the ARMHostCPUClass fields accordingly. To do this
+ * we have to create a scratch VM, create a single CPU inside it,
+ * and then query that CPU for the relevant ID registers.
+ */
+ int i, ret, fdarray[3];
+ uint32_t midr, id_pfr0, id_isar0, mvfr1;
+ uint64_t features = 0;
+ /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
+ * we know these will only support creating one kind of guest CPU,
+ * which is their preferred CPU type.
+ */
+ static const uint32_t cpus_to_try[] = {
+ QEMU_KVM_ARM_TARGET_CORTEX_A15,
+ QEMU_KVM_ARM_TARGET_NONE
+ };
+ struct kvm_vcpu_init init;
+ struct kvm_one_reg idregs[] = {
+ {
+ .id = KVM_REG_ARM | KVM_REG_SIZE_U32
+ | ENCODE_CP_REG(15, 0, 0, 0, 0, 0, 0),
+ .addr = (uintptr_t)&midr,
+ },
+ {
+ .id = KVM_REG_ARM | KVM_REG_SIZE_U32
+ | ENCODE_CP_REG(15, 0, 0, 0, 1, 0, 0),
+ .addr = (uintptr_t)&id_pfr0,
+ },
+ {
+ .id = KVM_REG_ARM | KVM_REG_SIZE_U32
+ | ENCODE_CP_REG(15, 0, 0, 0, 2, 0, 0),
+ .addr = (uintptr_t)&id_isar0,
+ },
+ {
+ .id = KVM_REG_ARM | KVM_REG_SIZE_U32
+ | KVM_REG_ARM_VFP | KVM_REG_ARM_VFP_MVFR1,
+ .addr = (uintptr_t)&mvfr1,
+ },
+ };
+
+ if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
+ return false;
+ }
+
+ ahcc->target = init.target;
+
+ /* This is not strictly blessed by the device tree binding docs yet,
+ * but in practice the kernel does not care about this string so
+ * there is no point in maintaining a KVM_ARM_TARGET_* -> string table.
+ */
+ ahcc->dtb_compatible = "arm,arm-v7";
+
+ for (i = 0; i < ARRAY_SIZE(idregs); i++) {
+ ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &idregs[i]);
+ if (ret) {
+ break;
+ }
+ }
+
+ kvm_arm_destroy_scratch_host_vcpu(fdarray);
+
+ if (ret) {
+ return false;
+ }
+
+ /* Now we've retrieved all the register information we can
+ * set the feature bits based on the ID register fields.
+ * We can assume any KVM supporting CPU is at least a v7
+ * with VFPv3, LPAE and the generic timers; this in turn implies
+ * most of the other feature bits, but a few must be tested.
+ */
+ set_feature(&features, ARM_FEATURE_V7);
+ set_feature(&features, ARM_FEATURE_VFP3);
+ set_feature(&features, ARM_FEATURE_LPAE);
+ set_feature(&features, ARM_FEATURE_GENERIC_TIMER);
+
+ switch (extract32(id_isar0, 24, 4)) {
+ case 1:
+ set_feature(&features, ARM_FEATURE_THUMB_DIV);
+ break;
+ case 2:
+ set_feature(&features, ARM_FEATURE_ARM_DIV);
+ set_feature(&features, ARM_FEATURE_THUMB_DIV);
+ break;
+ default:
+ break;
+ }
+
+ if (extract32(id_pfr0, 12, 4) == 1) {
+ set_feature(&features, ARM_FEATURE_THUMB2EE);
+ }
+ if (extract32(mvfr1, 20, 4) == 1) {
+ set_feature(&features, ARM_FEATURE_VFP_FP16);
+ }
+ if (extract32(mvfr1, 12, 4) == 1) {
+ set_feature(&features, ARM_FEATURE_NEON);
+ }
+ if (extract32(mvfr1, 28, 4) == 1) {
+ /* FMAC support implies VFPv4 */
+ set_feature(&features, ARM_FEATURE_VFP4);
+ }
+
+ ahcc->features = features;
+
+ return true;
+}
+
+bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
+{
+ /* Return true if the regidx is a register we should synchronize
+ * via the cpreg_tuples array (ie is not a core reg we sync by
+ * hand in kvm_arch_get/put_registers())
+ */
+ switch (regidx & KVM_REG_ARM_COPROC_MASK) {
+ case KVM_REG_ARM_CORE:
+ case KVM_REG_ARM_VFP:
+ return false;
+ default:
+ return true;
+ }
+}
+
+typedef struct CPRegStateLevel {
+ uint64_t regidx;
+ int level;
+} CPRegStateLevel;
+
+/* All coprocessor registers not listed in the following table are assumed to
+ * be of the level KVM_PUT_RUNTIME_STATE. If a register should be written less
+ * often, you must add it to this table with a state of either
+ * KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
+ */
+static const CPRegStateLevel non_runtime_cpregs[] = {
+ { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
+};
+
+int kvm_arm_cpreg_level(uint64_t regidx)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
+ const CPRegStateLevel *l = &non_runtime_cpregs[i];
+ if (l->regidx == regidx) {
+ return l->level;
+ }
+ }
+
+ return KVM_PUT_RUNTIME_STATE;
+}
+
+#define ARM_CPU_ID_MPIDR 0, 0, 0, 5
+
+int kvm_arch_init_vcpu(CPUState *cs)
+{
+ int ret;
+ uint64_t v;
+ uint32_t mpidr;
+ struct kvm_one_reg r;
+ ARMCPU *cpu = ARM_CPU(cs);
+
+ if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE) {
+ fprintf(stderr, "KVM is not supported for this guest CPU type\n");
+ return -EINVAL;
+ }
+
+ /* Determine init features for this CPU */
+ memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
+ if (cpu->start_powered_off) {
+ cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
+ }
+ if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
+ cpu->psci_version = 2;
+ cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
+ }
+
+ /* Do KVM_ARM_VCPU_INIT ioctl */
+ ret = kvm_arm_vcpu_init(cs);
+ if (ret) {
+ return ret;
+ }
+
+ /* Query the kernel to make sure it supports 32 VFP
+ * registers: QEMU's "cortex-a15" CPU is always a
+ * VFP-D32 core. The simplest way to do this is just
+ * to attempt to read register d31.
+ */
+ r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP | 31;
+ r.addr = (uintptr_t)(&v);
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
+ if (ret == -ENOENT) {
+ return -EINVAL;
+ }
+
+ /*
+ * When KVM is in use, PSCI is emulated in-kernel and not by qemu.
+ * Currently KVM has its own idea about MPIDR assignment, so we
+ * override our defaults with what we get from KVM.
+ */
+ ret = kvm_get_one_reg(cs, ARM_CP15_REG32(ARM_CPU_ID_MPIDR), &mpidr);
+ if (ret) {
+ return ret;
+ }
+ cpu->mp_affinity = mpidr & ARM32_AFFINITY_MASK;
+
+ return kvm_arm_init_cpreg_list(cpu);
+}
+
+typedef struct Reg {
+ uint64_t id;
+ int offset;
+} Reg;
+
+#define COREREG(KERNELNAME, QEMUFIELD) \
+ { \
+ KVM_REG_ARM | KVM_REG_SIZE_U32 | \
+ KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(KERNELNAME), \
+ offsetof(CPUARMState, QEMUFIELD) \
+ }
+
+#define VFPSYSREG(R) \
+ { \
+ KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP | \
+ KVM_REG_ARM_VFP_##R, \
+ offsetof(CPUARMState, vfp.xregs[ARM_VFP_##R]) \
+ }
+
+/* Like COREREG, but handle fields which are in a uint64_t in CPUARMState. */
+#define COREREG64(KERNELNAME, QEMUFIELD) \
+ { \
+ KVM_REG_ARM | KVM_REG_SIZE_U32 | \
+ KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(KERNELNAME), \
+ offsetoflow32(CPUARMState, QEMUFIELD) \
+ }
+
+static const Reg regs[] = {
+ /* R0_usr .. R14_usr */
+ COREREG(usr_regs.uregs[0], regs[0]),
+ COREREG(usr_regs.uregs[1], regs[1]),
+ COREREG(usr_regs.uregs[2], regs[2]),
+ COREREG(usr_regs.uregs[3], regs[3]),
+ COREREG(usr_regs.uregs[4], regs[4]),
+ COREREG(usr_regs.uregs[5], regs[5]),
+ COREREG(usr_regs.uregs[6], regs[6]),
+ COREREG(usr_regs.uregs[7], regs[7]),
+ COREREG(usr_regs.uregs[8], usr_regs[0]),
+ COREREG(usr_regs.uregs[9], usr_regs[1]),
+ COREREG(usr_regs.uregs[10], usr_regs[2]),
+ COREREG(usr_regs.uregs[11], usr_regs[3]),
+ COREREG(usr_regs.uregs[12], usr_regs[4]),
+ COREREG(usr_regs.uregs[13], banked_r13[BANK_USRSYS]),
+ COREREG(usr_regs.uregs[14], banked_r14[BANK_USRSYS]),
+ /* R13, R14, SPSR for SVC, ABT, UND, IRQ banks */
+ COREREG(svc_regs[0], banked_r13[BANK_SVC]),
+ COREREG(svc_regs[1], banked_r14[BANK_SVC]),
+ COREREG64(svc_regs[2], banked_spsr[BANK_SVC]),
+ COREREG(abt_regs[0], banked_r13[BANK_ABT]),
+ COREREG(abt_regs[1], banked_r14[BANK_ABT]),
+ COREREG64(abt_regs[2], banked_spsr[BANK_ABT]),
+ COREREG(und_regs[0], banked_r13[BANK_UND]),
+ COREREG(und_regs[1], banked_r14[BANK_UND]),
+ COREREG64(und_regs[2], banked_spsr[BANK_UND]),
+ COREREG(irq_regs[0], banked_r13[BANK_IRQ]),
+ COREREG(irq_regs[1], banked_r14[BANK_IRQ]),
+ COREREG64(irq_regs[2], banked_spsr[BANK_IRQ]),
+ /* R8_fiq .. R14_fiq and SPSR_fiq */
+ COREREG(fiq_regs[0], fiq_regs[0]),
+ COREREG(fiq_regs[1], fiq_regs[1]),
+ COREREG(fiq_regs[2], fiq_regs[2]),
+ COREREG(fiq_regs[3], fiq_regs[3]),
+ COREREG(fiq_regs[4], fiq_regs[4]),
+ COREREG(fiq_regs[5], banked_r13[BANK_FIQ]),
+ COREREG(fiq_regs[6], banked_r14[BANK_FIQ]),
+ COREREG64(fiq_regs[7], banked_spsr[BANK_FIQ]),
+ /* R15 */
+ COREREG(usr_regs.uregs[15], regs[15]),
+ /* VFP system registers */
+ VFPSYSREG(FPSID),
+ VFPSYSREG(MVFR1),
+ VFPSYSREG(MVFR0),
+ VFPSYSREG(FPEXC),
+ VFPSYSREG(FPINST),
+ VFPSYSREG(FPINST2),
+};
+
+int kvm_arch_put_registers(CPUState *cs, int level)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ struct kvm_one_reg r;
+ int mode, bn;
+ int ret, i;
+ uint32_t cpsr, fpscr;
+
+ /* Make sure the banked regs are properly set */
+ mode = env->uncached_cpsr & CPSR_M;
+ bn = bank_number(mode);
+ if (mode == ARM_CPU_MODE_FIQ) {
+ memcpy(env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
+ } else {
+ memcpy(env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
+ }
+ env->banked_r13[bn] = env->regs[13];
+ env->banked_r14[bn] = env->regs[14];
+ env->banked_spsr[bn] = env->spsr;
+
+ /* Now we can safely copy stuff down to the kernel */
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ r.id = regs[i].id;
+ r.addr = (uintptr_t)(env) + regs[i].offset;
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ /* Special cases which aren't a single CPUARMState field */
+ cpsr = cpsr_read(env);
+ r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 |
+ KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr);
+ r.addr = (uintptr_t)(&cpsr);
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
+ if (ret) {
+ return ret;
+ }
+
+ /* VFP registers */
+ r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
+ for (i = 0; i < 32; i++) {
+ r.addr = (uintptr_t)(&env->vfp.regs[i]);
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
+ if (ret) {
+ return ret;
+ }
+ r.id++;
+ }
+
+ r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP |
+ KVM_REG_ARM_VFP_FPSCR;
+ fpscr = vfp_get_fpscr(env);
+ r.addr = (uintptr_t)&fpscr;
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &r);
+ if (ret) {
+ return ret;
+ }
+
+ /* Note that we do not call write_cpustate_to_list()
+ * here, so we are only writing the tuple list back to
+ * KVM. This is safe because nothing can change the
+ * CPUARMState cp15 fields (in particular gdb accesses cannot)
+ * and so there are no changes to sync. In fact syncing would
+ * be wrong at this point: for a constant register where TCG and
+ * KVM disagree about its value, the preceding write_list_to_cpustate()
+ * would not have had any effect on the CPUARMState value (since the
+ * register is read-only), and a write_cpustate_to_list() here would
+ * then try to write the TCG value back into KVM -- this would either
+ * fail or incorrectly change the value the guest sees.
+ *
+ * If we ever want to allow the user to modify cp15 registers via
+ * the gdb stub, we would need to be more clever here (for instance
+ * tracking the set of registers kvm_arch_get_registers() successfully
+ * managed to update the CPUARMState with, and only allowing those
+ * to be written back up into the kernel).
+ */
+ if (!write_list_to_kvmstate(cpu, level)) {
+ return EINVAL;
+ }
+
+ kvm_arm_sync_mpstate_to_kvm(cpu);
+
+ return ret;
+}
+
+int kvm_arch_get_registers(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ struct kvm_one_reg r;
+ int mode, bn;
+ int ret, i;
+ uint32_t cpsr, fpscr;
+
+ for (i = 0; i < ARRAY_SIZE(regs); i++) {
+ r.id = regs[i].id;
+ r.addr = (uintptr_t)(env) + regs[i].offset;
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ /* Special cases which aren't a single CPUARMState field */
+ r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 |
+ KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr);
+ r.addr = (uintptr_t)(&cpsr);
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
+ if (ret) {
+ return ret;
+ }
+ cpsr_write(env, cpsr, 0xffffffff, CPSRWriteRaw);
+
+ /* Make sure the current mode regs are properly set */
+ mode = env->uncached_cpsr & CPSR_M;
+ bn = bank_number(mode);
+ if (mode == ARM_CPU_MODE_FIQ) {
+ memcpy(env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
+ } else {
+ memcpy(env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
+ }
+ env->regs[13] = env->banked_r13[bn];
+ env->regs[14] = env->banked_r14[bn];
+ env->spsr = env->banked_spsr[bn];
+
+ /* VFP registers */
+ r.id = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;
+ for (i = 0; i < 32; i++) {
+ r.addr = (uintptr_t)(&env->vfp.regs[i]);
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
+ if (ret) {
+ return ret;
+ }
+ r.id++;
+ }
+
+ r.id = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP |
+ KVM_REG_ARM_VFP_FPSCR;
+ r.addr = (uintptr_t)&fpscr;
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &r);
+ if (ret) {
+ return ret;
+ }
+ vfp_set_fpscr(env, fpscr);
+
+ if (!write_kvmstate_to_list(cpu)) {
+ return EINVAL;
+ }
+ /* Note that it's OK to have registers which aren't in CPUState,
+ * so we can ignore a failure return here.
+ */
+ write_list_to_cpustate(cpu);
+
+ kvm_arm_sync_mpstate_to_qemu(cpu);
+
+ return 0;
+}
+
+int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+ qemu_log_mask(LOG_UNIMP, "%s: guest debug not yet implemented\n", __func__);
+ return -EINVAL;
+}
+
+int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+ qemu_log_mask(LOG_UNIMP, "%s: guest debug not yet implemented\n", __func__);
+ return -EINVAL;
+}
+
+bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
+{
+ qemu_log_mask(LOG_UNIMP, "%s: guest debug not yet implemented\n", __func__);
+ return false;
+}
+
+int kvm_arch_insert_hw_breakpoint(target_ulong addr,
+ target_ulong len, int type)
+{
+ qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
+ return -EINVAL;
+}
+
+int kvm_arch_remove_hw_breakpoint(target_ulong addr,
+ target_ulong len, int type)
+{
+ qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
+ return -EINVAL;
+}
+
+void kvm_arch_remove_all_hw_breakpoints(void)
+{
+ qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
+}
+
+void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr)
+{
+ qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
+}
+
+bool kvm_arm_hw_debug_active(CPUState *cs)
+{
+ return false;
+}
+
+int kvm_arm_pmu_create(CPUState *cs, int irq)
+{
+ qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
+ return 0;
+}
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
new file mode 100644
index 0000000000..61111091ad
--- /dev/null
+++ b/target/arm/kvm64.c
@@ -0,0 +1,982 @@
+/*
+ * ARM implementation of KVM hooks, 64 bit specific code
+ *
+ * Copyright Mian-M. Hamayun 2013, Virtual Open Systems
+ * Copyright Alex Bennée 2014, Linaro
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include <sys/ioctl.h>
+#include <sys/ptrace.h>
+
+#include <linux/elf.h>
+#include <linux/kvm.h>
+
+#include "qemu-common.h"
+#include "cpu.h"
+#include "qemu/timer.h"
+#include "qemu/error-report.h"
+#include "qemu/host-utils.h"
+#include "exec/gdbstub.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/kvm.h"
+#include "kvm_arm.h"
+#include "internals.h"
+#include "hw/arm/arm.h"
+
+static bool have_guest_debug;
+
+/*
+ * Although the ARM implementation of hardware assisted debugging
+ * allows for different breakpoints per-core, the current GDB
+ * interface treats them as a global pool of registers (which seems to
+ * be the case for x86, ppc and s390). As a result we store one copy
+ * of registers which is used for all active cores.
+ *
+ * Write access is serialised by virtue of the GDB protocol which
+ * updates things. Read access (i.e. when the values are copied to the
+ * vCPU) is also gated by GDB's run control.
+ *
+ * This is not unreasonable, as when debugging kernels you rarely know
+ * which core will eventually execute your function.
+ */
+
+typedef struct {
+ uint64_t bcr;
+ uint64_t bvr;
+} HWBreakpoint;
+
+/* The watchpoint registers can cover more area than the requested
+ * watchpoint so we need to store the additional information
+ * somewhere. We also need to supply a CPUWatchpoint to the GDB stub
+ * when the watchpoint is hit.
+ */
+typedef struct {
+ uint64_t wcr;
+ uint64_t wvr;
+ CPUWatchpoint details;
+} HWWatchpoint;
+
+/* Maximum and current break/watch point counts */
+int max_hw_bps, max_hw_wps;
+GArray *hw_breakpoints, *hw_watchpoints;
+
+#define cur_hw_wps (hw_watchpoints->len)
+#define cur_hw_bps (hw_breakpoints->len)
+#define get_hw_bp(i) (&g_array_index(hw_breakpoints, HWBreakpoint, i))
+#define get_hw_wp(i) (&g_array_index(hw_watchpoints, HWWatchpoint, i))
+
+/**
+ * kvm_arm_init_debug() - check for guest debug capabilities
+ * @cs: CPUState
+ *
+ * kvm_check_extension returns the number of debug registers we have
+ * or 0 if we have none.
+ *
+ */
+static void kvm_arm_init_debug(CPUState *cs)
+{
+ have_guest_debug = kvm_check_extension(cs->kvm_state,
+ KVM_CAP_SET_GUEST_DEBUG);
+
+ max_hw_wps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_WPS);
+ hw_watchpoints = g_array_sized_new(true, true,
+ sizeof(HWWatchpoint), max_hw_wps);
+
+ max_hw_bps = kvm_check_extension(cs->kvm_state, KVM_CAP_GUEST_DEBUG_HW_BPS);
+ hw_breakpoints = g_array_sized_new(true, true,
+ sizeof(HWBreakpoint), max_hw_bps);
+ return;
+}
+
+/**
+ * insert_hw_breakpoint()
+ * @addr: address of breakpoint
+ *
+ * See ARM ARM D2.9.1 for details but here we are only going to create
+ * simple un-linked breakpoints (i.e. we don't chain breakpoints
+ * together to match address and context or vmid). The hardware is
+ * capable of fancier matching but that will require exposing that
+ * fanciness to GDB's interface.
+ *
+ * D7.3.2 DBGBCR<n>_EL1, Debug Breakpoint Control Registers
+ *
+ * 31 24 23 20 19 16 15 14 13 12 9 8 5 4 3 2 1 0
+ * +------+------+-------+-----+----+------+-----+------+-----+---+
+ * | RES0 | BT | LBN | SSC | HMC| RES0 | BAS | RES0 | PMC | E |
+ * +------+------+-------+-----+----+------+-----+------+-----+---+
+ *
+ * BT: Breakpoint type (0 = unlinked address match)
+ * LBN: Linked BP number (0 = unused)
+ * SSC/HMC/PMC: Security, Higher and Priv access control (Table D-12)
+ * BAS: Byte Address Select (RES1 for AArch64)
+ * E: Enable bit
+ */
+static int insert_hw_breakpoint(target_ulong addr)
+{
+ HWBreakpoint brk = {
+ .bcr = 0x1, /* BCR E=1, enable */
+ .bvr = addr
+ };
+
+ if (cur_hw_bps >= max_hw_bps) {
+ return -ENOBUFS;
+ }
+
+ brk.bcr = deposit32(brk.bcr, 1, 2, 0x3); /* PMC = 11 */
+ brk.bcr = deposit32(brk.bcr, 5, 4, 0xf); /* BAS = RES1 */
+
+ g_array_append_val(hw_breakpoints, brk);
+
+ return 0;
+}
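
Putting the three writes above together, every breakpoint installed this way carries the same control value:

    /* bcr = E (bit 0) | PMC = 0b11 (bits 2:1) | BAS = 0b1111 (bits 8:5)
     *     = 0x001 | 0x006 | 0x1e0 = 0x1e7 */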
+
+/**
+ * delete_hw_breakpoint()
+ * @pc: address of breakpoint
+ *
+ * Delete a breakpoint and shuffle any above down
+ */
+
+static int delete_hw_breakpoint(target_ulong pc)
+{
+ int i;
+ for (i = 0; i < hw_breakpoints->len; i++) {
+ HWBreakpoint *brk = get_hw_bp(i);
+ if (brk->bvr == pc) {
+ g_array_remove_index(hw_breakpoints, i);
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+/**
+ * insert_hw_watchpoint()
+ * @addr: address of watch point
+ * @len: size of area
+ * @type: type of watch point
+ *
+ * See ARM ARM D2.10. As with the breakpoints we can do some advanced
+ * stuff if we want to. The watch points can be linked with the break
+ * points above to make them context aware. However for simplicity
+ * currently we only deal with simple read/write watch points.
+ *
+ * D7.3.11 DBGWCR<n>_EL1, Debug Watchpoint Control Registers
+ *
+ * 31 29 28 24 23 21 20 19 16 15 14 13 12 5 4 3 2 1 0
+ * +------+-------+------+----+-----+-----+-----+-----+-----+-----+---+
+ * | RES0 | MASK | RES0 | WT | LBN | SSC | HMC | BAS | LSC | PAC | E |
+ * +------+-------+------+----+-----+-----+-----+-----+-----+-----+---+
+ *
+ * MASK: num bits addr mask (0=none,01/10=res,11=3 bits (8 bytes))
+ * WT: 0 - unlinked, 1 - linked (not currently used)
+ * LBN: Linked BP number (not currently used)
+ * SSC/HMC/PAC: Security, Higher and Priv access control (Table D2-11)
+ * BAS: Byte Address Select
+ * LSC: Load/Store control (01: load, 10: store, 11: both)
+ * E: Enable
+ *
+ * The bottom 2 bits of the value register are masked. Therefore to
+ * break on any sizes smaller than an unaligned word you need to set
+ * MASK=0, BAS=bit per byte in question. For larger (power-of-2) regions you
+ * need to ensure you mask the address as required and set BAS=0xff
+ */
+
+static int insert_hw_watchpoint(target_ulong addr,
+ target_ulong len, int type)
+{
+ HWWatchpoint wp = {
+ .wcr = 1, /* E=1, enable */
+ .wvr = addr & (~0x7ULL),
+ .details = { .vaddr = addr, .len = len }
+ };
+
+ if (cur_hw_wps >= max_hw_wps) {
+ return -ENOBUFS;
+ }
+
+ /*
+ * HMC=0 SSC=0 PAC=3 will hit EL0 or EL1, any security state,
+ * valid whether EL3 is implemented or not
+ */
+ wp.wcr = deposit32(wp.wcr, 1, 2, 3);
+
+ switch (type) {
+ case GDB_WATCHPOINT_READ:
+ wp.wcr = deposit32(wp.wcr, 3, 2, 1);
+ wp.details.flags = BP_MEM_READ;
+ break;
+ case GDB_WATCHPOINT_WRITE:
+ wp.wcr = deposit32(wp.wcr, 3, 2, 2);
+ wp.details.flags = BP_MEM_WRITE;
+ break;
+ case GDB_WATCHPOINT_ACCESS:
+ wp.wcr = deposit32(wp.wcr, 3, 2, 3);
+ wp.details.flags = BP_MEM_ACCESS;
+ break;
+ default:
+ g_assert_not_reached();
+ break;
+ }
+ if (len <= 8) {
+ /* we align the address and set the bits in BAS */
+ int off = addr & 0x7;
+ int bas = (1 << len) - 1;
+
+ wp.wcr = deposit32(wp.wcr, 5 + off, 8 - off, bas);
+ } else {
+ /* For ranges above 8 bytes we need to be a power of 2 */
+ if (is_power_of_2(len)) {
+ int bits = ctz64(len);
+
+ wp.wvr &= ~((1 << bits) - 1);
+ wp.wcr = deposit32(wp.wcr, 24, 4, bits);
+ wp.wcr = deposit32(wp.wcr, 5, 8, 0xff);
+ } else {
+ return -ENOBUFS;
+ }
+ }
+
+ g_array_append_val(hw_watchpoints, wp);
+ return 0;
+}
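
A worked example of the small-range path above (addresses illustrative): a 2-byte write watchpoint at 0x1006 ends up with

    /* wvr = 0x1006 & ~7 = 0x1000
     * off = 0x1006 & 7  = 6,   bas = (1 << 2) - 1 = 0b11
     * wcr = E(1) | PAC = 0b11 << 1 | LSC = 0b10 << 3 | bas << 11
     *     = 0x1 | 0x6 | 0x10 | 0x1800 = 0x1817
     * so only bytes 6-7 of the aligned doubleword report hits. */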
+
+
+static bool check_watchpoint_in_range(int i, target_ulong addr)
+{
+ HWWatchpoint *wp = get_hw_wp(i);
+ uint64_t addr_top, addr_bottom = wp->wvr;
+ int bas = extract32(wp->wcr, 5, 8);
+ int mask = extract32(wp->wcr, 24, 4);
+
+ if (mask) {
+ addr_top = addr_bottom + (1 << mask);
+ } else {
+ /* BAS must be contiguous but can be offset against the base
+ * address in DBGWVR */
+ addr_bottom = addr_bottom + ctz32(bas);
+ addr_top = addr_bottom + clo32(bas);
+ }
+
+ if (addr >= addr_bottom && addr <= addr_top) {
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * delete_hw_watchpoint()
+ * @addr: address of watchpoint
+ *
+ * Delete a watchpoint and shuffle any above down
+ */
+
+static int delete_hw_watchpoint(target_ulong addr,
+ target_ulong len, int type)
+{
+ int i;
+ for (i = 0; i < cur_hw_wps; i++) {
+ if (check_watchpoint_in_range(i, addr)) {
+ g_array_remove_index(hw_watchpoints, i);
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+
+int kvm_arch_insert_hw_breakpoint(target_ulong addr,
+ target_ulong len, int type)
+{
+ switch (type) {
+ case GDB_BREAKPOINT_HW:
+ return insert_hw_breakpoint(addr);
+ break;
+ case GDB_WATCHPOINT_READ:
+ case GDB_WATCHPOINT_WRITE:
+ case GDB_WATCHPOINT_ACCESS:
+ return insert_hw_watchpoint(addr, len, type);
+ default:
+ return -ENOSYS;
+ }
+}
+
+int kvm_arch_remove_hw_breakpoint(target_ulong addr,
+ target_ulong len, int type)
+{
+ switch (type) {
+ case GDB_BREAKPOINT_HW:
+ return delete_hw_breakpoint(addr);
+ case GDB_WATCHPOINT_READ:
+ case GDB_WATCHPOINT_WRITE:
+ case GDB_WATCHPOINT_ACCESS:
+ return delete_hw_watchpoint(addr, len, type);
+ default:
+ return -ENOSYS;
+ }
+}
+
+
+void kvm_arch_remove_all_hw_breakpoints(void)
+{
+ if (cur_hw_wps > 0) {
+ g_array_remove_range(hw_watchpoints, 0, cur_hw_wps);
+ }
+ if (cur_hw_bps > 0) {
+ g_array_remove_range(hw_breakpoints, 0, cur_hw_bps);
+ }
+}
+
+void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr)
+{
+ int i;
+ memset(ptr, 0, sizeof(struct kvm_guest_debug_arch));
+
+ for (i = 0; i < max_hw_wps; i++) {
+ HWWatchpoint *wp = get_hw_wp(i);
+ ptr->dbg_wcr[i] = wp->wcr;
+ ptr->dbg_wvr[i] = wp->wvr;
+ }
+ for (i = 0; i < max_hw_bps; i++) {
+ HWBreakpoint *bp = get_hw_bp(i);
+ ptr->dbg_bcr[i] = bp->bcr;
+ ptr->dbg_bvr[i] = bp->bvr;
+ }
+}
+
+bool kvm_arm_hw_debug_active(CPUState *cs)
+{
+ return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
+}
+
+static bool find_hw_breakpoint(CPUState *cpu, target_ulong pc)
+{
+ int i;
+
+ for (i = 0; i < cur_hw_bps; i++) {
+ HWBreakpoint *bp = get_hw_bp(i);
+ if (bp->bvr == pc) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static CPUWatchpoint *find_hw_watchpoint(CPUState *cpu, target_ulong addr)
+{
+ int i;
+
+ for (i = 0; i < cur_hw_wps; i++) {
+ if (check_watchpoint_in_range(i, addr)) {
+ return &get_hw_wp(i)->details;
+ }
+ }
+ return NULL;
+}
+
+static bool kvm_arm_pmu_support_ctrl(CPUState *cs, struct kvm_device_attr *attr)
+{
+ return kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr) == 0;
+}
+
+int kvm_arm_pmu_create(CPUState *cs, int irq)
+{
+ int err;
+
+ struct kvm_device_attr attr = {
+ .group = KVM_ARM_VCPU_PMU_V3_CTRL,
+ .addr = (intptr_t)&irq,
+ .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
+ .flags = 0,
+ };
+
+ if (!kvm_arm_pmu_support_ctrl(cs, &attr)) {
+ return 0;
+ }
+
+ err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, &attr);
+ if (err < 0) {
+ fprintf(stderr, "KVM_SET_DEVICE_ATTR failed: %s\n",
+ strerror(-err));
+ abort();
+ }
+
+ attr.group = KVM_ARM_VCPU_PMU_V3_CTRL;
+ attr.attr = KVM_ARM_VCPU_PMU_V3_INIT;
+ attr.addr = 0;
+ attr.flags = 0;
+
+ err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, &attr);
+ if (err < 0) {
+ fprintf(stderr, "KVM_SET_DEVICE_ATTR failed: %s\n",
+ strerror(-err));
+ abort();
+ }
+
+ return 1;
+}
+
+static inline void set_feature(uint64_t *features, int feature)
+{
+ *features |= 1ULL << feature;
+}
+
+static inline void unset_feature(uint64_t *features, int feature)
+{
+ *features &= ~(1ULL << feature);
+}
+
+bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc)
+{
+ /* Identify the feature bits corresponding to the host CPU, and
+ * fill out the ARMHostCPUClass fields accordingly. To do this
+ * we have to create a scratch VM, create a single CPU inside it,
+ * and then query that CPU for the relevant ID registers.
+ * For AArch64 we currently don't care about ID registers at
+ * all; we just want to know the CPU type.
+ */
+ int fdarray[3];
+ uint64_t features = 0;
+ /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
+ * we know these will only support creating one kind of guest CPU,
+ * which is their preferred CPU type. Fortunately these old kernels
+ * support only a very limited number of CPUs.
+ */
+ static const uint32_t cpus_to_try[] = {
+ KVM_ARM_TARGET_AEM_V8,
+ KVM_ARM_TARGET_FOUNDATION_V8,
+ KVM_ARM_TARGET_CORTEX_A57,
+ QEMU_KVM_ARM_TARGET_NONE
+ };
+ struct kvm_vcpu_init init;
+
+ if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
+ return false;
+ }
+
+ ahcc->target = init.target;
+ ahcc->dtb_compatible = "arm,arm-v8";
+
+ kvm_arm_destroy_scratch_host_vcpu(fdarray);
+
+ /* We can assume any KVM supporting CPU is at least a v8
+ * with VFPv4+Neon; this in turn implies most of the other
+ * feature bits.
+ */
+ set_feature(&features, ARM_FEATURE_V8);
+ set_feature(&features, ARM_FEATURE_VFP4);
+ set_feature(&features, ARM_FEATURE_NEON);
+ set_feature(&features, ARM_FEATURE_AARCH64);
+ set_feature(&features, ARM_FEATURE_PMU);
+
+ ahcc->features = features;
+
+ return true;
+}
+
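+/* MPIDR_EL1 is S3_0_C0_C0_5: op0=3, op1=0, CRn=0, CRm=0, op2=5 */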
+#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5
+
+int kvm_arch_init_vcpu(CPUState *cs)
+{
+ int ret;
+ uint64_t mpidr;
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
+ !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
+ fprintf(stderr, "KVM is not supported for this guest CPU type\n");
+ return -EINVAL;
+ }
+
+ /* Determine init features for this CPU */
+ memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
+ if (cpu->start_powered_off) {
+ cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
+ }
+ if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
+ cpu->psci_version = 2;
+ cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
+ }
+ if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
+ cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
+ }
+ if (!kvm_irqchip_in_kernel() ||
+ !kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) {
+ cpu->has_pmu = false;
+ }
+ if (cpu->has_pmu) {
+ cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
+ } else {
+ unset_feature(&env->features, ARM_FEATURE_PMU);
+ }
+
+ /* Do KVM_ARM_VCPU_INIT ioctl */
+ ret = kvm_arm_vcpu_init(cs);
+ if (ret) {
+ return ret;
+ }
+
+ /*
+ * When KVM is in use, PSCI is emulated in-kernel and not by qemu.
+ * Currently KVM has its own idea about MPIDR assignment, so we
+ * override our defaults with what we get from KVM.
+ */
+ ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr);
+ if (ret) {
+ return ret;
+ }
+ cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK;
+
+ kvm_arm_init_debug(cs);
+
+ return kvm_arm_init_cpreg_list(cpu);
+}
+
+bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
+{
+ /* Return true if the regidx is a register we should synchronize
+ * via the cpreg_tuples array (ie is not a core reg we sync by
+ * hand in kvm_arch_get/put_registers())
+ */
+ switch (regidx & KVM_REG_ARM_COPROC_MASK) {
+ case KVM_REG_ARM_CORE:
+ return false;
+ default:
+ return true;
+ }
+}
+
+typedef struct CPRegStateLevel {
+ uint64_t regidx;
+ int level;
+} CPRegStateLevel;
+
+/* All system registers not listed in the following table are assumed to be
+ * of the level KVM_PUT_RUNTIME_STATE. If a register should be written less
+ * often, you must add it to this table with a state of either
+ * KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
+ */
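+/*
+ * For example, the virtual counter (KVM_REG_ARM_TIMER_CNT) is only written
+ * back on a full state restore (e.g. incoming migration) so that routine
+ * register syncs do not make guest time jump.
+ */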
+static const CPRegStateLevel non_runtime_cpregs[] = {
+ { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
+};
+
+int kvm_arm_cpreg_level(uint64_t regidx)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
+ const CPRegStateLevel *l = &non_runtime_cpregs[i];
+ if (l->regidx == regidx) {
+ return l->level;
+ }
+ }
+
+ return KVM_PUT_RUNTIME_STATE;
+}
+
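+/*
+ * These macros build KVM_{GET,SET}_ONE_REG ids for the AArch64 core
+ * registers; KVM_REG_ARM_CORE_REG(x) encodes the offset of field x within
+ * the kernel's struct kvm_regs.
+ */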
+#define AARCH64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
+ KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
+
+#define AARCH64_SIMD_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
+ KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
+
+#define AARCH64_SIMD_CTRL_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
+ KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
+
+int kvm_arch_put_registers(CPUState *cs, int level)
+{
+ struct kvm_one_reg reg;
+ uint32_t fpr;
+ uint64_t val;
+ int i;
+ int ret;
+ unsigned int el;
+
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ /* If we are in AArch32 mode then we need to copy the AArch32 regs to the
+ * AArch64 registers before pushing them out to 64-bit KVM.
+ */
+ if (!is_a64(env)) {
+ aarch64_sync_32_to_64(env);
+ }
+
+ for (i = 0; i < 31; i++) {
+ reg.id = AARCH64_CORE_REG(regs.regs[i]);
+ reg.addr = (uintptr_t) &env->xregs[i];
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
+ * QEMU side we keep the current SP in xregs[31] as well.
+ */
+ aarch64_save_sp(env, 1);
+
+ reg.id = AARCH64_CORE_REG(regs.sp);
+ reg.addr = (uintptr_t) &env->sp_el[0];
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ reg.id = AARCH64_CORE_REG(sp_el1);
+ reg.addr = (uintptr_t) &env->sp_el[1];
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */
+ if (is_a64(env)) {
+ val = pstate_read(env);
+ } else {
+ val = cpsr_read(env);
+ }
+ reg.id = AARCH64_CORE_REG(regs.pstate);
+ reg.addr = (uintptr_t) &val;
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ reg.id = AARCH64_CORE_REG(regs.pc);
+ reg.addr = (uintptr_t) &env->pc;
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ reg.id = AARCH64_CORE_REG(elr_el1);
+ reg.addr = (uintptr_t) &env->elr_el[1];
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ /* Saved Program State Registers
+ *
+ * Before we restore from the banked_spsr[] array we need to
+ * ensure that any modifications to env->spsr are correctly
+ * reflected in the banks.
+ */
+ el = arm_current_el(env);
+ if (el > 0 && !is_a64(env)) {
+ i = bank_number(env->uncached_cpsr & CPSR_M);
+ env->banked_spsr[i] = env->spsr;
+ }
+
+ /* KVM 0-4 map to QEMU banks 1-5 */
+ for (i = 0; i < KVM_NR_SPSR; i++) {
+ reg.id = AARCH64_CORE_REG(spsr[i]);
+ reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ /* Advanced SIMD and FP registers
+ * We map Qn = regs[2n+1]:regs[2n]
+ */
+ for (i = 0; i < 32; i++) {
+ int rd = i << 1;
+ uint64_t fp_val[2];
+#ifdef HOST_WORDS_BIGENDIAN
+ fp_val[0] = env->vfp.regs[rd + 1];
+ fp_val[1] = env->vfp.regs[rd];
+#else
+ fp_val[1] = env->vfp.regs[rd + 1];
+ fp_val[0] = env->vfp.regs[rd];
+#endif
+ reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
+ reg.addr = (uintptr_t)(&fp_val);
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ reg.addr = (uintptr_t)(&fpr);
+ fpr = vfp_get_fpsr(env);
+ reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ fpr = vfp_get_fpcr(env);
+ reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ if (!write_list_to_kvmstate(cpu, level)) {
+ return -EINVAL;
+ }
+
+ kvm_arm_sync_mpstate_to_kvm(cpu);
+
+ return ret;
+}
+
+int kvm_arch_get_registers(CPUState *cs)
+{
+ struct kvm_one_reg reg;
+ uint64_t val;
+ uint32_t fpr;
+ unsigned int el;
+ int i;
+ int ret;
+
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ for (i = 0; i < 31; i++) {
+ reg.id = AARCH64_CORE_REG(regs.regs[i]);
+ reg.addr = (uintptr_t) &env->xregs[i];
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ reg.id = AARCH64_CORE_REG(regs.sp);
+ reg.addr = (uintptr_t) &env->sp_el[0];
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ reg.id = AARCH64_CORE_REG(sp_el1);
+ reg.addr = (uintptr_t) &env->sp_el[1];
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ reg.id = AARCH64_CORE_REG(regs.pstate);
+ reg.addr = (uintptr_t) &val;
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
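+ /* PSTATE.nRW is 0 when the vCPU is executing in AArch64 state. */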
+ env->aarch64 = ((val & PSTATE_nRW) == 0);
+ if (is_a64(env)) {
+ pstate_write(env, val);
+ } else {
+ cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
+ }
+
+ /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
+ * QEMU side we keep the current SP in xregs[31] as well.
+ */
+ aarch64_restore_sp(env, 1);
+
+ reg.id = AARCH64_CORE_REG(regs.pc);
+ reg.addr = (uintptr_t) &env->pc;
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ /* If we are in AArch32 mode then we need to sync the AArch32 regs with the
+ * incoming AArch64 regs received from 64-bit KVM.
+ * We must perform this after all of the registers have been acquired from
+ * the kernel.
+ */
+ if (!is_a64(env)) {
+ aarch64_sync_64_to_32(env);
+ }
+
+ reg.id = AARCH64_CORE_REG(elr_el1);
+ reg.addr = (uintptr_t) &env->elr_el[1];
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+
+ /* Fetch the SPSR registers
+ *
+ * KVM SPSRs 0-4 map to QEMU banks 1-5
+ */
+ for (i = 0; i < KVM_NR_SPSR; i++) {
+ reg.id = AARCH64_CORE_REG(spsr[i]);
+ reg.addr = (uintptr_t) &env->banked_spsr[i + 1];
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ el = arm_current_el(env);
+ if (el > 0 && !is_a64(env)) {
+ i = bank_number(env->uncached_cpsr & CPSR_M);
+ env->spsr = env->banked_spsr[i];
+ }
+
+ /* Advanced SIMD and FP registers
+ * We map Qn = regs[2n+1]:regs[2n]
+ */
+ for (i = 0; i < 32; i++) {
+ uint64_t fp_val[2];
+ reg.id = AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]);
+ reg.addr = (uintptr_t)(&fp_val);
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ } else {
+ int rd = i << 1;
+#ifdef HOST_WORDS_BIGENDIAN
+ env->vfp.regs[rd + 1] = fp_val[0];
+ env->vfp.regs[rd] = fp_val[1];
+#else
+ env->vfp.regs[rd + 1] = fp_val[1];
+ env->vfp.regs[rd] = fp_val[0];
+#endif
+ }
+ }
+
+ reg.addr = (uintptr_t)(&fpr);
+ reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpsr);
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+ vfp_set_fpsr(env, fpr);
+
+ reg.id = AARCH64_SIMD_CTRL_REG(fp_regs.fpcr);
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+ vfp_set_fpcr(env, fpr);
+
+ if (!write_kvmstate_to_list(cpu)) {
+ return -EINVAL;
+ }
+ /* Note that it's OK to have registers which aren't in CPUState,
+ * so we can ignore a failure return here.
+ */
+ write_list_to_cpustate(cpu);
+
+ kvm_arm_sync_mpstate_to_qemu(cpu);
+
+ /* TODO: other registers */
+ return ret;
+}
+
+/* C6.6.29 BRK instruction: 0xd4200000 encodes BRK #0 */
+static const uint32_t brk_insn = 0xd4200000;
+
+int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+ if (have_guest_debug) {
+ if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
+ cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
+ return -EINVAL;
+ }
+ return 0;
+ } else {
+ error_report("guest debug not supported on this kernel");
+ return -EINVAL;
+ }
+}
+
+int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+ static uint32_t brk;
+
+ if (have_guest_debug) {
+ if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) ||
+ brk != brk_insn ||
+ cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
+ return -EINVAL;
+ }
+ return 0;
+ } else {
+ error_report("guest debug not supported on this kernel");
+ return -EINVAL;
+ }
+}
+
+/* See v8 ARM ARM D7.2.27 ESR_ELx, Exception Syndrome Register
+ *
+ * To minimise translating between kernel and user-space the kernel
+ * ABI just provides user-space with the full exception syndrome
+ * register value to be decoded in QEMU.
+ */
+
+bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
+{
+ int hsr_ec = debug_exit->hsr >> ARM_EL_EC_SHIFT;
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUClass *cc = CPU_GET_CLASS(cs);
+ CPUARMState *env = &cpu->env;
+
+ /* Ensure PC is synchronised */
+ kvm_cpu_synchronize_state(cs);
+
+ switch (hsr_ec) {
+ case EC_SOFTWARESTEP:
+ if (cs->singlestep_enabled) {
+ return true;
+ } else {
+ /*
+ * The kernel should have suppressed the guest's ability to
+ * single step at this point so something has gone wrong.
+ */
+ error_report("%s: guest single-step while debugging unsupported"
+ " (%"PRIx64", %"PRIx32")",
+ __func__, env->pc, debug_exit->hsr);
+ return false;
+ }
+ break;
+ case EC_AA64_BKPT:
+ if (kvm_find_sw_breakpoint(cs, env->pc)) {
+ return true;
+ }
+ break;
+ case EC_BREAKPOINT:
+ if (find_hw_breakpoint(cs, env->pc)) {
+ return true;
+ }
+ break;
+ case EC_WATCHPOINT:
+ {
+ CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far);
+ if (wp) {
+ cs->watchpoint_hit = wp;
+ return true;
+ }
+ break;
+ }
+ default:
+ error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")",
+ __func__, debug_exit->hsr, env->pc);
+ }
+
+ /* If we are not handling the debug exception it must belong to
+ * the guest. Let's re-use the existing TCG interrupt code to set
+ * everything up properly.
+ */
+ cs->exception_index = EXCP_BKPT;
+ env->exception.syndrome = debug_exit->hsr;
+ env->exception.vaddress = debug_exit->far;
+ cc->do_interrupt(cs);
+
+ return false;
+}
diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
new file mode 100644
index 0000000000..633d08828a
--- /dev/null
+++ b/target/arm/kvm_arm.h
@@ -0,0 +1,291 @@
+/*
+ * QEMU KVM support -- ARM specific functions.
+ *
+ * Copyright (c) 2012 Linaro Limited
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_KVM_ARM_H
+#define QEMU_KVM_ARM_H
+
+#include "sysemu/kvm.h"
+#include "exec/memory.h"
+#include "qemu/error-report.h"
+
+/**
+ * kvm_arm_vcpu_init:
+ * @cs: CPUState
+ *
+ * Initialize (or reinitialize) the VCPU by invoking the
+ * KVM_ARM_VCPU_INIT ioctl with the CPU type and feature
+ * bitmask specified in the CPUState.
+ *
+ * Returns: 0 if success else < 0 error code
+ */
+int kvm_arm_vcpu_init(CPUState *cs);
+
+/**
+ * kvm_arm_register_device:
+ * @mr: memory region for this device
+ * @devid: the KVM device ID
+ * @group: device control API group for setting addresses
+ * @attr: device control API address type
+ * @dev_fd: device control device file descriptor (or -1 if not supported)
+ *
+ * Remember the memory region @mr, and when it is mapped by the
+ * machine model, tell the kernel that base address using the
+ * KVM_ARM_SET_DEVICE_ADDRESS ioctl or the newer device control API. @devid
+ * should be the ID of the device as defined by KVM_ARM_SET_DEVICE_ADDRESS or
+ * the arm-vgic device in the device control API.
+ * The machine model may map and unmap the device multiple times; the
+ * kernel will only be told the final address at the point where machine
+ * init is complete.
+ */
+void kvm_arm_register_device(MemoryRegion *mr, uint64_t devid, uint64_t group,
+ uint64_t attr, int dev_fd);
+
+/**
+ * kvm_arm_init_cpreg_list:
+ * @cs: CPUState
+ *
+ * Initialize the CPUState's cpreg list according to the kernel's
+ * definition of what CPU registers it knows about (and throw away
+ * the previous TCG-created cpreg list).
+ *
+ * Returns: 0 if success, else < 0 error code
+ */
+int kvm_arm_init_cpreg_list(ARMCPU *cpu);
+
+/**
+ * kvm_arm_reg_syncs_via_cpreg_list:
+ * @regidx: KVM register index
+ *
+ * Return true if this KVM register should be synchronized via the
+ * cpreg list of arbitrary system registers, false if it is synchronized
+ * by hand using code in kvm_arch_get/put_registers().
+ */
+bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx);
+
+/**
+ * kvm_arm_cpreg_level:
+ * @regidx: KVM register index
+ *
+ * Return the level of this coprocessor/system register. Return value is
+ * either KVM_PUT_RUNTIME_STATE, KVM_PUT_RESET_STATE, or KVM_PUT_FULL_STATE.
+ */
+int kvm_arm_cpreg_level(uint64_t regidx);
+
+/**
+ * write_list_to_kvmstate:
+ * @cpu: ARMCPU
+ * @level: the state level to sync
+ *
+ * For each register listed in the ARMCPU cpreg_indexes list, write
+ * its value from the cpreg_values list into the kernel (via ioctl).
+ * This updates KVM's working data structures from TCG data or
+ * from incoming migration state.
+ *
+ * Returns: true if all register values were updated correctly,
+ * false if some register was unknown to the kernel or could not
+ * be written (eg constant register with the wrong value).
+ * Note that we do not stop early on failure -- we will attempt
+ * writing all registers in the list.
+ */
+bool write_list_to_kvmstate(ARMCPU *cpu, int level);
+
+/**
+ * write_kvmstate_to_list:
+ * @cpu: ARMCPU
+ *
+ * For each register listed in the ARMCPU cpreg_indexes list, write
+ * its value from the kernel into the cpreg_values list. This is used to
+ * copy info from KVM's working data structures into TCG or
+ * for outbound migration.
+ *
+ * Returns: true if all register values were read correctly,
+ * false if some register was unknown or could not be read.
+ * Note that we do not stop early on failure -- we will attempt
+ * reading all registers in the list.
+ */
+bool write_kvmstate_to_list(ARMCPU *cpu);
+
+/**
+ * kvm_arm_reset_vcpu:
+ * @cpu: ARMCPU
+ *
+ * Called at reset time to reset the kernel's copy of the CPU registers
+ * to their initial values.
+ */
+void kvm_arm_reset_vcpu(ARMCPU *cpu);
+
+#ifdef CONFIG_KVM
+/**
+ * kvm_arm_create_scratch_host_vcpu:
+ * @cpus_to_try: array of QEMU_KVM_ARM_TARGET_* values (terminated with
+ * QEMU_KVM_ARM_TARGET_NONE) to try as fallback if the kernel does not
+ * know the PREFERRED_TARGET ioctl. Passing NULL is the same as passing
+ * an empty array.
+ * @fdarray: filled in with kvmfd, vmfd, cpufd file descriptors in that order
+ * @init: filled in with the necessary values for creating a host
+ * vcpu. If NULL is provided, will not init the vCPU (though the cpufd
+ * will still be set up).
+ *
+ * Create a scratch vcpu in its own VM of the type preferred by the host
+ * kernel (as would be used for '-cpu host'), for purposes of probing it
+ * for capabilities.
+ *
+ * Returns: true on success (and fdarray and init are filled in),
+ * false on failure (and fdarray and init are not valid).
+ */
+bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
+ int *fdarray,
+ struct kvm_vcpu_init *init);
+
+/**
+ * kvm_arm_destroy_scratch_host_vcpu:
+ * @fdarray: array of fds as set up by kvm_arm_create_scratch_host_vcpu
+ *
+ * Tear down the scratch vcpu created by kvm_arm_create_scratch_host_vcpu.
+ */
+void kvm_arm_destroy_scratch_host_vcpu(int *fdarray);
+
+#define TYPE_ARM_HOST_CPU "host-" TYPE_ARM_CPU
+#define ARM_HOST_CPU_CLASS(klass) \
+ OBJECT_CLASS_CHECK(ARMHostCPUClass, (klass), TYPE_ARM_HOST_CPU)
+#define ARM_HOST_CPU_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(ARMHostCPUClass, (obj), TYPE_ARM_HOST_CPU)
+
+typedef struct ARMHostCPUClass {
+ /*< private >*/
+ ARMCPUClass parent_class;
+ /*< public >*/
+
+ uint64_t features;
+ uint32_t target;
+ const char *dtb_compatible;
+} ARMHostCPUClass;
+
+/**
+ * kvm_arm_get_host_cpu_features:
+ * @ahcc: ARMHostCPUClass to fill in
+ *
+ * Probe the capabilities of the host kernel's preferred CPU and fill
+ * in the ARMHostCPUClass struct accordingly.
+ */
+bool kvm_arm_get_host_cpu_features(ARMHostCPUClass *ahcc);
+
+
+/**
+ * kvm_arm_sync_mpstate_to_kvm
+ * @cpu: ARMCPU
+ *
+ * If supported set the KVM MP_STATE based on QEMU's model.
+ */
+int kvm_arm_sync_mpstate_to_kvm(ARMCPU *cpu);
+
+/**
+ * kvm_arm_sync_mpstate_to_qemu
+ * @cpu: ARMCPU
+ *
+ * If supported get the MP_STATE from KVM and store in QEMU's model.
+ */
+int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu);
+
+int kvm_arm_vgic_probe(void);
+
+int kvm_arm_pmu_create(CPUState *cs, int irq);
+
+#else
+
+static inline int kvm_arm_vgic_probe(void)
+{
+ return 0;
+}
+
+static inline int kvm_arm_pmu_create(CPUState *cs, int irq)
+{
+ return 0;
+}
+
+#endif
+
+static inline const char *gic_class_name(void)
+{
+ return kvm_irqchip_in_kernel() ? "kvm-arm-gic" : "arm_gic";
+}
+
+/**
+ * gicv3_class_name
+ *
+ * Return name of GICv3 class to use depending on whether KVM acceleration is
+ * in use. Exits with an error if the chosen implementation is not available.
+ *
+ * Returns: class name to use
+ */
+static inline const char *gicv3_class_name(void)
+{
+ if (kvm_irqchip_in_kernel()) {
+#ifdef TARGET_AARCH64
+ return "kvm-arm-gicv3";
+#else
+ error_report("KVM GICv3 acceleration is not supported on this "
+ "platform");
+ exit(1);
+#endif
+ } else {
+ return "arm-gicv3";
+ }
+}
+
+/**
+ * kvm_arm_handle_debug:
+ * @cs: CPUState
+ * @debug_exit: debug part of the KVM exit structure
+ *
+ * Returns: TRUE if the debug exception was handled.
+ */
+bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit);
+
+/**
+ * kvm_arm_hw_debug_active:
+ * @cs: CPU State
+ *
+ * Return: TRUE if any hardware breakpoints or watchpoints are in use.
+ */
+
+bool kvm_arm_hw_debug_active(CPUState *cs);
+
+/**
+ * kvm_arm_copy_hw_debug_data:
+ *
+ * @ptr: kvm_guest_debug_arch structure
+ *
+ * Copy the architecture specific debug registers into the
+ * kvm_guest_debug ioctl structure.
+ */
+struct kvm_guest_debug_arch;
+
+void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr);
+
+/**
+ * its_class_name
+ *
+ * Return the ITS class name to use depending on whether KVM acceleration
+ * and KVM CAP_SIGNAL_MSI are supported
+ *
+ * Returns: class name to use or NULL
+ */
+static inline const char *its_class_name(void)
+{
+ if (kvm_irqchip_in_kernel()) {
+ /* KVM implementation requires this capability */
+ return kvm_direct_msi_enabled() ? "arm-its-kvm" : NULL;
+ } else {
+ /* Software emulation is not implemented yet */
+ return NULL;
+ }
+}
+
+#endif
diff --git a/target/arm/machine.c b/target/arm/machine.c
new file mode 100644
index 0000000000..d90943b6db
--- /dev/null
+++ b/target/arm/machine.c
@@ -0,0 +1,333 @@
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "hw/hw.h"
+#include "hw/boards.h"
+#include "qemu/error-report.h"
+#include "sysemu/kvm.h"
+#include "kvm_arm.h"
+#include "internals.h"
+#include "migration/cpu.h"
+
+static bool vfp_needed(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+ CPUARMState *env = &cpu->env;
+
+ return arm_feature(env, ARM_FEATURE_VFP);
+}
+
+static int get_fpscr(QEMUFile *f, void *opaque, size_t size)
+{
+ ARMCPU *cpu = opaque;
+ CPUARMState *env = &cpu->env;
+ uint32_t val = qemu_get_be32(f);
+
+ vfp_set_fpscr(env, val);
+ return 0;
+}
+
+static void put_fpscr(QEMUFile *f, void *opaque, size_t size)
+{
+ ARMCPU *cpu = opaque;
+ CPUARMState *env = &cpu->env;
+
+ qemu_put_be32(f, vfp_get_fpscr(env));
+}
+
+static const VMStateInfo vmstate_fpscr = {
+ .name = "fpscr",
+ .get = get_fpscr,
+ .put = put_fpscr,
+};
+
+static const VMStateDescription vmstate_vfp = {
+ .name = "cpu/vfp",
+ .version_id = 3,
+ .minimum_version_id = 3,
+ .needed = vfp_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_FLOAT64_ARRAY(env.vfp.regs, ARMCPU, 64),
+ /* The xregs array is a little awkward because element 1 (FPSCR)
+ * requires a specific accessor, so we have to split it up in
+ * the vmstate:
+ */
+ VMSTATE_UINT32(env.vfp.xregs[0], ARMCPU),
+ VMSTATE_UINT32_SUB_ARRAY(env.vfp.xregs, ARMCPU, 2, 14),
+ {
+ .name = "fpscr",
+ .version_id = 0,
+ .size = sizeof(uint32_t),
+ .info = &vmstate_fpscr,
+ .flags = VMS_SINGLE,
+ .offset = 0,
+ },
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool iwmmxt_needed(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+ CPUARMState *env = &cpu->env;
+
+ return arm_feature(env, ARM_FEATURE_IWMMXT);
+}
+
+static const VMStateDescription vmstate_iwmmxt = {
+ .name = "cpu/iwmmxt",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = iwmmxt_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64_ARRAY(env.iwmmxt.regs, ARMCPU, 16),
+ VMSTATE_UINT32_ARRAY(env.iwmmxt.cregs, ARMCPU, 16),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool m_needed(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+ CPUARMState *env = &cpu->env;
+
+ return arm_feature(env, ARM_FEATURE_M);
+}
+
+static const VMStateDescription vmstate_m = {
+ .name = "cpu/m",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = m_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(env.v7m.other_sp, ARMCPU),
+ VMSTATE_UINT32(env.v7m.vecbase, ARMCPU),
+ VMSTATE_UINT32(env.v7m.basepri, ARMCPU),
+ VMSTATE_UINT32(env.v7m.control, ARMCPU),
+ VMSTATE_INT32(env.v7m.current_sp, ARMCPU),
+ VMSTATE_INT32(env.v7m.exception, ARMCPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool thumb2ee_needed(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+ CPUARMState *env = &cpu->env;
+
+ return arm_feature(env, ARM_FEATURE_THUMB2EE);
+}
+
+static const VMStateDescription vmstate_thumb2ee = {
+ .name = "cpu/thumb2ee",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = thumb2ee_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(env.teecr, ARMCPU),
+ VMSTATE_UINT32(env.teehbr, ARMCPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool pmsav7_needed(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+ CPUARMState *env = &cpu->env;
+
+ return arm_feature(env, ARM_FEATURE_MPU) &&
+ arm_feature(env, ARM_FEATURE_V7);
+}
+
+static bool pmsav7_rgnr_vmstate_validate(void *opaque, int version_id)
+{
+ ARMCPU *cpu = opaque;
+
+ return cpu->env.cp15.c6_rgnr < cpu->pmsav7_dregion;
+}
+
+static const VMStateDescription vmstate_pmsav7 = {
+ .name = "cpu/pmsav7",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = pmsav7_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_VARRAY_UINT32(env.pmsav7.drbar, ARMCPU, pmsav7_dregion, 0,
+ vmstate_info_uint32, uint32_t),
+ VMSTATE_VARRAY_UINT32(env.pmsav7.drsr, ARMCPU, pmsav7_dregion, 0,
+ vmstate_info_uint32, uint32_t),
+ VMSTATE_VARRAY_UINT32(env.pmsav7.dracr, ARMCPU, pmsav7_dregion, 0,
+ vmstate_info_uint32, uint32_t),
+ VMSTATE_VALIDATE("rgnr is valid", pmsav7_rgnr_vmstate_validate),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static int get_cpsr(QEMUFile *f, void *opaque, size_t size)
+{
+ ARMCPU *cpu = opaque;
+ CPUARMState *env = &cpu->env;
+ uint32_t val = qemu_get_be32(f);
+
+ env->aarch64 = ((val & PSTATE_nRW) == 0);
+
+ if (is_a64(env)) {
+ pstate_write(env, val);
+ return 0;
+ }
+
+ cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
+ return 0;
+}
+
+static void put_cpsr(QEMUFile *f, void *opaque, size_t size)
+{
+ ARMCPU *cpu = opaque;
+ CPUARMState *env = &cpu->env;
+ uint32_t val;
+
+ if (is_a64(env)) {
+ val = pstate_read(env);
+ } else {
+ val = cpsr_read(env);
+ }
+
+ qemu_put_be32(f, val);
+}
+
+static const VMStateInfo vmstate_cpsr = {
+ .name = "cpsr",
+ .get = get_cpsr,
+ .put = put_cpsr,
+};
+
+static void cpu_pre_save(void *opaque)
+{
+ ARMCPU *cpu = opaque;
+
+ if (kvm_enabled()) {
+ if (!write_kvmstate_to_list(cpu)) {
+ /* This should never fail */
+ abort();
+ }
+ } else {
+ if (!write_cpustate_to_list(cpu)) {
+ /* This should never fail. */
+ abort();
+ }
+ }
+
+ cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
+ memcpy(cpu->cpreg_vmstate_indexes, cpu->cpreg_indexes,
+ cpu->cpreg_array_len * sizeof(uint64_t));
+ memcpy(cpu->cpreg_vmstate_values, cpu->cpreg_values,
+ cpu->cpreg_array_len * sizeof(uint64_t));
+}
+
+static int cpu_post_load(void *opaque, int version_id)
+{
+ ARMCPU *cpu = opaque;
+ int i, v;
+
+ /* Update the values list from the incoming migration data.
+ * Anything in the incoming data which we don't know about is
+ * a migration failure; anything we know about but the incoming
+ * data doesn't specify retains its current (reset) value.
+ * The indexes list remains untouched -- we only inspect the
+ * incoming migration index list so we can match the values array
+ * entries with the right slots in our own values array.
+ */
+
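+ /* Both index lists are kept sorted in ascending order of register index,
+ * so this loop is in effect a two-pointer merge of the two lists.
+ */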
+ for (i = 0, v = 0; i < cpu->cpreg_array_len
+ && v < cpu->cpreg_vmstate_array_len; i++) {
+ if (cpu->cpreg_vmstate_indexes[v] > cpu->cpreg_indexes[i]) {
+ /* register in our list but not incoming : skip it */
+ continue;
+ }
+ if (cpu->cpreg_vmstate_indexes[v] < cpu->cpreg_indexes[i]) {
+ /* register in their list but not ours: fail migration */
+ return -1;
+ }
+ /* matching register, copy the value over */
+ cpu->cpreg_values[i] = cpu->cpreg_vmstate_values[v];
+ v++;
+ }
+
+ if (kvm_enabled()) {
+ if (!write_list_to_kvmstate(cpu, KVM_PUT_FULL_STATE)) {
+ return -1;
+ }
+ /* Note that it's OK for the TCG side not to know about
+ * every register in the list; KVM is authoritative if
+ * we're using it.
+ */
+ write_list_to_cpustate(cpu);
+ } else {
+ if (!write_list_to_cpustate(cpu)) {
+ return -1;
+ }
+ }
+
+ hw_breakpoint_update_all(cpu);
+ hw_watchpoint_update_all(cpu);
+
+ return 0;
+}
+
+const VMStateDescription vmstate_arm_cpu = {
+ .name = "cpu",
+ .version_id = 22,
+ .minimum_version_id = 22,
+ .pre_save = cpu_pre_save,
+ .post_load = cpu_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(env.regs, ARMCPU, 16),
+ VMSTATE_UINT64_ARRAY(env.xregs, ARMCPU, 32),
+ VMSTATE_UINT64(env.pc, ARMCPU),
+ {
+ .name = "cpsr",
+ .version_id = 0,
+ .size = sizeof(uint32_t),
+ .info = &vmstate_cpsr,
+ .flags = VMS_SINGLE,
+ .offset = 0,
+ },
+ VMSTATE_UINT32(env.spsr, ARMCPU),
+ VMSTATE_UINT64_ARRAY(env.banked_spsr, ARMCPU, 8),
+ VMSTATE_UINT32_ARRAY(env.banked_r13, ARMCPU, 8),
+ VMSTATE_UINT32_ARRAY(env.banked_r14, ARMCPU, 8),
+ VMSTATE_UINT32_ARRAY(env.usr_regs, ARMCPU, 5),
+ VMSTATE_UINT32_ARRAY(env.fiq_regs, ARMCPU, 5),
+ VMSTATE_UINT64_ARRAY(env.elr_el, ARMCPU, 4),
+ VMSTATE_UINT64_ARRAY(env.sp_el, ARMCPU, 4),
+ /* The length-check must come before the arrays to avoid
+ * incoming data possibly overflowing the array.
+ */
+ VMSTATE_INT32_POSITIVE_LE(cpreg_vmstate_array_len, ARMCPU),
+ VMSTATE_VARRAY_INT32(cpreg_vmstate_indexes, ARMCPU,
+ cpreg_vmstate_array_len,
+ 0, vmstate_info_uint64, uint64_t),
+ VMSTATE_VARRAY_INT32(cpreg_vmstate_values, ARMCPU,
+ cpreg_vmstate_array_len,
+ 0, vmstate_info_uint64, uint64_t),
+ VMSTATE_UINT64(env.exclusive_addr, ARMCPU),
+ VMSTATE_UINT64(env.exclusive_val, ARMCPU),
+ VMSTATE_UINT64(env.exclusive_high, ARMCPU),
+ VMSTATE_UINT64(env.features, ARMCPU),
+ VMSTATE_UINT32(env.exception.syndrome, ARMCPU),
+ VMSTATE_UINT32(env.exception.fsr, ARMCPU),
+ VMSTATE_UINT64(env.exception.vaddress, ARMCPU),
+ VMSTATE_TIMER_PTR(gt_timer[GTIMER_PHYS], ARMCPU),
+ VMSTATE_TIMER_PTR(gt_timer[GTIMER_VIRT], ARMCPU),
+ VMSTATE_BOOL(powered_off, ARMCPU),
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_vfp,
+ &vmstate_iwmmxt,
+ &vmstate_m,
+ &vmstate_thumb2ee,
+ &vmstate_pmsav7,
+ NULL
+ }
+};
diff --git a/target/arm/monitor.c b/target/arm/monitor.c
new file mode 100644
index 0000000000..299cb80ae7
--- /dev/null
+++ b/target/arm/monitor.c
@@ -0,0 +1,83 @@
+/*
+ * QEMU monitor.c for ARM.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "qemu/osdep.h"
+#include "qmp-commands.h"
+#include "hw/boards.h"
+#include "kvm_arm.h"
+
+static GICCapability *gic_cap_new(int version)
+{
+ GICCapability *cap = g_new0(GICCapability, 1);
+ cap->version = version;
+ /* by default, support none */
+ cap->emulated = false;
+ cap->kernel = false;
+ return cap;
+}
+
+static GICCapabilityList *gic_cap_list_add(GICCapabilityList *head,
+ GICCapability *cap)
+{
+ GICCapabilityList *item = g_new0(GICCapabilityList, 1);
+ item->value = cap;
+ item->next = head;
+ return item;
+}
+
+static inline void gic_cap_kvm_probe(GICCapability *v2, GICCapability *v3)
+{
+#ifdef CONFIG_KVM
+ int fdarray[3];
+
+ if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, NULL)) {
+ return;
+ }
+
+ /* Test KVM GICv2 */
+ if (kvm_device_supported(fdarray[1], KVM_DEV_TYPE_ARM_VGIC_V2)) {
+ v2->kernel = true;
+ }
+
+ /* Test KVM GICv3 */
+ if (kvm_device_supported(fdarray[1], KVM_DEV_TYPE_ARM_VGIC_V3)) {
+ v3->kernel = true;
+ }
+
+ kvm_arm_destroy_scratch_host_vcpu(fdarray);
+#endif
+}
+
+GICCapabilityList *qmp_query_gic_capabilities(Error **errp)
+{
+ GICCapabilityList *head = NULL;
+ GICCapability *v2 = gic_cap_new(2), *v3 = gic_cap_new(3);
+
+ v2->emulated = true;
+ v3->emulated = true;
+
+ gic_cap_kvm_probe(v2, v3);
+
+ head = gic_cap_list_add(head, v2);
+ head = gic_cap_list_add(head, v3);
+
+ return head;
+}
diff --git a/target/arm/neon_helper.c b/target/arm/neon_helper.c
new file mode 100644
index 0000000000..ebdf7c9b10
--- /dev/null
+++ b/target/arm/neon_helper.c
@@ -0,0 +1,2242 @@
+/*
+ * ARM NEON vector operations.
+ *
+ * Copyright (c) 2007, 2008 CodeSourcery.
+ * Written by Paul Brook
+ *
+ * This code is licensed under the GNU GPL v2.
+ */
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+
+#define SIGNBIT (uint32_t)0x80000000
+#define SIGNBIT64 ((uint64_t)1 << 63)
+
+#define SET_QC() env->vfp.xregs[ARM_VFP_FPSCR] |= CPSR_Q
+
+#define NEON_TYPE1(name, type) \
+typedef struct \
+{ \
+ type v1; \
+} neon_##name;
+#ifdef HOST_WORDS_BIGENDIAN
+#define NEON_TYPE2(name, type) \
+typedef struct \
+{ \
+ type v2; \
+ type v1; \
+} neon_##name;
+#define NEON_TYPE4(name, type) \
+typedef struct \
+{ \
+ type v4; \
+ type v3; \
+ type v2; \
+ type v1; \
+} neon_##name;
+#else
+#define NEON_TYPE2(name, type) \
+typedef struct \
+{ \
+ type v1; \
+ type v2; \
+} neon_##name;
+#define NEON_TYPE4(name, type) \
+typedef struct \
+{ \
+ type v1; \
+ type v2; \
+ type v3; \
+ type v4; \
+} neon_##name;
+#endif
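+/* In both layouts v1 is the least significant lane of the packed uint32_t. */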
+
+NEON_TYPE4(s8, int8_t)
+NEON_TYPE4(u8, uint8_t)
+NEON_TYPE2(s16, int16_t)
+NEON_TYPE2(u16, uint16_t)
+NEON_TYPE1(s32, int32_t)
+NEON_TYPE1(u32, uint32_t)
+#undef NEON_TYPE4
+#undef NEON_TYPE2
+#undef NEON_TYPE1
+
+/* Copy from a uint32_t to a vector structure type. */
+#define NEON_UNPACK(vtype, dest, val) do { \
+ union { \
+ vtype v; \
+ uint32_t i; \
+ } conv_u; \
+ conv_u.i = (val); \
+ dest = conv_u.v; \
+ } while(0)
+
+/* Copy from a vector structure type to a uint32_t. */
+#define NEON_PACK(vtype, dest, val) do { \
+ union { \
+ vtype v; \
+ uint32_t i; \
+ } conv_u; \
+ conv_u.v = (val); \
+ dest = conv_u.i; \
+ } while(0)
+
+#define NEON_DO1 \
+ NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1);
+#define NEON_DO2 \
+ NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \
+ NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2);
+#define NEON_DO4 \
+ NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \
+ NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2); \
+ NEON_FN(vdest.v3, vsrc1.v3, vsrc2.v3); \
+ NEON_FN(vdest.v4, vsrc1.v4, vsrc2.v4);
+
+#define NEON_VOP_BODY(vtype, n) \
+{ \
+ uint32_t res; \
+ vtype vsrc1; \
+ vtype vsrc2; \
+ vtype vdest; \
+ NEON_UNPACK(vtype, vsrc1, arg1); \
+ NEON_UNPACK(vtype, vsrc2, arg2); \
+ NEON_DO##n; \
+ NEON_PACK(vtype, res, vdest); \
+ return res; \
+}
+
+#define NEON_VOP(name, vtype, n) \
+uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \
+NEON_VOP_BODY(vtype, n)
+
+#define NEON_VOP_ENV(name, vtype, n) \
+uint32_t HELPER(glue(neon_,name))(CPUARMState *env, uint32_t arg1, uint32_t arg2) \
+NEON_VOP_BODY(vtype, n)
+
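+/*
+ * Expansion sketch (illustrative): NEON_VOP(hadd_s8, neon_s8, 4) defines
+ * uint32_t HELPER(neon_hadd_s8)(uint32_t arg1, uint32_t arg2), which unpacks
+ * both arguments into four int8_t lanes, applies NEON_FN to each lane and
+ * packs the results back into a uint32_t.
+ */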
+/* Pairwise operations. */
+/* For 32-bit elements each segment only contains a single element, so
+ the elementwise and pairwise operations are the same. */
+#define NEON_PDO2 \
+ NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \
+ NEON_FN(vdest.v2, vsrc2.v1, vsrc2.v2);
+#define NEON_PDO4 \
+ NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \
+ NEON_FN(vdest.v2, vsrc1.v3, vsrc1.v4); \
+ NEON_FN(vdest.v3, vsrc2.v1, vsrc2.v2); \
+ NEON_FN(vdest.v4, vsrc2.v3, vsrc2.v4); \
+
+#define NEON_POP(name, vtype, n) \
+uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \
+{ \
+ uint32_t res; \
+ vtype vsrc1; \
+ vtype vsrc2; \
+ vtype vdest; \
+ NEON_UNPACK(vtype, vsrc1, arg1); \
+ NEON_UNPACK(vtype, vsrc2, arg2); \
+ NEON_PDO##n; \
+ NEON_PACK(vtype, res, vdest); \
+ return res; \
+}
+
+/* Unary operators. */
+#define NEON_VOP1(name, vtype, n) \
+uint32_t HELPER(glue(neon_,name))(uint32_t arg) \
+{ \
+ vtype vsrc1; \
+ vtype vdest; \
+ NEON_UNPACK(vtype, vsrc1, arg); \
+ NEON_DO##n; \
+ NEON_PACK(vtype, arg, vdest); \
+ return arg; \
+}
+
+
+#define NEON_USAT(dest, src1, src2, type) do { \
+ uint32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
+ if (tmp != (type)tmp) { \
+ SET_QC(); \
+ dest = ~0; \
+ } else { \
+ dest = tmp; \
+ }} while(0)
+#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
+NEON_VOP_ENV(qadd_u8, neon_u8, 4)
+#undef NEON_FN
+#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
+NEON_VOP_ENV(qadd_u16, neon_u16, 2)
+#undef NEON_FN
+#undef NEON_USAT
+
+uint32_t HELPER(neon_qadd_u32)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+ uint32_t res = a + b;
+ if (res < a) {
+ SET_QC();
+ res = ~0;
+ }
+ return res;
+}
+
+uint64_t HELPER(neon_qadd_u64)(CPUARMState *env, uint64_t src1, uint64_t src2)
+{
+ uint64_t res;
+
+ res = src1 + src2;
+ if (res < src1) {
+ SET_QC();
+ res = ~(uint64_t)0;
+ }
+ return res;
+}
+
+#define NEON_SSAT(dest, src1, src2, type) do { \
+ int32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
+ if (tmp != (type)tmp) { \
+ SET_QC(); \
+ if (src2 > 0) { \
+ tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \
+ } else { \
+ tmp = 1 << (sizeof(type) * 8 - 1); \
+ } \
+ } \
+ dest = tmp; \
+ } while(0)
+#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
+NEON_VOP_ENV(qadd_s8, neon_s8, 4)
+#undef NEON_FN
+#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
+NEON_VOP_ENV(qadd_s16, neon_s16, 2)
+#undef NEON_FN
+#undef NEON_SSAT
+
+uint32_t HELPER(neon_qadd_s32)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+ uint32_t res = a + b;
+ if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
+ SET_QC();
+ res = ~(((int32_t)a >> 31) ^ SIGNBIT);
+ }
+ return res;
+}
+
+uint64_t HELPER(neon_qadd_s64)(CPUARMState *env, uint64_t src1, uint64_t src2)
+{
+ uint64_t res;
+
+ res = src1 + src2;
+ if (((res ^ src1) & SIGNBIT64) && !((src1 ^ src2) & SIGNBIT64)) {
+ SET_QC();
+ res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;
+ }
+ return res;
+}
+
+/* Unsigned saturating accumulate of signed value
+ *
+ * Op1/Rn is treated as signed
+ * Op2/Rd is treated as unsigned
+ *
+ * Explicit casting is used to ensure the correct sign extension of
+ * inputs. The result is treated as an unsigned value and saturated as such.
+ *
+ * For the 8/16 bit cases we use a macro which expects signed intermediate
+ * variables va, vb and vr, and an unsigned 32 bit result value r.
+ */
+
+#define USATACC(bits, shift) \
+ do { \
+ va = sextract32(a, shift, bits); \
+ vb = extract32(b, shift, bits); \
+ vr = va + vb; \
+ if (vr > UINT##bits##_MAX) { \
+ SET_QC(); \
+ vr = UINT##bits##_MAX; \
+ } else if (vr < 0) { \
+ SET_QC(); \
+ vr = 0; \
+ } \
+ r = deposit32(r, shift, bits, vr); \
+ } while (0)
+
+uint32_t HELPER(neon_uqadd_s8)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+ int16_t va, vb, vr;
+ uint32_t r = 0;
+
+ USATACC(8, 0);
+ USATACC(8, 8);
+ USATACC(8, 16);
+ USATACC(8, 24);
+ return r;
+}
+
+uint32_t HELPER(neon_uqadd_s16)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+ int32_t va, vb, vr;
+ uint64_t r = 0;
+
+ USATACC(16, 0);
+ USATACC(16, 16);
+ return r;
+}
+
+#undef USATACC
+
+uint32_t HELPER(neon_uqadd_s32)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+ int64_t va = (int32_t)a;
+ int64_t vb = (uint32_t)b;
+ int64_t vr = va + vb;
+ if (vr > UINT32_MAX) {
+ SET_QC();
+ vr = UINT32_MAX;
+ } else if (vr < 0) {
+ SET_QC();
+ vr = 0;
+ }
+ return vr;
+}
+
+uint64_t HELPER(neon_uqadd_s64)(CPUARMState *env, uint64_t a, uint64_t b)
+{
+ uint64_t res;
+ res = a + b;
+ /* We only need to look at the pattern of SIGN bits to detect
+ * +ve/-ve saturation
+ */
+ if (~a & b & ~res & SIGNBIT64) {
+ SET_QC();
+ res = UINT64_MAX;
+ } else if (a & ~b & res & SIGNBIT64) {
+ SET_QC();
+ res = 0;
+ }
+ return res;
+}
+
+/* Signed saturating accumulate of unsigned value
+ *
+ * Op1/Rn is treated as unsigned
+ * Op2/Rd is treated as signed
+ *
+ * The result is treated as a signed value and saturated as such.
+ *
+ * For the 8/16 bit cases we use a macro which expects signed intermediate
+ * variables va, vb and vr, and an unsigned 32 bit result value r.
+ */
+
+#define SSATACC(bits, shift) \
+ do { \
+ va = extract32(a, shift, bits); \
+ vb = sextract32(b, shift, bits); \
+ vr = va + vb; \
+ if (vr > INT##bits##_MAX) { \
+ SET_QC(); \
+ vr = INT##bits##_MAX; \
+ } else if (vr < INT##bits##_MIN) { \
+ SET_QC(); \
+ vr = INT##bits##_MIN; \
+ } \
+ r = deposit32(r, shift, bits, vr); \
+ } while (0)
+
+uint32_t HELPER(neon_sqadd_u8)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+ int16_t va, vb, vr;
+ uint32_t r = 0;
+
+ SSATACC(8, 0);
+ SSATACC(8, 8);
+ SSATACC(8, 16);
+ SSATACC(8, 24);
+ return r;
+}
+
+uint32_t HELPER(neon_sqadd_u16)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+ int32_t va, vb, vr;
+ uint32_t r = 0;
+
+ SSATACC(16, 0);
+ SSATACC(16, 16);
+
+ return r;
+}
+
+#undef SSATACC
+
+uint32_t HELPER(neon_sqadd_u32)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+ int64_t res;
+ int64_t op1 = (uint32_t)a;
+ int64_t op2 = (int32_t)b;
+ res = op1 + op2;
+ if (res > INT32_MAX) {
+ SET_QC();
+ res = INT32_MAX;
+ } else if (res < INT32_MIN) {
+ SET_QC();
+ res = INT32_MIN;
+ }
+ return res;
+}
+
+uint64_t HELPER(neon_sqadd_u64)(CPUARMState *env, uint64_t a, uint64_t b)
+{
+ uint64_t res;
+ res = a + b;
+ /* We only need to look at the pattern of SIGN bits to detect an overflow */
+ if (((a & res)
+ | (~b & res)
+ | (a & ~b)) & SIGNBIT64) {
+ SET_QC();
+ res = INT64_MAX;
+ }
+ return res;
+}
+
+
+#define NEON_USAT(dest, src1, src2, type) do { \
+ uint32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
+ if (tmp != (type)tmp) { \
+ SET_QC(); \
+ dest = 0; \
+ } else { \
+ dest = tmp; \
+ }} while(0)
+#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
+NEON_VOP_ENV(qsub_u8, neon_u8, 4)
+#undef NEON_FN
+#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
+NEON_VOP_ENV(qsub_u16, neon_u16, 2)
+#undef NEON_FN
+#undef NEON_USAT
+
+uint32_t HELPER(neon_qsub_u32)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+ uint32_t res = a - b;
+ if (res > a) {
+ SET_QC();
+ res = 0;
+ }
+ return res;
+}
+
+uint64_t HELPER(neon_qsub_u64)(CPUARMState *env, uint64_t src1, uint64_t src2)
+{
+ uint64_t res;
+
+ if (src1 < src2) {
+ SET_QC();
+ res = 0;
+ } else {
+ res = src1 - src2;
+ }
+ return res;
+}
+
+#define NEON_SSAT(dest, src1, src2, type) do { \
+ int32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
+ if (tmp != (type)tmp) { \
+ SET_QC(); \
+ if (src2 < 0) { \
+ tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \
+ } else { \
+ tmp = 1 << (sizeof(type) * 8 - 1); \
+ } \
+ } \
+ dest = tmp; \
+ } while(0)
+#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
+NEON_VOP_ENV(qsub_s8, neon_s8, 4)
+#undef NEON_FN
+#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
+NEON_VOP_ENV(qsub_s16, neon_s16, 2)
+#undef NEON_FN
+#undef NEON_SSAT
+
+uint32_t HELPER(neon_qsub_s32)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+ uint32_t res = a - b;
+ if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
+ SET_QC();
+ res = ~(((int32_t)a >> 31) ^ SIGNBIT);
+ }
+ return res;
+}
+
+uint64_t HELPER(neon_qsub_s64)(CPUARMState *env, uint64_t src1, uint64_t src2)
+{
+ uint64_t res;
+
+ res = src1 - src2;
+ if (((res ^ src1) & SIGNBIT64) && ((src1 ^ src2) & SIGNBIT64)) {
+ SET_QC();
+ res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;
+ }
+ return res;
+}
+
+#define NEON_FN(dest, src1, src2) dest = (src1 + src2) >> 1
+NEON_VOP(hadd_s8, neon_s8, 4)
+NEON_VOP(hadd_u8, neon_u8, 4)
+NEON_VOP(hadd_s16, neon_s16, 2)
+NEON_VOP(hadd_u16, neon_u16, 2)
+#undef NEON_FN
+
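+/* For the 32-bit case the intermediate sum may not fit in 32 bits, so halve
+ * each operand first and add back the bit lost when both low bits are set.
+ */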
+int32_t HELPER(neon_hadd_s32)(int32_t src1, int32_t src2)
+{
+ int32_t dest;
+
+ dest = (src1 >> 1) + (src2 >> 1);
+ if (src1 & src2 & 1)
+ dest++;
+ return dest;
+}
+
+uint32_t HELPER(neon_hadd_u32)(uint32_t src1, uint32_t src2)
+{
+ uint32_t dest;
+
+ dest = (src1 >> 1) + (src2 >> 1);
+ if (src1 & src2 & 1)
+ dest++;
+ return dest;
+}
+
+#define NEON_FN(dest, src1, src2) dest = (src1 + src2 + 1) >> 1
+NEON_VOP(rhadd_s8, neon_s8, 4)
+NEON_VOP(rhadd_u8, neon_u8, 4)
+NEON_VOP(rhadd_s16, neon_s16, 2)
+NEON_VOP(rhadd_u16, neon_u16, 2)
+#undef NEON_FN
+
+int32_t HELPER(neon_rhadd_s32)(int32_t src1, int32_t src2)
+{
+ int32_t dest;
+
+ dest = (src1 >> 1) + (src2 >> 1);
+ if ((src1 | src2) & 1)
+ dest++;
+ return dest;
+}
+
+uint32_t HELPER(neon_rhadd_u32)(uint32_t src1, uint32_t src2)
+{
+ uint32_t dest;
+
+ dest = (src1 >> 1) + (src2 >> 1);
+ if ((src1 | src2) & 1)
+ dest++;
+ return dest;
+}
+
+#define NEON_FN(dest, src1, src2) dest = (src1 - src2) >> 1
+NEON_VOP(hsub_s8, neon_s8, 4)
+NEON_VOP(hsub_u8, neon_u8, 4)
+NEON_VOP(hsub_s16, neon_s16, 2)
+NEON_VOP(hsub_u16, neon_u16, 2)
+#undef NEON_FN
+
+int32_t HELPER(neon_hsub_s32)(int32_t src1, int32_t src2)
+{
+ int32_t dest;
+
+ dest = (src1 >> 1) - (src2 >> 1);
+ if ((~src1) & src2 & 1)
+ dest--;
+ return dest;
+}
+
+uint32_t HELPER(neon_hsub_u32)(uint32_t src1, uint32_t src2)
+{
+ uint32_t dest;
+
+ dest = (src1 >> 1) - (src2 >> 1);
+ if ((~src1) & src2 & 1)
+ dest--;
+ return dest;
+}
+
+#define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? ~0 : 0
+NEON_VOP(cgt_s8, neon_s8, 4)
+NEON_VOP(cgt_u8, neon_u8, 4)
+NEON_VOP(cgt_s16, neon_s16, 2)
+NEON_VOP(cgt_u16, neon_u16, 2)
+NEON_VOP(cgt_s32, neon_s32, 1)
+NEON_VOP(cgt_u32, neon_u32, 1)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) dest = (src1 >= src2) ? ~0 : 0
+NEON_VOP(cge_s8, neon_s8, 4)
+NEON_VOP(cge_u8, neon_u8, 4)
+NEON_VOP(cge_s16, neon_s16, 2)
+NEON_VOP(cge_u16, neon_u16, 2)
+NEON_VOP(cge_s32, neon_s32, 1)
+NEON_VOP(cge_u32, neon_u32, 1)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) dest = (src1 < src2) ? src1 : src2
+NEON_VOP(min_s8, neon_s8, 4)
+NEON_VOP(min_u8, neon_u8, 4)
+NEON_VOP(min_s16, neon_s16, 2)
+NEON_VOP(min_u16, neon_u16, 2)
+NEON_VOP(min_s32, neon_s32, 1)
+NEON_VOP(min_u32, neon_u32, 1)
+NEON_POP(pmin_s8, neon_s8, 4)
+NEON_POP(pmin_u8, neon_u8, 4)
+NEON_POP(pmin_s16, neon_s16, 2)
+NEON_POP(pmin_u16, neon_u16, 2)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? src1 : src2
+NEON_VOP(max_s8, neon_s8, 4)
+NEON_VOP(max_u8, neon_u8, 4)
+NEON_VOP(max_s16, neon_s16, 2)
+NEON_VOP(max_u16, neon_u16, 2)
+NEON_VOP(max_s32, neon_s32, 1)
+NEON_VOP(max_u32, neon_u32, 1)
+NEON_POP(pmax_s8, neon_s8, 4)
+NEON_POP(pmax_u8, neon_u8, 4)
+NEON_POP(pmax_s16, neon_s16, 2)
+NEON_POP(pmax_u16, neon_u16, 2)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) \
+ dest = (src1 > src2) ? (src1 - src2) : (src2 - src1)
+NEON_VOP(abd_s8, neon_s8, 4)
+NEON_VOP(abd_u8, neon_u8, 4)
+NEON_VOP(abd_s16, neon_s16, 2)
+NEON_VOP(abd_u16, neon_u16, 2)
+NEON_VOP(abd_s32, neon_s32, 1)
+NEON_VOP(abd_u32, neon_u32, 1)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) do { \
+ int8_t tmp; \
+ tmp = (int8_t)src2; \
+ if (tmp >= (ssize_t)sizeof(src1) * 8 || \
+ tmp <= -(ssize_t)sizeof(src1) * 8) { \
+ dest = 0; \
+ } else if (tmp < 0) { \
+ dest = src1 >> -tmp; \
+ } else { \
+ dest = src1 << tmp; \
+ }} while (0)
+NEON_VOP(shl_u8, neon_u8, 4)
+NEON_VOP(shl_u16, neon_u16, 2)
+NEON_VOP(shl_u32, neon_u32, 1)
+#undef NEON_FN
+
+uint64_t HELPER(neon_shl_u64)(uint64_t val, uint64_t shiftop)
+{
+ int8_t shift = (int8_t)shiftop;
+ if (shift >= 64 || shift <= -64) {
+ val = 0;
+ } else if (shift < 0) {
+ val >>= -shift;
+ } else {
+ val <<= shift;
+ }
+ return val;
+}
+
+#define NEON_FN(dest, src1, src2) do { \
+ int8_t tmp; \
+ tmp = (int8_t)src2; \
+ if (tmp >= (ssize_t)sizeof(src1) * 8) { \
+ dest = 0; \
+ } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
+ dest = src1 >> (sizeof(src1) * 8 - 1); \
+ } else if (tmp < 0) { \
+ dest = src1 >> -tmp; \
+ } else { \
+ dest = src1 << tmp; \
+ }} while (0)
+NEON_VOP(shl_s8, neon_s8, 4)
+NEON_VOP(shl_s16, neon_s16, 2)
+NEON_VOP(shl_s32, neon_s32, 1)
+#undef NEON_FN
+
+uint64_t HELPER(neon_shl_s64)(uint64_t valop, uint64_t shiftop)
+{
+ int8_t shift = (int8_t)shiftop;
+ int64_t val = valop;
+ if (shift >= 64) {
+ val = 0;
+ } else if (shift <= -64) {
+ val >>= 63;
+ } else if (shift < 0) {
+ val >>= -shift;
+ } else {
+ val <<= shift;
+ }
+ return val;
+}
+
+#define NEON_FN(dest, src1, src2) do { \
+ int8_t tmp; \
+ tmp = (int8_t)src2; \
+ if ((tmp >= (ssize_t)sizeof(src1) * 8) \
+ || (tmp <= -(ssize_t)sizeof(src1) * 8)) { \
+ dest = 0; \
+ } else if (tmp < 0) { \
+ dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
+ } else { \
+ dest = src1 << tmp; \
+ }} while (0)
+NEON_VOP(rshl_s8, neon_s8, 4)
+NEON_VOP(rshl_s16, neon_s16, 2)
+#undef NEON_FN
+
+/* The addition of the rounding constant may overflow, so we use an
+ * intermediate 64 bit accumulator. */
+uint32_t HELPER(neon_rshl_s32)(uint32_t valop, uint32_t shiftop)
+{
+ int32_t dest;
+ int32_t val = (int32_t)valop;
+ int8_t shift = (int8_t)shiftop;
+ if ((shift >= 32) || (shift <= -32)) {
+ dest = 0;
+ } else if (shift < 0) {
+ int64_t big_dest = ((int64_t)val + (1 << (-1 - shift)));
+ dest = big_dest >> -shift;
+ } else {
+ dest = val << shift;
+ }
+ return dest;
+}
+
+/* Handling addition overflow with 64 bit input values is more
+ * tricky than with 32 bit values. */
+uint64_t HELPER(neon_rshl_s64)(uint64_t valop, uint64_t shiftop)
+{
+ int8_t shift = (int8_t)shiftop;
+ int64_t val = valop;
+ if ((shift >= 64) || (shift <= -64)) {
+ val = 0;
+ } else if (shift < 0) {
+ val >>= (-shift - 1);
+ if (val == INT64_MAX) {
+ /* In this case, it means that the rounding constant is 1,
+ * and the addition would overflow. Return the actual
+ * result directly. */
+ val = 0x4000000000000000LL;
+ } else {
+ val++;
+ val >>= 1;
+ }
+ } else {
+ val <<= shift;
+ }
+ return val;
+}
+
+#define NEON_FN(dest, src1, src2) do { \
+ int8_t tmp; \
+ tmp = (int8_t)src2; \
+ if (tmp >= (ssize_t)sizeof(src1) * 8 || \
+ tmp < -(ssize_t)sizeof(src1) * 8) { \
+ dest = 0; \
+ } else if (tmp == -(ssize_t)sizeof(src1) * 8) { \
+ dest = src1 >> (-tmp - 1); \
+ } else if (tmp < 0) { \
+ dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
+ } else { \
+ dest = src1 << tmp; \
+ }} while (0)
+NEON_VOP(rshl_u8, neon_u8, 4)
+NEON_VOP(rshl_u16, neon_u16, 2)
+#undef NEON_FN
+
+/* The addition of the rounding constant may overflow, so we use an
+ * intermediate 64 bit accumulator. */
+uint32_t HELPER(neon_rshl_u32)(uint32_t val, uint32_t shiftop)
+{
+ uint32_t dest;
+ int8_t shift = (int8_t)shiftop;
+ if (shift >= 32 || shift < -32) {
+ dest = 0;
+ } else if (shift == -32) {
+ dest = val >> 31;
+ } else if (shift < 0) {
+ uint64_t big_dest = ((uint64_t)val + (1 << (-1 - shift)));
+ dest = big_dest >> -shift;
+ } else {
+ dest = val << shift;
+ }
+ return dest;
+}
+
+/* Handling addition overflow with 64 bit input values is more
+ * tricky than with 32 bit values. */
+uint64_t HELPER(neon_rshl_u64)(uint64_t val, uint64_t shiftop)
+{
+ int8_t shift = (uint8_t)shiftop;
+ if (shift >= 64 || shift < -64) {
+ val = 0;
+ } else if (shift == -64) {
+ /* Rounding a 1-bit result just preserves that bit. */
+ val >>= 63;
+ } else if (shift < 0) {
+ val >>= (-shift - 1);
+ if (val == UINT64_MAX) {
+ /* In this case, it means that the rounding constant is 1,
+ * and the addition would overflow. Return the actual
+ * result directly. */
+ val = 0x8000000000000000ULL;
+ } else {
+ val++;
+ val >>= 1;
+ }
+ } else {
+ val <<= shift;
+ }
+ return val;
+}
+
+#define NEON_FN(dest, src1, src2) do { \
+ int8_t tmp; \
+ tmp = (int8_t)src2; \
+ if (tmp >= (ssize_t)sizeof(src1) * 8) { \
+ if (src1) { \
+ SET_QC(); \
+ dest = ~0; \
+ } else { \
+ dest = 0; \
+ } \
+ } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
+ dest = 0; \
+ } else if (tmp < 0) { \
+ dest = src1 >> -tmp; \
+ } else { \
+ dest = src1 << tmp; \
+ if ((dest >> tmp) != src1) { \
+ SET_QC(); \
+ dest = ~0; \
+ } \
+ }} while (0)
+NEON_VOP_ENV(qshl_u8, neon_u8, 4)
+NEON_VOP_ENV(qshl_u16, neon_u16, 2)
+NEON_VOP_ENV(qshl_u32, neon_u32, 1)
+#undef NEON_FN
+
+uint64_t HELPER(neon_qshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop)
+{
+ int8_t shift = (int8_t)shiftop;
+ if (shift >= 64) {
+ if (val) {
+ val = ~(uint64_t)0;
+ SET_QC();
+ }
+ } else if (shift <= -64) {
+ val = 0;
+ } else if (shift < 0) {
+ val >>= -shift;
+ } else {
+ uint64_t tmp = val;
+ val <<= shift;
+ if ((val >> shift) != tmp) {
+ SET_QC();
+ val = ~(uint64_t)0;
+ }
+ }
+ return val;
+}
+
+#define NEON_FN(dest, src1, src2) do { \
+ int8_t tmp; \
+ tmp = (int8_t)src2; \
+ if (tmp >= (ssize_t)sizeof(src1) * 8) { \
+ if (src1) { \
+ SET_QC(); \
+ dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
+ if (src1 > 0) { \
+ dest--; \
+ } \
+ } else { \
+ dest = src1; \
+ } \
+ } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
+ dest = src1 >> 31; \
+ } else if (tmp < 0) { \
+ dest = src1 >> -tmp; \
+ } else { \
+ dest = src1 << tmp; \
+ if ((dest >> tmp) != src1) { \
+ SET_QC(); \
+ dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
+ if (src1 > 0) { \
+ dest--; \
+ } \
+ } \
+ }} while (0)
+NEON_VOP_ENV(qshl_s8, neon_s8, 4)
+NEON_VOP_ENV(qshl_s16, neon_s16, 2)
+NEON_VOP_ENV(qshl_s32, neon_s32, 1)
+#undef NEON_FN
+
+uint64_t HELPER(neon_qshl_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop)
+{
+ int8_t shift = (uint8_t)shiftop;
+ int64_t val = valop;
+ if (shift >= 64) {
+ if (val) {
+ SET_QC();
+ val = (val >> 63) ^ ~SIGNBIT64;
+ }
+ } else if (shift <= -64) {
+ val >>= 63;
+ } else if (shift < 0) {
+ val >>= -shift;
+ } else {
+ int64_t tmp = val;
+ val <<= shift;
+ if ((val >> shift) != tmp) {
+ SET_QC();
+ val = (tmp >> 63) ^ ~SIGNBIT64;
+ }
+ }
+ return val;
+}
+
+#define NEON_FN(dest, src1, src2) do { \
+ if (src1 & (1 << (sizeof(src1) * 8 - 1))) { \
+ SET_QC(); \
+ dest = 0; \
+ } else { \
+ int8_t tmp; \
+ tmp = (int8_t)src2; \
+ if (tmp >= (ssize_t)sizeof(src1) * 8) { \
+ if (src1) { \
+ SET_QC(); \
+ dest = ~0; \
+ } else { \
+ dest = 0; \
+ } \
+ } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
+ dest = 0; \
+ } else if (tmp < 0) { \
+ dest = src1 >> -tmp; \
+ } else { \
+ dest = src1 << tmp; \
+ if ((dest >> tmp) != src1) { \
+ SET_QC(); \
+ dest = ~0; \
+ } \
+ } \
+ }} while (0)
+NEON_VOP_ENV(qshlu_s8, neon_u8, 4)
+NEON_VOP_ENV(qshlu_s16, neon_u16, 2)
+#undef NEON_FN
+
+uint32_t HELPER(neon_qshlu_s32)(CPUARMState *env, uint32_t valop, uint32_t shiftop)
+{
+ if ((int32_t)valop < 0) {
+ SET_QC();
+ return 0;
+ }
+ return helper_neon_qshl_u32(env, valop, shiftop);
+}
+
+uint64_t HELPER(neon_qshlu_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop)
+{
+ if ((int64_t)valop < 0) {
+ SET_QC();
+ return 0;
+ }
+ return helper_neon_qshl_u64(env, valop, shiftop);
+}
+
+#define NEON_FN(dest, src1, src2) do { \
+ int8_t tmp; \
+ tmp = (int8_t)src2; \
+ if (tmp >= (ssize_t)sizeof(src1) * 8) { \
+ if (src1) { \
+ SET_QC(); \
+ dest = ~0; \
+ } else { \
+ dest = 0; \
+ } \
+ } else if (tmp < -(ssize_t)sizeof(src1) * 8) { \
+ dest = 0; \
+ } else if (tmp == -(ssize_t)sizeof(src1) * 8) { \
+ dest = src1 >> (sizeof(src1) * 8 - 1); \
+ } else if (tmp < 0) { \
+ dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
+ } else { \
+ dest = src1 << tmp; \
+ if ((dest >> tmp) != src1) { \
+ SET_QC(); \
+ dest = ~0; \
+ } \
+ }} while (0)
+NEON_VOP_ENV(qrshl_u8, neon_u8, 4)
+NEON_VOP_ENV(qrshl_u16, neon_u16, 2)
+#undef NEON_FN
+
+/* The addition of the rounding constant may overflow, so we use an
+ * intermediate 64 bit accumulator. */
+uint32_t HELPER(neon_qrshl_u32)(CPUARMState *env, uint32_t val, uint32_t shiftop)
+{
+ uint32_t dest;
+ int8_t shift = (int8_t)shiftop;
+ if (shift >= 32) {
+ if (val) {
+ SET_QC();
+ dest = ~0;
+ } else {
+ dest = 0;
+ }
+ } else if (shift < -32) {
+ dest = 0;
+ } else if (shift == -32) {
+ dest = val >> 31;
+ } else if (shift < 0) {
+ uint64_t big_dest = ((uint64_t)val + (1 << (-1 - shift)));
+ dest = big_dest >> -shift;
+ } else {
+ dest = val << shift;
+ if ((dest >> shift) != val) {
+ SET_QC();
+ dest = ~0;
+ }
+ }
+ return dest;
+}
+
+/* Handling addition overflow with 64 bit input values is more
+ * tricky than with 32 bit values. */
+uint64_t HELPER(neon_qrshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop)
+{
+ int8_t shift = (int8_t)shiftop;
+ if (shift >= 64) {
+ if (val) {
+ SET_QC();
+ val = ~0;
+ }
+ } else if (shift < -64) {
+ val = 0;
+ } else if (shift == -64) {
+ val >>= 63;
+ } else if (shift < 0) {
+ val >>= (-shift - 1);
+ if (val == UINT64_MAX) {
+ /* In this case, it means that the rounding constant is 1,
+ * and the addition would overflow. Return the actual
+ * result directly. */
+ val = 0x8000000000000000ULL;
+ } else {
+ val++;
+ val >>= 1;
+ }
+ } else {
+ uint64_t tmp = val;
+ val <<= shift;
+ if ((val >> shift) != tmp) {
+ SET_QC();
+ val = ~0;
+ }
+ }
+ return val;
+}
+
+#define NEON_FN(dest, src1, src2) do { \
+ int8_t tmp; \
+ tmp = (int8_t)src2; \
+ if (tmp >= (ssize_t)sizeof(src1) * 8) { \
+ if (src1) { \
+ SET_QC(); \
+ dest = (typeof(dest))(1 << (sizeof(src1) * 8 - 1)); \
+ if (src1 > 0) { \
+ dest--; \
+ } \
+ } else { \
+ dest = 0; \
+ } \
+ } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
+ dest = 0; \
+ } else if (tmp < 0) { \
+ dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
+ } else { \
+ dest = src1 << tmp; \
+ if ((dest >> tmp) != src1) { \
+ SET_QC(); \
+ dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
+ if (src1 > 0) { \
+ dest--; \
+ } \
+ } \
+ }} while (0)
+NEON_VOP_ENV(qrshl_s8, neon_s8, 4)
+NEON_VOP_ENV(qrshl_s16, neon_s16, 2)
+#undef NEON_FN
+
+/* The addition of the rounding constant may overflow, so we use an
+ * intermediate 64 bit accumulator. */
+uint32_t HELPER(neon_qrshl_s32)(CPUARMState *env, uint32_t valop, uint32_t shiftop)
+{
+ int32_t dest;
+ int32_t val = (int32_t)valop;
+ int8_t shift = (int8_t)shiftop;
+ if (shift >= 32) {
+ if (val) {
+ SET_QC();
+ dest = (val >> 31) ^ ~SIGNBIT;
+ } else {
+ dest = 0;
+ }
+ } else if (shift <= -32) {
+ dest = 0;
+ } else if (shift < 0) {
+ int64_t big_dest = ((int64_t)val + (1 << (-1 - shift)));
+ dest = big_dest >> -shift;
+ } else {
+ dest = val << shift;
+ if ((dest >> shift) != val) {
+ SET_QC();
+ dest = (val >> 31) ^ ~SIGNBIT;
+ }
+ }
+ return dest;
+}
+
+/* Handling addition overflow with 64 bit input values is more
+ * tricky than with 32 bit values. */
+uint64_t HELPER(neon_qrshl_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop)
+{
+ int8_t shift = (uint8_t)shiftop;
+ int64_t val = valop;
+
+ if (shift >= 64) {
+ if (val) {
+ SET_QC();
+ val = (val >> 63) ^ ~SIGNBIT64;
+ }
+ } else if (shift <= -64) {
+ val = 0;
+ } else if (shift < 0) {
+ val >>= (-shift - 1);
+ if (val == INT64_MAX) {
+ /* In this case, it means that the rounding constant is 1,
+ * and the addition would overflow. Return the actual
+ * result directly. */
+ val = 0x4000000000000000ULL;
+ } else {
+ val++;
+ val >>= 1;
+ }
+ } else {
+ int64_t tmp = val;
+ val <<= shift;
+ if ((val >> shift) != tmp) {
+ SET_QC();
+ val = (tmp >> 63) ^ ~SIGNBIT64;
+ }
+ }
+ return val;
+}
+
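+/* Lane-wise addition of packed lanes without letting a carry cross a
+ * lane boundary: clear the top bit of each lane so the per-lane sum
+ * cannot overflow the lane, add, then XOR the saved top bits back in
+ * (the top bit of a sum is the XOR of the operand top bits and the
+ * carry out of the bits below).  E.g. byte lanes 0xff + 0x01:
+ * mask = 0x80, 0x7f + 0x01 = 0x80, and 0x80 ^ 0x80 = 0x00, wrapping
+ * within the lane as expected. */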
+uint32_t HELPER(neon_add_u8)(uint32_t a, uint32_t b)
+{
+ uint32_t mask;
+ mask = (a ^ b) & 0x80808080u;
+ a &= ~0x80808080u;
+ b &= ~0x80808080u;
+ return (a + b) ^ mask;
+}
+
+uint32_t HELPER(neon_add_u16)(uint32_t a, uint32_t b)
+{
+ uint32_t mask;
+ mask = (a ^ b) & 0x80008000u;
+ a &= ~0x80008000u;
+ b &= ~0x80008000u;
+ return (a + b) ^ mask;
+}
+
+#define NEON_FN(dest, src1, src2) dest = src1 + src2
+NEON_POP(padd_u8, neon_u8, 4)
+NEON_POP(padd_u16, neon_u16, 2)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) dest = src1 - src2
+NEON_VOP(sub_u8, neon_u8, 4)
+NEON_VOP(sub_u16, neon_u16, 2)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) dest = src1 * src2
+NEON_VOP(mul_u8, neon_u8, 4)
+NEON_VOP(mul_u16, neon_u16, 2)
+#undef NEON_FN
+
+/* Polynomial multiplication is like integer multiplication except the
+ partial products are XORed, not added. */
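+/* E.g. in GF(2) 0x03 * 0x03: (x + 1)(x + 1) = x^2 + 1, so the result
+ * is 0x05 rather than the integer product 0x09. */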
+uint32_t HELPER(neon_mul_p8)(uint32_t op1, uint32_t op2)
+{
+ uint32_t mask;
+ uint32_t result;
+ result = 0;
+ while (op1) {
+ mask = 0;
+ if (op1 & 1)
+ mask |= 0xff;
+ if (op1 & (1 << 8))
+ mask |= (0xff << 8);
+ if (op1 & (1 << 16))
+ mask |= (0xff << 16);
+ if (op1 & (1 << 24))
+ mask |= (0xff << 24);
+ result ^= op2 & mask;
+ op1 = (op1 >> 1) & 0x7f7f7f7f;
+ op2 = (op2 << 1) & 0xfefefefe;
+ }
+ return result;
+}
+
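+/* The widening copy of op2 below places each byte of op2 in its own
+ * 16 bit lane; since the product of two 8 bit polynomials is at most
+ * 15 bits wide, the shifted partial products of adjacent lanes can
+ * never overlap. */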
+uint64_t HELPER(neon_mull_p8)(uint32_t op1, uint32_t op2)
+{
+ uint64_t result = 0;
+ uint64_t mask;
+ uint64_t op2ex = op2;
+ op2ex = (op2ex & 0xff) |
+ ((op2ex & 0xff00) << 8) |
+ ((op2ex & 0xff0000) << 16) |
+ ((op2ex & 0xff000000) << 24);
+ while (op1) {
+ mask = 0;
+ if (op1 & 1) {
+ mask |= 0xffff;
+ }
+ if (op1 & (1 << 8)) {
+ mask |= (0xffffU << 16);
+ }
+ if (op1 & (1 << 16)) {
+ mask |= (0xffffULL << 32);
+ }
+ if (op1 & (1 << 24)) {
+ mask |= (0xffffULL << 48);
+ }
+ result ^= op2ex & mask;
+ op1 = (op1 >> 1) & 0x7f7f7f7f;
+ op2ex <<= 1;
+ }
+ return result;
+}
+
+#define NEON_FN(dest, src1, src2) dest = (src1 & src2) ? -1 : 0
+NEON_VOP(tst_u8, neon_u8, 4)
+NEON_VOP(tst_u16, neon_u16, 2)
+NEON_VOP(tst_u32, neon_u32, 1)
+#undef NEON_FN
+
+#define NEON_FN(dest, src1, src2) dest = (src1 == src2) ? -1 : 0
+NEON_VOP(ceq_u8, neon_u8, 4)
+NEON_VOP(ceq_u16, neon_u16, 2)
+NEON_VOP(ceq_u32, neon_u32, 1)
+#undef NEON_FN
+
+#define NEON_FN(dest, src, dummy) dest = (src < 0) ? -src : src
+NEON_VOP1(abs_s8, neon_s8, 4)
+NEON_VOP1(abs_s16, neon_s16, 2)
+#undef NEON_FN
+
+/* Count Leading Sign/Zero Bits. */
+static inline int do_clz8(uint8_t x)
+{
+ int n;
+ for (n = 8; x; n--)
+ x >>= 1;
+ return n;
+}
+
+static inline int do_clz16(uint16_t x)
+{
+ int n;
+ for (n = 16; x; n--)
+ x >>= 1;
+ return n;
+}
+
+#define NEON_FN(dest, src, dummy) dest = do_clz8(src)
+NEON_VOP1(clz_u8, neon_u8, 4)
+#undef NEON_FN
+
+#define NEON_FN(dest, src, dummy) dest = do_clz16(src)
+NEON_VOP1(clz_u16, neon_u16, 2)
+#undef NEON_FN
+
+#define NEON_FN(dest, src, dummy) dest = do_clz8((src < 0) ? ~src : src) - 1
+NEON_VOP1(cls_s8, neon_s8, 4)
+#undef NEON_FN
+
+#define NEON_FN(dest, src, dummy) dest = do_clz16((src < 0) ? ~src : src) - 1
+NEON_VOP1(cls_s16, neon_s16, 2)
+#undef NEON_FN
+
+uint32_t HELPER(neon_cls_s32)(uint32_t x)
+{
+ int count;
+ if ((int32_t)x < 0)
+ x = ~x;
+ for (count = 32; x; count--)
+ x = x >> 1;
+ return count - 1;
+}
+
+/* Bit count. */
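+/* Classic SWAR reduction: sum adjacent 1 bit fields, then 2 bit
+ * fields, then 4 bit fields, so each byte ends up holding its own
+ * population count.  E.g. a byte of 0x0f becomes 0x0a, then 0x04,
+ * then 0x04 (four bits set). */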
+uint32_t HELPER(neon_cnt_u8)(uint32_t x)
+{
+ x = (x & 0x55555555) + ((x >> 1) & 0x55555555);
+ x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
+ x = (x & 0x0f0f0f0f) + ((x >> 4) & 0x0f0f0f0f);
+ return x;
+}
+
+/* Reverse bits in each 8 bit word */
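+/* First swap the two nibbles of each byte, then move each bit of the
+ * nibbles into its mirrored position.  E.g. 0x01 -> 0x10 after the
+ * nibble swap, then bit 4 moves to bit 7, giving 0x80. */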
+uint32_t HELPER(neon_rbit_u8)(uint32_t x)
+{
+ x = ((x & 0xf0f0f0f0) >> 4)
+ | ((x & 0x0f0f0f0f) << 4);
+ x = ((x & 0x88888888) >> 3)
+ | ((x & 0x44444444) >> 1)
+ | ((x & 0x22222222) << 1)
+ | ((x & 0x11111111) << 3);
+ return x;
+}
+
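+/* The doubled product only overflows for (-0x8000) * (-0x8000), where
+ * 2 * 0x40000000 is not representable; (tmp ^ (tmp << 1)) & SIGNBIT
+ * catches exactly that case.  E.g. tmp = 0x40000000 gives
+ * tmp ^ (tmp << 1) = 0xc0000000, so the result saturates to
+ * 0x7fffffff before the high half (0x7fff) is taken. */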
+#define NEON_QDMULH16(dest, src1, src2, round) do { \
+ uint32_t tmp = (int32_t)(int16_t) src1 * (int16_t) src2; \
+ if ((tmp ^ (tmp << 1)) & SIGNBIT) { \
+ SET_QC(); \
+ tmp = (tmp >> 31) ^ ~SIGNBIT; \
+ } else { \
+ tmp <<= 1; \
+ } \
+ if (round) { \
+ int32_t old = tmp; \
+ tmp += 1 << 15; \
+ if ((int32_t)tmp < old) { \
+ SET_QC(); \
+ tmp = SIGNBIT - 1; \
+ } \
+ } \
+ dest = tmp >> 16; \
+ } while(0)
+#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 0)
+NEON_VOP_ENV(qdmulh_s16, neon_s16, 2)
+#undef NEON_FN
+#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 1)
+NEON_VOP_ENV(qrdmulh_s16, neon_s16, 2)
+#undef NEON_FN
+#undef NEON_QDMULH16
+
+#define NEON_QDMULH32(dest, src1, src2, round) do { \
+ uint64_t tmp = (int64_t)(int32_t) src1 * (int32_t) src2; \
+ if ((tmp ^ (tmp << 1)) & SIGNBIT64) { \
+ SET_QC(); \
+ tmp = (tmp >> 63) ^ ~SIGNBIT64; \
+ } else { \
+ tmp <<= 1; \
+ } \
+ if (round) { \
+ int64_t old = tmp; \
+ tmp += (int64_t)1 << 31; \
+ if ((int64_t)tmp < old) { \
+ SET_QC(); \
+ tmp = SIGNBIT64 - 1; \
+ } \
+ } \
+ dest = tmp >> 32; \
+ } while(0)
+#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 0)
+NEON_VOP_ENV(qdmulh_s32, neon_s32, 1)
+#undef NEON_FN
+#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 1)
+NEON_VOP_ENV(qrdmulh_s32, neon_s32, 1)
+#undef NEON_FN
+#undef NEON_QDMULH32
+
+uint32_t HELPER(neon_narrow_u8)(uint64_t x)
+{
+ return (x & 0xffu) | ((x >> 8) & 0xff00u) | ((x >> 16) & 0xff0000u)
+ | ((x >> 24) & 0xff000000u);
+}
+
+uint32_t HELPER(neon_narrow_u16)(uint64_t x)
+{
+ return (x & 0xffffu) | ((x >> 16) & 0xffff0000u);
+}
+
+uint32_t HELPER(neon_narrow_high_u8)(uint64_t x)
+{
+ return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00)
+ | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000);
+}
+
+uint32_t HELPER(neon_narrow_high_u16)(uint64_t x)
+{
+ return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000);
+}
+
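+/* In the rounded variants below, clearing the bits below the rounding
+ * bit in each lane first guarantees that a carry out of one lane can
+ * only land in the (discarded) low half of the next lane, so every
+ * lane can be rounded with a single 64 bit addition. */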
+uint32_t HELPER(neon_narrow_round_high_u8)(uint64_t x)
+{
+ x &= 0xff80ff80ff80ff80ull;
+ x += 0x0080008000800080ull;
+ return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00)
+ | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000);
+}
+
+uint32_t HELPER(neon_narrow_round_high_u16)(uint64_t x)
+{
+ x &= 0xffff8000ffff8000ull;
+ x += 0x0000800000008000ull;
+ return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000);
+}
+
+uint32_t HELPER(neon_unarrow_sat8)(CPUARMState *env, uint64_t x)
+{
+ uint16_t s;
+ uint8_t d;
+ uint32_t res = 0;
+#define SAT8(n) \
+ s = x >> n; \
+ if (s & 0x8000) { \
+ SET_QC(); \
+ } else { \
+ if (s > 0xff) { \
+ d = 0xff; \
+ SET_QC(); \
+ } else { \
+ d = s; \
+ } \
+ res |= (uint32_t)d << (n / 2); \
+ }
+
+ SAT8(0);
+ SAT8(16);
+ SAT8(32);
+ SAT8(48);
+#undef SAT8
+ return res;
+}
+
+uint32_t HELPER(neon_narrow_sat_u8)(CPUARMState *env, uint64_t x)
+{
+ uint16_t s;
+ uint8_t d;
+ uint32_t res = 0;
+#define SAT8(n) \
+ s = x >> n; \
+ if (s > 0xff) { \
+ d = 0xff; \
+ SET_QC(); \
+ } else { \
+ d = s; \
+ } \
+ res |= (uint32_t)d << (n / 2);
+
+ SAT8(0);
+ SAT8(16);
+ SAT8(32);
+ SAT8(48);
+#undef SAT8
+ return res;
+}
+
+uint32_t HELPER(neon_narrow_sat_s8)(CPUARMState *env, uint64_t x)
+{
+ int16_t s;
+ uint8_t d;
+ uint32_t res = 0;
+#define SAT8(n) \
+ s = x >> n; \
+ if (s != (int8_t)s) { \
+ d = (s >> 15) ^ 0x7f; \
+ SET_QC(); \
+ } else { \
+ d = s; \
+ } \
+ res |= (uint32_t)d << (n / 2);
+
+ SAT8(0);
+ SAT8(16);
+ SAT8(32);
+ SAT8(48);
+#undef SAT8
+ return res;
+}
+
+uint32_t HELPER(neon_unarrow_sat16)(CPUARMState *env, uint64_t x)
+{
+ uint32_t high;
+ uint32_t low;
+ low = x;
+ if (low & 0x80000000) {
+ low = 0;
+ SET_QC();
+ } else if (low > 0xffff) {
+ low = 0xffff;
+ SET_QC();
+ }
+ high = x >> 32;
+ if (high & 0x80000000) {
+ high = 0;
+ SET_QC();
+ } else if (high > 0xffff) {
+ high = 0xffff;
+ SET_QC();
+ }
+ return low | (high << 16);
+}
+
+uint32_t HELPER(neon_narrow_sat_u16)(CPUARMState *env, uint64_t x)
+{
+ uint32_t high;
+ uint32_t low;
+ low = x;
+ if (low > 0xffff) {
+ low = 0xffff;
+ SET_QC();
+ }
+ high = x >> 32;
+ if (high > 0xffff) {
+ high = 0xffff;
+ SET_QC();
+ }
+ return low | (high << 16);
+}
+
+uint32_t HELPER(neon_narrow_sat_s16)(CPUARMState *env, uint64_t x)
+{
+ int32_t low;
+ int32_t high;
+ low = x;
+ if (low != (int16_t)low) {
+ low = (low >> 31) ^ 0x7fff;
+ SET_QC();
+ }
+ high = x >> 32;
+ if (high != (int16_t)high) {
+ high = (high >> 31) ^ 0x7fff;
+ SET_QC();
+ }
+ return (uint16_t)low | (high << 16);
+}
+
+uint32_t HELPER(neon_unarrow_sat32)(CPUARMState *env, uint64_t x)
+{
+ if (x & 0x8000000000000000ull) {
+ SET_QC();
+ return 0;
+ }
+ if (x > 0xffffffffu) {
+ SET_QC();
+ return 0xffffffffu;
+ }
+ return x;
+}
+
+uint32_t HELPER(neon_narrow_sat_u32)(CPUARMState *env, uint64_t x)
+{
+ if (x > 0xffffffffu) {
+ SET_QC();
+ return 0xffffffffu;
+ }
+ return x;
+}
+
+uint32_t HELPER(neon_narrow_sat_s32)(CPUARMState *env, uint64_t x)
+{
+ if ((int64_t)x != (int32_t)x) {
+ SET_QC();
+ return ((int64_t)x >> 63) ^ 0x7fffffff;
+ }
+ return x;
+}
+
+uint64_t HELPER(neon_widen_u8)(uint32_t x)
+{
+ uint64_t tmp;
+ uint64_t ret;
+ ret = (uint8_t)x;
+ tmp = (uint8_t)(x >> 8);
+ ret |= tmp << 16;
+ tmp = (uint8_t)(x >> 16);
+ ret |= tmp << 32;
+ tmp = (uint8_t)(x >> 24);
+ ret |= tmp << 48;
+ return ret;
+}
+
+uint64_t HELPER(neon_widen_s8)(uint32_t x)
+{
+ uint64_t tmp;
+ uint64_t ret;
+ ret = (uint16_t)(int8_t)x;
+ tmp = (uint16_t)(int8_t)(x >> 8);
+ ret |= tmp << 16;
+ tmp = (uint16_t)(int8_t)(x >> 16);
+ ret |= tmp << 32;
+ tmp = (uint16_t)(int8_t)(x >> 24);
+ ret |= tmp << 48;
+ return ret;
+}
+
+uint64_t HELPER(neon_widen_u16)(uint32_t x)
+{
+ uint64_t high = (uint16_t)(x >> 16);
+ return ((uint16_t)x) | (high << 32);
+}
+
+uint64_t HELPER(neon_widen_s16)(uint32_t x)
+{
+ uint64_t high = (int16_t)(x >> 16);
+ return ((uint32_t)(int16_t)x) | (high << 32);
+}
+
+uint64_t HELPER(neon_addl_u16)(uint64_t a, uint64_t b)
+{
+ uint64_t mask;
+ mask = (a ^ b) & 0x8000800080008000ull;
+ a &= ~0x8000800080008000ull;
+ b &= ~0x8000800080008000ull;
+ return (a + b) ^ mask;
+}
+
+uint64_t HELPER(neon_addl_u32)(uint64_t a, uint64_t b)
+{
+ uint64_t mask;
+ mask = (a ^ b) & 0x8000000080000000ull;
+ a &= ~0x8000000080000000ull;
+ b &= ~0x8000000080000000ull;
+ return (a + b) ^ mask;
+}
+
+uint64_t HELPER(neon_paddl_u16)(uint64_t a, uint64_t b)
+{
+ uint64_t tmp;
+ uint64_t tmp2;
+
+ tmp = a & 0x0000ffff0000ffffull;
+ tmp += (a >> 16) & 0x0000ffff0000ffffull;
+ tmp2 = b & 0xffff0000ffff0000ull;
+ tmp2 += (b << 16) & 0xffff0000ffff0000ull;
+ return ( tmp & 0xffff)
+ | ((tmp >> 16) & 0xffff0000ull)
+ | ((tmp2 << 16) & 0xffff00000000ull)
+ | ( tmp2 & 0xffff000000000000ull);
+}
+
+uint64_t HELPER(neon_paddl_u32)(uint64_t a, uint64_t b)
+{
+ uint32_t low = a + (a >> 32);
+ uint32_t high = b + (b >> 32);
+ return low + ((uint64_t)high << 32);
+}
+
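+/* Lane-wise subtraction without borrow propagation between lanes: the
+ * top bit of each lane of a is forced to 1 and cleared in b, so no
+ * lane-wide subtract can borrow from the lane above; the mask then
+ * restores the correct top bits.  E.g. halfword lanes 0x0000 - 0x0001:
+ * mask = 0x8000, 0x8000 - 0x0001 = 0x7fff, 0x7fff ^ 0x8000 = 0xffff. */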
+uint64_t HELPER(neon_subl_u16)(uint64_t a, uint64_t b)
+{
+ uint64_t mask;
+ mask = (a ^ ~b) & 0x8000800080008000ull;
+ a |= 0x8000800080008000ull;
+ b &= ~0x8000800080008000ull;
+ return (a - b) ^ mask;
+}
+
+uint64_t HELPER(neon_subl_u32)(uint64_t a, uint64_t b)
+{
+ uint64_t mask;
+ mask = (a ^ ~b) & 0x8000000080000000ull;
+ a |= 0x8000000080000000ull;
+ b &= ~0x8000000080000000ull;
+ return (a - b) ^ mask;
+}
+
+uint64_t HELPER(neon_addl_saturate_s32)(CPUARMState *env, uint64_t a, uint64_t b)
+{
+ uint32_t x, y;
+ uint32_t low, high;
+
+ x = a;
+ y = b;
+ low = x + y;
+ if (((low ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) {
+ SET_QC();
+ low = ((int32_t)x >> 31) ^ ~SIGNBIT;
+ }
+ x = a >> 32;
+ y = b >> 32;
+ high = x + y;
+ if (((high ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) {
+ SET_QC();
+ high = ((int32_t)x >> 31) ^ ~SIGNBIT;
+ }
+ return low | ((uint64_t)high << 32);
+}
+
+uint64_t HELPER(neon_addl_saturate_s64)(CPUARMState *env, uint64_t a, uint64_t b)
+{
+ uint64_t result;
+
+ result = a + b;
+ if (((result ^ a) & SIGNBIT64) && !((a ^ b) & SIGNBIT64)) {
+ SET_QC();
+ result = ((int64_t)a >> 63) ^ ~SIGNBIT64;
+ }
+ return result;
+}
+
+/* We have to do the arithmetic in a larger type than
+ * the input type, because for example with a signed 32 bit
+ * op the absolute difference can overflow a signed 32 bit value.
+ */
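+/* E.g. for the signed 32 bit case, |INT32_MAX - INT32_MIN| is
+ * 0xffffffff, which cannot be computed without overflow in a 32 bit
+ * signed type, hence the 64 bit arithtype. */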
+#define DO_ABD(dest, x, y, intype, arithtype) do { \
+ arithtype tmp_x = (intype)(x); \
+ arithtype tmp_y = (intype)(y); \
+ dest = ((tmp_x > tmp_y) ? tmp_x - tmp_y : tmp_y - tmp_x); \
+ } while(0)
+
+uint64_t HELPER(neon_abdl_u16)(uint32_t a, uint32_t b)
+{
+ uint64_t tmp;
+ uint64_t result;
+ DO_ABD(result, a, b, uint8_t, uint32_t);
+ DO_ABD(tmp, a >> 8, b >> 8, uint8_t, uint32_t);
+ result |= tmp << 16;
+ DO_ABD(tmp, a >> 16, b >> 16, uint8_t, uint32_t);
+ result |= tmp << 32;
+ DO_ABD(tmp, a >> 24, b >> 24, uint8_t, uint32_t);
+ result |= tmp << 48;
+ return result;
+}
+
+uint64_t HELPER(neon_abdl_s16)(uint32_t a, uint32_t b)
+{
+ uint64_t tmp;
+ uint64_t result;
+ DO_ABD(result, a, b, int8_t, int32_t);
+ DO_ABD(tmp, a >> 8, b >> 8, int8_t, int32_t);
+ result |= tmp << 16;
+ DO_ABD(tmp, a >> 16, b >> 16, int8_t, int32_t);
+ result |= tmp << 32;
+ DO_ABD(tmp, a >> 24, b >> 24, int8_t, int32_t);
+ result |= tmp << 48;
+ return result;
+}
+
+uint64_t HELPER(neon_abdl_u32)(uint32_t a, uint32_t b)
+{
+ uint64_t tmp;
+ uint64_t result;
+ DO_ABD(result, a, b, uint16_t, uint32_t);
+ DO_ABD(tmp, a >> 16, b >> 16, uint16_t, uint32_t);
+ return result | (tmp << 32);
+}
+
+uint64_t HELPER(neon_abdl_s32)(uint32_t a, uint32_t b)
+{
+ uint64_t tmp;
+ uint64_t result;
+ DO_ABD(result, a, b, int16_t, int32_t);
+ DO_ABD(tmp, a >> 16, b >> 16, int16_t, int32_t);
+ return result | (tmp << 32);
+}
+
+uint64_t HELPER(neon_abdl_u64)(uint32_t a, uint32_t b)
+{
+ uint64_t result;
+ DO_ABD(result, a, b, uint32_t, uint64_t);
+ return result;
+}
+
+uint64_t HELPER(neon_abdl_s64)(uint32_t a, uint32_t b)
+{
+ uint64_t result;
+ DO_ABD(result, a, b, int32_t, int64_t);
+ return result;
+}
+#undef DO_ABD
+
+/* Widening multiply. Named type is the source type. */
+#define DO_MULL(dest, x, y, type1, type2) do { \
+ type1 tmp_x = x; \
+ type1 tmp_y = y; \
+ dest = (type2)((type2)tmp_x * (type2)tmp_y); \
+ } while(0)
+
+uint64_t HELPER(neon_mull_u8)(uint32_t a, uint32_t b)
+{
+ uint64_t tmp;
+ uint64_t result;
+
+ DO_MULL(result, a, b, uint8_t, uint16_t);
+ DO_MULL(tmp, a >> 8, b >> 8, uint8_t, uint16_t);
+ result |= tmp << 16;
+ DO_MULL(tmp, a >> 16, b >> 16, uint8_t, uint16_t);
+ result |= tmp << 32;
+ DO_MULL(tmp, a >> 24, b >> 24, uint8_t, uint16_t);
+ result |= tmp << 48;
+ return result;
+}
+
+uint64_t HELPER(neon_mull_s8)(uint32_t a, uint32_t b)
+{
+ uint64_t tmp;
+ uint64_t result;
+
+ DO_MULL(result, a, b, int8_t, uint16_t);
+ DO_MULL(tmp, a >> 8, b >> 8, int8_t, uint16_t);
+ result |= tmp << 16;
+ DO_MULL(tmp, a >> 16, b >> 16, int8_t, uint16_t);
+ result |= tmp << 32;
+ DO_MULL(tmp, a >> 24, b >> 24, int8_t, uint16_t);
+ result |= tmp << 48;
+ return result;
+}
+
+uint64_t HELPER(neon_mull_u16)(uint32_t a, uint32_t b)
+{
+ uint64_t tmp;
+ uint64_t result;
+
+ DO_MULL(result, a, b, uint16_t, uint32_t);
+ DO_MULL(tmp, a >> 16, b >> 16, uint16_t, uint32_t);
+ return result | (tmp << 32);
+}
+
+uint64_t HELPER(neon_mull_s16)(uint32_t a, uint32_t b)
+{
+ uint64_t tmp;
+ uint64_t result;
+
+ DO_MULL(result, a, b, int16_t, uint32_t);
+ DO_MULL(tmp, a >> 16, b >> 16, int16_t, uint32_t);
+ return result | (tmp << 32);
+}
+
+uint64_t HELPER(neon_negl_u16)(uint64_t x)
+{
+ uint16_t tmp;
+ uint64_t result;
+ result = (uint16_t)-x;
+ tmp = -(x >> 16);
+ result |= (uint64_t)tmp << 16;
+ tmp = -(x >> 32);
+ result |= (uint64_t)tmp << 32;
+ tmp = -(x >> 48);
+ result |= (uint64_t)tmp << 48;
+ return result;
+}
+
+uint64_t HELPER(neon_negl_u32)(uint64_t x)
+{
+ uint32_t low = -x;
+ uint32_t high = -(x >> 32);
+ return low | ((uint64_t)high << 32);
+}
+
+/* Saturating sign manipulation. */
+/* ??? Make these use NEON_VOP1 */
+#define DO_QABS8(x) do { \
+ if (x == (int8_t)0x80) { \
+ x = 0x7f; \
+ SET_QC(); \
+ } else if (x < 0) { \
+ x = -x; \
+ }} while (0)
+uint32_t HELPER(neon_qabs_s8)(CPUARMState *env, uint32_t x)
+{
+ neon_s8 vec;
+ NEON_UNPACK(neon_s8, vec, x);
+ DO_QABS8(vec.v1);
+ DO_QABS8(vec.v2);
+ DO_QABS8(vec.v3);
+ DO_QABS8(vec.v4);
+ NEON_PACK(neon_s8, x, vec);
+ return x;
+}
+#undef DO_QABS8
+
+#define DO_QNEG8(x) do { \
+ if (x == (int8_t)0x80) { \
+ x = 0x7f; \
+ SET_QC(); \
+ } else { \
+ x = -x; \
+ }} while (0)
+uint32_t HELPER(neon_qneg_s8)(CPUARMState *env, uint32_t x)
+{
+ neon_s8 vec;
+ NEON_UNPACK(neon_s8, vec, x);
+ DO_QNEG8(vec.v1);
+ DO_QNEG8(vec.v2);
+ DO_QNEG8(vec.v3);
+ DO_QNEG8(vec.v4);
+ NEON_PACK(neon_s8, x, vec);
+ return x;
+}
+#undef DO_QNEG8
+
+#define DO_QABS16(x) do { \
+ if (x == (int16_t)0x8000) { \
+ x = 0x7fff; \
+ SET_QC(); \
+ } else if (x < 0) { \
+ x = -x; \
+ }} while (0)
+uint32_t HELPER(neon_qabs_s16)(CPUARMState *env, uint32_t x)
+{
+ neon_s16 vec;
+ NEON_UNPACK(neon_s16, vec, x);
+ DO_QABS16(vec.v1);
+ DO_QABS16(vec.v2);
+ NEON_PACK(neon_s16, x, vec);
+ return x;
+}
+#undef DO_QABS16
+
+#define DO_QNEG16(x) do { \
+ if (x == (int16_t)0x8000) { \
+ x = 0x7fff; \
+ SET_QC(); \
+ } else { \
+ x = -x; \
+ }} while (0)
+uint32_t HELPER(neon_qneg_s16)(CPUARMState *env, uint32_t x)
+{
+ neon_s16 vec;
+ NEON_UNPACK(neon_s16, vec, x);
+ DO_QNEG16(vec.v1);
+ DO_QNEG16(vec.v2);
+ NEON_PACK(neon_s16, x, vec);
+ return x;
+}
+#undef DO_QNEG16
+
+uint32_t HELPER(neon_qabs_s32)(CPUARMState *env, uint32_t x)
+{
+ if (x == SIGNBIT) {
+ SET_QC();
+ x = ~SIGNBIT;
+ } else if ((int32_t)x < 0) {
+ x = -x;
+ }
+ return x;
+}
+
+uint32_t HELPER(neon_qneg_s32)(CPUARMState *env, uint32_t x)
+{
+ if (x == SIGNBIT) {
+ SET_QC();
+ x = ~SIGNBIT;
+ } else {
+ x = -x;
+ }
+ return x;
+}
+
+uint64_t HELPER(neon_qabs_s64)(CPUARMState *env, uint64_t x)
+{
+ if (x == SIGNBIT64) {
+ SET_QC();
+ x = ~SIGNBIT64;
+ } else if ((int64_t)x < 0) {
+ x = -x;
+ }
+ return x;
+}
+
+uint64_t HELPER(neon_qneg_s64)(CPUARMState *env, uint64_t x)
+{
+ if (x == SIGNBIT64) {
+ SET_QC();
+ x = ~SIGNBIT64;
+ } else {
+ x = -x;
+ }
+ return x;
+}
+
+/* NEON Float helpers. */
+uint32_t HELPER(neon_abd_f32)(uint32_t a, uint32_t b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ float32 f0 = make_float32(a);
+ float32 f1 = make_float32(b);
+ return float32_val(float32_abs(float32_sub(f0, f1, fpst)));
+}
+
+/* Floating point comparisons produce an integer result.
+ * Note that EQ doesn't signal InvalidOp for QNaNs but GE and GT do.
+ * Softfloat routines return 0/1, which we convert to the 0/-1 Neon requires.
+ */
+uint32_t HELPER(neon_ceq_f32)(uint32_t a, uint32_t b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ return -float32_eq_quiet(make_float32(a), make_float32(b), fpst);
+}
+
+uint32_t HELPER(neon_cge_f32)(uint32_t a, uint32_t b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ return -float32_le(make_float32(b), make_float32(a), fpst);
+}
+
+uint32_t HELPER(neon_cgt_f32)(uint32_t a, uint32_t b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ return -float32_lt(make_float32(b), make_float32(a), fpst);
+}
+
+uint32_t HELPER(neon_acge_f32)(uint32_t a, uint32_t b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ float32 f0 = float32_abs(make_float32(a));
+ float32 f1 = float32_abs(make_float32(b));
+ return -float32_le(f1, f0, fpst);
+}
+
+uint32_t HELPER(neon_acgt_f32)(uint32_t a, uint32_t b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ float32 f0 = float32_abs(make_float32(a));
+ float32 f1 = float32_abs(make_float32(b));
+ return -float32_lt(f1, f0, fpst);
+}
+
+uint64_t HELPER(neon_acge_f64)(uint64_t a, uint64_t b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ float64 f0 = float64_abs(make_float64(a));
+ float64 f1 = float64_abs(make_float64(b));
+ return -float64_le(f1, f0, fpst);
+}
+
+uint64_t HELPER(neon_acgt_f64)(uint64_t a, uint64_t b, void *fpstp)
+{
+ float_status *fpst = fpstp;
+ float64 f0 = float64_abs(make_float64(a));
+ float64 f1 = float64_abs(make_float64(b));
+ return -float64_lt(f1, f0, fpst);
+}
+
+#define ELEM(V, N, SIZE) (((V) >> ((N) * (SIZE))) & ((1ull << (SIZE)) - 1))
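+/* E.g. ELEM(0x0123456789abcdef, 2, 16) extracts element 2 of the
+ * four 16 bit elements, i.e. 0x4567. */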
+
+void HELPER(neon_qunzip8)(CPUARMState *env, uint32_t rd, uint32_t rm)
+{
+ uint64_t zm0 = float64_val(env->vfp.regs[rm]);
+ uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
+ uint64_t zd0 = float64_val(env->vfp.regs[rd]);
+ uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
+ uint64_t d0 = ELEM(zd0, 0, 8) | (ELEM(zd0, 2, 8) << 8)
+ | (ELEM(zd0, 4, 8) << 16) | (ELEM(zd0, 6, 8) << 24)
+ | (ELEM(zd1, 0, 8) << 32) | (ELEM(zd1, 2, 8) << 40)
+ | (ELEM(zd1, 4, 8) << 48) | (ELEM(zd1, 6, 8) << 56);
+ uint64_t d1 = ELEM(zm0, 0, 8) | (ELEM(zm0, 2, 8) << 8)
+ | (ELEM(zm0, 4, 8) << 16) | (ELEM(zm0, 6, 8) << 24)
+ | (ELEM(zm1, 0, 8) << 32) | (ELEM(zm1, 2, 8) << 40)
+ | (ELEM(zm1, 4, 8) << 48) | (ELEM(zm1, 6, 8) << 56);
+ uint64_t m0 = ELEM(zd0, 1, 8) | (ELEM(zd0, 3, 8) << 8)
+ | (ELEM(zd0, 5, 8) << 16) | (ELEM(zd0, 7, 8) << 24)
+ | (ELEM(zd1, 1, 8) << 32) | (ELEM(zd1, 3, 8) << 40)
+ | (ELEM(zd1, 5, 8) << 48) | (ELEM(zd1, 7, 8) << 56);
+ uint64_t m1 = ELEM(zm0, 1, 8) | (ELEM(zm0, 3, 8) << 8)
+ | (ELEM(zm0, 5, 8) << 16) | (ELEM(zm0, 7, 8) << 24)
+ | (ELEM(zm1, 1, 8) << 32) | (ELEM(zm1, 3, 8) << 40)
+ | (ELEM(zm1, 5, 8) << 48) | (ELEM(zm1, 7, 8) << 56);
+ env->vfp.regs[rm] = make_float64(m0);
+ env->vfp.regs[rm + 1] = make_float64(m1);
+ env->vfp.regs[rd] = make_float64(d0);
+ env->vfp.regs[rd + 1] = make_float64(d1);
+}
+
+void HELPER(neon_qunzip16)(CPUARMState *env, uint32_t rd, uint32_t rm)
+{
+ uint64_t zm0 = float64_val(env->vfp.regs[rm]);
+ uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
+ uint64_t zd0 = float64_val(env->vfp.regs[rd]);
+ uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
+ uint64_t d0 = ELEM(zd0, 0, 16) | (ELEM(zd0, 2, 16) << 16)
+ | (ELEM(zd1, 0, 16) << 32) | (ELEM(zd1, 2, 16) << 48);
+ uint64_t d1 = ELEM(zm0, 0, 16) | (ELEM(zm0, 2, 16) << 16)
+ | (ELEM(zm1, 0, 16) << 32) | (ELEM(zm1, 2, 16) << 48);
+ uint64_t m0 = ELEM(zd0, 1, 16) | (ELEM(zd0, 3, 16) << 16)
+ | (ELEM(zd1, 1, 16) << 32) | (ELEM(zd1, 3, 16) << 48);
+ uint64_t m1 = ELEM(zm0, 1, 16) | (ELEM(zm0, 3, 16) << 16)
+ | (ELEM(zm1, 1, 16) << 32) | (ELEM(zm1, 3, 16) << 48);
+ env->vfp.regs[rm] = make_float64(m0);
+ env->vfp.regs[rm + 1] = make_float64(m1);
+ env->vfp.regs[rd] = make_float64(d0);
+ env->vfp.regs[rd + 1] = make_float64(d1);
+}
+
+void HELPER(neon_qunzip32)(CPUARMState *env, uint32_t rd, uint32_t rm)
+{
+ uint64_t zm0 = float64_val(env->vfp.regs[rm]);
+ uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
+ uint64_t zd0 = float64_val(env->vfp.regs[rd]);
+ uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
+ uint64_t d0 = ELEM(zd0, 0, 32) | (ELEM(zd1, 0, 32) << 32);
+ uint64_t d1 = ELEM(zm0, 0, 32) | (ELEM(zm1, 0, 32) << 32);
+ uint64_t m0 = ELEM(zd0, 1, 32) | (ELEM(zd1, 1, 32) << 32);
+ uint64_t m1 = ELEM(zm0, 1, 32) | (ELEM(zm1, 1, 32) << 32);
+ env->vfp.regs[rm] = make_float64(m0);
+ env->vfp.regs[rm + 1] = make_float64(m1);
+ env->vfp.regs[rd] = make_float64(d0);
+ env->vfp.regs[rd + 1] = make_float64(d1);
+}
+
+void HELPER(neon_unzip8)(CPUARMState *env, uint32_t rd, uint32_t rm)
+{
+ uint64_t zm = float64_val(env->vfp.regs[rm]);
+ uint64_t zd = float64_val(env->vfp.regs[rd]);
+ uint64_t d0 = ELEM(zd, 0, 8) | (ELEM(zd, 2, 8) << 8)
+ | (ELEM(zd, 4, 8) << 16) | (ELEM(zd, 6, 8) << 24)
+ | (ELEM(zm, 0, 8) << 32) | (ELEM(zm, 2, 8) << 40)
+ | (ELEM(zm, 4, 8) << 48) | (ELEM(zm, 6, 8) << 56);
+ uint64_t m0 = ELEM(zd, 1, 8) | (ELEM(zd, 3, 8) << 8)
+ | (ELEM(zd, 5, 8) << 16) | (ELEM(zd, 7, 8) << 24)
+ | (ELEM(zm, 1, 8) << 32) | (ELEM(zm, 3, 8) << 40)
+ | (ELEM(zm, 5, 8) << 48) | (ELEM(zm, 7, 8) << 56);
+ env->vfp.regs[rm] = make_float64(m0);
+ env->vfp.regs[rd] = make_float64(d0);
+}
+
+void HELPER(neon_unzip16)(CPUARMState *env, uint32_t rd, uint32_t rm)
+{
+ uint64_t zm = float64_val(env->vfp.regs[rm]);
+ uint64_t zd = float64_val(env->vfp.regs[rd]);
+ uint64_t d0 = ELEM(zd, 0, 16) | (ELEM(zd, 2, 16) << 16)
+ | (ELEM(zm, 0, 16) << 32) | (ELEM(zm, 2, 16) << 48);
+ uint64_t m0 = ELEM(zd, 1, 16) | (ELEM(zd, 3, 16) << 16)
+ | (ELEM(zm, 1, 16) << 32) | (ELEM(zm, 3, 16) << 48);
+ env->vfp.regs[rm] = make_float64(m0);
+ env->vfp.regs[rd] = make_float64(d0);
+}
+
+void HELPER(neon_qzip8)(CPUARMState *env, uint32_t rd, uint32_t rm)
+{
+ uint64_t zm0 = float64_val(env->vfp.regs[rm]);
+ uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
+ uint64_t zd0 = float64_val(env->vfp.regs[rd]);
+ uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
+ uint64_t d0 = ELEM(zd0, 0, 8) | (ELEM(zm0, 0, 8) << 8)
+ | (ELEM(zd0, 1, 8) << 16) | (ELEM(zm0, 1, 8) << 24)
+ | (ELEM(zd0, 2, 8) << 32) | (ELEM(zm0, 2, 8) << 40)
+ | (ELEM(zd0, 3, 8) << 48) | (ELEM(zm0, 3, 8) << 56);
+ uint64_t d1 = ELEM(zd0, 4, 8) | (ELEM(zm0, 4, 8) << 8)
+ | (ELEM(zd0, 5, 8) << 16) | (ELEM(zm0, 5, 8) << 24)
+ | (ELEM(zd0, 6, 8) << 32) | (ELEM(zm0, 6, 8) << 40)
+ | (ELEM(zd0, 7, 8) << 48) | (ELEM(zm0, 7, 8) << 56);
+ uint64_t m0 = ELEM(zd1, 0, 8) | (ELEM(zm1, 0, 8) << 8)
+ | (ELEM(zd1, 1, 8) << 16) | (ELEM(zm1, 1, 8) << 24)
+ | (ELEM(zd1, 2, 8) << 32) | (ELEM(zm1, 2, 8) << 40)
+ | (ELEM(zd1, 3, 8) << 48) | (ELEM(zm1, 3, 8) << 56);
+ uint64_t m1 = ELEM(zd1, 4, 8) | (ELEM(zm1, 4, 8) << 8)
+ | (ELEM(zd1, 5, 8) << 16) | (ELEM(zm1, 5, 8) << 24)
+ | (ELEM(zd1, 6, 8) << 32) | (ELEM(zm1, 6, 8) << 40)
+ | (ELEM(zd1, 7, 8) << 48) | (ELEM(zm1, 7, 8) << 56);
+ env->vfp.regs[rm] = make_float64(m0);
+ env->vfp.regs[rm + 1] = make_float64(m1);
+ env->vfp.regs[rd] = make_float64(d0);
+ env->vfp.regs[rd + 1] = make_float64(d1);
+}
+
+void HELPER(neon_qzip16)(CPUARMState *env, uint32_t rd, uint32_t rm)
+{
+ uint64_t zm0 = float64_val(env->vfp.regs[rm]);
+ uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
+ uint64_t zd0 = float64_val(env->vfp.regs[rd]);
+ uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
+ uint64_t d0 = ELEM(zd0, 0, 16) | (ELEM(zm0, 0, 16) << 16)
+ | (ELEM(zd0, 1, 16) << 32) | (ELEM(zm0, 1, 16) << 48);
+ uint64_t d1 = ELEM(zd0, 2, 16) | (ELEM(zm0, 2, 16) << 16)
+ | (ELEM(zd0, 3, 16) << 32) | (ELEM(zm0, 3, 16) << 48);
+ uint64_t m0 = ELEM(zd1, 0, 16) | (ELEM(zm1, 0, 16) << 16)
+ | (ELEM(zd1, 1, 16) << 32) | (ELEM(zm1, 1, 16) << 48);
+ uint64_t m1 = ELEM(zd1, 2, 16) | (ELEM(zm1, 2, 16) << 16)
+ | (ELEM(zd1, 3, 16) << 32) | (ELEM(zm1, 3, 16) << 48);
+ env->vfp.regs[rm] = make_float64(m0);
+ env->vfp.regs[rm + 1] = make_float64(m1);
+ env->vfp.regs[rd] = make_float64(d0);
+ env->vfp.regs[rd + 1] = make_float64(d1);
+}
+
+void HELPER(neon_qzip32)(CPUARMState *env, uint32_t rd, uint32_t rm)
+{
+ uint64_t zm0 = float64_val(env->vfp.regs[rm]);
+ uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
+ uint64_t zd0 = float64_val(env->vfp.regs[rd]);
+ uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
+ uint64_t d0 = ELEM(zd0, 0, 32) | (ELEM(zm0, 0, 32) << 32);
+ uint64_t d1 = ELEM(zd0, 1, 32) | (ELEM(zm0, 1, 32) << 32);
+ uint64_t m0 = ELEM(zd1, 0, 32) | (ELEM(zm1, 0, 32) << 32);
+ uint64_t m1 = ELEM(zd1, 1, 32) | (ELEM(zm1, 1, 32) << 32);
+ env->vfp.regs[rm] = make_float64(m0);
+ env->vfp.regs[rm + 1] = make_float64(m1);
+ env->vfp.regs[rd] = make_float64(d0);
+ env->vfp.regs[rd + 1] = make_float64(d1);
+}
+
+void HELPER(neon_zip8)(CPUARMState *env, uint32_t rd, uint32_t rm)
+{
+ uint64_t zm = float64_val(env->vfp.regs[rm]);
+ uint64_t zd = float64_val(env->vfp.regs[rd]);
+ uint64_t d0 = ELEM(zd, 0, 8) | (ELEM(zm, 0, 8) << 8)
+ | (ELEM(zd, 1, 8) << 16) | (ELEM(zm, 1, 8) << 24)
+ | (ELEM(zd, 2, 8) << 32) | (ELEM(zm, 2, 8) << 40)
+ | (ELEM(zd, 3, 8) << 48) | (ELEM(zm, 3, 8) << 56);
+ uint64_t m0 = ELEM(zd, 4, 8) | (ELEM(zm, 4, 8) << 8)
+ | (ELEM(zd, 5, 8) << 16) | (ELEM(zm, 5, 8) << 24)
+ | (ELEM(zd, 6, 8) << 32) | (ELEM(zm, 6, 8) << 40)
+ | (ELEM(zd, 7, 8) << 48) | (ELEM(zm, 7, 8) << 56);
+ env->vfp.regs[rm] = make_float64(m0);
+ env->vfp.regs[rd] = make_float64(d0);
+}
+
+void HELPER(neon_zip16)(CPUARMState *env, uint32_t rd, uint32_t rm)
+{
+ uint64_t zm = float64_val(env->vfp.regs[rm]);
+ uint64_t zd = float64_val(env->vfp.regs[rd]);
+ uint64_t d0 = ELEM(zd, 0, 16) | (ELEM(zm, 0, 16) << 16)
+ | (ELEM(zd, 1, 16) << 32) | (ELEM(zm, 1, 16) << 48);
+ uint64_t m0 = ELEM(zd, 2, 16) | (ELEM(zm, 2, 16) << 16)
+ | (ELEM(zd, 3, 16) << 32) | (ELEM(zm, 3, 16) << 48);
+ env->vfp.regs[rm] = make_float64(m0);
+ env->vfp.regs[rd] = make_float64(d0);
+}
+
+/* Helper function for 64 bit polynomial multiply case:
+ * perform PolynomialMult(op1, op2) and return either the top or
+ * bottom half of the 128 bit result.
+ */
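+/* E.g. neon_pmull_64_lo(0x3, 0x3): bit 0 of op1 contributes op2 = 0x3
+ * and bit 1 contributes op2 << 1 = 0x6, XORing to 0x5 (carry-less
+ * multiply, as for neon_mul_p8 above). */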
+uint64_t HELPER(neon_pmull_64_lo)(uint64_t op1, uint64_t op2)
+{
+ int bitnum;
+ uint64_t res = 0;
+
+ for (bitnum = 0; bitnum < 64; bitnum++) {
+ if (op1 & (1ULL << bitnum)) {
+ res ^= op2 << bitnum;
+ }
+ }
+ return res;
+}
+uint64_t HELPER(neon_pmull_64_hi)(uint64_t op1, uint64_t op2)
+{
+ int bitnum;
+ uint64_t res = 0;
+
+ /* bit 0 of op1 can't influence the high 64 bits at all */
+ for (bitnum = 1; bitnum < 64; bitnum++) {
+ if (op1 & (1ULL << bitnum)) {
+ res ^= op2 >> (64 - bitnum);
+ }
+ }
+ return res;
+}
diff --git a/target/arm/op_addsub.h b/target/arm/op_addsub.h
new file mode 100644
index 0000000000..ca4a1893c3
--- /dev/null
+++ b/target/arm/op_addsub.h
@@ -0,0 +1,103 @@
+/*
+ * ARMv6 integer SIMD operations.
+ *
+ * Copyright (c) 2007 CodeSourcery.
+ * Written by Paul Brook
+ *
+ * This code is licensed under the GPL.
+ */
+
+#ifdef ARITH_GE
+#define GE_ARG , void *gep
+#define DECLARE_GE uint32_t ge = 0
+#define SET_GE *(uint32_t *)gep = ge
+#else
+#define GE_ARG
+#define DECLARE_GE do{}while(0)
+#define SET_GE do{}while(0)
+#endif
+
+#define RESULT(val, n, width) \
+ res |= ((uint32_t)(glue(glue(uint,width),_t))(val)) << (n * width)
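+/* e.g. RESULT(sum, 1, 16) truncates sum to 16 bits and ORs it into
+ * bits [31:16] of res */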
+
+uint32_t HELPER(glue(PFX,add16))(uint32_t a, uint32_t b GE_ARG)
+{
+ uint32_t res = 0;
+ DECLARE_GE;
+
+ ADD16(a, b, 0);
+ ADD16(a >> 16, b >> 16, 1);
+ SET_GE;
+ return res;
+}
+
+uint32_t HELPER(glue(PFX,add8))(uint32_t a, uint32_t b GE_ARG)
+{
+ uint32_t res = 0;
+ DECLARE_GE;
+
+ ADD8(a, b, 0);
+ ADD8(a >> 8, b >> 8, 1);
+ ADD8(a >> 16, b >> 16, 2);
+ ADD8(a >> 24, b >> 24, 3);
+ SET_GE;
+ return res;
+}
+
+uint32_t HELPER(glue(PFX,sub16))(uint32_t a, uint32_t b GE_ARG)
+{
+ uint32_t res = 0;
+ DECLARE_GE;
+
+ SUB16(a, b, 0);
+ SUB16(a >> 16, b >> 16, 1);
+ SET_GE;
+ return res;
+}
+
+uint32_t HELPER(glue(PFX,sub8))(uint32_t a, uint32_t b GE_ARG)
+{
+ uint32_t res = 0;
+ DECLARE_GE;
+
+ SUB8(a, b, 0);
+ SUB8(a >> 8, b >> 8, 1);
+ SUB8(a >> 16, b >> 16, 2);
+ SUB8(a >> 24, b >> 24, 3);
+ SET_GE;
+ return res;
+}
+
+uint32_t HELPER(glue(PFX,subaddx))(uint32_t a, uint32_t b GE_ARG)
+{
+ uint32_t res = 0;
+ DECLARE_GE;
+
+ ADD16(a, b >> 16, 0);
+ SUB16(a >> 16, b, 1);
+ SET_GE;
+ return res;
+}
+
+uint32_t HELPER(glue(PFX,addsubx))(uint32_t a, uint32_t b GE_ARG)
+{
+ uint32_t res = 0;
+ DECLARE_GE;
+
+ SUB16(a, b >> 16, 0);
+ ADD16(a >> 16, b, 1);
+ SET_GE;
+ return res;
+}
+
+#undef GE_ARG
+#undef DECLARE_GE
+#undef SET_GE
+#undef RESULT
+
+#undef ARITH_GE
+#undef PFX
+#undef ADD16
+#undef SUB16
+#undef ADD8
+#undef SUB8
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
new file mode 100644
index 0000000000..cd94216591
--- /dev/null
+++ b/target/arm/op_helper.c
@@ -0,0 +1,1335 @@
+/*
+ * ARM helper routines
+ *
+ * Copyright (c) 2005-2007 CodeSourcery, LLC
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "internals.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+
+#define SIGNBIT (uint32_t)0x80000000
+#define SIGNBIT64 ((uint64_t)1 << 63)
+
+static void raise_exception(CPUARMState *env, uint32_t excp,
+ uint32_t syndrome, uint32_t target_el)
+{
+ CPUState *cs = CPU(arm_env_get_cpu(env));
+
+ assert(!excp_is_internal(excp));
+ cs->exception_index = excp;
+ env->exception.syndrome = syndrome;
+ env->exception.target_el = target_el;
+ cpu_loop_exit(cs);
+}
+
+static int exception_target_el(CPUARMState *env)
+{
+ int target_el = MAX(1, arm_current_el(env));
+
+ /* No such thing as secure EL1 if EL3 is aarch32, so update the target EL
+ * to EL3 in this case.
+ */
+ if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
+ target_el = 3;
+ }
+
+ return target_el;
+}
+
+uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def,
+ uint32_t rn, uint32_t maxindex)
+{
+ uint32_t val;
+ uint32_t tmp;
+ int index;
+ int shift;
+ uint64_t *table;
+ table = (uint64_t *)&env->vfp.regs[rn];
+ val = 0;
+ for (shift = 0; shift < 32; shift += 8) {
+ index = (ireg >> shift) & 0xff;
+ if (index < maxindex) {
+ tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
+ val |= tmp << shift;
+ } else {
+ val |= def & (0xff << shift);
+ }
+ }
+ return val;
+}
+
+#if !defined(CONFIG_USER_ONLY)
+
+static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
+ unsigned int target_el,
+ bool same_el,
+ bool s1ptw, bool is_write,
+ int fsc)
+{
+ uint32_t syn;
+
+ /* ISV is only set for data aborts routed to EL2 and
+ * never for stage-1 page table walks faulting on stage 2.
+ *
+ * Furthermore, ISV is only set for certain kinds of load/stores.
+ * If the template syndrome does not have ISV set, we should leave
+ * it cleared.
+ *
+ * See ARMv8 specs, D7-1974:
+ * ISS encoding for an exception from a Data Abort, the
+ * ISV field.
+ */
+ if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
+ syn = syn_data_abort_no_iss(same_el,
+ 0, 0, s1ptw, is_write, fsc);
+ } else {
+ /* Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
+ * syndrome created at translation time.
+ * Now we create the runtime syndrome with the remaining fields.
+ */
+ syn = syn_data_abort_with_iss(same_el,
+ 0, 0, 0, 0, 0,
+ 0, 0, s1ptw, is_write, fsc,
+ false);
+ /* Merge the runtime syndrome with the template syndrome. */
+ syn |= template_syn;
+ }
+ return syn;
+}
+
+/* Try to fill the TLB and raise an exception on error. If retaddr is
+ * NULL, the function was called from C code (i.e. not from generated
+ * code or from helper.c).
+ */
+void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ bool ret;
+ uint32_t fsr = 0;
+ ARMMMUFaultInfo fi = {};
+
+ ret = arm_tlb_fill(cs, addr, access_type, mmu_idx, &fsr, &fi);
+ if (unlikely(ret)) {
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ uint32_t syn, exc;
+ unsigned int target_el;
+ bool same_el;
+
+ if (retaddr) {
+ /* now we have a real cpu fault */
+ cpu_restore_state(cs, retaddr);
+ }
+
+ target_el = exception_target_el(env);
+ if (fi.stage2) {
+ target_el = 2;
+ env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
+ }
+ same_el = arm_current_el(env) == target_el;
+ /* AArch64 syndrome does not have an LPAE bit */
+ syn = fsr & ~(1 << 9);
+
+ /* For insn and data aborts we assume there is no instruction syndrome
+ * information; this is always true for exceptions reported to EL1.
+ */
+ if (access_type == MMU_INST_FETCH) {
+ syn = syn_insn_abort(same_el, 0, fi.s1ptw, syn);
+ exc = EXCP_PREFETCH_ABORT;
+ } else {
+ syn = merge_syn_data_abort(env->exception.syndrome, target_el,
+ same_el, fi.s1ptw,
+ access_type == MMU_DATA_STORE, syn);
+ if (access_type == MMU_DATA_STORE
+ && arm_feature(env, ARM_FEATURE_V6)) {
+ fsr |= (1 << 11);
+ }
+ exc = EXCP_DATA_ABORT;
+ }
+
+ env->exception.vaddress = addr;
+ env->exception.fsr = fsr;
+ raise_exception(env, exc, syn, target_el);
+ }
+}
+
+/* Raise a data fault alignment exception for the specified virtual address */
+void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ int target_el;
+ bool same_el;
+ uint32_t syn;
+
+ if (retaddr) {
+ /* now we have a real cpu fault */
+ cpu_restore_state(cs, retaddr);
+ }
+
+ target_el = exception_target_el(env);
+ same_el = (arm_current_el(env) == target_el);
+
+ env->exception.vaddress = vaddr;
+
+ /* the DFSR for an alignment fault depends on whether we're using
+ * the LPAE long descriptor format, or the short descriptor format
+ */
+ if (arm_s1_regime_using_lpae_format(env, cpu_mmu_index(env, false))) {
+ env->exception.fsr = (1 << 9) | 0x21;
+ } else {
+ env->exception.fsr = 0x1;
+ }
+
+ if (access_type == MMU_DATA_STORE && arm_feature(env, ARM_FEATURE_V6)) {
+ env->exception.fsr |= (1 << 11);
+ }
+
+ syn = merge_syn_data_abort(env->exception.syndrome, target_el,
+ same_el, 0, access_type == MMU_DATA_STORE,
+ 0x21);
+ raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
+}
+
+#endif /* !defined(CONFIG_USER_ONLY) */
+
+uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+ uint32_t res = a + b;
+ if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
+ env->QF = 1;
+ return res;
+}
+
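+/* For addition, signed overflow occurs iff the operands have the same
+ * sign and the result's sign differs, which is what the SIGNBIT tests
+ * below check; ~(((int32_t)a >> 31) ^ SIGNBIT) evaluates to 0x7fffffff
+ * when a is non-negative and 0x80000000 when a is negative, i.e. the
+ * saturated value in the direction of the overflow. */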
+uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+ uint32_t res = a + b;
+ if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
+ env->QF = 1;
+ res = ~(((int32_t)a >> 31) ^ SIGNBIT);
+ }
+ return res;
+}
+
+uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+ uint32_t res = a - b;
+ if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
+ env->QF = 1;
+ res = ~(((int32_t)a >> 31) ^ SIGNBIT);
+ }
+ return res;
+}
+
+uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
+{
+ uint32_t res;
+ if (val >= 0x40000000) {
+ res = ~SIGNBIT;
+ env->QF = 1;
+ } else if (val <= (int32_t)0xc0000000) {
+ res = SIGNBIT;
+ env->QF = 1;
+ } else {
+ res = val << 1;
+ }
+ return res;
+}
+
+uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+ uint32_t res = a + b;
+ if (res < a) {
+ env->QF = 1;
+ res = ~0;
+ }
+ return res;
+}
+
+uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
+{
+ uint32_t res = a - b;
+ if (res > a) {
+ env->QF = 1;
+ res = 0;
+ }
+ return res;
+}
+
+/* Signed saturation. */
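+/* E.g. with shift = 7 the value is clamped to the signed 8 bit range:
+ * do_ssat(env, 200, 7) returns 127 and do_ssat(env, -200, 7) returns
+ * -128, setting QF in both cases. */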
+static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
+{
+ int32_t top;
+ uint32_t mask;
+
+ top = val >> shift;
+ mask = (1u << shift) - 1;
+ if (top > 0) {
+ env->QF = 1;
+ return mask;
+ } else if (top < -1) {
+ env->QF = 1;
+ return ~mask;
+ }
+ return val;
+}
+
+/* Unsigned saturation. */
+static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
+{
+ uint32_t max;
+
+ max = (1u << shift) - 1;
+ if (val < 0) {
+ env->QF = 1;
+ return 0;
+ } else if (val > max) {
+ env->QF = 1;
+ return max;
+ }
+ return val;
+}
+
+/* Signed saturate. */
+uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
+{
+ return do_ssat(env, x, shift);
+}
+
+/* Dual halfword signed saturate. */
+uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
+{
+ uint32_t res;
+
+ res = (uint16_t)do_ssat(env, (int16_t)x, shift);
+ res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
+ return res;
+}
+
+/* Unsigned saturate. */
+uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
+{
+ return do_usat(env, x, shift);
+}
+
+/* Dual halfword unsigned saturate. */
+uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
+{
+ uint32_t res;
+
+ res = (uint16_t)do_usat(env, (int16_t)x, shift);
+ res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
+ return res;
+}
+
+void HELPER(setend)(CPUARMState *env)
+{
+ env->uncached_cpsr ^= CPSR_E;
+}
+
+/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
+ * The function returns the target EL (1-3) if the instruction is to be trapped;
+ * otherwise it returns 0 indicating it is not trapped.
+ */
+static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
+{
+ int cur_el = arm_current_el(env);
+ uint64_t mask;
+
+ /* If we are currently in EL0 then we need to check if SCTLR is set up for
+ * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
+ */
+ if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
+ int target_el;
+
+ mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
+ if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
+ /* Secure EL0 and Secure PL1 are at EL3 */
+ target_el = 3;
+ } else {
+ target_el = 1;
+ }
+
+ if (!(env->cp15.sctlr_el[target_el] & mask)) {
+ return target_el;
+ }
+ }
+
+ /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it.
+ * No ARM_FEATURE check is needed: if HCR_EL2 doesn't exist the
+ * bits will be zero, indicating no trap.
+ */
+ if (cur_el < 2 && !arm_is_secure(env)) {
+ mask = (is_wfe) ? HCR_TWE : HCR_TWI;
+ if (env->cp15.hcr_el2 & mask) {
+ return 2;
+ }
+ }
+
+ /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
+ if (cur_el < 3) {
+ mask = (is_wfe) ? SCR_TWE : SCR_TWI;
+ if (env->cp15.scr_el3 & mask) {
+ return 3;
+ }
+ }
+
+ return 0;
+}
+
+void HELPER(wfi)(CPUARMState *env)
+{
+ CPUState *cs = CPU(arm_env_get_cpu(env));
+ int target_el = check_wfx_trap(env, false);
+
+ if (cpu_has_work(cs)) {
+ /* Don't bother to go into our "low power state" if
+ * we would just wake up immediately.
+ */
+ return;
+ }
+
+ if (target_el) {
+ env->pc -= 4;
+ raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0), target_el);
+ }
+
+ cs->exception_index = EXCP_HLT;
+ cs->halted = 1;
+ cpu_loop_exit(cs);
+}
+
+void HELPER(wfe)(CPUARMState *env)
+{
+ /* This is a hint instruction that is semantically different
+ * from YIELD even though we currently implement it identically.
+ * Don't actually halt the CPU, just yield back to the top
+ * level loop. This is not going into a "low power state"
+ * (i.e. halting until some event occurs), so we never take
+ * a configurable trap to a different exception level.
+ */
+ HELPER(yield)(env);
+}
+
+void HELPER(yield)(CPUARMState *env)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
+ /* This is a non-trappable hint instruction that generally indicates
+ * that the guest is currently busy-looping. Yield control back to the
+ * top level loop so that a more deserving VCPU has a chance to run.
+ */
+ cs->exception_index = EXCP_YIELD;
+ cpu_loop_exit(cs);
+}
+
+/* Raise an internal-to-QEMU exception. This is limited to only
+ * those EXCP values which are special cases for QEMU to interrupt
+ * execution and not to be used for exceptions which are passed to
+ * the guest (those must all have syndrome information and thus should
+ * use exception_with_syndrome).
+ */
+void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
+{
+ CPUState *cs = CPU(arm_env_get_cpu(env));
+
+ assert(excp_is_internal(excp));
+ cs->exception_index = excp;
+ cpu_loop_exit(cs);
+}
+
+/* Raise an exception with the specified syndrome register value */
+void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
+ uint32_t syndrome, uint32_t target_el)
+{
+ raise_exception(env, excp, syndrome, target_el);
+}
+
+uint32_t HELPER(cpsr_read)(CPUARMState *env)
+{
+ return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
+}
+
+void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
+{
+ cpsr_write(env, val, mask, CPSRWriteByInstr);
+}
+
+/* Write the CPSR for a 32-bit exception return */
+void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
+{
+ cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);
+
+ /* Generated code has already stored the new PC value, but
+ * without masking out its low bits, because which bits need
+ * masking depends on whether we're returning to Thumb or ARM
+ * state. Do the masking now.
+ */
+ env->regs[15] &= (env->thumb ? ~1 : ~3);
+
+ arm_call_el_change_hook(arm_env_get_cpu(env));
+}
+
+/* Access to user mode registers from privileged modes. */
+uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
+{
+ uint32_t val;
+
+ if (regno == 13) {
+ val = env->banked_r13[BANK_USRSYS];
+ } else if (regno == 14) {
+ val = env->banked_r14[BANK_USRSYS];
+ } else if (regno >= 8
+ && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
+ val = env->usr_regs[regno - 8];
+ } else {
+ val = env->regs[regno];
+ }
+ return val;
+}
+
+void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
+{
+ if (regno == 13) {
+ env->banked_r13[BANK_USRSYS] = val;
+ } else if (regno == 14) {
+ env->banked_r14[BANK_USRSYS] = val;
+ } else if (regno >= 8
+ && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
+ env->usr_regs[regno - 8] = val;
+ } else {
+ env->regs[regno] = val;
+ }
+}
+
+void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
+{
+ if ((env->uncached_cpsr & CPSR_M) == mode) {
+ env->regs[13] = val;
+ } else {
+ env->banked_r13[bank_number(mode)] = val;
+ }
+}
+
+uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
+{
+ if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
+ /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
+ * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
+ */
+ raise_exception(env, EXCP_UDEF, syn_uncategorized(),
+ exception_target_el(env));
+ }
+
+ if ((env->uncached_cpsr & CPSR_M) == mode) {
+ return env->regs[13];
+ } else {
+ return env->banked_r13[bank_number(mode)];
+ }
+}
+
+static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
+ uint32_t regno)
+{
+ /* Raise an exception if the requested access is one of the UNPREDICTABLE
+ * cases; otherwise return. This broadly corresponds to the pseudocode
+ * BankedRegisterAccessValid() and SPSRAccessValid(),
+ * except that we have already handled some cases at translate time.
+ */
+ int curmode = env->uncached_cpsr & CPSR_M;
+
+ if (curmode == tgtmode) {
+ goto undef;
+ }
+
+ if (tgtmode == ARM_CPU_MODE_USR) {
+ switch (regno) {
+ case 8 ... 12:
+ if (curmode != ARM_CPU_MODE_FIQ) {
+ goto undef;
+ }
+ break;
+ case 13:
+ if (curmode == ARM_CPU_MODE_SYS) {
+ goto undef;
+ }
+ break;
+ case 14:
+ if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
+ goto undef;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (tgtmode == ARM_CPU_MODE_HYP) {
+ switch (regno) {
+ case 17: /* ELR_Hyp */
+ if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
+ goto undef;
+ }
+ break;
+ default:
+ if (curmode != ARM_CPU_MODE_MON) {
+ goto undef;
+ }
+ break;
+ }
+ }
+
+ return;
+
+undef:
+ raise_exception(env, EXCP_UDEF, syn_uncategorized(),
+ exception_target_el(env));
+}
+
+void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
+ uint32_t regno)
+{
+ msr_mrs_banked_exc_checks(env, tgtmode, regno);
+
+ switch (regno) {
+ case 16: /* SPSRs */
+ env->banked_spsr[bank_number(tgtmode)] = value;
+ break;
+ case 17: /* ELR_Hyp */
+ env->elr_el[2] = value;
+ break;
+ case 13:
+ env->banked_r13[bank_number(tgtmode)] = value;
+ break;
+ case 14:
+ env->banked_r14[bank_number(tgtmode)] = value;
+ break;
+ case 8 ... 12:
+ switch (tgtmode) {
+ case ARM_CPU_MODE_USR:
+ env->usr_regs[regno - 8] = value;
+ break;
+ case ARM_CPU_MODE_FIQ:
+ env->fiq_regs[regno - 8] = value;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
+{
+ msr_mrs_banked_exc_checks(env, tgtmode, regno);
+
+ switch (regno) {
+ case 16: /* SPSRs */
+ return env->banked_spsr[bank_number(tgtmode)];
+ case 17: /* ELR_Hyp */
+ return env->elr_el[2];
+ case 13:
+ return env->banked_r13[bank_number(tgtmode)];
+ case 14:
+ return env->banked_r14[bank_number(tgtmode)];
+ case 8 ... 12:
+ switch (tgtmode) {
+ case ARM_CPU_MODE_USR:
+ return env->usr_regs[regno - 8];
+ case ARM_CPU_MODE_FIQ:
+ return env->fiq_regs[regno - 8];
+ default:
+ g_assert_not_reached();
+ }
+ default:
+ g_assert_not_reached();
+ }
+}
+
+void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
+ uint32_t isread)
+{
+ const ARMCPRegInfo *ri = rip;
+ int target_el;
+
+ if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
+ && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
+ raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
+ }
+
+ if (!ri->accessfn) {
+ return;
+ }
+
+ switch (ri->accessfn(env, ri, isread)) {
+ case CP_ACCESS_OK:
+ return;
+ case CP_ACCESS_TRAP:
+ target_el = exception_target_el(env);
+ break;
+ case CP_ACCESS_TRAP_EL2:
+ /* Requesting a trap to EL2 when we're in EL3 or S-EL0/1 is
+ * a bug in the access function.
+ */
+ assert(!arm_is_secure(env) && arm_current_el(env) != 3);
+ target_el = 2;
+ break;
+ case CP_ACCESS_TRAP_EL3:
+ target_el = 3;
+ break;
+ case CP_ACCESS_TRAP_UNCATEGORIZED:
+ target_el = exception_target_el(env);
+ syndrome = syn_uncategorized();
+ break;
+ case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
+ target_el = 2;
+ syndrome = syn_uncategorized();
+ break;
+ case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
+ target_el = 3;
+ syndrome = syn_uncategorized();
+ break;
+ case CP_ACCESS_TRAP_FP_EL2:
+ target_el = 2;
+ /* Since we are an implementation that takes exceptions on a trapped
+ * conditional insn only if the insn has passed its condition code
+ * check, we take the IMPDEF choice to always report CV=1 COND=0xe
+ * (which is also the required value for AArch64 traps).
+ */
+ syndrome = syn_fp_access_trap(1, 0xe, false);
+ break;
+ case CP_ACCESS_TRAP_FP_EL3:
+ target_el = 3;
+ syndrome = syn_fp_access_trap(1, 0xe, false);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ raise_exception(env, EXCP_UDEF, syndrome, target_el);
+}
+
+void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
+{
+ const ARMCPRegInfo *ri = rip;
+
+ ri->writefn(env, ri, value);
+}
+
+uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
+{
+ const ARMCPRegInfo *ri = rip;
+
+ return ri->readfn(env, ri);
+}
+
+void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
+{
+ const ARMCPRegInfo *ri = rip;
+
+ ri->writefn(env, ri, value);
+}
+
+uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
+{
+ const ARMCPRegInfo *ri = rip;
+
+ return ri->readfn(env, ri);
+}
+
+void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
+{
+ /* MSR_i to update PSTATE. This is OK from EL0 only if UMA is set.
+ * Note that SPSel is never OK from EL0; we rely on handle_msr_i()
+ * to catch that case at translate time.
+ */
+ if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
+ uint32_t syndrome = syn_aa64_sysregtrap(0, extract32(op, 0, 3),
+ extract32(op, 3, 3), 4,
+ imm, 0x1f, 0);
+ raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
+ }
+
+ switch (op) {
+ case 0x05: /* SPSel */
+ update_spsel(env, imm);
+ break;
+ case 0x1e: /* DAIFSet */
+ env->daif |= (imm << 6) & PSTATE_DAIF;
+ break;
+ case 0x1f: /* DAIFClear */
+ env->daif &= ~((imm << 6) & PSTATE_DAIF);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+void HELPER(clear_pstate_ss)(CPUARMState *env)
+{
+ env->pstate &= ~PSTATE_SS;
+}
+
+void HELPER(pre_hvc)(CPUARMState *env)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ int cur_el = arm_current_el(env);
+ /* FIXME: Use actual secure state. */
+ bool secure = false;
+ bool undef;
+
+ if (arm_is_psci_call(cpu, EXCP_HVC)) {
+ /* If PSCI is enabled and this looks like a valid PSCI call then
+ * that overrides the architecturally mandated HVC behaviour.
+ */
+ return;
+ }
+
+ if (!arm_feature(env, ARM_FEATURE_EL2)) {
+ /* If EL2 doesn't exist, HVC always UNDEFs */
+ undef = true;
+ } else if (arm_feature(env, ARM_FEATURE_EL3)) {
+ /* EL3.HCE has priority over EL2.HCD. */
+ undef = !(env->cp15.scr_el3 & SCR_HCE);
+ } else {
+ undef = env->cp15.hcr_el2 & HCR_HCD;
+ }
+
+ /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
+ * For ARMv8/AArch64, HVC is allowed in EL3.
+ * Note that we've already trapped HVC from EL0 at translation
+ * time.
+ */
+ if (secure && (!is_a64(env) || cur_el == 1)) {
+ undef = true;
+ }
+
+ if (undef) {
+ raise_exception(env, EXCP_UDEF, syn_uncategorized(),
+ exception_target_el(env));
+ }
+}
+
+void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ int cur_el = arm_current_el(env);
+ bool secure = arm_is_secure(env);
+ bool smd = env->cp15.scr_el3 & SCR_SMD;
+ /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
+ * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
+ * extensions, SMD only applies to NS state.
+ * On ARMv7 without the Virtualization extensions, the SMD bit
+ * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
+ * so we need not special case this here.
+ */
+ bool undef = arm_feature(env, ARM_FEATURE_AARCH64) ? smd : smd && !secure;
+
+ if (arm_is_psci_call(cpu, EXCP_SMC)) {
+ /* If PSCI is enabled and this looks like a valid PSCI call then
+ * that overrides the architecturally mandated SMC behaviour.
+ */
+ return;
+ }
+
+ if (!arm_feature(env, ARM_FEATURE_EL3)) {
+ /* If we have no EL3 then SMC always UNDEFs */
+ undef = true;
+ } else if (!secure && cur_el == 1 && (env->cp15.hcr_el2 & HCR_TSC)) {
+ /* In NS EL1, HCR controlled routing to EL2 has priority over SMD. */
+ raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
+ }
+
+ if (undef) {
+ raise_exception(env, EXCP_UDEF, syn_uncategorized(),
+ exception_target_el(env));
+ }
+}
+
+static int el_from_spsr(uint32_t spsr)
+{
+ /* Return the exception level that this SPSR is requesting a return to,
+ * or -1 if it is invalid (an illegal return)
+ */
+ if (spsr & PSTATE_nRW) {
+ switch (spsr & CPSR_M) {
+ case ARM_CPU_MODE_USR:
+ return 0;
+ case ARM_CPU_MODE_HYP:
+ return 2;
+ case ARM_CPU_MODE_FIQ:
+ case ARM_CPU_MODE_IRQ:
+ case ARM_CPU_MODE_SVC:
+ case ARM_CPU_MODE_ABT:
+ case ARM_CPU_MODE_UND:
+ case ARM_CPU_MODE_SYS:
+ return 1;
+ case ARM_CPU_MODE_MON:
+ /* Returning to Mon from AArch64 is never possible,
+ * so this is an illegal return.
+ */
+ default:
+ return -1;
+ }
+ } else {
+ if (extract32(spsr, 1, 1)) {
+ /* Return with reserved M[1] bit set */
+ return -1;
+ }
+ if (extract32(spsr, 0, 4) == 1) {
+ /* return to EL0 with M[0] bit set */
+ return -1;
+ }
+ return extract32(spsr, 2, 2);
+ }
+}
+
+void HELPER(exception_return)(CPUARMState *env)
+{
+ int cur_el = arm_current_el(env);
+ unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
+ uint32_t spsr = env->banked_spsr[spsr_idx];
+ int new_el;
+ bool return_to_aa64 = (spsr & PSTATE_nRW) == 0;
+
+ aarch64_save_sp(env, cur_el);
+
+ env->exclusive_addr = -1;
+
+ /* We must squash the PSTATE.SS bit to zero unless both of the
+ * following hold:
+ * 1. debug exceptions are currently disabled
+ * 2. singlestep will be active in the EL we return to
+ * We check 1 here and 2 after we've done the pstate/cpsr write() to
+ * transition to the EL we're going to.
+ */
+ if (arm_generate_debug_exceptions(env)) {
+ spsr &= ~PSTATE_SS;
+ }
+
+ new_el = el_from_spsr(spsr);
+ if (new_el == -1) {
+ goto illegal_return;
+ }
+ if (new_el > cur_el
+ || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
+ /* Disallow return to an EL which is unimplemented or higher
+ * than the current one.
+ */
+ goto illegal_return;
+ }
+
+ if (new_el != 0 && arm_el_is_aa64(env, new_el) != return_to_aa64) {
+ /* Return to an EL which is configured for a different register width */
+ goto illegal_return;
+ }
+
+ if (new_el == 2 && arm_is_secure_below_el3(env)) {
+ /* Return to the non-existent secure-EL2 */
+ goto illegal_return;
+ }
+
+ if (new_el == 1 && (env->cp15.hcr_el2 & HCR_TGE)
+ && !arm_is_secure_below_el3(env)) {
+ goto illegal_return;
+ }
+
+ if (!return_to_aa64) {
+ env->aarch64 = 0;
+ /* We do a raw CPSR write because aarch64_sync_64_to_32()
+ * will sort the register banks out for us, and we've already
+ * caught all the bad-mode cases in el_from_spsr().
+ */
+ cpsr_write(env, spsr, ~0, CPSRWriteRaw);
+ if (!arm_singlestep_active(env)) {
+ env->uncached_cpsr &= ~PSTATE_SS;
+ }
+ aarch64_sync_64_to_32(env);
+
+ if (spsr & CPSR_T) {
+ env->regs[15] = env->elr_el[cur_el] & ~0x1;
+ } else {
+ env->regs[15] = env->elr_el[cur_el] & ~0x3;
+ }
+ } else {
+ env->aarch64 = 1;
+ pstate_write(env, spsr);
+ if (!arm_singlestep_active(env)) {
+ env->pstate &= ~PSTATE_SS;
+ }
+ aarch64_restore_sp(env, new_el);
+ env->pc = env->elr_el[cur_el];
+ }
+
+ arm_call_el_change_hook(arm_env_get_cpu(env));
+
+ return;
+
+illegal_return:
+ /* Illegal return events of various kinds have architecturally
+ * mandated behaviour:
+ * restore NZCV and DAIF from SPSR_ELx
+ * set PSTATE.IL
+ * restore PC from ELR_ELx
+ * no change to exception level, execution state or stack pointer
+ */
+ env->pstate |= PSTATE_IL;
+ env->pc = env->elr_el[cur_el];
+ spsr &= PSTATE_NZCV | PSTATE_DAIF;
+ spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
+ pstate_write(env, spsr);
+ if (!arm_singlestep_active(env)) {
+ env->pstate &= ~PSTATE_SS;
+ }
+}
+
+/* Return true if the linked breakpoint entry lbn passes its checks */
+static bool linked_bp_matches(ARMCPU *cpu, int lbn)
+{
+ CPUARMState *env = &cpu->env;
+ uint64_t bcr = env->cp15.dbgbcr[lbn];
+ int brps = extract32(cpu->dbgdidr, 24, 4);
+ int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
+ int bt;
+ uint32_t contextidr;
+
+ /* Links to unimplemented or non-context aware breakpoints are
+ * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
+ * as if linked to an UNKNOWN context-aware breakpoint (in which
+ * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
+ * We choose the former.
+ */
+ if (lbn > brps || lbn < (brps - ctx_cmps)) {
+ return false;
+ }
+
+ if (extract64(bcr, 0, 1) == 0) {
+ /* Linked breakpoint disabled : generate no events */
+ return false;
+ }
+
+ bt = extract64(bcr, 20, 4);
+
+ /* We match the whole register even if this is AArch32 using the
+ * short descriptor format (in which case it holds both PROCID and ASID),
+ * since we don't implement the optional v7 context ID masking.
+ */
+ contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);
+
+ switch (bt) {
+ case 3: /* linked context ID match */
+ if (arm_current_el(env) > 1) {
+ /* Context matches never fire in EL2 or (AArch64) EL3 */
+ return false;
+ }
+ return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
+ case 5: /* linked address mismatch (reserved in AArch64) */
+ case 9: /* linked VMID match (reserved if no EL2) */
+ case 11: /* linked context ID and VMID match (reserved if no EL2) */
+ default:
+ /* Links to Unlinked context breakpoints must generate no
+ * events; we choose to do the same for reserved values too.
+ */
+ return false;
+ }
+
+ return false;
+}
+
+static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
+{
+ CPUARMState *env = &cpu->env;
+ uint64_t cr;
+ int pac, hmc, ssc, wt, lbn;
+ /* Note that for watchpoints the check is against the CPU security
+ * state, not the S/NS attribute on the offending data access.
+ */
+ bool is_secure = arm_is_secure(env);
+ int access_el = arm_current_el(env);
+
+ if (is_wp) {
+ CPUWatchpoint *wp = env->cpu_watchpoint[n];
+
+ if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
+ return false;
+ }
+ cr = env->cp15.dbgwcr[n];
+ if (wp->hitattrs.user) {
+ /* The LDRT/STRT/LDT/STT "unprivileged access" instructions should
+ * match watchpoints as if they were accesses done at EL0, even if
+ * the CPU is at EL1 or higher.
+ */
+ access_el = 0;
+ }
+ } else {
+ uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
+
+ if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
+ return false;
+ }
+ cr = env->cp15.dbgbcr[n];
+ }
+ /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
+ * enabled and that the address and access type match; for breakpoints
+ * we know the address matched; check the remaining fields, including
+ * linked breakpoints. We rely on WCR and BCR having the same layout
+ * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
+ * Note that some combinations of {PAC, HMC, SSC} are reserved and
+ * must act either like some valid combination or as if the watchpoint
+ * were disabled. We choose the former, and use this together with
+ * the fact that EL3 must always be Secure and EL2 must always be
+ * Non-Secure to simplify the code slightly compared to the full
+ * table in the ARM ARM.
+ */
+ pac = extract64(cr, 1, 2);
+ hmc = extract64(cr, 13, 1);
+ ssc = extract64(cr, 14, 2);
+
+ switch (ssc) {
+ case 0:
+ break;
+ case 1:
+ case 3:
+ if (is_secure) {
+ return false;
+ }
+ break;
+ case 2:
+ if (!is_secure) {
+ return false;
+ }
+ break;
+ }
+
+ switch (access_el) {
+ case 3:
+ case 2:
+ if (!hmc) {
+ return false;
+ }
+ break;
+ case 1:
+ if (extract32(pac, 0, 1) == 0) {
+ return false;
+ }
+ break;
+ case 0:
+ if (extract32(pac, 1, 1) == 0) {
+ return false;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ wt = extract64(cr, 20, 1);
+ lbn = extract64(cr, 16, 4);
+
+ if (wt && !linked_bp_matches(cpu, lbn)) {
+ return false;
+ }
+
+ return true;
+}
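+
+/* Worked example of the {SSC, HMC, PAC} decode above, with SSC = 0b00
+ * (no security-state restriction): PAC = 0b10 (bits [2:1] of the
+ * control register) passes the check only when access_el is 0,
+ * PAC = 0b01 only at EL1, and PAC = 0b11 at either; a hit taken from
+ * EL2 or EL3 additionally requires HMC = 1.
+ */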
+
+static bool check_watchpoints(ARMCPU *cpu)
+{
+ CPUARMState *env = &cpu->env;
+ int n;
+
+ /* If watchpoints are disabled globally or we can't take debug
+ * exceptions here then watchpoint firings are ignored.
+ */
+ if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
+ || !arm_generate_debug_exceptions(env)) {
+ return false;
+ }
+
+ for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
+ if (bp_wp_matches(cpu, n, true)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool check_breakpoints(ARMCPU *cpu)
+{
+ CPUARMState *env = &cpu->env;
+ int n;
+
+ /* If breakpoints are disabled globally or we can't take debug
+ * exceptions here then breakpoint firings are ignored.
+ */
+ if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
+ || !arm_generate_debug_exceptions(env)) {
+ return false;
+ }
+
+ for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
+ if (bp_wp_matches(cpu, n, false)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void HELPER(check_breakpoints)(CPUARMState *env)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ if (check_breakpoints(cpu)) {
+ HELPER(exception_internal)(env, EXCP_DEBUG);
+ }
+}
+
+bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
+{
+ /* Called by core code when a CPU watchpoint fires; need to check if this
+ * is also an architectural watchpoint match.
+ */
+ ARMCPU *cpu = ARM_CPU(cs);
+
+ return check_watchpoints(cpu);
+}
+
+void arm_debug_excp_handler(CPUState *cs)
+{
+ /* Called by core code when a watchpoint or breakpoint fires;
+ * need to check which one and raise the appropriate exception.
+ */
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ CPUWatchpoint *wp_hit = cs->watchpoint_hit;
+
+ if (wp_hit) {
+ if (wp_hit->flags & BP_CPU) {
+ bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
+ bool same_el = arm_debug_target_el(env) == arm_current_el(env);
+
+ cs->watchpoint_hit = NULL;
+
+ if (extended_addresses_enabled(env)) {
+ env->exception.fsr = (1 << 9) | 0x22;
+ } else {
+ env->exception.fsr = 0x2;
+ }
+ env->exception.vaddress = wp_hit->hitaddr;
+ raise_exception(env, EXCP_DATA_ABORT,
+ syn_watchpoint(same_el, 0, wnr),
+ arm_debug_target_el(env));
+ }
+ } else {
+ uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
+ bool same_el = (arm_debug_target_el(env) == arm_current_el(env));
+
+ /* (1) GDB breakpoints should be handled first.
+ * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
+ * since singlestep is also done by generating a debug internal
+ * exception.
+ */
+ if (cpu_breakpoint_test(cs, pc, BP_GDB)
+ || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
+ return;
+ }
+
+ if (extended_addresses_enabled(env)) {
+ env->exception.fsr = (1 << 9) | 0x22;
+ } else {
+ env->exception.fsr = 0x2;
+ }
+ /* FAR is UNKNOWN, so doesn't need setting */
+ raise_exception(env, EXCP_PREFETCH_ABORT,
+ syn_breakpoint(same_el),
+ arm_debug_target_el(env));
+ }
+}
+
+/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
+ The only way to do that in TCG is a conditional branch, which clobbers
+ all our temporaries. For now implement these as helper functions. */
+
+/* Similarly for variable shift instructions. */
+
+uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
+{
+ int shift = i & 0xff;
+ if (shift >= 32) {
+ if (shift == 32) {
+ env->CF = x & 1;
+ } else {
+ env->CF = 0;
+ }
+ return 0;
+ } else if (shift != 0) {
+ env->CF = (x >> (32 - shift)) & 1;
+ return x << shift;
+ }
+ return x;
+}
+
+uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
+{
+ int shift = i & 0xff;
+ if (shift >= 32) {
+ if (shift == 32) {
+ env->CF = (x >> 31) & 1;
+ } else {
+ env->CF = 0;
+ }
+ return 0;
+ } else if (shift != 0) {
+ env->CF = (x >> (shift - 1)) & 1;
+ return x >> shift;
+ }
+ return x;
+}
+
+uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
+{
+ int shift = i & 0xff;
+ if (shift >= 32) {
+ env->CF = (x >> 31) & 1;
+ return (int32_t)x >> 31;
+ } else if (shift != 0) {
+ env->CF = (x >> (shift - 1)) & 1;
+ return (int32_t)x >> shift;
+ }
+ return x;
+}
+
+uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
+{
+ int shift1, shift;
+ shift1 = i & 0xff;
+ shift = shift1 & 0x1f;
+ if (shift == 0) {
+ if (shift1 != 0) {
+ env->CF = (x >> 31) & 1;
+ }
+ return x;
+ } else {
+ env->CF = (x >> (shift - 1)) & 1;
+ return ((uint32_t)x >> shift) | (x << (32 - shift));
+ }
+}
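+
+/* A small self-contained sketch of the carry rule these helpers
+ * implement (illustrative only; it does not touch CPUARMState): for a
+ * left shift by 1..31 the carry out is the last bit shifted past bit 31,
+ * i.e. bit (32 - shift) of the input:
+ *
+ *     static uint32_t lsl_carry_sketch(uint32_t x, unsigned shift)
+ *     {
+ *         return (x >> (32 - shift)) & 1;   // only meaningful for 1..31
+ *     }
+ *
+ * so lsl_carry_sketch(0x80000001, 1) == 1, matching shl_cc() setting
+ * env->CF to 1 before returning the shifted value 0x00000002.
+ */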
diff --git a/target/arm/psci.c b/target/arm/psci.c
new file mode 100644
index 0000000000..14316eb0ae
--- /dev/null
+++ b/target/arm/psci.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2014 - Linaro
+ * Author: Rob Herring <rob.herring@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "kvm-consts.h"
+#include "sysemu/sysemu.h"
+#include "internals.h"
+#include "arm-powerctl.h"
+#include "exec/exec-all.h"
+
+bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
+{
+ /* Return true if the r0/x0 value indicates a PSCI call and
+ * the exception type matches the configured PSCI conduit. This is
+ * called before the SMC/HVC instruction is executed, to decide whether
+ * we should treat it as a PSCI call or with the architecturally
+ * defined behaviour for an SMC or HVC (which might be UNDEF or trap
+ * to EL2 or to EL3).
+ */
+ CPUARMState *env = &cpu->env;
+ uint64_t param = is_a64(env) ? env->xregs[0] : env->regs[0];
+
+ switch (excp_type) {
+ case EXCP_HVC:
+ if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_HVC) {
+ return false;
+ }
+ break;
+ case EXCP_SMC:
+ if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
+ return false;
+ }
+ break;
+ default:
+ return false;
+ }
+
+ switch (param) {
+ case QEMU_PSCI_0_2_FN_PSCI_VERSION:
+ case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
+ case QEMU_PSCI_0_2_FN_AFFINITY_INFO:
+ case QEMU_PSCI_0_2_FN64_AFFINITY_INFO:
+ case QEMU_PSCI_0_2_FN_SYSTEM_RESET:
+ case QEMU_PSCI_0_2_FN_SYSTEM_OFF:
+ case QEMU_PSCI_0_1_FN_CPU_ON:
+ case QEMU_PSCI_0_2_FN_CPU_ON:
+ case QEMU_PSCI_0_2_FN64_CPU_ON:
+ case QEMU_PSCI_0_1_FN_CPU_OFF:
+ case QEMU_PSCI_0_2_FN_CPU_OFF:
+ case QEMU_PSCI_0_1_FN_CPU_SUSPEND:
+ case QEMU_PSCI_0_2_FN_CPU_SUSPEND:
+ case QEMU_PSCI_0_2_FN64_CPU_SUSPEND:
+ case QEMU_PSCI_0_1_FN_MIGRATE:
+ case QEMU_PSCI_0_2_FN_MIGRATE:
+ return true;
+ default:
+ return false;
+ }
+}
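+
+/* Worked example of the dispatch above: a guest with the HVC conduit
+ * configured that wants to power off loads x0 (or r0) with
+ * QEMU_PSCI_0_2_FN_SYSTEM_OFF (0x84000008 in PSCI 0.2 numbering) and
+ * executes HVC #0. The resulting EXCP_HVC plus the recognised function
+ * ID make this function return true, so the call is routed to
+ * arm_handle_psci_call() rather than to the architectural HVC handling
+ * in pre_hvc().
+ */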
+
+void arm_handle_psci_call(ARMCPU *cpu)
+{
+ /*
+ * This function partially implements the logic for dispatching Power State
+ * Coordination Interface (PSCI) calls (as described in ARM DEN 0022B.b),
+ * to the extent required for bringing up and taking down secondary cores,
+ * and for handling reset and poweroff requests.
+ * Additional information about the calling convention used is available in
+ * the document 'SMC Calling Convention' (ARM DEN 0028)
+ */
+ CPUARMState *env = &cpu->env;
+ uint64_t param[4];
+ uint64_t context_id, mpidr;
+ target_ulong entry;
+ int32_t ret = 0;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ /*
+ * All PSCI functions take explicit 32-bit or native int sized
+ * arguments so we can simply zero-extend all arguments regardless
+ * of which exact function we are about to call.
+ */
+ param[i] = is_a64(env) ? env->xregs[i] : env->regs[i];
+ }
+
+ if ((param[0] & QEMU_PSCI_0_2_64BIT) && !is_a64(env)) {
+ ret = QEMU_PSCI_RET_INVALID_PARAMS;
+ goto err;
+ }
+
+ switch (param[0]) {
+ CPUState *target_cpu_state;
+ ARMCPU *target_cpu;
+
+ case QEMU_PSCI_0_2_FN_PSCI_VERSION:
+ ret = QEMU_PSCI_0_2_RET_VERSION_0_2;
+ break;
+ case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
+ ret = QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED; /* No trusted OS */
+ break;
+ case QEMU_PSCI_0_2_FN_AFFINITY_INFO:
+ case QEMU_PSCI_0_2_FN64_AFFINITY_INFO:
+ mpidr = param[1];
+
+ switch (param[2]) {
+ case 0:
+ target_cpu_state = arm_get_cpu_by_id(mpidr);
+ if (!target_cpu_state) {
+ ret = QEMU_PSCI_RET_INVALID_PARAMS;
+ break;
+ }
+ target_cpu = ARM_CPU(target_cpu_state);
+ ret = target_cpu->powered_off ? 1 : 0;
+ break;
+ default:
+ /* Everything above affinity level 0 is always on. */
+ ret = 0;
+ }
+ break;
+ case QEMU_PSCI_0_2_FN_SYSTEM_RESET:
+ qemu_system_reset_request();
+ /* QEMU reset and shutdown are async requests, but PSCI
+ * mandates that we never return from the reset/shutdown
+ * call, so power the CPU off now so it doesn't execute
+ * anything further.
+ */
+ goto cpu_off;
+ case QEMU_PSCI_0_2_FN_SYSTEM_OFF:
+ qemu_system_shutdown_request();
+ goto cpu_off;
+ case QEMU_PSCI_0_1_FN_CPU_ON:
+ case QEMU_PSCI_0_2_FN_CPU_ON:
+ case QEMU_PSCI_0_2_FN64_CPU_ON:
+ mpidr = param[1];
+ entry = param[2];
+ context_id = param[3];
+ /*
+ * The PSCI spec mandates that newly brought up CPUs enter the
+ * exception level of the caller in the same execution mode as
+ * the caller, with context_id in x0/r0, respectively.
+ */
+ ret = arm_set_cpu_on(mpidr, entry, context_id, arm_current_el(env),
+ is_a64(env));
+ break;
+ case QEMU_PSCI_0_1_FN_CPU_OFF:
+ case QEMU_PSCI_0_2_FN_CPU_OFF:
+ goto cpu_off;
+ case QEMU_PSCI_0_1_FN_CPU_SUSPEND:
+ case QEMU_PSCI_0_2_FN_CPU_SUSPEND:
+ case QEMU_PSCI_0_2_FN64_CPU_SUSPEND:
+ /* Affinity levels are not supported in QEMU */
+ if (param[1] & 0xfffe0000) {
+ ret = QEMU_PSCI_RET_INVALID_PARAMS;
+ break;
+ }
+ /* Powerdown is not supported, we always go into WFI */
+ if (is_a64(env)) {
+ env->xregs[0] = 0;
+ } else {
+ env->regs[0] = 0;
+ }
+ helper_wfi(env);
+ break;
+ case QEMU_PSCI_0_1_FN_MIGRATE:
+ case QEMU_PSCI_0_2_FN_MIGRATE:
+ ret = QEMU_PSCI_RET_NOT_SUPPORTED;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+err:
+ if (is_a64(env)) {
+ env->xregs[0] = ret;
+ } else {
+ env->regs[0] = ret;
+ }
+ return;
+
+cpu_off:
+ ret = arm_set_cpu_off(cpu->mp_affinity);
+ /* notreached */
+ /* sanity check in case something failed */
+ assert(ret == QEMU_ARM_POWERCTL_RET_SUCCESS);
+}
diff --git a/target/arm/trace-events b/target/arm/trace-events
new file mode 100644
index 0000000000..e21c84fc6f
--- /dev/null
+++ b/target/arm/trace-events
@@ -0,0 +1,10 @@
+# See docs/tracing.txt for syntax documentation.
+
+# target/arm/helper.c
+arm_gt_recalc(int timer, int irqstate, uint64_t nexttick) "gt recalc: timer %d irqstate %d next tick %" PRIx64
+arm_gt_recalc_disabled(int timer) "gt recalc: timer %d irqstate 0 timer disabled"
+arm_gt_cval_write(int timer, uint64_t value) "gt_cval_write: timer %d value %" PRIx64
+arm_gt_tval_write(int timer, uint64_t value) "gt_tval_write: timer %d value %" PRIx64
+arm_gt_ctl_write(int timer, uint64_t value) "gt_ctl_write: timer %d value %" PRIx64
+arm_gt_imask_toggle(int timer, int irqstate) "gt_ctl_write: timer %d IMASK toggle, new irqstate %d"
+arm_gt_cntvoff_write(uint64_t value) "gt_cntvoff_write: value %" PRIx64
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
new file mode 100644
index 0000000000..6dc27a6115
--- /dev/null
+++ b/target/arm/translate-a64.c
@@ -0,0 +1,11430 @@
+/*
+ * AArch64 translation
+ *
+ * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "tcg-op.h"
+#include "qemu/log.h"
+#include "arm_ldst.h"
+#include "translate.h"
+#include "internals.h"
+#include "qemu/host-utils.h"
+
+#include "exec/semihost.h"
+#include "exec/gen-icount.h"
+
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+#include "exec/log.h"
+
+#include "trace-tcg.h"
+
+static TCGv_i64 cpu_X[32];
+static TCGv_i64 cpu_pc;
+
+/* Load/store exclusive handling */
+static TCGv_i64 cpu_exclusive_high;
+static TCGv_i64 cpu_reg(DisasContext *s, int reg);
+
+static const char *regnames[] = {
+ "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
+ "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
+ "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
+ "x24", "x25", "x26", "x27", "x28", "x29", "lr", "sp"
+};
+
+enum a64_shift_type {
+ A64_SHIFT_TYPE_LSL = 0,
+ A64_SHIFT_TYPE_LSR = 1,
+ A64_SHIFT_TYPE_ASR = 2,
+ A64_SHIFT_TYPE_ROR = 3
+};
+
+/* Table based decoder typedefs - used when the relevant bits for decode
+ * are too awkwardly scattered across the instruction (eg SIMD).
+ */
+typedef void AArch64DecodeFn(DisasContext *s, uint32_t insn);
+
+typedef struct AArch64DecodeTable {
+ uint32_t pattern;
+ uint32_t mask;
+ AArch64DecodeFn *disas_fn;
+} AArch64DecodeTable;
+
+/* Function prototype for gen_ functions for calling Neon helpers */
+typedef void NeonGenOneOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32);
+typedef void NeonGenTwoOpFn(TCGv_i32, TCGv_i32, TCGv_i32);
+typedef void NeonGenTwoOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
+typedef void NeonGenTwo64OpFn(TCGv_i64, TCGv_i64, TCGv_i64);
+typedef void NeonGenTwo64OpEnvFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64);
+typedef void NeonGenNarrowFn(TCGv_i32, TCGv_i64);
+typedef void NeonGenNarrowEnvFn(TCGv_i32, TCGv_ptr, TCGv_i64);
+typedef void NeonGenWidenFn(TCGv_i64, TCGv_i32);
+typedef void NeonGenTwoSingleOPFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
+typedef void NeonGenTwoDoubleOPFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr);
+typedef void NeonGenOneOpFn(TCGv_i64, TCGv_i64);
+typedef void CryptoTwoOpEnvFn(TCGv_ptr, TCGv_i32, TCGv_i32);
+typedef void CryptoThreeOpEnvFn(TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32);
+
+/* initialize TCG globals. */
+void a64_translate_init(void)
+{
+ int i;
+
+ cpu_pc = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUARMState, pc),
+ "pc");
+ for (i = 0; i < 32; i++) {
+ cpu_X[i] = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUARMState, xregs[i]),
+ regnames[i]);
+ }
+
+ cpu_exclusive_high = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUARMState, exclusive_high), "exclusive_high");
+}
+
+static inline ARMMMUIdx get_a64_user_mem_index(DisasContext *s)
+{
+ /* Return the mmu_idx to use for A64 "unprivileged load/store" insns:
+ * if EL1, access as if EL0; otherwise access at current EL
+ */
+ switch (s->mmu_idx) {
+ case ARMMMUIdx_S12NSE1:
+ return ARMMMUIdx_S12NSE0;
+ case ARMMMUIdx_S1SE1:
+ return ARMMMUIdx_S1SE0;
+ case ARMMMUIdx_S2NS:
+ g_assert_not_reached();
+ default:
+ return s->mmu_idx;
+ }
+}
+
+void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
+ fprintf_function cpu_fprintf, int flags)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ uint32_t psr = pstate_read(env);
+ int i;
+ int el = arm_current_el(env);
+ const char *ns_status;
+
+ cpu_fprintf(f, "PC=%016"PRIx64" SP=%016"PRIx64"\n",
+ env->pc, env->xregs[31]);
+ for (i = 0; i < 31; i++) {
+ cpu_fprintf(f, "X%02d=%016"PRIx64, i, env->xregs[i]);
+ if ((i % 4) == 3) {
+ cpu_fprintf(f, "\n");
+ } else {
+ cpu_fprintf(f, " ");
+ }
+ }
+
+ if (arm_feature(env, ARM_FEATURE_EL3) && el != 3) {
+ ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
+ } else {
+ ns_status = "";
+ }
+
+ cpu_fprintf(f, "\nPSTATE=%08x %c%c%c%c %sEL%d%c\n",
+ psr,
+ psr & PSTATE_N ? 'N' : '-',
+ psr & PSTATE_Z ? 'Z' : '-',
+ psr & PSTATE_C ? 'C' : '-',
+ psr & PSTATE_V ? 'V' : '-',
+ ns_status,
+ el,
+ psr & PSTATE_SP ? 'h' : 't');
+
+ if (flags & CPU_DUMP_FPU) {
+ int numvfpregs = 32;
+ for (i = 0; i < numvfpregs; i += 2) {
+ uint64_t vlo = float64_val(env->vfp.regs[i * 2]);
+ uint64_t vhi = float64_val(env->vfp.regs[(i * 2) + 1]);
+ cpu_fprintf(f, "q%02d=%016" PRIx64 ":%016" PRIx64 " ",
+ i, vhi, vlo);
+ vlo = float64_val(env->vfp.regs[(i + 1) * 2]);
+ vhi = float64_val(env->vfp.regs[((i + 1) * 2) + 1]);
+ cpu_fprintf(f, "q%02d=%016" PRIx64 ":%016" PRIx64 "\n",
+ i + 1, vhi, vlo);
+ }
+ cpu_fprintf(f, "FPCR: %08x FPSR: %08x\n",
+ vfp_get_fpcr(env), vfp_get_fpsr(env));
+ }
+}
+
+void gen_a64_set_pc_im(uint64_t val)
+{
+ tcg_gen_movi_i64(cpu_pc, val);
+}
+
+/* Load the PC from a generic TCG variable.
+ *
+ * If address tagging is enabled via the TCR TBI bits, then loading
+ * an address into the PC will clear out any tag from it:
+ * + for EL2 and EL3 there is only one TBI bit, and if it is set
+ * then the address is zero-extended, clearing bits [63:56]
+ * + for EL0 and EL1, TBI0 controls addresses with bit 55 == 0
+ * and TBI1 controls addresses with bit 55 == 1.
+ * If the appropriate TBI bit is set for the address then
+ * the address is sign-extended from bit 55 into bits [63:56]
+ *
+ * We can avoid doing this for relative-branches, because the
+ * PC + offset can never overflow into the tag bits (assuming
+ * that virtual addresses are less than 56 bits wide, as they
+ * are currently), but we must handle it for branch-to-register.
+ */
+static void gen_a64_set_pc(DisasContext *s, TCGv_i64 src)
+{
+ if (s->current_el <= 1) {
+ /* If neither or both TBI bits are set, there is no need to examine
+ * bit 55 of the address at run time; we can generate the required
+ * code unconditionally. If only one of them is set, we must test
+ * bit 55 via generated code.
+ */
+ if (s->tbi0 && s->tbi1) {
+ TCGv_i64 tmp_reg = tcg_temp_new_i64();
+ /* Both bits set, sign extension from bit 55 into [63:56] will
+ * cover both cases
+ */
+ tcg_gen_shli_i64(tmp_reg, src, 8);
+ tcg_gen_sari_i64(cpu_pc, tmp_reg, 8);
+ tcg_temp_free_i64(tmp_reg);
+ } else if (!s->tbi0 && !s->tbi1) {
+ /* Neither bit set, just load it as-is */
+ tcg_gen_mov_i64(cpu_pc, src);
+ } else {
+ TCGv_i64 tcg_tmpval = tcg_temp_new_i64();
+ TCGv_i64 tcg_bit55 = tcg_temp_new_i64();
+ TCGv_i64 tcg_zero = tcg_const_i64(0);
+
+ tcg_gen_andi_i64(tcg_bit55, src, (1ull << 55));
+
+ if (s->tbi0) {
+ /* tbi0==1, tbi1==0, so 0-fill upper byte if bit 55 = 0 */
+ tcg_gen_andi_i64(tcg_tmpval, src,
+ 0x00FFFFFFFFFFFFFFull);
+ tcg_gen_movcond_i64(TCG_COND_EQ, cpu_pc, tcg_bit55, tcg_zero,
+ tcg_tmpval, src);
+ } else {
+ /* tbi0==0, tbi1==1, so 1-fill upper byte if bit 55 = 1 */
+ tcg_gen_ori_i64(tcg_tmpval, src,
+ 0xFF00000000000000ull);
+ tcg_gen_movcond_i64(TCG_COND_NE, cpu_pc, tcg_bit55, tcg_zero,
+ tcg_tmpval, src);
+ }
+ tcg_temp_free_i64(tcg_zero);
+ tcg_temp_free_i64(tcg_bit55);
+ tcg_temp_free_i64(tcg_tmpval);
+ }
+ } else { /* EL > 1 */
+ if (s->tbi0) {
+ /* Force tag byte to all zero */
+ tcg_gen_andi_i64(cpu_pc, src, 0x00FFFFFFFFFFFFFFull);
+ } else {
+ /* Load unmodified address */
+ tcg_gen_mov_i64(cpu_pc, src);
+ }
+ }
+}
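+
+/* A quick worked example of the tag stripping above, assuming both TBI0
+ * and TBI1 are set at EL0/EL1 (the shl/sar-by-8 path): a branch target
+ * of 0xaa00000000001000 has bit 55 clear, so the tag byte is zeroed and
+ * the PC becomes 0x0000000000001000; a target of 0x55ffffffffff0000 has
+ * bit 55 set, so bits [63:56] are filled with ones and the PC becomes
+ * 0xffffffffffff0000. Relative branches never reach this path, as noted
+ * above.
+ */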
+
+typedef struct DisasCompare64 {
+ TCGCond cond;
+ TCGv_i64 value;
+} DisasCompare64;
+
+static void a64_test_cc(DisasCompare64 *c64, int cc)
+{
+ DisasCompare c32;
+
+ arm_test_cc(&c32, cc);
+
+ /* Sign-extend the 32-bit value so that the GE/LT comparisons work
+ * properly. The NE/EQ comparisons are also fine with this choice. */
+ c64->cond = c32.cond;
+ c64->value = tcg_temp_new_i64();
+ tcg_gen_ext_i32_i64(c64->value, c32.value);
+
+ arm_free_cc(&c32);
+}
+
+static void a64_free_cc(DisasCompare64 *c64)
+{
+ tcg_temp_free_i64(c64->value);
+}
+
+static void gen_exception_internal(int excp)
+{
+ TCGv_i32 tcg_excp = tcg_const_i32(excp);
+
+ assert(excp_is_internal(excp));
+ gen_helper_exception_internal(cpu_env, tcg_excp);
+ tcg_temp_free_i32(tcg_excp);
+}
+
+static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
+{
+ TCGv_i32 tcg_excp = tcg_const_i32(excp);
+ TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
+ TCGv_i32 tcg_el = tcg_const_i32(target_el);
+
+ gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
+ tcg_syn, tcg_el);
+ tcg_temp_free_i32(tcg_el);
+ tcg_temp_free_i32(tcg_syn);
+ tcg_temp_free_i32(tcg_excp);
+}
+
+static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
+{
+ gen_a64_set_pc_im(s->pc - offset);
+ gen_exception_internal(excp);
+ s->is_jmp = DISAS_EXC;
+}
+
+static void gen_exception_insn(DisasContext *s, int offset, int excp,
+ uint32_t syndrome, uint32_t target_el)
+{
+ gen_a64_set_pc_im(s->pc - offset);
+ gen_exception(excp, syndrome, target_el);
+ s->is_jmp = DISAS_EXC;
+}
+
+static void gen_ss_advance(DisasContext *s)
+{
+ /* If the singlestep state is Active-not-pending, advance to
+ * Active-pending.
+ */
+ if (s->ss_active) {
+ s->pstate_ss = 0;
+ gen_helper_clear_pstate_ss(cpu_env);
+ }
+}
+
+static void gen_step_complete_exception(DisasContext *s)
+{
+ /* We just completed a step of an insn. Move from Active-not-pending
+ * to Active-pending, and then also take the swstep exception.
+ * This corresponds to making the (IMPDEF) choice to prioritize
+ * swstep exceptions over asynchronous exceptions taken to an exception
+ * level where debug is disabled. This choice has the advantage that
+ * we do not need to maintain internal state corresponding to the
+ * ISV/EX syndrome bits between completion of the step and generation
+ * of the exception, and our syndrome information is always correct.
+ */
+ gen_ss_advance(s);
+ gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
+ default_exception_el(s));
+ s->is_jmp = DISAS_EXC;
+}
+
+static inline bool use_goto_tb(DisasContext *s, int n, uint64_t dest)
+{
+ /* No direct tb linking with singlestep (either QEMU's or the ARM
+ * debug architecture kind) or deterministic io
+ */
+ if (s->singlestep_enabled || s->ss_active || (s->tb->cflags & CF_LAST_IO)) {
+ return false;
+ }
+
+#ifndef CONFIG_USER_ONLY
+ /* Only link tbs from inside the same guest page */
+ if ((s->tb->pc & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
+ return false;
+ }
+#endif
+
+ return true;
+}
+
+static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest)
+{
+ TranslationBlock *tb;
+
+ tb = s->tb;
+ if (use_goto_tb(s, n, dest)) {
+ tcg_gen_goto_tb(n);
+ gen_a64_set_pc_im(dest);
+ tcg_gen_exit_tb((intptr_t)tb + n);
+ s->is_jmp = DISAS_TB_JUMP;
+ } else {
+ gen_a64_set_pc_im(dest);
+ if (s->ss_active) {
+ gen_step_complete_exception(s);
+ } else if (s->singlestep_enabled) {
+ gen_exception_internal(EXCP_DEBUG);
+ } else {
+ tcg_gen_exit_tb(0);
+ s->is_jmp = DISAS_TB_JUMP;
+ }
+ }
+}
+
+static void disas_set_insn_syndrome(DisasContext *s, uint32_t syn)
+{
+ /* We don't need to save all of the syndrome so we mask and shift
+ * out unneeded bits to help the sleb128 encoder do a better job.
+ */
+ syn &= ARM_INSN_START_WORD2_MASK;
+ syn >>= ARM_INSN_START_WORD2_SHIFT;
+
+ /* We check and clear insn_start_idx to catch multiple updates. */
+ assert(s->insn_start_idx != 0);
+ tcg_set_insn_param(s->insn_start_idx, 2, syn);
+ s->insn_start_idx = 0;
+}
+
+static void unallocated_encoding(DisasContext *s)
+{
+ /* Unallocated and reserved encodings are uncategorized */
+ gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
+ default_exception_el(s));
+}
+
+#define unsupported_encoding(s, insn) \
+ do { \
+ qemu_log_mask(LOG_UNIMP, \
+ "%s:%d: unsupported instruction encoding 0x%08x " \
+ "at pc=%016" PRIx64 "\n", \
+ __FILE__, __LINE__, insn, s->pc - 4); \
+ unallocated_encoding(s); \
+ } while (0)
+
+static void init_tmp_a64_array(DisasContext *s)
+{
+#ifdef CONFIG_DEBUG_TCG
+ int i;
+ for (i = 0; i < ARRAY_SIZE(s->tmp_a64); i++) {
+ TCGV_UNUSED_I64(s->tmp_a64[i]);
+ }
+#endif
+ s->tmp_a64_count = 0;
+}
+
+static void free_tmp_a64(DisasContext *s)
+{
+ int i;
+ for (i = 0; i < s->tmp_a64_count; i++) {
+ tcg_temp_free_i64(s->tmp_a64[i]);
+ }
+ init_tmp_a64_array(s);
+}
+
+static TCGv_i64 new_tmp_a64(DisasContext *s)
+{
+ assert(s->tmp_a64_count < TMP_A64_MAX);
+ return s->tmp_a64[s->tmp_a64_count++] = tcg_temp_new_i64();
+}
+
+static TCGv_i64 new_tmp_a64_zero(DisasContext *s)
+{
+ TCGv_i64 t = new_tmp_a64(s);
+ tcg_gen_movi_i64(t, 0);
+ return t;
+}
+
+/*
+ * Register access functions
+ *
+ * These functions are used for directly accessing a register where
+ * changes to the final register value are likely to be made. If you
+ * need to use a register for temporary calculation (e.g. index type
+ * operations) use the read_* form.
+ *
+ * B1.2.1 Register mappings
+ *
+ * In instruction register encoding 31 can refer to ZR (zero register) or
+ * the SP (stack pointer) depending on context. In QEMU's case we map SP
+ * to cpu_X[31] and ZR accesses to a temporary which can be discarded.
+ * This is the point of the _sp forms.
+ */
+static TCGv_i64 cpu_reg(DisasContext *s, int reg)
+{
+ if (reg == 31) {
+ return new_tmp_a64_zero(s);
+ } else {
+ return cpu_X[reg];
+ }
+}
+
+/* register access for when 31 == SP */
+static TCGv_i64 cpu_reg_sp(DisasContext *s, int reg)
+{
+ return cpu_X[reg];
+}
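+
+/* For example (a sketch of how the decoders below are expected to use
+ * these accessors): for "ADD X0, X1, XZR" the Rm field is 31 and the
+ * operand is read through cpu_reg(s, 31), which hands back a fresh zero
+ * temporary, while "ADD SP, SP, #0x10" also encodes register 31 but the
+ * instruction uses cpu_reg_sp(), so the access really targets cpu_X[31]
+ * (the stack pointer). Which accessor applies is a property of each
+ * instruction's encoding.
+ */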
+
+/* read a cpu register in 32bit/64bit mode. Returns a TCGv_i64
+ * representing the register contents. This TCGv is an auto-freed
+ * temporary so it need not be explicitly freed, and may be modified.
+ */
+static TCGv_i64 read_cpu_reg(DisasContext *s, int reg, int sf)
+{
+ TCGv_i64 v = new_tmp_a64(s);
+ if (reg != 31) {
+ if (sf) {
+ tcg_gen_mov_i64(v, cpu_X[reg]);
+ } else {
+ tcg_gen_ext32u_i64(v, cpu_X[reg]);
+ }
+ } else {
+ tcg_gen_movi_i64(v, 0);
+ }
+ return v;
+}
+
+static TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
+{
+ TCGv_i64 v = new_tmp_a64(s);
+ if (sf) {
+ tcg_gen_mov_i64(v, cpu_X[reg]);
+ } else {
+ tcg_gen_ext32u_i64(v, cpu_X[reg]);
+ }
+ return v;
+}
+
+/* We should have at some point before trying to access an FP register
+ * done the necessary access check, so assert that
+ * (a) we did the check and
+ * (b) we didn't then just plough ahead anyway if it failed.
+ * Print the instruction pattern in the abort message so we can figure
+ * out what we need to fix if a user encounters this problem in the wild.
+ */
+static inline void assert_fp_access_checked(DisasContext *s)
+{
+#ifdef CONFIG_DEBUG_TCG
+ if (unlikely(!s->fp_access_checked || s->fp_excp_el)) {
+ fprintf(stderr, "target-arm: FP access check missing for "
+ "instruction 0x%08x\n", s->insn);
+ abort();
+ }
+#endif
+}
+
+/* Return the offset into CPUARMState of an element of specified
+ * size, 'element' places in from the least significant end of
+ * the FP/vector register Qn.
+ */
+static inline int vec_reg_offset(DisasContext *s, int regno,
+ int element, TCGMemOp size)
+{
+ int offs = offsetof(CPUARMState, vfp.regs[regno * 2]);
+#ifdef HOST_WORDS_BIGENDIAN
+ /* This is complicated slightly because vfp.regs[2n] is
+ * still the low half and vfp.regs[2n+1] the high half
+ * of the 128 bit vector, even on big endian systems.
+ * Calculate the offset assuming a fully bigendian 128 bits,
+ * then XOR to account for the order of the two 64 bit halves.
+ */
+ offs += (16 - ((element + 1) * (1 << size)));
+ offs ^= 8;
+#else
+ offs += element * (1 << size);
+#endif
+ assert_fp_access_checked(s);
+ return offs;
+}
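+
+/* Worked example of the offset arithmetic above for element 0 of Qn
+ * with size MO_32 (so 1 << size == 4): on a little-endian host the
+ * offset is simply base + 0; on a big-endian host we first compute
+ * base + (16 - 4) = base + 12 as if the whole 128 bits were stored
+ * big-endian, then XOR with 8 to land at base + 4, which is where bits
+ * [31:0] of the low 64-bit half (vfp.regs[2n]) sit in host memory.
+ */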
+
+/* Return the offset into CPUARMState of a slice (from
+ * the least significant end) of FP register Qn (ie
+ * Dn, Sn, Hn or Bn).
+ * (Note that this is not the same mapping as for A32; see cpu.h)
+ */
+static inline int fp_reg_offset(DisasContext *s, int regno, TCGMemOp size)
+{
+ int offs = offsetof(CPUARMState, vfp.regs[regno * 2]);
+#ifdef HOST_WORDS_BIGENDIAN
+ offs += (8 - (1 << size));
+#endif
+ assert_fp_access_checked(s);
+ return offs;
+}
+
+/* Offset of the high half of the 128 bit vector Qn */
+static inline int fp_reg_hi_offset(DisasContext *s, int regno)
+{
+ assert_fp_access_checked(s);
+ return offsetof(CPUARMState, vfp.regs[regno * 2 + 1]);
+}
+
+/* Convenience accessors for reading and writing single and double
+ * FP registers. Writing clears the upper parts of the associated
+ * 128 bit vector register, as required by the architecture.
+ * Note that unlike the GP register accessors, the values returned
+ * by the read functions must be manually freed.
+ */
+static TCGv_i64 read_fp_dreg(DisasContext *s, int reg)
+{
+ TCGv_i64 v = tcg_temp_new_i64();
+
+ tcg_gen_ld_i64(v, cpu_env, fp_reg_offset(s, reg, MO_64));
+ return v;
+}
+
+static TCGv_i32 read_fp_sreg(DisasContext *s, int reg)
+{
+ TCGv_i32 v = tcg_temp_new_i32();
+
+ tcg_gen_ld_i32(v, cpu_env, fp_reg_offset(s, reg, MO_32));
+ return v;
+}
+
+static void write_fp_dreg(DisasContext *s, int reg, TCGv_i64 v)
+{
+ TCGv_i64 tcg_zero = tcg_const_i64(0);
+
+ tcg_gen_st_i64(v, cpu_env, fp_reg_offset(s, reg, MO_64));
+ tcg_gen_st_i64(tcg_zero, cpu_env, fp_reg_hi_offset(s, reg));
+ tcg_temp_free_i64(tcg_zero);
+}
+
+static void write_fp_sreg(DisasContext *s, int reg, TCGv_i32 v)
+{
+ TCGv_i64 tmp = tcg_temp_new_i64();
+
+ tcg_gen_extu_i32_i64(tmp, v);
+ write_fp_dreg(s, reg, tmp);
+ tcg_temp_free_i64(tmp);
+}
+
+static TCGv_ptr get_fpstatus_ptr(void)
+{
+ TCGv_ptr statusptr = tcg_temp_new_ptr();
+ int offset;
+
+ /* In A64 all instructions (both FP and Neon) use the FPCR;
+ * there is no equivalent of the A32 Neon "standard FPSCR value"
+ * and all operations use vfp.fp_status.
+ */
+ offset = offsetof(CPUARMState, vfp.fp_status);
+ tcg_gen_addi_ptr(statusptr, cpu_env, offset);
+ return statusptr;
+}
+
+/* Set ZF and NF based on a 64 bit result. This is alas fiddlier
+ * than the 32 bit equivalent.
+ */
+static inline void gen_set_NZ64(TCGv_i64 result)
+{
+ tcg_gen_extr_i64_i32(cpu_ZF, cpu_NF, result);
+ tcg_gen_or_i32(cpu_ZF, cpu_ZF, cpu_NF);
+}
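+
+/* As a concrete illustration of the trick above: for result ==
+ * 0xffffffff00000000, cpu_ZF receives the low half (0) and cpu_NF the
+ * high half (0xffffffff); OR-ing NF into ZF makes ZF non-zero, so Z
+ * correctly reads as clear even though the low 32 bits are all zero,
+ * and N is taken from bit 31 of NF, i.e. bit 63 of the result.
+ */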
+
+/* Set NZCV as for a logical operation: NZ as per result, CV cleared. */
+static inline void gen_logic_CC(int sf, TCGv_i64 result)
+{
+ if (sf) {
+ gen_set_NZ64(result);
+ } else {
+ tcg_gen_extrl_i64_i32(cpu_ZF, result);
+ tcg_gen_mov_i32(cpu_NF, cpu_ZF);
+ }
+ tcg_gen_movi_i32(cpu_CF, 0);
+ tcg_gen_movi_i32(cpu_VF, 0);
+}
+
+/* dest = T0 + T1; compute C, N, V and Z flags */
+static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
+{
+ if (sf) {
+ TCGv_i64 result, flag, tmp;
+ result = tcg_temp_new_i64();
+ flag = tcg_temp_new_i64();
+ tmp = tcg_temp_new_i64();
+
+ tcg_gen_movi_i64(tmp, 0);
+ tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);
+
+ tcg_gen_extrl_i64_i32(cpu_CF, flag);
+
+ gen_set_NZ64(result);
+
+ tcg_gen_xor_i64(flag, result, t0);
+ tcg_gen_xor_i64(tmp, t0, t1);
+ tcg_gen_andc_i64(flag, flag, tmp);
+ tcg_temp_free_i64(tmp);
+ tcg_gen_extrh_i64_i32(cpu_VF, flag);
+
+ tcg_gen_mov_i64(dest, result);
+ tcg_temp_free_i64(result);
+ tcg_temp_free_i64(flag);
+ } else {
+ /* 32 bit arithmetic */
+ TCGv_i32 t0_32 = tcg_temp_new_i32();
+ TCGv_i32 t1_32 = tcg_temp_new_i32();
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ tcg_gen_movi_i32(tmp, 0);
+ tcg_gen_extrl_i64_i32(t0_32, t0);
+ tcg_gen_extrl_i64_i32(t1_32, t1);
+ tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
+ tcg_gen_mov_i32(cpu_ZF, cpu_NF);
+ tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
+ tcg_gen_xor_i32(tmp, t0_32, t1_32);
+ tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
+ tcg_gen_extu_i32_i64(dest, cpu_NF);
+
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(t0_32);
+ tcg_temp_free_i32(t1_32);
+ }
+}
+
+/* dest = T0 - T1; compute C, N, V and Z flags */
+static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
+{
+ if (sf) {
+ /* 64 bit arithmetic */
+ TCGv_i64 result, flag, tmp;
+
+ result = tcg_temp_new_i64();
+ flag = tcg_temp_new_i64();
+ tcg_gen_sub_i64(result, t0, t1);
+
+ gen_set_NZ64(result);
+
+ tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
+ tcg_gen_extrl_i64_i32(cpu_CF, flag);
+
+ tcg_gen_xor_i64(flag, result, t0);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_xor_i64(tmp, t0, t1);
+ tcg_gen_and_i64(flag, flag, tmp);
+ tcg_temp_free_i64(tmp);
+ tcg_gen_extrh_i64_i32(cpu_VF, flag);
+ tcg_gen_mov_i64(dest, result);
+ tcg_temp_free_i64(flag);
+ tcg_temp_free_i64(result);
+ } else {
+ /* 32 bit arithmetic */
+ TCGv_i32 t0_32 = tcg_temp_new_i32();
+ TCGv_i32 t1_32 = tcg_temp_new_i32();
+ TCGv_i32 tmp;
+
+ tcg_gen_extrl_i64_i32(t0_32, t0);
+ tcg_gen_extrl_i64_i32(t1_32, t1);
+ tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
+ tcg_gen_mov_i32(cpu_ZF, cpu_NF);
+ tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
+ tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
+ tmp = tcg_temp_new_i32();
+ tcg_gen_xor_i32(tmp, t0_32, t1_32);
+ tcg_temp_free_i32(t0_32);
+ tcg_temp_free_i32(t1_32);
+ tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
+ tcg_temp_free_i32(tmp);
+ tcg_gen_extu_i32_i64(dest, cpu_NF);
+ }
+}
+
+/* dest = T0 + T1 + CF; do not compute flags. */
+static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
+{
+ TCGv_i64 flag = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(flag, cpu_CF);
+ tcg_gen_add_i64(dest, t0, t1);
+ tcg_gen_add_i64(dest, dest, flag);
+ tcg_temp_free_i64(flag);
+
+ if (!sf) {
+ tcg_gen_ext32u_i64(dest, dest);
+ }
+}
+
+/* dest = T0 + T1 + CF; compute C, N, V and Z flags. */
+static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
+{
+ if (sf) {
+ TCGv_i64 result, cf_64, vf_64, tmp;
+ result = tcg_temp_new_i64();
+ cf_64 = tcg_temp_new_i64();
+ vf_64 = tcg_temp_new_i64();
+ tmp = tcg_const_i64(0);
+
+ tcg_gen_extu_i32_i64(cf_64, cpu_CF);
+ tcg_gen_add2_i64(result, cf_64, t0, tmp, cf_64, tmp);
+ tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, tmp);
+ tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
+ gen_set_NZ64(result);
+
+ tcg_gen_xor_i64(vf_64, result, t0);
+ tcg_gen_xor_i64(tmp, t0, t1);
+ tcg_gen_andc_i64(vf_64, vf_64, tmp);
+ tcg_gen_extrh_i64_i32(cpu_VF, vf_64);
+
+ tcg_gen_mov_i64(dest, result);
+
+ tcg_temp_free_i64(tmp);
+ tcg_temp_free_i64(vf_64);
+ tcg_temp_free_i64(cf_64);
+ tcg_temp_free_i64(result);
+ } else {
+ TCGv_i32 t0_32, t1_32, tmp;
+ t0_32 = tcg_temp_new_i32();
+ t1_32 = tcg_temp_new_i32();
+ tmp = tcg_const_i32(0);
+
+ tcg_gen_extrl_i64_i32(t0_32, t0);
+ tcg_gen_extrl_i64_i32(t1_32, t1);
+ tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, cpu_CF, tmp);
+ tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, tmp);
+
+ tcg_gen_mov_i32(cpu_ZF, cpu_NF);
+ tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
+ tcg_gen_xor_i32(tmp, t0_32, t1_32);
+ tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
+ tcg_gen_extu_i32_i64(dest, cpu_NF);
+
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(t1_32);
+ tcg_temp_free_i32(t0_32);
+ }
+}
+
+/*
+ * Load/Store generators
+ */
+
+/*
+ * Store from GPR register to memory.
+ */
+static void do_gpr_st_memidx(DisasContext *s, TCGv_i64 source,
+ TCGv_i64 tcg_addr, int size, int memidx,
+ bool iss_valid,
+ unsigned int iss_srt,
+ bool iss_sf, bool iss_ar)
+{
+ g_assert(size <= 3);
+ tcg_gen_qemu_st_i64(source, tcg_addr, memidx, s->be_data + size);
+
+ if (iss_valid) {
+ uint32_t syn;
+
+ syn = syn_data_abort_with_iss(0,
+ size,
+ false,
+ iss_srt,
+ iss_sf,
+ iss_ar,
+ 0, 0, 0, 0, 0, false);
+ disas_set_insn_syndrome(s, syn);
+ }
+}
+
+static void do_gpr_st(DisasContext *s, TCGv_i64 source,
+ TCGv_i64 tcg_addr, int size,
+ bool iss_valid,
+ unsigned int iss_srt,
+ bool iss_sf, bool iss_ar)
+{
+ do_gpr_st_memidx(s, source, tcg_addr, size, get_mem_index(s),
+ iss_valid, iss_srt, iss_sf, iss_ar);
+}
+
+/*
+ * Load from memory to GPR register
+ */
+static void do_gpr_ld_memidx(DisasContext *s,
+ TCGv_i64 dest, TCGv_i64 tcg_addr,
+ int size, bool is_signed,
+ bool extend, int memidx,
+ bool iss_valid, unsigned int iss_srt,
+ bool iss_sf, bool iss_ar)
+{
+ TCGMemOp memop = s->be_data + size;
+
+ g_assert(size <= 3);
+
+ if (is_signed) {
+ memop += MO_SIGN;
+ }
+
+ tcg_gen_qemu_ld_i64(dest, tcg_addr, memidx, memop);
+
+ if (extend && is_signed) {
+ g_assert(size < 3);
+ tcg_gen_ext32u_i64(dest, dest);
+ }
+
+ if (iss_valid) {
+ uint32_t syn;
+
+ syn = syn_data_abort_with_iss(0,
+ size,
+ is_signed,
+ iss_srt,
+ iss_sf,
+ iss_ar,
+ 0, 0, 0, 0, 0, false);
+ disas_set_insn_syndrome(s, syn);
+ }
+}
+
+static void do_gpr_ld(DisasContext *s,
+ TCGv_i64 dest, TCGv_i64 tcg_addr,
+ int size, bool is_signed, bool extend,
+ bool iss_valid, unsigned int iss_srt,
+ bool iss_sf, bool iss_ar)
+{
+ do_gpr_ld_memidx(s, dest, tcg_addr, size, is_signed, extend,
+ get_mem_index(s),
+ iss_valid, iss_srt, iss_sf, iss_ar);
+}
+
+/*
+ * Store from FP register to memory
+ */
+static void do_fp_st(DisasContext *s, int srcidx, TCGv_i64 tcg_addr, int size)
+{
+ /* This writes the bottom N bits of a 128 bit wide vector to memory */
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ tcg_gen_ld_i64(tmp, cpu_env, fp_reg_offset(s, srcidx, MO_64));
+ if (size < 4) {
+ tcg_gen_qemu_st_i64(tmp, tcg_addr, get_mem_index(s),
+ s->be_data + size);
+ } else {
+ bool be = s->be_data == MO_BE;
+ TCGv_i64 tcg_hiaddr = tcg_temp_new_i64();
+
+ tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
+ tcg_gen_qemu_st_i64(tmp, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
+ s->be_data | MO_Q);
+ tcg_gen_ld_i64(tmp, cpu_env, fp_reg_hi_offset(s, srcidx));
+ tcg_gen_qemu_st_i64(tmp, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
+ s->be_data | MO_Q);
+ tcg_temp_free_i64(tcg_hiaddr);
+ }
+
+ tcg_temp_free_i64(tmp);
+}
+
+/*
+ * Load from memory to FP register
+ */
+static void do_fp_ld(DisasContext *s, int destidx, TCGv_i64 tcg_addr, int size)
+{
+ /* This always zero-extends and writes to a full 128 bit wide vector */
+ TCGv_i64 tmplo = tcg_temp_new_i64();
+ TCGv_i64 tmphi;
+
+ if (size < 4) {
+ TCGMemOp memop = s->be_data + size;
+ tmphi = tcg_const_i64(0);
+ tcg_gen_qemu_ld_i64(tmplo, tcg_addr, get_mem_index(s), memop);
+ } else {
+ bool be = s->be_data == MO_BE;
+ TCGv_i64 tcg_hiaddr;
+
+ tmphi = tcg_temp_new_i64();
+ tcg_hiaddr = tcg_temp_new_i64();
+
+ tcg_gen_addi_i64(tcg_hiaddr, tcg_addr, 8);
+ tcg_gen_qemu_ld_i64(tmplo, be ? tcg_hiaddr : tcg_addr, get_mem_index(s),
+ s->be_data | MO_Q);
+ tcg_gen_qemu_ld_i64(tmphi, be ? tcg_addr : tcg_hiaddr, get_mem_index(s),
+ s->be_data | MO_Q);
+ tcg_temp_free_i64(tcg_hiaddr);
+ }
+
+ tcg_gen_st_i64(tmplo, cpu_env, fp_reg_offset(s, destidx, MO_64));
+ tcg_gen_st_i64(tmphi, cpu_env, fp_reg_hi_offset(s, destidx));
+
+ tcg_temp_free_i64(tmplo);
+ tcg_temp_free_i64(tmphi);
+}
+
+/*
+ * Vector load/store helpers.
+ *
+ * The principal difference between this and a FP load is that we don't
+ * zero extend as we are filling a partial chunk of the vector register.
+ * These functions don't support 128 bit loads/stores, which would be
+ * normal load/store operations.
+ *
+ * The _i32 versions are useful when operating on 32 bit quantities
+ * (eg for floating point single or using Neon helper functions).
+ */
+
+/* Get value of an element within a vector register */
+static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx,
+ int element, TCGMemOp memop)
+{
+ int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
+ switch (memop) {
+ case MO_8:
+ tcg_gen_ld8u_i64(tcg_dest, cpu_env, vect_off);
+ break;
+ case MO_16:
+ tcg_gen_ld16u_i64(tcg_dest, cpu_env, vect_off);
+ break;
+ case MO_32:
+ tcg_gen_ld32u_i64(tcg_dest, cpu_env, vect_off);
+ break;
+ case MO_8|MO_SIGN:
+ tcg_gen_ld8s_i64(tcg_dest, cpu_env, vect_off);
+ break;
+ case MO_16|MO_SIGN:
+ tcg_gen_ld16s_i64(tcg_dest, cpu_env, vect_off);
+ break;
+ case MO_32|MO_SIGN:
+ tcg_gen_ld32s_i64(tcg_dest, cpu_env, vect_off);
+ break;
+ case MO_64:
+ case MO_64|MO_SIGN:
+ tcg_gen_ld_i64(tcg_dest, cpu_env, vect_off);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void read_vec_element_i32(DisasContext *s, TCGv_i32 tcg_dest, int srcidx,
+ int element, TCGMemOp memop)
+{
+ int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE);
+ switch (memop) {
+ case MO_8:
+ tcg_gen_ld8u_i32(tcg_dest, cpu_env, vect_off);
+ break;
+ case MO_16:
+ tcg_gen_ld16u_i32(tcg_dest, cpu_env, vect_off);
+ break;
+ case MO_8|MO_SIGN:
+ tcg_gen_ld8s_i32(tcg_dest, cpu_env, vect_off);
+ break;
+ case MO_16|MO_SIGN:
+ tcg_gen_ld16s_i32(tcg_dest, cpu_env, vect_off);
+ break;
+ case MO_32:
+ case MO_32|MO_SIGN:
+ tcg_gen_ld_i32(tcg_dest, cpu_env, vect_off);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+/* Set value of an element within a vector register */
+static void write_vec_element(DisasContext *s, TCGv_i64 tcg_src, int destidx,
+ int element, TCGMemOp memop)
+{
+ int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
+ switch (memop) {
+ case MO_8:
+ tcg_gen_st8_i64(tcg_src, cpu_env, vect_off);
+ break;
+ case MO_16:
+ tcg_gen_st16_i64(tcg_src, cpu_env, vect_off);
+ break;
+ case MO_32:
+ tcg_gen_st32_i64(tcg_src, cpu_env, vect_off);
+ break;
+ case MO_64:
+ tcg_gen_st_i64(tcg_src, cpu_env, vect_off);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void write_vec_element_i32(DisasContext *s, TCGv_i32 tcg_src,
+ int destidx, int element, TCGMemOp memop)
+{
+ int vect_off = vec_reg_offset(s, destidx, element, memop & MO_SIZE);
+ switch (memop) {
+ case MO_8:
+ tcg_gen_st8_i32(tcg_src, cpu_env, vect_off);
+ break;
+ case MO_16:
+ tcg_gen_st16_i32(tcg_src, cpu_env, vect_off);
+ break;
+ case MO_32:
+ tcg_gen_st_i32(tcg_src, cpu_env, vect_off);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+/* Clear the high 64 bits of a 128 bit vector (in general non-quad
+ * vector ops all need to do this).
+ */
+static void clear_vec_high(DisasContext *s, int rd)
+{
+ TCGv_i64 tcg_zero = tcg_const_i64(0);
+
+ write_vec_element(s, tcg_zero, rd, 1, MO_64);
+ tcg_temp_free_i64(tcg_zero);
+}
+
+/* Store from vector register to memory */
+static void do_vec_st(DisasContext *s, int srcidx, int element,
+ TCGv_i64 tcg_addr, int size)
+{
+ TCGMemOp memop = s->be_data + size;
+ TCGv_i64 tcg_tmp = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_tmp, srcidx, element, size);
+ tcg_gen_qemu_st_i64(tcg_tmp, tcg_addr, get_mem_index(s), memop);
+
+ tcg_temp_free_i64(tcg_tmp);
+}
+
+/* Load from memory to vector register */
+static void do_vec_ld(DisasContext *s, int destidx, int element,
+ TCGv_i64 tcg_addr, int size)
+{
+ TCGMemOp memop = s->be_data + size;
+ TCGv_i64 tcg_tmp = tcg_temp_new_i64();
+
+ tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr, get_mem_index(s), memop);
+ write_vec_element(s, tcg_tmp, destidx, element, size);
+
+ tcg_temp_free_i64(tcg_tmp);
+}
+
+/* Check that FP/Neon access is enabled. If it is, return
+ * true. If not, emit code to generate an appropriate exception,
+ * and return false; the caller should not emit any code for
+ * the instruction. Note that this check must happen after all
+ * unallocated-encoding checks (otherwise the syndrome information
+ * for the resulting exception will be incorrect).
+ */
+static inline bool fp_access_check(DisasContext *s)
+{
+ assert(!s->fp_access_checked);
+ s->fp_access_checked = true;
+
+ if (!s->fp_excp_el) {
+ return true;
+ }
+
+ gen_exception_insn(s, 4, EXCP_UDEF, syn_fp_access_trap(1, 0xe, false),
+ s->fp_excp_el);
+ return false;
+}
+
+/*
+ * This utility function is for doing register extension with an
+ * optional shift. You will likely want to pass a temporary for the
+ * destination register. See DecodeRegExtend() in the ARM ARM.
+ */
+static void ext_and_shift_reg(TCGv_i64 tcg_out, TCGv_i64 tcg_in,
+ int option, unsigned int shift)
+{
+ int extsize = extract32(option, 0, 2);
+ bool is_signed = extract32(option, 2, 1);
+
+ if (is_signed) {
+ switch (extsize) {
+ case 0:
+ tcg_gen_ext8s_i64(tcg_out, tcg_in);
+ break;
+ case 1:
+ tcg_gen_ext16s_i64(tcg_out, tcg_in);
+ break;
+ case 2:
+ tcg_gen_ext32s_i64(tcg_out, tcg_in);
+ break;
+ case 3:
+ tcg_gen_mov_i64(tcg_out, tcg_in);
+ break;
+ }
+ } else {
+ switch (extsize) {
+ case 0:
+ tcg_gen_ext8u_i64(tcg_out, tcg_in);
+ break;
+ case 1:
+ tcg_gen_ext16u_i64(tcg_out, tcg_in);
+ break;
+ case 2:
+ tcg_gen_ext32u_i64(tcg_out, tcg_in);
+ break;
+ case 3:
+ tcg_gen_mov_i64(tcg_out, tcg_in);
+ break;
+ }
+ }
+
+ if (shift) {
+ tcg_gen_shli_i64(tcg_out, tcg_out, shift);
+ }
+}
+
+static inline void gen_check_sp_alignment(DisasContext *s)
+{
+ /* The AArch64 architecture mandates that (if enabled via PSTATE
+ * or SCTLR bits) there is a check that SP is 16-aligned on every
+ * SP-relative load or store (with an exception generated if it is not).
+ * In line with general QEMU practice regarding misaligned accesses,
+ * we omit these checks for the sake of guest program performance.
+ * This function is provided as a hook so we can more easily add these
+ * checks in future (possibly as a "favour catching guest program bugs
+ * over speed" user selectable option).
+ */
+}
+
+/*
+ * This provides a simple table-based lookup decoder. It is
+ * intended to be used when the relevant bits for decode are too
+ * awkwardly placed and switch/if based logic would be confusing and
+ * deeply nested. Since it's a linear search through the table, tables
+ * should be kept small.
+ *
+ * It returns the first handler where insn & mask == pattern, or
+ * NULL if there is no match.
+ * The table is terminated by an empty mask (i.e. 0)
+ */
+static inline AArch64DecodeFn *lookup_disas_fn(const AArch64DecodeTable *table,
+ uint32_t insn)
+{
+ const AArch64DecodeTable *tptr = table;
+
+ while (tptr->mask) {
+ if ((insn & tptr->mask) == tptr->pattern) {
+ return tptr->disas_fn;
+ }
+ tptr++;
+ }
+ return NULL;
+}
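+
+/* A hypothetical table, purely to show the shape the decoder expects
+ * (the names and bit patterns here are illustrative, not entries used
+ * below):
+ *
+ *     static const AArch64DecodeTable example_table[] = {
+ *         { 0x0e000000, 0x9f000000, disas_example_group },
+ *         { 0x00000000, 0x00000000, NULL }   // mask == 0 terminates
+ *     };
+ *
+ * lookup_disas_fn(example_table, insn) walks the entries in order and
+ * returns the first handler whose (insn & mask) == pattern, or NULL if
+ * none matches.
+ */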
+
+/*
+ * the instruction disassembly implemented here matches
+ * the instruction encoding classifications in chapter 3 (C3)
+ * of the ARM Architecture Reference Manual (DDI0487A_a)
+ */
+
+/* C3.2.7 Unconditional branch (immediate)
+ * 31 30 26 25 0
+ * +----+-----------+-------------------------------------+
+ * | op | 0 0 1 0 1 | imm26 |
+ * +----+-----------+-------------------------------------+
+ */
+static void disas_uncond_b_imm(DisasContext *s, uint32_t insn)
+{
+ uint64_t addr = s->pc + sextract32(insn, 0, 26) * 4 - 4;
+
+ if (insn & (1U << 31)) {
+ /* C5.6.26 BL Branch with link */
+ tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
+ }
+
+ /* C5.6.20 B Branch / C5.6.26 BL Branch with link */
+ gen_goto_tb(s, 0, addr);
+}
+
+/* C3.2.1 Compare & branch (immediate)
+ * 31 30 25 24 23 5 4 0
+ * +----+-------------+----+---------------------+--------+
+ * | sf | 0 1 1 0 1 0 | op | imm19 | Rt |
+ * +----+-------------+----+---------------------+--------+
+ */
+static void disas_comp_b_imm(DisasContext *s, uint32_t insn)
+{
+ unsigned int sf, op, rt;
+ uint64_t addr;
+ TCGLabel *label_match;
+ TCGv_i64 tcg_cmp;
+
+ sf = extract32(insn, 31, 1);
+ op = extract32(insn, 24, 1); /* 0: CBZ; 1: CBNZ */
+ rt = extract32(insn, 0, 5);
+ addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
+
+ tcg_cmp = read_cpu_reg(s, rt, sf);
+ label_match = gen_new_label();
+
+ tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
+ tcg_cmp, 0, label_match);
+
+ gen_goto_tb(s, 0, s->pc);
+ gen_set_label(label_match);
+ gen_goto_tb(s, 1, addr);
+}
+
+/* C3.2.5 Test & branch (immediate)
+ * 31 30 25 24 23 19 18 5 4 0
+ * +----+-------------+----+-------+-------------+------+
+ * | b5 | 0 1 1 0 1 1 | op | b40 | imm14 | Rt |
+ * +----+-------------+----+-------+-------------+------+
+ */
+static void disas_test_b_imm(DisasContext *s, uint32_t insn)
+{
+ unsigned int bit_pos, op, rt;
+ uint64_t addr;
+ TCGLabel *label_match;
+ TCGv_i64 tcg_cmp;
+
+ bit_pos = (extract32(insn, 31, 1) << 5) | extract32(insn, 19, 5);
+ op = extract32(insn, 24, 1); /* 0: TBZ; 1: TBNZ */
+ addr = s->pc + sextract32(insn, 5, 14) * 4 - 4;
+ rt = extract32(insn, 0, 5);
+
+ tcg_cmp = tcg_temp_new_i64();
+ tcg_gen_andi_i64(tcg_cmp, cpu_reg(s, rt), (1ULL << bit_pos));
+ label_match = gen_new_label();
+ tcg_gen_brcondi_i64(op ? TCG_COND_NE : TCG_COND_EQ,
+ tcg_cmp, 0, label_match);
+ tcg_temp_free_i64(tcg_cmp);
+ gen_goto_tb(s, 0, s->pc);
+ gen_set_label(label_match);
+ gen_goto_tb(s, 1, addr);
+}
+
+/* C3.2.2 / C5.6.19 Conditional branch (immediate)
+ * 31 25 24 23 5 4 3 0
+ * +---------------+----+---------------------+----+------+
+ * | 0 1 0 1 0 1 0 | o1 | imm19 | o0 | cond |
+ * +---------------+----+---------------------+----+------+
+ */
+static void disas_cond_b_imm(DisasContext *s, uint32_t insn)
+{
+ unsigned int cond;
+ uint64_t addr;
+
+ if ((insn & (1 << 4)) || (insn & (1 << 24))) {
+ unallocated_encoding(s);
+ return;
+ }
+ addr = s->pc + sextract32(insn, 5, 19) * 4 - 4;
+ cond = extract32(insn, 0, 4);
+
+ if (cond < 0x0e) {
+ /* genuinely conditional branches */
+ TCGLabel *label_match = gen_new_label();
+ arm_gen_test_cc(cond, label_match);
+ gen_goto_tb(s, 0, s->pc);
+ gen_set_label(label_match);
+ gen_goto_tb(s, 1, addr);
+ } else {
+ /* 0xe and 0xf are both "always" conditions */
+ gen_goto_tb(s, 0, addr);
+ }
+}
+
+/* C5.6.68 HINT */
+static void handle_hint(DisasContext *s, uint32_t insn,
+ unsigned int op1, unsigned int op2, unsigned int crm)
+{
+ unsigned int selector = crm << 3 | op2;
+
+ if (op1 != 3) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (selector) {
+ case 0: /* NOP */
+ return;
+ case 3: /* WFI */
+ s->is_jmp = DISAS_WFI;
+ return;
+ case 1: /* YIELD */
+ s->is_jmp = DISAS_YIELD;
+ return;
+ case 2: /* WFE */
+ s->is_jmp = DISAS_WFE;
+ return;
+ case 4: /* SEV */
+ case 5: /* SEVL */
+ /* we treat all as NOP at least for now */
+ return;
+ default:
+ /* default specified as NOP equivalent */
+ return;
+ }
+}
+
+static void gen_clrex(DisasContext *s, uint32_t insn)
+{
+ tcg_gen_movi_i64(cpu_exclusive_addr, -1);
+}
+
+/* CLREX, DSB, DMB, ISB */
+static void handle_sync(DisasContext *s, uint32_t insn,
+ unsigned int op1, unsigned int op2, unsigned int crm)
+{
+ TCGBar bar;
+
+ if (op1 != 3) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (op2) {
+ case 2: /* CLREX */
+ gen_clrex(s, insn);
+ return;
+ case 4: /* DSB */
+ case 5: /* DMB */
+ switch (crm & 3) {
+ case 1: /* MBReqTypes_Reads */
+ bar = TCG_BAR_SC | TCG_MO_LD_LD | TCG_MO_LD_ST;
+ break;
+ case 2: /* MBReqTypes_Writes */
+ bar = TCG_BAR_SC | TCG_MO_ST_ST;
+ break;
+ default: /* MBReqTypes_All */
+ bar = TCG_BAR_SC | TCG_MO_ALL;
+ break;
+ }
+ tcg_gen_mb(bar);
+ return;
+ case 6: /* ISB */
+ /* We need to break the TB after this insn so that self-modifying
+ * code is executed correctly and any pending interrupts are taken
+ * immediately.
+ */
+ s->is_jmp = DISAS_UPDATE;
+ return;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+}
+
+/* C5.6.130 MSR (immediate) - move immediate to processor state field */
+static void handle_msr_i(DisasContext *s, uint32_t insn,
+ unsigned int op1, unsigned int op2, unsigned int crm)
+{
+ int op = op1 << 3 | op2;
+ switch (op) {
+ case 0x05: /* SPSel */
+ if (s->current_el == 0) {
+ unallocated_encoding(s);
+ return;
+ }
+ /* fall through */
+ case 0x1e: /* DAIFSet */
+ case 0x1f: /* DAIFClear */
+ {
+ TCGv_i32 tcg_imm = tcg_const_i32(crm);
+ TCGv_i32 tcg_op = tcg_const_i32(op);
+ gen_a64_set_pc_im(s->pc - 4);
+ gen_helper_msr_i_pstate(cpu_env, tcg_op, tcg_imm);
+ tcg_temp_free_i32(tcg_imm);
+ tcg_temp_free_i32(tcg_op);
+ s->is_jmp = DISAS_UPDATE;
+ break;
+ }
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+}
+
+static void gen_get_nzcv(TCGv_i64 tcg_rt)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ TCGv_i32 nzcv = tcg_temp_new_i32();
+
+ /* build bit 31, N */
+ tcg_gen_andi_i32(nzcv, cpu_NF, (1U << 31));
+ /* build bit 30, Z */
+ tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
+ tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
+ /* build bit 29, C */
+ tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1);
+ /* build bit 28, V */
+ tcg_gen_shri_i32(tmp, cpu_VF, 31);
+ tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1);
+ /* generate result */
+ tcg_gen_extu_i32_i64(tcg_rt, nzcv);
+
+ tcg_temp_free_i32(nzcv);
+ tcg_temp_free_i32(tmp);
+}
+
+static void gen_set_nzcv(TCGv_i64 tcg_rt)
+
+{
+ TCGv_i32 nzcv = tcg_temp_new_i32();
+
+ /* take NZCV from R[t] */
+ tcg_gen_extrl_i64_i32(nzcv, tcg_rt);
+
+ /* bit 31, N */
+ tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
+ /* bit 30, Z */
+ tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
+ /* bit 29, C */
+ tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29));
+ tcg_gen_shri_i32(cpu_CF, cpu_CF, 29);
+ /* bit 28, V */
+ tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28));
+ tcg_gen_shli_i32(cpu_VF, cpu_VF, 3);
+ tcg_temp_free_i32(nzcv);
+}
+
+/* C5.6.129 MRS - move from system register
+ * C5.6.131 MSR (register) - move to system register
+ * C5.6.204 SYS
+ * C5.6.205 SYSL
+ * These are all essentially the same insn in 'read' and 'write'
+ * versions, with varying op0 fields.
+ */
+static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
+ unsigned int op0, unsigned int op1, unsigned int op2,
+ unsigned int crn, unsigned int crm, unsigned int rt)
+{
+ const ARMCPRegInfo *ri;
+ TCGv_i64 tcg_rt;
+
+ ri = get_arm_cp_reginfo(s->cp_regs,
+ ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP,
+ crn, crm, op0, op1, op2));
+
+ if (!ri) {
+ /* Unknown register; this might be a guest error or a QEMU
+ * unimplemented feature.
+ */
+ qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 "
+ "system register op0:%d op1:%d crn:%d crm:%d op2:%d\n",
+ isread ? "read" : "write", op0, op1, crn, crm, op2);
+ unallocated_encoding(s);
+ return;
+ }
+
+ /* Check access permissions */
+ if (!cp_access_ok(s->current_el, ri, isread)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (ri->accessfn) {
+ /* Emit code to perform further access permissions checks at
+ * runtime; this may result in an exception.
+ */
+ TCGv_ptr tmpptr;
+ TCGv_i32 tcg_syn, tcg_isread;
+ uint32_t syndrome;
+
+ gen_a64_set_pc_im(s->pc - 4);
+ tmpptr = tcg_const_ptr(ri);
+ syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
+ tcg_syn = tcg_const_i32(syndrome);
+ tcg_isread = tcg_const_i32(isread);
+ gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn, tcg_isread);
+ tcg_temp_free_ptr(tmpptr);
+ tcg_temp_free_i32(tcg_syn);
+ tcg_temp_free_i32(tcg_isread);
+ }
+
+ /* Handle special cases first */
+ switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
+ case ARM_CP_NOP:
+ return;
+ case ARM_CP_NZCV:
+ tcg_rt = cpu_reg(s, rt);
+ if (isread) {
+ gen_get_nzcv(tcg_rt);
+ } else {
+ gen_set_nzcv(tcg_rt);
+ }
+ return;
+ case ARM_CP_CURRENTEL:
+ /* Reads as current EL value from pstate, which is
+ * guaranteed to be constant by the tb flags.
+ */
+ tcg_rt = cpu_reg(s, rt);
+ tcg_gen_movi_i64(tcg_rt, s->current_el << 2);
+ return;
+ case ARM_CP_DC_ZVA:
+ /* Writes clear the aligned block of memory which rt points into. */
+ tcg_rt = cpu_reg(s, rt);
+ gen_helper_dc_zva(cpu_env, tcg_rt);
+ return;
+ default:
+ break;
+ }
+
+ if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
+ gen_io_start();
+ }
+
+ tcg_rt = cpu_reg(s, rt);
+
+ if (isread) {
+ if (ri->type & ARM_CP_CONST) {
+ tcg_gen_movi_i64(tcg_rt, ri->resetvalue);
+ } else if (ri->readfn) {
+ TCGv_ptr tmpptr;
+ tmpptr = tcg_const_ptr(ri);
+ gen_helper_get_cp_reg64(tcg_rt, cpu_env, tmpptr);
+ tcg_temp_free_ptr(tmpptr);
+ } else {
+ tcg_gen_ld_i64(tcg_rt, cpu_env, ri->fieldoffset);
+ }
+ } else {
+ if (ri->type & ARM_CP_CONST) {
+ /* If not forbidden by access permissions, treat as WI */
+ return;
+ } else if (ri->writefn) {
+ TCGv_ptr tmpptr;
+ tmpptr = tcg_const_ptr(ri);
+ gen_helper_set_cp_reg64(cpu_env, tmpptr, tcg_rt);
+ tcg_temp_free_ptr(tmpptr);
+ } else {
+ tcg_gen_st_i64(tcg_rt, cpu_env, ri->fieldoffset);
+ }
+ }
+
+ if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
+ /* I/O operations must end the TB here (whether read or write) */
+ gen_io_end();
+ s->is_jmp = DISAS_UPDATE;
+ } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
+ /* We default to ending the TB on a coprocessor register write,
+ * but allow this to be suppressed by the register definition
+ * (usually only necessary to work around guest bugs).
+ */
+ s->is_jmp = DISAS_UPDATE;
+ }
+}
+
+/* C3.2.4 System
+ * 31 22 21 20 19 18 16 15 12 11 8 7 5 4 0
+ * +---------------------+---+-----+-----+-------+-------+-----+------+
+ * | 1 1 0 1 0 1 0 1 0 0 | L | op0 | op1 | CRn | CRm | op2 | Rt |
+ * +---------------------+---+-----+-----+-------+-------+-----+------+
+ */
+static void disas_system(DisasContext *s, uint32_t insn)
+{
+ unsigned int l, op0, op1, crn, crm, op2, rt;
+ l = extract32(insn, 21, 1);
+ op0 = extract32(insn, 19, 2);
+ op1 = extract32(insn, 16, 3);
+ crn = extract32(insn, 12, 4);
+ crm = extract32(insn, 8, 4);
+ op2 = extract32(insn, 5, 3);
+ rt = extract32(insn, 0, 5);
+
+ if (op0 == 0) {
+ if (l || rt != 31) {
+ unallocated_encoding(s);
+ return;
+ }
+ switch (crn) {
+ case 2: /* C5.6.68 HINT */
+ handle_hint(s, insn, op1, op2, crm);
+ break;
+ case 3: /* CLREX, DSB, DMB, ISB */
+ handle_sync(s, insn, op1, op2, crm);
+ break;
+ case 4: /* C5.6.130 MSR (immediate) */
+ handle_msr_i(s, insn, op1, op2, crm);
+ break;
+ default:
+ unallocated_encoding(s);
+ break;
+ }
+ return;
+ }
+ handle_sys(s, insn, l, op0, op1, op2, crn, crm, rt);
+}
+
+/* C3.2.3 Exception generation
+ *
+ * 31 24 23 21 20 5 4 2 1 0
+ * +-----------------+-----+------------------------+-----+----+
+ * | 1 1 0 1 0 1 0 0 | opc | imm16 | op2 | LL |
+ * +-----------------+-----+------------------------+-----+----+
+ */
+static void disas_exc(DisasContext *s, uint32_t insn)
+{
+ int opc = extract32(insn, 21, 3);
+ int op2_ll = extract32(insn, 0, 5);
+ int imm16 = extract32(insn, 5, 16);
+ TCGv_i32 tmp;
+
+ switch (opc) {
+ case 0:
+ /* For SVC, HVC and SMC we advance the single-step state
+ * machine before taking the exception. This is architecturally
+ * mandated, to ensure that single-stepping a system call
+ * instruction works properly.
+ */
+ switch (op2_ll) {
+ case 1: /* SVC */
+ gen_ss_advance(s);
+ gen_exception_insn(s, 0, EXCP_SWI, syn_aa64_svc(imm16),
+ default_exception_el(s));
+ break;
+ case 2: /* HVC */
+ if (s->current_el == 0) {
+ unallocated_encoding(s);
+ break;
+ }
+ /* The pre HVC helper handles cases when HVC gets trapped
+ * as an undefined insn by runtime configuration.
+ */
+ gen_a64_set_pc_im(s->pc - 4);
+ gen_helper_pre_hvc(cpu_env);
+ gen_ss_advance(s);
+ gen_exception_insn(s, 0, EXCP_HVC, syn_aa64_hvc(imm16), 2);
+ break;
+ case 3: /* SMC */
+ if (s->current_el == 0) {
+ unallocated_encoding(s);
+ break;
+ }
+ gen_a64_set_pc_im(s->pc - 4);
+ tmp = tcg_const_i32(syn_aa64_smc(imm16));
+ gen_helper_pre_smc(cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+ gen_ss_advance(s);
+ gen_exception_insn(s, 0, EXCP_SMC, syn_aa64_smc(imm16), 3);
+ break;
+ default:
+ unallocated_encoding(s);
+ break;
+ }
+ break;
+ case 1:
+ if (op2_ll != 0) {
+ unallocated_encoding(s);
+ break;
+ }
+ /* BRK */
+ gen_exception_insn(s, 4, EXCP_BKPT, syn_aa64_bkpt(imm16),
+ default_exception_el(s));
+ break;
+ case 2:
+ if (op2_ll != 0) {
+ unallocated_encoding(s);
+ break;
+ }
+ /* HLT. This has two purposes.
+ * Architecturally, it is an external halting debug instruction.
+ * Since QEMU doesn't implement external debug, we treat this as
+ * the architecture requires when halting debug is disabled: it UNDEFs.
+ * Secondly, "HLT 0xf000" is the A64 semihosting syscall instruction.
+ */
+ if (semihosting_enabled() && imm16 == 0xf000) {
+#ifndef CONFIG_USER_ONLY
+ /* In system mode, don't allow userspace access to semihosting,
+ * to provide some semblance of security (and for consistency
+ * with our 32-bit semihosting).
+ */
+ if (s->current_el == 0) {
+ unsupported_encoding(s, insn);
+ break;
+ }
+#endif
+ gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
+ } else {
+ unsupported_encoding(s, insn);
+ }
+ break;
+ case 5:
+ if (op2_ll < 1 || op2_ll > 3) {
+ unallocated_encoding(s);
+ break;
+ }
+ /* DCPS1, DCPS2, DCPS3 */
+ unsupported_encoding(s, insn);
+ break;
+ default:
+ unallocated_encoding(s);
+ break;
+ }
+}
+
+/* C3.2.7 Unconditional branch (register)
+ * 31 25 24 21 20 16 15 10 9 5 4 0
+ * +---------------+-------+-------+-------+------+-------+
+ * | 1 1 0 1 0 1 1 | opc | op2 | op3 | Rn | op4 |
+ * +---------------+-------+-------+-------+------+-------+
+ */
+static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
+{
+ unsigned int opc, op2, op3, rn, op4;
+
+ opc = extract32(insn, 21, 4);
+ op2 = extract32(insn, 16, 5);
+ op3 = extract32(insn, 10, 6);
+ rn = extract32(insn, 5, 5);
+ op4 = extract32(insn, 0, 5);
+
+ if (op4 != 0x0 || op3 != 0x0 || op2 != 0x1f) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (opc) {
+ case 0: /* BR */
+ case 1: /* BLR */
+ case 2: /* RET */
+ gen_a64_set_pc(s, cpu_reg(s, rn));
+ /* BLR also needs to load return address */
+ if (opc == 1) {
+ tcg_gen_movi_i64(cpu_reg(s, 30), s->pc);
+ }
+ break;
+ case 4: /* ERET */
+ if (s->current_el == 0) {
+ unallocated_encoding(s);
+ return;
+ }
+ gen_helper_exception_return(cpu_env);
+ s->is_jmp = DISAS_JUMP;
+ return;
+ case 5: /* DRPS */
+ if (rn != 0x1f) {
+ unallocated_encoding(s);
+ } else {
+ unsupported_encoding(s, insn);
+ }
+ return;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ s->is_jmp = DISAS_JUMP;
+}
+
+/* C3.2 Branches, exception generating and system instructions */
+static void disas_b_exc_sys(DisasContext *s, uint32_t insn)
+{
+ switch (extract32(insn, 25, 7)) {
+ case 0x0a: case 0x0b:
+ case 0x4a: case 0x4b: /* Unconditional branch (immediate) */
+ disas_uncond_b_imm(s, insn);
+ break;
+ case 0x1a: case 0x5a: /* Compare & branch (immediate) */
+ disas_comp_b_imm(s, insn);
+ break;
+ case 0x1b: case 0x5b: /* Test & branch (immediate) */
+ disas_test_b_imm(s, insn);
+ break;
+ case 0x2a: /* Conditional branch (immediate) */
+ disas_cond_b_imm(s, insn);
+ break;
+ case 0x6a: /* Exception generation / System */
+ if (insn & (1 << 24)) {
+ disas_system(s, insn);
+ } else {
+ disas_exc(s, insn);
+ }
+ break;
+ case 0x6b: /* Unconditional branch (register) */
+ disas_uncond_b_reg(s, insn);
+ break;
+ default:
+ unallocated_encoding(s);
+ break;
+ }
+}
+
+/*
+ * Load/Store exclusive instructions are implemented by remembering
+ * the value/address loaded, and seeing if these are the same
+ * when the store is performed. This is not actually the architecturally
+ * mandated semantics, but it works for typical guest code sequences
+ * and avoids having to monitor regular stores.
+ *
+ * The store exclusive uses the atomic cmpxchg primitives to avoid
+ * races in multi-threaded linux-user and when MTTCG softmmu is
+ * enabled.
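+ *
+ * For example, for a guest sequence LDXR X0, [X1]; STXR W2, X0, [X1],
+ * the store succeeds (W2 == 0) only if [X1] still holds the value the
+ * load returned, which the cmpxchg checks atomically; otherwise W2 is
+ * set to 1 and memory is left unchanged.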
+ */
+static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
+ TCGv_i64 addr, int size, bool is_pair)
+{
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ TCGMemOp memop = s->be_data + size;
+
+ g_assert(size <= 3);
+ tcg_gen_qemu_ld_i64(tmp, addr, get_mem_index(s), memop);
+
+ if (is_pair) {
+ TCGv_i64 addr2 = tcg_temp_new_i64();
+ TCGv_i64 hitmp = tcg_temp_new_i64();
+
+ g_assert(size >= 2);
+ tcg_gen_addi_i64(addr2, addr, 1 << size);
+ tcg_gen_qemu_ld_i64(hitmp, addr2, get_mem_index(s), memop);
+ tcg_temp_free_i64(addr2);
+ tcg_gen_mov_i64(cpu_exclusive_high, hitmp);
+ tcg_gen_mov_i64(cpu_reg(s, rt2), hitmp);
+ tcg_temp_free_i64(hitmp);
+ }
+
+ tcg_gen_mov_i64(cpu_exclusive_val, tmp);
+ tcg_gen_mov_i64(cpu_reg(s, rt), tmp);
+
+ tcg_temp_free_i64(tmp);
+ tcg_gen_mov_i64(cpu_exclusive_addr, addr);
+}
+
+static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
+ TCGv_i64 inaddr, int size, int is_pair)
+{
+ /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]
+ * && (!is_pair || env->exclusive_high == [addr + datasize])) {
+ * [addr] = {Rt};
+ * if (is_pair) {
+ * [addr + datasize] = {Rt2};
+ * }
+ * {Rd} = 0;
+ * } else {
+ * {Rd} = 1;
+ * }
+ * env->exclusive_addr = -1;
+ */
+ TCGLabel *fail_label = gen_new_label();
+ TCGLabel *done_label = gen_new_label();
+ TCGv_i64 addr = tcg_temp_local_new_i64();
+ TCGv_i64 tmp;
+
+ /* Copy input into a local temp so it is not trashed when the
+ * basic block ends at the branch insn.
+ */
+ tcg_gen_mov_i64(addr, inaddr);
+ tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
+
+ tmp = tcg_temp_new_i64();
+ if (is_pair) {
+ if (size == 2) {
+ TCGv_i64 val = tcg_temp_new_i64();
+ tcg_gen_concat32_i64(tmp, cpu_reg(s, rt), cpu_reg(s, rt2));
+ tcg_gen_concat32_i64(val, cpu_exclusive_val, cpu_exclusive_high);
+ tcg_gen_atomic_cmpxchg_i64(tmp, addr, val, tmp,
+ get_mem_index(s),
+ size | MO_ALIGN | s->be_data);
+ tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, val);
+ tcg_temp_free_i64(val);
+ } else if (s->be_data == MO_LE) {
+ gen_helper_paired_cmpxchg64_le(tmp, cpu_env, addr, cpu_reg(s, rt),
+ cpu_reg(s, rt2));
+ } else {
+ gen_helper_paired_cmpxchg64_be(tmp, cpu_env, addr, cpu_reg(s, rt),
+ cpu_reg(s, rt2));
+ }
+ } else {
+ TCGv_i64 val = cpu_reg(s, rt);
+ tcg_gen_atomic_cmpxchg_i64(tmp, addr, cpu_exclusive_val, val,
+ get_mem_index(s),
+ size | MO_ALIGN | s->be_data);
+ tcg_gen_setcond_i64(TCG_COND_NE, tmp, tmp, cpu_exclusive_val);
+ }
+
+ tcg_temp_free_i64(addr);
+
+ tcg_gen_mov_i64(cpu_reg(s, rd), tmp);
+ tcg_temp_free_i64(tmp);
+ tcg_gen_br(done_label);
+
+ gen_set_label(fail_label);
+ tcg_gen_movi_i64(cpu_reg(s, rd), 1);
+ gen_set_label(done_label);
+ tcg_gen_movi_i64(cpu_exclusive_addr, -1);
+}
+
+/* Compute the "Sixty-Four" (SF) register size flag used for the ISS.
+ * This logic is derived from the ARMv8 specs for LDR (Shared decode for
+ * all encodings).
+ */
+static bool disas_ldst_compute_iss_sf(int size, bool is_signed, int opc)
+{
+ int opc0 = extract32(opc, 0, 1);
+ int regsize;
+
+ if (is_signed) {
+ regsize = opc0 ? 32 : 64;
+ } else {
+ regsize = size == 3 ? 64 : 32;
+ }
+ return regsize == 64;
+}
+
+/* C3.3.6 Load/store exclusive
+ *
+ * 31 30 29 24 23 22 21 20 16 15 14 10 9 5 4 0
+ * +-----+-------------+----+---+----+------+----+-------+------+------+
+ * | sz | 0 0 1 0 0 0 | o2 | L | o1 | Rs | o0 | Rt2 | Rn | Rt |
+ * +-----+-------------+----+---+----+------+----+-------+------+------+
+ *
+ * sz: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64 bit
+ * L: 0 -> store, 1 -> load
+ * o2: 0 -> exclusive, 1 -> not
+ * o1: 0 -> single register, 1 -> register pair
+ * o0: 1 -> load-acquire/store-release, 0 -> not
+ */
+static void disas_ldst_excl(DisasContext *s, uint32_t insn)
+{
+ int rt = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int rt2 = extract32(insn, 10, 5);
+ int is_lasr = extract32(insn, 15, 1);
+ int rs = extract32(insn, 16, 5);
+ int is_pair = extract32(insn, 21, 1);
+ int is_store = !extract32(insn, 22, 1);
+ int is_excl = !extract32(insn, 23, 1);
+ int size = extract32(insn, 30, 2);
+ TCGv_i64 tcg_addr;
+
+ if ((!is_excl && !is_pair && !is_lasr) ||
+ (!is_excl && is_pair) ||
+ (is_pair && size < 2)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+ tcg_addr = read_cpu_reg_sp(s, rn, 1);
+
+ /* Load-acquire/store-release semantics are provided by the explicit
+ * barriers emitted in the is_lasr cases below; no further special
+ * handling is required.
+ */
+
+ if (is_excl) {
+ if (!is_store) {
+ s->is_ldex = true;
+ gen_load_exclusive(s, rt, rt2, tcg_addr, size, is_pair);
+ if (is_lasr) {
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
+ }
+ } else {
+ if (is_lasr) {
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
+ }
+ gen_store_exclusive(s, rs, rt, rt2, tcg_addr, size, is_pair);
+ }
+ } else {
+ TCGv_i64 tcg_rt = cpu_reg(s, rt);
+ bool iss_sf = disas_ldst_compute_iss_sf(size, false, 0);
+
+ /* Generate ISS for non-exclusive accesses including LASR. */
+ if (is_store) {
+ if (is_lasr) {
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
+ }
+ do_gpr_st(s, tcg_rt, tcg_addr, size,
+ true, rt, iss_sf, is_lasr);
+ } else {
+ do_gpr_ld(s, tcg_rt, tcg_addr, size, false, false,
+ true, rt, iss_sf, is_lasr);
+ if (is_lasr) {
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
+ }
+ }
+ }
+}
+
+/*
+ * C3.3.5 Load register (literal)
+ *
+ * 31 30 29 27 26 25 24 23 5 4 0
+ * +-----+-------+---+-----+-------------------+-------+
+ * | opc | 0 1 1 | V | 0 0 | imm19 | Rt |
+ * +-----+-------+---+-----+-------------------+-------+
+ *
+ * V: 1 -> vector (simd/fp)
+ * opc (non-vector): 00 -> 32 bit, 01 -> 64 bit,
+ * 10 -> 32 bit signed, 11 -> prefetch
+ * opc (vector): 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit (11 unallocated)
+ */
+static void disas_ld_lit(DisasContext *s, uint32_t insn)
+{
+ int rt = extract32(insn, 0, 5);
+ int64_t imm = sextract32(insn, 5, 19) << 2;
+ bool is_vector = extract32(insn, 26, 1);
+ int opc = extract32(insn, 30, 2);
+ bool is_signed = false;
+ int size = 2;
+ TCGv_i64 tcg_rt, tcg_addr;
+
+ if (is_vector) {
+ if (opc == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ size = 2 + opc;
+ if (!fp_access_check(s)) {
+ return;
+ }
+ } else {
+ if (opc == 3) {
+ /* PRFM (literal) : prefetch */
+ return;
+ }
+ size = 2 + extract32(opc, 0, 1);
+ is_signed = extract32(opc, 1, 1);
+ }
+
+ tcg_rt = cpu_reg(s, rt);
+
+ tcg_addr = tcg_const_i64((s->pc - 4) + imm);
+ if (is_vector) {
+ do_fp_ld(s, rt, tcg_addr, size);
+ } else {
+ /* Only unsigned 32bit loads target 32bit registers. */
+ bool iss_sf = opc != 0;
+
+ do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false,
+ true, rt, iss_sf, false);
+ }
+ tcg_temp_free_i64(tcg_addr);
+}
+
+/*
+ * C5.6.80 LDNP (Load Pair - non-temporal hint)
+ * C5.6.81 LDP (Load Pair - non vector)
+ * C5.6.82 LDPSW (Load Pair Signed Word - non vector)
+ * C5.6.176 STNP (Store Pair - non-temporal hint)
+ * C5.6.177 STP (Store Pair - non vector)
+ * C6.3.165 LDNP (Load Pair of SIMD&FP - non-temporal hint)
+ * C6.3.165 LDP (Load Pair of SIMD&FP)
+ * C6.3.284 STNP (Store Pair of SIMD&FP - non-temporal hint)
+ * C6.3.284 STP (Store Pair of SIMD&FP)
+ *
+ * 31 30 29 27 26 25 24 23 22 21 15 14 10 9 5 4 0
+ * +-----+-------+---+---+-------+---+-----------------------------+
+ * | opc | 1 0 1 | V | 0 | index | L | imm7 | Rt2 | Rn | Rt |
+ * +-----+-------+---+---+-------+---+-------+-------+------+------+
+ *
+ * opc: LDP/STP/LDNP/STNP 00 -> 32 bit, 10 -> 64 bit
+ * LDPSW 01
+ * LDP/STP/LDNP/STNP (SIMD) 00 -> 32 bit, 01 -> 64 bit, 10 -> 128 bit
+ * V: 0 -> GPR, 1 -> Vector
+ * idx: 00 -> signed offset with non-temporal hint, 01 -> post-index,
+ * 10 -> signed offset, 11 -> pre-index
+ * L: 0 -> Store 1 -> Load
+ *
+ * Rt, Rt2 = GPR or SIMD registers to be stored
+ * Rn = general purpose register containing address
+ * imm7 = signed offset (multiple of 4 or 8 depending on size)
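+ *
+ * For example, a 64-bit (non-vector) LDP has size = 3, so an imm7 of -2
+ * addresses Rn - 16: the 7-bit immediate is scaled by the 8-byte access
+ * size (offset <<= size below).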
+ */
+static void disas_ldst_pair(DisasContext *s, uint32_t insn)
+{
+ int rt = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int rt2 = extract32(insn, 10, 5);
+ uint64_t offset = sextract64(insn, 15, 7);
+ int index = extract32(insn, 23, 2);
+ bool is_vector = extract32(insn, 26, 1);
+ bool is_load = extract32(insn, 22, 1);
+ int opc = extract32(insn, 30, 2);
+
+ bool is_signed = false;
+ bool postindex = false;
+ bool wback = false;
+
+ TCGv_i64 tcg_addr; /* calculated address */
+ int size;
+
+ if (opc == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (is_vector) {
+ size = 2 + opc;
+ } else {
+ size = 2 + extract32(opc, 1, 1);
+ is_signed = extract32(opc, 0, 1);
+ if (!is_load && is_signed) {
+ unallocated_encoding(s);
+ return;
+ }
+ }
+
+ switch (index) {
+ case 1: /* post-index */
+ postindex = true;
+ wback = true;
+ break;
+ case 0:
+ /* signed offset with "non-temporal" hint. Since we don't emulate
+ * caches we don't care about hints to the cache system about
+ * data access patterns, and handle this identically to plain
+ * signed offset.
+ */
+ if (is_signed) {
+ /* There is no non-temporal-hint version of LDPSW */
+ unallocated_encoding(s);
+ return;
+ }
+ postindex = false;
+ break;
+ case 2: /* signed offset, rn not updated */
+ postindex = false;
+ break;
+ case 3: /* pre-index */
+ postindex = false;
+ wback = true;
+ break;
+ }
+
+ if (is_vector && !fp_access_check(s)) {
+ return;
+ }
+
+ offset <<= size;
+
+ if (rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+
+ tcg_addr = read_cpu_reg_sp(s, rn, 1);
+
+ if (!postindex) {
+ tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);
+ }
+
+ if (is_vector) {
+ if (is_load) {
+ do_fp_ld(s, rt, tcg_addr, size);
+ } else {
+ do_fp_st(s, rt, tcg_addr, size);
+ }
+ } else {
+ TCGv_i64 tcg_rt = cpu_reg(s, rt);
+ if (is_load) {
+ do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, false,
+ false, 0, false, false);
+ } else {
+ do_gpr_st(s, tcg_rt, tcg_addr, size,
+ false, 0, false, false);
+ }
+ }
+ tcg_gen_addi_i64(tcg_addr, tcg_addr, 1 << size);
+ if (is_vector) {
+ if (is_load) {
+ do_fp_ld(s, rt2, tcg_addr, size);
+ } else {
+ do_fp_st(s, rt2, tcg_addr, size);
+ }
+ } else {
+ TCGv_i64 tcg_rt2 = cpu_reg(s, rt2);
+ if (is_load) {
+ do_gpr_ld(s, tcg_rt2, tcg_addr, size, is_signed, false,
+ false, 0, false, false);
+ } else {
+ do_gpr_st(s, tcg_rt2, tcg_addr, size,
+ false, 0, false, false);
+ }
+ }
+
+ if (wback) {
+ if (postindex) {
+ tcg_gen_addi_i64(tcg_addr, tcg_addr, offset - (1 << size));
+ } else {
+ tcg_gen_subi_i64(tcg_addr, tcg_addr, 1 << size);
+ }
+ tcg_gen_mov_i64(cpu_reg_sp(s, rn), tcg_addr);
+ }
+}
+
+/*
+ * C3.3.8 Load/store (immediate post-indexed)
+ * C3.3.9 Load/store (immediate pre-indexed)
+ * C3.3.12 Load/store (unscaled immediate)
+ *
+ * 31 30 29 27 26 25 24 23 22 21 20 12 11 10 9 5 4 0
+ * +----+-------+---+-----+-----+---+--------+-----+------+------+
+ * |size| 1 1 1 | V | 0 0 | opc | 0 | imm9 | idx | Rn | Rt |
+ * +----+-------+---+-----+-----+---+--------+-----+------+------+
+ *
+ * idx = 01 -> post-indexed, 11 pre-indexed, 00 unscaled imm. (no writeback)
+ 10 -> unprivileged
+ * V = 0 -> non-vector
+ * size: 00 -> 8 bit, 01 -> 16 bit, 10 -> 32 bit, 11 -> 64bit
+ * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
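+ *
+ * For example, LDR X0, [X1], #8 (post-indexed, idx = 01) loads from the
+ * address in X1 and then adds 8 to X1, while LDR X0, [X1, #8]!
+ * (pre-indexed, idx = 11) adds 8 first and writes the updated address back.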
+ */
+static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn,
+ int opc,
+ int size,
+ int rt,
+ bool is_vector)
+{
+ int rn = extract32(insn, 5, 5);
+ int imm9 = sextract32(insn, 12, 9);
+ int idx = extract32(insn, 10, 2);
+ bool is_signed = false;
+ bool is_store = false;
+ bool is_extended = false;
+ bool is_unpriv = (idx == 2);
+ bool iss_valid = !is_vector;
+ bool post_index;
+ bool writeback;
+
+ TCGv_i64 tcg_addr;
+
+ if (is_vector) {
+ size |= (opc & 2) << 1;
+ if (size > 4 || is_unpriv) {
+ unallocated_encoding(s);
+ return;
+ }
+ is_store = ((opc & 1) == 0);
+ if (!fp_access_check(s)) {
+ return;
+ }
+ } else {
+ if (size == 3 && opc == 2) {
+ /* PRFM - prefetch */
+ if (is_unpriv) {
+ unallocated_encoding(s);
+ return;
+ }
+ return;
+ }
+ if (opc == 3 && size > 1) {
+ unallocated_encoding(s);
+ return;
+ }
+ is_store = (opc == 0);
+ is_signed = extract32(opc, 1, 1);
+ is_extended = (size < 3) && extract32(opc, 0, 1);
+ }
+
+ switch (idx) {
+ case 0:
+ case 2:
+ post_index = false;
+ writeback = false;
+ break;
+ case 1:
+ post_index = true;
+ writeback = true;
+ break;
+ case 3:
+ post_index = false;
+ writeback = true;
+ break;
+ }
+
+ if (rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+ tcg_addr = read_cpu_reg_sp(s, rn, 1);
+
+ if (!post_index) {
+ tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
+ }
+
+ if (is_vector) {
+ if (is_store) {
+ do_fp_st(s, rt, tcg_addr, size);
+ } else {
+ do_fp_ld(s, rt, tcg_addr, size);
+ }
+ } else {
+ TCGv_i64 tcg_rt = cpu_reg(s, rt);
+ int memidx = is_unpriv ? get_a64_user_mem_index(s) : get_mem_index(s);
+ bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
+
+ if (is_store) {
+ do_gpr_st_memidx(s, tcg_rt, tcg_addr, size, memidx,
+ iss_valid, rt, iss_sf, false);
+ } else {
+ do_gpr_ld_memidx(s, tcg_rt, tcg_addr, size,
+ is_signed, is_extended, memidx,
+ iss_valid, rt, iss_sf, false);
+ }
+ }
+
+ if (writeback) {
+ TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
+ if (post_index) {
+ tcg_gen_addi_i64(tcg_addr, tcg_addr, imm9);
+ }
+ tcg_gen_mov_i64(tcg_rn, tcg_addr);
+ }
+}
+
+/*
+ * C3.3.10 Load/store (register offset)
+ *
+ * 31 30 29 27 26 25 24 23 22 21 20 16 15 13 12 11 10 9 5 4 0
+ * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
+ * |size| 1 1 1 | V | 0 0 | opc | 1 | Rm | opt | S| 1 0 | Rn | Rt |
+ * +----+-------+---+-----+-----+---+------+-----+--+-----+----+----+
+ *
+ * For non-vector:
+ * size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
+ * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
+ * For vector:
+ * size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
+ * opc<0>: 0 -> store, 1 -> load
+ * V: 1 -> vector/simd
+ * opt: extend encoding (see DecodeRegExtend)
+ * S: if S=1 then scale (essentially index by sizeof(size))
+ * Rt: register to transfer into/out of
+ * Rn: address register or SP for base
+ * Rm: offset register or ZR for offset
+ */
+static void disas_ldst_reg_roffset(DisasContext *s, uint32_t insn,
+ int opc,
+ int size,
+ int rt,
+ bool is_vector)
+{
+ int rn = extract32(insn, 5, 5);
+ int shift = extract32(insn, 12, 1);
+ int rm = extract32(insn, 16, 5);
+ int opt = extract32(insn, 13, 3);
+ bool is_signed = false;
+ bool is_store = false;
+ bool is_extended = false;
+
+ TCGv_i64 tcg_rm;
+ TCGv_i64 tcg_addr;
+
+ if (extract32(opt, 1, 1) == 0) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (is_vector) {
+ size |= (opc & 2) << 1;
+ if (size > 4) {
+ unallocated_encoding(s);
+ return;
+ }
+ is_store = !extract32(opc, 0, 1);
+ if (!fp_access_check(s)) {
+ return;
+ }
+ } else {
+ if (size == 3 && opc == 2) {
+ /* PRFM - prefetch */
+ return;
+ }
+ if (opc == 3 && size > 1) {
+ unallocated_encoding(s);
+ return;
+ }
+ is_store = (opc == 0);
+ is_signed = extract32(opc, 1, 1);
+ is_extended = (size < 3) && extract32(opc, 0, 1);
+ }
+
+ if (rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+ tcg_addr = read_cpu_reg_sp(s, rn, 1);
+
+ tcg_rm = read_cpu_reg(s, rm, 1);
+ ext_and_shift_reg(tcg_rm, tcg_rm, opt, shift ? size : 0);
+
+ tcg_gen_add_i64(tcg_addr, tcg_addr, tcg_rm);
+
+ if (is_vector) {
+ if (is_store) {
+ do_fp_st(s, rt, tcg_addr, size);
+ } else {
+ do_fp_ld(s, rt, tcg_addr, size);
+ }
+ } else {
+ TCGv_i64 tcg_rt = cpu_reg(s, rt);
+ bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
+ if (is_store) {
+ do_gpr_st(s, tcg_rt, tcg_addr, size,
+ true, rt, iss_sf, false);
+ } else {
+ do_gpr_ld(s, tcg_rt, tcg_addr, size,
+ is_signed, is_extended,
+ true, rt, iss_sf, false);
+ }
+ }
+}
+
+/*
+ * C3.3.13 Load/store (unsigned immediate)
+ *
+ * 31 30 29 27 26 25 24 23 22 21 10 9 5
+ * +----+-------+---+-----+-----+------------+-------+------+
+ * |size| 1 1 1 | V | 0 1 | opc | imm12 | Rn | Rt |
+ * +----+-------+---+-----+-----+------------+-------+------+
+ *
+ * For non-vector:
+ * size: 00-> byte, 01 -> 16 bit, 10 -> 32bit, 11 -> 64bit
+ * opc: 00 -> store, 01 -> loadu, 10 -> loads 64, 11 -> loads 32
+ * For vector:
+ * size is opc<1>:size<1:0> so 100 -> 128 bit; 110 and 111 unallocated
+ * opc<0>: 0 -> store, 1 -> load
+ * Rn: base address register (inc SP)
+ * Rt: target register
+ */
+static void disas_ldst_reg_unsigned_imm(DisasContext *s, uint32_t insn,
+ int opc,
+ int size,
+ int rt,
+ bool is_vector)
+{
+ int rn = extract32(insn, 5, 5);
+ unsigned int imm12 = extract32(insn, 10, 12);
+ unsigned int offset;
+
+ TCGv_i64 tcg_addr;
+
+ bool is_store;
+ bool is_signed = false;
+ bool is_extended = false;
+
+ if (is_vector) {
+ size |= (opc & 2) << 1;
+ if (size > 4) {
+ unallocated_encoding(s);
+ return;
+ }
+ is_store = !extract32(opc, 0, 1);
+ if (!fp_access_check(s)) {
+ return;
+ }
+ } else {
+ if (size == 3 && opc == 2) {
+ /* PRFM - prefetch */
+ return;
+ }
+ if (opc == 3 && size > 1) {
+ unallocated_encoding(s);
+ return;
+ }
+ is_store = (opc == 0);
+ is_signed = extract32(opc, 1, 1);
+ is_extended = (size < 3) && extract32(opc, 0, 1);
+ }
+
+ if (rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+ tcg_addr = read_cpu_reg_sp(s, rn, 1);
+ offset = imm12 << size;
+ tcg_gen_addi_i64(tcg_addr, tcg_addr, offset);
+
+ if (is_vector) {
+ if (is_store) {
+ do_fp_st(s, rt, tcg_addr, size);
+ } else {
+ do_fp_ld(s, rt, tcg_addr, size);
+ }
+ } else {
+ TCGv_i64 tcg_rt = cpu_reg(s, rt);
+ bool iss_sf = disas_ldst_compute_iss_sf(size, is_signed, opc);
+ if (is_store) {
+ do_gpr_st(s, tcg_rt, tcg_addr, size,
+ true, rt, iss_sf, false);
+ } else {
+ do_gpr_ld(s, tcg_rt, tcg_addr, size, is_signed, is_extended,
+ true, rt, iss_sf, false);
+ }
+ }
+}
+
+/* Load/store register (all forms) */
+static void disas_ldst_reg(DisasContext *s, uint32_t insn)
+{
+ int rt = extract32(insn, 0, 5);
+ int opc = extract32(insn, 22, 2);
+ bool is_vector = extract32(insn, 26, 1);
+ int size = extract32(insn, 30, 2);
+
+ switch (extract32(insn, 24, 2)) {
+ case 0:
+ if (extract32(insn, 21, 1) == 1 && extract32(insn, 10, 2) == 2) {
+ disas_ldst_reg_roffset(s, insn, opc, size, rt, is_vector);
+ } else {
+ /* Load/store register (unscaled immediate)
+ * Load/store immediate pre/post-indexed
+ * Load/store register unprivileged
+ */
+ disas_ldst_reg_imm9(s, insn, opc, size, rt, is_vector);
+ }
+ break;
+ case 1:
+ disas_ldst_reg_unsigned_imm(s, insn, opc, size, rt, is_vector);
+ break;
+ default:
+ unallocated_encoding(s);
+ break;
+ }
+}
+
+/* C3.3.1 AdvSIMD load/store multiple structures
+ *
+ * 31 30 29 23 22 21 16 15 12 11 10 9 5 4 0
+ * +---+---+---------------+---+-------------+--------+------+------+------+
+ * | 0 | Q | 0 0 1 1 0 0 0 | L | 0 0 0 0 0 0 | opcode | size | Rn | Rt |
+ * +---+---+---------------+---+-------------+--------+------+------+------+
+ *
+ * C3.3.2 AdvSIMD load/store multiple structures (post-indexed)
+ *
+ * 31 30 29 23 22 21 20 16 15 12 11 10 9 5 4 0
+ * +---+---+---------------+---+---+---------+--------+------+------+------+
+ * | 0 | Q | 0 0 1 1 0 0 1 | L | 0 | Rm | opcode | size | Rn | Rt |
+ * +---+---+---------------+---+---+---------+--------+------+------+------+
+ *
+ * Rt: first (or only) SIMD&FP register to be transferred
+ * Rn: base address or SP
+ * Rm (post-index only): post-index register (when !31) or size dependent #imm
+ */
+static void disas_ldst_multiple_struct(DisasContext *s, uint32_t insn)
+{
+ int rt = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int size = extract32(insn, 10, 2);
+ int opcode = extract32(insn, 12, 4);
+ bool is_store = !extract32(insn, 22, 1);
+ bool is_postidx = extract32(insn, 23, 1);
+ bool is_q = extract32(insn, 30, 1);
+ TCGv_i64 tcg_addr, tcg_rn;
+
+ int ebytes = 1 << size;
+ int elements = (is_q ? 128 : 64) / (8 << size);
+ int rpt; /* num iterations */
+ int selem; /* structure elements */
+ int r;
+
+ if (extract32(insn, 31, 1) || extract32(insn, 21, 1)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ /* From the shared decode logic */
+ switch (opcode) {
+ case 0x0:
+ rpt = 1;
+ selem = 4;
+ break;
+ case 0x2:
+ rpt = 4;
+ selem = 1;
+ break;
+ case 0x4:
+ rpt = 1;
+ selem = 3;
+ break;
+ case 0x6:
+ rpt = 3;
+ selem = 1;
+ break;
+ case 0x7:
+ rpt = 1;
+ selem = 1;
+ break;
+ case 0x8:
+ rpt = 1;
+ selem = 2;
+ break;
+ case 0xa:
+ rpt = 2;
+ selem = 1;
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (size == 3 && !is_q && selem != 1) {
+ /* reserved */
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ if (rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+
+ tcg_rn = cpu_reg_sp(s, rn);
+ tcg_addr = tcg_temp_new_i64();
+ tcg_gen_mov_i64(tcg_addr, tcg_rn);
+
+ for (r = 0; r < rpt; r++) {
+ int e;
+ for (e = 0; e < elements; e++) {
+ int tt = (rt + r) % 32;
+ int xs;
+ for (xs = 0; xs < selem; xs++) {
+ if (is_store) {
+ do_vec_st(s, tt, e, tcg_addr, size);
+ } else {
+ do_vec_ld(s, tt, e, tcg_addr, size);
+
+ /* For non-quad operations, setting a slice of the low
+ * 64 bits of the register clears the high 64 bits (in
+ * the ARM ARM pseudocode this is implicit in the fact
+ * that 'rval' is a 64 bit wide variable). We optimize
+ * by noticing that we only need to do this the first
+ * time we touch a register.
+ */
+ if (!is_q && e == 0 && (r == 0 || xs == selem - 1)) {
+ clear_vec_high(s, tt);
+ }
+ }
+ tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes);
+ tt = (tt + 1) % 32;
+ }
+ }
+ }
+
+ if (is_postidx) {
+ int rm = extract32(insn, 16, 5);
+ if (rm == 31) {
+ tcg_gen_mov_i64(tcg_rn, tcg_addr);
+ } else {
+ tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
+ }
+ }
+ tcg_temp_free_i64(tcg_addr);
+}
+
+/* C3.3.3 AdvSIMD load/store single structure
+ *
+ * 31 30 29 23 22 21 20 16 15 13 12 11 10 9 5 4 0
+ * +---+---+---------------+-----+-----------+-----+---+------+------+------+
+ * | 0 | Q | 0 0 1 1 0 1 0 | L R | 0 0 0 0 0 | opc | S | size | Rn | Rt |
+ * +---+---+---------------+-----+-----------+-----+---+------+------+------+
+ *
+ * C3.3.4 AdvSIMD load/store single structure (post-indexed)
+ *
+ * 31 30 29 23 22 21 20 16 15 13 12 11 10 9 5 4 0
+ * +---+---+---------------+-----+-----------+-----+---+------+------+------+
+ * | 0 | Q | 0 0 1 1 0 1 1 | L R | Rm | opc | S | size | Rn | Rt |
+ * +---+---+---------------+-----+-----------+-----+---+------+------+------+
+ *
+ * Rt: first (or only) SIMD&FP register to be transferred
+ * Rn: base address or SP
+ * Rm (post-index only): post-index register (when !31) or size dependent #imm
+ * index = encoded in Q:S:size dependent on size
+ *
+ * lane_size = encoded in R, opc
+ * transfer width = encoded in opc, S, size
+ */
+static void disas_ldst_single_struct(DisasContext *s, uint32_t insn)
+{
+ int rt = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int size = extract32(insn, 10, 2);
+ int S = extract32(insn, 12, 1);
+ int opc = extract32(insn, 13, 3);
+ int R = extract32(insn, 21, 1);
+ int is_load = extract32(insn, 22, 1);
+ int is_postidx = extract32(insn, 23, 1);
+ int is_q = extract32(insn, 30, 1);
+
+ int scale = extract32(opc, 1, 2);
+ int selem = (extract32(opc, 0, 1) << 1 | R) + 1;
+ bool replicate = false;
+ int index = is_q << 3 | S << 2 | size;
+ int ebytes, xs;
+ TCGv_i64 tcg_addr, tcg_rn;
+
+ switch (scale) {
+ case 3:
+ if (!is_load || S) {
+ unallocated_encoding(s);
+ return;
+ }
+ scale = size;
+ replicate = true;
+ break;
+ case 0:
+ break;
+ case 1:
+ if (extract32(size, 0, 1)) {
+ unallocated_encoding(s);
+ return;
+ }
+ index >>= 1;
+ break;
+ case 2:
+ if (extract32(size, 1, 1)) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!extract32(size, 0, 1)) {
+ index >>= 2;
+ } else {
+ if (S) {
+ unallocated_encoding(s);
+ return;
+ }
+ index >>= 3;
+ scale = 3;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ ebytes = 1 << scale;
+
+ if (rn == 31) {
+ gen_check_sp_alignment(s);
+ }
+
+ tcg_rn = cpu_reg_sp(s, rn);
+ tcg_addr = tcg_temp_new_i64();
+ tcg_gen_mov_i64(tcg_addr, tcg_rn);
+
+ for (xs = 0; xs < selem; xs++) {
+ if (replicate) {
+ /* Load and replicate to all elements */
+ uint64_t mulconst;
+ TCGv_i64 tcg_tmp = tcg_temp_new_i64();
+
+ tcg_gen_qemu_ld_i64(tcg_tmp, tcg_addr,
+ get_mem_index(s), s->be_data + scale);
+ switch (scale) {
+ case 0:
+ mulconst = 0x0101010101010101ULL;
+ break;
+ case 1:
+ mulconst = 0x0001000100010001ULL;
+ break;
+ case 2:
+ mulconst = 0x0000000100000001ULL;
+ break;
+ case 3:
+ mulconst = 0;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ if (mulconst) {
+ tcg_gen_muli_i64(tcg_tmp, tcg_tmp, mulconst);
+ }
+ write_vec_element(s, tcg_tmp, rt, 0, MO_64);
+ if (is_q) {
+ write_vec_element(s, tcg_tmp, rt, 1, MO_64);
+ } else {
+ clear_vec_high(s, rt);
+ }
+ tcg_temp_free_i64(tcg_tmp);
+ } else {
+ /* Load/store one element per register */
+ if (is_load) {
+ do_vec_ld(s, rt, index, tcg_addr, s->be_data + scale);
+ } else {
+ do_vec_st(s, rt, index, tcg_addr, s->be_data + scale);
+ }
+ }
+ tcg_gen_addi_i64(tcg_addr, tcg_addr, ebytes);
+ rt = (rt + 1) % 32;
+ }
+
+ if (is_postidx) {
+ int rm = extract32(insn, 16, 5);
+ if (rm == 31) {
+ tcg_gen_mov_i64(tcg_rn, tcg_addr);
+ } else {
+ tcg_gen_add_i64(tcg_rn, tcg_rn, cpu_reg(s, rm));
+ }
+ }
+ tcg_temp_free_i64(tcg_addr);
+}
+
+/* C3.3 Loads and stores */
+static void disas_ldst(DisasContext *s, uint32_t insn)
+{
+ switch (extract32(insn, 24, 6)) {
+ case 0x08: /* Load/store exclusive */
+ disas_ldst_excl(s, insn);
+ break;
+ case 0x18: case 0x1c: /* Load register (literal) */
+ disas_ld_lit(s, insn);
+ break;
+ case 0x28: case 0x29:
+ case 0x2c: case 0x2d: /* Load/store pair (all forms) */
+ disas_ldst_pair(s, insn);
+ break;
+ case 0x38: case 0x39:
+ case 0x3c: case 0x3d: /* Load/store register (all forms) */
+ disas_ldst_reg(s, insn);
+ break;
+ case 0x0c: /* AdvSIMD load/store multiple structures */
+ disas_ldst_multiple_struct(s, insn);
+ break;
+ case 0x0d: /* AdvSIMD load/store single structure */
+ disas_ldst_single_struct(s, insn);
+ break;
+ default:
+ unallocated_encoding(s);
+ break;
+ }
+}
+
+/* C3.4.6 PC-rel. addressing
+ * 31 30 29 28 24 23 5 4 0
+ * +----+-------+-----------+-------------------+------+
+ * | op | immlo | 1 0 0 0 0 | immhi | Rd |
+ * +----+-------+-----------+-------------------+------+
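+ *
+ * ADR (op = 0) adds the signed 21-bit offset to the address of this
+ * instruction; ADRP (op = 1) clears the low 12 bits of that address and
+ * adds the offset shifted left by 12, e.g. with a zero offset ADRP
+ * yields the 4KB-aligned address of the page containing the instruction.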
+ */
+static void disas_pc_rel_adr(DisasContext *s, uint32_t insn)
+{
+ unsigned int page, rd;
+ uint64_t base;
+ uint64_t offset;
+
+ page = extract32(insn, 31, 1);
+ /* SignExtend(immhi:immlo) -> offset */
+ offset = sextract64(insn, 5, 19);
+ offset = offset << 2 | extract32(insn, 29, 2);
+ rd = extract32(insn, 0, 5);
+ base = s->pc - 4;
+
+ if (page) {
+ /* ADRP (page based) */
+ base &= ~0xfff;
+ offset <<= 12;
+ }
+
+ tcg_gen_movi_i64(cpu_reg(s, rd), base + offset);
+}
+
+/*
+ * C3.4.1 Add/subtract (immediate)
+ *
+ * 31 30 29 28 24 23 22 21 10 9 5 4 0
+ * +--+--+--+-----------+-----+-------------+-----+-----+
+ * |sf|op| S| 1 0 0 0 1 |shift| imm12 | Rn | Rd |
+ * +--+--+--+-----------+-----+-------------+-----+-----+
+ *
+ * sf: 0 -> 32bit, 1 -> 64bit
+ * op: 0 -> add , 1 -> sub
+ * S: 1 -> set flags
+ * shift: 00 -> LSL imm by 0, 01 -> LSL imm by 12
+ */
+static void disas_add_sub_imm(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ uint64_t imm = extract32(insn, 10, 12);
+ int shift = extract32(insn, 22, 2);
+ bool setflags = extract32(insn, 29, 1);
+ bool sub_op = extract32(insn, 30, 1);
+ bool is_64bit = extract32(insn, 31, 1);
+
+ TCGv_i64 tcg_rn = cpu_reg_sp(s, rn);
+ TCGv_i64 tcg_rd = setflags ? cpu_reg(s, rd) : cpu_reg_sp(s, rd);
+ TCGv_i64 tcg_result;
+
+ switch (shift) {
+ case 0x0:
+ break;
+ case 0x1:
+ imm <<= 12;
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ tcg_result = tcg_temp_new_i64();
+ if (!setflags) {
+ if (sub_op) {
+ tcg_gen_subi_i64(tcg_result, tcg_rn, imm);
+ } else {
+ tcg_gen_addi_i64(tcg_result, tcg_rn, imm);
+ }
+ } else {
+ TCGv_i64 tcg_imm = tcg_const_i64(imm);
+ if (sub_op) {
+ gen_sub_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
+ } else {
+ gen_add_CC(is_64bit, tcg_result, tcg_rn, tcg_imm);
+ }
+ tcg_temp_free_i64(tcg_imm);
+ }
+
+ if (is_64bit) {
+ tcg_gen_mov_i64(tcg_rd, tcg_result);
+ } else {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_result);
+ }
+
+ tcg_temp_free_i64(tcg_result);
+}
+
+/* The input should be a value in the bottom e bits (with higher
+ * bits zero); returns that value replicated into every element
+ * of size e in a 64 bit integer.
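+ * For example, bitfield_replicate(0x03, 8) returns 0x0303030303030303.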
+ */
+static uint64_t bitfield_replicate(uint64_t mask, unsigned int e)
+{
+ assert(e != 0);
+ while (e < 64) {
+ mask |= mask << e;
+ e *= 2;
+ }
+ return mask;
+}
+
+/* Return a value with the bottom len bits set (where 0 < len <= 64) */
+static inline uint64_t bitmask64(unsigned int length)
+{
+ assert(length > 0 && length <= 64);
+ return ~0ULL >> (64 - length);
+}
+
+/* Simplified variant of pseudocode DecodeBitMasks() for the case where we
+ * only require the wmask. Returns false if the imms/immr/immn are a reserved
+ * value (ie should cause a guest UNDEF exception), and true if they are
+ * valid, in which case the decoded bit pattern is written to result.
+ */
+static bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn,
+ unsigned int imms, unsigned int immr)
+{
+ uint64_t mask;
+ unsigned e, levels, s, r;
+ int len;
+
+ assert(immn < 2 && imms < 64 && immr < 64);
+
+ /* The bit patterns we create here are 64 bit patterns which
+ * are vectors of identical elements of size e = 2, 4, 8, 16, 32 or
+ * 64 bits each. Each element contains the same value: a run
+ * of between 1 and e-1 non-zero bits, rotated within the
+ * element by between 0 and e-1 bits.
+ *
+ * The element size and run length are encoded into immn (1 bit)
+ * and imms (6 bits) as follows:
+ * 64 bit elements: immn = 1, imms = <length of run - 1>
+ * 32 bit elements: immn = 0, imms = 0 : <length of run - 1>
+ * 16 bit elements: immn = 0, imms = 10 : <length of run - 1>
+ * 8 bit elements: immn = 0, imms = 110 : <length of run - 1>
+ * 4 bit elements: immn = 0, imms = 1110 : <length of run - 1>
+ * 2 bit elements: immn = 0, imms = 11110 : <length of run - 1>
+ * Notice that immn = 0, imms = 11111x is the only combination
+ * not covered by one of the above options; this is reserved.
+ * Further, <length of run - 1> all-ones is a reserved pattern.
+ *
+ * In all cases the rotation is by immr % e (and immr is 6 bits).
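+ *
+ * As a worked example: immn = 0, imms = 0b110001, immr = 0 selects 8 bit
+ * elements (e = 8), each containing a run of s + 1 = 2 set bits with no
+ * rotation, so the decoded mask is 0x0303030303030303 (truncated to
+ * 0x03030303 by the caller when sf == 0).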
+ */
+
+ /* First determine the element size */
+ len = 31 - clz32((immn << 6) | (~imms & 0x3f));
+ if (len < 1) {
+ /* This is the immn == 0, imms == 11111x case */
+ return false;
+ }
+ e = 1 << len;
+
+ levels = e - 1;
+ s = imms & levels;
+ r = immr & levels;
+
+ if (s == levels) {
+ /* <length of run - 1> mustn't be all-ones. */
+ return false;
+ }
+
+ /* Create the value of one element: s+1 set bits rotated
+ * by r within the element (which is e bits wide)...
+ */
+ mask = bitmask64(s + 1);
+ if (r) {
+ mask = (mask >> r) | (mask << (e - r));
+ mask &= bitmask64(e);
+ }
+ /* ...then replicate the element over the whole 64 bit value */
+ mask = bitfield_replicate(mask, e);
+ *result = mask;
+ return true;
+}
+
+/* C3.4.4 Logical (immediate)
+ * 31 30 29 28 23 22 21 16 15 10 9 5 4 0
+ * +----+-----+-------------+---+------+------+------+------+
+ * | sf | opc | 1 0 0 1 0 0 | N | immr | imms | Rn | Rd |
+ * +----+-----+-------------+---+------+------+------+------+
+ */
+static void disas_logic_imm(DisasContext *s, uint32_t insn)
+{
+ unsigned int sf, opc, is_n, immr, imms, rn, rd;
+ TCGv_i64 tcg_rd, tcg_rn;
+ uint64_t wmask;
+ bool is_and = false;
+
+ sf = extract32(insn, 31, 1);
+ opc = extract32(insn, 29, 2);
+ is_n = extract32(insn, 22, 1);
+ immr = extract32(insn, 16, 6);
+ imms = extract32(insn, 10, 6);
+ rn = extract32(insn, 5, 5);
+ rd = extract32(insn, 0, 5);
+
+ if (!sf && is_n) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (opc == 0x3) { /* ANDS */
+ tcg_rd = cpu_reg(s, rd);
+ } else {
+ tcg_rd = cpu_reg_sp(s, rd);
+ }
+ tcg_rn = cpu_reg(s, rn);
+
+ if (!logic_imm_decode_wmask(&wmask, is_n, imms, immr)) {
+ /* some immediate field values are reserved */
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!sf) {
+ wmask &= 0xffffffff;
+ }
+
+ switch (opc) {
+ case 0x3: /* ANDS */
+ case 0x0: /* AND */
+ tcg_gen_andi_i64(tcg_rd, tcg_rn, wmask);
+ is_and = true;
+ break;
+ case 0x1: /* ORR */
+ tcg_gen_ori_i64(tcg_rd, tcg_rn, wmask);
+ break;
+ case 0x2: /* EOR */
+ tcg_gen_xori_i64(tcg_rd, tcg_rn, wmask);
+ break;
+ default:
+ assert(FALSE); /* must handle all above */
+ break;
+ }
+
+ if (!sf && !is_and) {
+ /* zero extend final result; we know we can skip this for AND
+ * since the immediate had the high 32 bits clear.
+ */
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
+ }
+
+ if (opc == 3) { /* ANDS */
+ gen_logic_CC(sf, tcg_rd);
+ }
+}
+
+/*
+ * C3.4.5 Move wide (immediate)
+ *
+ * 31 30 29 28 23 22 21 20 5 4 0
+ * +--+-----+-------------+-----+----------------+------+
+ * |sf| opc | 1 0 0 1 0 1 | hw | imm16 | Rd |
+ * +--+-----+-------------+-----+----------------+------+
+ *
+ * sf: 0 -> 32 bit, 1 -> 64 bit
+ * opc: 00 -> N, 10 -> Z, 11 -> K
+ * hw: shift/16 (0 or 16; 32 and 48 only valid when sf is 1)
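+ *
+ * For example, MOVZ Xd, #0x1234, LSL #16 writes 0x12340000 to Xd; a
+ * following MOVK Xd, #0x5678 then deposits 0x5678 into bits [15:0],
+ * leaving the rest unchanged. MOVN writes the bitwise NOT of the
+ * (shifted) immediate.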
+ */
+static void disas_movw_imm(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ uint64_t imm = extract32(insn, 5, 16);
+ int sf = extract32(insn, 31, 1);
+ int opc = extract32(insn, 29, 2);
+ int pos = extract32(insn, 21, 2) << 4;
+ TCGv_i64 tcg_rd = cpu_reg(s, rd);
+ TCGv_i64 tcg_imm;
+
+ if (!sf && (pos >= 32)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (opc) {
+ case 0: /* MOVN */
+ case 2: /* MOVZ */
+ imm <<= pos;
+ if (opc == 0) {
+ imm = ~imm;
+ }
+ if (!sf) {
+ imm &= 0xffffffffu;
+ }
+ tcg_gen_movi_i64(tcg_rd, imm);
+ break;
+ case 3: /* MOVK */
+ tcg_imm = tcg_const_i64(imm);
+ tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_imm, pos, 16);
+ tcg_temp_free_i64(tcg_imm);
+ if (!sf) {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
+ }
+ break;
+ default:
+ unallocated_encoding(s);
+ break;
+ }
+}
+
+/* C3.4.2 Bitfield
+ * 31 30 29 28 23 22 21 16 15 10 9 5 4 0
+ * +----+-----+-------------+---+------+------+------+------+
+ * | sf | opc | 1 0 0 1 1 0 | N | immr | imms | Rn | Rd |
+ * +----+-----+-------------+---+------+------+------+------+
+ */
+static void disas_bitfield(DisasContext *s, uint32_t insn)
+{
+ unsigned int sf, n, opc, ri, si, rn, rd, bitsize, pos, len;
+ TCGv_i64 tcg_rd, tcg_tmp;
+
+ sf = extract32(insn, 31, 1);
+ opc = extract32(insn, 29, 2);
+ n = extract32(insn, 22, 1);
+ ri = extract32(insn, 16, 6);
+ si = extract32(insn, 10, 6);
+ rn = extract32(insn, 5, 5);
+ rd = extract32(insn, 0, 5);
+ bitsize = sf ? 64 : 32;
+
+ if (sf != n || ri >= bitsize || si >= bitsize || opc > 2) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ tcg_rd = cpu_reg(s, rd);
+
+ /* Suppress the zero-extend for !sf. Since RI and SI are constrained
+ to be smaller than bitsize, we'll never reference data outside the
+ low 32-bits anyway. */
+ tcg_tmp = read_cpu_reg(s, rn, 1);
+
+ /* Recognize the common aliases. */
+ if (opc == 0) { /* SBFM */
+ if (ri == 0) {
+ if (si == 7) { /* SXTB */
+ tcg_gen_ext8s_i64(tcg_rd, tcg_tmp);
+ goto done;
+ } else if (si == 15) { /* SXTH */
+ tcg_gen_ext16s_i64(tcg_rd, tcg_tmp);
+ goto done;
+ } else if (si == 31) { /* SXTW */
+ tcg_gen_ext32s_i64(tcg_rd, tcg_tmp);
+ goto done;
+ }
+ }
+ if (si == 63 || (si == 31 && ri <= si)) { /* ASR */
+ if (si == 31) {
+ tcg_gen_ext32s_i64(tcg_tmp, tcg_tmp);
+ }
+ tcg_gen_sari_i64(tcg_rd, tcg_tmp, ri);
+ goto done;
+ }
+ } else if (opc == 2) { /* UBFM */
+ if (ri == 0) { /* UXTB, UXTH, plus non-canonical AND */
+ tcg_gen_andi_i64(tcg_rd, tcg_tmp, bitmask64(si + 1));
+ return;
+ }
+ if (si == 63 || (si == 31 && ri <= si)) { /* LSR */
+ if (si == 31) {
+ tcg_gen_ext32u_i64(tcg_tmp, tcg_tmp);
+ }
+ tcg_gen_shri_i64(tcg_rd, tcg_tmp, ri);
+ return;
+ }
+ if (si + 1 == ri && si != bitsize - 1) { /* LSL */
+ int shift = bitsize - 1 - si;
+ tcg_gen_shli_i64(tcg_rd, tcg_tmp, shift);
+ goto done;
+ }
+ }
+
+ if (opc != 1) { /* SBFM or UBFM */
+ tcg_gen_movi_i64(tcg_rd, 0);
+ }
+
+ /* do the bit move operation */
+ if (si >= ri) {
+ /* Wd<s-r:0> = Wn<s:r> */
+ tcg_gen_shri_i64(tcg_tmp, tcg_tmp, ri);
+ pos = 0;
+ len = (si - ri) + 1;
+ } else {
+ /* Wd<32+s-r,32-r> = Wn<s:0> */
+ pos = bitsize - ri;
+ len = si + 1;
+ }
+
+ tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, pos, len);
+
+ if (opc == 0) { /* SBFM - sign extend the destination field */
+ tcg_gen_shli_i64(tcg_rd, tcg_rd, 64 - (pos + len));
+ tcg_gen_sari_i64(tcg_rd, tcg_rd, 64 - (pos + len));
+ }
+
+ done:
+ if (!sf) { /* zero extend final result */
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
+ }
+}
+
+/* C3.4.3 Extract
+ * 31 30 29 28 23 22 21 20 16 15 10 9 5 4 0
+ * +----+------+-------------+---+----+------+--------+------+------+
+ * | sf | op21 | 1 0 0 1 1 1 | N | o0 | Rm | imms | Rn | Rd |
+ * +----+------+-------------+---+----+------+--------+------+------+
+ */
+static void disas_extract(DisasContext *s, uint32_t insn)
+{
+ unsigned int sf, n, rm, imm, rn, rd, bitsize, op21, op0;
+
+ sf = extract32(insn, 31, 1);
+ n = extract32(insn, 22, 1);
+ rm = extract32(insn, 16, 5);
+ imm = extract32(insn, 10, 6);
+ rn = extract32(insn, 5, 5);
+ rd = extract32(insn, 0, 5);
+ op21 = extract32(insn, 29, 2);
+ op0 = extract32(insn, 21, 1);
+ bitsize = sf ? 64 : 32;
+
+ if (sf != n || op21 || op0 || imm >= bitsize) {
+ unallocated_encoding(s);
+ } else {
+ TCGv_i64 tcg_rd, tcg_rm, tcg_rn;
+
+ tcg_rd = cpu_reg(s, rd);
+
+ if (unlikely(imm == 0)) {
+ /* tcg shl_i32/shl_i64 is undefined for 32/64 bit shifts,
+ * so an extract from bit 0 is a special case.
+ */
+ if (sf) {
+ tcg_gen_mov_i64(tcg_rd, cpu_reg(s, rm));
+ } else {
+ tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rm));
+ }
+ } else if (rm == rn) { /* ROR */
+ tcg_rm = cpu_reg(s, rm);
+ if (sf) {
+ tcg_gen_rotri_i64(tcg_rd, tcg_rm, imm);
+ } else {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(tmp, tcg_rm);
+ tcg_gen_rotri_i32(tmp, tmp, imm);
+ tcg_gen_extu_i32_i64(tcg_rd, tmp);
+ tcg_temp_free_i32(tmp);
+ }
+ } else {
+ tcg_rm = read_cpu_reg(s, rm, sf);
+ tcg_rn = read_cpu_reg(s, rn, sf);
+ tcg_gen_shri_i64(tcg_rm, tcg_rm, imm);
+ tcg_gen_shli_i64(tcg_rn, tcg_rn, bitsize - imm);
+ tcg_gen_or_i64(tcg_rd, tcg_rm, tcg_rn);
+ if (!sf) {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
+ }
+ }
+ }
+}
+
+/* C3.4 Data processing - immediate */
+static void disas_data_proc_imm(DisasContext *s, uint32_t insn)
+{
+ switch (extract32(insn, 23, 6)) {
+ case 0x20: case 0x21: /* PC-rel. addressing */
+ disas_pc_rel_adr(s, insn);
+ break;
+ case 0x22: case 0x23: /* Add/subtract (immediate) */
+ disas_add_sub_imm(s, insn);
+ break;
+ case 0x24: /* Logical (immediate) */
+ disas_logic_imm(s, insn);
+ break;
+ case 0x25: /* Move wide (immediate) */
+ disas_movw_imm(s, insn);
+ break;
+ case 0x26: /* Bitfield */
+ disas_bitfield(s, insn);
+ break;
+ case 0x27: /* Extract */
+ disas_extract(s, insn);
+ break;
+ default:
+ unallocated_encoding(s);
+ break;
+ }
+}
+
+/* Shift a TCGv src by TCGv shift_amount, put result in dst.
+ * Note that it is the caller's responsibility to ensure that the
+ * shift amount is in range (ie 0..31 or 0..63) and provide the ARM
+ * mandated semantics for out of range shifts.
+ */
+static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
+ enum a64_shift_type shift_type, TCGv_i64 shift_amount)
+{
+ switch (shift_type) {
+ case A64_SHIFT_TYPE_LSL:
+ tcg_gen_shl_i64(dst, src, shift_amount);
+ break;
+ case A64_SHIFT_TYPE_LSR:
+ tcg_gen_shr_i64(dst, src, shift_amount);
+ break;
+ case A64_SHIFT_TYPE_ASR:
+ if (!sf) {
+ tcg_gen_ext32s_i64(dst, src);
+ }
+ tcg_gen_sar_i64(dst, sf ? src : dst, shift_amount);
+ break;
+ case A64_SHIFT_TYPE_ROR:
+ if (sf) {
+ tcg_gen_rotr_i64(dst, src, shift_amount);
+ } else {
+ TCGv_i32 t0, t1;
+ t0 = tcg_temp_new_i32();
+ t1 = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(t0, src);
+ tcg_gen_extrl_i64_i32(t1, shift_amount);
+ tcg_gen_rotr_i32(t0, t0, t1);
+ tcg_gen_extu_i32_i64(dst, t0);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+ }
+ break;
+ default:
+ assert(FALSE); /* all shift types should be handled */
+ break;
+ }
+
+ if (!sf) { /* zero extend final result */
+ tcg_gen_ext32u_i64(dst, dst);
+ }
+}
+
+/* Shift a TCGv src by immediate, put result in dst.
+ * The shift amount must be in range (this should always be true as the
+ * relevant instructions will UNDEF on bad shift immediates).
+ */
+static void shift_reg_imm(TCGv_i64 dst, TCGv_i64 src, int sf,
+ enum a64_shift_type shift_type, unsigned int shift_i)
+{
+ assert(shift_i < (sf ? 64 : 32));
+
+ if (shift_i == 0) {
+ tcg_gen_mov_i64(dst, src);
+ } else {
+ TCGv_i64 shift_const;
+
+ shift_const = tcg_const_i64(shift_i);
+ shift_reg(dst, src, sf, shift_type, shift_const);
+ tcg_temp_free_i64(shift_const);
+ }
+}
+
+/* C3.5.10 Logical (shifted register)
+ * 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0
+ * +----+-----+-----------+-------+---+------+--------+------+------+
+ * | sf | opc | 0 1 0 1 0 | shift | N | Rm | imm6 | Rn | Rd |
+ * +----+-----+-----------+-------+---+------+--------+------+------+
+ */
+static void disas_logic_reg(DisasContext *s, uint32_t insn)
+{
+ TCGv_i64 tcg_rd, tcg_rn, tcg_rm;
+ unsigned int sf, opc, shift_type, invert, rm, shift_amount, rn, rd;
+
+ sf = extract32(insn, 31, 1);
+ opc = extract32(insn, 29, 2);
+ shift_type = extract32(insn, 22, 2);
+ invert = extract32(insn, 21, 1);
+ rm = extract32(insn, 16, 5);
+ shift_amount = extract32(insn, 10, 6);
+ rn = extract32(insn, 5, 5);
+ rd = extract32(insn, 0, 5);
+
+ if (!sf && (shift_amount & (1 << 5))) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ tcg_rd = cpu_reg(s, rd);
+
+ if (opc == 1 && shift_amount == 0 && shift_type == 0 && rn == 31) {
+ /* Unshifted ORR and ORN with WZR/XZR are the standard encodings for
+ * register-register MOV and MVN, so they are worth special casing.
+ */
+ tcg_rm = cpu_reg(s, rm);
+ if (invert) {
+ tcg_gen_not_i64(tcg_rd, tcg_rm);
+ if (!sf) {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
+ }
+ } else {
+ if (sf) {
+ tcg_gen_mov_i64(tcg_rd, tcg_rm);
+ } else {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rm);
+ }
+ }
+ return;
+ }
+
+ tcg_rm = read_cpu_reg(s, rm, sf);
+
+ if (shift_amount) {
+ shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, shift_amount);
+ }
+
+ tcg_rn = cpu_reg(s, rn);
+
+ switch (opc | (invert << 2)) {
+ case 0: /* AND */
+ case 3: /* ANDS */
+ tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
+ break;
+ case 1: /* ORR */
+ tcg_gen_or_i64(tcg_rd, tcg_rn, tcg_rm);
+ break;
+ case 2: /* EOR */
+ tcg_gen_xor_i64(tcg_rd, tcg_rn, tcg_rm);
+ break;
+ case 4: /* BIC */
+ case 7: /* BICS */
+ tcg_gen_andc_i64(tcg_rd, tcg_rn, tcg_rm);
+ break;
+ case 5: /* ORN */
+ tcg_gen_orc_i64(tcg_rd, tcg_rn, tcg_rm);
+ break;
+ case 6: /* EON */
+ tcg_gen_eqv_i64(tcg_rd, tcg_rn, tcg_rm);
+ break;
+ default:
+ assert(FALSE);
+ break;
+ }
+
+ if (!sf) {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
+ }
+
+ if (opc == 3) {
+ gen_logic_CC(sf, tcg_rd);
+ }
+}
+
+/*
+ * C3.5.1 Add/subtract (extended register)
+ *
+ * 31|30|29|28 24|23 22|21|20 16|15 13|12 10|9 5|4 0|
+ * +--+--+--+-----------+-----+--+-------+------+------+----+----+
+ * |sf|op| S| 0 1 0 1 1 | opt | 1| Rm |option| imm3 | Rn | Rd |
+ * +--+--+--+-----------+-----+--+-------+------+------+----+----+
+ *
+ * sf: 0 -> 32bit, 1 -> 64bit
+ * op: 0 -> add , 1 -> sub
+ * S: 1 -> set flags
+ * opt: 00
+ * option: extension type (see DecodeRegExtend)
+ * imm3: optional shift to Rm
+ *
+ * Rd = Rn + LSL(extend(Rm), amount)
+ */
+static void disas_add_sub_ext_reg(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int imm3 = extract32(insn, 10, 3);
+ int option = extract32(insn, 13, 3);
+ int rm = extract32(insn, 16, 5);
+ bool setflags = extract32(insn, 29, 1);
+ bool sub_op = extract32(insn, 30, 1);
+ bool sf = extract32(insn, 31, 1);
+
+ TCGv_i64 tcg_rm, tcg_rn; /* temps */
+ TCGv_i64 tcg_rd;
+ TCGv_i64 tcg_result;
+
+ if (imm3 > 4) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ /* non-flag setting ops may use SP */
+ if (!setflags) {
+ tcg_rd = cpu_reg_sp(s, rd);
+ } else {
+ tcg_rd = cpu_reg(s, rd);
+ }
+ tcg_rn = read_cpu_reg_sp(s, rn, sf);
+
+ tcg_rm = read_cpu_reg(s, rm, sf);
+ ext_and_shift_reg(tcg_rm, tcg_rm, option, imm3);
+
+ tcg_result = tcg_temp_new_i64();
+
+ if (!setflags) {
+ if (sub_op) {
+ tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
+ } else {
+ tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
+ }
+ } else {
+ if (sub_op) {
+ gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
+ } else {
+ gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
+ }
+ }
+
+ if (sf) {
+ tcg_gen_mov_i64(tcg_rd, tcg_result);
+ } else {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_result);
+ }
+
+ tcg_temp_free_i64(tcg_result);
+}
+
+/*
+ * C3.5.2 Add/subtract (shifted register)
+ *
+ * 31 30 29 28 24 23 22 21 20 16 15 10 9 5 4 0
+ * +--+--+--+-----------+-----+--+-------+---------+------+------+
+ * |sf|op| S| 0 1 0 1 1 |shift| 0| Rm | imm6 | Rn | Rd |
+ * +--+--+--+-----------+-----+--+-------+---------+------+------+
+ *
+ * sf: 0 -> 32bit, 1 -> 64bit
+ * op: 0 -> add , 1 -> sub
+ * S: 1 -> set flags
+ * shift: 00 -> LSL, 01 -> LSR, 10 -> ASR, 11 -> RESERVED
+ * imm6: Shift amount to apply to Rm before the add/sub
+ */
+static void disas_add_sub_reg(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int imm6 = extract32(insn, 10, 6);
+ int rm = extract32(insn, 16, 5);
+ int shift_type = extract32(insn, 22, 2);
+ bool setflags = extract32(insn, 29, 1);
+ bool sub_op = extract32(insn, 30, 1);
+ bool sf = extract32(insn, 31, 1);
+
+ TCGv_i64 tcg_rd = cpu_reg(s, rd);
+ TCGv_i64 tcg_rn, tcg_rm;
+ TCGv_i64 tcg_result;
+
+ if ((shift_type == 3) || (!sf && (imm6 > 31))) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ tcg_rn = read_cpu_reg(s, rn, sf);
+ tcg_rm = read_cpu_reg(s, rm, sf);
+
+ shift_reg_imm(tcg_rm, tcg_rm, sf, shift_type, imm6);
+
+ tcg_result = tcg_temp_new_i64();
+
+ if (!setflags) {
+ if (sub_op) {
+ tcg_gen_sub_i64(tcg_result, tcg_rn, tcg_rm);
+ } else {
+ tcg_gen_add_i64(tcg_result, tcg_rn, tcg_rm);
+ }
+ } else {
+ if (sub_op) {
+ gen_sub_CC(sf, tcg_result, tcg_rn, tcg_rm);
+ } else {
+ gen_add_CC(sf, tcg_result, tcg_rn, tcg_rm);
+ }
+ }
+
+ if (sf) {
+ tcg_gen_mov_i64(tcg_rd, tcg_result);
+ } else {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_result);
+ }
+
+ tcg_temp_free_i64(tcg_result);
+}
+
+/* C3.5.9 Data-processing (3 source)
+
+ 31 30 29 28 24 23 21 20 16 15 14 10 9 5 4 0
+ +--+------+-----------+------+------+----+------+------+------+
+ |sf| op54 | 1 1 0 1 1 | op31 | Rm | o0 | Ra | Rn | Rd |
+ +--+------+-----------+------+------+----+------+------+------+
+
+ */
+static void disas_data_proc_3src(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int ra = extract32(insn, 10, 5);
+ int rm = extract32(insn, 16, 5);
+ int op_id = (extract32(insn, 29, 3) << 4) |
+ (extract32(insn, 21, 3) << 1) |
+ extract32(insn, 15, 1);
+ bool sf = extract32(insn, 31, 1);
+ bool is_sub = extract32(op_id, 0, 1);
+ bool is_high = extract32(op_id, 2, 1);
+ bool is_signed = false;
+ TCGv_i64 tcg_op1;
+ TCGv_i64 tcg_op2;
+ TCGv_i64 tcg_tmp;
+
+ /* Note that op_id is sf:op54:op31:o0 so it includes the 32/64 size flag */
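+ /* For example SMULH has sf=1, op54=00, op31=010, o0=0, giving
+ * op_id = (0x4 << 4) | (0x2 << 1) | 0 = 0x44 below.
+ */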
+ switch (op_id) {
+ case 0x42: /* SMADDL */
+ case 0x43: /* SMSUBL */
+ case 0x44: /* SMULH */
+ is_signed = true;
+ break;
+ case 0x0: /* MADD (32bit) */
+ case 0x1: /* MSUB (32bit) */
+ case 0x40: /* MADD (64bit) */
+ case 0x41: /* MSUB (64bit) */
+ case 0x4a: /* UMADDL */
+ case 0x4b: /* UMSUBL */
+ case 0x4c: /* UMULH */
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (is_high) {
+ TCGv_i64 low_bits = tcg_temp_new_i64(); /* low bits discarded */
+ TCGv_i64 tcg_rd = cpu_reg(s, rd);
+ TCGv_i64 tcg_rn = cpu_reg(s, rn);
+ TCGv_i64 tcg_rm = cpu_reg(s, rm);
+
+ if (is_signed) {
+ tcg_gen_muls2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
+ } else {
+ tcg_gen_mulu2_i64(low_bits, tcg_rd, tcg_rn, tcg_rm);
+ }
+
+ tcg_temp_free_i64(low_bits);
+ return;
+ }
+
+ tcg_op1 = tcg_temp_new_i64();
+ tcg_op2 = tcg_temp_new_i64();
+ tcg_tmp = tcg_temp_new_i64();
+
+ if (op_id < 0x42) {
+ tcg_gen_mov_i64(tcg_op1, cpu_reg(s, rn));
+ tcg_gen_mov_i64(tcg_op2, cpu_reg(s, rm));
+ } else {
+ if (is_signed) {
+ tcg_gen_ext32s_i64(tcg_op1, cpu_reg(s, rn));
+ tcg_gen_ext32s_i64(tcg_op2, cpu_reg(s, rm));
+ } else {
+ tcg_gen_ext32u_i64(tcg_op1, cpu_reg(s, rn));
+ tcg_gen_ext32u_i64(tcg_op2, cpu_reg(s, rm));
+ }
+ }
+
+ if (ra == 31 && !is_sub) {
+ /* Special-case MADD with rA == XZR; it is the standard MUL alias */
+ tcg_gen_mul_i64(cpu_reg(s, rd), tcg_op1, tcg_op2);
+ } else {
+ tcg_gen_mul_i64(tcg_tmp, tcg_op1, tcg_op2);
+ if (is_sub) {
+ tcg_gen_sub_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
+ } else {
+ tcg_gen_add_i64(cpu_reg(s, rd), cpu_reg(s, ra), tcg_tmp);
+ }
+ }
+
+ if (!sf) {
+ tcg_gen_ext32u_i64(cpu_reg(s, rd), cpu_reg(s, rd));
+ }
+
+ tcg_temp_free_i64(tcg_op1);
+ tcg_temp_free_i64(tcg_op2);
+ tcg_temp_free_i64(tcg_tmp);
+}
+
+/* C3.5.3 - Add/subtract (with carry)
+ * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 10 9 5 4 0
+ * +--+--+--+------------------------+------+---------+------+-----+
+ * |sf|op| S| 1 1 0 1 0 0 0 0 | rm | opcode2 | Rn | Rd |
+ * +--+--+--+------------------------+------+---------+------+-----+
+ * [000000]
+ */
+
+static void disas_adc_sbc(DisasContext *s, uint32_t insn)
+{
+ unsigned int sf, op, setflags, rm, rn, rd;
+ TCGv_i64 tcg_y, tcg_rn, tcg_rd;
+
+ if (extract32(insn, 10, 6) != 0) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ sf = extract32(insn, 31, 1);
+ op = extract32(insn, 30, 1);
+ setflags = extract32(insn, 29, 1);
+ rm = extract32(insn, 16, 5);
+ rn = extract32(insn, 5, 5);
+ rd = extract32(insn, 0, 5);
+
+ tcg_rd = cpu_reg(s, rd);
+ tcg_rn = cpu_reg(s, rn);
+
+ if (op) {
+ tcg_y = new_tmp_a64(s);
+ tcg_gen_not_i64(tcg_y, cpu_reg(s, rm));
+ } else {
+ tcg_y = cpu_reg(s, rm);
+ }
+
+ if (setflags) {
+ gen_adc_CC(sf, tcg_rd, tcg_rn, tcg_y);
+ } else {
+ gen_adc(sf, tcg_rd, tcg_rn, tcg_y);
+ }
+}
+
+/* C3.5.4 - C3.5.5 Conditional compare (immediate / register)
+ * 31 30 29 28 27 26 25 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0
+ * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
+ * |sf|op| S| 1 1 0 1 0 0 1 0 |imm5/rm | cond |i/r |o2| Rn |o3|nzcv |
+ * +--+--+--+------------------------+--------+------+----+--+------+--+-----+
+ * [1] y [0] [0]
+ */
+static void disas_cc(DisasContext *s, uint32_t insn)
+{
+ unsigned int sf, op, y, cond, rn, nzcv, is_imm;
+ TCGv_i32 tcg_t0, tcg_t1, tcg_t2;
+ TCGv_i64 tcg_tmp, tcg_y, tcg_rn;
+ DisasCompare c;
+
+ if (!extract32(insn, 29, 1)) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (insn & (1 << 10 | 1 << 4)) {
+ unallocated_encoding(s);
+ return;
+ }
+ sf = extract32(insn, 31, 1);
+ op = extract32(insn, 30, 1);
+ is_imm = extract32(insn, 11, 1);
+ y = extract32(insn, 16, 5); /* y = rm (reg) or imm5 (imm) */
+ cond = extract32(insn, 12, 4);
+ rn = extract32(insn, 5, 5);
+ nzcv = extract32(insn, 0, 4);
+
+ /* Set T0 = !COND. */
+ tcg_t0 = tcg_temp_new_i32();
+ arm_test_cc(&c, cond);
+ tcg_gen_setcondi_i32(tcg_invert_cond(c.cond), tcg_t0, c.value, 0);
+ arm_free_cc(&c);
+
+ /* Load the arguments for the new comparison. */
+ if (is_imm) {
+ tcg_y = new_tmp_a64(s);
+ tcg_gen_movi_i64(tcg_y, y);
+ } else {
+ tcg_y = cpu_reg(s, y);
+ }
+ tcg_rn = cpu_reg(s, rn);
+
+ /* Set the flags for the new comparison. */
+ tcg_tmp = tcg_temp_new_i64();
+ if (op) {
+ gen_sub_CC(sf, tcg_tmp, tcg_rn, tcg_y);
+ } else {
+ gen_add_CC(sf, tcg_tmp, tcg_rn, tcg_y);
+ }
+ tcg_temp_free_i64(tcg_tmp);
+
+ /* If COND was false, force the flags to #nzcv. Compute two masks
+ * to help with this: T1 = (COND ? 0 : -1), T2 = (COND ? -1 : 0).
+ * For tcg hosts that support ANDC, we can make do with just T1.
+ * In either case, allow the tcg optimizer to delete any unused mask.
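+ * Concretely: if COND is false then T0 = 1, so T1 = 0xffffffff and
+ * T2 = 0; OR-ing a flag with T1 forces it set and AND-ing with T2
+ * (or ANDC with T1) forces it clear. If COND is true both masks are
+ * identities and the flags computed above are kept.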
+ */
+ tcg_t1 = tcg_temp_new_i32();
+ tcg_t2 = tcg_temp_new_i32();
+ tcg_gen_neg_i32(tcg_t1, tcg_t0);
+ tcg_gen_subi_i32(tcg_t2, tcg_t0, 1);
+
+ if (nzcv & 8) { /* N */
+ tcg_gen_or_i32(cpu_NF, cpu_NF, tcg_t1);
+ } else {
+ if (TCG_TARGET_HAS_andc_i32) {
+ tcg_gen_andc_i32(cpu_NF, cpu_NF, tcg_t1);
+ } else {
+ tcg_gen_and_i32(cpu_NF, cpu_NF, tcg_t2);
+ }
+ }
+ if (nzcv & 4) { /* Z */
+ if (TCG_TARGET_HAS_andc_i32) {
+ tcg_gen_andc_i32(cpu_ZF, cpu_ZF, tcg_t1);
+ } else {
+ tcg_gen_and_i32(cpu_ZF, cpu_ZF, tcg_t2);
+ }
+ } else {
+ tcg_gen_or_i32(cpu_ZF, cpu_ZF, tcg_t0);
+ }
+ if (nzcv & 2) { /* C */
+ tcg_gen_or_i32(cpu_CF, cpu_CF, tcg_t0);
+ } else {
+ if (TCG_TARGET_HAS_andc_i32) {
+ tcg_gen_andc_i32(cpu_CF, cpu_CF, tcg_t1);
+ } else {
+ tcg_gen_and_i32(cpu_CF, cpu_CF, tcg_t2);
+ }
+ }
+ if (nzcv & 1) { /* V */
+ tcg_gen_or_i32(cpu_VF, cpu_VF, tcg_t1);
+ } else {
+ if (TCG_TARGET_HAS_andc_i32) {
+ tcg_gen_andc_i32(cpu_VF, cpu_VF, tcg_t1);
+ } else {
+ tcg_gen_and_i32(cpu_VF, cpu_VF, tcg_t2);
+ }
+ }
+ tcg_temp_free_i32(tcg_t0);
+ tcg_temp_free_i32(tcg_t1);
+ tcg_temp_free_i32(tcg_t2);
+}
+
+/* C3.5.6 Conditional select
+ * 31 30 29 28 21 20 16 15 12 11 10 9 5 4 0
+ * +----+----+---+-----------------+------+------+-----+------+------+
+ * | sf | op | S | 1 1 0 1 0 1 0 0 | Rm | cond | op2 | Rn | Rd |
+ * +----+----+---+-----------------+------+------+-----+------+------+
+ */
+static void disas_cond_select(DisasContext *s, uint32_t insn)
+{
+ unsigned int sf, else_inv, rm, cond, else_inc, rn, rd;
+ TCGv_i64 tcg_rd, zero;
+ DisasCompare64 c;
+
+ if (extract32(insn, 29, 1) || extract32(insn, 11, 1)) {
+ /* S == 1 or op2<1> == 1 */
+ unallocated_encoding(s);
+ return;
+ }
+ sf = extract32(insn, 31, 1);
+ else_inv = extract32(insn, 30, 1);
+ rm = extract32(insn, 16, 5);
+ cond = extract32(insn, 12, 4);
+ else_inc = extract32(insn, 10, 1);
+ rn = extract32(insn, 5, 5);
+ rd = extract32(insn, 0, 5);
+
+ tcg_rd = cpu_reg(s, rd);
+
+ a64_test_cc(&c, cond);
+ zero = tcg_const_i64(0);
+
+ if (rn == 31 && rm == 31 && (else_inc ^ else_inv)) {
+ /* CSET & CSETM. */
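+ /* (CSET Rd, <cond> is CSINC Rd, XZR, XZR, invert(<cond>) and CSETM is
+ * the matching CSINV, so Rd becomes 1 (CSET) or -1 (CSETM) when
+ * <cond> holds and 0 otherwise.)
+ */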
+ tcg_gen_setcond_i64(tcg_invert_cond(c.cond), tcg_rd, c.value, zero);
+ if (else_inv) {
+ tcg_gen_neg_i64(tcg_rd, tcg_rd);
+ }
+ } else {
+ TCGv_i64 t_true = cpu_reg(s, rn);
+ TCGv_i64 t_false = read_cpu_reg(s, rm, 1);
+ if (else_inv && else_inc) {
+ tcg_gen_neg_i64(t_false, t_false);
+ } else if (else_inv) {
+ tcg_gen_not_i64(t_false, t_false);
+ } else if (else_inc) {
+ tcg_gen_addi_i64(t_false, t_false, 1);
+ }
+ tcg_gen_movcond_i64(c.cond, tcg_rd, c.value, zero, t_true, t_false);
+ }
+
+ tcg_temp_free_i64(zero);
+ a64_free_cc(&c);
+
+ if (!sf) {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
+ }
+}
+
+static void handle_clz(DisasContext *s, unsigned int sf,
+ unsigned int rn, unsigned int rd)
+{
+ TCGv_i64 tcg_rd, tcg_rn;
+ tcg_rd = cpu_reg(s, rd);
+ tcg_rn = cpu_reg(s, rn);
+
+ if (sf) {
+ gen_helper_clz64(tcg_rd, tcg_rn);
+ } else {
+ TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
+ gen_helper_clz(tcg_tmp32, tcg_tmp32);
+ tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
+ tcg_temp_free_i32(tcg_tmp32);
+ }
+}
+
+static void handle_cls(DisasContext *s, unsigned int sf,
+ unsigned int rn, unsigned int rd)
+{
+ TCGv_i64 tcg_rd, tcg_rn;
+ tcg_rd = cpu_reg(s, rd);
+ tcg_rn = cpu_reg(s, rn);
+
+ if (sf) {
+ gen_helper_cls64(tcg_rd, tcg_rn);
+ } else {
+ TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
+ gen_helper_cls32(tcg_tmp32, tcg_tmp32);
+ tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
+ tcg_temp_free_i32(tcg_tmp32);
+ }
+}
+
+static void handle_rbit(DisasContext *s, unsigned int sf,
+ unsigned int rn, unsigned int rd)
+{
+ TCGv_i64 tcg_rd, tcg_rn;
+ tcg_rd = cpu_reg(s, rd);
+ tcg_rn = cpu_reg(s, rn);
+
+ if (sf) {
+ gen_helper_rbit64(tcg_rd, tcg_rn);
+ } else {
+ TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
+ gen_helper_rbit(tcg_tmp32, tcg_tmp32);
+ tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
+ tcg_temp_free_i32(tcg_tmp32);
+ }
+}
+
+/* C5.6.149 REV with sf==1, opcode==3 ("REV64") */
+static void handle_rev64(DisasContext *s, unsigned int sf,
+ unsigned int rn, unsigned int rd)
+{
+ if (!sf) {
+ unallocated_encoding(s);
+ return;
+ }
+ tcg_gen_bswap64_i64(cpu_reg(s, rd), cpu_reg(s, rn));
+}
+
+/* C5.6.149 REV with sf==0, opcode==2
+ * C5.6.151 REV32 (sf==1, opcode==2)
+ */
+static void handle_rev32(DisasContext *s, unsigned int sf,
+ unsigned int rn, unsigned int rd)
+{
+ TCGv_i64 tcg_rd = cpu_reg(s, rd);
+
+ if (sf) {
+ TCGv_i64 tcg_tmp = tcg_temp_new_i64();
+ TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
+
+ /* bswap32_i64 requires zero high word */
+ tcg_gen_ext32u_i64(tcg_tmp, tcg_rn);
+ tcg_gen_bswap32_i64(tcg_rd, tcg_tmp);
+ tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32);
+ tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
+ tcg_gen_concat32_i64(tcg_rd, tcg_rd, tcg_tmp);
+
+ tcg_temp_free_i64(tcg_tmp);
+ } else {
+ tcg_gen_ext32u_i64(tcg_rd, cpu_reg(s, rn));
+ tcg_gen_bswap32_i64(tcg_rd, tcg_rd);
+ }
+}
+
+/* C5.6.150 REV16 (opcode==1) */
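+/* REV16 byte-swaps each 16-bit halfword of the source, e.g. for sf == 1
+ * an input of 0x0123456789abcdef produces 0x23016745ab89efcd.
+ */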
+static void handle_rev16(DisasContext *s, unsigned int sf,
+ unsigned int rn, unsigned int rd)
+{
+ TCGv_i64 tcg_rd = cpu_reg(s, rd);
+ TCGv_i64 tcg_tmp = tcg_temp_new_i64();
+ TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
+
+ tcg_gen_andi_i64(tcg_tmp, tcg_rn, 0xffff);
+ tcg_gen_bswap16_i64(tcg_rd, tcg_tmp);
+
+ tcg_gen_shri_i64(tcg_tmp, tcg_rn, 16);
+ tcg_gen_andi_i64(tcg_tmp, tcg_tmp, 0xffff);
+ tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
+ tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, 16, 16);
+
+ if (sf) {
+ tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32);
+ tcg_gen_andi_i64(tcg_tmp, tcg_tmp, 0xffff);
+ tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
+ tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, 32, 16);
+
+ tcg_gen_shri_i64(tcg_tmp, tcg_rn, 48);
+ tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
+ tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_tmp, 48, 16);
+ }
+
+ tcg_temp_free_i64(tcg_tmp);
+}
+
+/* C3.5.7 Data-processing (1 source)
+ * 31 30 29 28 21 20 16 15 10 9 5 4 0
+ * +----+---+---+-----------------+---------+--------+------+------+
+ * | sf | 1 | S | 1 1 0 1 0 1 1 0 | opcode2 | opcode | Rn | Rd |
+ * +----+---+---+-----------------+---------+--------+------+------+
+ */
+static void disas_data_proc_1src(DisasContext *s, uint32_t insn)
+{
+ unsigned int sf, opcode, rn, rd;
+
+ if (extract32(insn, 29, 1) || extract32(insn, 16, 5)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ sf = extract32(insn, 31, 1);
+ opcode = extract32(insn, 10, 6);
+ rn = extract32(insn, 5, 5);
+ rd = extract32(insn, 0, 5);
+
+ switch (opcode) {
+ case 0: /* RBIT */
+ handle_rbit(s, sf, rn, rd);
+ break;
+ case 1: /* REV16 */
+ handle_rev16(s, sf, rn, rd);
+ break;
+ case 2: /* REV32 */
+ handle_rev32(s, sf, rn, rd);
+ break;
+ case 3: /* REV64 */
+ handle_rev64(s, sf, rn, rd);
+ break;
+ case 4: /* CLZ */
+ handle_clz(s, sf, rn, rd);
+ break;
+ case 5: /* CLS */
+ handle_cls(s, sf, rn, rd);
+ break;
+ }
+}
+
+static void handle_div(DisasContext *s, bool is_signed, unsigned int sf,
+ unsigned int rm, unsigned int rn, unsigned int rd)
+{
+ TCGv_i64 tcg_n, tcg_m, tcg_rd;
+ tcg_rd = cpu_reg(s, rd);
+
+ if (!sf && is_signed) {
+ tcg_n = new_tmp_a64(s);
+ tcg_m = new_tmp_a64(s);
+ tcg_gen_ext32s_i64(tcg_n, cpu_reg(s, rn));
+ tcg_gen_ext32s_i64(tcg_m, cpu_reg(s, rm));
+ } else {
+ tcg_n = read_cpu_reg(s, rn, sf);
+ tcg_m = read_cpu_reg(s, rm, sf);
+ }
+
+ if (is_signed) {
+ gen_helper_sdiv64(tcg_rd, tcg_n, tcg_m);
+ } else {
+ gen_helper_udiv64(tcg_rd, tcg_n, tcg_m);
+ }
+
+ if (!sf) { /* zero extend final result */
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
+ }
+}
+
+/* C5.6.115 LSLV, C5.6.118 LSRV, C5.6.17 ASRV, C5.6.154 RORV */
+static void handle_shift_reg(DisasContext *s,
+ enum a64_shift_type shift_type, unsigned int sf,
+ unsigned int rm, unsigned int rn, unsigned int rd)
+{
+ TCGv_i64 tcg_shift = tcg_temp_new_i64();
+ TCGv_i64 tcg_rd = cpu_reg(s, rd);
+ TCGv_i64 tcg_rn = read_cpu_reg(s, rn, sf);
+
+ tcg_gen_andi_i64(tcg_shift, cpu_reg(s, rm), sf ? 63 : 31);
+ shift_reg(tcg_rd, tcg_rn, sf, shift_type, tcg_shift);
+ tcg_temp_free_i64(tcg_shift);
+}
+
+/* CRC32[BHWX], CRC32C[BHWX] */
+static void handle_crc32(DisasContext *s,
+ unsigned int sf, unsigned int sz, bool crc32c,
+ unsigned int rm, unsigned int rn, unsigned int rd)
+{
+ TCGv_i64 tcg_acc, tcg_val;
+ TCGv_i32 tcg_bytes;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_CRC)
+ || (sf == 1 && sz != 3)
+ || (sf == 0 && sz == 3)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (sz == 3) {
+ tcg_val = cpu_reg(s, rm);
+ } else {
+ uint64_t mask;
+ switch (sz) {
+ case 0:
+ mask = 0xFF;
+ break;
+ case 1:
+ mask = 0xFFFF;
+ break;
+ case 2:
+ mask = 0xFFFFFFFF;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ tcg_val = new_tmp_a64(s);
+ tcg_gen_andi_i64(tcg_val, cpu_reg(s, rm), mask);
+ }
+
+ tcg_acc = cpu_reg(s, rn);
+ tcg_bytes = tcg_const_i32(1 << sz);
+
+ if (crc32c) {
+ gen_helper_crc32c_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
+ } else {
+ gen_helper_crc32_64(cpu_reg(s, rd), tcg_acc, tcg_val, tcg_bytes);
+ }
+
+ tcg_temp_free_i32(tcg_bytes);
+}
+
+/* C3.5.8 Data-processing (2 source)
+ * 31 30 29 28 21 20 16 15 10 9 5 4 0
+ * +----+---+---+-----------------+------+--------+------+------+
+ * | sf | 0 | S | 1 1 0 1 0 1 1 0 | Rm | opcode | Rn | Rd |
+ * +----+---+---+-----------------+------+--------+------+------+
+ */
+static void disas_data_proc_2src(DisasContext *s, uint32_t insn)
+{
+ unsigned int sf, rm, opcode, rn, rd;
+ sf = extract32(insn, 31, 1);
+ rm = extract32(insn, 16, 5);
+ opcode = extract32(insn, 10, 6);
+ rn = extract32(insn, 5, 5);
+ rd = extract32(insn, 0, 5);
+
+ if (extract32(insn, 29, 1)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (opcode) {
+ case 2: /* UDIV */
+ handle_div(s, false, sf, rm, rn, rd);
+ break;
+ case 3: /* SDIV */
+ handle_div(s, true, sf, rm, rn, rd);
+ break;
+ case 8: /* LSLV */
+ handle_shift_reg(s, A64_SHIFT_TYPE_LSL, sf, rm, rn, rd);
+ break;
+ case 9: /* LSRV */
+ handle_shift_reg(s, A64_SHIFT_TYPE_LSR, sf, rm, rn, rd);
+ break;
+ case 10: /* ASRV */
+ handle_shift_reg(s, A64_SHIFT_TYPE_ASR, sf, rm, rn, rd);
+ break;
+ case 11: /* RORV */
+ handle_shift_reg(s, A64_SHIFT_TYPE_ROR, sf, rm, rn, rd);
+ break;
+ case 16:
+ case 17:
+ case 18:
+ case 19:
+ case 20:
+ case 21:
+ case 22:
+ case 23: /* CRC32 */
+ {
+ int sz = extract32(opcode, 0, 2);
+ bool crc32c = extract32(opcode, 2, 1);
+ handle_crc32(s, sf, sz, crc32c, rm, rn, rd);
+ break;
+ }
+ default:
+ unallocated_encoding(s);
+ break;
+ }
+}
+
+/* C3.5 Data processing - register */
+static void disas_data_proc_reg(DisasContext *s, uint32_t insn)
+{
+ switch (extract32(insn, 24, 5)) {
+ case 0x0a: /* Logical (shifted register) */
+ disas_logic_reg(s, insn);
+ break;
+ case 0x0b: /* Add/subtract */
+ if (insn & (1 << 21)) { /* (extended register) */
+ disas_add_sub_ext_reg(s, insn);
+ } else {
+ disas_add_sub_reg(s, insn);
+ }
+ break;
+ case 0x1b: /* Data-processing (3 source) */
+ disas_data_proc_3src(s, insn);
+ break;
+ case 0x1a:
+ switch (extract32(insn, 21, 3)) {
+ case 0x0: /* Add/subtract (with carry) */
+ disas_adc_sbc(s, insn);
+ break;
+ case 0x2: /* Conditional compare */
+ disas_cc(s, insn); /* both imm and reg forms */
+ break;
+ case 0x4: /* Conditional select */
+ disas_cond_select(s, insn);
+ break;
+ case 0x6: /* Data-processing */
+ if (insn & (1 << 30)) { /* (1 source) */
+ disas_data_proc_1src(s, insn);
+ } else { /* (2 source) */
+ disas_data_proc_2src(s, insn);
+ }
+ break;
+ default:
+ unallocated_encoding(s);
+ break;
+ }
+ break;
+ default:
+ unallocated_encoding(s);
+ break;
+ }
+}
+
+static void handle_fp_compare(DisasContext *s, bool is_double,
+ unsigned int rn, unsigned int rm,
+ bool cmp_with_zero, bool signal_all_nans)
+{
+ TCGv_i64 tcg_flags = tcg_temp_new_i64();
+ TCGv_ptr fpst = get_fpstatus_ptr();
+
+ if (is_double) {
+ TCGv_i64 tcg_vn, tcg_vm;
+
+ tcg_vn = read_fp_dreg(s, rn);
+ if (cmp_with_zero) {
+ tcg_vm = tcg_const_i64(0);
+ } else {
+ tcg_vm = read_fp_dreg(s, rm);
+ }
+ if (signal_all_nans) {
+ gen_helper_vfp_cmped_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
+ } else {
+ gen_helper_vfp_cmpd_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
+ }
+ tcg_temp_free_i64(tcg_vn);
+ tcg_temp_free_i64(tcg_vm);
+ } else {
+ TCGv_i32 tcg_vn, tcg_vm;
+
+ tcg_vn = read_fp_sreg(s, rn);
+ if (cmp_with_zero) {
+ tcg_vm = tcg_const_i32(0);
+ } else {
+ tcg_vm = read_fp_sreg(s, rm);
+ }
+ if (signal_all_nans) {
+ gen_helper_vfp_cmpes_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
+ } else {
+ gen_helper_vfp_cmps_a64(tcg_flags, tcg_vn, tcg_vm, fpst);
+ }
+ tcg_temp_free_i32(tcg_vn);
+ tcg_temp_free_i32(tcg_vm);
+ }
+
+ tcg_temp_free_ptr(fpst);
+
+ gen_set_nzcv(tcg_flags);
+
+ tcg_temp_free_i64(tcg_flags);
+}
+
+/* C3.6.22 Floating point compare
+ * 31 30 29 28 24 23 22 21 20 16 15 14 13 10 9 5 4 0
+ * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
+ * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | op | 1 0 0 0 | Rn | op2 |
+ * +---+---+---+-----------+------+---+------+-----+---------+------+-------+
+ */
+static void disas_fp_compare(DisasContext *s, uint32_t insn)
+{
+ unsigned int mos, type, rm, op, rn, opc, op2r;
+
+ mos = extract32(insn, 29, 3);
+ type = extract32(insn, 22, 2); /* 0 = single, 1 = double */
+ rm = extract32(insn, 16, 5);
+ op = extract32(insn, 14, 2);
+ rn = extract32(insn, 5, 5);
+ opc = extract32(insn, 3, 2);
+ op2r = extract32(insn, 0, 3);
+
+ if (mos || op || op2r || type > 1) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ handle_fp_compare(s, type, rn, rm, opc & 1, opc & 2);
+}
+
+/* C3.6.23 Floating point conditional compare
+ * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 3 0
+ * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
+ * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | cond | 0 1 | Rn | op | nzcv |
+ * +---+---+---+-----------+------+---+------+------+-----+------+----+------+
+ */
+static void disas_fp_ccomp(DisasContext *s, uint32_t insn)
+{
+ unsigned int mos, type, rm, cond, rn, op, nzcv;
+ TCGv_i64 tcg_flags;
+ TCGLabel *label_continue = NULL;
+
+ mos = extract32(insn, 29, 3);
+ type = extract32(insn, 22, 2); /* 0 = single, 1 = double */
+ rm = extract32(insn, 16, 5);
+ cond = extract32(insn, 12, 4);
+ rn = extract32(insn, 5, 5);
+ op = extract32(insn, 4, 1);
+ nzcv = extract32(insn, 0, 4);
+
+ if (mos || type > 1) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ if (cond < 0x0e) { /* not always */
+ TCGLabel *label_match = gen_new_label();
+ label_continue = gen_new_label();
+ arm_gen_test_cc(cond, label_match);
+ /* nomatch: */
+ tcg_flags = tcg_const_i64(nzcv << 28);
+ gen_set_nzcv(tcg_flags);
+ tcg_temp_free_i64(tcg_flags);
+ tcg_gen_br(label_continue);
+ gen_set_label(label_match);
+ }
+
+ handle_fp_compare(s, type, rn, rm, false, op);
+
+ if (cond < 0x0e) {
+ gen_set_label(label_continue);
+ }
+}
+
+/* C3.6.24 Floating point conditional select
+ * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
+ * +---+---+---+-----------+------+---+------+------+-----+------+------+
+ * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | cond | 1 1 | Rn | Rd |
+ * +---+---+---+-----------+------+---+------+------+-----+------+------+
+ */
+static void disas_fp_csel(DisasContext *s, uint32_t insn)
+{
+ unsigned int mos, type, rm, cond, rn, rd;
+ TCGv_i64 t_true, t_false, t_zero;
+ DisasCompare64 c;
+
+ mos = extract32(insn, 29, 3);
+ type = extract32(insn, 22, 2); /* 0 = single, 1 = double */
+ rm = extract32(insn, 16, 5);
+ cond = extract32(insn, 12, 4);
+ rn = extract32(insn, 5, 5);
+ rd = extract32(insn, 0, 5);
+
+ if (mos || type > 1) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ /* Zero extend sreg inputs to 64 bits now. */
+ t_true = tcg_temp_new_i64();
+ t_false = tcg_temp_new_i64();
+ read_vec_element(s, t_true, rn, 0, type ? MO_64 : MO_32);
+ read_vec_element(s, t_false, rm, 0, type ? MO_64 : MO_32);
+
+ a64_test_cc(&c, cond);
+ t_zero = tcg_const_i64(0);
+ tcg_gen_movcond_i64(c.cond, t_true, c.value, t_zero, t_true, t_false);
+ tcg_temp_free_i64(t_zero);
+ tcg_temp_free_i64(t_false);
+ a64_free_cc(&c);
+
+ /* Note that sregs write back zeros to the high bits,
+ and we've already done the zero-extension. */
+ write_fp_dreg(s, rd, t_true);
+ tcg_temp_free_i64(t_true);
+}
+
+/* C3.6.25 Floating-point data-processing (1 source) - single precision */
+static void handle_fp_1src_single(DisasContext *s, int opcode, int rd, int rn)
+{
+ TCGv_ptr fpst;
+ TCGv_i32 tcg_op;
+ TCGv_i32 tcg_res;
+
+ fpst = get_fpstatus_ptr();
+ tcg_op = read_fp_sreg(s, rn);
+ tcg_res = tcg_temp_new_i32();
+
+ switch (opcode) {
+ case 0x0: /* FMOV */
+ tcg_gen_mov_i32(tcg_res, tcg_op);
+ break;
+ case 0x1: /* FABS */
+ gen_helper_vfp_abss(tcg_res, tcg_op);
+ break;
+ case 0x2: /* FNEG */
+ gen_helper_vfp_negs(tcg_res, tcg_op);
+ break;
+ case 0x3: /* FSQRT */
+ gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
+ break;
+ case 0x8: /* FRINTN */
+ case 0x9: /* FRINTP */
+ case 0xa: /* FRINTM */
+ case 0xb: /* FRINTZ */
+ case 0xc: /* FRINTA */
+ {
+ TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));
+
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+ gen_helper_rints(tcg_res, tcg_op, fpst);
+
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+ tcg_temp_free_i32(tcg_rmode);
+ break;
+ }
+ case 0xe: /* FRINTX */
+ gen_helper_rints_exact(tcg_res, tcg_op, fpst);
+ break;
+ case 0xf: /* FRINTI */
+ gen_helper_rints(tcg_res, tcg_op, fpst);
+ break;
+ default:
+ abort();
+ }
+
+ write_fp_sreg(s, rd, tcg_res);
+
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(tcg_op);
+ tcg_temp_free_i32(tcg_res);
+}
+
+/* C3.6.25 Floating-point data-processing (1 source) - double precision */
+static void handle_fp_1src_double(DisasContext *s, int opcode, int rd, int rn)
+{
+ TCGv_ptr fpst;
+ TCGv_i64 tcg_op;
+ TCGv_i64 tcg_res;
+
+ fpst = get_fpstatus_ptr();
+ tcg_op = read_fp_dreg(s, rn);
+ tcg_res = tcg_temp_new_i64();
+
+ switch (opcode) {
+ case 0x0: /* FMOV */
+ tcg_gen_mov_i64(tcg_res, tcg_op);
+ break;
+ case 0x1: /* FABS */
+ gen_helper_vfp_absd(tcg_res, tcg_op);
+ break;
+ case 0x2: /* FNEG */
+ gen_helper_vfp_negd(tcg_res, tcg_op);
+ break;
+ case 0x3: /* FSQRT */
+ gen_helper_vfp_sqrtd(tcg_res, tcg_op, cpu_env);
+ break;
+ case 0x8: /* FRINTN */
+ case 0x9: /* FRINTP */
+ case 0xa: /* FRINTM */
+ case 0xb: /* FRINTZ */
+ case 0xc: /* FRINTA */
+ {
+ TCGv_i32 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(opcode & 7));
+
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+ gen_helper_rintd(tcg_res, tcg_op, fpst);
+
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+ tcg_temp_free_i32(tcg_rmode);
+ break;
+ }
+ case 0xe: /* FRINTX */
+ gen_helper_rintd_exact(tcg_res, tcg_op, fpst);
+ break;
+ case 0xf: /* FRINTI */
+ gen_helper_rintd(tcg_res, tcg_op, fpst);
+ break;
+ default:
+ abort();
+ }
+
+ write_fp_dreg(s, rd, tcg_res);
+
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i64(tcg_op);
+ tcg_temp_free_i64(tcg_res);
+}
+
+static void handle_fp_fcvt(DisasContext *s, int opcode,
+ int rd, int rn, int dtype, int ntype)
+{
+ switch (ntype) {
+ case 0x0:
+ {
+ TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
+ if (dtype == 1) {
+ /* Single to double */
+ TCGv_i64 tcg_rd = tcg_temp_new_i64();
+ gen_helper_vfp_fcvtds(tcg_rd, tcg_rn, cpu_env);
+ write_fp_dreg(s, rd, tcg_rd);
+ tcg_temp_free_i64(tcg_rd);
+ } else {
+ /* Single to half */
+ TCGv_i32 tcg_rd = tcg_temp_new_i32();
+ gen_helper_vfp_fcvt_f32_to_f16(tcg_rd, tcg_rn, cpu_env);
+ /* write_fp_sreg is OK here because top half of tcg_rd is zero */
+ write_fp_sreg(s, rd, tcg_rd);
+ tcg_temp_free_i32(tcg_rd);
+ }
+ tcg_temp_free_i32(tcg_rn);
+ break;
+ }
+ case 0x1:
+ {
+ TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
+ TCGv_i32 tcg_rd = tcg_temp_new_i32();
+ if (dtype == 0) {
+ /* Double to single */
+ gen_helper_vfp_fcvtsd(tcg_rd, tcg_rn, cpu_env);
+ } else {
+ /* Double to half */
+ gen_helper_vfp_fcvt_f64_to_f16(tcg_rd, tcg_rn, cpu_env);
+ /* write_fp_sreg is OK here because top half of tcg_rd is zero */
+ }
+ write_fp_sreg(s, rd, tcg_rd);
+ tcg_temp_free_i32(tcg_rd);
+ tcg_temp_free_i64(tcg_rn);
+ break;
+ }
+ case 0x3:
+ {
+ TCGv_i32 tcg_rn = read_fp_sreg(s, rn);
+ tcg_gen_ext16u_i32(tcg_rn, tcg_rn);
+ if (dtype == 0) {
+ /* Half to single */
+ TCGv_i32 tcg_rd = tcg_temp_new_i32();
+ gen_helper_vfp_fcvt_f16_to_f32(tcg_rd, tcg_rn, cpu_env);
+ write_fp_sreg(s, rd, tcg_rd);
+ tcg_temp_free_i32(tcg_rd);
+ } else {
+ /* Half to double */
+ TCGv_i64 tcg_rd = tcg_temp_new_i64();
+ gen_helper_vfp_fcvt_f16_to_f64(tcg_rd, tcg_rn, cpu_env);
+ write_fp_dreg(s, rd, tcg_rd);
+ tcg_temp_free_i64(tcg_rd);
+ }
+ tcg_temp_free_i32(tcg_rn);
+ break;
+ }
+ default:
+ abort();
+ }
+}
+
+/* C3.6.25 Floating point data-processing (1 source)
+ * 31 30 29 28 24 23 22 21 20 15 14 10 9 5 4 0
+ * +---+---+---+-----------+------+---+--------+-----------+------+------+
+ * | M | 0 | S | 1 1 1 1 0 | type | 1 | opcode | 1 0 0 0 0 | Rn | Rd |
+ * +---+---+---+-----------+------+---+--------+-----------+------+------+
+ */
+static void disas_fp_1src(DisasContext *s, uint32_t insn)
+{
+ int type = extract32(insn, 22, 2);
+ int opcode = extract32(insn, 15, 6);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+
+ switch (opcode) {
+ case 0x4: case 0x5: case 0x7:
+ {
+ /* FCVT between half, single and double precision */
+ int dtype = extract32(opcode, 0, 2);
+ if (type == 2 || dtype == type) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ handle_fp_fcvt(s, opcode, rd, rn, dtype, type);
+ break;
+ }
+ case 0x0 ... 0x3:
+ case 0x8 ... 0xc:
+ case 0xe ... 0xf:
+ /* 32-to-32 and 64-to-64 ops */
+ switch (type) {
+ case 0:
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ handle_fp_1src_single(s, opcode, rd, rn);
+ break;
+ case 1:
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ handle_fp_1src_double(s, opcode, rd, rn);
+ break;
+ default:
+ unallocated_encoding(s);
+ }
+ break;
+ default:
+ unallocated_encoding(s);
+ break;
+ }
+}
+
+/* C3.6.26 Floating-point data-processing (2 source) - single precision */
+static void handle_fp_2src_single(DisasContext *s, int opcode,
+ int rd, int rn, int rm)
+{
+ TCGv_i32 tcg_op1;
+ TCGv_i32 tcg_op2;
+ TCGv_i32 tcg_res;
+ TCGv_ptr fpst;
+
+ tcg_res = tcg_temp_new_i32();
+ fpst = get_fpstatus_ptr();
+ tcg_op1 = read_fp_sreg(s, rn);
+ tcg_op2 = read_fp_sreg(s, rm);
+
+ switch (opcode) {
+ case 0x0: /* FMUL */
+ gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x1: /* FDIV */
+ gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x2: /* FADD */
+ gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x3: /* FSUB */
+ gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x4: /* FMAX */
+ gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x5: /* FMIN */
+ gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x6: /* FMAXNM */
+ gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x7: /* FMINNM */
+ gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x8: /* FNMUL */
+ gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
+ gen_helper_vfp_negs(tcg_res, tcg_res);
+ break;
+ }
+
+ write_fp_sreg(s, rd, tcg_res);
+
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(tcg_op1);
+ tcg_temp_free_i32(tcg_op2);
+ tcg_temp_free_i32(tcg_res);
+}
+
+/* C3.6.26 Floating-point data-processing (2 source) - double precision */
+static void handle_fp_2src_double(DisasContext *s, int opcode,
+ int rd, int rn, int rm)
+{
+ TCGv_i64 tcg_op1;
+ TCGv_i64 tcg_op2;
+ TCGv_i64 tcg_res;
+ TCGv_ptr fpst;
+
+ tcg_res = tcg_temp_new_i64();
+ fpst = get_fpstatus_ptr();
+ tcg_op1 = read_fp_dreg(s, rn);
+ tcg_op2 = read_fp_dreg(s, rm);
+
+ switch (opcode) {
+ case 0x0: /* FMUL */
+ gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x1: /* FDIV */
+ gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x2: /* FADD */
+ gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x3: /* FSUB */
+ gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x4: /* FMAX */
+ gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x5: /* FMIN */
+ gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x6: /* FMAXNM */
+ gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x7: /* FMINNM */
+ gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x8: /* FNMUL */
+ gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
+ gen_helper_vfp_negd(tcg_res, tcg_res);
+ break;
+ }
+
+ write_fp_dreg(s, rd, tcg_res);
+
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i64(tcg_op1);
+ tcg_temp_free_i64(tcg_op2);
+ tcg_temp_free_i64(tcg_res);
+}
+
+/* C3.6.26 Floating point data-processing (2 source)
+ * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
+ * +---+---+---+-----------+------+---+------+--------+-----+------+------+
+ * | M | 0 | S | 1 1 1 1 0 | type | 1 | Rm | opcode | 1 0 | Rn | Rd |
+ * +---+---+---+-----------+------+---+------+--------+-----+------+------+
+ */
+static void disas_fp_2src(DisasContext *s, uint32_t insn)
+{
+ int type = extract32(insn, 22, 2);
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int rm = extract32(insn, 16, 5);
+ int opcode = extract32(insn, 12, 4);
+
+ if (opcode > 8) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (type) {
+ case 0:
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_fp_2src_single(s, opcode, rd, rn, rm);
+ break;
+ case 1:
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_fp_2src_double(s, opcode, rd, rn, rm);
+ break;
+ default:
+ unallocated_encoding(s);
+ }
+}
+
+/* C3.6.27 Floating-point data-processing (3 source) - single precision */
+static void handle_fp_3src_single(DisasContext *s, bool o0, bool o1,
+ int rd, int rn, int rm, int ra)
+{
+ TCGv_i32 tcg_op1, tcg_op2, tcg_op3;
+ TCGv_i32 tcg_res = tcg_temp_new_i32();
+ TCGv_ptr fpst = get_fpstatus_ptr();
+
+ tcg_op1 = read_fp_sreg(s, rn);
+ tcg_op2 = read_fp_sreg(s, rm);
+ tcg_op3 = read_fp_sreg(s, ra);
+
+ /* These are fused multiply-add, and must be done as one
+ * floating point operation with no rounding between the
+ * multiplication and addition steps.
+ * NB that doing the negations here as separate steps is
+ * correct: an input NaN should come out with its sign bit
+ * flipped if it is a negated input.
+ */
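+ /* In this encoding o1 negates the addend (Ra) and o0 != o1 negates the
+ * product, so o1:o0 = 00, 01, 10, 11 give FMADD, FMSUB, FNMADD and
+ * FNMSUB respectively.
+ */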
+ if (o1 == true) {
+ gen_helper_vfp_negs(tcg_op3, tcg_op3);
+ }
+
+ if (o0 != o1) {
+ gen_helper_vfp_negs(tcg_op1, tcg_op1);
+ }
+
+ gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
+
+ write_fp_sreg(s, rd, tcg_res);
+
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(tcg_op1);
+ tcg_temp_free_i32(tcg_op2);
+ tcg_temp_free_i32(tcg_op3);
+ tcg_temp_free_i32(tcg_res);
+}
+
+/* C3.6.27 Floating-point data-processing (3 source) - double precision */
+static void handle_fp_3src_double(DisasContext *s, bool o0, bool o1,
+ int rd, int rn, int rm, int ra)
+{
+ TCGv_i64 tcg_op1, tcg_op2, tcg_op3;
+ TCGv_i64 tcg_res = tcg_temp_new_i64();
+ TCGv_ptr fpst = get_fpstatus_ptr();
+
+ tcg_op1 = read_fp_dreg(s, rn);
+ tcg_op2 = read_fp_dreg(s, rm);
+ tcg_op3 = read_fp_dreg(s, ra);
+
+ /* These are fused multiply-add, and must be done as one
+ * floating point operation with no rounding between the
+ * multiplication and addition steps.
+ * NB that doing the negations here as separate steps is
+ * correct: an input NaN should come out with its sign bit
+ * flipped if it is a negated input.
+ */
+ if (o1 == true) {
+ gen_helper_vfp_negd(tcg_op3, tcg_op3);
+ }
+
+ if (o0 != o1) {
+ gen_helper_vfp_negd(tcg_op1, tcg_op1);
+ }
+
+ gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2, tcg_op3, fpst);
+
+ write_fp_dreg(s, rd, tcg_res);
+
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i64(tcg_op1);
+ tcg_temp_free_i64(tcg_op2);
+ tcg_temp_free_i64(tcg_op3);
+ tcg_temp_free_i64(tcg_res);
+}
+
+/* C3.6.27 Floating point data-processing (3 source)
+ * 31 30 29 28 24 23 22 21 20 16 15 14 10 9 5 4 0
+ * +---+---+---+-----------+------+----+------+----+------+------+------+
+ * | M | 0 | S | 1 1 1 1 1 | type | o1 | Rm | o0 | Ra | Rn | Rd |
+ * +---+---+---+-----------+------+----+------+----+------+------+------+
+ */
+static void disas_fp_3src(DisasContext *s, uint32_t insn)
+{
+ int type = extract32(insn, 22, 2);
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int ra = extract32(insn, 10, 5);
+ int rm = extract32(insn, 16, 5);
+ bool o0 = extract32(insn, 15, 1);
+ bool o1 = extract32(insn, 21, 1);
+
+ switch (type) {
+ case 0:
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_fp_3src_single(s, o0, o1, rd, rn, rm, ra);
+ break;
+ case 1:
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_fp_3src_double(s, o0, o1, rd, rn, rm, ra);
+ break;
+ default:
+ unallocated_encoding(s);
+ }
+}
+
+/* C3.6.28 Floating point immediate
+ * 31 30 29 28 24 23 22 21 20 13 12 10 9 5 4 0
+ * +---+---+---+-----------+------+---+------------+-------+------+------+
+ * | M | 0 | S | 1 1 1 1 0 | type | 1 | imm8 | 1 0 0 | imm5 | Rd |
+ * +---+---+---+-----------+------+---+------------+-------+------+------+
+ */
+static void disas_fp_imm(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int imm8 = extract32(insn, 13, 8);
+ int is_double = extract32(insn, 22, 2);
+ uint64_t imm;
+ TCGv_i64 tcg_res;
+
+ if (is_double > 1) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ /* The imm8 encodes the sign bit, enough bits to represent
+ * an exponent in the range 01....1xx to 10....0xx,
+ * and the most significant 4 bits of the mantissa; see
+ * VFPExpandImm() in the v8 ARM ARM.
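+ * For example imm8 = 0x70 expands to 0x3f800000 (single) or
+ * 0x3ff0000000000000 (double), i.e. the constant 1.0.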
+ */
+ if (is_double) {
+ imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
+ (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
+ extract32(imm8, 0, 6);
+ imm <<= 48;
+ } else {
+ imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
+ (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
+ (extract32(imm8, 0, 6) << 3);
+ imm <<= 16;
+ }
+
+ tcg_res = tcg_const_i64(imm);
+ write_fp_dreg(s, rd, tcg_res);
+ tcg_temp_free_i64(tcg_res);
+}
+
+/* Handle floating point <=> fixed point conversions. Note that we can
+ * also deal with fp <=> integer conversions as a special case (scale == 64)
+ * OPTME: consider handling that special case specially or at least skipping
+ * the call to scalbn in the helpers for zero shifts.
+ */
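+/* (With scale == 64 the helpers are passed a shift of 64 - 64 = 0, so the
+ * fixed-point scaling step is by 2^0 and a plain int <-> fp conversion
+ * results.)
+ */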
+static void handle_fpfpcvt(DisasContext *s, int rd, int rn, int opcode,
+ bool itof, int rmode, int scale, int sf, int type)
+{
+ bool is_signed = !(opcode & 1);
+ bool is_double = type;
+ TCGv_ptr tcg_fpstatus;
+ TCGv_i32 tcg_shift;
+
+ tcg_fpstatus = get_fpstatus_ptr();
+
+ tcg_shift = tcg_const_i32(64 - scale);
+
+ if (itof) {
+ TCGv_i64 tcg_int = cpu_reg(s, rn);
+ if (!sf) {
+ TCGv_i64 tcg_extend = new_tmp_a64(s);
+
+ if (is_signed) {
+ tcg_gen_ext32s_i64(tcg_extend, tcg_int);
+ } else {
+ tcg_gen_ext32u_i64(tcg_extend, tcg_int);
+ }
+
+ tcg_int = tcg_extend;
+ }
+
+ if (is_double) {
+ TCGv_i64 tcg_double = tcg_temp_new_i64();
+ if (is_signed) {
+ gen_helper_vfp_sqtod(tcg_double, tcg_int,
+ tcg_shift, tcg_fpstatus);
+ } else {
+ gen_helper_vfp_uqtod(tcg_double, tcg_int,
+ tcg_shift, tcg_fpstatus);
+ }
+ write_fp_dreg(s, rd, tcg_double);
+ tcg_temp_free_i64(tcg_double);
+ } else {
+ TCGv_i32 tcg_single = tcg_temp_new_i32();
+ if (is_signed) {
+ gen_helper_vfp_sqtos(tcg_single, tcg_int,
+ tcg_shift, tcg_fpstatus);
+ } else {
+ gen_helper_vfp_uqtos(tcg_single, tcg_int,
+ tcg_shift, tcg_fpstatus);
+ }
+ write_fp_sreg(s, rd, tcg_single);
+ tcg_temp_free_i32(tcg_single);
+ }
+ } else {
+ TCGv_i64 tcg_int = cpu_reg(s, rd);
+ TCGv_i32 tcg_rmode;
+
+ if (extract32(opcode, 2, 1)) {
+ /* There are too many rounding modes to all fit into rmode,
+ * so FCVTA[US] is a special case.
+ */
+ rmode = FPROUNDING_TIEAWAY;
+ }
+
+ tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
+
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+
+ if (is_double) {
+ TCGv_i64 tcg_double = read_fp_dreg(s, rn);
+ if (is_signed) {
+ if (!sf) {
+ gen_helper_vfp_tosld(tcg_int, tcg_double,
+ tcg_shift, tcg_fpstatus);
+ } else {
+ gen_helper_vfp_tosqd(tcg_int, tcg_double,
+ tcg_shift, tcg_fpstatus);
+ }
+ } else {
+ if (!sf) {
+ gen_helper_vfp_tould(tcg_int, tcg_double,
+ tcg_shift, tcg_fpstatus);
+ } else {
+ gen_helper_vfp_touqd(tcg_int, tcg_double,
+ tcg_shift, tcg_fpstatus);
+ }
+ }
+ tcg_temp_free_i64(tcg_double);
+ } else {
+ TCGv_i32 tcg_single = read_fp_sreg(s, rn);
+ if (sf) {
+ if (is_signed) {
+ gen_helper_vfp_tosqs(tcg_int, tcg_single,
+ tcg_shift, tcg_fpstatus);
+ } else {
+ gen_helper_vfp_touqs(tcg_int, tcg_single,
+ tcg_shift, tcg_fpstatus);
+ }
+ } else {
+ TCGv_i32 tcg_dest = tcg_temp_new_i32();
+ if (is_signed) {
+ gen_helper_vfp_tosls(tcg_dest, tcg_single,
+ tcg_shift, tcg_fpstatus);
+ } else {
+ gen_helper_vfp_touls(tcg_dest, tcg_single,
+ tcg_shift, tcg_fpstatus);
+ }
+ tcg_gen_extu_i32_i64(tcg_int, tcg_dest);
+ tcg_temp_free_i32(tcg_dest);
+ }
+ tcg_temp_free_i32(tcg_single);
+ }
+
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+ tcg_temp_free_i32(tcg_rmode);
+
+ if (!sf) {
+ tcg_gen_ext32u_i64(tcg_int, tcg_int);
+ }
+ }
+
+ tcg_temp_free_ptr(tcg_fpstatus);
+ tcg_temp_free_i32(tcg_shift);
+}
+
+/* C3.6.29 Floating point <-> fixed point conversions
+ * 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0
+ * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
+ * | sf | 0 | S | 1 1 1 1 0 | type | 0 | rmode | opcode | scale | Rn | Rd |
+ * +----+---+---+-----------+------+---+-------+--------+-------+------+------+
+ */
+static void disas_fp_fixed_conv(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int scale = extract32(insn, 10, 6);
+ int opcode = extract32(insn, 16, 3);
+ int rmode = extract32(insn, 19, 2);
+ int type = extract32(insn, 22, 2);
+ bool sbit = extract32(insn, 29, 1);
+ bool sf = extract32(insn, 31, 1);
+ bool itof;
+
+ if (sbit || (type > 1)
+ || (!sf && scale < 32)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch ((rmode << 3) | opcode) {
+ case 0x2: /* SCVTF */
+ case 0x3: /* UCVTF */
+ itof = true;
+ break;
+ case 0x18: /* FCVTZS */
+ case 0x19: /* FCVTZU */
+ itof = false;
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ handle_fpfpcvt(s, rd, rn, opcode, itof, FPROUNDING_ZERO, scale, sf, type);
+}
+
+static void handle_fmov(DisasContext *s, int rd, int rn, int type, bool itof)
+{
+ /* FMOV: gpr to or from float, double, or top half of quad fp reg,
+ * without conversion.
+ */
+
+ if (itof) {
+ TCGv_i64 tcg_rn = cpu_reg(s, rn);
+
+ switch (type) {
+ case 0:
+ {
+ /* 32 bit */
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ tcg_gen_ext32u_i64(tmp, tcg_rn);
+ tcg_gen_st_i64(tmp, cpu_env, fp_reg_offset(s, rd, MO_64));
+ tcg_gen_movi_i64(tmp, 0);
+ tcg_gen_st_i64(tmp, cpu_env, fp_reg_hi_offset(s, rd));
+ tcg_temp_free_i64(tmp);
+ break;
+ }
+ case 1:
+ {
+ /* 64 bit */
+ TCGv_i64 tmp = tcg_const_i64(0);
+ tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_offset(s, rd, MO_64));
+ tcg_gen_st_i64(tmp, cpu_env, fp_reg_hi_offset(s, rd));
+ tcg_temp_free_i64(tmp);
+ break;
+ }
+ case 2:
+ /* 64 bit to top half. */
+ tcg_gen_st_i64(tcg_rn, cpu_env, fp_reg_hi_offset(s, rd));
+ break;
+ }
+ } else {
+ TCGv_i64 tcg_rd = cpu_reg(s, rd);
+
+ switch (type) {
+ case 0:
+ /* 32 bit */
+ tcg_gen_ld32u_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_32));
+ break;
+ case 1:
+ /* 64 bit */
+ tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_offset(s, rn, MO_64));
+ break;
+ case 2:
+ /* 64 bits from top half */
+ tcg_gen_ld_i64(tcg_rd, cpu_env, fp_reg_hi_offset(s, rn));
+ break;
+ }
+ }
+}
+
+/* C3.6.30 Floating point <-> integer conversions
+ * 31 30 29 28 24 23 22 21 20 19 18 16 15 10 9 5 4 0
+ * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
+ * | sf | 0 | S | 1 1 1 1 0 | type | 1 | rmode | opc | 0 0 0 0 0 0 | Rn | Rd |
+ * +----+---+---+-----------+------+---+-------+-----+-------------+----+----+
+ */
+static void disas_fp_int_conv(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int opcode = extract32(insn, 16, 3);
+ int rmode = extract32(insn, 19, 2);
+ int type = extract32(insn, 22, 2);
+ bool sbit = extract32(insn, 29, 1);
+ bool sf = extract32(insn, 31, 1);
+
+ if (sbit) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (opcode > 5) {
+ /* FMOV */
+ bool itof = opcode & 1;
+
+ if (rmode >= 2) {
+ unallocated_encoding(s);
+ return;
+ }
+
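+ /* e.g. FMOV Xd, Vn.D[1] has sf=1, type=2, rmode=1 and so hits
+ * case 0xd below.
+ */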
+ switch (sf << 3 | type << 1 | rmode) {
+ case 0x0: /* 32 bit */
+ case 0xa: /* 64 bit */
+ case 0xd: /* 64 bit to top half of quad */
+ break;
+ default:
+ /* all other sf/type/rmode combinations are invalid */
+ unallocated_encoding(s);
+ break;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_fmov(s, rd, rn, type, itof);
+ } else {
+ /* actual FP conversions */
+ bool itof = extract32(opcode, 1, 1);
+
+ if (type > 1 || (rmode != 0 && opcode > 1)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_fpfpcvt(s, rd, rn, opcode, itof, rmode, 64, sf, type);
+ }
+}
+
+/* FP-specific subcases of table C3-6 (SIMD and FP data processing)
+ * 31 30 29 28 25 24 0
+ * +---+---+---+---------+-----------------------------+
+ * | | 0 | | 1 1 1 1 | |
+ * +---+---+---+---------+-----------------------------+
+ */
+static void disas_data_proc_fp(DisasContext *s, uint32_t insn)
+{
+ if (extract32(insn, 24, 1)) {
+ /* Floating point data-processing (3 source) */
+ disas_fp_3src(s, insn);
+ } else if (extract32(insn, 21, 1) == 0) {
+ /* Floating point to fixed point conversions */
+ disas_fp_fixed_conv(s, insn);
+ } else {
+ switch (extract32(insn, 10, 2)) {
+ case 1:
+ /* Floating point conditional compare */
+ disas_fp_ccomp(s, insn);
+ break;
+ case 2:
+ /* Floating point data-processing (2 source) */
+ disas_fp_2src(s, insn);
+ break;
+ case 3:
+ /* Floating point conditional select */
+ disas_fp_csel(s, insn);
+ break;
+ case 0:
+ switch (ctz32(extract32(insn, 12, 4))) {
+ case 0: /* [15:12] == xxx1 */
+ /* Floating point immediate */
+ disas_fp_imm(s, insn);
+ break;
+ case 1: /* [15:12] == xx10 */
+ /* Floating point compare */
+ disas_fp_compare(s, insn);
+ break;
+ case 2: /* [15:12] == x100 */
+ /* Floating point data-processing (1 source) */
+ disas_fp_1src(s, insn);
+ break;
+ case 3: /* [15:12] == 1000 */
+ unallocated_encoding(s);
+ break;
+ default: /* [15:12] == 0000 */
+ /* Floating point <-> integer conversions */
+ disas_fp_int_conv(s, insn);
+ break;
+ }
+ break;
+ }
+ }
+}
+
+static void do_ext64(DisasContext *s, TCGv_i64 tcg_left, TCGv_i64 tcg_right,
+ int pos)
+{
+ /* Extract 64 bits from the middle of two concatenated 64 bit
+ * vector register slices left:right. The extracted bits start
+ * at 'pos' bits into the right (least significant) side.
+ * We return the result in tcg_right, and guarantee not to
+ * trash tcg_left.
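+ * For example with pos == 24 the result is
+ * (tcg_right >> 24) | (tcg_left << 40).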
+ */
+ TCGv_i64 tcg_tmp = tcg_temp_new_i64();
+ assert(pos > 0 && pos < 64);
+
+ tcg_gen_shri_i64(tcg_right, tcg_right, pos);
+ tcg_gen_shli_i64(tcg_tmp, tcg_left, 64 - pos);
+ tcg_gen_or_i64(tcg_right, tcg_right, tcg_tmp);
+
+ tcg_temp_free_i64(tcg_tmp);
+}
+
+/* C3.6.1 EXT
+ * 31 30 29 24 23 22 21 20 16 15 14 11 10 9 5 4 0
+ * +---+---+-------------+-----+---+------+---+------+---+------+------+
+ * | 0 | Q | 1 0 1 1 1 0 | op2 | 0 | Rm | 0 | imm4 | 0 | Rn | Rd |
+ * +---+---+-------------+-----+---+------+---+------+---+------+------+
+ */
+static void disas_simd_ext(DisasContext *s, uint32_t insn)
+{
+ int is_q = extract32(insn, 30, 1);
+ int op2 = extract32(insn, 22, 2);
+ int imm4 = extract32(insn, 11, 4);
+ int rm = extract32(insn, 16, 5);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+ int pos = imm4 << 3;
+ TCGv_i64 tcg_resl, tcg_resh;
+
+ if (op2 != 0 || (!is_q && extract32(imm4, 3, 1))) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ tcg_resh = tcg_temp_new_i64();
+ tcg_resl = tcg_temp_new_i64();
+
+ /* Vd gets bits starting at pos bits into Vm:Vn. This is
+ * either extracting 128 bits from a 128:128 concatenation, or
+ * extracting 64 bits from a 64:64 concatenation.
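+ * e.g. the 128-bit form with imm4 = 9 (pos = 72) yields bytes 9..24
+ * of the 32-byte Vm:Vn value, so it starts in Vn's upper element.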
+ */
+ if (!is_q) {
+ read_vec_element(s, tcg_resl, rn, 0, MO_64);
+ if (pos != 0) {
+ read_vec_element(s, tcg_resh, rm, 0, MO_64);
+ do_ext64(s, tcg_resh, tcg_resl, pos);
+ }
+ tcg_gen_movi_i64(tcg_resh, 0);
+ } else {
+ TCGv_i64 tcg_hh;
+ typedef struct {
+ int reg;
+ int elt;
+ } EltPosns;
+ EltPosns eltposns[] = { {rn, 0}, {rn, 1}, {rm, 0}, {rm, 1} };
+ EltPosns *elt = eltposns;
+
+ if (pos >= 64) {
+ elt++;
+ pos -= 64;
+ }
+
+ read_vec_element(s, tcg_resl, elt->reg, elt->elt, MO_64);
+ elt++;
+ read_vec_element(s, tcg_resh, elt->reg, elt->elt, MO_64);
+ elt++;
+ if (pos != 0) {
+ do_ext64(s, tcg_resh, tcg_resl, pos);
+ tcg_hh = tcg_temp_new_i64();
+ read_vec_element(s, tcg_hh, elt->reg, elt->elt, MO_64);
+ do_ext64(s, tcg_hh, tcg_resh, pos);
+ tcg_temp_free_i64(tcg_hh);
+ }
+ }
+
+ write_vec_element(s, tcg_resl, rd, 0, MO_64);
+ tcg_temp_free_i64(tcg_resl);
+ write_vec_element(s, tcg_resh, rd, 1, MO_64);
+ tcg_temp_free_i64(tcg_resh);
+}
+
+/* C3.6.2 TBL/TBX
+ * 31 30 29 24 23 22 21 20 16 15 14 13 12 11 10 9 5 4 0
+ * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
+ * | 0 | Q | 0 0 1 1 1 0 | op2 | 0 | Rm | 0 | len | op | 0 0 | Rn | Rd |
+ * +---+---+-------------+-----+---+------+---+-----+----+-----+------+------+
+ */
+static void disas_simd_tb(DisasContext *s, uint32_t insn)
+{
+ int op2 = extract32(insn, 22, 2);
+ int is_q = extract32(insn, 30, 1);
+ int rm = extract32(insn, 16, 5);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+ int is_tblx = extract32(insn, 12, 1);
+ int len = extract32(insn, 13, 2);
+ TCGv_i64 tcg_resl, tcg_resh, tcg_idx;
+ TCGv_i32 tcg_regno, tcg_numregs;
+
+ if (op2 != 0) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ /* This does a table lookup: for every byte element in the input
+ * we index into a table formed from up to four vector registers,
+ * and then the output is the result of the lookups. Our helper
+ * function does the lookup operation for a single 64 bit part of
+ * the input.
+ */
+ tcg_resl = tcg_temp_new_i64();
+ tcg_resh = tcg_temp_new_i64();
+
+ if (is_tblx) {
+ read_vec_element(s, tcg_resl, rd, 0, MO_64);
+ } else {
+ tcg_gen_movi_i64(tcg_resl, 0);
+ }
+ if (is_tblx && is_q) {
+ read_vec_element(s, tcg_resh, rd, 1, MO_64);
+ } else {
+ tcg_gen_movi_i64(tcg_resh, 0);
+ }
+
+ tcg_idx = tcg_temp_new_i64();
+ tcg_regno = tcg_const_i32(rn);
+ tcg_numregs = tcg_const_i32(len + 1);
+ read_vec_element(s, tcg_idx, rm, 0, MO_64);
+ gen_helper_simd_tbl(tcg_resl, cpu_env, tcg_resl, tcg_idx,
+ tcg_regno, tcg_numregs);
+ if (is_q) {
+ read_vec_element(s, tcg_idx, rm, 1, MO_64);
+ gen_helper_simd_tbl(tcg_resh, cpu_env, tcg_resh, tcg_idx,
+ tcg_regno, tcg_numregs);
+ }
+ tcg_temp_free_i64(tcg_idx);
+ tcg_temp_free_i32(tcg_regno);
+ tcg_temp_free_i32(tcg_numregs);
+
+ write_vec_element(s, tcg_resl, rd, 0, MO_64);
+ tcg_temp_free_i64(tcg_resl);
+ write_vec_element(s, tcg_resh, rd, 1, MO_64);
+ tcg_temp_free_i64(tcg_resh);
+}
+
+/* C3.6.3 ZIP/UZP/TRN
+ * 31 30 29 24 23 22 21 20 16 15 14 12 11 10 9 5 4 0
+ * +---+---+-------------+------+---+------+---+------------------+------+
+ * | 0 | Q | 0 0 1 1 1 0 | size | 0 | Rm | 0 | opc | 1 0 | Rn | Rd |
+ * +---+---+-------------+------+---+------+---+------------------+------+
+ */
+static void disas_simd_zip_trn(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int rm = extract32(insn, 16, 5);
+ int size = extract32(insn, 22, 2);
+ /* opc field bits [1:0] indicate ZIP/UZP/TRN;
+ * bit 2 indicates 1 vs 2 variant of the insn.
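+ * e.g. ZIP1 (opc == 3, part == 0) interleaves the low-half elements:
+ * out[0] = Vn[0], out[1] = Vm[0], out[2] = Vn[1], out[3] = Vm[1], ...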
+ */
+ int opcode = extract32(insn, 12, 2);
+ bool part = extract32(insn, 14, 1);
+ bool is_q = extract32(insn, 30, 1);
+ int esize = 8 << size;
+ int i, ofs;
+ int datasize = is_q ? 128 : 64;
+ int elements = datasize / esize;
+ TCGv_i64 tcg_res, tcg_resl, tcg_resh;
+
+ if (opcode == 0 || (size == 3 && !is_q)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ tcg_resl = tcg_const_i64(0);
+ tcg_resh = tcg_const_i64(0);
+ tcg_res = tcg_temp_new_i64();
+
+ for (i = 0; i < elements; i++) {
+ switch (opcode) {
+ case 1: /* UZP1/2 */
+ {
+ int midpoint = elements / 2;
+ if (i < midpoint) {
+ read_vec_element(s, tcg_res, rn, 2 * i + part, size);
+ } else {
+ read_vec_element(s, tcg_res, rm,
+ 2 * (i - midpoint) + part, size);
+ }
+ break;
+ }
+ case 2: /* TRN1/2 */
+ if (i & 1) {
+ read_vec_element(s, tcg_res, rm, (i & ~1) + part, size);
+ } else {
+ read_vec_element(s, tcg_res, rn, (i & ~1) + part, size);
+ }
+ break;
+ case 3: /* ZIP1/2 */
+ {
+ int base = part * elements / 2;
+ if (i & 1) {
+ read_vec_element(s, tcg_res, rm, base + (i >> 1), size);
+ } else {
+ read_vec_element(s, tcg_res, rn, base + (i >> 1), size);
+ }
+ break;
+ }
+ default:
+ g_assert_not_reached();
+ }
+
+ ofs = i * esize;
+ if (ofs < 64) {
+ tcg_gen_shli_i64(tcg_res, tcg_res, ofs);
+ tcg_gen_or_i64(tcg_resl, tcg_resl, tcg_res);
+ } else {
+ tcg_gen_shli_i64(tcg_res, tcg_res, ofs - 64);
+ tcg_gen_or_i64(tcg_resh, tcg_resh, tcg_res);
+ }
+ }
+
+ tcg_temp_free_i64(tcg_res);
+
+ write_vec_element(s, tcg_resl, rd, 0, MO_64);
+ tcg_temp_free_i64(tcg_resl);
+ write_vec_element(s, tcg_resh, rd, 1, MO_64);
+ tcg_temp_free_i64(tcg_resh);
+}
+
+static void do_minmaxop(DisasContext *s, TCGv_i32 tcg_elt1, TCGv_i32 tcg_elt2,
+ int opc, bool is_min, TCGv_ptr fpst)
+{
+ /* Helper function for disas_simd_across_lanes: do a single precision
+ * min/max operation on the specified two inputs,
+ * and return the result in tcg_elt1.
+ */
+ if (opc == 0xc) {
+ if (is_min) {
+ gen_helper_vfp_minnums(tcg_elt1, tcg_elt1, tcg_elt2, fpst);
+ } else {
+ gen_helper_vfp_maxnums(tcg_elt1, tcg_elt1, tcg_elt2, fpst);
+ }
+ } else {
+ assert(opc == 0xf);
+ if (is_min) {
+ gen_helper_vfp_mins(tcg_elt1, tcg_elt1, tcg_elt2, fpst);
+ } else {
+ gen_helper_vfp_maxs(tcg_elt1, tcg_elt1, tcg_elt2, fpst);
+ }
+ }
+}
+
+/* C3.6.4 AdvSIMD across lanes
+ * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
+ * +---+---+---+-----------+------+-----------+--------+-----+------+------+
+ * | 0 | Q | U | 0 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 | Rn | Rd |
+ * +---+---+---+-----------+------+-----------+--------+-----+------+------+
+ */
+static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int size = extract32(insn, 22, 2);
+ int opcode = extract32(insn, 12, 5);
+ bool is_q = extract32(insn, 30, 1);
+ bool is_u = extract32(insn, 29, 1);
+ bool is_fp = false;
+ bool is_min = false;
+ int esize;
+ int elements;
+ int i;
+ TCGv_i64 tcg_res, tcg_elt;
+
+ switch (opcode) {
+ case 0x1b: /* ADDV */
+ if (is_u) {
+ unallocated_encoding(s);
+ return;
+ }
+ /* fall through */
+ case 0x3: /* SADDLV, UADDLV */
+ case 0xa: /* SMAXV, UMAXV */
+ case 0x1a: /* SMINV, UMINV */
+ if (size == 3 || (size == 2 && !is_q)) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0xc: /* FMAXNMV, FMINNMV */
+ case 0xf: /* FMAXV, FMINV */
+ if (!is_u || !is_q || extract32(size, 0, 1)) {
+ unallocated_encoding(s);
+ return;
+ }
+ /* Bit 1 of size field encodes min vs max, and actual size is always
+ * 32 bits: adjust the size variable so following code can rely on it
+ */
+ is_min = extract32(size, 1, 1);
+ is_fp = true;
+ size = 2;
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ esize = 8 << size;
+ elements = (is_q ? 128 : 64) / esize;
+
+ tcg_res = tcg_temp_new_i64();
+ tcg_elt = tcg_temp_new_i64();
+
+ /* These instructions operate across all lanes of a vector
+ * to produce a single result. We can guarantee that a 64
+ * bit intermediate is sufficient:
+ * + for [US]ADDLV the maximum element size is 32 bits, and
+ * the result type is 64 bits
+ * + for FMAX*V, FMIN*V, ADDV the intermediate type is the
+ * same as the element size, which is 32 bits at most
+ * For the integer operations we can choose to work at 64
+ * or 32 bits and truncate at the end; for simplicity
+ * we use 64 bits always. The floating point
+ * ops do require 32 bit intermediates, though.
+ */
+ if (!is_fp) {
+ read_vec_element(s, tcg_res, rn, 0, size | (is_u ? 0 : MO_SIGN));
+
+ for (i = 1; i < elements; i++) {
+ read_vec_element(s, tcg_elt, rn, i, size | (is_u ? 0 : MO_SIGN));
+
+ switch (opcode) {
+ case 0x03: /* SADDLV / UADDLV */
+ case 0x1b: /* ADDV */
+ tcg_gen_add_i64(tcg_res, tcg_res, tcg_elt);
+ break;
+ case 0x0a: /* SMAXV / UMAXV */
+ tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
+ tcg_res,
+ tcg_res, tcg_elt, tcg_res, tcg_elt);
+ break;
+ case 0x1a: /* SMINV / UMINV */
+ tcg_gen_movcond_i64(is_u ? TCG_COND_LEU : TCG_COND_LE,
+ tcg_res,
+ tcg_res, tcg_elt, tcg_res, tcg_elt);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ }
+ } else {
+ /* Floating point ops which work on 32 bit (single) intermediates.
+ * Note that correct NaN propagation requires that we do these
+ * operations in exactly the order specified by the pseudocode.
+ */
+ TCGv_i32 tcg_elt1 = tcg_temp_new_i32();
+ TCGv_i32 tcg_elt2 = tcg_temp_new_i32();
+ TCGv_i32 tcg_elt3 = tcg_temp_new_i32();
+ TCGv_ptr fpst = get_fpstatus_ptr();
+
+ assert(esize == 32);
+ assert(elements == 4);
+
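+ /* The reduction is performed as op(op(e0, e1), op(e2, e3)): a pairwise
+ * tree whose ordering matches the pseudocode and so gives the required
+ * NaN propagation behaviour.
+ */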
+ read_vec_element(s, tcg_elt, rn, 0, MO_32);
+ tcg_gen_extrl_i64_i32(tcg_elt1, tcg_elt);
+ read_vec_element(s, tcg_elt, rn, 1, MO_32);
+ tcg_gen_extrl_i64_i32(tcg_elt2, tcg_elt);
+
+ do_minmaxop(s, tcg_elt1, tcg_elt2, opcode, is_min, fpst);
+
+ read_vec_element(s, tcg_elt, rn, 2, MO_32);
+ tcg_gen_extrl_i64_i32(tcg_elt2, tcg_elt);
+ read_vec_element(s, tcg_elt, rn, 3, MO_32);
+ tcg_gen_extrl_i64_i32(tcg_elt3, tcg_elt);
+
+ do_minmaxop(s, tcg_elt2, tcg_elt3, opcode, is_min, fpst);
+
+ do_minmaxop(s, tcg_elt1, tcg_elt2, opcode, is_min, fpst);
+
+ tcg_gen_extu_i32_i64(tcg_res, tcg_elt1);
+ tcg_temp_free_i32(tcg_elt1);
+ tcg_temp_free_i32(tcg_elt2);
+ tcg_temp_free_i32(tcg_elt3);
+ tcg_temp_free_ptr(fpst);
+ }
+
+ tcg_temp_free_i64(tcg_elt);
+
+ /* Now truncate the result to the width required for the final output */
+ if (opcode == 0x03) {
+ /* SADDLV, UADDLV: result is 2*esize */
+ size++;
+ }
+
+ switch (size) {
+ case 0:
+ tcg_gen_ext8u_i64(tcg_res, tcg_res);
+ break;
+ case 1:
+ tcg_gen_ext16u_i64(tcg_res, tcg_res);
+ break;
+ case 2:
+ tcg_gen_ext32u_i64(tcg_res, tcg_res);
+ break;
+ case 3:
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ write_fp_dreg(s, rd, tcg_res);
+ tcg_temp_free_i64(tcg_res);
+}
+
+/* C6.3.31 DUP (Element, Vector)
+ *
+ * 31 30 29 21 20 16 15 10 9 5 4 0
+ * +---+---+-------------------+--------+-------------+------+------+
+ * | 0 | Q | 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 0 0 0 1 | Rn | Rd |
+ * +---+---+-------------------+--------+-------------+------+------+
+ *
+ * size: encoded in imm5 (see ARM ARM LowestSetBit())
+ */
+static void handle_simd_dupe(DisasContext *s, int is_q, int rd, int rn,
+ int imm5)
+{
+ int size = ctz32(imm5);
+ int esize = 8 << size;
+ int elements = (is_q ? 128 : 64) / esize;
+ int index, i;
+ TCGv_i64 tmp;
+
+ if (size > 3 || (size == 3 && !is_q)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ index = imm5 >> (size + 1);
+
+ tmp = tcg_temp_new_i64();
+ read_vec_element(s, tmp, rn, index, size);
+
+ for (i = 0; i < elements; i++) {
+ write_vec_element(s, tmp, rd, i, size);
+ }
+
+ if (!is_q) {
+ clear_vec_high(s, rd);
+ }
+
+ tcg_temp_free_i64(tmp);
+}
+
+/* C6.3.31 DUP (element, scalar)
+ * 31 21 20 16 15 10 9 5 4 0
+ * +-----------------------+--------+-------------+------+------+
+ * | 0 1 0 1 1 1 1 0 0 0 0 | imm5 | 0 0 0 0 0 1 | Rn | Rd |
+ * +-----------------------+--------+-------------+------+------+
+ */
+static void handle_simd_dupes(DisasContext *s, int rd, int rn,
+ int imm5)
+{
+ int size = ctz32(imm5);
+ int index;
+ TCGv_i64 tmp;
+
+ if (size > 3) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ index = imm5 >> (size + 1);
+
+ /* This instruction just extracts the specified element and
+ * zero-extends it into the bottom of the destination register.
+ */
+ tmp = tcg_temp_new_i64();
+ read_vec_element(s, tmp, rn, index, size);
+ write_fp_dreg(s, rd, tmp);
+ tcg_temp_free_i64(tmp);
+}
+
+/* C6.3.32 DUP (General)
+ *
+ * 31 30 29 21 20 16 15 10 9 5 4 0
+ * +---+---+-------------------+--------+-------------+------+------+
+ * | 0 | Q | 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 0 0 1 1 | Rn | Rd |
+ * +---+---+-------------------+--------+-------------+------+------+
+ *
+ * size: encoded in imm5 (see ARM ARM LowestSetBit())
+ */
+static void handle_simd_dupg(DisasContext *s, int is_q, int rd, int rn,
+ int imm5)
+{
+ int size = ctz32(imm5);
+ int esize = 8 << size;
+ int elements = (is_q ? 128 : 64)/esize;
+ int i = 0;
+
+ if (size > 3 || ((size == 3) && !is_q)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ for (i = 0; i < elements; i++) {
+ write_vec_element(s, cpu_reg(s, rn), rd, i, size);
+ }
+ if (!is_q) {
+ clear_vec_high(s, rd);
+ }
+}
+
+/* C6.3.150 INS (Element)
+ *
+ * 31 21 20 16 15 14 11 10 9 5 4 0
+ * +-----------------------+--------+------------+---+------+------+
+ * | 0 1 1 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd |
+ * +-----------------------+--------+------------+---+------+------+
+ *
+ * size: encoded in imm5 (see ARM ARM LowestSetBit())
+ * index: encoded in imm5<4:size+1>
+ */
+static void handle_simd_inse(DisasContext *s, int rd, int rn,
+ int imm4, int imm5)
+{
+ int size = ctz32(imm5);
+ int src_index, dst_index;
+ TCGv_i64 tmp;
+
+ if (size > 3) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ dst_index = extract32(imm5, 1+size, 5);
+ src_index = extract32(imm4, size, 4);
+
+ tmp = tcg_temp_new_i64();
+
+ read_vec_element(s, tmp, rn, src_index, size);
+ write_vec_element(s, tmp, rd, dst_index, size);
+
+ tcg_temp_free_i64(tmp);
+}
+
+
+/* C6.3.151 INS (General)
+ *
+ * 31 21 20 16 15 10 9 5 4 0
+ * +-----------------------+--------+-------------+------+------+
+ * | 0 1 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 0 1 1 1 | Rn | Rd |
+ * +-----------------------+--------+-------------+------+------+
+ *
+ * size: encoded in imm5 (see ARM ARM LowestSetBit())
+ * index: encoded in imm5<4:size+1>
+ */
+static void handle_simd_insg(DisasContext *s, int rd, int rn, int imm5)
+{
+ int size = ctz32(imm5);
+ int idx;
+
+ if (size > 3) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ idx = extract32(imm5, 1 + size, 4 - size);
+ write_vec_element(s, cpu_reg(s, rn), rd, idx, size);
+}
+
+/*
+ * C6.3.321 UMOV (General)
+ * C6.3.237 SMOV (General)
+ *
+ * 31 30 29 21 20 16 15 12 10 9 5 4 0
+ * +---+---+-------------------+--------+-------------+------+------+
+ * | 0 | Q | 0 0 1 1 1 0 0 0 0 | imm5 | 0 0 1 U 1 1 | Rn | Rd |
+ * +---+---+-------------------+--------+-------------+------+------+
+ *
+ * U: unsigned when set
+ * size: encoded in imm5 (see ARM ARM LowestSetBit())
+ */
+static void handle_simd_umov_smov(DisasContext *s, int is_q, int is_signed,
+ int rn, int rd, int imm5)
+{
+ int size = ctz32(imm5);
+ int element;
+ TCGv_i64 tcg_rd;
+
+ /* Check for UnallocatedEncodings */
+ if (is_signed) {
+ if (size > 2 || (size == 2 && !is_q)) {
+ unallocated_encoding(s);
+ return;
+ }
+ } else {
+ if (size > 3
+ || (size < 3 && is_q)
+ || (size == 3 && !is_q)) {
+ unallocated_encoding(s);
+ return;
+ }
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ element = extract32(imm5, 1+size, 4);
+
+ tcg_rd = cpu_reg(s, rd);
+ read_vec_element(s, tcg_rd, rn, element, size | (is_signed ? MO_SIGN : 0));
+ if (is_signed && !is_q) {
+ tcg_gen_ext32u_i64(tcg_rd, tcg_rd);
+ }
+}
+
+/* C3.6.5 AdvSIMD copy
+ * 31 30 29 28 21 20 16 15 14 11 10 9 5 4 0
+ * +---+---+----+-----------------+------+---+------+---+------+------+
+ * | 0 | Q | op | 0 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd |
+ * +---+---+----+-----------------+------+---+------+---+------+------+
+ */
+static void disas_simd_copy(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int imm4 = extract32(insn, 11, 4);
+ int op = extract32(insn, 29, 1);
+ int is_q = extract32(insn, 30, 1);
+ int imm5 = extract32(insn, 16, 5);
+
+ if (op) {
+ if (is_q) {
+ /* INS (element) */
+ handle_simd_inse(s, rd, rn, imm4, imm5);
+ } else {
+ unallocated_encoding(s);
+ }
+ } else {
+ switch (imm4) {
+ case 0:
+ /* DUP (element - vector) */
+ handle_simd_dupe(s, is_q, rd, rn, imm5);
+ break;
+ case 1:
+ /* DUP (general) */
+ handle_simd_dupg(s, is_q, rd, rn, imm5);
+ break;
+ case 3:
+ if (is_q) {
+ /* INS (general) */
+ handle_simd_insg(s, rd, rn, imm5);
+ } else {
+ unallocated_encoding(s);
+ }
+ break;
+ case 5:
+ case 7:
+ /* UMOV/SMOV (is_q indicates 32/64; imm4 indicates signedness) */
+ handle_simd_umov_smov(s, is_q, (imm4 == 5), rn, rd, imm5);
+ break;
+ default:
+ unallocated_encoding(s);
+ break;
+ }
+ }
+}
+
+/* C3.6.6 AdvSIMD modified immediate
+ * 31 30 29 28 19 18 16 15 12 11 10 9 5 4 0
+ * +---+---+----+---------------------+-----+-------+----+---+-------+------+
+ * | 0 | Q | op | 0 1 1 1 1 0 0 0 0 0 | abc | cmode | o2 | 1 | defgh | Rd |
+ * +---+---+----+---------------------+-----+-------+----+---+-------+------+
+ *
+ * There are a number of operations that can be carried out here:
+ * MOVI - move (shifted) imm into register
+ * MVNI - move inverted (shifted) imm into register
+ * ORR - bitwise OR of (shifted) imm with register
+ * BIC - bitwise clear of (shifted) imm with register
+ */
+static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int cmode = extract32(insn, 12, 4);
+ int cmode_3_1 = extract32(cmode, 1, 3);
+ int cmode_0 = extract32(cmode, 0, 1);
+ int o2 = extract32(insn, 11, 1);
+ uint64_t abcdefgh = extract32(insn, 5, 5) | (extract32(insn, 16, 3) << 5);
+ bool is_neg = extract32(insn, 29, 1);
+ bool is_q = extract32(insn, 30, 1);
+ uint64_t imm = 0;
+ TCGv_i64 tcg_rd, tcg_imm;
+ int i;
+
+ if (o2 != 0 || ((cmode == 0xf) && is_neg && !is_q)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ /* See AdvSIMDExpandImm() in ARM ARM */
+ switch (cmode_3_1) {
+ case 0: /* Replicate(Zeros(24):imm8, 2) */
+ case 1: /* Replicate(Zeros(16):imm8:Zeros(8), 2) */
+ case 2: /* Replicate(Zeros(8):imm8:Zeros(16), 2) */
+ case 3: /* Replicate(imm8:Zeros(24), 2) */
+ {
+ int shift = cmode_3_1 * 8;
+ imm = bitfield_replicate(abcdefgh << shift, 32);
+ break;
+ }
+ case 4: /* Replicate(Zeros(8):imm8, 4) */
+ case 5: /* Replicate(imm8:Zeros(8), 4) */
+ {
+ int shift = (cmode_3_1 & 0x1) * 8;
+ imm = bitfield_replicate(abcdefgh << shift, 16);
+ break;
+ }
+ case 6:
+ if (cmode_0) {
+ /* Replicate(Zeros(8):imm8:Ones(16), 2) */
+ imm = (abcdefgh << 16) | 0xffff;
+ } else {
+ /* Replicate(Zeros(16):imm8:Ones(8), 2) */
+ imm = (abcdefgh << 8) | 0xff;
+ }
+ imm = bitfield_replicate(imm, 32);
+ break;
+ case 7:
+ if (!cmode_0 && !is_neg) {
+ imm = bitfield_replicate(abcdefgh, 8);
+ } else if (!cmode_0 && is_neg) {
+ int i;
+ imm = 0;
+ for (i = 0; i < 8; i++) {
+ if ((abcdefgh) & (1 << i)) {
+ imm |= 0xffULL << (i * 8);
+ }
+ }
+ } else if (cmode_0) {
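+ /* cmode == 0b1111: FP immediate. abcdefgh encodes an IEEE-style
+ * constant: a is the sign, b selects the exponent pattern and
+ * cdefgh supply the top mantissa bits. With is_neg (the op bit)
+ * set this is the double-precision pattern, otherwise a
+ * single-precision pattern replicated into both 32-bit halves.
+ */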
+ if (is_neg) {
+ imm = (abcdefgh & 0x3f) << 48;
+ if (abcdefgh & 0x80) {
+ imm |= 0x8000000000000000ULL;
+ }
+ if (abcdefgh & 0x40) {
+ imm |= 0x3fc0000000000000ULL;
+ } else {
+ imm |= 0x4000000000000000ULL;
+ }
+ } else {
+ imm = (abcdefgh & 0x3f) << 19;
+ if (abcdefgh & 0x80) {
+ imm |= 0x80000000;
+ }
+ if (abcdefgh & 0x40) {
+ imm |= 0x3e000000;
+ } else {
+ imm |= 0x40000000;
+ }
+ imm |= (imm << 32);
+ }
+ }
+ break;
+ }
+
+ if (cmode_3_1 != 7 && is_neg) {
+ imm = ~imm;
+ }
+
+ tcg_imm = tcg_const_i64(imm);
+ tcg_rd = new_tmp_a64(s);
+
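+ /* Write the expanded immediate to the low and then the high 64 bits
+ * of the destination. cmode patterns 0xx1 and 10x1 are the ORR/BIC
+ * forms, which read-modify-write the existing register contents;
+ * all other patterns are a straight MOVI/MVNI move. For non-quad
+ * ops the high half is simply zeroed.
+ */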
+ for (i = 0; i < 2; i++) {
+ int foffs = i ? fp_reg_hi_offset(s, rd) : fp_reg_offset(s, rd, MO_64);
+
+ if (i == 1 && !is_q) {
+ /* non-quad ops clear high half of vector */
+ tcg_gen_movi_i64(tcg_rd, 0);
+ } else if ((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9) {
+ tcg_gen_ld_i64(tcg_rd, cpu_env, foffs);
+ if (is_neg) {
+ /* AND (BIC) */
+ tcg_gen_and_i64(tcg_rd, tcg_rd, tcg_imm);
+ } else {
+ /* ORR */
+ tcg_gen_or_i64(tcg_rd, tcg_rd, tcg_imm);
+ }
+ } else {
+ /* MOVI */
+ tcg_gen_mov_i64(tcg_rd, tcg_imm);
+ }
+ tcg_gen_st_i64(tcg_rd, cpu_env, foffs);
+ }
+
+ tcg_temp_free_i64(tcg_imm);
+}
+
+/* C3.6.7 AdvSIMD scalar copy
+ * 31 30 29 28 21 20 16 15 14 11 10 9 5 4 0
+ * +-----+----+-----------------+------+---+------+---+------+------+
+ * | 0 1 | op | 1 1 1 1 0 0 0 0 | imm5 | 0 | imm4 | 1 | Rn | Rd |
+ * +-----+----+-----------------+------+---+------+---+------+------+
+ */
+static void disas_simd_scalar_copy(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int imm4 = extract32(insn, 11, 4);
+ int imm5 = extract32(insn, 16, 5);
+ int op = extract32(insn, 29, 1);
+
+ if (op != 0 || imm4 != 0) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ /* DUP (element, scalar) */
+ handle_simd_dupes(s, rd, rn, imm5);
+}
+
+/* C3.6.8 AdvSIMD scalar pairwise
+ * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
+ * +-----+---+-----------+------+-----------+--------+-----+------+------+
+ * | 0 1 | U | 1 1 1 1 0 | size | 1 1 0 0 0 | opcode | 1 0 | Rn | Rd |
+ * +-----+---+-----------+------+-----------+--------+-----+------+------+
+ */
+static void disas_simd_scalar_pairwise(DisasContext *s, uint32_t insn)
+{
+ int u = extract32(insn, 29, 1);
+ int size = extract32(insn, 22, 2);
+ int opcode = extract32(insn, 12, 5);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+ TCGv_ptr fpst;
+
+ /* For some ops (the FP ones), size[1] is part of the encoding.
+ * For ADDP strictly it is not but size[1] is always 1 for valid
+ * encodings.
+ */
+ opcode |= (extract32(size, 1, 1) << 5);
+
+ switch (opcode) {
+ case 0x3b: /* ADDP */
+ if (u || size != 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ TCGV_UNUSED_PTR(fpst);
+ break;
+ case 0xc: /* FMAXNMP */
+ case 0xd: /* FADDP */
+ case 0xf: /* FMAXP */
+ case 0x2c: /* FMINNMP */
+ case 0x2f: /* FMINP */
+ /* FP op, size[0] is 32 or 64 bit */
+ if (!u) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ size = extract32(size, 0, 1) ? 3 : 2;
+ fpst = get_fpstatus_ptr();
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (size == 3) {
+ TCGv_i64 tcg_op1 = tcg_temp_new_i64();
+ TCGv_i64 tcg_op2 = tcg_temp_new_i64();
+ TCGv_i64 tcg_res = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_op1, rn, 0, MO_64);
+ read_vec_element(s, tcg_op2, rn, 1, MO_64);
+
+ switch (opcode) {
+ case 0x3b: /* ADDP */
+ tcg_gen_add_i64(tcg_res, tcg_op1, tcg_op2);
+ break;
+ case 0xc: /* FMAXNMP */
+ gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0xd: /* FADDP */
+ gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0xf: /* FMAXP */
+ gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x2c: /* FMINNMP */
+ gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x2f: /* FMINP */
+ gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ write_fp_dreg(s, rd, tcg_res);
+
+ tcg_temp_free_i64(tcg_op1);
+ tcg_temp_free_i64(tcg_op2);
+ tcg_temp_free_i64(tcg_res);
+ } else {
+ TCGv_i32 tcg_op1 = tcg_temp_new_i32();
+ TCGv_i32 tcg_op2 = tcg_temp_new_i32();
+ TCGv_i32 tcg_res = tcg_temp_new_i32();
+
+ read_vec_element_i32(s, tcg_op1, rn, 0, MO_32);
+ read_vec_element_i32(s, tcg_op2, rn, 1, MO_32);
+
+ switch (opcode) {
+ case 0xc: /* FMAXNMP */
+ gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0xd: /* FADDP */
+ gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0xf: /* FMAXP */
+ gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x2c: /* FMINNMP */
+ gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x2f: /* FMINP */
+ gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ write_fp_sreg(s, rd, tcg_res);
+
+ tcg_temp_free_i32(tcg_op1);
+ tcg_temp_free_i32(tcg_op2);
+ tcg_temp_free_i32(tcg_res);
+ }
+
+ if (!TCGV_IS_UNUSED_PTR(fpst)) {
+ tcg_temp_free_ptr(fpst);
+ }
+}
+
+/*
+ * Common SSHR[RA]/USHR[RA] - Shift right (optional rounding/accumulate)
+ *
+ * This handles the common shift logic and is used by both
+ * the vector and scalar code.
+ */
+static void handle_shri_with_rndacc(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
+ TCGv_i64 tcg_rnd, bool accumulate,
+ bool is_u, int size, int shift)
+{
+ bool extended_result = false;
+ bool round = !TCGV_IS_UNUSED_I64(tcg_rnd);
+ int ext_lshift = 0;
+ TCGv_i64 tcg_src_hi;
+
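+ /* For 64-bit elements with rounding, adding the rounding constant can
+ * carry out of bit 63, so we keep a wider intermediate in a
+ * (tcg_src_hi:tcg_src) pair and recombine after the shift.
+ */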
+ if (round && size == 3) {
+ extended_result = true;
+ ext_lshift = 64 - shift;
+ tcg_src_hi = tcg_temp_new_i64();
+ } else if (shift == 64) {
+ if (!accumulate && is_u) {
+ /* result is zero */
+ tcg_gen_movi_i64(tcg_res, 0);
+ return;
+ }
+ }
+
+ /* Deal with the rounding step */
+ if (round) {
+ if (extended_result) {
+ TCGv_i64 tcg_zero = tcg_const_i64(0);
+ if (!is_u) {
+ /* take care of sign extending tcg_res */
+ tcg_gen_sari_i64(tcg_src_hi, tcg_src, 63);
+ tcg_gen_add2_i64(tcg_src, tcg_src_hi,
+ tcg_src, tcg_src_hi,
+ tcg_rnd, tcg_zero);
+ } else {
+ tcg_gen_add2_i64(tcg_src, tcg_src_hi,
+ tcg_src, tcg_zero,
+ tcg_rnd, tcg_zero);
+ }
+ tcg_temp_free_i64(tcg_zero);
+ } else {
+ tcg_gen_add_i64(tcg_src, tcg_src, tcg_rnd);
+ }
+ }
+
+ /* Now do the shift right */
+ if (round && extended_result) {
+ /* extended case, >64 bit precision required */
+ if (ext_lshift == 0) {
+ /* special case, only high bits matter */
+ tcg_gen_mov_i64(tcg_src, tcg_src_hi);
+ } else {
+ tcg_gen_shri_i64(tcg_src, tcg_src, shift);
+ tcg_gen_shli_i64(tcg_src_hi, tcg_src_hi, ext_lshift);
+ tcg_gen_or_i64(tcg_src, tcg_src, tcg_src_hi);
+ }
+ } else {
+ if (is_u) {
+ if (shift == 64) {
+ /* essentially shifting in 64 zeros */
+ tcg_gen_movi_i64(tcg_src, 0);
+ } else {
+ tcg_gen_shri_i64(tcg_src, tcg_src, shift);
+ }
+ } else {
+ if (shift == 64) {
+ /* effectively extending the sign-bit */
+ tcg_gen_sari_i64(tcg_src, tcg_src, 63);
+ } else {
+ tcg_gen_sari_i64(tcg_src, tcg_src, shift);
+ }
+ }
+ }
+
+ if (accumulate) {
+ tcg_gen_add_i64(tcg_res, tcg_res, tcg_src);
+ } else {
+ tcg_gen_mov_i64(tcg_res, tcg_src);
+ }
+
+ if (extended_result) {
+ tcg_temp_free_i64(tcg_src_hi);
+ }
+}
+
+/* Common SHL/SLI - Shift left with an optional insert */
+static void handle_shli_with_ins(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
+ bool insert, int shift)
+{
+ if (insert) { /* SLI */
+ tcg_gen_deposit_i64(tcg_res, tcg_res, tcg_src, shift, 64 - shift);
+ } else { /* SHL */
+ tcg_gen_shli_i64(tcg_res, tcg_src, shift);
+ }
+}
+
+/* SRI: shift right with insert */
+static void handle_shri_with_ins(TCGv_i64 tcg_res, TCGv_i64 tcg_src,
+ int size, int shift)
+{
+ int esize = 8 << size;
+
+ /* shift count same as element size is valid but does nothing;
+ * special case to avoid potential shift by 64.
+ */
+ if (shift != esize) {
+ tcg_gen_shri_i64(tcg_src, tcg_src, shift);
+ tcg_gen_deposit_i64(tcg_res, tcg_res, tcg_src, 0, esize - shift);
+ }
+}
+
+/* SSHR[RA]/USHR[RA] - Scalar shift right (optional rounding/accumulate) */
+static void handle_scalar_simd_shri(DisasContext *s,
+ bool is_u, int immh, int immb,
+ int opcode, int rn, int rd)
+{
+ const int size = 3;
+ int immhb = immh << 3 | immb;
+ int shift = 2 * (8 << size) - immhb;
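+ /* Scalar shifts only allow the 64-bit element size (immh<3> must be
+ * set, checked below), so shift = 128 - immhb and lies in 1..64.
+ */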
+ bool accumulate = false;
+ bool round = false;
+ bool insert = false;
+ TCGv_i64 tcg_rn;
+ TCGv_i64 tcg_rd;
+ TCGv_i64 tcg_round;
+
+ if (!extract32(immh, 3, 1)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ switch (opcode) {
+ case 0x02: /* SSRA / USRA (accumulate) */
+ accumulate = true;
+ break;
+ case 0x04: /* SRSHR / URSHR (rounding) */
+ round = true;
+ break;
+ case 0x06: /* SRSRA / URSRA (accum + rounding) */
+ accumulate = round = true;
+ break;
+ case 0x08: /* SRI */
+ insert = true;
+ break;
+ }
+
+ if (round) {
+ uint64_t round_const = 1ULL << (shift - 1);
+ tcg_round = tcg_const_i64(round_const);
+ } else {
+ TCGV_UNUSED_I64(tcg_round);
+ }
+
+ tcg_rn = read_fp_dreg(s, rn);
+ tcg_rd = (accumulate || insert) ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
+
+ if (insert) {
+ handle_shri_with_ins(tcg_rd, tcg_rn, size, shift);
+ } else {
+ handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
+ accumulate, is_u, size, shift);
+ }
+
+ write_fp_dreg(s, rd, tcg_rd);
+
+ tcg_temp_free_i64(tcg_rn);
+ tcg_temp_free_i64(tcg_rd);
+ if (round) {
+ tcg_temp_free_i64(tcg_round);
+ }
+}
+
+/* SHL/SLI - Scalar shift left */
+static void handle_scalar_simd_shli(DisasContext *s, bool insert,
+ int immh, int immb, int opcode,
+ int rn, int rd)
+{
+ int size = 32 - clz32(immh) - 1;
+ int immhb = immh << 3 | immb;
+ int shift = immhb - (8 << size);
+ TCGv_i64 tcg_rn;
+ TCGv_i64 tcg_rd;
+
+ if (!extract32(immh, 3, 1)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ tcg_rn = read_fp_dreg(s, rn);
+ tcg_rd = insert ? read_fp_dreg(s, rd) : tcg_temp_new_i64();
+
+ handle_shli_with_ins(tcg_rd, tcg_rn, insert, shift);
+
+ write_fp_dreg(s, rd, tcg_rd);
+
+ tcg_temp_free_i64(tcg_rn);
+ tcg_temp_free_i64(tcg_rd);
+}
+
+/* SQSHRN/SQSHRUN - Saturating (signed/unsigned) shift right with
+ * (signed/unsigned) narrowing */
+static void handle_vec_simd_sqshrn(DisasContext *s, bool is_scalar, bool is_q,
+ bool is_u_shift, bool is_u_narrow,
+ int immh, int immb, int opcode,
+ int rn, int rd)
+{
+ int immhb = immh << 3 | immb;
+ int size = 32 - clz32(immh) - 1;
+ int esize = 8 << size;
+ int shift = (2 * esize) - immhb;
+ int elements = is_scalar ? 1 : (64 / esize);
+ bool round = extract32(opcode, 0, 1);
+ TCGMemOp ldop = (size + 1) | (is_u_shift ? 0 : MO_SIGN);
+ TCGv_i64 tcg_rn, tcg_rd, tcg_round;
+ TCGv_i32 tcg_rd_narrowed;
+ TCGv_i64 tcg_final;
+
+ static NeonGenNarrowEnvFn * const signed_narrow_fns[4][2] = {
+ { gen_helper_neon_narrow_sat_s8,
+ gen_helper_neon_unarrow_sat8 },
+ { gen_helper_neon_narrow_sat_s16,
+ gen_helper_neon_unarrow_sat16 },
+ { gen_helper_neon_narrow_sat_s32,
+ gen_helper_neon_unarrow_sat32 },
+ { NULL, NULL },
+ };
+ static NeonGenNarrowEnvFn * const unsigned_narrow_fns[4] = {
+ gen_helper_neon_narrow_sat_u8,
+ gen_helper_neon_narrow_sat_u16,
+ gen_helper_neon_narrow_sat_u32,
+ NULL
+ };
+ NeonGenNarrowEnvFn *narrowfn;
+
+ int i;
+
+ assert(size < 4);
+
+ if (extract32(immh, 3, 1)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ if (is_u_shift) {
+ narrowfn = unsigned_narrow_fns[size];
+ } else {
+ narrowfn = signed_narrow_fns[size][is_u_narrow ? 1 : 0];
+ }
+
+ tcg_rn = tcg_temp_new_i64();
+ tcg_rd = tcg_temp_new_i64();
+ tcg_rd_narrowed = tcg_temp_new_i32();
+ tcg_final = tcg_const_i64(0);
+
+ if (round) {
+ uint64_t round_const = 1ULL << (shift - 1);
+ tcg_round = tcg_const_i64(round_const);
+ } else {
+ TCGV_UNUSED_I64(tcg_round);
+ }
+
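+ /* For each element: shift right (with optional rounding), apply the
+ * saturating narrow to half the width, then deposit the result into
+ * its slot in tcg_final.
+ */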
+ for (i = 0; i < elements; i++) {
+ read_vec_element(s, tcg_rn, rn, i, ldop);
+ handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
+ false, is_u_shift, size+1, shift);
+ narrowfn(tcg_rd_narrowed, cpu_env, tcg_rd);
+ tcg_gen_extu_i32_i64(tcg_rd, tcg_rd_narrowed);
+ tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
+ }
+
+ if (!is_q) {
+ clear_vec_high(s, rd);
+ write_vec_element(s, tcg_final, rd, 0, MO_64);
+ } else {
+ write_vec_element(s, tcg_final, rd, 1, MO_64);
+ }
+
+ if (round) {
+ tcg_temp_free_i64(tcg_round);
+ }
+ tcg_temp_free_i64(tcg_rn);
+ tcg_temp_free_i64(tcg_rd);
+ tcg_temp_free_i32(tcg_rd_narrowed);
+ tcg_temp_free_i64(tcg_final);
+}
+
+/* SQSHLU, UQSHL, SQSHL: saturating left shifts */
+static void handle_simd_qshl(DisasContext *s, bool scalar, bool is_q,
+ bool src_unsigned, bool dst_unsigned,
+ int immh, int immb, int rn, int rd)
+{
+ int immhb = immh << 3 | immb;
+ int size = 32 - clz32(immh) - 1;
+ int shift = immhb - (8 << size);
+ int pass;
+
+ assert(immh != 0);
+ assert(!(scalar && is_q));
+
+ if (!scalar) {
+ if (!is_q && extract32(immh, 3, 1)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ /* Since we use the variable-shift helpers we must
+ * replicate the shift count into each element of
+ * the tcg_shift value.
+ */
+ switch (size) {
+ case 0:
+ shift |= shift << 8;
+ /* fall through */
+ case 1:
+ shift |= shift << 16;
+ break;
+ case 2:
+ case 3:
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ if (size == 3) {
+ TCGv_i64 tcg_shift = tcg_const_i64(shift);
+ static NeonGenTwo64OpEnvFn * const fns[2][2] = {
+ { gen_helper_neon_qshl_s64, gen_helper_neon_qshlu_s64 },
+ { NULL, gen_helper_neon_qshl_u64 },
+ };
+ NeonGenTwo64OpEnvFn *genfn = fns[src_unsigned][dst_unsigned];
+ int maxpass = is_q ? 2 : 1;
+
+ for (pass = 0; pass < maxpass; pass++) {
+ TCGv_i64 tcg_op = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_op, rn, pass, MO_64);
+ genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
+ write_vec_element(s, tcg_op, rd, pass, MO_64);
+
+ tcg_temp_free_i64(tcg_op);
+ }
+ tcg_temp_free_i64(tcg_shift);
+
+ if (!is_q) {
+ clear_vec_high(s, rd);
+ }
+ } else {
+ TCGv_i32 tcg_shift = tcg_const_i32(shift);
+ static NeonGenTwoOpEnvFn * const fns[2][2][3] = {
+ {
+ { gen_helper_neon_qshl_s8,
+ gen_helper_neon_qshl_s16,
+ gen_helper_neon_qshl_s32 },
+ { gen_helper_neon_qshlu_s8,
+ gen_helper_neon_qshlu_s16,
+ gen_helper_neon_qshlu_s32 }
+ }, {
+ { NULL, NULL, NULL },
+ { gen_helper_neon_qshl_u8,
+ gen_helper_neon_qshl_u16,
+ gen_helper_neon_qshl_u32 }
+ }
+ };
+ NeonGenTwoOpEnvFn *genfn = fns[src_unsigned][dst_unsigned][size];
+ TCGMemOp memop = scalar ? size : MO_32;
+ int maxpass = scalar ? 1 : is_q ? 4 : 2;
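+ /* Non-64-bit vector forms work on 32-bit chunks: the packed 8/16-bit
+ * lanes are handled inside the Neon helpers using the shift count
+ * replicated above. The scalar forms load just the single element.
+ */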
+
+ for (pass = 0; pass < maxpass; pass++) {
+ TCGv_i32 tcg_op = tcg_temp_new_i32();
+
+ read_vec_element_i32(s, tcg_op, rn, pass, memop);
+ genfn(tcg_op, cpu_env, tcg_op, tcg_shift);
+ if (scalar) {
+ switch (size) {
+ case 0:
+ tcg_gen_ext8u_i32(tcg_op, tcg_op);
+ break;
+ case 1:
+ tcg_gen_ext16u_i32(tcg_op, tcg_op);
+ break;
+ case 2:
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ write_fp_sreg(s, rd, tcg_op);
+ } else {
+ write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
+ }
+
+ tcg_temp_free_i32(tcg_op);
+ }
+ tcg_temp_free_i32(tcg_shift);
+
+ if (!is_q && !scalar) {
+ clear_vec_high(s, rd);
+ }
+ }
+}
+
+/* Common vector code for handling integer to FP conversion */
+static void handle_simd_intfp_conv(DisasContext *s, int rd, int rn,
+ int elements, int is_signed,
+ int fracbits, int size)
+{
+ bool is_double = (size == 3);
+ TCGv_ptr tcg_fpst = get_fpstatus_ptr();
+ TCGv_i32 tcg_shift = tcg_const_i32(fracbits);
+ TCGv_i64 tcg_int = tcg_temp_new_i64();
+ TCGMemOp mop = size | (is_signed ? MO_SIGN : 0);
+ int pass;
+
+ for (pass = 0; pass < elements; pass++) {
+ read_vec_element(s, tcg_int, rn, pass, mop);
+
+ if (is_double) {
+ TCGv_i64 tcg_double = tcg_temp_new_i64();
+ if (is_signed) {
+ gen_helper_vfp_sqtod(tcg_double, tcg_int,
+ tcg_shift, tcg_fpst);
+ } else {
+ gen_helper_vfp_uqtod(tcg_double, tcg_int,
+ tcg_shift, tcg_fpst);
+ }
+ if (elements == 1) {
+ write_fp_dreg(s, rd, tcg_double);
+ } else {
+ write_vec_element(s, tcg_double, rd, pass, MO_64);
+ }
+ tcg_temp_free_i64(tcg_double);
+ } else {
+ TCGv_i32 tcg_single = tcg_temp_new_i32();
+ if (is_signed) {
+ gen_helper_vfp_sqtos(tcg_single, tcg_int,
+ tcg_shift, tcg_fpst);
+ } else {
+ gen_helper_vfp_uqtos(tcg_single, tcg_int,
+ tcg_shift, tcg_fpst);
+ }
+ if (elements == 1) {
+ write_fp_sreg(s, rd, tcg_single);
+ } else {
+ write_vec_element_i32(s, tcg_single, rd, pass, MO_32);
+ }
+ tcg_temp_free_i32(tcg_single);
+ }
+ }
+
+ if (!is_double && elements == 2) {
+ clear_vec_high(s, rd);
+ }
+
+ tcg_temp_free_i64(tcg_int);
+ tcg_temp_free_ptr(tcg_fpst);
+ tcg_temp_free_i32(tcg_shift);
+}
+
+/* UCVTF/SCVTF - Integer to FP conversion */
+static void handle_simd_shift_intfp_conv(DisasContext *s, bool is_scalar,
+ bool is_q, bool is_u,
+ int immh, int immb, int opcode,
+ int rn, int rd)
+{
+ bool is_double = extract32(immh, 3, 1);
+ int size = is_double ? MO_64 : MO_32;
+ int elements;
+ int immhb = immh << 3 | immb;
+ int fracbits = (is_double ? 128 : 64) - immhb;
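+ /* fracbits = (2 * esize) - immhb: the number of fractional bits in
+ * the fixed-point source implied by the shift-immediate encoding.
+ */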
+
+ if (!extract32(immh, 2, 2)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (is_scalar) {
+ elements = 1;
+ } else {
+ elements = is_double ? 2 : is_q ? 4 : 2;
+ if (is_double && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ /* immh == 0 would be a failure of the decode logic */
+ g_assert(immh);
+
+ handle_simd_intfp_conv(s, rd, rn, elements, !is_u, fracbits, size);
+}
+
+/* FCVTZS, FCVTZU - FP to fixed-point conversion */
+static void handle_simd_shift_fpint_conv(DisasContext *s, bool is_scalar,
+ bool is_q, bool is_u,
+ int immh, int immb, int rn, int rd)
+{
+ bool is_double = extract32(immh, 3, 1);
+ int immhb = immh << 3 | immb;
+ int fracbits = (is_double ? 128 : 64) - immhb;
+ int pass;
+ TCGv_ptr tcg_fpstatus;
+ TCGv_i32 tcg_rmode, tcg_shift;
+
+ if (!extract32(immh, 2, 2)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!is_scalar && !is_q && is_double) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ assert(!(is_scalar && is_q));
+
+ tcg_rmode = tcg_const_i32(arm_rmode_to_sf(FPROUNDING_ZERO));
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+ tcg_fpstatus = get_fpstatus_ptr();
+ tcg_shift = tcg_const_i32(fracbits);
+
+ if (is_double) {
+ int maxpass = is_scalar ? 1 : 2;
+
+ for (pass = 0; pass < maxpass; pass++) {
+ TCGv_i64 tcg_op = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_op, rn, pass, MO_64);
+ if (is_u) {
+ gen_helper_vfp_touqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
+ } else {
+ gen_helper_vfp_tosqd(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
+ }
+ write_vec_element(s, tcg_op, rd, pass, MO_64);
+ tcg_temp_free_i64(tcg_op);
+ }
+ if (!is_q) {
+ clear_vec_high(s, rd);
+ }
+ } else {
+ int maxpass = is_scalar ? 1 : is_q ? 4 : 2;
+ for (pass = 0; pass < maxpass; pass++) {
+ TCGv_i32 tcg_op = tcg_temp_new_i32();
+
+ read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
+ if (is_u) {
+ gen_helper_vfp_touls(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
+ } else {
+ gen_helper_vfp_tosls(tcg_op, tcg_op, tcg_shift, tcg_fpstatus);
+ }
+ if (is_scalar) {
+ write_fp_sreg(s, rd, tcg_op);
+ } else {
+ write_vec_element_i32(s, tcg_op, rd, pass, MO_32);
+ }
+ tcg_temp_free_i32(tcg_op);
+ }
+ if (!is_q && !is_scalar) {
+ clear_vec_high(s, rd);
+ }
+ }
+
+ tcg_temp_free_ptr(tcg_fpstatus);
+ tcg_temp_free_i32(tcg_shift);
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+ tcg_temp_free_i32(tcg_rmode);
+}
+
+/* C3.6.9 AdvSIMD scalar shift by immediate
+ * 31 30 29 28 23 22 19 18 16 15 11 10 9 5 4 0
+ * +-----+---+-------------+------+------+--------+---+------+------+
+ * | 0 1 | U | 1 1 1 1 1 0 | immh | immb | opcode | 1 | Rn | Rd |
+ * +-----+---+-------------+------+------+--------+---+------+------+
+ *
+ * This is the scalar version, so it works on fixed-size registers
+ */
+static void disas_simd_scalar_shift_imm(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int opcode = extract32(insn, 11, 5);
+ int immb = extract32(insn, 16, 3);
+ int immh = extract32(insn, 19, 4);
+ bool is_u = extract32(insn, 29, 1);
+
+ if (immh == 0) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (opcode) {
+ case 0x08: /* SRI */
+ if (!is_u) {
+ unallocated_encoding(s);
+ return;
+ }
+ /* fall through */
+ case 0x00: /* SSHR / USHR */
+ case 0x02: /* SSRA / USRA */
+ case 0x04: /* SRSHR / URSHR */
+ case 0x06: /* SRSRA / URSRA */
+ handle_scalar_simd_shri(s, is_u, immh, immb, opcode, rn, rd);
+ break;
+ case 0x0a: /* SHL / SLI */
+ handle_scalar_simd_shli(s, is_u, immh, immb, opcode, rn, rd);
+ break;
+ case 0x1c: /* SCVTF, UCVTF */
+ handle_simd_shift_intfp_conv(s, true, false, is_u, immh, immb,
+ opcode, rn, rd);
+ break;
+ case 0x10: /* SQSHRUN, SQSHRUN2 */
+ case 0x11: /* SQRSHRUN, SQRSHRUN2 */
+ if (!is_u) {
+ unallocated_encoding(s);
+ return;
+ }
+ handle_vec_simd_sqshrn(s, true, false, false, true,
+ immh, immb, opcode, rn, rd);
+ break;
+ case 0x12: /* SQSHRN, SQSHRN2, UQSHRN */
+ case 0x13: /* SQRSHRN, SQRSHRN2, UQRSHRN, UQRSHRN2 */
+ handle_vec_simd_sqshrn(s, true, false, is_u, is_u,
+ immh, immb, opcode, rn, rd);
+ break;
+ case 0xc: /* SQSHLU */
+ if (!is_u) {
+ unallocated_encoding(s);
+ return;
+ }
+ handle_simd_qshl(s, true, false, false, true, immh, immb, rn, rd);
+ break;
+ case 0xe: /* SQSHL, UQSHL */
+ handle_simd_qshl(s, true, false, is_u, is_u, immh, immb, rn, rd);
+ break;
+ case 0x1f: /* FCVTZS, FCVTZU */
+ handle_simd_shift_fpint_conv(s, true, false, is_u, immh, immb, rn, rd);
+ break;
+ default:
+ unallocated_encoding(s);
+ break;
+ }
+}
+
+/* C3.6.10 AdvSIMD scalar three different
+ * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
+ * +-----+---+-----------+------+---+------+--------+-----+------+------+
+ * | 0 1 | U | 1 1 1 1 0 | size | 1 | Rm | opcode | 0 0 | Rn | Rd |
+ * +-----+---+-----------+------+---+------+--------+-----+------+------+
+ */
+static void disas_simd_scalar_three_reg_diff(DisasContext *s, uint32_t insn)
+{
+ bool is_u = extract32(insn, 29, 1);
+ int size = extract32(insn, 22, 2);
+ int opcode = extract32(insn, 12, 4);
+ int rm = extract32(insn, 16, 5);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+
+ if (is_u) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (opcode) {
+ case 0x9: /* SQDMLAL, SQDMLAL2 */
+ case 0xb: /* SQDMLSL, SQDMLSL2 */
+ case 0xd: /* SQDMULL, SQDMULL2 */
+ if (size == 0 || size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ if (size == 2) {
+ TCGv_i64 tcg_op1 = tcg_temp_new_i64();
+ TCGv_i64 tcg_op2 = tcg_temp_new_i64();
+ TCGv_i64 tcg_res = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_op1, rn, 0, MO_32 | MO_SIGN);
+ read_vec_element(s, tcg_op2, rm, 0, MO_32 | MO_SIGN);
+
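+ /* SQDMULL: widening multiply, then double the product with a
+ * saturating add of it to itself. SQDMLAL/SQDMLSL then accumulate
+ * into (or subtract from) Rd, again with saturation.
+ */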
+ tcg_gen_mul_i64(tcg_res, tcg_op1, tcg_op2);
+ gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env, tcg_res, tcg_res);
+
+ switch (opcode) {
+ case 0xd: /* SQDMULL, SQDMULL2 */
+ break;
+ case 0xb: /* SQDMLSL, SQDMLSL2 */
+ tcg_gen_neg_i64(tcg_res, tcg_res);
+ /* fall through */
+ case 0x9: /* SQDMLAL, SQDMLAL2 */
+ read_vec_element(s, tcg_op1, rd, 0, MO_64);
+ gen_helper_neon_addl_saturate_s64(tcg_res, cpu_env,
+ tcg_res, tcg_op1);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ write_fp_dreg(s, rd, tcg_res);
+
+ tcg_temp_free_i64(tcg_op1);
+ tcg_temp_free_i64(tcg_op2);
+ tcg_temp_free_i64(tcg_res);
+ } else {
+ TCGv_i32 tcg_op1 = tcg_temp_new_i32();
+ TCGv_i32 tcg_op2 = tcg_temp_new_i32();
+ TCGv_i64 tcg_res = tcg_temp_new_i64();
+
+ read_vec_element_i32(s, tcg_op1, rn, 0, MO_16);
+ read_vec_element_i32(s, tcg_op2, rm, 0, MO_16);
+
+ gen_helper_neon_mull_s16(tcg_res, tcg_op1, tcg_op2);
+ gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env, tcg_res, tcg_res);
+
+ switch (opcode) {
+ case 0xd: /* SQDMULL, SQDMULL2 */
+ break;
+ case 0xb: /* SQDMLSL, SQDMLSL2 */
+ gen_helper_neon_negl_u32(tcg_res, tcg_res);
+ /* fall through */
+ case 0x9: /* SQDMLAL, SQDMLAL2 */
+ {
+ TCGv_i64 tcg_op3 = tcg_temp_new_i64();
+ read_vec_element(s, tcg_op3, rd, 0, MO_32);
+ gen_helper_neon_addl_saturate_s32(tcg_res, cpu_env,
+ tcg_res, tcg_op3);
+ tcg_temp_free_i64(tcg_op3);
+ break;
+ }
+ default:
+ g_assert_not_reached();
+ }
+
+ tcg_gen_ext32u_i64(tcg_res, tcg_res);
+ write_fp_dreg(s, rd, tcg_res);
+
+ tcg_temp_free_i32(tcg_op1);
+ tcg_temp_free_i32(tcg_op2);
+ tcg_temp_free_i64(tcg_res);
+ }
+}
+
+static void handle_3same_64(DisasContext *s, int opcode, bool u,
+ TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 tcg_rm)
+{
+ /* Handle 64x64->64 opcodes which are shared between the scalar
+ * and vector 3-same groups. We cover every opcode where size == 3
+ * is valid in either the three-reg-same (integer, not pairwise)
+ * or scalar-three-reg-same groups. (Some opcodes are not yet
+ * implemented.)
+ */
+ TCGCond cond;
+
+ switch (opcode) {
+ case 0x1: /* SQADD */
+ if (u) {
+ gen_helper_neon_qadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
+ } else {
+ gen_helper_neon_qadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
+ }
+ break;
+ case 0x5: /* SQSUB */
+ if (u) {
+ gen_helper_neon_qsub_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
+ } else {
+ gen_helper_neon_qsub_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
+ }
+ break;
+ case 0x6: /* CMGT, CMHI */
+ /* 64 bit integer comparison, result = test ? (2^64 - 1) : 0.
+ * We implement this using setcond (test) and then negating.
+ */
+ cond = u ? TCG_COND_GTU : TCG_COND_GT;
+ do_cmop:
+ tcg_gen_setcond_i64(cond, tcg_rd, tcg_rn, tcg_rm);
+ tcg_gen_neg_i64(tcg_rd, tcg_rd);
+ break;
+ case 0x7: /* CMGE, CMHS */
+ cond = u ? TCG_COND_GEU : TCG_COND_GE;
+ goto do_cmop;
+ case 0x11: /* CMTST, CMEQ */
+ if (u) {
+ cond = TCG_COND_EQ;
+ goto do_cmop;
+ }
+ /* CMTST : test is "if ((X & Y) != 0)". */
+ tcg_gen_and_i64(tcg_rd, tcg_rn, tcg_rm);
+ tcg_gen_setcondi_i64(TCG_COND_NE, tcg_rd, tcg_rd, 0);
+ tcg_gen_neg_i64(tcg_rd, tcg_rd);
+ break;
+ case 0x8: /* SSHL, USHL */
+ if (u) {
+ gen_helper_neon_shl_u64(tcg_rd, tcg_rn, tcg_rm);
+ } else {
+ gen_helper_neon_shl_s64(tcg_rd, tcg_rn, tcg_rm);
+ }
+ break;
+ case 0x9: /* SQSHL, UQSHL */
+ if (u) {
+ gen_helper_neon_qshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
+ } else {
+ gen_helper_neon_qshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
+ }
+ break;
+ case 0xa: /* SRSHL, URSHL */
+ if (u) {
+ gen_helper_neon_rshl_u64(tcg_rd, tcg_rn, tcg_rm);
+ } else {
+ gen_helper_neon_rshl_s64(tcg_rd, tcg_rn, tcg_rm);
+ }
+ break;
+ case 0xb: /* SQRSHL, UQRSHL */
+ if (u) {
+ gen_helper_neon_qrshl_u64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
+ } else {
+ gen_helper_neon_qrshl_s64(tcg_rd, cpu_env, tcg_rn, tcg_rm);
+ }
+ break;
+ case 0x10: /* ADD, SUB */
+ if (u) {
+ tcg_gen_sub_i64(tcg_rd, tcg_rn, tcg_rm);
+ } else {
+ tcg_gen_add_i64(tcg_rd, tcg_rn, tcg_rm);
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+/* Handle the 3-same-operands float operations; shared by the scalar
+ * and vector encodings. The caller must filter out any encodings
+ * not allocated for the encoding it is dealing with.
+ */
+static void handle_3same_float(DisasContext *s, int size, int elements,
+ int fpopcode, int rd, int rn, int rm)
+{
+ int pass;
+ TCGv_ptr fpst = get_fpstatus_ptr();
+
+ for (pass = 0; pass < elements; pass++) {
+ if (size) {
+ /* Double */
+ TCGv_i64 tcg_op1 = tcg_temp_new_i64();
+ TCGv_i64 tcg_op2 = tcg_temp_new_i64();
+ TCGv_i64 tcg_res = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_op1, rn, pass, MO_64);
+ read_vec_element(s, tcg_op2, rm, pass, MO_64);
+
+ switch (fpopcode) {
+ case 0x39: /* FMLS */
+ /* As usual for ARM, separate negation for fused multiply-add */
+ gen_helper_vfp_negd(tcg_op1, tcg_op1);
+ /* fall through */
+ case 0x19: /* FMLA */
+ read_vec_element(s, tcg_res, rd, pass, MO_64);
+ gen_helper_vfp_muladdd(tcg_res, tcg_op1, tcg_op2,
+ tcg_res, fpst);
+ break;
+ case 0x18: /* FMAXNM */
+ gen_helper_vfp_maxnumd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x1a: /* FADD */
+ gen_helper_vfp_addd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x1b: /* FMULX */
+ gen_helper_vfp_mulxd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x1c: /* FCMEQ */
+ gen_helper_neon_ceq_f64(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x1e: /* FMAX */
+ gen_helper_vfp_maxd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x1f: /* FRECPS */
+ gen_helper_recpsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x38: /* FMINNM */
+ gen_helper_vfp_minnumd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x3a: /* FSUB */
+ gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x3e: /* FMIN */
+ gen_helper_vfp_mind(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x3f: /* FRSQRTS */
+ gen_helper_rsqrtsf_f64(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x5b: /* FMUL */
+ gen_helper_vfp_muld(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x5c: /* FCMGE */
+ gen_helper_neon_cge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x5d: /* FACGE */
+ gen_helper_neon_acge_f64(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x5f: /* FDIV */
+ gen_helper_vfp_divd(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x7a: /* FABD */
+ gen_helper_vfp_subd(tcg_res, tcg_op1, tcg_op2, fpst);
+ gen_helper_vfp_absd(tcg_res, tcg_res);
+ break;
+ case 0x7c: /* FCMGT */
+ gen_helper_neon_cgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x7d: /* FACGT */
+ gen_helper_neon_acgt_f64(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ write_vec_element(s, tcg_res, rd, pass, MO_64);
+
+ tcg_temp_free_i64(tcg_res);
+ tcg_temp_free_i64(tcg_op1);
+ tcg_temp_free_i64(tcg_op2);
+ } else {
+ /* Single */
+ TCGv_i32 tcg_op1 = tcg_temp_new_i32();
+ TCGv_i32 tcg_op2 = tcg_temp_new_i32();
+ TCGv_i32 tcg_res = tcg_temp_new_i32();
+
+ read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
+ read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);
+
+ switch (fpopcode) {
+ case 0x39: /* FMLS */
+ /* As usual for ARM, separate negation for fused multiply-add */
+ gen_helper_vfp_negs(tcg_op1, tcg_op1);
+ /* fall through */
+ case 0x19: /* FMLA */
+ read_vec_element_i32(s, tcg_res, rd, pass, MO_32);
+ gen_helper_vfp_muladds(tcg_res, tcg_op1, tcg_op2,
+ tcg_res, fpst);
+ break;
+ case 0x1a: /* FADD */
+ gen_helper_vfp_adds(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x1b: /* FMULX */
+ gen_helper_vfp_mulxs(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x1c: /* FCMEQ */
+ gen_helper_neon_ceq_f32(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x1e: /* FMAX */
+ gen_helper_vfp_maxs(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x1f: /* FRECPS */
+ gen_helper_recpsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x18: /* FMAXNM */
+ gen_helper_vfp_maxnums(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x38: /* FMINNM */
+ gen_helper_vfp_minnums(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x3a: /* FSUB */
+ gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x3e: /* FMIN */
+ gen_helper_vfp_mins(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x3f: /* FRSQRTS */
+ gen_helper_rsqrtsf_f32(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x5b: /* FMUL */
+ gen_helper_vfp_muls(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x5c: /* FCMGE */
+ gen_helper_neon_cge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x5d: /* FACGE */
+ gen_helper_neon_acge_f32(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x5f: /* FDIV */
+ gen_helper_vfp_divs(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x7a: /* FABD */
+ gen_helper_vfp_subs(tcg_res, tcg_op1, tcg_op2, fpst);
+ gen_helper_vfp_abss(tcg_res, tcg_res);
+ break;
+ case 0x7c: /* FCMGT */
+ gen_helper_neon_cgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x7d: /* FACGT */
+ gen_helper_neon_acgt_f32(tcg_res, tcg_op1, tcg_op2, fpst);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (elements == 1) {
+ /* scalar single so clear high part */
+ TCGv_i64 tcg_tmp = tcg_temp_new_i64();
+
+ tcg_gen_extu_i32_i64(tcg_tmp, tcg_res);
+ write_vec_element(s, tcg_tmp, rd, pass, MO_64);
+ tcg_temp_free_i64(tcg_tmp);
+ } else {
+ write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
+ }
+
+ tcg_temp_free_i32(tcg_res);
+ tcg_temp_free_i32(tcg_op1);
+ tcg_temp_free_i32(tcg_op2);
+ }
+ }
+
+ tcg_temp_free_ptr(fpst);
+
+ if ((elements << size) < 4) {
+ /* scalar, or non-quad vector op */
+ clear_vec_high(s, rd);
+ }
+}
+
+/* C3.6.11 AdvSIMD scalar three same
+ * 31 30 29 28 24 23 22 21 20 16 15 11 10 9 5 4 0
+ * +-----+---+-----------+------+---+------+--------+---+------+------+
+ * | 0 1 | U | 1 1 1 1 0 | size | 1 | Rm | opcode | 1 | Rn | Rd |
+ * +-----+---+-----------+------+---+------+--------+---+------+------+
+ */
+static void disas_simd_scalar_three_reg_same(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int opcode = extract32(insn, 11, 5);
+ int rm = extract32(insn, 16, 5);
+ int size = extract32(insn, 22, 2);
+ bool u = extract32(insn, 29, 1);
+ TCGv_i64 tcg_rd;
+
+ if (opcode >= 0x18) {
+ /* Floating point: U, size[1] and opcode indicate operation */
+ int fpopcode = opcode | (extract32(size, 1, 1) << 5) | (u << 6);
+ switch (fpopcode) {
+ case 0x1b: /* FMULX */
+ case 0x1f: /* FRECPS */
+ case 0x3f: /* FRSQRTS */
+ case 0x5d: /* FACGE */
+ case 0x7d: /* FACGT */
+ case 0x1c: /* FCMEQ */
+ case 0x5c: /* FCMGE */
+ case 0x7c: /* FCMGT */
+ case 0x7a: /* FABD */
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ handle_3same_float(s, extract32(size, 0, 1), 1, fpopcode, rd, rn, rm);
+ return;
+ }
+
+ switch (opcode) {
+ case 0x1: /* SQADD, UQADD */
+ case 0x5: /* SQSUB, UQSUB */
+ case 0x9: /* SQSHL, UQSHL */
+ case 0xb: /* SQRSHL, UQRSHL */
+ break;
+ case 0x8: /* SSHL, USHL */
+ case 0xa: /* SRSHL, URSHL */
+ case 0x6: /* CMGT, CMHI */
+ case 0x7: /* CMGE, CMHS */
+ case 0x11: /* CMTST, CMEQ */
+ case 0x10: /* ADD, SUB (vector) */
+ if (size != 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x16: /* SQDMULH, SQRDMULH (vector) */
+ if (size != 1 && size != 2) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ tcg_rd = tcg_temp_new_i64();
+
+ if (size == 3) {
+ TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
+ TCGv_i64 tcg_rm = read_fp_dreg(s, rm);
+
+ handle_3same_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rm);
+ tcg_temp_free_i64(tcg_rn);
+ tcg_temp_free_i64(tcg_rm);
+ } else {
+ /* Do a single operation on the lowest element in the vector.
+ * We use the standard Neon helpers and rely on 0 OP 0 == 0 with
+ * no side effects for all these operations.
+ * OPTME: special-purpose helpers would avoid doing some
+ * unnecessary work in the helper for the 8 and 16 bit cases.
+ */
+ NeonGenTwoOpEnvFn *genenvfn;
+ TCGv_i32 tcg_rn = tcg_temp_new_i32();
+ TCGv_i32 tcg_rm = tcg_temp_new_i32();
+ TCGv_i32 tcg_rd32 = tcg_temp_new_i32();
+
+ read_vec_element_i32(s, tcg_rn, rn, 0, size);
+ read_vec_element_i32(s, tcg_rm, rm, 0, size);
+
+ switch (opcode) {
+ case 0x1: /* SQADD, UQADD */
+ {
+ static NeonGenTwoOpEnvFn * const fns[3][2] = {
+ { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
+ { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
+ { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
+ };
+ genenvfn = fns[size][u];
+ break;
+ }
+ case 0x5: /* SQSUB, UQSUB */
+ {
+ static NeonGenTwoOpEnvFn * const fns[3][2] = {
+ { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
+ { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
+ { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
+ };
+ genenvfn = fns[size][u];
+ break;
+ }
+ case 0x9: /* SQSHL, UQSHL */
+ {
+ static NeonGenTwoOpEnvFn * const fns[3][2] = {
+ { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
+ { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
+ { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
+ };
+ genenvfn = fns[size][u];
+ break;
+ }
+ case 0xb: /* SQRSHL, UQRSHL */
+ {
+ static NeonGenTwoOpEnvFn * const fns[3][2] = {
+ { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
+ { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
+ { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
+ };
+ genenvfn = fns[size][u];
+ break;
+ }
+ case 0x16: /* SQDMULH, SQRDMULH */
+ {
+ static NeonGenTwoOpEnvFn * const fns[2][2] = {
+ { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
+ { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
+ };
+ assert(size == 1 || size == 2);
+ genenvfn = fns[size - 1][u];
+ break;
+ }
+ default:
+ g_assert_not_reached();
+ }
+
+ genenvfn(tcg_rd32, cpu_env, tcg_rn, tcg_rm);
+ tcg_gen_extu_i32_i64(tcg_rd, tcg_rd32);
+ tcg_temp_free_i32(tcg_rd32);
+ tcg_temp_free_i32(tcg_rn);
+ tcg_temp_free_i32(tcg_rm);
+ }
+
+ write_fp_dreg(s, rd, tcg_rd);
+
+ tcg_temp_free_i64(tcg_rd);
+}
+
+static void handle_2misc_64(DisasContext *s, int opcode, bool u,
+ TCGv_i64 tcg_rd, TCGv_i64 tcg_rn,
+ TCGv_i32 tcg_rmode, TCGv_ptr tcg_fpstatus)
+{
+ /* Handle 64->64 opcodes which are shared between the scalar and
+ * vector 2-reg-misc groups. We cover every integer opcode where size == 3
+ * is valid in either group and also the double-precision fp ops.
+ * The caller need only provide tcg_rmode and tcg_fpstatus if the op
+ * requires them.
+ */
+ TCGCond cond;
+
+ switch (opcode) {
+ case 0x4: /* CLS, CLZ */
+ if (u) {
+ gen_helper_clz64(tcg_rd, tcg_rn);
+ } else {
+ gen_helper_cls64(tcg_rd, tcg_rn);
+ }
+ break;
+ case 0x5: /* NOT */
+ /* This opcode is shared with CNT and RBIT but we have earlier
+ * enforced that size == 3 if and only if this is the NOT insn.
+ */
+ tcg_gen_not_i64(tcg_rd, tcg_rn);
+ break;
+ case 0x7: /* SQABS, SQNEG */
+ if (u) {
+ gen_helper_neon_qneg_s64(tcg_rd, cpu_env, tcg_rn);
+ } else {
+ gen_helper_neon_qabs_s64(tcg_rd, cpu_env, tcg_rn);
+ }
+ break;
+ case 0xa: /* CMLT */
+ /* 64 bit integer comparison against zero, result is
+ * test ? (2^64 - 1) : 0. We implement this using setcond (test)
+ * and then negating, as for the register comparisons above.
+ */
+ cond = TCG_COND_LT;
+ do_cmop:
+ tcg_gen_setcondi_i64(cond, tcg_rd, tcg_rn, 0);
+ tcg_gen_neg_i64(tcg_rd, tcg_rd);
+ break;
+ case 0x8: /* CMGT, CMGE */
+ cond = u ? TCG_COND_GE : TCG_COND_GT;
+ goto do_cmop;
+ case 0x9: /* CMEQ, CMLE */
+ cond = u ? TCG_COND_LE : TCG_COND_EQ;
+ goto do_cmop;
+ case 0xb: /* ABS, NEG */
+ if (u) {
+ tcg_gen_neg_i64(tcg_rd, tcg_rn);
+ } else {
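+ /* ABS: compute -rn, then use movcond to pick rn when rn > 0,
+ * i.e. tcg_rd = (rn > 0) ? rn : -rn.
+ */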
+ TCGv_i64 tcg_zero = tcg_const_i64(0);
+ tcg_gen_neg_i64(tcg_rd, tcg_rn);
+ tcg_gen_movcond_i64(TCG_COND_GT, tcg_rd, tcg_rn, tcg_zero,
+ tcg_rn, tcg_rd);
+ tcg_temp_free_i64(tcg_zero);
+ }
+ break;
+ case 0x2f: /* FABS */
+ gen_helper_vfp_absd(tcg_rd, tcg_rn);
+ break;
+ case 0x6f: /* FNEG */
+ gen_helper_vfp_negd(tcg_rd, tcg_rn);
+ break;
+ case 0x7f: /* FSQRT */
+ gen_helper_vfp_sqrtd(tcg_rd, tcg_rn, cpu_env);
+ break;
+ case 0x1a: /* FCVTNS */
+ case 0x1b: /* FCVTMS */
+ case 0x1c: /* FCVTAS */
+ case 0x3a: /* FCVTPS */
+ case 0x3b: /* FCVTZS */
+ {
+ TCGv_i32 tcg_shift = tcg_const_i32(0);
+ gen_helper_vfp_tosqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
+ tcg_temp_free_i32(tcg_shift);
+ break;
+ }
+ case 0x5a: /* FCVTNU */
+ case 0x5b: /* FCVTMU */
+ case 0x5c: /* FCVTAU */
+ case 0x7a: /* FCVTPU */
+ case 0x7b: /* FCVTZU */
+ {
+ TCGv_i32 tcg_shift = tcg_const_i32(0);
+ gen_helper_vfp_touqd(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
+ tcg_temp_free_i32(tcg_shift);
+ break;
+ }
+ case 0x18: /* FRINTN */
+ case 0x19: /* FRINTM */
+ case 0x38: /* FRINTP */
+ case 0x39: /* FRINTZ */
+ case 0x58: /* FRINTA */
+ case 0x79: /* FRINTI */
+ gen_helper_rintd(tcg_rd, tcg_rn, tcg_fpstatus);
+ break;
+ case 0x59: /* FRINTX */
+ gen_helper_rintd_exact(tcg_rd, tcg_rn, tcg_fpstatus);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static void handle_2misc_fcmp_zero(DisasContext *s, int opcode,
+ bool is_scalar, bool is_u, bool is_q,
+ int size, int rn, int rd)
+{
+ bool is_double = (size == 3);
+ TCGv_ptr fpst;
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ fpst = get_fpstatus_ptr();
+
+ if (is_double) {
+ TCGv_i64 tcg_op = tcg_temp_new_i64();
+ TCGv_i64 tcg_zero = tcg_const_i64(0);
+ TCGv_i64 tcg_res = tcg_temp_new_i64();
+ NeonGenTwoDoubleOPFn *genfn;
+ bool swap = false;
+ int pass;
+
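+ /* FCMLT/FCMLE (zero) are implemented by swapping the operands of the
+ * GT/GE helpers, i.e. computing 0 OP x instead of x OP 0.
+ */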
+ switch (opcode) {
+ case 0x2e: /* FCMLT (zero) */
+ swap = true;
+ /* fall through */
+ case 0x2c: /* FCMGT (zero) */
+ genfn = gen_helper_neon_cgt_f64;
+ break;
+ case 0x2d: /* FCMEQ (zero) */
+ genfn = gen_helper_neon_ceq_f64;
+ break;
+ case 0x6d: /* FCMLE (zero) */
+ swap = true;
+ /* fall through */
+ case 0x6c: /* FCMGE (zero) */
+ genfn = gen_helper_neon_cge_f64;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
+ read_vec_element(s, tcg_op, rn, pass, MO_64);
+ if (swap) {
+ genfn(tcg_res, tcg_zero, tcg_op, fpst);
+ } else {
+ genfn(tcg_res, tcg_op, tcg_zero, fpst);
+ }
+ write_vec_element(s, tcg_res, rd, pass, MO_64);
+ }
+ if (is_scalar) {
+ clear_vec_high(s, rd);
+ }
+
+ tcg_temp_free_i64(tcg_res);
+ tcg_temp_free_i64(tcg_zero);
+ tcg_temp_free_i64(tcg_op);
+ } else {
+ TCGv_i32 tcg_op = tcg_temp_new_i32();
+ TCGv_i32 tcg_zero = tcg_const_i32(0);
+ TCGv_i32 tcg_res = tcg_temp_new_i32();
+ NeonGenTwoSingleOPFn *genfn;
+ bool swap = false;
+ int pass, maxpasses;
+
+ switch (opcode) {
+ case 0x2e: /* FCMLT (zero) */
+ swap = true;
+ /* fall through */
+ case 0x2c: /* FCMGT (zero) */
+ genfn = gen_helper_neon_cgt_f32;
+ break;
+ case 0x2d: /* FCMEQ (zero) */
+ genfn = gen_helper_neon_ceq_f32;
+ break;
+ case 0x6d: /* FCMLE (zero) */
+ swap = true;
+ /* fall through */
+ case 0x6c: /* FCMGE (zero) */
+ genfn = gen_helper_neon_cge_f32;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (is_scalar) {
+ maxpasses = 1;
+ } else {
+ maxpasses = is_q ? 4 : 2;
+ }
+
+ for (pass = 0; pass < maxpasses; pass++) {
+ read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
+ if (swap) {
+ genfn(tcg_res, tcg_zero, tcg_op, fpst);
+ } else {
+ genfn(tcg_res, tcg_op, tcg_zero, fpst);
+ }
+ if (is_scalar) {
+ write_fp_sreg(s, rd, tcg_res);
+ } else {
+ write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
+ }
+ }
+ tcg_temp_free_i32(tcg_res);
+ tcg_temp_free_i32(tcg_zero);
+ tcg_temp_free_i32(tcg_op);
+ if (!is_q && !is_scalar) {
+ clear_vec_high(s, rd);
+ }
+ }
+
+ tcg_temp_free_ptr(fpst);
+}
+
+static void handle_2misc_reciprocal(DisasContext *s, int opcode,
+ bool is_scalar, bool is_u, bool is_q,
+ int size, int rn, int rd)
+{
+ bool is_double = (size == 3);
+ TCGv_ptr fpst = get_fpstatus_ptr();
+
+ if (is_double) {
+ TCGv_i64 tcg_op = tcg_temp_new_i64();
+ TCGv_i64 tcg_res = tcg_temp_new_i64();
+ int pass;
+
+ for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
+ read_vec_element(s, tcg_op, rn, pass, MO_64);
+ switch (opcode) {
+ case 0x3d: /* FRECPE */
+ gen_helper_recpe_f64(tcg_res, tcg_op, fpst);
+ break;
+ case 0x3f: /* FRECPX */
+ gen_helper_frecpx_f64(tcg_res, tcg_op, fpst);
+ break;
+ case 0x7d: /* FRSQRTE */
+ gen_helper_rsqrte_f64(tcg_res, tcg_op, fpst);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ write_vec_element(s, tcg_res, rd, pass, MO_64);
+ }
+ if (is_scalar) {
+ clear_vec_high(s, rd);
+ }
+
+ tcg_temp_free_i64(tcg_res);
+ tcg_temp_free_i64(tcg_op);
+ } else {
+ TCGv_i32 tcg_op = tcg_temp_new_i32();
+ TCGv_i32 tcg_res = tcg_temp_new_i32();
+ int pass, maxpasses;
+
+ if (is_scalar) {
+ maxpasses = 1;
+ } else {
+ maxpasses = is_q ? 4 : 2;
+ }
+
+ for (pass = 0; pass < maxpasses; pass++) {
+ read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
+
+ switch (opcode) {
+ case 0x3c: /* URECPE */
+ gen_helper_recpe_u32(tcg_res, tcg_op, fpst);
+ break;
+ case 0x3d: /* FRECPE */
+ gen_helper_recpe_f32(tcg_res, tcg_op, fpst);
+ break;
+ case 0x3f: /* FRECPX */
+ gen_helper_frecpx_f32(tcg_res, tcg_op, fpst);
+ break;
+ case 0x7d: /* FRSQRTE */
+ gen_helper_rsqrte_f32(tcg_res, tcg_op, fpst);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (is_scalar) {
+ write_fp_sreg(s, rd, tcg_res);
+ } else {
+ write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
+ }
+ }
+ tcg_temp_free_i32(tcg_res);
+ tcg_temp_free_i32(tcg_op);
+ if (!is_q && !is_scalar) {
+ clear_vec_high(s, rd);
+ }
+ }
+ tcg_temp_free_ptr(fpst);
+}
+
+static void handle_2misc_narrow(DisasContext *s, bool scalar,
+ int opcode, bool u, bool is_q,
+ int size, int rn, int rd)
+{
+ /* Handle 2-reg-misc ops which are narrowing (so each 2*size element
+ * in the source becomes a size element in the destination).
+ */
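+    /* For example, XTN with a 16-bit destination element size truncates
+     * each 32-bit source element to 16 bits; the Q form writes its
+     * results to the upper half of Vd (destelt below) instead.
+     */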
+ int pass;
+ TCGv_i32 tcg_res[2];
+ int destelt = is_q ? 2 : 0;
+ int passes = scalar ? 1 : 2;
+
+ if (scalar) {
+ tcg_res[1] = tcg_const_i32(0);
+ }
+
+ for (pass = 0; pass < passes; pass++) {
+ TCGv_i64 tcg_op = tcg_temp_new_i64();
+ NeonGenNarrowFn *genfn = NULL;
+ NeonGenNarrowEnvFn *genenvfn = NULL;
+
+ if (scalar) {
+ read_vec_element(s, tcg_op, rn, pass, size + 1);
+ } else {
+ read_vec_element(s, tcg_op, rn, pass, MO_64);
+ }
+ tcg_res[pass] = tcg_temp_new_i32();
+
+ switch (opcode) {
+ case 0x12: /* XTN, SQXTUN */
+ {
+ static NeonGenNarrowFn * const xtnfns[3] = {
+ gen_helper_neon_narrow_u8,
+ gen_helper_neon_narrow_u16,
+ tcg_gen_extrl_i64_i32,
+ };
+ static NeonGenNarrowEnvFn * const sqxtunfns[3] = {
+ gen_helper_neon_unarrow_sat8,
+ gen_helper_neon_unarrow_sat16,
+ gen_helper_neon_unarrow_sat32,
+ };
+ if (u) {
+ genenvfn = sqxtunfns[size];
+ } else {
+ genfn = xtnfns[size];
+ }
+ break;
+ }
+ case 0x14: /* SQXTN, UQXTN */
+ {
+ static NeonGenNarrowEnvFn * const fns[3][2] = {
+ { gen_helper_neon_narrow_sat_s8,
+ gen_helper_neon_narrow_sat_u8 },
+ { gen_helper_neon_narrow_sat_s16,
+ gen_helper_neon_narrow_sat_u16 },
+ { gen_helper_neon_narrow_sat_s32,
+ gen_helper_neon_narrow_sat_u32 },
+ };
+ genenvfn = fns[size][u];
+ break;
+ }
+ case 0x16: /* FCVTN, FCVTN2 */
+ /* 32 bit to 16 bit or 64 bit to 32 bit float conversion */
+ if (size == 2) {
+ gen_helper_vfp_fcvtsd(tcg_res[pass], tcg_op, cpu_env);
+ } else {
+ TCGv_i32 tcg_lo = tcg_temp_new_i32();
+ TCGv_i32 tcg_hi = tcg_temp_new_i32();
+ tcg_gen_extr_i64_i32(tcg_lo, tcg_hi, tcg_op);
+ gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, cpu_env);
+ gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, cpu_env);
+ tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16);
+ tcg_temp_free_i32(tcg_lo);
+ tcg_temp_free_i32(tcg_hi);
+ }
+ break;
+ case 0x56: /* FCVTXN, FCVTXN2 */
+ /* 64 bit to 32 bit float conversion
+ * with von Neumann rounding (round to odd)
+ */
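+            /* Rounding to odd here avoids double-rounding problems if
+             * the result is later converted again to a narrower format.
+             */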
+ assert(size == 2);
+ gen_helper_fcvtx_f64_to_f32(tcg_res[pass], tcg_op, cpu_env);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (genfn) {
+ genfn(tcg_res[pass], tcg_op);
+ } else if (genenvfn) {
+ genenvfn(tcg_res[pass], cpu_env, tcg_op);
+ }
+
+ tcg_temp_free_i64(tcg_op);
+ }
+
+ for (pass = 0; pass < 2; pass++) {
+ write_vec_element_i32(s, tcg_res[pass], rd, destelt + pass, MO_32);
+ tcg_temp_free_i32(tcg_res[pass]);
+ }
+ if (!is_q) {
+ clear_vec_high(s, rd);
+ }
+}
+
+/* Remaining saturating accumulating ops */
+static void handle_2misc_satacc(DisasContext *s, bool is_scalar, bool is_u,
+ bool is_q, int size, int rn, int rd)
+{
+ bool is_double = (size == 3);
+
+ if (is_double) {
+ TCGv_i64 tcg_rn = tcg_temp_new_i64();
+ TCGv_i64 tcg_rd = tcg_temp_new_i64();
+ int pass;
+
+ for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
+ read_vec_element(s, tcg_rn, rn, pass, MO_64);
+ read_vec_element(s, tcg_rd, rd, pass, MO_64);
+
+ if (is_u) { /* USQADD */
+ gen_helper_neon_uqadd_s64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+ } else { /* SUQADD */
+ gen_helper_neon_sqadd_u64(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+ }
+ write_vec_element(s, tcg_rd, rd, pass, MO_64);
+ }
+ if (is_scalar) {
+ clear_vec_high(s, rd);
+ }
+
+ tcg_temp_free_i64(tcg_rd);
+ tcg_temp_free_i64(tcg_rn);
+ } else {
+ TCGv_i32 tcg_rn = tcg_temp_new_i32();
+ TCGv_i32 tcg_rd = tcg_temp_new_i32();
+ int pass, maxpasses;
+
+ if (is_scalar) {
+ maxpasses = 1;
+ } else {
+ maxpasses = is_q ? 4 : 2;
+ }
+
+ for (pass = 0; pass < maxpasses; pass++) {
+ if (is_scalar) {
+ read_vec_element_i32(s, tcg_rn, rn, pass, size);
+ read_vec_element_i32(s, tcg_rd, rd, pass, size);
+ } else {
+ read_vec_element_i32(s, tcg_rn, rn, pass, MO_32);
+ read_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
+ }
+
+ if (is_u) { /* USQADD */
+ switch (size) {
+ case 0:
+ gen_helper_neon_uqadd_s8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+ break;
+ case 1:
+ gen_helper_neon_uqadd_s16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+ break;
+ case 2:
+ gen_helper_neon_uqadd_s32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ } else { /* SUQADD */
+ switch (size) {
+ case 0:
+ gen_helper_neon_sqadd_u8(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+ break;
+ case 1:
+ gen_helper_neon_sqadd_u16(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+ break;
+ case 2:
+ gen_helper_neon_sqadd_u32(tcg_rd, cpu_env, tcg_rn, tcg_rd);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+
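+            /* For the scalar form, zero the whole low 64 bits of Vd
+             * first so that the 32-bit write below leaves the rest of
+             * the register clear.
+             */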
+ if (is_scalar) {
+ TCGv_i64 tcg_zero = tcg_const_i64(0);
+ write_vec_element(s, tcg_zero, rd, 0, MO_64);
+ tcg_temp_free_i64(tcg_zero);
+ }
+ write_vec_element_i32(s, tcg_rd, rd, pass, MO_32);
+ }
+
+ if (!is_q) {
+ clear_vec_high(s, rd);
+ }
+
+ tcg_temp_free_i32(tcg_rd);
+ tcg_temp_free_i32(tcg_rn);
+ }
+}
+
+/* C3.6.12 AdvSIMD scalar two reg misc
+ * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
+ * +-----+---+-----------+------+-----------+--------+-----+------+------+
+ * | 0 1 | U | 1 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 | Rn | Rd |
+ * +-----+---+-----------+------+-----------+--------+-----+------+------+
+ */
+static void disas_simd_scalar_two_reg_misc(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int opcode = extract32(insn, 12, 5);
+ int size = extract32(insn, 22, 2);
+ bool u = extract32(insn, 29, 1);
+ bool is_fcvt = false;
+ int rmode;
+ TCGv_i32 tcg_rmode;
+ TCGv_ptr tcg_fpstatus;
+
+ switch (opcode) {
+    case 0x3: /* USQADD / SUQADD */
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_2misc_satacc(s, true, u, false, size, rn, rd);
+ return;
+ case 0x7: /* SQABS / SQNEG */
+ break;
+ case 0xa: /* CMLT */
+ if (u) {
+ unallocated_encoding(s);
+ return;
+ }
+ /* fall through */
+ case 0x8: /* CMGT, CMGE */
+ case 0x9: /* CMEQ, CMLE */
+ case 0xb: /* ABS, NEG */
+ if (size != 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x12: /* SQXTUN */
+ if (!u) {
+ unallocated_encoding(s);
+ return;
+ }
+ /* fall through */
+ case 0x14: /* SQXTN, UQXTN */
+ if (size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_2misc_narrow(s, true, opcode, u, false, size, rn, rd);
+ return;
+ case 0xc ... 0xf:
+ case 0x16 ... 0x1d:
+ case 0x1f:
+ /* Floating point: U, size[1] and opcode indicate operation;
+ * size[0] indicates single or double precision.
+ */
+ opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
+ size = extract32(size, 0, 1) ? 3 : 2;
+ switch (opcode) {
+ case 0x2c: /* FCMGT (zero) */
+ case 0x2d: /* FCMEQ (zero) */
+ case 0x2e: /* FCMLT (zero) */
+ case 0x6c: /* FCMGE (zero) */
+ case 0x6d: /* FCMLE (zero) */
+ handle_2misc_fcmp_zero(s, opcode, true, u, true, size, rn, rd);
+ return;
+ case 0x1d: /* SCVTF */
+ case 0x5d: /* UCVTF */
+ {
+ bool is_signed = (opcode == 0x1d);
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_simd_intfp_conv(s, rd, rn, 1, is_signed, 0, size);
+ return;
+ }
+ case 0x3d: /* FRECPE */
+ case 0x3f: /* FRECPX */
+ case 0x7d: /* FRSQRTE */
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_2misc_reciprocal(s, opcode, true, u, true, size, rn, rd);
+ return;
+ case 0x1a: /* FCVTNS */
+ case 0x1b: /* FCVTMS */
+ case 0x3a: /* FCVTPS */
+ case 0x3b: /* FCVTZS */
+ case 0x5a: /* FCVTNU */
+ case 0x5b: /* FCVTMU */
+ case 0x7a: /* FCVTPU */
+ case 0x7b: /* FCVTZU */
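+            /* The rounding mode (nearest, -inf, +inf or zero) is encoded
+             * in opcode<0> and opcode<5>.
+             */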
+ is_fcvt = true;
+ rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
+ break;
+ case 0x1c: /* FCVTAS */
+ case 0x5c: /* FCVTAU */
+ /* TIEAWAY doesn't fit in the usual rounding mode encoding */
+ is_fcvt = true;
+ rmode = FPROUNDING_TIEAWAY;
+ break;
+ case 0x56: /* FCVTXN, FCVTXN2 */
+ if (size == 2) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_2misc_narrow(s, true, opcode, u, false, size - 1, rn, rd);
+ return;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ if (is_fcvt) {
+ tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+ tcg_fpstatus = get_fpstatus_ptr();
+ } else {
+ TCGV_UNUSED_I32(tcg_rmode);
+ TCGV_UNUSED_PTR(tcg_fpstatus);
+ }
+
+ if (size == 3) {
+ TCGv_i64 tcg_rn = read_fp_dreg(s, rn);
+ TCGv_i64 tcg_rd = tcg_temp_new_i64();
+
+ handle_2misc_64(s, opcode, u, tcg_rd, tcg_rn, tcg_rmode, tcg_fpstatus);
+ write_fp_dreg(s, rd, tcg_rd);
+ tcg_temp_free_i64(tcg_rd);
+ tcg_temp_free_i64(tcg_rn);
+ } else {
+ TCGv_i32 tcg_rn = tcg_temp_new_i32();
+ TCGv_i32 tcg_rd = tcg_temp_new_i32();
+
+ read_vec_element_i32(s, tcg_rn, rn, 0, size);
+
+ switch (opcode) {
+ case 0x7: /* SQABS, SQNEG */
+ {
+ NeonGenOneOpEnvFn *genfn;
+ static NeonGenOneOpEnvFn * const fns[3][2] = {
+ { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
+ { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
+ { gen_helper_neon_qabs_s32, gen_helper_neon_qneg_s32 },
+ };
+ genfn = fns[size][u];
+ genfn(tcg_rd, cpu_env, tcg_rn);
+ break;
+ }
+ case 0x1a: /* FCVTNS */
+ case 0x1b: /* FCVTMS */
+ case 0x1c: /* FCVTAS */
+ case 0x3a: /* FCVTPS */
+ case 0x3b: /* FCVTZS */
+ {
+ TCGv_i32 tcg_shift = tcg_const_i32(0);
+ gen_helper_vfp_tosls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
+ tcg_temp_free_i32(tcg_shift);
+ break;
+ }
+ case 0x5a: /* FCVTNU */
+ case 0x5b: /* FCVTMU */
+ case 0x5c: /* FCVTAU */
+ case 0x7a: /* FCVTPU */
+ case 0x7b: /* FCVTZU */
+ {
+ TCGv_i32 tcg_shift = tcg_const_i32(0);
+ gen_helper_vfp_touls(tcg_rd, tcg_rn, tcg_shift, tcg_fpstatus);
+ tcg_temp_free_i32(tcg_shift);
+ break;
+ }
+ default:
+ g_assert_not_reached();
+ }
+
+ write_fp_sreg(s, rd, tcg_rd);
+ tcg_temp_free_i32(tcg_rd);
+ tcg_temp_free_i32(tcg_rn);
+ }
+
+ if (is_fcvt) {
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+ tcg_temp_free_i32(tcg_rmode);
+ tcg_temp_free_ptr(tcg_fpstatus);
+ }
+}
+
+/* SSHR[RA]/USHR[RA] - Vector shift right (optional rounding/accumulate) */
+static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
+ int immh, int immb, int opcode, int rn, int rd)
+{
+ int size = 32 - clz32(immh) - 1;
+ int immhb = immh << 3 | immb;
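+    /* immh:immb encodes (2 * esize) - shift: e.g. for 64-bit elements
+     * (immh<3> set) shift amounts 1..64 are encoded as 127..64.
+     */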
+ int shift = 2 * (8 << size) - immhb;
+ bool accumulate = false;
+ bool round = false;
+ bool insert = false;
+ int dsize = is_q ? 128 : 64;
+ int esize = 8 << size;
+ int elements = dsize/esize;
+ TCGMemOp memop = size | (is_u ? 0 : MO_SIGN);
+ TCGv_i64 tcg_rn = new_tmp_a64(s);
+ TCGv_i64 tcg_rd = new_tmp_a64(s);
+ TCGv_i64 tcg_round;
+ int i;
+
+ if (extract32(immh, 3, 1) && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (size > 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ switch (opcode) {
+ case 0x02: /* SSRA / USRA (accumulate) */
+ accumulate = true;
+ break;
+ case 0x04: /* SRSHR / URSHR (rounding) */
+ round = true;
+ break;
+ case 0x06: /* SRSRA / URSRA (accum + rounding) */
+ accumulate = round = true;
+ break;
+ case 0x08: /* SRI */
+ insert = true;
+ break;
+ }
+
+ if (round) {
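+        /* Adding 1 << (shift - 1) before the right shift rounds to nearest. */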
+ uint64_t round_const = 1ULL << (shift - 1);
+ tcg_round = tcg_const_i64(round_const);
+ } else {
+ TCGV_UNUSED_I64(tcg_round);
+ }
+
+ for (i = 0; i < elements; i++) {
+ read_vec_element(s, tcg_rn, rn, i, memop);
+ if (accumulate || insert) {
+ read_vec_element(s, tcg_rd, rd, i, memop);
+ }
+
+ if (insert) {
+ handle_shri_with_ins(tcg_rd, tcg_rn, size, shift);
+ } else {
+ handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
+ accumulate, is_u, size, shift);
+ }
+
+ write_vec_element(s, tcg_rd, rd, i, size);
+ }
+
+ if (!is_q) {
+ clear_vec_high(s, rd);
+ }
+
+ if (round) {
+ tcg_temp_free_i64(tcg_round);
+ }
+}
+
+/* SHL/SLI - Vector shift left */
+static void handle_vec_simd_shli(DisasContext *s, bool is_q, bool insert,
+ int immh, int immb, int opcode, int rn, int rd)
+{
+ int size = 32 - clz32(immh) - 1;
+ int immhb = immh << 3 | immb;
+ int shift = immhb - (8 << size);
+ int dsize = is_q ? 128 : 64;
+ int esize = 8 << size;
+ int elements = dsize/esize;
+ TCGv_i64 tcg_rn = new_tmp_a64(s);
+ TCGv_i64 tcg_rd = new_tmp_a64(s);
+ int i;
+
+ if (extract32(immh, 3, 1) && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (size > 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ for (i = 0; i < elements; i++) {
+ read_vec_element(s, tcg_rn, rn, i, size);
+ if (insert) {
+ read_vec_element(s, tcg_rd, rd, i, size);
+ }
+
+ handle_shli_with_ins(tcg_rd, tcg_rn, insert, shift);
+
+ write_vec_element(s, tcg_rd, rd, i, size);
+ }
+
+ if (!is_q) {
+ clear_vec_high(s, rd);
+ }
+}
+
+/* USHLL/SHLL - Vector shift left with widening */
+static void handle_vec_simd_wshli(DisasContext *s, bool is_q, bool is_u,
+ int immh, int immb, int opcode, int rn, int rd)
+{
+ int size = 32 - clz32(immh) - 1;
+ int immhb = immh << 3 | immb;
+ int shift = immhb - (8 << size);
+ int dsize = 64;
+ int esize = 8 << size;
+ int elements = dsize/esize;
+ TCGv_i64 tcg_rn = new_tmp_a64(s);
+ TCGv_i64 tcg_rd = new_tmp_a64(s);
+ int i;
+
+ if (size >= 3) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ /* For the LL variants the store is larger than the load,
+ * so if rd == rn we would overwrite parts of our input.
+ * So load everything right now and use shifts in the main loop.
+ */
+ read_vec_element(s, tcg_rn, rn, is_q ? 1 : 0, MO_64);
+
+ for (i = 0; i < elements; i++) {
+ tcg_gen_shri_i64(tcg_rd, tcg_rn, i * esize);
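+        /* The extend type passed to ext_and_shift_reg is size | (sign << 2),
+         * i.e. a zero- or sign-extension of the source element width.
+         */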
+ ext_and_shift_reg(tcg_rd, tcg_rd, size | (!is_u << 2), 0);
+ tcg_gen_shli_i64(tcg_rd, tcg_rd, shift);
+ write_vec_element(s, tcg_rd, rd, i, size + 1);
+ }
+}
+
+/* SHRN/RSHRN - Shift right with narrowing (and potential rounding) */
+static void handle_vec_simd_shrn(DisasContext *s, bool is_q,
+ int immh, int immb, int opcode, int rn, int rd)
+{
+ int immhb = immh << 3 | immb;
+ int size = 32 - clz32(immh) - 1;
+ int dsize = 64;
+ int esize = 8 << size;
+ int elements = dsize/esize;
+ int shift = (2 * esize) - immhb;
+ bool round = extract32(opcode, 0, 1);
+ TCGv_i64 tcg_rn, tcg_rd, tcg_final;
+ TCGv_i64 tcg_round;
+ int i;
+
+ if (extract32(immh, 3, 1)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ tcg_rn = tcg_temp_new_i64();
+ tcg_rd = tcg_temp_new_i64();
+ tcg_final = tcg_temp_new_i64();
+ read_vec_element(s, tcg_final, rd, is_q ? 1 : 0, MO_64);
+
+ if (round) {
+ uint64_t round_const = 1ULL << (shift - 1);
+ tcg_round = tcg_const_i64(round_const);
+ } else {
+ TCGV_UNUSED_I64(tcg_round);
+ }
+
+ for (i = 0; i < elements; i++) {
+ read_vec_element(s, tcg_rn, rn, i, size+1);
+ handle_shri_with_rndacc(tcg_rd, tcg_rn, tcg_round,
+ false, true, size+1, shift);
+
+ tcg_gen_deposit_i64(tcg_final, tcg_final, tcg_rd, esize * i, esize);
+ }
+
+ if (!is_q) {
+ clear_vec_high(s, rd);
+ write_vec_element(s, tcg_final, rd, 0, MO_64);
+ } else {
+ write_vec_element(s, tcg_final, rd, 1, MO_64);
+ }
+
+ if (round) {
+ tcg_temp_free_i64(tcg_round);
+ }
+ tcg_temp_free_i64(tcg_rn);
+ tcg_temp_free_i64(tcg_rd);
+ tcg_temp_free_i64(tcg_final);
+}
+
+
+/* C3.6.14 AdvSIMD shift by immediate
+ * 31 30 29 28 23 22 19 18 16 15 11 10 9 5 4 0
+ * +---+---+---+-------------+------+------+--------+---+------+------+
+ * | 0 | Q | U | 0 1 1 1 1 0 | immh | immb | opcode | 1 | Rn | Rd |
+ * +---+---+---+-------------+------+------+--------+---+------+------+
+ */
+static void disas_simd_shift_imm(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int opcode = extract32(insn, 11, 5);
+ int immb = extract32(insn, 16, 3);
+ int immh = extract32(insn, 19, 4);
+ bool is_u = extract32(insn, 29, 1);
+ bool is_q = extract32(insn, 30, 1);
+
+ switch (opcode) {
+ case 0x08: /* SRI */
+ if (!is_u) {
+ unallocated_encoding(s);
+ return;
+ }
+ /* fall through */
+ case 0x00: /* SSHR / USHR */
+ case 0x02: /* SSRA / USRA (accumulate) */
+ case 0x04: /* SRSHR / URSHR (rounding) */
+ case 0x06: /* SRSRA / URSRA (accum + rounding) */
+ handle_vec_simd_shri(s, is_q, is_u, immh, immb, opcode, rn, rd);
+ break;
+ case 0x0a: /* SHL / SLI */
+ handle_vec_simd_shli(s, is_q, is_u, immh, immb, opcode, rn, rd);
+ break;
+ case 0x10: /* SHRN */
+ case 0x11: /* RSHRN / SQRSHRUN */
+ if (is_u) {
+ handle_vec_simd_sqshrn(s, false, is_q, false, true, immh, immb,
+ opcode, rn, rd);
+ } else {
+ handle_vec_simd_shrn(s, is_q, immh, immb, opcode, rn, rd);
+ }
+ break;
+ case 0x12: /* SQSHRN / UQSHRN */
+ case 0x13: /* SQRSHRN / UQRSHRN */
+ handle_vec_simd_sqshrn(s, false, is_q, is_u, is_u, immh, immb,
+ opcode, rn, rd);
+ break;
+ case 0x14: /* SSHLL / USHLL */
+ handle_vec_simd_wshli(s, is_q, is_u, immh, immb, opcode, rn, rd);
+ break;
+ case 0x1c: /* SCVTF / UCVTF */
+ handle_simd_shift_intfp_conv(s, false, is_q, is_u, immh, immb,
+ opcode, rn, rd);
+ break;
+ case 0xc: /* SQSHLU */
+ if (!is_u) {
+ unallocated_encoding(s);
+ return;
+ }
+ handle_simd_qshl(s, false, is_q, false, true, immh, immb, rn, rd);
+ break;
+ case 0xe: /* SQSHL, UQSHL */
+ handle_simd_qshl(s, false, is_q, is_u, is_u, immh, immb, rn, rd);
+ break;
+ case 0x1f: /* FCVTZS/ FCVTZU */
+ handle_simd_shift_fpint_conv(s, false, is_q, is_u, immh, immb, rn, rd);
+ return;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+}
+
+/* Generate code to do a "long" addition or subtraction, i.e. one done in
+ * TCGv_i64 on vector lanes twice the width specified by size.
+ */
+static void gen_neon_addl(int size, bool is_sub, TCGv_i64 tcg_res,
+ TCGv_i64 tcg_op1, TCGv_i64 tcg_op2)
+{
+ static NeonGenTwo64OpFn * const fns[3][2] = {
+ { gen_helper_neon_addl_u16, gen_helper_neon_subl_u16 },
+ { gen_helper_neon_addl_u32, gen_helper_neon_subl_u32 },
+ { tcg_gen_add_i64, tcg_gen_sub_i64 },
+ };
+ NeonGenTwo64OpFn *genfn;
+ assert(size < 3);
+
+ genfn = fns[size][is_sub];
+ genfn(tcg_res, tcg_op1, tcg_op2);
+}
+
+static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size,
+ int opcode, int rd, int rn, int rm)
+{
+ /* 3-reg-different widening insns: 64 x 64 -> 128 */
+ TCGv_i64 tcg_res[2];
+ int pass, accop;
+
+ tcg_res[0] = tcg_temp_new_i64();
+ tcg_res[1] = tcg_temp_new_i64();
+
+ /* Does this op do an adding accumulate, a subtracting accumulate,
+ * or no accumulate at all?
+ */
+ switch (opcode) {
+ case 5:
+ case 8:
+ case 9:
+ accop = 1;
+ break;
+ case 10:
+ case 11:
+ accop = -1;
+ break;
+ default:
+ accop = 0;
+ break;
+ }
+
+ if (accop != 0) {
+ read_vec_element(s, tcg_res[0], rd, 0, MO_64);
+ read_vec_element(s, tcg_res[1], rd, 1, MO_64);
+ }
+
+ /* size == 2 means two 32x32->64 operations; this is worth special
+ * casing because we can generally handle it inline.
+ */
+ if (size == 2) {
+ for (pass = 0; pass < 2; pass++) {
+ TCGv_i64 tcg_op1 = tcg_temp_new_i64();
+ TCGv_i64 tcg_op2 = tcg_temp_new_i64();
+ TCGv_i64 tcg_passres;
+ TCGMemOp memop = MO_32 | (is_u ? 0 : MO_SIGN);
+
+ int elt = pass + is_q * 2;
+
+ read_vec_element(s, tcg_op1, rn, elt, memop);
+ read_vec_element(s, tcg_op2, rm, elt, memop);
+
+ if (accop == 0) {
+ tcg_passres = tcg_res[pass];
+ } else {
+ tcg_passres = tcg_temp_new_i64();
+ }
+
+ switch (opcode) {
+ case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
+ tcg_gen_add_i64(tcg_passres, tcg_op1, tcg_op2);
+ break;
+ case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
+ tcg_gen_sub_i64(tcg_passres, tcg_op1, tcg_op2);
+ break;
+ case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
+ case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
+ {
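+                /* Absolute difference: compute both op1 - op2 and
+                 * op2 - op1 and use movcond to pick the non-negative one.
+                 */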
+ TCGv_i64 tcg_tmp1 = tcg_temp_new_i64();
+ TCGv_i64 tcg_tmp2 = tcg_temp_new_i64();
+
+ tcg_gen_sub_i64(tcg_tmp1, tcg_op1, tcg_op2);
+ tcg_gen_sub_i64(tcg_tmp2, tcg_op2, tcg_op1);
+ tcg_gen_movcond_i64(is_u ? TCG_COND_GEU : TCG_COND_GE,
+ tcg_passres,
+ tcg_op1, tcg_op2, tcg_tmp1, tcg_tmp2);
+ tcg_temp_free_i64(tcg_tmp1);
+ tcg_temp_free_i64(tcg_tmp2);
+ break;
+ }
+ case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
+ case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
+ case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
+ tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
+ break;
+ case 9: /* SQDMLAL, SQDMLAL2 */
+ case 11: /* SQDMLSL, SQDMLSL2 */
+ case 13: /* SQDMULL, SQDMULL2 */
+ tcg_gen_mul_i64(tcg_passres, tcg_op1, tcg_op2);
+ gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
+ tcg_passres, tcg_passres);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (opcode == 9 || opcode == 11) {
+ /* saturating accumulate ops */
+ if (accop < 0) {
+ tcg_gen_neg_i64(tcg_passres, tcg_passres);
+ }
+ gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
+ tcg_res[pass], tcg_passres);
+ } else if (accop > 0) {
+ tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
+ } else if (accop < 0) {
+ tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
+ }
+
+ if (accop != 0) {
+ tcg_temp_free_i64(tcg_passres);
+ }
+
+ tcg_temp_free_i64(tcg_op1);
+ tcg_temp_free_i64(tcg_op2);
+ }
+ } else {
+ /* size 0 or 1, generally helper functions */
+ for (pass = 0; pass < 2; pass++) {
+ TCGv_i32 tcg_op1 = tcg_temp_new_i32();
+ TCGv_i32 tcg_op2 = tcg_temp_new_i32();
+ TCGv_i64 tcg_passres;
+ int elt = pass + is_q * 2;
+
+ read_vec_element_i32(s, tcg_op1, rn, elt, MO_32);
+ read_vec_element_i32(s, tcg_op2, rm, elt, MO_32);
+
+ if (accop == 0) {
+ tcg_passres = tcg_res[pass];
+ } else {
+ tcg_passres = tcg_temp_new_i64();
+ }
+
+ switch (opcode) {
+ case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
+ case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
+ {
+ TCGv_i64 tcg_op2_64 = tcg_temp_new_i64();
+ static NeonGenWidenFn * const widenfns[2][2] = {
+ { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
+ { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
+ };
+ NeonGenWidenFn *widenfn = widenfns[size][is_u];
+
+ widenfn(tcg_op2_64, tcg_op2);
+ widenfn(tcg_passres, tcg_op1);
+ gen_neon_addl(size, (opcode == 2), tcg_passres,
+ tcg_passres, tcg_op2_64);
+ tcg_temp_free_i64(tcg_op2_64);
+ break;
+ }
+ case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
+ case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
+ if (size == 0) {
+ if (is_u) {
+ gen_helper_neon_abdl_u16(tcg_passres, tcg_op1, tcg_op2);
+ } else {
+ gen_helper_neon_abdl_s16(tcg_passres, tcg_op1, tcg_op2);
+ }
+ } else {
+ if (is_u) {
+ gen_helper_neon_abdl_u32(tcg_passres, tcg_op1, tcg_op2);
+ } else {
+ gen_helper_neon_abdl_s32(tcg_passres, tcg_op1, tcg_op2);
+ }
+ }
+ break;
+ case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
+ case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
+ case 12: /* UMULL, UMULL2, SMULL, SMULL2 */
+ if (size == 0) {
+ if (is_u) {
+ gen_helper_neon_mull_u8(tcg_passres, tcg_op1, tcg_op2);
+ } else {
+ gen_helper_neon_mull_s8(tcg_passres, tcg_op1, tcg_op2);
+ }
+ } else {
+ if (is_u) {
+ gen_helper_neon_mull_u16(tcg_passres, tcg_op1, tcg_op2);
+ } else {
+ gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
+ }
+ }
+ break;
+ case 9: /* SQDMLAL, SQDMLAL2 */
+ case 11: /* SQDMLSL, SQDMLSL2 */
+ case 13: /* SQDMULL, SQDMULL2 */
+ assert(size == 1);
+ gen_helper_neon_mull_s16(tcg_passres, tcg_op1, tcg_op2);
+ gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
+ tcg_passres, tcg_passres);
+ break;
+ case 14: /* PMULL */
+ assert(size == 0);
+ gen_helper_neon_mull_p8(tcg_passres, tcg_op1, tcg_op2);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ tcg_temp_free_i32(tcg_op1);
+ tcg_temp_free_i32(tcg_op2);
+
+ if (accop != 0) {
+ if (opcode == 9 || opcode == 11) {
+ /* saturating accumulate ops */
+ if (accop < 0) {
+ gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
+ }
+ gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
+ tcg_res[pass],
+ tcg_passres);
+ } else {
+ gen_neon_addl(size, (accop < 0), tcg_res[pass],
+ tcg_res[pass], tcg_passres);
+ }
+ tcg_temp_free_i64(tcg_passres);
+ }
+ }
+ }
+
+ write_vec_element(s, tcg_res[0], rd, 0, MO_64);
+ write_vec_element(s, tcg_res[1], rd, 1, MO_64);
+ tcg_temp_free_i64(tcg_res[0]);
+ tcg_temp_free_i64(tcg_res[1]);
+}
+
+static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size,
+ int opcode, int rd, int rn, int rm)
+{
+ TCGv_i64 tcg_res[2];
+ int part = is_q ? 2 : 0;
+ int pass;
+
+ for (pass = 0; pass < 2; pass++) {
+ TCGv_i64 tcg_op1 = tcg_temp_new_i64();
+ TCGv_i32 tcg_op2 = tcg_temp_new_i32();
+ TCGv_i64 tcg_op2_wide = tcg_temp_new_i64();
+ static NeonGenWidenFn * const widenfns[3][2] = {
+ { gen_helper_neon_widen_s8, gen_helper_neon_widen_u8 },
+ { gen_helper_neon_widen_s16, gen_helper_neon_widen_u16 },
+ { tcg_gen_ext_i32_i64, tcg_gen_extu_i32_i64 },
+ };
+ NeonGenWidenFn *widenfn = widenfns[size][is_u];
+
+ read_vec_element(s, tcg_op1, rn, pass, MO_64);
+ read_vec_element_i32(s, tcg_op2, rm, part + pass, MO_32);
+ widenfn(tcg_op2_wide, tcg_op2);
+ tcg_temp_free_i32(tcg_op2);
+ tcg_res[pass] = tcg_temp_new_i64();
+ gen_neon_addl(size, (opcode == 3),
+ tcg_res[pass], tcg_op1, tcg_op2_wide);
+ tcg_temp_free_i64(tcg_op1);
+ tcg_temp_free_i64(tcg_op2_wide);
+ }
+
+ for (pass = 0; pass < 2; pass++) {
+ write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
+ tcg_temp_free_i64(tcg_res[pass]);
+ }
+}
+
+static void do_narrow_round_high_u32(TCGv_i32 res, TCGv_i64 in)
+{
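+    /* Add 0x80000000 so that taking the high half rounds to nearest
+     * rather than truncating.
+     */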
+ tcg_gen_addi_i64(in, in, 1U << 31);
+ tcg_gen_extrh_i64_i32(res, in);
+}
+
+static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size,
+ int opcode, int rd, int rn, int rm)
+{
+ TCGv_i32 tcg_res[2];
+ int part = is_q ? 2 : 0;
+ int pass;
+
+ for (pass = 0; pass < 2; pass++) {
+ TCGv_i64 tcg_op1 = tcg_temp_new_i64();
+ TCGv_i64 tcg_op2 = tcg_temp_new_i64();
+ TCGv_i64 tcg_wideres = tcg_temp_new_i64();
+ static NeonGenNarrowFn * const narrowfns[3][2] = {
+ { gen_helper_neon_narrow_high_u8,
+ gen_helper_neon_narrow_round_high_u8 },
+ { gen_helper_neon_narrow_high_u16,
+ gen_helper_neon_narrow_round_high_u16 },
+ { tcg_gen_extrh_i64_i32, do_narrow_round_high_u32 },
+ };
+ NeonGenNarrowFn *gennarrow = narrowfns[size][is_u];
+
+ read_vec_element(s, tcg_op1, rn, pass, MO_64);
+ read_vec_element(s, tcg_op2, rm, pass, MO_64);
+
+ gen_neon_addl(size, (opcode == 6), tcg_wideres, tcg_op1, tcg_op2);
+
+ tcg_temp_free_i64(tcg_op1);
+ tcg_temp_free_i64(tcg_op2);
+
+ tcg_res[pass] = tcg_temp_new_i32();
+ gennarrow(tcg_res[pass], tcg_wideres);
+ tcg_temp_free_i64(tcg_wideres);
+ }
+
+ for (pass = 0; pass < 2; pass++) {
+ write_vec_element_i32(s, tcg_res[pass], rd, pass + part, MO_32);
+ tcg_temp_free_i32(tcg_res[pass]);
+ }
+ if (!is_q) {
+ clear_vec_high(s, rd);
+ }
+}
+
+static void handle_pmull_64(DisasContext *s, int is_q, int rd, int rn, int rm)
+{
+ /* PMULL of 64 x 64 -> 128 is an odd special case because it
+ * is the only three-reg-diff instruction which produces a
+ * 128-bit wide result from a single operation. However since
+ * it's possible to calculate the two halves more or less
+ * separately, we just use two helper calls.
+ */
+ TCGv_i64 tcg_op1 = tcg_temp_new_i64();
+ TCGv_i64 tcg_op2 = tcg_temp_new_i64();
+ TCGv_i64 tcg_res = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_op1, rn, is_q, MO_64);
+ read_vec_element(s, tcg_op2, rm, is_q, MO_64);
+ gen_helper_neon_pmull_64_lo(tcg_res, tcg_op1, tcg_op2);
+ write_vec_element(s, tcg_res, rd, 0, MO_64);
+ gen_helper_neon_pmull_64_hi(tcg_res, tcg_op1, tcg_op2);
+ write_vec_element(s, tcg_res, rd, 1, MO_64);
+
+ tcg_temp_free_i64(tcg_op1);
+ tcg_temp_free_i64(tcg_op2);
+ tcg_temp_free_i64(tcg_res);
+}
+
+/* C3.6.15 AdvSIMD three different
+ * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0
+ * +---+---+---+-----------+------+---+------+--------+-----+------+------+
+ * | 0 | Q | U | 0 1 1 1 0 | size | 1 | Rm | opcode | 0 0 | Rn | Rd |
+ * +---+---+---+-----------+------+---+------+--------+-----+------+------+
+ */
+static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn)
+{
+ /* Instructions in this group fall into three basic classes
+ * (in each case with the operation working on each element in
+ * the input vectors):
+ * (1) widening 64 x 64 -> 128 (with possibly Vd as an extra
+ * 128 bit input)
+ * (2) wide 64 x 128 -> 128
+ * (3) narrowing 128 x 128 -> 64
+ * Here we do initial decode, catch unallocated cases and
+ * dispatch to separate functions for each class.
+ */
+ int is_q = extract32(insn, 30, 1);
+ int is_u = extract32(insn, 29, 1);
+ int size = extract32(insn, 22, 2);
+ int opcode = extract32(insn, 12, 4);
+ int rm = extract32(insn, 16, 5);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+
+ switch (opcode) {
+ case 1: /* SADDW, SADDW2, UADDW, UADDW2 */
+ case 3: /* SSUBW, SSUBW2, USUBW, USUBW2 */
+ /* 64 x 128 -> 128 */
+ if (size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_3rd_wide(s, is_q, is_u, size, opcode, rd, rn, rm);
+ break;
+ case 4: /* ADDHN, ADDHN2, RADDHN, RADDHN2 */
+ case 6: /* SUBHN, SUBHN2, RSUBHN, RSUBHN2 */
+ /* 128 x 128 -> 64 */
+ if (size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm);
+ break;
+ case 14: /* PMULL, PMULL2 */
+ if (is_u || size == 1 || size == 2) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (size == 3) {
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_pmull_64(s, is_q, rd, rn, rm);
+ return;
+ }
+ goto is_widening;
+ case 9: /* SQDMLAL, SQDMLAL2 */
+ case 11: /* SQDMLSL, SQDMLSL2 */
+ case 13: /* SQDMULL, SQDMULL2 */
+ if (is_u || size == 0) {
+ unallocated_encoding(s);
+ return;
+ }
+ /* fall through */
+ case 0: /* SADDL, SADDL2, UADDL, UADDL2 */
+ case 2: /* SSUBL, SSUBL2, USUBL, USUBL2 */
+ case 5: /* SABAL, SABAL2, UABAL, UABAL2 */
+ case 7: /* SABDL, SABDL2, UABDL, UABDL2 */
+ case 8: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
+ case 10: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
+ case 12: /* SMULL, SMULL2, UMULL, UMULL2 */
+ /* 64 x 64 -> 128 */
+ if (size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ is_widening:
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ handle_3rd_widening(s, is_q, is_u, size, opcode, rd, rn, rm);
+ break;
+ default:
+ /* opcode 15 not allocated */
+ unallocated_encoding(s);
+ break;
+ }
+}
+
+/* Logic op (opcode == 3) subgroup of C3.6.16. */
+static void disas_simd_3same_logic(DisasContext *s, uint32_t insn)
+{
+ int rd = extract32(insn, 0, 5);
+ int rn = extract32(insn, 5, 5);
+ int rm = extract32(insn, 16, 5);
+ int size = extract32(insn, 22, 2);
+ bool is_u = extract32(insn, 29, 1);
+ bool is_q = extract32(insn, 30, 1);
+ TCGv_i64 tcg_op1, tcg_op2, tcg_res[2];
+ int pass;
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ tcg_op1 = tcg_temp_new_i64();
+ tcg_op2 = tcg_temp_new_i64();
+ tcg_res[0] = tcg_temp_new_i64();
+ tcg_res[1] = tcg_temp_new_i64();
+
+ for (pass = 0; pass < (is_q ? 2 : 1); pass++) {
+ read_vec_element(s, tcg_op1, rn, pass, MO_64);
+ read_vec_element(s, tcg_op2, rm, pass, MO_64);
+
+ if (!is_u) {
+ switch (size) {
+ case 0: /* AND */
+ tcg_gen_and_i64(tcg_res[pass], tcg_op1, tcg_op2);
+ break;
+ case 1: /* BIC */
+ tcg_gen_andc_i64(tcg_res[pass], tcg_op1, tcg_op2);
+ break;
+ case 2: /* ORR */
+ tcg_gen_or_i64(tcg_res[pass], tcg_op1, tcg_op2);
+ break;
+ case 3: /* ORN */
+ tcg_gen_orc_i64(tcg_res[pass], tcg_op1, tcg_op2);
+ break;
+ }
+ } else {
+ if (size != 0) {
+ /* B* ops need res loaded to operate on */
+ read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
+ }
+
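+            /* BSL, BIT and BIF all select each result bit from one of the
+             * two operands; they are implemented below using the identity
+             * (a & sel) | (b & ~sel) == ((a ^ b) & sel) ^ b.
+             */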
+ switch (size) {
+ case 0: /* EOR */
+ tcg_gen_xor_i64(tcg_res[pass], tcg_op1, tcg_op2);
+ break;
+ case 1: /* BSL bitwise select */
+ tcg_gen_xor_i64(tcg_op1, tcg_op1, tcg_op2);
+ tcg_gen_and_i64(tcg_op1, tcg_op1, tcg_res[pass]);
+ tcg_gen_xor_i64(tcg_res[pass], tcg_op2, tcg_op1);
+ break;
+ case 2: /* BIT, bitwise insert if true */
+ tcg_gen_xor_i64(tcg_op1, tcg_op1, tcg_res[pass]);
+ tcg_gen_and_i64(tcg_op1, tcg_op1, tcg_op2);
+ tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
+ break;
+ case 3: /* BIF, bitwise insert if false */
+ tcg_gen_xor_i64(tcg_op1, tcg_op1, tcg_res[pass]);
+ tcg_gen_andc_i64(tcg_op1, tcg_op1, tcg_op2);
+ tcg_gen_xor_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
+ break;
+ }
+ }
+ }
+
+ write_vec_element(s, tcg_res[0], rd, 0, MO_64);
+ if (!is_q) {
+ tcg_gen_movi_i64(tcg_res[1], 0);
+ }
+ write_vec_element(s, tcg_res[1], rd, 1, MO_64);
+
+ tcg_temp_free_i64(tcg_op1);
+ tcg_temp_free_i64(tcg_op2);
+ tcg_temp_free_i64(tcg_res[0]);
+ tcg_temp_free_i64(tcg_res[1]);
+}
+
+/* Helper functions for 32 bit comparisons */
+static void gen_max_s32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
+{
+ tcg_gen_movcond_i32(TCG_COND_GE, res, op1, op2, op1, op2);
+}
+
+static void gen_max_u32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
+{
+ tcg_gen_movcond_i32(TCG_COND_GEU, res, op1, op2, op1, op2);
+}
+
+static void gen_min_s32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
+{
+ tcg_gen_movcond_i32(TCG_COND_LE, res, op1, op2, op1, op2);
+}
+
+static void gen_min_u32(TCGv_i32 res, TCGv_i32 op1, TCGv_i32 op2)
+{
+ tcg_gen_movcond_i32(TCG_COND_LEU, res, op1, op2, op1, op2);
+}
+
+/* Pairwise op subgroup of C3.6.16.
+ *
+ * This is called directly or via disas_simd_3same_float for float pairwise
+ * operations, where the opcode and size are calculated differently.
+ */
+static void handle_simd_3same_pair(DisasContext *s, int is_q, int u, int opcode,
+ int size, int rn, int rm, int rd)
+{
+ TCGv_ptr fpst;
+ int pass;
+
+ /* Floating point operations need fpst */
+ if (opcode >= 0x58) {
+ fpst = get_fpstatus_ptr();
+ } else {
+ TCGV_UNUSED_PTR(fpst);
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ /* These operations work on the concatenated rm:rn, with each pair of
+ * adjacent elements being operated on to produce an element in the result.
+ */
+ if (size == 3) {
+ TCGv_i64 tcg_res[2];
+
+ for (pass = 0; pass < 2; pass++) {
+ TCGv_i64 tcg_op1 = tcg_temp_new_i64();
+ TCGv_i64 tcg_op2 = tcg_temp_new_i64();
+ int passreg = (pass == 0) ? rn : rm;
+
+ read_vec_element(s, tcg_op1, passreg, 0, MO_64);
+ read_vec_element(s, tcg_op2, passreg, 1, MO_64);
+ tcg_res[pass] = tcg_temp_new_i64();
+
+ switch (opcode) {
+ case 0x17: /* ADDP */
+ tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
+ break;
+ case 0x58: /* FMAXNMP */
+ gen_helper_vfp_maxnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x5a: /* FADDP */
+ gen_helper_vfp_addd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x5e: /* FMAXP */
+ gen_helper_vfp_maxd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x78: /* FMINNMP */
+ gen_helper_vfp_minnumd(tcg_res[pass], tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x7e: /* FMINP */
+ gen_helper_vfp_mind(tcg_res[pass], tcg_op1, tcg_op2, fpst);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ tcg_temp_free_i64(tcg_op1);
+ tcg_temp_free_i64(tcg_op2);
+ }
+
+ for (pass = 0; pass < 2; pass++) {
+ write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
+ tcg_temp_free_i64(tcg_res[pass]);
+ }
+ } else {
+ int maxpass = is_q ? 4 : 2;
+ TCGv_i32 tcg_res[4];
+
+ for (pass = 0; pass < maxpass; pass++) {
+ TCGv_i32 tcg_op1 = tcg_temp_new_i32();
+ TCGv_i32 tcg_op2 = tcg_temp_new_i32();
+ NeonGenTwoOpFn *genfn = NULL;
+ int passreg = pass < (maxpass / 2) ? rn : rm;
+ int passelt = (is_q && (pass & 1)) ? 2 : 0;
+
+ read_vec_element_i32(s, tcg_op1, passreg, passelt, MO_32);
+ read_vec_element_i32(s, tcg_op2, passreg, passelt + 1, MO_32);
+ tcg_res[pass] = tcg_temp_new_i32();
+
+ switch (opcode) {
+ case 0x17: /* ADDP */
+ {
+ static NeonGenTwoOpFn * const fns[3] = {
+ gen_helper_neon_padd_u8,
+ gen_helper_neon_padd_u16,
+ tcg_gen_add_i32,
+ };
+ genfn = fns[size];
+ break;
+ }
+ case 0x14: /* SMAXP, UMAXP */
+ {
+ static NeonGenTwoOpFn * const fns[3][2] = {
+ { gen_helper_neon_pmax_s8, gen_helper_neon_pmax_u8 },
+ { gen_helper_neon_pmax_s16, gen_helper_neon_pmax_u16 },
+ { gen_max_s32, gen_max_u32 },
+ };
+ genfn = fns[size][u];
+ break;
+ }
+ case 0x15: /* SMINP, UMINP */
+ {
+ static NeonGenTwoOpFn * const fns[3][2] = {
+ { gen_helper_neon_pmin_s8, gen_helper_neon_pmin_u8 },
+ { gen_helper_neon_pmin_s16, gen_helper_neon_pmin_u16 },
+ { gen_min_s32, gen_min_u32 },
+ };
+ genfn = fns[size][u];
+ break;
+ }
+ /* The FP operations are all on single floats (32 bit) */
+ case 0x58: /* FMAXNMP */
+ gen_helper_vfp_maxnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x5a: /* FADDP */
+ gen_helper_vfp_adds(tcg_res[pass], tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x5e: /* FMAXP */
+ gen_helper_vfp_maxs(tcg_res[pass], tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x78: /* FMINNMP */
+ gen_helper_vfp_minnums(tcg_res[pass], tcg_op1, tcg_op2, fpst);
+ break;
+ case 0x7e: /* FMINP */
+ gen_helper_vfp_mins(tcg_res[pass], tcg_op1, tcg_op2, fpst);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+            /* FP ops emitted their helper calls above; call the integer helper now */
+ if (genfn) {
+ genfn(tcg_res[pass], tcg_op1, tcg_op2);
+ }
+
+ tcg_temp_free_i32(tcg_op1);
+ tcg_temp_free_i32(tcg_op2);
+ }
+
+ for (pass = 0; pass < maxpass; pass++) {
+ write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
+ tcg_temp_free_i32(tcg_res[pass]);
+ }
+ if (!is_q) {
+ clear_vec_high(s, rd);
+ }
+ }
+
+ if (!TCGV_IS_UNUSED_PTR(fpst)) {
+ tcg_temp_free_ptr(fpst);
+ }
+}
+
+/* Floating point op subgroup of C3.6.16. */
+static void disas_simd_3same_float(DisasContext *s, uint32_t insn)
+{
+ /* For floating point ops, the U, size[1] and opcode bits
+ * together indicate the operation. size[0] indicates single
+ * or double.
+ */
+ int fpopcode = extract32(insn, 11, 5)
+ | (extract32(insn, 23, 1) << 5)
+ | (extract32(insn, 29, 1) << 6);
+ int is_q = extract32(insn, 30, 1);
+ int size = extract32(insn, 22, 1);
+ int rm = extract32(insn, 16, 5);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+
+ int datasize = is_q ? 128 : 64;
+ int esize = 32 << size;
+ int elements = datasize / esize;
+
+ if (size == 1 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (fpopcode) {
+ case 0x58: /* FMAXNMP */
+ case 0x5a: /* FADDP */
+ case 0x5e: /* FMAXP */
+ case 0x78: /* FMINNMP */
+ case 0x7e: /* FMINP */
+ if (size && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ handle_simd_3same_pair(s, is_q, 0, fpopcode, size ? MO_64 : MO_32,
+ rn, rm, rd);
+ return;
+ case 0x1b: /* FMULX */
+ case 0x1f: /* FRECPS */
+ case 0x3f: /* FRSQRTS */
+ case 0x5d: /* FACGE */
+ case 0x7d: /* FACGT */
+ case 0x19: /* FMLA */
+ case 0x39: /* FMLS */
+ case 0x18: /* FMAXNM */
+ case 0x1a: /* FADD */
+ case 0x1c: /* FCMEQ */
+ case 0x1e: /* FMAX */
+ case 0x38: /* FMINNM */
+ case 0x3a: /* FSUB */
+ case 0x3e: /* FMIN */
+ case 0x5b: /* FMUL */
+ case 0x5c: /* FCMGE */
+ case 0x5f: /* FDIV */
+ case 0x7a: /* FABD */
+ case 0x7c: /* FCMGT */
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ handle_3same_float(s, size, elements, fpopcode, rd, rn, rm);
+ return;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+}
+
+/* Integer op subgroup of C3.6.16. */
+static void disas_simd_3same_int(DisasContext *s, uint32_t insn)
+{
+ int is_q = extract32(insn, 30, 1);
+ int u = extract32(insn, 29, 1);
+ int size = extract32(insn, 22, 2);
+ int opcode = extract32(insn, 11, 5);
+ int rm = extract32(insn, 16, 5);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+ int pass;
+
+ switch (opcode) {
+ case 0x13: /* MUL, PMUL */
+ if (u && size != 0) {
+ unallocated_encoding(s);
+ return;
+ }
+ /* fall through */
+ case 0x0: /* SHADD, UHADD */
+ case 0x2: /* SRHADD, URHADD */
+ case 0x4: /* SHSUB, UHSUB */
+ case 0xc: /* SMAX, UMAX */
+ case 0xd: /* SMIN, UMIN */
+ case 0xe: /* SABD, UABD */
+ case 0xf: /* SABA, UABA */
+ case 0x12: /* MLA, MLS */
+ if (size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x16: /* SQDMULH, SQRDMULH */
+ if (size == 0 || size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ default:
+ if (size == 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ if (size == 3) {
+ assert(is_q);
+ for (pass = 0; pass < 2; pass++) {
+ TCGv_i64 tcg_op1 = tcg_temp_new_i64();
+ TCGv_i64 tcg_op2 = tcg_temp_new_i64();
+ TCGv_i64 tcg_res = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_op1, rn, pass, MO_64);
+ read_vec_element(s, tcg_op2, rm, pass, MO_64);
+
+ handle_3same_64(s, opcode, u, tcg_res, tcg_op1, tcg_op2);
+
+ write_vec_element(s, tcg_res, rd, pass, MO_64);
+
+ tcg_temp_free_i64(tcg_res);
+ tcg_temp_free_i64(tcg_op1);
+ tcg_temp_free_i64(tcg_op2);
+ }
+ } else {
+ for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
+ TCGv_i32 tcg_op1 = tcg_temp_new_i32();
+ TCGv_i32 tcg_op2 = tcg_temp_new_i32();
+ TCGv_i32 tcg_res = tcg_temp_new_i32();
+ NeonGenTwoOpFn *genfn = NULL;
+ NeonGenTwoOpEnvFn *genenvfn = NULL;
+
+ read_vec_element_i32(s, tcg_op1, rn, pass, MO_32);
+ read_vec_element_i32(s, tcg_op2, rm, pass, MO_32);
+
+ switch (opcode) {
+ case 0x0: /* SHADD, UHADD */
+ {
+ static NeonGenTwoOpFn * const fns[3][2] = {
+ { gen_helper_neon_hadd_s8, gen_helper_neon_hadd_u8 },
+ { gen_helper_neon_hadd_s16, gen_helper_neon_hadd_u16 },
+ { gen_helper_neon_hadd_s32, gen_helper_neon_hadd_u32 },
+ };
+ genfn = fns[size][u];
+ break;
+ }
+ case 0x1: /* SQADD, UQADD */
+ {
+ static NeonGenTwoOpEnvFn * const fns[3][2] = {
+ { gen_helper_neon_qadd_s8, gen_helper_neon_qadd_u8 },
+ { gen_helper_neon_qadd_s16, gen_helper_neon_qadd_u16 },
+ { gen_helper_neon_qadd_s32, gen_helper_neon_qadd_u32 },
+ };
+ genenvfn = fns[size][u];
+ break;
+ }
+ case 0x2: /* SRHADD, URHADD */
+ {
+ static NeonGenTwoOpFn * const fns[3][2] = {
+ { gen_helper_neon_rhadd_s8, gen_helper_neon_rhadd_u8 },
+ { gen_helper_neon_rhadd_s16, gen_helper_neon_rhadd_u16 },
+ { gen_helper_neon_rhadd_s32, gen_helper_neon_rhadd_u32 },
+ };
+ genfn = fns[size][u];
+ break;
+ }
+ case 0x4: /* SHSUB, UHSUB */
+ {
+ static NeonGenTwoOpFn * const fns[3][2] = {
+ { gen_helper_neon_hsub_s8, gen_helper_neon_hsub_u8 },
+ { gen_helper_neon_hsub_s16, gen_helper_neon_hsub_u16 },
+ { gen_helper_neon_hsub_s32, gen_helper_neon_hsub_u32 },
+ };
+ genfn = fns[size][u];
+ break;
+ }
+ case 0x5: /* SQSUB, UQSUB */
+ {
+ static NeonGenTwoOpEnvFn * const fns[3][2] = {
+ { gen_helper_neon_qsub_s8, gen_helper_neon_qsub_u8 },
+ { gen_helper_neon_qsub_s16, gen_helper_neon_qsub_u16 },
+ { gen_helper_neon_qsub_s32, gen_helper_neon_qsub_u32 },
+ };
+ genenvfn = fns[size][u];
+ break;
+ }
+ case 0x6: /* CMGT, CMHI */
+ {
+ static NeonGenTwoOpFn * const fns[3][2] = {
+ { gen_helper_neon_cgt_s8, gen_helper_neon_cgt_u8 },
+ { gen_helper_neon_cgt_s16, gen_helper_neon_cgt_u16 },
+ { gen_helper_neon_cgt_s32, gen_helper_neon_cgt_u32 },
+ };
+ genfn = fns[size][u];
+ break;
+ }
+ case 0x7: /* CMGE, CMHS */
+ {
+ static NeonGenTwoOpFn * const fns[3][2] = {
+ { gen_helper_neon_cge_s8, gen_helper_neon_cge_u8 },
+ { gen_helper_neon_cge_s16, gen_helper_neon_cge_u16 },
+ { gen_helper_neon_cge_s32, gen_helper_neon_cge_u32 },
+ };
+ genfn = fns[size][u];
+ break;
+ }
+ case 0x8: /* SSHL, USHL */
+ {
+ static NeonGenTwoOpFn * const fns[3][2] = {
+ { gen_helper_neon_shl_s8, gen_helper_neon_shl_u8 },
+ { gen_helper_neon_shl_s16, gen_helper_neon_shl_u16 },
+ { gen_helper_neon_shl_s32, gen_helper_neon_shl_u32 },
+ };
+ genfn = fns[size][u];
+ break;
+ }
+ case 0x9: /* SQSHL, UQSHL */
+ {
+ static NeonGenTwoOpEnvFn * const fns[3][2] = {
+ { gen_helper_neon_qshl_s8, gen_helper_neon_qshl_u8 },
+ { gen_helper_neon_qshl_s16, gen_helper_neon_qshl_u16 },
+ { gen_helper_neon_qshl_s32, gen_helper_neon_qshl_u32 },
+ };
+ genenvfn = fns[size][u];
+ break;
+ }
+ case 0xa: /* SRSHL, URSHL */
+ {
+ static NeonGenTwoOpFn * const fns[3][2] = {
+ { gen_helper_neon_rshl_s8, gen_helper_neon_rshl_u8 },
+ { gen_helper_neon_rshl_s16, gen_helper_neon_rshl_u16 },
+ { gen_helper_neon_rshl_s32, gen_helper_neon_rshl_u32 },
+ };
+ genfn = fns[size][u];
+ break;
+ }
+ case 0xb: /* SQRSHL, UQRSHL */
+ {
+ static NeonGenTwoOpEnvFn * const fns[3][2] = {
+ { gen_helper_neon_qrshl_s8, gen_helper_neon_qrshl_u8 },
+ { gen_helper_neon_qrshl_s16, gen_helper_neon_qrshl_u16 },
+ { gen_helper_neon_qrshl_s32, gen_helper_neon_qrshl_u32 },
+ };
+ genenvfn = fns[size][u];
+ break;
+ }
+ case 0xc: /* SMAX, UMAX */
+ {
+ static NeonGenTwoOpFn * const fns[3][2] = {
+ { gen_helper_neon_max_s8, gen_helper_neon_max_u8 },
+ { gen_helper_neon_max_s16, gen_helper_neon_max_u16 },
+ { gen_max_s32, gen_max_u32 },
+ };
+ genfn = fns[size][u];
+ break;
+ }
+
+ case 0xd: /* SMIN, UMIN */
+ {
+ static NeonGenTwoOpFn * const fns[3][2] = {
+ { gen_helper_neon_min_s8, gen_helper_neon_min_u8 },
+ { gen_helper_neon_min_s16, gen_helper_neon_min_u16 },
+ { gen_min_s32, gen_min_u32 },
+ };
+ genfn = fns[size][u];
+ break;
+ }
+ case 0xe: /* SABD, UABD */
+ case 0xf: /* SABA, UABA */
+ {
+ static NeonGenTwoOpFn * const fns[3][2] = {
+ { gen_helper_neon_abd_s8, gen_helper_neon_abd_u8 },
+ { gen_helper_neon_abd_s16, gen_helper_neon_abd_u16 },
+ { gen_helper_neon_abd_s32, gen_helper_neon_abd_u32 },
+ };
+ genfn = fns[size][u];
+ break;
+ }
+ case 0x10: /* ADD, SUB */
+ {
+ static NeonGenTwoOpFn * const fns[3][2] = {
+ { gen_helper_neon_add_u8, gen_helper_neon_sub_u8 },
+ { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
+ { tcg_gen_add_i32, tcg_gen_sub_i32 },
+ };
+ genfn = fns[size][u];
+ break;
+ }
+ case 0x11: /* CMTST, CMEQ */
+ {
+ static NeonGenTwoOpFn * const fns[3][2] = {
+ { gen_helper_neon_tst_u8, gen_helper_neon_ceq_u8 },
+ { gen_helper_neon_tst_u16, gen_helper_neon_ceq_u16 },
+ { gen_helper_neon_tst_u32, gen_helper_neon_ceq_u32 },
+ };
+ genfn = fns[size][u];
+ break;
+ }
+ case 0x13: /* MUL, PMUL */
+ if (u) {
+ /* PMUL */
+ assert(size == 0);
+ genfn = gen_helper_neon_mul_p8;
+ break;
+ }
+ /* fall through : MUL */
+ case 0x12: /* MLA, MLS */
+ {
+ static NeonGenTwoOpFn * const fns[3] = {
+ gen_helper_neon_mul_u8,
+ gen_helper_neon_mul_u16,
+ tcg_gen_mul_i32,
+ };
+ genfn = fns[size];
+ break;
+ }
+ case 0x16: /* SQDMULH, SQRDMULH */
+ {
+ static NeonGenTwoOpEnvFn * const fns[2][2] = {
+ { gen_helper_neon_qdmulh_s16, gen_helper_neon_qrdmulh_s16 },
+ { gen_helper_neon_qdmulh_s32, gen_helper_neon_qrdmulh_s32 },
+ };
+ assert(size == 1 || size == 2);
+ genenvfn = fns[size - 1][u];
+ break;
+ }
+ default:
+ g_assert_not_reached();
+ }
+
+ if (genenvfn) {
+ genenvfn(tcg_res, cpu_env, tcg_op1, tcg_op2);
+ } else {
+ genfn(tcg_res, tcg_op1, tcg_op2);
+ }
+
+ if (opcode == 0xf || opcode == 0x12) {
+ /* SABA, UABA, MLA, MLS: accumulating ops */
+ static NeonGenTwoOpFn * const fns[3][2] = {
+ { gen_helper_neon_add_u8, gen_helper_neon_sub_u8 },
+ { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
+ { tcg_gen_add_i32, tcg_gen_sub_i32 },
+ };
+ bool is_sub = (opcode == 0x12 && u); /* MLS */
+
+ genfn = fns[size][is_sub];
+ read_vec_element_i32(s, tcg_op1, rd, pass, MO_32);
+ genfn(tcg_res, tcg_op1, tcg_res);
+ }
+
+ write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
+
+ tcg_temp_free_i32(tcg_res);
+ tcg_temp_free_i32(tcg_op1);
+ tcg_temp_free_i32(tcg_op2);
+ }
+ }
+
+ if (!is_q) {
+ clear_vec_high(s, rd);
+ }
+}
+
+/* C3.6.16 AdvSIMD three same
+ * 31 30 29 28 24 23 22 21 20 16 15 11 10 9 5 4 0
+ * +---+---+---+-----------+------+---+------+--------+---+------+------+
+ * | 0 | Q | U | 0 1 1 1 0 | size | 1 | Rm | opcode | 1 | Rn | Rd |
+ * +---+---+---+-----------+------+---+------+--------+---+------+------+
+ */
+static void disas_simd_three_reg_same(DisasContext *s, uint32_t insn)
+{
+ int opcode = extract32(insn, 11, 5);
+
+ switch (opcode) {
+ case 0x3: /* logic ops */
+ disas_simd_3same_logic(s, insn);
+ break;
+ case 0x17: /* ADDP */
+ case 0x14: /* SMAXP, UMAXP */
+ case 0x15: /* SMINP, UMINP */
+ {
+ /* Pairwise operations */
+ int is_q = extract32(insn, 30, 1);
+ int u = extract32(insn, 29, 1);
+ int size = extract32(insn, 22, 2);
+ int rm = extract32(insn, 16, 5);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+ if (opcode == 0x17) {
+ if (u || (size == 3 && !is_q)) {
+ unallocated_encoding(s);
+ return;
+ }
+ } else {
+ if (size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ }
+ handle_simd_3same_pair(s, is_q, u, opcode, size, rn, rm, rd);
+ break;
+ }
+ case 0x18 ... 0x31:
+ /* floating point ops, sz[1] and U are part of opcode */
+ disas_simd_3same_float(s, insn);
+ break;
+ default:
+ disas_simd_3same_int(s, insn);
+ break;
+ }
+}
+
+static void handle_2misc_widening(DisasContext *s, int opcode, bool is_q,
+ int size, int rn, int rd)
+{
+ /* Handle 2-reg-misc ops which are widening (so each size element
+ * in the source becomes a 2*size element in the destination.
+ * The only instruction like this is FCVTL.
+ */
+ int pass;
+
+ if (size == 3) {
+ /* 32 -> 64 bit fp conversion */
+ TCGv_i64 tcg_res[2];
+ int srcelt = is_q ? 2 : 0;
+
+ for (pass = 0; pass < 2; pass++) {
+ TCGv_i32 tcg_op = tcg_temp_new_i32();
+ tcg_res[pass] = tcg_temp_new_i64();
+
+ read_vec_element_i32(s, tcg_op, rn, srcelt + pass, MO_32);
+ gen_helper_vfp_fcvtds(tcg_res[pass], tcg_op, cpu_env);
+ tcg_temp_free_i32(tcg_op);
+ }
+ for (pass = 0; pass < 2; pass++) {
+ write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
+ tcg_temp_free_i64(tcg_res[pass]);
+ }
+ } else {
+ /* 16 -> 32 bit fp conversion */
+ int srcelt = is_q ? 4 : 0;
+ TCGv_i32 tcg_res[4];
+
+ for (pass = 0; pass < 4; pass++) {
+ tcg_res[pass] = tcg_temp_new_i32();
+
+ read_vec_element_i32(s, tcg_res[pass], rn, srcelt + pass, MO_16);
+ gen_helper_vfp_fcvt_f16_to_f32(tcg_res[pass], tcg_res[pass],
+ cpu_env);
+ }
+ for (pass = 0; pass < 4; pass++) {
+ write_vec_element_i32(s, tcg_res[pass], rd, pass, MO_32);
+ tcg_temp_free_i32(tcg_res[pass]);
+ }
+ }
+}
+
+static void handle_rev(DisasContext *s, int opcode, bool u,
+ bool is_q, int size, int rn, int rd)
+{
+ int op = (opcode << 1) | u;
+ int opsz = op + size;
+ int grp_size = 3 - opsz;
+ int dsize = is_q ? 128 : 64;
+ int i;
+
+ if (opsz >= 3) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ if (size == 0) {
+ /* Special case bytes, use bswap op on each group of elements */
+ int groups = dsize / (8 << grp_size);
+
+ for (i = 0; i < groups; i++) {
+ TCGv_i64 tcg_tmp = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_tmp, rn, i, grp_size);
+ switch (grp_size) {
+ case MO_16:
+ tcg_gen_bswap16_i64(tcg_tmp, tcg_tmp);
+ break;
+ case MO_32:
+ tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp);
+ break;
+ case MO_64:
+ tcg_gen_bswap64_i64(tcg_tmp, tcg_tmp);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ write_vec_element(s, tcg_tmp, rd, i, grp_size);
+ tcg_temp_free_i64(tcg_tmp);
+ }
+ if (!is_q) {
+ clear_vec_high(s, rd);
+ }
+ } else {
+ int revmask = (1 << grp_size) - 1;
+ int esize = 8 << size;
+ int elements = dsize / esize;
+ TCGv_i64 tcg_rn = tcg_temp_new_i64();
+ TCGv_i64 tcg_rd = tcg_const_i64(0);
+ TCGv_i64 tcg_rd_hi = tcg_const_i64(0);
+
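+        /* Elements are reversed within each group of (1 << grp_size)
+         * elements by XORing the element index with revmask.
+         */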
+ for (i = 0; i < elements; i++) {
+ int e_rev = (i & 0xf) ^ revmask;
+ int off = e_rev * esize;
+ read_vec_element(s, tcg_rn, rn, i, size);
+ if (off >= 64) {
+ tcg_gen_deposit_i64(tcg_rd_hi, tcg_rd_hi,
+ tcg_rn, off - 64, esize);
+ } else {
+ tcg_gen_deposit_i64(tcg_rd, tcg_rd, tcg_rn, off, esize);
+ }
+ }
+ write_vec_element(s, tcg_rd, rd, 0, MO_64);
+ write_vec_element(s, tcg_rd_hi, rd, 1, MO_64);
+
+ tcg_temp_free_i64(tcg_rd_hi);
+ tcg_temp_free_i64(tcg_rd);
+ tcg_temp_free_i64(tcg_rn);
+ }
+}
+
+static void handle_2misc_pairwise(DisasContext *s, int opcode, bool u,
+ bool is_q, int size, int rn, int rd)
+{
+ /* Implement the pairwise operations from 2-misc:
+ * SADDLP, UADDLP, SADALP, UADALP.
+ * These all add pairs of elements in the input to produce a
+ * double-width result element in the output (possibly accumulating).
+ */
+ bool accum = (opcode == 0x6);
+ int maxpass = is_q ? 2 : 1;
+ int pass;
+ TCGv_i64 tcg_res[2];
+
+ if (size == 2) {
+ /* 32 + 32 -> 64 op */
+ TCGMemOp memop = size + (u ? 0 : MO_SIGN);
+
+ for (pass = 0; pass < maxpass; pass++) {
+ TCGv_i64 tcg_op1 = tcg_temp_new_i64();
+ TCGv_i64 tcg_op2 = tcg_temp_new_i64();
+
+ tcg_res[pass] = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_op1, rn, pass * 2, memop);
+ read_vec_element(s, tcg_op2, rn, pass * 2 + 1, memop);
+ tcg_gen_add_i64(tcg_res[pass], tcg_op1, tcg_op2);
+ if (accum) {
+ read_vec_element(s, tcg_op1, rd, pass, MO_64);
+ tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_op1);
+ }
+
+ tcg_temp_free_i64(tcg_op1);
+ tcg_temp_free_i64(tcg_op2);
+ }
+ } else {
+ for (pass = 0; pass < maxpass; pass++) {
+ TCGv_i64 tcg_op = tcg_temp_new_i64();
+ NeonGenOneOpFn *genfn;
+ static NeonGenOneOpFn * const fns[2][2] = {
+ { gen_helper_neon_addlp_s8, gen_helper_neon_addlp_u8 },
+ { gen_helper_neon_addlp_s16, gen_helper_neon_addlp_u16 },
+ };
+
+ genfn = fns[size][u];
+
+ tcg_res[pass] = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_op, rn, pass, MO_64);
+ genfn(tcg_res[pass], tcg_op);
+
+ if (accum) {
+ read_vec_element(s, tcg_op, rd, pass, MO_64);
+ if (size == 0) {
+ gen_helper_neon_addl_u16(tcg_res[pass],
+ tcg_res[pass], tcg_op);
+ } else {
+ gen_helper_neon_addl_u32(tcg_res[pass],
+ tcg_res[pass], tcg_op);
+ }
+ }
+ tcg_temp_free_i64(tcg_op);
+ }
+ }
+ if (!is_q) {
+ tcg_res[1] = tcg_const_i64(0);
+ }
+ for (pass = 0; pass < 2; pass++) {
+ write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
+ tcg_temp_free_i64(tcg_res[pass]);
+ }
+}
+
+static void handle_shll(DisasContext *s, bool is_q, int size, int rn, int rd)
+{
+ /* Implement SHLL and SHLL2 */
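+    /* Each element is widened and then shifted left by the source
+     * element size (8 << size).
+     */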
+ int pass;
+ int part = is_q ? 2 : 0;
+ TCGv_i64 tcg_res[2];
+
+ for (pass = 0; pass < 2; pass++) {
+ static NeonGenWidenFn * const widenfns[3] = {
+ gen_helper_neon_widen_u8,
+ gen_helper_neon_widen_u16,
+ tcg_gen_extu_i32_i64,
+ };
+ NeonGenWidenFn *widenfn = widenfns[size];
+ TCGv_i32 tcg_op = tcg_temp_new_i32();
+
+ read_vec_element_i32(s, tcg_op, rn, part + pass, MO_32);
+ tcg_res[pass] = tcg_temp_new_i64();
+ widenfn(tcg_res[pass], tcg_op);
+ tcg_gen_shli_i64(tcg_res[pass], tcg_res[pass], 8 << size);
+
+ tcg_temp_free_i32(tcg_op);
+ }
+
+ for (pass = 0; pass < 2; pass++) {
+ write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
+ tcg_temp_free_i64(tcg_res[pass]);
+ }
+}
+
+/* C3.6.17 AdvSIMD two reg misc
+ * 31 30 29 28 24 23 22 21 17 16 12 11 10 9 5 4 0
+ * +---+---+---+-----------+------+-----------+--------+-----+------+------+
+ * | 0 | Q | U | 0 1 1 1 0 | size | 1 0 0 0 0 | opcode | 1 0 | Rn | Rd |
+ * +---+---+---+-----------+------+-----------+--------+-----+------+------+
+ */
+static void disas_simd_two_reg_misc(DisasContext *s, uint32_t insn)
+{
+ int size = extract32(insn, 22, 2);
+ int opcode = extract32(insn, 12, 5);
+ bool u = extract32(insn, 29, 1);
+ bool is_q = extract32(insn, 30, 1);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+ bool need_fpstatus = false;
+ bool need_rmode = false;
+ int rmode = -1;
+ TCGv_i32 tcg_rmode;
+ TCGv_ptr tcg_fpstatus;
+
+ switch (opcode) {
+ case 0x0: /* REV64, REV32 */
+ case 0x1: /* REV16 */
+ handle_rev(s, opcode, u, is_q, size, rn, rd);
+ return;
+ case 0x5: /* CNT, NOT, RBIT */
+ if (u && size == 0) {
+ /* NOT: adjust size so we can use the 64-bits-at-a-time loop. */
+ size = 3;
+ break;
+ } else if (u && size == 1) {
+ /* RBIT */
+ break;
+ } else if (!u && size == 0) {
+ /* CNT */
+ break;
+ }
+ unallocated_encoding(s);
+ return;
+ case 0x12: /* XTN, XTN2, SQXTUN, SQXTUN2 */
+ case 0x14: /* SQXTN, SQXTN2, UQXTN, UQXTN2 */
+ if (size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ handle_2misc_narrow(s, false, opcode, u, is_q, size, rn, rd);
+ return;
+ case 0x4: /* CLS, CLZ */
+ if (size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x2: /* SADDLP, UADDLP */
+ case 0x6: /* SADALP, UADALP */
+ if (size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_2misc_pairwise(s, opcode, u, is_q, size, rn, rd);
+ return;
+ case 0x13: /* SHLL, SHLL2 */
+ if (u == 0 || size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_shll(s, is_q, size, rn, rd);
+ return;
+ case 0xa: /* CMLT */
+ if (u == 1) {
+ unallocated_encoding(s);
+ return;
+ }
+ /* fall through */
+ case 0x8: /* CMGT, CMGE */
+ case 0x9: /* CMEQ, CMLE */
+ case 0xb: /* ABS, NEG */
+ if (size == 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x3: /* SUQADD, USQADD */
+ if (size == 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_2misc_satacc(s, false, u, is_q, size, rn, rd);
+ return;
+ case 0x7: /* SQABS, SQNEG */
+ if (size == 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0xc ... 0xf:
+ case 0x16 ... 0x1d:
+ case 0x1f:
+ {
+ /* Floating point: U, size[1] and opcode indicate operation;
+ * size[0] indicates single or double precision.
+ */
+ int is_double = extract32(size, 0, 1);
+ opcode |= (extract32(size, 1, 1) << 5) | (u << 6);
+ size = is_double ? 3 : 2;
+ switch (opcode) {
+ case 0x2f: /* FABS */
+ case 0x6f: /* FNEG */
+ if (size == 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x1d: /* SCVTF */
+ case 0x5d: /* UCVTF */
+ {
+ bool is_signed = (opcode == 0x1d);
+ int elements = is_double ? 2 : is_q ? 4 : 2;
+ if (is_double && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_simd_intfp_conv(s, rd, rn, elements, is_signed, 0, size);
+ return;
+ }
+ case 0x2c: /* FCMGT (zero) */
+ case 0x2d: /* FCMEQ (zero) */
+ case 0x2e: /* FCMLT (zero) */
+ case 0x6c: /* FCMGE (zero) */
+ case 0x6d: /* FCMLE (zero) */
+ if (size == 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ handle_2misc_fcmp_zero(s, opcode, false, u, is_q, size, rn, rd);
+ return;
+ case 0x7f: /* FSQRT */
+ if (size == 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x1a: /* FCVTNS */
+ case 0x1b: /* FCVTMS */
+ case 0x3a: /* FCVTPS */
+ case 0x3b: /* FCVTZS */
+ case 0x5a: /* FCVTNU */
+ case 0x5b: /* FCVTMU */
+ case 0x7a: /* FCVTPU */
+ case 0x7b: /* FCVTZU */
+ need_fpstatus = true;
+ need_rmode = true;
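+ /* Opcode bits [5] and [0] give the rounding mode directly as an FPROUNDING_* value: to-nearest for the N forms, towards -Inf for M, towards +Inf for P and towards zero for Z. */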
+ rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
+ if (size == 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x5c: /* FCVTAU */
+ case 0x1c: /* FCVTAS */
+ need_fpstatus = true;
+ need_rmode = true;
+ rmode = FPROUNDING_TIEAWAY;
+ if (size == 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x3c: /* URECPE */
+ if (size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ /* fall through */
+ case 0x3d: /* FRECPE */
+ case 0x7d: /* FRSQRTE */
+ if (size == 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_2misc_reciprocal(s, opcode, false, u, is_q, size, rn, rd);
+ return;
+ case 0x56: /* FCVTXN, FCVTXN2 */
+ if (size == 2) {
+ unallocated_encoding(s);
+ return;
+ }
+ /* fall through */
+ case 0x16: /* FCVTN, FCVTN2 */
+ /* handle_2misc_narrow does a 2*size -> size operation, but these
+ * instructions encode the source size rather than dest size.
+ */
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_2misc_narrow(s, false, opcode, 0, is_q, size - 1, rn, rd);
+ return;
+ case 0x17: /* FCVTL, FCVTL2 */
+ if (!fp_access_check(s)) {
+ return;
+ }
+ handle_2misc_widening(s, opcode, is_q, size, rn, rd);
+ return;
+ case 0x18: /* FRINTN */
+ case 0x19: /* FRINTM */
+ case 0x38: /* FRINTP */
+ case 0x39: /* FRINTZ */
+ need_rmode = true;
+ rmode = extract32(opcode, 5, 1) | (extract32(opcode, 0, 1) << 1);
+ /* fall through */
+ case 0x59: /* FRINTX */
+ case 0x79: /* FRINTI */
+ need_fpstatus = true;
+ if (size == 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x58: /* FRINTA */
+ need_rmode = true;
+ rmode = FPROUNDING_TIEAWAY;
+ need_fpstatus = true;
+ if (size == 3 && !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x7c: /* URSQRTE */
+ if (size == 3) {
+ unallocated_encoding(s);
+ return;
+ }
+ need_fpstatus = true;
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ }
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ if (need_fpstatus) {
+ tcg_fpstatus = get_fpstatus_ptr();
+ } else {
+ TCGV_UNUSED_PTR(tcg_fpstatus);
+ }
+ if (need_rmode) {
+ tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+ } else {
+ TCGV_UNUSED_I32(tcg_rmode);
+ }
+
+ if (size == 3) {
+ /* All 64-bit element operations can be shared with scalar 2misc */
+ int pass;
+
+ for (pass = 0; pass < (is_q ? 2 : 1); pass++) {
+ TCGv_i64 tcg_op = tcg_temp_new_i64();
+ TCGv_i64 tcg_res = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_op, rn, pass, MO_64);
+
+ handle_2misc_64(s, opcode, u, tcg_res, tcg_op,
+ tcg_rmode, tcg_fpstatus);
+
+ write_vec_element(s, tcg_res, rd, pass, MO_64);
+
+ tcg_temp_free_i64(tcg_res);
+ tcg_temp_free_i64(tcg_op);
+ }
+ } else {
+ int pass;
+
+ for (pass = 0; pass < (is_q ? 4 : 2); pass++) {
+ TCGv_i32 tcg_op = tcg_temp_new_i32();
+ TCGv_i32 tcg_res = tcg_temp_new_i32();
+ TCGCond cond;
+
+ read_vec_element_i32(s, tcg_op, rn, pass, MO_32);
+
+ if (size == 2) {
+ /* Special cases for 32 bit elements */
+ switch (opcode) {
+ case 0xa: /* CMLT */
+ /* 32 bit integer comparison against zero, result is
+ * test ? (2^32 - 1) : 0. We implement via setcond(test)
+ * and negating the 0/1 result.
+ */
+ cond = TCG_COND_LT;
+ do_cmop:
+ tcg_gen_setcondi_i32(cond, tcg_res, tcg_op, 0);
+ tcg_gen_neg_i32(tcg_res, tcg_res);
+ break;
+ case 0x8: /* CMGT, CMGE */
+ cond = u ? TCG_COND_GE : TCG_COND_GT;
+ goto do_cmop;
+ case 0x9: /* CMEQ, CMLE */
+ cond = u ? TCG_COND_LE : TCG_COND_EQ;
+ goto do_cmop;
+ case 0x4: /* CLS */
+ if (u) {
+ gen_helper_clz32(tcg_res, tcg_op);
+ } else {
+ gen_helper_cls32(tcg_res, tcg_op);
+ }
+ break;
+ case 0x7: /* SQABS, SQNEG */
+ if (u) {
+ gen_helper_neon_qneg_s32(tcg_res, cpu_env, tcg_op);
+ } else {
+ gen_helper_neon_qabs_s32(tcg_res, cpu_env, tcg_op);
+ }
+ break;
+ case 0xb: /* ABS, NEG */
+ if (u) {
+ tcg_gen_neg_i32(tcg_res, tcg_op);
+ } else {
+ TCGv_i32 tcg_zero = tcg_const_i32(0);
+ tcg_gen_neg_i32(tcg_res, tcg_op);
+ tcg_gen_movcond_i32(TCG_COND_GT, tcg_res, tcg_op,
+ tcg_zero, tcg_op, tcg_res);
+ tcg_temp_free_i32(tcg_zero);
+ }
+ break;
+ case 0x2f: /* FABS */
+ gen_helper_vfp_abss(tcg_res, tcg_op);
+ break;
+ case 0x6f: /* FNEG */
+ gen_helper_vfp_negs(tcg_res, tcg_op);
+ break;
+ case 0x7f: /* FSQRT */
+ gen_helper_vfp_sqrts(tcg_res, tcg_op, cpu_env);
+ break;
+ case 0x1a: /* FCVTNS */
+ case 0x1b: /* FCVTMS */
+ case 0x1c: /* FCVTAS */
+ case 0x3a: /* FCVTPS */
+ case 0x3b: /* FCVTZS */
+ {
+ TCGv_i32 tcg_shift = tcg_const_i32(0);
+ gen_helper_vfp_tosls(tcg_res, tcg_op,
+ tcg_shift, tcg_fpstatus);
+ tcg_temp_free_i32(tcg_shift);
+ break;
+ }
+ case 0x5a: /* FCVTNU */
+ case 0x5b: /* FCVTMU */
+ case 0x5c: /* FCVTAU */
+ case 0x7a: /* FCVTPU */
+ case 0x7b: /* FCVTZU */
+ {
+ TCGv_i32 tcg_shift = tcg_const_i32(0);
+ gen_helper_vfp_touls(tcg_res, tcg_op,
+ tcg_shift, tcg_fpstatus);
+ tcg_temp_free_i32(tcg_shift);
+ break;
+ }
+ case 0x18: /* FRINTN */
+ case 0x19: /* FRINTM */
+ case 0x38: /* FRINTP */
+ case 0x39: /* FRINTZ */
+ case 0x58: /* FRINTA */
+ case 0x79: /* FRINTI */
+ gen_helper_rints(tcg_res, tcg_op, tcg_fpstatus);
+ break;
+ case 0x59: /* FRINTX */
+ gen_helper_rints_exact(tcg_res, tcg_op, tcg_fpstatus);
+ break;
+ case 0x7c: /* URSQRTE */
+ gen_helper_rsqrte_u32(tcg_res, tcg_op, tcg_fpstatus);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ } else {
+ /* Use helpers for 8 and 16 bit elements */
+ switch (opcode) {
+ case 0x5: /* CNT, RBIT */
+ /* For these two insns size is part of the opcode specifier
+ * (handled earlier); they always operate on byte elements.
+ */
+ if (u) {
+ gen_helper_neon_rbit_u8(tcg_res, tcg_op);
+ } else {
+ gen_helper_neon_cnt_u8(tcg_res, tcg_op);
+ }
+ break;
+ case 0x7: /* SQABS, SQNEG */
+ {
+ NeonGenOneOpEnvFn *genfn;
+ static NeonGenOneOpEnvFn * const fns[2][2] = {
+ { gen_helper_neon_qabs_s8, gen_helper_neon_qneg_s8 },
+ { gen_helper_neon_qabs_s16, gen_helper_neon_qneg_s16 },
+ };
+ genfn = fns[size][u];
+ genfn(tcg_res, cpu_env, tcg_op);
+ break;
+ }
+ case 0x8: /* CMGT, CMGE */
+ case 0x9: /* CMEQ, CMLE */
+ case 0xa: /* CMLT */
+ {
+ static NeonGenTwoOpFn * const fns[3][2] = {
+ { gen_helper_neon_cgt_s8, gen_helper_neon_cgt_s16 },
+ { gen_helper_neon_cge_s8, gen_helper_neon_cge_s16 },
+ { gen_helper_neon_ceq_u8, gen_helper_neon_ceq_u16 },
+ };
+ NeonGenTwoOpFn *genfn;
+ int comp;
+ bool reverse;
+ TCGv_i32 tcg_zero = tcg_const_i32(0);
+
+ /* comp = index into [CMGT, CMGE, CMEQ, CMLE, CMLT] */
+ comp = (opcode - 0x8) * 2 + u;
+ /* ...but LE, LT are implemented as reverse GE, GT */
+ reverse = (comp > 2);
+ if (reverse) {
+ comp = 4 - comp;
+ }
+ genfn = fns[comp][size];
+ if (reverse) {
+ genfn(tcg_res, tcg_zero, tcg_op);
+ } else {
+ genfn(tcg_res, tcg_op, tcg_zero);
+ }
+ tcg_temp_free_i32(tcg_zero);
+ break;
+ }
+ case 0xb: /* ABS, NEG */
+ if (u) {
+ TCGv_i32 tcg_zero = tcg_const_i32(0);
+ if (size) {
+ gen_helper_neon_sub_u16(tcg_res, tcg_zero, tcg_op);
+ } else {
+ gen_helper_neon_sub_u8(tcg_res, tcg_zero, tcg_op);
+ }
+ tcg_temp_free_i32(tcg_zero);
+ } else {
+ if (size) {
+ gen_helper_neon_abs_s16(tcg_res, tcg_op);
+ } else {
+ gen_helper_neon_abs_s8(tcg_res, tcg_op);
+ }
+ }
+ break;
+ case 0x4: /* CLS, CLZ */
+ if (u) {
+ if (size == 0) {
+ gen_helper_neon_clz_u8(tcg_res, tcg_op);
+ } else {
+ gen_helper_neon_clz_u16(tcg_res, tcg_op);
+ }
+ } else {
+ if (size == 0) {
+ gen_helper_neon_cls_s8(tcg_res, tcg_op);
+ } else {
+ gen_helper_neon_cls_s16(tcg_res, tcg_op);
+ }
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+
+ write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
+
+ tcg_temp_free_i32(tcg_res);
+ tcg_temp_free_i32(tcg_op);
+ }
+ }
+ if (!is_q) {
+ clear_vec_high(s, rd);
+ }
+
+ if (need_rmode) {
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+ tcg_temp_free_i32(tcg_rmode);
+ }
+ if (need_fpstatus) {
+ tcg_temp_free_ptr(tcg_fpstatus);
+ }
+}
+
+/* C3.6.13 AdvSIMD scalar x indexed element
+ * 31 30 29 28 24 23 22 21 20 19 16 15 12 11 10 9 5 4 0
+ * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
+ * | 0 1 | U | 1 1 1 1 1 | size | L | M | Rm | opc | H | 0 | Rn | Rd |
+ * +-----+---+-----------+------+---+---+------+-----+---+---+------+------+
+ * C3.6.18 AdvSIMD vector x indexed element
+ * 31 30 29 28 24 23 22 21 20 19 16 15 12 11 10 9 5 4 0
+ * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
+ * | 0 | Q | U | 0 1 1 1 1 | size | L | M | Rm | opc | H | 0 | Rn | Rd |
+ * +---+---+---+-----------+------+---+---+------+-----+---+---+------+------+
+ */
+static void disas_simd_indexed(DisasContext *s, uint32_t insn)
+{
+ /* This encoding has two kinds of instruction:
+ * normal, where we perform elt x idxelt => elt for each
+ * element in the vector
+ * long, where we perform elt x idxelt and generate a result of
+ * double the width of the input element
+ * The long ops have a 'part' specifier (ie come in INSN, INSN2 pairs).
+ */
+ bool is_scalar = extract32(insn, 28, 1);
+ bool is_q = extract32(insn, 30, 1);
+ bool u = extract32(insn, 29, 1);
+ int size = extract32(insn, 22, 2);
+ int l = extract32(insn, 21, 1);
+ int m = extract32(insn, 20, 1);
+ /* Note that the Rm field here is only 4 bits, not 5 as it usually is */
+ int rm = extract32(insn, 16, 4);
+ int opcode = extract32(insn, 12, 4);
+ int h = extract32(insn, 11, 1);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+ bool is_long = false;
+ bool is_fp = false;
+ int index;
+ TCGv_ptr fpst;
+
+ switch (opcode) {
+ case 0x0: /* MLA */
+ case 0x4: /* MLS */
+ if (!u || is_scalar) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
+ case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
+ case 0xa: /* SMULL, SMULL2, UMULL, UMULL2 */
+ if (is_scalar) {
+ unallocated_encoding(s);
+ return;
+ }
+ is_long = true;
+ break;
+ case 0x3: /* SQDMLAL, SQDMLAL2 */
+ case 0x7: /* SQDMLSL, SQDMLSL2 */
+ case 0xb: /* SQDMULL, SQDMULL2 */
+ is_long = true;
+ /* fall through */
+ case 0xc: /* SQDMULH */
+ case 0xd: /* SQRDMULH */
+ if (u) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x8: /* MUL */
+ if (u || is_scalar) {
+ unallocated_encoding(s);
+ return;
+ }
+ break;
+ case 0x1: /* FMLA */
+ case 0x5: /* FMLS */
+ if (u) {
+ unallocated_encoding(s);
+ return;
+ }
+ /* fall through */
+ case 0x9: /* FMUL, FMULX */
+ if (!extract32(size, 1, 1)) {
+ unallocated_encoding(s);
+ return;
+ }
+ is_fp = true;
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (is_fp) {
+ /* low bit of size indicates single/double */
+ size = extract32(size, 0, 1) ? 3 : 2;
+ if (size == 2) {
+ index = h << 1 | l;
+ } else {
+ if (l || !is_q) {
+ unallocated_encoding(s);
+ return;
+ }
+ index = h;
+ }
+ rm |= (m << 4);
+ } else {
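+ /* Integer ops: 16-bit elements use H:L:M as the index and Rm stays 4 bits; 32-bit elements use H:L and M becomes bit 4 of Rm. */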
+ switch (size) {
+ case 1:
+ index = h << 2 | l << 1 | m;
+ break;
+ case 2:
+ index = h << 1 | l;
+ rm |= (m << 4);
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+ }
+
+ if (!fp_access_check(s)) {
+ return;
+ }
+
+ if (is_fp) {
+ fpst = get_fpstatus_ptr();
+ } else {
+ TCGV_UNUSED_PTR(fpst);
+ }
+
+ if (size == 3) {
+ TCGv_i64 tcg_idx = tcg_temp_new_i64();
+ int pass;
+
+ assert(is_fp && is_q && !is_long);
+
+ read_vec_element(s, tcg_idx, rm, index, MO_64);
+
+ for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
+ TCGv_i64 tcg_op = tcg_temp_new_i64();
+ TCGv_i64 tcg_res = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_op, rn, pass, MO_64);
+
+ switch (opcode) {
+ case 0x5: /* FMLS */
+ /* As usual for ARM, separate negation for fused multiply-add */
+ gen_helper_vfp_negd(tcg_op, tcg_op);
+ /* fall through */
+ case 0x1: /* FMLA */
+ read_vec_element(s, tcg_res, rd, pass, MO_64);
+ gen_helper_vfp_muladdd(tcg_res, tcg_op, tcg_idx, tcg_res, fpst);
+ break;
+ case 0x9: /* FMUL, FMULX */
+ if (u) {
+ gen_helper_vfp_mulxd(tcg_res, tcg_op, tcg_idx, fpst);
+ } else {
+ gen_helper_vfp_muld(tcg_res, tcg_op, tcg_idx, fpst);
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ write_vec_element(s, tcg_res, rd, pass, MO_64);
+ tcg_temp_free_i64(tcg_op);
+ tcg_temp_free_i64(tcg_res);
+ }
+
+ if (is_scalar) {
+ clear_vec_high(s, rd);
+ }
+
+ tcg_temp_free_i64(tcg_idx);
+ } else if (!is_long) {
+ /* 32 bit floating point, or 16 or 32 bit integer.
+ * For the 16 bit scalar case we use the usual Neon helpers and
+ * rely on the fact that 0 op 0 == 0 with no side effects.
+ */
+ TCGv_i32 tcg_idx = tcg_temp_new_i32();
+ int pass, maxpasses;
+
+ if (is_scalar) {
+ maxpasses = 1;
+ } else {
+ maxpasses = is_q ? 4 : 2;
+ }
+
+ read_vec_element_i32(s, tcg_idx, rm, index, size);
+
+ if (size == 1 && !is_scalar) {
+ /* The simplest way to handle the 16x16 indexed ops is to duplicate
+ * the index into both halves of the 32 bit tcg_idx and then use
+ * the usual Neon helpers.
+ */
+ tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
+ }
+
+ for (pass = 0; pass < maxpasses; pass++) {
+ TCGv_i32 tcg_op = tcg_temp_new_i32();
+ TCGv_i32 tcg_res = tcg_temp_new_i32();
+
+ read_vec_element_i32(s, tcg_op, rn, pass, is_scalar ? size : MO_32);
+
+ switch (opcode) {
+ case 0x0: /* MLA */
+ case 0x4: /* MLS */
+ case 0x8: /* MUL */
+ {
+ static NeonGenTwoOpFn * const fns[2][2] = {
+ { gen_helper_neon_add_u16, gen_helper_neon_sub_u16 },
+ { tcg_gen_add_i32, tcg_gen_sub_i32 },
+ };
+ NeonGenTwoOpFn *genfn;
+ bool is_sub = opcode == 0x4;
+
+ if (size == 1) {
+ gen_helper_neon_mul_u16(tcg_res, tcg_op, tcg_idx);
+ } else {
+ tcg_gen_mul_i32(tcg_res, tcg_op, tcg_idx);
+ }
+ if (opcode == 0x8) {
+ break;
+ }
+ read_vec_element_i32(s, tcg_op, rd, pass, MO_32);
+ genfn = fns[size - 1][is_sub];
+ genfn(tcg_res, tcg_op, tcg_res);
+ break;
+ }
+ case 0x5: /* FMLS */
+ /* As usual for ARM, separate negation for fused multiply-add */
+ gen_helper_vfp_negs(tcg_op, tcg_op);
+ /* fall through */
+ case 0x1: /* FMLA */
+ read_vec_element_i32(s, tcg_res, rd, pass, MO_32);
+ gen_helper_vfp_muladds(tcg_res, tcg_op, tcg_idx, tcg_res, fpst);
+ break;
+ case 0x9: /* FMUL, FMULX */
+ if (u) {
+ gen_helper_vfp_mulxs(tcg_res, tcg_op, tcg_idx, fpst);
+ } else {
+ gen_helper_vfp_muls(tcg_res, tcg_op, tcg_idx, fpst);
+ }
+ break;
+ case 0xc: /* SQDMULH */
+ if (size == 1) {
+ gen_helper_neon_qdmulh_s16(tcg_res, cpu_env,
+ tcg_op, tcg_idx);
+ } else {
+ gen_helper_neon_qdmulh_s32(tcg_res, cpu_env,
+ tcg_op, tcg_idx);
+ }
+ break;
+ case 0xd: /* SQRDMULH */
+ if (size == 1) {
+ gen_helper_neon_qrdmulh_s16(tcg_res, cpu_env,
+ tcg_op, tcg_idx);
+ } else {
+ gen_helper_neon_qrdmulh_s32(tcg_res, cpu_env,
+ tcg_op, tcg_idx);
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (is_scalar) {
+ write_fp_sreg(s, rd, tcg_res);
+ } else {
+ write_vec_element_i32(s, tcg_res, rd, pass, MO_32);
+ }
+
+ tcg_temp_free_i32(tcg_op);
+ tcg_temp_free_i32(tcg_res);
+ }
+
+ tcg_temp_free_i32(tcg_idx);
+
+ if (!is_q) {
+ clear_vec_high(s, rd);
+ }
+ } else {
+ /* long ops: 16x16->32 or 32x32->64 */
+ TCGv_i64 tcg_res[2];
+ int pass;
+ bool satop = extract32(opcode, 0, 1);
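+ /* For the long ops, opcode bit 0 distinguishes the saturating-doubling SQDML* forms from the plain widening multiplies. */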
+ TCGMemOp memop = MO_32;
+
+ if (satop || !u) {
+ memop |= MO_SIGN;
+ }
+
+ if (size == 2) {
+ TCGv_i64 tcg_idx = tcg_temp_new_i64();
+
+ read_vec_element(s, tcg_idx, rm, index, memop);
+
+ for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
+ TCGv_i64 tcg_op = tcg_temp_new_i64();
+ TCGv_i64 tcg_passres;
+ int passelt;
+
+ if (is_scalar) {
+ passelt = 0;
+ } else {
+ passelt = pass + (is_q * 2);
+ }
+
+ read_vec_element(s, tcg_op, rn, passelt, memop);
+
+ tcg_res[pass] = tcg_temp_new_i64();
+
+ if (opcode == 0xa || opcode == 0xb) {
+ /* Non-accumulating ops */
+ tcg_passres = tcg_res[pass];
+ } else {
+ tcg_passres = tcg_temp_new_i64();
+ }
+
+ tcg_gen_mul_i64(tcg_passres, tcg_op, tcg_idx);
+ tcg_temp_free_i64(tcg_op);
+
+ if (satop) {
+ /* saturating, doubling */
+ gen_helper_neon_addl_saturate_s64(tcg_passres, cpu_env,
+ tcg_passres, tcg_passres);
+ }
+
+ if (opcode == 0xa || opcode == 0xb) {
+ continue;
+ }
+
+ /* Accumulating op: handle accumulate step */
+ read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
+
+ switch (opcode) {
+ case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
+ tcg_gen_add_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
+ break;
+ case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
+ tcg_gen_sub_i64(tcg_res[pass], tcg_res[pass], tcg_passres);
+ break;
+ case 0x7: /* SQDMLSL, SQDMLSL2 */
+ tcg_gen_neg_i64(tcg_passres, tcg_passres);
+ /* fall through */
+ case 0x3: /* SQDMLAL, SQDMLAL2 */
+ gen_helper_neon_addl_saturate_s64(tcg_res[pass], cpu_env,
+ tcg_res[pass],
+ tcg_passres);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ tcg_temp_free_i64(tcg_passres);
+ }
+ tcg_temp_free_i64(tcg_idx);
+
+ if (is_scalar) {
+ clear_vec_high(s, rd);
+ }
+ } else {
+ TCGv_i32 tcg_idx = tcg_temp_new_i32();
+
+ assert(size == 1);
+ read_vec_element_i32(s, tcg_idx, rm, index, size);
+
+ if (!is_scalar) {
+ /* The simplest way to handle the 16x16 indexed ops is to
+ * duplicate the index into both halves of the 32 bit tcg_idx
+ * and then use the usual Neon helpers.
+ */
+ tcg_gen_deposit_i32(tcg_idx, tcg_idx, tcg_idx, 16, 16);
+ }
+
+ for (pass = 0; pass < (is_scalar ? 1 : 2); pass++) {
+ TCGv_i32 tcg_op = tcg_temp_new_i32();
+ TCGv_i64 tcg_passres;
+
+ if (is_scalar) {
+ read_vec_element_i32(s, tcg_op, rn, pass, size);
+ } else {
+ read_vec_element_i32(s, tcg_op, rn,
+ pass + (is_q * 2), MO_32);
+ }
+
+ tcg_res[pass] = tcg_temp_new_i64();
+
+ if (opcode == 0xa || opcode == 0xb) {
+ /* Non-accumulating ops */
+ tcg_passres = tcg_res[pass];
+ } else {
+ tcg_passres = tcg_temp_new_i64();
+ }
+
+ if (memop & MO_SIGN) {
+ gen_helper_neon_mull_s16(tcg_passres, tcg_op, tcg_idx);
+ } else {
+ gen_helper_neon_mull_u16(tcg_passres, tcg_op, tcg_idx);
+ }
+ if (satop) {
+ gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env,
+ tcg_passres, tcg_passres);
+ }
+ tcg_temp_free_i32(tcg_op);
+
+ if (opcode == 0xa || opcode == 0xb) {
+ continue;
+ }
+
+ /* Accumulating op: handle accumulate step */
+ read_vec_element(s, tcg_res[pass], rd, pass, MO_64);
+
+ switch (opcode) {
+ case 0x2: /* SMLAL, SMLAL2, UMLAL, UMLAL2 */
+ gen_helper_neon_addl_u32(tcg_res[pass], tcg_res[pass],
+ tcg_passres);
+ break;
+ case 0x6: /* SMLSL, SMLSL2, UMLSL, UMLSL2 */
+ gen_helper_neon_subl_u32(tcg_res[pass], tcg_res[pass],
+ tcg_passres);
+ break;
+ case 0x7: /* SQDMLSL, SQDMLSL2 */
+ gen_helper_neon_negl_u32(tcg_passres, tcg_passres);
+ /* fall through */
+ case 0x3: /* SQDMLAL, SQDMLAL2 */
+ gen_helper_neon_addl_saturate_s32(tcg_res[pass], cpu_env,
+ tcg_res[pass],
+ tcg_passres);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ tcg_temp_free_i64(tcg_passres);
+ }
+ tcg_temp_free_i32(tcg_idx);
+
+ if (is_scalar) {
+ tcg_gen_ext32u_i64(tcg_res[0], tcg_res[0]);
+ }
+ }
+
+ if (is_scalar) {
+ tcg_res[1] = tcg_const_i64(0);
+ }
+
+ for (pass = 0; pass < 2; pass++) {
+ write_vec_element(s, tcg_res[pass], rd, pass, MO_64);
+ tcg_temp_free_i64(tcg_res[pass]);
+ }
+ }
+
+ if (!TCGV_IS_UNUSED_PTR(fpst)) {
+ tcg_temp_free_ptr(fpst);
+ }
+}
+
+/* C3.6.19 Crypto AES
+ * 31 24 23 22 21 17 16 12 11 10 9 5 4 0
+ * +-----------------+------+-----------+--------+-----+------+------+
+ * | 0 1 0 0 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 | Rn | Rd |
+ * +-----------------+------+-----------+--------+-----+------+------+
+ */
+static void disas_crypto_aes(DisasContext *s, uint32_t insn)
+{
+ int size = extract32(insn, 22, 2);
+ int opcode = extract32(insn, 12, 5);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+ int decrypt;
+ TCGv_i32 tcg_rd_regno, tcg_rn_regno, tcg_decrypt;
+ CryptoThreeOpEnvFn *genfn;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
+ || size != 0) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (opcode) {
+ case 0x4: /* AESE */
+ decrypt = 0;
+ genfn = gen_helper_crypto_aese;
+ break;
+ case 0x6: /* AESMC */
+ decrypt = 0;
+ genfn = gen_helper_crypto_aesmc;
+ break;
+ case 0x5: /* AESD */
+ decrypt = 1;
+ genfn = gen_helper_crypto_aese;
+ break;
+ case 0x7: /* AESIMC */
+ decrypt = 1;
+ genfn = gen_helper_crypto_aesmc;
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ /* Note that we convert the Vx register indexes into the
+ * index within the vfp.regs[] array, so we can share the
+ * helper with the AArch32 instructions.
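+ * (Each 128-bit Vn is stored as two consecutive 64-bit entries in
+ * vfp.regs[], which is why the register number is doubled.)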
+ */
+ tcg_rd_regno = tcg_const_i32(rd << 1);
+ tcg_rn_regno = tcg_const_i32(rn << 1);
+ tcg_decrypt = tcg_const_i32(decrypt);
+
+ genfn(cpu_env, tcg_rd_regno, tcg_rn_regno, tcg_decrypt);
+
+ tcg_temp_free_i32(tcg_rd_regno);
+ tcg_temp_free_i32(tcg_rn_regno);
+ tcg_temp_free_i32(tcg_decrypt);
+}
+
+/* C3.6.20 Crypto three-reg SHA
+ * 31 24 23 22 21 20 16 15 14 12 11 10 9 5 4 0
+ * +-----------------+------+---+------+---+--------+-----+------+------+
+ * | 0 1 0 1 1 1 1 0 | size | 0 | Rm | 0 | opcode | 0 0 | Rn | Rd |
+ * +-----------------+------+---+------+---+--------+-----+------+------+
+ */
+static void disas_crypto_three_reg_sha(DisasContext *s, uint32_t insn)
+{
+ int size = extract32(insn, 22, 2);
+ int opcode = extract32(insn, 12, 3);
+ int rm = extract32(insn, 16, 5);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+ CryptoThreeOpEnvFn *genfn;
+ TCGv_i32 tcg_rd_regno, tcg_rn_regno, tcg_rm_regno;
+ int feature = ARM_FEATURE_V8_SHA256;
+
+ if (size != 0) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (opcode) {
+ case 0: /* SHA1C */
+ case 1: /* SHA1P */
+ case 2: /* SHA1M */
+ case 3: /* SHA1SU0 */
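+ /* All four SHA1 insns are handled by one helper that takes the opcode as an extra argument; signal that with a NULL genfn. */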
+ genfn = NULL;
+ feature = ARM_FEATURE_V8_SHA1;
+ break;
+ case 4: /* SHA256H */
+ genfn = gen_helper_crypto_sha256h;
+ break;
+ case 5: /* SHA256H2 */
+ genfn = gen_helper_crypto_sha256h2;
+ break;
+ case 6: /* SHA256SU1 */
+ genfn = gen_helper_crypto_sha256su1;
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!arm_dc_feature(s, feature)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ tcg_rd_regno = tcg_const_i32(rd << 1);
+ tcg_rn_regno = tcg_const_i32(rn << 1);
+ tcg_rm_regno = tcg_const_i32(rm << 1);
+
+ if (genfn) {
+ genfn(cpu_env, tcg_rd_regno, tcg_rn_regno, tcg_rm_regno);
+ } else {
+ TCGv_i32 tcg_opcode = tcg_const_i32(opcode);
+
+ gen_helper_crypto_sha1_3reg(cpu_env, tcg_rd_regno,
+ tcg_rn_regno, tcg_rm_regno, tcg_opcode);
+ tcg_temp_free_i32(tcg_opcode);
+ }
+
+ tcg_temp_free_i32(tcg_rd_regno);
+ tcg_temp_free_i32(tcg_rn_regno);
+ tcg_temp_free_i32(tcg_rm_regno);
+}
+
+/* C3.6.21 Crypto two-reg SHA
+ * 31 24 23 22 21 17 16 12 11 10 9 5 4 0
+ * +-----------------+------+-----------+--------+-----+------+------+
+ * | 0 1 0 1 1 1 1 0 | size | 1 0 1 0 0 | opcode | 1 0 | Rn | Rd |
+ * +-----------------+------+-----------+--------+-----+------+------+
+ */
+static void disas_crypto_two_reg_sha(DisasContext *s, uint32_t insn)
+{
+ int size = extract32(insn, 22, 2);
+ int opcode = extract32(insn, 12, 5);
+ int rn = extract32(insn, 5, 5);
+ int rd = extract32(insn, 0, 5);
+ CryptoTwoOpEnvFn *genfn;
+ int feature;
+ TCGv_i32 tcg_rd_regno, tcg_rn_regno;
+
+ if (size != 0) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ switch (opcode) {
+ case 0: /* SHA1H */
+ feature = ARM_FEATURE_V8_SHA1;
+ genfn = gen_helper_crypto_sha1h;
+ break;
+ case 1: /* SHA1SU1 */
+ feature = ARM_FEATURE_V8_SHA1;
+ genfn = gen_helper_crypto_sha1su1;
+ break;
+ case 2: /* SHA256SU0 */
+ feature = ARM_FEATURE_V8_SHA256;
+ genfn = gen_helper_crypto_sha256su0;
+ break;
+ default:
+ unallocated_encoding(s);
+ return;
+ }
+
+ if (!arm_dc_feature(s, feature)) {
+ unallocated_encoding(s);
+ return;
+ }
+
+ tcg_rd_regno = tcg_const_i32(rd << 1);
+ tcg_rn_regno = tcg_const_i32(rn << 1);
+
+ genfn(cpu_env, tcg_rd_regno, tcg_rn_regno);
+
+ tcg_temp_free_i32(tcg_rd_regno);
+ tcg_temp_free_i32(tcg_rn_regno);
+}
+
+/* C3.6 Data processing - SIMD, inc Crypto
+ *
+ * As the decode gets a little complex we are using a table based
+ * approach for this part of the decode.
+ */
+static const AArch64DecodeTable data_proc_simd[] = {
+ /* pattern , mask , fn */
+ { 0x0e200400, 0x9f200400, disas_simd_three_reg_same },
+ { 0x0e200000, 0x9f200c00, disas_simd_three_reg_diff },
+ { 0x0e200800, 0x9f3e0c00, disas_simd_two_reg_misc },
+ { 0x0e300800, 0x9f3e0c00, disas_simd_across_lanes },
+ { 0x0e000400, 0x9fe08400, disas_simd_copy },
+ { 0x0f000000, 0x9f000400, disas_simd_indexed }, /* vector indexed */
+ /* simd_mod_imm decode is a subset of simd_shift_imm, so must precede it */
+ { 0x0f000400, 0x9ff80400, disas_simd_mod_imm },
+ { 0x0f000400, 0x9f800400, disas_simd_shift_imm },
+ { 0x0e000000, 0xbf208c00, disas_simd_tb },
+ { 0x0e000800, 0xbf208c00, disas_simd_zip_trn },
+ { 0x2e000000, 0xbf208400, disas_simd_ext },
+ { 0x5e200400, 0xdf200400, disas_simd_scalar_three_reg_same },
+ { 0x5e200000, 0xdf200c00, disas_simd_scalar_three_reg_diff },
+ { 0x5e200800, 0xdf3e0c00, disas_simd_scalar_two_reg_misc },
+ { 0x5e300800, 0xdf3e0c00, disas_simd_scalar_pairwise },
+ { 0x5e000400, 0xdfe08400, disas_simd_scalar_copy },
+ { 0x5f000000, 0xdf000400, disas_simd_indexed }, /* scalar indexed */
+ { 0x5f000400, 0xdf800400, disas_simd_scalar_shift_imm },
+ { 0x4e280800, 0xff3e0c00, disas_crypto_aes },
+ { 0x5e000000, 0xff208c00, disas_crypto_three_reg_sha },
+ { 0x5e280800, 0xff3e0c00, disas_crypto_two_reg_sha },
+ { 0x00000000, 0x00000000, NULL }
+};
+
+static void disas_data_proc_simd(DisasContext *s, uint32_t insn)
+{
+ /* Note that this is called with all non-FP cases from
+ * table C3-6 so it must UNDEF for entries not specifically
+ * allocated to instructions in that table.
+ */
+ AArch64DecodeFn *fn = lookup_disas_fn(&data_proc_simd[0], insn);
+ if (fn) {
+ fn(s, insn);
+ } else {
+ unallocated_encoding(s);
+ }
+}
+
+/* C3.6 Data processing - SIMD and floating point */
+static void disas_data_proc_simd_fp(DisasContext *s, uint32_t insn)
+{
+ if (extract32(insn, 28, 1) == 1 && extract32(insn, 30, 1) == 0) {
+ disas_data_proc_fp(s, insn);
+ } else {
+ /* SIMD, including crypto */
+ disas_data_proc_simd(s, insn);
+ }
+}
+
+/* C3.1 A64 instruction index by encoding */
+static void disas_a64_insn(CPUARMState *env, DisasContext *s)
+{
+ uint32_t insn;
+
+ insn = arm_ldl_code(env, s->pc, s->sctlr_b);
+ s->insn = insn;
+ s->pc += 4;
+
+ s->fp_access_checked = false;
+
+ switch (extract32(insn, 25, 4)) {
+ case 0x0: case 0x1: case 0x2: case 0x3: /* UNALLOCATED */
+ unallocated_encoding(s);
+ break;
+ case 0x8: case 0x9: /* Data processing - immediate */
+ disas_data_proc_imm(s, insn);
+ break;
+ case 0xa: case 0xb: /* Branch, exception generation and system insns */
+ disas_b_exc_sys(s, insn);
+ break;
+ case 0x4:
+ case 0x6:
+ case 0xc:
+ case 0xe: /* Loads and stores */
+ disas_ldst(s, insn);
+ break;
+ case 0x5:
+ case 0xd: /* Data processing - register */
+ disas_data_proc_reg(s, insn);
+ break;
+ case 0x7:
+ case 0xf: /* Data processing - SIMD and floating point */
+ disas_data_proc_simd_fp(s, insn);
+ break;
+ default:
+ assert(FALSE); /* all 16 cases should be handled above */
+ break;
+ }
+
+ /* if we allocated any temporaries, free them here */
+ free_tmp_a64(s);
+}
+
+void gen_intermediate_code_a64(ARMCPU *cpu, TranslationBlock *tb)
+{
+ CPUState *cs = CPU(cpu);
+ CPUARMState *env = &cpu->env;
+ DisasContext dc1, *dc = &dc1;
+ target_ulong pc_start;
+ target_ulong next_page_start;
+ int num_insns;
+ int max_insns;
+
+ pc_start = tb->pc;
+
+ dc->tb = tb;
+
+ dc->is_jmp = DISAS_NEXT;
+ dc->pc = pc_start;
+ dc->singlestep_enabled = cs->singlestep_enabled;
+ dc->condjmp = 0;
+
+ dc->aarch64 = 1;
+ /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
+ * there is no secure EL1, so we route exceptions to EL3.
+ */
+ dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
+ !arm_el_is_aa64(env, 3);
+ dc->thumb = 0;
+ dc->sctlr_b = 0;
+ dc->be_data = ARM_TBFLAG_BE_DATA(tb->flags) ? MO_BE : MO_LE;
+ dc->condexec_mask = 0;
+ dc->condexec_cond = 0;
+ dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
+ dc->tbi0 = ARM_TBFLAG_TBI0(tb->flags);
+ dc->tbi1 = ARM_TBFLAG_TBI1(tb->flags);
+ dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
+#if !defined(CONFIG_USER_ONLY)
+ dc->user = (dc->current_el == 0);
+#endif
+ dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
+ dc->vec_len = 0;
+ dc->vec_stride = 0;
+ dc->cp_regs = cpu->cp_regs;
+ dc->features = env->features;
+
+ /* Single step state. The code-generation logic here is:
+ * SS_ACTIVE == 0:
+ * generate code with no special handling for single-stepping (except
+ * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
+ * this happens anyway because those changes are all system register or
+ * PSTATE writes).
+ * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
+ * emit code for one insn
+ * emit code to clear PSTATE.SS
+ * emit code to generate software step exception for completed step
+ * end TB (as usual for having generated an exception)
+ * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
+ * emit code to generate a software step exception
+ * end the TB
+ */
+ dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
+ dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
+ dc->is_ldex = false;
+ dc->ss_same_el = (arm_debug_target_el(env) == dc->current_el);
+
+ init_tmp_a64_array(dc);
+
+ next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
+ num_insns = 0;
+ max_insns = tb->cflags & CF_COUNT_MASK;
+ if (max_insns == 0) {
+ max_insns = CF_COUNT_MASK;
+ }
+ if (max_insns > TCG_MAX_INSNS) {
+ max_insns = TCG_MAX_INSNS;
+ }
+
+ gen_tb_start(tb);
+
+ tcg_clear_temp_count();
+
+ do {
+ dc->insn_start_idx = tcg_op_buf_count();
+ tcg_gen_insn_start(dc->pc, 0, 0);
+ num_insns++;
+
+ if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
+ CPUBreakpoint *bp;
+ QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
+ if (bp->pc == dc->pc) {
+ if (bp->flags & BP_CPU) {
+ gen_a64_set_pc_im(dc->pc);
+ gen_helper_check_breakpoints(cpu_env);
+ /* End the TB early; it likely won't be executed */
+ dc->is_jmp = DISAS_UPDATE;
+ } else {
+ gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
+ /* The address covered by the breakpoint must be
+ included in [tb->pc, tb->pc + tb->size) in order
+ for it to be properly cleared -- thus we
+ increment the PC here so that the logic setting
+ tb->size below does the right thing. */
+ dc->pc += 4;
+ goto done_generating;
+ }
+ break;
+ }
+ }
+ }
+
+ if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ gen_io_start();
+ }
+
+ if (dc->ss_active && !dc->pstate_ss) {
+ /* Singlestep state is Active-pending.
+ * If we're in this state at the start of a TB then either
+ * a) we just took an exception to an EL which is being debugged
+ * and this is the first insn in the exception handler
+ * b) debug exceptions were masked and we just unmasked them
+ * without changing EL (eg by clearing PSTATE.D)
+ * In either case we're going to take a swstep exception in the
+ * "did not step an insn" case, and so the syndrome ISV and EX
+ * bits should be zero.
+ */
+ assert(num_insns == 1);
+ gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
+ default_exception_el(dc));
+ dc->is_jmp = DISAS_EXC;
+ break;
+ }
+
+ disas_a64_insn(env, dc);
+
+ if (tcg_check_temp_count()) {
+ fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
+ dc->pc);
+ }
+
+ /* Translation stops when a conditional branch is encountered.
+ * Otherwise the subsequent code could get translated several times.
+ * Also stop translation when a page boundary is reached. This
+ * ensures prefetch aborts occur at the right place.
+ */
+ } while (!dc->is_jmp && !tcg_op_buf_full() &&
+ !cs->singlestep_enabled &&
+ !singlestep &&
+ !dc->ss_active &&
+ dc->pc < next_page_start &&
+ num_insns < max_insns);
+
+ if (tb->cflags & CF_LAST_IO) {
+ gen_io_end();
+ }
+
+ if (unlikely(cs->singlestep_enabled || dc->ss_active)
+ && dc->is_jmp != DISAS_EXC) {
+ /* Note that this means single stepping WFI doesn't halt the CPU.
+ * For conditional branch insns this is harmless unreachable code as
+ * gen_goto_tb() has already handled emitting the debug exception
+ * (and thus a tb-jump is not possible when singlestepping).
+ */
+ assert(dc->is_jmp != DISAS_TB_JUMP);
+ if (dc->is_jmp != DISAS_JUMP) {
+ gen_a64_set_pc_im(dc->pc);
+ }
+ if (cs->singlestep_enabled) {
+ gen_exception_internal(EXCP_DEBUG);
+ } else {
+ gen_step_complete_exception(dc);
+ }
+ } else {
+ switch (dc->is_jmp) {
+ case DISAS_NEXT:
+ gen_goto_tb(dc, 1, dc->pc);
+ break;
+ default:
+ case DISAS_UPDATE:
+ gen_a64_set_pc_im(dc->pc);
+ /* fall through */
+ case DISAS_JUMP:
+ /* indicate that the hash table must be used to find the next TB */
+ tcg_gen_exit_tb(0);
+ break;
+ case DISAS_TB_JUMP:
+ case DISAS_EXC:
+ case DISAS_SWI:
+ break;
+ case DISAS_WFE:
+ gen_a64_set_pc_im(dc->pc);
+ gen_helper_wfe(cpu_env);
+ break;
+ case DISAS_YIELD:
+ gen_a64_set_pc_im(dc->pc);
+ gen_helper_yield(cpu_env);
+ break;
+ case DISAS_WFI:
+ /* This is a special case because we don't want to just halt the CPU
+ * if trying to debug across a WFI.
+ */
+ gen_a64_set_pc_im(dc->pc);
+ gen_helper_wfi(cpu_env);
+ /* The helper doesn't necessarily throw an exception, but we
+ * must go back to the main loop to check for interrupts anyway.
+ */
+ tcg_gen_exit_tb(0);
+ break;
+ }
+ }
+
+done_generating:
+ gen_tb_end(tb, num_insns);
+
+#ifdef DEBUG_DISAS
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) &&
+ qemu_log_in_addr_range(pc_start)) {
+ qemu_log_lock();
+ qemu_log("----------------\n");
+ qemu_log("IN: %s\n", lookup_symbol(pc_start));
+ log_target_disas(cs, pc_start, dc->pc - pc_start,
+ 4 | (bswap_code(dc->sctlr_b) ? 2 : 0));
+ qemu_log("\n");
+ qemu_log_unlock();
+ }
+#endif
+ tb->size = dc->pc - pc_start;
+ tb->icount = num_insns;
+}
diff --git a/target/arm/translate.c b/target/arm/translate.c
new file mode 100644
index 0000000000..0ad9070b45
--- /dev/null
+++ b/target/arm/translate.c
@@ -0,0 +1,12055 @@
+/*
+ * ARM translation
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ * Copyright (c) 2005-2007 CodeSourcery
+ * Copyright (c) 2007 OpenedHand, Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "internals.h"
+#include "disas/disas.h"
+#include "exec/exec-all.h"
+#include "tcg-op.h"
+#include "qemu/log.h"
+#include "qemu/bitops.h"
+#include "arm_ldst.h"
+#include "exec/semihost.h"
+
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+
+#include "trace-tcg.h"
+#include "exec/log.h"
+
+
+#define ENABLE_ARCH_4T arm_dc_feature(s, ARM_FEATURE_V4T)
+#define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5)
+/* currently all emulated v5 cores are also v5TE, so don't bother */
+#define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5)
+#define ENABLE_ARCH_5J 0
+#define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6)
+#define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K)
+#define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2)
+#define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7)
+#define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8)
+
+#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
+
+#include "translate.h"
+
+#if defined(CONFIG_USER_ONLY)
+#define IS_USER(s) 1
+#else
+#define IS_USER(s) (s->user)
+#endif
+
+TCGv_env cpu_env;
+/* We reuse the same 64-bit temporaries for efficiency. */
+static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
+static TCGv_i32 cpu_R[16];
+TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
+TCGv_i64 cpu_exclusive_addr;
+TCGv_i64 cpu_exclusive_val;
+
+/* FIXME: These should be removed. */
+static TCGv_i32 cpu_F0s, cpu_F1s;
+static TCGv_i64 cpu_F0d, cpu_F1d;
+
+#include "exec/gen-icount.h"
+
+static const char *regnames[] =
+ { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
+
+/* initialize TCG globals. */
+void arm_translate_init(void)
+{
+ int i;
+
+ cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+ tcg_ctx.tcg_env = cpu_env;
+
+ for (i = 0; i < 16; i++) {
+ cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUARMState, regs[i]),
+ regnames[i]);
+ }
+ cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
+ cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
+ cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
+ cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");
+
+ cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
+ cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUARMState, exclusive_val), "exclusive_val");
+
+ a64_translate_init();
+}
+
+static inline ARMMMUIdx get_a32_user_mem_index(DisasContext *s)
+{
+ /* Return the mmu_idx to use for A32/T32 "unprivileged load/store"
+ * insns:
+ * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
+ * otherwise, access as if at PL0.
+ */
+ switch (s->mmu_idx) {
+ case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
+ case ARMMMUIdx_S12NSE0:
+ case ARMMMUIdx_S12NSE1:
+ return ARMMMUIdx_S12NSE0;
+ case ARMMMUIdx_S1E3:
+ case ARMMMUIdx_S1SE0:
+ case ARMMMUIdx_S1SE1:
+ return ARMMMUIdx_S1SE0;
+ case ARMMMUIdx_S2NS:
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static inline TCGv_i32 load_cpu_offset(int offset)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_ld_i32(tmp, cpu_env, offset);
+ return tmp;
+}
+
+#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
+
+static inline void store_cpu_offset(TCGv_i32 var, int offset)
+{
+ tcg_gen_st_i32(var, cpu_env, offset);
+ tcg_temp_free_i32(var);
+}
+
+#define store_cpu_field(var, name) \
+ store_cpu_offset(var, offsetof(CPUARMState, name))
+
+/* Set a variable to the value of a CPU register. */
+static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
+{
+ if (reg == 15) {
+ uint32_t addr;
+ /* s->pc has already been advanced past this insn, so we only need to add one more insn's length here. */
+ if (s->thumb)
+ addr = (long)s->pc + 2;
+ else
+ addr = (long)s->pc + 4;
+ tcg_gen_movi_i32(var, addr);
+ } else {
+ tcg_gen_mov_i32(var, cpu_R[reg]);
+ }
+}
+
+/* Create a new temporary and set it to the value of a CPU register. */
+static inline TCGv_i32 load_reg(DisasContext *s, int reg)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ load_reg_var(s, tmp, reg);
+ return tmp;
+}
+
+/* Set a CPU register. The source must be a temporary and will be
+ marked as dead. */
+static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
+{
+ if (reg == 15) {
+ /* In Thumb mode, we must ignore bit 0.
+ * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
+ * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
+ * We choose to ignore [1:0] in ARM mode for all architecture versions.
+ */
+ tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
+ s->is_jmp = DISAS_JUMP;
+ }
+ tcg_gen_mov_i32(cpu_R[reg], var);
+ tcg_temp_free_i32(var);
+}
+
+/* Value extensions. */
+#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
+#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
+#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
+#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
+
+#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
+#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
+
+
+static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
+{
+ TCGv_i32 tmp_mask = tcg_const_i32(mask);
+ gen_helper_cpsr_write(cpu_env, var, tmp_mask);
+ tcg_temp_free_i32(tmp_mask);
+}
+/* Set NZCV flags from the high 4 bits of var. */
+#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
+
+static void gen_exception_internal(int excp)
+{
+ TCGv_i32 tcg_excp = tcg_const_i32(excp);
+
+ assert(excp_is_internal(excp));
+ gen_helper_exception_internal(cpu_env, tcg_excp);
+ tcg_temp_free_i32(tcg_excp);
+}
+
+static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
+{
+ TCGv_i32 tcg_excp = tcg_const_i32(excp);
+ TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
+ TCGv_i32 tcg_el = tcg_const_i32(target_el);
+
+ gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
+ tcg_syn, tcg_el);
+
+ tcg_temp_free_i32(tcg_el);
+ tcg_temp_free_i32(tcg_syn);
+ tcg_temp_free_i32(tcg_excp);
+}
+
+static void gen_ss_advance(DisasContext *s)
+{
+ /* If the singlestep state is Active-not-pending, advance to
+ * Active-pending.
+ */
+ if (s->ss_active) {
+ s->pstate_ss = 0;
+ gen_helper_clear_pstate_ss(cpu_env);
+ }
+}
+
+static void gen_step_complete_exception(DisasContext *s)
+{
+ /* We just completed step of an insn. Move from Active-not-pending
+ * to Active-pending, and then also take the swstep exception.
+ * This corresponds to making the (IMPDEF) choice to prioritize
+ * swstep exceptions over asynchronous exceptions taken to an exception
+ * level where debug is disabled. This choice has the advantage that
+ * we do not need to maintain internal state corresponding to the
+ * ISV/EX syndrome bits between completion of the step and generation
+ * of the exception, and our syndrome information is always correct.
+ */
+ gen_ss_advance(s);
+ gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
+ default_exception_el(s));
+ s->is_jmp = DISAS_EXC;
+}
+
+static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 tmp1 = tcg_temp_new_i32();
+ TCGv_i32 tmp2 = tcg_temp_new_i32();
+ tcg_gen_ext16s_i32(tmp1, a);
+ tcg_gen_ext16s_i32(tmp2, b);
+ tcg_gen_mul_i32(tmp1, tmp1, tmp2);
+ tcg_temp_free_i32(tmp2);
+ tcg_gen_sari_i32(a, a, 16);
+ tcg_gen_sari_i32(b, b, 16);
+ tcg_gen_mul_i32(b, b, a);
+ tcg_gen_mov_i32(a, tmp1);
+ tcg_temp_free_i32(tmp1);
+}
+
+/* Byteswap each halfword. */
+static void gen_rev16(TCGv_i32 var)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_shri_i32(tmp, var, 8);
+ tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
+ tcg_gen_shli_i32(var, var, 8);
+ tcg_gen_andi_i32(var, var, 0xff00ff00);
+ tcg_gen_or_i32(var, var, tmp);
+ tcg_temp_free_i32(tmp);
+}
+
+/* Byteswap low halfword and sign extend. */
+static void gen_revsh(TCGv_i32 var)
+{
+ tcg_gen_ext16u_i32(var, var);
+ tcg_gen_bswap16_i32(var, var);
+ tcg_gen_ext16s_i32(var, var);
+}
+
+/* Unsigned bitfield extract. */
+static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
+{
+ if (shift)
+ tcg_gen_shri_i32(var, var, shift);
+ tcg_gen_andi_i32(var, var, mask);
+}
+
+/* Signed bitfield extract. */
+static void gen_sbfx(TCGv_i32 var, int shift, int width)
+{
+ uint32_t signbit;
+
+ if (shift)
+ tcg_gen_sari_i32(var, var, shift);
+ if (shift + width < 32) {
+ signbit = 1u << (width - 1);
+ tcg_gen_andi_i32(var, var, (1u << width) - 1);
+ tcg_gen_xori_i32(var, var, signbit);
+ tcg_gen_subi_i32(var, var, signbit);
+ }
+}
+
+/* Return (b << 32) + a. Mark inputs as dead */
+static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
+{
+ TCGv_i64 tmp64 = tcg_temp_new_i64();
+
+ tcg_gen_extu_i32_i64(tmp64, b);
+ tcg_temp_free_i32(b);
+ tcg_gen_shli_i64(tmp64, tmp64, 32);
+ tcg_gen_add_i64(a, tmp64, a);
+
+ tcg_temp_free_i64(tmp64);
+ return a;
+}
+
+/* Return (b << 32) - a. Mark inputs as dead. */
+static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
+{
+ TCGv_i64 tmp64 = tcg_temp_new_i64();
+
+ tcg_gen_extu_i32_i64(tmp64, b);
+ tcg_temp_free_i32(b);
+ tcg_gen_shli_i64(tmp64, tmp64, 32);
+ tcg_gen_sub_i64(a, tmp64, a);
+
+ tcg_temp_free_i64(tmp64);
+ return a;
+}
+
+/* 32x32->64 multiply. Marks inputs as dead. */
+static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 lo = tcg_temp_new_i32();
+ TCGv_i32 hi = tcg_temp_new_i32();
+ TCGv_i64 ret;
+
+ tcg_gen_mulu2_i32(lo, hi, a, b);
+ tcg_temp_free_i32(a);
+ tcg_temp_free_i32(b);
+
+ ret = tcg_temp_new_i64();
+ tcg_gen_concat_i32_i64(ret, lo, hi);
+ tcg_temp_free_i32(lo);
+ tcg_temp_free_i32(hi);
+
+ return ret;
+}
+
+static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 lo = tcg_temp_new_i32();
+ TCGv_i32 hi = tcg_temp_new_i32();
+ TCGv_i64 ret;
+
+ tcg_gen_muls2_i32(lo, hi, a, b);
+ tcg_temp_free_i32(a);
+ tcg_temp_free_i32(b);
+
+ ret = tcg_temp_new_i64();
+ tcg_gen_concat_i32_i64(ret, lo, hi);
+ tcg_temp_free_i32(lo);
+ tcg_temp_free_i32(hi);
+
+ return ret;
+}
+
+/* Swap low and high halfwords. */
+static void gen_swap_half(TCGv_i32 var)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_shri_i32(tmp, var, 16);
+ tcg_gen_shli_i32(var, var, 16);
+ tcg_gen_or_i32(var, var, tmp);
+ tcg_temp_free_i32(tmp);
+}
+
+/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
+ tmp = (t0 ^ t1) & 0x8000;
+ t0 &= ~0x8000;
+ t1 &= ~0x8000;
+ t0 = (t0 + t1) ^ tmp;
+ */
+
+static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_xor_i32(tmp, t0, t1);
+ tcg_gen_andi_i32(tmp, tmp, 0x8000);
+ tcg_gen_andi_i32(t0, t0, ~0x8000);
+ tcg_gen_andi_i32(t1, t1, ~0x8000);
+ tcg_gen_add_i32(t0, t0, t1);
+ tcg_gen_xor_i32(t0, t0, tmp);
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(t1);
+}
+
+/* Set CF to the top bit of var. */
+static void gen_set_CF_bit31(TCGv_i32 var)
+{
+ tcg_gen_shri_i32(cpu_CF, var, 31);
+}
+
+/* Set N and Z flags from var. */
+static inline void gen_logic_CC(TCGv_i32 var)
+{
+ tcg_gen_mov_i32(cpu_NF, var);
+ tcg_gen_mov_i32(cpu_ZF, var);
+}
+
+/* T0 += T1 + CF. */
+static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
+{
+ tcg_gen_add_i32(t0, t0, t1);
+ tcg_gen_add_i32(t0, t0, cpu_CF);
+}
+
+/* dest = T0 + T1 + CF. */
+static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
+{
+ tcg_gen_add_i32(dest, t0, t1);
+ tcg_gen_add_i32(dest, dest, cpu_CF);
+}
+
+/* dest = T0 - T1 + CF - 1. */
+static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
+{
+ tcg_gen_sub_i32(dest, t0, t1);
+ tcg_gen_add_i32(dest, dest, cpu_CF);
+ tcg_gen_subi_i32(dest, dest, 1);
+}
+
+/* dest = T0 + T1. Compute C, N, V and Z flags */
+static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_movi_i32(tmp, 0);
+ tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
+ tcg_gen_mov_i32(cpu_ZF, cpu_NF);
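+ /* Overflow occurs when the operands have the same sign but the result's sign differs: VF = (result ^ t0) & ~(t0 ^ t1), with the flag in bit 31. */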
+ tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
+ tcg_gen_xor_i32(tmp, t0, t1);
+ tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
+ tcg_temp_free_i32(tmp);
+ tcg_gen_mov_i32(dest, cpu_NF);
+}
+
+/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
+static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ if (TCG_TARGET_HAS_add2_i32) {
+ tcg_gen_movi_i32(tmp, 0);
+ tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
+ tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
+ } else {
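+ /* Backend lacks add2: do the addition in 64 bits and split the result into NF (low 32 bits) and CF (the carry-out). */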
+ TCGv_i64 q0 = tcg_temp_new_i64();
+ TCGv_i64 q1 = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(q0, t0);
+ tcg_gen_extu_i32_i64(q1, t1);
+ tcg_gen_add_i64(q0, q0, q1);
+ tcg_gen_extu_i32_i64(q1, cpu_CF);
+ tcg_gen_add_i64(q0, q0, q1);
+ tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
+ tcg_temp_free_i64(q0);
+ tcg_temp_free_i64(q1);
+ }
+ tcg_gen_mov_i32(cpu_ZF, cpu_NF);
+ tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
+ tcg_gen_xor_i32(tmp, t0, t1);
+ tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
+ tcg_temp_free_i32(tmp);
+ tcg_gen_mov_i32(dest, cpu_NF);
+}
+
+/* dest = T0 - T1. Compute C, N, V and Z flags */
+static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
+{
+ TCGv_i32 tmp;
+ tcg_gen_sub_i32(cpu_NF, t0, t1);
+ tcg_gen_mov_i32(cpu_ZF, cpu_NF);
+ tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
+ tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
+ tmp = tcg_temp_new_i32();
+ tcg_gen_xor_i32(tmp, t0, t1);
+ tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
+ tcg_temp_free_i32(tmp);
+ tcg_gen_mov_i32(dest, cpu_NF);
+}
+
+/* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
+static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_not_i32(tmp, t1);
+ gen_adc_CC(dest, t0, tmp);
+ tcg_temp_free_i32(tmp);
+}
+
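+ /* Variable-count LSL/LSR: ARM uses the low byte of the shift register and the result is 0 when that count exceeds 31; the movcond below substitutes a zero operand in that case before masking the count to 5 bits for TCG. */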
+#define GEN_SHIFT(name) \
+static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
+{ \
+ TCGv_i32 tmp1, tmp2, tmp3; \
+ tmp1 = tcg_temp_new_i32(); \
+ tcg_gen_andi_i32(tmp1, t1, 0xff); \
+ tmp2 = tcg_const_i32(0); \
+ tmp3 = tcg_const_i32(0x1f); \
+ tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
+ tcg_temp_free_i32(tmp3); \
+ tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
+ tcg_gen_##name##_i32(dest, tmp2, tmp1); \
+ tcg_temp_free_i32(tmp2); \
+ tcg_temp_free_i32(tmp1); \
+}
+GEN_SHIFT(shl)
+GEN_SHIFT(shr)
+#undef GEN_SHIFT
+
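+ /* Variable-count ASR: counts above 31 are clamped to 31, which yields the same all-sign-bits result. */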
+static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
+{
+ TCGv_i32 tmp1, tmp2;
+ tmp1 = tcg_temp_new_i32();
+ tcg_gen_andi_i32(tmp1, t1, 0xff);
+ tmp2 = tcg_const_i32(0x1f);
+ tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
+ tcg_temp_free_i32(tmp2);
+ tcg_gen_sar_i32(dest, t0, tmp1);
+ tcg_temp_free_i32(tmp1);
+}
+
+static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
+{
+ TCGv_i32 c0 = tcg_const_i32(0);
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_neg_i32(tmp, src);
+ tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
+ tcg_temp_free_i32(c0);
+ tcg_temp_free_i32(tmp);
+}
+
+static void shifter_out_im(TCGv_i32 var, int shift)
+{
+ if (shift == 0) {
+ tcg_gen_andi_i32(cpu_CF, var, 1);
+ } else {
+ tcg_gen_shri_i32(cpu_CF, var, shift);
+ if (shift != 31) {
+ tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
+ }
+ }
+}
+
+/* Shift by immediate. Includes special handling for shift == 0. */
+static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
+ int shift, int flags)
+{
+ switch (shiftop) {
+ case 0: /* LSL */
+ if (shift != 0) {
+ if (flags)
+ shifter_out_im(var, 32 - shift);
+ tcg_gen_shli_i32(var, var, shift);
+ }
+ break;
+ case 1: /* LSR */
+ if (shift == 0) {
+ if (flags) {
+ tcg_gen_shri_i32(cpu_CF, var, 31);
+ }
+ tcg_gen_movi_i32(var, 0);
+ } else {
+ if (flags)
+ shifter_out_im(var, shift - 1);
+ tcg_gen_shri_i32(var, var, shift);
+ }
+ break;
+ case 2: /* ASR */
+ if (shift == 0)
+ shift = 32;
+ if (flags)
+ shifter_out_im(var, shift - 1);
+ if (shift == 32)
+ shift = 31;
+ tcg_gen_sari_i32(var, var, shift);
+ break;
+ case 3: /* ROR/RRX */
+ if (shift != 0) {
+ if (flags)
+ shifter_out_im(var, shift - 1);
+ tcg_gen_rotri_i32(var, var, shift); break;
+ } else {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_shli_i32(tmp, cpu_CF, 31);
+ if (flags)
+ shifter_out_im(var, 0);
+ tcg_gen_shri_i32(var, var, 1);
+ tcg_gen_or_i32(var, var, tmp);
+ tcg_temp_free_i32(tmp);
+ }
+ }
+}
+
+static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
+ TCGv_i32 shift, int flags)
+{
+ if (flags) {
+ switch (shiftop) {
+ case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
+ case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
+ case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
+ case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
+ }
+ } else {
+ switch (shiftop) {
+ case 0:
+ gen_shl(var, var, shift);
+ break;
+ case 1:
+ gen_shr(var, var, shift);
+ break;
+ case 2:
+ gen_sar(var, var, shift);
+ break;
+ case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
+ tcg_gen_rotr_i32(var, var, shift); break;
+ }
+ }
+ tcg_temp_free_i32(shift);
+}
+
+#define PAS_OP(pfx) \
+ switch (op2) { \
+ case 0: gen_pas_helper(glue(pfx,add16)); break; \
+ case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
+ case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
+ case 3: gen_pas_helper(glue(pfx,sub16)); break; \
+ case 4: gen_pas_helper(glue(pfx,add8)); break; \
+ case 7: gen_pas_helper(glue(pfx,sub8)); break; \
+ }
+static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_ptr tmp;
+
+ switch (op1) {
+#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
+ case 1:
+ tmp = tcg_temp_new_ptr();
+ tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
+ PAS_OP(s)
+ tcg_temp_free_ptr(tmp);
+ break;
+ case 5:
+ tmp = tcg_temp_new_ptr();
+ tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
+ PAS_OP(u)
+ tcg_temp_free_ptr(tmp);
+ break;
+#undef gen_pas_helper
+#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
+ case 2:
+ PAS_OP(q);
+ break;
+ case 3:
+ PAS_OP(sh);
+ break;
+ case 6:
+ PAS_OP(uq);
+ break;
+ case 7:
+ PAS_OP(uh);
+ break;
+#undef gen_pas_helper
+ }
+}
+#undef PAS_OP
+
+/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
+#define PAS_OP(pfx) \
+ switch (op1) { \
+ case 0: gen_pas_helper(glue(pfx,add8)); break; \
+ case 1: gen_pas_helper(glue(pfx,add16)); break; \
+ case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
+ case 4: gen_pas_helper(glue(pfx,sub8)); break; \
+ case 5: gen_pas_helper(glue(pfx,sub16)); break; \
+ case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
+ }
+static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_ptr tmp;
+
+ switch (op2) {
+#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
+ case 0:
+ tmp = tcg_temp_new_ptr();
+ tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
+ PAS_OP(s)
+ tcg_temp_free_ptr(tmp);
+ break;
+ case 4:
+ tmp = tcg_temp_new_ptr();
+ tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
+ PAS_OP(u)
+ tcg_temp_free_ptr(tmp);
+ break;
+#undef gen_pas_helper
+#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
+ case 1:
+ PAS_OP(q);
+ break;
+ case 2:
+ PAS_OP(sh);
+ break;
+ case 5:
+ PAS_OP(uq);
+ break;
+ case 6:
+ PAS_OP(uh);
+ break;
+#undef gen_pas_helper
+ }
+}
+#undef PAS_OP
+
+/*
+ * Generate a conditional based on ARM condition code cc.
+ * This is common between ARM and AArch64 targets.
+ */
+void arm_test_cc(DisasCompare *cmp, int cc)
+{
+ TCGv_i32 value;
+ TCGCond cond;
+ bool global = true;
+
+ switch (cc) {
+ case 0: /* eq: Z */
+ case 1: /* ne: !Z */
+ cond = TCG_COND_EQ;
+ value = cpu_ZF;
+ break;
+
+ case 2: /* cs: C */
+ case 3: /* cc: !C */
+ cond = TCG_COND_NE;
+ value = cpu_CF;
+ break;
+
+ case 4: /* mi: N */
+ case 5: /* pl: !N */
+ cond = TCG_COND_LT;
+ value = cpu_NF;
+ break;
+
+ case 6: /* vs: V */
+ case 7: /* vc: !V */
+ cond = TCG_COND_LT;
+ value = cpu_VF;
+ break;
+
+ case 8: /* hi: C && !Z */
+ case 9: /* ls: !C || Z -> !(C && !Z) */
+ cond = TCG_COND_NE;
+ value = tcg_temp_new_i32();
+ global = false;
+ /* CF is 1 for C, so -CF is an all-bits-set mask for C;
+ ZF is non-zero for !Z; so AND the two subexpressions. */
+ tcg_gen_neg_i32(value, cpu_CF);
+ tcg_gen_and_i32(value, value, cpu_ZF);
+ break;
+
+ case 10: /* ge: N == V -> N ^ V == 0 */
+ case 11: /* lt: N != V -> N ^ V != 0 */
+ /* Since we're only interested in the sign bit, == 0 is >= 0. */
+ cond = TCG_COND_GE;
+ value = tcg_temp_new_i32();
+ global = false;
+ tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
+ break;
+
+ case 12: /* gt: !Z && N == V */
+ case 13: /* le: Z || N != V */
+ cond = TCG_COND_NE;
+ value = tcg_temp_new_i32();
+ global = false;
+ /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
+ * the sign bit then AND with ZF to yield the result. */
+ tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
+ tcg_gen_sari_i32(value, value, 31);
+ tcg_gen_andc_i32(value, cpu_ZF, value);
+ break;
+
+ case 14: /* always */
+ case 15: /* always */
+ /* Use the ALWAYS condition, which will fold early.
+ * It doesn't matter what we use for the value. */
+ cond = TCG_COND_ALWAYS;
+ value = cpu_ZF;
+ goto no_invert;
+
+ default:
+ fprintf(stderr, "Bad condition code 0x%x\n", cc);
+ abort();
+ }
+
+ if (cc & 1) {
+ cond = tcg_invert_cond(cond);
+ }
+
+ no_invert:
+ cmp->cond = cond;
+ cmp->value = value;
+ cmp->value_global = global;
+}
+
+void arm_free_cc(DisasCompare *cmp)
+{
+ if (!cmp->value_global) {
+ tcg_temp_free_i32(cmp->value);
+ }
+}
+
+void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
+{
+ tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
+}
+
+void arm_gen_test_cc(int cc, TCGLabel *label)
+{
+ DisasCompare cmp;
+ arm_test_cc(&cmp, cc);
+ arm_jump_cc(&cmp, label);
+ arm_free_cc(&cmp);
+}
+
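+/* Indexed by the data-processing opcode: 1 for the logical operations
+ * (AND, EOR, TST, TEQ, ORR, MOV, BIC, MVN), whose condition flags, when
+ * S is set, come from the result and the shifter carry-out rather than
+ * from an arithmetic carry or overflow. */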
+static const uint8_t table_logic_cc[16] = {
+ 1, /* and */
+ 1, /* xor */
+ 0, /* sub */
+ 0, /* rsb */
+ 0, /* add */
+ 0, /* adc */
+ 0, /* sbc */
+ 0, /* rsc */
+ 1, /* andl */
+ 1, /* xorl */
+ 0, /* cmp */
+ 0, /* cmn */
+ 1, /* orr */
+ 1, /* mov */
+ 1, /* bic */
+ 1, /* mvn */
+};
+
+/* Set PC and Thumb state from an immediate address. */
+static inline void gen_bx_im(DisasContext *s, uint32_t addr)
+{
+ TCGv_i32 tmp;
+
+ s->is_jmp = DISAS_JUMP;
+ if (s->thumb != (addr & 1)) {
+ tmp = tcg_temp_new_i32();
+ tcg_gen_movi_i32(tmp, addr & 1);
+ tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
+ tcg_temp_free_i32(tmp);
+ }
+ tcg_gen_movi_i32(cpu_R[15], addr & ~1);
+}
+
+/* Set PC and Thumb state from var. var is marked as dead. */
+static inline void gen_bx(DisasContext *s, TCGv_i32 var)
+{
+ s->is_jmp = DISAS_JUMP;
+ tcg_gen_andi_i32(cpu_R[15], var, ~1);
+ tcg_gen_andi_i32(var, var, 1);
+ store_cpu_field(var, thumb);
+}
+
+/* Variant of store_reg which uses branch&exchange logic when storing
+ to r15 in ARM architecture v7 and above. The source must be a temporary
+ and will be marked as dead. */
+static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
+{
+ if (reg == 15 && ENABLE_ARCH_7) {
+ gen_bx(s, var);
+ } else {
+ store_reg(s, reg, var);
+ }
+}
+
+/* Variant of store_reg which uses branch&exchange logic when storing
+ * to r15 in ARM architecture v5T and above. This is used for storing
+ * the results of a LDR/LDM/POP into r15, and corresponds to the cases
+ * in the ARM ARM which use the LoadWritePC() pseudocode function. */
+static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
+{
+ if (reg == 15 && ENABLE_ARCH_5) {
+ gen_bx(s, var);
+ } else {
+ store_reg(s, reg, var);
+ }
+}
+
+#ifdef CONFIG_USER_ONLY
+#define IS_USER_ONLY 1
+#else
+#define IS_USER_ONLY 0
+#endif
+
+/* Abstractions of "generate code to do a guest load/store for
+ * AArch32", where a vaddr is always 32 bits (and is zero
+ * extended if we're a 64 bit core) and data is also
+ * 32 bits unless specifically doing a 64 bit access.
+ * These functions work like tcg_gen_qemu_{ld,st}* except
+ * that the address argument is TCGv_i32 rather than TCGv.
+ */
+
+static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, TCGMemOp op)
+{
+ TCGv addr = tcg_temp_new();
+ tcg_gen_extu_i32_tl(addr, a32);
+
+ /* Not needed for user-mode BE32, where we use MO_BE instead. */
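+    /* With SCTLR.B set, a sub-word access is redirected to its mirror
+     * position within the aligned word by XORing the address with
+     * (4 - access size in bytes): e.g. a byte load from 0x1001 goes to
+     * 0x1002, and a halfword load from 0x1000 also goes to 0x1002. */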
+ if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
+ tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
+ }
+ return addr;
+}
+
+static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
+ int index, TCGMemOp opc)
+{
+ TCGv addr = gen_aa32_addr(s, a32, opc);
+ tcg_gen_qemu_ld_i32(val, addr, index, opc);
+ tcg_temp_free(addr);
+}
+
+static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
+ int index, TCGMemOp opc)
+{
+ TCGv addr = gen_aa32_addr(s, a32, opc);
+ tcg_gen_qemu_st_i32(val, addr, index, opc);
+ tcg_temp_free(addr);
+}
+
+#define DO_GEN_LD(SUFF, OPC) \
+static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
+ TCGv_i32 a32, int index) \
+{ \
+ gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \
+}
+
+#define DO_GEN_ST(SUFF, OPC) \
+static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
+ TCGv_i32 a32, int index) \
+{ \
+ gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \
+}
+
+static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
+{
+ /* Not needed for user-mode BE32, where we use MO_BE instead. */
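+    /* Rotating the 64-bit value by 32 bits swaps its two 32-bit words,
+     * which is the word order a BE32 guest expects for a doubleword
+     * access. */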
+ if (!IS_USER_ONLY && s->sctlr_b) {
+ tcg_gen_rotri_i64(val, val, 32);
+ }
+}
+
+static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
+ int index, TCGMemOp opc)
+{
+ TCGv addr = gen_aa32_addr(s, a32, opc);
+ tcg_gen_qemu_ld_i64(val, addr, index, opc);
+ gen_aa32_frob64(s, val);
+ tcg_temp_free(addr);
+}
+
+static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
+ TCGv_i32 a32, int index)
+{
+ gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
+}
+
+static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
+ int index, TCGMemOp opc)
+{
+ TCGv addr = gen_aa32_addr(s, a32, opc);
+
+ /* Not needed for user-mode BE32, where we use MO_BE instead. */
+ if (!IS_USER_ONLY && s->sctlr_b) {
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ tcg_gen_rotri_i64(tmp, val, 32);
+ tcg_gen_qemu_st_i64(tmp, addr, index, opc);
+ tcg_temp_free_i64(tmp);
+ } else {
+ tcg_gen_qemu_st_i64(val, addr, index, opc);
+ }
+ tcg_temp_free(addr);
+}
+
+static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
+ TCGv_i32 a32, int index)
+{
+ gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
+}
+
+DO_GEN_LD(8s, MO_SB)
+DO_GEN_LD(8u, MO_UB)
+DO_GEN_LD(16s, MO_SW)
+DO_GEN_LD(16u, MO_UW)
+DO_GEN_LD(32u, MO_UL)
+DO_GEN_ST(8, MO_UB)
+DO_GEN_ST(16, MO_UW)
+DO_GEN_ST(32, MO_UL)
+
+static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
+{
+ tcg_gen_movi_i32(cpu_R[15], val);
+}
+
+static inline void gen_hvc(DisasContext *s, int imm16)
+{
+ /* The pre HVC helper handles cases when HVC gets trapped
+     * as an undefined insn by runtime configuration (i.e. before
+ * the insn really executes).
+ */
+ gen_set_pc_im(s, s->pc - 4);
+ gen_helper_pre_hvc(cpu_env);
+ /* Otherwise we will treat this as a real exception which
+ * happens after execution of the insn. (The distinction matters
+ * for the PC value reported to the exception handler and also
+ * for single stepping.)
+ */
+ s->svc_imm = imm16;
+ gen_set_pc_im(s, s->pc);
+ s->is_jmp = DISAS_HVC;
+}
+
+static inline void gen_smc(DisasContext *s)
+{
+ /* As with HVC, we may take an exception either before or after
+ * the insn executes.
+ */
+ TCGv_i32 tmp;
+
+ gen_set_pc_im(s, s->pc - 4);
+ tmp = tcg_const_i32(syn_aa32_smc());
+ gen_helper_pre_smc(cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+ gen_set_pc_im(s, s->pc);
+ s->is_jmp = DISAS_SMC;
+}
+
+static inline void gen_set_condexec(DisasContext *s)
+{
+ if (s->condexec_mask) {
+ uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_movi_i32(tmp, val);
+ store_cpu_field(tmp, condexec_bits);
+ }
+}
+
+static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
+{
+ gen_set_condexec(s);
+ gen_set_pc_im(s, s->pc - offset);
+ gen_exception_internal(excp);
+ s->is_jmp = DISAS_JUMP;
+}
+
+static void gen_exception_insn(DisasContext *s, int offset, int excp,
+ int syn, uint32_t target_el)
+{
+ gen_set_condexec(s);
+ gen_set_pc_im(s, s->pc - offset);
+ gen_exception(excp, syn, target_el);
+ s->is_jmp = DISAS_JUMP;
+}
+
+/* Force a TB lookup after an instruction that changes the CPU state. */
+static inline void gen_lookup_tb(DisasContext *s)
+{
+ tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
+ s->is_jmp = DISAS_JUMP;
+}
+
+static inline void gen_hlt(DisasContext *s, int imm)
+{
+ /* HLT. This has two purposes.
+ * Architecturally, it is an external halting debug instruction.
+     * Since QEMU doesn't implement external debug, we treat this as
+     * the architecture requires when halting debug is disabled: it will UNDEF.
+ * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
+ * and "HLT 0xF000" is an A32 semihosting syscall. These traps
+ * must trigger semihosting even for ARMv7 and earlier, where
+ * HLT was an undefined encoding.
+ * In system mode, we don't allow userspace access to
+ * semihosting, to provide some semblance of security
+ * (and for consistency with our 32-bit semihosting).
+ */
+ if (semihosting_enabled() &&
+#ifndef CONFIG_USER_ONLY
+ s->current_el != 0 &&
+#endif
+ (imm == (s->thumb ? 0x3c : 0xf000))) {
+ gen_exception_internal_insn(s, 0, EXCP_SEMIHOST);
+ return;
+ }
+
+ gen_exception_insn(s, s->thumb ? 2 : 4, EXCP_UDEF, syn_uncategorized(),
+ default_exception_el(s));
+}
+
+static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
+ TCGv_i32 var)
+{
+ int val, rm, shift, shiftop;
+ TCGv_i32 offset;
+
+ if (!(insn & (1 << 25))) {
+ /* immediate */
+ val = insn & 0xfff;
+ if (!(insn & (1 << 23)))
+ val = -val;
+ if (val != 0)
+ tcg_gen_addi_i32(var, var, val);
+ } else {
+ /* shift/register */
+ rm = (insn) & 0xf;
+ shift = (insn >> 7) & 0x1f;
+ shiftop = (insn >> 5) & 3;
+ offset = load_reg(s, rm);
+ gen_arm_shift_im(offset, shiftop, shift, 0);
+ if (!(insn & (1 << 23)))
+ tcg_gen_sub_i32(var, var, offset);
+ else
+ tcg_gen_add_i32(var, var, offset);
+ tcg_temp_free_i32(offset);
+ }
+}
+
+static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
+ int extra, TCGv_i32 var)
+{
+ int val, rm;
+ TCGv_i32 offset;
+
+ if (insn & (1 << 22)) {
+ /* immediate */
+ val = (insn & 0xf) | ((insn >> 4) & 0xf0);
+ if (!(insn & (1 << 23)))
+ val = -val;
+ val += extra;
+ if (val != 0)
+ tcg_gen_addi_i32(var, var, val);
+ } else {
+ /* register */
+ if (extra)
+ tcg_gen_addi_i32(var, var, extra);
+ rm = (insn) & 0xf;
+ offset = load_reg(s, rm);
+ if (!(insn & (1 << 23)))
+ tcg_gen_sub_i32(var, var, offset);
+ else
+ tcg_gen_add_i32(var, var, offset);
+ tcg_temp_free_i32(offset);
+ }
+}
+
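+/* Neon arithmetic uses the "standard FPSCR value" float_status (which has
+ * flush-to-zero and default-NaN behaviour), while VFP arithmetic uses the
+ * float_status controlled by the guest's FPSCR. */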
+static TCGv_ptr get_fpstatus_ptr(int neon)
+{
+ TCGv_ptr statusptr = tcg_temp_new_ptr();
+ int offset;
+ if (neon) {
+ offset = offsetof(CPUARMState, vfp.standard_fp_status);
+ } else {
+ offset = offsetof(CPUARMState, vfp.fp_status);
+ }
+ tcg_gen_addi_ptr(statusptr, cpu_env, offset);
+ return statusptr;
+}
+
+#define VFP_OP2(name) \
+static inline void gen_vfp_##name(int dp) \
+{ \
+ TCGv_ptr fpst = get_fpstatus_ptr(0); \
+ if (dp) { \
+ gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
+ } else { \
+ gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
+ } \
+ tcg_temp_free_ptr(fpst); \
+}
+
+VFP_OP2(add)
+VFP_OP2(sub)
+VFP_OP2(mul)
+VFP_OP2(div)
+
+#undef VFP_OP2
+
+static inline void gen_vfp_F1_mul(int dp)
+{
+ /* Like gen_vfp_mul() but put result in F1 */
+ TCGv_ptr fpst = get_fpstatus_ptr(0);
+ if (dp) {
+ gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
+ } else {
+ gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
+ }
+ tcg_temp_free_ptr(fpst);
+}
+
+static inline void gen_vfp_F1_neg(int dp)
+{
+ /* Like gen_vfp_neg() but put result in F1 */
+ if (dp) {
+ gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
+ } else {
+ gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
+ }
+}
+
+static inline void gen_vfp_abs(int dp)
+{
+ if (dp)
+ gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
+ else
+ gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
+}
+
+static inline void gen_vfp_neg(int dp)
+{
+ if (dp)
+ gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
+ else
+ gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
+}
+
+static inline void gen_vfp_sqrt(int dp)
+{
+ if (dp)
+ gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
+ else
+ gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
+}
+
+static inline void gen_vfp_cmp(int dp)
+{
+ if (dp)
+ gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
+ else
+ gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
+}
+
+static inline void gen_vfp_cmpe(int dp)
+{
+ if (dp)
+ gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
+ else
+ gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
+}
+
+static inline void gen_vfp_F1_ld0(int dp)
+{
+ if (dp)
+ tcg_gen_movi_i64(cpu_F1d, 0);
+ else
+ tcg_gen_movi_i32(cpu_F1s, 0);
+}
+
+#define VFP_GEN_ITOF(name) \
+static inline void gen_vfp_##name(int dp, int neon) \
+{ \
+ TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
+ if (dp) { \
+ gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
+ } else { \
+ gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
+ } \
+ tcg_temp_free_ptr(statusptr); \
+}
+
+VFP_GEN_ITOF(uito)
+VFP_GEN_ITOF(sito)
+#undef VFP_GEN_ITOF
+
+#define VFP_GEN_FTOI(name) \
+static inline void gen_vfp_##name(int dp, int neon) \
+{ \
+ TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
+ if (dp) { \
+ gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
+ } else { \
+ gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
+ } \
+ tcg_temp_free_ptr(statusptr); \
+}
+
+VFP_GEN_FTOI(toui)
+VFP_GEN_FTOI(touiz)
+VFP_GEN_FTOI(tosi)
+VFP_GEN_FTOI(tosiz)
+#undef VFP_GEN_FTOI
+
+#define VFP_GEN_FIX(name, round) \
+static inline void gen_vfp_##name(int dp, int shift, int neon) \
+{ \
+ TCGv_i32 tmp_shift = tcg_const_i32(shift); \
+ TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
+ if (dp) { \
+ gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
+ statusptr); \
+ } else { \
+ gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
+ statusptr); \
+ } \
+ tcg_temp_free_i32(tmp_shift); \
+ tcg_temp_free_ptr(statusptr); \
+}
+VFP_GEN_FIX(tosh, _round_to_zero)
+VFP_GEN_FIX(tosl, _round_to_zero)
+VFP_GEN_FIX(touh, _round_to_zero)
+VFP_GEN_FIX(toul, _round_to_zero)
+VFP_GEN_FIX(shto, )
+VFP_GEN_FIX(slto, )
+VFP_GEN_FIX(uhto, )
+VFP_GEN_FIX(ulto, )
+#undef VFP_GEN_FIX
+
+static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
+{
+ if (dp) {
+ gen_aa32_ld64(s, cpu_F0d, addr, get_mem_index(s));
+ } else {
+ gen_aa32_ld32u(s, cpu_F0s, addr, get_mem_index(s));
+ }
+}
+
+static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
+{
+ if (dp) {
+ gen_aa32_st64(s, cpu_F0d, addr, get_mem_index(s));
+ } else {
+ gen_aa32_st32(s, cpu_F0s, addr, get_mem_index(s));
+ }
+}
+
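+/* Single-precision register n lives in the lower (even n) or upper (odd n)
+ * half of double-precision register n / 2. */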
+static inline long vfp_reg_offset(int dp, int reg)
+{
+ if (dp)
+ return offsetof(CPUARMState, vfp.regs[reg]);
+ else if (reg & 1) {
+ return offsetof(CPUARMState, vfp.regs[reg >> 1])
+ + offsetof(CPU_DoubleU, l.upper);
+ } else {
+ return offsetof(CPUARMState, vfp.regs[reg >> 1])
+ + offsetof(CPU_DoubleU, l.lower);
+ }
+}
+
+/* Return the offset of a 32-bit piece of a NEON register.
+   Zero is the least significant end of the register. */
+static inline long neon_reg_offset(int reg, int n)
+{
+ int sreg;
+ sreg = reg * 2 + n;
+ return vfp_reg_offset(0, sreg);
+}
+
+static TCGv_i32 neon_load_reg(int reg, int pass)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
+ return tmp;
+}
+
+static void neon_store_reg(int reg, int pass, TCGv_i32 var)
+{
+ tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
+ tcg_temp_free_i32(var);
+}
+
+static inline void neon_load_reg64(TCGv_i64 var, int reg)
+{
+ tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
+}
+
+static inline void neon_store_reg64(TCGv_i64 var, int reg)
+{
+ tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
+}
+
+#define tcg_gen_ld_f32 tcg_gen_ld_i32
+#define tcg_gen_ld_f64 tcg_gen_ld_i64
+#define tcg_gen_st_f32 tcg_gen_st_i32
+#define tcg_gen_st_f64 tcg_gen_st_i64
+
+static inline void gen_mov_F0_vreg(int dp, int reg)
+{
+ if (dp)
+ tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
+ else
+ tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
+}
+
+static inline void gen_mov_F1_vreg(int dp, int reg)
+{
+ if (dp)
+ tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
+ else
+ tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
+}
+
+static inline void gen_mov_vreg_F0(int dp, int reg)
+{
+ if (dp)
+ tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
+ else
+ tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
+}
+
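+/* Bit 20 of a coprocessor instruction: set for loads/reads of the
+ * coprocessor (e.g. TMRRC, WLDR, TMRC, MRA), clear for stores/writes
+ * (TMCRR, WSTR, TMCR, MAR). */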
+#define ARM_CP_RW_BIT (1 << 20)
+
+static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
+{
+ tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
+}
+
+static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
+{
+ tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
+}
+
+static inline TCGv_i32 iwmmxt_load_creg(int reg)
+{
+ TCGv_i32 var = tcg_temp_new_i32();
+ tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
+ return var;
+}
+
+static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
+{
+ tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
+ tcg_temp_free_i32(var);
+}
+
+static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
+{
+ iwmmxt_store_reg(cpu_M0, rn);
+}
+
+static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
+{
+ iwmmxt_load_reg(cpu_M0, rn);
+}
+
+static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
+{
+ iwmmxt_load_reg(cpu_V1, rn);
+ tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
+}
+
+static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
+{
+ iwmmxt_load_reg(cpu_V1, rn);
+ tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
+}
+
+static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
+{
+ iwmmxt_load_reg(cpu_V1, rn);
+ tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
+}
+
+#define IWMMXT_OP(name) \
+static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
+{ \
+ iwmmxt_load_reg(cpu_V1, rn); \
+ gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
+}
+
+#define IWMMXT_OP_ENV(name) \
+static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
+{ \
+ iwmmxt_load_reg(cpu_V1, rn); \
+ gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
+}
+
+#define IWMMXT_OP_ENV_SIZE(name) \
+IWMMXT_OP_ENV(name##b) \
+IWMMXT_OP_ENV(name##w) \
+IWMMXT_OP_ENV(name##l)
+
+#define IWMMXT_OP_ENV1(name) \
+static inline void gen_op_iwmmxt_##name##_M0(void) \
+{ \
+ gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
+}
+
+IWMMXT_OP(maddsq)
+IWMMXT_OP(madduq)
+IWMMXT_OP(sadb)
+IWMMXT_OP(sadw)
+IWMMXT_OP(mulslw)
+IWMMXT_OP(mulshw)
+IWMMXT_OP(mululw)
+IWMMXT_OP(muluhw)
+IWMMXT_OP(macsw)
+IWMMXT_OP(macuw)
+
+IWMMXT_OP_ENV_SIZE(unpackl)
+IWMMXT_OP_ENV_SIZE(unpackh)
+
+IWMMXT_OP_ENV1(unpacklub)
+IWMMXT_OP_ENV1(unpackluw)
+IWMMXT_OP_ENV1(unpacklul)
+IWMMXT_OP_ENV1(unpackhub)
+IWMMXT_OP_ENV1(unpackhuw)
+IWMMXT_OP_ENV1(unpackhul)
+IWMMXT_OP_ENV1(unpacklsb)
+IWMMXT_OP_ENV1(unpacklsw)
+IWMMXT_OP_ENV1(unpacklsl)
+IWMMXT_OP_ENV1(unpackhsb)
+IWMMXT_OP_ENV1(unpackhsw)
+IWMMXT_OP_ENV1(unpackhsl)
+
+IWMMXT_OP_ENV_SIZE(cmpeq)
+IWMMXT_OP_ENV_SIZE(cmpgtu)
+IWMMXT_OP_ENV_SIZE(cmpgts)
+
+IWMMXT_OP_ENV_SIZE(mins)
+IWMMXT_OP_ENV_SIZE(minu)
+IWMMXT_OP_ENV_SIZE(maxs)
+IWMMXT_OP_ENV_SIZE(maxu)
+
+IWMMXT_OP_ENV_SIZE(subn)
+IWMMXT_OP_ENV_SIZE(addn)
+IWMMXT_OP_ENV_SIZE(subu)
+IWMMXT_OP_ENV_SIZE(addu)
+IWMMXT_OP_ENV_SIZE(subs)
+IWMMXT_OP_ENV_SIZE(adds)
+
+IWMMXT_OP_ENV(avgb0)
+IWMMXT_OP_ENV(avgb1)
+IWMMXT_OP_ENV(avgw0)
+IWMMXT_OP_ENV(avgw1)
+
+IWMMXT_OP_ENV(packuw)
+IWMMXT_OP_ENV(packul)
+IWMMXT_OP_ENV(packuq)
+IWMMXT_OP_ENV(packsw)
+IWMMXT_OP_ENV(packsl)
+IWMMXT_OP_ENV(packsq)
+
+static void gen_op_iwmmxt_set_mup(void)
+{
+ TCGv_i32 tmp;
+ tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
+ tcg_gen_ori_i32(tmp, tmp, 2);
+ store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
+}
+
+static void gen_op_iwmmxt_set_cup(void)
+{
+ TCGv_i32 tmp;
+ tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
+ tcg_gen_ori_i32(tmp, tmp, 1);
+ store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
+}
+
+static void gen_op_iwmmxt_setpsr_nz(void)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
+ store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
+}
+
+static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
+{
+ iwmmxt_load_reg(cpu_V1, rn);
+ tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
+ tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
+}
+
+static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
+ TCGv_i32 dest)
+{
+ int rd;
+ uint32_t offset;
+ TCGv_i32 tmp;
+
+ rd = (insn >> 16) & 0xf;
+ tmp = load_reg(s, rd);
+
+ offset = (insn & 0xff) << ((insn >> 7) & 2);
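+    /* The 8-bit immediate is scaled by 4 when bit 8 of the insn is set
+       (the word/doubleword forms) and used unscaled otherwise. */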
+ if (insn & (1 << 24)) {
+ /* Pre indexed */
+ if (insn & (1 << 23))
+ tcg_gen_addi_i32(tmp, tmp, offset);
+ else
+ tcg_gen_addi_i32(tmp, tmp, -offset);
+ tcg_gen_mov_i32(dest, tmp);
+ if (insn & (1 << 21))
+ store_reg(s, rd, tmp);
+ else
+ tcg_temp_free_i32(tmp);
+ } else if (insn & (1 << 21)) {
+ /* Post indexed */
+ tcg_gen_mov_i32(dest, tmp);
+ if (insn & (1 << 23))
+ tcg_gen_addi_i32(tmp, tmp, offset);
+ else
+ tcg_gen_addi_i32(tmp, tmp, -offset);
+ store_reg(s, rd, tmp);
+ } else if (!(insn & (1 << 23)))
+ return 1;
+ return 0;
+}
+
+static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
+{
+ int rd = (insn >> 0) & 0xf;
+ TCGv_i32 tmp;
+
+ if (insn & (1 << 8)) {
+ if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
+ return 1;
+ } else {
+ tmp = iwmmxt_load_creg(rd);
+ }
+ } else {
+ tmp = tcg_temp_new_i32();
+ iwmmxt_load_reg(cpu_V0, rd);
+ tcg_gen_extrl_i64_i32(tmp, cpu_V0);
+ }
+ tcg_gen_andi_i32(tmp, tmp, mask);
+ tcg_gen_mov_i32(dest, tmp);
+ tcg_temp_free_i32(tmp);
+ return 0;
+}
+
+/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
+   (i.e. an undefined instruction). */
+static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
+{
+ int rd, wrd;
+ int rdhi, rdlo, rd0, rd1, i;
+ TCGv_i32 addr;
+ TCGv_i32 tmp, tmp2, tmp3;
+
+ if ((insn & 0x0e000e00) == 0x0c000000) {
+ if ((insn & 0x0fe00ff0) == 0x0c400000) {
+ wrd = insn & 0xf;
+ rdlo = (insn >> 12) & 0xf;
+ rdhi = (insn >> 16) & 0xf;
+ if (insn & ARM_CP_RW_BIT) { /* TMRRC */
+ iwmmxt_load_reg(cpu_V0, wrd);
+ tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
+ tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
+ tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
+ } else { /* TMCRR */
+ tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
+ iwmmxt_store_reg(cpu_V0, wrd);
+ gen_op_iwmmxt_set_mup();
+ }
+ return 0;
+ }
+
+ wrd = (insn >> 12) & 0xf;
+ addr = tcg_temp_new_i32();
+ if (gen_iwmmxt_address(s, insn, addr)) {
+ tcg_temp_free_i32(addr);
+ return 1;
+ }
+ if (insn & ARM_CP_RW_BIT) {
+ if ((insn >> 28) == 0xf) { /* WLDRW wCx */
+ tmp = tcg_temp_new_i32();
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
+ iwmmxt_store_creg(wrd, tmp);
+ } else {
+ i = 1;
+ if (insn & (1 << 8)) {
+ if (insn & (1 << 22)) { /* WLDRD */
+ gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
+ i = 0;
+ } else { /* WLDRW wRd */
+ tmp = tcg_temp_new_i32();
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
+ }
+ } else {
+ tmp = tcg_temp_new_i32();
+ if (insn & (1 << 22)) { /* WLDRH */
+ gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
+ } else { /* WLDRB */
+ gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
+ }
+ }
+ if (i) {
+ tcg_gen_extu_i32_i64(cpu_M0, tmp);
+ tcg_temp_free_i32(tmp);
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ }
+ } else {
+ if ((insn >> 28) == 0xf) { /* WSTRW wCx */
+ tmp = iwmmxt_load_creg(wrd);
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
+ } else {
+ gen_op_iwmmxt_movq_M0_wRn(wrd);
+ tmp = tcg_temp_new_i32();
+ if (insn & (1 << 8)) {
+ if (insn & (1 << 22)) { /* WSTRD */
+ gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
+ } else { /* WSTRW wRd */
+ tcg_gen_extrl_i64_i32(tmp, cpu_M0);
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
+ }
+ } else {
+ if (insn & (1 << 22)) { /* WSTRH */
+ tcg_gen_extrl_i64_i32(tmp, cpu_M0);
+ gen_aa32_st16(s, tmp, addr, get_mem_index(s));
+ } else { /* WSTRB */
+ tcg_gen_extrl_i64_i32(tmp, cpu_M0);
+ gen_aa32_st8(s, tmp, addr, get_mem_index(s));
+ }
+ }
+ }
+ tcg_temp_free_i32(tmp);
+ }
+ tcg_temp_free_i32(addr);
+ return 0;
+ }
+
+ if ((insn & 0x0f000000) != 0x0e000000)
+ return 1;
+
+ switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
+ case 0x000: /* WOR */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 0) & 0xf;
+ rd1 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ gen_op_iwmmxt_orq_M0_wRn(rd1);
+ gen_op_iwmmxt_setpsr_nz();
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x011: /* TMCR */
+ if (insn & 0xf)
+ return 1;
+ rd = (insn >> 12) & 0xf;
+ wrd = (insn >> 16) & 0xf;
+ switch (wrd) {
+ case ARM_IWMMXT_wCID:
+ case ARM_IWMMXT_wCASF:
+ break;
+ case ARM_IWMMXT_wCon:
+ gen_op_iwmmxt_set_cup();
+ /* Fall through. */
+ case ARM_IWMMXT_wCSSF:
+ tmp = iwmmxt_load_creg(wrd);
+ tmp2 = load_reg(s, rd);
+ tcg_gen_andc_i32(tmp, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ iwmmxt_store_creg(wrd, tmp);
+ break;
+ case ARM_IWMMXT_wCGR0:
+ case ARM_IWMMXT_wCGR1:
+ case ARM_IWMMXT_wCGR2:
+ case ARM_IWMMXT_wCGR3:
+ gen_op_iwmmxt_set_cup();
+ tmp = load_reg(s, rd);
+ iwmmxt_store_creg(wrd, tmp);
+ break;
+ default:
+ return 1;
+ }
+ break;
+ case 0x100: /* WXOR */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 0) & 0xf;
+ rd1 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ gen_op_iwmmxt_xorq_M0_wRn(rd1);
+ gen_op_iwmmxt_setpsr_nz();
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x111: /* TMRC */
+ if (insn & 0xf)
+ return 1;
+ rd = (insn >> 12) & 0xf;
+ wrd = (insn >> 16) & 0xf;
+ tmp = iwmmxt_load_creg(wrd);
+ store_reg(s, rd, tmp);
+ break;
+ case 0x300: /* WANDN */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 0) & 0xf;
+ rd1 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ tcg_gen_neg_i64(cpu_M0, cpu_M0);
+ gen_op_iwmmxt_andq_M0_wRn(rd1);
+ gen_op_iwmmxt_setpsr_nz();
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x200: /* WAND */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 0) & 0xf;
+ rd1 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ gen_op_iwmmxt_andq_M0_wRn(rd1);
+ gen_op_iwmmxt_setpsr_nz();
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x810: case 0xa10: /* WMADD */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 0) & 0xf;
+ rd1 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_maddsq_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_madduq_M0_wRn(rd1);
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ break;
+ case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ switch ((insn >> 22) & 3) {
+ case 0:
+ gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
+ break;
+ case 1:
+ gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
+ break;
+ case 2:
+ gen_op_iwmmxt_unpackll_M0_wRn(rd1);
+ break;
+ case 3:
+ return 1;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ switch ((insn >> 22) & 3) {
+ case 0:
+ gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
+ break;
+ case 1:
+ gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
+ break;
+ case 2:
+ gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
+ break;
+ case 3:
+ return 1;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ if (insn & (1 << 22))
+ gen_op_iwmmxt_sadw_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_sadb_M0_wRn(rd1);
+ if (!(insn & (1 << 20)))
+ gen_op_iwmmxt_addl_M0_wRn(wrd);
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ break;
+ case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ if (insn & (1 << 21)) {
+ if (insn & (1 << 20))
+ gen_op_iwmmxt_mulshw_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_mulslw_M0_wRn(rd1);
+ } else {
+ if (insn & (1 << 20))
+ gen_op_iwmmxt_muluhw_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_mululw_M0_wRn(rd1);
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ break;
+ case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_macsw_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_macuw_M0_wRn(rd1);
+ if (!(insn & (1 << 20))) {
+ iwmmxt_load_reg(cpu_V1, wrd);
+ tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ break;
+ case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ switch ((insn >> 22) & 3) {
+ case 0:
+ gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
+ break;
+ case 1:
+ gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
+ break;
+ case 2:
+ gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
+ break;
+ case 3:
+ return 1;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ if (insn & (1 << 22)) {
+ if (insn & (1 << 20))
+ gen_op_iwmmxt_avgw1_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_avgw0_M0_wRn(rd1);
+ } else {
+ if (insn & (1 << 20))
+ gen_op_iwmmxt_avgb1_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_avgb0_M0_wRn(rd1);
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
+ tcg_gen_andi_i32(tmp, tmp, 7);
+ iwmmxt_load_reg(cpu_V1, rd1);
+ gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
+ tcg_temp_free_i32(tmp);
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ break;
+ case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
+ if (((insn >> 6) & 3) == 3)
+ return 1;
+ rd = (insn >> 12) & 0xf;
+ wrd = (insn >> 16) & 0xf;
+ tmp = load_reg(s, rd);
+ gen_op_iwmmxt_movq_M0_wRn(wrd);
+ switch ((insn >> 6) & 3) {
+ case 0:
+ tmp2 = tcg_const_i32(0xff);
+ tmp3 = tcg_const_i32((insn & 7) << 3);
+ break;
+ case 1:
+ tmp2 = tcg_const_i32(0xffff);
+ tmp3 = tcg_const_i32((insn & 3) << 4);
+ break;
+ case 2:
+ tmp2 = tcg_const_i32(0xffffffff);
+ tmp3 = tcg_const_i32((insn & 1) << 5);
+ break;
+ default:
+ TCGV_UNUSED_I32(tmp2);
+ TCGV_UNUSED_I32(tmp3);
+ }
+ gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
+ tcg_temp_free_i32(tmp3);
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp);
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ break;
+ case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
+ rd = (insn >> 12) & 0xf;
+ wrd = (insn >> 16) & 0xf;
+ if (rd == 15 || ((insn >> 22) & 3) == 3)
+ return 1;
+ gen_op_iwmmxt_movq_M0_wRn(wrd);
+ tmp = tcg_temp_new_i32();
+ switch ((insn >> 22) & 3) {
+ case 0:
+ tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
+ tcg_gen_extrl_i64_i32(tmp, cpu_M0);
+ if (insn & 8) {
+ tcg_gen_ext8s_i32(tmp, tmp);
+ } else {
+ tcg_gen_andi_i32(tmp, tmp, 0xff);
+ }
+ break;
+ case 1:
+ tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
+ tcg_gen_extrl_i64_i32(tmp, cpu_M0);
+ if (insn & 8) {
+ tcg_gen_ext16s_i32(tmp, tmp);
+ } else {
+ tcg_gen_andi_i32(tmp, tmp, 0xffff);
+ }
+ break;
+ case 2:
+ tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
+ tcg_gen_extrl_i64_i32(tmp, cpu_M0);
+ break;
+ }
+ store_reg(s, rd, tmp);
+ break;
+ case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
+ if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
+ return 1;
+ tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
+ switch ((insn >> 22) & 3) {
+ case 0:
+ tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
+ break;
+ case 1:
+ tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
+ break;
+ case 2:
+ tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
+ break;
+ }
+ tcg_gen_shli_i32(tmp, tmp, 28);
+ gen_set_nzcv(tmp);
+ tcg_temp_free_i32(tmp);
+ break;
+ case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
+ if (((insn >> 6) & 3) == 3)
+ return 1;
+ rd = (insn >> 12) & 0xf;
+ wrd = (insn >> 16) & 0xf;
+ tmp = load_reg(s, rd);
+ switch ((insn >> 6) & 3) {
+ case 0:
+ gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
+ break;
+ case 1:
+ gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
+ break;
+ case 2:
+ gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
+ break;
+ }
+ tcg_temp_free_i32(tmp);
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ break;
+ case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
+ if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
+ return 1;
+ tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
+ tmp2 = tcg_temp_new_i32();
+ tcg_gen_mov_i32(tmp2, tmp);
+ switch ((insn >> 22) & 3) {
+ case 0:
+ for (i = 0; i < 7; i ++) {
+ tcg_gen_shli_i32(tmp2, tmp2, 4);
+ tcg_gen_and_i32(tmp, tmp, tmp2);
+ }
+ break;
+ case 1:
+ for (i = 0; i < 3; i ++) {
+ tcg_gen_shli_i32(tmp2, tmp2, 8);
+ tcg_gen_and_i32(tmp, tmp, tmp2);
+ }
+ break;
+ case 2:
+ tcg_gen_shli_i32(tmp2, tmp2, 16);
+ tcg_gen_and_i32(tmp, tmp, tmp2);
+ break;
+ }
+ gen_set_nzcv(tmp);
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp);
+ break;
+ case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ switch ((insn >> 22) & 3) {
+ case 0:
+ gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
+ break;
+ case 1:
+ gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
+ break;
+ case 2:
+ gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
+ break;
+ case 3:
+ return 1;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ break;
+ case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
+ if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
+ return 1;
+ tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
+ tmp2 = tcg_temp_new_i32();
+ tcg_gen_mov_i32(tmp2, tmp);
+ switch ((insn >> 22) & 3) {
+ case 0:
+ for (i = 0; i < 7; i ++) {
+ tcg_gen_shli_i32(tmp2, tmp2, 4);
+ tcg_gen_or_i32(tmp, tmp, tmp2);
+ }
+ break;
+ case 1:
+ for (i = 0; i < 3; i ++) {
+ tcg_gen_shli_i32(tmp2, tmp2, 8);
+ tcg_gen_or_i32(tmp, tmp, tmp2);
+ }
+ break;
+ case 2:
+ tcg_gen_shli_i32(tmp2, tmp2, 16);
+ tcg_gen_or_i32(tmp, tmp, tmp2);
+ break;
+ }
+ gen_set_nzcv(tmp);
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp);
+ break;
+ case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
+ rd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
+ return 1;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ tmp = tcg_temp_new_i32();
+ switch ((insn >> 22) & 3) {
+ case 0:
+ gen_helper_iwmmxt_msbb(tmp, cpu_M0);
+ break;
+ case 1:
+ gen_helper_iwmmxt_msbw(tmp, cpu_M0);
+ break;
+ case 2:
+ gen_helper_iwmmxt_msbl(tmp, cpu_M0);
+ break;
+ }
+ store_reg(s, rd, tmp);
+ break;
+ case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
+ case 0x906: case 0xb06: case 0xd06: case 0xf06:
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ switch ((insn >> 22) & 3) {
+ case 0:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
+ break;
+ case 1:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
+ break;
+ case 2:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
+ break;
+ case 3:
+ return 1;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
+ case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ switch ((insn >> 22) & 3) {
+ case 0:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_unpacklsb_M0();
+ else
+ gen_op_iwmmxt_unpacklub_M0();
+ break;
+ case 1:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_unpacklsw_M0();
+ else
+ gen_op_iwmmxt_unpackluw_M0();
+ break;
+ case 2:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_unpacklsl_M0();
+ else
+ gen_op_iwmmxt_unpacklul_M0();
+ break;
+ case 3:
+ return 1;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
+ case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ switch ((insn >> 22) & 3) {
+ case 0:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_unpackhsb_M0();
+ else
+ gen_op_iwmmxt_unpackhub_M0();
+ break;
+ case 1:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_unpackhsw_M0();
+ else
+ gen_op_iwmmxt_unpackhuw_M0();
+ break;
+ case 2:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_unpackhsl_M0();
+ else
+ gen_op_iwmmxt_unpackhul_M0();
+ break;
+ case 3:
+ return 1;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
+ case 0x214: case 0x614: case 0xa14: case 0xe14:
+ if (((insn >> 22) & 3) == 0)
+ return 1;
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ tmp = tcg_temp_new_i32();
+ if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
+ tcg_temp_free_i32(tmp);
+ return 1;
+ }
+ switch ((insn >> 22) & 3) {
+ case 1:
+ gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
+ break;
+ case 2:
+ gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
+ break;
+ case 3:
+ gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
+ break;
+ }
+ tcg_temp_free_i32(tmp);
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
+ case 0x014: case 0x414: case 0x814: case 0xc14:
+ if (((insn >> 22) & 3) == 0)
+ return 1;
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ tmp = tcg_temp_new_i32();
+ if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
+ tcg_temp_free_i32(tmp);
+ return 1;
+ }
+ switch ((insn >> 22) & 3) {
+ case 1:
+ gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
+ break;
+ case 2:
+ gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
+ break;
+ case 3:
+ gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
+ break;
+ }
+ tcg_temp_free_i32(tmp);
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
+ case 0x114: case 0x514: case 0x914: case 0xd14:
+ if (((insn >> 22) & 3) == 0)
+ return 1;
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ tmp = tcg_temp_new_i32();
+ if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
+ tcg_temp_free_i32(tmp);
+ return 1;
+ }
+ switch ((insn >> 22) & 3) {
+ case 1:
+ gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
+ break;
+ case 2:
+ gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
+ break;
+ case 3:
+ gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
+ break;
+ }
+ tcg_temp_free_i32(tmp);
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
+ case 0x314: case 0x714: case 0xb14: case 0xf14:
+ if (((insn >> 22) & 3) == 0)
+ return 1;
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ tmp = tcg_temp_new_i32();
+ switch ((insn >> 22) & 3) {
+ case 1:
+ if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
+ tcg_temp_free_i32(tmp);
+ return 1;
+ }
+ gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
+ break;
+ case 2:
+ if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
+ tcg_temp_free_i32(tmp);
+ return 1;
+ }
+ gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
+ break;
+ case 3:
+ if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
+ tcg_temp_free_i32(tmp);
+ return 1;
+ }
+ gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
+ break;
+ }
+ tcg_temp_free_i32(tmp);
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
+ case 0x916: case 0xb16: case 0xd16: case 0xf16:
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ switch ((insn >> 22) & 3) {
+ case 0:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_minsb_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_minub_M0_wRn(rd1);
+ break;
+ case 1:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_minsw_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_minuw_M0_wRn(rd1);
+ break;
+ case 2:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_minsl_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_minul_M0_wRn(rd1);
+ break;
+ case 3:
+ return 1;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ break;
+ case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
+ case 0x816: case 0xa16: case 0xc16: case 0xe16:
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ switch ((insn >> 22) & 3) {
+ case 0:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_maxsb_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_maxub_M0_wRn(rd1);
+ break;
+ case 1:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_maxsw_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_maxuw_M0_wRn(rd1);
+ break;
+ case 2:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_maxsl_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_maxul_M0_wRn(rd1);
+ break;
+ case 3:
+ return 1;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ break;
+ case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
+ case 0x402: case 0x502: case 0x602: case 0x702:
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ tmp = tcg_const_i32((insn >> 20) & 3);
+ iwmmxt_load_reg(cpu_V1, rd1);
+ gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
+ tcg_temp_free_i32(tmp);
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ break;
+ case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
+ case 0x41a: case 0x51a: case 0x61a: case 0x71a:
+ case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
+ case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ switch ((insn >> 20) & 0xf) {
+ case 0x0:
+ gen_op_iwmmxt_subnb_M0_wRn(rd1);
+ break;
+ case 0x1:
+ gen_op_iwmmxt_subub_M0_wRn(rd1);
+ break;
+ case 0x3:
+ gen_op_iwmmxt_subsb_M0_wRn(rd1);
+ break;
+ case 0x4:
+ gen_op_iwmmxt_subnw_M0_wRn(rd1);
+ break;
+ case 0x5:
+ gen_op_iwmmxt_subuw_M0_wRn(rd1);
+ break;
+ case 0x7:
+ gen_op_iwmmxt_subsw_M0_wRn(rd1);
+ break;
+ case 0x8:
+ gen_op_iwmmxt_subnl_M0_wRn(rd1);
+ break;
+ case 0x9:
+ gen_op_iwmmxt_subul_M0_wRn(rd1);
+ break;
+ case 0xb:
+ gen_op_iwmmxt_subsl_M0_wRn(rd1);
+ break;
+ default:
+ return 1;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
+ case 0x41e: case 0x51e: case 0x61e: case 0x71e:
+ case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
+ case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
+ gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
+ tcg_temp_free_i32(tmp);
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
+ case 0x418: case 0x518: case 0x618: case 0x718:
+ case 0x818: case 0x918: case 0xa18: case 0xb18:
+ case 0xc18: case 0xd18: case 0xe18: case 0xf18:
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ switch ((insn >> 20) & 0xf) {
+ case 0x0:
+ gen_op_iwmmxt_addnb_M0_wRn(rd1);
+ break;
+ case 0x1:
+ gen_op_iwmmxt_addub_M0_wRn(rd1);
+ break;
+ case 0x3:
+ gen_op_iwmmxt_addsb_M0_wRn(rd1);
+ break;
+ case 0x4:
+ gen_op_iwmmxt_addnw_M0_wRn(rd1);
+ break;
+ case 0x5:
+ gen_op_iwmmxt_adduw_M0_wRn(rd1);
+ break;
+ case 0x7:
+ gen_op_iwmmxt_addsw_M0_wRn(rd1);
+ break;
+ case 0x8:
+ gen_op_iwmmxt_addnl_M0_wRn(rd1);
+ break;
+ case 0x9:
+ gen_op_iwmmxt_addul_M0_wRn(rd1);
+ break;
+ case 0xb:
+ gen_op_iwmmxt_addsl_M0_wRn(rd1);
+ break;
+ default:
+ return 1;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
+ case 0x408: case 0x508: case 0x608: case 0x708:
+ case 0x808: case 0x908: case 0xa08: case 0xb08:
+ case 0xc08: case 0xd08: case 0xe08: case 0xf08:
+ if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
+ return 1;
+ wrd = (insn >> 12) & 0xf;
+ rd0 = (insn >> 16) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ gen_op_iwmmxt_movq_M0_wRn(rd0);
+ switch ((insn >> 22) & 3) {
+ case 1:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_packsw_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_packuw_M0_wRn(rd1);
+ break;
+ case 2:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_packsl_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_packul_M0_wRn(rd1);
+ break;
+ case 3:
+ if (insn & (1 << 21))
+ gen_op_iwmmxt_packsq_M0_wRn(rd1);
+ else
+ gen_op_iwmmxt_packuq_M0_wRn(rd1);
+ break;
+ }
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ gen_op_iwmmxt_set_cup();
+ break;
+ case 0x201: case 0x203: case 0x205: case 0x207:
+ case 0x209: case 0x20b: case 0x20d: case 0x20f:
+ case 0x211: case 0x213: case 0x215: case 0x217:
+ case 0x219: case 0x21b: case 0x21d: case 0x21f:
+ wrd = (insn >> 5) & 0xf;
+ rd0 = (insn >> 12) & 0xf;
+ rd1 = (insn >> 0) & 0xf;
+ if (rd0 == 0xf || rd1 == 0xf)
+ return 1;
+ gen_op_iwmmxt_movq_M0_wRn(wrd);
+ tmp = load_reg(s, rd0);
+ tmp2 = load_reg(s, rd1);
+ switch ((insn >> 16) & 0xf) {
+ case 0x0: /* TMIA */
+ gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
+ break;
+ case 0x8: /* TMIAPH */
+ gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
+ break;
+ case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
+ if (insn & (1 << 16))
+ tcg_gen_shri_i32(tmp, tmp, 16);
+ if (insn & (1 << 17))
+ tcg_gen_shri_i32(tmp2, tmp2, 16);
+ gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
+ break;
+ default:
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp);
+ return 1;
+ }
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp);
+ gen_op_iwmmxt_movq_wRn_M0(wrd);
+ gen_op_iwmmxt_set_mup();
+ break;
+ default:
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
+   (i.e. an undefined instruction). */
+static int disas_dsp_insn(DisasContext *s, uint32_t insn)
+{
+ int acc, rd0, rd1, rdhi, rdlo;
+ TCGv_i32 tmp, tmp2;
+
+ if ((insn & 0x0ff00f10) == 0x0e200010) {
+ /* Multiply with Internal Accumulate Format */
+ rd0 = (insn >> 12) & 0xf;
+ rd1 = insn & 0xf;
+ acc = (insn >> 5) & 7;
+
+ if (acc != 0)
+ return 1;
+
+ tmp = load_reg(s, rd0);
+ tmp2 = load_reg(s, rd1);
+ switch ((insn >> 16) & 0xf) {
+ case 0x0: /* MIA */
+ gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
+ break;
+ case 0x8: /* MIAPH */
+ gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
+ break;
+ case 0xc: /* MIABB */
+ case 0xd: /* MIABT */
+ case 0xe: /* MIATB */
+ case 0xf: /* MIATT */
+ if (insn & (1 << 16))
+ tcg_gen_shri_i32(tmp, tmp, 16);
+ if (insn & (1 << 17))
+ tcg_gen_shri_i32(tmp2, tmp2, 16);
+ gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
+ break;
+ default:
+ return 1;
+ }
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp);
+
+ gen_op_iwmmxt_movq_wRn_M0(acc);
+ return 0;
+ }
+
+ if ((insn & 0x0fe00ff8) == 0x0c400000) {
+ /* Internal Accumulator Access Format */
+ rdhi = (insn >> 16) & 0xf;
+ rdlo = (insn >> 12) & 0xf;
+ acc = insn & 7;
+
+ if (acc != 0)
+ return 1;
+
+ if (insn & ARM_CP_RW_BIT) { /* MRA */
+ iwmmxt_load_reg(cpu_V0, acc);
+ tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
+ tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
+ tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
+ tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
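+            /* The internal accumulator is only 40 bits wide, so just
+               bits [39:32] of the high word are significant. */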
+ } else { /* MAR */
+ tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
+ iwmmxt_store_reg(cpu_V0, acc);
+ }
+ return 0;
+ }
+
+ return 1;
+}
+
+#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
+#define VFP_SREG(insn, bigbit, smallbit) \
+ ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
+#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
+ if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
+ reg = (((insn) >> (bigbit)) & 0x0f) \
+ | (((insn) >> ((smallbit) - 4)) & 0x10); \
+ } else { \
+ if (insn & (1 << (smallbit))) \
+ return 1; \
+ reg = ((insn) >> (bigbit)) & 0x0f; \
+ }} while (0)
+
+#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
+#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
+#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
+#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
+#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
+#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
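+/* A single-precision register number is the 4-bit field with the single
+ * bit appended below it (e.g. Sd = Vd * 2 + D).  A double-precision
+ * number on VFP3 and later puts the extra bit on top (Dd = D * 16 + Vd);
+ * earlier cores only have 16 double registers, so the extra bit must be
+ * zero there. */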
+
+/* Move between integer and VFP cores. */
+static TCGv_i32 gen_vfp_mrs(void)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_mov_i32(tmp, cpu_F0s);
+ return tmp;
+}
+
+static void gen_vfp_msr(TCGv_i32 tmp)
+{
+ tcg_gen_mov_i32(cpu_F0s, tmp);
+ tcg_temp_free_i32(tmp);
+}
+
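+/* Replicate the (optionally right-shifted) low byte of var across all
+ * four byte lanes, e.g. 0x000000ab becomes 0xabababab. */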
+static void gen_neon_dup_u8(TCGv_i32 var, int shift)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ if (shift)
+ tcg_gen_shri_i32(var, var, shift);
+ tcg_gen_ext8u_i32(var, var);
+ tcg_gen_shli_i32(tmp, var, 8);
+ tcg_gen_or_i32(var, var, tmp);
+ tcg_gen_shli_i32(tmp, var, 16);
+ tcg_gen_or_i32(var, var, tmp);
+ tcg_temp_free_i32(tmp);
+}
+
+static void gen_neon_dup_low16(TCGv_i32 var)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_ext16u_i32(var, var);
+ tcg_gen_shli_i32(tmp, var, 16);
+ tcg_gen_or_i32(var, var, tmp);
+ tcg_temp_free_i32(tmp);
+}
+
+static void gen_neon_dup_high16(TCGv_i32 var)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_andi_i32(var, var, 0xffff0000);
+ tcg_gen_shri_i32(tmp, var, 16);
+ tcg_gen_or_i32(var, var, tmp);
+ tcg_temp_free_i32(tmp);
+}
+
+static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
+{
+ /* Load a single Neon element and replicate into a 32 bit TCG reg */
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ switch (size) {
+ case 0:
+ gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
+ gen_neon_dup_u8(tmp, 0);
+ break;
+ case 1:
+ gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
+ gen_neon_dup_low16(tmp);
+ break;
+ case 2:
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
+ break;
+ default: /* Avoid compiler warnings. */
+ abort();
+ }
+ return tmp;
+}
+
+static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
+ uint32_t dp)
+{
+ uint32_t cc = extract32(insn, 20, 2);
+
+ if (dp) {
+ TCGv_i64 frn, frm, dest;
+ TCGv_i64 tmp, zero, zf, nf, vf;
+
+ zero = tcg_const_i64(0);
+
+ frn = tcg_temp_new_i64();
+ frm = tcg_temp_new_i64();
+ dest = tcg_temp_new_i64();
+
+ zf = tcg_temp_new_i64();
+ nf = tcg_temp_new_i64();
+ vf = tcg_temp_new_i64();
+
+ tcg_gen_extu_i32_i64(zf, cpu_ZF);
+ tcg_gen_ext_i32_i64(nf, cpu_NF);
+ tcg_gen_ext_i32_i64(vf, cpu_VF);
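+        /* ZF is only ever tested for (in)equality with zero, so a zero
+         * extension suffices; NF and VF are tested via their sign bits,
+         * so they must be sign-extended. */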
+
+ tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
+ tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
+ switch (cc) {
+ case 0: /* eq: Z */
+ tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
+ frn, frm);
+ break;
+ case 1: /* vs: V */
+ tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
+ frn, frm);
+ break;
+ case 2: /* ge: N == V -> N ^ V == 0 */
+ tmp = tcg_temp_new_i64();
+ tcg_gen_xor_i64(tmp, vf, nf);
+ tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
+ frn, frm);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 3: /* gt: !Z && N == V */
+ tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
+ frn, frm);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_xor_i64(tmp, vf, nf);
+ tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
+ dest, frm);
+ tcg_temp_free_i64(tmp);
+ break;
+ }
+ tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
+ tcg_temp_free_i64(frn);
+ tcg_temp_free_i64(frm);
+ tcg_temp_free_i64(dest);
+
+ tcg_temp_free_i64(zf);
+ tcg_temp_free_i64(nf);
+ tcg_temp_free_i64(vf);
+
+ tcg_temp_free_i64(zero);
+ } else {
+ TCGv_i32 frn, frm, dest;
+ TCGv_i32 tmp, zero;
+
+ zero = tcg_const_i32(0);
+
+ frn = tcg_temp_new_i32();
+ frm = tcg_temp_new_i32();
+ dest = tcg_temp_new_i32();
+ tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
+ tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
+ switch (cc) {
+ case 0: /* eq: Z */
+ tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
+ frn, frm);
+ break;
+ case 1: /* vs: V */
+ tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
+ frn, frm);
+ break;
+ case 2: /* ge: N == V -> N ^ V == 0 */
+ tmp = tcg_temp_new_i32();
+ tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
+ tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
+ frn, frm);
+ tcg_temp_free_i32(tmp);
+ break;
+ case 3: /* gt: !Z && N == V */
+ tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
+ frn, frm);
+ tmp = tcg_temp_new_i32();
+ tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
+ tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
+ dest, frm);
+ tcg_temp_free_i32(tmp);
+ break;
+ }
+ tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
+ tcg_temp_free_i32(frn);
+ tcg_temp_free_i32(frm);
+ tcg_temp_free_i32(dest);
+
+ tcg_temp_free_i32(zero);
+ }
+
+ return 0;
+}
+
+static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
+ uint32_t rm, uint32_t dp)
+{
+ uint32_t vmin = extract32(insn, 6, 1);
+ TCGv_ptr fpst = get_fpstatus_ptr(0);
+
+ if (dp) {
+ TCGv_i64 frn, frm, dest;
+
+ frn = tcg_temp_new_i64();
+ frm = tcg_temp_new_i64();
+ dest = tcg_temp_new_i64();
+
+ tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
+ tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
+ if (vmin) {
+ gen_helper_vfp_minnumd(dest, frn, frm, fpst);
+ } else {
+ gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
+ }
+ tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
+ tcg_temp_free_i64(frn);
+ tcg_temp_free_i64(frm);
+ tcg_temp_free_i64(dest);
+ } else {
+ TCGv_i32 frn, frm, dest;
+
+ frn = tcg_temp_new_i32();
+ frm = tcg_temp_new_i32();
+ dest = tcg_temp_new_i32();
+
+ tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
+ tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
+ if (vmin) {
+ gen_helper_vfp_minnums(dest, frn, frm, fpst);
+ } else {
+ gen_helper_vfp_maxnums(dest, frn, frm, fpst);
+ }
+ tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
+ tcg_temp_free_i32(frn);
+ tcg_temp_free_i32(frm);
+ tcg_temp_free_i32(dest);
+ }
+
+ tcg_temp_free_ptr(fpst);
+ return 0;
+}
+
+static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
+ int rounding)
+{
+ TCGv_ptr fpst = get_fpstatus_ptr(0);
+ TCGv_i32 tcg_rmode;
+
+ tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+
+ if (dp) {
+ TCGv_i64 tcg_op;
+ TCGv_i64 tcg_res;
+ tcg_op = tcg_temp_new_i64();
+ tcg_res = tcg_temp_new_i64();
+ tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
+ gen_helper_rintd(tcg_res, tcg_op, fpst);
+ tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
+ tcg_temp_free_i64(tcg_op);
+ tcg_temp_free_i64(tcg_res);
+ } else {
+ TCGv_i32 tcg_op;
+ TCGv_i32 tcg_res;
+ tcg_op = tcg_temp_new_i32();
+ tcg_res = tcg_temp_new_i32();
+ tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
+ gen_helper_rints(tcg_res, tcg_op, fpst);
+ tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
+ tcg_temp_free_i32(tcg_op);
+ tcg_temp_free_i32(tcg_res);
+ }
+
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+ tcg_temp_free_i32(tcg_rmode);
+
+ tcg_temp_free_ptr(fpst);
+ return 0;
+}
+
+static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
+ int rounding)
+{
+ bool is_signed = extract32(insn, 7, 1);
+ TCGv_ptr fpst = get_fpstatus_ptr(0);
+ TCGv_i32 tcg_rmode, tcg_shift;
+
+ tcg_shift = tcg_const_i32(0);
+
+ tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+
+ if (dp) {
+ TCGv_i64 tcg_double, tcg_res;
+ TCGv_i32 tcg_tmp;
+ /* Rd is encoded as a single precision register even when the source
+ * is double precision.
+ */
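+        /* rd arrived here as D:Vd (via VFP_DREG_D); rebuild the
+         * single-precision register number Vd:D before storing the
+         * 32-bit result. */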
+ rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
+ tcg_double = tcg_temp_new_i64();
+ tcg_res = tcg_temp_new_i64();
+ tcg_tmp = tcg_temp_new_i32();
+ tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
+ if (is_signed) {
+ gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
+ } else {
+ gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
+ }
+ tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
+ tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
+ tcg_temp_free_i32(tcg_tmp);
+ tcg_temp_free_i64(tcg_res);
+ tcg_temp_free_i64(tcg_double);
+ } else {
+ TCGv_i32 tcg_single, tcg_res;
+ tcg_single = tcg_temp_new_i32();
+ tcg_res = tcg_temp_new_i32();
+ tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
+ if (is_signed) {
+ gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
+ } else {
+ gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
+ }
+ tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
+ tcg_temp_free_i32(tcg_res);
+ tcg_temp_free_i32(tcg_single);
+ }
+
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+ tcg_temp_free_i32(tcg_rmode);
+
+ tcg_temp_free_i32(tcg_shift);
+
+ tcg_temp_free_ptr(fpst);
+
+ return 0;
+}
+
+/* Table for converting the most common AArch32 encoding of
+ * rounding mode to arm_fprounding order (which matches the
+ * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
+ */
+static const uint8_t fp_decode_rm[] = {
+ FPROUNDING_TIEAWAY,
+ FPROUNDING_TIEEVEN,
+ FPROUNDING_POSINF,
+ FPROUNDING_NEGINF,
+};
+
+static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
+{
+ uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
+
+ if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
+ return 1;
+ }
+
+ if (dp) {
+ VFP_DREG_D(rd, insn);
+ VFP_DREG_N(rn, insn);
+ VFP_DREG_M(rm, insn);
+ } else {
+ rd = VFP_SREG_D(insn);
+ rn = VFP_SREG_N(insn);
+ rm = VFP_SREG_M(insn);
+ }
+
+ if ((insn & 0x0f800e50) == 0x0e000a00) {
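+        /* VSELEQ, VSELVS, VSELGE, VSELGT */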
+ return handle_vsel(insn, rd, rn, rm, dp);
+ } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
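+        /* VMAXNM, VMINNM */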
+ return handle_vminmaxnm(insn, rd, rn, rm, dp);
+ } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
+ /* VRINTA, VRINTN, VRINTP, VRINTM */
+ int rounding = fp_decode_rm[extract32(insn, 16, 2)];
+ return handle_vrint(insn, rd, rm, dp, rounding);
+ } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
+ /* VCVTA, VCVTN, VCVTP, VCVTM */
+ int rounding = fp_decode_rm[extract32(insn, 16, 2)];
+ return handle_vcvt(insn, rd, rm, dp, rounding);
+ }
+ return 1;
+}
+
+/* Disassemble a VFP instruction. Returns nonzero if an error occurred
+   (i.e. an undefined instruction). */
+static int disas_vfp_insn(DisasContext *s, uint32_t insn)
+{
+ uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
+ int dp, veclen;
+ TCGv_i32 addr;
+ TCGv_i32 tmp;
+ TCGv_i32 tmp2;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
+ return 1;
+ }
+
+ /* FIXME: this access check should not take precedence over UNDEF
+ * for invalid encodings; we will generate incorrect syndrome information
+ * for attempts to execute invalid vfp/neon encodings with FP disabled.
+ */
+ if (s->fp_excp_el) {
+ gen_exception_insn(s, 4, EXCP_UDEF,
+ syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
+ return 0;
+ }
+
+ if (!s->vfp_enabled) {
+ /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
+ if ((insn & 0x0fe00fff) != 0x0ee00a10)
+ return 1;
+ rn = (insn >> 16) & 0xf;
+ if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
+ && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
+ return 1;
+ }
+ }
+
+ if (extract32(insn, 28, 4) == 0xf) {
+ /* Encodings with T=1 (Thumb) or unconditional (ARM):
+ * only used in v8 and above.
+ */
+ return disas_vfp_v8_insn(s, insn);
+ }
+
+ dp = ((insn & 0xf00) == 0xb00);
+ switch ((insn >> 24) & 0xf) {
+ case 0xe:
+ if (insn & (1 << 4)) {
+ /* single register transfer */
+ rd = (insn >> 12) & 0xf;
+ if (dp) {
+ int size;
+ int pass;
+
+ VFP_DREG_N(rn, insn);
+ if (insn & 0xf)
+ return 1;
+ if (insn & 0x00c00060
+ && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return 1;
+ }
+
+ pass = (insn >> 21) & 1;
+ if (insn & (1 << 22)) {
+ size = 0;
+ offset = ((insn >> 5) & 3) * 8;
+ } else if (insn & (1 << 5)) {
+ size = 1;
+ offset = (insn & (1 << 6)) ? 16 : 0;
+ } else {
+ size = 2;
+ offset = 0;
+ }
+ if (insn & ARM_CP_RW_BIT) {
+ /* vfp->arm */
+ tmp = neon_load_reg(rn, pass);
+ switch (size) {
+ case 0:
+ if (offset)
+ tcg_gen_shri_i32(tmp, tmp, offset);
+ if (insn & (1 << 23))
+ gen_uxtb(tmp);
+ else
+ gen_sxtb(tmp);
+ break;
+ case 1:
+ if (insn & (1 << 23)) {
+ if (offset) {
+ tcg_gen_shri_i32(tmp, tmp, 16);
+ } else {
+ gen_uxth(tmp);
+ }
+ } else {
+ if (offset) {
+ tcg_gen_sari_i32(tmp, tmp, 16);
+ } else {
+ gen_sxth(tmp);
+ }
+ }
+ break;
+ case 2:
+ break;
+ }
+ store_reg(s, rd, tmp);
+ } else {
+ /* arm->vfp */
+ tmp = load_reg(s, rd);
+ if (insn & (1 << 23)) {
+ /* VDUP */
+ if (size == 0) {
+ gen_neon_dup_u8(tmp, 0);
+ } else if (size == 1) {
+ gen_neon_dup_low16(tmp);
+ }
+ for (n = 0; n <= pass * 2; n++) {
+ tmp2 = tcg_temp_new_i32();
+ tcg_gen_mov_i32(tmp2, tmp);
+ neon_store_reg(rn, n, tmp2);
+ }
+ neon_store_reg(rn, n, tmp);
+ } else {
+ /* VMOV */
+ switch (size) {
+ case 0:
+ tmp2 = neon_load_reg(rn, pass);
+ tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
+ tcg_temp_free_i32(tmp2);
+ break;
+ case 1:
+ tmp2 = neon_load_reg(rn, pass);
+ tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
+ tcg_temp_free_i32(tmp2);
+ break;
+ case 2:
+ break;
+ }
+ neon_store_reg(rn, pass, tmp);
+ }
+ }
+ } else { /* !dp */
+ if ((insn & 0x6f) != 0x00)
+ return 1;
+ rn = VFP_SREG_N(insn);
+ if (insn & ARM_CP_RW_BIT) {
+ /* vfp->arm */
+ if (insn & (1 << 21)) {
+ /* system register */
+ rn >>= 1;
+
+ switch (rn) {
+ case ARM_VFP_FPSID:
+                            /* VFP2 allows access to FPSID from userspace.
+ VFP3 restricts all id registers to privileged
+ accesses. */
+ if (IS_USER(s)
+ && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
+ return 1;
+ }
+ tmp = load_cpu_field(vfp.xregs[rn]);
+ break;
+ case ARM_VFP_FPEXC:
+ if (IS_USER(s))
+ return 1;
+ tmp = load_cpu_field(vfp.xregs[rn]);
+ break;
+ case ARM_VFP_FPINST:
+ case ARM_VFP_FPINST2:
+ /* Not present in VFP3. */
+ if (IS_USER(s)
+ || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
+ return 1;
+ }
+ tmp = load_cpu_field(vfp.xregs[rn]);
+ break;
+ case ARM_VFP_FPSCR:
+ if (rd == 15) {
+ tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
+ tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
+ } else {
+ tmp = tcg_temp_new_i32();
+ gen_helper_vfp_get_fpscr(tmp, cpu_env);
+ }
+ break;
+ case ARM_VFP_MVFR2:
+ if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
+ return 1;
+ }
+ /* fall through */
+ case ARM_VFP_MVFR0:
+ case ARM_VFP_MVFR1:
+ if (IS_USER(s)
+ || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
+ return 1;
+ }
+ tmp = load_cpu_field(vfp.xregs[rn]);
+ break;
+ default:
+ return 1;
+ }
+ } else {
+ gen_mov_F0_vreg(0, rn);
+ tmp = gen_vfp_mrs();
+ }
+ if (rd == 15) {
+ /* Set the 4 flag bits in the CPSR. */
+ gen_set_nzcv(tmp);
+ tcg_temp_free_i32(tmp);
+ } else {
+ store_reg(s, rd, tmp);
+ }
+ } else {
+ /* arm->vfp */
+ if (insn & (1 << 21)) {
+ rn >>= 1;
+ /* system register */
+ switch (rn) {
+ case ARM_VFP_FPSID:
+ case ARM_VFP_MVFR0:
+ case ARM_VFP_MVFR1:
+ /* Writes are ignored. */
+ break;
+ case ARM_VFP_FPSCR:
+ tmp = load_reg(s, rd);
+ gen_helper_vfp_set_fpscr(cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+ gen_lookup_tb(s);
+ break;
+ case ARM_VFP_FPEXC:
+ if (IS_USER(s))
+ return 1;
+ /* TODO: VFP subarchitecture support.
+ * For now, keep the EN bit only */
+ tmp = load_reg(s, rd);
+ tcg_gen_andi_i32(tmp, tmp, 1 << 30);
+ store_cpu_field(tmp, vfp.xregs[rn]);
+ gen_lookup_tb(s);
+ break;
+ case ARM_VFP_FPINST:
+ case ARM_VFP_FPINST2:
+ if (IS_USER(s)) {
+ return 1;
+ }
+ tmp = load_reg(s, rd);
+ store_cpu_field(tmp, vfp.xregs[rn]);
+ break;
+ default:
+ return 1;
+ }
+ } else {
+ tmp = load_reg(s, rd);
+ gen_vfp_msr(tmp);
+ gen_mov_vreg_F0(0, rn);
+ }
+ }
+ }
+ } else {
+ /* data processing */
+ /* The opcode is in bits 23, 21, 20 and 6. */
+ op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
+ if (dp) {
+ if (op == 15) {
+ /* rn is opcode */
+ rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
+ } else {
+ /* rn is register number */
+ VFP_DREG_N(rn, insn);
+ }
+
+ if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
+ ((rn & 0x1e) == 0x6))) {
+ /* Integer or single/half precision destination. */
+ rd = VFP_SREG_D(insn);
+ } else {
+ VFP_DREG_D(rd, insn);
+ }
+ if (op == 15 &&
+ (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
+ ((rn & 0x1e) == 0x4))) {
+ /* VCVT from int or half precision is always from S reg
+ * regardless of dp bit. VCVT with immediate frac_bits
+ * has same format as SREG_M.
+ */
+ rm = VFP_SREG_M(insn);
+ } else {
+ VFP_DREG_M(rm, insn);
+ }
+ } else {
+ rn = VFP_SREG_N(insn);
+ if (op == 15 && rn == 15) {
+ /* Double precision destination. */
+ VFP_DREG_D(rd, insn);
+ } else {
+ rd = VFP_SREG_D(insn);
+ }
+ /* NB that we implicitly rely on the encoding for the frac_bits
+ * in VCVT of fixed to float being the same as that of an SREG_M
+ */
+ rm = VFP_SREG_M(insn);
+ }
+
+ veclen = s->vec_len;
+ if (op == 15 && rn > 3)
+ veclen = 0;
+
+ /* Shut up compiler warnings. */
+ delta_m = 0;
+ delta_d = 0;
+ bank_mask = 0;
+
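+            /* VFP short vectors: when FPSCR.LEN is non-zero, scalar VFP ops
+             * iterate over banks of registers (4 doubles or 8 singles per
+             * bank); a destination in the first bank keeps the op scalar.
+             */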
+ if (veclen > 0) {
+ if (dp)
+ bank_mask = 0xc;
+ else
+ bank_mask = 0x18;
+
+ /* Figure out what type of vector operation this is. */
+ if ((rd & bank_mask) == 0) {
+ /* scalar */
+ veclen = 0;
+ } else {
+ if (dp)
+ delta_d = (s->vec_stride >> 1) + 1;
+ else
+ delta_d = s->vec_stride + 1;
+
+ if ((rm & bank_mask) == 0) {
+ /* mixed scalar/vector */
+ delta_m = 0;
+ } else {
+ /* vector */
+ delta_m = delta_d;
+ }
+ }
+ }
+
+ /* Load the initial operands. */
+ if (op == 15) {
+ switch (rn) {
+ case 16:
+ case 17:
+ /* Integer source */
+ gen_mov_F0_vreg(0, rm);
+ break;
+ case 8:
+ case 9:
+ /* Compare */
+ gen_mov_F0_vreg(dp, rd);
+ gen_mov_F1_vreg(dp, rm);
+ break;
+ case 10:
+ case 11:
+ /* Compare with zero */
+ gen_mov_F0_vreg(dp, rd);
+ gen_vfp_F1_ld0(dp);
+ break;
+ case 20:
+ case 21:
+ case 22:
+ case 23:
+ case 28:
+ case 29:
+ case 30:
+ case 31:
+ /* Source and destination the same. */
+ gen_mov_F0_vreg(dp, rd);
+ break;
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+                /* VCVTB, VCVTT: only present with the halfprec extension;
+ * UNPREDICTABLE if bit 8 is set prior to ARMv8
+ * (we choose to UNDEF)
+ */
+ if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
+ !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
+ return 1;
+ }
+ if (!extract32(rn, 1, 1)) {
+ /* Half precision source. */
+ gen_mov_F0_vreg(0, rm);
+ break;
+ }
+ /* Otherwise fall through */
+ default:
+ /* One source operand. */
+ gen_mov_F0_vreg(dp, rm);
+ break;
+ }
+ } else {
+ /* Two source operands. */
+ gen_mov_F0_vreg(dp, rn);
+ gen_mov_F1_vreg(dp, rm);
+ }
+
+ for (;;) {
+ /* Perform the calculation. */
+ switch (op) {
+ case 0: /* VMLA: fd + (fn * fm) */
+ /* Note that order of inputs to the add matters for NaNs */
+ gen_vfp_F1_mul(dp);
+ gen_mov_F0_vreg(dp, rd);
+ gen_vfp_add(dp);
+ break;
+ case 1: /* VMLS: fd + -(fn * fm) */
+ gen_vfp_mul(dp);
+ gen_vfp_F1_neg(dp);
+ gen_mov_F0_vreg(dp, rd);
+ gen_vfp_add(dp);
+ break;
+ case 2: /* VNMLS: -fd + (fn * fm) */
+ /* Note that it isn't valid to replace (-A + B) with (B - A)
+ * or similar plausible looking simplifications
+ * because this will give wrong results for NaNs.
+ */
+ gen_vfp_F1_mul(dp);
+ gen_mov_F0_vreg(dp, rd);
+ gen_vfp_neg(dp);
+ gen_vfp_add(dp);
+ break;
+ case 3: /* VNMLA: -fd + -(fn * fm) */
+ gen_vfp_mul(dp);
+ gen_vfp_F1_neg(dp);
+ gen_mov_F0_vreg(dp, rd);
+ gen_vfp_neg(dp);
+ gen_vfp_add(dp);
+ break;
+ case 4: /* mul: fn * fm */
+ gen_vfp_mul(dp);
+ break;
+ case 5: /* nmul: -(fn * fm) */
+ gen_vfp_mul(dp);
+ gen_vfp_neg(dp);
+ break;
+ case 6: /* add: fn + fm */
+ gen_vfp_add(dp);
+ break;
+ case 7: /* sub: fn - fm */
+ gen_vfp_sub(dp);
+ break;
+ case 8: /* div: fn / fm */
+ gen_vfp_div(dp);
+ break;
+ case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
+ case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
+ case 12: /* VFMA : fd = muladd( fd, fn, fm) */
+ case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
+ /* These are fused multiply-add, and must be done as one
+ * floating point operation with no rounding between the
+ * multiplication and addition steps.
+ * NB that doing the negations here as separate steps is
+                 * correct: an input NaN should come out with its sign bit
+                 * flipped if it is a negated input.
+ */
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
+ return 1;
+ }
+ if (dp) {
+ TCGv_ptr fpst;
+ TCGv_i64 frd;
+ if (op & 1) {
+ /* VFNMS, VFMS */
+ gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
+ }
+ frd = tcg_temp_new_i64();
+ tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
+ if (op & 2) {
+ /* VFNMA, VFNMS */
+ gen_helper_vfp_negd(frd, frd);
+ }
+ fpst = get_fpstatus_ptr(0);
+ gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
+ cpu_F1d, frd, fpst);
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i64(frd);
+ } else {
+ TCGv_ptr fpst;
+ TCGv_i32 frd;
+ if (op & 1) {
+ /* VFNMS, VFMS */
+ gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
+ }
+ frd = tcg_temp_new_i32();
+ tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
+ if (op & 2) {
+ gen_helper_vfp_negs(frd, frd);
+ }
+ fpst = get_fpstatus_ptr(0);
+ gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
+ cpu_F1s, frd, fpst);
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(frd);
+ }
+ break;
+ case 14: /* fconst */
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
+ return 1;
+ }
+
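+                /* Expand the VFP 8-bit immediate (ARM ARM VFPExpandImm):
+                 * the sign comes from insn bit 19, the rest from insn
+                 * [18:16] and [3:0]; only the most significant word of
+                 * the result is non-zero, so it can be built directly.
+                 */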
+ n = (insn << 12) & 0x80000000;
+ i = ((insn >> 12) & 0x70) | (insn & 0xf);
+ if (dp) {
+ if (i & 0x40)
+ i |= 0x3f80;
+ else
+ i |= 0x4000;
+ n |= i << 16;
+ tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
+ } else {
+ if (i & 0x40)
+ i |= 0x780;
+ else
+ i |= 0x800;
+ n |= i << 19;
+ tcg_gen_movi_i32(cpu_F0s, n);
+ }
+ break;
+ case 15: /* extension space */
+ switch (rn) {
+ case 0: /* cpy */
+ /* no-op */
+ break;
+ case 1: /* abs */
+ gen_vfp_abs(dp);
+ break;
+ case 2: /* neg */
+ gen_vfp_neg(dp);
+ break;
+ case 3: /* sqrt */
+ gen_vfp_sqrt(dp);
+ break;
+ case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
+ tmp = gen_vfp_mrs();
+ tcg_gen_ext16u_i32(tmp, tmp);
+ if (dp) {
+ gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
+ cpu_env);
+ } else {
+ gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
+ cpu_env);
+ }
+ tcg_temp_free_i32(tmp);
+ break;
+ case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
+ tmp = gen_vfp_mrs();
+ tcg_gen_shri_i32(tmp, tmp, 16);
+ if (dp) {
+ gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
+ cpu_env);
+ } else {
+ gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
+ cpu_env);
+ }
+ tcg_temp_free_i32(tmp);
+ break;
+ case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
+ tmp = tcg_temp_new_i32();
+ if (dp) {
+ gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
+ cpu_env);
+ } else {
+ gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
+ cpu_env);
+ }
+ gen_mov_F0_vreg(0, rd);
+ tmp2 = gen_vfp_mrs();
+ tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
+ tcg_gen_or_i32(tmp, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ gen_vfp_msr(tmp);
+ break;
+ case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
+ tmp = tcg_temp_new_i32();
+ if (dp) {
+ gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
+ cpu_env);
+ } else {
+ gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
+ cpu_env);
+ }
+ tcg_gen_shli_i32(tmp, tmp, 16);
+ gen_mov_F0_vreg(0, rd);
+ tmp2 = gen_vfp_mrs();
+ tcg_gen_ext16u_i32(tmp2, tmp2);
+ tcg_gen_or_i32(tmp, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ gen_vfp_msr(tmp);
+ break;
+ case 8: /* cmp */
+ gen_vfp_cmp(dp);
+ break;
+ case 9: /* cmpe */
+ gen_vfp_cmpe(dp);
+ break;
+ case 10: /* cmpz */
+ gen_vfp_cmp(dp);
+ break;
+ case 11: /* cmpez */
+ gen_vfp_F1_ld0(dp);
+ gen_vfp_cmpe(dp);
+ break;
+ case 12: /* vrintr */
+ {
+ TCGv_ptr fpst = get_fpstatus_ptr(0);
+ if (dp) {
+ gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
+ } else {
+ gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
+ }
+ tcg_temp_free_ptr(fpst);
+ break;
+ }
+ case 13: /* vrintz */
+ {
+ TCGv_ptr fpst = get_fpstatus_ptr(0);
+ TCGv_i32 tcg_rmode;
+ tcg_rmode = tcg_const_i32(float_round_to_zero);
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+ if (dp) {
+ gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
+ } else {
+ gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
+ }
+ gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
+ tcg_temp_free_i32(tcg_rmode);
+ tcg_temp_free_ptr(fpst);
+ break;
+ }
+ case 14: /* vrintx */
+ {
+ TCGv_ptr fpst = get_fpstatus_ptr(0);
+ if (dp) {
+ gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
+ } else {
+ gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
+ }
+ tcg_temp_free_ptr(fpst);
+ break;
+ }
+ case 15: /* single<->double conversion */
+ if (dp)
+ gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
+ else
+ gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
+ break;
+ case 16: /* fuito */
+ gen_vfp_uito(dp, 0);
+ break;
+ case 17: /* fsito */
+ gen_vfp_sito(dp, 0);
+ break;
+ case 20: /* fshto */
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
+ return 1;
+ }
+ gen_vfp_shto(dp, 16 - rm, 0);
+ break;
+ case 21: /* fslto */
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
+ return 1;
+ }
+ gen_vfp_slto(dp, 32 - rm, 0);
+ break;
+ case 22: /* fuhto */
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
+ return 1;
+ }
+ gen_vfp_uhto(dp, 16 - rm, 0);
+ break;
+ case 23: /* fulto */
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
+ return 1;
+ }
+ gen_vfp_ulto(dp, 32 - rm, 0);
+ break;
+ case 24: /* ftoui */
+ gen_vfp_toui(dp, 0);
+ break;
+ case 25: /* ftouiz */
+ gen_vfp_touiz(dp, 0);
+ break;
+ case 26: /* ftosi */
+ gen_vfp_tosi(dp, 0);
+ break;
+ case 27: /* ftosiz */
+ gen_vfp_tosiz(dp, 0);
+ break;
+ case 28: /* ftosh */
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
+ return 1;
+ }
+ gen_vfp_tosh(dp, 16 - rm, 0);
+ break;
+ case 29: /* ftosl */
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
+ return 1;
+ }
+ gen_vfp_tosl(dp, 32 - rm, 0);
+ break;
+ case 30: /* ftouh */
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
+ return 1;
+ }
+ gen_vfp_touh(dp, 16 - rm, 0);
+ break;
+ case 31: /* ftoul */
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
+ return 1;
+ }
+ gen_vfp_toul(dp, 32 - rm, 0);
+ break;
+ default: /* undefined */
+ return 1;
+ }
+ break;
+ default: /* undefined */
+ return 1;
+ }
+
+ /* Write back the result. */
+ if (op == 15 && (rn >= 8 && rn <= 11)) {
+ /* Comparison, do nothing. */
+ } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
+ (rn & 0x1e) == 0x6)) {
+ /* VCVT double to int: always integer result.
+ * VCVT double to half precision is always a single
+ * precision result.
+ */
+ gen_mov_vreg_F0(0, rd);
+ } else if (op == 15 && rn == 15) {
+ /* conversion */
+ gen_mov_vreg_F0(!dp, rd);
+ } else {
+ gen_mov_vreg_F0(dp, rd);
+ }
+
+ /* break out of the loop if we have finished */
+ if (veclen == 0)
+ break;
+
+ if (op == 15 && delta_m == 0) {
+ /* single source one-many */
+ while (veclen--) {
+ rd = ((rd + delta_d) & (bank_mask - 1))
+ | (rd & bank_mask);
+ gen_mov_vreg_F0(dp, rd);
+ }
+ break;
+ }
+ /* Setup the next operands. */
+ veclen--;
+ rd = ((rd + delta_d) & (bank_mask - 1))
+ | (rd & bank_mask);
+
+ if (op == 15) {
+ /* One source operand. */
+ rm = ((rm + delta_m) & (bank_mask - 1))
+ | (rm & bank_mask);
+ gen_mov_F0_vreg(dp, rm);
+ } else {
+ /* Two source operands. */
+ rn = ((rn + delta_d) & (bank_mask - 1))
+ | (rn & bank_mask);
+ gen_mov_F0_vreg(dp, rn);
+ if (delta_m) {
+ rm = ((rm + delta_m) & (bank_mask - 1))
+ | (rm & bank_mask);
+ gen_mov_F1_vreg(dp, rm);
+ }
+ }
+ }
+ }
+ break;
+ case 0xc:
+ case 0xd:
+ if ((insn & 0x03e00000) == 0x00400000) {
+ /* two-register transfer */
+ rn = (insn >> 16) & 0xf;
+ rd = (insn >> 12) & 0xf;
+ if (dp) {
+ VFP_DREG_M(rm, insn);
+ } else {
+ rm = VFP_SREG_M(insn);
+ }
+
+ if (insn & ARM_CP_RW_BIT) {
+ /* vfp->arm */
+ if (dp) {
+ gen_mov_F0_vreg(0, rm * 2);
+ tmp = gen_vfp_mrs();
+ store_reg(s, rd, tmp);
+ gen_mov_F0_vreg(0, rm * 2 + 1);
+ tmp = gen_vfp_mrs();
+ store_reg(s, rn, tmp);
+ } else {
+ gen_mov_F0_vreg(0, rm);
+ tmp = gen_vfp_mrs();
+ store_reg(s, rd, tmp);
+ gen_mov_F0_vreg(0, rm + 1);
+ tmp = gen_vfp_mrs();
+ store_reg(s, rn, tmp);
+ }
+ } else {
+ /* arm->vfp */
+ if (dp) {
+ tmp = load_reg(s, rd);
+ gen_vfp_msr(tmp);
+ gen_mov_vreg_F0(0, rm * 2);
+ tmp = load_reg(s, rn);
+ gen_vfp_msr(tmp);
+ gen_mov_vreg_F0(0, rm * 2 + 1);
+ } else {
+ tmp = load_reg(s, rd);
+ gen_vfp_msr(tmp);
+ gen_mov_vreg_F0(0, rm);
+ tmp = load_reg(s, rn);
+ gen_vfp_msr(tmp);
+ gen_mov_vreg_F0(0, rm + 1);
+ }
+ }
+ } else {
+ /* Load/store */
+ rn = (insn >> 16) & 0xf;
+ if (dp)
+ VFP_DREG_D(rd, insn);
+ else
+ rd = VFP_SREG_D(insn);
+ if ((insn & 0x01200000) == 0x01000000) {
+ /* Single load/store */
+ offset = (insn & 0xff) << 2;
+ if ((insn & (1 << 23)) == 0)
+ offset = -offset;
+ if (s->thumb && rn == 15) {
+ /* This is actually UNPREDICTABLE */
+ addr = tcg_temp_new_i32();
+ tcg_gen_movi_i32(addr, s->pc & ~2);
+ } else {
+ addr = load_reg(s, rn);
+ }
+ tcg_gen_addi_i32(addr, addr, offset);
+ if (insn & (1 << 20)) {
+ gen_vfp_ld(s, dp, addr);
+ gen_mov_vreg_F0(dp, rd);
+ } else {
+ gen_mov_F0_vreg(dp, rd);
+ gen_vfp_st(s, dp, addr);
+ }
+ tcg_temp_free_i32(addr);
+ } else {
+ /* load/store multiple */
+ int w = insn & (1 << 21);
+ if (dp)
+ n = (insn >> 1) & 0x7f;
+ else
+ n = insn & 0xff;
+
+ if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
+                        /* P == U, W == 1 => UNDEF */
+ return 1;
+ }
+ if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
+ /* UNPREDICTABLE cases for bad immediates: we choose to
+ * UNDEF to avoid generating huge numbers of TCG ops
+ */
+ return 1;
+ }
+ if (rn == 15 && w) {
+ /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
+ return 1;
+ }
+
+ if (s->thumb && rn == 15) {
+ /* This is actually UNPREDICTABLE */
+ addr = tcg_temp_new_i32();
+ tcg_gen_movi_i32(addr, s->pc & ~2);
+ } else {
+ addr = load_reg(s, rn);
+ }
+ if (insn & (1 << 24)) /* pre-decrement */
+ tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
+
+ if (dp)
+ offset = 8;
+ else
+ offset = 4;
+ for (i = 0; i < n; i++) {
+ if (insn & ARM_CP_RW_BIT) {
+ /* load */
+ gen_vfp_ld(s, dp, addr);
+ gen_mov_vreg_F0(dp, rd + i);
+ } else {
+ /* store */
+ gen_mov_F0_vreg(dp, rd + i);
+ gen_vfp_st(s, dp, addr);
+ }
+ tcg_gen_addi_i32(addr, addr, offset);
+ }
+ if (w) {
+ /* writeback */
+ if (insn & (1 << 24))
+ offset = -offset * n;
+ else if (dp && (insn & 1))
+ offset = 4;
+ else
+ offset = 0;
+
+ if (offset != 0)
+ tcg_gen_addi_i32(addr, addr, offset);
+ store_reg(s, rn, addr);
+ } else {
+ tcg_temp_free_i32(addr);
+ }
+ }
+ }
+ break;
+ default:
+ /* Should never happen. */
+ return 1;
+ }
+ return 0;
+}
+
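+/* In system mode, only chain TBs directly when the destination lies on the
+ * same guest page as the start of this TB or as the current insn, so that
+ * invalidating the page also covers the chained jump; user-mode emulation
+ * has no such restriction.
+ */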
+static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
+{
+#ifndef CONFIG_USER_ONLY
+ return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
+ ((s->pc - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
+#else
+ return true;
+#endif
+}
+
+static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
+{
+ if (use_goto_tb(s, dest)) {
+ tcg_gen_goto_tb(n);
+ gen_set_pc_im(s, dest);
+ tcg_gen_exit_tb((uintptr_t)s->tb + n);
+ } else {
+ gen_set_pc_im(s, dest);
+ tcg_gen_exit_tb(0);
+ }
+}
+
+static inline void gen_jmp (DisasContext *s, uint32_t dest)
+{
+ if (unlikely(s->singlestep_enabled || s->ss_active)) {
+ /* An indirect jump so that we still trigger the debug exception. */
+ if (s->thumb)
+ dest |= 1;
+ gen_bx_im(s, dest);
+ } else {
+ gen_goto_tb(s, 0, dest);
+ s->is_jmp = DISAS_TB_JUMP;
+ }
+}
+
+static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
+{
+ if (x)
+ tcg_gen_sari_i32(t0, t0, 16);
+ else
+ gen_sxth(t0);
+ if (y)
+ tcg_gen_sari_i32(t1, t1, 16);
+ else
+ gen_sxth(t1);
+ tcg_gen_mul_i32(t0, t0, t1);
+}
+
+/* Return the mask of PSR bits set by a MSR instruction. */
+static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
+{
+ uint32_t mask;
+
+ mask = 0;
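+    /* flags holds the MSR field mask: bit 0 = c (PSR[7:0]), bit 1 = x
+     * ([15:8]), bit 2 = s ([23:16]), bit 3 = f ([31:24]).
+     */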
+ if (flags & (1 << 0))
+ mask |= 0xff;
+ if (flags & (1 << 1))
+ mask |= 0xff00;
+ if (flags & (1 << 2))
+ mask |= 0xff0000;
+ if (flags & (1 << 3))
+ mask |= 0xff000000;
+
+ /* Mask out undefined bits. */
+ mask &= ~CPSR_RESERVED;
+ if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
+ mask &= ~CPSR_T;
+ }
+ if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
+        mask &= ~CPSR_Q; /* V5TE in reality */
+ }
+ if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
+ mask &= ~(CPSR_E | CPSR_GE);
+ }
+ if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
+ mask &= ~CPSR_IT;
+ }
+ /* Mask out execution state and reserved bits. */
+ if (!spsr) {
+ mask &= ~(CPSR_EXEC | CPSR_RESERVED);
+ }
+ /* Mask out privileged bits. */
+ if (IS_USER(s))
+ mask &= CPSR_USER;
+ return mask;
+}
+
+/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
+static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
+{
+ TCGv_i32 tmp;
+ if (spsr) {
+ /* ??? This is also undefined in system mode. */
+ if (IS_USER(s))
+ return 1;
+
+ tmp = load_cpu_field(spsr);
+ tcg_gen_andi_i32(tmp, tmp, ~mask);
+ tcg_gen_andi_i32(t0, t0, mask);
+ tcg_gen_or_i32(tmp, tmp, t0);
+ store_cpu_field(tmp, spsr);
+ } else {
+ gen_set_cpsr(t0, mask);
+ }
+ tcg_temp_free_i32(t0);
+ gen_lookup_tb(s);
+ return 0;
+}
+
+/* Returns nonzero if access to the PSR is not permitted. */
+static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
+{
+ TCGv_i32 tmp;
+ tmp = tcg_temp_new_i32();
+ tcg_gen_movi_i32(tmp, val);
+ return gen_set_psr(s, mask, spsr, tmp);
+}
+
+static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
+ int *tgtmode, int *regno)
+{
+ /* Decode the r and sysm fields of MSR/MRS banked accesses into
+ * the target mode and register number, and identify the various
+ * unpredictable cases.
+ * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
+ * + executed in user mode
+ * + using R15 as the src/dest register
+ * + accessing an unimplemented register
+ * + accessing a register that's inaccessible at current PL/security state*
+ * + accessing a register that you could access with a different insn
+ * We choose to UNDEF in all these cases.
+     * Since we don't know which of the various AArch32 modes we are in,
+ * we have to defer some checks to runtime.
+ * Accesses to Monitor mode registers from Secure EL1 (which implies
+ * that EL3 is AArch64) must trap to EL3.
+ *
+ * If the access checks fail this function will emit code to take
+ * an exception and return false. Otherwise it will return true,
+ * and set *tgtmode and *regno appropriately.
+ */
+ int exc_target = default_exception_el(s);
+
+ /* These instructions are present only in ARMv8, or in ARMv7 with the
+ * Virtualization Extensions.
+ */
+ if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
+ !arm_dc_feature(s, ARM_FEATURE_EL2)) {
+ goto undef;
+ }
+
+ if (IS_USER(s) || rn == 15) {
+ goto undef;
+ }
+
+ /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
+ * of registers into (r, sysm).
+ */
+ if (r) {
+ /* SPSRs for other modes */
+ switch (sysm) {
+ case 0xe: /* SPSR_fiq */
+ *tgtmode = ARM_CPU_MODE_FIQ;
+ break;
+ case 0x10: /* SPSR_irq */
+ *tgtmode = ARM_CPU_MODE_IRQ;
+ break;
+ case 0x12: /* SPSR_svc */
+ *tgtmode = ARM_CPU_MODE_SVC;
+ break;
+ case 0x14: /* SPSR_abt */
+ *tgtmode = ARM_CPU_MODE_ABT;
+ break;
+ case 0x16: /* SPSR_und */
+ *tgtmode = ARM_CPU_MODE_UND;
+ break;
+ case 0x1c: /* SPSR_mon */
+ *tgtmode = ARM_CPU_MODE_MON;
+ break;
+ case 0x1e: /* SPSR_hyp */
+ *tgtmode = ARM_CPU_MODE_HYP;
+ break;
+ default: /* unallocated */
+ goto undef;
+ }
+ /* We arbitrarily assign SPSR a register number of 16. */
+ *regno = 16;
+ } else {
+ /* general purpose registers for other modes */
+ switch (sysm) {
+ case 0x0 ... 0x6: /* 0b00xxx : r8_usr ... r14_usr */
+ *tgtmode = ARM_CPU_MODE_USR;
+ *regno = sysm + 8;
+ break;
+ case 0x8 ... 0xe: /* 0b01xxx : r8_fiq ... r14_fiq */
+ *tgtmode = ARM_CPU_MODE_FIQ;
+ *regno = sysm;
+ break;
+ case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
+ *tgtmode = ARM_CPU_MODE_IRQ;
+ *regno = sysm & 1 ? 13 : 14;
+ break;
+ case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
+ *tgtmode = ARM_CPU_MODE_SVC;
+ *regno = sysm & 1 ? 13 : 14;
+ break;
+ case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
+ *tgtmode = ARM_CPU_MODE_ABT;
+ *regno = sysm & 1 ? 13 : 14;
+ break;
+ case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
+ *tgtmode = ARM_CPU_MODE_UND;
+ *regno = sysm & 1 ? 13 : 14;
+ break;
+ case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
+ *tgtmode = ARM_CPU_MODE_MON;
+ *regno = sysm & 1 ? 13 : 14;
+ break;
+ case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
+ *tgtmode = ARM_CPU_MODE_HYP;
+ /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
+ *regno = sysm & 1 ? 13 : 17;
+ break;
+ default: /* unallocated */
+ goto undef;
+ }
+ }
+
+ /* Catch the 'accessing inaccessible register' cases we can detect
+ * at translate time.
+ */
+ switch (*tgtmode) {
+ case ARM_CPU_MODE_MON:
+ if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
+ goto undef;
+ }
+ if (s->current_el == 1) {
+ /* If we're in Secure EL1 (which implies that EL3 is AArch64)
+ * then accesses to Mon registers trap to EL3
+ */
+ exc_target = 3;
+ goto undef;
+ }
+ break;
+ case ARM_CPU_MODE_HYP:
+ /* Note that we can forbid accesses from EL2 here because they
+ * must be from Hyp mode itself
+ */
+ if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 3) {
+ goto undef;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return true;
+
+undef:
+ /* If we get here then some access check did not pass */
+ gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), exc_target);
+ return false;
+}
+
+static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
+{
+ TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
+ int tgtmode = 0, regno = 0;
+
+ if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
+ return;
+ }
+
+ /* Sync state because msr_banked() can raise exceptions */
+ gen_set_condexec(s);
+ gen_set_pc_im(s, s->pc - 4);
+ tcg_reg = load_reg(s, rn);
+ tcg_tgtmode = tcg_const_i32(tgtmode);
+ tcg_regno = tcg_const_i32(regno);
+ gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
+ tcg_temp_free_i32(tcg_tgtmode);
+ tcg_temp_free_i32(tcg_regno);
+ tcg_temp_free_i32(tcg_reg);
+ s->is_jmp = DISAS_UPDATE;
+}
+
+static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
+{
+ TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
+ int tgtmode = 0, regno = 0;
+
+ if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
+ return;
+ }
+
+ /* Sync state because mrs_banked() can raise exceptions */
+ gen_set_condexec(s);
+ gen_set_pc_im(s, s->pc - 4);
+ tcg_reg = tcg_temp_new_i32();
+ tcg_tgtmode = tcg_const_i32(tgtmode);
+ tcg_regno = tcg_const_i32(regno);
+ gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
+ tcg_temp_free_i32(tcg_tgtmode);
+ tcg_temp_free_i32(tcg_regno);
+ store_reg(s, rn, tcg_reg);
+ s->is_jmp = DISAS_UPDATE;
+}
+
+/* Store a value to the PC as for an exception return (i.e. don't
+ * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
+ * will do the masking based on the new value of the Thumb bit.
+ */
+static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
+{
+ tcg_gen_mov_i32(cpu_R[15], pc);
+ tcg_temp_free_i32(pc);
+}
+
+/* Generate a v6 exception return. Marks both values as dead. */
+static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
+{
+ store_pc_exc_ret(s, pc);
+ /* The cpsr_write_eret helper will mask the low bits of PC
+ * appropriately depending on the new Thumb bit, so it must
+ * be called after storing the new PC.
+ */
+ gen_helper_cpsr_write_eret(cpu_env, cpsr);
+ tcg_temp_free_i32(cpsr);
+ s->is_jmp = DISAS_JUMP;
+}
+
+/* Generate an old-style exception return. Marks pc as dead. */
+static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
+{
+ gen_rfe(s, pc, load_cpu_field(spsr));
+}
+
+static void gen_nop_hint(DisasContext *s, int val)
+{
+ switch (val) {
+ case 1: /* yield */
+ gen_set_pc_im(s, s->pc);
+ s->is_jmp = DISAS_YIELD;
+ break;
+ case 3: /* wfi */
+ gen_set_pc_im(s, s->pc);
+ s->is_jmp = DISAS_WFI;
+ break;
+ case 2: /* wfe */
+ gen_set_pc_im(s, s->pc);
+ s->is_jmp = DISAS_WFE;
+ break;
+ case 4: /* sev */
+ case 5: /* sevl */
+ /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
+ default: /* nop */
+ break;
+ }
+}
+
+#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
+
+static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
+{
+ switch (size) {
+ case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
+ case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
+ case 2: tcg_gen_add_i32(t0, t0, t1); break;
+ default: abort();
+ }
+}
+
+static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
+{
+ switch (size) {
+ case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
+ case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
+ case 2: tcg_gen_sub_i32(t0, t1, t0); break;
+ default: return;
+ }
+}
+
+/* 32-bit pairwise ops end up the same as the elementwise versions. */
+#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
+#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
+#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
+#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
+
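+/* Emit the Neon integer helper call selected by element size and signedness:
+ * the (size << 1) | u key picks the s8/u8/s16/u16/s32/u32 variant of the
+ * named helper.
+ */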
+#define GEN_NEON_INTEGER_OP_ENV(name) do { \
+ switch ((size << 1) | u) { \
+ case 0: \
+ gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
+ break; \
+ case 1: \
+ gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
+ break; \
+ case 2: \
+ gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
+ break; \
+ case 3: \
+ gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
+ break; \
+ case 4: \
+ gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
+ break; \
+ case 5: \
+ gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
+ break; \
+ default: return 1; \
+ }} while (0)
+
+#define GEN_NEON_INTEGER_OP(name) do { \
+ switch ((size << 1) | u) { \
+ case 0: \
+ gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
+ break; \
+ case 1: \
+ gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
+ break; \
+ case 2: \
+ gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
+ break; \
+ case 3: \
+ gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
+ break; \
+ case 4: \
+ gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
+ break; \
+ case 5: \
+ gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
+ break; \
+ default: return 1; \
+ }} while (0)
+
+static TCGv_i32 neon_load_scratch(int scratch)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
+ return tmp;
+}
+
+static void neon_store_scratch(int scratch, TCGv_i32 var)
+{
+ tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
+ tcg_temp_free_i32(var);
+}
+
+static inline TCGv_i32 neon_get_scalar(int size, int reg)
+{
+ TCGv_i32 tmp;
+ if (size == 1) {
+ tmp = neon_load_reg(reg & 7, reg >> 4);
+ if (reg & 8) {
+ gen_neon_dup_high16(tmp);
+ } else {
+ gen_neon_dup_low16(tmp);
+ }
+ } else {
+ tmp = neon_load_reg(reg & 15, reg >> 4);
+ }
+ return tmp;
+}
+
+static int gen_neon_unzip(int rd, int rm, int size, int q)
+{
+ TCGv_i32 tmp, tmp2;
+ if (!q && size == 2) {
+ return 1;
+ }
+ tmp = tcg_const_i32(rd);
+ tmp2 = tcg_const_i32(rm);
+ if (q) {
+ switch (size) {
+ case 0:
+ gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
+ break;
+ case 1:
+ gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
+ break;
+ case 2:
+ gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
+ break;
+ default:
+ abort();
+ }
+ } else {
+ switch (size) {
+ case 0:
+ gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
+ break;
+ case 1:
+ gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
+ break;
+ default:
+ abort();
+ }
+ }
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp2);
+ return 0;
+}
+
+static int gen_neon_zip(int rd, int rm, int size, int q)
+{
+ TCGv_i32 tmp, tmp2;
+ if (!q && size == 2) {
+ return 1;
+ }
+ tmp = tcg_const_i32(rd);
+ tmp2 = tcg_const_i32(rm);
+ if (q) {
+ switch (size) {
+ case 0:
+ gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
+ break;
+ case 1:
+ gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
+ break;
+ case 2:
+ gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
+ break;
+ default:
+ abort();
+ }
+ } else {
+ switch (size) {
+ case 0:
+ gen_helper_neon_zip8(cpu_env, tmp, tmp2);
+ break;
+ case 1:
+ gen_helper_neon_zip16(cpu_env, tmp, tmp2);
+ break;
+ default:
+ abort();
+ }
+ }
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp2);
+ return 0;
+}
+
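+/* Byte-lane transpose step on 32-bit chunks, used to implement VTRN.8. */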
+static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
+{
+ TCGv_i32 rd, tmp;
+
+ rd = tcg_temp_new_i32();
+ tmp = tcg_temp_new_i32();
+
+ tcg_gen_shli_i32(rd, t0, 8);
+ tcg_gen_andi_i32(rd, rd, 0xff00ff00);
+ tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
+ tcg_gen_or_i32(rd, rd, tmp);
+
+ tcg_gen_shri_i32(t1, t1, 8);
+ tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
+ tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
+ tcg_gen_or_i32(t1, t1, tmp);
+ tcg_gen_mov_i32(t0, rd);
+
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(rd);
+}
+
+static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
+{
+ TCGv_i32 rd, tmp;
+
+ rd = tcg_temp_new_i32();
+ tmp = tcg_temp_new_i32();
+
+ tcg_gen_shli_i32(rd, t0, 16);
+ tcg_gen_andi_i32(tmp, t1, 0xffff);
+ tcg_gen_or_i32(rd, rd, tmp);
+ tcg_gen_shri_i32(t1, t1, 16);
+ tcg_gen_andi_i32(tmp, t0, 0xffff0000);
+ tcg_gen_or_i32(t1, t1, tmp);
+ tcg_gen_mov_i32(t0, rd);
+
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(rd);
+}
+
+
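+/* Decode table for the Neon "load/store multiple structures" forms, indexed
+ * by the type field (insn bits [11:8]): number of registers transferred,
+ * interleave factor and register spacing.
+ */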
+static struct {
+ int nregs;
+ int interleave;
+ int spacing;
+} neon_ls_element_type[11] = {
+ {4, 4, 1},
+ {4, 4, 2},
+ {4, 1, 1},
+ {4, 2, 1},
+ {3, 3, 1},
+ {3, 3, 2},
+ {3, 1, 1},
+ {1, 1, 1},
+ {2, 2, 1},
+ {2, 2, 2},
+ {2, 1, 1}
+};
+
+/* Translate a NEON load/store element instruction. Return nonzero if the
+ instruction is invalid. */
+static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
+{
+ int rd, rn, rm;
+ int op;
+ int nregs;
+ int interleave;
+ int spacing;
+ int stride;
+ int size;
+ int reg;
+ int pass;
+ int load;
+ int shift;
+ int n;
+ TCGv_i32 addr;
+ TCGv_i32 tmp;
+ TCGv_i32 tmp2;
+ TCGv_i64 tmp64;
+
+ /* FIXME: this access check should not take precedence over UNDEF
+ * for invalid encodings; we will generate incorrect syndrome information
+ * for attempts to execute invalid vfp/neon encodings with FP disabled.
+ */
+ if (s->fp_excp_el) {
+ gen_exception_insn(s, 4, EXCP_UDEF,
+ syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
+ return 0;
+ }
+
+ if (!s->vfp_enabled)
+ return 1;
+ VFP_DREG_D(rd, insn);
+ rn = (insn >> 16) & 0xf;
+ rm = insn & 0xf;
+ load = (insn & (1 << 21)) != 0;
+ if ((insn & (1 << 23)) == 0) {
+ /* Load store all elements. */
+ op = (insn >> 8) & 0xf;
+ size = (insn >> 6) & 3;
+ if (op > 10)
+ return 1;
+ /* Catch UNDEF cases for bad values of align field */
+ switch (op & 0xc) {
+ case 4:
+ if (((insn >> 5) & 1) == 1) {
+ return 1;
+ }
+ break;
+ case 8:
+ if (((insn >> 4) & 3) == 3) {
+ return 1;
+ }
+ break;
+ default:
+ break;
+ }
+ nregs = neon_ls_element_type[op].nregs;
+ interleave = neon_ls_element_type[op].interleave;
+ spacing = neon_ls_element_type[op].spacing;
+ if (size == 3 && (interleave | spacing) != 1)
+ return 1;
+ addr = tcg_temp_new_i32();
+ load_reg_var(s, addr, rn);
+ stride = (1 << size) * interleave;
+ for (reg = 0; reg < nregs; reg++) {
+ if (interleave > 2 || (interleave == 2 && nregs == 2)) {
+ load_reg_var(s, addr, rn);
+ tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
+ } else if (interleave == 2 && nregs == 4 && reg == 2) {
+ load_reg_var(s, addr, rn);
+ tcg_gen_addi_i32(addr, addr, 1 << size);
+ }
+ if (size == 3) {
+ tmp64 = tcg_temp_new_i64();
+ if (load) {
+ gen_aa32_ld64(s, tmp64, addr, get_mem_index(s));
+ neon_store_reg64(tmp64, rd);
+ } else {
+ neon_load_reg64(tmp64, rd);
+ gen_aa32_st64(s, tmp64, addr, get_mem_index(s));
+ }
+ tcg_temp_free_i64(tmp64);
+ tcg_gen_addi_i32(addr, addr, stride);
+ } else {
+ for (pass = 0; pass < 2; pass++) {
+ if (size == 2) {
+ if (load) {
+ tmp = tcg_temp_new_i32();
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
+ neon_store_reg(rd, pass, tmp);
+ } else {
+ tmp = neon_load_reg(rd, pass);
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
+ tcg_temp_free_i32(tmp);
+ }
+ tcg_gen_addi_i32(addr, addr, stride);
+ } else if (size == 1) {
+ if (load) {
+ tmp = tcg_temp_new_i32();
+ gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
+ tcg_gen_addi_i32(addr, addr, stride);
+ tmp2 = tcg_temp_new_i32();
+ gen_aa32_ld16u(s, tmp2, addr, get_mem_index(s));
+ tcg_gen_addi_i32(addr, addr, stride);
+ tcg_gen_shli_i32(tmp2, tmp2, 16);
+ tcg_gen_or_i32(tmp, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ neon_store_reg(rd, pass, tmp);
+ } else {
+ tmp = neon_load_reg(rd, pass);
+ tmp2 = tcg_temp_new_i32();
+ tcg_gen_shri_i32(tmp2, tmp, 16);
+ gen_aa32_st16(s, tmp, addr, get_mem_index(s));
+ tcg_temp_free_i32(tmp);
+ tcg_gen_addi_i32(addr, addr, stride);
+ gen_aa32_st16(s, tmp2, addr, get_mem_index(s));
+ tcg_temp_free_i32(tmp2);
+ tcg_gen_addi_i32(addr, addr, stride);
+ }
+ } else /* size == 0 */ {
+ if (load) {
+ TCGV_UNUSED_I32(tmp2);
+ for (n = 0; n < 4; n++) {
+ tmp = tcg_temp_new_i32();
+ gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
+ tcg_gen_addi_i32(addr, addr, stride);
+ if (n == 0) {
+ tmp2 = tmp;
+ } else {
+ tcg_gen_shli_i32(tmp, tmp, n * 8);
+ tcg_gen_or_i32(tmp2, tmp2, tmp);
+ tcg_temp_free_i32(tmp);
+ }
+ }
+ neon_store_reg(rd, pass, tmp2);
+ } else {
+ tmp2 = neon_load_reg(rd, pass);
+ for (n = 0; n < 4; n++) {
+ tmp = tcg_temp_new_i32();
+ if (n == 0) {
+ tcg_gen_mov_i32(tmp, tmp2);
+ } else {
+ tcg_gen_shri_i32(tmp, tmp2, n * 8);
+ }
+ gen_aa32_st8(s, tmp, addr, get_mem_index(s));
+ tcg_temp_free_i32(tmp);
+ tcg_gen_addi_i32(addr, addr, stride);
+ }
+ tcg_temp_free_i32(tmp2);
+ }
+ }
+ }
+ }
+ rd += spacing;
+ }
+ tcg_temp_free_i32(addr);
+ stride = nregs * 8;
+ } else {
+ size = (insn >> 10) & 3;
+ if (size == 3) {
+ /* Load single element to all lanes. */
+ int a = (insn >> 4) & 1;
+ if (!load) {
+ return 1;
+ }
+ size = (insn >> 6) & 3;
+ nregs = ((insn >> 8) & 3) + 1;
+
+ if (size == 3) {
+ if (nregs != 4 || a == 0) {
+ return 1;
+ }
+                /* For VLD4 size == 3, a == 1 means 32 bits at 16-byte alignment */
+ size = 2;
+ }
+ if (nregs == 1 && a == 1 && size == 0) {
+ return 1;
+ }
+ if (nregs == 3 && a == 1) {
+ return 1;
+ }
+ addr = tcg_temp_new_i32();
+ load_reg_var(s, addr, rn);
+ if (nregs == 1) {
+ /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
+ tmp = gen_load_and_replicate(s, addr, size);
+ tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
+ tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
+ if (insn & (1 << 5)) {
+ tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
+ tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
+ }
+ tcg_temp_free_i32(tmp);
+ } else {
+ /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
+ stride = (insn & (1 << 5)) ? 2 : 1;
+ for (reg = 0; reg < nregs; reg++) {
+ tmp = gen_load_and_replicate(s, addr, size);
+ tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
+ tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
+ tcg_temp_free_i32(tmp);
+ tcg_gen_addi_i32(addr, addr, 1 << size);
+ rd += stride;
+ }
+ }
+ tcg_temp_free_i32(addr);
+ stride = (1 << size) * nregs;
+ } else {
+ /* Single element. */
+ int idx = (insn >> 4) & 0xf;
+ pass = (insn >> 7) & 1;
+ switch (size) {
+ case 0:
+ shift = ((insn >> 5) & 3) * 8;
+ stride = 1;
+ break;
+ case 1:
+ shift = ((insn >> 6) & 1) * 16;
+ stride = (insn & (1 << 5)) ? 2 : 1;
+ break;
+ case 2:
+ shift = 0;
+ stride = (insn & (1 << 6)) ? 2 : 1;
+ break;
+ default:
+ abort();
+ }
+ nregs = ((insn >> 8) & 3) + 1;
+ /* Catch the UNDEF cases. This is unavoidably a bit messy. */
+ switch (nregs) {
+ case 1:
+ if (((idx & (1 << size)) != 0) ||
+ (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
+ return 1;
+ }
+ break;
+ case 3:
+ if ((idx & 1) != 0) {
+ return 1;
+ }
+ /* fall through */
+ case 2:
+ if (size == 2 && (idx & 2) != 0) {
+ return 1;
+ }
+ break;
+ case 4:
+ if ((size == 2) && ((idx & 3) == 3)) {
+ return 1;
+ }
+ break;
+ default:
+ abort();
+ }
+ if ((rd + stride * (nregs - 1)) > 31) {
+ /* Attempts to write off the end of the register file
+ * are UNPREDICTABLE; we choose to UNDEF because otherwise
+ * the neon_load_reg() would write off the end of the array.
+ */
+ return 1;
+ }
+ addr = tcg_temp_new_i32();
+ load_reg_var(s, addr, rn);
+ for (reg = 0; reg < nregs; reg++) {
+ if (load) {
+ tmp = tcg_temp_new_i32();
+ switch (size) {
+ case 0:
+ gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
+ break;
+ case 1:
+ gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
+ break;
+ case 2:
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
+ break;
+ default: /* Avoid compiler warnings. */
+ abort();
+ }
+ if (size != 2) {
+ tmp2 = neon_load_reg(rd, pass);
+ tcg_gen_deposit_i32(tmp, tmp2, tmp,
+ shift, size ? 16 : 8);
+ tcg_temp_free_i32(tmp2);
+ }
+ neon_store_reg(rd, pass, tmp);
+ } else { /* Store */
+ tmp = neon_load_reg(rd, pass);
+ if (shift)
+ tcg_gen_shri_i32(tmp, tmp, shift);
+ switch (size) {
+ case 0:
+ gen_aa32_st8(s, tmp, addr, get_mem_index(s));
+ break;
+ case 1:
+ gen_aa32_st16(s, tmp, addr, get_mem_index(s));
+ break;
+ case 2:
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
+ break;
+ }
+ tcg_temp_free_i32(tmp);
+ }
+ rd += stride;
+ tcg_gen_addi_i32(addr, addr, 1 << size);
+ }
+ tcg_temp_free_i32(addr);
+ stride = nregs * (1 << size);
+ }
+ }
+ if (rm != 15) {
+ TCGv_i32 base;
+
+ base = load_reg(s, rn);
+ if (rm == 13) {
+ tcg_gen_addi_i32(base, base, stride);
+ } else {
+ TCGv_i32 index;
+ index = load_reg(s, rm);
+ tcg_gen_add_i32(base, base, index);
+ tcg_temp_free_i32(index);
+ }
+ store_reg(s, rn, base);
+ }
+ return 0;
+}
+
+/* Bitwise select. dest = c ? t : f. Clobbers t and f. */
+static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
+{
+ tcg_gen_and_i32(t, t, c);
+ tcg_gen_andc_i32(f, f, c);
+ tcg_gen_or_i32(dest, t, f);
+}
+
+static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
+{
+ switch (size) {
+ case 0: gen_helper_neon_narrow_u8(dest, src); break;
+ case 1: gen_helper_neon_narrow_u16(dest, src); break;
+ case 2: tcg_gen_extrl_i64_i32(dest, src); break;
+ default: abort();
+ }
+}
+
+static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
+{
+ switch (size) {
+ case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
+ case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
+ case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
+ default: abort();
+ }
+}
+
+static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
+{
+ switch (size) {
+ case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
+ case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
+ case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
+ default: abort();
+ }
+}
+
+static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
+{
+ switch (size) {
+ case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
+ case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
+ case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
+ default: abort();
+ }
+}
+
+static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
+ int q, int u)
+{
+ if (q) {
+ if (u) {
+ switch (size) {
+ case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
+ case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
+ default: abort();
+ }
+ } else {
+ switch (size) {
+ case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
+ case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
+ default: abort();
+ }
+ }
+ } else {
+ if (u) {
+ switch (size) {
+ case 1: gen_helper_neon_shl_u16(var, var, shift); break;
+ case 2: gen_helper_neon_shl_u32(var, var, shift); break;
+ default: abort();
+ }
+ } else {
+ switch (size) {
+ case 1: gen_helper_neon_shl_s16(var, var, shift); break;
+ case 2: gen_helper_neon_shl_s32(var, var, shift); break;
+ default: abort();
+ }
+ }
+ }
+}
+
+static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
+{
+ if (u) {
+ switch (size) {
+ case 0: gen_helper_neon_widen_u8(dest, src); break;
+ case 1: gen_helper_neon_widen_u16(dest, src); break;
+ case 2: tcg_gen_extu_i32_i64(dest, src); break;
+ default: abort();
+ }
+ } else {
+ switch (size) {
+ case 0: gen_helper_neon_widen_s8(dest, src); break;
+ case 1: gen_helper_neon_widen_s16(dest, src); break;
+ case 2: tcg_gen_ext_i32_i64(dest, src); break;
+ default: abort();
+ }
+ }
+ tcg_temp_free_i32(src);
+}
+
+static inline void gen_neon_addl(int size)
+{
+ switch (size) {
+ case 0: gen_helper_neon_addl_u16(CPU_V001); break;
+ case 1: gen_helper_neon_addl_u32(CPU_V001); break;
+ case 2: tcg_gen_add_i64(CPU_V001); break;
+ default: abort();
+ }
+}
+
+static inline void gen_neon_subl(int size)
+{
+ switch (size) {
+ case 0: gen_helper_neon_subl_u16(CPU_V001); break;
+ case 1: gen_helper_neon_subl_u32(CPU_V001); break;
+ case 2: tcg_gen_sub_i64(CPU_V001); break;
+ default: abort();
+ }
+}
+
+static inline void gen_neon_negl(TCGv_i64 var, int size)
+{
+ switch (size) {
+ case 0: gen_helper_neon_negl_u16(var, var); break;
+ case 1: gen_helper_neon_negl_u32(var, var); break;
+ case 2:
+ tcg_gen_neg_i64(var, var);
+ break;
+ default: abort();
+ }
+}
+
+static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
+{
+ switch (size) {
+ case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
+ case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
+ default: abort();
+ }
+}
+
+static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
+ int size, int u)
+{
+ TCGv_i64 tmp;
+
+ switch ((size << 1) | u) {
+ case 0: gen_helper_neon_mull_s8(dest, a, b); break;
+ case 1: gen_helper_neon_mull_u8(dest, a, b); break;
+ case 2: gen_helper_neon_mull_s16(dest, a, b); break;
+ case 3: gen_helper_neon_mull_u16(dest, a, b); break;
+ case 4:
+ tmp = gen_muls_i64_i32(a, b);
+ tcg_gen_mov_i64(dest, tmp);
+ tcg_temp_free_i64(tmp);
+ break;
+ case 5:
+ tmp = gen_mulu_i64_i32(a, b);
+ tcg_gen_mov_i64(dest, tmp);
+ tcg_temp_free_i64(tmp);
+ break;
+ default: abort();
+ }
+
+ /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
+ Don't forget to clean them now. */
+ if (size < 2) {
+ tcg_temp_free_i32(a);
+ tcg_temp_free_i32(b);
+ }
+}
+
+static void gen_neon_narrow_op(int op, int u, int size,
+ TCGv_i32 dest, TCGv_i64 src)
+{
+ if (op) {
+ if (u) {
+ gen_neon_unarrow_sats(size, dest, src);
+ } else {
+ gen_neon_narrow(size, dest, src);
+ }
+ } else {
+ if (u) {
+ gen_neon_narrow_satu(size, dest, src);
+ } else {
+ gen_neon_narrow_sats(size, dest, src);
+ }
+ }
+}
+
+/* Symbolic constants for op fields for Neon 3-register same-length.
+ * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
+ * table A7-9.
+ */
+#define NEON_3R_VHADD 0
+#define NEON_3R_VQADD 1
+#define NEON_3R_VRHADD 2
+#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
+#define NEON_3R_VHSUB 4
+#define NEON_3R_VQSUB 5
+#define NEON_3R_VCGT 6
+#define NEON_3R_VCGE 7
+#define NEON_3R_VSHL 8
+#define NEON_3R_VQSHL 9
+#define NEON_3R_VRSHL 10
+#define NEON_3R_VQRSHL 11
+#define NEON_3R_VMAX 12
+#define NEON_3R_VMIN 13
+#define NEON_3R_VABD 14
+#define NEON_3R_VABA 15
+#define NEON_3R_VADD_VSUB 16
+#define NEON_3R_VTST_VCEQ 17
+#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
+#define NEON_3R_VMUL 19
+#define NEON_3R_VPMAX 20
+#define NEON_3R_VPMIN 21
+#define NEON_3R_VQDMULH_VQRDMULH 22
+#define NEON_3R_VPADD 23
+#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
+#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
+#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
+#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
+#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
+#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
+#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
+#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */
+
+static const uint8_t neon_3r_sizes[] = {
+ [NEON_3R_VHADD] = 0x7,
+ [NEON_3R_VQADD] = 0xf,
+ [NEON_3R_VRHADD] = 0x7,
+ [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
+ [NEON_3R_VHSUB] = 0x7,
+ [NEON_3R_VQSUB] = 0xf,
+ [NEON_3R_VCGT] = 0x7,
+ [NEON_3R_VCGE] = 0x7,
+ [NEON_3R_VSHL] = 0xf,
+ [NEON_3R_VQSHL] = 0xf,
+ [NEON_3R_VRSHL] = 0xf,
+ [NEON_3R_VQRSHL] = 0xf,
+ [NEON_3R_VMAX] = 0x7,
+ [NEON_3R_VMIN] = 0x7,
+ [NEON_3R_VABD] = 0x7,
+ [NEON_3R_VABA] = 0x7,
+ [NEON_3R_VADD_VSUB] = 0xf,
+ [NEON_3R_VTST_VCEQ] = 0x7,
+ [NEON_3R_VML] = 0x7,
+ [NEON_3R_VMUL] = 0x7,
+ [NEON_3R_VPMAX] = 0x7,
+ [NEON_3R_VPMIN] = 0x7,
+ [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
+ [NEON_3R_VPADD] = 0x7,
+ [NEON_3R_SHA] = 0xf, /* size field encodes op type */
+ [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
+ [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
+ [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
+ [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
+ [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
+ [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
+ [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
+};
+
+/* Symbolic constants for op fields for Neon 2-register miscellaneous.
+ * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
+ * table A7-13.
+ */
+#define NEON_2RM_VREV64 0
+#define NEON_2RM_VREV32 1
+#define NEON_2RM_VREV16 2
+#define NEON_2RM_VPADDL 4
+#define NEON_2RM_VPADDL_U 5
+#define NEON_2RM_AESE 6 /* Includes AESD */
+#define NEON_2RM_AESMC 7 /* Includes AESIMC */
+#define NEON_2RM_VCLS 8
+#define NEON_2RM_VCLZ 9
+#define NEON_2RM_VCNT 10
+#define NEON_2RM_VMVN 11
+#define NEON_2RM_VPADAL 12
+#define NEON_2RM_VPADAL_U 13
+#define NEON_2RM_VQABS 14
+#define NEON_2RM_VQNEG 15
+#define NEON_2RM_VCGT0 16
+#define NEON_2RM_VCGE0 17
+#define NEON_2RM_VCEQ0 18
+#define NEON_2RM_VCLE0 19
+#define NEON_2RM_VCLT0 20
+#define NEON_2RM_SHA1H 21
+#define NEON_2RM_VABS 22
+#define NEON_2RM_VNEG 23
+#define NEON_2RM_VCGT0_F 24
+#define NEON_2RM_VCGE0_F 25
+#define NEON_2RM_VCEQ0_F 26
+#define NEON_2RM_VCLE0_F 27
+#define NEON_2RM_VCLT0_F 28
+#define NEON_2RM_VABS_F 30
+#define NEON_2RM_VNEG_F 31
+#define NEON_2RM_VSWP 32
+#define NEON_2RM_VTRN 33
+#define NEON_2RM_VUZP 34
+#define NEON_2RM_VZIP 35
+#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
+#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
+#define NEON_2RM_VSHLL 38
+#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
+#define NEON_2RM_VRINTN 40
+#define NEON_2RM_VRINTX 41
+#define NEON_2RM_VRINTA 42
+#define NEON_2RM_VRINTZ 43
+#define NEON_2RM_VCVT_F16_F32 44
+#define NEON_2RM_VRINTM 45
+#define NEON_2RM_VCVT_F32_F16 46
+#define NEON_2RM_VRINTP 47
+#define NEON_2RM_VCVTAU 48
+#define NEON_2RM_VCVTAS 49
+#define NEON_2RM_VCVTNU 50
+#define NEON_2RM_VCVTNS 51
+#define NEON_2RM_VCVTPU 52
+#define NEON_2RM_VCVTPS 53
+#define NEON_2RM_VCVTMU 54
+#define NEON_2RM_VCVTMS 55
+#define NEON_2RM_VRECPE 56
+#define NEON_2RM_VRSQRTE 57
+#define NEON_2RM_VRECPE_F 58
+#define NEON_2RM_VRSQRTE_F 59
+#define NEON_2RM_VCVT_FS 60
+#define NEON_2RM_VCVT_FU 61
+#define NEON_2RM_VCVT_SF 62
+#define NEON_2RM_VCVT_UF 63
+
+static int neon_2rm_is_float_op(int op)
+{
+ /* Return true if this neon 2reg-misc op is float-to-float */
+ return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
+ (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) ||
+ op == NEON_2RM_VRINTM ||
+ (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) ||
+ op >= NEON_2RM_VRECPE_F);
+}
+
+static bool neon_2rm_is_v8_op(int op)
+{
+ /* Return true if this neon 2reg-misc op is ARMv8 and up */
+ switch (op) {
+ case NEON_2RM_VRINTN:
+ case NEON_2RM_VRINTA:
+ case NEON_2RM_VRINTM:
+ case NEON_2RM_VRINTP:
+ case NEON_2RM_VRINTZ:
+ case NEON_2RM_VRINTX:
+ case NEON_2RM_VCVTAU:
+ case NEON_2RM_VCVTAS:
+ case NEON_2RM_VCVTNU:
+ case NEON_2RM_VCVTNS:
+ case NEON_2RM_VCVTPU:
+ case NEON_2RM_VCVTPS:
+ case NEON_2RM_VCVTMU:
+ case NEON_2RM_VCVTMS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* Each entry in this array has bit n set if the insn allows
+ * size value n (otherwise it will UNDEF). Since unallocated
+ * op values will have no bits set they always UNDEF.
+ */
+static const uint8_t neon_2rm_sizes[] = {
+ [NEON_2RM_VREV64] = 0x7,
+ [NEON_2RM_VREV32] = 0x3,
+ [NEON_2RM_VREV16] = 0x1,
+ [NEON_2RM_VPADDL] = 0x7,
+ [NEON_2RM_VPADDL_U] = 0x7,
+ [NEON_2RM_AESE] = 0x1,
+ [NEON_2RM_AESMC] = 0x1,
+ [NEON_2RM_VCLS] = 0x7,
+ [NEON_2RM_VCLZ] = 0x7,
+ [NEON_2RM_VCNT] = 0x1,
+ [NEON_2RM_VMVN] = 0x1,
+ [NEON_2RM_VPADAL] = 0x7,
+ [NEON_2RM_VPADAL_U] = 0x7,
+ [NEON_2RM_VQABS] = 0x7,
+ [NEON_2RM_VQNEG] = 0x7,
+ [NEON_2RM_VCGT0] = 0x7,
+ [NEON_2RM_VCGE0] = 0x7,
+ [NEON_2RM_VCEQ0] = 0x7,
+ [NEON_2RM_VCLE0] = 0x7,
+ [NEON_2RM_VCLT0] = 0x7,
+ [NEON_2RM_SHA1H] = 0x4,
+ [NEON_2RM_VABS] = 0x7,
+ [NEON_2RM_VNEG] = 0x7,
+ [NEON_2RM_VCGT0_F] = 0x4,
+ [NEON_2RM_VCGE0_F] = 0x4,
+ [NEON_2RM_VCEQ0_F] = 0x4,
+ [NEON_2RM_VCLE0_F] = 0x4,
+ [NEON_2RM_VCLT0_F] = 0x4,
+ [NEON_2RM_VABS_F] = 0x4,
+ [NEON_2RM_VNEG_F] = 0x4,
+ [NEON_2RM_VSWP] = 0x1,
+ [NEON_2RM_VTRN] = 0x7,
+ [NEON_2RM_VUZP] = 0x7,
+ [NEON_2RM_VZIP] = 0x7,
+ [NEON_2RM_VMOVN] = 0x7,
+ [NEON_2RM_VQMOVN] = 0x7,
+ [NEON_2RM_VSHLL] = 0x7,
+ [NEON_2RM_SHA1SU1] = 0x4,
+ [NEON_2RM_VRINTN] = 0x4,
+ [NEON_2RM_VRINTX] = 0x4,
+ [NEON_2RM_VRINTA] = 0x4,
+ [NEON_2RM_VRINTZ] = 0x4,
+ [NEON_2RM_VCVT_F16_F32] = 0x2,
+ [NEON_2RM_VRINTM] = 0x4,
+ [NEON_2RM_VCVT_F32_F16] = 0x2,
+ [NEON_2RM_VRINTP] = 0x4,
+ [NEON_2RM_VCVTAU] = 0x4,
+ [NEON_2RM_VCVTAS] = 0x4,
+ [NEON_2RM_VCVTNU] = 0x4,
+ [NEON_2RM_VCVTNS] = 0x4,
+ [NEON_2RM_VCVTPU] = 0x4,
+ [NEON_2RM_VCVTPS] = 0x4,
+ [NEON_2RM_VCVTMU] = 0x4,
+ [NEON_2RM_VCVTMS] = 0x4,
+ [NEON_2RM_VRECPE] = 0x4,
+ [NEON_2RM_VRSQRTE] = 0x4,
+ [NEON_2RM_VRECPE_F] = 0x4,
+ [NEON_2RM_VRSQRTE_F] = 0x4,
+ [NEON_2RM_VCVT_FS] = 0x4,
+ [NEON_2RM_VCVT_FU] = 0x4,
+ [NEON_2RM_VCVT_SF] = 0x4,
+ [NEON_2RM_VCVT_UF] = 0x4,
+};
+
+/* Translate a NEON data processing instruction. Return nonzero if the
+ instruction is invalid.
+ We process data in a mixture of 32-bit and 64-bit chunks.
+ Mostly we use 32-bit chunks so we can use normal scalar instructions. */
+
+static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
+{
+ int op;
+ int q;
+ int rd, rn, rm;
+ int size;
+ int shift;
+ int pass;
+ int count;
+ int pairwise;
+ int u;
+ uint32_t imm, mask;
+ TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
+ TCGv_i64 tmp64;
+
+ /* FIXME: this access check should not take precedence over UNDEF
+ * for invalid encodings; we will generate incorrect syndrome information
+ * for attempts to execute invalid vfp/neon encodings with FP disabled.
+ */
+ if (s->fp_excp_el) {
+ gen_exception_insn(s, 4, EXCP_UDEF,
+ syn_fp_access_trap(1, 0xe, false), s->fp_excp_el);
+ return 0;
+ }
+
+ if (!s->vfp_enabled)
+ return 1;
+ q = (insn & (1 << 6)) != 0;
+ u = (insn >> 24) & 1;
+ VFP_DREG_D(rd, insn);
+ VFP_DREG_N(rn, insn);
+ VFP_DREG_M(rm, insn);
+ size = (insn >> 20) & 3;
+ if ((insn & (1 << 23)) == 0) {
+ /* Three register same length. */
+ op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
+ /* Catch invalid op and bad size combinations: UNDEF */
+ if ((neon_3r_sizes[op] & (1 << size)) == 0) {
+ return 1;
+ }
+ /* All insns of this form UNDEF for either this condition or the
+ * superset of cases "Q==1"; we catch the latter later.
+ */
+ if (q && ((rd | rn | rm) & 1)) {
+ return 1;
+ }
+ /*
+ * The SHA-1/SHA-256 3-register instructions require special treatment
+ * here, as their size field is overloaded as an op type selector, and
+ * they all consume their input in a single pass.
+ */
+ if (op == NEON_3R_SHA) {
+ if (!q) {
+ return 1;
+ }
+ if (!u) { /* SHA-1 */
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
+ return 1;
+ }
+ tmp = tcg_const_i32(rd);
+ tmp2 = tcg_const_i32(rn);
+ tmp3 = tcg_const_i32(rm);
+ tmp4 = tcg_const_i32(size);
+ gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
+ tcg_temp_free_i32(tmp4);
+ } else { /* SHA-256 */
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
+ return 1;
+ }
+ tmp = tcg_const_i32(rd);
+ tmp2 = tcg_const_i32(rn);
+ tmp3 = tcg_const_i32(rm);
+ switch (size) {
+ case 0:
+ gen_helper_crypto_sha256h(cpu_env, tmp, tmp2, tmp3);
+ break;
+ case 1:
+ gen_helper_crypto_sha256h2(cpu_env, tmp, tmp2, tmp3);
+ break;
+ case 2:
+ gen_helper_crypto_sha256su1(cpu_env, tmp, tmp2, tmp3);
+ break;
+ }
+ }
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp3);
+ return 0;
+ }
+ if (size == 3 && op != NEON_3R_LOGIC) {
+ /* 64-bit element instructions. */
+ for (pass = 0; pass < (q ? 2 : 1); pass++) {
+ neon_load_reg64(cpu_V0, rn + pass);
+ neon_load_reg64(cpu_V1, rm + pass);
+ switch (op) {
+ case NEON_3R_VQADD:
+ if (u) {
+ gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
+ cpu_V0, cpu_V1);
+ } else {
+ gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
+ cpu_V0, cpu_V1);
+ }
+ break;
+ case NEON_3R_VQSUB:
+ if (u) {
+ gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
+ cpu_V0, cpu_V1);
+ } else {
+ gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
+ cpu_V0, cpu_V1);
+ }
+ break;
+ case NEON_3R_VSHL:
+ if (u) {
+ gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
+ } else {
+ gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
+ }
+ break;
+ case NEON_3R_VQSHL:
+ if (u) {
+ gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
+ cpu_V1, cpu_V0);
+ } else {
+ gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
+ cpu_V1, cpu_V0);
+ }
+ break;
+ case NEON_3R_VRSHL:
+ if (u) {
+ gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
+ } else {
+ gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
+ }
+ break;
+ case NEON_3R_VQRSHL:
+ if (u) {
+ gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
+ cpu_V1, cpu_V0);
+ } else {
+ gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
+ cpu_V1, cpu_V0);
+ }
+ break;
+ case NEON_3R_VADD_VSUB:
+ if (u) {
+ tcg_gen_sub_i64(CPU_V001);
+ } else {
+ tcg_gen_add_i64(CPU_V001);
+ }
+ break;
+ default:
+ abort();
+ }
+ neon_store_reg64(cpu_V0, rd + pass);
+ }
+ return 0;
+ }
+ pairwise = 0;
+ switch (op) {
+ case NEON_3R_VSHL:
+ case NEON_3R_VQSHL:
+ case NEON_3R_VRSHL:
+ case NEON_3R_VQRSHL:
+ {
+ int rtmp;
+ /* Shift instruction operands are reversed. */
+ rtmp = rn;
+ rn = rm;
+ rm = rtmp;
+ }
+ break;
+ case NEON_3R_VPADD:
+ if (u) {
+ return 1;
+ }
+ /* Fall through */
+ case NEON_3R_VPMAX:
+ case NEON_3R_VPMIN:
+ pairwise = 1;
+ break;
+ case NEON_3R_FLOAT_ARITH:
+ pairwise = (u && size < 2); /* if VPADD (float) */
+ break;
+ case NEON_3R_FLOAT_MINMAX:
+ pairwise = u; /* if VPMIN/VPMAX (float) */
+ break;
+ case NEON_3R_FLOAT_CMP:
+ if (!u && size) {
+ /* no encoding for U=0 C=1x */
+ return 1;
+ }
+ break;
+ case NEON_3R_FLOAT_ACMP:
+ if (!u) {
+ return 1;
+ }
+ break;
+ case NEON_3R_FLOAT_MISC:
+ /* VMAXNM/VMINNM in ARMv8 */
+ if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
+ return 1;
+ }
+ break;
+ case NEON_3R_VMUL:
+ if (u && (size != 0)) {
+ /* UNDEF on invalid size for polynomial subcase */
+ return 1;
+ }
+ break;
+ case NEON_3R_VFM:
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP4) || u) {
+ return 1;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (pairwise && q) {
+ /* All the pairwise insns UNDEF if Q is set */
+ return 1;
+ }
+
+ for (pass = 0; pass < (q ? 4 : 2); pass++) {
+
+ if (pairwise) {
+ /* Pairwise. */
+ if (pass < 1) {
+ tmp = neon_load_reg(rn, 0);
+ tmp2 = neon_load_reg(rn, 1);
+ } else {
+ tmp = neon_load_reg(rm, 0);
+ tmp2 = neon_load_reg(rm, 1);
+ }
+ } else {
+ /* Elementwise. */
+ tmp = neon_load_reg(rn, pass);
+ tmp2 = neon_load_reg(rm, pass);
+ }
+ switch (op) {
+ case NEON_3R_VHADD:
+ GEN_NEON_INTEGER_OP(hadd);
+ break;
+ case NEON_3R_VQADD:
+ GEN_NEON_INTEGER_OP_ENV(qadd);
+ break;
+ case NEON_3R_VRHADD:
+ GEN_NEON_INTEGER_OP(rhadd);
+ break;
+ case NEON_3R_LOGIC: /* Logic ops. */
+ switch ((u << 2) | size) {
+ case 0: /* VAND */
+ tcg_gen_and_i32(tmp, tmp, tmp2);
+ break;
+ case 1: /* BIC */
+ tcg_gen_andc_i32(tmp, tmp, tmp2);
+ break;
+ case 2: /* VORR */
+ tcg_gen_or_i32(tmp, tmp, tmp2);
+ break;
+ case 3: /* VORN */
+ tcg_gen_orc_i32(tmp, tmp, tmp2);
+ break;
+ case 4: /* VEOR */
+ tcg_gen_xor_i32(tmp, tmp, tmp2);
+ break;
+ case 5: /* VBSL */
+ tmp3 = neon_load_reg(rd, pass);
+ gen_neon_bsl(tmp, tmp, tmp2, tmp3);
+ tcg_temp_free_i32(tmp3);
+ break;
+ case 6: /* VBIT */
+ tmp3 = neon_load_reg(rd, pass);
+ gen_neon_bsl(tmp, tmp, tmp3, tmp2);
+ tcg_temp_free_i32(tmp3);
+ break;
+ case 7: /* VBIF */
+ tmp3 = neon_load_reg(rd, pass);
+ gen_neon_bsl(tmp, tmp3, tmp, tmp2);
+ tcg_temp_free_i32(tmp3);
+ break;
+ }
+ break;
+ case NEON_3R_VHSUB:
+ GEN_NEON_INTEGER_OP(hsub);
+ break;
+ case NEON_3R_VQSUB:
+ GEN_NEON_INTEGER_OP_ENV(qsub);
+ break;
+ case NEON_3R_VCGT:
+ GEN_NEON_INTEGER_OP(cgt);
+ break;
+ case NEON_3R_VCGE:
+ GEN_NEON_INTEGER_OP(cge);
+ break;
+ case NEON_3R_VSHL:
+ GEN_NEON_INTEGER_OP(shl);
+ break;
+ case NEON_3R_VQSHL:
+ GEN_NEON_INTEGER_OP_ENV(qshl);
+ break;
+ case NEON_3R_VRSHL:
+ GEN_NEON_INTEGER_OP(rshl);
+ break;
+ case NEON_3R_VQRSHL:
+ GEN_NEON_INTEGER_OP_ENV(qrshl);
+ break;
+ case NEON_3R_VMAX:
+ GEN_NEON_INTEGER_OP(max);
+ break;
+ case NEON_3R_VMIN:
+ GEN_NEON_INTEGER_OP(min);
+ break;
+ case NEON_3R_VABD:
+ GEN_NEON_INTEGER_OP(abd);
+ break;
+ case NEON_3R_VABA:
+ GEN_NEON_INTEGER_OP(abd);
+ tcg_temp_free_i32(tmp2);
+ tmp2 = neon_load_reg(rd, pass);
+ gen_neon_add(size, tmp, tmp2);
+ break;
+ case NEON_3R_VADD_VSUB:
+ if (!u) { /* VADD */
+ gen_neon_add(size, tmp, tmp2);
+ } else { /* VSUB */
+ switch (size) {
+ case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
+ case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
+ case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
+ default: abort();
+ }
+ }
+ break;
+ case NEON_3R_VTST_VCEQ:
+ if (!u) { /* VTST */
+ switch (size) {
+ case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
+ case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
+ case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
+ default: abort();
+ }
+ } else { /* VCEQ */
+ switch (size) {
+ case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
+ case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
+ case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
+ default: abort();
+ }
+ }
+ break;
+ case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
+ switch (size) {
+ case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
+ case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
+ case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
+ default: abort();
+ }
+ tcg_temp_free_i32(tmp2);
+ tmp2 = neon_load_reg(rd, pass);
+ if (u) { /* VMLS */
+ gen_neon_rsb(size, tmp, tmp2);
+ } else { /* VMLA */
+ gen_neon_add(size, tmp, tmp2);
+ }
+ break;
+ case NEON_3R_VMUL:
+ if (u) { /* polynomial */
+ gen_helper_neon_mul_p8(tmp, tmp, tmp2);
+ } else { /* Integer */
+ switch (size) {
+ case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
+ case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
+ case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
+ default: abort();
+ }
+ }
+ break;
+ case NEON_3R_VPMAX:
+ GEN_NEON_INTEGER_OP(pmax);
+ break;
+ case NEON_3R_VPMIN:
+ GEN_NEON_INTEGER_OP(pmin);
+ break;
+ case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
+ if (!u) { /* VQDMULH */
+ switch (size) {
+ case 1:
+ gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
+ break;
+ case 2:
+ gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
+ break;
+ default: abort();
+ }
+ } else { /* VQRDMULH */
+ switch (size) {
+ case 1:
+ gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
+ break;
+ case 2:
+ gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
+ break;
+ default: abort();
+ }
+ }
+ break;
+ case NEON_3R_VPADD:
+ switch (size) {
+ case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
+ case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
+ case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
+ default: abort();
+ }
+ break;
+ case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ switch ((u << 2) | size) {
+ case 0: /* VADD */
+ case 4: /* VPADD */
+ gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
+ break;
+ case 2: /* VSUB */
+ gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
+ break;
+ case 6: /* VABD */
+ gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
+ break;
+ default:
+ abort();
+ }
+ tcg_temp_free_ptr(fpstatus);
+ break;
+ }
+ case NEON_3R_FLOAT_MULTIPLY:
+ {
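+ /* With u set this is VMUL; otherwise multiply then accumulate
+ * into Rd (VMLA when size is 0, VMLS otherwise).
+ */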
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
+ if (!u) {
+ tcg_temp_free_i32(tmp2);
+ tmp2 = neon_load_reg(rd, pass);
+ if (size == 0) {
+ gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
+ } else {
+ gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
+ }
+ }
+ tcg_temp_free_ptr(fpstatus);
+ break;
+ }
+ case NEON_3R_FLOAT_CMP:
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ if (!u) {
+ gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
+ } else {
+ if (size == 0) {
+ gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
+ } else {
+ gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
+ }
+ }
+ tcg_temp_free_ptr(fpstatus);
+ break;
+ }
+ case NEON_3R_FLOAT_ACMP:
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ if (size == 0) {
+ gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
+ } else {
+ gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
+ }
+ tcg_temp_free_ptr(fpstatus);
+ break;
+ }
+ case NEON_3R_FLOAT_MINMAX:
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ if (size == 0) {
+ gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
+ } else {
+ gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
+ }
+ tcg_temp_free_ptr(fpstatus);
+ break;
+ }
+ case NEON_3R_FLOAT_MISC:
+ if (u) {
+ /* VMAXNM/VMINNM */
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ if (size == 0) {
+ gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
+ } else {
+ gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
+ }
+ tcg_temp_free_ptr(fpstatus);
+ } else {
+ if (size == 0) {
+ gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
+ } else {
+ gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
+ }
+ }
+ break;
+ case NEON_3R_VFM:
+ {
+ /* VFMA, VFMS: fused multiply-add */
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ TCGv_i32 tmp3 = neon_load_reg(rd, pass);
+ if (size) {
+ /* VFMS */
+ gen_helper_vfp_negs(tmp, tmp);
+ }
+ gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
+ tcg_temp_free_i32(tmp3);
+ tcg_temp_free_ptr(fpstatus);
+ break;
+ }
+ default:
+ abort();
+ }
+ tcg_temp_free_i32(tmp2);
+
+ /* Save the result. For elementwise operations we can put it
+ straight into the destination register. For pairwise operations
+ we have to be careful to avoid clobbering the source operands. */
+ if (pairwise && rd == rm) {
+ neon_store_scratch(pass, tmp);
+ } else {
+ neon_store_reg(rd, pass, tmp);
+ }
+
+ } /* for pass */
+ if (pairwise && rd == rm) {
+ for (pass = 0; pass < (q ? 4 : 2); pass++) {
+ tmp = neon_load_scratch(pass);
+ neon_store_reg(rd, pass, tmp);
+ }
+ }
+ /* End of 3 register same size operations. */
+ } else if (insn & (1 << 4)) {
+ if ((insn & 0x00380080) != 0) {
+ /* Two registers and shift. */
+ op = (insn >> 8) & 0xf;
+ if (insn & (1 << 7)) {
+ /* 64-bit shift. */
+ if (op > 7) {
+ return 1;
+ }
+ size = 3;
+ } else {
+ size = 2;
+ while ((insn & (1 << (size + 19))) == 0)
+ size--;
+ }
+ shift = (insn >> 16) & ((1 << (3 + size)) - 1);
+ /* To avoid excessive duplication of ops we implement shift
+ by immediate using the variable shift operations. */
+ if (op < 8) {
+ /* Shift by immediate:
+ VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
+ if (q && ((rd | rm) & 1)) {
+ return 1;
+ }
+ if (!u && (op == 4 || op == 6)) {
+ return 1;
+ }
+ /* Right shifts are encoded as N - shift, where N is the
+ element size in bits. */
+ if (op <= 4)
+ shift = shift - (1 << (size + 3));
+ if (size == 3) {
+ count = q + 1;
+ } else {
+ count = q ? 4 : 2;
+ }
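+ /* Duplicate the shift amount across the elements of a 32-bit lane
+ * (no duplication is needed for 32-bit or 64-bit elements).
+ */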
+ switch (size) {
+ case 0:
+ imm = (uint8_t) shift;
+ imm |= imm << 8;
+ imm |= imm << 16;
+ break;
+ case 1:
+ imm = (uint16_t) shift;
+ imm |= imm << 16;
+ break;
+ case 2:
+ case 3:
+ imm = shift;
+ break;
+ default:
+ abort();
+ }
+
+ for (pass = 0; pass < count; pass++) {
+ if (size == 3) {
+ neon_load_reg64(cpu_V0, rm + pass);
+ tcg_gen_movi_i64(cpu_V1, imm);
+ switch (op) {
+ case 0: /* VSHR */
+ case 1: /* VSRA */
+ if (u)
+ gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
+ else
+ gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
+ break;
+ case 2: /* VRSHR */
+ case 3: /* VRSRA */
+ if (u)
+ gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
+ else
+ gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
+ break;
+ case 4: /* VSRI */
+ case 5: /* VSHL, VSLI */
+ gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
+ break;
+ case 6: /* VQSHLU */
+ gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
+ cpu_V0, cpu_V1);
+ break;
+ case 7: /* VQSHL */
+ if (u) {
+ gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
+ cpu_V0, cpu_V1);
+ } else {
+ gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
+ cpu_V0, cpu_V1);
+ }
+ break;
+ }
+ if (op == 1 || op == 3) {
+ /* Accumulate. */
+ neon_load_reg64(cpu_V1, rd + pass);
+ tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
+ } else if (op == 4 || (op == 5 && u)) {
+ /* Insert */
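+ /* VSRI/VSLI: build a mask of the bits produced by the shift;
+ * destination bits outside the mask are preserved.
+ */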
+ neon_load_reg64(cpu_V1, rd + pass);
+ uint64_t mask;
+ if (shift < -63 || shift > 63) {
+ mask = 0;
+ } else {
+ if (op == 4) {
+ mask = 0xffffffffffffffffull >> -shift;
+ } else {
+ mask = 0xffffffffffffffffull << shift;
+ }
+ }
+ tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
+ tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
+ }
+ neon_store_reg64(cpu_V0, rd + pass);
+ } else { /* size < 3 */
+ /* Operands: tmp holds the element, tmp2 the shift amount. */
+ tmp = neon_load_reg(rm, pass);
+ tmp2 = tcg_temp_new_i32();
+ tcg_gen_movi_i32(tmp2, imm);
+ switch (op) {
+ case 0: /* VSHR */
+ case 1: /* VSRA */
+ GEN_NEON_INTEGER_OP(shl);
+ break;
+ case 2: /* VRSHR */
+ case 3: /* VRSRA */
+ GEN_NEON_INTEGER_OP(rshl);
+ break;
+ case 4: /* VSRI */
+ case 5: /* VSHL, VSLI */
+ switch (size) {
+ case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
+ case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
+ case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
+ default: abort();
+ }
+ break;
+ case 6: /* VQSHLU */
+ switch (size) {
+ case 0:
+ gen_helper_neon_qshlu_s8(tmp, cpu_env,
+ tmp, tmp2);
+ break;
+ case 1:
+ gen_helper_neon_qshlu_s16(tmp, cpu_env,
+ tmp, tmp2);
+ break;
+ case 2:
+ gen_helper_neon_qshlu_s32(tmp, cpu_env,
+ tmp, tmp2);
+ break;
+ default:
+ abort();
+ }
+ break;
+ case 7: /* VQSHL */
+ GEN_NEON_INTEGER_OP_ENV(qshl);
+ break;
+ }
+ tcg_temp_free_i32(tmp2);
+
+ if (op == 1 || op == 3) {
+ /* Accumulate. */
+ tmp2 = neon_load_reg(rd, pass);
+ gen_neon_add(size, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ } else if (op == 4 || (op == 5 && u)) {
+ /* Insert */
+ switch (size) {
+ case 0:
+ if (op == 4)
+ mask = 0xff >> -shift;
+ else
+ mask = (uint8_t)(0xff << shift);
+ mask |= mask << 8;
+ mask |= mask << 16;
+ break;
+ case 1:
+ if (op == 4)
+ mask = 0xffff >> -shift;
+ else
+ mask = (uint16_t)(0xffff << shift);
+ mask |= mask << 16;
+ break;
+ case 2:
+ if (shift < -31 || shift > 31) {
+ mask = 0;
+ } else {
+ if (op == 4)
+ mask = 0xffffffffu >> -shift;
+ else
+ mask = 0xffffffffu << shift;
+ }
+ break;
+ default:
+ abort();
+ }
+ tmp2 = neon_load_reg(rd, pass);
+ tcg_gen_andi_i32(tmp, tmp, mask);
+ tcg_gen_andi_i32(tmp2, tmp2, ~mask);
+ tcg_gen_or_i32(tmp, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ }
+ neon_store_reg(rd, pass, tmp);
+ }
+ } /* for pass */
+ } else if (op < 10) {
+ /* Shift by immediate and narrow:
+ VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
+ int input_unsigned = (op == 8) ? !u : u;
+ if (rm & 1) {
+ return 1;
+ }
+ shift = shift - (1 << (size + 3));
+ size++;
+ if (size == 3) {
+ tmp64 = tcg_const_i64(shift);
+ neon_load_reg64(cpu_V0, rm);
+ neon_load_reg64(cpu_V1, rm + 1);
+ for (pass = 0; pass < 2; pass++) {
+ TCGv_i64 in;
+ if (pass == 0) {
+ in = cpu_V0;
+ } else {
+ in = cpu_V1;
+ }
+ if (q) {
+ if (input_unsigned) {
+ gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
+ } else {
+ gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
+ }
+ } else {
+ if (input_unsigned) {
+ gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
+ } else {
+ gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
+ }
+ }
+ tmp = tcg_temp_new_i32();
+ gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
+ neon_store_reg(rd, pass, tmp);
+ } /* for pass */
+ tcg_temp_free_i64(tmp64);
+ } else {
+ if (size == 1) {
+ imm = (uint16_t)shift;
+ imm |= imm << 16;
+ } else {
+ /* size == 2 */
+ imm = (uint32_t)shift;
+ }
+ tmp2 = tcg_const_i32(imm);
+ tmp4 = neon_load_reg(rm + 1, 0);
+ tmp5 = neon_load_reg(rm + 1, 1);
+ for (pass = 0; pass < 2; pass++) {
+ if (pass == 0) {
+ tmp = neon_load_reg(rm, 0);
+ } else {
+ tmp = tmp4;
+ }
+ gen_neon_shift_narrow(size, tmp, tmp2, q,
+ input_unsigned);
+ if (pass == 0) {
+ tmp3 = neon_load_reg(rm, 1);
+ } else {
+ tmp3 = tmp5;
+ }
+ gen_neon_shift_narrow(size, tmp3, tmp2, q,
+ input_unsigned);
+ tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp3);
+ tmp = tcg_temp_new_i32();
+ gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
+ neon_store_reg(rd, pass, tmp);
+ } /* for pass */
+ tcg_temp_free_i32(tmp2);
+ }
+ } else if (op == 10) {
+ /* VSHLL, VMOVL */
+ if (q || (rd & 1)) {
+ return 1;
+ }
+ tmp = neon_load_reg(rm, 0);
+ tmp2 = neon_load_reg(rm, 1);
+ for (pass = 0; pass < 2; pass++) {
+ if (pass == 1)
+ tmp = tmp2;
+
+ gen_neon_widen(cpu_V0, tmp, size, u);
+
+ if (shift != 0) {
+ /* The shift is less than the width of the source
+ type, so we can just shift the whole register. */
+ tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
+ /* Widen the result of shift: we need to clear
+ * the potential overflow bits resulting from
+ * left bits of the narrow input appearing as
+ * right bits of the left neighbour narrow
+ * input. */
+ if (size < 2 || !u) {
+ uint64_t imm64;
+ if (size == 0) {
+ imm = (0xffu >> (8 - shift));
+ imm |= imm << 16;
+ } else if (size == 1) {
+ imm = 0xffff >> (16 - shift);
+ } else {
+ /* size == 2 */
+ imm = 0xffffffff >> (32 - shift);
+ }
+ if (size < 2) {
+ imm64 = imm | (((uint64_t)imm) << 32);
+ } else {
+ imm64 = imm;
+ }
+ tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
+ }
+ }
+ neon_store_reg64(cpu_V0, rd + pass);
+ }
+ } else if (op >= 14) {
+ /* VCVT fixed-point. */
+ if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
+ return 1;
+ }
+ /* We have already masked out the must-be-1 top bit of imm6,
+ * hence this 32-shift where the ARM ARM has 64-imm6.
+ */
+ shift = 32 - shift;
+ for (pass = 0; pass < (q ? 4 : 2); pass++) {
+ tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
+ if (!(op & 1)) {
+ if (u)
+ gen_vfp_ulto(0, shift, 1);
+ else
+ gen_vfp_slto(0, shift, 1);
+ } else {
+ if (u)
+ gen_vfp_toul(0, shift, 1);
+ else
+ gen_vfp_tosl(0, shift, 1);
+ }
+ tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
+ }
+ } else {
+ return 1;
+ }
+ } else { /* (insn & 0x00380080) == 0 */
+ int invert;
+ if (q && (rd & 1)) {
+ return 1;
+ }
+
+ op = (insn >> 8) & 0xf;
+ /* One register and immediate. */
+ imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
+ invert = (insn & (1 << 5)) != 0;
+ /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
+ * We choose to not special-case this and will behave as if a
+ * valid constant encoding of 0 had been given.
+ */
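+ /* Expand the 8-bit immediate according to the cmode (op field) encoding. */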
+ switch (op) {
+ case 0: case 1:
+ /* no-op */
+ break;
+ case 2: case 3:
+ imm <<= 8;
+ break;
+ case 4: case 5:
+ imm <<= 16;
+ break;
+ case 6: case 7:
+ imm <<= 24;
+ break;
+ case 8: case 9:
+ imm |= imm << 16;
+ break;
+ case 10: case 11:
+ imm = (imm << 8) | (imm << 24);
+ break;
+ case 12:
+ imm = (imm << 8) | 0xff;
+ break;
+ case 13:
+ imm = (imm << 16) | 0xffff;
+ break;
+ case 14:
+ imm |= (imm << 8) | (imm << 16) | (imm << 24);
+ if (invert)
+ imm = ~imm;
+ break;
+ case 15:
+ if (invert) {
+ return 1;
+ }
+ imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
+ | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
+ break;
+ }
+ if (invert)
+ imm = ~imm;
+
+ for (pass = 0; pass < (q ? 4 : 2); pass++) {
+ if (op & 1 && op < 12) {
+ tmp = neon_load_reg(rd, pass);
+ if (invert) {
+ /* The immediate value has already been inverted, so
+ BIC becomes AND. */
+ tcg_gen_andi_i32(tmp, tmp, imm);
+ } else {
+ tcg_gen_ori_i32(tmp, tmp, imm);
+ }
+ } else {
+ /* VMOV, VMVN. */
+ tmp = tcg_temp_new_i32();
+ if (op == 14 && invert) {
+ int n;
+ uint32_t val;
+ val = 0;
+ for (n = 0; n < 4; n++) {
+ if (imm & (1 << (n + (pass & 1) * 4)))
+ val |= 0xff << (n * 8);
+ }
+ tcg_gen_movi_i32(tmp, val);
+ } else {
+ tcg_gen_movi_i32(tmp, imm);
+ }
+ }
+ neon_store_reg(rd, pass, tmp);
+ }
+ }
+ } else { /* (insn & 0x00800010) == 0x00800000 */
+ if (size != 3) {
+ op = (insn >> 8) & 0xf;
+ if ((insn & (1 << 6)) == 0) {
+ /* Three registers of different lengths. */
+ int src1_wide;
+ int src2_wide;
+ int prewiden;
+ /* undefreq: bit 0 : UNDEF if size == 0
+ * bit 1 : UNDEF if size == 1
+ * bit 2 : UNDEF if size == 2
+ * bit 3 : UNDEF if U == 1
+ * Note that [2:0] set implies 'always UNDEF'
+ */
+ int undefreq;
+ /* prewiden, src1_wide, src2_wide, undefreq */
+ static const int neon_3reg_wide[16][4] = {
+ {1, 0, 0, 0}, /* VADDL */
+ {1, 1, 0, 0}, /* VADDW */
+ {1, 0, 0, 0}, /* VSUBL */
+ {1, 1, 0, 0}, /* VSUBW */
+ {0, 1, 1, 0}, /* VADDHN */
+ {0, 0, 0, 0}, /* VABAL */
+ {0, 1, 1, 0}, /* VSUBHN */
+ {0, 0, 0, 0}, /* VABDL */
+ {0, 0, 0, 0}, /* VMLAL */
+ {0, 0, 0, 9}, /* VQDMLAL */
+ {0, 0, 0, 0}, /* VMLSL */
+ {0, 0, 0, 9}, /* VQDMLSL */
+ {0, 0, 0, 0}, /* Integer VMULL */
+ {0, 0, 0, 1}, /* VQDMULL */
+ {0, 0, 0, 0xa}, /* Polynomial VMULL */
+ {0, 0, 0, 7}, /* Reserved: always UNDEF */
+ };
+
+ prewiden = neon_3reg_wide[op][0];
+ src1_wide = neon_3reg_wide[op][1];
+ src2_wide = neon_3reg_wide[op][2];
+ undefreq = neon_3reg_wide[op][3];
+
+ if ((undefreq & (1 << size)) ||
+ ((undefreq & 8) && u)) {
+ return 1;
+ }
+ if ((src1_wide && (rn & 1)) ||
+ (src2_wide && (rm & 1)) ||
+ (!src2_wide && (rd & 1))) {
+ return 1;
+ }
+
+ /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
+ * outside the loop below as it only performs a single pass.
+ */
+ if (op == 14 && size == 2) {
+ TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
+ return 1;
+ }
+ tcg_rn = tcg_temp_new_i64();
+ tcg_rm = tcg_temp_new_i64();
+ tcg_rd = tcg_temp_new_i64();
+ neon_load_reg64(tcg_rn, rn);
+ neon_load_reg64(tcg_rm, rm);
+ gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
+ neon_store_reg64(tcg_rd, rd);
+ gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
+ neon_store_reg64(tcg_rd, rd + 1);
+ tcg_temp_free_i64(tcg_rn);
+ tcg_temp_free_i64(tcg_rm);
+ tcg_temp_free_i64(tcg_rd);
+ return 0;
+ }
+
+ /* Avoid overlapping operands. Wide source operands are
+ always aligned so will never overlap with wide
+ destinations in problematic ways. */
+ if (rd == rm && !src2_wide) {
+ tmp = neon_load_reg(rm, 1);
+ neon_store_scratch(2, tmp);
+ } else if (rd == rn && !src1_wide) {
+ tmp = neon_load_reg(rn, 1);
+ neon_store_scratch(2, tmp);
+ }
+ TCGV_UNUSED_I32(tmp3);
+ for (pass = 0; pass < 2; pass++) {
+ if (src1_wide) {
+ neon_load_reg64(cpu_V0, rn + pass);
+ TCGV_UNUSED_I32(tmp);
+ } else {
+ if (pass == 1 && rd == rn) {
+ tmp = neon_load_scratch(2);
+ } else {
+ tmp = neon_load_reg(rn, pass);
+ }
+ if (prewiden) {
+ gen_neon_widen(cpu_V0, tmp, size, u);
+ }
+ }
+ if (src2_wide) {
+ neon_load_reg64(cpu_V1, rm + pass);
+ TCGV_UNUSED_I32(tmp2);
+ } else {
+ if (pass == 1 && rd == rm) {
+ tmp2 = neon_load_scratch(2);
+ } else {
+ tmp2 = neon_load_reg(rm, pass);
+ }
+ if (prewiden) {
+ gen_neon_widen(cpu_V1, tmp2, size, u);
+ }
+ }
+ switch (op) {
+ case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
+ gen_neon_addl(size);
+ break;
+ case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
+ gen_neon_subl(size);
+ break;
+ case 5: case 7: /* VABAL, VABDL */
+ switch ((size << 1) | u) {
+ case 0:
+ gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
+ break;
+ case 1:
+ gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
+ break;
+ case 2:
+ gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
+ break;
+ case 3:
+ gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
+ break;
+ case 4:
+ gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
+ break;
+ case 5:
+ gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
+ break;
+ default: abort();
+ }
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp);
+ break;
+ case 8: case 9: case 10: case 11: case 12: case 13:
+ /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
+ gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
+ break;
+ case 14: /* Polynomial VMULL */
+ gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp);
+ break;
+ default: /* 15 is RESERVED: caught earlier */
+ abort();
+ }
+ if (op == 13) {
+ /* VQDMULL */
+ gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
+ neon_store_reg64(cpu_V0, rd + pass);
+ } else if (op == 5 || (op >= 8 && op <= 11)) {
+ /* Accumulate. */
+ neon_load_reg64(cpu_V1, rd + pass);
+ switch (op) {
+ case 10: /* VMLSL */
+ gen_neon_negl(cpu_V0, size);
+ /* Fall through */
+ case 5: case 8: /* VABAL, VMLAL */
+ gen_neon_addl(size);
+ break;
+ case 9: case 11: /* VQDMLAL, VQDMLSL */
+ gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
+ if (op == 11) {
+ gen_neon_negl(cpu_V0, size);
+ }
+ gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
+ break;
+ default:
+ abort();
+ }
+ neon_store_reg64(cpu_V0, rd + pass);
+ } else if (op == 4 || op == 6) {
+ /* Narrowing operation. */
+ tmp = tcg_temp_new_i32();
+ if (!u) {
+ switch (size) {
+ case 0:
+ gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
+ break;
+ case 1:
+ gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
+ break;
+ case 2:
+ tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
+ tcg_gen_extrl_i64_i32(tmp, cpu_V0);
+ break;
+ default: abort();
+ }
+ } else {
+ switch (size) {
+ case 0:
+ gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
+ break;
+ case 1:
+ gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
+ break;
+ case 2:
+ tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
+ tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
+ tcg_gen_extrl_i64_i32(tmp, cpu_V0);
+ break;
+ default: abort();
+ }
+ }
+ if (pass == 0) {
+ tmp3 = tmp;
+ } else {
+ neon_store_reg(rd, 0, tmp3);
+ neon_store_reg(rd, 1, tmp);
+ }
+ } else {
+ /* Write back the result. */
+ neon_store_reg64(cpu_V0, rd + pass);
+ }
+ }
+ } else {
+ /* Two registers and a scalar. NB that for ops of this form
+ * the ARM ARM labels bit 24 as Q, but it is in our variable
+ * 'u', not 'q'.
+ */
+ if (size == 0) {
+ return 1;
+ }
+ switch (op) {
+ case 1: /* Float VMLA scalar */
+ case 5: /* Floating point VMLS scalar */
+ case 9: /* Floating point VMUL scalar */
+ if (size == 1) {
+ return 1;
+ }
+ /* fall through */
+ case 0: /* Integer VMLA scalar */
+ case 4: /* Integer VMLS scalar */
+ case 8: /* Integer VMUL scalar */
+ case 12: /* VQDMULH scalar */
+ case 13: /* VQRDMULH scalar */
+ if (u && ((rd | rn) & 1)) {
+ return 1;
+ }
+ tmp = neon_get_scalar(size, rm);
+ neon_store_scratch(0, tmp);
+ for (pass = 0; pass < (u ? 4 : 2); pass++) {
+ tmp = neon_load_scratch(0);
+ tmp2 = neon_load_reg(rn, pass);
+ if (op == 12) {
+ if (size == 1) {
+ gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
+ } else {
+ gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
+ }
+ } else if (op == 13) {
+ if (size == 1) {
+ gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
+ } else {
+ gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
+ }
+ } else if (op & 1) {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
+ tcg_temp_free_ptr(fpstatus);
+ } else {
+ switch (size) {
+ case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
+ case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
+ case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
+ default: abort();
+ }
+ }
+ tcg_temp_free_i32(tmp2);
+ if (op < 8) {
+ /* Accumulate. */
+ tmp2 = neon_load_reg(rd, pass);
+ switch (op) {
+ case 0:
+ gen_neon_add(size, tmp, tmp2);
+ break;
+ case 1:
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
+ tcg_temp_free_ptr(fpstatus);
+ break;
+ }
+ case 4:
+ gen_neon_rsb(size, tmp, tmp2);
+ break;
+ case 5:
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
+ tcg_temp_free_ptr(fpstatus);
+ break;
+ }
+ default:
+ abort();
+ }
+ tcg_temp_free_i32(tmp2);
+ }
+ neon_store_reg(rd, pass, tmp);
+ }
+ break;
+ case 3: /* VQDMLAL scalar */
+ case 7: /* VQDMLSL scalar */
+ case 11: /* VQDMULL scalar */
+ if (u == 1) {
+ return 1;
+ }
+ /* fall through */
+ case 2: /* VMLAL scalar */
+ case 6: /* VMLSL scalar */
+ case 10: /* VMULL scalar */
+ if (rd & 1) {
+ return 1;
+ }
+ tmp2 = neon_get_scalar(size, rm);
+ /* We need a copy of tmp2 because gen_neon_mull
+ * deletes it during pass 0. */
+ tmp4 = tcg_temp_new_i32();
+ tcg_gen_mov_i32(tmp4, tmp2);
+ tmp3 = neon_load_reg(rn, 1);
+
+ for (pass = 0; pass < 2; pass++) {
+ if (pass == 0) {
+ tmp = neon_load_reg(rn, 0);
+ } else {
+ tmp = tmp3;
+ tmp2 = tmp4;
+ }
+ gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
+ if (op != 11) {
+ neon_load_reg64(cpu_V1, rd + pass);
+ }
+ switch (op) {
+ case 6:
+ gen_neon_negl(cpu_V0, size);
+ /* Fall through */
+ case 2:
+ gen_neon_addl(size);
+ break;
+ case 3: case 7:
+ gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
+ if (op == 7) {
+ gen_neon_negl(cpu_V0, size);
+ }
+ gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
+ break;
+ case 10:
+ /* no-op */
+ break;
+ case 11:
+ gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
+ break;
+ default:
+ abort();
+ }
+ neon_store_reg64(cpu_V0, rd + pass);
+ }
+
+
+ break;
+ default: /* 14 and 15 are RESERVED */
+ return 1;
+ }
+ }
+ } else { /* size == 3 */
+ if (!u) {
+ /* Extract. */
+ imm = (insn >> 8) & 0xf;
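+ /* imm is the byte offset of the extract within the Vn:Vm
+ * concatenation (Vn supplies the low-order bytes).
+ */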
+
+ if (imm > 7 && !q)
+ return 1;
+
+ if (q && ((rd | rn | rm) & 1)) {
+ return 1;
+ }
+
+ if (imm == 0) {
+ neon_load_reg64(cpu_V0, rn);
+ if (q) {
+ neon_load_reg64(cpu_V1, rn + 1);
+ }
+ } else if (imm == 8) {
+ neon_load_reg64(cpu_V0, rn + 1);
+ if (q) {
+ neon_load_reg64(cpu_V1, rm);
+ }
+ } else if (q) {
+ tmp64 = tcg_temp_new_i64();
+ if (imm < 8) {
+ neon_load_reg64(cpu_V0, rn);
+ neon_load_reg64(tmp64, rn + 1);
+ } else {
+ neon_load_reg64(cpu_V0, rn + 1);
+ neon_load_reg64(tmp64, rm);
+ }
+ tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
+ tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
+ tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
+ if (imm < 8) {
+ neon_load_reg64(cpu_V1, rm);
+ } else {
+ neon_load_reg64(cpu_V1, rm + 1);
+ imm -= 8;
+ }
+ tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
+ tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
+ tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
+ tcg_temp_free_i64(tmp64);
+ } else {
+ /* BUGFIX */
+ neon_load_reg64(cpu_V0, rn);
+ tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
+ neon_load_reg64(cpu_V1, rm);
+ tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
+ tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
+ }
+ neon_store_reg64(cpu_V0, rd);
+ if (q) {
+ neon_store_reg64(cpu_V1, rd + 1);
+ }
+ } else if ((insn & (1 << 11)) == 0) {
+ /* Two register misc. */
+ op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
+ size = (insn >> 18) & 3;
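+ /* op[5:4] come from insn[17:16], op[3:0] from insn[10:7]; size is insn[19:18]. */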
+ /* UNDEF for unknown op values and bad op-size combinations */
+ if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
+ return 1;
+ }
+ if (neon_2rm_is_v8_op(op) &&
+ !arm_dc_feature(s, ARM_FEATURE_V8)) {
+ return 1;
+ }
+ if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
+ q && ((rm | rd) & 1)) {
+ return 1;
+ }
+ switch (op) {
+ case NEON_2RM_VREV64:
+ for (pass = 0; pass < (q ? 2 : 1); pass++) {
+ tmp = neon_load_reg(rm, pass * 2);
+ tmp2 = neon_load_reg(rm, pass * 2 + 1);
+ switch (size) {
+ case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
+ case 1: gen_swap_half(tmp); break;
+ case 2: /* no-op */ break;
+ default: abort();
+ }
+ neon_store_reg(rd, pass * 2 + 1, tmp);
+ if (size == 2) {
+ neon_store_reg(rd, pass * 2, tmp2);
+ } else {
+ switch (size) {
+ case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
+ case 1: gen_swap_half(tmp2); break;
+ default: abort();
+ }
+ neon_store_reg(rd, pass * 2, tmp2);
+ }
+ }
+ break;
+ case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
+ case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
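+ /* Pairwise add of widened adjacent elements; the VPADAL forms
+ * also accumulate into the destination.
+ */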
+ for (pass = 0; pass < q + 1; pass++) {
+ tmp = neon_load_reg(rm, pass * 2);
+ gen_neon_widen(cpu_V0, tmp, size, op & 1);
+ tmp = neon_load_reg(rm, pass * 2 + 1);
+ gen_neon_widen(cpu_V1, tmp, size, op & 1);
+ switch (size) {
+ case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
+ case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
+ case 2: tcg_gen_add_i64(CPU_V001); break;
+ default: abort();
+ }
+ if (op >= NEON_2RM_VPADAL) {
+ /* Accumulate. */
+ neon_load_reg64(cpu_V1, rd + pass);
+ gen_neon_addl(size);
+ }
+ neon_store_reg64(cpu_V0, rd + pass);
+ }
+ break;
+ case NEON_2RM_VTRN:
+ if (size == 2) {
+ int n;
+ for (n = 0; n < (q ? 4 : 2); n += 2) {
+ tmp = neon_load_reg(rm, n);
+ tmp2 = neon_load_reg(rd, n + 1);
+ neon_store_reg(rm, n, tmp2);
+ neon_store_reg(rd, n + 1, tmp);
+ }
+ } else {
+ goto elementwise;
+ }
+ break;
+ case NEON_2RM_VUZP:
+ if (gen_neon_unzip(rd, rm, size, q)) {
+ return 1;
+ }
+ break;
+ case NEON_2RM_VZIP:
+ if (gen_neon_zip(rd, rm, size, q)) {
+ return 1;
+ }
+ break;
+ case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
+ /* also VQMOVUN; op field and mnemonics don't line up */
+ if (rm & 1) {
+ return 1;
+ }
+ TCGV_UNUSED_I32(tmp2);
+ for (pass = 0; pass < 2; pass++) {
+ neon_load_reg64(cpu_V0, rm + pass);
+ tmp = tcg_temp_new_i32();
+ gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
+ tmp, cpu_V0);
+ if (pass == 0) {
+ tmp2 = tmp;
+ } else {
+ neon_store_reg(rd, 0, tmp2);
+ neon_store_reg(rd, 1, tmp);
+ }
+ }
+ break;
+ case NEON_2RM_VSHLL:
+ if (q || (rd & 1)) {
+ return 1;
+ }
+ tmp = neon_load_reg(rm, 0);
+ tmp2 = neon_load_reg(rm, 1);
+ for (pass = 0; pass < 2; pass++) {
+ if (pass == 1)
+ tmp = tmp2;
+ gen_neon_widen(cpu_V0, tmp, size, 1);
+ tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
+ neon_store_reg64(cpu_V0, rd + pass);
+ }
+ break;
+ case NEON_2RM_VCVT_F16_F32:
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
+ q || (rm & 1)) {
+ return 1;
+ }
+ tmp = tcg_temp_new_i32();
+ tmp2 = tcg_temp_new_i32();
+ tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
+ gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
+ tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
+ gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
+ tcg_gen_shli_i32(tmp2, tmp2, 16);
+ tcg_gen_or_i32(tmp2, tmp2, tmp);
+ tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
+ gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
+ tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
+ neon_store_reg(rd, 0, tmp2);
+ tmp2 = tcg_temp_new_i32();
+ gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
+ tcg_gen_shli_i32(tmp2, tmp2, 16);
+ tcg_gen_or_i32(tmp2, tmp2, tmp);
+ neon_store_reg(rd, 1, tmp2);
+ tcg_temp_free_i32(tmp);
+ break;
+ case NEON_2RM_VCVT_F32_F16:
+ if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
+ q || (rd & 1)) {
+ return 1;
+ }
+ tmp3 = tcg_temp_new_i32();
+ tmp = neon_load_reg(rm, 0);
+ tmp2 = neon_load_reg(rm, 1);
+ tcg_gen_ext16u_i32(tmp3, tmp);
+ gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
+ tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
+ tcg_gen_shri_i32(tmp3, tmp, 16);
+ gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
+ tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
+ tcg_temp_free_i32(tmp);
+ tcg_gen_ext16u_i32(tmp3, tmp2);
+ gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
+ tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
+ tcg_gen_shri_i32(tmp3, tmp2, 16);
+ gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
+ tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp3);
+ break;
+ case NEON_2RM_AESE: case NEON_2RM_AESMC:
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
+ || ((rm | rd) & 1)) {
+ return 1;
+ }
+ tmp = tcg_const_i32(rd);
+ tmp2 = tcg_const_i32(rm);
+
+ /* Bit 6 is the lowest opcode bit; it distinguishes between
+ * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
+ */
+ tmp3 = tcg_const_i32(extract32(insn, 6, 1));
+
+ if (op == NEON_2RM_AESE) {
+ gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
+ } else {
+ gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
+ }
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp3);
+ break;
+ case NEON_2RM_SHA1H:
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
+ || ((rm | rd) & 1)) {
+ return 1;
+ }
+ tmp = tcg_const_i32(rd);
+ tmp2 = tcg_const_i32(rm);
+
+ gen_helper_crypto_sha1h(cpu_env, tmp, tmp2);
+
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp2);
+ break;
+ case NEON_2RM_SHA1SU1:
+ if ((rm | rd) & 1) {
+ return 1;
+ }
+ /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
+ if (q) {
+ if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
+ return 1;
+ }
+ } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
+ return 1;
+ }
+ tmp = tcg_const_i32(rd);
+ tmp2 = tcg_const_i32(rm);
+ if (q) {
+ gen_helper_crypto_sha256su0(cpu_env, tmp, tmp2);
+ } else {
+ gen_helper_crypto_sha1su1(cpu_env, tmp, tmp2);
+ }
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp2);
+ break;
+ default:
+ elementwise:
+ for (pass = 0; pass < (q ? 4 : 2); pass++) {
+ if (neon_2rm_is_float_op(op)) {
+ tcg_gen_ld_f32(cpu_F0s, cpu_env,
+ neon_reg_offset(rm, pass));
+ TCGV_UNUSED_I32(tmp);
+ } else {
+ tmp = neon_load_reg(rm, pass);
+ }
+ switch (op) {
+ case NEON_2RM_VREV32:
+ switch (size) {
+ case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
+ case 1: gen_swap_half(tmp); break;
+ default: abort();
+ }
+ break;
+ case NEON_2RM_VREV16:
+ gen_rev16(tmp);
+ break;
+ case NEON_2RM_VCLS:
+ switch (size) {
+ case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
+ case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
+ case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
+ default: abort();
+ }
+ break;
+ case NEON_2RM_VCLZ:
+ switch (size) {
+ case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
+ case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
+ case 2: gen_helper_clz(tmp, tmp); break;
+ default: abort();
+ }
+ break;
+ case NEON_2RM_VCNT:
+ gen_helper_neon_cnt_u8(tmp, tmp);
+ break;
+ case NEON_2RM_VMVN:
+ tcg_gen_not_i32(tmp, tmp);
+ break;
+ case NEON_2RM_VQABS:
+ switch (size) {
+ case 0:
+ gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
+ break;
+ case 1:
+ gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
+ break;
+ case 2:
+ gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
+ break;
+ default: abort();
+ }
+ break;
+ case NEON_2RM_VQNEG:
+ switch (size) {
+ case 0:
+ gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
+ break;
+ case 1:
+ gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
+ break;
+ case 2:
+ gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
+ break;
+ default: abort();
+ }
+ break;
+ case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
+ tmp2 = tcg_const_i32(0);
+ switch(size) {
+ case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
+ case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
+ case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
+ default: abort();
+ }
+ tcg_temp_free_i32(tmp2);
+ if (op == NEON_2RM_VCLE0) {
+ tcg_gen_not_i32(tmp, tmp);
+ }
+ break;
+ case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
+ tmp2 = tcg_const_i32(0);
+ switch(size) {
+ case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
+ case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
+ case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
+ default: abort();
+ }
+ tcg_temp_free_i32(tmp2);
+ if (op == NEON_2RM_VCLT0) {
+ tcg_gen_not_i32(tmp, tmp);
+ }
+ break;
+ case NEON_2RM_VCEQ0:
+ tmp2 = tcg_const_i32(0);
+ switch(size) {
+ case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
+ case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
+ case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
+ default: abort();
+ }
+ tcg_temp_free_i32(tmp2);
+ break;
+ case NEON_2RM_VABS:
+ switch(size) {
+ case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
+ case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
+ case 2: tcg_gen_abs_i32(tmp, tmp); break;
+ default: abort();
+ }
+ break;
+ case NEON_2RM_VNEG:
+ tmp2 = tcg_const_i32(0);
+ gen_neon_rsb(size, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ break;
+ case NEON_2RM_VCGT0_F:
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ tmp2 = tcg_const_i32(0);
+ gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_ptr(fpstatus);
+ break;
+ }
+ case NEON_2RM_VCGE0_F:
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ tmp2 = tcg_const_i32(0);
+ gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_ptr(fpstatus);
+ break;
+ }
+ case NEON_2RM_VCEQ0_F:
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ tmp2 = tcg_const_i32(0);
+ gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_ptr(fpstatus);
+ break;
+ }
+ case NEON_2RM_VCLE0_F:
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ tmp2 = tcg_const_i32(0);
+ gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_ptr(fpstatus);
+ break;
+ }
+ case NEON_2RM_VCLT0_F:
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ tmp2 = tcg_const_i32(0);
+ gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_ptr(fpstatus);
+ break;
+ }
+ case NEON_2RM_VABS_F:
+ gen_vfp_abs(0);
+ break;
+ case NEON_2RM_VNEG_F:
+ gen_vfp_neg(0);
+ break;
+ case NEON_2RM_VSWP:
+ tmp2 = neon_load_reg(rd, pass);
+ neon_store_reg(rm, pass, tmp2);
+ break;
+ case NEON_2RM_VTRN:
+ tmp2 = neon_load_reg(rd, pass);
+ switch (size) {
+ case 0: gen_neon_trn_u8(tmp, tmp2); break;
+ case 1: gen_neon_trn_u16(tmp, tmp2); break;
+ default: abort();
+ }
+ neon_store_reg(rm, pass, tmp2);
+ break;
+ case NEON_2RM_VRINTN:
+ case NEON_2RM_VRINTA:
+ case NEON_2RM_VRINTM:
+ case NEON_2RM_VRINTP:
+ case NEON_2RM_VRINTZ:
+ {
+ TCGv_i32 tcg_rmode;
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ int rmode;
+
+ if (op == NEON_2RM_VRINTZ) {
+ rmode = FPROUNDING_ZERO;
+ } else {
+ rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
+ }
+
+ tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
+ gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
+ cpu_env);
+ gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
+ gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
+ cpu_env);
+ tcg_temp_free_ptr(fpstatus);
+ tcg_temp_free_i32(tcg_rmode);
+ break;
+ }
+ case NEON_2RM_VRINTX:
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
+ tcg_temp_free_ptr(fpstatus);
+ break;
+ }
+ case NEON_2RM_VCVTAU:
+ case NEON_2RM_VCVTAS:
+ case NEON_2RM_VCVTNU:
+ case NEON_2RM_VCVTNS:
+ case NEON_2RM_VCVTPU:
+ case NEON_2RM_VCVTPS:
+ case NEON_2RM_VCVTMU:
+ case NEON_2RM_VCVTMS:
+ {
+ bool is_signed = !extract32(insn, 7, 1);
+ TCGv_ptr fpst = get_fpstatus_ptr(1);
+ TCGv_i32 tcg_rmode, tcg_shift;
+ int rmode = fp_decode_rm[extract32(insn, 8, 2)];
+
+ tcg_shift = tcg_const_i32(0);
+ tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
+ gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
+ cpu_env);
+
+ if (is_signed) {
+ gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
+ tcg_shift, fpst);
+ } else {
+ gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
+ tcg_shift, fpst);
+ }
+
+ gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
+ cpu_env);
+ tcg_temp_free_i32(tcg_rmode);
+ tcg_temp_free_i32(tcg_shift);
+ tcg_temp_free_ptr(fpst);
+ break;
+ }
+ case NEON_2RM_VRECPE:
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ gen_helper_recpe_u32(tmp, tmp, fpstatus);
+ tcg_temp_free_ptr(fpstatus);
+ break;
+ }
+ case NEON_2RM_VRSQRTE:
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
+ tcg_temp_free_ptr(fpstatus);
+ break;
+ }
+ case NEON_2RM_VRECPE_F:
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
+ tcg_temp_free_ptr(fpstatus);
+ break;
+ }
+ case NEON_2RM_VRSQRTE_F:
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
+ tcg_temp_free_ptr(fpstatus);
+ break;
+ }
+ case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
+ gen_vfp_sito(0, 1);
+ break;
+ case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
+ gen_vfp_uito(0, 1);
+ break;
+ case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
+ gen_vfp_tosiz(0, 1);
+ break;
+ case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
+ gen_vfp_touiz(0, 1);
+ break;
+ default:
+ /* Reserved op values were caught by the
+ * neon_2rm_sizes[] check earlier.
+ */
+ abort();
+ }
+ if (neon_2rm_is_float_op(op)) {
+ tcg_gen_st_f32(cpu_F0s, cpu_env,
+ neon_reg_offset(rd, pass));
+ } else {
+ neon_store_reg(rd, pass, tmp);
+ }
+ }
+ break;
+ }
+ } else if ((insn & (1 << 10)) == 0) {
+ /* VTBL, VTBX. */
+ int n = ((insn >> 8) & 3) + 1;
+ if ((rn + n) > 32) {
+ /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
+ * helper function running off the end of the register file.
+ */
+ return 1;
+ }
+ n <<= 3;
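+ /* n is now the length of the table in bytes (8 bytes per register). */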
+ if (insn & (1 << 6)) {
+ tmp = neon_load_reg(rd, 0);
+ } else {
+ tmp = tcg_temp_new_i32();
+ tcg_gen_movi_i32(tmp, 0);
+ }
+ tmp2 = neon_load_reg(rm, 0);
+ tmp4 = tcg_const_i32(rn);
+ tmp5 = tcg_const_i32(n);
+ gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
+ tcg_temp_free_i32(tmp);
+ if (insn & (1 << 6)) {
+ tmp = neon_load_reg(rd, 1);
+ } else {
+ tmp = tcg_temp_new_i32();
+ tcg_gen_movi_i32(tmp, 0);
+ }
+ tmp3 = neon_load_reg(rm, 1);
+ gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
+ tcg_temp_free_i32(tmp5);
+ tcg_temp_free_i32(tmp4);
+ neon_store_reg(rd, 0, tmp2);
+ neon_store_reg(rd, 1, tmp3);
+ tcg_temp_free_i32(tmp);
+ } else if ((insn & 0x380) == 0) {
+ /* VDUP */
+ if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
+ return 1;
+ }
+ if (insn & (1 << 19)) {
+ tmp = neon_load_reg(rm, 1);
+ } else {
+ tmp = neon_load_reg(rm, 0);
+ }
+ if (insn & (1 << 16)) {
+ gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
+ } else if (insn & (1 << 17)) {
+ if ((insn >> 18) & 1)
+ gen_neon_dup_high16(tmp);
+ else
+ gen_neon_dup_low16(tmp);
+ }
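+ /* tmp now holds the scalar replicated within a 32-bit lane;
+ * copy it into each word of the destination.
+ */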
+ for (pass = 0; pass < (q ? 4 : 2); pass++) {
+ tmp2 = tcg_temp_new_i32();
+ tcg_gen_mov_i32(tmp2, tmp);
+ neon_store_reg(rd, pass, tmp2);
+ }
+ tcg_temp_free_i32(tmp);
+ } else {
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+static int disas_coproc_insn(DisasContext *s, uint32_t insn)
+{
+ int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
+ const ARMCPRegInfo *ri;
+
+ cpnum = (insn >> 8) & 0xf;
+
+ /* First check for coprocessor space used for XScale/iwMMXt insns */
+ if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
+ if (extract32(s->c15_cpar, cpnum, 1) == 0) {
+ return 1;
+ }
+ if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
+ return disas_iwmmxt_insn(s, insn);
+ } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
+ return disas_dsp_insn(s, insn);
+ }
+ return 1;
+ }
+
+ /* Otherwise treat as a generic register access */
+ is64 = (insn & (1 << 25)) == 0;
+ if (!is64 && ((insn & (1 << 4)) == 0)) {
+ /* cdp */
+ return 1;
+ }
+
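+ /* Decode the access fields: the 64-bit (MCRR/MRRC) forms encode opc1
+ * at [7:4] and a second transfer register at [19:16] in place of crn/opc2.
+ */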
+ crm = insn & 0xf;
+ if (is64) {
+ crn = 0;
+ opc1 = (insn >> 4) & 0xf;
+ opc2 = 0;
+ rt2 = (insn >> 16) & 0xf;
+ } else {
+ crn = (insn >> 16) & 0xf;
+ opc1 = (insn >> 21) & 7;
+ opc2 = (insn >> 5) & 7;
+ rt2 = 0;
+ }
+ isread = (insn >> 20) & 1;
+ rt = (insn >> 12) & 0xf;
+
+ ri = get_arm_cp_reginfo(s->cp_regs,
+ ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
+ if (ri) {
+ /* Check access permissions */
+ if (!cp_access_ok(s->current_el, ri, isread)) {
+ return 1;
+ }
+
+ if (ri->accessfn ||
+ (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
+ /* Emit code to perform further access permissions checks at
+ * runtime; this may result in an exception.
+ * Note that on XScale all cp0..cp13 registers do an access check
+ * call in order to handle c15_cpar.
+ */
+ TCGv_ptr tmpptr;
+ TCGv_i32 tcg_syn, tcg_isread;
+ uint32_t syndrome;
+
+ /* Note that since we are an implementation which takes an
+ * exception on a trapped conditional instruction only if the
+ * instruction passes its condition code check, we can take
+ * advantage of the clause in the ARM ARM that allows us to set
+ * the COND field in the instruction to 0xE in all cases.
+ * We could fish the actual condition out of the insn (ARM)
+ * or the condexec bits (Thumb) but it isn't necessary.
+ */
+ switch (cpnum) {
+ case 14:
+ if (is64) {
+ syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
+ isread, false);
+ } else {
+ syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
+ rt, isread, false);
+ }
+ break;
+ case 15:
+ if (is64) {
+ syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
+ isread, false);
+ } else {
+ syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
+ rt, isread, false);
+ }
+ break;
+ default:
+ /* ARMv8 defines that only coprocessors 14 and 15 exist,
+ * so this can only happen if this is an ARMv7 or earlier CPU,
+ * in which case the syndrome information won't actually be
+ * guest visible.
+ */
+ assert(!arm_dc_feature(s, ARM_FEATURE_V8));
+ syndrome = syn_uncategorized();
+ break;
+ }
+
+ gen_set_condexec(s);
+ gen_set_pc_im(s, s->pc - 4);
+ tmpptr = tcg_const_ptr(ri);
+ tcg_syn = tcg_const_i32(syndrome);
+ tcg_isread = tcg_const_i32(isread);
+ gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
+ tcg_isread);
+ tcg_temp_free_ptr(tmpptr);
+ tcg_temp_free_i32(tcg_syn);
+ tcg_temp_free_i32(tcg_isread);
+ }
+
+ /* Handle special cases first */
+ switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
+ case ARM_CP_NOP:
+ return 0;
+ case ARM_CP_WFI:
+ if (isread) {
+ return 1;
+ }
+ gen_set_pc_im(s, s->pc);
+ s->is_jmp = DISAS_WFI;
+ return 0;
+ default:
+ break;
+ }
+
+ if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
+ gen_io_start();
+ }
+
+ if (isread) {
+ /* Read */
+ if (is64) {
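+ /* 64-bit read (MRRC): the low word goes to Rt and the high word to Rt2. */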
+ TCGv_i64 tmp64;
+ TCGv_i32 tmp;
+ if (ri->type & ARM_CP_CONST) {
+ tmp64 = tcg_const_i64(ri->resetvalue);
+ } else if (ri->readfn) {
+ TCGv_ptr tmpptr;
+ tmp64 = tcg_temp_new_i64();
+ tmpptr = tcg_const_ptr(ri);
+ gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
+ tcg_temp_free_ptr(tmpptr);
+ } else {
+ tmp64 = tcg_temp_new_i64();
+ tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
+ }
+ tmp = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(tmp, tmp64);
+ store_reg(s, rt, tmp);
+ tcg_gen_shri_i64(tmp64, tmp64, 32);
+ tmp = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(tmp, tmp64);
+ tcg_temp_free_i64(tmp64);
+ store_reg(s, rt2, tmp);
+ } else {
+ TCGv_i32 tmp;
+ if (ri->type & ARM_CP_CONST) {
+ tmp = tcg_const_i32(ri->resetvalue);
+ } else if (ri->readfn) {
+ TCGv_ptr tmpptr;
+ tmp = tcg_temp_new_i32();
+ tmpptr = tcg_const_ptr(ri);
+ gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
+ tcg_temp_free_ptr(tmpptr);
+ } else {
+ tmp = load_cpu_offset(ri->fieldoffset);
+ }
+ if (rt == 15) {
+ /* A destination register of r15 for 32-bit loads sets
+ * the condition codes from the high 4 bits of the value
+ */
+ gen_set_nzcv(tmp);
+ tcg_temp_free_i32(tmp);
+ } else {
+ store_reg(s, rt, tmp);
+ }
+ }
+ } else {
+ /* Write */
+ if (ri->type & ARM_CP_CONST) {
+ /* If not forbidden by access permissions, treat as WI */
+ return 0;
+ }
+
+ if (is64) {
+ TCGv_i32 tmplo, tmphi;
+ TCGv_i64 tmp64 = tcg_temp_new_i64();
+ tmplo = load_reg(s, rt);
+ tmphi = load_reg(s, rt2);
+ tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
+ tcg_temp_free_i32(tmplo);
+ tcg_temp_free_i32(tmphi);
+ if (ri->writefn) {
+ TCGv_ptr tmpptr = tcg_const_ptr(ri);
+ gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
+ tcg_temp_free_ptr(tmpptr);
+ } else {
+ tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
+ }
+ tcg_temp_free_i64(tmp64);
+ } else {
+ if (ri->writefn) {
+ TCGv_i32 tmp;
+ TCGv_ptr tmpptr;
+ tmp = load_reg(s, rt);
+ tmpptr = tcg_const_ptr(ri);
+ gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
+ tcg_temp_free_ptr(tmpptr);
+ tcg_temp_free_i32(tmp);
+ } else {
+ TCGv_i32 tmp = load_reg(s, rt);
+ store_cpu_offset(tmp, ri->fieldoffset);
+ }
+ }
+ }
+
+ if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
+ /* I/O operations must end the TB here (whether read or write) */
+ gen_io_end();
+ gen_lookup_tb(s);
+ } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
+ /* We default to ending the TB on a coprocessor register write,
+ * but allow this to be suppressed by the register definition
+ * (usually only necessary to work around guest bugs).
+ */
+ gen_lookup_tb(s);
+ }
+
+ return 0;
+ }
+
+ /* Unknown register; this might be a guest error or a QEMU
+ * unimplemented feature.
+ */
+ if (is64) {
+ qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
+ "64 bit system register cp:%d opc1: %d crm:%d "
+ "(%s)\n",
+ isread ? "read" : "write", cpnum, opc1, crm,
+ s->ns ? "non-secure" : "secure");
+ } else {
+ qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
+ "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
+ "(%s)\n",
+ isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
+ s->ns ? "non-secure" : "secure");
+ }
+
+ return 1;
+}
+
+
+/* Store a 64-bit value to a register pair. Clobbers val. */
+static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
+{
+ TCGv_i32 tmp;
+ tmp = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(tmp, val);
+ store_reg(s, rlow, tmp);
+ tmp = tcg_temp_new_i32();
+ tcg_gen_shri_i64(val, val, 32);
+ tcg_gen_extrl_i64_i32(tmp, val);
+ store_reg(s, rhigh, tmp);
+}
+
+/* load a 32-bit value from a register and perform a 64-bit accumulate. */
+static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
+{
+ TCGv_i64 tmp;
+ TCGv_i32 tmp2;
+
+ /* Load value and extend to 64 bits. */
+ tmp = tcg_temp_new_i64();
+ tmp2 = load_reg(s, rlow);
+ tcg_gen_extu_i32_i64(tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ tcg_gen_add_i64(val, val, tmp);
+ tcg_temp_free_i64(tmp);
+}
+
+/* load and add a 64-bit value from a register pair. */
+static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
+{
+ TCGv_i64 tmp;
+ TCGv_i32 tmpl;
+ TCGv_i32 tmph;
+
+ /* Load 64-bit value rd:rn. */
+ tmpl = load_reg(s, rlow);
+ tmph = load_reg(s, rhigh);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
+ tcg_temp_free_i32(tmpl);
+ tcg_temp_free_i32(tmph);
+ tcg_gen_add_i64(val, val, tmp);
+ tcg_temp_free_i64(tmp);
+}
+
+/* Set N and Z flags from hi|lo. */
+static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
+{
+ tcg_gen_mov_i32(cpu_NF, hi);
+ tcg_gen_or_i32(cpu_ZF, lo, hi);
+}
+
+/* Load/Store exclusive instructions are implemented by remembering
+ the value/address loaded, and seeing if these are the same
+ when the store is performed. This should be sufficient to implement
+ the architecturally mandated semantics, and avoids having to monitor
+ regular stores. The compare vs the remembered value is done during
+ the cmpxchg operation, but we must compare the addresses manually. */
+static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
+ TCGv_i32 addr, int size)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ TCGMemOp opc = size | MO_ALIGN | s->be_data;
+
+ s->is_ldex = true;
+
+ if (size == 3) {
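+ /* LDREXD: do a single aligned 64-bit load and split it into
+ * Rt (low word) and Rt2 (high word).
+ */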
+ TCGv_i32 tmp2 = tcg_temp_new_i32();
+ TCGv_i64 t64 = tcg_temp_new_i64();
+
+ gen_aa32_ld_i64(s, t64, addr, get_mem_index(s), opc);
+ tcg_gen_mov_i64(cpu_exclusive_val, t64);
+ tcg_gen_extr_i64_i32(tmp, tmp2, t64);
+ tcg_temp_free_i64(t64);
+
+ store_reg(s, rt2, tmp2);
+ } else {
+ gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
+ tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
+ }
+
+ store_reg(s, rt, tmp);
+ tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
+}
+
+static void gen_clrex(DisasContext *s)
+{
+ tcg_gen_movi_i64(cpu_exclusive_addr, -1);
+}
+
+static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
+ TCGv_i32 addr, int size)
+{
+ TCGv_i32 t0, t1, t2;
+ TCGv_i64 extaddr;
+ TCGv taddr;
+ TCGLabel *done_label;
+ TCGLabel *fail_label;
+ TCGMemOp opc = size | MO_ALIGN | s->be_data;
+
+ /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
+ [addr] = {Rt};
+ {Rd} = 0;
+ } else {
+ {Rd} = 1;
+ } */
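+    /* The address comparison is done with an explicit branch; the data
+     * comparison is folded into the atomic cmpxchg below, whose result
+     * provides the success/failure value written to Rd.
+     */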
+ fail_label = gen_new_label();
+ done_label = gen_new_label();
+ extaddr = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(extaddr, addr);
+ tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
+ tcg_temp_free_i64(extaddr);
+
+ taddr = gen_aa32_addr(s, addr, opc);
+ t0 = tcg_temp_new_i32();
+ t1 = load_reg(s, rt);
+ if (size == 3) {
+ TCGv_i64 o64 = tcg_temp_new_i64();
+ TCGv_i64 n64 = tcg_temp_new_i64();
+
+ t2 = load_reg(s, rt2);
+ tcg_gen_concat_i32_i64(n64, t1, t2);
+ tcg_temp_free_i32(t2);
+ gen_aa32_frob64(s, n64);
+
+ tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
+ get_mem_index(s), opc);
+ tcg_temp_free_i64(n64);
+
+ gen_aa32_frob64(s, o64);
+ tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
+ tcg_gen_extrl_i64_i32(t0, o64);
+
+ tcg_temp_free_i64(o64);
+ } else {
+ t2 = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
+ tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
+ tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
+ tcg_temp_free_i32(t2);
+ }
+ tcg_temp_free_i32(t1);
+ tcg_temp_free(taddr);
+ tcg_gen_mov_i32(cpu_R[rd], t0);
+ tcg_temp_free_i32(t0);
+ tcg_gen_br(done_label);
+
+ gen_set_label(fail_label);
+ tcg_gen_movi_i32(cpu_R[rd], 1);
+ gen_set_label(done_label);
+ tcg_gen_movi_i64(cpu_exclusive_addr, -1);
+}
+
+/* gen_srs:
+ * @env: CPUARMState
+ * @s: DisasContext
+ * @mode: mode field from insn (which stack to store to)
+ * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
+ * @writeback: true if writeback bit set
+ *
+ * Generate code for the SRS (Store Return State) insn.
+ */
+static void gen_srs(DisasContext *s,
+ uint32_t mode, uint32_t amode, bool writeback)
+{
+ int32_t offset;
+ TCGv_i32 addr, tmp;
+ bool undef = false;
+
+ /* SRS is:
+ * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
+ * and specified mode is monitor mode
+ * - UNDEFINED in Hyp mode
+ * - UNPREDICTABLE in User or System mode
+ * - UNPREDICTABLE if the specified mode is:
+ * -- not implemented
+ * -- not a valid mode number
+ * -- a mode that's at a higher exception level
+ * -- Monitor, if we are Non-secure
+ * For the UNPREDICTABLE cases we choose to UNDEF.
+ */
+ if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
+ gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(), 3);
+ return;
+ }
+
+ if (s->current_el == 0 || s->current_el == 2) {
+ undef = true;
+ }
+
+ switch (mode) {
+ case ARM_CPU_MODE_USR:
+ case ARM_CPU_MODE_FIQ:
+ case ARM_CPU_MODE_IRQ:
+ case ARM_CPU_MODE_SVC:
+ case ARM_CPU_MODE_ABT:
+ case ARM_CPU_MODE_UND:
+ case ARM_CPU_MODE_SYS:
+ break;
+ case ARM_CPU_MODE_HYP:
+ if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
+ undef = true;
+ }
+ break;
+ case ARM_CPU_MODE_MON:
+ /* No need to check specifically for "are we non-secure" because
+ * we've already made EL0 UNDEF and handled the trap for S-EL1;
+ * so if this isn't EL3 then we must be non-secure.
+ */
+ if (s->current_el != 3) {
+ undef = true;
+ }
+ break;
+ default:
+ undef = true;
+ }
+
+ if (undef) {
+ gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
+ default_exception_el(s));
+ return;
+ }
+
+ addr = tcg_temp_new_i32();
+ tmp = tcg_const_i32(mode);
+ /* get_r13_banked() will raise an exception if called from System mode */
+ gen_set_condexec(s);
+ gen_set_pc_im(s, s->pc - 4);
+ gen_helper_get_r13_banked(addr, cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+ switch (amode) {
+ case 0: /* DA */
+ offset = -4;
+ break;
+ case 1: /* IA */
+ offset = 0;
+ break;
+ case 2: /* DB */
+ offset = -8;
+ break;
+ case 3: /* IB */
+ offset = 4;
+ break;
+ default:
+ abort();
+ }
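+    /* offset positions addr at the lower-addressed of the two words to be
+     * stored (the one that receives LR) for each addressing mode.
+     */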
+ tcg_gen_addi_i32(addr, addr, offset);
+ tmp = load_reg(s, 14);
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
+ tcg_temp_free_i32(tmp);
+ tmp = load_cpu_field(spsr);
+ tcg_gen_addi_i32(addr, addr, 4);
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
+ tcg_temp_free_i32(tmp);
+ if (writeback) {
+ switch (amode) {
+ case 0:
+ offset = -8;
+ break;
+ case 1:
+ offset = 4;
+ break;
+ case 2:
+ offset = -4;
+ break;
+ case 3:
+ offset = 0;
+ break;
+ default:
+ abort();
+ }
+ tcg_gen_addi_i32(addr, addr, offset);
+ tmp = tcg_const_i32(mode);
+ gen_helper_set_r13_banked(cpu_env, tmp, addr);
+ tcg_temp_free_i32(tmp);
+ }
+ tcg_temp_free_i32(addr);
+ s->is_jmp = DISAS_UPDATE;
+}
+
+static void disas_arm_insn(DisasContext *s, unsigned int insn)
+{
+ unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
+ TCGv_i32 tmp;
+ TCGv_i32 tmp2;
+ TCGv_i32 tmp3;
+ TCGv_i32 addr;
+ TCGv_i64 tmp64;
+
+ /* M variants do not implement ARM mode. */
+ if (arm_dc_feature(s, ARM_FEATURE_M)) {
+ goto illegal_op;
+ }
+ cond = insn >> 28;
+ if (cond == 0xf) {
+ /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
+ * choose to UNDEF. In ARMv5 and above the space is used
+ * for miscellaneous unconditional instructions.
+ */
+ ARCH(5);
+
+ /* Unconditional instructions. */
+ if (((insn >> 25) & 7) == 1) {
+ /* NEON Data processing. */
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ goto illegal_op;
+ }
+
+ if (disas_neon_data_insn(s, insn)) {
+ goto illegal_op;
+ }
+ return;
+ }
+ if ((insn & 0x0f100000) == 0x04000000) {
+ /* NEON load/store. */
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ goto illegal_op;
+ }
+
+ if (disas_neon_ls_insn(s, insn)) {
+ goto illegal_op;
+ }
+ return;
+ }
+ if ((insn & 0x0f000e10) == 0x0e000a00) {
+ /* VFP. */
+ if (disas_vfp_insn(s, insn)) {
+ goto illegal_op;
+ }
+ return;
+ }
+ if (((insn & 0x0f30f000) == 0x0510f000) ||
+ ((insn & 0x0f30f010) == 0x0710f000)) {
+ if ((insn & (1 << 22)) == 0) {
+ /* PLDW; v7MP */
+ if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
+ goto illegal_op;
+ }
+ }
+ /* Otherwise PLD; v5TE+ */
+ ARCH(5TE);
+ return;
+ }
+ if (((insn & 0x0f70f000) == 0x0450f000) ||
+ ((insn & 0x0f70f010) == 0x0650f000)) {
+ ARCH(7);
+ return; /* PLI; V7 */
+ }
+ if (((insn & 0x0f700000) == 0x04100000) ||
+ ((insn & 0x0f700010) == 0x06100000)) {
+ if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
+ goto illegal_op;
+ }
+ return; /* v7MP: Unallocated memory hint: must NOP */
+ }
+
+ if ((insn & 0x0ffffdff) == 0x01010000) {
+ ARCH(6);
+ /* setend */
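+            /* Only call the helper (and end the TB) if the requested data
+             * endianness differs from the current setting.
+             */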
+ if (((insn >> 9) & 1) != !!(s->be_data == MO_BE)) {
+ gen_helper_setend(cpu_env);
+ s->is_jmp = DISAS_UPDATE;
+ }
+ return;
+ } else if ((insn & 0x0fffff00) == 0x057ff000) {
+ switch ((insn >> 4) & 0xf) {
+ case 1: /* clrex */
+ ARCH(6K);
+ gen_clrex(s);
+ return;
+ case 4: /* dsb */
+ case 5: /* dmb */
+ ARCH(7);
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
+ return;
+ case 6: /* isb */
+ /* We need to break the TB after this insn to execute
+ * self-modifying code correctly and also to take
+ * any pending interrupts immediately.
+ */
+ gen_lookup_tb(s);
+ return;
+ default:
+ goto illegal_op;
+ }
+ } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
+ /* srs */
+ ARCH(6);
+ gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
+ return;
+ } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
+ /* rfe */
+ int32_t offset;
+ if (IS_USER(s))
+ goto illegal_op;
+ ARCH(6);
+ rn = (insn >> 16) & 0xf;
+ addr = load_reg(s, rn);
+ i = (insn >> 23) & 3;
+ switch (i) {
+ case 0: offset = -4; break; /* DA */
+ case 1: offset = 0; break; /* IA */
+ case 2: offset = -8; break; /* DB */
+ case 3: offset = 4; break; /* IB */
+ default: abort();
+ }
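+            /* As with SRS, offset positions addr at the lower-addressed word:
+             * the saved PC is loaded first, then the CPSR from the next word.
+             */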
+ if (offset)
+ tcg_gen_addi_i32(addr, addr, offset);
+ /* Load PC into tmp and CPSR into tmp2. */
+ tmp = tcg_temp_new_i32();
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
+ tcg_gen_addi_i32(addr, addr, 4);
+ tmp2 = tcg_temp_new_i32();
+ gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
+ if (insn & (1 << 21)) {
+ /* Base writeback. */
+ switch (i) {
+ case 0: offset = -8; break;
+ case 1: offset = 4; break;
+ case 2: offset = -4; break;
+ case 3: offset = 0; break;
+ default: abort();
+ }
+ if (offset)
+ tcg_gen_addi_i32(addr, addr, offset);
+ store_reg(s, rn, addr);
+ } else {
+ tcg_temp_free_i32(addr);
+ }
+ gen_rfe(s, tmp, tmp2);
+ return;
+ } else if ((insn & 0x0e000000) == 0x0a000000) {
+ /* branch link and change to thumb (blx <offset>) */
+ int32_t offset;
+
+ val = (uint32_t)s->pc;
+ tmp = tcg_temp_new_i32();
+ tcg_gen_movi_i32(tmp, val);
+ store_reg(s, 14, tmp);
+ /* Sign-extend the 24-bit offset */
+ offset = (((int32_t)insn) << 8) >> 8;
+ /* offset * 4 + bit24 * 2 + (thumb bit) */
+ val += (offset << 2) | ((insn >> 23) & 2) | 1;
+ /* pipeline offset */
+ val += 4;
+ /* protected by ARCH(5); above, near the start of uncond block */
+ gen_bx_im(s, val);
+ return;
+ } else if ((insn & 0x0e000f00) == 0x0c000100) {
+ if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
+ /* iWMMXt register transfer. */
+ if (extract32(s->c15_cpar, 1, 1)) {
+ if (!disas_iwmmxt_insn(s, insn)) {
+ return;
+ }
+ }
+ }
+ } else if ((insn & 0x0fe00000) == 0x0c400000) {
+ /* Coprocessor double register transfer. */
+ ARCH(5TE);
+ } else if ((insn & 0x0f000010) == 0x0e000010) {
+ /* Additional coprocessor register transfer. */
+ } else if ((insn & 0x0ff10020) == 0x01000000) {
+ uint32_t mask;
+ uint32_t val;
+ /* cps (privileged) */
+ if (IS_USER(s))
+ return;
+ mask = val = 0;
+ if (insn & (1 << 19)) {
+ if (insn & (1 << 8))
+ mask |= CPSR_A;
+ if (insn & (1 << 7))
+ mask |= CPSR_I;
+ if (insn & (1 << 6))
+ mask |= CPSR_F;
+ if (insn & (1 << 18))
+ val |= mask;
+ }
+ if (insn & (1 << 17)) {
+ mask |= CPSR_M;
+ val |= (insn & 0x1f);
+ }
+ if (mask) {
+ gen_set_psr_im(s, mask, 0, val);
+ }
+ return;
+ }
+ goto illegal_op;
+ }
+ if (cond != 0xe) {
+ /* If the condition is not AL (always execute), generate a conditional
+ jump to the next instruction. */
+ s->condlabel = gen_new_label();
+ arm_gen_test_cc(cond ^ 1, s->condlabel);
+ s->condjmp = 1;
+ }
+ if ((insn & 0x0f900000) == 0x03000000) {
+ if ((insn & (1 << 21)) == 0) {
+ ARCH(6T2);
+ rd = (insn >> 12) & 0xf;
+ val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
+ if ((insn & (1 << 22)) == 0) {
+ /* MOVW */
+ tmp = tcg_temp_new_i32();
+ tcg_gen_movi_i32(tmp, val);
+ } else {
+ /* MOVT */
+ tmp = load_reg(s, rd);
+ tcg_gen_ext16u_i32(tmp, tmp);
+ tcg_gen_ori_i32(tmp, tmp, val << 16);
+ }
+ store_reg(s, rd, tmp);
+ } else {
+ if (((insn >> 12) & 0xf) != 0xf)
+ goto illegal_op;
+ if (((insn >> 16) & 0xf) == 0) {
+ gen_nop_hint(s, insn & 0xff);
+ } else {
+ /* CPSR = immediate */
+ val = insn & 0xff;
+ shift = ((insn >> 8) & 0xf) * 2;
+ if (shift)
+ val = (val >> shift) | (val << (32 - shift));
+ i = ((insn & (1 << 22)) != 0);
+ if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
+ i, val)) {
+ goto illegal_op;
+ }
+ }
+ }
+ } else if ((insn & 0x0f900000) == 0x01000000
+ && (insn & 0x00000090) != 0x00000090) {
+ /* miscellaneous instructions */
+ op1 = (insn >> 21) & 3;
+ sh = (insn >> 4) & 0xf;
+ rm = insn & 0xf;
+ switch (sh) {
+ case 0x0: /* MSR, MRS */
+ if (insn & (1 << 9)) {
+ /* MSR (banked) and MRS (banked) */
+ int sysm = extract32(insn, 16, 4) |
+ (extract32(insn, 8, 1) << 4);
+ int r = extract32(insn, 22, 1);
+
+ if (op1 & 1) {
+ /* MSR (banked) */
+ gen_msr_banked(s, r, sysm, rm);
+ } else {
+ /* MRS (banked) */
+ int rd = extract32(insn, 12, 4);
+
+ gen_mrs_banked(s, r, sysm, rd);
+ }
+ break;
+ }
+
+ /* MSR, MRS (for PSRs) */
+ if (op1 & 1) {
+ /* PSR = reg */
+ tmp = load_reg(s, rm);
+ i = ((op1 & 2) != 0);
+ if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
+ goto illegal_op;
+ } else {
+ /* reg = PSR */
+ rd = (insn >> 12) & 0xf;
+ if (op1 & 2) {
+ if (IS_USER(s))
+ goto illegal_op;
+ tmp = load_cpu_field(spsr);
+ } else {
+ tmp = tcg_temp_new_i32();
+ gen_helper_cpsr_read(tmp, cpu_env);
+ }
+ store_reg(s, rd, tmp);
+ }
+ break;
+ case 0x1:
+ if (op1 == 1) {
+ /* branch/exchange thumb (bx). */
+ ARCH(4T);
+ tmp = load_reg(s, rm);
+ gen_bx(s, tmp);
+ } else if (op1 == 3) {
+ /* clz */
+ ARCH(5);
+ rd = (insn >> 12) & 0xf;
+ tmp = load_reg(s, rm);
+ gen_helper_clz(tmp, tmp);
+ store_reg(s, rd, tmp);
+ } else {
+ goto illegal_op;
+ }
+ break;
+ case 0x2:
+ if (op1 == 1) {
+ ARCH(5J); /* bxj */
+ /* Trivial implementation equivalent to bx. */
+ tmp = load_reg(s, rm);
+ gen_bx(s, tmp);
+ } else {
+ goto illegal_op;
+ }
+ break;
+ case 0x3:
+ if (op1 != 1)
+ goto illegal_op;
+
+ ARCH(5);
+ /* branch link/exchange thumb (blx) */
+ tmp = load_reg(s, rm);
+ tmp2 = tcg_temp_new_i32();
+ tcg_gen_movi_i32(tmp2, s->pc);
+ store_reg(s, 14, tmp2);
+ gen_bx(s, tmp);
+ break;
+ case 0x4:
+ {
+ /* crc32/crc32c */
+ uint32_t c = extract32(insn, 8, 4);
+
+ /* Check this CPU supports ARMv8 CRC instructions.
+ * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
+ * Bits 8, 10 and 11 should be zero.
+ */
+ if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
+ (c & 0xd) != 0) {
+ goto illegal_op;
+ }
+
+ rn = extract32(insn, 16, 4);
+ rd = extract32(insn, 12, 4);
+
+ tmp = load_reg(s, rn);
+ tmp2 = load_reg(s, rm);
+ if (op1 == 0) {
+ tcg_gen_andi_i32(tmp2, tmp2, 0xff);
+ } else if (op1 == 1) {
+ tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
+ }
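+            /* The third helper operand is the data size in bytes: 1 << op1 is
+             * 1, 2 or 4 for the byte, halfword and word forms respectively.
+             */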
+ tmp3 = tcg_const_i32(1 << op1);
+ if (c & 0x2) {
+ gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
+ } else {
+ gen_helper_crc32(tmp, tmp, tmp2, tmp3);
+ }
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp3);
+ store_reg(s, rd, tmp);
+ break;
+ }
+ case 0x5: /* saturating add/subtract */
+ ARCH(5TE);
+ rd = (insn >> 12) & 0xf;
+ rn = (insn >> 16) & 0xf;
+ tmp = load_reg(s, rm);
+ tmp2 = load_reg(s, rn);
+ if (op1 & 2)
+ gen_helper_double_saturate(tmp2, cpu_env, tmp2);
+ if (op1 & 1)
+ gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
+ else
+ gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ store_reg(s, rd, tmp);
+ break;
+ case 7:
+ {
+ int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
+ switch (op1) {
+ case 0:
+ /* HLT */
+ gen_hlt(s, imm16);
+ break;
+ case 1:
+ /* bkpt */
+ ARCH(5);
+ gen_exception_insn(s, 4, EXCP_BKPT,
+ syn_aa32_bkpt(imm16, false),
+ default_exception_el(s));
+ break;
+ case 2:
+ /* Hypervisor call (v7) */
+ ARCH(7);
+ if (IS_USER(s)) {
+ goto illegal_op;
+ }
+ gen_hvc(s, imm16);
+ break;
+ case 3:
+ /* Secure monitor call (v6+) */
+ ARCH(6K);
+ if (IS_USER(s)) {
+ goto illegal_op;
+ }
+ gen_smc(s);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+ }
+ case 0x8: /* signed multiply */
+ case 0xa:
+ case 0xc:
+ case 0xe:
+ ARCH(5TE);
+ rs = (insn >> 8) & 0xf;
+ rn = (insn >> 12) & 0xf;
+ rd = (insn >> 16) & 0xf;
+ if (op1 == 1) {
+ /* (32 * 16) >> 16 */
+ tmp = load_reg(s, rm);
+ tmp2 = load_reg(s, rs);
+ if (sh & 4)
+ tcg_gen_sari_i32(tmp2, tmp2, 16);
+ else
+ gen_sxth(tmp2);
+ tmp64 = gen_muls_i64_i32(tmp, tmp2);
+ tcg_gen_shri_i64(tmp64, tmp64, 16);
+ tmp = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(tmp, tmp64);
+ tcg_temp_free_i64(tmp64);
+ if ((sh & 2) == 0) {
+ tmp2 = load_reg(s, rn);
+ gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ }
+ store_reg(s, rd, tmp);
+ } else {
+ /* 16 * 16 */
+ tmp = load_reg(s, rm);
+ tmp2 = load_reg(s, rs);
+ gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
+ tcg_temp_free_i32(tmp2);
+ if (op1 == 2) {
+ tmp64 = tcg_temp_new_i64();
+ tcg_gen_ext_i32_i64(tmp64, tmp);
+ tcg_temp_free_i32(tmp);
+ gen_addq(s, tmp64, rn, rd);
+ gen_storeq_reg(s, rn, rd, tmp64);
+ tcg_temp_free_i64(tmp64);
+ } else {
+ if (op1 == 0) {
+ tmp2 = load_reg(s, rn);
+ gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ }
+ store_reg(s, rd, tmp);
+ }
+ }
+ break;
+ default:
+ goto illegal_op;
+ }
+ } else if (((insn & 0x0e000000) == 0 &&
+ (insn & 0x00000090) != 0x90) ||
+ ((insn & 0x0e000000) == (1 << 25))) {
+ int set_cc, logic_cc, shiftop;
+
+ op1 = (insn >> 21) & 0xf;
+ set_cc = (insn >> 20) & 1;
+ logic_cc = table_logic_cc[op1] & set_cc;
+
+ /* data processing instruction */
+ if (insn & (1 << 25)) {
+ /* immediate operand */
+ val = insn & 0xff;
+ shift = ((insn >> 8) & 0xf) * 2;
+ if (shift) {
+ val = (val >> shift) | (val << (32 - shift));
+ }
+ tmp2 = tcg_temp_new_i32();
+ tcg_gen_movi_i32(tmp2, val);
+ if (logic_cc && shift) {
+ gen_set_CF_bit31(tmp2);
+ }
+ } else {
+ /* register */
+ rm = (insn) & 0xf;
+ tmp2 = load_reg(s, rm);
+ shiftop = (insn >> 5) & 3;
+ if (!(insn & (1 << 4))) {
+ shift = (insn >> 7) & 0x1f;
+ gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
+ } else {
+ rs = (insn >> 8) & 0xf;
+ tmp = load_reg(s, rs);
+ gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
+ }
+ }
+ if (op1 != 0x0f && op1 != 0x0d) {
+ rn = (insn >> 16) & 0xf;
+ tmp = load_reg(s, rn);
+ } else {
+ TCGV_UNUSED_I32(tmp);
+ }
+ rd = (insn >> 12) & 0xf;
+ switch (op1) {
+ case 0x00:
+ tcg_gen_and_i32(tmp, tmp, tmp2);
+ if (logic_cc) {
+ gen_logic_CC(tmp);
+ }
+ store_reg_bx(s, rd, tmp);
+ break;
+ case 0x01:
+ tcg_gen_xor_i32(tmp, tmp, tmp2);
+ if (logic_cc) {
+ gen_logic_CC(tmp);
+ }
+ store_reg_bx(s, rd, tmp);
+ break;
+ case 0x02:
+ if (set_cc && rd == 15) {
+ /* SUBS r15, ... is used for exception return. */
+ if (IS_USER(s)) {
+ goto illegal_op;
+ }
+ gen_sub_CC(tmp, tmp, tmp2);
+ gen_exception_return(s, tmp);
+ } else {
+ if (set_cc) {
+ gen_sub_CC(tmp, tmp, tmp2);
+ } else {
+ tcg_gen_sub_i32(tmp, tmp, tmp2);
+ }
+ store_reg_bx(s, rd, tmp);
+ }
+ break;
+ case 0x03:
+ if (set_cc) {
+ gen_sub_CC(tmp, tmp2, tmp);
+ } else {
+ tcg_gen_sub_i32(tmp, tmp2, tmp);
+ }
+ store_reg_bx(s, rd, tmp);
+ break;
+ case 0x04:
+ if (set_cc) {
+ gen_add_CC(tmp, tmp, tmp2);
+ } else {
+ tcg_gen_add_i32(tmp, tmp, tmp2);
+ }
+ store_reg_bx(s, rd, tmp);
+ break;
+ case 0x05:
+ if (set_cc) {
+ gen_adc_CC(tmp, tmp, tmp2);
+ } else {
+ gen_add_carry(tmp, tmp, tmp2);
+ }
+ store_reg_bx(s, rd, tmp);
+ break;
+ case 0x06:
+ if (set_cc) {
+ gen_sbc_CC(tmp, tmp, tmp2);
+ } else {
+ gen_sub_carry(tmp, tmp, tmp2);
+ }
+ store_reg_bx(s, rd, tmp);
+ break;
+ case 0x07:
+ if (set_cc) {
+ gen_sbc_CC(tmp, tmp2, tmp);
+ } else {
+ gen_sub_carry(tmp, tmp2, tmp);
+ }
+ store_reg_bx(s, rd, tmp);
+ break;
+ case 0x08:
+ if (set_cc) {
+ tcg_gen_and_i32(tmp, tmp, tmp2);
+ gen_logic_CC(tmp);
+ }
+ tcg_temp_free_i32(tmp);
+ break;
+ case 0x09:
+ if (set_cc) {
+ tcg_gen_xor_i32(tmp, tmp, tmp2);
+ gen_logic_CC(tmp);
+ }
+ tcg_temp_free_i32(tmp);
+ break;
+ case 0x0a:
+ if (set_cc) {
+ gen_sub_CC(tmp, tmp, tmp2);
+ }
+ tcg_temp_free_i32(tmp);
+ break;
+ case 0x0b:
+ if (set_cc) {
+ gen_add_CC(tmp, tmp, tmp2);
+ }
+ tcg_temp_free_i32(tmp);
+ break;
+ case 0x0c:
+ tcg_gen_or_i32(tmp, tmp, tmp2);
+ if (logic_cc) {
+ gen_logic_CC(tmp);
+ }
+ store_reg_bx(s, rd, tmp);
+ break;
+ case 0x0d:
+ if (logic_cc && rd == 15) {
+ /* MOVS r15, ... is used for exception return. */
+ if (IS_USER(s)) {
+ goto illegal_op;
+ }
+ gen_exception_return(s, tmp2);
+ } else {
+ if (logic_cc) {
+ gen_logic_CC(tmp2);
+ }
+ store_reg_bx(s, rd, tmp2);
+ }
+ break;
+ case 0x0e:
+ tcg_gen_andc_i32(tmp, tmp, tmp2);
+ if (logic_cc) {
+ gen_logic_CC(tmp);
+ }
+ store_reg_bx(s, rd, tmp);
+ break;
+ default:
+ case 0x0f:
+ tcg_gen_not_i32(tmp2, tmp2);
+ if (logic_cc) {
+ gen_logic_CC(tmp2);
+ }
+ store_reg_bx(s, rd, tmp2);
+ break;
+ }
+ if (op1 != 0x0f && op1 != 0x0d) {
+ tcg_temp_free_i32(tmp2);
+ }
+ } else {
+ /* other instructions */
+ op1 = (insn >> 24) & 0xf;
+ switch (op1) {
+ case 0x0:
+ case 0x1:
+ /* multiplies, extra load/stores */
+ sh = (insn >> 5) & 3;
+ if (sh == 0) {
+ if (op1 == 0x0) {
+ rd = (insn >> 16) & 0xf;
+ rn = (insn >> 12) & 0xf;
+ rs = (insn >> 8) & 0xf;
+ rm = (insn) & 0xf;
+ op1 = (insn >> 20) & 0xf;
+ switch (op1) {
+ case 0: case 1: case 2: case 3: case 6:
+ /* 32 bit mul */
+ tmp = load_reg(s, rs);
+ tmp2 = load_reg(s, rm);
+ tcg_gen_mul_i32(tmp, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ if (insn & (1 << 22)) {
+ /* Subtract (mls) */
+ ARCH(6T2);
+ tmp2 = load_reg(s, rn);
+ tcg_gen_sub_i32(tmp, tmp2, tmp);
+ tcg_temp_free_i32(tmp2);
+ } else if (insn & (1 << 21)) {
+ /* Add */
+ tmp2 = load_reg(s, rn);
+ tcg_gen_add_i32(tmp, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ }
+ if (insn & (1 << 20))
+ gen_logic_CC(tmp);
+ store_reg(s, rd, tmp);
+ break;
+ case 4:
+ /* 64 bit mul double accumulate (UMAAL) */
+ ARCH(6);
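+                        /* UMAAL: form the 32x32->64 unsigned product, then
+                         * add in both halves of the destination pair as
+                         * zero-extended 32-bit values.
+                         */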
+ tmp = load_reg(s, rs);
+ tmp2 = load_reg(s, rm);
+ tmp64 = gen_mulu_i64_i32(tmp, tmp2);
+ gen_addq_lo(s, tmp64, rn);
+ gen_addq_lo(s, tmp64, rd);
+ gen_storeq_reg(s, rn, rd, tmp64);
+ tcg_temp_free_i64(tmp64);
+ break;
+ case 8: case 9: case 10: case 11:
+ case 12: case 13: case 14: case 15:
+ /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
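+                        /* tcg_gen_mulu2/muls2 produce the 64-bit product
+                         * directly as a lo/hi pair of 32-bit values, so no
+                         * 64-bit temporary is needed here.
+                         */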
+ tmp = load_reg(s, rs);
+ tmp2 = load_reg(s, rm);
+ if (insn & (1 << 22)) {
+ tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
+ } else {
+ tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
+ }
+ if (insn & (1 << 21)) { /* mult accumulate */
+ TCGv_i32 al = load_reg(s, rn);
+ TCGv_i32 ah = load_reg(s, rd);
+ tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
+ tcg_temp_free_i32(al);
+ tcg_temp_free_i32(ah);
+ }
+ if (insn & (1 << 20)) {
+ gen_logicq_cc(tmp, tmp2);
+ }
+ store_reg(s, rn, tmp);
+ store_reg(s, rd, tmp2);
+ break;
+ default:
+ goto illegal_op;
+ }
+ } else {
+ rn = (insn >> 16) & 0xf;
+ rd = (insn >> 12) & 0xf;
+ if (insn & (1 << 23)) {
+ /* load/store exclusive */
+ int op2 = (insn >> 8) & 3;
+ op1 = (insn >> 21) & 0x3;
+
+ switch (op2) {
+ case 0: /* lda/stl */
+ if (op1 == 1) {
+ goto illegal_op;
+ }
+ ARCH(8);
+ break;
+ case 1: /* reserved */
+ goto illegal_op;
+ case 2: /* ldaex/stlex */
+ ARCH(8);
+ break;
+ case 3: /* ldrex/strex */
+ if (op1) {
+ ARCH(6K);
+ } else {
+ ARCH(6);
+ }
+ break;
+ }
+
+ addr = tcg_temp_local_new_i32();
+ load_reg_var(s, addr, rn);
+
+ /* Since the emulation does not have barriers,
+ the acquire/release semantics need no special
+ handling */
+ if (op2 == 0) {
+ if (insn & (1 << 20)) {
+ tmp = tcg_temp_new_i32();
+ switch (op1) {
+ case 0: /* lda */
+ gen_aa32_ld32u(s, tmp, addr,
+ get_mem_index(s));
+ break;
+ case 2: /* ldab */
+ gen_aa32_ld8u(s, tmp, addr,
+ get_mem_index(s));
+ break;
+ case 3: /* ldah */
+ gen_aa32_ld16u(s, tmp, addr,
+ get_mem_index(s));
+ break;
+ default:
+ abort();
+ }
+ store_reg(s, rd, tmp);
+ } else {
+ rm = insn & 0xf;
+ tmp = load_reg(s, rm);
+ switch (op1) {
+ case 0: /* stl */
+ gen_aa32_st32(s, tmp, addr,
+ get_mem_index(s));
+ break;
+ case 2: /* stlb */
+ gen_aa32_st8(s, tmp, addr,
+ get_mem_index(s));
+ break;
+ case 3: /* stlh */
+ gen_aa32_st16(s, tmp, addr,
+ get_mem_index(s));
+ break;
+ default:
+ abort();
+ }
+ tcg_temp_free_i32(tmp);
+ }
+ } else if (insn & (1 << 20)) {
+ switch (op1) {
+ case 0: /* ldrex */
+ gen_load_exclusive(s, rd, 15, addr, 2);
+ break;
+ case 1: /* ldrexd */
+ gen_load_exclusive(s, rd, rd + 1, addr, 3);
+ break;
+ case 2: /* ldrexb */
+ gen_load_exclusive(s, rd, 15, addr, 0);
+ break;
+ case 3: /* ldrexh */
+ gen_load_exclusive(s, rd, 15, addr, 1);
+ break;
+ default:
+ abort();
+ }
+ } else {
+ rm = insn & 0xf;
+ switch (op1) {
+ case 0: /* strex */
+ gen_store_exclusive(s, rd, rm, 15, addr, 2);
+ break;
+ case 1: /* strexd */
+ gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
+ break;
+ case 2: /* strexb */
+ gen_store_exclusive(s, rd, rm, 15, addr, 0);
+ break;
+ case 3: /* strexh */
+ gen_store_exclusive(s, rd, rm, 15, addr, 1);
+ break;
+ default:
+ abort();
+ }
+ }
+ tcg_temp_free_i32(addr);
+ } else {
+ TCGv taddr;
+ TCGMemOp opc = s->be_data;
+
+ /* SWP instruction */
+ rm = (insn) & 0xf;
+
+ if (insn & (1 << 22)) {
+ opc |= MO_UB;
+ } else {
+ opc |= MO_UL | MO_ALIGN;
+ }
+
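+                        /* SWP/SWPB is implemented as an atomic exchange on
+                         * the guest address.
+                         */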
+ addr = load_reg(s, rn);
+ taddr = gen_aa32_addr(s, addr, opc);
+ tcg_temp_free_i32(addr);
+
+ tmp = load_reg(s, rm);
+ tcg_gen_atomic_xchg_i32(tmp, taddr, tmp,
+ get_mem_index(s), opc);
+ tcg_temp_free(taddr);
+ store_reg(s, rd, tmp);
+ }
+ }
+ } else {
+ int address_offset;
+ bool load = insn & (1 << 20);
+ bool doubleword = false;
+ /* Misc load/store */
+ rn = (insn >> 16) & 0xf;
+ rd = (insn >> 12) & 0xf;
+
+ if (!load && (sh & 2)) {
+ /* doubleword */
+ ARCH(5TE);
+ if (rd & 1) {
+ /* UNPREDICTABLE; we choose to UNDEF */
+ goto illegal_op;
+ }
+ load = (sh & 1) == 0;
+ doubleword = true;
+ }
+
+ addr = load_reg(s, rn);
+ if (insn & (1 << 24))
+ gen_add_datah_offset(s, insn, 0, addr);
+ address_offset = 0;
+
+ if (doubleword) {
+ if (!load) {
+ /* store */
+ tmp = load_reg(s, rd);
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
+ tcg_temp_free_i32(tmp);
+ tcg_gen_addi_i32(addr, addr, 4);
+ tmp = load_reg(s, rd + 1);
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
+ tcg_temp_free_i32(tmp);
+ } else {
+ /* load */
+ tmp = tcg_temp_new_i32();
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
+ store_reg(s, rd, tmp);
+ tcg_gen_addi_i32(addr, addr, 4);
+ tmp = tcg_temp_new_i32();
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
+ rd++;
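+                        /* rd now names the second register of the pair; the
+                         * common "complete the load" path below writes the
+                         * second word to it.
+                         */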
+ }
+ address_offset = -4;
+ } else if (load) {
+ /* load */
+ tmp = tcg_temp_new_i32();
+ switch (sh) {
+ case 1:
+ gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
+ break;
+ case 2:
+ gen_aa32_ld8s(s, tmp, addr, get_mem_index(s));
+ break;
+ default:
+ case 3:
+ gen_aa32_ld16s(s, tmp, addr, get_mem_index(s));
+ break;
+ }
+ } else {
+ /* store */
+ tmp = load_reg(s, rd);
+ gen_aa32_st16(s, tmp, addr, get_mem_index(s));
+ tcg_temp_free_i32(tmp);
+ }
+ /* Perform base writeback before the loaded value to
+ ensure correct behavior with overlapping index registers.
+ ldrd with base writeback is undefined if the
+ destination and index registers overlap. */
+ if (!(insn & (1 << 24))) {
+ gen_add_datah_offset(s, insn, address_offset, addr);
+ store_reg(s, rn, addr);
+ } else if (insn & (1 << 21)) {
+ if (address_offset)
+ tcg_gen_addi_i32(addr, addr, address_offset);
+ store_reg(s, rn, addr);
+ } else {
+ tcg_temp_free_i32(addr);
+ }
+ if (load) {
+ /* Complete the load. */
+ store_reg(s, rd, tmp);
+ }
+ }
+ break;
+ case 0x4:
+ case 0x5:
+ goto do_ldst;
+ case 0x6:
+ case 0x7:
+ if (insn & (1 << 4)) {
+ ARCH(6);
+ /* Armv6 Media instructions. */
+ rm = insn & 0xf;
+ rn = (insn >> 16) & 0xf;
+ rd = (insn >> 12) & 0xf;
+ rs = (insn >> 8) & 0xf;
+ switch ((insn >> 23) & 3) {
+ case 0: /* Parallel add/subtract. */
+ op1 = (insn >> 20) & 7;
+ tmp = load_reg(s, rn);
+ tmp2 = load_reg(s, rm);
+ sh = (insn >> 5) & 7;
+ if ((op1 & 3) == 0 || sh == 5 || sh == 6)
+ goto illegal_op;
+ gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ store_reg(s, rd, tmp);
+ break;
+ case 1:
+ if ((insn & 0x00700020) == 0) {
+ /* Halfword pack. */
+ tmp = load_reg(s, rn);
+ tmp2 = load_reg(s, rm);
+ shift = (insn >> 7) & 0x1f;
+ if (insn & (1 << 6)) {
+ /* pkhtb */
+ if (shift == 0)
+ shift = 31;
+ tcg_gen_sari_i32(tmp2, tmp2, shift);
+ tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
+ tcg_gen_ext16u_i32(tmp2, tmp2);
+ } else {
+ /* pkhbt */
+ if (shift)
+ tcg_gen_shli_i32(tmp2, tmp2, shift);
+ tcg_gen_ext16u_i32(tmp, tmp);
+ tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
+ }
+ tcg_gen_or_i32(tmp, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ store_reg(s, rd, tmp);
+ } else if ((insn & 0x00200020) == 0x00200000) {
+ /* [us]sat */
+ tmp = load_reg(s, rm);
+ shift = (insn >> 7) & 0x1f;
+ if (insn & (1 << 6)) {
+ if (shift == 0)
+ shift = 31;
+ tcg_gen_sari_i32(tmp, tmp, shift);
+ } else {
+ tcg_gen_shli_i32(tmp, tmp, shift);
+ }
+ sh = (insn >> 16) & 0x1f;
+ tmp2 = tcg_const_i32(sh);
+ if (insn & (1 << 22))
+ gen_helper_usat(tmp, cpu_env, tmp, tmp2);
+ else
+ gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ store_reg(s, rd, tmp);
+ } else if ((insn & 0x00300fe0) == 0x00200f20) {
+ /* [us]sat16 */
+ tmp = load_reg(s, rm);
+ sh = (insn >> 16) & 0x1f;
+ tmp2 = tcg_const_i32(sh);
+ if (insn & (1 << 22))
+ gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
+ else
+ gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ store_reg(s, rd, tmp);
+ } else if ((insn & 0x00700fe0) == 0x00000fa0) {
+ /* Select bytes. */
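+                        /* The GE flags, set by earlier parallel add/subtract
+                         * instructions, choose each result byte from Rn or Rm.
+                         */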
+ tmp = load_reg(s, rn);
+ tmp2 = load_reg(s, rm);
+ tmp3 = tcg_temp_new_i32();
+ tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
+ gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
+ tcg_temp_free_i32(tmp3);
+ tcg_temp_free_i32(tmp2);
+ store_reg(s, rd, tmp);
+ } else if ((insn & 0x000003e0) == 0x00000060) {
+ tmp = load_reg(s, rm);
+ shift = (insn >> 10) & 3;
+ /* ??? In many cases it's not necessary to do a
+ rotate, a shift is sufficient. */
+ if (shift != 0)
+ tcg_gen_rotri_i32(tmp, tmp, shift * 8);
+ op1 = (insn >> 20) & 7;
+ switch (op1) {
+ case 0: gen_sxtb16(tmp); break;
+ case 2: gen_sxtb(tmp); break;
+ case 3: gen_sxth(tmp); break;
+ case 4: gen_uxtb16(tmp); break;
+ case 6: gen_uxtb(tmp); break;
+ case 7: gen_uxth(tmp); break;
+ default: goto illegal_op;
+ }
+ if (rn != 15) {
+ tmp2 = load_reg(s, rn);
+ if ((op1 & 3) == 0) {
+ gen_add16(tmp, tmp2);
+ } else {
+ tcg_gen_add_i32(tmp, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ }
+ }
+ store_reg(s, rd, tmp);
+ } else if ((insn & 0x003f0f60) == 0x003f0f20) {
+ /* rev */
+ tmp = load_reg(s, rm);
+ if (insn & (1 << 22)) {
+ if (insn & (1 << 7)) {
+ gen_revsh(tmp);
+ } else {
+ ARCH(6T2);
+ gen_helper_rbit(tmp, tmp);
+ }
+ } else {
+ if (insn & (1 << 7))
+ gen_rev16(tmp);
+ else
+ tcg_gen_bswap32_i32(tmp, tmp);
+ }
+ store_reg(s, rd, tmp);
+ } else {
+ goto illegal_op;
+ }
+ break;
+ case 2: /* Multiplies (Type 3). */
+ switch ((insn >> 20) & 0x7) {
+ case 5:
+ if (((insn >> 6) ^ (insn >> 7)) & 1) {
+ /* op2 not 00x or 11x : UNDEF */
+ goto illegal_op;
+ }
+ /* Signed multiply most significant [accumulate].
+ (SMMUL, SMMLA, SMMLS) */
+ tmp = load_reg(s, rm);
+ tmp2 = load_reg(s, rs);
+ tmp64 = gen_muls_i64_i32(tmp, tmp2);
+
+ if (rd != 15) {
+ tmp = load_reg(s, rd);
+ if (insn & (1 << 6)) {
+ tmp64 = gen_subq_msw(tmp64, tmp);
+ } else {
+ tmp64 = gen_addq_msw(tmp64, tmp);
+ }
+ }
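+                            /* Bit 5 selects the rounding variants: add
+                             * 0x80000000 before taking the high 32 bits of
+                             * the product.
+                             */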
+ if (insn & (1 << 5)) {
+ tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
+ }
+ tcg_gen_shri_i64(tmp64, tmp64, 32);
+ tmp = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(tmp, tmp64);
+ tcg_temp_free_i64(tmp64);
+ store_reg(s, rn, tmp);
+ break;
+ case 0:
+ case 4:
+ /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
+ if (insn & (1 << 7)) {
+ goto illegal_op;
+ }
+ tmp = load_reg(s, rm);
+ tmp2 = load_reg(s, rs);
+ if (insn & (1 << 5))
+ gen_swap_half(tmp2);
+ gen_smul_dual(tmp, tmp2);
+ if (insn & (1 << 22)) {
+ /* smlald, smlsld */
+ TCGv_i64 tmp64_2;
+
+ tmp64 = tcg_temp_new_i64();
+ tmp64_2 = tcg_temp_new_i64();
+ tcg_gen_ext_i32_i64(tmp64, tmp);
+ tcg_gen_ext_i32_i64(tmp64_2, tmp2);
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp2);
+ if (insn & (1 << 6)) {
+ tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
+ } else {
+ tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
+ }
+ tcg_temp_free_i64(tmp64_2);
+ gen_addq(s, tmp64, rd, rn);
+ gen_storeq_reg(s, rd, rn, tmp64);
+ tcg_temp_free_i64(tmp64);
+ } else {
+ /* smuad, smusd, smlad, smlsd */
+ if (insn & (1 << 6)) {
+ /* This subtraction cannot overflow. */
+ tcg_gen_sub_i32(tmp, tmp, tmp2);
+ } else {
+ /* This addition cannot overflow 32 bits;
+ * however it may overflow considered as a
+ * signed operation, in which case we must set
+ * the Q flag.
+ */
+ gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
+ }
+ tcg_temp_free_i32(tmp2);
+ if (rd != 15) {
+ tmp2 = load_reg(s, rd);
+ gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ }
+ store_reg(s, rn, tmp);
+ }
+ break;
+ case 1:
+ case 3:
+ /* SDIV, UDIV */
+ if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
+ goto illegal_op;
+ }
+ if (((insn >> 5) & 7) || (rd != 15)) {
+ goto illegal_op;
+ }
+ tmp = load_reg(s, rm);
+ tmp2 = load_reg(s, rs);
+ if (insn & (1 << 21)) {
+ gen_helper_udiv(tmp, tmp, tmp2);
+ } else {
+ gen_helper_sdiv(tmp, tmp, tmp2);
+ }
+ tcg_temp_free_i32(tmp2);
+ store_reg(s, rn, tmp);
+ break;
+ default:
+ goto illegal_op;
+ }
+ break;
+ case 3:
+ op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
+ switch (op1) {
+ case 0: /* Unsigned sum of absolute differences. */
+ ARCH(6);
+ tmp = load_reg(s, rm);
+ tmp2 = load_reg(s, rs);
+ gen_helper_usad8(tmp, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ if (rd != 15) {
+ tmp2 = load_reg(s, rd);
+ tcg_gen_add_i32(tmp, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ }
+ store_reg(s, rn, tmp);
+ break;
+ case 0x20: case 0x24: case 0x28: case 0x2c:
+ /* Bitfield insert/clear. */
+ ARCH(6T2);
+ shift = (insn >> 7) & 0x1f;
+ i = (insn >> 16) & 0x1f;
+ if (i < shift) {
+ /* UNPREDICTABLE; we choose to UNDEF */
+ goto illegal_op;
+ }
+ i = i + 1 - shift;
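+                    /* i is now the field width; rm == 15 encodes BFC, which
+                     * deposits zeroes.
+                     */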
+ if (rm == 15) {
+ tmp = tcg_temp_new_i32();
+ tcg_gen_movi_i32(tmp, 0);
+ } else {
+ tmp = load_reg(s, rm);
+ }
+ if (i != 32) {
+ tmp2 = load_reg(s, rd);
+ tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
+ tcg_temp_free_i32(tmp2);
+ }
+ store_reg(s, rd, tmp);
+ break;
+ case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
+ case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
+ ARCH(6T2);
+ tmp = load_reg(s, rm);
+ shift = (insn >> 7) & 0x1f;
+ i = ((insn >> 16) & 0x1f) + 1;
+ if (shift + i > 32)
+ goto illegal_op;
+ if (i < 32) {
+ if (op1 & 0x20) {
+ gen_ubfx(tmp, shift, (1u << i) - 1);
+ } else {
+ gen_sbfx(tmp, shift, i);
+ }
+ }
+ store_reg(s, rd, tmp);
+ break;
+ default:
+ goto illegal_op;
+ }
+ break;
+ }
+ break;
+ }
+ do_ldst:
+ /* Check for undefined extension instructions
+ * per the ARM Bible, i.e.:
+ * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
+ */
+ sh = (0xf << 20) | (0xf << 4);
+ if (op1 == 0x7 && ((insn & sh) == sh)) {
+ goto illegal_op;
+ }
+ /* load/store byte/word */
+ rn = (insn >> 16) & 0xf;
+ rd = (insn >> 12) & 0xf;
+ tmp2 = load_reg(s, rn);
+ if ((insn & 0x01200000) == 0x00200000) {
+ /* ldrt/strt */
+ i = get_a32_user_mem_index(s);
+ } else {
+ i = get_mem_index(s);
+ }
+ if (insn & (1 << 24))
+ gen_add_data_offset(s, insn, tmp2);
+ if (insn & (1 << 20)) {
+ /* load */
+ tmp = tcg_temp_new_i32();
+ if (insn & (1 << 22)) {
+ gen_aa32_ld8u(s, tmp, tmp2, i);
+ } else {
+ gen_aa32_ld32u(s, tmp, tmp2, i);
+ }
+ } else {
+ /* store */
+ tmp = load_reg(s, rd);
+ if (insn & (1 << 22)) {
+ gen_aa32_st8(s, tmp, tmp2, i);
+ } else {
+ gen_aa32_st32(s, tmp, tmp2, i);
+ }
+ tcg_temp_free_i32(tmp);
+ }
+ if (!(insn & (1 << 24))) {
+ gen_add_data_offset(s, insn, tmp2);
+ store_reg(s, rn, tmp2);
+ } else if (insn & (1 << 21)) {
+ store_reg(s, rn, tmp2);
+ } else {
+ tcg_temp_free_i32(tmp2);
+ }
+ if (insn & (1 << 20)) {
+ /* Complete the load. */
+ store_reg_from_load(s, rd, tmp);
+ }
+ break;
+ case 0x08:
+ case 0x09:
+ {
+ int j, n, loaded_base;
+ bool exc_return = false;
+ bool is_load = extract32(insn, 20, 1);
+ bool user = false;
+ TCGv_i32 loaded_var;
+ /* load/store multiple words */
+ /* XXX: store correct base if write back */
+ if (insn & (1 << 22)) {
+ /* LDM (user), LDM (exception return) and STM (user) */
+ if (IS_USER(s))
+ goto illegal_op; /* only usable in supervisor mode */
+
+ if (is_load && extract32(insn, 15, 1)) {
+ exc_return = true;
+ } else {
+ user = true;
+ }
+ }
+ rn = (insn >> 16) & 0xf;
+ addr = load_reg(s, rn);
+
+ /* compute total size */
+ loaded_base = 0;
+ TCGV_UNUSED_I32(loaded_var);
+ n = 0;
+ for (i = 0; i < 16; i++) {
+ if (insn & (1 << i))
+ n++;
+ }
+ /* XXX: test invalid n == 0 case ? */
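+            /* Adjust the base so that addr points at the lowest address to
+             * be transferred; the transfer loop below then always works
+             * upwards.
+             */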
+ if (insn & (1 << 23)) {
+ if (insn & (1 << 24)) {
+ /* pre increment */
+ tcg_gen_addi_i32(addr, addr, 4);
+ } else {
+ /* post increment */
+ }
+ } else {
+ if (insn & (1 << 24)) {
+ /* pre decrement */
+ tcg_gen_addi_i32(addr, addr, -(n * 4));
+ } else {
+ /* post decrement */
+ if (n != 1)
+ tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
+ }
+ }
+ j = 0;
+ for (i = 0; i < 16; i++) {
+ if (insn & (1 << i)) {
+ if (is_load) {
+ /* load */
+ tmp = tcg_temp_new_i32();
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
+ if (user) {
+ tmp2 = tcg_const_i32(i);
+ gen_helper_set_user_reg(cpu_env, tmp2, tmp);
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp);
+ } else if (i == rn) {
+ loaded_var = tmp;
+ loaded_base = 1;
+ } else if (rn == 15 && exc_return) {
+ store_pc_exc_ret(s, tmp);
+ } else {
+ store_reg_from_load(s, i, tmp);
+ }
+ } else {
+ /* store */
+ if (i == 15) {
+ /* special case: r15 = PC + 8 */
+ val = (long)s->pc + 4;
+ tmp = tcg_temp_new_i32();
+ tcg_gen_movi_i32(tmp, val);
+ } else if (user) {
+ tmp = tcg_temp_new_i32();
+ tmp2 = tcg_const_i32(i);
+ gen_helper_get_user_reg(tmp, cpu_env, tmp2);
+ tcg_temp_free_i32(tmp2);
+ } else {
+ tmp = load_reg(s, i);
+ }
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
+ tcg_temp_free_i32(tmp);
+ }
+ j++;
+ /* no need to add after the last transfer */
+ if (j != n)
+ tcg_gen_addi_i32(addr, addr, 4);
+ }
+ }
+ if (insn & (1 << 21)) {
+ /* write back */
+ if (insn & (1 << 23)) {
+ if (insn & (1 << 24)) {
+ /* pre increment */
+ } else {
+ /* post increment */
+ tcg_gen_addi_i32(addr, addr, 4);
+ }
+ } else {
+ if (insn & (1 << 24)) {
+ /* pre decrement */
+ if (n != 1)
+ tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
+ } else {
+ /* post decrement */
+ tcg_gen_addi_i32(addr, addr, -(n * 4));
+ }
+ }
+ store_reg(s, rn, addr);
+ } else {
+ tcg_temp_free_i32(addr);
+ }
+ if (loaded_base) {
+ store_reg(s, rn, loaded_var);
+ }
+ if (exc_return) {
+ /* Restore CPSR from SPSR. */
+ tmp = load_cpu_field(spsr);
+ gen_helper_cpsr_write_eret(cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+ s->is_jmp = DISAS_JUMP;
+ }
+ }
+ break;
+ case 0xa:
+ case 0xb:
+ {
+ int32_t offset;
+
+ /* branch (and link) */
+ val = (int32_t)s->pc;
+ if (insn & (1 << 24)) {
+ tmp = tcg_temp_new_i32();
+ tcg_gen_movi_i32(tmp, val);
+ store_reg(s, 14, tmp);
+ }
+ offset = sextract32(insn << 2, 0, 26);
+ val += offset + 4;
+ gen_jmp(s, val);
+ }
+ break;
+ case 0xc:
+ case 0xd:
+ case 0xe:
+ if (((insn >> 8) & 0xe) == 10) {
+ /* VFP. */
+ if (disas_vfp_insn(s, insn)) {
+ goto illegal_op;
+ }
+ } else if (disas_coproc_insn(s, insn)) {
+ /* Coprocessor. */
+ goto illegal_op;
+ }
+ break;
+ case 0xf:
+ /* swi */
+ gen_set_pc_im(s, s->pc);
+ s->svc_imm = extract32(insn, 0, 24);
+ s->is_jmp = DISAS_SWI;
+ break;
+ default:
+ illegal_op:
+ gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
+ default_exception_el(s));
+ break;
+ }
+ }
+}
+
+/* Return true if this is a Thumb-2 logical op. */
+static int
+thumb2_logic_op(int op)
+{
+ return (op < 8);
+}
+
+/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
+ then set condition code flags based on the result of the operation.
+ If SHIFTER_OUT is nonzero then set the carry flag for logical operations
+ to the high bit of T1.
+ Returns zero if the opcode is valid. */
+
+static int
+gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
+ TCGv_i32 t0, TCGv_i32 t1)
+{
+ int logic_cc;
+
+ logic_cc = 0;
+ switch (op) {
+ case 0: /* and */
+ tcg_gen_and_i32(t0, t0, t1);
+ logic_cc = conds;
+ break;
+ case 1: /* bic */
+ tcg_gen_andc_i32(t0, t0, t1);
+ logic_cc = conds;
+ break;
+ case 2: /* orr */
+ tcg_gen_or_i32(t0, t0, t1);
+ logic_cc = conds;
+ break;
+ case 3: /* orn */
+ tcg_gen_orc_i32(t0, t0, t1);
+ logic_cc = conds;
+ break;
+ case 4: /* eor */
+ tcg_gen_xor_i32(t0, t0, t1);
+ logic_cc = conds;
+ break;
+ case 8: /* add */
+ if (conds)
+ gen_add_CC(t0, t0, t1);
+ else
+ tcg_gen_add_i32(t0, t0, t1);
+ break;
+ case 10: /* adc */
+ if (conds)
+ gen_adc_CC(t0, t0, t1);
+ else
+ gen_adc(t0, t1);
+ break;
+ case 11: /* sbc */
+ if (conds) {
+ gen_sbc_CC(t0, t0, t1);
+ } else {
+ gen_sub_carry(t0, t0, t1);
+ }
+ break;
+ case 13: /* sub */
+ if (conds)
+ gen_sub_CC(t0, t0, t1);
+ else
+ tcg_gen_sub_i32(t0, t0, t1);
+ break;
+ case 14: /* rsb */
+ if (conds)
+ gen_sub_CC(t0, t1, t0);
+ else
+ tcg_gen_sub_i32(t0, t1, t0);
+ break;
+ default: /* 5, 6, 7, 9, 12, 15. */
+ return 1;
+ }
+ if (logic_cc) {
+ gen_logic_CC(t0);
+ if (shifter_out)
+ gen_set_CF_bit31(t1);
+ }
+ return 0;
+}
+
+/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
+ is not legal. */
+static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
+{
+ uint32_t insn, imm, shift, offset;
+ uint32_t rd, rn, rm, rs;
+ TCGv_i32 tmp;
+ TCGv_i32 tmp2;
+ TCGv_i32 tmp3;
+ TCGv_i32 addr;
+ TCGv_i64 tmp64;
+ int op;
+ int shiftop;
+ int conds;
+ int logic_cc;
+
+ if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2)
+ || arm_dc_feature(s, ARM_FEATURE_M))) {
+ /* Thumb-1 cores may need to treat bl and blx as a pair of
+ 16-bit instructions to get correct prefetch abort behavior. */
+ insn = insn_hw1;
+ if ((insn & (1 << 12)) == 0) {
+ ARCH(5);
+ /* Second half of blx. */
+ offset = ((insn & 0x7ff) << 1);
+ tmp = load_reg(s, 14);
+ tcg_gen_addi_i32(tmp, tmp, offset);
+ tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
+
+ tmp2 = tcg_temp_new_i32();
+ tcg_gen_movi_i32(tmp2, s->pc | 1);
+ store_reg(s, 14, tmp2);
+ gen_bx(s, tmp);
+ return 0;
+ }
+ if (insn & (1 << 11)) {
+ /* Second half of bl. */
+ offset = ((insn & 0x7ff) << 1) | 1;
+ tmp = load_reg(s, 14);
+ tcg_gen_addi_i32(tmp, tmp, offset);
+
+ tmp2 = tcg_temp_new_i32();
+ tcg_gen_movi_i32(tmp2, s->pc | 1);
+ store_reg(s, 14, tmp2);
+ gen_bx(s, tmp);
+ return 0;
+ }
+ if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
+ /* Instruction spans a page boundary. Implement it as two
+ 16-bit instructions in case the second half causes a
+ prefetch abort. */
+ offset = ((int32_t)insn << 21) >> 9;
+ tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
+ return 0;
+ }
+ /* Fall through to 32-bit decode. */
+ }
+
+ insn = arm_lduw_code(env, s->pc, s->sctlr_b);
+ s->pc += 2;
+ insn |= (uint32_t)insn_hw1 << 16;
+
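+    /* Only the BL/BLX pair of encodings predates Thumb-2; any other
+     * 32-bit encoding requires ARCH(6T2).
+     */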
+ if ((insn & 0xf800e800) != 0xf000e800) {
+ ARCH(6T2);
+ }
+
+ rn = (insn >> 16) & 0xf;
+ rs = (insn >> 12) & 0xf;
+ rd = (insn >> 8) & 0xf;
+ rm = insn & 0xf;
+ switch ((insn >> 25) & 0xf) {
+ case 0: case 1: case 2: case 3:
+ /* 16-bit instructions. Should never happen. */
+ abort();
+ case 4:
+ if (insn & (1 << 22)) {
+ /* Other load/store, table branch. */
+ if (insn & 0x01200000) {
+ /* Load/store doubleword. */
+ if (rn == 15) {
+ addr = tcg_temp_new_i32();
+ tcg_gen_movi_i32(addr, s->pc & ~3);
+ } else {
+ addr = load_reg(s, rn);
+ }
+ offset = (insn & 0xff) * 4;
+ if ((insn & (1 << 23)) == 0)
+ offset = -offset;
+ if (insn & (1 << 24)) {
+ tcg_gen_addi_i32(addr, addr, offset);
+ offset = 0;
+ }
+ if (insn & (1 << 20)) {
+ /* ldrd */
+ tmp = tcg_temp_new_i32();
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
+ store_reg(s, rs, tmp);
+ tcg_gen_addi_i32(addr, addr, 4);
+ tmp = tcg_temp_new_i32();
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
+ store_reg(s, rd, tmp);
+ } else {
+ /* strd */
+ tmp = load_reg(s, rs);
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
+ tcg_temp_free_i32(tmp);
+ tcg_gen_addi_i32(addr, addr, 4);
+ tmp = load_reg(s, rd);
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
+ tcg_temp_free_i32(tmp);
+ }
+ if (insn & (1 << 21)) {
+ /* Base writeback. */
+ if (rn == 15)
+ goto illegal_op;
+ tcg_gen_addi_i32(addr, addr, offset - 4);
+ store_reg(s, rn, addr);
+ } else {
+ tcg_temp_free_i32(addr);
+ }
+ } else if ((insn & (1 << 23)) == 0) {
+ /* Load/store exclusive word. */
+ addr = tcg_temp_local_new_i32();
+ load_reg_var(s, addr, rn);
+ tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
+ if (insn & (1 << 20)) {
+ gen_load_exclusive(s, rs, 15, addr, 2);
+ } else {
+ gen_store_exclusive(s, rd, rs, 15, addr, 2);
+ }
+ tcg_temp_free_i32(addr);
+ } else if ((insn & (7 << 5)) == 0) {
+ /* Table Branch. */
+ if (rn == 15) {
+ addr = tcg_temp_new_i32();
+ tcg_gen_movi_i32(addr, s->pc);
+ } else {
+ addr = load_reg(s, rn);
+ }
+ tmp = load_reg(s, rm);
+ tcg_gen_add_i32(addr, addr, tmp);
+ if (insn & (1 << 4)) {
+ /* tbh */
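+                    /* For TBH the index register is added a second time,
+                     * giving a halfword-scaled table offset.
+                     */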
+ tcg_gen_add_i32(addr, addr, tmp);
+ tcg_temp_free_i32(tmp);
+ tmp = tcg_temp_new_i32();
+ gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
+ } else { /* tbb */
+ tcg_temp_free_i32(tmp);
+ tmp = tcg_temp_new_i32();
+ gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
+ }
+ tcg_temp_free_i32(addr);
+ tcg_gen_shli_i32(tmp, tmp, 1);
+ tcg_gen_addi_i32(tmp, tmp, s->pc);
+ store_reg(s, 15, tmp);
+ } else {
+ int op2 = (insn >> 6) & 0x3;
+ op = (insn >> 4) & 0x3;
+ switch (op2) {
+ case 0:
+ goto illegal_op;
+ case 1:
+ /* Load/store exclusive byte/halfword/doubleword */
+ if (op == 2) {
+ goto illegal_op;
+ }
+ ARCH(7);
+ break;
+ case 2:
+ /* Load-acquire/store-release */
+ if (op == 3) {
+ goto illegal_op;
+ }
+ /* Fall through */
+ case 3:
+ /* Load-acquire/store-release exclusive */
+ ARCH(8);
+ break;
+ }
+ addr = tcg_temp_local_new_i32();
+ load_reg_var(s, addr, rn);
+ if (!(op2 & 1)) {
+ if (insn & (1 << 20)) {
+ tmp = tcg_temp_new_i32();
+ switch (op) {
+ case 0: /* ldab */
+ gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
+ break;
+ case 1: /* ldah */
+ gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
+ break;
+ case 2: /* lda */
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
+ break;
+ default:
+ abort();
+ }
+ store_reg(s, rs, tmp);
+ } else {
+ tmp = load_reg(s, rs);
+ switch (op) {
+ case 0: /* stlb */
+ gen_aa32_st8(s, tmp, addr, get_mem_index(s));
+ break;
+ case 1: /* stlh */
+ gen_aa32_st16(s, tmp, addr, get_mem_index(s));
+ break;
+ case 2: /* stl */
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
+ break;
+ default:
+ abort();
+ }
+ tcg_temp_free_i32(tmp);
+ }
+ } else if (insn & (1 << 20)) {
+ gen_load_exclusive(s, rs, rd, addr, op);
+ } else {
+ gen_store_exclusive(s, rm, rs, rd, addr, op);
+ }
+ tcg_temp_free_i32(addr);
+ }
+ } else {
+ /* Load/store multiple, RFE, SRS. */
+ if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
+ /* RFE, SRS: not available in user mode or on M profile */
+ if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
+ goto illegal_op;
+ }
+ if (insn & (1 << 20)) {
+ /* rfe */
+ addr = load_reg(s, rn);
+ if ((insn & (1 << 24)) == 0)
+ tcg_gen_addi_i32(addr, addr, -8);
+ /* Load PC into tmp and CPSR into tmp2. */
+ tmp = tcg_temp_new_i32();
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
+ tcg_gen_addi_i32(addr, addr, 4);
+ tmp2 = tcg_temp_new_i32();
+ gen_aa32_ld32u(s, tmp2, addr, get_mem_index(s));
+ if (insn & (1 << 21)) {
+ /* Base writeback. */
+ if (insn & (1 << 24)) {
+ tcg_gen_addi_i32(addr, addr, 4);
+ } else {
+ tcg_gen_addi_i32(addr, addr, -4);
+ }
+ store_reg(s, rn, addr);
+ } else {
+ tcg_temp_free_i32(addr);
+ }
+ gen_rfe(s, tmp, tmp2);
+ } else {
+ /* srs */
+ gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
+ insn & (1 << 21));
+ }
+ } else {
+ int i, loaded_base = 0;
+ TCGv_i32 loaded_var;
+ /* Load/store multiple. */
+ addr = load_reg(s, rn);
+ offset = 0;
+ for (i = 0; i < 16; i++) {
+ if (insn & (1 << i))
+ offset += 4;
+ }
+ if (insn & (1 << 24)) {
+ tcg_gen_addi_i32(addr, addr, -offset);
+ }
+
+ TCGV_UNUSED_I32(loaded_var);
+ for (i = 0; i < 16; i++) {
+ if ((insn & (1 << i)) == 0)
+ continue;
+ if (insn & (1 << 20)) {
+ /* Load. */
+ tmp = tcg_temp_new_i32();
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
+ if (i == 15) {
+ gen_bx(s, tmp);
+ } else if (i == rn) {
+ loaded_var = tmp;
+ loaded_base = 1;
+ } else {
+ store_reg(s, i, tmp);
+ }
+ } else {
+ /* Store. */
+ tmp = load_reg(s, i);
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
+ tcg_temp_free_i32(tmp);
+ }
+ tcg_gen_addi_i32(addr, addr, 4);
+ }
+ if (loaded_base) {
+ store_reg(s, rn, loaded_var);
+ }
+ if (insn & (1 << 21)) {
+ /* Base register writeback. */
+ if (insn & (1 << 24)) {
+ tcg_gen_addi_i32(addr, addr, -offset);
+ }
+ /* Fault if writeback register is in register list. */
+ if (insn & (1 << rn))
+ goto illegal_op;
+ store_reg(s, rn, addr);
+ } else {
+ tcg_temp_free_i32(addr);
+ }
+ }
+ }
+ break;
+ case 5:
+
+ op = (insn >> 21) & 0xf;
+ if (op == 6) {
+ if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
+ goto illegal_op;
+ }
+ /* Halfword pack. */
+ tmp = load_reg(s, rn);
+ tmp2 = load_reg(s, rm);
+ shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
+ if (insn & (1 << 5)) {
+ /* pkhtb */
+ if (shift == 0)
+ shift = 31;
+ tcg_gen_sari_i32(tmp2, tmp2, shift);
+ tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
+ tcg_gen_ext16u_i32(tmp2, tmp2);
+ } else {
+ /* pkhbt */
+ if (shift)
+ tcg_gen_shli_i32(tmp2, tmp2, shift);
+ tcg_gen_ext16u_i32(tmp, tmp);
+ tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
+ }
+ tcg_gen_or_i32(tmp, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ store_reg(s, rd, tmp);
+ } else {
+ /* Data processing register constant shift. */
+ if (rn == 15) {
+ tmp = tcg_temp_new_i32();
+ tcg_gen_movi_i32(tmp, 0);
+ } else {
+ tmp = load_reg(s, rn);
+ }
+ tmp2 = load_reg(s, rm);
+
+ shiftop = (insn >> 4) & 3;
+ shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
+ conds = (insn & (1 << 20)) != 0;
+ logic_cc = (conds && thumb2_logic_op(op));
+ gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
+ if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
+ goto illegal_op;
+ tcg_temp_free_i32(tmp2);
+ if (rd != 15) {
+ store_reg(s, rd, tmp);
+ } else {
+ tcg_temp_free_i32(tmp);
+ }
+ }
+ break;
+ case 13: /* Misc data processing. */
+ op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
+ if (op < 4 && (insn & 0xf000) != 0xf000)
+ goto illegal_op;
+ switch (op) {
+ case 0: /* Register controlled shift. */
+ tmp = load_reg(s, rn);
+ tmp2 = load_reg(s, rm);
+ if ((insn & 0x70) != 0)
+ goto illegal_op;
+ op = (insn >> 21) & 3;
+ logic_cc = (insn & (1 << 20)) != 0;
+ gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
+ if (logic_cc)
+ gen_logic_CC(tmp);
+ store_reg_bx(s, rd, tmp);
+ break;
+ case 1: /* Sign/zero extend. */
+ op = (insn >> 20) & 7;
+ switch (op) {
+ case 0: /* SXTAH, SXTH */
+ case 1: /* UXTAH, UXTH */
+ case 4: /* SXTAB, SXTB */
+ case 5: /* UXTAB, UXTB */
+ break;
+ case 2: /* SXTAB16, SXTB16 */
+ case 3: /* UXTAB16, UXTB16 */
+ if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
+ goto illegal_op;
+ }
+ break;
+ default:
+ goto illegal_op;
+ }
+ if (rn != 15) {
+ if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
+ goto illegal_op;
+ }
+ }
+ tmp = load_reg(s, rm);
+ shift = (insn >> 4) & 3;
+ /* ??? In many cases it's not necessary to do a
+ rotate, a shift is sufficient. */
+ if (shift != 0)
+ tcg_gen_rotri_i32(tmp, tmp, shift * 8);
+ op = (insn >> 20) & 7;
+ switch (op) {
+ case 0: gen_sxth(tmp); break;
+ case 1: gen_uxth(tmp); break;
+ case 2: gen_sxtb16(tmp); break;
+ case 3: gen_uxtb16(tmp); break;
+ case 4: gen_sxtb(tmp); break;
+ case 5: gen_uxtb(tmp); break;
+ default:
+ g_assert_not_reached();
+ }
+ if (rn != 15) {
+ tmp2 = load_reg(s, rn);
+ if ((op >> 1) == 1) {
+ gen_add16(tmp, tmp2);
+ } else {
+ tcg_gen_add_i32(tmp, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ }
+ }
+ store_reg(s, rd, tmp);
+ break;
+ case 2: /* SIMD add/subtract. */
+ if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
+ goto illegal_op;
+ }
+ op = (insn >> 20) & 7;
+ shift = (insn >> 4) & 7;
+ if ((op & 3) == 3 || (shift & 3) == 3)
+ goto illegal_op;
+ tmp = load_reg(s, rn);
+ tmp2 = load_reg(s, rm);
+ gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ store_reg(s, rd, tmp);
+ break;
+ case 3: /* Other data processing. */
+ op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
+ if (op < 4) {
+ /* Saturating add/subtract. */
+ if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
+ goto illegal_op;
+ }
+ tmp = load_reg(s, rn);
+ tmp2 = load_reg(s, rm);
+ if (op & 1)
+ gen_helper_double_saturate(tmp, cpu_env, tmp);
+ if (op & 2)
+ gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
+ else
+ gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ } else {
+ switch (op) {
+ case 0x0a: /* rbit */
+ case 0x08: /* rev */
+ case 0x09: /* rev16 */
+ case 0x0b: /* revsh */
+ case 0x18: /* clz */
+ break;
+ case 0x10: /* sel */
+ if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
+ goto illegal_op;
+ }
+ break;
+ case 0x20: /* crc32/crc32c */
+ case 0x21:
+ case 0x22:
+ case 0x28:
+ case 0x29:
+ case 0x2a:
+ if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
+ goto illegal_op;
+ }
+ break;
+ default:
+ goto illegal_op;
+ }
+ tmp = load_reg(s, rn);
+ switch (op) {
+ case 0x0a: /* rbit */
+ gen_helper_rbit(tmp, tmp);
+ break;
+ case 0x08: /* rev */
+ tcg_gen_bswap32_i32(tmp, tmp);
+ break;
+ case 0x09: /* rev16 */
+ gen_rev16(tmp);
+ break;
+ case 0x0b: /* revsh */
+ gen_revsh(tmp);
+ break;
+ case 0x10: /* sel */
+ tmp2 = load_reg(s, rm);
+ tmp3 = tcg_temp_new_i32();
+ tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
+ gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
+ tcg_temp_free_i32(tmp3);
+ tcg_temp_free_i32(tmp2);
+ break;
+ case 0x18: /* clz */
+ gen_helper_clz(tmp, tmp);
+ break;
+ case 0x20:
+ case 0x21:
+ case 0x22:
+ case 0x28:
+ case 0x29:
+ case 0x2a:
+ {
+ /* crc32/crc32c */
+ uint32_t sz = op & 0x3;
+ uint32_t c = op & 0x8;
+
+ tmp2 = load_reg(s, rm);
+ if (sz == 0) {
+ tcg_gen_andi_i32(tmp2, tmp2, 0xff);
+ } else if (sz == 1) {
+ tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
+ }
+ tmp3 = tcg_const_i32(1 << sz);
+ if (c) {
+ gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
+ } else {
+ gen_helper_crc32(tmp, tmp, tmp2, tmp3);
+ }
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp3);
+ break;
+ }
+ default:
+ g_assert_not_reached();
+ }
+ }
+ store_reg(s, rd, tmp);
+ break;
+ case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
+ switch ((insn >> 20) & 7) {
+ case 0: /* 32 x 32 -> 32 */
+ case 7: /* Unsigned sum of absolute differences. */
+ break;
+ case 1: /* 16 x 16 -> 32 */
+ case 2: /* Dual multiply add. */
+ case 3: /* 32 * 16 -> 32msb */
+ case 4: /* Dual multiply subtract. */
+ case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
+ if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
+ goto illegal_op;
+ }
+ break;
+ }
+ op = (insn >> 4) & 0xf;
+ tmp = load_reg(s, rn);
+ tmp2 = load_reg(s, rm);
+ switch ((insn >> 20) & 7) {
+ case 0: /* 32 x 32 -> 32 */
+ tcg_gen_mul_i32(tmp, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ if (rs != 15) {
+ tmp2 = load_reg(s, rs);
+ if (op)
+ tcg_gen_sub_i32(tmp, tmp2, tmp);
+ else
+ tcg_gen_add_i32(tmp, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ }
+ break;
+ case 1: /* 16 x 16 -> 32 */
+ gen_mulxy(tmp, tmp2, op & 2, op & 1);
+ tcg_temp_free_i32(tmp2);
+ if (rs != 15) {
+ tmp2 = load_reg(s, rs);
+ gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ }
+ break;
+ case 2: /* Dual multiply add. */
+ case 4: /* Dual multiply subtract. */
+ if (op)
+ gen_swap_half(tmp2);
+ gen_smul_dual(tmp, tmp2);
+ if (insn & (1 << 22)) {
+ /* This subtraction cannot overflow. */
+ tcg_gen_sub_i32(tmp, tmp, tmp2);
+ } else {
+ /* This addition cannot overflow 32 bits;
+ * however it may overflow considered as a signed
+ * operation, in which case we must set the Q flag.
+ */
+ gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
+ }
+ tcg_temp_free_i32(tmp2);
+ if (rs != 15) {
+ tmp2 = load_reg(s, rs);
+ gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ }
+ break;
+ case 3: /* 32 * 16 -> 32msb */
+ if (op)
+ tcg_gen_sari_i32(tmp2, tmp2, 16);
+ else
+ gen_sxth(tmp2);
+ tmp64 = gen_muls_i64_i32(tmp, tmp2);
+ tcg_gen_shri_i64(tmp64, tmp64, 16);
+ tmp = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(tmp, tmp64);
+ tcg_temp_free_i64(tmp64);
+ if (rs != 15) {
+ tmp2 = load_reg(s, rs);
+ gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ }
+ break;
+ case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
+ tmp64 = gen_muls_i64_i32(tmp, tmp2);
+ if (rs != 15) {
+ tmp = load_reg(s, rs);
+ if (insn & (1 << 20)) {
+ tmp64 = gen_addq_msw(tmp64, tmp);
+ } else {
+ tmp64 = gen_subq_msw(tmp64, tmp);
+ }
+ }
+ if (insn & (1 << 4)) {
+ tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
+ }
+ tcg_gen_shri_i64(tmp64, tmp64, 32);
+ tmp = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(tmp, tmp64);
+ tcg_temp_free_i64(tmp64);
+ break;
+ case 7: /* Unsigned sum of absolute differences. */
+ gen_helper_usad8(tmp, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ if (rs != 15) {
+ tmp2 = load_reg(s, rs);
+ tcg_gen_add_i32(tmp, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ }
+ break;
+ }
+ store_reg(s, rd, tmp);
+ break;
+ case 6: case 7: /* 64-bit multiply, Divide. */
+ op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
+ tmp = load_reg(s, rn);
+ tmp2 = load_reg(s, rm);
+ if ((op & 0x50) == 0x10) {
+ /* sdiv, udiv */
+ if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
+ goto illegal_op;
+ }
+ if (op & 0x20)
+ gen_helper_udiv(tmp, tmp, tmp2);
+ else
+ gen_helper_sdiv(tmp, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ store_reg(s, rd, tmp);
+ } else if ((op & 0xe) == 0xc) {
+ /* Dual multiply accumulate long. */
+ if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp2);
+ goto illegal_op;
+ }
+ if (op & 1)
+ gen_swap_half(tmp2);
+ gen_smul_dual(tmp, tmp2);
+ if (op & 0x10) {
+ tcg_gen_sub_i32(tmp, tmp, tmp2);
+ } else {
+ tcg_gen_add_i32(tmp, tmp, tmp2);
+ }
+ tcg_temp_free_i32(tmp2);
+ /* BUGFIX */
+ tmp64 = tcg_temp_new_i64();
+ tcg_gen_ext_i32_i64(tmp64, tmp);
+ tcg_temp_free_i32(tmp);
+ gen_addq(s, tmp64, rs, rd);
+ gen_storeq_reg(s, rs, rd, tmp64);
+ tcg_temp_free_i64(tmp64);
+ } else {
+ if (op & 0x20) {
+ /* Unsigned 64-bit multiply */
+ tmp64 = gen_mulu_i64_i32(tmp, tmp2);
+ } else {
+ if (op & 8) {
+ /* smlalxy */
+ if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp);
+ goto illegal_op;
+ }
+ gen_mulxy(tmp, tmp2, op & 2, op & 1);
+ tcg_temp_free_i32(tmp2);
+ tmp64 = tcg_temp_new_i64();
+ tcg_gen_ext_i32_i64(tmp64, tmp);
+ tcg_temp_free_i32(tmp);
+ } else {
+ /* Signed 64-bit multiply */
+ tmp64 = gen_muls_i64_i32(tmp, tmp2);
+ }
+ }
+ if (op & 4) {
+ /* umaal */
+ if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
+ tcg_temp_free_i64(tmp64);
+ goto illegal_op;
+ }
+ gen_addq_lo(s, tmp64, rs);
+ gen_addq_lo(s, tmp64, rd);
+ } else if (op & 0x40) {
+ /* 64-bit accumulate. */
+ gen_addq(s, tmp64, rs, rd);
+ }
+ gen_storeq_reg(s, rs, rd, tmp64);
+ tcg_temp_free_i64(tmp64);
+ }
+ break;
+ }
+ break;
+ case 6: case 7: case 14: case 15:
+ /* Coprocessor. */
+ if (((insn >> 24) & 3) == 3) {
+ /* Translate into the equivalent ARM encoding. */
+ insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
+ if (disas_neon_data_insn(s, insn)) {
+ goto illegal_op;
+ }
+ } else if (((insn >> 8) & 0xe) == 10) {
+ if (disas_vfp_insn(s, insn)) {
+ goto illegal_op;
+ }
+ } else {
+ if (insn & (1 << 28))
+ goto illegal_op;
+ if (disas_coproc_insn(s, insn)) {
+ goto illegal_op;
+ }
+ }
+ break;
+ case 8: case 9: case 10: case 11:
+ if (insn & (1 << 15)) {
+ /* Branches, misc control. */
+ if (insn & 0x5000) {
+ /* Unconditional branch. */
+ /* signextend(hw1[10:0]) -> offset[:12]. */
+ offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
+ /* hw1[10:0] -> offset[11:1]. */
+ offset |= (insn & 0x7ff) << 1;
+ /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
+ offset[24:22] already have the same value because of the
+ sign extension above. */
+ offset ^= ((~insn) & (1 << 13)) << 10;
+ offset ^= ((~insn) & (1 << 11)) << 11;
+
+ if (insn & (1 << 14)) {
+ /* Branch and link. */
+ tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
+ }
+
+ offset += s->pc;
+ if (insn & (1 << 12)) {
+ /* b/bl */
+ gen_jmp(s, offset);
+ } else {
+ /* blx */
+ offset &= ~(uint32_t)2;
+ /* thumb2 bx, no need to check */
+ gen_bx_im(s, offset);
+ }
+ } else if (((insn >> 23) & 7) == 7) {
+ /* Misc control */
+ if (insn & (1 << 13))
+ goto illegal_op;
+
+ if (insn & (1 << 26)) {
+ if (!(insn & (1 << 20))) {
+ /* Hypervisor call (v7) */
+ int imm16 = extract32(insn, 16, 4) << 12
+ | extract32(insn, 0, 12);
+ ARCH(7);
+ if (IS_USER(s)) {
+ goto illegal_op;
+ }
+ gen_hvc(s, imm16);
+ } else {
+ /* Secure monitor call (v6+) */
+ ARCH(6K);
+ if (IS_USER(s)) {
+ goto illegal_op;
+ }
+ gen_smc(s);
+ }
+ } else {
+ op = (insn >> 20) & 7;
+ switch (op) {
+ case 0: /* msr cpsr. */
+ if (arm_dc_feature(s, ARM_FEATURE_M)) {
+ tmp = load_reg(s, rn);
+ addr = tcg_const_i32(insn & 0xff);
+ gen_helper_v7m_msr(cpu_env, addr, tmp);
+ tcg_temp_free_i32(addr);
+ tcg_temp_free_i32(tmp);
+ gen_lookup_tb(s);
+ break;
+ }
+ /* fall through */
+ case 1: /* msr spsr. */
+ if (arm_dc_feature(s, ARM_FEATURE_M)) {
+ goto illegal_op;
+ }
+
+ if (extract32(insn, 5, 1)) {
+ /* MSR (banked) */
+ int sysm = extract32(insn, 8, 4) |
+ (extract32(insn, 4, 1) << 4);
+ int r = op & 1;
+
+ gen_msr_banked(s, r, sysm, rm);
+ break;
+ }
+
+ /* MSR (for PSRs) */
+ tmp = load_reg(s, rn);
+ if (gen_set_psr(s,
+ msr_mask(s, (insn >> 8) & 0xf, op == 1),
+ op == 1, tmp))
+ goto illegal_op;
+ break;
+ case 2: /* cps, nop-hint. */
+ if (((insn >> 8) & 7) == 0) {
+ gen_nop_hint(s, insn & 0xff);
+ }
+ /* Implemented as NOP in user mode. */
+ if (IS_USER(s))
+ break;
+ offset = 0;
+ imm = 0;
+ if (insn & (1 << 10)) {
+ if (insn & (1 << 7))
+ offset |= CPSR_A;
+ if (insn & (1 << 6))
+ offset |= CPSR_I;
+ if (insn & (1 << 5))
+ offset |= CPSR_F;
+ if (insn & (1 << 9))
+ imm = CPSR_A | CPSR_I | CPSR_F;
+ }
+ if (insn & (1 << 8)) {
+ offset |= 0x1f;
+ imm |= (insn & 0x1f);
+ }
+ if (offset) {
+ gen_set_psr_im(s, offset, 0, imm);
+ }
+ break;
+ case 3: /* Special control operations. */
+ ARCH(7);
+ op = (insn >> 4) & 0xf;
+ switch (op) {
+ case 2: /* clrex */
+ gen_clrex(s);
+ break;
+ case 4: /* dsb */
+ case 5: /* dmb */
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
+ break;
+ case 6: /* isb */
+ /* We need to break the TB after this insn
+ * to execute self-modifying code correctly
+ * and also to take any pending interrupts
+ * immediately.
+ */
+ gen_lookup_tb(s);
+ break;
+ default:
+ goto illegal_op;
+ }
+ break;
+ case 4: /* bxj */
+ /* Trivial implementation equivalent to bx. */
+ tmp = load_reg(s, rn);
+ gen_bx(s, tmp);
+ break;
+ case 5: /* Exception return. */
+ if (IS_USER(s)) {
+ goto illegal_op;
+ }
+ if (rn != 14 || rd != 15) {
+ goto illegal_op;
+ }
+ tmp = load_reg(s, rn);
+ tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
+ gen_exception_return(s, tmp);
+ break;
+ case 6: /* MRS */
+ if (extract32(insn, 5, 1)) {
+ /* MRS (banked) */
+ int sysm = extract32(insn, 16, 4) |
+ (extract32(insn, 4, 1) << 4);
+
+ gen_mrs_banked(s, 0, sysm, rd);
+ break;
+ }
+
+ /* mrs cpsr */
+ tmp = tcg_temp_new_i32();
+ if (arm_dc_feature(s, ARM_FEATURE_M)) {
+ addr = tcg_const_i32(insn & 0xff);
+ gen_helper_v7m_mrs(tmp, cpu_env, addr);
+ tcg_temp_free_i32(addr);
+ } else {
+ gen_helper_cpsr_read(tmp, cpu_env);
+ }
+ store_reg(s, rd, tmp);
+ break;
+ case 7: /* MRS */
+ if (extract32(insn, 5, 1)) {
+ /* MRS (banked) */
+ int sysm = extract32(insn, 16, 4) |
+ (extract32(insn, 4, 1) << 4);
+
+ gen_mrs_banked(s, 1, sysm, rd);
+ break;
+ }
+
+ /* mrs spsr. */
+ /* Not accessible in user mode. */
+ if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
+ goto illegal_op;
+ }
+ tmp = load_cpu_field(spsr);
+ store_reg(s, rd, tmp);
+ break;
+ }
+ }
+ } else {
+ /* Conditional branch. */
+ op = (insn >> 22) & 0xf;
+ /* Generate a conditional jump to next instruction. */
+ s->condlabel = gen_new_label();
+ arm_gen_test_cc(op ^ 1, s->condlabel);
+ s->condjmp = 1;
+
+ /* offset[11:1] = insn[10:0] */
+ offset = (insn & 0x7ff) << 1;
+ /* offset[17:12] = insn[21:16]. */
+ offset |= (insn & 0x003f0000) >> 4;
+ /* offset[31:20] = insn[26]. */
+ offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
+ /* offset[18] = insn[13]. */
+ offset |= (insn & (1 << 13)) << 5;
+ /* offset[19] = insn[11]. */
+ offset |= (insn & (1 << 11)) << 8;
+
+ /* jump to the offset */
+ gen_jmp(s, s->pc + offset);
+ }
+ } else {
+ /* Data processing immediate. */
+ if (insn & (1 << 25)) {
+ if (insn & (1 << 24)) {
+ if (insn & (1 << 20))
+ goto illegal_op;
+ /* Bitfield/Saturate. */
+ op = (insn >> 21) & 7;
+ imm = insn & 0x1f;
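+                /* shift/lsb position = imm3:imm2 from insn[14:12] and insn[7:6] */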
+ shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
+ if (rn == 15) {
+ tmp = tcg_temp_new_i32();
+ tcg_gen_movi_i32(tmp, 0);
+ } else {
+ tmp = load_reg(s, rn);
+ }
+ switch (op) {
+ case 2: /* Signed bitfield extract. */
+ imm++;
+ if (shift + imm > 32)
+ goto illegal_op;
+ if (imm < 32)
+ gen_sbfx(tmp, shift, imm);
+ break;
+ case 6: /* Unsigned bitfield extract. */
+ imm++;
+ if (shift + imm > 32)
+ goto illegal_op;
+ if (imm < 32)
+ gen_ubfx(tmp, shift, (1u << imm) - 1);
+ break;
+ case 3: /* Bitfield insert/clear. */
+ if (imm < shift)
+ goto illegal_op;
+ imm = imm + 1 - shift;
+ if (imm != 32) {
+ tmp2 = load_reg(s, rd);
+ tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
+ tcg_temp_free_i32(tmp2);
+ }
+ break;
+ case 7:
+ goto illegal_op;
+ default: /* Saturate. */
+ if (shift) {
+ if (op & 1)
+ tcg_gen_sari_i32(tmp, tmp, shift);
+ else
+ tcg_gen_shli_i32(tmp, tmp, shift);
+ }
+ tmp2 = tcg_const_i32(imm);
+ if (op & 4) {
+ /* Unsigned. */
+ if ((op & 1) && shift == 0) {
+ if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp2);
+ goto illegal_op;
+ }
+ gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
+ } else {
+ gen_helper_usat(tmp, cpu_env, tmp, tmp2);
+ }
+ } else {
+ /* Signed. */
+ if ((op & 1) && shift == 0) {
+ if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp2);
+ goto illegal_op;
+ }
+ gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
+ } else {
+ gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
+ }
+ }
+ tcg_temp_free_i32(tmp2);
+ break;
+ }
+ store_reg(s, rd, tmp);
+ } else {
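+                /* imm[11:0] = i:imm3:imm8 from insn[26], insn[14:12] and insn[7:0] */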
+ imm = ((insn & 0x04000000) >> 15)
+ | ((insn & 0x7000) >> 4) | (insn & 0xff);
+ if (insn & (1 << 22)) {
+ /* 16-bit immediate. */
+ imm |= (insn >> 4) & 0xf000;
+ if (insn & (1 << 23)) {
+ /* movt */
+ tmp = load_reg(s, rd);
+ tcg_gen_ext16u_i32(tmp, tmp);
+ tcg_gen_ori_i32(tmp, tmp, imm << 16);
+ } else {
+ /* movw */
+ tmp = tcg_temp_new_i32();
+ tcg_gen_movi_i32(tmp, imm);
+ }
+ } else {
+ /* Add/sub 12-bit immediate. */
+ if (rn == 15) {
+ offset = s->pc & ~(uint32_t)3;
+ if (insn & (1 << 23))
+ offset -= imm;
+ else
+ offset += imm;
+ tmp = tcg_temp_new_i32();
+ tcg_gen_movi_i32(tmp, offset);
+ } else {
+ tmp = load_reg(s, rn);
+ if (insn & (1 << 23))
+ tcg_gen_subi_i32(tmp, tmp, imm);
+ else
+ tcg_gen_addi_i32(tmp, tmp, imm);
+ }
+ }
+ store_reg(s, rd, tmp);
+ }
+ } else {
+ int shifter_out = 0;
+ /* modified 12-bit immediate. */
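+            /* expansion selector = i:imm3 from insn[26] and insn[14:12] */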
+ shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
+ imm = (insn & 0xff);
+ switch (shift) {
+ case 0: /* XY */
+ /* Nothing to do. */
+ break;
+ case 1: /* 00XY00XY */
+ imm |= imm << 16;
+ break;
+ case 2: /* XY00XY00 */
+ imm |= imm << 16;
+ imm <<= 8;
+ break;
+ case 3: /* XYXYXYXY */
+ imm |= imm << 16;
+ imm |= imm << 8;
+ break;
+ default: /* Rotated constant. */
+ shift = (shift << 1) | (imm >> 7);
+ imm |= 0x80;
+ imm = imm << (32 - shift);
+ shifter_out = 1;
+ break;
+ }
+ tmp2 = tcg_temp_new_i32();
+ tcg_gen_movi_i32(tmp2, imm);
+ rn = (insn >> 16) & 0xf;
+ if (rn == 15) {
+ tmp = tcg_temp_new_i32();
+ tcg_gen_movi_i32(tmp, 0);
+ } else {
+ tmp = load_reg(s, rn);
+ }
+ op = (insn >> 21) & 0xf;
+ if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
+ shifter_out, tmp, tmp2))
+ goto illegal_op;
+ tcg_temp_free_i32(tmp2);
+ rd = (insn >> 8) & 0xf;
+ if (rd != 15) {
+ store_reg(s, rd, tmp);
+ } else {
+ tcg_temp_free_i32(tmp);
+ }
+ }
+ }
+ break;
+ case 12: /* Load/store single data item. */
+ {
+ int postinc = 0;
+ int writeback = 0;
+ int memidx;
+ if ((insn & 0x01100000) == 0x01000000) {
+ if (disas_neon_ls_insn(s, insn)) {
+ goto illegal_op;
+ }
+ break;
+ }
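+        /* op[1:0] = size (insn[22:21]), op[2] = sign-extend (insn[24]) */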
+ op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
+ if (rs == 15) {
+ if (!(insn & (1 << 20))) {
+ goto illegal_op;
+ }
+ if (op != 2) {
+ /* Byte or halfword load space with dest == r15 : memory hints.
+ * Catch them early so we don't emit pointless addressing code.
+ * This space is a mix of:
+ * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
+ * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
+ * cores)
+ * unallocated hints, which must be treated as NOPs
+ * UNPREDICTABLE space, which we NOP or UNDEF depending on
+ * which is easiest for the decoding logic
+ * Some space which must UNDEF
+ */
+ int op1 = (insn >> 23) & 3;
+ int op2 = (insn >> 6) & 0x3f;
+ if (op & 2) {
+ goto illegal_op;
+ }
+ if (rn == 15) {
+ /* UNPREDICTABLE, unallocated hint or
+ * PLD/PLDW/PLI (literal)
+ */
+ return 0;
+ }
+ if (op1 & 1) {
+ return 0; /* PLD/PLDW/PLI or unallocated hint */
+ }
+ if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
+ return 0; /* PLD/PLDW/PLI or unallocated hint */
+ }
+ /* UNDEF space, or an UNPREDICTABLE */
+ return 1;
+ }
+ }
+ memidx = get_mem_index(s);
+ if (rn == 15) {
+ addr = tcg_temp_new_i32();
+ /* PC relative. */
+ /* s->pc has already been incremented by 4. */
+ imm = s->pc & 0xfffffffc;
+ if (insn & (1 << 23))
+ imm += insn & 0xfff;
+ else
+ imm -= insn & 0xfff;
+ tcg_gen_movi_i32(addr, imm);
+ } else {
+ addr = load_reg(s, rn);
+ if (insn & (1 << 23)) {
+ /* Positive offset. */
+ imm = insn & 0xfff;
+ tcg_gen_addi_i32(addr, addr, imm);
+ } else {
+ imm = insn & 0xff;
+ switch ((insn >> 8) & 0xf) {
+ case 0x0: /* Shifted Register. */
+ shift = (insn >> 4) & 0xf;
+ if (shift > 3) {
+ tcg_temp_free_i32(addr);
+ goto illegal_op;
+ }
+ tmp = load_reg(s, rm);
+ if (shift)
+ tcg_gen_shli_i32(tmp, tmp, shift);
+ tcg_gen_add_i32(addr, addr, tmp);
+ tcg_temp_free_i32(tmp);
+ break;
+ case 0xc: /* Negative offset. */
+ tcg_gen_addi_i32(addr, addr, -imm);
+ break;
+ case 0xe: /* User privilege. */
+ tcg_gen_addi_i32(addr, addr, imm);
+ memidx = get_a32_user_mem_index(s);
+ break;
+ case 0x9: /* Post-decrement. */
+ imm = -imm;
+ /* Fall through. */
+ case 0xb: /* Post-increment. */
+ postinc = 1;
+ writeback = 1;
+ break;
+ case 0xd: /* Pre-decrement. */
+ imm = -imm;
+ /* Fall through. */
+ case 0xf: /* Pre-increment. */
+ tcg_gen_addi_i32(addr, addr, imm);
+ writeback = 1;
+ break;
+ default:
+ tcg_temp_free_i32(addr);
+ goto illegal_op;
+ }
+ }
+ }
+ if (insn & (1 << 20)) {
+ /* Load. */
+ tmp = tcg_temp_new_i32();
+ switch (op) {
+ case 0:
+ gen_aa32_ld8u(s, tmp, addr, memidx);
+ break;
+ case 4:
+ gen_aa32_ld8s(s, tmp, addr, memidx);
+ break;
+ case 1:
+ gen_aa32_ld16u(s, tmp, addr, memidx);
+ break;
+ case 5:
+ gen_aa32_ld16s(s, tmp, addr, memidx);
+ break;
+ case 2:
+ gen_aa32_ld32u(s, tmp, addr, memidx);
+ break;
+ default:
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(addr);
+ goto illegal_op;
+ }
+ if (rs == 15) {
+ gen_bx(s, tmp);
+ } else {
+ store_reg(s, rs, tmp);
+ }
+ } else {
+ /* Store. */
+ tmp = load_reg(s, rs);
+ switch (op) {
+ case 0:
+ gen_aa32_st8(s, tmp, addr, memidx);
+ break;
+ case 1:
+ gen_aa32_st16(s, tmp, addr, memidx);
+ break;
+ case 2:
+ gen_aa32_st32(s, tmp, addr, memidx);
+ break;
+ default:
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(addr);
+ goto illegal_op;
+ }
+ tcg_temp_free_i32(tmp);
+ }
+ if (postinc)
+ tcg_gen_addi_i32(addr, addr, imm);
+ if (writeback) {
+ store_reg(s, rn, addr);
+ } else {
+ tcg_temp_free_i32(addr);
+ }
+ }
+ break;
+ default:
+ goto illegal_op;
+ }
+ return 0;
+illegal_op:
+ return 1;
+}
+
+static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
+{
+ uint32_t val, insn, op, rm, rn, rd, shift, cond;
+ int32_t offset;
+ int i;
+ TCGv_i32 tmp;
+ TCGv_i32 tmp2;
+ TCGv_i32 addr;
+
+ if (s->condexec_mask) {
+ cond = s->condexec_cond;
+ if (cond != 0x0e) { /* Skip conditional when condition is AL. */
+ s->condlabel = gen_new_label();
+ arm_gen_test_cc(cond ^ 1, s->condlabel);
+ s->condjmp = 1;
+ }
+ }
+
+ insn = arm_lduw_code(env, s->pc, s->sctlr_b);
+ s->pc += 2;
+
+ switch (insn >> 12) {
+ case 0: case 1:
+
+ rd = insn & 7;
+ op = (insn >> 11) & 3;
+ if (op == 3) {
+ /* add/subtract */
+ rn = (insn >> 3) & 7;
+ tmp = load_reg(s, rn);
+ if (insn & (1 << 10)) {
+ /* immediate */
+ tmp2 = tcg_temp_new_i32();
+ tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
+ } else {
+ /* reg */
+ rm = (insn >> 6) & 7;
+ tmp2 = load_reg(s, rm);
+ }
+ if (insn & (1 << 9)) {
+ if (s->condexec_mask)
+ tcg_gen_sub_i32(tmp, tmp, tmp2);
+ else
+ gen_sub_CC(tmp, tmp, tmp2);
+ } else {
+ if (s->condexec_mask)
+ tcg_gen_add_i32(tmp, tmp, tmp2);
+ else
+ gen_add_CC(tmp, tmp, tmp2);
+ }
+ tcg_temp_free_i32(tmp2);
+ store_reg(s, rd, tmp);
+ } else {
+ /* shift immediate */
+ rm = (insn >> 3) & 7;
+ shift = (insn >> 6) & 0x1f;
+ tmp = load_reg(s, rm);
+ gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
+ if (!s->condexec_mask)
+ gen_logic_CC(tmp);
+ store_reg(s, rd, tmp);
+ }
+ break;
+ case 2: case 3:
+ /* arithmetic large immediate */
+ op = (insn >> 11) & 3;
+ rd = (insn >> 8) & 0x7;
+ if (op == 0) { /* mov */
+ tmp = tcg_temp_new_i32();
+ tcg_gen_movi_i32(tmp, insn & 0xff);
+ if (!s->condexec_mask)
+ gen_logic_CC(tmp);
+ store_reg(s, rd, tmp);
+ } else {
+ tmp = load_reg(s, rd);
+ tmp2 = tcg_temp_new_i32();
+ tcg_gen_movi_i32(tmp2, insn & 0xff);
+ switch (op) {
+ case 1: /* cmp */
+ gen_sub_CC(tmp, tmp, tmp2);
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp2);
+ break;
+ case 2: /* add */
+ if (s->condexec_mask)
+ tcg_gen_add_i32(tmp, tmp, tmp2);
+ else
+ gen_add_CC(tmp, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ store_reg(s, rd, tmp);
+ break;
+ case 3: /* sub */
+ if (s->condexec_mask)
+ tcg_gen_sub_i32(tmp, tmp, tmp2);
+ else
+ gen_sub_CC(tmp, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ store_reg(s, rd, tmp);
+ break;
+ }
+ }
+ break;
+ case 4:
+ if (insn & (1 << 11)) {
+ rd = (insn >> 8) & 7;
+ /* load pc-relative. Bit 1 of PC is ignored. */
+ val = s->pc + 2 + ((insn & 0xff) * 4);
+ val &= ~(uint32_t)2;
+ addr = tcg_temp_new_i32();
+ tcg_gen_movi_i32(addr, val);
+ tmp = tcg_temp_new_i32();
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
+ tcg_temp_free_i32(addr);
+ store_reg(s, rd, tmp);
+ break;
+ }
+ if (insn & (1 << 10)) {
+ /* data processing extended or blx */
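+            /* Rd = insn[2:0], with its high bit taken from insn[7] */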
+ rd = (insn & 7) | ((insn >> 4) & 8);
+ rm = (insn >> 3) & 0xf;
+ op = (insn >> 8) & 3;
+ switch (op) {
+ case 0: /* add */
+ tmp = load_reg(s, rd);
+ tmp2 = load_reg(s, rm);
+ tcg_gen_add_i32(tmp, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ store_reg(s, rd, tmp);
+ break;
+ case 1: /* cmp */
+ tmp = load_reg(s, rd);
+ tmp2 = load_reg(s, rm);
+ gen_sub_CC(tmp, tmp, tmp2);
+ tcg_temp_free_i32(tmp2);
+ tcg_temp_free_i32(tmp);
+ break;
+ case 2: /* mov/cpy */
+ tmp = load_reg(s, rm);
+ store_reg(s, rd, tmp);
+ break;
+            case 3: /* branch [and link] exchange thumb register */
+ tmp = load_reg(s, rm);
+ if (insn & (1 << 7)) {
+ ARCH(5);
+ val = (uint32_t)s->pc | 1;
+ tmp2 = tcg_temp_new_i32();
+ tcg_gen_movi_i32(tmp2, val);
+ store_reg(s, 14, tmp2);
+ }
+ /* already thumb, no need to check */
+ gen_bx(s, tmp);
+ break;
+ }
+ break;
+ }
+
+ /* data processing register */
+ rd = insn & 7;
+ rm = (insn >> 3) & 7;
+ op = (insn >> 6) & 0xf;
+ if (op == 2 || op == 3 || op == 4 || op == 7) {
+ /* the shift/rotate ops want the operands backwards */
+ val = rm;
+ rm = rd;
+ rd = val;
+ val = 1;
+ } else {
+ val = 0;
+ }
+
+ if (op == 9) { /* neg */
+ tmp = tcg_temp_new_i32();
+ tcg_gen_movi_i32(tmp, 0);
+ } else if (op != 0xf) { /* mvn doesn't read its first operand */
+ tmp = load_reg(s, rd);
+ } else {
+ TCGV_UNUSED_I32(tmp);
+ }
+
+ tmp2 = load_reg(s, rm);
+ switch (op) {
+ case 0x0: /* and */
+ tcg_gen_and_i32(tmp, tmp, tmp2);
+ if (!s->condexec_mask)
+ gen_logic_CC(tmp);
+ break;
+ case 0x1: /* eor */
+ tcg_gen_xor_i32(tmp, tmp, tmp2);
+ if (!s->condexec_mask)
+ gen_logic_CC(tmp);
+ break;
+ case 0x2: /* lsl */
+ if (s->condexec_mask) {
+ gen_shl(tmp2, tmp2, tmp);
+ } else {
+ gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
+ gen_logic_CC(tmp2);
+ }
+ break;
+ case 0x3: /* lsr */
+ if (s->condexec_mask) {
+ gen_shr(tmp2, tmp2, tmp);
+ } else {
+ gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
+ gen_logic_CC(tmp2);
+ }
+ break;
+ case 0x4: /* asr */
+ if (s->condexec_mask) {
+ gen_sar(tmp2, tmp2, tmp);
+ } else {
+ gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
+ gen_logic_CC(tmp2);
+ }
+ break;
+ case 0x5: /* adc */
+ if (s->condexec_mask) {
+ gen_adc(tmp, tmp2);
+ } else {
+ gen_adc_CC(tmp, tmp, tmp2);
+ }
+ break;
+ case 0x6: /* sbc */
+ if (s->condexec_mask) {
+ gen_sub_carry(tmp, tmp, tmp2);
+ } else {
+ gen_sbc_CC(tmp, tmp, tmp2);
+ }
+ break;
+ case 0x7: /* ror */
+ if (s->condexec_mask) {
+ tcg_gen_andi_i32(tmp, tmp, 0x1f);
+ tcg_gen_rotr_i32(tmp2, tmp2, tmp);
+ } else {
+ gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
+ gen_logic_CC(tmp2);
+ }
+ break;
+ case 0x8: /* tst */
+ tcg_gen_and_i32(tmp, tmp, tmp2);
+ gen_logic_CC(tmp);
+ rd = 16;
+ break;
+ case 0x9: /* neg */
+ if (s->condexec_mask)
+ tcg_gen_neg_i32(tmp, tmp2);
+ else
+ gen_sub_CC(tmp, tmp, tmp2);
+ break;
+ case 0xa: /* cmp */
+ gen_sub_CC(tmp, tmp, tmp2);
+ rd = 16;
+ break;
+ case 0xb: /* cmn */
+ gen_add_CC(tmp, tmp, tmp2);
+ rd = 16;
+ break;
+ case 0xc: /* orr */
+ tcg_gen_or_i32(tmp, tmp, tmp2);
+ if (!s->condexec_mask)
+ gen_logic_CC(tmp);
+ break;
+ case 0xd: /* mul */
+ tcg_gen_mul_i32(tmp, tmp, tmp2);
+ if (!s->condexec_mask)
+ gen_logic_CC(tmp);
+ break;
+ case 0xe: /* bic */
+ tcg_gen_andc_i32(tmp, tmp, tmp2);
+ if (!s->condexec_mask)
+ gen_logic_CC(tmp);
+ break;
+ case 0xf: /* mvn */
+ tcg_gen_not_i32(tmp2, tmp2);
+ if (!s->condexec_mask)
+ gen_logic_CC(tmp2);
+ val = 1;
+ rm = rd;
+ break;
+ }
+ if (rd != 16) {
+ if (val) {
+ store_reg(s, rm, tmp2);
+ if (op != 0xf)
+ tcg_temp_free_i32(tmp);
+ } else {
+ store_reg(s, rd, tmp);
+ tcg_temp_free_i32(tmp2);
+ }
+ } else {
+ tcg_temp_free_i32(tmp);
+ tcg_temp_free_i32(tmp2);
+ }
+ break;
+
+ case 5:
+ /* load/store register offset. */
+ rd = insn & 7;
+ rn = (insn >> 3) & 7;
+ rm = (insn >> 6) & 7;
+ op = (insn >> 9) & 7;
+ addr = load_reg(s, rn);
+ tmp = load_reg(s, rm);
+ tcg_gen_add_i32(addr, addr, tmp);
+ tcg_temp_free_i32(tmp);
+
+ if (op < 3) { /* store */
+ tmp = load_reg(s, rd);
+ } else {
+ tmp = tcg_temp_new_i32();
+ }
+
+ switch (op) {
+ case 0: /* str */
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
+ break;
+ case 1: /* strh */
+ gen_aa32_st16(s, tmp, addr, get_mem_index(s));
+ break;
+ case 2: /* strb */
+ gen_aa32_st8(s, tmp, addr, get_mem_index(s));
+ break;
+ case 3: /* ldrsb */
+ gen_aa32_ld8s(s, tmp, addr, get_mem_index(s));
+ break;
+ case 4: /* ldr */
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
+ break;
+ case 5: /* ldrh */
+ gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
+ break;
+ case 6: /* ldrb */
+ gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
+ break;
+ case 7: /* ldrsh */
+ gen_aa32_ld16s(s, tmp, addr, get_mem_index(s));
+ break;
+ }
+ if (op >= 3) { /* load */
+ store_reg(s, rd, tmp);
+ } else {
+ tcg_temp_free_i32(tmp);
+ }
+ tcg_temp_free_i32(addr);
+ break;
+
+ case 6:
+ /* load/store word immediate offset */
+ rd = insn & 7;
+ rn = (insn >> 3) & 7;
+ addr = load_reg(s, rn);
+ val = (insn >> 4) & 0x7c;
+ tcg_gen_addi_i32(addr, addr, val);
+
+ if (insn & (1 << 11)) {
+ /* load */
+ tmp = tcg_temp_new_i32();
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
+ store_reg(s, rd, tmp);
+ } else {
+ /* store */
+ tmp = load_reg(s, rd);
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
+ tcg_temp_free_i32(tmp);
+ }
+ tcg_temp_free_i32(addr);
+ break;
+
+ case 7:
+ /* load/store byte immediate offset */
+ rd = insn & 7;
+ rn = (insn >> 3) & 7;
+ addr = load_reg(s, rn);
+ val = (insn >> 6) & 0x1f;
+ tcg_gen_addi_i32(addr, addr, val);
+
+ if (insn & (1 << 11)) {
+ /* load */
+ tmp = tcg_temp_new_i32();
+ gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
+ store_reg(s, rd, tmp);
+ } else {
+ /* store */
+ tmp = load_reg(s, rd);
+ gen_aa32_st8(s, tmp, addr, get_mem_index(s));
+ tcg_temp_free_i32(tmp);
+ }
+ tcg_temp_free_i32(addr);
+ break;
+
+ case 8:
+ /* load/store halfword immediate offset */
+ rd = insn & 7;
+ rn = (insn >> 3) & 7;
+ addr = load_reg(s, rn);
+ val = (insn >> 5) & 0x3e;
+ tcg_gen_addi_i32(addr, addr, val);
+
+ if (insn & (1 << 11)) {
+ /* load */
+ tmp = tcg_temp_new_i32();
+ gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
+ store_reg(s, rd, tmp);
+ } else {
+ /* store */
+ tmp = load_reg(s, rd);
+ gen_aa32_st16(s, tmp, addr, get_mem_index(s));
+ tcg_temp_free_i32(tmp);
+ }
+ tcg_temp_free_i32(addr);
+ break;
+
+ case 9:
+ /* load/store from stack */
+ rd = (insn >> 8) & 7;
+ addr = load_reg(s, 13);
+ val = (insn & 0xff) * 4;
+ tcg_gen_addi_i32(addr, addr, val);
+
+ if (insn & (1 << 11)) {
+ /* load */
+ tmp = tcg_temp_new_i32();
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
+ store_reg(s, rd, tmp);
+ } else {
+ /* store */
+ tmp = load_reg(s, rd);
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
+ tcg_temp_free_i32(tmp);
+ }
+ tcg_temp_free_i32(addr);
+ break;
+
+ case 10:
+ /* add to high reg */
+ rd = (insn >> 8) & 7;
+ if (insn & (1 << 11)) {
+ /* SP */
+ tmp = load_reg(s, 13);
+ } else {
+ /* PC. bit 1 is ignored. */
+ tmp = tcg_temp_new_i32();
+ tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
+ }
+ val = (insn & 0xff) * 4;
+ tcg_gen_addi_i32(tmp, tmp, val);
+ store_reg(s, rd, tmp);
+ break;
+
+ case 11:
+ /* misc */
+ op = (insn >> 8) & 0xf;
+ switch (op) {
+ case 0:
+ /* adjust stack pointer */
+ tmp = load_reg(s, 13);
+ val = (insn & 0x7f) * 4;
+ if (insn & (1 << 7))
+ val = -(int32_t)val;
+ tcg_gen_addi_i32(tmp, tmp, val);
+ store_reg(s, 13, tmp);
+ break;
+
+ case 2: /* sign/zero extend. */
+ ARCH(6);
+ rd = insn & 7;
+ rm = (insn >> 3) & 7;
+ tmp = load_reg(s, rm);
+ switch ((insn >> 6) & 3) {
+ case 0: gen_sxth(tmp); break;
+ case 1: gen_sxtb(tmp); break;
+ case 2: gen_uxth(tmp); break;
+ case 3: gen_uxtb(tmp); break;
+ }
+ store_reg(s, rd, tmp);
+ break;
+ case 4: case 5: case 0xc: case 0xd:
+ /* push/pop */
+ addr = load_reg(s, 13);
+ if (insn & (1 << 8))
+ offset = 4;
+ else
+ offset = 0;
+ for (i = 0; i < 8; i++) {
+ if (insn & (1 << i))
+ offset += 4;
+ }
+ if ((insn & (1 << 11)) == 0) {
+ tcg_gen_addi_i32(addr, addr, -offset);
+ }
+ for (i = 0; i < 8; i++) {
+ if (insn & (1 << i)) {
+ if (insn & (1 << 11)) {
+ /* pop */
+ tmp = tcg_temp_new_i32();
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
+ store_reg(s, i, tmp);
+ } else {
+ /* push */
+ tmp = load_reg(s, i);
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
+ tcg_temp_free_i32(tmp);
+ }
+ /* advance to the next address. */
+ tcg_gen_addi_i32(addr, addr, 4);
+ }
+ }
+ TCGV_UNUSED_I32(tmp);
+ if (insn & (1 << 8)) {
+ if (insn & (1 << 11)) {
+ /* pop pc */
+ tmp = tcg_temp_new_i32();
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
+ /* don't set the pc until the rest of the instruction
+ has completed */
+ } else {
+ /* push lr */
+ tmp = load_reg(s, 14);
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
+ tcg_temp_free_i32(tmp);
+ }
+ tcg_gen_addi_i32(addr, addr, 4);
+ }
+ if ((insn & (1 << 11)) == 0) {
+ tcg_gen_addi_i32(addr, addr, -offset);
+ }
+ /* write back the new stack pointer */
+ store_reg(s, 13, addr);
+ /* set the new PC value */
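+            /* only for a pop (bit 11) that included PC in the list (bit 8) */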
+ if ((insn & 0x0900) == 0x0900) {
+ store_reg_from_load(s, 15, tmp);
+ }
+ break;
+
+        case 1: case 3: case 9: case 11: /* cbz/cbnz */
+ rm = insn & 7;
+ tmp = load_reg(s, rm);
+ s->condlabel = gen_new_label();
+ s->condjmp = 1;
+ if (insn & (1 << 11))
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
+ else
+ tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
+ tcg_temp_free_i32(tmp);
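+            /* branch offset = i:imm5:'0' from insn[9] and insn[7:3] */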
+ offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
+ val = (uint32_t)s->pc + 2;
+ val += offset;
+ gen_jmp(s, val);
+ break;
+
+ case 15: /* IT, nop-hint. */
+ if ((insn & 0xf) == 0) {
+ gen_nop_hint(s, (insn >> 4) & 0xf);
+ break;
+ }
+ /* If Then. */
+ s->condexec_cond = (insn >> 4) & 0xe;
+ s->condexec_mask = insn & 0x1f;
+ /* No actual code generated for this insn, just setup state. */
+ break;
+
+ case 0xe: /* bkpt */
+ {
+ int imm8 = extract32(insn, 0, 8);
+ ARCH(5);
+ gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true),
+ default_exception_el(s));
+ break;
+ }
+
+ case 0xa: /* rev, and hlt */
+ {
+ int op1 = extract32(insn, 6, 2);
+
+ if (op1 == 2) {
+ /* HLT */
+ int imm6 = extract32(insn, 0, 6);
+
+ gen_hlt(s, imm6);
+ break;
+ }
+
+ /* Otherwise this is rev */
+ ARCH(6);
+ rn = (insn >> 3) & 0x7;
+ rd = insn & 0x7;
+ tmp = load_reg(s, rn);
+ switch (op1) {
+ case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
+ case 1: gen_rev16(tmp); break;
+ case 3: gen_revsh(tmp); break;
+ default:
+ g_assert_not_reached();
+ }
+ store_reg(s, rd, tmp);
+ break;
+ }
+
+ case 6:
+ switch ((insn >> 5) & 7) {
+ case 2:
+ /* setend */
+ ARCH(6);
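+                /* insn[3] is the requested BE state; only switch if it differs */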
+ if (((insn >> 3) & 1) != !!(s->be_data == MO_BE)) {
+ gen_helper_setend(cpu_env);
+ s->is_jmp = DISAS_UPDATE;
+ }
+ break;
+ case 3:
+ /* cps */
+ ARCH(6);
+ if (IS_USER(s)) {
+ break;
+ }
+ if (arm_dc_feature(s, ARM_FEATURE_M)) {
+ tmp = tcg_const_i32((insn & (1 << 4)) != 0);
+ /* FAULTMASK */
+ if (insn & 1) {
+ addr = tcg_const_i32(19);
+ gen_helper_v7m_msr(cpu_env, addr, tmp);
+ tcg_temp_free_i32(addr);
+ }
+ /* PRIMASK */
+ if (insn & 2) {
+ addr = tcg_const_i32(16);
+ gen_helper_v7m_msr(cpu_env, addr, tmp);
+ tcg_temp_free_i32(addr);
+ }
+ tcg_temp_free_i32(tmp);
+ gen_lookup_tb(s);
+ } else {
+ if (insn & (1 << 4)) {
+ shift = CPSR_A | CPSR_I | CPSR_F;
+ } else {
+ shift = 0;
+ }
+ gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
+ }
+ break;
+ default:
+ goto undef;
+ }
+ break;
+
+ default:
+ goto undef;
+ }
+ break;
+
+ case 12:
+ {
+ /* load/store multiple */
+ TCGv_i32 loaded_var;
+ TCGV_UNUSED_I32(loaded_var);
+ rn = (insn >> 8) & 0x7;
+ addr = load_reg(s, rn);
+ for (i = 0; i < 8; i++) {
+ if (insn & (1 << i)) {
+ if (insn & (1 << 11)) {
+ /* load */
+ tmp = tcg_temp_new_i32();
+ gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
+ if (i == rn) {
+ loaded_var = tmp;
+ } else {
+ store_reg(s, i, tmp);
+ }
+ } else {
+ /* store */
+ tmp = load_reg(s, i);
+ gen_aa32_st32(s, tmp, addr, get_mem_index(s));
+ tcg_temp_free_i32(tmp);
+ }
+ /* advance to the next address */
+ tcg_gen_addi_i32(addr, addr, 4);
+ }
+ }
+ if ((insn & (1 << rn)) == 0) {
+ /* base reg not in list: base register writeback */
+ store_reg(s, rn, addr);
+ } else {
+ /* base reg in list: if load, complete it now */
+ if (insn & (1 << 11)) {
+ store_reg(s, rn, loaded_var);
+ }
+ tcg_temp_free_i32(addr);
+ }
+ break;
+ }
+ case 13:
+ /* conditional branch or swi */
+ cond = (insn >> 8) & 0xf;
+ if (cond == 0xe)
+ goto undef;
+
+ if (cond == 0xf) {
+ /* swi */
+ gen_set_pc_im(s, s->pc);
+ s->svc_imm = extract32(insn, 0, 8);
+ s->is_jmp = DISAS_SWI;
+ break;
+ }
+ /* generate a conditional jump to next instruction */
+ s->condlabel = gen_new_label();
+ arm_gen_test_cc(cond ^ 1, s->condlabel);
+ s->condjmp = 1;
+
+ /* jump to the offset */
+ val = (uint32_t)s->pc + 2;
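+        /* sign-extend the 8-bit immediate; the offset is in halfwords */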
+ offset = ((int32_t)insn << 24) >> 24;
+ val += offset << 1;
+ gen_jmp(s, val);
+ break;
+
+ case 14:
+ if (insn & (1 << 11)) {
+ if (disas_thumb2_insn(env, s, insn))
+ goto undef32;
+ break;
+ }
+ /* unconditional branch */
+ val = (uint32_t)s->pc;
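+        /* sign-extend the 11-bit immediate; the offset is in halfwords */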
+ offset = ((int32_t)insn << 21) >> 21;
+ val += (offset << 1) + 2;
+ gen_jmp(s, val);
+ break;
+
+ case 15:
+ if (disas_thumb2_insn(env, s, insn))
+ goto undef32;
+ break;
+ }
+ return;
+undef32:
+ gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
+ default_exception_el(s));
+ return;
+illegal_op:
+undef:
+ gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
+ default_exception_el(s));
+}
+
+static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
+{
+    /* Return true if the insn at s->pc might cross a page boundary.
+ * (False positives are OK, false negatives are not.)
+ */
+ uint16_t insn;
+
+ if ((s->pc & 3) == 0) {
+ /* At a 4-aligned address we can't be crossing a page */
+ return false;
+ }
+
+ /* This must be a Thumb insn */
+ insn = arm_lduw_code(env, s->pc, s->sctlr_b);
+
+ if ((insn >> 11) >= 0x1d) {
+ /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
+         * first half of a 32-bit Thumb insn. Thumb-1 cores might
+ * end up actually treating this as two 16-bit insns (see the
+ * code at the start of disas_thumb2_insn()) but we don't bother
+ * to check for that as it is unlikely, and false positives here
+ * are harmless.
+ */
+ return true;
+ }
+ /* Definitely a 16-bit insn, can't be crossing a page. */
+ return false;
+}
+
+/* generate intermediate code for basic block 'tb'. */
+void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ DisasContext dc1, *dc = &dc1;
+ target_ulong pc_start;
+ target_ulong next_page_start;
+ int num_insns;
+ int max_insns;
+ bool end_of_page;
+
+ /* generate intermediate code */
+
+ /* The A64 decoder has its own top level loop, because it doesn't need
+ * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
+ */
+ if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
+ gen_intermediate_code_a64(cpu, tb);
+ return;
+ }
+
+ pc_start = tb->pc;
+
+ dc->tb = tb;
+
+ dc->is_jmp = DISAS_NEXT;
+ dc->pc = pc_start;
+ dc->singlestep_enabled = cs->singlestep_enabled;
+ dc->condjmp = 0;
+
+ dc->aarch64 = 0;
+ /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
+ * there is no secure EL1, so we route exceptions to EL3.
+ */
+ dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
+ !arm_el_is_aa64(env, 3);
+ dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
+ dc->sctlr_b = ARM_TBFLAG_SCTLR_B(tb->flags);
+ dc->be_data = ARM_TBFLAG_BE_DATA(tb->flags) ? MO_BE : MO_LE;
+ dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
+ dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
+ dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
+ dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
+#if !defined(CONFIG_USER_ONLY)
+ dc->user = (dc->current_el == 0);
+#endif
+ dc->ns = ARM_TBFLAG_NS(tb->flags);
+ dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
+ dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
+ dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
+ dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
+ dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
+ dc->cp_regs = cpu->cp_regs;
+ dc->features = env->features;
+
+ /* Single step state. The code-generation logic here is:
+ * SS_ACTIVE == 0:
+ * generate code with no special handling for single-stepping (except
+ * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
+ * this happens anyway because those changes are all system register or
+ * PSTATE writes).
+ * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
+ * emit code for one insn
+ * emit code to clear PSTATE.SS
+ * emit code to generate software step exception for completed step
+ * end TB (as usual for having generated an exception)
+ * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
+ * emit code to generate a software step exception
+ * end the TB
+ */
+ dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
+ dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
+ dc->is_ldex = false;
+ dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
+
+ cpu_F0s = tcg_temp_new_i32();
+ cpu_F1s = tcg_temp_new_i32();
+ cpu_F0d = tcg_temp_new_i64();
+ cpu_F1d = tcg_temp_new_i64();
+ cpu_V0 = cpu_F0d;
+ cpu_V1 = cpu_F1d;
+ /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
+ cpu_M0 = tcg_temp_new_i64();
+ next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
+ num_insns = 0;
+ max_insns = tb->cflags & CF_COUNT_MASK;
+ if (max_insns == 0) {
+ max_insns = CF_COUNT_MASK;
+ }
+ if (max_insns > TCG_MAX_INSNS) {
+ max_insns = TCG_MAX_INSNS;
+ }
+
+ gen_tb_start(tb);
+
+ tcg_clear_temp_count();
+
+ /* A note on handling of the condexec (IT) bits:
+ *
+ * We want to avoid the overhead of having to write the updated condexec
+ * bits back to the CPUARMState for every instruction in an IT block. So:
+ * (1) if the condexec bits are not already zero then we write
+ * zero back into the CPUARMState now. This avoids complications trying
+ * to do it at the end of the block. (For example if we don't do this
+ * it's hard to identify whether we can safely skip writing condexec
+ * at the end of the TB, which we definitely want to do for the case
+ * where a TB doesn't do anything with the IT state at all.)
+ * (2) if we are going to leave the TB then we call gen_set_condexec()
+ * which will write the correct value into CPUARMState if zero is wrong.
+ * This is done both for leaving the TB at the end, and for leaving
+ * it because of an exception we know will happen, which is done in
+ * gen_exception_insn(). The latter is necessary because we need to
+ * leave the TB with the PC/IT state just prior to execution of the
+ * instruction which caused the exception.
+ * (3) if we leave the TB unexpectedly (eg a data abort on a load)
+ * then the CPUARMState will be wrong and we need to reset it.
+ * This is handled in the same way as restoration of the
+ * PC in these situations; we save the value of the condexec bits
+ * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
+ * then uses this to restore them after an exception.
+ *
+ * Note that there are no instructions which can read the condexec
+ * bits, and none which can write non-static values to them, so
+ * we don't need to care about whether CPUARMState is correct in the
+ * middle of a TB.
+ */
+
+ /* Reset the conditional execution bits immediately. This avoids
+ complications trying to do it at the end of the block. */
+    if (dc->condexec_mask || dc->condexec_cond) {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_movi_i32(tmp, 0);
+ store_cpu_field(tmp, condexec_bits);
+ }
+ do {
+ tcg_gen_insn_start(dc->pc,
+ (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
+ 0);
+ num_insns++;
+
+#ifdef CONFIG_USER_ONLY
+ /* Intercept jump to the magic kernel page. */
+ if (dc->pc >= 0xffff0000) {
+ /* We always get here via a jump, so know we are not in a
+ conditional execution block. */
+ gen_exception_internal(EXCP_KERNEL_TRAP);
+ dc->is_jmp = DISAS_EXC;
+ break;
+ }
+#else
+ if (dc->pc >= 0xfffffff0 && arm_dc_feature(dc, ARM_FEATURE_M)) {
+ /* We always get here via a jump, so know we are not in a
+ conditional execution block. */
+ gen_exception_internal(EXCP_EXCEPTION_EXIT);
+ dc->is_jmp = DISAS_EXC;
+ break;
+ }
+#endif
+
+ if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
+ CPUBreakpoint *bp;
+ QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
+ if (bp->pc == dc->pc) {
+ if (bp->flags & BP_CPU) {
+ gen_set_condexec(dc);
+ gen_set_pc_im(dc, dc->pc);
+ gen_helper_check_breakpoints(cpu_env);
+ /* End the TB early; it's likely not going to be executed */
+ dc->is_jmp = DISAS_UPDATE;
+ } else {
+ gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
+ /* The address covered by the breakpoint must be
+ included in [tb->pc, tb->pc + tb->size) in order
+                        for it to be properly cleared -- thus we
+ increment the PC here so that the logic setting
+ tb->size below does the right thing. */
+ /* TODO: Advance PC by correct instruction length to
+ * avoid disassembler error messages */
+ dc->pc += 2;
+ goto done_generating;
+ }
+ break;
+ }
+ }
+ }
+
+ if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ gen_io_start();
+ }
+
+ if (dc->ss_active && !dc->pstate_ss) {
+ /* Singlestep state is Active-pending.
+ * If we're in this state at the start of a TB then either
+ * a) we just took an exception to an EL which is being debugged
+ * and this is the first insn in the exception handler
+ * b) debug exceptions were masked and we just unmasked them
+ * without changing EL (eg by clearing PSTATE.D)
+ * In either case we're going to take a swstep exception in the
+ * "did not step an insn" case, and so the syndrome ISV and EX
+ * bits should be zero.
+ */
+ assert(num_insns == 1);
+ gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
+ default_exception_el(dc));
+ goto done_generating;
+ }
+
+ if (dc->thumb) {
+ disas_thumb_insn(env, dc);
+ if (dc->condexec_mask) {
+ dc->condexec_cond = (dc->condexec_cond & 0xe)
+ | ((dc->condexec_mask >> 4) & 1);
+ dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
+ if (dc->condexec_mask == 0) {
+ dc->condexec_cond = 0;
+ }
+ }
+ } else {
+ unsigned int insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
+ dc->pc += 4;
+ disas_arm_insn(dc, insn);
+ }
+
+ if (dc->condjmp && !dc->is_jmp) {
+ gen_set_label(dc->condlabel);
+ dc->condjmp = 0;
+ }
+
+ if (tcg_check_temp_count()) {
+ fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
+ dc->pc);
+ }
+
+ /* Translation stops when a conditional branch is encountered.
+ * Otherwise the subsequent code could get translated several times.
+ * Also stop translation when a page boundary is reached. This
+ * ensures prefetch aborts occur at the right place. */
+
+ /* We want to stop the TB if the next insn starts in a new page,
+ * or if it spans between this page and the next. This means that
+ * if we're looking at the last halfword in the page we need to
+ * see if it's a 16-bit Thumb insn (which will fit in this TB)
+ * or a 32-bit Thumb insn (which won't).
+ * This is to avoid generating a silly TB with a single 16-bit insn
+ * in it at the end of this page (which would execute correctly
+ * but isn't very efficient).
+ */
+ end_of_page = (dc->pc >= next_page_start) ||
+ ((dc->pc >= next_page_start - 3) && insn_crosses_page(env, dc));
+
+ } while (!dc->is_jmp && !tcg_op_buf_full() &&
+ !cs->singlestep_enabled &&
+ !singlestep &&
+ !dc->ss_active &&
+ !end_of_page &&
+ num_insns < max_insns);
+
+ if (tb->cflags & CF_LAST_IO) {
+ if (dc->condjmp) {
+ /* FIXME: This can theoretically happen with self-modifying
+ code. */
+ cpu_abort(cs, "IO on conditional branch instruction");
+ }
+ gen_io_end();
+ }
+
+ /* At this stage dc->condjmp will only be set when the skipped
+ instruction was a conditional branch or trap, and the PC has
+ already been written. */
+ if (unlikely(cs->singlestep_enabled || dc->ss_active)) {
+ /* Unconditional and "condition passed" instruction codepath. */
+ gen_set_condexec(dc);
+ switch (dc->is_jmp) {
+ case DISAS_SWI:
+ gen_ss_advance(dc);
+ gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
+ default_exception_el(dc));
+ break;
+ case DISAS_HVC:
+ gen_ss_advance(dc);
+ gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
+ break;
+ case DISAS_SMC:
+ gen_ss_advance(dc);
+ gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
+ break;
+ case DISAS_NEXT:
+ case DISAS_UPDATE:
+ gen_set_pc_im(dc, dc->pc);
+ /* fall through */
+ default:
+ if (dc->ss_active) {
+ gen_step_complete_exception(dc);
+ } else {
+ /* FIXME: Single stepping a WFI insn will not halt
+ the CPU. */
+ gen_exception_internal(EXCP_DEBUG);
+ }
+ }
+ if (dc->condjmp) {
+ /* "Condition failed" instruction codepath. */
+ gen_set_label(dc->condlabel);
+ gen_set_condexec(dc);
+ gen_set_pc_im(dc, dc->pc);
+ if (dc->ss_active) {
+ gen_step_complete_exception(dc);
+ } else {
+ gen_exception_internal(EXCP_DEBUG);
+ }
+ }
+ } else {
+ /* While branches must always occur at the end of an IT block,
+ there are a few other things that can cause us to terminate
+ the TB in the middle of an IT block:
+ - Exception generating instructions (bkpt, swi, undefined).
+ - Page boundaries.
+ - Hardware watchpoints.
+ Hardware breakpoints have already been handled and skip this code.
+ */
+ gen_set_condexec(dc);
+        switch (dc->is_jmp) {
+ case DISAS_NEXT:
+ gen_goto_tb(dc, 1, dc->pc);
+ break;
+ case DISAS_UPDATE:
+ gen_set_pc_im(dc, dc->pc);
+ /* fall through */
+ case DISAS_JUMP:
+ default:
+ /* indicate that the hash table must be used to find the next TB */
+ tcg_gen_exit_tb(0);
+ break;
+ case DISAS_TB_JUMP:
+ /* nothing more to generate */
+ break;
+ case DISAS_WFI:
+ gen_helper_wfi(cpu_env);
+ /* The helper doesn't necessarily throw an exception, but we
+ * must go back to the main loop to check for interrupts anyway.
+ */
+ tcg_gen_exit_tb(0);
+ break;
+ case DISAS_WFE:
+ gen_helper_wfe(cpu_env);
+ break;
+ case DISAS_YIELD:
+ gen_helper_yield(cpu_env);
+ break;
+ case DISAS_SWI:
+ gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
+ default_exception_el(dc));
+ break;
+ case DISAS_HVC:
+ gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
+ break;
+ case DISAS_SMC:
+ gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
+ break;
+ }
+ if (dc->condjmp) {
+ gen_set_label(dc->condlabel);
+ gen_set_condexec(dc);
+ gen_goto_tb(dc, 1, dc->pc);
+ dc->condjmp = 0;
+ }
+ }
+
+done_generating:
+ gen_tb_end(tb, num_insns);
+
+#ifdef DEBUG_DISAS
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) &&
+ qemu_log_in_addr_range(pc_start)) {
+ qemu_log_lock();
+ qemu_log("----------------\n");
+ qemu_log("IN: %s\n", lookup_symbol(pc_start));
+ log_target_disas(cs, pc_start, dc->pc - pc_start,
+ dc->thumb | (dc->sctlr_b << 1));
+ qemu_log("\n");
+ qemu_log_unlock();
+ }
+#endif
+ tb->size = dc->pc - pc_start;
+ tb->icount = num_insns;
+}
+
+static const char *cpu_mode_names[16] = {
+ "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
+ "???", "???", "hyp", "und", "???", "???", "???", "sys"
+};
+
+void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
+ int flags)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ int i;
+ uint32_t psr;
+ const char *ns_status;
+
+ if (is_a64(env)) {
+ aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
+ return;
+ }
+
+    for (i = 0; i < 16; i++) {
+ cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
+ if ((i % 4) == 3)
+ cpu_fprintf(f, "\n");
+ else
+ cpu_fprintf(f, " ");
+ }
+ psr = cpsr_read(env);
+
+ if (arm_feature(env, ARM_FEATURE_EL3) &&
+ (psr & CPSR_M) != ARM_CPU_MODE_MON) {
+ ns_status = env->cp15.scr_el3 & SCR_NS ? "NS " : "S ";
+ } else {
+ ns_status = "";
+ }
+
+ cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%s%d\n",
+ psr,
+ psr & (1 << 31) ? 'N' : '-',
+ psr & (1 << 30) ? 'Z' : '-',
+ psr & (1 << 29) ? 'C' : '-',
+ psr & (1 << 28) ? 'V' : '-',
+ psr & CPSR_T ? 'T' : 'A',
+ ns_status,
+ cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
+
+ if (flags & CPU_DUMP_FPU) {
+ int numvfpregs = 0;
+ if (arm_feature(env, ARM_FEATURE_VFP)) {
+ numvfpregs += 16;
+ }
+ if (arm_feature(env, ARM_FEATURE_VFP3)) {
+ numvfpregs += 16;
+ }
+ for (i = 0; i < numvfpregs; i++) {
+ uint64_t v = float64_val(env->vfp.regs[i]);
+ cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
+ i * 2, (uint32_t)v,
+ i * 2 + 1, (uint32_t)(v >> 32),
+ i, v);
+ }
+ cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
+ }
+}
+
+void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ if (is_a64(env)) {
+ env->pc = data[0];
+ env->condexec_bits = 0;
+ env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
+ } else {
+ env->regs[15] = data[0];
+ env->condexec_bits = data[1];
+ env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
+ }
+}
diff --git a/target/arm/translate.h b/target/arm/translate.h
new file mode 100644
index 0000000000..285e96f087
--- /dev/null
+++ b/target/arm/translate.h
@@ -0,0 +1,155 @@
+#ifndef TARGET_ARM_TRANSLATE_H
+#define TARGET_ARM_TRANSLATE_H
+
+/* internal defines */
+typedef struct DisasContext {
+ target_ulong pc;
+ uint32_t insn;
+ int is_jmp;
+ /* Nonzero if this instruction has been conditionally skipped. */
+ int condjmp;
+ /* The label that will be jumped to when the instruction is skipped. */
+ TCGLabel *condlabel;
+ /* Thumb-2 conditional execution bits. */
+ int condexec_mask;
+ int condexec_cond;
+ struct TranslationBlock *tb;
+ int singlestep_enabled;
+ int thumb;
+ int sctlr_b;
+ TCGMemOp be_data;
+#if !defined(CONFIG_USER_ONLY)
+ int user;
+#endif
+ ARMMMUIdx mmu_idx; /* MMU index to use for normal loads/stores */
+ bool tbi0; /* TBI0 for EL0/1 or TBI for EL2/3 */
+ bool tbi1; /* TBI1 for EL0/1, not used for EL2/3 */
+ bool ns; /* Use non-secure CPREG bank on access */
+ int fp_excp_el; /* FP exception EL or 0 if enabled */
+ /* Flag indicating that exceptions from secure mode are routed to EL3. */
+ bool secure_routed_to_el3;
+ bool vfp_enabled; /* FP enabled via FPSCR.EN */
+ int vec_len;
+ int vec_stride;
+ /* Immediate value in AArch32 SVC insn; must be set if is_jmp == DISAS_SWI
+ * so that top level loop can generate correct syndrome information.
+ */
+ uint32_t svc_imm;
+ int aarch64;
+ int current_el;
+ GHashTable *cp_regs;
+ uint64_t features; /* CPU features bits */
+ /* Because unallocated encodings generate different exception syndrome
+ * information from traps due to FP being disabled, we can't do a single
+ * "is fp access disabled" check at a high level in the decode tree.
+ * To help in catching bugs where the access check was forgotten in some
+ * code path, we set this flag when the access check is done, and assert
+ * that it is set at the point where we actually touch the FP regs.
+ */
+ bool fp_access_checked;
+ /* ARMv8 single-step state (this is distinct from the QEMU gdbstub
+ * single-step support).
+ */
+ bool ss_active;
+ bool pstate_ss;
+ /* True if the insn just emitted was a load-exclusive instruction
+ * (necessary for syndrome information for single step exceptions),
+ * ie A64 LDX*, LDAX*, A32/T32 LDREX*, LDAEX*.
+ */
+ bool is_ldex;
+ /* True if a single-step exception will be taken to the current EL */
+ bool ss_same_el;
+ /* Bottom two bits of XScale c15_cpar coprocessor access control reg */
+ int c15_cpar;
+ /* TCG op index of the current insn_start. */
+ int insn_start_idx;
+#define TMP_A64_MAX 16
+ int tmp_a64_count;
+ TCGv_i64 tmp_a64[TMP_A64_MAX];
+} DisasContext;
+
+typedef struct DisasCompare {
+ TCGCond cond;
+ TCGv_i32 value;
+ bool value_global;
+} DisasCompare;
+
+/* Share the TCG temporaries common between 32 and 64 bit modes. */
+extern TCGv_env cpu_env;
+extern TCGv_i32 cpu_NF, cpu_ZF, cpu_CF, cpu_VF;
+extern TCGv_i64 cpu_exclusive_addr;
+extern TCGv_i64 cpu_exclusive_val;
+
+static inline int arm_dc_feature(DisasContext *dc, int feature)
+{
+ return (dc->features & (1ULL << feature)) != 0;
+}
+
+static inline int get_mem_index(DisasContext *s)
+{
+ return s->mmu_idx;
+}
+
+/* Function used to determine the target exception EL when otherwise not known
+ * or default.
+ */
+static inline int default_exception_el(DisasContext *s)
+{
+ /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
+ * there is no secure EL1, so we route exceptions to EL3. Otherwise,
+ * exceptions can only be routed to ELs above 1, so we target the higher of
+ * 1 or the current EL.
+ */
+ return (s->mmu_idx == ARMMMUIdx_S1SE0 && s->secure_routed_to_el3)
+ ? 3 : MAX(1, s->current_el);
+}
+
+/* target-specific extra values for is_jmp */
+/* These instructions trap after executing, so the A32/T32 decoder must
+ * defer them until after the conditional execution state has been updated.
+ * WFI also needs special handling when single-stepping.
+ */
+#define DISAS_WFI 4
+#define DISAS_SWI 5
+/* For instructions which unconditionally cause an exception we can skip
+ * emitting unreachable code at the end of the TB in the A64 decoder
+ */
+#define DISAS_EXC 6
+/* WFE */
+#define DISAS_WFE 7
+#define DISAS_HVC 8
+#define DISAS_SMC 9
+#define DISAS_YIELD 10
+
+#ifdef TARGET_AARCH64
+void a64_translate_init(void);
+void gen_intermediate_code_a64(ARMCPU *cpu, TranslationBlock *tb);
+void gen_a64_set_pc_im(uint64_t val);
+void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
+ fprintf_function cpu_fprintf, int flags);
+#else
+static inline void a64_translate_init(void)
+{
+}
+
+static inline void gen_intermediate_code_a64(ARMCPU *cpu, TranslationBlock *tb)
+{
+}
+
+static inline void gen_a64_set_pc_im(uint64_t val)
+{
+}
+
+static inline void aarch64_cpu_dump_state(CPUState *cs, FILE *f,
+ fprintf_function cpu_fprintf,
+ int flags)
+{
+}
+#endif
+
+void arm_test_cc(DisasCompare *cmp, int cc);
+void arm_free_cc(DisasCompare *cmp);
+void arm_jump_cc(DisasCompare *cmp, TCGLabel *label);
+void arm_gen_test_cc(int cc, TCGLabel *label);
+
+#endif /* TARGET_ARM_TRANSLATE_H */
diff --git a/target/cris/Makefile.objs b/target/cris/Makefile.objs
new file mode 100644
index 0000000000..7779227fc4
--- /dev/null
+++ b/target/cris/Makefile.objs
@@ -0,0 +1,3 @@
+obj-y += translate.o op_helper.o helper.o cpu.o
+obj-y += gdbstub.o
+obj-$(CONFIG_SOFTMMU) += mmu.o machine.o
diff --git a/target/cris/cpu-qom.h b/target/cris/cpu-qom.h
new file mode 100644
index 0000000000..7556e9f97e
--- /dev/null
+++ b/target/cris/cpu-qom.h
@@ -0,0 +1,55 @@
+/*
+ * QEMU CRIS CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+#ifndef QEMU_CRIS_CPU_QOM_H
+#define QEMU_CRIS_CPU_QOM_H
+
+#include "qom/cpu.h"
+
+#define TYPE_CRIS_CPU "cris-cpu"
+
+#define CRIS_CPU_CLASS(klass) \
+ OBJECT_CLASS_CHECK(CRISCPUClass, (klass), TYPE_CRIS_CPU)
+#define CRIS_CPU(obj) \
+ OBJECT_CHECK(CRISCPU, (obj), TYPE_CRIS_CPU)
+#define CRIS_CPU_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(CRISCPUClass, (obj), TYPE_CRIS_CPU)
+
+/**
+ * CRISCPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ * @vr: Version Register value.
+ *
+ * A CRIS CPU model.
+ */
+typedef struct CRISCPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+ void (*parent_reset)(CPUState *cpu);
+
+ uint32_t vr;
+} CRISCPUClass;
+
+typedef struct CRISCPU CRISCPU;
+
+#endif
diff --git a/target/cris/cpu.c b/target/cris/cpu.c
new file mode 100644
index 0000000000..2e9ab9700e
--- /dev/null
+++ b/target/cris/cpu.c
@@ -0,0 +1,357 @@
+/*
+ * QEMU CRIS CPU
+ *
+ * Copyright (c) 2008 AXIS Communications AB
+ * Written by Edgar E. Iglesias.
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "qemu-common.h"
+#include "mmu.h"
+#include "exec/exec-all.h"
+
+
+static void cris_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ CRISCPU *cpu = CRIS_CPU(cs);
+
+ cpu->env.pc = value;
+}
+
+static bool cris_cpu_has_work(CPUState *cs)
+{
+ return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI);
+}
+
+/* CPUClass::reset() */
+static void cris_cpu_reset(CPUState *s)
+{
+ CRISCPU *cpu = CRIS_CPU(s);
+ CRISCPUClass *ccc = CRIS_CPU_GET_CLASS(cpu);
+ CPUCRISState *env = &cpu->env;
+ uint32_t vr;
+
+ ccc->parent_reset(s);
+
+ vr = env->pregs[PR_VR];
+ memset(env, 0, offsetof(CPUCRISState, load_info));
+ env->pregs[PR_VR] = vr;
+ tlb_flush(s, 1);
+
+#if defined(CONFIG_USER_ONLY)
+ /* start in user mode with interrupts enabled. */
+ env->pregs[PR_CCS] |= U_FLAG | I_FLAG | P_FLAG;
+#else
+ cris_mmu_init(env);
+ env->pregs[PR_CCS] = 0;
+#endif
+}
+
+static ObjectClass *cris_cpu_class_by_name(const char *cpu_model)
+{
+ ObjectClass *oc;
+ char *typename;
+
+ if (cpu_model == NULL) {
+ return NULL;
+ }
+
+#if defined(CONFIG_USER_ONLY)
+ if (strcasecmp(cpu_model, "any") == 0) {
+ return object_class_by_name("crisv32-" TYPE_CRIS_CPU);
+ }
+#endif
+
+ typename = g_strdup_printf("%s-" TYPE_CRIS_CPU, cpu_model);
+ oc = object_class_by_name(typename);
+ g_free(typename);
+ if (oc != NULL && (!object_class_dynamic_cast(oc, TYPE_CRIS_CPU) ||
+ object_class_is_abstract(oc))) {
+ oc = NULL;
+ }
+ return oc;
+}
+
+CRISCPU *cpu_cris_init(const char *cpu_model)
+{
+ return CRIS_CPU(cpu_generic_init(TYPE_CRIS_CPU, cpu_model));
+}
+
+/* Sort numerically by VR (version register) value. */
+static gint cris_cpu_list_compare(gconstpointer a, gconstpointer b)
+{
+ CRISCPUClass *ccc_a = CRIS_CPU_CLASS(a);
+ CRISCPUClass *ccc_b = CRIS_CPU_CLASS(b);
+
+ if (ccc_a->vr > ccc_b->vr) {
+ return 1;
+ } else if (ccc_a->vr < ccc_b->vr) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+static void cris_cpu_list_entry(gpointer data, gpointer user_data)
+{
+ ObjectClass *oc = data;
+ CPUListState *s = user_data;
+ const char *typename = object_class_get_name(oc);
+ char *name;
+
+ name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_CRIS_CPU));
+ (*s->cpu_fprintf)(s->file, " %s\n", name);
+ g_free(name);
+}
+
+void cris_cpu_list(FILE *f, fprintf_function cpu_fprintf)
+{
+ CPUListState s = {
+ .file = f,
+ .cpu_fprintf = cpu_fprintf,
+ };
+ GSList *list;
+
+ list = object_class_get_list(TYPE_CRIS_CPU, false);
+ list = g_slist_sort(list, cris_cpu_list_compare);
+ (*cpu_fprintf)(f, "Available CPUs:\n");
+ g_slist_foreach(list, cris_cpu_list_entry, &s);
+ g_slist_free(list);
+}
+
+static void cris_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ CRISCPUClass *ccc = CRIS_CPU_GET_CLASS(dev);
+ Error *local_err = NULL;
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ cpu_reset(cs);
+ qemu_init_vcpu(cs);
+
+ ccc->parent_realize(dev, errp);
+}
+
+#ifndef CONFIG_USER_ONLY
+static void cris_cpu_set_irq(void *opaque, int irq, int level)
+{
+ CRISCPU *cpu = opaque;
+ CPUState *cs = CPU(cpu);
+ int type = irq == CRIS_CPU_IRQ ? CPU_INTERRUPT_HARD : CPU_INTERRUPT_NMI;
+
+ if (level) {
+ cpu_interrupt(cs, type);
+ } else {
+ cpu_reset_interrupt(cs, type);
+ }
+}
+#endif
+
+static void cris_disas_set_info(CPUState *cpu, disassemble_info *info)
+{
+ CRISCPU *cc = CRIS_CPU(cpu);
+ CPUCRISState *env = &cc->env;
+
+ if (env->pregs[PR_VR] != 32) {
+ info->mach = bfd_mach_cris_v0_v10;
+ info->print_insn = print_insn_crisv10;
+ } else {
+ info->mach = bfd_mach_cris_v32;
+ info->print_insn = print_insn_crisv32;
+ }
+}
+
+static void cris_cpu_initfn(Object *obj)
+{
+ CPUState *cs = CPU(obj);
+ CRISCPU *cpu = CRIS_CPU(obj);
+ CRISCPUClass *ccc = CRIS_CPU_GET_CLASS(obj);
+ CPUCRISState *env = &cpu->env;
+ static bool tcg_initialized;
+
+ cs->env_ptr = env;
+
+ env->pregs[PR_VR] = ccc->vr;
+
+#ifndef CONFIG_USER_ONLY
+ /* IRQ and NMI lines. */
+ qdev_init_gpio_in(DEVICE(cpu), cris_cpu_set_irq, 2);
+#endif
+
+ if (tcg_enabled() && !tcg_initialized) {
+ tcg_initialized = true;
+ if (env->pregs[PR_VR] < 32) {
+ cris_initialize_crisv10_tcg();
+ } else {
+ cris_initialize_tcg();
+ }
+ }
+}
+
+static void crisv8_cpu_class_init(ObjectClass *oc, void *data)
+{
+ CPUClass *cc = CPU_CLASS(oc);
+ CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);
+
+ ccc->vr = 8;
+ cc->do_interrupt = crisv10_cpu_do_interrupt;
+ cc->gdb_read_register = crisv10_cpu_gdb_read_register;
+}
+
+static void crisv9_cpu_class_init(ObjectClass *oc, void *data)
+{
+ CPUClass *cc = CPU_CLASS(oc);
+ CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);
+
+ ccc->vr = 9;
+ cc->do_interrupt = crisv10_cpu_do_interrupt;
+ cc->gdb_read_register = crisv10_cpu_gdb_read_register;
+}
+
+static void crisv10_cpu_class_init(ObjectClass *oc, void *data)
+{
+ CPUClass *cc = CPU_CLASS(oc);
+ CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);
+
+ ccc->vr = 10;
+ cc->do_interrupt = crisv10_cpu_do_interrupt;
+ cc->gdb_read_register = crisv10_cpu_gdb_read_register;
+}
+
+static void crisv11_cpu_class_init(ObjectClass *oc, void *data)
+{
+ CPUClass *cc = CPU_CLASS(oc);
+ CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);
+
+ ccc->vr = 11;
+ cc->do_interrupt = crisv10_cpu_do_interrupt;
+ cc->gdb_read_register = crisv10_cpu_gdb_read_register;
+}
+
+static void crisv17_cpu_class_init(ObjectClass *oc, void *data)
+{
+ CPUClass *cc = CPU_CLASS(oc);
+ CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);
+
+ ccc->vr = 17;
+ cc->do_interrupt = crisv10_cpu_do_interrupt;
+ cc->gdb_read_register = crisv10_cpu_gdb_read_register;
+}
+
+static void crisv32_cpu_class_init(ObjectClass *oc, void *data)
+{
+ CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);
+
+ ccc->vr = 32;
+}
+
+#define TYPE(model) model "-" TYPE_CRIS_CPU
+
+static const TypeInfo cris_cpu_model_type_infos[] = {
+ {
+ .name = TYPE("crisv8"),
+ .parent = TYPE_CRIS_CPU,
+ .class_init = crisv8_cpu_class_init,
+ }, {
+ .name = TYPE("crisv9"),
+ .parent = TYPE_CRIS_CPU,
+ .class_init = crisv9_cpu_class_init,
+ }, {
+ .name = TYPE("crisv10"),
+ .parent = TYPE_CRIS_CPU,
+ .class_init = crisv10_cpu_class_init,
+ }, {
+ .name = TYPE("crisv11"),
+ .parent = TYPE_CRIS_CPU,
+ .class_init = crisv11_cpu_class_init,
+ }, {
+ .name = TYPE("crisv17"),
+ .parent = TYPE_CRIS_CPU,
+ .class_init = crisv17_cpu_class_init,
+ }, {
+ .name = TYPE("crisv32"),
+ .parent = TYPE_CRIS_CPU,
+ .class_init = crisv32_cpu_class_init,
+ }
+};
+
+#undef TYPE
+
+static void cris_cpu_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(oc);
+ CRISCPUClass *ccc = CRIS_CPU_CLASS(oc);
+
+ ccc->parent_realize = dc->realize;
+ dc->realize = cris_cpu_realizefn;
+
+ ccc->parent_reset = cc->reset;
+ cc->reset = cris_cpu_reset;
+
+ cc->class_by_name = cris_cpu_class_by_name;
+ cc->has_work = cris_cpu_has_work;
+ cc->do_interrupt = cris_cpu_do_interrupt;
+ cc->cpu_exec_interrupt = cris_cpu_exec_interrupt;
+ cc->dump_state = cris_cpu_dump_state;
+ cc->set_pc = cris_cpu_set_pc;
+ cc->gdb_read_register = cris_cpu_gdb_read_register;
+ cc->gdb_write_register = cris_cpu_gdb_write_register;
+#ifdef CONFIG_USER_ONLY
+ cc->handle_mmu_fault = cris_cpu_handle_mmu_fault;
+#else
+ cc->get_phys_page_debug = cris_cpu_get_phys_page_debug;
+ dc->vmsd = &vmstate_cris_cpu;
+#endif
+
+ cc->gdb_num_core_regs = 49;
+ cc->gdb_stop_before_watchpoint = true;
+
+ cc->disas_set_info = cris_disas_set_info;
+}
+
+static const TypeInfo cris_cpu_type_info = {
+ .name = TYPE_CRIS_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(CRISCPU),
+ .instance_init = cris_cpu_initfn,
+ .abstract = true,
+ .class_size = sizeof(CRISCPUClass),
+ .class_init = cris_cpu_class_init,
+};
+
+static void cris_cpu_register_types(void)
+{
+ int i;
+
+ type_register_static(&cris_cpu_type_info);
+ for (i = 0; i < ARRAY_SIZE(cris_cpu_model_type_infos); i++) {
+ type_register_static(&cris_cpu_model_type_infos[i]);
+ }
+}
+
+type_init(cris_cpu_register_types)
diff --git a/target/cris/cpu.h b/target/cris/cpu.h
new file mode 100644
index 0000000000..43d5f9d1da
--- /dev/null
+++ b/target/cris/cpu.h
@@ -0,0 +1,309 @@
+/*
+ * CRIS virtual CPU header
+ *
+ * Copyright (c) 2007 AXIS Communications AB
+ * Written by Edgar E. Iglesias
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef CRIS_CPU_H
+#define CRIS_CPU_H
+
+#include "qemu-common.h"
+#include "cpu-qom.h"
+
+#define TARGET_LONG_BITS 32
+
+#define CPUArchState struct CPUCRISState
+
+#include "exec/cpu-defs.h"
+
+#define EXCP_NMI 1
+#define EXCP_GURU 2
+#define EXCP_BUSFAULT 3
+#define EXCP_IRQ 4
+#define EXCP_BREAK 5
+
+/* CRIS-specific interrupt pending bits. */
+#define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_3
+
+/* CRIS CPU device object's interrupt lines. */
+#define CRIS_CPU_IRQ 0
+#define CRIS_CPU_NMI 1
+
+/* Register aliases. R0 - R15 */
+#define R_FP 8
+#define R_SP 14
+#define R_ACR 15
+
+/* Support regs, P0 - P15 */
+#define PR_BZ 0
+#define PR_VR 1
+#define PR_PID 2
+#define PR_SRS 3
+#define PR_WZ 4
+#define PR_EXS 5
+#define PR_EDA 6
+#define PR_PREFIX 6 /* On CRISv10, P6 is reserved; we use it as the prefix register. */
+#define PR_MOF 7
+#define PR_DZ 8
+#define PR_EBP 9
+#define PR_ERP 10
+#define PR_SRP 11
+#define PR_NRP 12
+#define PR_CCS 13
+#define PR_USP 14
+#define PRV10_BRP 14
+#define PR_SPC 15
+
+/* CPU flags. */
+#define Q_FLAG 0x80000000
+#define M_FLAG_V32 0x40000000
+#define PFIX_FLAG 0x800 /* CRISv10 Only. */
+#define F_FLAG_V10 0x400
+#define P_FLAG_V10 0x200
+#define S_FLAG 0x200
+#define R_FLAG 0x100
+#define P_FLAG 0x80
+#define M_FLAG_V10 0x80
+#define U_FLAG 0x40
+#define I_FLAG 0x20
+#define X_FLAG 0x10
+#define N_FLAG 0x08
+#define Z_FLAG 0x04
+#define V_FLAG 0x02
+#define C_FLAG 0x01
+#define ALU_FLAGS 0x1F
+
+/* Condition codes. */
+#define CC_CC 0
+#define CC_CS 1
+#define CC_NE 2
+#define CC_EQ 3
+#define CC_VC 4
+#define CC_VS 5
+#define CC_PL 6
+#define CC_MI 7
+#define CC_LS 8
+#define CC_HI 9
+#define CC_GE 10
+#define CC_LT 11
+#define CC_GT 12
+#define CC_LE 13
+#define CC_A 14
+#define CC_P 15
+
+#define NB_MMU_MODES 2
+
+typedef struct {
+ uint32_t hi;
+ uint32_t lo;
+} TLBSet;
+
+typedef struct CPUCRISState {
+ uint32_t regs[16];
+ /* P0 - P15 are referred to as special registers in the docs. */
+ uint32_t pregs[16];
+
+ /* Pseudo register for the PC. Not directly accessible on CRIS. */
+ uint32_t pc;
+
+ /* Pseudo register for the kernel stack. */
+ uint32_t ksp;
+
+ /* Branch. */
+ int dslot;
+ int btaken;
+ uint32_t btarget;
+
+ /* Condition flag tracking. */
+ uint32_t cc_op;
+ uint32_t cc_mask;
+ uint32_t cc_dest;
+ uint32_t cc_src;
+ uint32_t cc_result;
+ /* size of the operation, 1 = byte, 2 = word, 4 = dword. */
+ int cc_size;
+ /* X flag at the time of cc snapshot. */
+ int cc_x;
+
+ /* CRIS has certain insns that lockout interrupts. */
+ int locked_irq;
+ int interrupt_vector;
+ int fault_vector;
+ int trap_vector;
+
+ /* FIXME: add a check in the translator to avoid writing to support
+ register sets beyond the 4th. The ISA allows up to 256, but in
+ practice no core implements more than 4.
+
+ Support function registers are used to control units close to the
+ core. Accesses do not pass down the normal hierarchy.
+ */
+ uint32_t sregs[4][16];
+
+ /* Linear feedback shift register in the MMU. Provides pseudo-randomness
+ for the 'hint' the MMU gives to software for choosing valid sets
+ on TLB refills. */
+ uint32_t mmu_rand_lfsr;
+
+ /*
+ * We store writes to the TLB sets here for later evaluation when
+ * the hardware needs access to them.
+ *
+ * One array for the I side and another for the D side.
+ */
+ TLBSet tlbsets[2][4][16];
+
+ CPU_COMMON
+
+ /* Members from load_info on are preserved across resets. */
+ void *load_info;
+} CPUCRISState;
+
+/**
+ * CRISCPU:
+ * @env: #CPUCRISState
+ *
+ * A CRIS CPU.
+ */
+struct CRISCPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPUCRISState env;
+};
+
+static inline CRISCPU *cris_env_get_cpu(CPUCRISState *env)
+{
+ return container_of(env, CRISCPU, env);
+}
+
+#define ENV_GET_CPU(e) CPU(cris_env_get_cpu(e))
+
+#define ENV_OFFSET offsetof(CRISCPU, env)
+
+#ifndef CONFIG_USER_ONLY
+extern const struct VMStateDescription vmstate_cris_cpu;
+#endif
+
+void cris_cpu_do_interrupt(CPUState *cpu);
+void crisv10_cpu_do_interrupt(CPUState *cpu);
+bool cris_cpu_exec_interrupt(CPUState *cpu, int int_req);
+
+void cris_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
+ int flags);
+
+hwaddr cris_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+
+int crisv10_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
+int cris_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
+int cris_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+
+CRISCPU *cpu_cris_init(const char *cpu_model);
+/* You can call this signal handler from your SIGBUS and SIGSEGV
+ signal handlers to inform the virtual CPU of exceptions. Non-zero
+ is returned if the signal was handled by the virtual CPU. */
+int cpu_cris_signal_handler(int host_signum, void *pinfo,
+ void *puc);
+
+void cris_initialize_tcg(void);
+void cris_initialize_crisv10_tcg(void);
+
+/* Instead of computing the condition codes after each CRIS instruction,
+ * QEMU just stores the operands (called CC_SRC and CC_DEST), the result
+ * (called CC_RESULT) and the type of operation (called CC_OP). When the
+ * condition codes are needed, they are computed from this stored
+ * information. Condition codes are not generated at all if they are
+ * only needed for conditional branches.
+ */
+enum {
+ CC_OP_DYNAMIC, /* Use env->cc_op */
+ CC_OP_FLAGS,
+ CC_OP_CMP,
+ CC_OP_MOVE,
+ CC_OP_ADD,
+ CC_OP_ADDC,
+ CC_OP_MCP,
+ CC_OP_ADDU,
+ CC_OP_SUB,
+ CC_OP_SUBU,
+ CC_OP_NEG,
+ CC_OP_BTST,
+ CC_OP_MULS,
+ CC_OP_MULU,
+ CC_OP_DSTEP,
+ CC_OP_MSTEP,
+ CC_OP_BOUND,
+
+ CC_OP_OR,
+ CC_OP_AND,
+ CC_OP_XOR,
+ CC_OP_LSL,
+ CC_OP_LSR,
+ CC_OP_ASR,
+ CC_OP_LZ
+};
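+
+/* As a rough illustration of the scheme above (example values are made up):
+ * after a 32-bit subtract the translator records roughly
+ *     cc_op = CC_OP_SUB, cc_size = 4, plus cc_src, cc_dest and cc_result,
+ * and the N/Z/V/C bits in PR_CCS are only materialized later, e.g. by
+ * helper_evaluate_flags_sub_4() in op_helper.c, when a following insn
+ * actually needs them.
+ */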
+
+/* CRIS uses 8k pages. */
+#define TARGET_PAGE_BITS 13
+#define MMAP_SHIFT TARGET_PAGE_BITS
+
+#define TARGET_PHYS_ADDR_SPACE_BITS 32
+#define TARGET_VIRT_ADDR_SPACE_BITS 32
+
+#define cpu_init(cpu_model) CPU(cpu_cris_init(cpu_model))
+
+#define cpu_signal_handler cpu_cris_signal_handler
+
+/* MMU modes definitions */
+#define MMU_MODE0_SUFFIX _kernel
+#define MMU_MODE1_SUFFIX _user
+#define MMU_USER_IDX 1
+static inline int cpu_mmu_index(CPUCRISState *env, bool ifetch)
+{
+ return !!(env->pregs[PR_CCS] & U_FLAG);
+}
+
+int cris_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
+ int mmu_idx);
+
+/* Support function regs. */
+#define SFR_RW_GC_CFG 0][0
+#define SFR_RW_MM_CFG env->pregs[PR_SRS]][0
+#define SFR_RW_MM_KBASE_LO env->pregs[PR_SRS]][1
+#define SFR_RW_MM_KBASE_HI env->pregs[PR_SRS]][2
+#define SFR_R_MM_CAUSE env->pregs[PR_SRS]][3
+#define SFR_RW_MM_TLB_SEL env->pregs[PR_SRS]][4
+#define SFR_RW_MM_TLB_LO env->pregs[PR_SRS]][5
+#define SFR_RW_MM_TLB_HI env->pregs[PR_SRS]][6
+
+#include "exec/cpu-all.h"
+
+static inline void cpu_get_tb_cpu_state(CPUCRISState *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *flags)
+{
+ *pc = env->pc;
+ *cs_base = 0;
+ *flags = env->dslot |
+ (env->pregs[PR_CCS] & (S_FLAG | P_FLAG | U_FLAG
+ | X_FLAG | PFIX_FLAG));
+}
+
+#define cpu_list cris_cpu_list
+void cris_cpu_list(FILE *f, fprintf_function cpu_fprintf);
+
+#endif
diff --git a/target/cris/crisv10-decode.h b/target/cris/crisv10-decode.h
new file mode 100644
index 0000000000..bdb4b6d318
--- /dev/null
+++ b/target/cris/crisv10-decode.h
@@ -0,0 +1,108 @@
+/*
+ * CRISv10 insn decoding macros.
+ *
+ * Copyright (c) 2010 AXIS Communications AB
+ * Written by Edgar E. Iglesias.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define CRISV10_MODE_QIMMEDIATE 0
+#define CRISV10_MODE_REG 1
+#define CRISV10_MODE_INDIRECT 2
+#define CRISV10_MODE_AUTOINC 3
+
+/* Quick Immediate. */
+#define CRISV10_QIMM_BCC_R0 0
+#define CRISV10_QIMM_BCC_R1 1
+#define CRISV10_QIMM_BCC_R2 2
+#define CRISV10_QIMM_BCC_R3 3
+
+#define CRISV10_QIMM_BDAP_R0 4
+#define CRISV10_QIMM_BDAP_R1 5
+#define CRISV10_QIMM_BDAP_R2 6
+#define CRISV10_QIMM_BDAP_R3 7
+
+#define CRISV10_QIMM_ADDQ 8
+#define CRISV10_QIMM_MOVEQ 9
+#define CRISV10_QIMM_SUBQ 10
+#define CRISV10_QIMM_CMPQ 11
+#define CRISV10_QIMM_ANDQ 12
+#define CRISV10_QIMM_ORQ 13
+#define CRISV10_QIMM_ASHQ 14
+#define CRISV10_QIMM_LSHQ 15
+
+
+#define CRISV10_REG_ADDX 0
+#define CRISV10_REG_MOVX 1
+#define CRISV10_REG_SUBX 2
+#define CRISV10_REG_LSL 3
+#define CRISV10_REG_ADDI 4
+#define CRISV10_REG_BIAP 5
+#define CRISV10_REG_NEG 6
+#define CRISV10_REG_BOUND 7
+#define CRISV10_REG_ADD 8
+#define CRISV10_REG_MOVE_R 9
+#define CRISV10_REG_MOVE_SPR_R 9
+#define CRISV10_REG_MOVE_R_SPR 8
+#define CRISV10_REG_SUB 10
+#define CRISV10_REG_CMP 11
+#define CRISV10_REG_AND 12
+#define CRISV10_REG_OR 13
+#define CRISV10_REG_ASR 14
+#define CRISV10_REG_LSR 15
+
+#define CRISV10_REG_BTST 3
+#define CRISV10_REG_SCC 4
+#define CRISV10_REG_SETF 6
+#define CRISV10_REG_CLEARF 7
+#define CRISV10_REG_BIAP 5
+#define CRISV10_REG_ABS 10
+#define CRISV10_REG_DSTEP 11
+#define CRISV10_REG_LZ 12
+#define CRISV10_REG_NOT 13
+#define CRISV10_REG_SWAP 13
+#define CRISV10_REG_XOR 14
+#define CRISV10_REG_MSTEP 15
+
+/* Indirect, var size. */
+#define CRISV10_IND_TEST 14
+#define CRISV10_IND_MUL 4
+#define CRISV10_IND_BDAP_M 5
+#define CRISV10_IND_ADD 8
+#define CRISV10_IND_MOVE_M_R 9
+
+
+/* indirect fixed size. */
+#define CRISV10_IND_ADDX 0
+#define CRISV10_IND_MOVX 1
+#define CRISV10_IND_SUBX 2
+#define CRISV10_IND_CMPX 3
+#define CRISV10_IND_JUMP_M 4
+#define CRISV10_IND_DIP 5
+#define CRISV10_IND_JUMP_R 6
+#define CRISV17_IND_ADDC 6
+#define CRISV10_IND_BOUND 7
+#define CRISV10_IND_BCC_M 7
+#define CRISV10_IND_MOVE_M_SPR 8
+#define CRISV10_IND_MOVE_SPR_M 9
+#define CRISV10_IND_SUB 10
+#define CRISV10_IND_CMP 11
+#define CRISV10_IND_AND 12
+#define CRISV10_IND_OR 13
+#define CRISV10_IND_MOVE_R_M 15
+
+#define CRISV10_IND_MOVEM_M_R 14
+#define CRISV10_IND_MOVEM_R_M 15
+
diff --git a/target/cris/crisv32-decode.h b/target/cris/crisv32-decode.h
new file mode 100644
index 0000000000..cdc2f8cbe6
--- /dev/null
+++ b/target/cris/crisv32-decode.h
@@ -0,0 +1,133 @@
+/*
+ * CRIS insn decoding macros.
+ *
+ * Copyright (c) 2007 AXIS Communications AB
+ * Written by Edgar E. Iglesias.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef CRISV32_DECODE_H
+#define CRISV32_DECODE_H
+
+/* Convenient binary macros. */
+#define HEX__(n) 0x##n##LU
+#define B8__(x) ((x&0x0000000FLU)?1:0) \
+ + ((x&0x000000F0LU)?2:0) \
+ + ((x&0x00000F00LU)?4:0) \
+ + ((x&0x0000F000LU)?8:0) \
+ + ((x&0x000F0000LU)?16:0) \
+ + ((x&0x00F00000LU)?32:0) \
+ + ((x&0x0F000000LU)?64:0) \
+ + ((x&0xF0000000LU)?128:0)
+#define B8(d) ((unsigned char)B8__(HEX__(d)))
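+
+/* For illustration: B8(01100100) evaluates to 0x64 and B8(11111100) to 0xfc,
+ * so an entry such as DEC_MOVE_R below can be read as "opcode bits 01100100
+ * under mask 11111100", leaving the low two (size) bits as don't-cares.
+ */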
+
+/* Quick imm. */
+#define DEC_BCCQ {B8(00000000), B8(11110000)}
+#define DEC_ADDOQ {B8(00010000), B8(11110000)}
+#define DEC_ADDQ {B8(00100000), B8(11111100)}
+#define DEC_MOVEQ {B8(00100100), B8(11111100)}
+#define DEC_SUBQ {B8(00101000), B8(11111100)}
+#define DEC_CMPQ {B8(00101100), B8(11111100)}
+#define DEC_ANDQ {B8(00110000), B8(11111100)}
+#define DEC_ORQ {B8(00110100), B8(11111100)}
+#define DEC_BTSTQ {B8(00111000), B8(11111110)}
+#define DEC_ASRQ {B8(00111010), B8(11111110)}
+#define DEC_LSLQ {B8(00111100), B8(11111110)}
+#define DEC_LSRQ {B8(00111110), B8(11111110)}
+
+/* Register. */
+#define DEC_MOVU_R {B8(01000100), B8(11111110)}
+#define DEC_MOVS_R {B8(01000110), B8(11111110)}
+#define DEC_MOVE_R {B8(01100100), B8(11111100)}
+#define DEC_MOVE_RP {B8(01100011), B8(11111111)}
+#define DEC_MOVE_PR {B8(01100111), B8(11111111)}
+#define DEC_DSTEP_R {B8(01101111), B8(11111111)}
+#define DEC_MOVE_RS {B8(10110111), B8(11111111)}
+#define DEC_MOVE_SR {B8(11110111), B8(11111111)}
+#define DEC_ADDU_R {B8(01000000), B8(11111110)}
+#define DEC_ADDS_R {B8(01000010), B8(11111110)}
+#define DEC_ADD_R {B8(01100000), B8(11111100)}
+#define DEC_ADDI_R {B8(01010000), B8(11111100)}
+#define DEC_MULS_R {B8(11010000), B8(11111100)}
+#define DEC_MULU_R {B8(10010000), B8(11111100)}
+#define DEC_ADDI_ACR {B8(01010100), B8(11111100)}
+#define DEC_NEG_R {B8(01011000), B8(11111100)}
+#define DEC_BOUND_R {B8(01011100), B8(11111100)}
+#define DEC_SUBU_R {B8(01001000), B8(11111110)}
+#define DEC_SUBS_R {B8(01001010), B8(11111110)}
+#define DEC_SUB_R {B8(01101000), B8(11111100)}
+#define DEC_CMP_R {B8(01101100), B8(11111100)}
+#define DEC_AND_R {B8(01110000), B8(11111100)}
+#define DEC_ABS_R {B8(01101011), B8(11111111)}
+#define DEC_LZ_R {B8(01110011), B8(11111111)}
+#define DEC_MCP_R {B8(01111111), B8(11111111)}
+#define DEC_SWAP_R {B8(01110111), B8(11111111)}
+#define DEC_XOR_R {B8(01111011), B8(11111111)}
+#define DEC_LSL_R {B8(01001100), B8(11111100)}
+#define DEC_LSR_R {B8(01111100), B8(11111100)}
+#define DEC_ASR_R {B8(01111000), B8(11111100)}
+#define DEC_OR_R {B8(01110100), B8(11111100)}
+#define DEC_BTST_R {B8(01001111), B8(11111111)}
+
+/* Fixed. */
+#define DEC_SETF {B8(01011011), B8(11111111)}
+#define DEC_CLEARF {B8(01011111), B8(11111111)}
+
+/* Memory. */
+#define DEC_ADDU_M {B8(10000000), B8(10111110)}
+#define DEC_ADDS_M {B8(10000010), B8(10111110)}
+#define DEC_MOVU_M {B8(10000100), B8(10111110)}
+#define DEC_MOVS_M {B8(10000110), B8(10111110)}
+#define DEC_SUBU_M {B8(10001000), B8(10111110)}
+#define DEC_SUBS_M {B8(10001010), B8(10111110)}
+#define DEC_CMPU_M {B8(10001100), B8(10111110)}
+#define DEC_CMPS_M {B8(10001110), B8(10111110)}
+#define DEC_ADDO_M {B8(10010100), B8(10111100)}
+#define DEC_BOUND_M {B8(10011100), B8(10111100)}
+#define DEC_ADD_M {B8(10100000), B8(10111100)}
+#define DEC_MOVE_MR {B8(10100100), B8(10111100)}
+#define DEC_SUB_M {B8(10101000), B8(10111100)}
+#define DEC_CMP_M {B8(10101100), B8(10111100)}
+#define DEC_AND_M {B8(10110000), B8(10111100)}
+#define DEC_OR_M {B8(10110100), B8(10111100)}
+#define DEC_TEST_M {B8(10111000), B8(10111100)}
+#define DEC_MOVE_RM {B8(10111100), B8(10111100)}
+
+#define DEC_ADDC_R {B8(01010111), B8(11111111)}
+#define DEC_ADDC_MR {B8(10011010), B8(10111111)}
+#define DEC_LAPCQ {B8(10010111), B8(11111111)}
+#define DEC_LAPC_IM {B8(11010111), B8(11111111)}
+
+#define DEC_MOVE_MP {B8(10100011), B8(10111111)}
+#define DEC_MOVE_PM {B8(10100111), B8(10111111)}
+
+#define DEC_SCC_R {B8(01010011), B8(11111111)}
+#define DEC_RFE_ETC {B8(10010011), B8(11111111)}
+#define DEC_JUMP_P {B8(10011111), B8(11111111)}
+#define DEC_BCC_IM {B8(11011111), B8(11111111)}
+#define DEC_JAS_R {B8(10011011), B8(11111111)}
+#define DEC_JASC_R {B8(10110011), B8(11111111)}
+#define DEC_JAS_IM {B8(11011011), B8(11111111)}
+#define DEC_JASC_IM {B8(11110011), B8(11111111)}
+#define DEC_BAS_IM {B8(11101011), B8(11111111)}
+#define DEC_BASC_IM {B8(11101111), B8(11111111)}
+#define DEC_MOVEM_MR {B8(10111011), B8(10111111)}
+#define DEC_MOVEM_RM {B8(10111111), B8(10111111)}
+
+#define DEC_FTAG_FIDX_D_M {B8(10101011), B8(11111111)}
+#define DEC_FTAG_FIDX_I_M {B8(11010011), B8(11111111)}
+
+#endif
diff --git a/target/cris/gdbstub.c b/target/cris/gdbstub.c
new file mode 100644
index 0000000000..3a72ee2a98
--- /dev/null
+++ b/target/cris/gdbstub.c
@@ -0,0 +1,131 @@
+/*
+ * CRIS gdb server stub
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+
+int crisv10_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ CRISCPU *cpu = CRIS_CPU(cs);
+ CPUCRISState *env = &cpu->env;
+
+ if (n < 15) {
+ return gdb_get_reg32(mem_buf, env->regs[n]);
+ }
+
+ if (n == 15) {
+ return gdb_get_reg32(mem_buf, env->pc);
+ }
+
+ if (n < 32) {
+ switch (n) {
+ case 16:
+ return gdb_get_reg8(mem_buf, env->pregs[n - 16]);
+ case 17:
+ return gdb_get_reg8(mem_buf, env->pregs[n - 16]);
+ case 20:
+ case 21:
+ return gdb_get_reg16(mem_buf, env->pregs[n - 16]);
+ default:
+ if (n >= 23) {
+ return gdb_get_reg32(mem_buf, env->pregs[n - 16]);
+ }
+ break;
+ }
+ }
+ return 0;
+}
+
+int cris_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ CRISCPU *cpu = CRIS_CPU(cs);
+ CPUCRISState *env = &cpu->env;
+ uint8_t srs;
+
+ srs = env->pregs[PR_SRS];
+ if (n < 16) {
+ return gdb_get_reg32(mem_buf, env->regs[n]);
+ }
+
+ if (n >= 21 && n < 32) {
+ return gdb_get_reg32(mem_buf, env->pregs[n - 16]);
+ }
+ if (n >= 33 && n < 49) {
+ return gdb_get_reg32(mem_buf, env->sregs[srs][n - 33]);
+ }
+ switch (n) {
+ case 16:
+ return gdb_get_reg8(mem_buf, env->pregs[0]);
+ case 17:
+ return gdb_get_reg8(mem_buf, env->pregs[1]);
+ case 18:
+ return gdb_get_reg32(mem_buf, env->pregs[2]);
+ case 19:
+ return gdb_get_reg8(mem_buf, srs);
+ case 20:
+ return gdb_get_reg16(mem_buf, env->pregs[4]);
+ case 32:
+ return gdb_get_reg32(mem_buf, env->pc);
+ }
+
+ return 0;
+}
+
+int cris_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ CRISCPU *cpu = CRIS_CPU(cs);
+ CPUCRISState *env = &cpu->env;
+ uint32_t tmp;
+
+ if (n > 49) {
+ return 0;
+ }
+
+ tmp = ldl_p(mem_buf);
+
+ if (n < 16) {
+ env->regs[n] = tmp;
+ }
+
+ if (n >= 21 && n < 32) {
+ env->pregs[n - 16] = tmp;
+ }
+
+ /* FIXME: Should support function regs be writable? */
+ switch (n) {
+ case 16:
+ return 1;
+ case 17:
+ return 1;
+ case 18:
+ env->pregs[PR_PID] = tmp;
+ break;
+ case 19:
+ return 1;
+ case 20:
+ return 2;
+ case 32:
+ env->pc = tmp;
+ break;
+ }
+
+ return 4;
+}
diff --git a/target/cris/helper.c b/target/cris/helper.c
new file mode 100644
index 0000000000..af78cca8b9
--- /dev/null
+++ b/target/cris/helper.c
@@ -0,0 +1,319 @@
+/*
+ * CRIS helper routines.
+ *
+ * Copyright (c) 2007 AXIS Communications AB
+ * Written by Edgar E. Iglesias.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "mmu.h"
+#include "qemu/host-utils.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+
+
+//#define CRIS_HELPER_DEBUG
+
+
+#ifdef CRIS_HELPER_DEBUG
+#define D(x) x
+#define D_LOG(...) qemu_log(__VA_ARGS__)
+#else
+#define D(x)
+#define D_LOG(...) do { } while (0)
+#endif
+
+#if defined(CONFIG_USER_ONLY)
+
+void cris_cpu_do_interrupt(CPUState *cs)
+{
+ CRISCPU *cpu = CRIS_CPU(cs);
+ CPUCRISState *env = &cpu->env;
+
+ cs->exception_index = -1;
+ env->pregs[PR_ERP] = env->pc;
+}
+
+void crisv10_cpu_do_interrupt(CPUState *cs)
+{
+ cris_cpu_do_interrupt(cs);
+}
+
+int cris_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
+ int mmu_idx)
+{
+ CRISCPU *cpu = CRIS_CPU(cs);
+
+ cs->exception_index = 0xaa;
+ cpu->env.pregs[PR_EDA] = address;
+ cpu_dump_state(cs, stderr, fprintf, 0);
+ return 1;
+}
+
+#else /* !CONFIG_USER_ONLY */
+
+
+static void cris_shift_ccs(CPUCRISState *env)
+{
+ uint32_t ccs;
+ /* Apply the ccs shift. */
+ ccs = env->pregs[PR_CCS];
+ ccs = ((ccs & 0xc0000000) | ((ccs << 12) >> 2)) & ~0x3ff;
+ env->pregs[PR_CCS] = ccs;
+}
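+
+/* Rough worked example of the shift above (CCS value is made up): with
+ * CCS = U_FLAG | I_FLAG = 0x60 on exception entry, the expression yields
+ * 0x18000, i.e. the active flag field is moved up by 10 bits into the
+ * saved field, the low 10 bits (including U and I) are cleared, and only
+ * the top two bits (Q and M) would survive the 0xc0000000 mask.
+ */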
+
+int cris_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
+ int mmu_idx)
+{
+ CRISCPU *cpu = CRIS_CPU(cs);
+ CPUCRISState *env = &cpu->env;
+ struct cris_mmu_result res;
+ int prot, miss;
+ int r = -1;
+ target_ulong phy;
+
+ qemu_log_mask(CPU_LOG_MMU, "%s addr=%" VADDR_PRIx " pc=%x rw=%x\n",
+ __func__, address, env->pc, rw);
+ miss = cris_mmu_translate(&res, env, address & TARGET_PAGE_MASK,
+ rw, mmu_idx, 0);
+ if (miss) {
+ if (cs->exception_index == EXCP_BUSFAULT) {
+ cpu_abort(cs,
+ "CRIS: Illegal recursive bus fault."
+ "addr=%" VADDR_PRIx " rw=%d\n",
+ address, rw);
+ }
+
+ env->pregs[PR_EDA] = address;
+ cs->exception_index = EXCP_BUSFAULT;
+ env->fault_vector = res.bf_vec;
+ r = 1;
+ } else {
+ /*
+ * Mask off the cache selection bit. The ETRAX busses do not
+ * see the top bit.
+ */
+ phy = res.phy & ~0x80000000;
+ prot = res.prot;
+ tlb_set_page(cs, address & TARGET_PAGE_MASK, phy,
+ prot, mmu_idx, TARGET_PAGE_SIZE);
+ r = 0;
+ }
+ if (r > 0) {
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s returns %d irqreq=%x addr=%" VADDR_PRIx " phy=%x vec=%x"
+ " pc=%x\n", __func__, r, cs->interrupt_request, address,
+ res.phy, res.bf_vec, env->pc);
+ }
+ return r;
+}
+
+void crisv10_cpu_do_interrupt(CPUState *cs)
+{
+ CRISCPU *cpu = CRIS_CPU(cs);
+ CPUCRISState *env = &cpu->env;
+ int ex_vec = -1;
+
+ D_LOG("exception index=%d interrupt_req=%d\n",
+ cs->exception_index,
+ cs->interrupt_request);
+
+ if (env->dslot) {
+ /* CRISv10 never takes interrupts while in a delay-slot. */
+ cpu_abort(cs, "CRIS: Interrupt on delay-slot\n");
+ }
+
+ assert(!(env->pregs[PR_CCS] & PFIX_FLAG));
+ switch (cs->exception_index) {
+ case EXCP_BREAK:
+ /* These exceptions are generated by the core itself.
+ ERP should point to the insn following the brk. */
+ ex_vec = env->trap_vector;
+ env->pregs[PRV10_BRP] = env->pc;
+ break;
+
+ case EXCP_NMI:
+ /* NMI is hardwired to vector zero. */
+ ex_vec = 0;
+ env->pregs[PR_CCS] &= ~M_FLAG_V10;
+ env->pregs[PRV10_BRP] = env->pc;
+ break;
+
+ case EXCP_BUSFAULT:
+ cpu_abort(cs, "Unhandled busfault");
+ break;
+
+ default:
+ /* The interrupt controller gives us the vector. */
+ ex_vec = env->interrupt_vector;
+ /* Normal interrupts are taken between TBs; env->pc is valid here. */
+ env->pregs[PR_ERP] = env->pc;
+ break;
+ }
+
+ if (env->pregs[PR_CCS] & U_FLAG) {
+ /* Swap stack pointers. */
+ env->pregs[PR_USP] = env->regs[R_SP];
+ env->regs[R_SP] = env->ksp;
+ }
+
+ /* Now that we are in kernel mode, load the handler's address. */
+ env->pc = cpu_ldl_code(env, env->pregs[PR_EBP] + ex_vec * 4);
+ env->locked_irq = 1;
+ env->pregs[PR_CCS] |= F_FLAG_V10; /* set F. */
+
+ qemu_log_mask(CPU_LOG_INT, "%s isr=%x vec=%x ccs=%x pid=%d erp=%x\n",
+ __func__, env->pc, ex_vec,
+ env->pregs[PR_CCS],
+ env->pregs[PR_PID],
+ env->pregs[PR_ERP]);
+}
+
+void cris_cpu_do_interrupt(CPUState *cs)
+{
+ CRISCPU *cpu = CRIS_CPU(cs);
+ CPUCRISState *env = &cpu->env;
+ int ex_vec = -1;
+
+ D_LOG("exception index=%d interrupt_req=%d\n",
+ cs->exception_index,
+ cs->interrupt_request);
+
+ switch (cs->exception_index) {
+ case EXCP_BREAK:
+ /* These exceptions are generated by the core itself.
+ ERP should point to the insn following the brk. */
+ ex_vec = env->trap_vector;
+ env->pregs[PR_ERP] = env->pc;
+ break;
+
+ case EXCP_NMI:
+ /* NMI is hardwired to vector zero. */
+ ex_vec = 0;
+ env->pregs[PR_CCS] &= ~M_FLAG_V32;
+ env->pregs[PR_NRP] = env->pc;
+ break;
+
+ case EXCP_BUSFAULT:
+ ex_vec = env->fault_vector;
+ env->pregs[PR_ERP] = env->pc;
+ break;
+
+ default:
+ /* The interrupt controller gives us the vector. */
+ ex_vec = env->interrupt_vector;
+ /* Normal interrupts are taken between TBs; env->pc is valid here. */
+ env->pregs[PR_ERP] = env->pc;
+ break;
+ }
+
+ /* Fill in the IDX field. */
+ env->pregs[PR_EXS] = (ex_vec & 0xff) << 8;
+
+ if (env->dslot) {
+ D_LOG("excp isr=%x PC=%x ds=%d SP=%x"
+ " ERP=%x pid=%x ccs=%x cc=%d %x\n",
+ ex_vec, env->pc, env->dslot,
+ env->regs[R_SP],
+ env->pregs[PR_ERP], env->pregs[PR_PID],
+ env->pregs[PR_CCS],
+ env->cc_op, env->cc_mask);
+ /* We lose the btarget/btaken state here, so re-execute the
+ branch. */
+ env->pregs[PR_ERP] -= env->dslot;
+ /* Exception starts with dslot cleared. */
+ env->dslot = 0;
+ }
+
+ if (env->pregs[PR_CCS] & U_FLAG) {
+ /* Swap stack pointers. */
+ env->pregs[PR_USP] = env->regs[R_SP];
+ env->regs[R_SP] = env->ksp;
+ }
+
+ /* Apply the CRIS CCS shift. Clears U if set. */
+ cris_shift_ccs(env);
+
+ /* Now that we are in kernel mode, load the handler's address.
+ This load must not fault; real hw leaves that behaviour undefined. */
+ env->pc = cpu_ldl_code(env, env->pregs[PR_EBP] + ex_vec * 4);
+
+ /* Clear the exception_index to avoid spurious hw_aborts for
+ recursive bus faults. */
+ cs->exception_index = -1;
+
+ D_LOG("%s isr=%x vec=%x ccs=%x pid=%d erp=%x\n",
+ __func__, env->pc, ex_vec,
+ env->pregs[PR_CCS],
+ env->pregs[PR_PID],
+ env->pregs[PR_ERP]);
+}
+
+hwaddr cris_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+ CRISCPU *cpu = CRIS_CPU(cs);
+ uint32_t phy = addr;
+ struct cris_mmu_result res;
+ int miss;
+
+ miss = cris_mmu_translate(&res, &cpu->env, addr, 0, 0, 1);
+ /* If D TLB misses, try I TLB. */
+ if (miss) {
+ miss = cris_mmu_translate(&res, &cpu->env, addr, 2, 0, 1);
+ }
+
+ if (!miss) {
+ phy = res.phy;
+ }
+ D(fprintf(stderr, "%s %x -> %x\n", __func__, addr, phy));
+ return phy;
+}
+#endif
+
+bool cris_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ CPUClass *cc = CPU_GET_CLASS(cs);
+ CRISCPU *cpu = CRIS_CPU(cs);
+ CPUCRISState *env = &cpu->env;
+ bool ret = false;
+
+ if (interrupt_request & CPU_INTERRUPT_HARD
+ && (env->pregs[PR_CCS] & I_FLAG)
+ && !env->locked_irq) {
+ cs->exception_index = EXCP_IRQ;
+ cc->do_interrupt(cs);
+ ret = true;
+ }
+ if (interrupt_request & CPU_INTERRUPT_NMI) {
+ unsigned int m_flag_archval;
+ if (env->pregs[PR_VR] < 32) {
+ m_flag_archval = M_FLAG_V10;
+ } else {
+ m_flag_archval = M_FLAG_V32;
+ }
+ if ((env->pregs[PR_CCS] & m_flag_archval)) {
+ cs->exception_index = EXCP_NMI;
+ cc->do_interrupt(cs);
+ ret = true;
+ }
+ }
+
+ return ret;
+}
diff --git a/target/cris/helper.h b/target/cris/helper.h
new file mode 100644
index 0000000000..ff3595641a
--- /dev/null
+++ b/target/cris/helper.h
@@ -0,0 +1,24 @@
+DEF_HELPER_2(raise_exception, void, env, i32)
+DEF_HELPER_2(tlb_flush_pid, void, env, i32)
+DEF_HELPER_2(spc_write, void, env, i32)
+DEF_HELPER_1(rfe, void, env)
+DEF_HELPER_1(rfn, void, env)
+
+DEF_HELPER_3(movl_sreg_reg, void, env, i32, i32)
+DEF_HELPER_3(movl_reg_sreg, void, env, i32, i32)
+
+DEF_HELPER_FLAGS_1(lz, TCG_CALL_NO_SE, i32, i32)
+DEF_HELPER_FLAGS_4(btst, TCG_CALL_NO_SE, i32, env, i32, i32, i32)
+
+DEF_HELPER_FLAGS_4(evaluate_flags_muls, TCG_CALL_NO_SE, i32, env, i32, i32, i32)
+DEF_HELPER_FLAGS_4(evaluate_flags_mulu, TCG_CALL_NO_SE, i32, env, i32, i32, i32)
+DEF_HELPER_FLAGS_5(evaluate_flags_mcp, TCG_CALL_NO_SE, i32, env,
+ i32, i32, i32, i32)
+DEF_HELPER_FLAGS_5(evaluate_flags_alu_4, TCG_CALL_NO_SE, i32, env,
+ i32, i32, i32, i32)
+DEF_HELPER_FLAGS_5(evaluate_flags_sub_4, TCG_CALL_NO_SE, i32, env,
+ i32, i32, i32, i32)
+DEF_HELPER_FLAGS_3(evaluate_flags_move_4, TCG_CALL_NO_SE, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(evaluate_flags_move_2, TCG_CALL_NO_SE, i32, env, i32, i32)
+DEF_HELPER_1(evaluate_flags, void, env)
+DEF_HELPER_1(top_evaluate_flags, void, env)
diff --git a/target/cris/machine.c b/target/cris/machine.c
new file mode 100644
index 0000000000..6b797e8c1d
--- /dev/null
+++ b/target/cris/machine.c
@@ -0,0 +1,95 @@
+/*
+ * CRIS virtual CPU state save/load support
+ *
+ * Copyright (c) 2012 Red Hat, Inc.
+ * Written by Juan Quintela <quintela@redhat.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "hw/hw.h"
+#include "migration/cpu.h"
+
+static const VMStateDescription vmstate_tlbset = {
+ .name = "cpu/tlbset",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(lo, TLBSet),
+ VMSTATE_UINT32(hi, TLBSet),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_cris_env = {
+ .name = "env",
+ .version_id = 2,
+ .minimum_version_id = 2,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(regs, CPUCRISState, 16),
+ VMSTATE_UINT32_ARRAY(pregs, CPUCRISState, 16),
+ VMSTATE_UINT32(pc, CPUCRISState),
+ VMSTATE_UINT32(ksp, CPUCRISState),
+ VMSTATE_INT32(dslot, CPUCRISState),
+ VMSTATE_INT32(btaken, CPUCRISState),
+ VMSTATE_UINT32(btarget, CPUCRISState),
+ VMSTATE_UINT32(cc_op, CPUCRISState),
+ VMSTATE_UINT32(cc_mask, CPUCRISState),
+ VMSTATE_UINT32(cc_dest, CPUCRISState),
+ VMSTATE_UINT32(cc_src, CPUCRISState),
+ VMSTATE_UINT32(cc_result, CPUCRISState),
+ VMSTATE_INT32(cc_size, CPUCRISState),
+ VMSTATE_INT32(cc_x, CPUCRISState),
+ VMSTATE_INT32(locked_irq, CPUCRISState),
+ VMSTATE_INT32(interrupt_vector, CPUCRISState),
+ VMSTATE_INT32(fault_vector, CPUCRISState),
+ VMSTATE_INT32(trap_vector, CPUCRISState),
+ VMSTATE_UINT32_ARRAY(sregs[0], CPUCRISState, 16),
+ VMSTATE_UINT32_ARRAY(sregs[1], CPUCRISState, 16),
+ VMSTATE_UINT32_ARRAY(sregs[2], CPUCRISState, 16),
+ VMSTATE_UINT32_ARRAY(sregs[3], CPUCRISState, 16),
+ VMSTATE_UINT32(mmu_rand_lfsr, CPUCRISState),
+ VMSTATE_STRUCT_ARRAY(tlbsets[0][0], CPUCRISState, 16, 0,
+ vmstate_tlbset, TLBSet),
+ VMSTATE_STRUCT_ARRAY(tlbsets[0][1], CPUCRISState, 16, 0,
+ vmstate_tlbset, TLBSet),
+ VMSTATE_STRUCT_ARRAY(tlbsets[0][2], CPUCRISState, 16, 0,
+ vmstate_tlbset, TLBSet),
+ VMSTATE_STRUCT_ARRAY(tlbsets[0][3], CPUCRISState, 16, 0,
+ vmstate_tlbset, TLBSet),
+ VMSTATE_STRUCT_ARRAY(tlbsets[1][0], CPUCRISState, 16, 0,
+ vmstate_tlbset, TLBSet),
+ VMSTATE_STRUCT_ARRAY(tlbsets[1][1], CPUCRISState, 16, 0,
+ vmstate_tlbset, TLBSet),
+ VMSTATE_STRUCT_ARRAY(tlbsets[1][2], CPUCRISState, 16, 0,
+ vmstate_tlbset, TLBSet),
+ VMSTATE_STRUCT_ARRAY(tlbsets[1][3], CPUCRISState, 16, 0,
+ vmstate_tlbset, TLBSet),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+const VMStateDescription vmstate_cris_cpu = {
+ .name = "cpu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_CPU(),
+ VMSTATE_STRUCT(env, CRISCPU, 1, vmstate_cris_env, CPUCRISState),
+ VMSTATE_END_OF_LIST()
+ }
+};
diff --git a/target/cris/mmu.c b/target/cris/mmu.c
new file mode 100644
index 0000000000..b8db908823
--- /dev/null
+++ b/target/cris/mmu.c
@@ -0,0 +1,362 @@
+/*
+ * CRIS mmu emulation.
+ *
+ * Copyright (c) 2007 AXIS Communications AB
+ * Written by Edgar E. Iglesias.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "mmu.h"
+
+#ifdef DEBUG
+#define D(x) x
+#define D_LOG(...) qemu_log(__VA_ARGS__)
+#else
+#define D(x) do { } while (0)
+#define D_LOG(...) do { } while (0)
+#endif
+
+void cris_mmu_init(CPUCRISState *env)
+{
+ env->mmu_rand_lfsr = 0xcccc;
+}
+
+#define SR_POLYNOM 0x8805
+static inline unsigned int compute_polynom(unsigned int sr)
+{
+ unsigned int i;
+ unsigned int f;
+
+ f = 0;
+ for (i = 0; i < 16; i++)
+ f += ((SR_POLYNOM >> i) & 1) & ((sr >> i) & 1);
+
+ return f;
+}
+
+static void cris_mmu_update_rand_lfsr(CPUCRISState *env)
+{
+ unsigned int f;
+
+ /* Update lfsr at every fault. */
+ f = compute_polynom(env->mmu_rand_lfsr);
+ env->mmu_rand_lfsr >>= 1;
+ env->mmu_rand_lfsr |= (f << 15);
+ env->mmu_rand_lfsr &= 0xffff;
+}
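+
+/* Worked example of one LFSR step, starting from the reset value set in
+ * cris_mmu_init(): with mmu_rand_lfsr = 0xcccc, SR_POLYNOM & 0xcccc has
+ * three bits set, so f = 3; after the shift and OR the new value is
+ * (0x6666 | (3 << 15)) & 0xffff = 0xe666. Only bit 0 of f ends up
+ * mattering, since the higher bits fall outside the 16-bit mask.
+ */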
+
+static inline int cris_mmu_enabled(uint32_t rw_gc_cfg)
+{
+ return (rw_gc_cfg & 12) != 0;
+}
+
+static inline int cris_mmu_segmented_addr(int seg, uint32_t rw_mm_cfg)
+{
+ return (1 << seg) & rw_mm_cfg;
+}
+
+static uint32_t cris_mmu_translate_seg(CPUCRISState *env, int seg)
+{
+ uint32_t base;
+ int i;
+
+ if (seg < 8)
+ base = env->sregs[SFR_RW_MM_KBASE_LO];
+ else
+ base = env->sregs[SFR_RW_MM_KBASE_HI];
+
+ i = seg & 7;
+ base >>= i * 4;
+ base &= 15;
+
+ base <<= 28;
+ return base;
+}
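+
+/* Sketch of the segment mapping above (register contents are made up):
+ * for vaddr = 0xb0001234 the segment is 0xb, so nibble 3 of
+ * SFR_RW_MM_KBASE_HI selects the physical base; if that nibble were 0x4,
+ * cris_mmu_translate() below would yield phy = 0x40000000 | 0x00001234
+ * = 0x40001234.
+ */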
+/* Used by the tlb decoder. */
+#define EXTRACT_FIELD(src, start, end) \
+ (((src) >> start) & ((1 << (end - start + 1)) - 1))
+
+static inline void set_field(uint32_t *dst, unsigned int val,
+ unsigned int offset, unsigned int width)
+{
+ uint32_t mask;
+
+ mask = (1 << width) - 1;
+ mask <<= offset;
+ val <<= offset;
+
+ val &= mask;
+ *dst &= ~(mask);
+ *dst |= val;
+}
+
+#ifdef DEBUG
+static void dump_tlb(CPUCRISState *env, int mmu)
+{
+ int set;
+ int idx;
+ uint32_t hi, lo, tlb_vpn, tlb_pfn;
+
+ for (set = 0; set < 4; set++) {
+ for (idx = 0; idx < 16; idx++) {
+ lo = env->tlbsets[mmu][set][idx].lo;
+ hi = env->tlbsets[mmu][set][idx].hi;
+ tlb_vpn = EXTRACT_FIELD(hi, 13, 31);
+ tlb_pfn = EXTRACT_FIELD(lo, 13, 31);
+
+ printf ("TLB: [%d][%d] hi=%x lo=%x v=%x p=%x\n",
+ set, idx, hi, lo, tlb_vpn, tlb_pfn);
+ }
+ }
+}
+#endif
+
+/* rw 0 = read, 1 = write, 2 = exec. */
+static int cris_mmu_translate_page(struct cris_mmu_result *res,
+ CPUCRISState *env, uint32_t vaddr,
+ int rw, int usermode, int debug)
+{
+ unsigned int vpage;
+ unsigned int idx;
+ uint32_t pid, lo, hi;
+ uint32_t tlb_vpn, tlb_pfn = 0;
+ int tlb_pid, tlb_g, tlb_v, tlb_k, tlb_w, tlb_x;
+ int cfg_v, cfg_k, cfg_w, cfg_x;
+ int set, match = 0;
+ uint32_t r_cause;
+ uint32_t r_cfg;
+ int rwcause;
+ int mmu = 1; /* Data mmu is default. */
+ int vect_base;
+
+ r_cause = env->sregs[SFR_R_MM_CAUSE];
+ r_cfg = env->sregs[SFR_RW_MM_CFG];
+ pid = env->pregs[PR_PID] & 0xff;
+
+ switch (rw) {
+ case 2: rwcause = CRIS_MMU_ERR_EXEC; mmu = 0; break;
+ case 1: rwcause = CRIS_MMU_ERR_WRITE; break;
+ default:
+ case 0: rwcause = CRIS_MMU_ERR_READ; break;
+ }
+
+ /* I exception vectors 4 - 7, D 8 - 11. */
+ vect_base = (mmu + 1) * 4;
+
+ vpage = vaddr >> 13;
+
+ /* We know which index to check in each set.
+ Scan both I and D. */
+#if 0
+ for (set = 0; set < 4; set++) {
+ for (idx = 0; idx < 16; idx++) {
+ lo = env->tlbsets[mmu][set][idx].lo;
+ hi = env->tlbsets[mmu][set][idx].hi;
+ tlb_vpn = EXTRACT_FIELD(hi, 13, 31);
+ tlb_pfn = EXTRACT_FIELD(lo, 13, 31);
+
+ printf ("TLB: [%d][%d] hi=%x lo=%x v=%x p=%x\n",
+ set, idx, hi, lo, tlb_vpn, tlb_pfn);
+ }
+ }
+#endif
+
+ idx = vpage & 15;
+ for (set = 0; set < 4; set++)
+ {
+ lo = env->tlbsets[mmu][set][idx].lo;
+ hi = env->tlbsets[mmu][set][idx].hi;
+
+ tlb_vpn = hi >> 13;
+ tlb_pid = EXTRACT_FIELD(hi, 0, 7);
+ tlb_g = EXTRACT_FIELD(lo, 4, 4);
+
+ D_LOG("TLB[%d][%d][%d] v=%x vpage=%x lo=%x hi=%x\n",
+ mmu, set, idx, tlb_vpn, vpage, lo, hi);
+ if ((tlb_g || (tlb_pid == pid))
+ && tlb_vpn == vpage) {
+ match = 1;
+ break;
+ }
+ }
+
+ res->bf_vec = vect_base;
+ if (match) {
+ cfg_w = EXTRACT_FIELD(r_cfg, 19, 19);
+ cfg_k = EXTRACT_FIELD(r_cfg, 18, 18);
+ cfg_x = EXTRACT_FIELD(r_cfg, 17, 17);
+ cfg_v = EXTRACT_FIELD(r_cfg, 16, 16);
+
+ tlb_pfn = EXTRACT_FIELD(lo, 13, 31);
+ tlb_v = EXTRACT_FIELD(lo, 3, 3);
+ tlb_k = EXTRACT_FIELD(lo, 2, 2);
+ tlb_w = EXTRACT_FIELD(lo, 1, 1);
+ tlb_x = EXTRACT_FIELD(lo, 0, 0);
+
+ /*
+ set_exception_vector(0x04, i_mmu_refill);
+ set_exception_vector(0x05, i_mmu_invalid);
+ set_exception_vector(0x06, i_mmu_access);
+ set_exception_vector(0x07, i_mmu_execute);
+ set_exception_vector(0x08, d_mmu_refill);
+ set_exception_vector(0x09, d_mmu_invalid);
+ set_exception_vector(0x0a, d_mmu_access);
+ set_exception_vector(0x0b, d_mmu_write);
+ */
+ if (cfg_k && tlb_k && usermode) {
+ D(printf ("tlb: kernel protected %x lo=%x pc=%x\n",
+ vaddr, lo, env->pc));
+ match = 0;
+ res->bf_vec = vect_base + 2;
+ } else if (rw == 1 && cfg_w && !tlb_w) {
+ D(printf ("tlb: write protected %x lo=%x pc=%x\n",
+ vaddr, lo, env->pc));
+ match = 0;
+ /* write accesses never go through the I mmu. */
+ res->bf_vec = vect_base + 3;
+ } else if (rw == 2 && cfg_x && !tlb_x) {
+ D(printf ("tlb: exec protected %x lo=%x pc=%x\n",
+ vaddr, lo, env->pc));
+ match = 0;
+ res->bf_vec = vect_base + 3;
+ } else if (cfg_v && !tlb_v) {
+ D(printf ("tlb: invalid %x\n", vaddr));
+ match = 0;
+ res->bf_vec = vect_base + 1;
+ }
+
+ res->prot = 0;
+ if (match) {
+ res->prot |= PAGE_READ;
+ if (tlb_w)
+ res->prot |= PAGE_WRITE;
+ if (mmu == 0 && (cfg_x || tlb_x))
+ res->prot |= PAGE_EXEC;
+ }
+ else
+ D(dump_tlb(env, mmu));
+ } else {
+ /* If refill, provide a randomized set. */
+ set = env->mmu_rand_lfsr & 3;
+ }
+
+ if (!match && !debug) {
+ cris_mmu_update_rand_lfsr(env);
+
+ /* Compute index. */
+ idx = vpage & 15;
+
+ /* Update RW_MM_TLB_SEL. */
+ env->sregs[SFR_RW_MM_TLB_SEL] = 0;
+ set_field(&env->sregs[SFR_RW_MM_TLB_SEL], idx, 0, 4);
+ set_field(&env->sregs[SFR_RW_MM_TLB_SEL], set, 4, 2);
+
+ /* Update RW_MM_CAUSE. */
+ set_field(&r_cause, rwcause, 8, 2);
+ set_field(&r_cause, vpage, 13, 19);
+ set_field(&r_cause, pid, 0, 8);
+ env->sregs[SFR_R_MM_CAUSE] = r_cause;
+ D(printf("refill vaddr=%x pc=%x\n", vaddr, env->pc));
+ }
+
+ D(printf ("%s rw=%d mtch=%d pc=%x va=%x vpn=%x tlbvpn=%x pfn=%x pid=%x"
+ " %x cause=%x sel=%x sp=%x %x %x\n",
+ __func__, rw, match, env->pc,
+ vaddr, vpage,
+ tlb_vpn, tlb_pfn, tlb_pid,
+ pid,
+ r_cause,
+ env->sregs[SFR_RW_MM_TLB_SEL],
+ env->regs[R_SP], env->pregs[PR_USP], env->ksp));
+
+ res->phy = tlb_pfn << TARGET_PAGE_BITS;
+ return !match;
+}
+
+void cris_mmu_flush_pid(CPUCRISState *env, uint32_t pid)
+{
+ CRISCPU *cpu = cris_env_get_cpu(env);
+ target_ulong vaddr;
+ unsigned int idx;
+ uint32_t lo, hi;
+ uint32_t tlb_vpn;
+ int tlb_pid, tlb_g, tlb_v;
+ unsigned int set;
+ unsigned int mmu;
+
+ pid &= 0xff;
+ for (mmu = 0; mmu < 2; mmu++) {
+ for (set = 0; set < 4; set++)
+ {
+ for (idx = 0; idx < 16; idx++) {
+ lo = env->tlbsets[mmu][set][idx].lo;
+ hi = env->tlbsets[mmu][set][idx].hi;
+
+ tlb_vpn = EXTRACT_FIELD(hi, 13, 31);
+ tlb_pid = EXTRACT_FIELD(hi, 0, 7);
+ tlb_g = EXTRACT_FIELD(lo, 4, 4);
+ tlb_v = EXTRACT_FIELD(lo, 3, 3);
+
+ if (tlb_v && !tlb_g && (tlb_pid == pid)) {
+ vaddr = tlb_vpn << TARGET_PAGE_BITS;
+ D_LOG("flush pid=%x vaddr=%x\n",
+ pid, vaddr);
+ tlb_flush_page(CPU(cpu), vaddr);
+ }
+ }
+ }
+ }
+}
+
+int cris_mmu_translate(struct cris_mmu_result *res,
+ CPUCRISState *env, uint32_t vaddr,
+ int rw, int mmu_idx, int debug)
+{
+ int seg;
+ int miss = 0;
+ int is_user = mmu_idx == MMU_USER_IDX;
+ uint32_t old_srs;
+
+ old_srs = env->pregs[PR_SRS];
+
+ /* rw == 2 means exec, map the access to the insn mmu. */
+ env->pregs[PR_SRS] = rw == 2 ? 1 : 2;
+
+ if (!cris_mmu_enabled(env->sregs[SFR_RW_GC_CFG])) {
+ res->phy = vaddr;
+ res->prot = PAGE_BITS;
+ goto done;
+ }
+
+ seg = vaddr >> 28;
+ if (!is_user && cris_mmu_segmented_addr(seg, env->sregs[SFR_RW_MM_CFG]))
+ {
+ uint32_t base;
+
+ miss = 0;
+ base = cris_mmu_translate_seg(env, seg);
+ res->phy = base | (0x0fffffff & vaddr);
+ res->prot = PAGE_BITS;
+ } else {
+ miss = cris_mmu_translate_page(res, env, vaddr, rw,
+ is_user, debug);
+ }
+ done:
+ env->pregs[PR_SRS] = old_srs;
+ return miss;
+}
diff --git a/target/cris/mmu.h b/target/cris/mmu.h
new file mode 100644
index 0000000000..8e249e812b
--- /dev/null
+++ b/target/cris/mmu.h
@@ -0,0 +1,17 @@
+#define CRIS_MMU_ERR_EXEC 0
+#define CRIS_MMU_ERR_READ 1
+#define CRIS_MMU_ERR_WRITE 2
+#define CRIS_MMU_ERR_FLUSH 3
+
+struct cris_mmu_result
+{
+ uint32_t phy;
+ int prot;
+ int bf_vec;
+};
+
+void cris_mmu_init(CPUCRISState *env);
+void cris_mmu_flush_pid(CPUCRISState *env, uint32_t pid);
+int cris_mmu_translate(struct cris_mmu_result *res,
+ CPUCRISState *env, uint32_t vaddr,
+ int rw, int mmu_idx, int debug);
diff --git a/target/cris/op_helper.c b/target/cris/op_helper.c
new file mode 100644
index 0000000000..504303913c
--- /dev/null
+++ b/target/cris/op_helper.c
@@ -0,0 +1,639 @@
+/*
+ * CRIS helper routines
+ *
+ * Copyright (c) 2007 AXIS Communications
+ * Written by Edgar E. Iglesias
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "mmu.h"
+#include "exec/helper-proto.h"
+#include "qemu/host-utils.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+
+//#define CRIS_OP_HELPER_DEBUG
+
+
+#ifdef CRIS_OP_HELPER_DEBUG
+#define D(x) x
+#define D_LOG(...) qemu_log(__VA_ARGS__)
+#else
+#define D(x)
+#define D_LOG(...) do { } while (0)
+#endif
+
+#if !defined(CONFIG_USER_ONLY)
+/* Try to fill the TLB and raise an exception on error. If retaddr is
+ NULL, it means that the function was called from C code (i.e. not
+ from generated code or from helper.c). */
+void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ CRISCPU *cpu = CRIS_CPU(cs);
+ CPUCRISState *env = &cpu->env;
+ int ret;
+
+ D_LOG("%s pc=%x tpc=%x ra=%p\n", __func__,
+ env->pc, env->pregs[PR_EDA], (void *)retaddr);
+ ret = cris_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
+ if (unlikely(ret)) {
+ if (retaddr) {
+ /* now we have a real cpu fault */
+ if (cpu_restore_state(cs, retaddr)) {
+ /* Evaluate flags after retranslation. */
+ helper_top_evaluate_flags(env);
+ }
+ }
+ cpu_loop_exit(cs);
+ }
+}
+
+#endif
+
+void helper_raise_exception(CPUCRISState *env, uint32_t index)
+{
+ CPUState *cs = CPU(cris_env_get_cpu(env));
+
+ cs->exception_index = index;
+ cpu_loop_exit(cs);
+}
+
+void helper_tlb_flush_pid(CPUCRISState *env, uint32_t pid)
+{
+#if !defined(CONFIG_USER_ONLY)
+ pid &= 0xff;
+ if (pid != (env->pregs[PR_PID] & 0xff))
+ cris_mmu_flush_pid(env, env->pregs[PR_PID]);
+#endif
+}
+
+void helper_spc_write(CPUCRISState *env, uint32_t new_spc)
+{
+#if !defined(CONFIG_USER_ONLY)
+ CRISCPU *cpu = cris_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
+ tlb_flush_page(cs, env->pregs[PR_SPC]);
+ tlb_flush_page(cs, new_spc);
+#endif
+}
+
+/* Used by the tlb decoder. */
+#define EXTRACT_FIELD(src, start, end) \
+ (((src) >> start) & ((1 << (end - start + 1)) - 1))
+
+void helper_movl_sreg_reg(CPUCRISState *env, uint32_t sreg, uint32_t reg)
+{
+#if !defined(CONFIG_USER_ONLY)
+ CRISCPU *cpu = cris_env_get_cpu(env);
+#endif
+ uint32_t srs;
+ srs = env->pregs[PR_SRS];
+ srs &= 3;
+ env->sregs[srs][sreg] = env->regs[reg];
+
+#if !defined(CONFIG_USER_ONLY)
+ if (srs == 1 || srs == 2) {
+ if (sreg == 6) {
+ /* Writes to tlb-hi write to mm_cause as a side
+ effect. */
+ env->sregs[SFR_RW_MM_TLB_HI] = env->regs[reg];
+ env->sregs[SFR_R_MM_CAUSE] = env->regs[reg];
+ }
+ else if (sreg == 5) {
+ uint32_t set;
+ uint32_t idx;
+ uint32_t lo, hi;
+ uint32_t vaddr;
+ int tlb_v;
+
+ idx = set = env->sregs[SFR_RW_MM_TLB_SEL];
+ set >>= 4;
+ set &= 3;
+
+ idx &= 15;
+ /* We've just made a write to tlb_lo. */
+ lo = env->sregs[SFR_RW_MM_TLB_LO];
+ /* Writes are done via r_mm_cause. */
+ hi = env->sregs[SFR_R_MM_CAUSE];
+
+ vaddr = EXTRACT_FIELD(env->tlbsets[srs-1][set][idx].hi,
+ 13, 31);
+ vaddr <<= TARGET_PAGE_BITS;
+ tlb_v = EXTRACT_FIELD(env->tlbsets[srs-1][set][idx].lo,
+ 3, 3);
+ env->tlbsets[srs - 1][set][idx].lo = lo;
+ env->tlbsets[srs - 1][set][idx].hi = hi;
+
+ D_LOG("tlb flush vaddr=%x v=%d pc=%x\n",
+ vaddr, tlb_v, env->pc);
+ if (tlb_v) {
+ tlb_flush_page(CPU(cpu), vaddr);
+ }
+ }
+ }
+#endif
+}
+
+void helper_movl_reg_sreg(CPUCRISState *env, uint32_t reg, uint32_t sreg)
+{
+ uint32_t srs;
+ env->pregs[PR_SRS] &= 3;
+ srs = env->pregs[PR_SRS];
+
+#if !defined(CONFIG_USER_ONLY)
+ if (srs == 1 || srs == 2)
+ {
+ uint32_t set;
+ uint32_t idx;
+ uint32_t lo, hi;
+
+ idx = set = env->sregs[SFR_RW_MM_TLB_SEL];
+ set >>= 4;
+ set &= 3;
+ idx &= 15;
+
+ /* Update the mirror regs. */
+ hi = env->tlbsets[srs - 1][set][idx].hi;
+ lo = env->tlbsets[srs - 1][set][idx].lo;
+ env->sregs[SFR_RW_MM_TLB_HI] = hi;
+ env->sregs[SFR_RW_MM_TLB_LO] = lo;
+ }
+#endif
+ env->regs[reg] = env->sregs[srs][sreg];
+}
+
+static void cris_ccs_rshift(CPUCRISState *env)
+{
+ uint32_t ccs;
+
+ /* Apply the ccs shift. */
+ ccs = env->pregs[PR_CCS];
+ ccs = (ccs & 0xc0000000) | ((ccs & 0x0fffffff) >> 10);
+ if (ccs & U_FLAG)
+ {
+ /* Enter user mode. */
+ env->ksp = env->regs[R_SP];
+ env->regs[R_SP] = env->pregs[PR_USP];
+ }
+
+ env->pregs[PR_CCS] = ccs;
+}
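+
+/* Rough example (CCS value is made up): a saved flag field of 0x18000
+ * shifts back down to 0x60 = U_FLAG | I_FLAG, and since U is then set
+ * the kernel and user stack pointers are swapped on the way back to
+ * user mode.
+ */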
+
+void helper_rfe(CPUCRISState *env)
+{
+ int rflag = env->pregs[PR_CCS] & R_FLAG;
+
+ D_LOG("rfe: erp=%x pid=%x ccs=%x btarget=%x\n",
+ env->pregs[PR_ERP], env->pregs[PR_PID],
+ env->pregs[PR_CCS],
+ env->btarget);
+
+ cris_ccs_rshift(env);
+
+ /* RFE sets the P_FLAG only if the R_FLAG is not set. */
+ if (!rflag)
+ env->pregs[PR_CCS] |= P_FLAG;
+}
+
+void helper_rfn(CPUCRISState *env)
+{
+ int rflag = env->pregs[PR_CCS] & R_FLAG;
+
+ D_LOG("rfn: erp=%x pid=%x ccs=%x btarget=%x\n",
+ env->pregs[PR_ERP], env->pregs[PR_PID],
+ env->pregs[PR_CCS],
+ env->btarget);
+
+ cris_ccs_rshift(env);
+
+ /* Set the P_FLAG only if the R_FLAG is not set. */
+ if (!rflag)
+ env->pregs[PR_CCS] |= P_FLAG;
+
+ /* Always set the M flag. */
+ env->pregs[PR_CCS] |= M_FLAG_V32;
+}
+
+uint32_t helper_lz(uint32_t t0)
+{
+ return clz32(t0);
+}
+
+uint32_t helper_btst(CPUCRISState *env, uint32_t t0, uint32_t t1, uint32_t ccs)
+{
+ /* FIXME: clean this up. */
+
+ /* From the designer's reference:
+ The N flag is set according to the selected bit in the dest reg.
+ The Z flag is set if the selected bit and all bits to the right are
+ zero.
+ The X flag is cleared.
+ Other flags are left untouched.
+ The destination reg is not affected. */
+ unsigned int fz, sbit, bset, mask, masked_t0;
+
+ sbit = t1 & 31;
+ bset = !!(t0 & (1 << sbit));
+ mask = sbit == 31 ? -1 : (1 << (sbit + 1)) - 1;
+ masked_t0 = t0 & mask;
+ fz = !(masked_t0 | bset);
+
+ /* Clear the X, N and Z flags. */
+ ccs = ccs & ~(X_FLAG | N_FLAG | Z_FLAG);
+ if (env->pregs[PR_VR] < 32)
+ ccs &= ~(V_FLAG | C_FLAG);
+ /* Set the N and Z flags accordingly. */
+ ccs |= (bset << 3) | (fz << 2);
+ return ccs;
+}
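+
+/* Two made-up examples of the semantics described above:
+ *   helper_btst(env, 0x5, 2, ccs): bit 2 of 0x5 is set -> N = 1, Z = 0;
+ *   helper_btst(env, 0x8, 2, ccs): bits 2..0 of 0x8 are all zero -> N = 0, Z = 1.
+ */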
+
+static inline uint32_t evaluate_flags_writeback(CPUCRISState *env,
+ uint32_t flags, uint32_t ccs)
+{
+ unsigned int x, z, mask;
+
+ /* Extended arithmetic: leave the Z flag alone. */
+ x = env->cc_x;
+ mask = env->cc_mask | X_FLAG;
+ if (x) {
+ z = flags & Z_FLAG;
+ mask = mask & ~z;
+ }
+ flags &= mask;
+
+ /* All insns clear the X flag, except setf and clrf. */
+ ccs &= ~mask;
+ ccs |= flags;
+ return ccs;
+}
+
+uint32_t helper_evaluate_flags_muls(CPUCRISState *env,
+ uint32_t ccs, uint32_t res, uint32_t mof)
+{
+ uint32_t flags = 0;
+ int64_t tmp;
+ int dneg;
+
+ dneg = ((int32_t)res) < 0;
+
+ tmp = mof;
+ tmp <<= 32;
+ tmp |= res;
+ if (tmp == 0)
+ flags |= Z_FLAG;
+ else if (tmp < 0)
+ flags |= N_FLAG;
+ if ((dneg && mof != -1)
+ || (!dneg && mof != 0))
+ flags |= V_FLAG;
+ return evaluate_flags_writeback(env, flags, ccs);
+}
+
+uint32_t helper_evaluate_flags_mulu(CPUCRISState *env,
+ uint32_t ccs, uint32_t res, uint32_t mof)
+{
+ uint32_t flags = 0;
+ uint64_t tmp;
+
+ tmp = mof;
+ tmp <<= 32;
+ tmp |= res;
+ if (tmp == 0)
+ flags |= Z_FLAG;
+ else if (tmp >> 63)
+ flags |= N_FLAG;
+ if (mof)
+ flags |= V_FLAG;
+
+ return evaluate_flags_writeback(env, flags, ccs);
+}
+
+uint32_t helper_evaluate_flags_mcp(CPUCRISState *env, uint32_t ccs,
+ uint32_t src, uint32_t dst, uint32_t res)
+{
+ uint32_t flags = 0;
+
+ src = src & 0x80000000;
+ dst = dst & 0x80000000;
+
+ if ((res & 0x80000000L) != 0L)
+ {
+ flags |= N_FLAG;
+ if (!src && !dst)
+ flags |= V_FLAG;
+ else if (src & dst)
+ flags |= R_FLAG;
+ }
+ else
+ {
+ if (res == 0L)
+ flags |= Z_FLAG;
+ if (src & dst)
+ flags |= V_FLAG;
+ if (dst | src)
+ flags |= R_FLAG;
+ }
+
+ return evaluate_flags_writeback(env, flags, ccs);
+}
+
+uint32_t helper_evaluate_flags_alu_4(CPUCRISState *env, uint32_t ccs,
+ uint32_t src, uint32_t dst, uint32_t res)
+{
+ uint32_t flags = 0;
+
+ src = src & 0x80000000;
+ dst = dst & 0x80000000;
+
+ if ((res & 0x80000000L) != 0L)
+ {
+ flags |= N_FLAG;
+ if (!src && !dst)
+ flags |= V_FLAG;
+ else if (src & dst)
+ flags |= C_FLAG;
+ }
+ else
+ {
+ if (res == 0L)
+ flags |= Z_FLAG;
+ if (src & dst)
+ flags |= V_FLAG;
+ if (dst | src)
+ flags |= C_FLAG;
+ }
+
+ return evaluate_flags_writeback(env, flags, ccs);
+}
+
+uint32_t helper_evaluate_flags_sub_4(CPUCRISState *env, uint32_t ccs,
+ uint32_t src, uint32_t dst, uint32_t res)
+{
+ uint32_t flags = 0;
+
+ src = (~src) & 0x80000000;
+ dst = dst & 0x80000000;
+
+ if ((res & 0x80000000L) != 0L)
+ {
+ flags |= N_FLAG;
+ if (!src && !dst)
+ flags |= V_FLAG;
+ else if (src & dst)
+ flags |= C_FLAG;
+ }
+ else
+ {
+ if (res == 0L)
+ flags |= Z_FLAG;
+ if (src & dst)
+ flags |= V_FLAG;
+ if (dst | src)
+ flags |= C_FLAG;
+ }
+
+ flags ^= C_FLAG;
+ return evaluate_flags_writeback(env, flags, ccs);
+}
+
+uint32_t helper_evaluate_flags_move_4(CPUCRISState *env,
+ uint32_t ccs, uint32_t res)
+{
+ uint32_t flags = 0;
+
+ if ((int32_t)res < 0)
+ flags |= N_FLAG;
+ else if (res == 0L)
+ flags |= Z_FLAG;
+
+ return evaluate_flags_writeback(env, flags, ccs);
+}
+uint32_t helper_evaluate_flags_move_2(CPUCRISState *env,
+ uint32_t ccs, uint32_t res)
+{
+ uint32_t flags = 0;
+
+ if ((int16_t)res < 0L)
+ flags |= N_FLAG;
+ else if (res == 0)
+ flags |= Z_FLAG;
+
+ return evaluate_flags_writeback(env, flags, ccs);
+}
+
+/* TODO: This is expensive. We could split things up and only evaluate part of
+ CCR on a need-to-know basis. For now, we simply re-evaluate everything. */
+void helper_evaluate_flags(CPUCRISState *env)
+{
+ uint32_t src, dst, res;
+ uint32_t flags = 0;
+
+ src = env->cc_src;
+ dst = env->cc_dest;
+ res = env->cc_result;
+
+ if (env->cc_op == CC_OP_SUB || env->cc_op == CC_OP_CMP)
+ src = ~src;
+
+ /* Now, evaluate the flags. This stuff is based on
+ Per Zander's CRISv10 simulator. */
+ switch (env->cc_size)
+ {
+ case 1:
+ if ((res & 0x80L) != 0L)
+ {
+ flags |= N_FLAG;
+ if (((src & 0x80L) == 0L)
+ && ((dst & 0x80L) == 0L))
+ {
+ flags |= V_FLAG;
+ }
+ else if (((src & 0x80L) != 0L)
+ && ((dst & 0x80L) != 0L))
+ {
+ flags |= C_FLAG;
+ }
+ }
+ else
+ {
+ if ((res & 0xFFL) == 0L)
+ {
+ flags |= Z_FLAG;
+ }
+ if (((src & 0x80L) != 0L)
+ && ((dst & 0x80L) != 0L))
+ {
+ flags |= V_FLAG;
+ }
+ if ((dst & 0x80L) != 0L
+ || (src & 0x80L) != 0L)
+ {
+ flags |= C_FLAG;
+ }
+ }
+ break;
+ case 2:
+ if ((res & 0x8000L) != 0L)
+ {
+ flags |= N_FLAG;
+ if (((src & 0x8000L) == 0L)
+ && ((dst & 0x8000L) == 0L))
+ {
+ flags |= V_FLAG;
+ }
+ else if (((src & 0x8000L) != 0L)
+ && ((dst & 0x8000L) != 0L))
+ {
+ flags |= C_FLAG;
+ }
+ }
+ else
+ {
+ if ((res & 0xFFFFL) == 0L)
+ {
+ flags |= Z_FLAG;
+ }
+ if (((src & 0x8000L) != 0L)
+ && ((dst & 0x8000L) != 0L))
+ {
+ flags |= V_FLAG;
+ }
+ if ((dst & 0x8000L) != 0L
+ || (src & 0x8000L) != 0L)
+ {
+ flags |= C_FLAG;
+ }
+ }
+ break;
+ case 4:
+ if ((res & 0x80000000L) != 0L)
+ {
+ flags |= N_FLAG;
+ if (((src & 0x80000000L) == 0L)
+ && ((dst & 0x80000000L) == 0L))
+ {
+ flags |= V_FLAG;
+ }
+ else if (((src & 0x80000000L) != 0L) &&
+ ((dst & 0x80000000L) != 0L))
+ {
+ flags |= C_FLAG;
+ }
+ }
+ else
+ {
+ if (res == 0L)
+ flags |= Z_FLAG;
+ if (((src & 0x80000000L) != 0L)
+ && ((dst & 0x80000000L) != 0L))
+ flags |= V_FLAG;
+ if ((dst & 0x80000000L) != 0L
+ || (src & 0x80000000L) != 0L)
+ flags |= C_FLAG;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (env->cc_op == CC_OP_SUB || env->cc_op == CC_OP_CMP)
+ flags ^= C_FLAG;
+
+ env->pregs[PR_CCS] = evaluate_flags_writeback(env, flags,
+ env->pregs[PR_CCS]);
+}
+
+void helper_top_evaluate_flags(CPUCRISState *env)
+{
+ switch (env->cc_op)
+ {
+ case CC_OP_MCP:
+ env->pregs[PR_CCS] = helper_evaluate_flags_mcp(env,
+ env->pregs[PR_CCS], env->cc_src,
+ env->cc_dest, env->cc_result);
+ break;
+ case CC_OP_MULS:
+ env->pregs[PR_CCS] = helper_evaluate_flags_muls(env,
+ env->pregs[PR_CCS], env->cc_result,
+ env->pregs[PR_MOF]);
+ break;
+ case CC_OP_MULU:
+ env->pregs[PR_CCS] = helper_evaluate_flags_mulu(env,
+ env->pregs[PR_CCS], env->cc_result,
+ env->pregs[PR_MOF]);
+ break;
+ case CC_OP_MOVE:
+ case CC_OP_AND:
+ case CC_OP_OR:
+ case CC_OP_XOR:
+ case CC_OP_ASR:
+ case CC_OP_LSR:
+ case CC_OP_LSL:
+ switch (env->cc_size)
+ {
+ case 4:
+ env->pregs[PR_CCS] =
+ helper_evaluate_flags_move_4(env,
+ env->pregs[PR_CCS],
+ env->cc_result);
+ break;
+ case 2:
+ env->pregs[PR_CCS] =
+ helper_evaluate_flags_move_2(env,
+ env->pregs[PR_CCS],
+ env->cc_result);
+ break;
+ default:
+ helper_evaluate_flags(env);
+ break;
+ }
+ break;
+ case CC_OP_FLAGS:
+ /* live. */
+ break;
+ case CC_OP_SUB:
+ case CC_OP_CMP:
+ if (env->cc_size == 4)
+ env->pregs[PR_CCS] =
+ helper_evaluate_flags_sub_4(env,
+ env->pregs[PR_CCS],
+ env->cc_src, env->cc_dest,
+ env->cc_result);
+ else
+ helper_evaluate_flags(env);
+ break;
+ default:
+ {
+ switch (env->cc_size)
+ {
+ case 4:
+ env->pregs[PR_CCS] =
+ helper_evaluate_flags_alu_4(env,
+ env->pregs[PR_CCS],
+ env->cc_src, env->cc_dest,
+ env->cc_result);
+ break;
+ default:
+ helper_evaluate_flags(env);
+ break;
+ }
+ }
+ break;
+ }
+}
diff --git a/target/cris/opcode-cris.h b/target/cris/opcode-cris.h
new file mode 100644
index 0000000000..e7ebb98cd0
--- /dev/null
+++ b/target/cris/opcode-cris.h
@@ -0,0 +1,355 @@
+/* cris.h -- Header file for CRIS opcode and register tables.
+ Copyright (C) 2000, 2001, 2004 Free Software Foundation, Inc.
+ Contributed by Axis Communications AB, Lund, Sweden.
+ Originally written for GAS 1.38.1 by Mikael Asker.
+ Updated, BFDized and GNUified by Hans-Peter Nilsson.
+
+This file is part of GAS, GDB and the GNU binutils.
+
+GAS, GDB, and GNU binutils is free software; you can redistribute it
+and/or modify it under the terms of the GNU General Public License as
+published by the Free Software Foundation; either version 2, or (at your
+option) any later version.
+
+GAS, GDB, and GNU binutils are distributed in the hope that they will be
+useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, see <http://www.gnu.org/licenses/>. */
+
+#ifndef __CRIS_H_INCLUDED_
+#define __CRIS_H_INCLUDED_
+
+#if !defined(__STDC__) && !defined(const)
+#define const
+#endif
+
+
+/* Registers. */
+#define MAX_REG (15)
+#define CRIS_REG_SP (14)
+#define CRIS_REG_PC (15)
+
+/* CPU version control of disassembly and assembly of instructions.
+ May affect how the instruction is assembled, at least the size of
+ immediate operands. */
+enum cris_insn_version_usage
+{
+ /* Any version. */
+ cris_ver_version_all=0,
+
+ /* Indeterminate (intended for disassembly only, or obsolete). */
+ cris_ver_warning,
+
+ /* Only for v0..3 (Etrax 1..4). */
+ cris_ver_v0_3,
+
+ /* Only for v3 or higher (ETRAX 4 and beyond). */
+ cris_ver_v3p,
+
+ /* Only for v8 (Etrax 100). */
+ cris_ver_v8,
+
+ /* Only for v8 or higher (ETRAX 100, ETRAX 100 LX). */
+ cris_ver_v8p,
+
+ /* Only for v0..10. FIXME: Not sure what to do with this. */
+ cris_ver_sim_v0_10,
+
+ /* Only for v0..10. */
+ cris_ver_v0_10,
+
+ /* Only for v3..10. (ETRAX 4, ETRAX 100 and ETRAX 100 LX). */
+ cris_ver_v3_10,
+
+ /* Only for v8..10 (ETRAX 100 and ETRAX 100 LX). */
+ cris_ver_v8_10,
+
+ /* Only for v10 (ETRAX 100 LX) and same series. */
+ cris_ver_v10,
+
+ /* Only for v10 (ETRAX 100 LX) and same series. */
+ cris_ver_v10p,
+
+ /* Only for v32 or higher (codename GUINNESS).
+ Of course some or all of these may change to cris_ver_v32p if/when
+ there's a new revision. */
+ cris_ver_v32p
+};
+
+
+/* Special registers. */
+struct cris_spec_reg
+{
+ const char *const name;
+ unsigned int number;
+
+ /* The size of the register. */
+ unsigned int reg_size;
+
+ /* What CPU version the special register of that name is implemented
+ in. If cris_ver_warning, emit an unimplemented-warning. */
+ enum cris_insn_version_usage applicable_version;
+
+ /* There might be a specific warning for using a special register
+ here. */
+ const char *const warning;
+};
+extern const struct cris_spec_reg cris_spec_regs[];
+
+
+/* Support registers (kind of special too, but not named as such). */
+struct cris_support_reg
+{
+ const char *const name;
+ unsigned int number;
+};
+extern const struct cris_support_reg cris_support_regs[];
+
+/* Opcode-dependent constants. */
+#define AUTOINCR_BIT (0x04)
+
+/* Prefixes. */
+#define BDAP_QUICK_OPCODE (0x0100)
+#define BDAP_QUICK_Z_BITS (0x0e00)
+
+#define BIAP_OPCODE (0x0540)
+#define BIAP_Z_BITS (0x0a80)
+
+#define DIP_OPCODE (0x0970)
+#define DIP_Z_BITS (0xf280)
+
+#define BDAP_INDIR_LOW (0x40)
+#define BDAP_INDIR_LOW_Z (0x80)
+#define BDAP_INDIR_HIGH (0x09)
+#define BDAP_INDIR_HIGH_Z (0x02)
+
+#define BDAP_INDIR_OPCODE (BDAP_INDIR_HIGH * 0x0100 + BDAP_INDIR_LOW)
+#define BDAP_INDIR_Z_BITS (BDAP_INDIR_HIGH_Z * 0x100 + BDAP_INDIR_LOW_Z)
+#define BDAP_PC_LOW (BDAP_INDIR_LOW + CRIS_REG_PC)
+#define BDAP_INCR_HIGH (BDAP_INDIR_HIGH + AUTOINCR_BIT)
+
+/* No prefix must have this code for its "match" bits in the
+ opcode-table. "BCC .+2" will do nicely. */
+#define NO_CRIS_PREFIX 0
+
+/* Definitions for condition codes. */
+#define CC_CC 0x0
+#define CC_HS 0x0
+#define CC_CS 0x1
+#define CC_LO 0x1
+#define CC_NE 0x2
+#define CC_EQ 0x3
+#define CC_VC 0x4
+#define CC_VS 0x5
+#define CC_PL 0x6
+#define CC_MI 0x7
+#define CC_LS 0x8
+#define CC_HI 0x9
+#define CC_GE 0xA
+#define CC_LT 0xB
+#define CC_GT 0xC
+#define CC_LE 0xD
+#define CC_A 0xE
+#define CC_EXT 0xF
+
+/* A table of strings "cc", "cs"... indexed with condition code
+ values as above. */
+extern const char *const cris_cc_strings[];
+
+/* Bcc quick. */
+#define BRANCH_QUICK_LOW (0)
+#define BRANCH_QUICK_HIGH (0)
+#define BRANCH_QUICK_OPCODE (BRANCH_QUICK_HIGH * 0x0100 + BRANCH_QUICK_LOW)
+#define BRANCH_QUICK_Z_BITS (0x0F00)
+
+/* BA quick. */
+#define BA_QUICK_HIGH (BRANCH_QUICK_HIGH + CC_A * 0x10)
+#define BA_QUICK_OPCODE (BA_QUICK_HIGH * 0x100 + BRANCH_QUICK_LOW)
+
+/* Bcc [PC+]. */
+#define BRANCH_PC_LOW (0xFF)
+#define BRANCH_INCR_HIGH (0x0D)
+#define BA_PC_INCR_OPCODE \
+ ((BRANCH_INCR_HIGH + CC_A * 0x10) * 0x0100 + BRANCH_PC_LOW)
+
+/* Jump. */
+/* Note that old versions generated special register 8 (in high bits)
+ and not-that-old versions recognized it as a jump-instruction.
+ That opcode now belongs to JUMPU. */
+#define JUMP_INDIR_OPCODE (0x0930)
+#define JUMP_INDIR_Z_BITS (0xf2c0)
+#define JUMP_PC_INCR_OPCODE \
+ (JUMP_INDIR_OPCODE + AUTOINCR_BIT * 0x0100 + CRIS_REG_PC)
+
+#define MOVE_M_TO_PREG_OPCODE 0x0a30
+#define MOVE_M_TO_PREG_ZBITS 0x01c0
+
+/* BDAP.D N,PC. */
+#define MOVE_PC_INCR_OPCODE_PREFIX \
+ (((BDAP_INCR_HIGH | (CRIS_REG_PC << 4)) << 8) | BDAP_PC_LOW | (2 << 4))
+#define MOVE_PC_INCR_OPCODE_SUFFIX \
+ (MOVE_M_TO_PREG_OPCODE | CRIS_REG_PC | (AUTOINCR_BIT << 8))
+
+#define JUMP_PC_INCR_OPCODE_V32 (0x0DBF)
+
+/* BA DWORD (V32). */
+#define BA_DWORD_OPCODE (0x0EBF)
+
+/* Nop. */
+#define NOP_OPCODE (0x050F)
+#define NOP_Z_BITS (0xFFFF ^ NOP_OPCODE)
+
+#define NOP_OPCODE_V32 (0x05B0)
+#define NOP_Z_BITS_V32 (0xFFFF ^ NOP_OPCODE_V32)
+
+/* For the compatibility mode, let's use "MOVE R0,P0". Doesn't affect
+ registers or flags. Unfortunately shuts off interrupts for one cycle
+ for < v32, but there doesn't seem to be any alternative without that
+ effect. */
+#define NOP_OPCODE_COMMON (0x630)
+#define NOP_OPCODE_ZBITS_COMMON (0xffff & ~NOP_OPCODE_COMMON)
+
+/* LAPC.D */
+#define LAPC_DWORD_OPCODE (0x0D7F)
+#define LAPC_DWORD_Z_BITS (0x0fff & ~LAPC_DWORD_OPCODE)
+
+/* Structure of an opcode table entry. */
+enum cris_imm_oprnd_size_type
+{
+ /* No size is applicable. */
+ SIZE_NONE,
+
+ /* Always 32 bits. */
+ SIZE_FIX_32,
+
+ /* Indicated by size of special register. */
+ SIZE_SPEC_REG,
+
+ /* Indicated by size field, signed. */
+ SIZE_FIELD_SIGNED,
+
+ /* Indicated by size field, unsigned. */
+ SIZE_FIELD_UNSIGNED,
+
+ /* Indicated by size field, no sign implied. */
+ SIZE_FIELD
+};
+
+/* For GDB. FIXME: Is this the best way to handle opcode
+ interpretation? */
+enum cris_op_type
+{
+ cris_not_implemented_op = 0,
+ cris_abs_op,
+ cris_addi_op,
+ cris_asr_op,
+ cris_asrq_op,
+ cris_ax_ei_setf_op,
+ cris_bdap_prefix,
+ cris_biap_prefix,
+ cris_break_op,
+ cris_btst_nop_op,
+ cris_clearf_di_op,
+ cris_dip_prefix,
+ cris_dstep_logshift_mstep_neg_not_op,
+ cris_eight_bit_offset_branch_op,
+ cris_move_mem_to_reg_movem_op,
+ cris_move_reg_to_mem_movem_op,
+ cris_move_to_preg_op,
+ cris_muls_op,
+ cris_mulu_op,
+ cris_none_reg_mode_add_sub_cmp_and_or_move_op,
+ cris_none_reg_mode_clear_test_op,
+ cris_none_reg_mode_jump_op,
+ cris_none_reg_mode_move_from_preg_op,
+ cris_quick_mode_add_sub_op,
+ cris_quick_mode_and_cmp_move_or_op,
+ cris_quick_mode_bdap_prefix,
+ cris_reg_mode_add_sub_cmp_and_or_move_op,
+ cris_reg_mode_clear_op,
+ cris_reg_mode_jump_op,
+ cris_reg_mode_move_from_preg_op,
+ cris_reg_mode_test_op,
+ cris_scc_op,
+ cris_sixteen_bit_offset_branch_op,
+ cris_three_operand_add_sub_cmp_and_or_op,
+ cris_three_operand_bound_op,
+ cris_two_operand_bound_op,
+ cris_xor_op
+};
+
+struct cris_opcode
+{
+ /* The name of the insn. */
+ const char *name;
+
+ /* Bits that must be 1 for a match. */
+ unsigned int match;
+
+ /* Bits that must be 0 for a match. */
+ unsigned int lose;
+
+ /* See the table in "opcodes/cris-opc.c". */
+ const char *args;
+
+ /* Nonzero if this is a delayed branch instruction. */
+ char delayed;
+
+ /* Size of immediate operands. */
+ enum cris_imm_oprnd_size_type imm_oprnd_size;
+
+ /* Indicates which version this insn was first implemented in. */
+ enum cris_insn_version_usage applicable_version;
+
+ /* What kind of operation this is. */
+ enum cris_op_type op;
+};
+extern const struct cris_opcode cris_opcodes[];
+
+
+/* These macros are for the target-specific flags in disassemble_info
+ used at disassembly. */
+
+/* This insn accesses memory. This flag is more trustworthy than
+ checking insn_type for "dis_dref" which does not work for
+ e.g. "JSR [foo]". */
+#define CRIS_DIS_FLAG_MEMREF (1 << 0)
+
+/* The "target" field holds a register number. */
+#define CRIS_DIS_FLAG_MEM_TARGET_IS_REG (1 << 1)
+
+/* The "target2" field holds a register number; add it to "target". */
+#define CRIS_DIS_FLAG_MEM_TARGET2_IS_REG (1 << 2)
+
+/* Yet another add-on: the register in "target2" must be multiplied
+ by 2 before adding to "target". */
+#define CRIS_DIS_FLAG_MEM_TARGET2_MULT2 (1 << 3)
+
+/* Yet another add-on: the register in "target2" must be multiplied
+ by 4 (mutually exclusive with .._MULT2). */
+#define CRIS_DIS_FLAG_MEM_TARGET2_MULT4 (1 << 4)
+
+/* The register in "target2" is an indirect memory reference (of the
+ register there), add to "target". Assumed size is dword (mutually
+ exclusive with .._MULT[24]). */
+#define CRIS_DIS_FLAG_MEM_TARGET2_MEM (1 << 5)
+
+/* Add-on to CRIS_DIS_FLAG_MEM_TARGET2_MEM; the memory access is "byte";
+ sign-extended before adding to "target". */
+#define CRIS_DIS_FLAG_MEM_TARGET2_MEM_BYTE (1 << 6)
+
+/* Add-on to CRIS_DIS_FLAG_MEM_TARGET2_MEM; the memory access is "word";
+ sign-extended before adding to "target". */
+#define CRIS_DIS_FLAG_MEM_TARGET2_MEM_WORD (1 << 7)
+
+#endif /* __CRIS_H_INCLUDED_ */
+
+/*
+ * Local variables:
+ * eval: (c-set-style "gnu")
+ * indent-tabs-mode: t
+ * End:
+ */
diff --git a/target/cris/translate.c b/target/cris/translate.c
new file mode 100644
index 0000000000..b91042743f
--- /dev/null
+++ b/target/cris/translate.c
@@ -0,0 +1,3413 @@
+/*
+ * CRIS emulation for qemu: main translation routines.
+ *
+ * Copyright (c) 2008 AXIS Communications AB
+ * Written by Edgar E. Iglesias.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * FIXME:
+ * The condition code translation is in need of attention.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "disas/disas.h"
+#include "exec/exec-all.h"
+#include "tcg-op.h"
+#include "exec/helper-proto.h"
+#include "mmu.h"
+#include "exec/cpu_ldst.h"
+#include "crisv32-decode.h"
+
+#include "exec/helper-gen.h"
+
+#include "trace-tcg.h"
+#include "exec/log.h"
+
+
+#define DISAS_CRIS 0
+#if DISAS_CRIS
+# define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
+#else
+# define LOG_DIS(...) do { } while (0)
+#endif
+
+#define D(x)
+#define BUG() (gen_BUG(dc, __FILE__, __LINE__))
+#define BUG_ON(x) ({if (x) BUG();})
+
+#define DISAS_SWI 5
+
+/* Used by the decoder. */
+#define EXTRACT_FIELD(src, start, end) \
+ (((src) >> start) & ((1 << (end - start + 1)) - 1))
+
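+/*
+ * Flag bit positions in $ccs (as defined in cpu.h): C = bit 0, V = bit 1,
+ * Z = bit 2, N = bit 3, X = bit 4 and R = bit 8, which is why the RNZV
+ * mask below is 0x10e.
+ */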
+#define CC_MASK_NZ 0xc
+#define CC_MASK_NZV 0xe
+#define CC_MASK_NZVC 0xf
+#define CC_MASK_RNZV 0x10e
+
+static TCGv_env cpu_env;
+static TCGv cpu_R[16];
+static TCGv cpu_PR[16];
+static TCGv cc_x;
+static TCGv cc_src;
+static TCGv cc_dest;
+static TCGv cc_result;
+static TCGv cc_op;
+static TCGv cc_size;
+static TCGv cc_mask;
+
+static TCGv env_btaken;
+static TCGv env_btarget;
+static TCGv env_pc;
+
+#include "exec/gen-icount.h"
+
+/* This is the state at translation time. */
+typedef struct DisasContext {
+ CRISCPU *cpu;
+ target_ulong pc, ppc;
+
+ /* Decoder. */
+ unsigned int (*decoder)(CPUCRISState *env, struct DisasContext *dc);
+ uint32_t ir;
+ uint32_t opcode;
+ unsigned int op1;
+ unsigned int op2;
+ unsigned int zsize, zzsize;
+ unsigned int mode;
+ unsigned int postinc;
+
+ unsigned int size;
+ unsigned int src;
+ unsigned int dst;
+ unsigned int cond;
+
+ int update_cc;
+ int cc_op;
+ int cc_size;
+ uint32_t cc_mask;
+
+ int cc_size_uptodate; /* -1 if invalid, otherwise the last written value. */
+
+ int cc_x_uptodate; /* 0: not up to date, 1: from $ccs, 2 | flags_x: known at translation time. */
+ int flags_uptodate; /* Whether or not $ccs is up-to-date. */
+ int flagx_known; /* Whether the value of the X flag is known at
+ translation time. */
+ int flags_x;
+
+ int clear_x; /* Clear x after this insn? */
+ int clear_prefix; /* Clear prefix after this insn? */
+ int clear_locked_irq; /* Clear the irq lockout. */
+ int cpustate_changed;
+ unsigned int tb_flags; /* tb dependent flags. */
+ int is_jmp;
+
+#define JMP_NOJMP 0
+#define JMP_DIRECT 1
+#define JMP_DIRECT_CC 2
+#define JMP_INDIRECT 3
+ int jmp; /* JMP_NOJMP, JMP_DIRECT, JMP_DIRECT_CC or JMP_INDIRECT. */
+ uint32_t jmp_pc;
+
+ int delayed_branch;
+
+ struct TranslationBlock *tb;
+ int singlestep_enabled;
+} DisasContext;
+
+static void gen_BUG(DisasContext *dc, const char *file, int line)
+{
+ fprintf(stderr, "BUG: pc=%x %s %d\n", dc->pc, file, line);
+ if (qemu_log_separate()) {
+ qemu_log("BUG: pc=%x %s %d\n", dc->pc, file, line);
+ }
+ cpu_abort(CPU(dc->cpu), "%s:%d\n", file, line);
+}
+
+static const char *regnames_v32[] =
+{
+ "$r0", "$r1", "$r2", "$r3",
+ "$r4", "$r5", "$r6", "$r7",
+ "$r8", "$r9", "$r10", "$r11",
+ "$r12", "$r13", "$sp", "$acr",
+};
+static const char *pregnames_v32[] =
+{
+ "$bz", "$vr", "$pid", "$srs",
+ "$wz", "$exs", "$eda", "$mof",
+ "$dz", "$ebp", "$erp", "$srp",
+ "$nrp", "$ccs", "$usp", "$spc",
+};
+
+/* We need this table to handle preg-moves with implicit width. */
+static int preg_sizes[] = {
+ 1, /* bz. */
+ 1, /* vr. */
+ 4, /* pid. */
+ 1, /* srs. */
+ 2, /* wz. */
+ 4, 4, 4,
+ 4, 4, 4, 4,
+ 4, 4, 4, 4,
+};
+
+#define t_gen_mov_TN_env(tn, member) \
+ tcg_gen_ld_tl(tn, cpu_env, offsetof(CPUCRISState, member))
+#define t_gen_mov_env_TN(member, tn) \
+ tcg_gen_st_tl(tn, cpu_env, offsetof(CPUCRISState, member))
+
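+/* Reads of the constant pregs are synthesised here: $bz, $wz and $dz read
+ as zero and $vr, the version register, reads as 32. Everything else comes
+ straight from cpu_PR[]. */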
+static inline void t_gen_mov_TN_preg(TCGv tn, int r)
+{
+ assert(r >= 0 && r <= 15);
+ if (r == PR_BZ || r == PR_WZ || r == PR_DZ) {
+ tcg_gen_movi_tl(tn, 0);
+ } else if (r == PR_VR) {
+ tcg_gen_movi_tl(tn, 32);
+ } else {
+ tcg_gen_mov_tl(tn, cpu_PR[r]);
+ }
+}
+static inline void t_gen_mov_preg_TN(DisasContext *dc, int r, TCGv tn)
+{
+ assert(r >= 0 && r <= 15);
+ if (r == PR_BZ || r == PR_WZ || r == PR_DZ) {
+ return;
+ } else if (r == PR_SRS) {
+ tcg_gen_andi_tl(cpu_PR[r], tn, 3);
+ } else {
+ if (r == PR_PID) {
+ gen_helper_tlb_flush_pid(cpu_env, tn);
+ }
+ if (dc->tb_flags & S_FLAG && r == PR_SPC) {
+ gen_helper_spc_write(cpu_env, tn);
+ } else if (r == PR_CCS) {
+ dc->cpustate_changed = 1;
+ }
+ tcg_gen_mov_tl(cpu_PR[r], tn);
+ }
+}
+
+/* Sign extend at translation time. */
+static int sign_extend(unsigned int val, unsigned int width)
+{
+ int sval;
+
+ /* LSL. */
+ val <<= 31 - width;
+ sval = val;
+ /* ASR. */
+ sval >>= 31 - width;
+ return sval;
+}
+
+static int cris_fetch(CPUCRISState *env, DisasContext *dc, uint32_t addr,
+ unsigned int size, unsigned int sign)
+{
+ int r;
+
+ switch (size) {
+ case 4:
+ {
+ r = cpu_ldl_code(env, addr);
+ break;
+ }
+ case 2:
+ {
+ if (sign) {
+ r = cpu_ldsw_code(env, addr);
+ } else {
+ r = cpu_lduw_code(env, addr);
+ }
+ break;
+ }
+ case 1:
+ {
+ if (sign) {
+ r = cpu_ldsb_code(env, addr);
+ } else {
+ r = cpu_ldub_code(env, addr);
+ }
+ break;
+ }
+ default:
+ cpu_abort(CPU(dc->cpu), "Invalid fetch size %d\n", size);
+ break;
+ }
+ return r;
+}
+
+static void cris_lock_irq(DisasContext *dc)
+{
+ dc->clear_locked_irq = 0;
+ t_gen_mov_env_TN(locked_irq, tcg_const_tl(1));
+}
+
+static inline void t_gen_raise_exception(uint32_t index)
+{
+ TCGv_i32 tmp = tcg_const_i32(index);
+ gen_helper_raise_exception(cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+}
+
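+/*
+ * The decoders mask shift counts to 6 bits, so counts of 32..63 can reach
+ * the helpers below. (31 - b) >> 31 (arithmetic shift) is all ones exactly
+ * when b > 31; that mask is used to force the result to 0 for LSL/LSR and
+ * to all ones for ASR when the count is out of range for a 32-bit shift.
+ */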
+static void t_gen_lsl(TCGv d, TCGv a, TCGv b)
+{
+ TCGv t0, t_31;
+
+ t0 = tcg_temp_new();
+ t_31 = tcg_const_tl(31);
+ tcg_gen_shl_tl(d, a, b);
+
+ tcg_gen_sub_tl(t0, t_31, b);
+ tcg_gen_sar_tl(t0, t0, t_31);
+ tcg_gen_and_tl(t0, t0, d);
+ tcg_gen_xor_tl(d, d, t0);
+ tcg_temp_free(t0);
+ tcg_temp_free(t_31);
+}
+
+static void t_gen_lsr(TCGv d, TCGv a, TCGv b)
+{
+ TCGv t0, t_31;
+
+ t0 = tcg_temp_new();
+ t_31 = tcg_temp_new();
+ tcg_gen_shr_tl(d, a, b);
+
+ tcg_gen_movi_tl(t_31, 31);
+ tcg_gen_sub_tl(t0, t_31, b);
+ tcg_gen_sar_tl(t0, t0, t_31);
+ tcg_gen_and_tl(t0, t0, d);
+ tcg_gen_xor_tl(d, d, t0);
+ tcg_temp_free(t0);
+ tcg_temp_free(t_31);
+}
+
+static void t_gen_asr(TCGv d, TCGv a, TCGv b)
+{
+ TCGv t0, t_31;
+
+ t0 = tcg_temp_new();
+ t_31 = tcg_temp_new();
+ tcg_gen_sar_tl(d, a, b);
+
+ tcg_gen_movi_tl(t_31, 31);
+ tcg_gen_sub_tl(t0, t_31, b);
+ tcg_gen_sar_tl(t0, t0, t_31);
+ tcg_gen_or_tl(d, d, t0);
+ tcg_temp_free(t0);
+ tcg_temp_free(t_31);
+}
+
+static void t_gen_cris_dstep(TCGv d, TCGv a, TCGv b)
+{
+ TCGv t = tcg_temp_new();
+
+ /*
+ * d <<= 1
+ * if (d >= s)
+ * d -= s;
+ */
+ tcg_gen_shli_tl(d, a, 1);
+ tcg_gen_sub_tl(t, d, b);
+ tcg_gen_movcond_tl(TCG_COND_GEU, d, d, b, t, d);
+ tcg_temp_free(t);
+}
+
+static void t_gen_cris_mstep(TCGv d, TCGv a, TCGv b, TCGv ccs)
+{
+ TCGv t;
+
+ /*
+ * d <<= 1
+ * if (n)
+ * d += s;
+ */
+ t = tcg_temp_new();
+ tcg_gen_shli_tl(d, a, 1);
+ tcg_gen_shli_tl(t, ccs, 31 - 3);
+ tcg_gen_sari_tl(t, t, 31);
+ tcg_gen_and_tl(t, t, b);
+ tcg_gen_add_tl(d, d, t);
+ tcg_temp_free(t);
+}
+
+/* Extended arithmetic on CRIS. */
+static inline void t_gen_add_flag(TCGv d, int flag)
+{
+ TCGv c;
+
+ c = tcg_temp_new();
+ t_gen_mov_TN_preg(c, PR_CCS);
+ /* Propagate carry into d. */
+ tcg_gen_andi_tl(c, c, 1 << flag);
+ if (flag) {
+ tcg_gen_shri_tl(c, c, flag);
+ }
+ tcg_gen_add_tl(d, d, c);
+ tcg_temp_free(c);
+}
+
+static inline void t_gen_addx_carry(DisasContext *dc, TCGv d)
+{
+ if (dc->flagx_known) {
+ if (dc->flags_x) {
+ TCGv c;
+
+ c = tcg_temp_new();
+ t_gen_mov_TN_preg(c, PR_CCS);
+ /* C flag is already at bit 0. */
+ tcg_gen_andi_tl(c, c, C_FLAG);
+ tcg_gen_add_tl(d, d, c);
+ tcg_temp_free(c);
+ }
+ } else {
+ TCGv x, c;
+
+ x = tcg_temp_new();
+ c = tcg_temp_new();
+ t_gen_mov_TN_preg(x, PR_CCS);
+ tcg_gen_mov_tl(c, x);
+
+ /* Propagate carry into d if X is set. Branch free. */
+ tcg_gen_andi_tl(c, c, C_FLAG);
+ tcg_gen_andi_tl(x, x, X_FLAG);
+ tcg_gen_shri_tl(x, x, 4);
+
+ tcg_gen_and_tl(x, x, c);
+ tcg_gen_add_tl(d, d, x);
+ tcg_temp_free(x);
+ tcg_temp_free(c);
+ }
+}
+
+static inline void t_gen_subx_carry(DisasContext *dc, TCGv d)
+{
+ if (dc->flagx_known) {
+ if (dc->flags_x) {
+ TCGv c;
+
+ c = tcg_temp_new();
+ t_gen_mov_TN_preg(c, PR_CCS);
+ /* C flag is already at bit 0. */
+ tcg_gen_andi_tl(c, c, C_FLAG);
+ tcg_gen_sub_tl(d, d, c);
+ tcg_temp_free(c);
+ }
+ } else {
+ TCGv x, c;
+
+ x = tcg_temp_new();
+ c = tcg_temp_new();
+ t_gen_mov_TN_preg(x, PR_CCS);
+ tcg_gen_mov_tl(c, x);
+
+ /* Propagate carry into d if X is set. Branch free. */
+ tcg_gen_andi_tl(c, c, C_FLAG);
+ tcg_gen_andi_tl(x, x, X_FLAG);
+ tcg_gen_shri_tl(x, x, 4);
+
+ tcg_gen_and_tl(x, x, c);
+ tcg_gen_sub_tl(d, d, x);
+ tcg_temp_free(x);
+ tcg_temp_free(c);
+ }
+}
+
+/* Swap the two bytes within each half word of the s operand.
+ d = ((s << 8) & 0xff00ff00) | ((s >> 8) & 0x00ff00ff) */
+static inline void t_gen_swapb(TCGv d, TCGv s)
+{
+ TCGv t, org_s;
+
+ t = tcg_temp_new();
+ org_s = tcg_temp_new();
+
+ /* d and s may refer to the same object. */
+ tcg_gen_mov_tl(org_s, s);
+ tcg_gen_shli_tl(t, org_s, 8);
+ tcg_gen_andi_tl(d, t, 0xff00ff00);
+ tcg_gen_shri_tl(t, org_s, 8);
+ tcg_gen_andi_tl(t, t, 0x00ff00ff);
+ tcg_gen_or_tl(d, d, t);
+ tcg_temp_free(t);
+ tcg_temp_free(org_s);
+}
+
+/* Swap the halfwords of the s operand. */
+static inline void t_gen_swapw(TCGv d, TCGv s)
+{
+ TCGv t;
+ /* d and s may refer to the same object. */
+ t = tcg_temp_new();
+ tcg_gen_mov_tl(t, s);
+ tcg_gen_shli_tl(d, t, 16);
+ tcg_gen_shri_tl(t, t, 16);
+ tcg_gen_or_tl(d, d, t);
+ tcg_temp_free(t);
+}
+
+/* Reverse the bits within each byte.
+ T0 = (((T0 << 7) & 0x80808080) |
+ ((T0 << 5) & 0x40404040) |
+ ((T0 << 3) & 0x20202020) |
+ ((T0 << 1) & 0x10101010) |
+ ((T0 >> 1) & 0x08080808) |
+ ((T0 >> 3) & 0x04040404) |
+ ((T0 >> 5) & 0x02020202) |
+ ((T0 >> 7) & 0x01010101));
+ */
+static inline void t_gen_swapr(TCGv d, TCGv s)
+{
+ struct {
+ int shift; /* LSL when positive, LSR when negative. */
+ uint32_t mask;
+ } bitrev[] = {
+ {7, 0x80808080},
+ {5, 0x40404040},
+ {3, 0x20202020},
+ {1, 0x10101010},
+ {-1, 0x08080808},
+ {-3, 0x04040404},
+ {-5, 0x02020202},
+ {-7, 0x01010101}
+ };
+ int i;
+ TCGv t, org_s;
+
+ /* d and s may refer to the same object. */
+ t = tcg_temp_new();
+ org_s = tcg_temp_new();
+ tcg_gen_mov_tl(org_s, s);
+
+ tcg_gen_shli_tl(t, org_s, bitrev[0].shift);
+ tcg_gen_andi_tl(d, t, bitrev[0].mask);
+ for (i = 1; i < ARRAY_SIZE(bitrev); i++) {
+ if (bitrev[i].shift >= 0) {
+ tcg_gen_shli_tl(t, org_s, bitrev[i].shift);
+ } else {
+ tcg_gen_shri_tl(t, org_s, -bitrev[i].shift);
+ }
+ tcg_gen_andi_tl(t, t, bitrev[i].mask);
+ tcg_gen_or_tl(d, d, t);
+ }
+ tcg_temp_free(t);
+ tcg_temp_free(org_s);
+}
+
+static void t_gen_cc_jmp(TCGv pc_true, TCGv pc_false)
+{
+ TCGLabel *l1 = gen_new_label();
+
+ /* Conditional jmp. */
+ tcg_gen_mov_tl(env_pc, pc_false);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
+ tcg_gen_mov_tl(env_pc, pc_true);
+ gen_set_label(l1);
+}
+
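+/* Direct block chaining (goto_tb) is only used when the branch target lies
+ on the same guest page as the start of this TB or as the current insn;
+ otherwise we fall back to an indirect exit. */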
+static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
+{
+#ifndef CONFIG_USER_ONLY
+ return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
+ (dc->ppc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
+#else
+ return true;
+#endif
+}
+
+static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
+{
+ if (use_goto_tb(dc, dest)) {
+ tcg_gen_goto_tb(n);
+ tcg_gen_movi_tl(env_pc, dest);
+ tcg_gen_exit_tb((uintptr_t)dc->tb + n);
+ } else {
+ tcg_gen_movi_tl(env_pc, dest);
+ tcg_gen_exit_tb(0);
+ }
+}
+
+static inline void cris_clear_x_flag(DisasContext *dc)
+{
+ if (dc->flagx_known && dc->flags_x) {
+ dc->flags_uptodate = 0;
+ }
+
+ dc->flagx_known = 1;
+ dc->flags_x = 0;
+}
+
+static void cris_flush_cc_state(DisasContext *dc)
+{
+ if (dc->cc_size_uptodate != dc->cc_size) {
+ tcg_gen_movi_tl(cc_size, dc->cc_size);
+ dc->cc_size_uptodate = dc->cc_size;
+ }
+ tcg_gen_movi_tl(cc_op, dc->cc_op);
+ tcg_gen_movi_tl(cc_mask, dc->cc_mask);
+}
+
+static void cris_evaluate_flags(DisasContext *dc)
+{
+ if (dc->flags_uptodate) {
+ return;
+ }
+
+ cris_flush_cc_state(dc);
+
+ switch (dc->cc_op) {
+ case CC_OP_MCP:
+ gen_helper_evaluate_flags_mcp(cpu_PR[PR_CCS], cpu_env,
+ cpu_PR[PR_CCS], cc_src,
+ cc_dest, cc_result);
+ break;
+ case CC_OP_MULS:
+ gen_helper_evaluate_flags_muls(cpu_PR[PR_CCS], cpu_env,
+ cpu_PR[PR_CCS], cc_result,
+ cpu_PR[PR_MOF]);
+ break;
+ case CC_OP_MULU:
+ gen_helper_evaluate_flags_mulu(cpu_PR[PR_CCS], cpu_env,
+ cpu_PR[PR_CCS], cc_result,
+ cpu_PR[PR_MOF]);
+ break;
+ case CC_OP_MOVE:
+ case CC_OP_AND:
+ case CC_OP_OR:
+ case CC_OP_XOR:
+ case CC_OP_ASR:
+ case CC_OP_LSR:
+ case CC_OP_LSL:
+ switch (dc->cc_size) {
+ case 4:
+ gen_helper_evaluate_flags_move_4(cpu_PR[PR_CCS],
+ cpu_env, cpu_PR[PR_CCS], cc_result);
+ break;
+ case 2:
+ gen_helper_evaluate_flags_move_2(cpu_PR[PR_CCS],
+ cpu_env, cpu_PR[PR_CCS], cc_result);
+ break;
+ default:
+ gen_helper_evaluate_flags(cpu_env);
+ break;
+ }
+ break;
+ case CC_OP_FLAGS:
+ /* live. */
+ break;
+ case CC_OP_SUB:
+ case CC_OP_CMP:
+ if (dc->cc_size == 4) {
+ gen_helper_evaluate_flags_sub_4(cpu_PR[PR_CCS], cpu_env,
+ cpu_PR[PR_CCS], cc_src, cc_dest, cc_result);
+ } else {
+ gen_helper_evaluate_flags(cpu_env);
+ }
+
+ break;
+ default:
+ switch (dc->cc_size) {
+ case 4:
+ gen_helper_evaluate_flags_alu_4(cpu_PR[PR_CCS], cpu_env,
+ cpu_PR[PR_CCS], cc_src, cc_dest, cc_result);
+ break;
+ default:
+ gen_helper_evaluate_flags(cpu_env);
+ break;
+ }
+ break;
+ }
+
+ if (dc->flagx_known) {
+ if (dc->flags_x) {
+ tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], X_FLAG);
+ } else if (dc->cc_op == CC_OP_FLAGS) {
+ tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~X_FLAG);
+ }
+ }
+ dc->flags_uptodate = 1;
+}
+
+static void cris_cc_mask(DisasContext *dc, unsigned int mask)
+{
+ uint32_t ovl;
+
+ if (!mask) {
+ dc->update_cc = 0;
+ return;
+ }
+
+ /* Check if we need to evaluate the condition codes due to
+ CC overlaying. */
+ ovl = (dc->cc_mask ^ mask) & ~mask;
+ if (ovl) {
+ /* TODO: optimize this case. It triggers all the time. */
+ cris_evaluate_flags(dc);
+ }
+ dc->cc_mask = mask;
+ dc->update_cc = 1;
+}
+
+static void cris_update_cc_op(DisasContext *dc, int op, int size)
+{
+ dc->cc_op = op;
+ dc->cc_size = size;
+ dc->flags_uptodate = 0;
+}
+
+static inline void cris_update_cc_x(DisasContext *dc)
+{
+ /* Save the x flag state at the time of the cc snapshot. */
+ if (dc->flagx_known) {
+ if (dc->cc_x_uptodate == (2 | dc->flags_x)) {
+ return;
+ }
+ tcg_gen_movi_tl(cc_x, dc->flags_x);
+ dc->cc_x_uptodate = 2 | dc->flags_x;
+ } else {
+ tcg_gen_andi_tl(cc_x, cpu_PR[PR_CCS], X_FLAG);
+ dc->cc_x_uptodate = 1;
+ }
+}
+
+/* Update cc prior to executing ALU op. Needs source operands untouched. */
+static void cris_pre_alu_update_cc(DisasContext *dc, int op,
+ TCGv dst, TCGv src, int size)
+{
+ if (dc->update_cc) {
+ cris_update_cc_op(dc, op, size);
+ tcg_gen_mov_tl(cc_src, src);
+
+ if (op != CC_OP_MOVE
+ && op != CC_OP_AND
+ && op != CC_OP_OR
+ && op != CC_OP_XOR
+ && op != CC_OP_ASR
+ && op != CC_OP_LSR
+ && op != CC_OP_LSL) {
+ tcg_gen_mov_tl(cc_dest, dst);
+ }
+
+ cris_update_cc_x(dc);
+ }
+}
+
+/* Update cc after executing the ALU op. Needs the result. */
+static inline void cris_update_result(DisasContext *dc, TCGv res)
+{
+ if (dc->update_cc) {
+ tcg_gen_mov_tl(cc_result, res);
+ }
+}
+
+/* Emit the insns for an ALU op; the caller handles any writeback. */
+static void cris_alu_op_exec(DisasContext *dc, int op,
+ TCGv dst, TCGv a, TCGv b, int size)
+{
+ /* Emit the ALU insns. */
+ switch (op) {
+ case CC_OP_ADD:
+ tcg_gen_add_tl(dst, a, b);
+ /* Extended arithmetics. */
+ t_gen_addx_carry(dc, dst);
+ break;
+ case CC_OP_ADDC:
+ tcg_gen_add_tl(dst, a, b);
+ t_gen_add_flag(dst, 0); /* C_FLAG. */
+ break;
+ case CC_OP_MCP:
+ tcg_gen_add_tl(dst, a, b);
+ t_gen_add_flag(dst, 8); /* R_FLAG. */
+ break;
+ case CC_OP_SUB:
+ tcg_gen_sub_tl(dst, a, b);
+ /* Extended arithmetics. */
+ t_gen_subx_carry(dc, dst);
+ break;
+ case CC_OP_MOVE:
+ tcg_gen_mov_tl(dst, b);
+ break;
+ case CC_OP_OR:
+ tcg_gen_or_tl(dst, a, b);
+ break;
+ case CC_OP_AND:
+ tcg_gen_and_tl(dst, a, b);
+ break;
+ case CC_OP_XOR:
+ tcg_gen_xor_tl(dst, a, b);
+ break;
+ case CC_OP_LSL:
+ t_gen_lsl(dst, a, b);
+ break;
+ case CC_OP_LSR:
+ t_gen_lsr(dst, a, b);
+ break;
+ case CC_OP_ASR:
+ t_gen_asr(dst, a, b);
+ break;
+ case CC_OP_NEG:
+ tcg_gen_neg_tl(dst, b);
+ /* Extended arithmetics. */
+ t_gen_subx_carry(dc, dst);
+ break;
+ case CC_OP_LZ:
+ gen_helper_lz(dst, b);
+ break;
+ case CC_OP_MULS:
+ tcg_gen_muls2_tl(dst, cpu_PR[PR_MOF], a, b);
+ break;
+ case CC_OP_MULU:
+ tcg_gen_mulu2_tl(dst, cpu_PR[PR_MOF], a, b);
+ break;
+ case CC_OP_DSTEP:
+ t_gen_cris_dstep(dst, a, b);
+ break;
+ case CC_OP_MSTEP:
+ t_gen_cris_mstep(dst, a, b, cpu_PR[PR_CCS]);
+ break;
+ case CC_OP_BOUND:
+ tcg_gen_movcond_tl(TCG_COND_LEU, dst, a, b, a, b);
+ break;
+ case CC_OP_CMP:
+ tcg_gen_sub_tl(dst, a, b);
+ /* Extended arithmetics. */
+ t_gen_subx_carry(dc, dst);
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "illegal ALU op.\n");
+ BUG();
+ break;
+ }
+
+ if (size == 1) {
+ tcg_gen_andi_tl(dst, dst, 0xff);
+ } else if (size == 2) {
+ tcg_gen_andi_tl(dst, dst, 0xffff);
+ }
+}
+
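+/* Run one ALU op: update the cc snapshot, emit the operation and, for byte
+ and word sizes, merge the result into the low bits of d while leaving the
+ upper bits of the destination untouched. CMP never writes back. */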
+static void cris_alu(DisasContext *dc, int op,
+ TCGv d, TCGv op_a, TCGv op_b, int size)
+{
+ TCGv tmp;
+ int writeback;
+
+ writeback = 1;
+
+ if (op == CC_OP_CMP) {
+ tmp = tcg_temp_new();
+ writeback = 0;
+ } else if (size == 4) {
+ tmp = d;
+ writeback = 0;
+ } else {
+ tmp = tcg_temp_new();
+ }
+
+
+ cris_pre_alu_update_cc(dc, op, op_a, op_b, size);
+ cris_alu_op_exec(dc, op, tmp, op_a, op_b, size);
+ cris_update_result(dc, tmp);
+
+ /* Writeback. */
+ if (writeback) {
+ if (size == 1) {
+ tcg_gen_andi_tl(d, d, ~0xff);
+ } else {
+ tcg_gen_andi_tl(d, d, ~0xffff);
+ }
+ tcg_gen_or_tl(d, d, tmp);
+ }
+ if (!TCGV_EQUAL(tmp, d)) {
+ tcg_temp_free(tmp);
+ }
+}
+
+static int arith_cc(DisasContext *dc)
+{
+ if (dc->update_cc) {
+ switch (dc->cc_op) {
+ case CC_OP_ADDC: return 1;
+ case CC_OP_ADD: return 1;
+ case CC_OP_SUB: return 1;
+ case CC_OP_DSTEP: return 1;
+ case CC_OP_LSL: return 1;
+ case CC_OP_LSR: return 1;
+ case CC_OP_ASR: return 1;
+ case CC_OP_CMP: return 1;
+ case CC_OP_NEG: return 1;
+ case CC_OP_OR: return 1;
+ case CC_OP_AND: return 1;
+ case CC_OP_XOR: return 1;
+ case CC_OP_MULU: return 1;
+ case CC_OP_MULS: return 1;
+ default:
+ return 0;
+ }
+ }
+ return 0;
+}
+
+static void gen_tst_cc (DisasContext *dc, TCGv cc, int cond)
+{
+ int arith_opt, move_opt;
+
+ /* TODO: optimize more condition codes. */
+
+ /*
+ * If the flags are live, we have to inspect the bits of CCS.
+ * Otherwise, if we just did an arithmetic operation, we try to
+ * evaluate the condition code faster.
+ *
+ * When this function is done, cc should be non-zero if the condition
+ * code is true.
+ */
+ arith_opt = arith_cc(dc) && !dc->flags_uptodate;
+ move_opt = (dc->cc_op == CC_OP_MOVE);
+ switch (cond) {
+ case CC_EQ:
+ if ((arith_opt || move_opt)
+ && dc->cc_x_uptodate != (2 | X_FLAG)) {
+ tcg_gen_setcond_tl(TCG_COND_EQ, cc,
+ cc_result, tcg_const_tl(0));
+ } else {
+ cris_evaluate_flags(dc);
+ tcg_gen_andi_tl(cc,
+ cpu_PR[PR_CCS], Z_FLAG);
+ }
+ break;
+ case CC_NE:
+ if ((arith_opt || move_opt)
+ && dc->cc_x_uptodate != (2 | X_FLAG)) {
+ tcg_gen_mov_tl(cc, cc_result);
+ } else {
+ cris_evaluate_flags(dc);
+ tcg_gen_xori_tl(cc, cpu_PR[PR_CCS],
+ Z_FLAG);
+ tcg_gen_andi_tl(cc, cc, Z_FLAG);
+ }
+ break;
+ case CC_CS:
+ cris_evaluate_flags(dc);
+ tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], C_FLAG);
+ break;
+ case CC_CC:
+ cris_evaluate_flags(dc);
+ tcg_gen_xori_tl(cc, cpu_PR[PR_CCS], C_FLAG);
+ tcg_gen_andi_tl(cc, cc, C_FLAG);
+ break;
+ case CC_VS:
+ cris_evaluate_flags(dc);
+ tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], V_FLAG);
+ break;
+ case CC_VC:
+ cris_evaluate_flags(dc);
+ tcg_gen_xori_tl(cc, cpu_PR[PR_CCS],
+ V_FLAG);
+ tcg_gen_andi_tl(cc, cc, V_FLAG);
+ break;
+ case CC_PL:
+ if (arith_opt || move_opt) {
+ int bits = 31;
+
+ if (dc->cc_size == 1) {
+ bits = 7;
+ } else if (dc->cc_size == 2) {
+ bits = 15;
+ }
+
+ tcg_gen_shri_tl(cc, cc_result, bits);
+ tcg_gen_xori_tl(cc, cc, 1);
+ } else {
+ cris_evaluate_flags(dc);
+ tcg_gen_xori_tl(cc, cpu_PR[PR_CCS],
+ N_FLAG);
+ tcg_gen_andi_tl(cc, cc, N_FLAG);
+ }
+ break;
+ case CC_MI:
+ if (arith_opt || move_opt) {
+ int bits = 31;
+
+ if (dc->cc_size == 1) {
+ bits = 7;
+ } else if (dc->cc_size == 2) {
+ bits = 15;
+ }
+
+ tcg_gen_shri_tl(cc, cc_result, bits);
+ tcg_gen_andi_tl(cc, cc, 1);
+ } else {
+ cris_evaluate_flags(dc);
+ tcg_gen_andi_tl(cc, cpu_PR[PR_CCS],
+ N_FLAG);
+ }
+ break;
+ case CC_LS:
+ cris_evaluate_flags(dc);
+ tcg_gen_andi_tl(cc, cpu_PR[PR_CCS],
+ C_FLAG | Z_FLAG);
+ break;
+ case CC_HI:
+ cris_evaluate_flags(dc);
+ {
+ TCGv tmp;
+
+ tmp = tcg_temp_new();
+ tcg_gen_xori_tl(tmp, cpu_PR[PR_CCS],
+ C_FLAG | Z_FLAG);
+ /* Overlay the C flag on top of the Z. */
+ tcg_gen_shli_tl(cc, tmp, 2);
+ tcg_gen_and_tl(cc, tmp, cc);
+ tcg_gen_andi_tl(cc, cc, Z_FLAG);
+
+ tcg_temp_free(tmp);
+ }
+ break;
+ case CC_GE:
+ cris_evaluate_flags(dc);
+ /* Overlay the V flag on top of the N. */
+ tcg_gen_shli_tl(cc, cpu_PR[PR_CCS], 2);
+ tcg_gen_xor_tl(cc,
+ cpu_PR[PR_CCS], cc);
+ tcg_gen_andi_tl(cc, cc, N_FLAG);
+ tcg_gen_xori_tl(cc, cc, N_FLAG);
+ break;
+ case CC_LT:
+ cris_evaluate_flags(dc);
+ /* Overlay the V flag on top of the N. */
+ tcg_gen_shli_tl(cc, cpu_PR[PR_CCS], 2);
+ tcg_gen_xor_tl(cc,
+ cpu_PR[PR_CCS], cc);
+ tcg_gen_andi_tl(cc, cc, N_FLAG);
+ break;
+ case CC_GT:
+ cris_evaluate_flags(dc);
+ {
+ TCGv n, z;
+
+ n = tcg_temp_new();
+ z = tcg_temp_new();
+
+ /* To avoid a shift we overlay everything on
+ the V flag. */
+ tcg_gen_shri_tl(n, cpu_PR[PR_CCS], 2);
+ tcg_gen_shri_tl(z, cpu_PR[PR_CCS], 1);
+ /* invert Z. */
+ tcg_gen_xori_tl(z, z, 2);
+
+ tcg_gen_xor_tl(n, n, cpu_PR[PR_CCS]);
+ tcg_gen_xori_tl(n, n, 2);
+ tcg_gen_and_tl(cc, z, n);
+ tcg_gen_andi_tl(cc, cc, 2);
+
+ tcg_temp_free(n);
+ tcg_temp_free(z);
+ }
+ break;
+ case CC_LE:
+ cris_evaluate_flags(dc);
+ {
+ TCGv n, z;
+
+ n = tcg_temp_new();
+ z = tcg_temp_new();
+
+ /* To avoid a shift we overlay everything on
+ the V flag. */
+ tcg_gen_shri_tl(n, cpu_PR[PR_CCS], 2);
+ tcg_gen_shri_tl(z, cpu_PR[PR_CCS], 1);
+
+ tcg_gen_xor_tl(n, n, cpu_PR[PR_CCS]);
+ tcg_gen_or_tl(cc, z, n);
+ tcg_gen_andi_tl(cc, cc, 2);
+
+ tcg_temp_free(n);
+ tcg_temp_free(z);
+ }
+ break;
+ case CC_P:
+ cris_evaluate_flags(dc);
+ tcg_gen_andi_tl(cc, cpu_PR[PR_CCS], P_FLAG);
+ break;
+ case CC_A:
+ tcg_gen_movi_tl(cc, 1);
+ break;
+ default:
+ BUG();
+ break;
+ };
+}
+
+static void cris_store_direct_jmp(DisasContext *dc)
+{
+ /* Store the direct jmp state into the cpu-state. */
+ if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
+ if (dc->jmp == JMP_DIRECT) {
+ tcg_gen_movi_tl(env_btaken, 1);
+ }
+ tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
+ dc->jmp = JMP_INDIRECT;
+ }
+}
+
+static void cris_prepare_cc_branch (DisasContext *dc,
+ int offset, int cond)
+{
+ /* This helps us reschedule the generated micro-ops so that insns in
+ delay slots are emitted before the actual jump. */
+ dc->delayed_branch = 2;
+ dc->jmp = JMP_DIRECT_CC;
+ dc->jmp_pc = dc->pc + offset;
+
+ gen_tst_cc(dc, env_btaken, cond);
+ tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
+}
+
+
+/* Jumps where the destination is in a live register, for example. The direct
+ types should be used when the destination address is constant, to allow
+ TB chaining. */
+static inline void cris_prepare_jmp (DisasContext *dc, unsigned int type)
+{
+ /* This helps us reschedule the generated micro-ops so that insns in
+ delay slots are emitted before the actual jump. */
+ dc->delayed_branch = 2;
+ dc->jmp = type;
+ if (type == JMP_INDIRECT) {
+ tcg_gen_movi_tl(env_btaken, 1);
+ }
+}
+
+static void gen_load64(DisasContext *dc, TCGv_i64 dst, TCGv addr)
+{
+ int mem_index = cpu_mmu_index(&dc->cpu->env, false);
+
+ /* If we get a fault on a delayslot we must keep the jmp state in
+ the cpu-state to be able to re-execute the jmp. */
+ if (dc->delayed_branch == 1) {
+ cris_store_direct_jmp(dc);
+ }
+
+ tcg_gen_qemu_ld_i64(dst, addr, mem_index, MO_TEQ);
+}
+
+static void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
+ unsigned int size, int sign)
+{
+ int mem_index = cpu_mmu_index(&dc->cpu->env, false);
+
+ /* If we get a fault on a delayslot we must keep the jmp state in
+ the cpu-state to be able to re-execute the jmp. */
+ if (dc->delayed_branch == 1) {
+ cris_store_direct_jmp(dc);
+ }
+
+ tcg_gen_qemu_ld_tl(dst, addr, mem_index,
+ MO_TE + ctz32(size) + (sign ? MO_SIGN : 0));
+}
+
+static void gen_store (DisasContext *dc, TCGv addr, TCGv val,
+ unsigned int size)
+{
+ int mem_index = cpu_mmu_index(&dc->cpu->env, false);
+
+ /* If we get a fault on a delayslot we must keep the jmp state in
+ the cpu-state to be able to re-execute the jmp. */
+ if (dc->delayed_branch == 1) {
+ cris_store_direct_jmp(dc);
+ }
+
+
+ /* Conditional writes. We only support the case where the X and P flags
+ are known at translation time. */
+ if (dc->flagx_known && dc->flags_x && (dc->tb_flags & P_FLAG)) {
+ dc->postinc = 0;
+ cris_evaluate_flags(dc);
+ tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], C_FLAG);
+ return;
+ }
+
+ tcg_gen_qemu_st_tl(val, addr, mem_index, MO_TE + ctz32(size));
+
+ if (dc->flagx_known && dc->flags_x) {
+ cris_evaluate_flags(dc);
+ tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~C_FLAG);
+ }
+}
+
+static inline void t_gen_sext(TCGv d, TCGv s, int size)
+{
+ if (size == 1) {
+ tcg_gen_ext8s_i32(d, s);
+ } else if (size == 2) {
+ tcg_gen_ext16s_i32(d, s);
+ } else if (!TCGV_EQUAL(d, s)) {
+ tcg_gen_mov_tl(d, s);
+ }
+}
+
+static inline void t_gen_zext(TCGv d, TCGv s, int size)
+{
+ if (size == 1) {
+ tcg_gen_ext8u_i32(d, s);
+ } else if (size == 2) {
+ tcg_gen_ext16u_i32(d, s);
+ } else if (!TCGV_EQUAL(d, s)) {
+ tcg_gen_mov_tl(d, s);
+ }
+}
+
+#if DISAS_CRIS
+static char memsize_char(int size)
+{
+ switch (size) {
+ case 1: return 'b'; break;
+ case 2: return 'w'; break;
+ case 4: return 'd'; break;
+ default:
+ return 'x';
+ break;
+ }
+}
+#endif
+
+static inline unsigned int memsize_z(DisasContext *dc)
+{
+ return dc->zsize + 1;
+}
+
+static inline unsigned int memsize_zz(DisasContext *dc)
+{
+ switch (dc->zzsize) {
+ case 0: return 1;
+ case 1: return 2;
+ default:
+ return 4;
+ }
+}
+
+static inline void do_postinc (DisasContext *dc, int size)
+{
+ if (dc->postinc) {
+ tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], size);
+ }
+}
+
+static inline void dec_prep_move_r(DisasContext *dc, int rs, int rd,
+ int size, int s_ext, TCGv dst)
+{
+ if (s_ext) {
+ t_gen_sext(dst, cpu_R[rs], size);
+ } else {
+ t_gen_zext(dst, cpu_R[rs], size);
+ }
+}
+
+/* Prepare T0 and T1 for a register ALU operation.
+ s_ext decides whether operand1 should be sign-extended or zero-extended
+ where needed. */
+static void dec_prep_alu_r(DisasContext *dc, int rs, int rd,
+ int size, int s_ext, TCGv dst, TCGv src)
+{
+ dec_prep_move_r(dc, rs, rd, size, s_ext, src);
+
+ if (s_ext) {
+ t_gen_sext(dst, cpu_R[rd], size);
+ } else {
+ t_gen_zext(dst, cpu_R[rd], size);
+ }
+}
+
+static int dec_prep_move_m(CPUCRISState *env, DisasContext *dc,
+ int s_ext, int memsize, TCGv dst)
+{
+ unsigned int rs;
+ uint32_t imm;
+ int is_imm;
+ int insn_len = 2;
+
+ rs = dc->op1;
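+ /* An operand of [$pc+] (rs == 15 with post-increment) is an immediate
+ constant embedded after the insn; fetch it from the insn stream instead
+ of emitting a load. */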
+ is_imm = rs == 15 && dc->postinc;
+
+ /* Load [$rs] onto T1. */
+ if (is_imm) {
+ insn_len = 2 + memsize;
+ if (memsize == 1) {
+ insn_len++;
+ }
+
+ imm = cris_fetch(env, dc, dc->pc + 2, memsize, s_ext);
+ tcg_gen_movi_tl(dst, imm);
+ dc->postinc = 0;
+ } else {
+ cris_flush_cc_state(dc);
+ gen_load(dc, dst, cpu_R[rs], memsize, 0);
+ if (s_ext) {
+ t_gen_sext(dst, dst, memsize);
+ } else {
+ t_gen_zext(dst, dst, memsize);
+ }
+ }
+ return insn_len;
+}
+
+/* Prepare T0 and T1 for a memory + ALU operation.
+ s_ext decides whether operand1 should be sign-extended or zero-extended
+ where needed. */
+static int dec_prep_alu_m(CPUCRISState *env, DisasContext *dc,
+ int s_ext, int memsize, TCGv dst, TCGv src)
+{
+ int insn_len;
+
+ insn_len = dec_prep_move_m(env, dc, s_ext, memsize, src);
+ tcg_gen_mov_tl(dst, cpu_R[dc->op2]);
+ return insn_len;
+}
+
+#if DISAS_CRIS
+static const char *cc_name(int cc)
+{
+ static const char *cc_names[16] = {
+ "cc", "cs", "ne", "eq", "vc", "vs", "pl", "mi",
+ "ls", "hi", "ge", "lt", "gt", "le", "a", "p"
+ };
+ assert(cc < 16);
+ return cc_names[cc];
+}
+#endif
+
+/* Start of insn decoders. */
+
+static int dec_bccq(CPUCRISState *env, DisasContext *dc)
+{
+ int32_t offset;
+ int sign;
+ uint32_t cond = dc->op2;
+
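+ /* Quick branch: bits 7..1 of the insn hold offset/2 and bit 0 the sign,
+ giving a 9-bit, even, signed offset relative to this insn. */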
+ offset = EXTRACT_FIELD(dc->ir, 1, 7);
+ sign = EXTRACT_FIELD(dc->ir, 0, 0);
+
+ offset *= 2;
+ offset |= sign << 8;
+ offset = sign_extend(offset, 8);
+
+ LOG_DIS("b%s %x\n", cc_name(cond), dc->pc + offset);
+
+ /* op2 holds the condition-code. */
+ cris_cc_mask(dc, 0);
+ cris_prepare_cc_branch(dc, offset, cond);
+ return 2;
+}
+static int dec_addoq(CPUCRISState *env, DisasContext *dc)
+{
+ int32_t imm;
+
+ dc->op1 = EXTRACT_FIELD(dc->ir, 0, 7);
+ imm = sign_extend(dc->op1, 7);
+
+ LOG_DIS("addoq %d, $r%u\n", imm, dc->op2);
+ cris_cc_mask(dc, 0);
+ /* Compute $acr = $r[op2] + imm. */
+ tcg_gen_addi_tl(cpu_R[R_ACR], cpu_R[dc->op2], imm);
+
+ return 2;
+}
+static int dec_addq(CPUCRISState *env, DisasContext *dc)
+{
+ LOG_DIS("addq %u, $r%u\n", dc->op1, dc->op2);
+
+ dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
+
+ cris_cc_mask(dc, CC_MASK_NZVC);
+
+ cris_alu(dc, CC_OP_ADD,
+ cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(dc->op1), 4);
+ return 2;
+}
+static int dec_moveq(CPUCRISState *env, DisasContext *dc)
+{
+ uint32_t imm;
+
+ dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
+ imm = sign_extend(dc->op1, 5);
+ LOG_DIS("moveq %d, $r%u\n", imm, dc->op2);
+
+ tcg_gen_movi_tl(cpu_R[dc->op2], imm);
+ return 2;
+}
+static int dec_subq(CPUCRISState *env, DisasContext *dc)
+{
+ dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
+
+ LOG_DIS("subq %u, $r%u\n", dc->op1, dc->op2);
+
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu(dc, CC_OP_SUB,
+ cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(dc->op1), 4);
+ return 2;
+}
+static int dec_cmpq(CPUCRISState *env, DisasContext *dc)
+{
+ uint32_t imm;
+ dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
+ imm = sign_extend(dc->op1, 5);
+
+ LOG_DIS("cmpq %d, $r%d\n", imm, dc->op2);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+
+ cris_alu(dc, CC_OP_CMP,
+ cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(imm), 4);
+ return 2;
+}
+static int dec_andq(CPUCRISState *env, DisasContext *dc)
+{
+ uint32_t imm;
+ dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
+ imm = sign_extend(dc->op1, 5);
+
+ LOG_DIS("andq %d, $r%d\n", imm, dc->op2);
+ cris_cc_mask(dc, CC_MASK_NZ);
+
+ cris_alu(dc, CC_OP_AND,
+ cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(imm), 4);
+ return 2;
+}
+static int dec_orq(CPUCRISState *env, DisasContext *dc)
+{
+ uint32_t imm;
+ dc->op1 = EXTRACT_FIELD(dc->ir, 0, 5);
+ imm = sign_extend(dc->op1, 5);
+ LOG_DIS("orq %d, $r%d\n", imm, dc->op2);
+ cris_cc_mask(dc, CC_MASK_NZ);
+
+ cris_alu(dc, CC_OP_OR,
+ cpu_R[dc->op2], cpu_R[dc->op2], tcg_const_tl(imm), 4);
+ return 2;
+}
+static int dec_btstq(CPUCRISState *env, DisasContext *dc)
+{
+ dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
+ LOG_DIS("btstq %u, $r%d\n", dc->op1, dc->op2);
+
+ cris_cc_mask(dc, CC_MASK_NZ);
+ cris_evaluate_flags(dc);
+ gen_helper_btst(cpu_PR[PR_CCS], cpu_env, cpu_R[dc->op2],
+ tcg_const_tl(dc->op1), cpu_PR[PR_CCS]);
+ cris_alu(dc, CC_OP_MOVE,
+ cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op2], 4);
+ cris_update_cc_op(dc, CC_OP_FLAGS, 4);
+ dc->flags_uptodate = 1;
+ return 2;
+}
+static int dec_asrq(CPUCRISState *env, DisasContext *dc)
+{
+ dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
+ LOG_DIS("asrq %u, $r%d\n", dc->op1, dc->op2);
+ cris_cc_mask(dc, CC_MASK_NZ);
+
+ tcg_gen_sari_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1);
+ cris_alu(dc, CC_OP_MOVE,
+ cpu_R[dc->op2],
+ cpu_R[dc->op2], cpu_R[dc->op2], 4);
+ return 2;
+}
+static int dec_lslq(CPUCRISState *env, DisasContext *dc)
+{
+ dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
+ LOG_DIS("lslq %u, $r%d\n", dc->op1, dc->op2);
+
+ cris_cc_mask(dc, CC_MASK_NZ);
+
+ tcg_gen_shli_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1);
+
+ cris_alu(dc, CC_OP_MOVE,
+ cpu_R[dc->op2],
+ cpu_R[dc->op2], cpu_R[dc->op2], 4);
+ return 2;
+}
+static int dec_lsrq(CPUCRISState *env, DisasContext *dc)
+{
+ dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
+ LOG_DIS("lsrq %u, $r%d\n", dc->op1, dc->op2);
+
+ cris_cc_mask(dc, CC_MASK_NZ);
+
+ tcg_gen_shri_tl(cpu_R[dc->op2], cpu_R[dc->op2], dc->op1);
+ cris_alu(dc, CC_OP_MOVE,
+ cpu_R[dc->op2],
+ cpu_R[dc->op2], cpu_R[dc->op2], 4);
+ return 2;
+}
+
+static int dec_move_r(CPUCRISState *env, DisasContext *dc)
+{
+ int size = memsize_zz(dc);
+
+ LOG_DIS("move.%c $r%u, $r%u\n",
+ memsize_char(size), dc->op1, dc->op2);
+
+ cris_cc_mask(dc, CC_MASK_NZ);
+ if (size == 4) {
+ dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, cpu_R[dc->op2]);
+ cris_cc_mask(dc, CC_MASK_NZ);
+ cris_update_cc_op(dc, CC_OP_MOVE, 4);
+ cris_update_cc_x(dc);
+ cris_update_result(dc, cpu_R[dc->op2]);
+ } else {
+ TCGv t0;
+
+ t0 = tcg_temp_new();
+ dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, t0);
+ cris_alu(dc, CC_OP_MOVE,
+ cpu_R[dc->op2],
+ cpu_R[dc->op2], t0, size);
+ tcg_temp_free(t0);
+ }
+ return 2;
+}
+
+static int dec_scc_r(CPUCRISState *env, DisasContext *dc)
+{
+ int cond = dc->op2;
+
+ LOG_DIS("s%s $r%u\n",
+ cc_name(cond), dc->op1);
+
+ gen_tst_cc(dc, cpu_R[dc->op1], cond);
+ tcg_gen_setcondi_tl(TCG_COND_NE, cpu_R[dc->op1], cpu_R[dc->op1], 0);
+
+ cris_cc_mask(dc, 0);
+ return 2;
+}
+
+static inline void cris_alu_alloc_temps(DisasContext *dc, int size, TCGv *t)
+{
+ if (size == 4) {
+ t[0] = cpu_R[dc->op2];
+ t[1] = cpu_R[dc->op1];
+ } else {
+ t[0] = tcg_temp_new();
+ t[1] = tcg_temp_new();
+ }
+}
+
+static inline void cris_alu_free_temps(DisasContext *dc, int size, TCGv *t)
+{
+ if (size != 4) {
+ tcg_temp_free(t[0]);
+ tcg_temp_free(t[1]);
+ }
+}
+
+static int dec_and_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int size = memsize_zz(dc);
+
+ LOG_DIS("and.%c $r%u, $r%u\n",
+ memsize_char(size), dc->op1, dc->op2);
+
+ cris_cc_mask(dc, CC_MASK_NZ);
+
+ cris_alu_alloc_temps(dc, size, t);
+ dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
+ cris_alu(dc, CC_OP_AND, cpu_R[dc->op2], t[0], t[1], size);
+ cris_alu_free_temps(dc, size, t);
+ return 2;
+}
+
+static int dec_lz_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t0;
+ LOG_DIS("lz $r%u, $r%u\n",
+ dc->op1, dc->op2);
+ cris_cc_mask(dc, CC_MASK_NZ);
+ t0 = tcg_temp_new();
+ dec_prep_alu_r(dc, dc->op1, dc->op2, 4, 0, cpu_R[dc->op2], t0);
+ cris_alu(dc, CC_OP_LZ, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
+ tcg_temp_free(t0);
+ return 2;
+}
+
+static int dec_lsl_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int size = memsize_zz(dc);
+
+ LOG_DIS("lsl.%c $r%u, $r%u\n",
+ memsize_char(size), dc->op1, dc->op2);
+
+ cris_cc_mask(dc, CC_MASK_NZ);
+ cris_alu_alloc_temps(dc, size, t);
+ dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
+ tcg_gen_andi_tl(t[1], t[1], 63);
+ cris_alu(dc, CC_OP_LSL, cpu_R[dc->op2], t[0], t[1], size);
+ cris_alu_free_temps(dc, size, t);
+ return 2;
+}
+
+static int dec_lsr_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int size = memsize_zz(dc);
+
+ LOG_DIS("lsr.%c $r%u, $r%u\n",
+ memsize_char(size), dc->op1, dc->op2);
+
+ cris_cc_mask(dc, CC_MASK_NZ);
+ cris_alu_alloc_temps(dc, size, t);
+ dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
+ tcg_gen_andi_tl(t[1], t[1], 63);
+ cris_alu(dc, CC_OP_LSR, cpu_R[dc->op2], t[0], t[1], size);
+ cris_alu_free_temps(dc, size, t);
+ return 2;
+}
+
+static int dec_asr_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int size = memsize_zz(dc);
+
+ LOG_DIS("asr.%c $r%u, $r%u\n",
+ memsize_char(size), dc->op1, dc->op2);
+
+ cris_cc_mask(dc, CC_MASK_NZ);
+ cris_alu_alloc_temps(dc, size, t);
+ dec_prep_alu_r(dc, dc->op1, dc->op2, size, 1, t[0], t[1]);
+ tcg_gen_andi_tl(t[1], t[1], 63);
+ cris_alu(dc, CC_OP_ASR, cpu_R[dc->op2], t[0], t[1], size);
+ cris_alu_free_temps(dc, size, t);
+ return 2;
+}
+
+static int dec_muls_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int size = memsize_zz(dc);
+
+ LOG_DIS("muls.%c $r%u, $r%u\n",
+ memsize_char(size), dc->op1, dc->op2);
+ cris_cc_mask(dc, CC_MASK_NZV);
+ cris_alu_alloc_temps(dc, size, t);
+ dec_prep_alu_r(dc, dc->op1, dc->op2, size, 1, t[0], t[1]);
+
+ cris_alu(dc, CC_OP_MULS, cpu_R[dc->op2], t[0], t[1], 4);
+ cris_alu_free_temps(dc, size, t);
+ return 2;
+}
+
+static int dec_mulu_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int size = memsize_zz(dc);
+
+ LOG_DIS("mulu.%c $r%u, $r%u\n",
+ memsize_char(size), dc->op1, dc->op2);
+ cris_cc_mask(dc, CC_MASK_NZV);
+ cris_alu_alloc_temps(dc, size, t);
+ dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
+
+ cris_alu(dc, CC_OP_MULU, cpu_R[dc->op2], t[0], t[1], 4);
+ cris_alu_free_temps(dc, size, t);
+ return 2;
+}
+
+
+static int dec_dstep_r(CPUCRISState *env, DisasContext *dc)
+{
+ LOG_DIS("dstep $r%u, $r%u\n", dc->op1, dc->op2);
+ cris_cc_mask(dc, CC_MASK_NZ);
+ cris_alu(dc, CC_OP_DSTEP,
+ cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op1], 4);
+ return 2;
+}
+
+static int dec_xor_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int size = memsize_zz(dc);
+ LOG_DIS("xor.%c $r%u, $r%u\n",
+ memsize_char(size), dc->op1, dc->op2);
+ BUG_ON(size != 4); /* xor is dword. */
+ cris_cc_mask(dc, CC_MASK_NZ);
+ cris_alu_alloc_temps(dc, size, t);
+ dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
+
+ cris_alu(dc, CC_OP_XOR, cpu_R[dc->op2], t[0], t[1], 4);
+ cris_alu_free_temps(dc, size, t);
+ return 2;
+}
+
+static int dec_bound_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv l0;
+ int size = memsize_zz(dc);
+ LOG_DIS("bound.%c $r%u, $r%u\n",
+ memsize_char(size), dc->op1, dc->op2);
+ cris_cc_mask(dc, CC_MASK_NZ);
+ l0 = tcg_temp_local_new();
+ dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, l0);
+ cris_alu(dc, CC_OP_BOUND, cpu_R[dc->op2], cpu_R[dc->op2], l0, 4);
+ tcg_temp_free(l0);
+ return 2;
+}
+
+static int dec_cmp_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int size = memsize_zz(dc);
+ LOG_DIS("cmp.%c $r%u, $r%u\n",
+ memsize_char(size), dc->op1, dc->op2);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu_alloc_temps(dc, size, t);
+ dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
+
+ cris_alu(dc, CC_OP_CMP, cpu_R[dc->op2], t[0], t[1], size);
+ cris_alu_free_temps(dc, size, t);
+ return 2;
+}
+
+static int dec_abs_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t0;
+
+ LOG_DIS("abs $r%u, $r%u\n",
+ dc->op1, dc->op2);
+ cris_cc_mask(dc, CC_MASK_NZ);
+
+ t0 = tcg_temp_new();
+ tcg_gen_sari_tl(t0, cpu_R[dc->op1], 31);
+ tcg_gen_xor_tl(cpu_R[dc->op2], cpu_R[dc->op1], t0);
+ tcg_gen_sub_tl(cpu_R[dc->op2], cpu_R[dc->op2], t0);
+ tcg_temp_free(t0);
+
+ cris_alu(dc, CC_OP_MOVE,
+ cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op2], 4);
+ return 2;
+}
+
+static int dec_add_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int size = memsize_zz(dc);
+ LOG_DIS("add.%c $r%u, $r%u\n",
+ memsize_char(size), dc->op1, dc->op2);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu_alloc_temps(dc, size, t);
+ dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
+
+ cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], t[0], t[1], size);
+ cris_alu_free_temps(dc, size, t);
+ return 2;
+}
+
+static int dec_addc_r(CPUCRISState *env, DisasContext *dc)
+{
+ LOG_DIS("addc $r%u, $r%u\n",
+ dc->op1, dc->op2);
+ cris_evaluate_flags(dc);
+ /* Set for this insn. */
+ dc->flagx_known = 1;
+ dc->flags_x = X_FLAG;
+
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu(dc, CC_OP_ADDC,
+ cpu_R[dc->op2], cpu_R[dc->op2], cpu_R[dc->op1], 4);
+ return 2;
+}
+
+static int dec_mcp_r(CPUCRISState *env, DisasContext *dc)
+{
+ LOG_DIS("mcp $p%u, $r%u\n",
+ dc->op2, dc->op1);
+ cris_evaluate_flags(dc);
+ cris_cc_mask(dc, CC_MASK_RNZV);
+ cris_alu(dc, CC_OP_MCP,
+ cpu_R[dc->op1], cpu_R[dc->op1], cpu_PR[dc->op2], 4);
+ return 2;
+}
+
+#if DISAS_CRIS
+static char *swapmode_name(int mode, char *modename)
+{
+ int i = 0;
+ if (mode & 8) {
+ modename[i++] = 'n';
+ }
+ if (mode & 4) {
+ modename[i++] = 'w';
+ }
+ if (mode & 2) {
+ modename[i++] = 'b';
+ }
+ if (mode & 1) {
+ modename[i++] = 'r';
+ }
+ modename[i++] = 0;
+ return modename;
+}
+#endif
+
+static int dec_swap_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t0;
+#if DISAS_CRIS
+ char modename[5]; /* Up to four mode letters plus NUL. */
+#endif
+ LOG_DIS("swap%s $r%u\n",
+ swapmode_name(dc->op2, modename), dc->op1);
+
+ cris_cc_mask(dc, CC_MASK_NZ);
+ t0 = tcg_temp_new();
+ tcg_gen_mov_tl(t0, cpu_R[dc->op1]);
+ if (dc->op2 & 8) {
+ tcg_gen_not_tl(t0, t0);
+ }
+ if (dc->op2 & 4) {
+ t_gen_swapw(t0, t0);
+ }
+ if (dc->op2 & 2) {
+ t_gen_swapb(t0, t0);
+ }
+ if (dc->op2 & 1) {
+ t_gen_swapr(t0, t0);
+ }
+ cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op1], cpu_R[dc->op1], t0, 4);
+ tcg_temp_free(t0);
+ return 2;
+}
+
+static int dec_or_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int size = memsize_zz(dc);
+ LOG_DIS("or.%c $r%u, $r%u\n",
+ memsize_char(size), dc->op1, dc->op2);
+ cris_cc_mask(dc, CC_MASK_NZ);
+ cris_alu_alloc_temps(dc, size, t);
+ dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
+ cris_alu(dc, CC_OP_OR, cpu_R[dc->op2], t[0], t[1], size);
+ cris_alu_free_temps(dc, size, t);
+ return 2;
+}
+
+static int dec_addi_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t0;
+ LOG_DIS("addi.%c $r%u, $r%u\n",
+ memsize_char(memsize_zz(dc)), dc->op2, dc->op1);
+ cris_cc_mask(dc, 0);
+ t0 = tcg_temp_new();
+ tcg_gen_shl_tl(t0, cpu_R[dc->op2], tcg_const_tl(dc->zzsize));
+ tcg_gen_add_tl(cpu_R[dc->op1], cpu_R[dc->op1], t0);
+ tcg_temp_free(t0);
+ return 2;
+}
+
+static int dec_addi_acr(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t0;
+ LOG_DIS("addi.%c $r%u, $r%u, $acr\n",
+ memsize_char(memsize_zz(dc)), dc->op2, dc->op1);
+ cris_cc_mask(dc, 0);
+ t0 = tcg_temp_new();
+ tcg_gen_shl_tl(t0, cpu_R[dc->op2], tcg_const_tl(dc->zzsize));
+ tcg_gen_add_tl(cpu_R[R_ACR], cpu_R[dc->op1], t0);
+ tcg_temp_free(t0);
+ return 2;
+}
+
+static int dec_neg_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int size = memsize_zz(dc);
+ LOG_DIS("neg.%c $r%u, $r%u\n",
+ memsize_char(size), dc->op1, dc->op2);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu_alloc_temps(dc, size, t);
+ dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
+
+ cris_alu(dc, CC_OP_NEG, cpu_R[dc->op2], t[0], t[1], size);
+ cris_alu_free_temps(dc, size, t);
+ return 2;
+}
+
+static int dec_btst_r(CPUCRISState *env, DisasContext *dc)
+{
+ LOG_DIS("btst $r%u, $r%u\n",
+ dc->op1, dc->op2);
+ cris_cc_mask(dc, CC_MASK_NZ);
+ cris_evaluate_flags(dc);
+ gen_helper_btst(cpu_PR[PR_CCS], cpu_env, cpu_R[dc->op2],
+ cpu_R[dc->op1], cpu_PR[PR_CCS]);
+ cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2],
+ cpu_R[dc->op2], cpu_R[dc->op2], 4);
+ cris_update_cc_op(dc, CC_OP_FLAGS, 4);
+ dc->flags_uptodate = 1;
+ return 2;
+}
+
+static int dec_sub_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int size = memsize_zz(dc);
+ LOG_DIS("sub.%c $r%u, $r%u\n",
+ memsize_char(size), dc->op1, dc->op2);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu_alloc_temps(dc, size, t);
+ dec_prep_alu_r(dc, dc->op1, dc->op2, size, 0, t[0], t[1]);
+ cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], t[0], t[1], size);
+ cris_alu_free_temps(dc, size, t);
+ return 2;
+}
+
+/* Zero extension. From size to dword. */
+static int dec_movu_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t0;
+ int size = memsize_z(dc);
+ LOG_DIS("movu.%c $r%u, $r%u\n",
+ memsize_char(size),
+ dc->op1, dc->op2);
+
+ cris_cc_mask(dc, CC_MASK_NZ);
+ t0 = tcg_temp_new();
+ dec_prep_move_r(dc, dc->op1, dc->op2, size, 0, t0);
+ cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
+ tcg_temp_free(t0);
+ return 2;
+}
+
+/* Sign extension. From size to dword. */
+static int dec_movs_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t0;
+ int size = memsize_z(dc);
+ LOG_DIS("movs.%c $r%u, $r%u\n",
+ memsize_char(size),
+ dc->op1, dc->op2);
+
+ cris_cc_mask(dc, CC_MASK_NZ);
+ t0 = tcg_temp_new();
+ /* Size can only be qi or hi. */
+ t_gen_sext(t0, cpu_R[dc->op1], size);
+ cris_alu(dc, CC_OP_MOVE,
+ cpu_R[dc->op2], cpu_R[dc->op1], t0, 4);
+ tcg_temp_free(t0);
+ return 2;
+}
+
+/* zero extension. From size to dword. */
+static int dec_addu_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t0;
+ int size = memsize_z(dc);
+ LOG_DIS("addu.%c $r%u, $r%u\n",
+ memsize_char(size),
+ dc->op1, dc->op2);
+
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ t0 = tcg_temp_new();
+ /* Size can only be qi or hi. */
+ t_gen_zext(t0, cpu_R[dc->op1], size);
+ cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
+ tcg_temp_free(t0);
+ return 2;
+}
+
+/* Sign extension. From size to dword. */
+static int dec_adds_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t0;
+ int size = memsize_z(dc);
+ LOG_DIS("adds.%c $r%u, $r%u\n",
+ memsize_char(size),
+ dc->op1, dc->op2);
+
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ t0 = tcg_temp_new();
+ /* Size can only be qi or hi. */
+ t_gen_sext(t0, cpu_R[dc->op1], size);
+ cris_alu(dc, CC_OP_ADD,
+ cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
+ tcg_temp_free(t0);
+ return 2;
+}
+
+/* Zero extension. From size to dword. */
+static int dec_subu_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t0;
+ int size = memsize_z(dc);
+ LOG_DIS("subu.%c $r%u, $r%u\n",
+ memsize_char(size),
+ dc->op1, dc->op2);
+
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ t0 = tcg_temp_new();
+ /* Size can only be qi or hi. */
+ t_gen_zext(t0, cpu_R[dc->op1], size);
+ cris_alu(dc, CC_OP_SUB,
+ cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
+ tcg_temp_free(t0);
+ return 2;
+}
+
+/* Sign extension. From size to dword. */
+static int dec_subs_r(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t0;
+ int size = memsize_z(dc);
+ LOG_DIS("subs.%c $r%u, $r%u\n",
+ memsize_char(size),
+ dc->op1, dc->op2);
+
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ t0 = tcg_temp_new();
+ /* Size can only be qi or hi. */
+ t_gen_sext(t0, cpu_R[dc->op1], size);
+ cris_alu(dc, CC_OP_SUB,
+ cpu_R[dc->op2], cpu_R[dc->op2], t0, 4);
+ tcg_temp_free(t0);
+ return 2;
+}
+
+static int dec_setclrf(CPUCRISState *env, DisasContext *dc)
+{
+ uint32_t flags;
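+ /* setf and clearf share this decoder; opcode bit 2 is clear for setf and set for clearf. */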
+ int set = (~dc->opcode >> 2) & 1;
+
+
+ flags = (EXTRACT_FIELD(dc->ir, 12, 15) << 4)
+ | EXTRACT_FIELD(dc->ir, 0, 3);
+ if (set && flags == 0) {
+ LOG_DIS("nop\n");
+ return 2;
+ } else if (!set && (flags & 0x20)) {
+ LOG_DIS("di\n");
+ } else {
+ LOG_DIS("%sf %x\n", set ? "set" : "clr", flags);
+ }
+
+ /* User space is not allowed to touch these. Silently ignore. */
+ if (dc->tb_flags & U_FLAG) {
+ flags &= ~(S_FLAG | I_FLAG | U_FLAG);
+ }
+
+ if (flags & X_FLAG) {
+ dc->flagx_known = 1;
+ if (set) {
+ dc->flags_x = X_FLAG;
+ } else {
+ dc->flags_x = 0;
+ }
+ }
+
+ /* Break the TB if any of the S, P or I flags changes. */
+ if (flags & (P_FLAG | S_FLAG)) {
+ tcg_gen_movi_tl(env_pc, dc->pc + 2);
+ dc->is_jmp = DISAS_UPDATE;
+ dc->cpustate_changed = 1;
+ }
+
+ /* For the I flag, only act on posedge. */
+ if ((flags & I_FLAG)) {
+ tcg_gen_movi_tl(env_pc, dc->pc + 2);
+ dc->is_jmp = DISAS_UPDATE;
+ dc->cpustate_changed = 1;
+ }
+
+
+ /* Simply decode the flags. */
+ cris_evaluate_flags(dc);
+ cris_update_cc_op(dc, CC_OP_FLAGS, 4);
+ cris_update_cc_x(dc);
+ tcg_gen_movi_tl(cc_op, dc->cc_op);
+
+ if (set) {
+ if (!(dc->tb_flags & U_FLAG) && (flags & U_FLAG)) {
+ /* Enter user mode. */
+ t_gen_mov_env_TN(ksp, cpu_R[R_SP]);
+ tcg_gen_mov_tl(cpu_R[R_SP], cpu_PR[PR_USP]);
+ dc->cpustate_changed = 1;
+ }
+ tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], flags);
+ } else {
+ tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~flags);
+ }
+
+ dc->flags_uptodate = 1;
+ dc->clear_x = 0;
+ return 2;
+}
+
+static int dec_move_rs(CPUCRISState *env, DisasContext *dc)
+{
+ LOG_DIS("move $r%u, $s%u\n", dc->op1, dc->op2);
+ cris_cc_mask(dc, 0);
+ gen_helper_movl_sreg_reg(cpu_env, tcg_const_tl(dc->op2),
+ tcg_const_tl(dc->op1));
+ return 2;
+}
+static int dec_move_sr(CPUCRISState *env, DisasContext *dc)
+{
+ LOG_DIS("move $s%u, $r%u\n", dc->op2, dc->op1);
+ cris_cc_mask(dc, 0);
+ gen_helper_movl_reg_sreg(cpu_env, tcg_const_tl(dc->op1),
+ tcg_const_tl(dc->op2));
+ return 2;
+}
+
+static int dec_move_rp(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ LOG_DIS("move $r%u, $p%u\n", dc->op1, dc->op2);
+ cris_cc_mask(dc, 0);
+
+ t[0] = tcg_temp_new();
+ if (dc->op2 == PR_CCS) {
+ cris_evaluate_flags(dc);
+ tcg_gen_mov_tl(t[0], cpu_R[dc->op1]);
+ if (dc->tb_flags & U_FLAG) {
+ t[1] = tcg_temp_new();
+ /* User space is not allowed to touch all flags. */
+ tcg_gen_andi_tl(t[0], t[0], 0x39f);
+ tcg_gen_andi_tl(t[1], cpu_PR[PR_CCS], ~0x39f);
+ tcg_gen_or_tl(t[0], t[1], t[0]);
+ tcg_temp_free(t[1]);
+ }
+ } else {
+ tcg_gen_mov_tl(t[0], cpu_R[dc->op1]);
+ }
+
+ t_gen_mov_preg_TN(dc, dc->op2, t[0]);
+ if (dc->op2 == PR_CCS) {
+ cris_update_cc_op(dc, CC_OP_FLAGS, 4);
+ dc->flags_uptodate = 1;
+ }
+ tcg_temp_free(t[0]);
+ return 2;
+}
+static int dec_move_pr(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t0;
+ LOG_DIS("move $p%u, $r%u\n", dc->op2, dc->op1);
+ cris_cc_mask(dc, 0);
+
+ if (dc->op2 == PR_CCS) {
+ cris_evaluate_flags(dc);
+ }
+
+ if (dc->op2 == PR_DZ) {
+ tcg_gen_movi_tl(cpu_R[dc->op1], 0);
+ } else {
+ t0 = tcg_temp_new();
+ t_gen_mov_TN_preg(t0, dc->op2);
+ cris_alu(dc, CC_OP_MOVE,
+ cpu_R[dc->op1], cpu_R[dc->op1], t0,
+ preg_sizes[dc->op2]);
+ tcg_temp_free(t0);
+ }
+ return 2;
+}
+
+static int dec_move_mr(CPUCRISState *env, DisasContext *dc)
+{
+ int memsize = memsize_zz(dc);
+ int insn_len;
+ LOG_DIS("move.%c [$r%u%s, $r%u\n",
+ memsize_char(memsize),
+ dc->op1, dc->postinc ? "+]" : "]",
+ dc->op2);
+
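+ /* Dword moves load straight into the destination register; sub-word
+ moves go through a temp and cris_alu, which merges only the low
+ bytes into the destination. */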
+ if (memsize == 4) {
+ insn_len = dec_prep_move_m(env, dc, 0, 4, cpu_R[dc->op2]);
+ cris_cc_mask(dc, CC_MASK_NZ);
+ cris_update_cc_op(dc, CC_OP_MOVE, 4);
+ cris_update_cc_x(dc);
+ cris_update_result(dc, cpu_R[dc->op2]);
+ } else {
+ TCGv t0;
+
+ t0 = tcg_temp_new();
+ insn_len = dec_prep_move_m(env, dc, 0, memsize, t0);
+ cris_cc_mask(dc, CC_MASK_NZ);
+ cris_alu(dc, CC_OP_MOVE,
+ cpu_R[dc->op2], cpu_R[dc->op2], t0, memsize);
+ tcg_temp_free(t0);
+ }
+ do_postinc(dc, memsize);
+ return insn_len;
+}
+
+static inline void cris_alu_m_alloc_temps(TCGv *t)
+{
+ t[0] = tcg_temp_new();
+ t[1] = tcg_temp_new();
+}
+
+static inline void cris_alu_m_free_temps(TCGv *t)
+{
+ tcg_temp_free(t[0]);
+ tcg_temp_free(t[1]);
+}
+
+static int dec_movs_m(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int memsize = memsize_z(dc);
+ int insn_len;
+ LOG_DIS("movs.%c [$r%u%s, $r%u\n",
+ memsize_char(memsize),
+ dc->op1, dc->postinc ? "+]" : "]",
+ dc->op2);
+
+ cris_alu_m_alloc_temps(t);
+ /* sign extend. */
+ insn_len = dec_prep_alu_m(env, dc, 1, memsize, t[0], t[1]);
+ cris_cc_mask(dc, CC_MASK_NZ);
+ cris_alu(dc, CC_OP_MOVE,
+ cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
+ do_postinc(dc, memsize);
+ cris_alu_m_free_temps(t);
+ return insn_len;
+}
+
+static int dec_addu_m(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int memsize = memsize_z(dc);
+ int insn_len;
+ LOG_DIS("addu.%c [$r%u%s, $r%u\n",
+ memsize_char(memsize),
+ dc->op1, dc->postinc ? "+]" : "]",
+ dc->op2);
+
+ cris_alu_m_alloc_temps(t);
+ /* zero extend. */
+ insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu(dc, CC_OP_ADD,
+ cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
+ do_postinc(dc, memsize);
+ cris_alu_m_free_temps(t);
+ return insn_len;
+}
+
+static int dec_adds_m(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int memsize = memsize_z(dc);
+ int insn_len;
+ LOG_DIS("adds.%c [$r%u%s, $r%u\n",
+ memsize_char(memsize),
+ dc->op1, dc->postinc ? "+]" : "]",
+ dc->op2);
+
+ cris_alu_m_alloc_temps(t);
+ /* sign extend. */
+ insn_len = dec_prep_alu_m(env, dc, 1, memsize, t[0], t[1]);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu(dc, CC_OP_ADD, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
+ do_postinc(dc, memsize);
+ cris_alu_m_free_temps(t);
+ return insn_len;
+}
+
+static int dec_subu_m(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int memsize = memsize_z(dc);
+ int insn_len;
+ LOG_DIS("subu.%c [$r%u%s, $r%u\n",
+ memsize_char(memsize),
+ dc->op1, dc->postinc ? "+]" : "]",
+ dc->op2);
+
+ cris_alu_m_alloc_temps(t);
+ /* zero extend. */
+ insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
+ do_postinc(dc, memsize);
+ cris_alu_m_free_temps(t);
+ return insn_len;
+}
+
+static int dec_subs_m(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int memsize = memsize_z(dc);
+ int insn_len;
+ LOG_DIS("subs.%c [$r%u%s, $r%u\n",
+ memsize_char(memsize),
+ dc->op1, dc->postinc ? "+]" : "]",
+ dc->op2);
+
+ cris_alu_m_alloc_temps(t);
+ /* sign extend. */
+ insn_len = dec_prep_alu_m(env, dc, 1, memsize, t[0], t[1]);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
+ do_postinc(dc, memsize);
+ cris_alu_m_free_temps(t);
+ return insn_len;
+}
+
+static int dec_movu_m(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int memsize = memsize_z(dc);
+ int insn_len;
+
+ LOG_DIS("movu.%c [$r%u%s, $r%u\n",
+ memsize_char(memsize),
+ dc->op1, dc->postinc ? "+]" : "]",
+ dc->op2);
+
+ cris_alu_m_alloc_temps(t);
+ insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
+ cris_cc_mask(dc, CC_MASK_NZ);
+ cris_alu(dc, CC_OP_MOVE, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
+ do_postinc(dc, memsize);
+ cris_alu_m_free_temps(t);
+ return insn_len;
+}
+
+static int dec_cmpu_m(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int memsize = memsize_z(dc);
+ int insn_len;
+ LOG_DIS("cmpu.%c [$r%u%s, $r%u\n",
+ memsize_char(memsize),
+ dc->op1, dc->postinc ? "+]" : "]",
+ dc->op2);
+
+ cris_alu_m_alloc_temps(t);
+ insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu(dc, CC_OP_CMP, cpu_R[dc->op2], cpu_R[dc->op2], t[1], 4);
+ do_postinc(dc, memsize);
+ cris_alu_m_free_temps(t);
+ return insn_len;
+}
+
+static int dec_cmps_m(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int memsize = memsize_z(dc);
+ int insn_len;
+ LOG_DIS("cmps.%c [$r%u%s, $r%u\n",
+ memsize_char(memsize),
+ dc->op1, dc->postinc ? "+]" : "]",
+ dc->op2);
+
+ cris_alu_m_alloc_temps(t);
+ insn_len = dec_prep_alu_m(env, dc, 1, memsize, t[0], t[1]);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu(dc, CC_OP_CMP,
+ cpu_R[dc->op2], cpu_R[dc->op2], t[1],
+ memsize_zz(dc));
+ do_postinc(dc, memsize);
+ cris_alu_m_free_temps(t);
+ return insn_len;
+}
+
+static int dec_cmp_m(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int memsize = memsize_zz(dc);
+ int insn_len;
+ LOG_DIS("cmp.%c [$r%u%s, $r%u\n",
+ memsize_char(memsize),
+ dc->op1, dc->postinc ? "+]" : "]",
+ dc->op2);
+
+ cris_alu_m_alloc_temps(t);
+ insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu(dc, CC_OP_CMP,
+ cpu_R[dc->op2], cpu_R[dc->op2], t[1],
+ memsize_zz(dc));
+ do_postinc(dc, memsize);
+ cris_alu_m_free_temps(t);
+ return insn_len;
+}
+
+static int dec_test_m(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int memsize = memsize_zz(dc);
+ int insn_len;
+ LOG_DIS("test.%c [$r%u%s] op2=%x\n",
+ memsize_char(memsize),
+ dc->op1, dc->postinc ? "+]" : "]",
+ dc->op2);
+
+ cris_evaluate_flags(dc);
+
+ cris_alu_m_alloc_temps(t);
+ insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
+ cris_cc_mask(dc, CC_MASK_NZ);
+ tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~3);
+
+ cris_alu(dc, CC_OP_CMP,
+ cpu_R[dc->op2], t[1], tcg_const_tl(0), memsize_zz(dc));
+ do_postinc(dc, memsize);
+ cris_alu_m_free_temps(t);
+ return insn_len;
+}
+
+static int dec_and_m(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int memsize = memsize_zz(dc);
+ int insn_len;
+ LOG_DIS("and.%c [$r%u%s, $r%u\n",
+ memsize_char(memsize),
+ dc->op1, dc->postinc ? "+]" : "]",
+ dc->op2);
+
+ cris_alu_m_alloc_temps(t);
+ insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
+ cris_cc_mask(dc, CC_MASK_NZ);
+ cris_alu(dc, CC_OP_AND, cpu_R[dc->op2], t[0], t[1], memsize_zz(dc));
+ do_postinc(dc, memsize);
+ cris_alu_m_free_temps(t);
+ return insn_len;
+}
+
+static int dec_add_m(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int memsize = memsize_zz(dc);
+ int insn_len;
+ LOG_DIS("add.%c [$r%u%s, $r%u\n",
+ memsize_char(memsize),
+ dc->op1, dc->postinc ? "+]" : "]",
+ dc->op2);
+
+ cris_alu_m_alloc_temps(t);
+ insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu(dc, CC_OP_ADD,
+ cpu_R[dc->op2], t[0], t[1], memsize_zz(dc));
+ do_postinc(dc, memsize);
+ cris_alu_m_free_temps(t);
+ return insn_len;
+}
+
+static int dec_addo_m(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int memsize = memsize_zz(dc);
+ int insn_len;
+ LOG_DIS("add.%c [$r%u%s, $r%u\n",
+ memsize_char(memsize),
+ dc->op1, dc->postinc ? "+]" : "]",
+ dc->op2);
+
+ cris_alu_m_alloc_temps(t);
+ insn_len = dec_prep_alu_m(env, dc, 1, memsize, t[0], t[1]);
+ cris_cc_mask(dc, 0);
+ cris_alu(dc, CC_OP_ADD, cpu_R[R_ACR], t[0], t[1], 4);
+ do_postinc(dc, memsize);
+ cris_alu_m_free_temps(t);
+ return insn_len;
+}
+
+static int dec_bound_m(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv l[2];
+ int memsize = memsize_zz(dc);
+ int insn_len;
+ LOG_DIS("bound.%c [$r%u%s, $r%u\n",
+ memsize_char(memsize),
+ dc->op1, dc->postinc ? "+]" : "]",
+ dc->op2);
+
+ l[0] = tcg_temp_local_new();
+ l[1] = tcg_temp_local_new();
+ insn_len = dec_prep_alu_m(env, dc, 0, memsize, l[0], l[1]);
+ cris_cc_mask(dc, CC_MASK_NZ);
+ cris_alu(dc, CC_OP_BOUND, cpu_R[dc->op2], l[0], l[1], 4);
+ do_postinc(dc, memsize);
+ tcg_temp_free(l[0]);
+ tcg_temp_free(l[1]);
+ return insn_len;
+}
+
+static int dec_addc_mr(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int insn_len = 2;
+ LOG_DIS("addc [$r%u%s, $r%u\n",
+ dc->op1, dc->postinc ? "+]" : "]",
+ dc->op2);
+
+ cris_evaluate_flags(dc);
+
+ /* Set for this insn. */
+ dc->flagx_known = 1;
+ dc->flags_x = X_FLAG;
+
+ cris_alu_m_alloc_temps(t);
+ insn_len = dec_prep_alu_m(env, dc, 0, 4, t[0], t[1]);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu(dc, CC_OP_ADDC, cpu_R[dc->op2], t[0], t[1], 4);
+ do_postinc(dc, 4);
+ cris_alu_m_free_temps(t);
+ return insn_len;
+}
+
+static int dec_sub_m(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int memsize = memsize_zz(dc);
+ int insn_len;
+ LOG_DIS("sub.%c [$r%u%s, $r%u ir=%x zz=%x\n",
+ memsize_char(memsize),
+ dc->op1, dc->postinc ? "+]" : "]",
+ dc->op2, dc->ir, dc->zzsize);
+
+ cris_alu_m_alloc_temps(t);
+ insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu(dc, CC_OP_SUB, cpu_R[dc->op2], t[0], t[1], memsize);
+ do_postinc(dc, memsize);
+ cris_alu_m_free_temps(t);
+ return insn_len;
+}
+
+static int dec_or_m(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int memsize = memsize_zz(dc);
+ int insn_len;
+ LOG_DIS("or.%c [$r%u%s, $r%u pc=%x\n",
+ memsize_char(memsize),
+ dc->op1, dc->postinc ? "+]" : "]",
+ dc->op2, dc->pc);
+
+ cris_alu_m_alloc_temps(t);
+ insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
+ cris_cc_mask(dc, CC_MASK_NZ);
+ cris_alu(dc, CC_OP_OR,
+ cpu_R[dc->op2], t[0], t[1], memsize_zz(dc));
+ do_postinc(dc, memsize);
+ cris_alu_m_free_temps(t);
+ return insn_len;
+}
+
+static int dec_move_mp(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t[2];
+ int memsize = memsize_zz(dc);
+ int insn_len = 2;
+
+ LOG_DIS("move.%c [$r%u%s, $p%u\n",
+ memsize_char(memsize),
+ dc->op1,
+ dc->postinc ? "+]" : "]",
+ dc->op2);
+
+ cris_alu_m_alloc_temps(t);
+ insn_len = dec_prep_alu_m(env, dc, 0, memsize, t[0], t[1]);
+ cris_cc_mask(dc, 0);
+ if (dc->op2 == PR_CCS) {
+ cris_evaluate_flags(dc);
+ if (dc->tb_flags & U_FLAG) {
+ /* User space is not allowed to touch all flags. */
+ tcg_gen_andi_tl(t[1], t[1], 0x39f);
+ tcg_gen_andi_tl(t[0], cpu_PR[PR_CCS], ~0x39f);
+ tcg_gen_or_tl(t[1], t[0], t[1]);
+ }
+ }
+
+ t_gen_mov_preg_TN(dc, dc->op2, t[1]);
+
+ do_postinc(dc, memsize);
+ cris_alu_m_free_temps(t);
+ return insn_len;
+}
+
+static int dec_move_pm(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv t0;
+ int memsize;
+
+ memsize = preg_sizes[dc->op2];
+
+ LOG_DIS("move.%c $p%u, [$r%u%s\n",
+ memsize_char(memsize),
+ dc->op2, dc->op1, dc->postinc ? "+]" : "]");
+
+ /* Prepare the store: address in $r[op1], value (the preg) in t0. */
+ if (dc->op2 == PR_CCS) {
+ cris_evaluate_flags(dc);
+ }
+ t0 = tcg_temp_new();
+ t_gen_mov_TN_preg(t0, dc->op2);
+ cris_flush_cc_state(dc);
+ gen_store(dc, cpu_R[dc->op1], t0, memsize);
+ tcg_temp_free(t0);
+
+ cris_cc_mask(dc, 0);
+ if (dc->postinc) {
+ tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], memsize);
+ }
+ return 2;
+}
+
+static int dec_movem_mr(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv_i64 tmp[16];
+ TCGv tmp32;
+ TCGv addr;
+ int i;
+ int nr = dc->op2 + 1;
+
+ LOG_DIS("movem [$r%u%s, $r%u\n", dc->op1,
+ dc->postinc ? "+]" : "]", dc->op2);
+
+ addr = tcg_temp_new();
+ /* There are probably better ways of doing this. */
+ cris_flush_cc_state(dc);
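+ /* Load the registers pairwise with 64-bit loads; a trailing odd
+ register is loaded as a single 32-bit word below. */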
+ for (i = 0; i < (nr >> 1); i++) {
+ tmp[i] = tcg_temp_new_i64();
+ tcg_gen_addi_tl(addr, cpu_R[dc->op1], i * 8);
+ gen_load64(dc, tmp[i], addr);
+ }
+ if (nr & 1) {
+ tmp32 = tcg_temp_new_i32();
+ tcg_gen_addi_tl(addr, cpu_R[dc->op1], i * 8);
+ gen_load(dc, tmp32, addr, 4, 0);
+ } else {
+ TCGV_UNUSED(tmp32);
+ }
+ tcg_temp_free(addr);
+
+ for (i = 0; i < (nr >> 1); i++) {
+ tcg_gen_extrl_i64_i32(cpu_R[i * 2], tmp[i]);
+ tcg_gen_shri_i64(tmp[i], tmp[i], 32);
+ tcg_gen_extrl_i64_i32(cpu_R[i * 2 + 1], tmp[i]);
+ tcg_temp_free_i64(tmp[i]);
+ }
+ if (nr & 1) {
+ tcg_gen_mov_tl(cpu_R[dc->op2], tmp32);
+ tcg_temp_free(tmp32);
+ }
+
+ /* writeback the updated pointer value. */
+ if (dc->postinc) {
+ tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], nr * 4);
+ }
+
+ /* gen_load might want to evaluate the previous insn's flags. */
+ cris_cc_mask(dc, 0);
+ return 2;
+}
+
+static int dec_movem_rm(CPUCRISState *env, DisasContext *dc)
+{
+ TCGv tmp;
+ TCGv addr;
+ int i;
+
+ LOG_DIS("movem $r%u, [$r%u%s\n", dc->op2, dc->op1,
+ dc->postinc ? "+]" : "]");
+
+ cris_flush_cc_state(dc);
+
+ tmp = tcg_temp_new();
+ addr = tcg_temp_new();
+ tcg_gen_movi_tl(tmp, 4);
+ tcg_gen_mov_tl(addr, cpu_R[dc->op1]);
+ for (i = 0; i <= dc->op2; i++) {
+ /* Perform the store, then displace addr to the next register slot. */
+ gen_store(dc, addr, cpu_R[i], 4);
+ tcg_gen_add_tl(addr, addr, tmp);
+ }
+ if (dc->postinc) {
+ tcg_gen_mov_tl(cpu_R[dc->op1], addr);
+ }
+ cris_cc_mask(dc, 0);
+ tcg_temp_free(tmp);
+ tcg_temp_free(addr);
+ return 2;
+}
+
+static int dec_move_rm(CPUCRISState *env, DisasContext *dc)
+{
+ int memsize;
+
+ memsize = memsize_zz(dc);
+
+ LOG_DIS("move.%c $r%u, [$r%u]\n",
+ memsize_char(memsize), dc->op2, dc->op1);
+
+ /* prepare store. */
+ cris_flush_cc_state(dc);
+ gen_store(dc, cpu_R[dc->op1], cpu_R[dc->op2], memsize);
+
+ if (dc->postinc) {
+ tcg_gen_addi_tl(cpu_R[dc->op1], cpu_R[dc->op1], memsize);
+ }
+ cris_cc_mask(dc, 0);
+ return 2;
+}
+
+static int dec_lapcq(CPUCRISState *env, DisasContext *dc)
+{
+ LOG_DIS("lapcq %x, $r%u\n",
+ dc->pc + dc->op1*2, dc->op2);
+ cris_cc_mask(dc, 0);
+ tcg_gen_movi_tl(cpu_R[dc->op2], dc->pc + dc->op1 * 2);
+ return 2;
+}
+
+static int dec_lapc_im(CPUCRISState *env, DisasContext *dc)
+{
+ unsigned int rd;
+ int32_t imm;
+ int32_t pc;
+
+ rd = dc->op2;
+
+ cris_cc_mask(dc, 0);
+ imm = cris_fetch(env, dc, dc->pc + 2, 4, 0);
+ LOG_DIS("lapc 0x%x, $r%u\n", imm + dc->pc, dc->op2);
+
+ pc = dc->pc;
+ pc += imm;
+ tcg_gen_movi_tl(cpu_R[rd], pc);
+ return 6;
+}
+
+/* Jump to special reg. */
+static int dec_jump_p(CPUCRISState *env, DisasContext *dc)
+{
+ LOG_DIS("jump $p%u\n", dc->op2);
+
+ if (dc->op2 == PR_CCS) {
+ cris_evaluate_flags(dc);
+ }
+ t_gen_mov_TN_preg(env_btarget, dc->op2);
+ /* rete will often have the low bit set to indicate a delay slot. */
+ tcg_gen_andi_tl(env_btarget, env_btarget, ~1);
+ cris_cc_mask(dc, 0);
+ cris_prepare_jmp(dc, JMP_INDIRECT);
+ return 2;
+}
+
+/* Jump and save. */
+static int dec_jas_r(CPUCRISState *env, DisasContext *dc)
+{
+ LOG_DIS("jas $r%u, $p%u\n", dc->op1, dc->op2);
+ cris_cc_mask(dc, 0);
+ /* Store the return address in Pd. */
+ tcg_gen_mov_tl(env_btarget, cpu_R[dc->op1]);
+ if (dc->op2 > 15) {
+ abort();
+ }
+ t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 4));
+
+ cris_prepare_jmp(dc, JMP_INDIRECT);
+ return 2;
+}
+
+static int dec_jas_im(CPUCRISState *env, DisasContext *dc)
+{
+ uint32_t imm;
+
+ imm = cris_fetch(env, dc, dc->pc + 2, 4, 0);
+
+ LOG_DIS("jas 0x%x\n", imm);
+ cris_cc_mask(dc, 0);
+ /* Store the return address in Pd. */
+ t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 8));
+
+ dc->jmp_pc = imm;
+ cris_prepare_jmp(dc, JMP_DIRECT);
+ return 6;
+}
+
+static int dec_jasc_im(CPUCRISState *env, DisasContext *dc)
+{
+ uint32_t imm;
+
+ imm = cris_fetch(env, dc, dc->pc + 2, 4, 0);
+
+ LOG_DIS("jasc 0x%x\n", imm);
+ cris_cc_mask(dc, 0);
+ /* Store the return address in Pd. */
+ t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 8 + 4));
+
+ dc->jmp_pc = imm;
+ cris_prepare_jmp(dc, JMP_DIRECT);
+ return 6;
+}
+
+static int dec_jasc_r(CPUCRISState *env, DisasContext *dc)
+{
+ LOG_DIS("jasc_r $r%u, $p%u\n", dc->op1, dc->op2);
+ cris_cc_mask(dc, 0);
+ /* Store the return address in Pd. */
+ tcg_gen_mov_tl(env_btarget, cpu_R[dc->op1]);
+ t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 4 + 4));
+ cris_prepare_jmp(dc, JMP_INDIRECT);
+ return 2;
+}
+
+static int dec_bcc_im(CPUCRISState *env, DisasContext *dc)
+{
+ int32_t offset;
+ uint32_t cond = dc->op2;
+
+ offset = cris_fetch(env, dc, dc->pc + 2, 2, 1);
+
+ LOG_DIS("b%s %d pc=%x dst=%x\n",
+ cc_name(cond), offset,
+ dc->pc, dc->pc + offset);
+
+ cris_cc_mask(dc, 0);
+ /* op2 holds the condition-code. */
+ cris_prepare_cc_branch(dc, offset, cond);
+ return 4;
+}
+
+static int dec_bas_im(CPUCRISState *env, DisasContext *dc)
+{
+ int32_t simm;
+
+ simm = cris_fetch(env, dc, dc->pc + 2, 4, 0);
+
+ LOG_DIS("bas 0x%x, $p%u\n", dc->pc + simm, dc->op2);
+ cris_cc_mask(dc, 0);
+ /* Store the return address in Pd. */
+ t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 8));
+
+ dc->jmp_pc = dc->pc + simm;
+ cris_prepare_jmp(dc, JMP_DIRECT);
+ return 6;
+}
+
+static int dec_basc_im(CPUCRISState *env, DisasContext *dc)
+{
+ int32_t simm;
+ simm = cris_fetch(env, dc, dc->pc + 2, 4, 0);
+
+ LOG_DIS("basc 0x%x, $p%u\n", dc->pc + simm, dc->op2);
+ cris_cc_mask(dc, 0);
+ /* Store the return address in Pd. */
+ t_gen_mov_preg_TN(dc, dc->op2, tcg_const_tl(dc->pc + 12));
+
+ dc->jmp_pc = dc->pc + simm;
+ cris_prepare_jmp(dc, JMP_DIRECT);
+ return 6;
+}
+
+static int dec_rfe_etc(CPUCRISState *env, DisasContext *dc)
+{
+ cris_cc_mask(dc, 0);
+
+ if (dc->op2 == 15) {
+ tcg_gen_st_i32(tcg_const_i32(1), cpu_env,
+ -offsetof(CRISCPU, env) + offsetof(CPUState, halted));
+ tcg_gen_movi_tl(env_pc, dc->pc + 2);
+ t_gen_raise_exception(EXCP_HLT);
+ return 2;
+ }
+
+ switch (dc->op2 & 7) {
+ case 2:
+ /* rfe. */
+ LOG_DIS("rfe\n");
+ cris_evaluate_flags(dc);
+ gen_helper_rfe(cpu_env);
+ dc->is_jmp = DISAS_UPDATE;
+ break;
+ case 5:
+ /* rfn. */
+ LOG_DIS("rfn\n");
+ cris_evaluate_flags(dc);
+ gen_helper_rfn(cpu_env);
+ dc->is_jmp = DISAS_UPDATE;
+ break;
+ case 6:
+ LOG_DIS("break %d\n", dc->op1);
+ cris_evaluate_flags(dc);
+ /* break. */
+ tcg_gen_movi_tl(env_pc, dc->pc + 2);
+
+ /* Breaks start at 16 in the exception vector. */
+ t_gen_mov_env_TN(trap_vector,
+ tcg_const_tl(dc->op1 + 16));
+ t_gen_raise_exception(EXCP_BREAK);
+ dc->is_jmp = DISAS_UPDATE;
+ break;
+ default:
+ printf("op2=%x\n", dc->op2);
+ BUG();
+ break;
+
+ }
+ return 2;
+}
+
+static int dec_ftag_fidx_d_m(CPUCRISState *env, DisasContext *dc)
+{
+ return 2;
+}
+
+static int dec_ftag_fidx_i_m(CPUCRISState *env, DisasContext *dc)
+{
+ return 2;
+}
+
+static int dec_null(CPUCRISState *env, DisasContext *dc)
+{
+ printf("unknown insn pc=%x opc=%x op1=%x op2=%x\n",
+ dc->pc, dc->opcode, dc->op1, dc->op2);
+ fflush(NULL);
+ BUG();
+ return 2;
+}
+
+static struct decoder_info {
+ struct {
+ uint32_t bits;
+ uint32_t mask;
+ };
+ int (*dec)(CPUCRISState *env, DisasContext *dc);
+} decinfo[] = {
+ /* Order matters here. */
+ {DEC_MOVEQ, dec_moveq},
+ {DEC_BTSTQ, dec_btstq},
+ {DEC_CMPQ, dec_cmpq},
+ {DEC_ADDOQ, dec_addoq},
+ {DEC_ADDQ, dec_addq},
+ {DEC_SUBQ, dec_subq},
+ {DEC_ANDQ, dec_andq},
+ {DEC_ORQ, dec_orq},
+ {DEC_ASRQ, dec_asrq},
+ {DEC_LSLQ, dec_lslq},
+ {DEC_LSRQ, dec_lsrq},
+ {DEC_BCCQ, dec_bccq},
+
+ {DEC_BCC_IM, dec_bcc_im},
+ {DEC_JAS_IM, dec_jas_im},
+ {DEC_JAS_R, dec_jas_r},
+ {DEC_JASC_IM, dec_jasc_im},
+ {DEC_JASC_R, dec_jasc_r},
+ {DEC_BAS_IM, dec_bas_im},
+ {DEC_BASC_IM, dec_basc_im},
+ {DEC_JUMP_P, dec_jump_p},
+ {DEC_LAPC_IM, dec_lapc_im},
+ {DEC_LAPCQ, dec_lapcq},
+
+ {DEC_RFE_ETC, dec_rfe_etc},
+ {DEC_ADDC_MR, dec_addc_mr},
+
+ {DEC_MOVE_MP, dec_move_mp},
+ {DEC_MOVE_PM, dec_move_pm},
+ {DEC_MOVEM_MR, dec_movem_mr},
+ {DEC_MOVEM_RM, dec_movem_rm},
+ {DEC_MOVE_PR, dec_move_pr},
+ {DEC_SCC_R, dec_scc_r},
+ {DEC_SETF, dec_setclrf},
+ {DEC_CLEARF, dec_setclrf},
+
+ {DEC_MOVE_SR, dec_move_sr},
+ {DEC_MOVE_RP, dec_move_rp},
+ {DEC_SWAP_R, dec_swap_r},
+ {DEC_ABS_R, dec_abs_r},
+ {DEC_LZ_R, dec_lz_r},
+ {DEC_MOVE_RS, dec_move_rs},
+ {DEC_BTST_R, dec_btst_r},
+ {DEC_ADDC_R, dec_addc_r},
+
+ {DEC_DSTEP_R, dec_dstep_r},
+ {DEC_XOR_R, dec_xor_r},
+ {DEC_MCP_R, dec_mcp_r},
+ {DEC_CMP_R, dec_cmp_r},
+
+ {DEC_ADDI_R, dec_addi_r},
+ {DEC_ADDI_ACR, dec_addi_acr},
+
+ {DEC_ADD_R, dec_add_r},
+ {DEC_SUB_R, dec_sub_r},
+
+ {DEC_ADDU_R, dec_addu_r},
+ {DEC_ADDS_R, dec_adds_r},
+ {DEC_SUBU_R, dec_subu_r},
+ {DEC_SUBS_R, dec_subs_r},
+ {DEC_LSL_R, dec_lsl_r},
+
+ {DEC_AND_R, dec_and_r},
+ {DEC_OR_R, dec_or_r},
+ {DEC_BOUND_R, dec_bound_r},
+ {DEC_ASR_R, dec_asr_r},
+ {DEC_LSR_R, dec_lsr_r},
+
+ {DEC_MOVU_R, dec_movu_r},
+ {DEC_MOVS_R, dec_movs_r},
+ {DEC_NEG_R, dec_neg_r},
+ {DEC_MOVE_R, dec_move_r},
+
+ {DEC_FTAG_FIDX_I_M, dec_ftag_fidx_i_m},
+ {DEC_FTAG_FIDX_D_M, dec_ftag_fidx_d_m},
+
+ {DEC_MULS_R, dec_muls_r},
+ {DEC_MULU_R, dec_mulu_r},
+
+ {DEC_ADDU_M, dec_addu_m},
+ {DEC_ADDS_M, dec_adds_m},
+ {DEC_SUBU_M, dec_subu_m},
+ {DEC_SUBS_M, dec_subs_m},
+
+ {DEC_CMPU_M, dec_cmpu_m},
+ {DEC_CMPS_M, dec_cmps_m},
+ {DEC_MOVU_M, dec_movu_m},
+ {DEC_MOVS_M, dec_movs_m},
+
+ {DEC_CMP_M, dec_cmp_m},
+ {DEC_ADDO_M, dec_addo_m},
+ {DEC_BOUND_M, dec_bound_m},
+ {DEC_ADD_M, dec_add_m},
+ {DEC_SUB_M, dec_sub_m},
+ {DEC_AND_M, dec_and_m},
+ {DEC_OR_M, dec_or_m},
+ {DEC_MOVE_RM, dec_move_rm},
+ {DEC_TEST_M, dec_test_m},
+ {DEC_MOVE_MR, dec_move_mr},
+
+ {{0, 0}, dec_null}
+};
+
+static unsigned int crisv32_decoder(CPUCRISState *env, DisasContext *dc)
+{
+ int insn_len = 2;
+ int i;
+
+ /* Load a halfword onto the instruction register. */
+ dc->ir = cris_fetch(env, dc, dc->pc, 2, 0);
+
+ /* Now decode it. */
+ dc->opcode = EXTRACT_FIELD(dc->ir, 4, 11);
+ dc->op1 = EXTRACT_FIELD(dc->ir, 0, 3);
+ dc->op2 = EXTRACT_FIELD(dc->ir, 12, 15);
+ dc->zsize = EXTRACT_FIELD(dc->ir, 4, 4);
+ dc->zzsize = EXTRACT_FIELD(dc->ir, 4, 5);
+ dc->postinc = EXTRACT_FIELD(dc->ir, 10, 10);
+
+ /* Scan the decode table for a matching insn. */
+ for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
+ if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
+ insn_len = decinfo[i].dec(env, dc);
+ break;
+ }
+ }
+
+#if !defined(CONFIG_USER_ONLY)
+ /* Single-stepping ? */
+ if (dc->tb_flags & S_FLAG) {
+ TCGLabel *l1 = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_NE, cpu_PR[PR_SPC], dc->pc, l1);
+ /* We treat SPC as a break with an odd trap vector. */
+ cris_evaluate_flags(dc);
+ t_gen_mov_env_TN(trap_vector, tcg_const_tl(3));
+ tcg_gen_movi_tl(env_pc, dc->pc + insn_len);
+ tcg_gen_movi_tl(cpu_PR[PR_SPC], dc->pc + insn_len);
+ t_gen_raise_exception(EXCP_BREAK);
+ gen_set_label(l1);
+ }
+#endif
+ return insn_len;
+}
+
+#include "translate_v10.c"
+
+/*
+ * Delay slots on QEMU/CRIS.
+ *
+ * If an exception hits on a delay slot, the core will let ERP (the Exception
+ * Return Pointer) point to the branch (the previous) insn and set the lsb to
+ * give SW a hint that the exception actually hit on the dslot.
+ *
+ * CRIS expects all PC addresses to be 16-bit aligned. The lsb is ignored by
+ * the core and any jmp to an odd address will mask off that lsb. It is
+ * simply there to let sw know there was an exception on a dslot.
+ *
+ * When the software returns from an exception, the branch will re-execute.
+ * On QEMU care needs to be taken when a branch+delayslot sequence is broken
+ * and the branch and delayslot don't share pages.
+ *
+ * The TB containing the branch insn will set up env->btarget and evaluate
+ * env->btaken. When the translation loop exits we will note that the branch
+ * sequence is broken and let env->dslot be the size of the branch insn (those
+ * vary in length).
+ *
+ * The TB containing the delayslot will have the PC of its real insn (i.e. no
+ * lsb set). It will also expect to have env->dslot set up with the size of
+ * the delay slot so that env->pc - env->dslot points to the branch insn. This
+ * TB will execute the dslot and take the branch, either to btarget or just
+ * one insn ahead.
+ *
+ * When exceptions occur, we check for env->dslot in do_interrupt to detect
+ * broken branch sequences and set up $erp accordingly (i.e. let it point to
+ * the branch and set the lsb). Then env->dslot gets cleared so that the
+ * exception handler can enter. When returning from exceptions (jump $erp)
+ * the lsb gets masked off and we will re-execute the branch insn.
+ *
+ */
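+
+/*
+ * Illustrative example (assumed addresses): a branch at 0x1000 with its
+ * delay slot at 0x1004 on another page.  If an exception hits while
+ * executing the delay slot, do_interrupt sets $erp to 0x1001 (the branch
+ * address with the lsb set) and clears env->dslot; on "jump $erp" the lsb
+ * is masked off and the branch at 0x1000 re-executes.
+ */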
+
+/* generate intermediate code for basic block 'tb'. */
+void gen_intermediate_code(CPUCRISState *env, struct TranslationBlock *tb)
+{
+ CRISCPU *cpu = cris_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ uint32_t pc_start;
+ unsigned int insn_len;
+ struct DisasContext ctx;
+ struct DisasContext *dc = &ctx;
+ uint32_t next_page_start;
+ target_ulong npc;
+ int num_insns;
+ int max_insns;
+
+ if (env->pregs[PR_VR] == 32) {
+ dc->decoder = crisv32_decoder;
+ dc->clear_locked_irq = 0;
+ } else {
+ dc->decoder = crisv10_decoder;
+ dc->clear_locked_irq = 1;
+ }
+
+ /* An odd PC indicates that the branch is re-executing due to an exception
+ * in the delay slot, like in real hw.
+ */
+ pc_start = tb->pc & ~1;
+ dc->cpu = cpu;
+ dc->tb = tb;
+
+ dc->is_jmp = DISAS_NEXT;
+ dc->ppc = pc_start;
+ dc->pc = pc_start;
+ dc->singlestep_enabled = cs->singlestep_enabled;
+ dc->flags_uptodate = 1;
+ dc->flagx_known = 1;
+ dc->flags_x = tb->flags & X_FLAG;
+ dc->cc_x_uptodate = 0;
+ dc->cc_mask = 0;
+ dc->update_cc = 0;
+ dc->clear_prefix = 0;
+
+ cris_update_cc_op(dc, CC_OP_FLAGS, 4);
+ dc->cc_size_uptodate = -1;
+
+ /* Decode TB flags. */
+ dc->tb_flags = tb->flags & (S_FLAG | P_FLAG | U_FLAG \
+ | X_FLAG | PFIX_FLAG);
+ dc->delayed_branch = !!(tb->flags & 7);
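+ /* The low bits of tb->flags hold env->dslot; non-zero means this TB
+ starts inside a branch delay slot. */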
+ if (dc->delayed_branch) {
+ dc->jmp = JMP_INDIRECT;
+ } else {
+ dc->jmp = JMP_NOJMP;
+ }
+
+ dc->cpustate_changed = 0;
+
+ next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
+ num_insns = 0;
+ max_insns = tb->cflags & CF_COUNT_MASK;
+ if (max_insns == 0) {
+ max_insns = CF_COUNT_MASK;
+ }
+ if (max_insns > TCG_MAX_INSNS) {
+ max_insns = TCG_MAX_INSNS;
+ }
+
+ gen_tb_start(tb);
+ do {
+ tcg_gen_insn_start(dc->delayed_branch == 1
+ ? dc->ppc | 1 : dc->pc);
+ num_insns++;
+
+ if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
+ cris_evaluate_flags(dc);
+ tcg_gen_movi_tl(env_pc, dc->pc);
+ t_gen_raise_exception(EXCP_DEBUG);
+ dc->is_jmp = DISAS_UPDATE;
+ /* The address covered by the breakpoint must be included in
+ [tb->pc, tb->pc + tb->size) in order for it to be
+ properly cleared -- thus we increment the PC here so that
+ the logic setting tb->size below does the right thing. */
+ dc->pc += 2;
+ break;
+ }
+
+ /* Pretty disas. */
+ LOG_DIS("%8.8x:\t", dc->pc);
+
+ if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ gen_io_start();
+ }
+ dc->clear_x = 1;
+
+ insn_len = dc->decoder(env, dc);
+ dc->ppc = dc->pc;
+ dc->pc += insn_len;
+ if (dc->clear_x) {
+ cris_clear_x_flag(dc);
+ }
+
+ /* Check for delayed branches here. If we do it before
+ actually generating any host code, the simulator will just
+ loop doing nothing on this program location. */
+ if (dc->delayed_branch) {
+ dc->delayed_branch--;
+ if (dc->delayed_branch == 0) {
+ if (tb->flags & 7) {
+ t_gen_mov_env_TN(dslot, tcg_const_tl(0));
+ }
+ if (dc->cpustate_changed || !dc->flagx_known
+ || (dc->flags_x != (tb->flags & X_FLAG))) {
+ cris_store_direct_jmp(dc);
+ }
+
+ if (dc->clear_locked_irq) {
+ dc->clear_locked_irq = 0;
+ t_gen_mov_env_TN(locked_irq, tcg_const_tl(0));
+ }
+
+ if (dc->jmp == JMP_DIRECT_CC) {
+ TCGLabel *l1 = gen_new_label();
+ cris_evaluate_flags(dc);
+
+ /* Conditional jmp. */
+ tcg_gen_brcondi_tl(TCG_COND_EQ,
+ env_btaken, 0, l1);
+ gen_goto_tb(dc, 1, dc->jmp_pc);
+ gen_set_label(l1);
+ gen_goto_tb(dc, 0, dc->pc);
+ dc->is_jmp = DISAS_TB_JUMP;
+ dc->jmp = JMP_NOJMP;
+ } else if (dc->jmp == JMP_DIRECT) {
+ cris_evaluate_flags(dc);
+ gen_goto_tb(dc, 0, dc->jmp_pc);
+ dc->is_jmp = DISAS_TB_JUMP;
+ dc->jmp = JMP_NOJMP;
+ } else {
+ t_gen_cc_jmp(env_btarget, tcg_const_tl(dc->pc));
+ dc->is_jmp = DISAS_JUMP;
+ }
+ break;
+ }
+ }
+
+ /* If we are re-executing a branch due to exceptions on
+ delay slots, don't break. */
+ if (!(tb->pc & 1) && cs->singlestep_enabled) {
+ break;
+ }
+ } while (!dc->is_jmp && !dc->cpustate_changed
+ && !tcg_op_buf_full()
+ && !singlestep
+ && (dc->pc < next_page_start)
+ && num_insns < max_insns);
+
+ if (dc->clear_locked_irq) {
+ t_gen_mov_env_TN(locked_irq, tcg_const_tl(0));
+ }
+
+ npc = dc->pc;
+
+ if (tb->cflags & CF_LAST_IO)
+ gen_io_end();
+ /* Force an update if the per-tb cpu state has changed. */
+ if (dc->is_jmp == DISAS_NEXT
+ && (dc->cpustate_changed || !dc->flagx_known
+ || (dc->flags_x != (tb->flags & X_FLAG)))) {
+ dc->is_jmp = DISAS_UPDATE;
+ tcg_gen_movi_tl(env_pc, npc);
+ }
+ /* Broken branch+delayslot sequence. */
+ if (dc->delayed_branch == 1) {
+ /* Set env->dslot to the size of the branch insn. */
+ t_gen_mov_env_TN(dslot, tcg_const_tl(dc->pc - dc->ppc));
+ cris_store_direct_jmp(dc);
+ }
+
+ cris_evaluate_flags(dc);
+
+ if (unlikely(cs->singlestep_enabled)) {
+ if (dc->is_jmp == DISAS_NEXT) {
+ tcg_gen_movi_tl(env_pc, npc);
+ }
+ t_gen_raise_exception(EXCP_DEBUG);
+ } else {
+ switch (dc->is_jmp) {
+ case DISAS_NEXT:
+ gen_goto_tb(dc, 1, npc);
+ break;
+ default:
+ case DISAS_JUMP:
+ case DISAS_UPDATE:
+ /* indicate that the hash table must be used
+ to find the next TB */
+ tcg_gen_exit_tb(0);
+ break;
+ case DISAS_SWI:
+ case DISAS_TB_JUMP:
+ /* nothing more to generate */
+ break;
+ }
+ }
+ gen_tb_end(tb, num_insns);
+
+ tb->size = dc->pc - pc_start;
+ tb->icount = num_insns;
+
+#ifdef DEBUG_DISAS
+#if !DISAS_CRIS
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
+ && qemu_log_in_addr_range(pc_start)) {
+ qemu_log_lock();
+ qemu_log("--------------\n");
+ qemu_log("IN: %s\n", lookup_symbol(pc_start));
+ log_target_disas(cs, pc_start, dc->pc - pc_start,
+ env->pregs[PR_VR]);
+ qemu_log("\nisize=%d osize=%d\n",
+ dc->pc - pc_start, tcg_op_buf_count());
+ qemu_log_unlock();
+ }
+#endif
+#endif
+}
+
+void cris_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
+ int flags)
+{
+ CRISCPU *cpu = CRIS_CPU(cs);
+ CPUCRISState *env = &cpu->env;
+ const char **regnames;
+ const char **pregnames;
+ int i;
+
+ if (!env || !f) {
+ return;
+ }
+ if (env->pregs[PR_VR] < 32) {
+ pregnames = pregnames_v10;
+ regnames = regnames_v10;
+ } else {
+ pregnames = pregnames_v32;
+ regnames = regnames_v32;
+ }
+
+ cpu_fprintf(f, "PC=%x CCS=%x btaken=%d btarget=%x\n"
+ "cc_op=%d cc_src=%d cc_dest=%d cc_result=%x cc_mask=%x\n",
+ env->pc, env->pregs[PR_CCS], env->btaken, env->btarget,
+ env->cc_op,
+ env->cc_src, env->cc_dest, env->cc_result, env->cc_mask);
+
+
+ for (i = 0; i < 16; i++) {
+ cpu_fprintf(f, "%s=%8.8x ", regnames[i], env->regs[i]);
+ if ((i + 1) % 4 == 0) {
+ cpu_fprintf(f, "\n");
+ }
+ }
+ cpu_fprintf(f, "\nspecial regs:\n");
+ for (i = 0; i < 16; i++) {
+ cpu_fprintf(f, "%s=%8.8x ", pregnames[i], env->pregs[i]);
+ if ((i + 1) % 4 == 0) {
+ cpu_fprintf(f, "\n");
+ }
+ }
+ if (env->pregs[PR_VR] >= 32) {
+ uint32_t srs = env->pregs[PR_SRS];
+ cpu_fprintf(f, "\nsupport function regs bank %x:\n", srs);
+ if (srs < ARRAY_SIZE(env->sregs)) {
+ for (i = 0; i < 16; i++) {
+ cpu_fprintf(f, "s%2.2d=%8.8x ",
+ i, env->sregs[srs][i]);
+ if ((i + 1) % 4 == 0) {
+ cpu_fprintf(f, "\n");
+ }
+ }
+ }
+ }
+ cpu_fprintf(f, "\n\n");
+
+}
+
+void cris_initialize_tcg(void)
+{
+ int i;
+
+ cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+ tcg_ctx.tcg_env = cpu_env;
+ cc_x = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, cc_x), "cc_x");
+ cc_src = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, cc_src), "cc_src");
+ cc_dest = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, cc_dest),
+ "cc_dest");
+ cc_result = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, cc_result),
+ "cc_result");
+ cc_op = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, cc_op), "cc_op");
+ cc_size = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, cc_size),
+ "cc_size");
+ cc_mask = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, cc_mask),
+ "cc_mask");
+
+ env_pc = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, pc),
+ "pc");
+ env_btarget = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, btarget),
+ "btarget");
+ env_btaken = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, btaken),
+ "btaken");
+ for (i = 0; i < 16; i++) {
+ cpu_R[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, regs[i]),
+ regnames_v32[i]);
+ }
+ for (i = 0; i < 16; i++) {
+ cpu_PR[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, pregs[i]),
+ pregnames_v32[i]);
+ }
+}
+
+void restore_state_to_opc(CPUCRISState *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ env->pc = data[0];
+}
diff --git a/target/cris/translate_v10.c b/target/cris/translate_v10.c
new file mode 100644
index 0000000000..4a0b485d8e
--- /dev/null
+++ b/target/cris/translate_v10.c
@@ -0,0 +1,1315 @@
+/*
+ * CRISv10 emulation for qemu: main translation routines.
+ *
+ * Copyright (c) 2010 AXIS Communications AB
+ * Written by Edgar E. Iglesias.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "crisv10-decode.h"
+
+static const char *regnames_v10[] =
+{
+ "$r0", "$r1", "$r2", "$r3",
+ "$r4", "$r5", "$r6", "$r7",
+ "$r8", "$r9", "$r10", "$r11",
+ "$r12", "$r13", "$sp", "$pc",
+};
+
+static const char *pregnames_v10[] =
+{
+ "$bz", "$vr", "$p2", "$p3",
+ "$wz", "$ccr", "$p6-prefix", "$mof",
+ "$dz", "$ibr", "$irp", "$srp",
+ "$bar", "$dccr", "$brp", "$usp",
+};
+
+/* We need this table to handle preg-moves with implicit width. */
+static int preg_sizes_v10[] = {
+ 1, /* bz. */
+ 1, /* vr. */
+ 1, /* pid. */
+ 1, /* srs. */
+ 2, /* wz. */
+ 2, 2, 4,
+ 4, 4, 4, 4,
+ 4, 4, 4, 4,
+};
+
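+/* Map the encoded size field to an operand width in bytes: 0 -> 1, 1 -> 2, 2 -> 4. */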
+static inline int dec10_size(unsigned int size)
+{
+ size++;
+ if (size == 3)
+ size++;
+ return size;
+}
+
+static inline void cris_illegal_insn(DisasContext *dc)
+{
+ qemu_log_mask(LOG_GUEST_ERROR, "illegal insn at pc=%x\n", dc->pc);
+ t_gen_raise_exception(EXCP_BREAK);
+}
+
+static void gen_store_v10_conditional(DisasContext *dc, TCGv addr, TCGv val,
+ unsigned int size, int mem_index)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGv taddr = tcg_temp_local_new();
+ TCGv tval = tcg_temp_local_new();
+ TCGv t1 = tcg_temp_local_new();
+ dc->postinc = 0;
+ cris_evaluate_flags(dc);
+
+ tcg_gen_mov_tl(taddr, addr);
+ tcg_gen_mov_tl(tval, val);
+
+ /* Store only if F flag isn't set */
+ tcg_gen_andi_tl(t1, cpu_PR[PR_CCS], F_FLAG_V10);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1);
+ if (size == 1) {
+ tcg_gen_qemu_st8(tval, taddr, mem_index);
+ } else if (size == 2) {
+ tcg_gen_qemu_st16(tval, taddr, mem_index);
+ } else {
+ tcg_gen_qemu_st32(tval, taddr, mem_index);
+ }
+ gen_set_label(l1);
+ tcg_gen_shri_tl(t1, t1, 1); /* shift F to P position */
+ tcg_gen_or_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], t1); /*P=F*/
+ tcg_temp_free(t1);
+ tcg_temp_free(tval);
+ tcg_temp_free(taddr);
+}
+
+static void gen_store_v10(DisasContext *dc, TCGv addr, TCGv val,
+ unsigned int size)
+{
+ int mem_index = cpu_mmu_index(&dc->cpu->env, false);
+
+ /* If we get a fault on a delayslot we must keep the jmp state in
+ the cpu-state to be able to re-execute the jmp. */
+ if (dc->delayed_branch == 1) {
+ cris_store_direct_jmp(dc);
+ }
+
+ /* Conditional writes. We only support the kind where X is known
+ at translation time. */
+ if (dc->flagx_known && dc->flags_x) {
+ gen_store_v10_conditional(dc, addr, val, size, mem_index);
+ return;
+ }
+
+ if (size == 1) {
+ tcg_gen_qemu_st8(val, addr, mem_index);
+ } else if (size == 2) {
+ tcg_gen_qemu_st16(val, addr, mem_index);
+ } else {
+ tcg_gen_qemu_st32(val, addr, mem_index);
+ }
+}
+
+
+/* Prefix flag and register are used to handle the more complex
+ addressing modes. */
+static void cris_set_prefix(DisasContext *dc)
+{
+ dc->clear_prefix = 0;
+ dc->tb_flags |= PFIX_FLAG;
+ tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], PFIX_FLAG);
+
+ /* prefix insns don't clear the x flag. */
+ dc->clear_x = 0;
+ cris_lock_irq(dc);
+}
+
+static void crisv10_prepare_memaddr(DisasContext *dc,
+ TCGv addr, unsigned int size)
+{
+ if (dc->tb_flags & PFIX_FLAG) {
+ tcg_gen_mov_tl(addr, cpu_PR[PR_PREFIX]);
+ } else {
+ tcg_gen_mov_tl(addr, cpu_R[dc->src]);
+ }
+}
+
+static unsigned int crisv10_post_memaddr(DisasContext *dc, unsigned int size)
+{
+ unsigned int insn_len = 0;
+
+ if (dc->tb_flags & PFIX_FLAG) {
+ if (dc->mode == CRISV10_MODE_AUTOINC) {
+ tcg_gen_mov_tl(cpu_R[dc->src], cpu_PR[PR_PREFIX]);
+ }
+ } else {
+ if (dc->mode == CRISV10_MODE_AUTOINC) {
+ if (dc->src == 15) {
+ insn_len += size & ~1;
+ } else {
+ tcg_gen_addi_tl(cpu_R[dc->src], cpu_R[dc->src], size);
+ }
+ }
+ }
+ return insn_len;
+}
+
+static int dec10_prep_move_m(CPUCRISState *env, DisasContext *dc,
+ int s_ext, int memsize, TCGv dst)
+{
+ unsigned int rs;
+ uint32_t imm;
+ int is_imm;
+ int insn_len = 0;
+
+ rs = dc->src;
+ is_imm = rs == 15 && !(dc->tb_flags & PFIX_FLAG);
+ LOG_DIS("rs=%d rd=%d is_imm=%d mode=%d pfix=%d\n",
+ rs, dc->dst, is_imm, dc->mode, dc->tb_flags & PFIX_FLAG);
+
+ /* Load [$rs] onto T1. */
+ if (is_imm) {
+ if (memsize != 4) {
+ if (s_ext) {
+ if (memsize == 1)
+ imm = cpu_ldsb_code(env, dc->pc + 2);
+ else
+ imm = cpu_ldsw_code(env, dc->pc + 2);
+ } else {
+ if (memsize == 1)
+ imm = cpu_ldub_code(env, dc->pc + 2);
+ else
+ imm = cpu_lduw_code(env, dc->pc + 2);
+ }
+ } else
+ imm = cpu_ldl_code(env, dc->pc + 2);
+
+ tcg_gen_movi_tl(dst, imm);
+
+ if (dc->mode == CRISV10_MODE_AUTOINC) {
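+ /* The immediate is part of the insn stream; a byte immediate
+ still occupies a full halfword. */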
+ insn_len += memsize;
+ if (memsize == 1)
+ insn_len++;
+ tcg_gen_addi_tl(cpu_R[15], cpu_R[15], insn_len);
+ }
+ } else {
+ TCGv addr;
+
+ addr = tcg_temp_new();
+ cris_flush_cc_state(dc);
+ crisv10_prepare_memaddr(dc, addr, memsize);
+ gen_load(dc, dst, addr, memsize, 0);
+ if (s_ext)
+ t_gen_sext(dst, dst, memsize);
+ else
+ t_gen_zext(dst, dst, memsize);
+ insn_len += crisv10_post_memaddr(dc, memsize);
+ tcg_temp_free(addr);
+ }
+
+ if (dc->mode == CRISV10_MODE_INDIRECT && (dc->tb_flags & PFIX_FLAG)) {
+ dc->dst = dc->src;
+ }
+ return insn_len;
+}
+
+static unsigned int dec10_quick_imm(DisasContext *dc)
+{
+ int32_t imm, simm;
+ int op;
+
+ /* sign extend. */
+ imm = dc->ir & ((1 << 6) - 1);
+ simm = (int8_t) (imm << 2);
+ simm >>= 2;
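+ /* Shifting the 6-bit field to the top of an int8_t and back sign-extends it. */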
+ switch (dc->opcode) {
+ case CRISV10_QIMM_BDAP_R0:
+ case CRISV10_QIMM_BDAP_R1:
+ case CRISV10_QIMM_BDAP_R2:
+ case CRISV10_QIMM_BDAP_R3:
+ simm = (int8_t)dc->ir;
+ LOG_DIS("bdap %d $r%d\n", simm, dc->dst);
+ LOG_DIS("pc=%x mode=%x quickimm %d r%d r%d\n",
+ dc->pc, dc->mode, dc->opcode, dc->src, dc->dst);
+ cris_set_prefix(dc);
+ if (dc->dst == 15) {
+ tcg_gen_movi_tl(cpu_PR[PR_PREFIX], dc->pc + 2 + simm);
+ } else {
+ tcg_gen_addi_tl(cpu_PR[PR_PREFIX], cpu_R[dc->dst], simm);
+ }
+ break;
+
+ case CRISV10_QIMM_MOVEQ:
+ LOG_DIS("moveq %d, $r%d\n", simm, dc->dst);
+
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu(dc, CC_OP_MOVE, cpu_R[dc->dst],
+ cpu_R[dc->dst], tcg_const_tl(simm), 4);
+ break;
+ case CRISV10_QIMM_CMPQ:
+ LOG_DIS("cmpq %d, $r%d\n", simm, dc->dst);
+
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu(dc, CC_OP_CMP, cpu_R[dc->dst],
+ cpu_R[dc->dst], tcg_const_tl(simm), 4);
+ break;
+ case CRISV10_QIMM_ADDQ:
+ LOG_DIS("addq %d, $r%d\n", imm, dc->dst);
+
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu(dc, CC_OP_ADD, cpu_R[dc->dst],
+ cpu_R[dc->dst], tcg_const_tl(imm), 4);
+ break;
+ case CRISV10_QIMM_ANDQ:
+ LOG_DIS("andq %d, $r%d\n", simm, dc->dst);
+
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu(dc, CC_OP_AND, cpu_R[dc->dst],
+ cpu_R[dc->dst], tcg_const_tl(simm), 4);
+ break;
+ case CRISV10_QIMM_ASHQ:
+ LOG_DIS("ashq %d, $r%d\n", simm, dc->dst);
+
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ op = imm & (1 << 5);
+ imm &= 0x1f;
+ if (op) {
+ cris_alu(dc, CC_OP_ASR, cpu_R[dc->dst],
+ cpu_R[dc->dst], tcg_const_tl(imm), 4);
+ } else {
+ /* BTST */
+ cris_update_cc_op(dc, CC_OP_FLAGS, 4);
+ gen_helper_btst(cpu_PR[PR_CCS], cpu_env, cpu_R[dc->dst],
+ tcg_const_tl(imm), cpu_PR[PR_CCS]);
+ }
+ break;
+ case CRISV10_QIMM_LSHQ:
+ LOG_DIS("lshq %d, $r%d\n", simm, dc->dst);
+
+ op = CC_OP_LSL;
+ if (imm & (1 << 5)) {
+ op = CC_OP_LSR;
+ }
+ imm &= 0x1f;
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu(dc, op, cpu_R[dc->dst],
+ cpu_R[dc->dst], tcg_const_tl(imm), 4);
+ break;
+ case CRISV10_QIMM_SUBQ:
+ LOG_DIS("subq %d, $r%d\n", imm, dc->dst);
+
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu(dc, CC_OP_SUB, cpu_R[dc->dst],
+ cpu_R[dc->dst], tcg_const_tl(imm), 4);
+ break;
+ case CRISV10_QIMM_ORQ:
+ LOG_DIS("andq %d, $r%d\n", simm, dc->dst);
+
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu(dc, CC_OP_OR, cpu_R[dc->dst],
+ cpu_R[dc->dst], tcg_const_tl(simm), 4);
+ break;
+
+ case CRISV10_QIMM_BCC_R0:
+ case CRISV10_QIMM_BCC_R1:
+ case CRISV10_QIMM_BCC_R2:
+ case CRISV10_QIMM_BCC_R3:
+ imm = dc->ir & 0xff;
+ /* bit 0 is a sign bit. */
+ if (imm & 1) {
+ imm |= 0xffffff00; /* sign extend. */
+ imm &= ~1; /* get rid of the sign bit. */
+ }
+ imm += 2;
+ LOG_DIS("b%s %d\n", cc_name(dc->cond), imm);
+
+ cris_cc_mask(dc, 0);
+ cris_prepare_cc_branch(dc, imm, dc->cond);
+ break;
+
+ default:
+ LOG_DIS("pc=%x mode=%x quickimm %d r%d r%d\n",
+ dc->pc, dc->mode, dc->opcode, dc->src, dc->dst);
+ cpu_abort(CPU(dc->cpu), "Unhandled quickimm\n");
+ break;
+ }
+ return 2;
+}
+
+static unsigned int dec10_setclrf(DisasContext *dc)
+{
+ uint32_t flags;
+ unsigned int set = ~dc->opcode & 1;
+
+ flags = EXTRACT_FIELD(dc->ir, 0, 3)
+ | (EXTRACT_FIELD(dc->ir, 12, 15) << 4);
+ LOG_DIS("%s set=%d flags=%x\n", __func__, set, flags);
+
+
+ if (flags & X_FLAG) {
+ dc->flagx_known = 1;
+ if (set)
+ dc->flags_x = X_FLAG;
+ else
+ dc->flags_x = 0;
+ }
+
+ cris_evaluate_flags (dc);
+ cris_update_cc_op(dc, CC_OP_FLAGS, 4);
+ cris_update_cc_x(dc);
+ tcg_gen_movi_tl(cc_op, dc->cc_op);
+
+ if (set) {
+ tcg_gen_ori_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], flags);
+ } else {
+ tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS],
+ ~(flags|F_FLAG_V10|P_FLAG_V10));
+ }
+
+ dc->flags_uptodate = 1;
+ dc->clear_x = 0;
+ cris_lock_irq(dc);
+ return 2;
+}
+
+static inline void dec10_reg_prep_sext(DisasContext *dc, int size, int sext,
+ TCGv dd, TCGv ds, TCGv sd, TCGv ss)
+{
+ if (sext) {
+ t_gen_sext(dd, sd, size);
+ t_gen_sext(ds, ss, size);
+ } else {
+ t_gen_zext(dd, sd, size);
+ t_gen_zext(ds, ss, size);
+ }
+}
+
+static void dec10_reg_alu(DisasContext *dc, int op, int size, int sext)
+{
+ TCGv t[2];
+
+ t[0] = tcg_temp_new();
+ t[1] = tcg_temp_new();
+ dec10_reg_prep_sext(dc, size, sext,
+ t[0], t[1], cpu_R[dc->dst], cpu_R[dc->src]);
+
+ if (op == CC_OP_LSL || op == CC_OP_LSR || op == CC_OP_ASR) {
+ tcg_gen_andi_tl(t[1], t[1], 63);
+ }
+
+ assert(dc->dst != 15);
+ cris_alu(dc, op, cpu_R[dc->dst], t[0], t[1], size);
+ tcg_temp_free(t[0]);
+ tcg_temp_free(t[1]);
+}
+
+static void dec10_reg_bound(DisasContext *dc, int size)
+{
+ TCGv t;
+
+ t = tcg_temp_local_new();
+ t_gen_zext(t, cpu_R[dc->src], size);
+ cris_alu(dc, CC_OP_BOUND, cpu_R[dc->dst], cpu_R[dc->dst], t, 4);
+ tcg_temp_free(t);
+}
+
+static void dec10_reg_mul(DisasContext *dc, int size, int sext)
+{
+ int op = sext ? CC_OP_MULS : CC_OP_MULU;
+ TCGv t[2];
+
+ t[0] = tcg_temp_new();
+ t[1] = tcg_temp_new();
+ dec10_reg_prep_sext(dc, size, sext,
+ t[0], t[1], cpu_R[dc->dst], cpu_R[dc->src]);
+
+ cris_alu(dc, op, cpu_R[dc->dst], t[0], t[1], 4);
+
+ tcg_temp_free(t[0]);
+ tcg_temp_free(t[1]);
+}
+
+
+static void dec10_reg_movs(DisasContext *dc)
+{
+ int size = (dc->size & 1) + 1;
+ TCGv t;
+
+ LOG_DIS("movx.%d $r%d, $r%d\n", size, dc->src, dc->dst);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+
+ t = tcg_temp_new();
+ if (dc->ir & 32)
+ t_gen_sext(t, cpu_R[dc->src], size);
+ else
+ t_gen_zext(t, cpu_R[dc->src], size);
+
+ cris_alu(dc, CC_OP_MOVE, cpu_R[dc->dst], cpu_R[dc->dst], t, 4);
+ tcg_temp_free(t);
+}
+
+static void dec10_reg_alux(DisasContext *dc, int op)
+{
+ int size = (dc->size & 1) + 1;
+ TCGv t;
+
+ LOG_DIS("movx.%d $r%d, $r%d\n", size, dc->src, dc->dst);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+
+ t = tcg_temp_new();
+ if (dc->ir & 32)
+ t_gen_sext(t, cpu_R[dc->src], size);
+ else
+ t_gen_zext(t, cpu_R[dc->src], size);
+
+ cris_alu(dc, op, cpu_R[dc->dst], cpu_R[dc->dst], t, 4);
+ tcg_temp_free(t);
+}
+
+static void dec10_reg_mov_pr(DisasContext *dc)
+{
+ LOG_DIS("move p%d r%d sz=%d\n", dc->dst, dc->src, preg_sizes_v10[dc->dst]);
+ cris_lock_irq(dc);
+ if (dc->src == 15) {
+ tcg_gen_mov_tl(env_btarget, cpu_PR[dc->dst]);
+ cris_prepare_jmp(dc, JMP_INDIRECT);
+ return;
+ }
+ if (dc->dst == PR_CCS) {
+ cris_evaluate_flags(dc);
+ }
+ cris_alu(dc, CC_OP_MOVE, cpu_R[dc->src],
+ cpu_R[dc->src], cpu_PR[dc->dst], preg_sizes_v10[dc->dst]);
+}
+
+static void dec10_reg_abs(DisasContext *dc)
+{
+ TCGv t0;
+
+ LOG_DIS("abs $r%u, $r%u\n", dc->src, dc->dst);
+
+ assert(dc->dst != 15);
+ t0 = tcg_temp_new();
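+ /* Branchless abs: with s = src >> 31 (0 or -1), abs(src) = (src ^ s) - s. */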
+ tcg_gen_sari_tl(t0, cpu_R[dc->src], 31);
+ tcg_gen_xor_tl(cpu_R[dc->dst], cpu_R[dc->src], t0);
+ tcg_gen_sub_tl(t0, cpu_R[dc->dst], t0);
+
+ cris_alu(dc, CC_OP_MOVE, cpu_R[dc->dst], cpu_R[dc->dst], t0, 4);
+ tcg_temp_free(t0);
+}
+
+static void dec10_reg_swap(DisasContext *dc)
+{
+ TCGv t0;
+
+ LOG_DIS("not $r%d, $r%d\n", dc->src, dc->dst);
+
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ t0 = tcg_temp_new();
+ tcg_gen_mov_tl(t0, cpu_R[dc->src]);
+ if (dc->dst & 8)
+ tcg_gen_not_tl(t0, t0);
+ if (dc->dst & 4)
+ t_gen_swapw(t0, t0);
+ if (dc->dst & 2)
+ t_gen_swapb(t0, t0);
+ if (dc->dst & 1)
+ t_gen_swapr(t0, t0);
+ cris_alu(dc, CC_OP_MOVE, cpu_R[dc->src], cpu_R[dc->src], t0, 4);
+ tcg_temp_free(t0);
+}
+
+static void dec10_reg_scc(DisasContext *dc)
+{
+ int cond = dc->dst;
+
+ LOG_DIS("s%s $r%u\n", cc_name(cond), dc->src);
+
+ gen_tst_cc(dc, cpu_R[dc->src], cond);
+ tcg_gen_setcondi_tl(TCG_COND_NE, cpu_R[dc->src], cpu_R[dc->src], 0);
+
+ cris_cc_mask(dc, 0);
+}
+
+static unsigned int dec10_reg(DisasContext *dc)
+{
+ TCGv t;
+ unsigned int insn_len = 2;
+ unsigned int size = dec10_size(dc->size);
+ unsigned int tmp;
+
+ if (dc->size != 3) {
+ switch (dc->opcode) {
+ case CRISV10_REG_MOVE_R:
+ LOG_DIS("move.%d $r%d, $r%d\n", dc->size, dc->src, dc->dst);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_alu(dc, CC_OP_MOVE, size, 0);
+ if (dc->dst == 15) {
+ tcg_gen_mov_tl(env_btarget, cpu_R[dc->dst]);
+ cris_prepare_jmp(dc, JMP_INDIRECT);
+ dc->delayed_branch = 1;
+ }
+ break;
+ case CRISV10_REG_MOVX:
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_movs(dc);
+ break;
+ case CRISV10_REG_ADDX:
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_alux(dc, CC_OP_ADD);
+ break;
+ case CRISV10_REG_SUBX:
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_alux(dc, CC_OP_SUB);
+ break;
+ case CRISV10_REG_ADD:
+ LOG_DIS("add $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_alu(dc, CC_OP_ADD, size, 0);
+ break;
+ case CRISV10_REG_SUB:
+ LOG_DIS("sub $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_alu(dc, CC_OP_SUB, size, 0);
+ break;
+ case CRISV10_REG_CMP:
+ LOG_DIS("cmp $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_alu(dc, CC_OP_CMP, size, 0);
+ break;
+ case CRISV10_REG_BOUND:
+ LOG_DIS("bound $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_bound(dc, size);
+ break;
+ case CRISV10_REG_AND:
+ LOG_DIS("and $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_alu(dc, CC_OP_AND, size, 0);
+ break;
+ case CRISV10_REG_ADDI:
+ if (dc->src == 15) {
+ /* nop. */
+ return 2;
+ }
+ t = tcg_temp_new();
+ LOG_DIS("addi r%d r%d size=%d\n", dc->src, dc->dst, dc->size);
+ tcg_gen_shli_tl(t, cpu_R[dc->dst], dc->size & 3);
+ tcg_gen_add_tl(cpu_R[dc->src], cpu_R[dc->src], t);
+ tcg_temp_free(t);
+ break;
+ case CRISV10_REG_LSL:
+ LOG_DIS("lsl $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_alu(dc, CC_OP_LSL, size, 0);
+ break;
+ case CRISV10_REG_LSR:
+ LOG_DIS("lsr $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_alu(dc, CC_OP_LSR, size, 0);
+ break;
+ case CRISV10_REG_ASR:
+ LOG_DIS("asr $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_alu(dc, CC_OP_ASR, size, 1);
+ break;
+ case CRISV10_REG_OR:
+ LOG_DIS("or $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_alu(dc, CC_OP_OR, size, 0);
+ break;
+ case CRISV10_REG_NEG:
+ LOG_DIS("neg $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_alu(dc, CC_OP_NEG, size, 0);
+ break;
+ case CRISV10_REG_BIAP:
+ LOG_DIS("BIAP pc=%x reg %d r%d r%d size=%d\n", dc->pc,
+ dc->opcode, dc->src, dc->dst, size);
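+ /* Scale the index in $r[dst] by the operand size (shift by log2(size)). */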
+ switch (size) {
+ case 4: tmp = 2; break;
+ case 2: tmp = 1; break;
+ case 1: tmp = 0; break;
+ default:
+ cpu_abort(CPU(dc->cpu), "Unhandled BIAP");
+ break;
+ }
+
+ t = tcg_temp_new();
+ tcg_gen_shli_tl(t, cpu_R[dc->dst], tmp);
+ if (dc->src == 15) {
+ tcg_gen_addi_tl(cpu_PR[PR_PREFIX], t, ((dc->pc +2)| 1) + 1);
+ } else {
+ tcg_gen_add_tl(cpu_PR[PR_PREFIX], cpu_R[dc->src], t);
+ }
+ tcg_temp_free(t);
+ cris_set_prefix(dc);
+ break;
+
+ default:
+ LOG_DIS("pc=%x reg %d r%d r%d\n", dc->pc,
+ dc->opcode, dc->src, dc->dst);
+ cpu_abort(CPU(dc->cpu), "Unhandled opcode");
+ break;
+ }
+ } else {
+ switch (dc->opcode) {
+ case CRISV10_REG_MOVX:
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_movs(dc);
+ break;
+ case CRISV10_REG_ADDX:
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_alux(dc, CC_OP_ADD);
+ break;
+ case CRISV10_REG_SUBX:
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_alux(dc, CC_OP_SUB);
+ break;
+ case CRISV10_REG_MOVE_SPR_R:
+ cris_evaluate_flags(dc);
+ cris_cc_mask(dc, 0);
+ dec10_reg_mov_pr(dc);
+ break;
+ case CRISV10_REG_MOVE_R_SPR:
+ LOG_DIS("move r%d p%d\n", dc->src, dc->dst);
+ cris_evaluate_flags(dc);
+ if (dc->src != 11) /* fast for srp. */
+ dc->cpustate_changed = 1;
+ t_gen_mov_preg_TN(dc, dc->dst, cpu_R[dc->src]);
+ break;
+ case CRISV10_REG_SETF:
+ case CRISV10_REG_CLEARF:
+ dec10_setclrf(dc);
+ break;
+ case CRISV10_REG_SWAP:
+ dec10_reg_swap(dc);
+ break;
+ case CRISV10_REG_ABS:
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_abs(dc);
+ break;
+ case CRISV10_REG_LZ:
+ LOG_DIS("lz $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_alu(dc, CC_OP_LZ, 4, 0);
+ break;
+ case CRISV10_REG_XOR:
+ LOG_DIS("xor $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_alu(dc, CC_OP_XOR, 4, 0);
+ break;
+ case CRISV10_REG_BTST:
+ LOG_DIS("btst $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_update_cc_op(dc, CC_OP_FLAGS, 4);
+ gen_helper_btst(cpu_PR[PR_CCS], cpu_env, cpu_R[dc->dst],
+ cpu_R[dc->src], cpu_PR[PR_CCS]);
+ break;
+ case CRISV10_REG_DSTEP:
+ LOG_DIS("dstep $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu(dc, CC_OP_DSTEP, cpu_R[dc->dst],
+ cpu_R[dc->dst], cpu_R[dc->src], 4);
+ break;
+ case CRISV10_REG_MSTEP:
+ LOG_DIS("mstep $r%d, $r%d sz=%d\n", dc->src, dc->dst, size);
+ cris_evaluate_flags(dc);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu(dc, CC_OP_MSTEP, cpu_R[dc->dst],
+ cpu_R[dc->dst], cpu_R[dc->src], 4);
+ break;
+ case CRISV10_REG_SCC:
+ dec10_reg_scc(dc);
+ break;
+ default:
+ LOG_DIS("pc=%x reg %d r%d r%d\n", dc->pc,
+ dc->opcode, dc->src, dc->dst);
+ cpu_abort(CPU(dc->cpu), "Unhandled opcode");
+ break;
+ }
+ }
+ return insn_len;
+}
+
+static unsigned int dec10_ind_move_m_r(CPUCRISState *env, DisasContext *dc,
+ unsigned int size)
+{
+ unsigned int insn_len = 2;
+ TCGv t;
+
+ LOG_DIS("%s: move.%d [$r%d], $r%d\n", __func__,
+ size, dc->src, dc->dst);
+
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ t = tcg_temp_new();
+ insn_len += dec10_prep_move_m(env, dc, 0, size, t);
+ cris_alu(dc, CC_OP_MOVE, cpu_R[dc->dst], cpu_R[dc->dst], t, size);
+ if (dc->dst == 15) {
+ tcg_gen_mov_tl(env_btarget, cpu_R[dc->dst]);
+ cris_prepare_jmp(dc, JMP_INDIRECT);
+ dc->delayed_branch = 1;
+ return insn_len;
+ }
+
+ tcg_temp_free(t);
+ return insn_len;
+}
+
+static unsigned int dec10_ind_move_r_m(DisasContext *dc, unsigned int size)
+{
+ unsigned int insn_len = 2;
+ TCGv addr;
+
+ LOG_DIS("move.%d $r%d, [$r%d]\n", dc->size, dc->src, dc->dst);
+ addr = tcg_temp_new();
+ crisv10_prepare_memaddr(dc, addr, size);
+ gen_store_v10(dc, addr, cpu_R[dc->dst], size);
+ insn_len += crisv10_post_memaddr(dc, size);
+
+ return insn_len;
+}
+
+static unsigned int dec10_ind_move_m_pr(CPUCRISState *env, DisasContext *dc)
+{
+ unsigned int insn_len = 2, rd = dc->dst;
+ TCGv t, addr;
+
+ LOG_DIS("move.%d $p%d, [$r%d]\n", dc->size, dc->dst, dc->src);
+ cris_lock_irq(dc);
+
+ addr = tcg_temp_new();
+ t = tcg_temp_new();
+ insn_len += dec10_prep_move_m(env, dc, 0, 4, t);
+ if (rd == 15) {
+ tcg_gen_mov_tl(env_btarget, t);
+ cris_prepare_jmp(dc, JMP_INDIRECT);
+ dc->delayed_branch = 1;
+ return insn_len;
+ }
+
+ tcg_gen_mov_tl(cpu_PR[rd], t);
+ dc->cpustate_changed = 1;
+ tcg_temp_free(addr);
+ tcg_temp_free(t);
+ return insn_len;
+}
+
+static unsigned int dec10_ind_move_pr_m(DisasContext *dc)
+{
+ unsigned int insn_len = 2, size = preg_sizes_v10[dc->dst];
+ TCGv addr, t0;
+
+ LOG_DIS("move.%d $p%d, [$r%d]\n", dc->size, dc->dst, dc->src);
+
+ addr = tcg_temp_new();
+ crisv10_prepare_memaddr(dc, addr, size);
+ if (dc->dst == PR_CCS) {
+ t0 = tcg_temp_new();
+ cris_evaluate_flags(dc);
+ tcg_gen_andi_tl(t0, cpu_PR[PR_CCS], ~PFIX_FLAG);
+ gen_store_v10(dc, addr, t0, size);
+ tcg_temp_free(t0);
+ } else {
+ gen_store_v10(dc, addr, cpu_PR[dc->dst], size);
+ }
+ t0 = tcg_temp_new();
+ insn_len += crisv10_post_memaddr(dc, size);
+ cris_lock_irq(dc);
+
+ return insn_len;
+}
+
+static void dec10_movem_r_m(DisasContext *dc)
+{
+ int i, pfix = dc->tb_flags & PFIX_FLAG;
+ TCGv addr, t0;
+
+ LOG_DIS("%s r%d, [r%d] pi=%d ir=%x\n", __func__,
+ dc->dst, dc->src, dc->postinc, dc->ir);
+
+ addr = tcg_temp_new();
+ t0 = tcg_temp_new();
+ crisv10_prepare_memaddr(dc, addr, 4);
+ tcg_gen_mov_tl(t0, addr);
+ for (i = dc->dst; i >= 0; i--) {
+ if ((pfix && dc->mode == CRISV10_MODE_AUTOINC) && dc->src == i) {
+ gen_store_v10(dc, addr, t0, 4);
+ } else {
+ gen_store_v10(dc, addr, cpu_R[i], 4);
+ }
+ tcg_gen_addi_tl(addr, addr, 4);
+ }
+
+ if (pfix && dc->mode == CRISV10_MODE_AUTOINC) {
+ tcg_gen_mov_tl(cpu_R[dc->src], t0);
+ }
+
+ if (!pfix && dc->mode == CRISV10_MODE_AUTOINC) {
+ tcg_gen_mov_tl(cpu_R[dc->src], addr);
+ }
+ tcg_temp_free(addr);
+ tcg_temp_free(t0);
+}
+
+static void dec10_movem_m_r(DisasContext *dc)
+{
+ int i, pfix = dc->tb_flags & PFIX_FLAG;
+ TCGv addr, t0;
+
+ LOG_DIS("%s [r%d], r%d pi=%d ir=%x\n", __func__,
+ dc->src, dc->dst, dc->postinc, dc->ir);
+
+ addr = tcg_temp_new();
+ t0 = tcg_temp_new();
+ crisv10_prepare_memaddr(dc, addr, 4);
+ tcg_gen_mov_tl(t0, addr);
+ for (i = dc->dst; i >= 0; i--) {
+ gen_load(dc, cpu_R[i], addr, 4, 0);
+ tcg_gen_addi_tl(addr, addr, 4);
+ }
+
+ if (pfix && dc->mode == CRISV10_MODE_AUTOINC) {
+ tcg_gen_mov_tl(cpu_R[dc->src], t0);
+ }
+
+ if (!pfix && dc->mode == CRISV10_MODE_AUTOINC) {
+ tcg_gen_mov_tl(cpu_R[dc->src], addr);
+ }
+ tcg_temp_free(addr);
+ tcg_temp_free(t0);
+}
+
+static int dec10_ind_alu(CPUCRISState *env, DisasContext *dc,
+ int op, unsigned int size)
+{
+ int insn_len = 0;
+ int rd = dc->dst;
+ TCGv t[2];
+
+ cris_alu_m_alloc_temps(t);
+ insn_len += dec10_prep_move_m(env, dc, 0, size, t[0]);
+ cris_alu(dc, op, cpu_R[dc->dst], cpu_R[rd], t[0], size);
+ if (dc->dst == 15) {
+ tcg_gen_mov_tl(env_btarget, cpu_R[dc->dst]);
+ cris_prepare_jmp(dc, JMP_INDIRECT);
+ dc->delayed_branch = 1;
+ return insn_len;
+ }
+
+ cris_alu_m_free_temps(t);
+
+ return insn_len;
+}
+
+static int dec10_ind_bound(CPUCRISState *env, DisasContext *dc,
+ unsigned int size)
+{
+ int insn_len = 0;
+ int rd = dc->dst;
+ TCGv t;
+
+ t = tcg_temp_local_new();
+ insn_len += dec10_prep_move_m(env, dc, 0, size, t);
+ cris_alu(dc, CC_OP_BOUND, cpu_R[dc->dst], cpu_R[rd], t, 4);
+ if (dc->dst == 15) {
+ tcg_gen_mov_tl(env_btarget, cpu_R[dc->dst]);
+ cris_prepare_jmp(dc, JMP_INDIRECT);
+ dc->delayed_branch = 1;
+ return insn_len;
+ }
+
+ tcg_temp_free(t);
+ return insn_len;
+}
+
+static int dec10_alux_m(CPUCRISState *env, DisasContext *dc, int op)
+{
+ unsigned int size = (dc->size & 1) ? 2 : 1;
+ unsigned int sx = !!(dc->size & 2);
+ int insn_len = 2;
+ int rd = dc->dst;
+ TCGv t;
+
+ LOG_DIS("addx size=%d sx=%d op=%d %d\n", size, sx, dc->src, dc->dst);
+
+ t = tcg_temp_new();
+
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ insn_len += dec10_prep_move_m(env, dc, sx, size, t);
+ cris_alu(dc, op, cpu_R[dc->dst], cpu_R[rd], t, 4);
+ if (dc->dst == 15) {
+ tcg_gen_mov_tl(env_btarget, cpu_R[dc->dst]);
+ cris_prepare_jmp(dc, JMP_INDIRECT);
+ dc->delayed_branch = 1;
+ return insn_len;
+ }
+
+ tcg_temp_free(t);
+ return insn_len;
+}
+
+static int dec10_dip(CPUCRISState *env, DisasContext *dc)
+{
+ int insn_len = 2;
+ uint32_t imm;
+
+ LOG_DIS("dip pc=%x opcode=%d r%d r%d\n",
+ dc->pc, dc->opcode, dc->src, dc->dst);
+ if (dc->src == 15) {
+ imm = cpu_ldl_code(env, dc->pc + 2);
+ tcg_gen_movi_tl(cpu_PR[PR_PREFIX], imm);
+ if (dc->postinc)
+ insn_len += 4;
+ tcg_gen_addi_tl(cpu_R[15], cpu_R[15], insn_len - 2);
+ } else {
+ gen_load(dc, cpu_PR[PR_PREFIX], cpu_R[dc->src], 4, 0);
+ if (dc->postinc)
+ tcg_gen_addi_tl(cpu_R[dc->src], cpu_R[dc->src], 4);
+ }
+
+ cris_set_prefix(dc);
+ return insn_len;
+}
+
+static int dec10_bdap_m(CPUCRISState *env, DisasContext *dc, int size)
+{
+ int insn_len = 2;
+ int rd = dc->dst;
+
+ LOG_DIS("bdap_m pc=%x opcode=%d r%d r%d sz=%d\n",
+ dc->pc, dc->opcode, dc->src, dc->dst, size);
+
+ assert(dc->dst != 15);
+#if 0
+ /* 8bit embedded offset? */
+ if (!dc->postinc && (dc->ir & (1 << 11))) {
+ int simm = dc->ir & 0xff;
+
+ /* cpu_abort(CPU(dc->cpu), "Unhandled opcode"); */
+ /* sign extended. */
+ simm = (int8_t)simm;
+
+ tcg_gen_addi_tl(cpu_PR[PR_PREFIX], cpu_R[dc->dst], simm);
+
+ cris_set_prefix(dc);
+ return insn_len;
+ }
+#endif
+ /* Now the rest of the modes are truly indirect. */
+ insn_len += dec10_prep_move_m(env, dc, 1, size, cpu_PR[PR_PREFIX]);
+ tcg_gen_add_tl(cpu_PR[PR_PREFIX], cpu_PR[PR_PREFIX], cpu_R[rd]);
+ cris_set_prefix(dc);
+ return insn_len;
+}
+
+static unsigned int dec10_ind(CPUCRISState *env, DisasContext *dc)
+{
+ unsigned int insn_len = 2;
+ unsigned int size = dec10_size(dc->size);
+ uint32_t imm;
+ int32_t simm;
+ TCGv t[2];
+
+ if (dc->size != 3) {
+ switch (dc->opcode) {
+ case CRISV10_IND_MOVE_M_R:
+ return dec10_ind_move_m_r(env, dc, size);
+ break;
+ case CRISV10_IND_MOVE_R_M:
+ return dec10_ind_move_r_m(dc, size);
+ break;
+ case CRISV10_IND_CMP:
+ LOG_DIS("cmp size=%d op=%d %d\n", size, dc->src, dc->dst);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ insn_len += dec10_ind_alu(env, dc, CC_OP_CMP, size);
+ break;
+ case CRISV10_IND_TEST:
+ LOG_DIS("test size=%d op=%d %d\n", size, dc->src, dc->dst);
+
+ cris_evaluate_flags(dc);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ cris_alu_m_alloc_temps(t);
+ insn_len += dec10_prep_move_m(env, dc, 0, size, t[0]);
+ tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~3);
+ cris_alu(dc, CC_OP_CMP, cpu_R[dc->dst],
+ t[0], tcg_const_tl(0), size);
+ cris_alu_m_free_temps(t);
+ break;
+ case CRISV10_IND_ADD:
+ LOG_DIS("add size=%d op=%d %d\n", size, dc->src, dc->dst);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ insn_len += dec10_ind_alu(env, dc, CC_OP_ADD, size);
+ break;
+ case CRISV10_IND_SUB:
+ LOG_DIS("sub size=%d op=%d %d\n", size, dc->src, dc->dst);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ insn_len += dec10_ind_alu(env, dc, CC_OP_SUB, size);
+ break;
+ case CRISV10_IND_BOUND:
+ LOG_DIS("bound size=%d op=%d %d\n", size, dc->src, dc->dst);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ insn_len += dec10_ind_bound(env, dc, size);
+ break;
+ case CRISV10_IND_AND:
+ LOG_DIS("and size=%d op=%d %d\n", size, dc->src, dc->dst);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ insn_len += dec10_ind_alu(env, dc, CC_OP_AND, size);
+ break;
+ case CRISV10_IND_OR:
+ LOG_DIS("or size=%d op=%d %d\n", size, dc->src, dc->dst);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ insn_len += dec10_ind_alu(env, dc, CC_OP_OR, size);
+ break;
+ case CRISV10_IND_MOVX:
+ insn_len = dec10_alux_m(env, dc, CC_OP_MOVE);
+ break;
+ case CRISV10_IND_ADDX:
+ insn_len = dec10_alux_m(env, dc, CC_OP_ADD);
+ break;
+ case CRISV10_IND_SUBX:
+ insn_len = dec10_alux_m(env, dc, CC_OP_SUB);
+ break;
+ case CRISV10_IND_CMPX:
+ insn_len = dec10_alux_m(env, dc, CC_OP_CMP);
+ break;
+ case CRISV10_IND_MUL:
+ /* This is a reg insn coded in the mem indir space. */
+ LOG_DIS("mul pc=%x opcode=%d\n", dc->pc, dc->opcode);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ dec10_reg_mul(dc, size, dc->ir & (1 << 10));
+ break;
+ case CRISV10_IND_BDAP_M:
+ insn_len = dec10_bdap_m(env, dc, size);
+ break;
+ default:
+ /*
+ * ADDC for v17:
+ *
+ * Instruction format: ADDC [Rs],Rd
+ *
+ * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-+
+ * |Destination(Rd)| 1 0 0 1 1 0 1 0 | Source(Rs)|
+ * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+--+--+
+ *
+ * Instruction format: ADDC [Rs+],Rd
+ *
+ * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-+
+ * |Destination(Rd)| 1 1 0 1 1 0 1 0 | Source(Rs)|
+ * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-+
+ */
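+            /*
+             * Worked decode example (field layout per crisv10_decoder): the
+             * fixed bits 11:4 of ADDC [Rs],Rd are 10011010, so dc->opcode
+             * (bits 9:6) is 0110 = 6 and dc->size (bits 5:4) is 10 = 2,
+             * which is why the check below requires dc->size == 2; the
+             * [Rs+] form differs only in bit 10 (dc->postinc).  The opcode
+             * value 6 is what CRISV17_IND_ADDC is expected to encode.
+             */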
+ if (dc->opcode == CRISV17_IND_ADDC && dc->size == 2 &&
+ env->pregs[PR_VR] == 17) {
+ LOG_DIS("addc op=%d %d\n", dc->src, dc->dst);
+ cris_cc_mask(dc, CC_MASK_NZVC);
+ insn_len += dec10_ind_alu(env, dc, CC_OP_ADDC, size);
+ break;
+ }
+
+ LOG_DIS("pc=%x var-ind.%d %d r%d r%d\n",
+ dc->pc, size, dc->opcode, dc->src, dc->dst);
+ cpu_abort(CPU(dc->cpu), "Unhandled opcode");
+ break;
+ }
+ return insn_len;
+ }
+
+ switch (dc->opcode) {
+ case CRISV10_IND_MOVE_M_SPR:
+ insn_len = dec10_ind_move_m_pr(env, dc);
+ break;
+ case CRISV10_IND_MOVE_SPR_M:
+ insn_len = dec10_ind_move_pr_m(dc);
+ break;
+ case CRISV10_IND_JUMP_M:
+ if (dc->src == 15) {
+ LOG_DIS("jump.%d %d r%d r%d direct\n", size,
+ dc->opcode, dc->src, dc->dst);
+ imm = cpu_ldl_code(env, dc->pc + 2);
+ if (dc->mode == CRISV10_MODE_AUTOINC)
+ insn_len += size;
+
+ t_gen_mov_preg_TN(dc, dc->dst, tcg_const_tl(dc->pc + insn_len));
+ dc->jmp_pc = imm;
+ cris_prepare_jmp(dc, JMP_DIRECT);
+ dc->delayed_branch--; /* v10 has no dslot here. */
+ } else {
+ if (dc->dst == 14) {
+ LOG_DIS("break %d\n", dc->src);
+ cris_evaluate_flags(dc);
+ tcg_gen_movi_tl(env_pc, dc->pc + 2);
+ t_gen_mov_env_TN(trap_vector, tcg_const_tl(dc->src + 2));
+ t_gen_raise_exception(EXCP_BREAK);
+ dc->is_jmp = DISAS_UPDATE;
+ return insn_len;
+ }
+ LOG_DIS("%d: jump.%d %d r%d r%d\n", __LINE__, size,
+ dc->opcode, dc->src, dc->dst);
+ t[0] = tcg_temp_new();
+ t_gen_mov_preg_TN(dc, dc->dst, tcg_const_tl(dc->pc + insn_len));
+ crisv10_prepare_memaddr(dc, t[0], size);
+ gen_load(dc, env_btarget, t[0], 4, 0);
+ insn_len += crisv10_post_memaddr(dc, size);
+ cris_prepare_jmp(dc, JMP_INDIRECT);
+ dc->delayed_branch--; /* v10 has no dslot here. */
+ tcg_temp_free(t[0]);
+ }
+ break;
+
+ case CRISV10_IND_MOVEM_R_M:
+ LOG_DIS("movem_r_m pc=%x opcode=%d r%d r%d\n",
+ dc->pc, dc->opcode, dc->dst, dc->src);
+ dec10_movem_r_m(dc);
+ break;
+ case CRISV10_IND_MOVEM_M_R:
+ LOG_DIS("movem_m_r pc=%x opcode=%d\n", dc->pc, dc->opcode);
+ dec10_movem_m_r(dc);
+ break;
+ case CRISV10_IND_JUMP_R:
+ LOG_DIS("jmp pc=%x opcode=%d r%d r%d\n",
+ dc->pc, dc->opcode, dc->dst, dc->src);
+ tcg_gen_mov_tl(env_btarget, cpu_R[dc->src]);
+ t_gen_mov_preg_TN(dc, dc->dst, tcg_const_tl(dc->pc + insn_len));
+ cris_prepare_jmp(dc, JMP_INDIRECT);
+ dc->delayed_branch--; /* v10 has no dslot here. */
+ break;
+ case CRISV10_IND_MOVX:
+ insn_len = dec10_alux_m(env, dc, CC_OP_MOVE);
+ break;
+ case CRISV10_IND_ADDX:
+ insn_len = dec10_alux_m(env, dc, CC_OP_ADD);
+ break;
+ case CRISV10_IND_SUBX:
+ insn_len = dec10_alux_m(env, dc, CC_OP_SUB);
+ break;
+ case CRISV10_IND_CMPX:
+ insn_len = dec10_alux_m(env, dc, CC_OP_CMP);
+ break;
+ case CRISV10_IND_DIP:
+ insn_len = dec10_dip(env, dc);
+ break;
+ case CRISV10_IND_BCC_M:
+
+ cris_cc_mask(dc, 0);
+ imm = cpu_ldsw_code(env, dc->pc + 2);
+ simm = (int16_t)imm;
+ simm += 4;
+
+ LOG_DIS("bcc_m: b%s %x\n", cc_name(dc->cond), dc->pc + simm);
+ cris_prepare_cc_branch(dc, simm, dc->cond);
+ insn_len = 4;
+ break;
+ default:
+ LOG_DIS("ERROR pc=%x opcode=%d\n", dc->pc, dc->opcode);
+ cpu_abort(CPU(dc->cpu), "Unhandled opcode");
+ break;
+ }
+
+ return insn_len;
+}
+
+static unsigned int crisv10_decoder(CPUCRISState *env, DisasContext *dc)
+{
+ unsigned int insn_len = 2;
+
+ /* Load a halfword onto the instruction register. */
+ dc->ir = cpu_lduw_code(env, dc->pc);
+
+ /* Now decode it. */
+ dc->opcode = EXTRACT_FIELD(dc->ir, 6, 9);
+ dc->mode = EXTRACT_FIELD(dc->ir, 10, 11);
+ dc->src = EXTRACT_FIELD(dc->ir, 0, 3);
+ dc->size = EXTRACT_FIELD(dc->ir, 4, 5);
+ dc->cond = dc->dst = EXTRACT_FIELD(dc->ir, 12, 15);
+ dc->postinc = EXTRACT_FIELD(dc->ir, 10, 10);
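+    /*
+     * CRISv10 instruction word layout as extracted above:
+     * [15:12] dst/cond, [11:10] mode, [9:6] opcode, [5:4] size, [3:0] src.
+     */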
+
+ dc->clear_prefix = 1;
+
+    /* FIXME: What if this insn isn't 2 bytes in length? */
+ if (dc->src == 15 || dc->dst == 15)
+ tcg_gen_movi_tl(cpu_R[15], dc->pc + 2);
+
+ switch (dc->mode) {
+ case CRISV10_MODE_QIMMEDIATE:
+ insn_len = dec10_quick_imm(dc);
+ break;
+ case CRISV10_MODE_REG:
+ insn_len = dec10_reg(dc);
+ break;
+ case CRISV10_MODE_AUTOINC:
+ case CRISV10_MODE_INDIRECT:
+ insn_len = dec10_ind(env, dc);
+ break;
+ }
+
+ if (dc->clear_prefix && dc->tb_flags & PFIX_FLAG) {
+ dc->tb_flags &= ~PFIX_FLAG;
+ tcg_gen_andi_tl(cpu_PR[PR_CCS], cpu_PR[PR_CCS], ~PFIX_FLAG);
+ if (dc->tb_flags != dc->tb->flags) {
+ dc->cpustate_changed = 1;
+ }
+ }
+
+ /* CRISv10 locks out interrupts on dslots. */
+ if (dc->delayed_branch == 2) {
+ cris_lock_irq(dc);
+ }
+ return insn_len;
+}
+
+void cris_initialize_crisv10_tcg(void)
+{
+ int i;
+
+ cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+ tcg_ctx.tcg_env = cpu_env;
+ cc_x = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, cc_x), "cc_x");
+ cc_src = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, cc_src), "cc_src");
+ cc_dest = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, cc_dest),
+ "cc_dest");
+ cc_result = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, cc_result),
+ "cc_result");
+ cc_op = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, cc_op), "cc_op");
+ cc_size = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, cc_size),
+ "cc_size");
+ cc_mask = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, cc_mask),
+ "cc_mask");
+
+ env_pc = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, pc),
+ "pc");
+ env_btarget = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, btarget),
+ "btarget");
+ env_btaken = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, btaken),
+ "btaken");
+ for (i = 0; i < 16; i++) {
+ cpu_R[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, regs[i]),
+ regnames_v10[i]);
+ }
+ for (i = 0; i < 16; i++) {
+ cpu_PR[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUCRISState, pregs[i]),
+ pregnames_v10[i]);
+ }
+}
diff --git a/target/i386/Makefile.objs b/target/i386/Makefile.objs
new file mode 100644
index 0000000000..b223d7932b
--- /dev/null
+++ b/target/i386/Makefile.objs
@@ -0,0 +1,7 @@
+obj-y += translate.o helper.o cpu.o bpt_helper.o
+obj-y += excp_helper.o fpu_helper.o cc_helper.o int_helper.o svm_helper.o
+obj-y += smm_helper.o misc_helper.o mem_helper.o seg_helper.o mpx_helper.o
+obj-y += gdbstub.o
+obj-$(CONFIG_SOFTMMU) += machine.o arch_memory_mapping.o arch_dump.o monitor.o
+obj-$(CONFIG_KVM) += kvm.o hyperv.o
+obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o
diff --git a/target/i386/TODO b/target/i386/TODO
new file mode 100644
index 0000000000..a8d69cf87f
--- /dev/null
+++ b/target/i386/TODO
@@ -0,0 +1,31 @@
+Correctness issues:
+
+- some eflags manipulations incorrectly reset bit 0x2.
+- SVM: test, cpu save/restore, SMM save/restore.
+- x86_64: lcall/ljmp intel/amd differences?
+- better code fetch (different exception handling + CS.limit support)
+- user/kernel PUSHL/POPL in helper.c
+- add missing cpuid tests
+- return UD exception if LOCK prefix incorrectly used
+- test ldt limit < 7?
+- fix some 16 bit sp push/pop overflow (pusha/popa, lcall lret)
+- full support of segment limit/rights
+- full x87 exception support
+- improve x87 bit exactness (use bochs code?)
+- DRx register support
+- CR0.AC emulation
+- SSE alignment checks
+
+Optimizations/Features:
+
+- add SVM nested paging support
+- add VMX support
+- add AVX support
+- add SSE5 support
+- fxsave/fxrstor AMD extensions
+- improve monitor/mwait support
+- faster EFLAGS update: consider that SZAP, C and O can be updated
+  separately with a bit field in CC_OP and more state variables.
+- evaluate x87 stack pointer statically
+- find a way to avoid translating the same TB several times depending on
+  whether CR0.TS is set or not.
diff --git a/target/i386/arch_dump.c b/target/i386/arch_dump.c
new file mode 100644
index 0000000000..5a2e4be5d0
--- /dev/null
+++ b/target/i386/arch_dump.c
@@ -0,0 +1,453 @@
+/*
+ * i386 dump support
+ *
+ * Copyright Fujitsu, Corp. 2011, 2012
+ *
+ * Authors:
+ * Wen Congyang <wency@cn.fujitsu.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/cpu-all.h"
+#include "sysemu/dump.h"
+#include "elf.h"
+#include "sysemu/memory_mapping.h"
+
+#ifdef TARGET_X86_64
+typedef struct {
+ target_ulong r15, r14, r13, r12, rbp, rbx, r11, r10;
+ target_ulong r9, r8, rax, rcx, rdx, rsi, rdi, orig_rax;
+ target_ulong rip, cs, eflags;
+ target_ulong rsp, ss;
+ target_ulong fs_base, gs_base;
+ target_ulong ds, es, fs, gs;
+} x86_64_user_regs_struct;
+
+typedef struct {
+ char pad1[32];
+ uint32_t pid;
+ char pad2[76];
+ x86_64_user_regs_struct regs;
+ char pad3[8];
+} x86_64_elf_prstatus;
+
+static int x86_64_write_elf64_note(WriteCoreDumpFunction f,
+ CPUX86State *env, int id,
+ void *opaque)
+{
+ x86_64_user_regs_struct regs;
+ Elf64_Nhdr *note;
+ char *buf;
+ int descsz, note_size, name_size = 5;
+ const char *name = "CORE";
+ int ret;
+
+ regs.r15 = env->regs[15];
+ regs.r14 = env->regs[14];
+ regs.r13 = env->regs[13];
+ regs.r12 = env->regs[12];
+ regs.r11 = env->regs[11];
+ regs.r10 = env->regs[10];
+ regs.r9 = env->regs[9];
+ regs.r8 = env->regs[8];
+ regs.rbp = env->regs[R_EBP];
+ regs.rsp = env->regs[R_ESP];
+ regs.rdi = env->regs[R_EDI];
+ regs.rsi = env->regs[R_ESI];
+ regs.rdx = env->regs[R_EDX];
+ regs.rcx = env->regs[R_ECX];
+ regs.rbx = env->regs[R_EBX];
+ regs.rax = env->regs[R_EAX];
+ regs.rip = env->eip;
+ regs.eflags = env->eflags;
+
+ regs.orig_rax = 0; /* FIXME */
+ regs.cs = env->segs[R_CS].selector;
+ regs.ss = env->segs[R_SS].selector;
+ regs.fs_base = env->segs[R_FS].base;
+ regs.gs_base = env->segs[R_GS].base;
+ regs.ds = env->segs[R_DS].selector;
+ regs.es = env->segs[R_ES].selector;
+ regs.fs = env->segs[R_FS].selector;
+ regs.gs = env->segs[R_GS].selector;
+
+ descsz = sizeof(x86_64_elf_prstatus);
+ note_size = ((sizeof(Elf64_Nhdr) + 3) / 4 + (name_size + 3) / 4 +
+ (descsz + 3) / 4) * 4;
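+    /*
+     * ELF note layout: the header, the name and the descriptor are each
+     * padded to a 4-byte boundary; the 5-byte "CORE" name (including its
+     * NUL) rounds up to 8 bytes.
+     */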
+ note = g_malloc0(note_size);
+ note->n_namesz = cpu_to_le32(name_size);
+ note->n_descsz = cpu_to_le32(descsz);
+ note->n_type = cpu_to_le32(NT_PRSTATUS);
+ buf = (char *)note;
+ buf += ((sizeof(Elf64_Nhdr) + 3) / 4) * 4;
+ memcpy(buf, name, name_size);
+ buf += ((name_size + 3) / 4) * 4;
+ memcpy(buf + 32, &id, 4); /* pr_pid */
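+    /*
+     * Advance from the start of the prstatus descriptor to its regs field:
+     * descsz minus the regs and the trailing 8-byte pad
+     * (sizeof(target_ulong)) leaves pad1 + pid + pad2.
+     */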
+    buf += descsz - sizeof(x86_64_user_regs_struct) - sizeof(target_ulong);
+ memcpy(buf, &regs, sizeof(x86_64_user_regs_struct));
+
+ ret = f(note, note_size, opaque);
+ g_free(note);
+ if (ret < 0) {
+ return -1;
+ }
+
+ return 0;
+}
+#endif
+
+typedef struct {
+ uint32_t ebx, ecx, edx, esi, edi, ebp, eax;
+ unsigned short ds, __ds, es, __es;
+ unsigned short fs, __fs, gs, __gs;
+ uint32_t orig_eax, eip;
+ unsigned short cs, __cs;
+ uint32_t eflags, esp;
+ unsigned short ss, __ss;
+} x86_user_regs_struct;
+
+typedef struct {
+ char pad1[24];
+ uint32_t pid;
+ char pad2[44];
+ x86_user_regs_struct regs;
+ char pad3[4];
+} x86_elf_prstatus;
+
+static void x86_fill_elf_prstatus(x86_elf_prstatus *prstatus, CPUX86State *env,
+ int id)
+{
+ memset(prstatus, 0, sizeof(x86_elf_prstatus));
+ prstatus->regs.ebp = env->regs[R_EBP] & 0xffffffff;
+ prstatus->regs.esp = env->regs[R_ESP] & 0xffffffff;
+ prstatus->regs.edi = env->regs[R_EDI] & 0xffffffff;
+ prstatus->regs.esi = env->regs[R_ESI] & 0xffffffff;
+ prstatus->regs.edx = env->regs[R_EDX] & 0xffffffff;
+ prstatus->regs.ecx = env->regs[R_ECX] & 0xffffffff;
+ prstatus->regs.ebx = env->regs[R_EBX] & 0xffffffff;
+ prstatus->regs.eax = env->regs[R_EAX] & 0xffffffff;
+ prstatus->regs.eip = env->eip & 0xffffffff;
+ prstatus->regs.eflags = env->eflags & 0xffffffff;
+
+ prstatus->regs.cs = env->segs[R_CS].selector;
+ prstatus->regs.ss = env->segs[R_SS].selector;
+ prstatus->regs.ds = env->segs[R_DS].selector;
+ prstatus->regs.es = env->segs[R_ES].selector;
+ prstatus->regs.fs = env->segs[R_FS].selector;
+ prstatus->regs.gs = env->segs[R_GS].selector;
+
+ prstatus->pid = id;
+}
+
+static int x86_write_elf64_note(WriteCoreDumpFunction f, CPUX86State *env,
+ int id, void *opaque)
+{
+ x86_elf_prstatus prstatus;
+ Elf64_Nhdr *note;
+ char *buf;
+ int descsz, note_size, name_size = 5;
+ const char *name = "CORE";
+ int ret;
+
+ x86_fill_elf_prstatus(&prstatus, env, id);
+ descsz = sizeof(x86_elf_prstatus);
+ note_size = ((sizeof(Elf64_Nhdr) + 3) / 4 + (name_size + 3) / 4 +
+ (descsz + 3) / 4) * 4;
+ note = g_malloc0(note_size);
+ note->n_namesz = cpu_to_le32(name_size);
+ note->n_descsz = cpu_to_le32(descsz);
+ note->n_type = cpu_to_le32(NT_PRSTATUS);
+ buf = (char *)note;
+ buf += ((sizeof(Elf64_Nhdr) + 3) / 4) * 4;
+ memcpy(buf, name, name_size);
+ buf += ((name_size + 3) / 4) * 4;
+ memcpy(buf, &prstatus, sizeof(prstatus));
+
+ ret = f(note, note_size, opaque);
+ g_free(note);
+ if (ret < 0) {
+ return -1;
+ }
+
+ return 0;
+}
+
+int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
+ int cpuid, void *opaque)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ int ret;
+#ifdef TARGET_X86_64
+ X86CPU *first_x86_cpu = X86_CPU(first_cpu);
+ bool lma = !!(first_x86_cpu->env.hflags & HF_LMA_MASK);
+
+ if (lma) {
+ ret = x86_64_write_elf64_note(f, &cpu->env, cpuid, opaque);
+ } else {
+#endif
+ ret = x86_write_elf64_note(f, &cpu->env, cpuid, opaque);
+#ifdef TARGET_X86_64
+ }
+#endif
+
+ return ret;
+}
+
+int x86_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
+ int cpuid, void *opaque)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ x86_elf_prstatus prstatus;
+ Elf32_Nhdr *note;
+ char *buf;
+ int descsz, note_size, name_size = 5;
+ const char *name = "CORE";
+ int ret;
+
+ x86_fill_elf_prstatus(&prstatus, &cpu->env, cpuid);
+ descsz = sizeof(x86_elf_prstatus);
+ note_size = ((sizeof(Elf32_Nhdr) + 3) / 4 + (name_size + 3) / 4 +
+ (descsz + 3) / 4) * 4;
+ note = g_malloc0(note_size);
+ note->n_namesz = cpu_to_le32(name_size);
+ note->n_descsz = cpu_to_le32(descsz);
+ note->n_type = cpu_to_le32(NT_PRSTATUS);
+ buf = (char *)note;
+ buf += ((sizeof(Elf32_Nhdr) + 3) / 4) * 4;
+ memcpy(buf, name, name_size);
+ buf += ((name_size + 3) / 4) * 4;
+ memcpy(buf, &prstatus, sizeof(prstatus));
+
+ ret = f(note, note_size, opaque);
+ g_free(note);
+ if (ret < 0) {
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Please increment QEMUCPUSTATE_VERSION if you change the definition of
+ * QEMUCPUState, and update the tools that use this information accordingly.
+ */
+#define QEMUCPUSTATE_VERSION (1)
+
+struct QEMUCPUSegment {
+ uint32_t selector;
+ uint32_t limit;
+ uint32_t flags;
+ uint32_t pad;
+ uint64_t base;
+};
+
+typedef struct QEMUCPUSegment QEMUCPUSegment;
+
+struct QEMUCPUState {
+ uint32_t version;
+ uint32_t size;
+ uint64_t rax, rbx, rcx, rdx, rsi, rdi, rsp, rbp;
+ uint64_t r8, r9, r10, r11, r12, r13, r14, r15;
+ uint64_t rip, rflags;
+ QEMUCPUSegment cs, ds, es, fs, gs, ss;
+ QEMUCPUSegment ldt, tr, gdt, idt;
+ uint64_t cr[5];
+};
+
+typedef struct QEMUCPUState QEMUCPUState;
+
+static void copy_segment(QEMUCPUSegment *d, SegmentCache *s)
+{
+ d->pad = 0;
+ d->selector = s->selector;
+ d->limit = s->limit;
+ d->flags = s->flags;
+ d->base = s->base;
+}
+
+static void qemu_get_cpustate(QEMUCPUState *s, CPUX86State *env)
+{
+ memset(s, 0, sizeof(QEMUCPUState));
+
+ s->version = QEMUCPUSTATE_VERSION;
+ s->size = sizeof(QEMUCPUState);
+
+ s->rax = env->regs[R_EAX];
+ s->rbx = env->regs[R_EBX];
+ s->rcx = env->regs[R_ECX];
+ s->rdx = env->regs[R_EDX];
+ s->rsi = env->regs[R_ESI];
+ s->rdi = env->regs[R_EDI];
+ s->rsp = env->regs[R_ESP];
+ s->rbp = env->regs[R_EBP];
+#ifdef TARGET_X86_64
+ s->r8 = env->regs[8];
+ s->r9 = env->regs[9];
+ s->r10 = env->regs[10];
+ s->r11 = env->regs[11];
+ s->r12 = env->regs[12];
+ s->r13 = env->regs[13];
+ s->r14 = env->regs[14];
+ s->r15 = env->regs[15];
+#endif
+ s->rip = env->eip;
+ s->rflags = env->eflags;
+
+ copy_segment(&s->cs, &env->segs[R_CS]);
+ copy_segment(&s->ds, &env->segs[R_DS]);
+ copy_segment(&s->es, &env->segs[R_ES]);
+ copy_segment(&s->fs, &env->segs[R_FS]);
+ copy_segment(&s->gs, &env->segs[R_GS]);
+ copy_segment(&s->ss, &env->segs[R_SS]);
+ copy_segment(&s->ldt, &env->ldt);
+ copy_segment(&s->tr, &env->tr);
+ copy_segment(&s->gdt, &env->gdt);
+ copy_segment(&s->idt, &env->idt);
+
+ s->cr[0] = env->cr[0];
+ s->cr[1] = env->cr[1];
+ s->cr[2] = env->cr[2];
+ s->cr[3] = env->cr[3];
+ s->cr[4] = env->cr[4];
+}
+
+static inline int cpu_write_qemu_note(WriteCoreDumpFunction f,
+ CPUX86State *env,
+ void *opaque,
+ int type)
+{
+ QEMUCPUState state;
+ Elf64_Nhdr *note64;
+ Elf32_Nhdr *note32;
+ void *note;
+ char *buf;
+ int descsz, note_size, name_size = 5, note_head_size;
+ const char *name = "QEMU";
+ int ret;
+
+ qemu_get_cpustate(&state, env);
+
+ descsz = sizeof(state);
+ if (type == 0) {
+ note_head_size = sizeof(Elf32_Nhdr);
+ } else {
+ note_head_size = sizeof(Elf64_Nhdr);
+ }
+ note_size = ((note_head_size + 3) / 4 + (name_size + 3) / 4 +
+ (descsz + 3) / 4) * 4;
+ note = g_malloc0(note_size);
+ if (type == 0) {
+ note32 = note;
+ note32->n_namesz = cpu_to_le32(name_size);
+ note32->n_descsz = cpu_to_le32(descsz);
+ note32->n_type = 0;
+ } else {
+ note64 = note;
+ note64->n_namesz = cpu_to_le32(name_size);
+ note64->n_descsz = cpu_to_le32(descsz);
+ note64->n_type = 0;
+ }
+ buf = note;
+ buf += ((note_head_size + 3) / 4) * 4;
+ memcpy(buf, name, name_size);
+ buf += ((name_size + 3) / 4) * 4;
+ memcpy(buf, &state, sizeof(state));
+
+ ret = f(note, note_size, opaque);
+ g_free(note);
+ if (ret < 0) {
+ return -1;
+ }
+
+ return 0;
+}
+
+int x86_cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cs,
+ void *opaque)
+{
+ X86CPU *cpu = X86_CPU(cs);
+
+ return cpu_write_qemu_note(f, &cpu->env, opaque, 1);
+}
+
+int x86_cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cs,
+ void *opaque)
+{
+ X86CPU *cpu = X86_CPU(cs);
+
+ return cpu_write_qemu_note(f, &cpu->env, opaque, 0);
+}
+
+int cpu_get_dump_info(ArchDumpInfo *info,
+ const GuestPhysBlockList *guest_phys_blocks)
+{
+ bool lma = false;
+ GuestPhysBlock *block;
+
+#ifdef TARGET_X86_64
+ X86CPU *first_x86_cpu = X86_CPU(first_cpu);
+
+ lma = !!(first_x86_cpu->env.hflags & HF_LMA_MASK);
+#endif
+
+ if (lma) {
+ info->d_machine = EM_X86_64;
+ } else {
+ info->d_machine = EM_386;
+ }
+ info->d_endian = ELFDATA2LSB;
+
+ if (lma) {
+ info->d_class = ELFCLASS64;
+ } else {
+ info->d_class = ELFCLASS32;
+
+ QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) {
+ if (block->target_end > UINT_MAX) {
+ /* The memory size is greater than 4G */
+ info->d_class = ELFCLASS64;
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+ssize_t cpu_get_note_size(int class, int machine, int nr_cpus)
+{
+ int name_size = 5; /* "CORE" or "QEMU" */
+ size_t elf_note_size = 0;
+ size_t qemu_note_size = 0;
+ int elf_desc_size = 0;
+ int qemu_desc_size = 0;
+ int note_head_size;
+
+ if (class == ELFCLASS32) {
+ note_head_size = sizeof(Elf32_Nhdr);
+ } else {
+ note_head_size = sizeof(Elf64_Nhdr);
+ }
+
+ if (machine == EM_386) {
+ elf_desc_size = sizeof(x86_elf_prstatus);
+ }
+#ifdef TARGET_X86_64
+ else {
+ elf_desc_size = sizeof(x86_64_elf_prstatus);
+ }
+#endif
+ qemu_desc_size = sizeof(QEMUCPUState);
+
+ elf_note_size = ((note_head_size + 3) / 4 + (name_size + 3) / 4 +
+ (elf_desc_size + 3) / 4) * 4;
+ qemu_note_size = ((note_head_size + 3) / 4 + (name_size + 3) / 4 +
+ (qemu_desc_size + 3) / 4) * 4;
+
+ return (elf_note_size + qemu_note_size) * nr_cpus;
+}
diff --git a/target/i386/arch_memory_mapping.c b/target/i386/arch_memory_mapping.c
new file mode 100644
index 0000000000..88f341e1bb
--- /dev/null
+++ b/target/i386/arch_memory_mapping.c
@@ -0,0 +1,281 @@
+/*
+ * i386 memory mapping
+ *
+ * Copyright Fujitsu, Corp. 2011, 2012
+ *
+ * Authors:
+ * Wen Congyang <wency@cn.fujitsu.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/cpu-all.h"
+#include "sysemu/memory_mapping.h"
+
+/* PAE Paging or IA-32e Paging */
+static void walk_pte(MemoryMappingList *list, AddressSpace *as,
+ hwaddr pte_start_addr,
+ int32_t a20_mask, target_ulong start_line_addr)
+{
+ hwaddr pte_addr, start_paddr;
+ uint64_t pte;
+ target_ulong start_vaddr;
+ int i;
+
+ for (i = 0; i < 512; i++) {
+ pte_addr = (pte_start_addr + i * 8) & a20_mask;
+ pte = address_space_ldq(as, pte_addr, MEMTXATTRS_UNSPECIFIED, NULL);
+ if (!(pte & PG_PRESENT_MASK)) {
+ /* not present */
+ continue;
+ }
+
+ start_paddr = (pte & ~0xfff) & ~(0x1ULL << 63);
+ if (cpu_physical_memory_is_io(start_paddr)) {
+ /* I/O region */
+ continue;
+ }
+
+ start_vaddr = start_line_addr | ((i & 0x1ff) << 12);
+ memory_mapping_list_add_merge_sorted(list, start_paddr,
+ start_vaddr, 1 << 12);
+ }
+}
+
+/* 32-bit Paging */
+static void walk_pte2(MemoryMappingList *list, AddressSpace *as,
+ hwaddr pte_start_addr, int32_t a20_mask,
+ target_ulong start_line_addr)
+{
+ hwaddr pte_addr, start_paddr;
+ uint32_t pte;
+ target_ulong start_vaddr;
+ int i;
+
+ for (i = 0; i < 1024; i++) {
+ pte_addr = (pte_start_addr + i * 4) & a20_mask;
+ pte = address_space_ldl(as, pte_addr, MEMTXATTRS_UNSPECIFIED, NULL);
+ if (!(pte & PG_PRESENT_MASK)) {
+ /* not present */
+ continue;
+ }
+
+ start_paddr = pte & ~0xfff;
+ if (cpu_physical_memory_is_io(start_paddr)) {
+ /* I/O region */
+ continue;
+ }
+
+ start_vaddr = start_line_addr | ((i & 0x3ff) << 12);
+ memory_mapping_list_add_merge_sorted(list, start_paddr,
+ start_vaddr, 1 << 12);
+ }
+}
+
+/* PAE Paging or IA-32e Paging */
+#define PLM4_ADDR_MASK 0xffffffffff000ULL /* selects bits 51:12 */
+
+static void walk_pde(MemoryMappingList *list, AddressSpace *as,
+ hwaddr pde_start_addr,
+ int32_t a20_mask, target_ulong start_line_addr)
+{
+ hwaddr pde_addr, pte_start_addr, start_paddr;
+ uint64_t pde;
+ target_ulong line_addr, start_vaddr;
+ int i;
+
+ for (i = 0; i < 512; i++) {
+ pde_addr = (pde_start_addr + i * 8) & a20_mask;
+ pde = address_space_ldq(as, pde_addr, MEMTXATTRS_UNSPECIFIED, NULL);
+ if (!(pde & PG_PRESENT_MASK)) {
+ /* not present */
+ continue;
+ }
+
+ line_addr = start_line_addr | ((i & 0x1ff) << 21);
+ if (pde & PG_PSE_MASK) {
+ /* 2 MB page */
+ start_paddr = (pde & ~0x1fffff) & ~(0x1ULL << 63);
+ if (cpu_physical_memory_is_io(start_paddr)) {
+ /* I/O region */
+ continue;
+ }
+ start_vaddr = line_addr;
+ memory_mapping_list_add_merge_sorted(list, start_paddr,
+ start_vaddr, 1 << 21);
+ continue;
+ }
+
+ pte_start_addr = (pde & PLM4_ADDR_MASK) & a20_mask;
+ walk_pte(list, as, pte_start_addr, a20_mask, line_addr);
+ }
+}
+
+/* 32-bit Paging */
+static void walk_pde2(MemoryMappingList *list, AddressSpace *as,
+ hwaddr pde_start_addr, int32_t a20_mask,
+ bool pse)
+{
+ hwaddr pde_addr, pte_start_addr, start_paddr, high_paddr;
+ uint32_t pde;
+ target_ulong line_addr, start_vaddr;
+ int i;
+
+ for (i = 0; i < 1024; i++) {
+ pde_addr = (pde_start_addr + i * 4) & a20_mask;
+ pde = address_space_ldl(as, pde_addr, MEMTXATTRS_UNSPECIFIED, NULL);
+ if (!(pde & PG_PRESENT_MASK)) {
+ /* not present */
+ continue;
+ }
+
+ line_addr = (((unsigned int)i & 0x3ff) << 22);
+ if ((pde & PG_PSE_MASK) && pse) {
+ /*
+ * 4 MB page:
+ * bits 39:32 are bits 20:13 of the PDE
+             * bits 31:22 are bits 31:22 of the PDE
+ */
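+            /*
+             * 0x1fe000 masks PDE bits 20:13; shifting them left by 19 moves
+             * them to physical address bits 39:32.
+             */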
+ high_paddr = ((hwaddr)(pde & 0x1fe000) << 19);
+ start_paddr = (pde & ~0x3fffff) | high_paddr;
+ if (cpu_physical_memory_is_io(start_paddr)) {
+ /* I/O region */
+ continue;
+ }
+ start_vaddr = line_addr;
+ memory_mapping_list_add_merge_sorted(list, start_paddr,
+ start_vaddr, 1 << 22);
+ continue;
+ }
+
+ pte_start_addr = (pde & ~0xfff) & a20_mask;
+ walk_pte2(list, as, pte_start_addr, a20_mask, line_addr);
+ }
+}
+
+/* PAE Paging */
+static void walk_pdpe2(MemoryMappingList *list, AddressSpace *as,
+ hwaddr pdpe_start_addr, int32_t a20_mask)
+{
+ hwaddr pdpe_addr, pde_start_addr;
+ uint64_t pdpe;
+ target_ulong line_addr;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask;
+ pdpe = address_space_ldq(as, pdpe_addr, MEMTXATTRS_UNSPECIFIED, NULL);
+ if (!(pdpe & PG_PRESENT_MASK)) {
+ /* not present */
+ continue;
+ }
+
+ line_addr = (((unsigned int)i & 0x3) << 30);
+ pde_start_addr = (pdpe & ~0xfff) & a20_mask;
+ walk_pde(list, as, pde_start_addr, a20_mask, line_addr);
+ }
+}
+
+#ifdef TARGET_X86_64
+/* IA-32e Paging */
+static void walk_pdpe(MemoryMappingList *list, AddressSpace *as,
+ hwaddr pdpe_start_addr, int32_t a20_mask,
+ target_ulong start_line_addr)
+{
+ hwaddr pdpe_addr, pde_start_addr, start_paddr;
+ uint64_t pdpe;
+ target_ulong line_addr, start_vaddr;
+ int i;
+
+ for (i = 0; i < 512; i++) {
+ pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask;
+ pdpe = address_space_ldq(as, pdpe_addr, MEMTXATTRS_UNSPECIFIED, NULL);
+ if (!(pdpe & PG_PRESENT_MASK)) {
+ /* not present */
+ continue;
+ }
+
+ line_addr = start_line_addr | ((i & 0x1ffULL) << 30);
+ if (pdpe & PG_PSE_MASK) {
+ /* 1 GB page */
+ start_paddr = (pdpe & ~0x3fffffff) & ~(0x1ULL << 63);
+ if (cpu_physical_memory_is_io(start_paddr)) {
+ /* I/O region */
+ continue;
+ }
+ start_vaddr = line_addr;
+ memory_mapping_list_add_merge_sorted(list, start_paddr,
+ start_vaddr, 1 << 30);
+ continue;
+ }
+
+ pde_start_addr = (pdpe & PLM4_ADDR_MASK) & a20_mask;
+ walk_pde(list, as, pde_start_addr, a20_mask, line_addr);
+ }
+}
+
+/* IA-32e Paging */
+static void walk_pml4e(MemoryMappingList *list, AddressSpace *as,
+ hwaddr pml4e_start_addr, int32_t a20_mask)
+{
+ hwaddr pml4e_addr, pdpe_start_addr;
+ uint64_t pml4e;
+ target_ulong line_addr;
+ int i;
+
+ for (i = 0; i < 512; i++) {
+ pml4e_addr = (pml4e_start_addr + i * 8) & a20_mask;
+ pml4e = address_space_ldq(as, pml4e_addr, MEMTXATTRS_UNSPECIFIED,
+ NULL);
+ if (!(pml4e & PG_PRESENT_MASK)) {
+ /* not present */
+ continue;
+ }
+
+ line_addr = ((i & 0x1ffULL) << 39) | (0xffffULL << 48);
+ pdpe_start_addr = (pml4e & PLM4_ADDR_MASK) & a20_mask;
+ walk_pdpe(list, as, pdpe_start_addr, a20_mask, line_addr);
+ }
+}
+#endif
+
+void x86_cpu_get_memory_mapping(CPUState *cs, MemoryMappingList *list,
+ Error **errp)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ if (!cpu_paging_enabled(cs)) {
+ /* paging is disabled */
+ return;
+ }
+
+ if (env->cr[4] & CR4_PAE_MASK) {
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_LMA_MASK) {
+ hwaddr pml4e_addr;
+
+ pml4e_addr = (env->cr[3] & PLM4_ADDR_MASK) & env->a20_mask;
+ walk_pml4e(list, cs->as, pml4e_addr, env->a20_mask);
+ } else
+#endif
+ {
+ hwaddr pdpe_addr;
+
+ pdpe_addr = (env->cr[3] & ~0x1f) & env->a20_mask;
+ walk_pdpe2(list, cs->as, pdpe_addr, env->a20_mask);
+ }
+ } else {
+ hwaddr pde_addr;
+ bool pse;
+
+ pde_addr = (env->cr[3] & ~0xfff) & env->a20_mask;
+ pse = !!(env->cr[4] & CR4_PSE_MASK);
+ walk_pde2(list, cs->as, pde_addr, env->a20_mask, pse);
+ }
+}
+
diff --git a/target/i386/bpt_helper.c b/target/i386/bpt_helper.c
new file mode 100644
index 0000000000..6fd7fe04a0
--- /dev/null
+++ b/target/i386/bpt_helper.c
@@ -0,0 +1,328 @@
+/*
+ * i386 breakpoint helpers
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+
+
+#ifndef CONFIG_USER_ONLY
+static inline bool hw_local_breakpoint_enabled(unsigned long dr7, int index)
+{
+ return (dr7 >> (index * 2)) & 1;
+}
+
+static inline bool hw_global_breakpoint_enabled(unsigned long dr7, int index)
+{
+    return (dr7 >> (index * 2)) & 2;
+}
+
+static inline bool hw_breakpoint_enabled(unsigned long dr7, int index)
+{
+ return hw_global_breakpoint_enabled(dr7, index) ||
+ hw_local_breakpoint_enabled(dr7, index);
+}
+
+static inline int hw_breakpoint_type(unsigned long dr7, int index)
+{
+ return (dr7 >> (DR7_TYPE_SHIFT + (index * 4))) & 3;
+}
+
+static inline int hw_breakpoint_len(unsigned long dr7, int index)
+{
+ int len = ((dr7 >> (DR7_LEN_SHIFT + (index * 4))) & 3);
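+    /* DR7 LEN encoding: 0 -> 1 byte, 1 -> 2, 3 -> 4, 2 -> 8 bytes. */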
+ return (len == 2) ? 8 : len + 1;
+}
+
+static int hw_breakpoint_insert(CPUX86State *env, int index)
+{
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+ target_ulong dr7 = env->dr[7];
+ target_ulong drN = env->dr[index];
+ int err = 0;
+
+ switch (hw_breakpoint_type(dr7, index)) {
+ case DR7_TYPE_BP_INST:
+ if (hw_breakpoint_enabled(dr7, index)) {
+ err = cpu_breakpoint_insert(cs, drN, BP_CPU,
+ &env->cpu_breakpoint[index]);
+ }
+ break;
+
+ case DR7_TYPE_IO_RW:
+ /* Notice when we should enable calls to bpt_io. */
+ return hw_breakpoint_enabled(env->dr[7], index)
+ ? HF_IOBPT_MASK : 0;
+
+ case DR7_TYPE_DATA_WR:
+ if (hw_breakpoint_enabled(dr7, index)) {
+ err = cpu_watchpoint_insert(cs, drN,
+ hw_breakpoint_len(dr7, index),
+ BP_CPU | BP_MEM_WRITE,
+ &env->cpu_watchpoint[index]);
+ }
+ break;
+
+ case DR7_TYPE_DATA_RW:
+ if (hw_breakpoint_enabled(dr7, index)) {
+ err = cpu_watchpoint_insert(cs, drN,
+ hw_breakpoint_len(dr7, index),
+ BP_CPU | BP_MEM_ACCESS,
+ &env->cpu_watchpoint[index]);
+ }
+ break;
+ }
+ if (err) {
+ env->cpu_breakpoint[index] = NULL;
+ }
+ return 0;
+}
+
+static void hw_breakpoint_remove(CPUX86State *env, int index)
+{
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+
+ switch (hw_breakpoint_type(env->dr[7], index)) {
+ case DR7_TYPE_BP_INST:
+ if (env->cpu_breakpoint[index]) {
+ cpu_breakpoint_remove_by_ref(cs, env->cpu_breakpoint[index]);
+ env->cpu_breakpoint[index] = NULL;
+ }
+ break;
+
+ case DR7_TYPE_DATA_WR:
+ case DR7_TYPE_DATA_RW:
+ if (env->cpu_breakpoint[index]) {
+ cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[index]);
+ env->cpu_breakpoint[index] = NULL;
+ }
+ break;
+
+ case DR7_TYPE_IO_RW:
+ /* HF_IOBPT_MASK cleared elsewhere. */
+ break;
+ }
+}
+
+void cpu_x86_update_dr7(CPUX86State *env, uint32_t new_dr7)
+{
+ target_ulong old_dr7 = env->dr[7];
+ int iobpt = 0;
+ int i;
+
+ new_dr7 |= DR7_FIXED_1;
+
+ /* If nothing is changing except the global/local enable bits,
+ then we can make the change more efficient. */
+ if (((old_dr7 ^ new_dr7) & ~0xff) == 0) {
+ /* Fold the global and local enable bits together into the
+ global fields, then xor to show which registers have
+ changed collective enable state. */
+ int mod = ((old_dr7 | old_dr7 * 2) ^ (new_dr7 | new_dr7 * 2)) & 0xff;
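+        /*
+         * For example, old_dr7 = 1 (L0) and new_dr7 = 2 (G0) both fold to
+         * bit 1, so mod bit 1 stays clear and breakpoint 0 is left alone;
+         * old_dr7 = 0 and new_dr7 = 1 fold to 0 and bit 1, so mod & (2 << 0)
+         * is set and breakpoint 0 is (re)inserted below.
+         */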
+
+ for (i = 0; i < DR7_MAX_BP; i++) {
+ if ((mod & (2 << i * 2)) && !hw_breakpoint_enabled(new_dr7, i)) {
+ hw_breakpoint_remove(env, i);
+ }
+ }
+ env->dr[7] = new_dr7;
+ for (i = 0; i < DR7_MAX_BP; i++) {
+ if (mod & (2 << i * 2) && hw_breakpoint_enabled(new_dr7, i)) {
+ iobpt |= hw_breakpoint_insert(env, i);
+ } else if (hw_breakpoint_type(new_dr7, i) == DR7_TYPE_IO_RW
+ && hw_breakpoint_enabled(new_dr7, i)) {
+ iobpt |= HF_IOBPT_MASK;
+ }
+ }
+ } else {
+ for (i = 0; i < DR7_MAX_BP; i++) {
+ hw_breakpoint_remove(env, i);
+ }
+ env->dr[7] = new_dr7;
+ for (i = 0; i < DR7_MAX_BP; i++) {
+ iobpt |= hw_breakpoint_insert(env, i);
+ }
+ }
+
+ env->hflags = (env->hflags & ~HF_IOBPT_MASK) | iobpt;
+}
+
+static bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update)
+{
+ target_ulong dr6;
+ int reg;
+ bool hit_enabled = false;
+
+ dr6 = env->dr[6] & ~0xf;
+ for (reg = 0; reg < DR7_MAX_BP; reg++) {
+ bool bp_match = false;
+ bool wp_match = false;
+
+ switch (hw_breakpoint_type(env->dr[7], reg)) {
+ case DR7_TYPE_BP_INST:
+ if (env->dr[reg] == env->eip) {
+ bp_match = true;
+ }
+ break;
+ case DR7_TYPE_DATA_WR:
+ case DR7_TYPE_DATA_RW:
+ if (env->cpu_watchpoint[reg] &&
+ env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) {
+ wp_match = true;
+ }
+ break;
+ case DR7_TYPE_IO_RW:
+ break;
+ }
+ if (bp_match || wp_match) {
+ dr6 |= 1 << reg;
+ if (hw_breakpoint_enabled(env->dr[7], reg)) {
+ hit_enabled = true;
+ }
+ }
+ }
+
+ if (hit_enabled || force_dr6_update) {
+ env->dr[6] = dr6;
+ }
+
+ return hit_enabled;
+}
+
+void breakpoint_handler(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ CPUBreakpoint *bp;
+
+ if (cs->watchpoint_hit) {
+ if (cs->watchpoint_hit->flags & BP_CPU) {
+ cs->watchpoint_hit = NULL;
+ if (check_hw_breakpoints(env, false)) {
+ raise_exception(env, EXCP01_DB);
+ } else {
+ cpu_loop_exit_noexc(cs);
+ }
+ }
+ } else {
+ QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
+ if (bp->pc == env->eip) {
+ if (bp->flags & BP_CPU) {
+ check_hw_breakpoints(env, true);
+ raise_exception(env, EXCP01_DB);
+ }
+ break;
+ }
+ }
+ }
+}
+#endif
+
+void helper_single_step(CPUX86State *env)
+{
+#ifndef CONFIG_USER_ONLY
+ check_hw_breakpoints(env, true);
+ env->dr[6] |= DR6_BS;
+#endif
+ raise_exception(env, EXCP01_DB);
+}
+
+void helper_set_dr(CPUX86State *env, int reg, target_ulong t0)
+{
+#ifndef CONFIG_USER_ONLY
+ switch (reg) {
+ case 0: case 1: case 2: case 3:
+ if (hw_breakpoint_enabled(env->dr[7], reg)
+ && hw_breakpoint_type(env->dr[7], reg) != DR7_TYPE_IO_RW) {
+ hw_breakpoint_remove(env, reg);
+ env->dr[reg] = t0;
+ hw_breakpoint_insert(env, reg);
+ } else {
+ env->dr[reg] = t0;
+ }
+ return;
+ case 4:
+ if (env->cr[4] & CR4_DE_MASK) {
+ break;
+ }
+ /* fallthru */
+ case 6:
+ env->dr[6] = t0 | DR6_FIXED_1;
+ return;
+ case 5:
+ if (env->cr[4] & CR4_DE_MASK) {
+ break;
+ }
+ /* fallthru */
+ case 7:
+ cpu_x86_update_dr7(env, t0);
+ return;
+ }
+ raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
+#endif
+}
+
+target_ulong helper_get_dr(CPUX86State *env, int reg)
+{
+ switch (reg) {
+ case 0: case 1: case 2: case 3: case 6: case 7:
+ return env->dr[reg];
+ case 4:
+ if (env->cr[4] & CR4_DE_MASK) {
+ break;
+ } else {
+ return env->dr[6];
+ }
+ case 5:
+ if (env->cr[4] & CR4_DE_MASK) {
+ break;
+ } else {
+ return env->dr[7];
+ }
+ }
+ raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
+}
+
+/* Check if Port I/O is trapped by a breakpoint. */
+void helper_bpt_io(CPUX86State *env, uint32_t port,
+ uint32_t size, target_ulong next_eip)
+{
+#ifndef CONFIG_USER_ONLY
+ target_ulong dr7 = env->dr[7];
+ int i, hit = 0;
+
+ for (i = 0; i < DR7_MAX_BP; ++i) {
+ if (hw_breakpoint_type(dr7, i) == DR7_TYPE_IO_RW
+ && hw_breakpoint_enabled(dr7, i)) {
+ int bpt_len = hw_breakpoint_len(dr7, i);
+ if (port + size - 1 >= env->dr[i]
+ && port <= env->dr[i] + bpt_len - 1) {
+ hit |= 1 << i;
+ }
+ }
+ }
+
+ if (hit) {
+ env->dr[6] = (env->dr[6] & ~0xf) | hit;
+ env->eip = next_eip;
+ raise_exception(env, EXCP01_DB);
+ }
+#endif
+}
diff --git a/target/i386/cc_helper.c b/target/i386/cc_helper.c
new file mode 100644
index 0000000000..83af223c9f
--- /dev/null
+++ b/target/i386/cc_helper.c
@@ -0,0 +1,385 @@
+/*
+ * x86 condition code helpers
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+
+const uint8_t parity_table[256] = {
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+};
+
+#define SHIFT 0
+#include "cc_helper_template.h"
+#undef SHIFT
+
+#define SHIFT 1
+#include "cc_helper_template.h"
+#undef SHIFT
+
+#define SHIFT 2
+#include "cc_helper_template.h"
+#undef SHIFT
+
+#ifdef TARGET_X86_64
+
+#define SHIFT 3
+#include "cc_helper_template.h"
+#undef SHIFT
+
+#endif
+
+static target_ulong compute_all_adcx(target_ulong dst, target_ulong src1,
+ target_ulong src2)
+{
+ return (src1 & ~CC_C) | (dst * CC_C);
+}
+
+static target_ulong compute_all_adox(target_ulong dst, target_ulong src1,
+ target_ulong src2)
+{
+ return (src1 & ~CC_O) | (src2 * CC_O);
+}
+
+static target_ulong compute_all_adcox(target_ulong dst, target_ulong src1,
+ target_ulong src2)
+{
+ return (src1 & ~(CC_C | CC_O)) | (dst * CC_C) | (src2 * CC_O);
+}
+
+target_ulong helper_cc_compute_all(target_ulong dst, target_ulong src1,
+ target_ulong src2, int op)
+{
+ switch (op) {
+ default: /* should never happen */
+ return 0;
+
+ case CC_OP_EFLAGS:
+ return src1;
+ case CC_OP_CLR:
+ return CC_Z | CC_P;
+
+ case CC_OP_MULB:
+ return compute_all_mulb(dst, src1);
+ case CC_OP_MULW:
+ return compute_all_mulw(dst, src1);
+ case CC_OP_MULL:
+ return compute_all_mull(dst, src1);
+
+ case CC_OP_ADDB:
+ return compute_all_addb(dst, src1);
+ case CC_OP_ADDW:
+ return compute_all_addw(dst, src1);
+ case CC_OP_ADDL:
+ return compute_all_addl(dst, src1);
+
+ case CC_OP_ADCB:
+ return compute_all_adcb(dst, src1, src2);
+ case CC_OP_ADCW:
+ return compute_all_adcw(dst, src1, src2);
+ case CC_OP_ADCL:
+ return compute_all_adcl(dst, src1, src2);
+
+ case CC_OP_SUBB:
+ return compute_all_subb(dst, src1);
+ case CC_OP_SUBW:
+ return compute_all_subw(dst, src1);
+ case CC_OP_SUBL:
+ return compute_all_subl(dst, src1);
+
+ case CC_OP_SBBB:
+ return compute_all_sbbb(dst, src1, src2);
+ case CC_OP_SBBW:
+ return compute_all_sbbw(dst, src1, src2);
+ case CC_OP_SBBL:
+ return compute_all_sbbl(dst, src1, src2);
+
+ case CC_OP_LOGICB:
+ return compute_all_logicb(dst, src1);
+ case CC_OP_LOGICW:
+ return compute_all_logicw(dst, src1);
+ case CC_OP_LOGICL:
+ return compute_all_logicl(dst, src1);
+
+ case CC_OP_INCB:
+ return compute_all_incb(dst, src1);
+ case CC_OP_INCW:
+ return compute_all_incw(dst, src1);
+ case CC_OP_INCL:
+ return compute_all_incl(dst, src1);
+
+ case CC_OP_DECB:
+ return compute_all_decb(dst, src1);
+ case CC_OP_DECW:
+ return compute_all_decw(dst, src1);
+ case CC_OP_DECL:
+ return compute_all_decl(dst, src1);
+
+ case CC_OP_SHLB:
+ return compute_all_shlb(dst, src1);
+ case CC_OP_SHLW:
+ return compute_all_shlw(dst, src1);
+ case CC_OP_SHLL:
+ return compute_all_shll(dst, src1);
+
+ case CC_OP_SARB:
+ return compute_all_sarb(dst, src1);
+ case CC_OP_SARW:
+ return compute_all_sarw(dst, src1);
+ case CC_OP_SARL:
+ return compute_all_sarl(dst, src1);
+
+ case CC_OP_BMILGB:
+ return compute_all_bmilgb(dst, src1);
+ case CC_OP_BMILGW:
+ return compute_all_bmilgw(dst, src1);
+ case CC_OP_BMILGL:
+ return compute_all_bmilgl(dst, src1);
+
+ case CC_OP_ADCX:
+ return compute_all_adcx(dst, src1, src2);
+ case CC_OP_ADOX:
+ return compute_all_adox(dst, src1, src2);
+ case CC_OP_ADCOX:
+ return compute_all_adcox(dst, src1, src2);
+
+#ifdef TARGET_X86_64
+ case CC_OP_MULQ:
+ return compute_all_mulq(dst, src1);
+ case CC_OP_ADDQ:
+ return compute_all_addq(dst, src1);
+ case CC_OP_ADCQ:
+ return compute_all_adcq(dst, src1, src2);
+ case CC_OP_SUBQ:
+ return compute_all_subq(dst, src1);
+ case CC_OP_SBBQ:
+ return compute_all_sbbq(dst, src1, src2);
+ case CC_OP_LOGICQ:
+ return compute_all_logicq(dst, src1);
+ case CC_OP_INCQ:
+ return compute_all_incq(dst, src1);
+ case CC_OP_DECQ:
+ return compute_all_decq(dst, src1);
+ case CC_OP_SHLQ:
+ return compute_all_shlq(dst, src1);
+ case CC_OP_SARQ:
+ return compute_all_sarq(dst, src1);
+ case CC_OP_BMILGQ:
+ return compute_all_bmilgq(dst, src1);
+#endif
+ }
+}
+
+uint32_t cpu_cc_compute_all(CPUX86State *env, int op)
+{
+ return helper_cc_compute_all(CC_DST, CC_SRC, CC_SRC2, op);
+}
+
+target_ulong helper_cc_compute_c(target_ulong dst, target_ulong src1,
+ target_ulong src2, int op)
+{
+ switch (op) {
+ default: /* should never happen */
+ case CC_OP_LOGICB:
+ case CC_OP_LOGICW:
+ case CC_OP_LOGICL:
+ case CC_OP_LOGICQ:
+ case CC_OP_CLR:
+ return 0;
+
+ case CC_OP_EFLAGS:
+ case CC_OP_SARB:
+ case CC_OP_SARW:
+ case CC_OP_SARL:
+ case CC_OP_SARQ:
+ case CC_OP_ADOX:
+ return src1 & 1;
+
+ case CC_OP_INCB:
+ case CC_OP_INCW:
+ case CC_OP_INCL:
+ case CC_OP_INCQ:
+ case CC_OP_DECB:
+ case CC_OP_DECW:
+ case CC_OP_DECL:
+ case CC_OP_DECQ:
+ return src1;
+
+ case CC_OP_MULB:
+ case CC_OP_MULW:
+ case CC_OP_MULL:
+ case CC_OP_MULQ:
+ return src1 != 0;
+
+ case CC_OP_ADCX:
+ case CC_OP_ADCOX:
+ return dst;
+
+ case CC_OP_ADDB:
+ return compute_c_addb(dst, src1);
+ case CC_OP_ADDW:
+ return compute_c_addw(dst, src1);
+ case CC_OP_ADDL:
+ return compute_c_addl(dst, src1);
+
+ case CC_OP_ADCB:
+ return compute_c_adcb(dst, src1, src2);
+ case CC_OP_ADCW:
+ return compute_c_adcw(dst, src1, src2);
+ case CC_OP_ADCL:
+ return compute_c_adcl(dst, src1, src2);
+
+ case CC_OP_SUBB:
+ return compute_c_subb(dst, src1);
+ case CC_OP_SUBW:
+ return compute_c_subw(dst, src1);
+ case CC_OP_SUBL:
+ return compute_c_subl(dst, src1);
+
+ case CC_OP_SBBB:
+ return compute_c_sbbb(dst, src1, src2);
+ case CC_OP_SBBW:
+ return compute_c_sbbw(dst, src1, src2);
+ case CC_OP_SBBL:
+ return compute_c_sbbl(dst, src1, src2);
+
+ case CC_OP_SHLB:
+ return compute_c_shlb(dst, src1);
+ case CC_OP_SHLW:
+ return compute_c_shlw(dst, src1);
+ case CC_OP_SHLL:
+ return compute_c_shll(dst, src1);
+
+ case CC_OP_BMILGB:
+ return compute_c_bmilgb(dst, src1);
+ case CC_OP_BMILGW:
+ return compute_c_bmilgw(dst, src1);
+ case CC_OP_BMILGL:
+ return compute_c_bmilgl(dst, src1);
+
+#ifdef TARGET_X86_64
+ case CC_OP_ADDQ:
+ return compute_c_addq(dst, src1);
+ case CC_OP_ADCQ:
+ return compute_c_adcq(dst, src1, src2);
+ case CC_OP_SUBQ:
+ return compute_c_subq(dst, src1);
+ case CC_OP_SBBQ:
+ return compute_c_sbbq(dst, src1, src2);
+ case CC_OP_SHLQ:
+ return compute_c_shlq(dst, src1);
+ case CC_OP_BMILGQ:
+ return compute_c_bmilgq(dst, src1);
+#endif
+ }
+}
+
+void helper_write_eflags(CPUX86State *env, target_ulong t0,
+ uint32_t update_mask)
+{
+ cpu_load_eflags(env, t0, update_mask);
+}
+
+target_ulong helper_read_eflags(CPUX86State *env)
+{
+ uint32_t eflags;
+
+ eflags = cpu_cc_compute_all(env, CC_OP);
+ eflags |= (env->df & DF_MASK);
+ eflags |= env->eflags & ~(VM_MASK | RF_MASK);
+ return eflags;
+}
+
+void helper_clts(CPUX86State *env)
+{
+ env->cr[0] &= ~CR0_TS_MASK;
+ env->hflags &= ~HF_TS_MASK;
+}
+
+void helper_reset_rf(CPUX86State *env)
+{
+ env->eflags &= ~RF_MASK;
+}
+
+void helper_cli(CPUX86State *env)
+{
+ env->eflags &= ~IF_MASK;
+}
+
+void helper_sti(CPUX86State *env)
+{
+ env->eflags |= IF_MASK;
+}
+
+void helper_clac(CPUX86State *env)
+{
+ env->eflags &= ~AC_MASK;
+}
+
+void helper_stac(CPUX86State *env)
+{
+ env->eflags |= AC_MASK;
+}
+
+#if 0
+/* vm86plus instructions */
+void helper_cli_vm(CPUX86State *env)
+{
+ env->eflags &= ~VIF_MASK;
+}
+
+void helper_sti_vm(CPUX86State *env)
+{
+ env->eflags |= VIF_MASK;
+ if (env->eflags & VIP_MASK) {
+ raise_exception_ra(env, EXCP0D_GPF, GETPC());
+ }
+}
+#endif
diff --git a/target/i386/cc_helper_template.h b/target/i386/cc_helper_template.h
new file mode 100644
index 0000000000..607311f195
--- /dev/null
+++ b/target/i386/cc_helper_template.h
@@ -0,0 +1,242 @@
+/*
+ * x86 condition code helpers
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DATA_BITS (1 << (3 + SHIFT))
+
+#if DATA_BITS == 8
+#define SUFFIX b
+#define DATA_TYPE uint8_t
+#elif DATA_BITS == 16
+#define SUFFIX w
+#define DATA_TYPE uint16_t
+#elif DATA_BITS == 32
+#define SUFFIX l
+#define DATA_TYPE uint32_t
+#elif DATA_BITS == 64
+#define SUFFIX q
+#define DATA_TYPE uint64_t
+#else
+#error unhandled operand size
+#endif
+
+#define SIGN_MASK (((DATA_TYPE)1) << (DATA_BITS - 1))
+
+/* dynamic flags computation */
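+/*
+ * Flags are computed lazily: the translated code keeps only the operation
+ * result (dst) and one source operand, and the helpers below reconstruct
+ * the other operand when the flags are actually needed, e.g.
+ * src2 = dst - src1 for additions.
+ */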
+
+static int glue(compute_all_add, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+{
+ int cf, pf, af, zf, sf, of;
+ DATA_TYPE src2 = dst - src1;
+
+ cf = dst < src1;
+ pf = parity_table[(uint8_t)dst];
+ af = (dst ^ src1 ^ src2) & CC_A;
+ zf = (dst == 0) * CC_Z;
+ sf = lshift(dst, 8 - DATA_BITS) & CC_S;
+ of = lshift((src1 ^ src2 ^ -1) & (src1 ^ dst), 12 - DATA_BITS) & CC_O;
+ return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_c_add, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+{
+ return dst < src1;
+}
+
+static int glue(compute_all_adc, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1,
+ DATA_TYPE src3)
+{
+ int cf, pf, af, zf, sf, of;
+ DATA_TYPE src2 = dst - src1 - src3;
+
+ cf = (src3 ? dst <= src1 : dst < src1);
+ pf = parity_table[(uint8_t)dst];
+ af = (dst ^ src1 ^ src2) & 0x10;
+ zf = (dst == 0) << 6;
+ sf = lshift(dst, 8 - DATA_BITS) & 0x80;
+ of = lshift((src1 ^ src2 ^ -1) & (src1 ^ dst), 12 - DATA_BITS) & CC_O;
+ return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_c_adc, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1,
+ DATA_TYPE src3)
+{
+ return src3 ? dst <= src1 : dst < src1;
+}
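The carry-out test in compute_c_adc follows from dst = src1 + src2 + src3, where src3 is the incoming carry (0 or 1): with a carry in, a wrap-around leaves dst equal to or below src1. A small 8-bit illustration (values chosen for illustration only, not part of the patch):

    /* src1 = 0xFF, src2 = 0x00, src3 = 1 (carry in)      */
    /* dst  = (0xFF + 0x00 + 1) & 0xFF = 0x00              */
    /* dst <= src1  ->  carry out = 1, as expected         */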
+
+static int glue(compute_all_sub, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2)
+{
+ int cf, pf, af, zf, sf, of;
+ DATA_TYPE src1 = dst + src2;
+
+ cf = src1 < src2;
+ pf = parity_table[(uint8_t)dst];
+ af = (dst ^ src1 ^ src2) & CC_A;
+ zf = (dst == 0) * CC_Z;
+ sf = lshift(dst, 8 - DATA_BITS) & CC_S;
+ of = lshift((src1 ^ src2) & (src1 ^ dst), 12 - DATA_BITS) & CC_O;
+ return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_c_sub, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2)
+{
+ DATA_TYPE src1 = dst + src2;
+
+ return src1 < src2;
+}
+
+static int glue(compute_all_sbb, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2,
+ DATA_TYPE src3)
+{
+ int cf, pf, af, zf, sf, of;
+ DATA_TYPE src1 = dst + src2 + src3;
+
+ cf = (src3 ? src1 <= src2 : src1 < src2);
+ pf = parity_table[(uint8_t)dst];
+ af = (dst ^ src1 ^ src2) & 0x10;
+ zf = (dst == 0) << 6;
+ sf = lshift(dst, 8 - DATA_BITS) & 0x80;
+ of = lshift((src1 ^ src2) & (src1 ^ dst), 12 - DATA_BITS) & CC_O;
+ return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_c_sbb, SUFFIX)(DATA_TYPE dst, DATA_TYPE src2,
+ DATA_TYPE src3)
+{
+ DATA_TYPE src1 = dst + src2 + src3;
+
+ return (src3 ? src1 <= src2 : src1 < src2);
+}
+
+static int glue(compute_all_logic, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+{
+ int cf, pf, af, zf, sf, of;
+
+ cf = 0;
+ pf = parity_table[(uint8_t)dst];
+ af = 0;
+ zf = (dst == 0) * CC_Z;
+ sf = lshift(dst, 8 - DATA_BITS) & CC_S;
+ of = 0;
+ return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_all_inc, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+{
+ int cf, pf, af, zf, sf, of;
+ DATA_TYPE src2;
+
+ cf = src1;
+ src1 = dst - 1;
+ src2 = 1;
+ pf = parity_table[(uint8_t)dst];
+ af = (dst ^ src1 ^ src2) & CC_A;
+ zf = (dst == 0) * CC_Z;
+ sf = lshift(dst, 8 - DATA_BITS) & CC_S;
+ of = (dst == SIGN_MASK) * CC_O;
+ return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_all_dec, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+{
+ int cf, pf, af, zf, sf, of;
+ DATA_TYPE src2;
+
+ cf = src1;
+ src1 = dst + 1;
+ src2 = 1;
+ pf = parity_table[(uint8_t)dst];
+ af = (dst ^ src1 ^ src2) & CC_A;
+ zf = (dst == 0) * CC_Z;
+ sf = lshift(dst, 8 - DATA_BITS) & CC_S;
+ of = (dst == SIGN_MASK - 1) * CC_O;
+ return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_all_shl, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+{
+ int cf, pf, af, zf, sf, of;
+
+ cf = (src1 >> (DATA_BITS - 1)) & CC_C;
+ pf = parity_table[(uint8_t)dst];
+ af = 0; /* undefined */
+ zf = (dst == 0) * CC_Z;
+ sf = lshift(dst, 8 - DATA_BITS) & CC_S;
+ /* of is defined iff shift count == 1 */
+ of = lshift(src1 ^ dst, 12 - DATA_BITS) & CC_O;
+ return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_c_shl, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+{
+ return (src1 >> (DATA_BITS - 1)) & CC_C;
+}
+
+static int glue(compute_all_sar, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+{
+ int cf, pf, af, zf, sf, of;
+
+ cf = src1 & 1;
+ pf = parity_table[(uint8_t)dst];
+ af = 0; /* undefined */
+ zf = (dst == 0) * CC_Z;
+ sf = lshift(dst, 8 - DATA_BITS) & CC_S;
+ /* of is defined iff shift count == 1 */
+ of = lshift(src1 ^ dst, 12 - DATA_BITS) & CC_O;
+ return cf | pf | af | zf | sf | of;
+}
+
+/* NOTE: we compute the flags as the P4 does. On older CPUs, only OF and
+   CF are modified, and emulating that would be slower. Note also that
+   src1 is not truncated to DATA_TYPE when computing the carry. */
+static int glue(compute_all_mul, SUFFIX)(DATA_TYPE dst, target_long src1)
+{
+ int cf, pf, af, zf, sf, of;
+
+ cf = (src1 != 0);
+ pf = parity_table[(uint8_t)dst];
+ af = 0; /* undefined */
+ zf = (dst == 0) * CC_Z;
+ sf = lshift(dst, 8 - DATA_BITS) & CC_S;
+ of = cf * CC_O;
+ return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_all_bmilg, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+{
+ int cf, pf, af, zf, sf, of;
+
+ cf = (src1 == 0);
+ pf = 0; /* undefined */
+ af = 0; /* undefined */
+ zf = (dst == 0) * CC_Z;
+ sf = lshift(dst, 8 - DATA_BITS) & CC_S;
+ of = 0;
+ return cf | pf | af | zf | sf | of;
+}
+
+static int glue(compute_c_bmilg, SUFFIX)(DATA_TYPE dst, DATA_TYPE src1)
+{
+ return src1 == 0;
+}
+
+#undef DATA_BITS
+#undef SIGN_MASK
+#undef DATA_TYPE
+#undef DATA_MASK
+#undef SUFFIX
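This header is intended to be included once per operand size, with SHIFT selecting the width and SUFFIX/DATA_TYPE derived from it. A minimal sketch of how an including file might instantiate the 8-bit and 16-bit variants (the includer shown here is an assumption for illustration, not part of this patch; note the template does not undefine SHIFT itself):

    #define SHIFT 0                      /* 8-bit:  compute_*_addb, ... */
    #include "cc_helper_template.h"
    #undef SHIFT

    #define SHIFT 1                      /* 16-bit: compute_*_addw, ... */
    #include "cc_helper_template.h"
    #undef SHIFT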
diff --git a/target/i386/cpu-qom.h b/target/i386/cpu-qom.h
new file mode 100644
index 0000000000..7c9a07ae65
--- /dev/null
+++ b/target/i386/cpu-qom.h
@@ -0,0 +1,77 @@
+/*
+ * QEMU x86 CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+#ifndef QEMU_I386_CPU_QOM_H
+#define QEMU_I386_CPU_QOM_H
+
+#include "qom/cpu.h"
+#include "qemu/notify.h"
+
+#ifdef TARGET_X86_64
+#define TYPE_X86_CPU "x86_64-cpu"
+#else
+#define TYPE_X86_CPU "i386-cpu"
+#endif
+
+#define X86_CPU_CLASS(klass) \
+ OBJECT_CLASS_CHECK(X86CPUClass, (klass), TYPE_X86_CPU)
+#define X86_CPU(obj) \
+ OBJECT_CHECK(X86CPU, (obj), TYPE_X86_CPU)
+#define X86_CPU_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(X86CPUClass, (obj), TYPE_X86_CPU)
+
+/**
+ * X86CPUDefinition:
+ *
+ * CPU model definition data that has not yet been converted to QOM
+ * per-subclass property defaults.
+ */
+typedef struct X86CPUDefinition X86CPUDefinition;
+
+/**
+ * X86CPUClass:
+ * @cpu_def: CPU model definition
+ * @kvm_required: Whether CPU model requires KVM to be enabled.
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ *
+ * An x86 CPU model or family.
+ */
+typedef struct X86CPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ /* Should be eventually replaced by subclass-specific property defaults. */
+ X86CPUDefinition *cpu_def;
+
+ bool kvm_required;
+
+ /* Optional description of CPU model.
+ * If unavailable, cpu_def->model_id is used */
+ const char *model_description;
+
+ DeviceRealize parent_realize;
+ DeviceUnrealize parent_unrealize;
+ void (*parent_reset)(CPUState *cpu);
+} X86CPUClass;
+
+typedef struct X86CPU X86CPU;
+
+#endif
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
new file mode 100644
index 0000000000..de1f30eeda
--- /dev/null
+++ b/target/i386/cpu.c
@@ -0,0 +1,3755 @@
+/*
+ * i386 CPUID helper functions
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu/cutils.h"
+
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "sysemu/kvm.h"
+#include "sysemu/cpus.h"
+#include "kvm_i386.h"
+
+#include "qemu/error-report.h"
+#include "qemu/option.h"
+#include "qemu/config-file.h"
+#include "qapi/qmp/qerror.h"
+
+#include "qapi-types.h"
+#include "qapi-visit.h"
+#include "qapi/visitor.h"
+#include "sysemu/arch_init.h"
+
+#if defined(CONFIG_KVM)
+#include <linux/kvm_para.h>
+#endif
+
+#include "sysemu/sysemu.h"
+#include "hw/qdev-properties.h"
+#include "hw/i386/topology.h"
+#ifndef CONFIG_USER_ONLY
+#include "exec/address-spaces.h"
+#include "hw/hw.h"
+#include "hw/xen/xen.h"
+#include "hw/i386/apic_internal.h"
+#endif
+
+
+/* Cache topology CPUID constants: */
+
+/* CPUID Leaf 2 Descriptors */
+
+#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
+#define CPUID_2_L1I_32KB_8WAY_64B 0x30
+#define CPUID_2_L2_2MB_8WAY_64B 0x7d
+#define CPUID_2_L3_16MB_16WAY_64B 0x4d
+
+
+/* CPUID Leaf 4 constants: */
+
+/* EAX: */
+#define CPUID_4_TYPE_DCACHE 1
+#define CPUID_4_TYPE_ICACHE 2
+#define CPUID_4_TYPE_UNIFIED 3
+
+#define CPUID_4_LEVEL(l) ((l) << 5)
+
+#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
+#define CPUID_4_FULLY_ASSOC (1 << 9)
+
+/* EDX: */
+#define CPUID_4_NO_INVD_SHARING (1 << 0)
+#define CPUID_4_INCLUSIVE (1 << 1)
+#define CPUID_4_COMPLEX_IDX (1 << 2)
+
+#define ASSOC_FULL 0xFF
+
+/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
+#define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
+ a == 2 ? 0x2 : \
+ a == 4 ? 0x4 : \
+ a == 8 ? 0x6 : \
+ a == 16 ? 0x8 : \
+ a == 32 ? 0xA : \
+ a == 48 ? 0xB : \
+ a == 64 ? 0xC : \
+ a == 96 ? 0xD : \
+ a == 128 ? 0xE : \
+ a == ASSOC_FULL ? 0xF : \
+ 0 /* invalid value */)
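A worked illustration of the encoding above (shown for reference only):

    /* e.g. AMD_ENC_ASSOC(8)          == 0x6   (8-way)
     *      AMD_ENC_ASSOC(16)         == 0x8   (16-way)
     *      AMD_ENC_ASSOC(ASSOC_FULL) == 0xF   (fully associative) */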
+
+
+/* Definitions of the hardcoded cache entries we expose: */
+
+/* L1 data cache: */
+#define L1D_LINE_SIZE 64
+#define L1D_ASSOCIATIVITY 8
+#define L1D_SETS 64
+#define L1D_PARTITIONS 1
+/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
+#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
+/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
+#define L1D_LINES_PER_TAG 1
+#define L1D_SIZE_KB_AMD 64
+#define L1D_ASSOCIATIVITY_AMD 2
+
+/* L1 instruction cache: */
+#define L1I_LINE_SIZE 64
+#define L1I_ASSOCIATIVITY 8
+#define L1I_SETS 64
+#define L1I_PARTITIONS 1
+/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
+#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
+/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
+#define L1I_LINES_PER_TAG 1
+#define L1I_SIZE_KB_AMD 64
+#define L1I_ASSOCIATIVITY_AMD 2
+
+/* Level 2 unified cache: */
+#define L2_LINE_SIZE 64
+#define L2_ASSOCIATIVITY 16
+#define L2_SETS 4096
+#define L2_PARTITIONS 1
+/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
+/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
+#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
+/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
+#define L2_LINES_PER_TAG 1
+#define L2_SIZE_KB_AMD 512
+
+/* Level 3 unified cache: */
+#define L3_SIZE_KB 0 /* disabled */
+#define L3_ASSOCIATIVITY 0 /* disabled */
+#define L3_LINES_PER_TAG 0 /* disabled */
+#define L3_LINE_SIZE 0 /* disabled */
+#define L3_N_LINE_SIZE 64
+#define L3_N_ASSOCIATIVITY 16
+#define L3_N_SETS 16384
+#define L3_N_PARTITIONS 1
+#define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
+#define L3_N_LINES_PER_TAG 1
+#define L3_N_SIZE_KB_AMD 16384
+
+/* TLB definitions: */
+
+#define L1_DTLB_2M_ASSOC 1
+#define L1_DTLB_2M_ENTRIES 255
+#define L1_DTLB_4K_ASSOC 1
+#define L1_DTLB_4K_ENTRIES 255
+
+#define L1_ITLB_2M_ASSOC 1
+#define L1_ITLB_2M_ENTRIES 255
+#define L1_ITLB_4K_ASSOC 1
+#define L1_ITLB_4K_ENTRIES 255
+
+#define L2_DTLB_2M_ASSOC 0 /* disabled */
+#define L2_DTLB_2M_ENTRIES 0 /* disabled */
+#define L2_DTLB_4K_ASSOC 4
+#define L2_DTLB_4K_ENTRIES 512
+
+#define L2_ITLB_2M_ASSOC 0 /* disabled */
+#define L2_ITLB_2M_ENTRIES 0 /* disabled */
+#define L2_ITLB_4K_ASSOC 4
+#define L2_ITLB_4K_ENTRIES 512
+
+
+
+static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
+ uint32_t vendor2, uint32_t vendor3)
+{
+ int i;
+ for (i = 0; i < 4; i++) {
+ dst[i] = vendor1 >> (8 * i);
+ dst[i + 4] = vendor2 >> (8 * i);
+ dst[i + 8] = vendor3 >> (8 * i);
+ }
+ dst[CPUID_VENDOR_SZ] = '\0';
+}
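Each CPUID vendor register holds four ASCII bytes, least-significant byte first, so the three words reassemble into the 12-character vendor string. For example, with the well-known Intel values (illustrative usage only):

    /* ebx = 0x756e6547 ("Genu"), edx = 0x49656e69 ("ineI"), ecx = 0x6c65746e ("ntel") */
    char vendor[CPUID_VENDOR_SZ + 1];
    x86_cpu_vendor_words2str(vendor, 0x756e6547, 0x49656e69, 0x6c65746e);
    /* vendor now holds "GenuineIntel" */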
+
+#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
+#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
+ CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
+#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
+ CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
+ CPUID_PSE36 | CPUID_FXSR)
+#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
+#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
+ CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
+ CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
+ CPUID_PAE | CPUID_SEP | CPUID_APIC)
+
+#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
+ CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
+ CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
+ CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
+ CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
+ /* partly implemented:
+ CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
+ /* missing:
+ CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
+#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
+ CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
+ CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
+ CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
+ CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
+ /* missing:
+ CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
+ CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
+ CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
+ CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
+ CPUID_EXT_F16C, CPUID_EXT_RDRAND */
+
+#ifdef TARGET_X86_64
+#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
+#else
+#define TCG_EXT2_X86_64_FEATURES 0
+#endif
+
+#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
+ CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
+ CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
+ TCG_EXT2_X86_64_FEATURES)
+#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
+ CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
+#define TCG_EXT4_FEATURES 0
+#define TCG_SVM_FEATURES 0
+#define TCG_KVM_FEATURES 0
+#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
+ CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
+ CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
+ CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
+ CPUID_7_0_EBX_ERMS)
+ /* missing:
+ CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
+ CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
+ CPUID_7_0_EBX_RDSEED */
+#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
+#define TCG_7_0_EDX_FEATURES 0
+#define TCG_APM_FEATURES 0
+#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
+#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
+ /* missing:
+ CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
+
+typedef struct FeatureWordInfo {
+    /* Feature flag names are taken from "Intel Processor Identification and
+ * the CPUID Instruction" and AMD's "CPUID Specification".
+ * In cases of disagreement between feature naming conventions,
+ * aliases may be added.
+ */
+ const char *feat_names[32];
+ uint32_t cpuid_eax; /* Input EAX for CPUID */
+ bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
+ uint32_t cpuid_ecx; /* Input ECX value for CPUID */
+ int cpuid_reg; /* output register (R_* constant) */
+ uint32_t tcg_features; /* Feature flags supported by TCG */
+ uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
+ uint32_t migratable_flags; /* Feature flags known to be migratable */
+} FeatureWordInfo;
+
+static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
+ [FEAT_1_EDX] = {
+ .feat_names = {
+ "fpu", "vme", "de", "pse",
+ "tsc", "msr", "pae", "mce",
+ "cx8", "apic", NULL, "sep",
+ "mtrr", "pge", "mca", "cmov",
+ "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
+ NULL, "ds" /* Intel dts */, "acpi", "mmx",
+ "fxsr", "sse", "sse2", "ss",
+ "ht" /* Intel htt */, "tm", "ia64", "pbe",
+ },
+ .cpuid_eax = 1, .cpuid_reg = R_EDX,
+ .tcg_features = TCG_FEATURES,
+ },
+ [FEAT_1_ECX] = {
+ .feat_names = {
+ "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
+ "ds-cpl", "vmx", "smx", "est",
+ "tm2", "ssse3", "cid", NULL,
+ "fma", "cx16", "xtpr", "pdcm",
+ NULL, "pcid", "dca", "sse4.1",
+ "sse4.2", "x2apic", "movbe", "popcnt",
+ "tsc-deadline", "aes", "xsave", "osxsave",
+ "avx", "f16c", "rdrand", "hypervisor",
+ },
+ .cpuid_eax = 1, .cpuid_reg = R_ECX,
+ .tcg_features = TCG_EXT_FEATURES,
+ },
+    /* Feature names already defined in feature_name[] that are also set
+     * in CPUID[8000_0001].EDX on AMD CPUs do not have their names repeated
+     * in feat_names below. They are copied automatically to
+     * features[FEAT_8000_0001_EDX] if and only if the CPU vendor is AMD.
+     */
+ [FEAT_8000_0001_EDX] = {
+ .feat_names = {
+ NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
+ NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
+ NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
+ NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
+ NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
+ "nx", NULL, "mmxext", NULL /* mmx */,
+ NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
+ NULL, "lm", "3dnowext", "3dnow",
+ },
+ .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
+ .tcg_features = TCG_EXT2_FEATURES,
+ },
+ [FEAT_8000_0001_ECX] = {
+ .feat_names = {
+ "lahf-lm", "cmp-legacy", "svm", "extapic",
+ "cr8legacy", "abm", "sse4a", "misalignsse",
+ "3dnowprefetch", "osvw", "ibs", "xop",
+ "skinit", "wdt", NULL, "lwp",
+ "fma4", "tce", NULL, "nodeid-msr",
+ NULL, "tbm", "topoext", "perfctr-core",
+ "perfctr-nb", NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
+ .tcg_features = TCG_EXT3_FEATURES,
+ },
+ [FEAT_C000_0001_EDX] = {
+ .feat_names = {
+ NULL, NULL, "xstore", "xstore-en",
+ NULL, NULL, "xcrypt", "xcrypt-en",
+ "ace2", "ace2-en", "phe", "phe-en",
+ "pmm", "pmm-en", NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
+ .tcg_features = TCG_EXT4_FEATURES,
+ },
+ [FEAT_KVM] = {
+ .feat_names = {
+ "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
+ "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ "kvmclock-stable-bit", NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
+ .tcg_features = TCG_KVM_FEATURES,
+ },
+ [FEAT_HYPERV_EAX] = {
+ .feat_names = {
+ NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
+ NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
+ NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
+ NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
+ NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
+ NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
+ },
+ [FEAT_HYPERV_EBX] = {
+ .feat_names = {
+ NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
+ NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
+ NULL /* hv_post_messages */, NULL /* hv_signal_events */,
+ NULL /* hv_create_port */, NULL /* hv_connect_port */,
+ NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
+ NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
+ NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
+ },
+ [FEAT_HYPERV_EDX] = {
+ .feat_names = {
+ NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
+ NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
+ NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
+ NULL, NULL,
+ NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
+ },
+ [FEAT_SVM] = {
+ .feat_names = {
+ "npt", "lbrv", "svm-lock", "nrip-save",
+ "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
+ NULL, NULL, "pause-filter", NULL,
+ "pfthreshold", NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
+ .tcg_features = TCG_SVM_FEATURES,
+ },
+ [FEAT_7_0_EBX] = {
+ .feat_names = {
+ "fsgsbase", "tsc-adjust", NULL, "bmi1",
+ "hle", "avx2", NULL, "smep",
+ "bmi2", "erms", "invpcid", "rtm",
+ NULL, NULL, "mpx", NULL,
+ "avx512f", "avx512dq", "rdseed", "adx",
+ "smap", "avx512ifma", "pcommit", "clflushopt",
+ "clwb", NULL, "avx512pf", "avx512er",
+ "avx512cd", NULL, "avx512bw", "avx512vl",
+ },
+ .cpuid_eax = 7,
+ .cpuid_needs_ecx = true, .cpuid_ecx = 0,
+ .cpuid_reg = R_EBX,
+ .tcg_features = TCG_7_0_EBX_FEATURES,
+ },
+ [FEAT_7_0_ECX] = {
+ .feat_names = {
+ NULL, "avx512vbmi", "umip", "pku",
+ "ospke", NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, "rdpid", NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .cpuid_eax = 7,
+ .cpuid_needs_ecx = true, .cpuid_ecx = 0,
+ .cpuid_reg = R_ECX,
+ .tcg_features = TCG_7_0_ECX_FEATURES,
+ },
+ [FEAT_7_0_EDX] = {
+ .feat_names = {
+ NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .cpuid_eax = 7,
+ .cpuid_needs_ecx = true, .cpuid_ecx = 0,
+ .cpuid_reg = R_EDX,
+ .tcg_features = TCG_7_0_EDX_FEATURES,
+ },
+ [FEAT_8000_0007_EDX] = {
+ .feat_names = {
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ "invtsc", NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .cpuid_eax = 0x80000007,
+ .cpuid_reg = R_EDX,
+ .tcg_features = TCG_APM_FEATURES,
+ .unmigratable_flags = CPUID_APM_INVTSC,
+ },
+ [FEAT_XSAVE] = {
+ .feat_names = {
+ "xsaveopt", "xsavec", "xgetbv1", "xsaves",
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .cpuid_eax = 0xd,
+ .cpuid_needs_ecx = true, .cpuid_ecx = 1,
+ .cpuid_reg = R_EAX,
+ .tcg_features = TCG_XSAVE_FEATURES,
+ },
+ [FEAT_6_EAX] = {
+ .feat_names = {
+ NULL, NULL, "arat", NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .cpuid_eax = 6, .cpuid_reg = R_EAX,
+ .tcg_features = TCG_6_EAX_FEATURES,
+ },
+ [FEAT_XSAVE_COMP_LO] = {
+ .cpuid_eax = 0xD,
+ .cpuid_needs_ecx = true, .cpuid_ecx = 0,
+ .cpuid_reg = R_EAX,
+ .tcg_features = ~0U,
+ .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
+ XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
+ XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
+ XSTATE_PKRU_MASK,
+ },
+ [FEAT_XSAVE_COMP_HI] = {
+ .cpuid_eax = 0xD,
+ .cpuid_needs_ecx = true, .cpuid_ecx = 0,
+ .cpuid_reg = R_EDX,
+ .tcg_features = ~0U,
+ },
+};
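Each entry above records which CPUID leaf (and, when cpuid_needs_ecx is set, which sub-leaf) defines the word, and which output register carries it. A minimal sketch of how that metadata could drive a host query (hypothetical usage for illustration, not a helper added by this patch):

    FeatureWordInfo *wi = &feature_word_info[FEAT_7_0_EBX];
    uint32_t regs[4];   /* indexed by the R_EAX..R_EBX constants */
    host_cpuid(wi->cpuid_eax, wi->cpuid_needs_ecx ? wi->cpuid_ecx : 0,
               &regs[R_EAX], &regs[R_EBX], &regs[R_ECX], &regs[R_EDX]);
    uint32_t host_word = regs[wi->cpuid_reg];   /* R_EBX for FEAT_7_0_EBX */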
+
+typedef struct X86RegisterInfo32 {
+ /* Name of register */
+ const char *name;
+ /* QAPI enum value register */
+ X86CPURegister32 qapi_enum;
+} X86RegisterInfo32;
+
+#define REGISTER(reg) \
+ [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
+static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
+ REGISTER(EAX),
+ REGISTER(ECX),
+ REGISTER(EDX),
+ REGISTER(EBX),
+ REGISTER(ESP),
+ REGISTER(EBP),
+ REGISTER(ESI),
+ REGISTER(EDI),
+};
+#undef REGISTER
+
+typedef struct ExtSaveArea {
+ uint32_t feature, bits;
+ uint32_t offset, size;
+} ExtSaveArea;
+
+static const ExtSaveArea x86_ext_save_areas[] = {
+ [XSTATE_FP_BIT] = {
+ /* x87 FP state component is always enabled if XSAVE is supported */
+ .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
+ /* x87 state is in the legacy region of the XSAVE area */
+ .offset = 0,
+ .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
+ },
+ [XSTATE_SSE_BIT] = {
+ /* SSE state component is always enabled if XSAVE is supported */
+ .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
+ /* SSE state is in the legacy region of the XSAVE area */
+ .offset = 0,
+ .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
+ },
+ [XSTATE_YMM_BIT] =
+ { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
+ .offset = offsetof(X86XSaveArea, avx_state),
+ .size = sizeof(XSaveAVX) },
+ [XSTATE_BNDREGS_BIT] =
+ { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
+ .offset = offsetof(X86XSaveArea, bndreg_state),
+ .size = sizeof(XSaveBNDREG) },
+ [XSTATE_BNDCSR_BIT] =
+ { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
+ .offset = offsetof(X86XSaveArea, bndcsr_state),
+ .size = sizeof(XSaveBNDCSR) },
+ [XSTATE_OPMASK_BIT] =
+ { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
+ .offset = offsetof(X86XSaveArea, opmask_state),
+ .size = sizeof(XSaveOpmask) },
+ [XSTATE_ZMM_Hi256_BIT] =
+ { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
+ .offset = offsetof(X86XSaveArea, zmm_hi256_state),
+ .size = sizeof(XSaveZMM_Hi256) },
+ [XSTATE_Hi16_ZMM_BIT] =
+ { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
+ .offset = offsetof(X86XSaveArea, hi16_zmm_state),
+ .size = sizeof(XSaveHi16_ZMM) },
+ [XSTATE_PKRU_BIT] =
+ { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
+ .offset = offsetof(X86XSaveArea, pkru_state),
+ .size = sizeof(XSavePKRU) },
+};
+
+static uint32_t xsave_area_size(uint64_t mask)
+{
+ int i;
+ uint64_t ret = 0;
+
+ for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
+ const ExtSaveArea *esa = &x86_ext_save_areas[i];
+ if ((mask >> i) & 1) {
+ ret = MAX(ret, esa->offset + esa->size);
+ }
+ }
+ return ret;
+}
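For instance, with only the x87 and SSE components enabled, both entries end at offset 0 plus the legacy area and header, so the result is sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader), i.e. 512 + 64 = 576 bytes in the standard XSAVE layout (a worked example, not part of the patch):

    uint32_t sz = xsave_area_size(XSTATE_FP_MASK | XSTATE_SSE_MASK);
    /* sz == 576: legacy x87/SSE region (512 bytes) plus the XSAVE header (64 bytes) */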
+
+static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
+{
+ return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
+ cpu->env.features[FEAT_XSAVE_COMP_LO];
+}
+
+const char *get_register_name_32(unsigned int reg)
+{
+ if (reg >= CPU_NB_REGS32) {
+ return NULL;
+ }
+ return x86_reg_info_32[reg].name;
+}
+
+/*
+ * Returns the set of feature flags that are supported and migratable by
+ * QEMU, for a given FeatureWord.
+ */
+static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
+{
+ FeatureWordInfo *wi = &feature_word_info[w];
+ uint32_t r = 0;
+ int i;
+
+ for (i = 0; i < 32; i++) {
+ uint32_t f = 1U << i;
+
+ /* If the feature name is known, it is implicitly considered migratable,
+ * unless it is explicitly set in unmigratable_flags */
+ if ((wi->migratable_flags & f) ||
+ (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
+ r |= f;
+ }
+ }
+ return r;
+}
+
+void host_cpuid(uint32_t function, uint32_t count,
+ uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
+{
+ uint32_t vec[4];
+
+#ifdef __x86_64__
+ asm volatile("cpuid"
+ : "=a"(vec[0]), "=b"(vec[1]),
+ "=c"(vec[2]), "=d"(vec[3])
+ : "0"(function), "c"(count) : "cc");
+#elif defined(__i386__)
+ asm volatile("pusha \n\t"
+ "cpuid \n\t"
+ "mov %%eax, 0(%2) \n\t"
+ "mov %%ebx, 4(%2) \n\t"
+ "mov %%ecx, 8(%2) \n\t"
+ "mov %%edx, 12(%2) \n\t"
+ "popa"
+ : : "a"(function), "c"(count), "S"(vec)
+ : "memory", "cc");
+#else
+ abort();
+#endif
+
+ if (eax)
+ *eax = vec[0];
+ if (ebx)
+ *ebx = vec[1];
+ if (ecx)
+ *ecx = vec[2];
+ if (edx)
+ *edx = vec[3];
+}
+
+/* CPU class name definitions: */
+
+#define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
+#define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
+
+/* Return type name for a given CPU model name
+ * Caller is responsible for freeing the returned string.
+ */
+static char *x86_cpu_type_name(const char *model_name)
+{
+ return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
+}
+
+static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
+{
+ ObjectClass *oc;
+ char *typename;
+
+ if (cpu_model == NULL) {
+ return NULL;
+ }
+
+ typename = x86_cpu_type_name(cpu_model);
+ oc = object_class_by_name(typename);
+ g_free(typename);
+ return oc;
+}
+
+static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
+{
+ const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
+ assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
+ return g_strndup(class_name,
+ strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
+}
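These two helpers are inverses: the model name gains the "-<arch>-cpu" suffix to become a QOM type name, and the suffix is stripped to recover the model name. For example (illustrative usage only):

    char *tn = x86_cpu_type_name("Haswell");
    /* tn == "Haswell-x86_64-cpu" on a 64-bit target ("Haswell-i386-cpu" otherwise) */
    g_free(tn);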
+
+struct X86CPUDefinition {
+ const char *name;
+ uint32_t level;
+ uint32_t xlevel;
+ /* vendor is zero-terminated, 12 character ASCII string */
+ char vendor[CPUID_VENDOR_SZ + 1];
+ int family;
+ int model;
+ int stepping;
+ FeatureWordArray features;
+ char model_id[48];
+};
+
+static X86CPUDefinition builtin_x86_defs[] = {
+ {
+ .name = "qemu64",
+ .level = 0xd,
+ .vendor = CPUID_VENDOR_AMD,
+ .family = 6,
+ .model = 6,
+ .stepping = 3,
+ .features[FEAT_1_EDX] =
+ PPRO_FEATURES |
+ CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
+ CPUID_PSE36,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_SSE3 | CPUID_EXT_CX16,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
+ .xlevel = 0x8000000A,
+ .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
+ },
+ {
+ .name = "phenom",
+ .level = 5,
+ .vendor = CPUID_VENDOR_AMD,
+ .family = 16,
+ .model = 2,
+ .stepping = 3,
+ /* Missing: CPUID_HT */
+ .features[FEAT_1_EDX] =
+ PPRO_FEATURES |
+ CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
+ CPUID_PSE36 | CPUID_VME,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
+ CPUID_EXT_POPCNT,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
+ CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
+ CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
+ /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
+ CPUID_EXT3_CR8LEG,
+ CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
+ CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
+ CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
+ /* Missing: CPUID_SVM_LBRV */
+ .features[FEAT_SVM] =
+ CPUID_SVM_NPT,
+ .xlevel = 0x8000001A,
+ .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
+ },
+ {
+ .name = "core2duo",
+ .level = 10,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 15,
+ .stepping = 11,
+ /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
+ .features[FEAT_1_EDX] =
+ PPRO_FEATURES |
+ CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
+ CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
+ /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
+ * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
+ CPUID_EXT_CX16,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_LAHF_LM,
+ .xlevel = 0x80000008,
+ .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
+ },
+ {
+ .name = "kvm64",
+ .level = 0xd,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 15,
+ .model = 6,
+ .stepping = 1,
+ /* Missing: CPUID_HT */
+ .features[FEAT_1_EDX] =
+ PPRO_FEATURES | CPUID_VME |
+ CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
+ CPUID_PSE36,
+ /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_SSE3 | CPUID_EXT_CX16,
+ /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
+ /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
+ CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
+ CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
+ CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
+ .features[FEAT_8000_0001_ECX] =
+ 0,
+ .xlevel = 0x80000008,
+ .model_id = "Common KVM processor"
+ },
+ {
+ .name = "qemu32",
+ .level = 4,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 6,
+ .stepping = 3,
+ .features[FEAT_1_EDX] =
+ PPRO_FEATURES,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_SSE3,
+ .xlevel = 0x80000004,
+ .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
+ },
+ {
+ .name = "kvm32",
+ .level = 5,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 15,
+ .model = 6,
+ .stepping = 1,
+ .features[FEAT_1_EDX] =
+ PPRO_FEATURES | CPUID_VME |
+ CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_SSE3,
+ .features[FEAT_8000_0001_ECX] =
+ 0,
+ .xlevel = 0x80000008,
+ .model_id = "Common 32-bit KVM processor"
+ },
+ {
+ .name = "coreduo",
+ .level = 10,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 14,
+ .stepping = 8,
+ /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
+ .features[FEAT_1_EDX] =
+ PPRO_FEATURES | CPUID_VME |
+ CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
+ CPUID_SS,
+ /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
+ * CPUID_EXT_PDCM, CPUID_EXT_VMX */
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_NX,
+ .xlevel = 0x80000008,
+ .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
+ },
+ {
+ .name = "486",
+ .level = 1,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 4,
+ .model = 8,
+ .stepping = 0,
+ .features[FEAT_1_EDX] =
+ I486_FEATURES,
+ .xlevel = 0,
+ },
+ {
+ .name = "pentium",
+ .level = 1,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 5,
+ .model = 4,
+ .stepping = 3,
+ .features[FEAT_1_EDX] =
+ PENTIUM_FEATURES,
+ .xlevel = 0,
+ },
+ {
+ .name = "pentium2",
+ .level = 2,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 5,
+ .stepping = 2,
+ .features[FEAT_1_EDX] =
+ PENTIUM2_FEATURES,
+ .xlevel = 0,
+ },
+ {
+ .name = "pentium3",
+ .level = 3,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 7,
+ .stepping = 3,
+ .features[FEAT_1_EDX] =
+ PENTIUM3_FEATURES,
+ .xlevel = 0,
+ },
+ {
+ .name = "athlon",
+ .level = 2,
+ .vendor = CPUID_VENDOR_AMD,
+ .family = 6,
+ .model = 2,
+ .stepping = 3,
+ .features[FEAT_1_EDX] =
+ PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
+ CPUID_MCA,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
+ .xlevel = 0x80000008,
+ .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
+ },
+ {
+ .name = "n270",
+ .level = 10,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 28,
+ .stepping = 2,
+ /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
+ .features[FEAT_1_EDX] =
+ PPRO_FEATURES |
+ CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
+ CPUID_ACPI | CPUID_SS,
+        /* Some CPUs lack CPUID_SEP */
+ /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
+ * CPUID_EXT_XTPR */
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
+ CPUID_EXT_MOVBE,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_NX,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_LAHF_LM,
+ .xlevel = 0x80000008,
+ .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
+ },
+ {
+ .name = "Conroe",
+ .level = 10,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 15,
+ .stepping = 3,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_LAHF_LM,
+ .xlevel = 0x80000008,
+ .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
+ },
+ {
+ .name = "Penryn",
+ .level = 10,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 23,
+ .stepping = 3,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
+ CPUID_EXT_SSE3,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_LAHF_LM,
+ .xlevel = 0x80000008,
+ .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
+ },
+ {
+ .name = "Nehalem",
+ .level = 11,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 26,
+ .stepping = 3,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
+ CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_LAHF_LM,
+ .xlevel = 0x80000008,
+ .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
+ },
+ {
+ .name = "Westmere",
+ .level = 11,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 44,
+ .stepping = 1,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
+ CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
+ CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_LAHF_LM,
+ .features[FEAT_6_EAX] =
+ CPUID_6_EAX_ARAT,
+ .xlevel = 0x80000008,
+ .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
+ },
+ {
+ .name = "SandyBridge",
+ .level = 0xd,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 42,
+ .stepping = 1,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+ CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
+ CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
+ CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
+ CPUID_EXT_SSE3,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
+ CPUID_EXT2_SYSCALL,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_LAHF_LM,
+ .features[FEAT_XSAVE] =
+ CPUID_XSAVE_XSAVEOPT,
+ .features[FEAT_6_EAX] =
+ CPUID_6_EAX_ARAT,
+ .xlevel = 0x80000008,
+ .model_id = "Intel Xeon E312xx (Sandy Bridge)",
+ },
+ {
+ .name = "IvyBridge",
+ .level = 0xd,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 58,
+ .stepping = 9,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+ CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
+ CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
+ CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
+ CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
+ .features[FEAT_7_0_EBX] =
+ CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
+ CPUID_7_0_EBX_ERMS,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
+ CPUID_EXT2_SYSCALL,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_LAHF_LM,
+ .features[FEAT_XSAVE] =
+ CPUID_XSAVE_XSAVEOPT,
+ .features[FEAT_6_EAX] =
+ CPUID_6_EAX_ARAT,
+ .xlevel = 0x80000008,
+ .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
+ },
+ {
+ .name = "Haswell-noTSX",
+ .level = 0xd,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 60,
+ .stepping = 1,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+ CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
+ CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
+ CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
+ CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
+ CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
+ CPUID_EXT2_SYSCALL,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
+ .features[FEAT_7_0_EBX] =
+ CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
+ CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
+ CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
+ .features[FEAT_XSAVE] =
+ CPUID_XSAVE_XSAVEOPT,
+ .features[FEAT_6_EAX] =
+ CPUID_6_EAX_ARAT,
+ .xlevel = 0x80000008,
+ .model_id = "Intel Core Processor (Haswell, no TSX)",
+    },
+    {
+ .name = "Haswell",
+ .level = 0xd,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 60,
+ .stepping = 1,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+ CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
+ CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
+ CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
+ CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
+ CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
+ CPUID_EXT2_SYSCALL,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
+ .features[FEAT_7_0_EBX] =
+ CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
+ CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
+ CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
+ CPUID_7_0_EBX_RTM,
+ .features[FEAT_XSAVE] =
+ CPUID_XSAVE_XSAVEOPT,
+ .features[FEAT_6_EAX] =
+ CPUID_6_EAX_ARAT,
+ .xlevel = 0x80000008,
+ .model_id = "Intel Core Processor (Haswell)",
+ },
+ {
+ .name = "Broadwell-noTSX",
+ .level = 0xd,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 61,
+ .stepping = 2,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+ CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
+ CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
+ CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
+ CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
+ CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
+ CPUID_EXT2_SYSCALL,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
+ .features[FEAT_7_0_EBX] =
+ CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
+ CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
+ CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
+ CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
+ CPUID_7_0_EBX_SMAP,
+ .features[FEAT_XSAVE] =
+ CPUID_XSAVE_XSAVEOPT,
+ .features[FEAT_6_EAX] =
+ CPUID_6_EAX_ARAT,
+ .xlevel = 0x80000008,
+ .model_id = "Intel Core Processor (Broadwell, no TSX)",
+ },
+ {
+ .name = "Broadwell",
+ .level = 0xd,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 61,
+ .stepping = 2,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+ CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
+ CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
+ CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
+ CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
+ CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
+ CPUID_EXT2_SYSCALL,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
+ .features[FEAT_7_0_EBX] =
+ CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
+ CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
+ CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
+ CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
+ CPUID_7_0_EBX_SMAP,
+ .features[FEAT_XSAVE] =
+ CPUID_XSAVE_XSAVEOPT,
+ .features[FEAT_6_EAX] =
+ CPUID_6_EAX_ARAT,
+ .xlevel = 0x80000008,
+ .model_id = "Intel Core Processor (Broadwell)",
+ },
+ {
+ .name = "Skylake-Client",
+ .level = 0xd,
+ .vendor = CPUID_VENDOR_INTEL,
+ .family = 6,
+ .model = 94,
+ .stepping = 3,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+ CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
+ CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
+ CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
+ CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
+ CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
+ CPUID_EXT2_SYSCALL,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
+ .features[FEAT_7_0_EBX] =
+ CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
+ CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
+ CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
+ CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
+ CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
+ /* Missing: XSAVES (not supported by some Linux versions,
+ * including v4.1 to v4.6).
+ * KVM doesn't yet expose any XSAVES state save component,
+ * and the only one defined in Skylake (processor tracing)
+ * probably will block migration anyway.
+ */
+ .features[FEAT_XSAVE] =
+ CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
+ CPUID_XSAVE_XGETBV1,
+ .features[FEAT_6_EAX] =
+ CPUID_6_EAX_ARAT,
+ .xlevel = 0x80000008,
+ .model_id = "Intel Core Processor (Skylake)",
+ },
+ {
+ .name = "Opteron_G1",
+ .level = 5,
+ .vendor = CPUID_VENDOR_AMD,
+ .family = 15,
+ .model = 6,
+ .stepping = 1,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_SSE3,
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
+ CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
+ CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
+ CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
+ CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
+ CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
+ .xlevel = 0x80000008,
+ .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
+ },
+ {
+ .name = "Opteron_G2",
+ .level = 5,
+ .vendor = CPUID_VENDOR_AMD,
+ .family = 15,
+ .model = 6,
+ .stepping = 1,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_CX16 | CPUID_EXT_SSE3,
+ /* Missing: CPUID_EXT2_RDTSCP */
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_FXSR |
+ CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
+ CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
+ CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
+ CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
+ CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
+ CPUID_EXT2_DE | CPUID_EXT2_FPU,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
+ .xlevel = 0x80000008,
+ .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
+ },
+ {
+ .name = "Opteron_G3",
+ .level = 5,
+ .vendor = CPUID_VENDOR_AMD,
+ .family = 16,
+ .model = 2,
+ .stepping = 3,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
+ CPUID_EXT_SSE3,
+ /* Missing: CPUID_EXT2_RDTSCP */
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM | CPUID_EXT2_FXSR |
+ CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
+ CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
+ CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
+ CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
+ CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
+ CPUID_EXT2_DE | CPUID_EXT2_FPU,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
+ CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
+ .xlevel = 0x80000008,
+ .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
+ },
+ {
+ .name = "Opteron_G4",
+ .level = 0xd,
+ .vendor = CPUID_VENDOR_AMD,
+ .family = 21,
+ .model = 1,
+ .stepping = 2,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
+ CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
+ CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
+ CPUID_EXT_SSE3,
+ /* Missing: CPUID_EXT2_RDTSCP */
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM |
+ CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
+ CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
+ CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
+ CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
+ CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
+ CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
+ CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
+ CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
+ CPUID_EXT3_LAHF_LM,
+ /* no xsaveopt! */
+ .xlevel = 0x8000001A,
+ .model_id = "AMD Opteron 62xx class CPU",
+ },
+ {
+ .name = "Opteron_G5",
+ .level = 0xd,
+ .vendor = CPUID_VENDOR_AMD,
+ .family = 21,
+ .model = 2,
+ .stepping = 0,
+ .features[FEAT_1_EDX] =
+ CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
+ CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
+ CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
+ CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
+ CPUID_DE | CPUID_FP87,
+ .features[FEAT_1_ECX] =
+ CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
+ CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
+ CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
+ CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
+ /* Missing: CPUID_EXT2_RDTSCP */
+ .features[FEAT_8000_0001_EDX] =
+ CPUID_EXT2_LM |
+ CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
+ CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
+ CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
+ CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
+ CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
+ CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
+ .features[FEAT_8000_0001_ECX] =
+ CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
+ CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
+ CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
+ CPUID_EXT3_LAHF_LM,
+ /* no xsaveopt! */
+ .xlevel = 0x8000001A,
+ .model_id = "AMD Opteron 63xx class CPU",
+ },
+};
+
+typedef struct PropValue {
+ const char *prop, *value;
+} PropValue;
+
+/* KVM-specific features that are automatically added/removed
+ * from all CPU models when KVM is enabled.
+ */
+static PropValue kvm_default_props[] = {
+ { "kvmclock", "on" },
+ { "kvm-nopiodelay", "on" },
+ { "kvm-asyncpf", "on" },
+ { "kvm-steal-time", "on" },
+ { "kvm-pv-eoi", "on" },
+ { "kvmclock-stable-bit", "on" },
+ { "x2apic", "on" },
+ { "acpi", "off" },
+ { "monitor", "off" },
+ { "svm", "off" },
+ { NULL, NULL },
+};
+
+/* TCG-specific defaults that override all CPU models when using TCG
+ */
+static PropValue tcg_default_props[] = {
+ { "vme", "off" },
+ { NULL, NULL },
+};
+
+
+void x86_cpu_change_kvm_default(const char *prop, const char *value)
+{
+ PropValue *pv;
+ for (pv = kvm_default_props; pv->prop; pv++) {
+ if (!strcmp(pv->prop, prop)) {
+ pv->value = value;
+ break;
+ }
+ }
+
+ /* It is valid to call this function only for properties that
+ * are already present in the kvm_default_props table.
+ */
+ assert(pv->prop);
+}
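A hypothetical caller (machine-type compatibility code, for example) would flip one of the defaults listed above like this; the property name must already be present in kvm_default_props, or the assertion fires:

    /* turn the kvm-pv-eoi default off for every CPU model under KVM */
    x86_cpu_change_kvm_default("kvm-pv-eoi", "off");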
+
+static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
+ bool migratable_only);
+
+#ifdef CONFIG_KVM
+
+static bool lmce_supported(void)
+{
+ uint64_t mce_cap;
+
+ if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
+ return false;
+ }
+
+ return !!(mce_cap & MCG_LMCE_P);
+}
+
+static int cpu_x86_fill_model_id(char *str)
+{
+ uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
+ memcpy(str + i * 16 + 0, &eax, 4);
+ memcpy(str + i * 16 + 4, &ebx, 4);
+ memcpy(str + i * 16 + 8, &ecx, 4);
+ memcpy(str + i * 16 + 12, &edx, 4);
+ }
+ return 0;
+}
+
+static X86CPUDefinition host_cpudef;
+
+static Property host_x86_cpu_properties[] = {
+ DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
+ DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
+ DEFINE_PROP_END_OF_LIST()
+};
+
+/* class_init for the "host" CPU model
+ *
+ * This function may be called before KVM is initialized.
+ */
+static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ X86CPUClass *xcc = X86_CPU_CLASS(oc);
+ uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
+
+ xcc->kvm_required = true;
+
+ host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
+ x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
+
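+ /* CPUID.1.EAX: stepping[3:0], model[7:4], family[11:8],
+ * extended model[19:16], extended family[27:20].
+ */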
+ host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
+ host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
+ host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
+ host_cpudef.stepping = eax & 0x0F;
+
+ cpu_x86_fill_model_id(host_cpudef.model_id);
+
+ xcc->cpu_def = &host_cpudef;
+ xcc->model_description =
+ "KVM processor with all supported host features "
+ "(only available in KVM mode)";
+
+ /* level, xlevel, xlevel2, and the feature words are initialized on
+ * instance_init, because they require KVM to be initialized.
+ */
+
+ dc->props = host_x86_cpu_properties;
+ /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
+ dc->cannot_destroy_with_object_finalize_yet = true;
+}
+
+static void host_x86_cpu_initfn(Object *obj)
+{
+ X86CPU *cpu = X86_CPU(obj);
+ CPUX86State *env = &cpu->env;
+ KVMState *s = kvm_state;
+
+ /* We can't fill the features array here because we don't know yet if
+ * "migratable" is true or false.
+ */
+ cpu->host_features = true;
+
+ /* If KVM is disabled, x86_cpu_realizefn() will report an error later */
+ if (kvm_enabled()) {
+ env->cpuid_min_level =
+ kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
+ env->cpuid_min_xlevel =
+ kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
+ env->cpuid_min_xlevel2 =
+ kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
+
+ if (lmce_supported()) {
+ object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
+ }
+ }
+
+ object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
+}
+
+static const TypeInfo host_x86_cpu_type_info = {
+ .name = X86_CPU_TYPE_NAME("host"),
+ .parent = TYPE_X86_CPU,
+ .instance_init = host_x86_cpu_initfn,
+ .class_init = host_x86_cpu_class_init,
+};
+
+#endif
+
+static void report_unavailable_features(FeatureWord w, uint32_t mask)
+{
+ FeatureWordInfo *f = &feature_word_info[w];
+ int i;
+
+ for (i = 0; i < 32; ++i) {
+ if ((1UL << i) & mask) {
+ const char *reg = get_register_name_32(f->cpuid_reg);
+ assert(reg);
+ fprintf(stderr, "warning: %s doesn't support requested feature: "
+ "CPUID.%02XH:%s%s%s [bit %d]\n",
+ kvm_enabled() ? "host" : "TCG",
+ f->cpuid_eax, reg,
+ f->feat_names[i] ? "." : "",
+ f->feat_names[i] ? f->feat_names[i] : "", i);
+ }
+ }
+}
+
+static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ X86CPU *cpu = X86_CPU(obj);
+ CPUX86State *env = &cpu->env;
+ int64_t value;
+
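+ /* Family is bits 11:8 of CPUID.1.EAX; the extended family
+ * (bits 27:20) is added only when the base field is 0xf.
+ */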
+ value = (env->cpuid_version >> 8) & 0xf;
+ if (value == 0xf) {
+ value += (env->cpuid_version >> 20) & 0xff;
+ }
+ visit_type_int(v, name, &value, errp);
+}
+
+static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ X86CPU *cpu = X86_CPU(obj);
+ CPUX86State *env = &cpu->env;
+ const int64_t min = 0;
+ const int64_t max = 0xff + 0xf;
+ Error *local_err = NULL;
+ int64_t value;
+
+ visit_type_int(v, name, &value, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+ if (value < min || value > max) {
+ error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
+ name ? name : "null", value, min, max);
+ return;
+ }
+
+ env->cpuid_version &= ~0xff00f00;
+ if (value > 0x0f) {
+ env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
+ } else {
+ env->cpuid_version |= value << 8;
+ }
+}
+
+static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ X86CPU *cpu = X86_CPU(obj);
+ CPUX86State *env = &cpu->env;
+ int64_t value;
+
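+ /* Model is bits 7:4 of CPUID.1.EAX, with the extended model
+ * (bits 19:16) providing the upper nibble.
+ */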
+ value = (env->cpuid_version >> 4) & 0xf;
+ value |= ((env->cpuid_version >> 16) & 0xf) << 4;
+ visit_type_int(v, name, &value, errp);
+}
+
+static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ X86CPU *cpu = X86_CPU(obj);
+ CPUX86State *env = &cpu->env;
+ const int64_t min = 0;
+ const int64_t max = 0xff;
+ Error *local_err = NULL;
+ int64_t value;
+
+ visit_type_int(v, name, &value, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+ if (value < min || value > max) {
+ error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
+ name ? name : "null", value, min, max);
+ return;
+ }
+
+ env->cpuid_version &= ~0xf00f0;
+ env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
+}
+
+static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ X86CPU *cpu = X86_CPU(obj);
+ CPUX86State *env = &cpu->env;
+ int64_t value;
+
+ value = env->cpuid_version & 0xf;
+ visit_type_int(v, name, &value, errp);
+}
+
+static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ X86CPU *cpu = X86_CPU(obj);
+ CPUX86State *env = &cpu->env;
+ const int64_t min = 0;
+ const int64_t max = 0xf;
+ Error *local_err = NULL;
+ int64_t value;
+
+ visit_type_int(v, name, &value, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+ if (value < min || value > max) {
+ error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
+ name ? name : "null", value, min, max);
+ return;
+ }
+
+ env->cpuid_version &= ~0xf;
+ env->cpuid_version |= value & 0xf;
+}
+
+static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
+{
+ X86CPU *cpu = X86_CPU(obj);
+ CPUX86State *env = &cpu->env;
+ char *value;
+
+ value = g_malloc(CPUID_VENDOR_SZ + 1);
+ x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
+ env->cpuid_vendor3);
+ return value;
+}
+
+static void x86_cpuid_set_vendor(Object *obj, const char *value,
+ Error **errp)
+{
+ X86CPU *cpu = X86_CPU(obj);
+ CPUX86State *env = &cpu->env;
+ int i;
+
+ if (strlen(value) != CPUID_VENDOR_SZ) {
+ error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
+ return;
+ }
+
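+ /* The 12-character vendor string maps to EBX, EDX and ECX
+ * (four characters each, packed little-endian), e.g. "GenuineIntel".
+ */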
+ env->cpuid_vendor1 = 0;
+ env->cpuid_vendor2 = 0;
+ env->cpuid_vendor3 = 0;
+ for (i = 0; i < 4; i++) {
+ env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
+ env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
+ env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
+ }
+}
+
+static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
+{
+ X86CPU *cpu = X86_CPU(obj);
+ CPUX86State *env = &cpu->env;
+ char *value;
+ int i;
+
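+ /* Unpack the 48-byte brand string stored as twelve 32-bit words. */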
+ value = g_malloc(48 + 1);
+ for (i = 0; i < 48; i++) {
+ value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
+ }
+ value[48] = '\0';
+ return value;
+}
+
+static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
+ Error **errp)
+{
+ X86CPU *cpu = X86_CPU(obj);
+ CPUX86State *env = &cpu->env;
+ int c, len, i;
+
+ if (model_id == NULL) {
+ model_id = "";
+ }
+ len = strlen(model_id);
+ memset(env->cpuid_model, 0, 48);
+ for (i = 0; i < 48; i++) {
+ if (i >= len) {
+ c = '\0';
+ } else {
+ c = (uint8_t)model_id[i];
+ }
+ env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
+ }
+}
+
+static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ X86CPU *cpu = X86_CPU(obj);
+ int64_t value;
+
+ value = cpu->env.tsc_khz * 1000;
+ visit_type_int(v, name, &value, errp);
+}
+
+static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ X86CPU *cpu = X86_CPU(obj);
+ const int64_t min = 0;
+ const int64_t max = INT64_MAX;
+ Error *local_err = NULL;
+ int64_t value;
+
+ visit_type_int(v, name, &value, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+ if (value < min || value > max) {
+ error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
+ name ? name : "null", value, min, max);
+ return;
+ }
+
+ cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
+}
+
+/* Generic getter for "feature-words" and "filtered-features" properties */
+static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
+ const char *name, void *opaque,
+ Error **errp)
+{
+ uint32_t *array = (uint32_t *)opaque;
+ FeatureWord w;
+ X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
+ X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
+ X86CPUFeatureWordInfoList *list = NULL;
+
+ for (w = 0; w < FEATURE_WORDS; w++) {
+ FeatureWordInfo *wi = &feature_word_info[w];
+ X86CPUFeatureWordInfo *qwi = &word_infos[w];
+ qwi->cpuid_input_eax = wi->cpuid_eax;
+ qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
+ qwi->cpuid_input_ecx = wi->cpuid_ecx;
+ qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
+ qwi->features = array[w];
+
+ /* List will be in reverse order, but order shouldn't matter */
+ list_entries[w].next = list;
+ list_entries[w].value = &word_infos[w];
+ list = &list_entries[w];
+ }
+
+ visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
+}
+
+static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ X86CPU *cpu = X86_CPU(obj);
+ int64_t value = cpu->hyperv_spinlock_attempts;
+
+ visit_type_int(v, name, &value, errp);
+}
+
+static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ const int64_t min = 0xFFF;
+ const int64_t max = UINT_MAX;
+ X86CPU *cpu = X86_CPU(obj);
+ Error *err = NULL;
+ int64_t value;
+
+ visit_type_int(v, name, &value, &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+
+ if (value < min || value > max) {
+ error_setg(errp, "Property %s.%s doesn't take value %" PRId64
+ " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
+ object_get_typename(obj), name ? name : "null",
+ value, min, max);
+ return;
+ }
+ cpu->hyperv_spinlock_attempts = value;
+}
+
+static PropertyInfo qdev_prop_spinlocks = {
+ .name = "int",
+ .get = x86_get_hv_spinlocks,
+ .set = x86_set_hv_spinlocks,
+};
+
+/* Convert all '_' in a feature string option name to '-', to make the
+ * feature name conform to the QOM property naming rule, which uses '-'
+ * instead of '_'.
+ */
+static inline void feat2prop(char *s)
+{
+ while ((s = strchr(s, '_'))) {
+ *s = '-';
+ }
+}
+
+/* Return the feature property name for a feature flag bit */
+static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
+{
+ /* XSAVE components are automatically enabled by other features,
+ * so return the original feature name instead
+ */
+ if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
+ int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
+
+ if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
+ x86_ext_save_areas[comp].bits) {
+ w = x86_ext_save_areas[comp].feature;
+ bitnr = ctz32(x86_ext_save_areas[comp].bits);
+ }
+ }
+
+ assert(bitnr < 32);
+ assert(w < FEATURE_WORDS);
+ return feature_word_info[w].feat_names[bitnr];
+}
+
+/* Compatibility hack to maintain the legacy +-feat semantics,
+ * where +-feat overrides any feature set by
+ * feat=on|off even if the latter is parsed after +-feat
+ * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled).
+ */
+static GList *plus_features, *minus_features;
+
+static gint compare_string(gconstpointer a, gconstpointer b)
+{
+ return g_strcmp0(a, b);
+}
+
+/* Parse "+feature,-feature,feature=foo" CPU feature string
+ */
+static void x86_cpu_parse_featurestr(const char *typename, char *features,
+ Error **errp)
+{
+ char *featurestr; /* Single "key=value" string being parsed */
+ static bool cpu_globals_initialized;
+ bool ambiguous = false;
+
+ if (cpu_globals_initialized) {
+ return;
+ }
+ cpu_globals_initialized = true;
+
+ if (!features) {
+ return;
+ }
+
+ for (featurestr = strtok(features, ",");
+ featurestr;
+ featurestr = strtok(NULL, ",")) {
+ const char *name;
+ const char *val = NULL;
+ char *eq = NULL;
+ char num[32];
+ GlobalProperty *prop;
+
+ /* Compatibility syntax: */
+ if (featurestr[0] == '+') {
+ plus_features = g_list_append(plus_features,
+ g_strdup(featurestr + 1));
+ continue;
+ } else if (featurestr[0] == '-') {
+ minus_features = g_list_append(minus_features,
+ g_strdup(featurestr + 1));
+ continue;
+ }
+
+ eq = strchr(featurestr, '=');
+ if (eq) {
+ *eq++ = 0;
+ val = eq;
+ } else {
+ val = "on";
+ }
+
+ feat2prop(featurestr);
+ name = featurestr;
+
+ if (g_list_find_custom(plus_features, name, compare_string)) {
+ error_report("warning: Ambiguous CPU model string. "
+ "Don't mix both \"+%s\" and \"%s=%s\"",
+ name, name, val);
+ ambiguous = true;
+ }
+ if (g_list_find_custom(minus_features, name, compare_string)) {
+ error_report("warning: Ambiguous CPU model string. "
+ "Don't mix both \"-%s\" and \"%s=%s\"",
+ name, name, val);
+ ambiguous = true;
+ }
+
+ /* Special case: */
+ if (!strcmp(name, "tsc-freq")) {
+ int64_t tsc_freq;
+ char *err;
+
+ tsc_freq = qemu_strtosz_suffix_unit(val, &err,
+ QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
+ if (tsc_freq < 0 || *err) {
+ error_setg(errp, "bad numerical value %s", val);
+ return;
+ }
+ snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
+ val = num;
+ name = "tsc-frequency";
+ }
+
+ prop = g_new0(typeof(*prop), 1);
+ prop->driver = typename;
+ prop->property = g_strdup(name);
+ prop->value = g_strdup(val);
+ prop->errp = &error_fatal;
+ qdev_prop_register_global(prop);
+ }
+
+ if (ambiguous) {
+ error_report("warning: Compatibility of ambiguous CPU model "
+ "strings won't be kept on future QEMU versions");
+ }
+}
+
+static void x86_cpu_load_features(X86CPU *cpu, Error **errp);
+static int x86_cpu_filter_features(X86CPU *cpu);
+
+/* Check for missing features that may prevent the CPU class from
+ * running using the current machine and accelerator.
+ */
+static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
+ strList **missing_feats)
+{
+ X86CPU *xc;
+ FeatureWord w;
+ Error *err = NULL;
+ strList **next = missing_feats;
+
+ if (xcc->kvm_required && !kvm_enabled()) {
+ strList *new = g_new0(strList, 1);
+ new->value = g_strdup("kvm");
+ *missing_feats = new;
+ return;
+ }
+
+ xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
+
+ x86_cpu_load_features(xc, &err);
+ if (err) {
+ /* Errors at x86_cpu_load_features should never happen,
+ * but in case it does, just report the model as not
+ * runnable at all using the "type" property.
+ */
+ strList *new = g_new0(strList, 1);
+ new->value = g_strdup("type");
+ *next = new;
+ next = &new->next;
+ }
+
+ x86_cpu_filter_features(xc);
+
+ for (w = 0; w < FEATURE_WORDS; w++) {
+ uint32_t filtered = xc->filtered_features[w];
+ int i;
+ for (i = 0; i < 32; i++) {
+ if (filtered & (1UL << i)) {
+ strList *new = g_new0(strList, 1);
+ new->value = g_strdup(x86_cpu_feature_name(w, i));
+ *next = new;
+ next = &new->next;
+ }
+ }
+ }
+
+ object_unref(OBJECT(xc));
+}
+
+/* Print all cpuid feature names in featureset
+ */
+static void listflags(FILE *f, fprintf_function print, const char **featureset)
+{
+ int bit;
+ bool first = true;
+
+ for (bit = 0; bit < 32; bit++) {
+ if (featureset[bit]) {
+ print(f, "%s%s", first ? "" : " ", featureset[bit]);
+ first = false;
+ }
+ }
+}
+
+/* Sort alphabetically by type name, listing kvm_required models last. */
+static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
+{
+ ObjectClass *class_a = (ObjectClass *)a;
+ ObjectClass *class_b = (ObjectClass *)b;
+ X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
+ X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
+ const char *name_a, *name_b;
+
+ if (cc_a->kvm_required != cc_b->kvm_required) {
+ /* kvm_required items go last */
+ return cc_a->kvm_required ? 1 : -1;
+ } else {
+ name_a = object_class_get_name(class_a);
+ name_b = object_class_get_name(class_b);
+ return strcmp(name_a, name_b);
+ }
+}
+
+static GSList *get_sorted_cpu_model_list(void)
+{
+ GSList *list = object_class_get_list(TYPE_X86_CPU, false);
+ list = g_slist_sort(list, x86_cpu_list_compare);
+ return list;
+}
+
+static void x86_cpu_list_entry(gpointer data, gpointer user_data)
+{
+ ObjectClass *oc = data;
+ X86CPUClass *cc = X86_CPU_CLASS(oc);
+ CPUListState *s = user_data;
+ char *name = x86_cpu_class_get_model_name(cc);
+ const char *desc = cc->model_description;
+ if (!desc) {
+ desc = cc->cpu_def->model_id;
+ }
+
+ (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n",
+ name, desc);
+ g_free(name);
+}
+
+/* list available CPU models and flags */
+void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
+{
+ int i;
+ CPUListState s = {
+ .file = f,
+ .cpu_fprintf = cpu_fprintf,
+ };
+ GSList *list;
+
+ (*cpu_fprintf)(f, "Available CPUs:\n");
+ list = get_sorted_cpu_model_list();
+ g_slist_foreach(list, x86_cpu_list_entry, &s);
+ g_slist_free(list);
+
+ (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
+ for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
+ FeatureWordInfo *fw = &feature_word_info[i];
+
+ (*cpu_fprintf)(f, " ");
+ listflags(f, cpu_fprintf, fw->feat_names);
+ (*cpu_fprintf)(f, "\n");
+ }
+}
+
+static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
+{
+ ObjectClass *oc = data;
+ X86CPUClass *cc = X86_CPU_CLASS(oc);
+ CpuDefinitionInfoList **cpu_list = user_data;
+ CpuDefinitionInfoList *entry;
+ CpuDefinitionInfo *info;
+
+ info = g_malloc0(sizeof(*info));
+ info->name = x86_cpu_class_get_model_name(cc);
+ x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
+ info->has_unavailable_features = true;
+
+ entry = g_malloc0(sizeof(*entry));
+ entry->value = info;
+ entry->next = *cpu_list;
+ *cpu_list = entry;
+}
+
+CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
+{
+ CpuDefinitionInfoList *cpu_list = NULL;
+ GSList *list = get_sorted_cpu_model_list();
+ g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
+ g_slist_free(list);
+ return cpu_list;
+}
+
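+/* Return the feature flags in word 'w' that the current accelerator can
+ * support (all bits if neither KVM nor TCG is in use), optionally
+ * restricted to migratable flags.
+ */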
+static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
+ bool migratable_only)
+{
+ FeatureWordInfo *wi = &feature_word_info[w];
+ uint32_t r;
+
+ if (kvm_enabled()) {
+ r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
+ wi->cpuid_ecx,
+ wi->cpuid_reg);
+ } else if (tcg_enabled()) {
+ r = wi->tcg_features;
+ } else {
+ return ~0;
+ }
+ if (migratable_only) {
+ r &= x86_cpu_get_migratable_flags(w);
+ }
+ return r;
+}
+
+/*
+ * Filters CPU feature words based on host availability of each feature.
+ *
+ * Returns: 0 if all flags are supported by the host, non-zero otherwise.
+ */
+static int x86_cpu_filter_features(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ FeatureWord w;
+ int rv = 0;
+
+ for (w = 0; w < FEATURE_WORDS; w++) {
+ uint32_t host_feat =
+ x86_cpu_get_supported_feature_word(w, false);
+ uint32_t requested_features = env->features[w];
+ env->features[w] &= host_feat;
+ cpu->filtered_features[w] = requested_features & ~env->features[w];
+ if (cpu->filtered_features[w]) {
+ rv = 1;
+ }
+ }
+
+ return rv;
+}
+
+static void x86_cpu_report_filtered_features(X86CPU *cpu)
+{
+ FeatureWord w;
+
+ for (w = 0; w < FEATURE_WORDS; w++) {
+ report_unavailable_features(w, cpu->filtered_features[w]);
+ }
+}
+
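+/* Apply a list of property/value pairs (terminated by a NULL prop name)
+ * to a CPU object; entries with a NULL value are skipped.
+ */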
+static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
+{
+ PropValue *pv;
+ for (pv = props; pv->prop; pv++) {
+ if (!pv->value) {
+ continue;
+ }
+ object_property_parse(OBJECT(cpu), pv->value, pv->prop,
+ &error_abort);
+ }
+}
+
+/* Load data from X86CPUDefinition
+ */
+static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
+{
+ CPUX86State *env = &cpu->env;
+ const char *vendor;
+ char host_vendor[CPUID_VENDOR_SZ + 1];
+ FeatureWord w;
+
+ /* CPU models only set _minimum_ values for level/xlevel: */
+ object_property_set_int(OBJECT(cpu), def->level, "min-level", errp);
+ object_property_set_int(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
+
+ object_property_set_int(OBJECT(cpu), def->family, "family", errp);
+ object_property_set_int(OBJECT(cpu), def->model, "model", errp);
+ object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
+ object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
+ for (w = 0; w < FEATURE_WORDS; w++) {
+ env->features[w] = def->features[w];
+ }
+
+ /* Special cases not set in the X86CPUDefinition structs: */
+ if (kvm_enabled()) {
+ if (!kvm_irqchip_in_kernel()) {
+ x86_cpu_change_kvm_default("x2apic", "off");
+ }
+
+ x86_cpu_apply_props(cpu, kvm_default_props);
+ } else if (tcg_enabled()) {
+ x86_cpu_apply_props(cpu, tcg_default_props);
+ }
+
+ env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
+
+ /* sysenter isn't supported in compatibility mode on AMD, and
+ * syscall isn't supported in compatibility mode on Intel.
+ * Normally we advertise the actual CPU vendor, but you can
+ * override this using the 'vendor' property if you want to use
+ * KVM's sysenter/syscall emulation in compatibility mode and
+ * when doing cross-vendor migration.
+ */
+ vendor = def->vendor;
+ if (kvm_enabled()) {
+ uint32_t ebx = 0, ecx = 0, edx = 0;
+ host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
+ x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
+ vendor = host_vendor;
+ }
+
+ object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
+
+}
+
+X86CPU *cpu_x86_init(const char *cpu_model)
+{
+ return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
+}
+
+static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
+{
+ X86CPUDefinition *cpudef = data;
+ X86CPUClass *xcc = X86_CPU_CLASS(oc);
+
+ xcc->cpu_def = cpudef;
+}
+
+static void x86_register_cpudef_type(X86CPUDefinition *def)
+{
+ char *typename = x86_cpu_type_name(def->name);
+ TypeInfo ti = {
+ .name = typename,
+ .parent = TYPE_X86_CPU,
+ .class_init = x86_cpu_cpudef_class_init,
+ .class_data = def,
+ };
+
+ type_register(&ti);
+ g_free(typename);
+}
+
+#if !defined(CONFIG_USER_ONLY)
+
+void cpu_clear_apic_feature(CPUX86State *env)
+{
+ env->features[FEAT_1_EDX] &= ~CPUID_APIC;
+}
+
+#endif /* !CONFIG_USER_ONLY */
+
+void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
+ uint32_t *eax, uint32_t *ebx,
+ uint32_t *ecx, uint32_t *edx)
+{
+ X86CPU *cpu = x86_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ uint32_t pkg_offset;
+
+ /* test if maximum index reached */
+ if (index & 0x80000000) {
+ if (index > env->cpuid_xlevel) {
+ if (env->cpuid_xlevel2 > 0) {
+ /* Handle the Centaur's CPUID instruction. */
+ if (index > env->cpuid_xlevel2) {
+ index = env->cpuid_xlevel2;
+ } else if (index < 0xC0000000) {
+ index = env->cpuid_xlevel;
+ }
+ } else {
+ /* Intel documentation states that invalid EAX input will
+ * return the same information as EAX=cpuid_level
+ * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
+ */
+ index = env->cpuid_level;
+ }
+ }
+ } else {
+ if (index > env->cpuid_level)
+ index = env->cpuid_level;
+ }
+
+ switch (index) {
+ case 0:
+ *eax = env->cpuid_level;
+ *ebx = env->cpuid_vendor1;
+ *edx = env->cpuid_vendor2;
+ *ecx = env->cpuid_vendor3;
+ break;
+ case 1:
+ *eax = env->cpuid_version;
+ *ebx = (cpu->apic_id << 24) |
+ 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
+ *ecx = env->features[FEAT_1_ECX];
+ if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
+ *ecx |= CPUID_EXT_OSXSAVE;
+ }
+ *edx = env->features[FEAT_1_EDX];
+ if (cs->nr_cores * cs->nr_threads > 1) {
+ *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
+ *edx |= CPUID_HT;
+ }
+ break;
+ case 2:
+ /* cache info: needed for Pentium Pro compatibility */
+ if (cpu->cache_info_passthrough) {
+ host_cpuid(index, 0, eax, ebx, ecx, edx);
+ break;
+ }
+ *eax = 1; /* Number of CPUID[EAX=2] calls required */
+ *ebx = 0;
+ if (!cpu->enable_l3_cache) {
+ *ecx = 0;
+ } else {
+ *ecx = L3_N_DESCRIPTOR;
+ }
+ *edx = (L1D_DESCRIPTOR << 16) | \
+ (L1I_DESCRIPTOR << 8) | \
+ (L2_DESCRIPTOR);
+ break;
+ case 4:
+ /* cache info: needed for Core compatibility */
+ if (cpu->cache_info_passthrough) {
+ host_cpuid(index, count, eax, ebx, ecx, edx);
+ *eax &= ~0xFC000000;
+ } else {
+ *eax = 0;
+ switch (count) {
+ case 0: /* L1 dcache info */
+ *eax |= CPUID_4_TYPE_DCACHE | \
+ CPUID_4_LEVEL(1) | \
+ CPUID_4_SELF_INIT_LEVEL;
+ *ebx = (L1D_LINE_SIZE - 1) | \
+ ((L1D_PARTITIONS - 1) << 12) | \
+ ((L1D_ASSOCIATIVITY - 1) << 22);
+ *ecx = L1D_SETS - 1;
+ *edx = CPUID_4_NO_INVD_SHARING;
+ break;
+ case 1: /* L1 icache info */
+ *eax |= CPUID_4_TYPE_ICACHE | \
+ CPUID_4_LEVEL(1) | \
+ CPUID_4_SELF_INIT_LEVEL;
+ *ebx = (L1I_LINE_SIZE - 1) | \
+ ((L1I_PARTITIONS - 1) << 12) | \
+ ((L1I_ASSOCIATIVITY - 1) << 22);
+ *ecx = L1I_SETS - 1;
+ *edx = CPUID_4_NO_INVD_SHARING;
+ break;
+ case 2: /* L2 cache info */
+ *eax |= CPUID_4_TYPE_UNIFIED | \
+ CPUID_4_LEVEL(2) | \
+ CPUID_4_SELF_INIT_LEVEL;
+ if (cs->nr_threads > 1) {
+ *eax |= (cs->nr_threads - 1) << 14;
+ }
+ *ebx = (L2_LINE_SIZE - 1) | \
+ ((L2_PARTITIONS - 1) << 12) | \
+ ((L2_ASSOCIATIVITY - 1) << 22);
+ *ecx = L2_SETS - 1;
+ *edx = CPUID_4_NO_INVD_SHARING;
+ break;
+ case 3: /* L3 cache info */
+ if (!cpu->enable_l3_cache) {
+ *eax = 0;
+ *ebx = 0;
+ *ecx = 0;
+ *edx = 0;
+ break;
+ }
+ *eax |= CPUID_4_TYPE_UNIFIED | \
+ CPUID_4_LEVEL(3) | \
+ CPUID_4_SELF_INIT_LEVEL;
+ pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
+ *eax |= ((1 << pkg_offset) - 1) << 14;
+ *ebx = (L3_N_LINE_SIZE - 1) | \
+ ((L3_N_PARTITIONS - 1) << 12) | \
+ ((L3_N_ASSOCIATIVITY - 1) << 22);
+ *ecx = L3_N_SETS - 1;
+ *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
+ break;
+ default: /* end of info */
+ *eax = 0;
+ *ebx = 0;
+ *ecx = 0;
+ *edx = 0;
+ break;
+ }
+ }
+
+ /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
+ if ((*eax & 31) && cs->nr_cores > 1) {
+ *eax |= (cs->nr_cores - 1) << 26;
+ }
+ break;
+ case 5:
+ /* mwait info: needed for Core compatibility */
+ *eax = 0; /* Smallest monitor-line size in bytes */
+ *ebx = 0; /* Largest monitor-line size in bytes */
+ *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
+ *edx = 0;
+ break;
+ case 6:
+ /* Thermal and Power Leaf */
+ *eax = env->features[FEAT_6_EAX];
+ *ebx = 0;
+ *ecx = 0;
+ *edx = 0;
+ break;
+ case 7:
+ /* Structured Extended Feature Flags Enumeration Leaf */
+ if (count == 0) {
+ *eax = 0; /* Maximum ECX value for sub-leaves */
+ *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
+ *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
+ if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
+ *ecx |= CPUID_7_0_ECX_OSPKE;
+ }
+ *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
+ } else {
+ *eax = 0;
+ *ebx = 0;
+ *ecx = 0;
+ *edx = 0;
+ }
+ break;
+ case 9:
+ /* Direct Cache Access Information Leaf */
+ *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
+ *ebx = 0;
+ *ecx = 0;
+ *edx = 0;
+ break;
+ case 0xA:
+ /* Architectural Performance Monitoring Leaf */
+ if (kvm_enabled() && cpu->enable_pmu) {
+ KVMState *s = cs->kvm_state;
+
+ *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
+ *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
+ *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
+ *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
+ } else {
+ *eax = 0;
+ *ebx = 0;
+ *ecx = 0;
+ *edx = 0;
+ }
+ break;
+ case 0xB:
+ /* Extended Topology Enumeration Leaf */
+ if (!cpu->enable_cpuid_0xb) {
+ *eax = *ebx = *ecx = *edx = 0;
+ break;
+ }
+
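+ /* ECX[7:0] echoes the sub-leaf number, ECX[15:8] holds the level
+ * type, and EDX returns the x2APIC ID.
+ */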
+ *ecx = count & 0xff;
+ *edx = cpu->apic_id;
+
+ switch (count) {
+ case 0:
+ *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
+ *ebx = cs->nr_threads;
+ *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
+ break;
+ case 1:
+ *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
+ *ebx = cs->nr_cores * cs->nr_threads;
+ *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
+ break;
+ default:
+ *eax = 0;
+ *ebx = 0;
+ *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
+ }
+
+ assert(!(*eax & ~0x1f));
+ *ebx &= 0xffff; /* The count doesn't need to be reliable. */
+ break;
+ case 0xD: {
+ /* Processor Extended State */
+ *eax = 0;
+ *ebx = 0;
+ *ecx = 0;
+ *edx = 0;
+ if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
+ break;
+ }
+
+ if (count == 0) {
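+ /* Sub-leaf 0: EAX:EDX report the supported XCR0 bits, ECX the
+ * XSAVE area size for all supported components, and EBX the
+ * size for the currently enabled set (the same value here).
+ */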
+ *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
+ *eax = env->features[FEAT_XSAVE_COMP_LO];
+ *edx = env->features[FEAT_XSAVE_COMP_HI];
+ *ebx = *ecx;
+ } else if (count == 1) {
+ *eax = env->features[FEAT_XSAVE];
+ } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
+ if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
+ const ExtSaveArea *esa = &x86_ext_save_areas[count];
+ *eax = esa->size;
+ *ebx = esa->offset;
+ }
+ }
+ break;
+ }
+ case 0x80000000:
+ *eax = env->cpuid_xlevel;
+ *ebx = env->cpuid_vendor1;
+ *edx = env->cpuid_vendor2;
+ *ecx = env->cpuid_vendor3;
+ break;
+ case 0x80000001:
+ *eax = env->cpuid_version;
+ *ebx = 0;
+ *ecx = env->features[FEAT_8000_0001_ECX];
+ *edx = env->features[FEAT_8000_0001_EDX];
+
+ /* The Linux kernel checks for the CMPLegacy bit and
+ * discards multiple thread information if it is set.
+ * So don't set it here for Intel to make Linux guests happy.
+ */
+ if (cs->nr_cores * cs->nr_threads > 1) {
+ if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
+ env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
+ env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
+ *ecx |= 1 << 1; /* CmpLegacy bit */
+ }
+ }
+ break;
+ case 0x80000002:
+ case 0x80000003:
+ case 0x80000004:
+ *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
+ *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
+ *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
+ *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
+ break;
+ case 0x80000005:
+ /* cache info (L1 cache) */
+ if (cpu->cache_info_passthrough) {
+ host_cpuid(index, 0, eax, ebx, ecx, edx);
+ break;
+ }
+ *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
+ (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
+ *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
+ (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
+ *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
+ (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
+ *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
+ (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
+ break;
+ case 0x80000006:
+ /* cache info (L2 cache) */
+ if (cpu->cache_info_passthrough) {
+ host_cpuid(index, 0, eax, ebx, ecx, edx);
+ break;
+ }
+ *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
+ (L2_DTLB_2M_ENTRIES << 16) | \
+ (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
+ (L2_ITLB_2M_ENTRIES);
+ *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
+ (L2_DTLB_4K_ENTRIES << 16) | \
+ (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
+ (L2_ITLB_4K_ENTRIES);
+ *ecx = (L2_SIZE_KB_AMD << 16) | \
+ (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
+ (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
+ if (!cpu->enable_l3_cache) {
+ *edx = ((L3_SIZE_KB / 512) << 18) | \
+ (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
+ (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
+ } else {
+ *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
+ (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
+ (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
+ }
+ break;
+ case 0x80000007:
+ *eax = 0;
+ *ebx = 0;
+ *ecx = 0;
+ *edx = env->features[FEAT_8000_0007_EDX];
+ break;
+ case 0x80000008:
+ /* virtual & phys address size in low 2 bytes. */
+ if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
+ /* 64 bit processor, 48 bits virtual, configurable
+ * physical bits.
+ */
+ *eax = 0x00003000 + cpu->phys_bits;
+ } else {
+ *eax = cpu->phys_bits;
+ }
+ *ebx = 0;
+ *ecx = 0;
+ *edx = 0;
+ if (cs->nr_cores * cs->nr_threads > 1) {
+ *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
+ }
+ break;
+ case 0x8000000A:
+ if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
+ *eax = 0x00000001; /* SVM Revision */
+ *ebx = 0x00000010; /* nr of ASIDs */
+ *ecx = 0;
+ *edx = env->features[FEAT_SVM]; /* optional features */
+ } else {
+ *eax = 0;
+ *ebx = 0;
+ *ecx = 0;
+ *edx = 0;
+ }
+ break;
+ case 0xC0000000:
+ *eax = env->cpuid_xlevel2;
+ *ebx = 0;
+ *ecx = 0;
+ *edx = 0;
+ break;
+ case 0xC0000001:
+ /* Support for VIA CPU's CPUID instruction */
+ *eax = env->cpuid_version;
+ *ebx = 0;
+ *ecx = 0;
+ *edx = env->features[FEAT_C000_0001_EDX];
+ break;
+ case 0xC0000002:
+ case 0xC0000003:
+ case 0xC0000004:
+ /* Reserved for the future, and now filled with zero */
+ *eax = 0;
+ *ebx = 0;
+ *ecx = 0;
+ *edx = 0;
+ break;
+ default:
+ /* reserved values: zero */
+ *eax = 0;
+ *ebx = 0;
+ *ecx = 0;
+ *edx = 0;
+ break;
+ }
+}
+
+/* CPUClass::reset() */
+static void x86_cpu_reset(CPUState *s)
+{
+ X86CPU *cpu = X86_CPU(s);
+ X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
+ CPUX86State *env = &cpu->env;
+ target_ulong cr4;
+ uint64_t xcr0;
+ int i;
+
+ xcc->parent_reset(s);
+
+ memset(env, 0, offsetof(CPUX86State, end_reset_fields));
+
+ tlb_flush(s, 1);
+
+ env->old_exception = -1;
+
+ /* init to reset state */
+
+ env->hflags2 |= HF2_GIF_MASK;
+
+ cpu_x86_update_cr0(env, 0x60000010);
+ env->a20_mask = ~0x0;
+ env->smbase = 0x30000;
+
+ env->idt.limit = 0xffff;
+ env->gdt.limit = 0xffff;
+ env->ldt.limit = 0xffff;
+ env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
+ env->tr.limit = 0xffff;
+ env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
+
+ cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
+ DESC_R_MASK | DESC_A_MASK);
+ cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_A_MASK);
+ cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_A_MASK);
+ cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_A_MASK);
+ cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_A_MASK);
+ cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_A_MASK);
+
+ env->eip = 0xfff0;
+ env->regs[R_EDX] = env->cpuid_version;
+
+ env->eflags = 0x2;
+
+ /* FPU init */
+ for (i = 0; i < 8; i++) {
+ env->fptags[i] = 1;
+ }
+ cpu_set_fpuc(env, 0x37f);
+
+ env->mxcsr = 0x1f80;
+ /* All units are in INIT state. */
+ env->xstate_bv = 0;
+
+ env->pat = 0x0007040600070406ULL;
+ env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
+
+ memset(env->dr, 0, sizeof(env->dr));
+ env->dr[6] = DR6_FIXED_1;
+ env->dr[7] = DR7_FIXED_1;
+ cpu_breakpoint_remove_all(s, BP_CPU);
+ cpu_watchpoint_remove_all(s, BP_CPU);
+
+ cr4 = 0;
+ xcr0 = XSTATE_FP_MASK;
+
+#ifdef CONFIG_USER_ONLY
+ /* Enable all the features for user-mode. */
+ if (env->features[FEAT_1_EDX] & CPUID_SSE) {
+ xcr0 |= XSTATE_SSE_MASK;
+ }
+ for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
+ const ExtSaveArea *esa = &x86_ext_save_areas[i];
+ if (env->features[esa->feature] & esa->bits) {
+ xcr0 |= 1ull << i;
+ }
+ }
+
+ if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
+ cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
+ }
+ if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
+ cr4 |= CR4_FSGSBASE_MASK;
+ }
+#endif
+
+ env->xcr0 = xcr0;
+ cpu_x86_update_cr4(env, cr4);
+
+ /*
+ * SDM 11.11.5 requires:
+ * - IA32_MTRR_DEF_TYPE MSR.E = 0
+ * - IA32_MTRR_PHYSMASKn.V = 0
+ * All other bits are undefined. For simplification, zero it all.
+ */
+ env->mtrr_deftype = 0;
+ memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
+ memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
+
+#if !defined(CONFIG_USER_ONLY)
+ /* We hard-wire the BSP to the first CPU. */
+ apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
+
+ s->halted = !cpu_is_bsp(cpu);
+
+ if (kvm_enabled()) {
+ kvm_arch_reset_vcpu(cpu);
+ }
+#endif
+}
+
+#ifndef CONFIG_USER_ONLY
+bool cpu_is_bsp(X86CPU *cpu)
+{
+ return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
+}
+
+/* TODO: remove me, when reset over QOM tree is implemented */
+static void x86_cpu_machine_reset_cb(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ cpu_reset(CPU(cpu));
+}
+#endif
+
+static void mce_init(X86CPU *cpu)
+{
+ CPUX86State *cenv = &cpu->env;
+ unsigned int bank;
+
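+ /* Only family >= 6 parts that advertise both MCE and MCA get MCE
+ * state; MCG_CAP encodes the bank count in its low byte plus
+ * capability flags.
+ */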
+ if (((cenv->cpuid_version >> 8) & 0xf) >= 6
+ && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
+ (CPUID_MCE | CPUID_MCA)) {
+ cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
+ (cpu->enable_lmce ? MCG_LMCE_P : 0);
+ cenv->mcg_ctl = ~(uint64_t)0;
+ for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
+ cenv->mce_banks[bank * 4] = ~(uint64_t)0;
+ }
+ }
+}
+
+#ifndef CONFIG_USER_ONLY
+APICCommonClass *apic_get_class(void)
+{
+ const char *apic_type = "apic";
+
+ if (kvm_apic_in_kernel()) {
+ apic_type = "kvm-apic";
+ } else if (xen_enabled()) {
+ apic_type = "xen-apic";
+ }
+
+ return APIC_COMMON_CLASS(object_class_by_name(apic_type));
+}
+
+static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
+{
+ APICCommonState *apic;
+ ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
+
+ cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));
+
+ object_property_add_child(OBJECT(cpu), "lapic",
+ OBJECT(cpu->apic_state), &error_abort);
+ object_unref(OBJECT(cpu->apic_state));
+
+ qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
+ /* TODO: convert to link<> */
+ apic = APIC_COMMON(cpu->apic_state);
+ apic->cpu = cpu;
+ apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
+}
+
+static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
+{
+ APICCommonState *apic;
+ static bool apic_mmio_map_once;
+
+ if (cpu->apic_state == NULL) {
+ return;
+ }
+ object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
+ errp);
+
+ /* Map APIC MMIO area */
+ apic = APIC_COMMON(cpu->apic_state);
+ if (!apic_mmio_map_once) {
+ memory_region_add_subregion_overlap(get_system_memory(),
+ apic->apicbase &
+ MSR_IA32_APICBASE_BASE,
+ &apic->io_memory,
+ 0x1000);
+ apic_mmio_map_once = true;
+ }
+}
+
+static void x86_cpu_machine_done(Notifier *n, void *unused)
+{
+ X86CPU *cpu = container_of(n, X86CPU, machine_done);
+ MemoryRegion *smram =
+ (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
+
+ if (smram) {
+ cpu->smram = g_new(MemoryRegion, 1);
+ memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
+ smram, 0, 1ull << 32);
+ memory_region_set_enabled(cpu->smram, false);
+ memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
+ }
+}
+#else
+static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
+{
+}
+#endif
+
+/* Note: Only safe for use on x86(-64) hosts */
+static uint32_t x86_host_phys_bits(void)
+{
+ uint32_t eax;
+ uint32_t host_phys_bits;
+
+ host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
+ if (eax >= 0x80000008) {
+ host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
+ /* Note: According to AMD doc 25481 rev 2.34 there is a field
+ * at bits 23:16 that can specify a maximum physical address bit
+ * count for the guest, overriding this value; but I've not seen
+ * anything with it set.
+ */
+ host_phys_bits = eax & 0xff;
+ } else {
+ /* It's an odd 64-bit machine that doesn't have the leaf for
+ * physical address bits; fall back to 36, which is what most
+ * older Intel parts use.
+ */
+ host_phys_bits = 36;
+ }
+
+ return host_phys_bits;
+}
+
+static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
+{
+ if (*min < value) {
+ *min = value;
+ }
+}
+
+/* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
+static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
+{
+ CPUX86State *env = &cpu->env;
+ FeatureWordInfo *fi = &feature_word_info[w];
+ uint32_t eax = fi->cpuid_eax;
+ uint32_t region = eax & 0xF0000000;
+
+ if (!env->features[w]) {
+ return;
+ }
+
+ switch (region) {
+ case 0x00000000:
+ x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
+ break;
+ case 0x80000000:
+ x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
+ break;
+ case 0xC0000000:
+ x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
+ break;
+ }
+}
+
+/* Calculate XSAVE components based on the configured CPU feature flags */
+static void x86_cpu_enable_xsave_components(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ int i;
+ uint64_t mask;
+
+ if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
+ return;
+ }
+
+ mask = 0;
+ for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
+ const ExtSaveArea *esa = &x86_ext_save_areas[i];
+ if (env->features[esa->feature] & esa->bits) {
+ mask |= (1ULL << i);
+ }
+ }
+
+ env->features[FEAT_XSAVE_COMP_LO] = mask;
+ env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
+}
+
+/* Load CPUID data based on configured features */
+static void x86_cpu_load_features(X86CPU *cpu, Error **errp)
+{
+ CPUX86State *env = &cpu->env;
+ FeatureWord w;
+ GList *l;
+ Error *local_err = NULL;
+
+ /*TODO: cpu->host_features incorrectly overwrites features
+ * set using "feat=on|off". Once we fix this, we can convert
+ * plus_features & minus_features to global properties
+ * inside x86_cpu_parse_featurestr() too.
+ */
+ if (cpu->host_features) {
+ for (w = 0; w < FEATURE_WORDS; w++) {
+ env->features[w] =
+ x86_cpu_get_supported_feature_word(w, cpu->migratable);
+ }
+ }
+
+ for (l = plus_features; l; l = l->next) {
+ const char *prop = l->data;
+ object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
+ if (local_err) {
+ goto out;
+ }
+ }
+
+ for (l = minus_features; l; l = l->next) {
+ const char *prop = l->data;
+ object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
+ if (local_err) {
+ goto out;
+ }
+ }
+
+ if (!kvm_enabled() || !cpu->expose_kvm) {
+ env->features[FEAT_KVM] = 0;
+ }
+
+ x86_cpu_enable_xsave_components(cpu);
+
+ /* CPUID[EAX=7,ECX=0].EBX always increases the level automatically: */
+ x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
+ if (cpu->full_cpuid_auto_level) {
+ x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
+ x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
+ x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
+ /* SVM requires CPUID[0x8000000A] */
+ if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
+ x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
+ }
+ }
+
+ /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
+ if (env->cpuid_level == UINT32_MAX) {
+ env->cpuid_level = env->cpuid_min_level;
+ }
+ if (env->cpuid_xlevel == UINT32_MAX) {
+ env->cpuid_xlevel = env->cpuid_min_xlevel;
+ }
+ if (env->cpuid_xlevel2 == UINT32_MAX) {
+ env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
+ }
+
+out:
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ }
+}
+
+#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
+ (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
+ (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
+#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
+ (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
+ (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
+static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ X86CPU *cpu = X86_CPU(dev);
+ X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
+ CPUX86State *env = &cpu->env;
+ Error *local_err = NULL;
+ static bool ht_warned;
+
+ if (xcc->kvm_required && !kvm_enabled()) {
+ char *name = x86_cpu_class_get_model_name(xcc);
+ error_setg(&local_err, "CPU model '%s' requires KVM", name);
+ g_free(name);
+ goto out;
+ }
+
+ if (cpu->apic_id == UNASSIGNED_APIC_ID) {
+ error_setg(errp, "apic-id property was not initialized properly");
+ return;
+ }
+
+ x86_cpu_load_features(cpu, &local_err);
+ if (local_err) {
+ goto out;
+ }
+
+ if (x86_cpu_filter_features(cpu) &&
+ (cpu->check_cpuid || cpu->enforce_cpuid)) {
+ x86_cpu_report_filtered_features(cpu);
+ if (cpu->enforce_cpuid) {
+ error_setg(&local_err,
+ kvm_enabled() ?
+ "Host doesn't support requested features" :
+ "TCG doesn't support requested features");
+ goto out;
+ }
+ }
+
+ /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
+ * CPUID[1].EDX.
+ */
+ if (IS_AMD_CPU(env)) {
+ env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
+ env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
+ & CPUID_EXT2_AMD_ALIASES);
+ }
+
+ /* For 64-bit systems, decide how many physical address bits to present.
+ * Ideally this is the same as the host; anything other than matching
+ * the host can cause incorrect guest behaviour.
+ * QEMU used to pick the magic value of 40 bits, which corresponds to
+ * consumer AMD devices but nothing else.
+ */
+ if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
+ if (kvm_enabled()) {
+ uint32_t host_phys_bits = x86_host_phys_bits();
+ static bool warned;
+
+ if (cpu->host_phys_bits) {
+ /* The user asked for us to use the host physical bits */
+ cpu->phys_bits = host_phys_bits;
+ }
+
+ /* Print a warning if the user set it to a value that's not the
+ * host value.
+ */
+ if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
+ !warned) {
+ error_report("Warning: Host physical bits (%u)"
+ " does not match phys-bits property (%u)",
+ host_phys_bits, cpu->phys_bits);
+ warned = true;
+ }
+
+ if (cpu->phys_bits &&
+ (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
+ cpu->phys_bits < 32)) {
+ error_setg(errp, "phys-bits should be between 32 and %u "
+ " (but is %u)",
+ TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
+ return;
+ }
+ } else {
+ if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
+ error_setg(errp, "TCG only supports phys-bits=%u",
+ TCG_PHYS_ADDR_BITS);
+ return;
+ }
+ }
+ /* 0 means it was not explicitly set by the user (or by machine
+ * compat_props or by the host code above). In this case, the default
+ * is the value used by TCG (40).
+ */
+ if (cpu->phys_bits == 0) {
+ cpu->phys_bits = TCG_PHYS_ADDR_BITS;
+ }
+ } else {
+ /* For 32 bit systems don't use the user set value, but keep
+ * phys_bits consistent with what we tell the guest.
+ */
+ if (cpu->phys_bits != 0) {
+ error_setg(errp, "phys-bits is not user-configurable in 32 bit");
+ return;
+ }
+
+ if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
+ cpu->phys_bits = 36;
+ } else {
+ cpu->phys_bits = 32;
+ }
+ }
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ if (tcg_enabled()) {
+ tcg_x86_init();
+ }
+
+#ifndef CONFIG_USER_ONLY
+ qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
+
+ if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
+ x86_cpu_apic_create(cpu, &local_err);
+ if (local_err != NULL) {
+ goto out;
+ }
+ }
+#endif
+
+ mce_init(cpu);
+
+#ifndef CONFIG_USER_ONLY
+ if (tcg_enabled()) {
+ AddressSpace *newas = g_new(AddressSpace, 1);
+
+ cpu->cpu_as_mem = g_new(MemoryRegion, 1);
+ cpu->cpu_as_root = g_new(MemoryRegion, 1);
+
+ /* Outer container... */
+ memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
+ memory_region_set_enabled(cpu->cpu_as_root, true);
+
+ /* ... with two regions inside: normal system memory with low
+ * priority, and...
+ */
+ memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
+ get_system_memory(), 0, ~0ull);
+ memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
+ memory_region_set_enabled(cpu->cpu_as_mem, true);
+ address_space_init(newas, cpu->cpu_as_root, "CPU");
+ cs->num_ases = 1;
+ cpu_address_space_init(cs, newas, 0);
+
+ /* ... SMRAM with higher priority, linked from /machine/smram. */
+ cpu->machine_done.notify = x86_cpu_machine_done;
+ qemu_add_machine_init_done_notifier(&cpu->machine_done);
+ }
+#endif
+
+ qemu_init_vcpu(cs);
+
+ /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
+ * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
+ * based on inputs (sockets, cores, threads), it is still better to give
+ * users a warning.
+ *
+ * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
+ * cs->nr_threads hasn't been populated yet and the check would be
+ * incorrect.
+ */
+ if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
+ error_report("AMD CPU doesn't support hyperthreading. Please configure"
+ " -smp options properly.");
+ ht_warned = true;
+ }
+
+ x86_cpu_apic_realize(cpu, &local_err);
+ if (local_err != NULL) {
+ goto out;
+ }
+ cpu_reset(cs);
+
+ xcc->parent_realize(dev, &local_err);
+
+out:
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+}
+
+static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
+{
+ X86CPU *cpu = X86_CPU(dev);
+ X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
+ Error *local_err = NULL;
+
+#ifndef CONFIG_USER_ONLY
+ cpu_remove_sync(CPU(dev));
+ qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
+#endif
+
+ if (cpu->apic_state) {
+ object_unparent(OBJECT(cpu->apic_state));
+ cpu->apic_state = NULL;
+ }
+
+ xcc->parent_unrealize(dev, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+}
+
+typedef struct BitProperty {
+ uint32_t *ptr;
+ uint32_t mask;
+} BitProperty;
+
+static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ BitProperty *fp = opaque;
+ bool value = (*fp->ptr & fp->mask) == fp->mask;
+ visit_type_bool(v, name, &value, errp);
+}
+
+static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ DeviceState *dev = DEVICE(obj);
+ BitProperty *fp = opaque;
+ Error *local_err = NULL;
+ bool value;
+
+ if (dev->realized) {
+ qdev_prop_set_after_realize(dev, name, errp);
+ return;
+ }
+
+ visit_type_bool(v, name, &value, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ if (value) {
+ *fp->ptr |= fp->mask;
+ } else {
+ *fp->ptr &= ~fp->mask;
+ }
+}
+
+static void x86_cpu_release_bit_prop(Object *obj, const char *name,
+ void *opaque)
+{
+ BitProperty *prop = opaque;
+ g_free(prop);
+}
+
+/* Register a boolean property to get/set a single bit in a uint32_t field.
+ *
+ * The same property name can be registered multiple times to make it affect
+ * multiple bits in the same FeatureWord. In that case, the getter will return
+ * true only if all bits are set.
+ */
+static void x86_cpu_register_bit_prop(X86CPU *cpu,
+ const char *prop_name,
+ uint32_t *field,
+ int bitnr)
+{
+ BitProperty *fp;
+ ObjectProperty *op;
+ uint32_t mask = (1UL << bitnr);
+
+ op = object_property_find(OBJECT(cpu), prop_name, NULL);
+ if (op) {
+ fp = op->opaque;
+ assert(fp->ptr == field);
+ fp->mask |= mask;
+ } else {
+ fp = g_new0(BitProperty, 1);
+ fp->ptr = field;
+ fp->mask = mask;
+ object_property_add(OBJECT(cpu), prop_name, "bool",
+ x86_cpu_get_bit_prop,
+ x86_cpu_set_bit_prop,
+ x86_cpu_release_bit_prop, fp, &error_abort);
+ }
+}
+
+static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
+ FeatureWord w,
+ int bitnr)
+{
+ FeatureWordInfo *fi = &feature_word_info[w];
+ const char *name = fi->feat_names[bitnr];
+
+ if (!name) {
+ return;
+ }
+
+ /* Property names should use "-" instead of "_".
+ * Old names containing underscores are registered as aliases
+ * using object_property_add_alias()
+ */
+ assert(!strchr(name, '_'));
+ /* aliases don't use "|" delimiters anymore, they are registered
+ * manually using object_property_add_alias() */
+ assert(!strchr(name, '|'));
+ x86_cpu_register_bit_prop(cpu, name, &cpu->env.features[w], bitnr);
+}
+
+static void x86_cpu_initfn(Object *obj)
+{
+ CPUState *cs = CPU(obj);
+ X86CPU *cpu = X86_CPU(obj);
+ X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
+ CPUX86State *env = &cpu->env;
+ FeatureWord w;
+
+ cs->env_ptr = env;
+
+ object_property_add(obj, "family", "int",
+ x86_cpuid_version_get_family,
+ x86_cpuid_version_set_family, NULL, NULL, NULL);
+ object_property_add(obj, "model", "int",
+ x86_cpuid_version_get_model,
+ x86_cpuid_version_set_model, NULL, NULL, NULL);
+ object_property_add(obj, "stepping", "int",
+ x86_cpuid_version_get_stepping,
+ x86_cpuid_version_set_stepping, NULL, NULL, NULL);
+ object_property_add_str(obj, "vendor",
+ x86_cpuid_get_vendor,
+ x86_cpuid_set_vendor, NULL);
+ object_property_add_str(obj, "model-id",
+ x86_cpuid_get_model_id,
+ x86_cpuid_set_model_id, NULL);
+ object_property_add(obj, "tsc-frequency", "int",
+ x86_cpuid_get_tsc_freq,
+ x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
+ object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
+ x86_cpu_get_feature_words,
+ NULL, NULL, (void *)env->features, NULL);
+ object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
+ x86_cpu_get_feature_words,
+ NULL, NULL, (void *)cpu->filtered_features, NULL);
+
+ cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
+
+ for (w = 0; w < FEATURE_WORDS; w++) {
+ int bitnr;
+
+ for (bitnr = 0; bitnr < 32; bitnr++) {
+ x86_cpu_register_feature_bit_props(cpu, w, bitnr);
+ }
+ }
+
+ object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
+ object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
+ object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
+ object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
+ object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
+ object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
+ object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
+
+ object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
+ object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
+ object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
+ object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
+ object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
+ object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
+ object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
+ object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
+ object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
+ object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
+ object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
+ object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
+ object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
+ object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
+ object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
+ object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
+ object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
+ object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
+ object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
+ object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
+ object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
+
+ x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
+}
+
+static int64_t x86_cpu_get_arch_id(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+
+ return cpu->apic_id;
+}
+
+static bool x86_cpu_get_paging_enabled(const CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+
+ return cpu->env.cr[0] & CR0_PG_MASK;
+}
+
+static void x86_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ X86CPU *cpu = X86_CPU(cs);
+
+ cpu->env.eip = value;
+}
+
+static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
+{
+ X86CPU *cpu = X86_CPU(cs);
+
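+ /* tb->pc is CS.base + EIP, so subtract the base to recover EIP. */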
+ cpu->env.eip = tb->pc - tb->cs_base;
+}
+
+static bool x86_cpu_has_work(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
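+ /* Work is pending if a maskable interrupt (or APIC poll) is pending
+ * while IF is set, any NMI/INIT/SIPI/MCE is pending, or an SMI is
+ * pending while not already in SMM.
+ */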
+ return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
+ CPU_INTERRUPT_POLL)) &&
+ (env->eflags & IF_MASK)) ||
+ (cs->interrupt_request & (CPU_INTERRUPT_NMI |
+ CPU_INTERRUPT_INIT |
+ CPU_INTERRUPT_SIPI |
+ CPU_INTERRUPT_MCE)) ||
+ ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
+ !(env->hflags & HF_SMM_MASK));
+}
+
+static Property x86_cpu_properties[] = {
+#ifdef CONFIG_USER_ONLY
+ /* apic_id = 0 by default for *-user, see commit 9886e834 */
+ DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
+ DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
+ DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
+ DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
+#else
+ DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
+ DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
+ DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
+ DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
+#endif
+ DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
+ { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
+ DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
+ DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
+ DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
+ DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
+ DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
+ DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
+ DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
+ DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
+ DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
+ DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
+ DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
+ DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
+ DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
+ DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
+ DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
+ DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
+ DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
+ DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
+ DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
+ DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
+ DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
+ DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
+ DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
+ DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
+ DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
+ DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
+ DEFINE_PROP_END_OF_LIST()
+};
+
+static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
+{
+ X86CPUClass *xcc = X86_CPU_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(oc);
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ xcc->parent_realize = dc->realize;
+ xcc->parent_unrealize = dc->unrealize;
+ dc->realize = x86_cpu_realizefn;
+ dc->unrealize = x86_cpu_unrealizefn;
+ dc->props = x86_cpu_properties;
+
+ xcc->parent_reset = cc->reset;
+ cc->reset = x86_cpu_reset;
+ cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
+
+ cc->class_by_name = x86_cpu_class_by_name;
+ cc->parse_features = x86_cpu_parse_featurestr;
+ cc->has_work = x86_cpu_has_work;
+ cc->do_interrupt = x86_cpu_do_interrupt;
+ cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
+ cc->dump_state = x86_cpu_dump_state;
+ cc->set_pc = x86_cpu_set_pc;
+ cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
+ cc->gdb_read_register = x86_cpu_gdb_read_register;
+ cc->gdb_write_register = x86_cpu_gdb_write_register;
+ cc->get_arch_id = x86_cpu_get_arch_id;
+ cc->get_paging_enabled = x86_cpu_get_paging_enabled;
+#ifdef CONFIG_USER_ONLY
+ cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
+#else
+ cc->get_memory_mapping = x86_cpu_get_memory_mapping;
+ cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
+ cc->write_elf64_note = x86_cpu_write_elf64_note;
+ cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
+ cc->write_elf32_note = x86_cpu_write_elf32_note;
+ cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
+ cc->vmsd = &vmstate_x86_cpu;
+#endif
+ /* CPU_NB_REGS * 2 = general regs + xmm regs
+ * 25 = eip, eflags, 6 seg regs, st[0-7], fctrl,...,fop, mxcsr.
+ */
+ cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
+#ifndef CONFIG_USER_ONLY
+ cc->debug_excp_handler = breakpoint_handler;
+#endif
+ cc->cpu_exec_enter = x86_cpu_exec_enter;
+ cc->cpu_exec_exit = x86_cpu_exec_exit;
+
+ dc->cannot_instantiate_with_device_add_yet = false;
+}
+
+static const TypeInfo x86_cpu_type_info = {
+ .name = TYPE_X86_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(X86CPU),
+ .instance_init = x86_cpu_initfn,
+ .abstract = true,
+ .class_size = sizeof(X86CPUClass),
+ .class_init = x86_cpu_common_class_init,
+};
+
+static void x86_cpu_register_types(void)
+{
+ int i;
+
+ type_register_static(&x86_cpu_type_info);
+ for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
+ x86_register_cpudef_type(&builtin_x86_defs[i]);
+ }
+#ifdef CONFIG_KVM
+ type_register_static(&host_x86_cpu_type_info);
+#endif
+}
+
+type_init(x86_cpu_register_types)
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
new file mode 100644
index 0000000000..c605724022
--- /dev/null
+++ b/target/i386/cpu.h
@@ -0,0 +1,1656 @@
+/*
+ * i386 virtual CPU header
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef I386_CPU_H
+#define I386_CPU_H
+
+#include "qemu-common.h"
+#include "cpu-qom.h"
+#include "standard-headers/asm-x86/hyperv.h"
+
+#ifdef TARGET_X86_64
+#define TARGET_LONG_BITS 64
+#else
+#define TARGET_LONG_BITS 32
+#endif
+
+/* Maximum instruction code size */
+#define TARGET_MAX_INSN_SIZE 16
+
+/* support for self modifying code even if the modified instruction is
+ close to the modifying instruction */
+#define TARGET_HAS_PRECISE_SMC
+
+#ifdef TARGET_X86_64
+#define I386_ELF_MACHINE EM_X86_64
+#define ELF_MACHINE_UNAME "x86_64"
+#else
+#define I386_ELF_MACHINE EM_386
+#define ELF_MACHINE_UNAME "i686"
+#endif
+
+#define CPUArchState struct CPUX86State
+
+#include "exec/cpu-defs.h"
+
+#include "fpu/softfloat.h"
+
+#define R_EAX 0
+#define R_ECX 1
+#define R_EDX 2
+#define R_EBX 3
+#define R_ESP 4
+#define R_EBP 5
+#define R_ESI 6
+#define R_EDI 7
+
+#define R_AL 0
+#define R_CL 1
+#define R_DL 2
+#define R_BL 3
+#define R_AH 4
+#define R_CH 5
+#define R_DH 6
+#define R_BH 7
+
+#define R_ES 0
+#define R_CS 1
+#define R_SS 2
+#define R_DS 3
+#define R_FS 4
+#define R_GS 5
+
+/* segment descriptor fields */
+#define DESC_G_MASK (1 << 23)
+#define DESC_B_SHIFT 22
+#define DESC_B_MASK (1 << DESC_B_SHIFT)
+#define DESC_L_SHIFT 21 /* x86_64 only : 64 bit code segment */
+#define DESC_L_MASK (1 << DESC_L_SHIFT)
+#define DESC_AVL_MASK (1 << 20)
+#define DESC_P_MASK (1 << 15)
+#define DESC_DPL_SHIFT 13
+#define DESC_DPL_MASK (3 << DESC_DPL_SHIFT)
+#define DESC_S_MASK (1 << 12)
+#define DESC_TYPE_SHIFT 8
+#define DESC_TYPE_MASK (15 << DESC_TYPE_SHIFT)
+#define DESC_A_MASK (1 << 8)
+
+#define DESC_CS_MASK (1 << 11) /* 1=code segment 0=data segment */
+#define DESC_C_MASK (1 << 10) /* code: conforming */
+#define DESC_R_MASK (1 << 9) /* code: readable */
+
+#define DESC_E_MASK (1 << 10) /* data: expansion direction */
+#define DESC_W_MASK (1 << 9) /* data: writable */
+
+#define DESC_TSS_BUSY_MASK (1 << 9)
+
+/* eflags masks */
+#define CC_C 0x0001
+#define CC_P 0x0004
+#define CC_A 0x0010
+#define CC_Z 0x0040
+#define CC_S 0x0080
+#define CC_O 0x0800
+
+#define TF_SHIFT 8
+#define IOPL_SHIFT 12
+#define VM_SHIFT 17
+
+#define TF_MASK 0x00000100
+#define IF_MASK 0x00000200
+#define DF_MASK 0x00000400
+#define IOPL_MASK 0x00003000
+#define NT_MASK 0x00004000
+#define RF_MASK 0x00010000
+#define VM_MASK 0x00020000
+#define AC_MASK 0x00040000
+#define VIF_MASK 0x00080000
+#define VIP_MASK 0x00100000
+#define ID_MASK 0x00200000
+
+/* hidden flags - used internally by qemu to represent additional cpu
+ states. Only the INHIBIT_IRQ, SMM and SVMI are not redundant. We
+ avoid using the IOPL_MASK, TF_MASK, VM_MASK and AC_MASK bit
+ positions to ease ORing with eflags. */
+/* current cpl */
+#define HF_CPL_SHIFT 0
+/* true if hardware interrupts must be disabled for next instruction */
+#define HF_INHIBIT_IRQ_SHIFT 3
+/* 16 or 32 segments */
+#define HF_CS32_SHIFT 4
+#define HF_SS32_SHIFT 5
+/* zero base for DS, ES and SS : can be '0' only in 32 bit CS segment */
+#define HF_ADDSEG_SHIFT 6
+/* copy of CR0.PE (protected mode) */
+#define HF_PE_SHIFT 7
+#define HF_TF_SHIFT 8 /* must be same as eflags */
+#define HF_MP_SHIFT 9 /* the order must be MP, EM, TS */
+#define HF_EM_SHIFT 10
+#define HF_TS_SHIFT 11
+#define HF_IOPL_SHIFT 12 /* must be same as eflags */
+#define HF_LMA_SHIFT 14 /* only used on x86_64: long mode active */
+#define HF_CS64_SHIFT 15 /* only used on x86_64: 64 bit code segment */
+#define HF_RF_SHIFT 16 /* must be same as eflags */
+#define HF_VM_SHIFT 17 /* must be same as eflags */
+#define HF_AC_SHIFT 18 /* must be same as eflags */
+#define HF_SMM_SHIFT 19 /* CPU in SMM mode */
+#define HF_SVME_SHIFT 20 /* SVME enabled (copy of EFER.SVME) */
+#define HF_SVMI_SHIFT 21 /* SVM intercepts are active */
+#define HF_OSFXSR_SHIFT 22 /* CR4.OSFXSR */
+#define HF_SMAP_SHIFT 23 /* CR4.SMAP */
+#define HF_IOBPT_SHIFT 24 /* an io breakpoint enabled */
+#define HF_MPX_EN_SHIFT 25 /* MPX Enabled (CR4+XCR0+BNDCFGx) */
+#define HF_MPX_IU_SHIFT 26 /* BND registers in-use */
+
+#define HF_CPL_MASK (3 << HF_CPL_SHIFT)
+#define HF_INHIBIT_IRQ_MASK (1 << HF_INHIBIT_IRQ_SHIFT)
+#define HF_CS32_MASK (1 << HF_CS32_SHIFT)
+#define HF_SS32_MASK (1 << HF_SS32_SHIFT)
+#define HF_ADDSEG_MASK (1 << HF_ADDSEG_SHIFT)
+#define HF_PE_MASK (1 << HF_PE_SHIFT)
+#define HF_TF_MASK (1 << HF_TF_SHIFT)
+#define HF_MP_MASK (1 << HF_MP_SHIFT)
+#define HF_EM_MASK (1 << HF_EM_SHIFT)
+#define HF_TS_MASK (1 << HF_TS_SHIFT)
+#define HF_IOPL_MASK (3 << HF_IOPL_SHIFT)
+#define HF_LMA_MASK (1 << HF_LMA_SHIFT)
+#define HF_CS64_MASK (1 << HF_CS64_SHIFT)
+#define HF_RF_MASK (1 << HF_RF_SHIFT)
+#define HF_VM_MASK (1 << HF_VM_SHIFT)
+#define HF_AC_MASK (1 << HF_AC_SHIFT)
+#define HF_SMM_MASK (1 << HF_SMM_SHIFT)
+#define HF_SVME_MASK (1 << HF_SVME_SHIFT)
+#define HF_SVMI_MASK (1 << HF_SVMI_SHIFT)
+#define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT)
+#define HF_SMAP_MASK (1 << HF_SMAP_SHIFT)
+#define HF_IOBPT_MASK (1 << HF_IOBPT_SHIFT)
+#define HF_MPX_EN_MASK (1 << HF_MPX_EN_SHIFT)
+#define HF_MPX_IU_MASK (1 << HF_MPX_IU_SHIFT)
+
+/* hflags2 */
+
+#define HF2_GIF_SHIFT 0 /* if set CPU takes interrupts */
+#define HF2_HIF_SHIFT 1 /* value of IF_MASK when entering SVM */
+#define HF2_NMI_SHIFT 2 /* CPU serving NMI */
+#define HF2_VINTR_SHIFT 3 /* value of V_INTR_MASKING bit */
+#define HF2_SMM_INSIDE_NMI_SHIFT 4 /* CPU serving SMI nested inside NMI */
+#define HF2_MPX_PR_SHIFT 5 /* BNDCFGx.BNDPRESERVE */
+
+#define HF2_GIF_MASK (1 << HF2_GIF_SHIFT)
+#define HF2_HIF_MASK (1 << HF2_HIF_SHIFT)
+#define HF2_NMI_MASK (1 << HF2_NMI_SHIFT)
+#define HF2_VINTR_MASK (1 << HF2_VINTR_SHIFT)
+#define HF2_SMM_INSIDE_NMI_MASK (1 << HF2_SMM_INSIDE_NMI_SHIFT)
+#define HF2_MPX_PR_MASK (1 << HF2_MPX_PR_SHIFT)
+
+#define CR0_PE_SHIFT 0
+#define CR0_MP_SHIFT 1
+
+#define CR0_PE_MASK (1U << 0)
+#define CR0_MP_MASK (1U << 1)
+#define CR0_EM_MASK (1U << 2)
+#define CR0_TS_MASK (1U << 3)
+#define CR0_ET_MASK (1U << 4)
+#define CR0_NE_MASK (1U << 5)
+#define CR0_WP_MASK (1U << 16)
+#define CR0_AM_MASK (1U << 18)
+#define CR0_PG_MASK (1U << 31)
+
+#define CR4_VME_MASK (1U << 0)
+#define CR4_PVI_MASK (1U << 1)
+#define CR4_TSD_MASK (1U << 2)
+#define CR4_DE_MASK (1U << 3)
+#define CR4_PSE_MASK (1U << 4)
+#define CR4_PAE_MASK (1U << 5)
+#define CR4_MCE_MASK (1U << 6)
+#define CR4_PGE_MASK (1U << 7)
+#define CR4_PCE_MASK (1U << 8)
+#define CR4_OSFXSR_SHIFT 9
+#define CR4_OSFXSR_MASK (1U << CR4_OSFXSR_SHIFT)
+#define CR4_OSXMMEXCPT_MASK (1U << 10)
+#define CR4_VMXE_MASK (1U << 13)
+#define CR4_SMXE_MASK (1U << 14)
+#define CR4_FSGSBASE_MASK (1U << 16)
+#define CR4_PCIDE_MASK (1U << 17)
+#define CR4_OSXSAVE_MASK (1U << 18)
+#define CR4_SMEP_MASK (1U << 20)
+#define CR4_SMAP_MASK (1U << 21)
+#define CR4_PKE_MASK (1U << 22)
+
+#define DR6_BD (1 << 13)
+#define DR6_BS (1 << 14)
+#define DR6_BT (1 << 15)
+#define DR6_FIXED_1 0xffff0ff0
+
+#define DR7_GD (1 << 13)
+#define DR7_TYPE_SHIFT 16
+#define DR7_LEN_SHIFT 18
+#define DR7_FIXED_1 0x00000400
+#define DR7_GLOBAL_BP_MASK 0xaa
+#define DR7_LOCAL_BP_MASK 0x55
+#define DR7_MAX_BP 4
+#define DR7_TYPE_BP_INST 0x0
+#define DR7_TYPE_DATA_WR 0x1
+#define DR7_TYPE_IO_RW 0x2
+#define DR7_TYPE_DATA_RW 0x3
+
+#define PG_PRESENT_BIT 0
+#define PG_RW_BIT 1
+#define PG_USER_BIT 2
+#define PG_PWT_BIT 3
+#define PG_PCD_BIT 4
+#define PG_ACCESSED_BIT 5
+#define PG_DIRTY_BIT 6
+#define PG_PSE_BIT 7
+#define PG_GLOBAL_BIT 8
+#define PG_PSE_PAT_BIT 12
+#define PG_PKRU_BIT 59
+#define PG_NX_BIT 63
+
+#define PG_PRESENT_MASK (1 << PG_PRESENT_BIT)
+#define PG_RW_MASK (1 << PG_RW_BIT)
+#define PG_USER_MASK (1 << PG_USER_BIT)
+#define PG_PWT_MASK (1 << PG_PWT_BIT)
+#define PG_PCD_MASK (1 << PG_PCD_BIT)
+#define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT)
+#define PG_DIRTY_MASK (1 << PG_DIRTY_BIT)
+#define PG_PSE_MASK (1 << PG_PSE_BIT)
+#define PG_GLOBAL_MASK (1 << PG_GLOBAL_BIT)
+#define PG_PSE_PAT_MASK (1 << PG_PSE_PAT_BIT)
+#define PG_ADDRESS_MASK 0x000ffffffffff000LL
+#define PG_HI_RSVD_MASK (PG_ADDRESS_MASK & ~PHYS_ADDR_MASK)
+#define PG_HI_USER_MASK 0x7ff0000000000000LL
+#define PG_PKRU_MASK (15ULL << PG_PKRU_BIT)
+#define PG_NX_MASK (1ULL << PG_NX_BIT)
+
+#define PG_ERROR_W_BIT 1
+
+#define PG_ERROR_P_MASK 0x01
+#define PG_ERROR_W_MASK (1 << PG_ERROR_W_BIT)
+#define PG_ERROR_U_MASK 0x04
+#define PG_ERROR_RSVD_MASK 0x08
+#define PG_ERROR_I_D_MASK 0x10
+#define PG_ERROR_PK_MASK 0x20
+
+#define MCG_CTL_P (1ULL<<8) /* MCG_CAP register available */
+#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
+#define MCG_LMCE_P (1ULL<<27) /* Local Machine Check Supported */
+
+#define MCE_CAP_DEF (MCG_CTL_P|MCG_SER_P)
+#define MCE_BANKS_DEF 10
+
+#define MCG_CAP_BANKS_MASK 0xff
+
+#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */
+#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */
+#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */
+#define MCG_STATUS_LMCE (1ULL<<3) /* Local MCE signaled */
+
+#define MCG_EXT_CTL_LMCE_EN (1ULL<<0) /* Local MCE enabled */
+
+#define MCI_STATUS_VAL (1ULL<<63) /* valid error */
+#define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */
+#define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */
+#define MCI_STATUS_EN (1ULL<<60) /* error enabled */
+#define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */
+#define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */
+#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
+#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
+#define MCI_STATUS_AR (1ULL<<55) /* Action required */
+
+/* MISC register defines */
+#define MCM_ADDR_SEGOFF 0 /* segment offset */
+#define MCM_ADDR_LINEAR 1 /* linear address */
+#define MCM_ADDR_PHYS 2 /* physical address */
+#define MCM_ADDR_MEM 3 /* memory address */
+#define MCM_ADDR_GENERIC 7 /* generic */
+
+#define MSR_IA32_TSC 0x10
+#define MSR_IA32_APICBASE 0x1b
+#define MSR_IA32_APICBASE_BSP (1<<8)
+#define MSR_IA32_APICBASE_ENABLE (1<<11)
+#define MSR_IA32_APICBASE_EXTD (1 << 10)
+#define MSR_IA32_APICBASE_BASE (0xfffffU<<12)
+#define MSR_IA32_FEATURE_CONTROL 0x0000003a
+#define MSR_TSC_ADJUST 0x0000003b
+#define MSR_IA32_TSCDEADLINE 0x6e0
+
+#define FEATURE_CONTROL_LOCKED (1<<0)
+#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2)
+#define FEATURE_CONTROL_LMCE (1<<20)
+
+#define MSR_P6_PERFCTR0 0xc1
+
+#define MSR_IA32_SMBASE 0x9e
+#define MSR_MTRRcap 0xfe
+#define MSR_MTRRcap_VCNT 8
+#define MSR_MTRRcap_FIXRANGE_SUPPORT (1 << 8)
+#define MSR_MTRRcap_WC_SUPPORTED (1 << 10)
+
+#define MSR_IA32_SYSENTER_CS 0x174
+#define MSR_IA32_SYSENTER_ESP 0x175
+#define MSR_IA32_SYSENTER_EIP 0x176
+
+#define MSR_MCG_CAP 0x179
+#define MSR_MCG_STATUS 0x17a
+#define MSR_MCG_CTL 0x17b
+#define MSR_MCG_EXT_CTL 0x4d0
+
+#define MSR_P6_EVNTSEL0 0x186
+
+#define MSR_IA32_PERF_STATUS 0x198
+
+#define MSR_IA32_MISC_ENABLE 0x1a0
+/* Indicates good rep/movs microcode on some processors: */
+#define MSR_IA32_MISC_ENABLE_DEFAULT 1
+
+#define MSR_MTRRphysBase(reg) (0x200 + 2 * (reg))
+#define MSR_MTRRphysMask(reg) (0x200 + 2 * (reg) + 1)
+
+#define MSR_MTRRphysIndex(addr) ((((addr) & ~1u) - 0x200) / 2)
+
+#define MSR_MTRRfix64K_00000 0x250
+#define MSR_MTRRfix16K_80000 0x258
+#define MSR_MTRRfix16K_A0000 0x259
+#define MSR_MTRRfix4K_C0000 0x268
+#define MSR_MTRRfix4K_C8000 0x269
+#define MSR_MTRRfix4K_D0000 0x26a
+#define MSR_MTRRfix4K_D8000 0x26b
+#define MSR_MTRRfix4K_E0000 0x26c
+#define MSR_MTRRfix4K_E8000 0x26d
+#define MSR_MTRRfix4K_F0000 0x26e
+#define MSR_MTRRfix4K_F8000 0x26f
+
+#define MSR_PAT 0x277
+
+#define MSR_MTRRdefType 0x2ff
+
+#define MSR_CORE_PERF_FIXED_CTR0 0x309
+#define MSR_CORE_PERF_FIXED_CTR1 0x30a
+#define MSR_CORE_PERF_FIXED_CTR2 0x30b
+#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x38d
+#define MSR_CORE_PERF_GLOBAL_STATUS 0x38e
+#define MSR_CORE_PERF_GLOBAL_CTRL 0x38f
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x390
+
+#define MSR_MC0_CTL 0x400
+#define MSR_MC0_STATUS 0x401
+#define MSR_MC0_ADDR 0x402
+#define MSR_MC0_MISC 0x403
+
+#define MSR_EFER 0xc0000080
+
+#define MSR_EFER_SCE (1 << 0)
+#define MSR_EFER_LME (1 << 8)
+#define MSR_EFER_LMA (1 << 10)
+#define MSR_EFER_NXE (1 << 11)
+#define MSR_EFER_SVME (1 << 12)
+#define MSR_EFER_FFXSR (1 << 14)
+
+#define MSR_STAR 0xc0000081
+#define MSR_LSTAR 0xc0000082
+#define MSR_CSTAR 0xc0000083
+#define MSR_FMASK 0xc0000084
+#define MSR_FSBASE 0xc0000100
+#define MSR_GSBASE 0xc0000101
+#define MSR_KERNELGSBASE 0xc0000102
+#define MSR_TSC_AUX 0xc0000103
+
+#define MSR_VM_HSAVE_PA 0xc0010117
+
+#define MSR_IA32_BNDCFGS 0x00000d90
+#define MSR_IA32_XSS 0x00000da0
+
+#define XSTATE_FP_BIT 0
+#define XSTATE_SSE_BIT 1
+#define XSTATE_YMM_BIT 2
+#define XSTATE_BNDREGS_BIT 3
+#define XSTATE_BNDCSR_BIT 4
+#define XSTATE_OPMASK_BIT 5
+#define XSTATE_ZMM_Hi256_BIT 6
+#define XSTATE_Hi16_ZMM_BIT 7
+#define XSTATE_PKRU_BIT 9
+
+#define XSTATE_FP_MASK (1ULL << XSTATE_FP_BIT)
+#define XSTATE_SSE_MASK (1ULL << XSTATE_SSE_BIT)
+#define XSTATE_YMM_MASK (1ULL << XSTATE_YMM_BIT)
+#define XSTATE_BNDREGS_MASK (1ULL << XSTATE_BNDREGS_BIT)
+#define XSTATE_BNDCSR_MASK (1ULL << XSTATE_BNDCSR_BIT)
+#define XSTATE_OPMASK_MASK (1ULL << XSTATE_OPMASK_BIT)
+#define XSTATE_ZMM_Hi256_MASK (1ULL << XSTATE_ZMM_Hi256_BIT)
+#define XSTATE_Hi16_ZMM_MASK (1ULL << XSTATE_Hi16_ZMM_BIT)
+#define XSTATE_PKRU_MASK (1ULL << XSTATE_PKRU_BIT)
+
+/* CPUID feature words */
+typedef enum FeatureWord {
+ FEAT_1_EDX, /* CPUID[1].EDX */
+ FEAT_1_ECX, /* CPUID[1].ECX */
+ FEAT_7_0_EBX, /* CPUID[EAX=7,ECX=0].EBX */
+ FEAT_7_0_ECX, /* CPUID[EAX=7,ECX=0].ECX */
+ FEAT_7_0_EDX, /* CPUID[EAX=7,ECX=0].EDX */
+ FEAT_8000_0001_EDX, /* CPUID[8000_0001].EDX */
+ FEAT_8000_0001_ECX, /* CPUID[8000_0001].ECX */
+ FEAT_8000_0007_EDX, /* CPUID[8000_0007].EDX */
+ FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */
+ FEAT_KVM, /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */
+ FEAT_HYPERV_EAX, /* CPUID[4000_0003].EAX */
+ FEAT_HYPERV_EBX, /* CPUID[4000_0003].EBX */
+ FEAT_HYPERV_EDX, /* CPUID[4000_0003].EDX */
+ FEAT_SVM, /* CPUID[8000_000A].EDX */
+ FEAT_XSAVE, /* CPUID[EAX=0xd,ECX=1].EAX */
+ FEAT_6_EAX, /* CPUID[6].EAX */
+ FEAT_XSAVE_COMP_LO, /* CPUID[EAX=0xd,ECX=0].EAX */
+ FEAT_XSAVE_COMP_HI, /* CPUID[EAX=0xd,ECX=0].EDX */
+ FEATURE_WORDS,
+} FeatureWord;
+
+typedef uint32_t FeatureWordArray[FEATURE_WORDS];
+
+/* cpuid_features bits */
+#define CPUID_FP87 (1U << 0)
+#define CPUID_VME (1U << 1)
+#define CPUID_DE (1U << 2)
+#define CPUID_PSE (1U << 3)
+#define CPUID_TSC (1U << 4)
+#define CPUID_MSR (1U << 5)
+#define CPUID_PAE (1U << 6)
+#define CPUID_MCE (1U << 7)
+#define CPUID_CX8 (1U << 8)
+#define CPUID_APIC (1U << 9)
+#define CPUID_SEP (1U << 11) /* sysenter/sysexit */
+#define CPUID_MTRR (1U << 12)
+#define CPUID_PGE (1U << 13)
+#define CPUID_MCA (1U << 14)
+#define CPUID_CMOV (1U << 15)
+#define CPUID_PAT (1U << 16)
+#define CPUID_PSE36 (1U << 17)
+#define CPUID_PN (1U << 18)
+#define CPUID_CLFLUSH (1U << 19)
+#define CPUID_DTS (1U << 21)
+#define CPUID_ACPI (1U << 22)
+#define CPUID_MMX (1U << 23)
+#define CPUID_FXSR (1U << 24)
+#define CPUID_SSE (1U << 25)
+#define CPUID_SSE2 (1U << 26)
+#define CPUID_SS (1U << 27)
+#define CPUID_HT (1U << 28)
+#define CPUID_TM (1U << 29)
+#define CPUID_IA64 (1U << 30)
+#define CPUID_PBE (1U << 31)
+
+#define CPUID_EXT_SSE3 (1U << 0)
+#define CPUID_EXT_PCLMULQDQ (1U << 1)
+#define CPUID_EXT_DTES64 (1U << 2)
+#define CPUID_EXT_MONITOR (1U << 3)
+#define CPUID_EXT_DSCPL (1U << 4)
+#define CPUID_EXT_VMX (1U << 5)
+#define CPUID_EXT_SMX (1U << 6)
+#define CPUID_EXT_EST (1U << 7)
+#define CPUID_EXT_TM2 (1U << 8)
+#define CPUID_EXT_SSSE3 (1U << 9)
+#define CPUID_EXT_CID (1U << 10)
+#define CPUID_EXT_FMA (1U << 12)
+#define CPUID_EXT_CX16 (1U << 13)
+#define CPUID_EXT_XTPR (1U << 14)
+#define CPUID_EXT_PDCM (1U << 15)
+#define CPUID_EXT_PCID (1U << 17)
+#define CPUID_EXT_DCA (1U << 18)
+#define CPUID_EXT_SSE41 (1U << 19)
+#define CPUID_EXT_SSE42 (1U << 20)
+#define CPUID_EXT_X2APIC (1U << 21)
+#define CPUID_EXT_MOVBE (1U << 22)
+#define CPUID_EXT_POPCNT (1U << 23)
+#define CPUID_EXT_TSC_DEADLINE_TIMER (1U << 24)
+#define CPUID_EXT_AES (1U << 25)
+#define CPUID_EXT_XSAVE (1U << 26)
+#define CPUID_EXT_OSXSAVE (1U << 27)
+#define CPUID_EXT_AVX (1U << 28)
+#define CPUID_EXT_F16C (1U << 29)
+#define CPUID_EXT_RDRAND (1U << 30)
+#define CPUID_EXT_HYPERVISOR (1U << 31)
+
+#define CPUID_EXT2_FPU (1U << 0)
+#define CPUID_EXT2_VME (1U << 1)
+#define CPUID_EXT2_DE (1U << 2)
+#define CPUID_EXT2_PSE (1U << 3)
+#define CPUID_EXT2_TSC (1U << 4)
+#define CPUID_EXT2_MSR (1U << 5)
+#define CPUID_EXT2_PAE (1U << 6)
+#define CPUID_EXT2_MCE (1U << 7)
+#define CPUID_EXT2_CX8 (1U << 8)
+#define CPUID_EXT2_APIC (1U << 9)
+#define CPUID_EXT2_SYSCALL (1U << 11)
+#define CPUID_EXT2_MTRR (1U << 12)
+#define CPUID_EXT2_PGE (1U << 13)
+#define CPUID_EXT2_MCA (1U << 14)
+#define CPUID_EXT2_CMOV (1U << 15)
+#define CPUID_EXT2_PAT (1U << 16)
+#define CPUID_EXT2_PSE36 (1U << 17)
+#define CPUID_EXT2_MP (1U << 19)
+#define CPUID_EXT2_NX (1U << 20)
+#define CPUID_EXT2_MMXEXT (1U << 22)
+#define CPUID_EXT2_MMX (1U << 23)
+#define CPUID_EXT2_FXSR (1U << 24)
+#define CPUID_EXT2_FFXSR (1U << 25)
+#define CPUID_EXT2_PDPE1GB (1U << 26)
+#define CPUID_EXT2_RDTSCP (1U << 27)
+#define CPUID_EXT2_LM (1U << 29)
+#define CPUID_EXT2_3DNOWEXT (1U << 30)
+#define CPUID_EXT2_3DNOW (1U << 31)
+
+/* CPUID[8000_0001].EDX bits that are aliases of CPUID[1].EDX bits on AMD CPUs */
+#define CPUID_EXT2_AMD_ALIASES (CPUID_EXT2_FPU | CPUID_EXT2_VME | \
+ CPUID_EXT2_DE | CPUID_EXT2_PSE | \
+ CPUID_EXT2_TSC | CPUID_EXT2_MSR | \
+ CPUID_EXT2_PAE | CPUID_EXT2_MCE | \
+ CPUID_EXT2_CX8 | CPUID_EXT2_APIC | \
+ CPUID_EXT2_MTRR | CPUID_EXT2_PGE | \
+ CPUID_EXT2_MCA | CPUID_EXT2_CMOV | \
+ CPUID_EXT2_PAT | CPUID_EXT2_PSE36 | \
+ CPUID_EXT2_MMX | CPUID_EXT2_FXSR)
+
+#define CPUID_EXT3_LAHF_LM (1U << 0)
+#define CPUID_EXT3_CMP_LEG (1U << 1)
+#define CPUID_EXT3_SVM (1U << 2)
+#define CPUID_EXT3_EXTAPIC (1U << 3)
+#define CPUID_EXT3_CR8LEG (1U << 4)
+#define CPUID_EXT3_ABM (1U << 5)
+#define CPUID_EXT3_SSE4A (1U << 6)
+#define CPUID_EXT3_MISALIGNSSE (1U << 7)
+#define CPUID_EXT3_3DNOWPREFETCH (1U << 8)
+#define CPUID_EXT3_OSVW (1U << 9)
+#define CPUID_EXT3_IBS (1U << 10)
+#define CPUID_EXT3_XOP (1U << 11)
+#define CPUID_EXT3_SKINIT (1U << 12)
+#define CPUID_EXT3_WDT (1U << 13)
+#define CPUID_EXT3_LWP (1U << 15)
+#define CPUID_EXT3_FMA4 (1U << 16)
+#define CPUID_EXT3_TCE (1U << 17)
+#define CPUID_EXT3_NODEID (1U << 19)
+#define CPUID_EXT3_TBM (1U << 21)
+#define CPUID_EXT3_TOPOEXT (1U << 22)
+#define CPUID_EXT3_PERFCORE (1U << 23)
+#define CPUID_EXT3_PERFNB (1U << 24)
+
+#define CPUID_SVM_NPT (1U << 0)
+#define CPUID_SVM_LBRV (1U << 1)
+#define CPUID_SVM_SVMLOCK (1U << 2)
+#define CPUID_SVM_NRIPSAVE (1U << 3)
+#define CPUID_SVM_TSCSCALE (1U << 4)
+#define CPUID_SVM_VMCBCLEAN (1U << 5)
+#define CPUID_SVM_FLUSHASID (1U << 6)
+#define CPUID_SVM_DECODEASSIST (1U << 7)
+#define CPUID_SVM_PAUSEFILTER (1U << 10)
+#define CPUID_SVM_PFTHRESHOLD (1U << 12)
+
+#define CPUID_7_0_EBX_FSGSBASE (1U << 0)
+#define CPUID_7_0_EBX_BMI1 (1U << 3)
+#define CPUID_7_0_EBX_HLE (1U << 4)
+#define CPUID_7_0_EBX_AVX2 (1U << 5)
+#define CPUID_7_0_EBX_SMEP (1U << 7)
+#define CPUID_7_0_EBX_BMI2 (1U << 8)
+#define CPUID_7_0_EBX_ERMS (1U << 9)
+#define CPUID_7_0_EBX_INVPCID (1U << 10)
+#define CPUID_7_0_EBX_RTM (1U << 11)
+#define CPUID_7_0_EBX_MPX (1U << 14)
+#define CPUID_7_0_EBX_AVX512F (1U << 16) /* AVX-512 Foundation */
+#define CPUID_7_0_EBX_AVX512DQ (1U << 17) /* AVX-512 Doubleword & Quadword Instrs */
+#define CPUID_7_0_EBX_RDSEED (1U << 18)
+#define CPUID_7_0_EBX_ADX (1U << 19)
+#define CPUID_7_0_EBX_SMAP (1U << 20)
+#define CPUID_7_0_EBX_AVX512IFMA (1U << 21) /* AVX-512 Integer Fused Multiply Add */
+#define CPUID_7_0_EBX_PCOMMIT (1U << 22) /* Persistent Commit */
+#define CPUID_7_0_EBX_CLFLUSHOPT (1U << 23) /* Flush a Cache Line Optimized */
+#define CPUID_7_0_EBX_CLWB (1U << 24) /* Cache Line Write Back */
+#define CPUID_7_0_EBX_AVX512PF (1U << 26) /* AVX-512 Prefetch */
+#define CPUID_7_0_EBX_AVX512ER (1U << 27) /* AVX-512 Exponential and Reciprocal */
+#define CPUID_7_0_EBX_AVX512CD (1U << 28) /* AVX-512 Conflict Detection */
+#define CPUID_7_0_EBX_AVX512BW (1U << 30) /* AVX-512 Byte and Word Instructions */
+#define CPUID_7_0_EBX_AVX512VL (1U << 31) /* AVX-512 Vector Length Extensions */
+
+#define CPUID_7_0_ECX_VBMI (1U << 1) /* AVX-512 Vector Byte Manipulation Instrs */
+#define CPUID_7_0_ECX_UMIP (1U << 2)
+#define CPUID_7_0_ECX_PKU (1U << 3)
+#define CPUID_7_0_ECX_OSPKE (1U << 4)
+#define CPUID_7_0_ECX_RDPID (1U << 22)
+
+#define CPUID_7_0_EDX_AVX512_4VNNIW (1U << 2) /* AVX512 Neural Network Instructions */
+#define CPUID_7_0_EDX_AVX512_4FMAPS (1U << 3) /* AVX512 Multiply Accumulation Single Precision */
+
+#define CPUID_XSAVE_XSAVEOPT (1U << 0)
+#define CPUID_XSAVE_XSAVEC (1U << 1)
+#define CPUID_XSAVE_XGETBV1 (1U << 2)
+#define CPUID_XSAVE_XSAVES (1U << 3)
+
+#define CPUID_6_EAX_ARAT (1U << 2)
+
+/* CPUID[0x80000007].EDX flags: */
+#define CPUID_APM_INVTSC (1U << 8)
+
+#define CPUID_VENDOR_SZ 12
+
+#define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */
+#define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */
+#define CPUID_VENDOR_INTEL_3 0x6c65746e /* "ntel" */
+#define CPUID_VENDOR_INTEL "GenuineIntel"
+
+#define CPUID_VENDOR_AMD_1 0x68747541 /* "Auth" */
+#define CPUID_VENDOR_AMD_2 0x69746e65 /* "enti" */
+#define CPUID_VENDOR_AMD_3 0x444d4163 /* "cAMD" */
+#define CPUID_VENDOR_AMD "AuthenticAMD"
+
+#define CPUID_VENDOR_VIA "CentaurHauls"
+
+#define CPUID_MWAIT_IBE (1U << 1) /* Interrupts can exit capability */
+#define CPUID_MWAIT_EMX (1U << 0) /* enumeration supported */
+
+/* CPUID[0xB].ECX level types */
+#define CPUID_TOPOLOGY_LEVEL_INVALID (0U << 8)
+#define CPUID_TOPOLOGY_LEVEL_SMT (1U << 8)
+#define CPUID_TOPOLOGY_LEVEL_CORE (2U << 8)
+
+#ifndef HYPERV_SPINLOCK_NEVER_RETRY
+#define HYPERV_SPINLOCK_NEVER_RETRY 0xFFFFFFFF
+#endif
+
+#define EXCP00_DIVZ 0
+#define EXCP01_DB 1
+#define EXCP02_NMI 2
+#define EXCP03_INT3 3
+#define EXCP04_INTO 4
+#define EXCP05_BOUND 5
+#define EXCP06_ILLOP 6
+#define EXCP07_PREX 7
+#define EXCP08_DBLE 8
+#define EXCP09_XERR 9
+#define EXCP0A_TSS 10
+#define EXCP0B_NOSEG 11
+#define EXCP0C_STACK 12
+#define EXCP0D_GPF 13
+#define EXCP0E_PAGE 14
+#define EXCP10_COPR 16
+#define EXCP11_ALGN 17
+#define EXCP12_MCHK 18
+
+#define EXCP_SYSCALL 0x100 /* only happens in user only emulation
+ for syscall instruction */
+
+/* i386-specific interrupt pending bits. */
+#define CPU_INTERRUPT_POLL CPU_INTERRUPT_TGT_EXT_1
+#define CPU_INTERRUPT_SMI CPU_INTERRUPT_TGT_EXT_2
+#define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_3
+#define CPU_INTERRUPT_MCE CPU_INTERRUPT_TGT_EXT_4
+#define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_INT_0
+#define CPU_INTERRUPT_SIPI CPU_INTERRUPT_TGT_INT_1
+#define CPU_INTERRUPT_TPR CPU_INTERRUPT_TGT_INT_2
+
+/* Use a clearer name for this. */
+#define CPU_INTERRUPT_INIT CPU_INTERRUPT_RESET
+
+/* Instead of computing the condition codes after each x86 instruction,
+ * QEMU just stores one operand (called CC_SRC), the result
+ * (called CC_DST) and the type of operation (called CC_OP). When the
+ * condition codes are needed, the condition codes can be calculated
+ * using this information. Condition codes are not generated if they
+ * are only needed for conditional branches.
+ */
+typedef enum {
+ CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
+ CC_OP_EFLAGS, /* all cc are explicitly computed, CC_SRC = flags */
+
+ CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */
+ CC_OP_MULW,
+ CC_OP_MULL,
+ CC_OP_MULQ,
+
+ CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
+ CC_OP_ADDW,
+ CC_OP_ADDL,
+ CC_OP_ADDQ,
+
+ CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
+ CC_OP_ADCW,
+ CC_OP_ADCL,
+ CC_OP_ADCQ,
+
+ CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
+ CC_OP_SUBW,
+ CC_OP_SUBL,
+ CC_OP_SUBQ,
+
+ CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
+ CC_OP_SBBW,
+ CC_OP_SBBL,
+ CC_OP_SBBQ,
+
+ CC_OP_LOGICB, /* modify all flags, CC_DST = res */
+ CC_OP_LOGICW,
+ CC_OP_LOGICL,
+ CC_OP_LOGICQ,
+
+ CC_OP_INCB, /* modify all flags except, CC_DST = res, CC_SRC = C */
+ CC_OP_INCW,
+ CC_OP_INCL,
+ CC_OP_INCQ,
+
+ CC_OP_DECB, /* modify all flags except, CC_DST = res, CC_SRC = C */
+ CC_OP_DECW,
+ CC_OP_DECL,
+ CC_OP_DECQ,
+
+ CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */
+ CC_OP_SHLW,
+ CC_OP_SHLL,
+ CC_OP_SHLQ,
+
+ CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
+ CC_OP_SARW,
+ CC_OP_SARL,
+ CC_OP_SARQ,
+
+ CC_OP_BMILGB, /* Z,S via CC_DST, C = SRC==0; O=0; P,A undefined */
+ CC_OP_BMILGW,
+ CC_OP_BMILGL,
+ CC_OP_BMILGQ,
+
+ CC_OP_ADCX, /* CC_DST = C, CC_SRC = rest. */
+ CC_OP_ADOX, /* CC_DST = O, CC_SRC = rest. */
+ CC_OP_ADCOX, /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest. */
+
+ CC_OP_CLR, /* Z set, all other flags clear. */
+
+ CC_OP_NB,
+} CCOp;
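+
+/* Illustrative sketch (not part of the emulator logic): with the lazy scheme
+ * above, a 32-bit ADD stores the operands instead of computing eflags:
+ *
+ *     CC_DST = src1 + src2;   CC_SRC = src1;   CC_OP = CC_OP_ADDL;
+ *
+ * and a flag is only derived when it is actually needed, e.g.
+ *     ZF = ((uint32_t)CC_DST == 0);
+ *     CF = ((uint32_t)CC_DST < (uint32_t)CC_SRC);
+ */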
+
+typedef struct SegmentCache {
+ uint32_t selector;
+ target_ulong base;
+ uint32_t limit;
+ uint32_t flags;
+} SegmentCache;
+
+#define MMREG_UNION(n, bits) \
+ union n { \
+ uint8_t _b_##n[(bits)/8]; \
+ uint16_t _w_##n[(bits)/16]; \
+ uint32_t _l_##n[(bits)/32]; \
+ uint64_t _q_##n[(bits)/64]; \
+ float32 _s_##n[(bits)/32]; \
+ float64 _d_##n[(bits)/64]; \
+ }
+
+typedef MMREG_UNION(ZMMReg, 512) ZMMReg;
+typedef MMREG_UNION(MMXReg, 64) MMXReg;
+
+typedef struct BNDReg {
+ uint64_t lb;
+ uint64_t ub;
+} BNDReg;
+
+typedef struct BNDCSReg {
+ uint64_t cfgu;
+ uint64_t sts;
+} BNDCSReg;
+
+#define BNDCFG_ENABLE 1ULL
+#define BNDCFG_BNDPRESERVE 2ULL
+#define BNDCFG_BDIR_MASK TARGET_PAGE_MASK
+
+#ifdef HOST_WORDS_BIGENDIAN
+#define ZMM_B(n) _b_ZMMReg[63 - (n)]
+#define ZMM_W(n) _w_ZMMReg[31 - (n)]
+#define ZMM_L(n) _l_ZMMReg[15 - (n)]
+#define ZMM_S(n) _s_ZMMReg[15 - (n)]
+#define ZMM_Q(n) _q_ZMMReg[7 - (n)]
+#define ZMM_D(n) _d_ZMMReg[7 - (n)]
+
+#define MMX_B(n) _b_MMXReg[7 - (n)]
+#define MMX_W(n) _w_MMXReg[3 - (n)]
+#define MMX_L(n) _l_MMXReg[1 - (n)]
+#define MMX_S(n) _s_MMXReg[1 - (n)]
+#else
+#define ZMM_B(n) _b_ZMMReg[n]
+#define ZMM_W(n) _w_ZMMReg[n]
+#define ZMM_L(n) _l_ZMMReg[n]
+#define ZMM_S(n) _s_ZMMReg[n]
+#define ZMM_Q(n) _q_ZMMReg[n]
+#define ZMM_D(n) _d_ZMMReg[n]
+
+#define MMX_B(n) _b_MMXReg[n]
+#define MMX_W(n) _w_MMXReg[n]
+#define MMX_L(n) _l_MMXReg[n]
+#define MMX_S(n) _s_MMXReg[n]
+#endif
+#define MMX_Q(n) _q_MMXReg[n]
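+
+/* Usage sketch (illustrative only): the accessors above hide host byte order,
+ * so index 0 always names the architecturally lowest element, e.g.
+ *     uint32_t lo = env->xmm_regs[1].ZMM_L(0);    low dword of XMM1
+ *     env->xmm_regs[1].ZMM_Q(1) = 0;              clear bits 64..127 of XMM1
+ */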
+
+typedef union {
+ floatx80 d __attribute__((aligned(16)));
+ MMXReg mmx;
+} FPReg;
+
+typedef struct {
+ uint64_t base;
+ uint64_t mask;
+} MTRRVar;
+
+#define CPU_NB_REGS64 16
+#define CPU_NB_REGS32 8
+
+#ifdef TARGET_X86_64
+#define CPU_NB_REGS CPU_NB_REGS64
+#else
+#define CPU_NB_REGS CPU_NB_REGS32
+#endif
+
+#define MAX_FIXED_COUNTERS 3
+#define MAX_GP_COUNTERS (MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0)
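+/* With the MSR numbers defined above this works out to
+ * 0x198 - 0x186 = 0x12, i.e. room for 18 general-purpose counters. */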
+
+#define NB_MMU_MODES 3
+#define TARGET_INSN_START_EXTRA_WORDS 1
+
+#define NB_OPMASK_REGS 8
+
+/* A CPU can't have the APIC ID 0xFFFFFFFF, so that value is used to indicate
+ * that the APIC ID hasn't been set yet
+ */
+#define UNASSIGNED_APIC_ID 0xFFFFFFFF
+
+typedef union X86LegacyXSaveArea {
+ struct {
+ uint16_t fcw;
+ uint16_t fsw;
+ uint8_t ftw;
+ uint8_t reserved;
+ uint16_t fpop;
+ uint64_t fpip;
+ uint64_t fpdp;
+ uint32_t mxcsr;
+ uint32_t mxcsr_mask;
+ FPReg fpregs[8];
+ uint8_t xmm_regs[16][16];
+ };
+ uint8_t data[512];
+} X86LegacyXSaveArea;
+
+typedef struct X86XSaveHeader {
+ uint64_t xstate_bv;
+ uint64_t xcomp_bv;
+ uint64_t reserve0;
+ uint8_t reserved[40];
+} X86XSaveHeader;
+
+/* Ext. save area 2: AVX State */
+typedef struct XSaveAVX {
+ uint8_t ymmh[16][16];
+} XSaveAVX;
+
+/* Ext. save area 3: BNDREG */
+typedef struct XSaveBNDREG {
+ BNDReg bnd_regs[4];
+} XSaveBNDREG;
+
+/* Ext. save area 4: BNDCSR */
+typedef union XSaveBNDCSR {
+ BNDCSReg bndcsr;
+ uint8_t data[64];
+} XSaveBNDCSR;
+
+/* Ext. save area 5: Opmask */
+typedef struct XSaveOpmask {
+ uint64_t opmask_regs[NB_OPMASK_REGS];
+} XSaveOpmask;
+
+/* Ext. save area 6: ZMM_Hi256 */
+typedef struct XSaveZMM_Hi256 {
+ uint8_t zmm_hi256[16][32];
+} XSaveZMM_Hi256;
+
+/* Ext. save area 7: Hi16_ZMM */
+typedef struct XSaveHi16_ZMM {
+ uint8_t hi16_zmm[16][64];
+} XSaveHi16_ZMM;
+
+/* Ext. save area 9: PKRU state */
+typedef struct XSavePKRU {
+ uint32_t pkru;
+ uint32_t padding;
+} XSavePKRU;
+
+typedef struct X86XSaveArea {
+ X86LegacyXSaveArea legacy;
+ X86XSaveHeader header;
+
+ /* Extended save areas: */
+
+ /* AVX State: */
+ XSaveAVX avx_state;
+ uint8_t padding[960 - 576 - sizeof(XSaveAVX)];
+ /* MPX State: */
+ XSaveBNDREG bndreg_state;
+ XSaveBNDCSR bndcsr_state;
+ /* AVX-512 State: */
+ XSaveOpmask opmask_state;
+ XSaveZMM_Hi256 zmm_hi256_state;
+ XSaveHi16_ZMM hi16_zmm_state;
+ /* PKRU State: */
+ XSavePKRU pkru_state;
+} X86XSaveArea;
+
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, avx_state) != 0x240);
+QEMU_BUILD_BUG_ON(sizeof(XSaveAVX) != 0x100);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndreg_state) != 0x3c0);
+QEMU_BUILD_BUG_ON(sizeof(XSaveBNDREG) != 0x40);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndcsr_state) != 0x400);
+QEMU_BUILD_BUG_ON(sizeof(XSaveBNDCSR) != 0x40);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, opmask_state) != 0x440);
+QEMU_BUILD_BUG_ON(sizeof(XSaveOpmask) != 0x40);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, zmm_hi256_state) != 0x480);
+QEMU_BUILD_BUG_ON(sizeof(XSaveZMM_Hi256) != 0x200);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, hi16_zmm_state) != 0x680);
+QEMU_BUILD_BUG_ON(sizeof(XSaveHi16_ZMM) != 0x400);
+QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, pkru_state) != 0xA80);
+QEMU_BUILD_BUG_ON(sizeof(XSavePKRU) != 0x8);
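+
+/* For reference (standard-format XSAVE layout assumed by the checks above):
+ * the legacy FXSAVE region is 512 bytes and the XSAVE header is 64 bytes,
+ * so the first extended area (AVX) starts at 512 + 64 = 0x240, matching the
+ * offset asserted for avx_state. */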
+
+typedef enum TPRAccess {
+ TPR_ACCESS_READ,
+ TPR_ACCESS_WRITE,
+} TPRAccess;
+
+typedef struct CPUX86State {
+ /* standard registers */
+ target_ulong regs[CPU_NB_REGS];
+ target_ulong eip;
+ target_ulong eflags; /* eflags register. During CPU emulation, CC
+ flags and DF are set to zero because they are
+ stored elsewhere */
+
+ /* emulator internal eflags handling */
+ target_ulong cc_dst;
+ target_ulong cc_src;
+ target_ulong cc_src2;
+ uint32_t cc_op;
+ int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
+ uint32_t hflags; /* TB flags, see HF_xxx constants. These flags
+ are known at translation time. */
+ uint32_t hflags2; /* various other flags, see HF2_xxx constants. */
+
+ /* segments */
+ SegmentCache segs[6]; /* selector values */
+ SegmentCache ldt;
+ SegmentCache tr;
+ SegmentCache gdt; /* only base and limit are used */
+ SegmentCache idt; /* only base and limit are used */
+
+ target_ulong cr[5]; /* NOTE: cr1 is unused */
+ int32_t a20_mask;
+
+ BNDReg bnd_regs[4];
+ BNDCSReg bndcs_regs;
+ uint64_t msr_bndcfgs;
+ uint64_t efer;
+
+ /* Beginning of state preserved by INIT (dummy marker). */
+ struct {} start_init_save;
+
+ /* FPU state */
+ unsigned int fpstt; /* top of stack index */
+ uint16_t fpus;
+ uint16_t fpuc;
+ uint8_t fptags[8]; /* 0 = valid, 1 = empty */
+ FPReg fpregs[8];
+ /* KVM-only so far */
+ uint16_t fpop;
+ uint64_t fpip;
+ uint64_t fpdp;
+
+ /* emulator internal variables */
+ float_status fp_status;
+ floatx80 ft0;
+
+ float_status mmx_status; /* for 3DNow! float ops */
+ float_status sse_status;
+ uint32_t mxcsr;
+ ZMMReg xmm_regs[CPU_NB_REGS == 8 ? 8 : 32];
+ ZMMReg xmm_t0;
+ MMXReg mmx_t0;
+
+ uint64_t opmask_regs[NB_OPMASK_REGS];
+
+ /* sysenter registers */
+ uint32_t sysenter_cs;
+ target_ulong sysenter_esp;
+ target_ulong sysenter_eip;
+ uint64_t star;
+
+ uint64_t vm_hsave;
+
+#ifdef TARGET_X86_64
+ target_ulong lstar;
+ target_ulong cstar;
+ target_ulong fmask;
+ target_ulong kernelgsbase;
+#endif
+
+ uint64_t tsc;
+ uint64_t tsc_adjust;
+ uint64_t tsc_deadline;
+ uint64_t tsc_aux;
+
+ uint64_t xcr0;
+
+ uint64_t mcg_status;
+ uint64_t msr_ia32_misc_enable;
+ uint64_t msr_ia32_feature_control;
+
+ uint64_t msr_fixed_ctr_ctrl;
+ uint64_t msr_global_ctrl;
+ uint64_t msr_global_status;
+ uint64_t msr_global_ovf_ctrl;
+ uint64_t msr_fixed_counters[MAX_FIXED_COUNTERS];
+ uint64_t msr_gp_counters[MAX_GP_COUNTERS];
+ uint64_t msr_gp_evtsel[MAX_GP_COUNTERS];
+
+ uint64_t pat;
+ uint32_t smbase;
+
+ uint32_t pkru;
+
+ /* End of state preserved by INIT (dummy marker). */
+ struct {} end_init_save;
+
+ uint64_t system_time_msr;
+ uint64_t wall_clock_msr;
+ uint64_t steal_time_msr;
+ uint64_t async_pf_en_msr;
+ uint64_t pv_eoi_en_msr;
+
+ uint64_t msr_hv_hypercall;
+ uint64_t msr_hv_guest_os_id;
+ uint64_t msr_hv_vapic;
+ uint64_t msr_hv_tsc;
+ uint64_t msr_hv_crash_params[HV_X64_MSR_CRASH_PARAMS];
+ uint64_t msr_hv_runtime;
+ uint64_t msr_hv_synic_control;
+ uint64_t msr_hv_synic_version;
+ uint64_t msr_hv_synic_evt_page;
+ uint64_t msr_hv_synic_msg_page;
+ uint64_t msr_hv_synic_sint[HV_SYNIC_SINT_COUNT];
+ uint64_t msr_hv_stimer_config[HV_SYNIC_STIMER_COUNT];
+ uint64_t msr_hv_stimer_count[HV_SYNIC_STIMER_COUNT];
+
+ /* exception/interrupt handling */
+ int error_code;
+ int exception_is_int;
+ target_ulong exception_next_eip;
+ target_ulong dr[8]; /* debug registers; note dr4 and dr5 are unused */
+ union {
+ struct CPUBreakpoint *cpu_breakpoint[4];
+ struct CPUWatchpoint *cpu_watchpoint[4];
+ }; /* break/watchpoints for dr[0..3] */
+ int old_exception; /* exception in flight */
+
+ uint64_t vm_vmcb;
+ uint64_t tsc_offset;
+ uint64_t intercept;
+ uint16_t intercept_cr_read;
+ uint16_t intercept_cr_write;
+ uint16_t intercept_dr_read;
+ uint16_t intercept_dr_write;
+ uint32_t intercept_exceptions;
+ uint8_t v_tpr;
+
+ /* KVM states, automatically cleared on reset */
+ uint8_t nmi_injected;
+ uint8_t nmi_pending;
+
+ CPU_COMMON
+
+ /* Fields from here on are preserved across CPU reset. */
+ struct {} end_reset_fields;
+
+ /* processor features (e.g. for CPUID insn) */
+ /* Minimum level/xlevel/xlevel2, based on CPU model + features */
+ uint32_t cpuid_min_level, cpuid_min_xlevel, cpuid_min_xlevel2;
+ /* Maximum level/xlevel/xlevel2 value for auto-assignment: */
+ uint32_t cpuid_max_level, cpuid_max_xlevel, cpuid_max_xlevel2;
+ /* Actual level/xlevel/xlevel2 value: */
+ uint32_t cpuid_level, cpuid_xlevel, cpuid_xlevel2;
+ uint32_t cpuid_vendor1;
+ uint32_t cpuid_vendor2;
+ uint32_t cpuid_vendor3;
+ uint32_t cpuid_version;
+ FeatureWordArray features;
+ uint32_t cpuid_model[12];
+
+ /* MTRRs */
+ uint64_t mtrr_fixed[11];
+ uint64_t mtrr_deftype;
+ MTRRVar mtrr_var[MSR_MTRRcap_VCNT];
+
+ /* For KVM */
+ uint32_t mp_state;
+ int32_t exception_injected;
+ int32_t interrupt_injected;
+ uint8_t soft_interrupt;
+ uint8_t has_error_code;
+ uint32_t sipi_vector;
+ bool tsc_valid;
+ int64_t tsc_khz;
+ int64_t user_tsc_khz; /* for sanity check only */
+ void *kvm_xsave_buf;
+
+ uint64_t mcg_cap;
+ uint64_t mcg_ctl;
+ uint64_t mcg_ext_ctl;
+ uint64_t mce_banks[MCE_BANKS_DEF*4];
+ uint64_t xstate_bv;
+
+ /* vmstate */
+ uint16_t fpus_vmstate;
+ uint16_t fptag_vmstate;
+ uint16_t fpregs_format_vmstate;
+
+ uint64_t xss;
+
+ TPRAccess tpr_access_type;
+} CPUX86State;
+
+struct kvm_msrs;
+
+/**
+ * X86CPU:
+ * @env: #CPUX86State
+ * @migratable: If set, only migratable flags will be accepted when "enforce"
+ * mode is used, and only migratable flags will be included in the "host"
+ * CPU model.
+ *
+ * An x86 CPU.
+ */
+struct X86CPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPUX86State env;
+
+ bool hyperv_vapic;
+ bool hyperv_relaxed_timing;
+ int hyperv_spinlock_attempts;
+ char *hyperv_vendor_id;
+ bool hyperv_time;
+ bool hyperv_crash;
+ bool hyperv_reset;
+ bool hyperv_vpindex;
+ bool hyperv_runtime;
+ bool hyperv_synic;
+ bool hyperv_stimer;
+ bool check_cpuid;
+ bool enforce_cpuid;
+ bool expose_kvm;
+ bool migratable;
+ bool host_features;
+ uint32_t apic_id;
+
+ /* if true the CPUID code directly forwards host cache leaves to the guest */
+ bool cache_info_passthrough;
+
+ /* Features that were filtered out because of missing host capabilities */
+ uint32_t filtered_features[FEATURE_WORDS];
+
+ /* Enable PMU CPUID bits. This can't be enabled by default yet because
+ * it doesn't have ABI stability guarantees, as it passes all PMU CPUID
+ * bits returned by GET_SUPPORTED_CPUID (that depend on host CPU and kernel
+ * capabilities) directly to the guest.
+ */
+ bool enable_pmu;
+
+ /* LMCE support can be enabled/disabled via cpu option 'lmce=on/off'. It is
+ * disabled by default to avoid breaking migration between QEMU with
+ * different LMCE configurations.
+ */
+ bool enable_lmce;
+
+ /* Compatibility bits for old machine types.
+ * If true, present a virtual L3 cache to the VM; the vCPUs in the same
+ * virtual socket share that virtual L3 cache.
+ */
+ bool enable_l3_cache;
+
+ /* Compatibility bits for old machine types: */
+ bool enable_cpuid_0xb;
+
+ /* Enable auto level-increase for all CPUID leaves */
+ bool full_cpuid_auto_level;
+
+ /* if true, fill the top bits of the MTRR_PHYSMASKn variable-range masks */
+ bool fill_mtrr_mask;
+
+ /* if true override the phys_bits value with a value read from the host */
+ bool host_phys_bits;
+
+ /* Number of physical address bits supported */
+ uint32_t phys_bits;
+
+ /* in order to simplify APIC support, we leave this pointer to the
+ user */
+ struct DeviceState *apic_state;
+ struct MemoryRegion *cpu_as_root, *cpu_as_mem, *smram;
+ Notifier machine_done;
+
+ struct kvm_msrs *kvm_msr_buf;
+
+ int32_t socket_id;
+ int32_t core_id;
+ int32_t thread_id;
+};
+
+static inline X86CPU *x86_env_get_cpu(CPUX86State *env)
+{
+ return container_of(env, X86CPU, env);
+}
+
+#define ENV_GET_CPU(e) CPU(x86_env_get_cpu(e))
+
+#define ENV_OFFSET offsetof(X86CPU, env)
+
+#ifndef CONFIG_USER_ONLY
+extern struct VMStateDescription vmstate_x86_cpu;
+#endif
+
+/**
+ * x86_cpu_do_interrupt:
+ * @cpu: vCPU the interrupt is to be handled by.
+ */
+void x86_cpu_do_interrupt(CPUState *cpu);
+bool x86_cpu_exec_interrupt(CPUState *cpu, int int_req);
+
+int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
+ int cpuid, void *opaque);
+int x86_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
+ int cpuid, void *opaque);
+int x86_cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
+ void *opaque);
+int x86_cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
+ void *opaque);
+
+void x86_cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
+ Error **errp);
+
+void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
+ int flags);
+
+hwaddr x86_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+
+int x86_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
+int x86_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+
+void x86_cpu_exec_enter(CPUState *cpu);
+void x86_cpu_exec_exit(CPUState *cpu);
+
+X86CPU *cpu_x86_init(const char *cpu_model);
+void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf);
+int cpu_x86_support_mca_broadcast(CPUX86State *env);
+
+int cpu_get_pic_interrupt(CPUX86State *s);
+/* MSDOS compatibility mode FPU exception support */
+void cpu_set_ferr(CPUX86State *s);
+
+/* this function must always be used to load data in the segment
+ cache: it synchronizes the hflags with the segment cache values */
+static inline void cpu_x86_load_seg_cache(CPUX86State *env,
+ int seg_reg, unsigned int selector,
+ target_ulong base,
+ unsigned int limit,
+ unsigned int flags)
+{
+ SegmentCache *sc;
+ unsigned int new_hflags;
+
+ sc = &env->segs[seg_reg];
+ sc->selector = selector;
+ sc->base = base;
+ sc->limit = limit;
+ sc->flags = flags;
+
+ /* update the hidden flags */
+ {
+ if (seg_reg == R_CS) {
+#ifdef TARGET_X86_64
+ if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) {
+ /* long mode */
+ env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
+ env->hflags &= ~(HF_ADDSEG_MASK);
+ } else
+#endif
+ {
+ /* legacy / compatibility case */
+ new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
+ >> (DESC_B_SHIFT - HF_CS32_SHIFT);
+ env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) |
+ new_hflags;
+ }
+ }
+ if (seg_reg == R_SS) {
+ int cpl = (flags >> DESC_DPL_SHIFT) & 3;
+#if HF_CPL_MASK != 3
+#error HF_CPL_MASK is hardcoded
+#endif
+ env->hflags = (env->hflags & ~HF_CPL_MASK) | cpl;
+ }
+ new_hflags = (env->segs[R_SS].flags & DESC_B_MASK)
+ >> (DESC_B_SHIFT - HF_SS32_SHIFT);
+ if (env->hflags & HF_CS64_MASK) {
+ /* zero base assumed for DS, ES and SS in long mode */
+ } else if (!(env->cr[0] & CR0_PE_MASK) ||
+ (env->eflags & VM_MASK) ||
+ !(env->hflags & HF_CS32_MASK)) {
+ /* XXX: try to avoid this test. The problem comes from the
+ fact that in real mode or vm86 mode we only modify the
+ 'base' and 'selector' fields of the segment cache to go
+ faster. A solution may be to force addseg to one in
+ translate-i386.c. */
+ new_hflags |= HF_ADDSEG_MASK;
+ } else {
+ new_hflags |= ((env->segs[R_DS].base |
+ env->segs[R_ES].base |
+ env->segs[R_SS].base) != 0) <<
+ HF_ADDSEG_SHIFT;
+ }
+ env->hflags = (env->hflags &
+ ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags;
+ }
+}
+
+static inline void cpu_x86_load_seg_cache_sipi(X86CPU *cpu,
+ uint8_t sipi_vector)
+{
+ CPUState *cs = CPU(cpu);
+ CPUX86State *env = &cpu->env;
+
+ env->eip = 0;
+ cpu_x86_load_seg_cache(env, R_CS, sipi_vector << 8,
+ sipi_vector << 12,
+ env->segs[R_CS].limit,
+ env->segs[R_CS].flags);
+ cs->halted = 0;
+}
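+
+/* Worked example (illustrative): for a SIPI with vector 0x9a the function
+ * above loads CS.selector = 0x9a00 and CS.base = 0x9a000 with eip = 0, so
+ * the target CPU starts fetching real-mode code at physical address 0x9a000. */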
+
+int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
+ target_ulong *base, unsigned int *limit,
+ unsigned int *flags);
+
+/* op_helper.c */
+/* used for debug or cpu save/restore */
+void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f);
+floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper);
+
+/* cpu-exec.c */
+/* the following helpers are only usable in user mode simulation as
+ they can trigger unexpected exceptions */
+void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
+void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32);
+void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32);
+
+/* you can call this signal handler from your SIGBUS and SIGSEGV
+ signal handlers to inform the virtual CPU of exceptions. A non-zero value
+ is returned if the signal was handled by the virtual CPU. */
+int cpu_x86_signal_handler(int host_signum, void *pinfo,
+ void *puc);
+
+/* cpu.c */
+void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
+ uint32_t *eax, uint32_t *ebx,
+ uint32_t *ecx, uint32_t *edx);
+void cpu_clear_apic_feature(CPUX86State *env);
+void host_cpuid(uint32_t function, uint32_t count,
+ uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
+
+/* helper.c */
+int x86_cpu_handle_mmu_fault(CPUState *cpu, vaddr addr,
+ int is_write, int mmu_idx);
+void x86_cpu_set_a20(X86CPU *cpu, int a20_state);
+
+#ifndef CONFIG_USER_ONLY
+uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr);
+uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr);
+uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr);
+uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr);
+void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val);
+void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val);
+void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val);
+void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val);
+void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val);
+#endif
+
+void breakpoint_handler(CPUState *cs);
+
+/* these will eventually be removed */
+void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
+void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
+void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
+void cpu_x86_update_dr7(CPUX86State *env, uint32_t new_dr7);
+
+/* hw/pc.c */
+uint64_t cpu_get_tsc(CPUX86State *env);
+
+#define TARGET_PAGE_BITS 12
+
+#ifdef TARGET_X86_64
+#define TARGET_PHYS_ADDR_SPACE_BITS 52
+/* ??? This is really 48 bits, sign-extended, but the only thing
+ accessible to userland with bit 48 set is the VSYSCALL, and that
+ is handled via other mechanisms. */
+#define TARGET_VIRT_ADDR_SPACE_BITS 47
+#else
+#define TARGET_PHYS_ADDR_SPACE_BITS 36
+#define TARGET_VIRT_ADDR_SPACE_BITS 32
+#endif
+
+/* XXX: This value should match the one returned by CPUID
+ * and in exec.c */
+# if defined(TARGET_X86_64)
+# define TCG_PHYS_ADDR_BITS 40
+# else
+# define TCG_PHYS_ADDR_BITS 36
+# endif
+
+#define PHYS_ADDR_MASK MAKE_64BIT_MASK(0, TCG_PHYS_ADDR_BITS)
+
+#define cpu_init(cpu_model) CPU(cpu_x86_init(cpu_model))
+
+#define cpu_signal_handler cpu_x86_signal_handler
+#define cpu_list x86_cpu_list
+
+/* MMU modes definitions */
+#define MMU_MODE0_SUFFIX _ksmap
+#define MMU_MODE1_SUFFIX _user
+#define MMU_MODE2_SUFFIX _knosmap /* SMAP disabled or CPL<3 && AC=1 */
+#define MMU_KSMAP_IDX 0
+#define MMU_USER_IDX 1
+#define MMU_KNOSMAP_IDX 2
+static inline int cpu_mmu_index(CPUX86State *env, bool ifetch)
+{
+ return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX :
+ (!(env->hflags & HF_SMAP_MASK) || (env->eflags & AC_MASK))
+ ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX;
+}
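+
+/* Illustrative mapping of the cases above: CPL == 3 always uses MMU_USER_IDX;
+ * kernel code uses MMU_KNOSMAP_IDX when SMAP is disabled or EFLAGS.AC is set,
+ * and MMU_KSMAP_IDX (SMAP enforced) otherwise. */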
+
+static inline int cpu_mmu_index_kernel(CPUX86State *env)
+{
+ return !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP_IDX :
+ ((env->hflags & HF_CPL_MASK) < 3 && (env->eflags & AC_MASK))
+ ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX;
+}
+
+#define CC_DST (env->cc_dst)
+#define CC_SRC (env->cc_src)
+#define CC_SRC2 (env->cc_src2)
+#define CC_OP (env->cc_op)
+
+/* n must be a constant to be efficient */
+static inline target_long lshift(target_long x, int n)
+{
+ if (n >= 0) {
+ return x << n;
+ } else {
+ return x >> (-n);
+ }
+}
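+
+/* For example, lshift(x, 3) is x << 3 while lshift(x, -3) is x >> 3; negative
+ * counts give an arithmetic right shift since target_long is signed. */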
+
+/* float macros */
+#define FT0 (env->ft0)
+#define ST0 (env->fpregs[env->fpstt].d)
+#define ST(n) (env->fpregs[(env->fpstt + (n)) & 7].d)
+#define ST1 ST(1)
+
+/* translate.c */
+void tcg_x86_init(void);
+
+#include "exec/cpu-all.h"
+#include "svm.h"
+
+#if !defined(CONFIG_USER_ONLY)
+#include "hw/i386/apic.h"
+#endif
+
+static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *flags)
+{
+ *cs_base = env->segs[R_CS].base;
+ *pc = *cs_base + env->eip;
+ *flags = env->hflags |
+ (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK));
+}
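+
+/* Illustrative example: with CS.base = 0x10000 and eip = 0x100 the returned
+ * pc is 0x10100 (the linear address of the next instruction), and flags
+ * combines hflags with the eflags bits that affect translation. */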
+
+void do_cpu_init(X86CPU *cpu);
+void do_cpu_sipi(X86CPU *cpu);
+
+#define MCE_INJECT_BROADCAST 1
+#define MCE_INJECT_UNCOND_AO 2
+
+void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
+ uint64_t status, uint64_t mcg_status, uint64_t addr,
+ uint64_t misc, int flags);
+
+/* excp_helper.c */
+void QEMU_NORETURN raise_exception(CPUX86State *env, int exception_index);
+void QEMU_NORETURN raise_exception_ra(CPUX86State *env, int exception_index,
+ uintptr_t retaddr);
+void QEMU_NORETURN raise_exception_err(CPUX86State *env, int exception_index,
+ int error_code);
+void QEMU_NORETURN raise_exception_err_ra(CPUX86State *env, int exception_index,
+ int error_code, uintptr_t retaddr);
+void QEMU_NORETURN raise_interrupt(CPUX86State *nenv, int intno, int is_int,
+ int error_code, int next_eip_addend);
+
+/* cc_helper.c */
+extern const uint8_t parity_table[256];
+uint32_t cpu_cc_compute_all(CPUX86State *env1, int op);
+void update_fp_status(CPUX86State *env);
+
+static inline uint32_t cpu_compute_eflags(CPUX86State *env)
+{
+ return env->eflags | cpu_cc_compute_all(env, CC_OP) | (env->df & DF_MASK);
+}
+
+/* NOTE: the translator must set DisasContext.cc_op to CC_OP_EFLAGS
+ * after generating a call to a helper that uses this.
+ */
+static inline void cpu_load_eflags(CPUX86State *env, int eflags,
+ int update_mask)
+{
+ CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
+ CC_OP = CC_OP_EFLAGS;
+ env->df = 1 - (2 * ((eflags >> 10) & 1));
+ env->eflags = (env->eflags & ~update_mask) |
+ (eflags & update_mask) | 0x2;
+}
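+
+/* Example (illustrative): a typical caller passes an update_mask that
+ * excludes the arithmetic flags and DF, e.g.
+ *     cpu_load_eflags(env, new_eflags,
+ *                     ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
+ * so the arithmetic flags live only in CC_SRC/CC_OP and DF only in env->df,
+ * while the remaining bits (and the always-set bit 1) go to env->eflags. */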
+
+/* load efer and update the corresponding hflags. XXX: do consistency
+ checks with cpuid bits? */
+static inline void cpu_load_efer(CPUX86State *env, uint64_t val)
+{
+ env->efer = val;
+ env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
+ if (env->efer & MSR_EFER_LMA) {
+ env->hflags |= HF_LMA_MASK;
+ }
+ if (env->efer & MSR_EFER_SVME) {
+ env->hflags |= HF_SVME_MASK;
+ }
+}
+
+static inline MemTxAttrs cpu_get_mem_attrs(CPUX86State *env)
+{
+ return ((MemTxAttrs) { .secure = (env->hflags & HF_SMM_MASK) != 0 });
+}
+
+/* fpu_helper.c */
+void cpu_set_mxcsr(CPUX86State *env, uint32_t val);
+void cpu_set_fpuc(CPUX86State *env, uint16_t val);
+
+/* mem_helper.c */
+void helper_lock_init(void);
+
+/* svm_helper.c */
+void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
+ uint64_t param);
+void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1);
+
+/* seg_helper.c */
+void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw);
+
+/* smm_helper.c */
+void do_smm_enter(X86CPU *cpu);
+void cpu_smm_update(X86CPU *cpu);
+
+/* apic.c */
+void cpu_report_tpr_access(CPUX86State *env, TPRAccess access);
+void apic_handle_tpr_access_report(DeviceState *d, target_ulong ip,
+ TPRAccess access);
+
+
+/* Change the value of a KVM-specific default
+ *
+ * If value is NULL, no default will be set and the original
+ * value from the CPU model table will be kept.
+ *
+ * It is valid to call this function only for properties that
+ * are already present in the kvm_default_props table.
+ */
+void x86_cpu_change_kvm_default(const char *prop, const char *value);
+
+/* mpx_helper.c */
+void cpu_sync_bndcs_hflags(CPUX86State *env);
+
+/* Return name of 32-bit register, from a R_* constant */
+const char *get_register_name_32(unsigned int reg);
+
+void enable_compat_apic_id_mode(void);
+
+#define APIC_DEFAULT_ADDRESS 0xfee00000
+#define APIC_SPACE_SIZE 0x100000
+
+void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
+ fprintf_function cpu_fprintf, int flags);
+
+/* cpu.c */
+bool cpu_is_bsp(X86CPU *cpu);
+
+#endif /* I386_CPU_H */
diff --git a/target/i386/excp_helper.c b/target/i386/excp_helper.c
new file mode 100644
index 0000000000..f0dc4996c1
--- /dev/null
+++ b/target/i386/excp_helper.c
@@ -0,0 +1,137 @@
+/*
+ * x86 exception helpers
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "qemu/log.h"
+#include "sysemu/sysemu.h"
+#include "exec/helper-proto.h"
+
+void helper_raise_interrupt(CPUX86State *env, int intno, int next_eip_addend)
+{
+ raise_interrupt(env, intno, 1, 0, next_eip_addend);
+}
+
+void helper_raise_exception(CPUX86State *env, int exception_index)
+{
+ raise_exception(env, exception_index);
+}
+
+/*
+ * Check nested exceptions and change to double or triple fault if
+ * needed. It should only be called if this is not an interrupt.
+ * Returns the new exception number.
+ */
+static int check_exception(CPUX86State *env, int intno, int *error_code)
+{
+ int first_contributory = env->old_exception == 0 ||
+ (env->old_exception >= 10 &&
+ env->old_exception <= 13);
+ int second_contributory = intno == 0 ||
+ (intno >= 10 && intno <= 13);
+
+ qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
+ env->old_exception, intno);
+
+#if !defined(CONFIG_USER_ONLY)
+ if (env->old_exception == EXCP08_DBLE) {
+ if (env->hflags & HF_SVMI_MASK) {
+ cpu_vmexit(env, SVM_EXIT_SHUTDOWN, 0); /* does not return */
+ }
+
+ qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
+
+ qemu_system_reset_request();
+ return EXCP_HLT;
+ }
+#endif
+
+ if ((first_contributory && second_contributory)
+ || (env->old_exception == EXCP0E_PAGE &&
+ (second_contributory || (intno == EXCP0E_PAGE)))) {
+ intno = EXCP08_DBLE;
+ *error_code = 0;
+ }
+
+ if (second_contributory || (intno == EXCP0E_PAGE) ||
+ (intno == EXCP08_DBLE)) {
+ env->old_exception = intno;
+ }
+
+ return intno;
+}
+
+/*
+ * Signal an interrupt or exception. It is executed in the main CPU loop.
+ * is_int is TRUE if coming from the int instruction. next_eip_addend is
+ * added to env->eip to give the address AFTER the interrupt instruction.
+ * It is only relevant if is_int is TRUE.
+ */
+static void QEMU_NORETURN raise_interrupt2(CPUX86State *env, int intno,
+ int is_int, int error_code,
+ int next_eip_addend,
+ uintptr_t retaddr)
+{
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+
+ if (!is_int) {
+ cpu_svm_check_intercept_param(env, SVM_EXIT_EXCP_BASE + intno,
+ error_code);
+ intno = check_exception(env, intno, &error_code);
+ } else {
+ cpu_svm_check_intercept_param(env, SVM_EXIT_SWINT, 0);
+ }
+
+ cs->exception_index = intno;
+ env->error_code = error_code;
+ env->exception_is_int = is_int;
+ env->exception_next_eip = env->eip + next_eip_addend;
+ cpu_loop_exit_restore(cs, retaddr);
+}
+
+/* shortcuts to generate exceptions */
+
+void QEMU_NORETURN raise_interrupt(CPUX86State *env, int intno, int is_int,
+ int error_code, int next_eip_addend)
+{
+ raise_interrupt2(env, intno, is_int, error_code, next_eip_addend, 0);
+}
+
+void raise_exception_err(CPUX86State *env, int exception_index,
+ int error_code)
+{
+ raise_interrupt2(env, exception_index, 0, error_code, 0, 0);
+}
+
+void raise_exception_err_ra(CPUX86State *env, int exception_index,
+ int error_code, uintptr_t retaddr)
+{
+ raise_interrupt2(env, exception_index, 0, error_code, 0, retaddr);
+}
+
+void raise_exception(CPUX86State *env, int exception_index)
+{
+ raise_interrupt2(env, exception_index, 0, 0, 0, 0);
+}
+
+void raise_exception_ra(CPUX86State *env, int exception_index, uintptr_t retaddr)
+{
+ raise_interrupt2(env, exception_index, 0, 0, 0, retaddr);
+}
diff --git a/target/i386/fpu_helper.c b/target/i386/fpu_helper.c
new file mode 100644
index 0000000000..2049a8c01d
--- /dev/null
+++ b/target/i386/fpu_helper.c
@@ -0,0 +1,1625 @@
+/*
+ * x86 FPU, MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4/PNI helpers
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include <math.h>
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "qemu/host-utils.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+
+#define FPU_RC_MASK 0xc00
+#define FPU_RC_NEAR 0x000
+#define FPU_RC_DOWN 0x400
+#define FPU_RC_UP 0x800
+#define FPU_RC_CHOP 0xc00
+
+#define MAXTAN 9223372036854775808.0
+
+/* the following deal with x86 long double-precision numbers */
+#define MAXEXPD 0x7fff
+#define EXPBIAS 16383
+#define EXPD(fp) (fp.l.upper & 0x7fff)
+#define SIGND(fp) ((fp.l.upper) & 0x8000)
+#define MANTD(fp) (fp.l.lower)
+#define BIASEXPONENT(fp) fp.l.upper = (fp.l.upper & ~(0x7fff)) | EXPBIAS
+
+#define FPUS_IE (1 << 0)
+#define FPUS_DE (1 << 1)
+#define FPUS_ZE (1 << 2)
+#define FPUS_OE (1 << 3)
+#define FPUS_UE (1 << 4)
+#define FPUS_PE (1 << 5)
+#define FPUS_SF (1 << 6)
+#define FPUS_SE (1 << 7)
+#define FPUS_B (1 << 15)
+
+#define FPUC_EM 0x3f
+
+#define floatx80_lg2 make_floatx80(0x3ffd, 0x9a209a84fbcff799LL)
+#define floatx80_l2e make_floatx80(0x3fff, 0xb8aa3b295c17f0bcLL)
+#define floatx80_l2t make_floatx80(0x4000, 0xd49a784bcd1b8afeLL)
+
+static inline void fpush(CPUX86State *env)
+{
+ env->fpstt = (env->fpstt - 1) & 7;
+ env->fptags[env->fpstt] = 0; /* validate stack entry */
+}
+
+static inline void fpop(CPUX86State *env)
+{
+ env->fptags[env->fpstt] = 1; /* invalidate stack entry */
+ env->fpstt = (env->fpstt + 1) & 7;
+}
+
+static inline floatx80 helper_fldt(CPUX86State *env, target_ulong ptr,
+ uintptr_t retaddr)
+{
+ CPU_LDoubleU temp;
+
+ temp.l.lower = cpu_ldq_data_ra(env, ptr, retaddr);
+ temp.l.upper = cpu_lduw_data_ra(env, ptr + 8, retaddr);
+ return temp.d;
+}
+
+static inline void helper_fstt(CPUX86State *env, floatx80 f, target_ulong ptr,
+ uintptr_t retaddr)
+{
+ CPU_LDoubleU temp;
+
+ temp.d = f;
+ cpu_stq_data_ra(env, ptr, temp.l.lower, retaddr);
+ cpu_stw_data_ra(env, ptr + 8, temp.l.upper, retaddr);
+}
+
+/* x87 FPU helpers */
+
+static inline double floatx80_to_double(CPUX86State *env, floatx80 a)
+{
+ union {
+ float64 f64;
+ double d;
+ } u;
+
+ u.f64 = floatx80_to_float64(a, &env->fp_status);
+ return u.d;
+}
+
+static inline floatx80 double_to_floatx80(CPUX86State *env, double a)
+{
+ union {
+ float64 f64;
+ double d;
+ } u;
+
+ u.d = a;
+ return float64_to_floatx80(u.f64, &env->fp_status);
+}
+
+static void fpu_set_exception(CPUX86State *env, int mask)
+{
+ env->fpus |= mask;
+ if (env->fpus & (~env->fpuc & FPUC_EM)) {
+ env->fpus |= FPUS_SE | FPUS_B;
+ }
+}
+
+static inline floatx80 helper_fdiv(CPUX86State *env, floatx80 a, floatx80 b)
+{
+ if (floatx80_is_zero(b)) {
+ fpu_set_exception(env, FPUS_ZE);
+ }
+ return floatx80_div(a, b, &env->fp_status);
+}
+
+static void fpu_raise_exception(CPUX86State *env, uintptr_t retaddr)
+{
+ if (env->cr[0] & CR0_NE_MASK) {
+ raise_exception_ra(env, EXCP10_COPR, retaddr);
+ }
+#if !defined(CONFIG_USER_ONLY)
+ else {
+ cpu_set_ferr(env);
+ }
+#endif
+}
+
+void helper_flds_FT0(CPUX86State *env, uint32_t val)
+{
+ union {
+ float32 f;
+ uint32_t i;
+ } u;
+
+ u.i = val;
+ FT0 = float32_to_floatx80(u.f, &env->fp_status);
+}
+
+void helper_fldl_FT0(CPUX86State *env, uint64_t val)
+{
+ union {
+ float64 f;
+ uint64_t i;
+ } u;
+
+ u.i = val;
+ FT0 = float64_to_floatx80(u.f, &env->fp_status);
+}
+
+void helper_fildl_FT0(CPUX86State *env, int32_t val)
+{
+ FT0 = int32_to_floatx80(val, &env->fp_status);
+}
+
+void helper_flds_ST0(CPUX86State *env, uint32_t val)
+{
+ int new_fpstt;
+ union {
+ float32 f;
+ uint32_t i;
+ } u;
+
+ new_fpstt = (env->fpstt - 1) & 7;
+ u.i = val;
+ env->fpregs[new_fpstt].d = float32_to_floatx80(u.f, &env->fp_status);
+ env->fpstt = new_fpstt;
+ env->fptags[new_fpstt] = 0; /* validate stack entry */
+}
+
+void helper_fldl_ST0(CPUX86State *env, uint64_t val)
+{
+ int new_fpstt;
+ union {
+ float64 f;
+ uint64_t i;
+ } u;
+
+ new_fpstt = (env->fpstt - 1) & 7;
+ u.i = val;
+ env->fpregs[new_fpstt].d = float64_to_floatx80(u.f, &env->fp_status);
+ env->fpstt = new_fpstt;
+ env->fptags[new_fpstt] = 0; /* validate stack entry */
+}
+
+void helper_fildl_ST0(CPUX86State *env, int32_t val)
+{
+ int new_fpstt;
+
+ new_fpstt = (env->fpstt - 1) & 7;
+ env->fpregs[new_fpstt].d = int32_to_floatx80(val, &env->fp_status);
+ env->fpstt = new_fpstt;
+ env->fptags[new_fpstt] = 0; /* validate stack entry */
+}
+
+void helper_fildll_ST0(CPUX86State *env, int64_t val)
+{
+ int new_fpstt;
+
+ new_fpstt = (env->fpstt - 1) & 7;
+ env->fpregs[new_fpstt].d = int64_to_floatx80(val, &env->fp_status);
+ env->fpstt = new_fpstt;
+ env->fptags[new_fpstt] = 0; /* validate stack entry */
+}
+
+uint32_t helper_fsts_ST0(CPUX86State *env)
+{
+ union {
+ float32 f;
+ uint32_t i;
+ } u;
+
+ u.f = floatx80_to_float32(ST0, &env->fp_status);
+ return u.i;
+}
+
+uint64_t helper_fstl_ST0(CPUX86State *env)
+{
+ union {
+ float64 f;
+ uint64_t i;
+ } u;
+
+ u.f = floatx80_to_float64(ST0, &env->fp_status);
+ return u.i;
+}
+
+int32_t helper_fist_ST0(CPUX86State *env)
+{
+ int32_t val;
+
+ val = floatx80_to_int32(ST0, &env->fp_status);
+ if (val != (int16_t)val) {
+ val = -32768;
+ }
+ return val;
+}
+
+int32_t helper_fistl_ST0(CPUX86State *env)
+{
+ int32_t val;
+ signed char old_exp_flags;
+
+ old_exp_flags = get_float_exception_flags(&env->fp_status);
+ set_float_exception_flags(0, &env->fp_status);
+
+ val = floatx80_to_int32(ST0, &env->fp_status);
+ if (get_float_exception_flags(&env->fp_status) & float_flag_invalid) {
+ val = 0x80000000;
+ }
+ set_float_exception_flags(get_float_exception_flags(&env->fp_status)
+ | old_exp_flags, &env->fp_status);
+ return val;
+}
+
+int64_t helper_fistll_ST0(CPUX86State *env)
+{
+ int64_t val;
+ signed char old_exp_flags;
+
+ old_exp_flags = get_float_exception_flags(&env->fp_status);
+ set_float_exception_flags(0, &env->fp_status);
+
+ val = floatx80_to_int64(ST0, &env->fp_status);
+ if (get_float_exception_flags(&env->fp_status) & float_flag_invalid) {
+ val = 0x8000000000000000ULL;
+ }
+ set_float_exception_flags(get_float_exception_flags(&env->fp_status)
+ | old_exp_flags, &env->fp_status);
+ return val;
+}
+
+int32_t helper_fistt_ST0(CPUX86State *env)
+{
+ int32_t val;
+
+ val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
+ if (val != (int16_t)val) {
+ val = -32768;
+ }
+ return val;
+}
+
+int32_t helper_fisttl_ST0(CPUX86State *env)
+{
+ return floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
+}
+
+int64_t helper_fisttll_ST0(CPUX86State *env)
+{
+ return floatx80_to_int64_round_to_zero(ST0, &env->fp_status);
+}
+
+void helper_fldt_ST0(CPUX86State *env, target_ulong ptr)
+{
+ int new_fpstt;
+
+ new_fpstt = (env->fpstt - 1) & 7;
+ env->fpregs[new_fpstt].d = helper_fldt(env, ptr, GETPC());
+ env->fpstt = new_fpstt;
+ env->fptags[new_fpstt] = 0; /* validate stack entry */
+}
+
+void helper_fstt_ST0(CPUX86State *env, target_ulong ptr)
+{
+ helper_fstt(env, ST0, ptr, GETPC());
+}
+
+void helper_fpush(CPUX86State *env)
+{
+ fpush(env);
+}
+
+void helper_fpop(CPUX86State *env)
+{
+ fpop(env);
+}
+
+void helper_fdecstp(CPUX86State *env)
+{
+ env->fpstt = (env->fpstt - 1) & 7;
+ env->fpus &= ~0x4700;
+}
+
+void helper_fincstp(CPUX86State *env)
+{
+ env->fpstt = (env->fpstt + 1) & 7;
+ env->fpus &= ~0x4700;
+}
+
+/* FPU move */
+
+void helper_ffree_STN(CPUX86State *env, int st_index)
+{
+ env->fptags[(env->fpstt + st_index) & 7] = 1;
+}
+
+void helper_fmov_ST0_FT0(CPUX86State *env)
+{
+ ST0 = FT0;
+}
+
+void helper_fmov_FT0_STN(CPUX86State *env, int st_index)
+{
+ FT0 = ST(st_index);
+}
+
+void helper_fmov_ST0_STN(CPUX86State *env, int st_index)
+{
+ ST0 = ST(st_index);
+}
+
+void helper_fmov_STN_ST0(CPUX86State *env, int st_index)
+{
+ ST(st_index) = ST0;
+}
+
+void helper_fxchg_ST0_STN(CPUX86State *env, int st_index)
+{
+ floatx80 tmp;
+
+ tmp = ST(st_index);
+ ST(st_index) = ST0;
+ ST0 = tmp;
+}
+
+/* FPU operations */
+
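+/* floatx80_compare returns -1 (less), 0 (equal), 1 (greater) or 2
+ * (unordered); indexed with ret + 1, this table gives the matching
+ * C3/C2/C0 bits of the x87 status word.
+ */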
+static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
+
+void helper_fcom_ST0_FT0(CPUX86State *env)
+{
+ int ret;
+
+ ret = floatx80_compare(ST0, FT0, &env->fp_status);
+ env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
+}
+
+void helper_fucom_ST0_FT0(CPUX86State *env)
+{
+ int ret;
+
+ ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
+ env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
+}
+
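+/* For FCOMI/FUCOMI the same compare result selects EFLAGS bits instead:
+ * CF for less, ZF for equal, none for greater, ZF|PF|CF for unordered.
+ */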
+static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
+
+void helper_fcomi_ST0_FT0(CPUX86State *env)
+{
+ int eflags;
+ int ret;
+
+ ret = floatx80_compare(ST0, FT0, &env->fp_status);
+ eflags = cpu_cc_compute_all(env, CC_OP);
+ eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
+ CC_SRC = eflags;
+}
+
+void helper_fucomi_ST0_FT0(CPUX86State *env)
+{
+ int eflags;
+ int ret;
+
+ ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
+ eflags = cpu_cc_compute_all(env, CC_OP);
+ eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
+ CC_SRC = eflags;
+}
+
+void helper_fadd_ST0_FT0(CPUX86State *env)
+{
+ ST0 = floatx80_add(ST0, FT0, &env->fp_status);
+}
+
+void helper_fmul_ST0_FT0(CPUX86State *env)
+{
+ ST0 = floatx80_mul(ST0, FT0, &env->fp_status);
+}
+
+void helper_fsub_ST0_FT0(CPUX86State *env)
+{
+ ST0 = floatx80_sub(ST0, FT0, &env->fp_status);
+}
+
+void helper_fsubr_ST0_FT0(CPUX86State *env)
+{
+ ST0 = floatx80_sub(FT0, ST0, &env->fp_status);
+}
+
+void helper_fdiv_ST0_FT0(CPUX86State *env)
+{
+ ST0 = helper_fdiv(env, ST0, FT0);
+}
+
+void helper_fdivr_ST0_FT0(CPUX86State *env)
+{
+ ST0 = helper_fdiv(env, FT0, ST0);
+}
+
+/* fp operations between STN and ST0 */
+
+void helper_fadd_STN_ST0(CPUX86State *env, int st_index)
+{
+ ST(st_index) = floatx80_add(ST(st_index), ST0, &env->fp_status);
+}
+
+void helper_fmul_STN_ST0(CPUX86State *env, int st_index)
+{
+ ST(st_index) = floatx80_mul(ST(st_index), ST0, &env->fp_status);
+}
+
+void helper_fsub_STN_ST0(CPUX86State *env, int st_index)
+{
+ ST(st_index) = floatx80_sub(ST(st_index), ST0, &env->fp_status);
+}
+
+void helper_fsubr_STN_ST0(CPUX86State *env, int st_index)
+{
+ ST(st_index) = floatx80_sub(ST0, ST(st_index), &env->fp_status);
+}
+
+void helper_fdiv_STN_ST0(CPUX86State *env, int st_index)
+{
+ floatx80 *p;
+
+ p = &ST(st_index);
+ *p = helper_fdiv(env, *p, ST0);
+}
+
+void helper_fdivr_STN_ST0(CPUX86State *env, int st_index)
+{
+ floatx80 *p;
+
+ p = &ST(st_index);
+ *p = helper_fdiv(env, ST0, *p);
+}
+
+/* misc FPU operations */
+void helper_fchs_ST0(CPUX86State *env)
+{
+ ST0 = floatx80_chs(ST0);
+}
+
+void helper_fabs_ST0(CPUX86State *env)
+{
+ ST0 = floatx80_abs(ST0);
+}
+
+void helper_fld1_ST0(CPUX86State *env)
+{
+ ST0 = floatx80_one;
+}
+
+void helper_fldl2t_ST0(CPUX86State *env)
+{
+ ST0 = floatx80_l2t;
+}
+
+void helper_fldl2e_ST0(CPUX86State *env)
+{
+ ST0 = floatx80_l2e;
+}
+
+void helper_fldpi_ST0(CPUX86State *env)
+{
+ ST0 = floatx80_pi;
+}
+
+void helper_fldlg2_ST0(CPUX86State *env)
+{
+ ST0 = floatx80_lg2;
+}
+
+void helper_fldln2_ST0(CPUX86State *env)
+{
+ ST0 = floatx80_ln2;
+}
+
+void helper_fldz_ST0(CPUX86State *env)
+{
+ ST0 = floatx80_zero;
+}
+
+void helper_fldz_FT0(CPUX86State *env)
+{
+ FT0 = floatx80_zero;
+}
+
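+/* FNSTSW: return the status word with the current top-of-stack index
+ * merged into bits 13:11.
+ */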
+uint32_t helper_fnstsw(CPUX86State *env)
+{
+ return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
+}
+
+uint32_t helper_fnstcw(CPUX86State *env)
+{
+ return env->fpuc;
+}
+
+void update_fp_status(CPUX86State *env)
+{
+ int rnd_type;
+
+ /* set rounding mode */
+ switch (env->fpuc & FPU_RC_MASK) {
+ default:
+ case FPU_RC_NEAR:
+ rnd_type = float_round_nearest_even;
+ break;
+ case FPU_RC_DOWN:
+ rnd_type = float_round_down;
+ break;
+ case FPU_RC_UP:
+ rnd_type = float_round_up;
+ break;
+ case FPU_RC_CHOP:
+ rnd_type = float_round_to_zero;
+ break;
+ }
+ set_float_rounding_mode(rnd_type, &env->fp_status);
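+ /* bits 9:8 of the control word select the x87 precision control:
+ 0 = single (32-bit), 2 = double (64-bit), 3 = extended (80-bit);
+ the reserved value 1 is treated as extended here */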
+ switch ((env->fpuc >> 8) & 3) {
+ case 0:
+ rnd_type = 32;
+ break;
+ case 2:
+ rnd_type = 64;
+ break;
+ case 3:
+ default:
+ rnd_type = 80;
+ break;
+ }
+ set_floatx80_rounding_precision(rnd_type, &env->fp_status);
+}
+
+void helper_fldcw(CPUX86State *env, uint32_t val)
+{
+ cpu_set_fpuc(env, val);
+}
+
+void helper_fclex(CPUX86State *env)
+{
+ env->fpus &= 0x7f00;
+}
+
+void helper_fwait(CPUX86State *env)
+{
+ if (env->fpus & FPUS_SE) {
+ fpu_raise_exception(env, GETPC());
+ }
+}
+
+void helper_fninit(CPUX86State *env)
+{
+ env->fpus = 0;
+ env->fpstt = 0;
+ cpu_set_fpuc(env, 0x37f);
+ env->fptags[0] = 1;
+ env->fptags[1] = 1;
+ env->fptags[2] = 1;
+ env->fptags[3] = 1;
+ env->fptags[4] = 1;
+ env->fptags[5] = 1;
+ env->fptags[6] = 1;
+ env->fptags[7] = 1;
+}
+
+/* BCD ops */
+
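+/* FBLD: the ten-byte packed-BCD operand stores two decimal digits per
+ * byte in bytes 0-8 (high nibble = higher digit) and the sign in bit 7
+ * of byte 9; FBST below writes the same format back, zero-padded.
+ */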
+void helper_fbld_ST0(CPUX86State *env, target_ulong ptr)
+{
+ floatx80 tmp;
+ uint64_t val;
+ unsigned int v;
+ int i;
+
+ val = 0;
+ for (i = 8; i >= 0; i--) {
+ v = cpu_ldub_data_ra(env, ptr + i, GETPC());
+ val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
+ }
+ tmp = int64_to_floatx80(val, &env->fp_status);
+ if (cpu_ldub_data_ra(env, ptr + 9, GETPC()) & 0x80) {
+ tmp = floatx80_chs(tmp);
+ }
+ fpush(env);
+ ST0 = tmp;
+}
+
+void helper_fbst_ST0(CPUX86State *env, target_ulong ptr)
+{
+ int v;
+ target_ulong mem_ref, mem_end;
+ int64_t val;
+
+ val = floatx80_to_int64(ST0, &env->fp_status);
+ mem_ref = ptr;
+ mem_end = mem_ref + 9;
+ if (val < 0) {
+ cpu_stb_data_ra(env, mem_end, 0x80, GETPC());
+ val = -val;
+ } else {
+ cpu_stb_data_ra(env, mem_end, 0x00, GETPC());
+ }
+ while (mem_ref < mem_end) {
+ if (val == 0) {
+ break;
+ }
+ v = val % 100;
+ val = val / 100;
+ v = ((v / 10) << 4) | (v % 10);
+ cpu_stb_data_ra(env, mem_ref++, v, GETPC());
+ }
+ while (mem_ref < mem_end) {
+ cpu_stb_data_ra(env, mem_ref++, 0, GETPC());
+ }
+}
+
+void helper_f2xm1(CPUX86State *env)
+{
+ double val = floatx80_to_double(env, ST0);
+
+ val = pow(2.0, val) - 1.0;
+ ST0 = double_to_floatx80(env, val);
+}
+
+void helper_fyl2x(CPUX86State *env)
+{
+ double fptemp = floatx80_to_double(env, ST0);
+
+ if (fptemp > 0.0) {
+ fptemp = log(fptemp) / log(2.0); /* log2(ST) */
+ fptemp *= floatx80_to_double(env, ST1);
+ ST1 = double_to_floatx80(env, fptemp);
+ fpop(env);
+ } else {
+ env->fpus &= ~0x4700;
+ env->fpus |= 0x400;
+ }
+}
+
+void helper_fptan(CPUX86State *env)
+{
+ double fptemp = floatx80_to_double(env, ST0);
+
+ if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
+ env->fpus |= 0x400;
+ } else {
+ fptemp = tan(fptemp);
+ ST0 = double_to_floatx80(env, fptemp);
+ fpush(env);
+ ST0 = floatx80_one;
+ env->fpus &= ~0x400; /* C2 <-- 0 */
+ /* the above code is for |arg| < 2**52 only */
+ }
+}
+
+void helper_fpatan(CPUX86State *env)
+{
+ double fptemp, fpsrcop;
+
+ fpsrcop = floatx80_to_double(env, ST1);
+ fptemp = floatx80_to_double(env, ST0);
+ ST1 = double_to_floatx80(env, atan2(fpsrcop, fptemp));
+ fpop(env);
+}
+
+void helper_fxtract(CPUX86State *env)
+{
+ CPU_LDoubleU temp;
+
+ temp.d = ST0;
+
+ if (floatx80_is_zero(ST0)) {
+ /* Easy way to generate -inf and raise the division-by-zero exception */
+ ST0 = floatx80_div(floatx80_chs(floatx80_one), floatx80_zero,
+ &env->fp_status);
+ fpush(env);
+ ST0 = temp.d;
+ } else {
+ int expdif;
+
+ expdif = EXPD(temp) - EXPBIAS;
+ /* DP exponent bias */
+ ST0 = int32_to_floatx80(expdif, &env->fp_status);
+ fpush(env);
+ BIASEXPONENT(temp);
+ ST0 = temp.d;
+ }
+}
+
+void helper_fprem1(CPUX86State *env)
+{
+ double st0, st1, dblq, fpsrcop, fptemp;
+ CPU_LDoubleU fpsrcop1, fptemp1;
+ int expdif;
+ signed long long int q;
+
+ st0 = floatx80_to_double(env, ST0);
+ st1 = floatx80_to_double(env, ST1);
+
+ if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
+ ST0 = double_to_floatx80(env, 0.0 / 0.0); /* NaN */
+ env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */
+ return;
+ }
+
+ fpsrcop = st0;
+ fptemp = st1;
+ fpsrcop1.d = ST0;
+ fptemp1.d = ST1;
+ expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
+
+ if (expdif < 0) {
+ /* optimisation? taken from the AMD docs */
+ env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */
+ /* ST0 is unchanged */
+ return;
+ }
+
+ if (expdif < 53) {
+ dblq = fpsrcop / fptemp;
+ /* round dblq towards nearest integer */
+ dblq = rint(dblq);
+ st0 = fpsrcop - fptemp * dblq;
+
+ /* convert dblq to q by truncating towards zero */
+ if (dblq < 0.0) {
+ q = (signed long long int)(-dblq);
+ } else {
+ q = (signed long long int)dblq;
+ }
+
+ env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */
+ /* (C0,C3,C1) <-- (q2,q1,q0) */
+ env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
+ env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
+ env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
+ } else {
+ env->fpus |= 0x400; /* C2 <-- 1 */
+ fptemp = pow(2.0, expdif - 50);
+ fpsrcop = (st0 / st1) / fptemp;
+ /* fpsrcop = integer obtained by chopping */
+ fpsrcop = (fpsrcop < 0.0) ?
+ -(floor(fabs(fpsrcop))) : floor(fpsrcop);
+ st0 -= (st1 * fpsrcop * fptemp);
+ }
+ ST0 = double_to_floatx80(env, st0);
+}
+
+void helper_fprem(CPUX86State *env)
+{
+ double st0, st1, dblq, fpsrcop, fptemp;
+ CPU_LDoubleU fpsrcop1, fptemp1;
+ int expdif;
+ signed long long int q;
+
+ st0 = floatx80_to_double(env, ST0);
+ st1 = floatx80_to_double(env, ST1);
+
+ if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
+ ST0 = double_to_floatx80(env, 0.0 / 0.0); /* NaN */
+ env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */
+ return;
+ }
+
+ fpsrcop = st0;
+ fptemp = st1;
+ fpsrcop1.d = ST0;
+ fptemp1.d = ST1;
+ expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
+
+ if (expdif < 0) {
+ /* optimisation? taken from the AMD docs */
+ env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */
+ /* ST0 is unchanged */
+ return;
+ }
+
+ if (expdif < 53) {
+ dblq = fpsrcop / fptemp; /* ST0 / ST1 */
+ /* round dblq towards zero */
+ dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
+ st0 = fpsrcop - fptemp * dblq; /* fpsrcop is ST0 */
+
+ /* convert dblq to q by truncating towards zero */
+ if (dblq < 0.0) {
+ q = (signed long long int)(-dblq);
+ } else {
+ q = (signed long long int)dblq;
+ }
+
+ env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */
+ /* (C0,C3,C1) <-- (q2,q1,q0) */
+ env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
+ env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
+ env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
+ } else {
+ int N = 32 + (expdif % 32); /* as per AMD docs */
+
+ env->fpus |= 0x400; /* C2 <-- 1 */
+ fptemp = pow(2.0, (double)(expdif - N));
+ fpsrcop = (st0 / st1) / fptemp;
+ /* fpsrcop = integer obtained by chopping */
+ fpsrcop = (fpsrcop < 0.0) ?
+ -(floor(fabs(fpsrcop))) : floor(fpsrcop);
+ st0 -= (st1 * fpsrcop * fptemp);
+ }
+ ST0 = double_to_floatx80(env, st0);
+}
+
+void helper_fyl2xp1(CPUX86State *env)
+{
+ double fptemp = floatx80_to_double(env, ST0);
+
+ if ((fptemp + 1.0) > 0.0) {
+ fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST + 1.0) */
+ fptemp *= floatx80_to_double(env, ST1);
+ ST1 = double_to_floatx80(env, fptemp);
+ fpop(env);
+ } else {
+ env->fpus &= ~0x4700;
+ env->fpus |= 0x400;
+ }
+}
+
+void helper_fsqrt(CPUX86State *env)
+{
+ if (floatx80_is_neg(ST0)) {
+ env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */
+ env->fpus |= 0x400;
+ }
+ ST0 = floatx80_sqrt(ST0, &env->fp_status);
+}
+
+void helper_fsincos(CPUX86State *env)
+{
+ double fptemp = floatx80_to_double(env, ST0);
+
+ if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
+ env->fpus |= 0x400;
+ } else {
+ ST0 = double_to_floatx80(env, sin(fptemp));
+ fpush(env);
+ ST0 = double_to_floatx80(env, cos(fptemp));
+ env->fpus &= ~0x400; /* C2 <-- 0 */
+ /* the above code is for |arg| < 2**63 only */
+ }
+}
+
+void helper_frndint(CPUX86State *env)
+{
+ ST0 = floatx80_round_to_int(ST0, &env->fp_status);
+}
+
+void helper_fscale(CPUX86State *env)
+{
+ if (floatx80_is_any_nan(ST1)) {
+ ST0 = ST1;
+ } else {
+ int n = floatx80_to_int32_round_to_zero(ST1, &env->fp_status);
+ ST0 = floatx80_scalbn(ST0, n, &env->fp_status);
+ }
+}
+
+void helper_fsin(CPUX86State *env)
+{
+ double fptemp = floatx80_to_double(env, ST0);
+
+ if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
+ env->fpus |= 0x400;
+ } else {
+ ST0 = double_to_floatx80(env, sin(fptemp));
+ env->fpus &= ~0x400; /* C2 <-- 0 */
+ /* the above code is for |arg| < 2**53 only */
+ }
+}
+
+void helper_fcos(CPUX86State *env)
+{
+ double fptemp = floatx80_to_double(env, ST0);
+
+ if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
+ env->fpus |= 0x400;
+ } else {
+ ST0 = double_to_floatx80(env, cos(fptemp));
+ env->fpus &= ~0x400; /* C2 <-- 0 */
+ /* the above code is for |arg| < 2**63 only */
+ }
+}
+
+void helper_fxam_ST0(CPUX86State *env)
+{
+ CPU_LDoubleU temp;
+ int expdif;
+
+ temp.d = ST0;
+
+ env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */
+ if (SIGND(temp)) {
+ env->fpus |= 0x200; /* C1 <-- 1 */
+ }
+
+ /* XXX: test fptags too */
+ expdif = EXPD(temp);
+ if (expdif == MAXEXPD) {
+ if (MANTD(temp) == 0x8000000000000000ULL) {
+ env->fpus |= 0x500; /* Infinity */
+ } else {
+ env->fpus |= 0x100; /* NaN */
+ }
+ } else if (expdif == 0) {
+ if (MANTD(temp) == 0) {
+ env->fpus |= 0x4000; /* Zero */
+ } else {
+ env->fpus |= 0x4400; /* Denormal */
+ }
+ } else {
+ env->fpus |= 0x400;
+ }
+}
+
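+/* FSTENV/FNSTENV store the full tag word: two bits per register,
+ * 00 = valid, 01 = zero, 10 = special (NaN, infinity, denormal),
+ * 11 = empty.
+ */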
+static void do_fstenv(CPUX86State *env, target_ulong ptr, int data32,
+ uintptr_t retaddr)
+{
+ int fpus, fptag, exp, i;
+ uint64_t mant;
+ CPU_LDoubleU tmp;
+
+ fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
+ fptag = 0;
+ for (i = 7; i >= 0; i--) {
+ fptag <<= 2;
+ if (env->fptags[i]) {
+ fptag |= 3;
+ } else {
+ tmp.d = env->fpregs[i].d;
+ exp = EXPD(tmp);
+ mant = MANTD(tmp);
+ if (exp == 0 && mant == 0) {
+ /* zero */
+ fptag |= 1;
+ } else if (exp == 0 || exp == MAXEXPD
+ || (mant & (1LL << 63)) == 0) {
+ /* NaNs, infinity, denormal */
+ fptag |= 2;
+ }
+ }
+ }
+ if (data32) {
+ /* 32 bit */
+ cpu_stl_data_ra(env, ptr, env->fpuc, retaddr);
+ cpu_stl_data_ra(env, ptr + 4, fpus, retaddr);
+ cpu_stl_data_ra(env, ptr + 8, fptag, retaddr);
+ cpu_stl_data_ra(env, ptr + 12, 0, retaddr); /* fpip */
+ cpu_stl_data_ra(env, ptr + 16, 0, retaddr); /* fpcs */
+ cpu_stl_data_ra(env, ptr + 20, 0, retaddr); /* fpoo */
+ cpu_stl_data_ra(env, ptr + 24, 0, retaddr); /* fpos */
+ } else {
+ /* 16 bit */
+ cpu_stw_data_ra(env, ptr, env->fpuc, retaddr);
+ cpu_stw_data_ra(env, ptr + 2, fpus, retaddr);
+ cpu_stw_data_ra(env, ptr + 4, fptag, retaddr);
+ cpu_stw_data_ra(env, ptr + 6, 0, retaddr);
+ cpu_stw_data_ra(env, ptr + 8, 0, retaddr);
+ cpu_stw_data_ra(env, ptr + 10, 0, retaddr);
+ cpu_stw_data_ra(env, ptr + 12, 0, retaddr);
+ }
+}
+
+void helper_fstenv(CPUX86State *env, target_ulong ptr, int data32)
+{
+ do_fstenv(env, ptr, data32, GETPC());
+}
+
+static void do_fldenv(CPUX86State *env, target_ulong ptr, int data32,
+ uintptr_t retaddr)
+{
+ int i, fpus, fptag;
+
+ if (data32) {
+ cpu_set_fpuc(env, cpu_lduw_data_ra(env, ptr, retaddr));
+ fpus = cpu_lduw_data_ra(env, ptr + 4, retaddr);
+ fptag = cpu_lduw_data_ra(env, ptr + 8, retaddr);
+ } else {
+ cpu_set_fpuc(env, cpu_lduw_data_ra(env, ptr, retaddr));
+ fpus = cpu_lduw_data_ra(env, ptr + 2, retaddr);
+ fptag = cpu_lduw_data_ra(env, ptr + 4, retaddr);
+ }
+ env->fpstt = (fpus >> 11) & 7;
+ env->fpus = fpus & ~0x3800;
+ for (i = 0; i < 8; i++) {
+ env->fptags[i] = ((fptag & 3) == 3);
+ fptag >>= 2;
+ }
+}
+
+void helper_fldenv(CPUX86State *env, target_ulong ptr, int data32)
+{
+ do_fldenv(env, ptr, data32, GETPC());
+}
+
+void helper_fsave(CPUX86State *env, target_ulong ptr, int data32)
+{
+ floatx80 tmp;
+ int i;
+
+ do_fstenv(env, ptr, data32, GETPC());
+
+ ptr += (14 << data32);
+ for (i = 0; i < 8; i++) {
+ tmp = ST(i);
+ helper_fstt(env, tmp, ptr, GETPC());
+ ptr += 10;
+ }
+
+ /* fninit */
+ env->fpus = 0;
+ env->fpstt = 0;
+ cpu_set_fpuc(env, 0x37f);
+ env->fptags[0] = 1;
+ env->fptags[1] = 1;
+ env->fptags[2] = 1;
+ env->fptags[3] = 1;
+ env->fptags[4] = 1;
+ env->fptags[5] = 1;
+ env->fptags[6] = 1;
+ env->fptags[7] = 1;
+}
+
+void helper_frstor(CPUX86State *env, target_ulong ptr, int data32)
+{
+ floatx80 tmp;
+ int i;
+
+ do_fldenv(env, ptr, data32, GETPC());
+ ptr += (14 << data32);
+
+ for (i = 0; i < 8; i++) {
+ tmp = helper_fldt(env, ptr, GETPC());
+ ST(i) = tmp;
+ ptr += 10;
+ }
+}
+
+#if defined(CONFIG_USER_ONLY)
+void cpu_x86_fsave(CPUX86State *env, target_ulong ptr, int data32)
+{
+ helper_fsave(env, ptr, data32);
+}
+
+void cpu_x86_frstor(CPUX86State *env, target_ulong ptr, int data32)
+{
+ helper_frstor(env, ptr, data32);
+}
+#endif
+
+#define XO(X) offsetof(X86XSaveArea, X)
+
+static void do_xsave_fpu(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+ int fpus, fptag, i;
+ target_ulong addr;
+
+ fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
+ fptag = 0;
+ for (i = 0; i < 8; i++) {
+ fptag |= (env->fptags[i] << i);
+ }
+
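+ /* FXSAVE/XSAVE store the abridged tag word: one bit per register,
+ 1 = valid; env->fptags uses 1 = empty, hence the inversion below. */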
+ cpu_stw_data_ra(env, ptr + XO(legacy.fcw), env->fpuc, ra);
+ cpu_stw_data_ra(env, ptr + XO(legacy.fsw), fpus, ra);
+ cpu_stw_data_ra(env, ptr + XO(legacy.ftw), fptag ^ 0xff, ra);
+
+ /* In 32-bit mode this is eip, sel, dp, sel.
+ In 64-bit mode this is rip, rdp.
+ But in either case we don't write actual data, just zeros. */
+ cpu_stq_data_ra(env, ptr + XO(legacy.fpip), 0, ra); /* eip+sel; rip */
+ cpu_stq_data_ra(env, ptr + XO(legacy.fpdp), 0, ra); /* edp+sel; rdp */
+
+ addr = ptr + XO(legacy.fpregs);
+ for (i = 0; i < 8; i++) {
+ floatx80 tmp = ST(i);
+ helper_fstt(env, tmp, addr, ra);
+ addr += 16;
+ }
+}
+
+static void do_xsave_mxcsr(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+ cpu_stl_data_ra(env, ptr + XO(legacy.mxcsr), env->mxcsr, ra);
+ cpu_stl_data_ra(env, ptr + XO(legacy.mxcsr_mask), 0x0000ffff, ra);
+}
+
+static void do_xsave_sse(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+ int i, nb_xmm_regs;
+ target_ulong addr;
+
+ if (env->hflags & HF_CS64_MASK) {
+ nb_xmm_regs = 16;
+ } else {
+ nb_xmm_regs = 8;
+ }
+
+ addr = ptr + XO(legacy.xmm_regs);
+ for (i = 0; i < nb_xmm_regs; i++) {
+ cpu_stq_data_ra(env, addr, env->xmm_regs[i].ZMM_Q(0), ra);
+ cpu_stq_data_ra(env, addr + 8, env->xmm_regs[i].ZMM_Q(1), ra);
+ addr += 16;
+ }
+}
+
+static void do_xsave_bndregs(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+ target_ulong addr = ptr + offsetof(XSaveBNDREG, bnd_regs);
+ int i;
+
+ for (i = 0; i < 4; i++, addr += 16) {
+ cpu_stq_data_ra(env, addr, env->bnd_regs[i].lb, ra);
+ cpu_stq_data_ra(env, addr + 8, env->bnd_regs[i].ub, ra);
+ }
+}
+
+static void do_xsave_bndcsr(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+ cpu_stq_data_ra(env, ptr + offsetof(XSaveBNDCSR, bndcsr.cfgu),
+ env->bndcs_regs.cfgu, ra);
+ cpu_stq_data_ra(env, ptr + offsetof(XSaveBNDCSR, bndcsr.sts),
+ env->bndcs_regs.sts, ra);
+}
+
+static void do_xsave_pkru(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+ cpu_stq_data_ra(env, ptr, env->pkru, ra);
+}
+
+void helper_fxsave(CPUX86State *env, target_ulong ptr)
+{
+ uintptr_t ra = GETPC();
+
+ /* The operand must be 16 byte aligned */
+ if (ptr & 0xf) {
+ raise_exception_ra(env, EXCP0D_GPF, ra);
+ }
+
+ do_xsave_fpu(env, ptr, ra);
+
+ if (env->cr[4] & CR4_OSFXSR_MASK) {
+ do_xsave_mxcsr(env, ptr, ra);
+ /* Fast FXSAVE leaves out the XMM registers */
+ if (!(env->efer & MSR_EFER_FFXSR)
+ || (env->hflags & HF_CPL_MASK)
+ || !(env->hflags & HF_LMA_MASK)) {
+ do_xsave_sse(env, ptr, ra);
+ }
+ }
+}
+
+static uint64_t get_xinuse(CPUX86State *env)
+{
+ uint64_t inuse = -1;
+
+ /* For the most part, we don't track XINUSE. We could calculate it
+ here for all components, but it's probably less work to simply
+ report everything as in use. That said, the state of BNDREGS is
+ important enough to track in HFLAGS, so we might as well use that here. */
+ if ((env->hflags & HF_MPX_IU_MASK) == 0) {
+ inuse &= ~XSTATE_BNDREGS_MASK;
+ }
+ return inuse;
+}
+
+static void do_xsave(CPUX86State *env, target_ulong ptr, uint64_t rfbm,
+ uint64_t inuse, uint64_t opt, uintptr_t ra)
+{
+ uint64_t old_bv, new_bv;
+
+ /* The OS must have enabled XSAVE. */
+ if (!(env->cr[4] & CR4_OSXSAVE_MASK)) {
+ raise_exception_ra(env, EXCP06_ILLOP, ra);
+ }
+
+ /* The operand must be 64 byte aligned. */
+ if (ptr & 63) {
+ raise_exception_ra(env, EXCP0D_GPF, ra);
+ }
+
+ /* Never save anything not enabled by XCR0. */
+ rfbm &= env->xcr0;
+ opt &= rfbm;
+
+ if (opt & XSTATE_FP_MASK) {
+ do_xsave_fpu(env, ptr, ra);
+ }
+ if (rfbm & XSTATE_SSE_MASK) {
+ /* Note that saving MXCSR is not suppressed by XSAVEOPT. */
+ do_xsave_mxcsr(env, ptr, ra);
+ }
+ if (opt & XSTATE_SSE_MASK) {
+ do_xsave_sse(env, ptr, ra);
+ }
+ if (opt & XSTATE_BNDREGS_MASK) {
+ do_xsave_bndregs(env, ptr + XO(bndreg_state), ra);
+ }
+ if (opt & XSTATE_BNDCSR_MASK) {
+ do_xsave_bndcsr(env, ptr + XO(bndcsr_state), ra);
+ }
+ if (opt & XSTATE_PKRU_MASK) {
+ do_xsave_pkru(env, ptr + XO(pkru_state), ra);
+ }
+
+ /* Update the XSTATE_BV field. */
+ old_bv = cpu_ldq_data_ra(env, ptr + XO(header.xstate_bv), ra);
+ new_bv = (old_bv & ~rfbm) | (inuse & rfbm);
+ cpu_stq_data_ra(env, ptr + XO(header.xstate_bv), new_bv, ra);
+}
+
+void helper_xsave(CPUX86State *env, target_ulong ptr, uint64_t rfbm)
+{
+ do_xsave(env, ptr, rfbm, get_xinuse(env), -1, GETPC());
+}
+
+void helper_xsaveopt(CPUX86State *env, target_ulong ptr, uint64_t rfbm)
+{
+ uint64_t inuse = get_xinuse(env);
+ do_xsave(env, ptr, rfbm, inuse, inuse, GETPC());
+}
+
+static void do_xrstor_fpu(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+ int i, fpuc, fpus, fptag;
+ target_ulong addr;
+
+ fpuc = cpu_lduw_data_ra(env, ptr + XO(legacy.fcw), ra);
+ fpus = cpu_lduw_data_ra(env, ptr + XO(legacy.fsw), ra);
+ fptag = cpu_lduw_data_ra(env, ptr + XO(legacy.ftw), ra);
+ cpu_set_fpuc(env, fpuc);
+ env->fpstt = (fpus >> 11) & 7;
+ env->fpus = fpus & ~0x3800;
+ fptag ^= 0xff;
+ for (i = 0; i < 8; i++) {
+ env->fptags[i] = ((fptag >> i) & 1);
+ }
+
+ addr = ptr + XO(legacy.fpregs);
+ for (i = 0; i < 8; i++) {
+ floatx80 tmp = helper_fldt(env, addr, ra);
+ ST(i) = tmp;
+ addr += 16;
+ }
+}
+
+static void do_xrstor_mxcsr(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+ cpu_set_mxcsr(env, cpu_ldl_data_ra(env, ptr + XO(legacy.mxcsr), ra));
+}
+
+static void do_xrstor_sse(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+ int i, nb_xmm_regs;
+ target_ulong addr;
+
+ if (env->hflags & HF_CS64_MASK) {
+ nb_xmm_regs = 16;
+ } else {
+ nb_xmm_regs = 8;
+ }
+
+ addr = ptr + XO(legacy.xmm_regs);
+ for (i = 0; i < nb_xmm_regs; i++) {
+ env->xmm_regs[i].ZMM_Q(0) = cpu_ldq_data_ra(env, addr, ra);
+ env->xmm_regs[i].ZMM_Q(1) = cpu_ldq_data_ra(env, addr + 8, ra);
+ addr += 16;
+ }
+}
+
+static void do_xrstor_bndregs(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+ target_ulong addr = ptr + offsetof(XSaveBNDREG, bnd_regs);
+ int i;
+
+ for (i = 0; i < 4; i++, addr += 16) {
+ env->bnd_regs[i].lb = cpu_ldq_data_ra(env, addr, ra);
+ env->bnd_regs[i].ub = cpu_ldq_data_ra(env, addr + 8, ra);
+ }
+}
+
+static void do_xrstor_bndcsr(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+ /* FIXME: Extend highest implemented bit of linear address. */
+ env->bndcs_regs.cfgu
+ = cpu_ldq_data_ra(env, ptr + offsetof(XSaveBNDCSR, bndcsr.cfgu), ra);
+ env->bndcs_regs.sts
+ = cpu_ldq_data_ra(env, ptr + offsetof(XSaveBNDCSR, bndcsr.sts), ra);
+}
+
+static void do_xrstor_pkru(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+ env->pkru = cpu_ldq_data_ra(env, ptr, ra);
+}
+
+void helper_fxrstor(CPUX86State *env, target_ulong ptr)
+{
+ uintptr_t ra = GETPC();
+
+ /* The operand must be 16 byte aligned */
+ if (ptr & 0xf) {
+ raise_exception_ra(env, EXCP0D_GPF, ra);
+ }
+
+ do_xrstor_fpu(env, ptr, ra);
+
+ if (env->cr[4] & CR4_OSFXSR_MASK) {
+ do_xrstor_mxcsr(env, ptr, ra);
+ /* Fast FXRSTOR leaves out the XMM registers */
+ if (!(env->efer & MSR_EFER_FFXSR)
+ || (env->hflags & HF_CPL_MASK)
+ || !(env->hflags & HF_LMA_MASK)) {
+ do_xrstor_sse(env, ptr, ra);
+ }
+ }
+}
+
+void helper_xrstor(CPUX86State *env, target_ulong ptr, uint64_t rfbm)
+{
+ uintptr_t ra = GETPC();
+ uint64_t xstate_bv, xcomp_bv, reserve0;
+
+ rfbm &= env->xcr0;
+
+ /* The OS must have enabled XSAVE. */
+ if (!(env->cr[4] & CR4_OSXSAVE_MASK)) {
+ raise_exception_ra(env, EXCP06_ILLOP, ra);
+ }
+
+ /* The operand must be 64 byte aligned. */
+ if (ptr & 63) {
+ raise_exception_ra(env, EXCP0D_GPF, ra);
+ }
+
+ xstate_bv = cpu_ldq_data_ra(env, ptr + XO(header.xstate_bv), ra);
+
+ if ((int64_t)xstate_bv < 0) {
+ /* FIXME: Compact form. */
+ raise_exception_ra(env, EXCP0D_GPF, ra);
+ }
+
+ /* Standard form. */
+
+ /* The XSTATE_BV field must not set bits not present in XCR0. */
+ if (xstate_bv & ~env->xcr0) {
+ raise_exception_ra(env, EXCP0D_GPF, ra);
+ }
+
+ /* The XCOMP_BV field must be zero. Note that, as of the April 2016
+ revision, the description of the XSAVE Header (Vol 1, Sec 13.4.2)
+ describes only XCOMP_BV, but the description of the standard form
+ of XRSTOR (Vol 1, Sec 13.8.1) checks bytes 23:8 for zero, which
+ includes the next 64-bit field. */
+ xcomp_bv = cpu_ldq_data_ra(env, ptr + XO(header.xcomp_bv), ra);
+ reserve0 = cpu_ldq_data_ra(env, ptr + XO(header.reserve0), ra);
+ if (xcomp_bv || reserve0) {
+ raise_exception_ra(env, EXCP0D_GPF, ra);
+ }
+
+ if (rfbm & XSTATE_FP_MASK) {
+ if (xstate_bv & XSTATE_FP_MASK) {
+ do_xrstor_fpu(env, ptr, ra);
+ } else {
+ helper_fninit(env);
+ memset(env->fpregs, 0, sizeof(env->fpregs));
+ }
+ }
+ if (rfbm & XSTATE_SSE_MASK) {
+ /* Note that the standard form of XRSTOR loads MXCSR from memory
+ whether or not the XSTATE_BV bit is set. */
+ do_xrstor_mxcsr(env, ptr, ra);
+ if (xstate_bv & XSTATE_SSE_MASK) {
+ do_xrstor_sse(env, ptr, ra);
+ } else {
+ /* ??? When AVX is implemented, we may have to be more
+ selective in the clearing. */
+ memset(env->xmm_regs, 0, sizeof(env->xmm_regs));
+ }
+ }
+ if (rfbm & XSTATE_BNDREGS_MASK) {
+ if (xstate_bv & XSTATE_BNDREGS_MASK) {
+ do_xrstor_bndregs(env, ptr + XO(bndreg_state), ra);
+ env->hflags |= HF_MPX_IU_MASK;
+ } else {
+ memset(env->bnd_regs, 0, sizeof(env->bnd_regs));
+ env->hflags &= ~HF_MPX_IU_MASK;
+ }
+ }
+ if (rfbm & XSTATE_BNDCSR_MASK) {
+ if (xstate_bv & XSTATE_BNDCSR_MASK) {
+ do_xrstor_bndcsr(env, ptr + XO(bndcsr_state), ra);
+ } else {
+ memset(&env->bndcs_regs, 0, sizeof(env->bndcs_regs));
+ }
+ cpu_sync_bndcs_hflags(env);
+ }
+ if (rfbm & XSTATE_PKRU_MASK) {
+ uint64_t old_pkru = env->pkru;
+ if (xstate_bv & XSTATE_PKRU_MASK) {
+ do_xrstor_pkru(env, ptr + XO(pkru_state), ra);
+ } else {
+ env->pkru = 0;
+ }
+ if (env->pkru != old_pkru) {
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+ tlb_flush(cs, 1);
+ }
+ }
+}
+
+#undef XO
+
+uint64_t helper_xgetbv(CPUX86State *env, uint32_t ecx)
+{
+ /* The OS must have enabled XSAVE. */
+ if (!(env->cr[4] & CR4_OSXSAVE_MASK)) {
+ raise_exception_ra(env, EXCP06_ILLOP, GETPC());
+ }
+
+ switch (ecx) {
+ case 0:
+ return env->xcr0;
+ case 1:
+ if (env->features[FEAT_XSAVE] & CPUID_XSAVE_XGETBV1) {
+ return env->xcr0 & get_xinuse(env);
+ }
+ break;
+ }
+ raise_exception_ra(env, EXCP0D_GPF, GETPC());
+}
+
+void helper_xsetbv(CPUX86State *env, uint32_t ecx, uint64_t mask)
+{
+ uint32_t dummy, ena_lo, ena_hi;
+ uint64_t ena;
+
+ /* The OS must have enabled XSAVE. */
+ if (!(env->cr[4] & CR4_OSXSAVE_MASK)) {
+ raise_exception_ra(env, EXCP06_ILLOP, GETPC());
+ }
+
+ /* Only XCR0 is defined at present; the FPU may not be disabled. */
+ if (ecx != 0 || (mask & XSTATE_FP_MASK) == 0) {
+ goto do_gpf;
+ }
+
+ /* Disallow enabling unimplemented features. */
+ cpu_x86_cpuid(env, 0x0d, 0, &ena_lo, &dummy, &dummy, &ena_hi);
+ ena = ((uint64_t)ena_hi << 32) | ena_lo;
+ if (mask & ~ena) {
+ goto do_gpf;
+ }
+
+ /* Disallow enabling only half of MPX. */
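+ /* BNDCSR is the next XCR0 bit above BNDREGS, so the multiply shifts
+ the BNDREGS bit into the BNDCSR position; the XOR is non-zero there
+ iff exactly one of the two bits is set. */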
+ if ((mask ^ (mask * (XSTATE_BNDCSR_MASK / XSTATE_BNDREGS_MASK)))
+ & XSTATE_BNDCSR_MASK) {
+ goto do_gpf;
+ }
+
+ env->xcr0 = mask;
+ cpu_sync_bndcs_hflags(env);
+ return;
+
+ do_gpf:
+ raise_exception_ra(env, EXCP0D_GPF, GETPC());
+}
+
+void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
+{
+ CPU_LDoubleU temp;
+
+ temp.d = f;
+ *pmant = temp.l.lower;
+ *pexp = temp.l.upper;
+}
+
+floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
+{
+ CPU_LDoubleU temp;
+
+ temp.l.upper = upper;
+ temp.l.lower = mant;
+ return temp.d;
+}
+
+/* MMX/SSE */
+/* XXX: optimize by storing fpstt and fptags in the static cpu state */
+
+#define SSE_DAZ 0x0040
+#define SSE_RC_MASK 0x6000
+#define SSE_RC_NEAR 0x0000
+#define SSE_RC_DOWN 0x2000
+#define SSE_RC_UP 0x4000
+#define SSE_RC_CHOP 0x6000
+#define SSE_FZ 0x8000
+
+void cpu_set_mxcsr(CPUX86State *env, uint32_t mxcsr)
+{
+ int rnd_type;
+
+ env->mxcsr = mxcsr;
+
+ /* set rounding mode */
+ switch (mxcsr & SSE_RC_MASK) {
+ default:
+ case SSE_RC_NEAR:
+ rnd_type = float_round_nearest_even;
+ break;
+ case SSE_RC_DOWN:
+ rnd_type = float_round_down;
+ break;
+ case SSE_RC_UP:
+ rnd_type = float_round_up;
+ break;
+ case SSE_RC_CHOP:
+ rnd_type = float_round_to_zero;
+ break;
+ }
+ set_float_rounding_mode(rnd_type, &env->sse_status);
+
+ /* set denormals are zero */
+ set_flush_inputs_to_zero((mxcsr & SSE_DAZ) ? 1 : 0, &env->sse_status);
+
+ /* set flush to zero (the MXCSR FZ bit controls SSE results) */
+ set_flush_to_zero((mxcsr & SSE_FZ) ? 1 : 0, &env->sse_status);
+}
+
+void cpu_set_fpuc(CPUX86State *env, uint16_t val)
+{
+ env->fpuc = val;
+ update_fp_status(env);
+}
+
+void helper_ldmxcsr(CPUX86State *env, uint32_t val)
+{
+ cpu_set_mxcsr(env, val);
+}
+
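+/* MMX instructions alias the x87 register file: entering MMX state
+ * resets the stack top and marks all eight registers valid, while EMMS
+ * marks them all empty again.
+ */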
+void helper_enter_mmx(CPUX86State *env)
+{
+ env->fpstt = 0;
+ *(uint32_t *)(env->fptags) = 0;
+ *(uint32_t *)(env->fptags + 4) = 0;
+}
+
+void helper_emms(CPUX86State *env)
+{
+ /* set to empty state */
+ *(uint32_t *)(env->fptags) = 0x01010101;
+ *(uint32_t *)(env->fptags + 4) = 0x01010101;
+}
+
+/* XXX: suppress */
+void helper_movq(CPUX86State *env, void *d, void *s)
+{
+ *(uint64_t *)d = *(uint64_t *)s;
+}
+
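+/* ops_sse.h is included twice: SHIFT 0 instantiates the 64-bit MMX
+ * variants, SHIFT 1 the 128-bit SSE variants.
+ */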
+#define SHIFT 0
+#include "ops_sse.h"
+
+#define SHIFT 1
+#include "ops_sse.h"
diff --git a/target/i386/gdbstub.c b/target/i386/gdbstub.c
new file mode 100644
index 0000000000..c494535df1
--- /dev/null
+++ b/target/i386/gdbstub.c
@@ -0,0 +1,234 @@
+/*
+ * x86 gdb server stub
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+
+#ifdef TARGET_X86_64
+static const int gpr_map[16] = {
+ R_EAX, R_EBX, R_ECX, R_EDX, R_ESI, R_EDI, R_EBP, R_ESP,
+ 8, 9, 10, 11, 12, 13, 14, 15
+};
+#else
+#define gpr_map gpr_map32
+#endif
+static const int gpr_map32[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+
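+/* gdb register layout: the general-purpose registers first, then PC and
+ * EFLAGS, the six segment selectors, sixteen x87 slots (eight data
+ * registers plus eight control/status values), the XMM registers and
+ * finally MXCSR.
+ */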
+#define IDX_IP_REG CPU_NB_REGS
+#define IDX_FLAGS_REG (IDX_IP_REG + 1)
+#define IDX_SEG_REGS (IDX_FLAGS_REG + 1)
+#define IDX_FP_REGS (IDX_SEG_REGS + 6)
+#define IDX_XMM_REGS (IDX_FP_REGS + 16)
+#define IDX_MXCSR_REG (IDX_XMM_REGS + CPU_NB_REGS)
+
+int x86_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ if (n < CPU_NB_REGS) {
+ if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
+ return gdb_get_reg64(mem_buf, env->regs[gpr_map[n]]);
+ } else if (n < CPU_NB_REGS32) {
+ return gdb_get_reg32(mem_buf, env->regs[gpr_map32[n]]);
+ }
+ } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
+#ifdef USE_X86LDOUBLE
+ /* FIXME: byteswap float values - after fixing fpregs layout. */
+ memcpy(mem_buf, &env->fpregs[n - IDX_FP_REGS], 10);
+#else
+ memset(mem_buf, 0, 10);
+#endif
+ return 10;
+ } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
+ n -= IDX_XMM_REGS;
+ if (n < CPU_NB_REGS32 ||
+ (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
+ stq_p(mem_buf, env->xmm_regs[n].ZMM_Q(0));
+ stq_p(mem_buf + 8, env->xmm_regs[n].ZMM_Q(1));
+ return 16;
+ }
+ } else {
+ switch (n) {
+ case IDX_IP_REG:
+ if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
+ return gdb_get_reg64(mem_buf, env->eip);
+ } else {
+ return gdb_get_reg32(mem_buf, env->eip);
+ }
+ case IDX_FLAGS_REG:
+ return gdb_get_reg32(mem_buf, env->eflags);
+
+ case IDX_SEG_REGS:
+ return gdb_get_reg32(mem_buf, env->segs[R_CS].selector);
+ case IDX_SEG_REGS + 1:
+ return gdb_get_reg32(mem_buf, env->segs[R_SS].selector);
+ case IDX_SEG_REGS + 2:
+ return gdb_get_reg32(mem_buf, env->segs[R_DS].selector);
+ case IDX_SEG_REGS + 3:
+ return gdb_get_reg32(mem_buf, env->segs[R_ES].selector);
+ case IDX_SEG_REGS + 4:
+ return gdb_get_reg32(mem_buf, env->segs[R_FS].selector);
+ case IDX_SEG_REGS + 5:
+ return gdb_get_reg32(mem_buf, env->segs[R_GS].selector);
+
+ case IDX_FP_REGS + 8:
+ return gdb_get_reg32(mem_buf, env->fpuc);
+ case IDX_FP_REGS + 9:
+ return gdb_get_reg32(mem_buf, (env->fpus & ~0x3800) |
+ (env->fpstt & 0x7) << 11);
+ case IDX_FP_REGS + 10:
+ return gdb_get_reg32(mem_buf, 0); /* ftag */
+ case IDX_FP_REGS + 11:
+ return gdb_get_reg32(mem_buf, 0); /* fiseg */
+ case IDX_FP_REGS + 12:
+ return gdb_get_reg32(mem_buf, 0); /* fioff */
+ case IDX_FP_REGS + 13:
+ return gdb_get_reg32(mem_buf, 0); /* foseg */
+ case IDX_FP_REGS + 14:
+ return gdb_get_reg32(mem_buf, 0); /* fooff */
+ case IDX_FP_REGS + 15:
+ return gdb_get_reg32(mem_buf, 0); /* fop */
+
+ case IDX_MXCSR_REG:
+ return gdb_get_reg32(mem_buf, env->mxcsr);
+ }
+ }
+ return 0;
+}
+
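+/* Writing a segment register from gdb: in real or VM86 mode the hidden
+ * descriptor cache is rebuilt directly from the selector; in protected
+ * mode the descriptor tables are consulted via cpu_x86_get_descr_debug.
+ */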
+static int x86_cpu_gdb_load_seg(X86CPU *cpu, int sreg, uint8_t *mem_buf)
+{
+ CPUX86State *env = &cpu->env;
+ uint16_t selector = ldl_p(mem_buf);
+
+ if (selector != env->segs[sreg].selector) {
+#if defined(CONFIG_USER_ONLY)
+ cpu_x86_load_seg(env, sreg, selector);
+#else
+ unsigned int limit, flags;
+ target_ulong base;
+
+ if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
+ int dpl = (env->eflags & VM_MASK) ? 3 : 0;
+ base = selector << 4;
+ limit = 0xffff;
+ flags = DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_A_MASK | (dpl << DESC_DPL_SHIFT);
+ } else {
+ if (!cpu_x86_get_descr_debug(env, selector, &base, &limit,
+ &flags)) {
+ return 4;
+ }
+ }
+ cpu_x86_load_seg_cache(env, sreg, selector, base, limit, flags);
+#endif
+ }
+ return 4;
+}
+
+int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ uint32_t tmp;
+
+ if (n < CPU_NB_REGS) {
+ if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
+ env->regs[gpr_map[n]] = ldtul_p(mem_buf);
+ return sizeof(target_ulong);
+ } else if (n < CPU_NB_REGS32) {
+ n = gpr_map32[n];
+ env->regs[n] &= ~0xffffffffUL;
+ env->regs[n] |= (uint32_t)ldl_p(mem_buf);
+ return 4;
+ }
+ } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
+#ifdef USE_X86LDOUBLE
+ /* FIXME: byteswap float values - after fixing fpregs layout. */
+ memcpy(&env->fpregs[n - IDX_FP_REGS], mem_buf, 10);
+#endif
+ return 10;
+ } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
+ n -= IDX_XMM_REGS;
+ if (n < CPU_NB_REGS32 ||
+ (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
+ env->xmm_regs[n].ZMM_Q(0) = ldq_p(mem_buf);
+ env->xmm_regs[n].ZMM_Q(1) = ldq_p(mem_buf + 8);
+ return 16;
+ }
+ } else {
+ switch (n) {
+ case IDX_IP_REG:
+ if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
+ env->eip = ldq_p(mem_buf);
+ return 8;
+ } else {
+ env->eip &= ~0xffffffffUL;
+ env->eip |= (uint32_t)ldl_p(mem_buf);
+ return 4;
+ }
+ case IDX_FLAGS_REG:
+ env->eflags = ldl_p(mem_buf);
+ return 4;
+
+ case IDX_SEG_REGS:
+ return x86_cpu_gdb_load_seg(cpu, R_CS, mem_buf);
+ case IDX_SEG_REGS + 1:
+ return x86_cpu_gdb_load_seg(cpu, R_SS, mem_buf);
+ case IDX_SEG_REGS + 2:
+ return x86_cpu_gdb_load_seg(cpu, R_DS, mem_buf);
+ case IDX_SEG_REGS + 3:
+ return x86_cpu_gdb_load_seg(cpu, R_ES, mem_buf);
+ case IDX_SEG_REGS + 4:
+ return x86_cpu_gdb_load_seg(cpu, R_FS, mem_buf);
+ case IDX_SEG_REGS + 5:
+ return x86_cpu_gdb_load_seg(cpu, R_GS, mem_buf);
+
+ case IDX_FP_REGS + 8:
+ cpu_set_fpuc(env, ldl_p(mem_buf));
+ return 4;
+ case IDX_FP_REGS + 9:
+ tmp = ldl_p(mem_buf);
+ env->fpstt = (tmp >> 11) & 7;
+ env->fpus = tmp & ~0x3800;
+ return 4;
+ case IDX_FP_REGS + 10: /* ftag */
+ return 4;
+ case IDX_FP_REGS + 11: /* fiseg */
+ return 4;
+ case IDX_FP_REGS + 12: /* fioff */
+ return 4;
+ case IDX_FP_REGS + 13: /* foseg */
+ return 4;
+ case IDX_FP_REGS + 14: /* fooff */
+ return 4;
+ case IDX_FP_REGS + 15: /* fop */
+ return 4;
+
+ case IDX_MXCSR_REG:
+ cpu_set_mxcsr(env, ldl_p(mem_buf));
+ return 4;
+ }
+ }
+ /* Unrecognised register. */
+ return 0;
+}
diff --git a/target/i386/helper.c b/target/i386/helper.c
new file mode 100644
index 0000000000..4ecc0912a4
--- /dev/null
+++ b/target/i386/helper.c
@@ -0,0 +1,1446 @@
+/*
+ * i386 helpers (without register variable usage)
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "sysemu/kvm.h"
+#include "kvm_i386.h"
+#ifndef CONFIG_USER_ONLY
+#include "sysemu/sysemu.h"
+#include "monitor/monitor.h"
+#include "hw/i386/apic_internal.h"
+#endif
+
+static void cpu_x86_version(CPUX86State *env, int *family, int *model)
+{
+ int cpuver = env->cpuid_version;
+
+ if (family == NULL || model == NULL) {
+ return;
+ }
+
+ *family = (cpuver >> 8) & 0x0f;
+ *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
+}
+
+/* Broadcast MCA signal for processors of family 06H, model 0EH (14) and above */
+int cpu_x86_support_mca_broadcast(CPUX86State *env)
+{
+ int family = 0;
+ int model = 0;
+
+ cpu_x86_version(env, &family, &model);
+ if ((family == 6 && model >= 14) || family > 6) {
+ return 1;
+ }
+
+ return 0;
+}
+
+/***********************************************************/
+/* x86 debug */
+
+static const char *cc_op_str[CC_OP_NB] = {
+ "DYNAMIC",
+ "EFLAGS",
+
+ "MULB",
+ "MULW",
+ "MULL",
+ "MULQ",
+
+ "ADDB",
+ "ADDW",
+ "ADDL",
+ "ADDQ",
+
+ "ADCB",
+ "ADCW",
+ "ADCL",
+ "ADCQ",
+
+ "SUBB",
+ "SUBW",
+ "SUBL",
+ "SUBQ",
+
+ "SBBB",
+ "SBBW",
+ "SBBL",
+ "SBBQ",
+
+ "LOGICB",
+ "LOGICW",
+ "LOGICL",
+ "LOGICQ",
+
+ "INCB",
+ "INCW",
+ "INCL",
+ "INCQ",
+
+ "DECB",
+ "DECW",
+ "DECL",
+ "DECQ",
+
+ "SHLB",
+ "SHLW",
+ "SHLL",
+ "SHLQ",
+
+ "SARB",
+ "SARW",
+ "SARL",
+ "SARQ",
+
+ "BMILGB",
+ "BMILGW",
+ "BMILGL",
+ "BMILGQ",
+
+ "ADCX",
+ "ADOX",
+ "ADCOX",
+
+ "CLR",
+};
+
+static void
+cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
+ const char *name, struct SegmentCache *sc)
+{
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_CS64_MASK) {
+ cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
+ sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
+ } else
+#endif
+ {
+ cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
+ (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
+ }
+
+ if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
+ goto done;
+
+ cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
+ if (sc->flags & DESC_S_MASK) {
+ if (sc->flags & DESC_CS_MASK) {
+ cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
+ ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
+ cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
+ (sc->flags & DESC_R_MASK) ? 'R' : '-');
+ } else {
+ cpu_fprintf(f,
+ (sc->flags & DESC_B_MASK || env->hflags & HF_LMA_MASK)
+ ? "DS " : "DS16");
+ cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
+ (sc->flags & DESC_W_MASK) ? 'W' : '-');
+ }
+ cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
+ } else {
+ static const char *sys_type_name[2][16] = {
+ { /* 32 bit mode */
+ "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
+ "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
+ "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
+ "CallGate32", "Reserved", "IntGate32", "TrapGate32"
+ },
+ { /* 64 bit mode */
+ "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
+ "Reserved", "Reserved", "Reserved", "Reserved",
+ "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
+ "Reserved", "IntGate64", "TrapGate64"
+ }
+ };
+ cpu_fprintf(f, "%s",
+ sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
+ [(sc->flags & DESC_TYPE_MASK)
+ >> DESC_TYPE_SHIFT]);
+ }
+done:
+ cpu_fprintf(f, "\n");
+}
+
+#ifndef CONFIG_USER_ONLY
+
+/* No ARRAY_SIZE check is required because
+ * the delivery mode (dm) is a 3-bit field.
+ */
+static inline const char *dm2str(uint32_t dm)
+{
+ static const char *str[] = {
+ "Fixed",
+ "...",
+ "SMI",
+ "...",
+ "NMI",
+ "INIT",
+ "...",
+ "ExtINT"
+ };
+ return str[dm];
+}
+
+static void dump_apic_lvt(FILE *f, fprintf_function cpu_fprintf,
+ const char *name, uint32_t lvt, bool is_timer)
+{
+ uint32_t dm = (lvt & APIC_LVT_DELIV_MOD) >> APIC_LVT_DELIV_MOD_SHIFT;
+ cpu_fprintf(f,
+ "%s\t 0x%08x %s %-5s %-6s %-7s %-12s %-6s",
+ name, lvt,
+ lvt & APIC_LVT_INT_POLARITY ? "active-lo" : "active-hi",
+ lvt & APIC_LVT_LEVEL_TRIGGER ? "level" : "edge",
+ lvt & APIC_LVT_MASKED ? "masked" : "",
+ lvt & APIC_LVT_DELIV_STS ? "pending" : "",
+ !is_timer ?
+ "" : lvt & APIC_LVT_TIMER_PERIODIC ?
+ "periodic" : lvt & APIC_LVT_TIMER_TSCDEADLINE ?
+ "tsc-deadline" : "one-shot",
+ dm2str(dm));
+ if (dm != APIC_DM_NMI) {
+ cpu_fprintf(f, " (vec %u)\n", lvt & APIC_VECTOR_MASK);
+ } else {
+ cpu_fprintf(f, "\n");
+ }
+}
+
+/* No ARRAY_SIZE check is required because
+ * the destination shorthand is a 2-bit field.
+ */
+static inline const char *shorthand2str(uint32_t shorthand)
+{
+ const char *str[] = {
+ "no-shorthand", "self", "all-self", "all"
+ };
+ return str[shorthand];
+}
+
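+/* The timer divide value is encoded in bits 3, 1 and 0 of the divide
+ * configuration register: 0b111 means divide by 1, any other value
+ * divides by 2^(value + 1).
+ */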
+static inline uint8_t divider_conf(uint32_t divide_conf)
+{
+ uint8_t divide_val = ((divide_conf & 0x8) >> 1) | (divide_conf & 0x3);
+
+ return divide_val == 7 ? 1 : 2 << divide_val;
+}
+
+static inline void mask2str(char *str, uint32_t val, uint8_t size)
+{
+ while (size--) {
+ *str++ = (val >> size) & 1 ? '1' : '0';
+ }
+ *str = 0;
+}
+
+#define MAX_LOGICAL_APIC_ID_MASK_SIZE 16
+
+static void dump_apic_icr(FILE *f, fprintf_function cpu_fprintf,
+ APICCommonState *s, CPUX86State *env)
+{
+ uint32_t icr = s->icr[0], icr2 = s->icr[1];
+ uint8_t dest_shorthand = \
+ (icr & APIC_ICR_DEST_SHORT) >> APIC_ICR_DEST_SHORT_SHIFT;
+ bool logical_mod = icr & APIC_ICR_DEST_MOD;
+ char apic_id_str[MAX_LOGICAL_APIC_ID_MASK_SIZE + 1];
+ uint32_t dest_field;
+ bool x2apic;
+
+ cpu_fprintf(f, "ICR\t 0x%08x %s %s %s %s\n",
+ icr,
+ logical_mod ? "logical" : "physical",
+ icr & APIC_ICR_TRIGGER_MOD ? "level" : "edge",
+ icr & APIC_ICR_LEVEL ? "assert" : "de-assert",
+ shorthand2str(dest_shorthand));
+
+ cpu_fprintf(f, "ICR2\t 0x%08x", icr2);
+ if (dest_shorthand != 0) {
+ cpu_fprintf(f, "\n");
+ return;
+ }
+ x2apic = env->features[FEAT_1_ECX] & CPUID_EXT_X2APIC;
+ dest_field = x2apic ? icr2 : icr2 >> APIC_ICR_DEST_SHIFT;
+
+ if (!logical_mod) {
+ if (x2apic) {
+ cpu_fprintf(f, " cpu %u (X2APIC ID)\n", dest_field);
+ } else {
+ cpu_fprintf(f, " cpu %u (APIC ID)\n",
+ dest_field & APIC_LOGDEST_XAPIC_ID);
+ }
+ return;
+ }
+
+ if (s->dest_mode == 0xf) { /* flat mode */
+ mask2str(apic_id_str, icr2 >> APIC_ICR_DEST_SHIFT, 8);
+ cpu_fprintf(f, " mask %s (APIC ID)\n", apic_id_str);
+ } else if (s->dest_mode == 0) { /* cluster mode */
+ if (x2apic) {
+ mask2str(apic_id_str, dest_field & APIC_LOGDEST_X2APIC_ID, 16);
+ cpu_fprintf(f, " cluster %u mask %s (X2APIC ID)\n",
+ dest_field >> APIC_LOGDEST_X2APIC_SHIFT, apic_id_str);
+ } else {
+ mask2str(apic_id_str, dest_field & APIC_LOGDEST_XAPIC_ID, 4);
+ cpu_fprintf(f, " cluster %u mask %s (APIC ID)\n",
+ dest_field >> APIC_LOGDEST_XAPIC_SHIFT, apic_id_str);
+ }
+ }
+}
+
+static void dump_apic_interrupt(FILE *f, fprintf_function cpu_fprintf,
+ const char *name, uint32_t *ireg_tab,
+ uint32_t *tmr_tab)
+{
+ int i, empty = true;
+
+ cpu_fprintf(f, "%s\t ", name);
+ for (i = 0; i < 256; i++) {
+ if (apic_get_bit(ireg_tab, i)) {
+ cpu_fprintf(f, "%u%s ", i,
+ apic_get_bit(tmr_tab, i) ? "(level)" : "");
+ empty = false;
+ }
+ }
+ cpu_fprintf(f, "%s\n", empty ? "(none)" : "");
+}
+
+void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
+ fprintf_function cpu_fprintf, int flags)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ APICCommonState *s = APIC_COMMON(cpu->apic_state);
+ uint32_t *lvt = s->lvt;
+
+ cpu_fprintf(f, "dumping local APIC state for CPU %-2u\n\n",
+ CPU(cpu)->cpu_index);
+ dump_apic_lvt(f, cpu_fprintf, "LVT0", lvt[APIC_LVT_LINT0], false);
+ dump_apic_lvt(f, cpu_fprintf, "LVT1", lvt[APIC_LVT_LINT1], false);
+ dump_apic_lvt(f, cpu_fprintf, "LVTPC", lvt[APIC_LVT_PERFORM], false);
+ dump_apic_lvt(f, cpu_fprintf, "LVTERR", lvt[APIC_LVT_ERROR], false);
+ dump_apic_lvt(f, cpu_fprintf, "LVTTHMR", lvt[APIC_LVT_THERMAL], false);
+ dump_apic_lvt(f, cpu_fprintf, "LVTT", lvt[APIC_LVT_TIMER], true);
+
+ cpu_fprintf(f, "Timer\t DCR=0x%x (divide by %u) initial_count = %u\n",
+ s->divide_conf & APIC_DCR_MASK,
+ divider_conf(s->divide_conf),
+ s->initial_count);
+
+ cpu_fprintf(f, "SPIV\t 0x%08x APIC %s, focus=%s, spurious vec %u\n",
+ s->spurious_vec,
+ s->spurious_vec & APIC_SPURIO_ENABLED ? "enabled" : "disabled",
+ s->spurious_vec & APIC_SPURIO_FOCUS ? "on" : "off",
+ s->spurious_vec & APIC_VECTOR_MASK);
+
+ dump_apic_icr(f, cpu_fprintf, s, &cpu->env);
+
+ cpu_fprintf(f, "ESR\t 0x%08x\n", s->esr);
+
+ dump_apic_interrupt(f, cpu_fprintf, "ISR", s->isr, s->tmr);
+ dump_apic_interrupt(f, cpu_fprintf, "IRR", s->irr, s->tmr);
+
+ cpu_fprintf(f, "\nAPR 0x%02x TPR 0x%02x DFR 0x%02x LDR 0x%02x",
+ s->arb_id, s->tpr, s->dest_mode, s->log_dest);
+ if (s->dest_mode == 0) {
+ cpu_fprintf(f, "(cluster %u: id %u)",
+ s->log_dest >> APIC_LOGDEST_XAPIC_SHIFT,
+ s->log_dest & APIC_LOGDEST_XAPIC_ID);
+ }
+ cpu_fprintf(f, " PPR 0x%02x\n", apic_get_ppr(s));
+}
+#else
+void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
+ fprintf_function cpu_fprintf, int flags)
+{
+}
+#endif /* !CONFIG_USER_ONLY */
+
+#define DUMP_CODE_BYTES_TOTAL 50
+#define DUMP_CODE_BYTES_BACKWARD 20
+
+void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
+ int flags)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ int eflags, i, nb;
+ char cc_op_name[32];
+ static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
+
+ eflags = cpu_compute_eflags(env);
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_CS64_MASK) {
+ cpu_fprintf(f,
+ "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
+ "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
+ "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
+ "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
+ "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
+ env->regs[R_EAX],
+ env->regs[R_EBX],
+ env->regs[R_ECX],
+ env->regs[R_EDX],
+ env->regs[R_ESI],
+ env->regs[R_EDI],
+ env->regs[R_EBP],
+ env->regs[R_ESP],
+ env->regs[8],
+ env->regs[9],
+ env->regs[10],
+ env->regs[11],
+ env->regs[12],
+ env->regs[13],
+ env->regs[14],
+ env->regs[15],
+ env->eip, eflags,
+ eflags & DF_MASK ? 'D' : '-',
+ eflags & CC_O ? 'O' : '-',
+ eflags & CC_S ? 'S' : '-',
+ eflags & CC_Z ? 'Z' : '-',
+ eflags & CC_A ? 'A' : '-',
+ eflags & CC_P ? 'P' : '-',
+ eflags & CC_C ? 'C' : '-',
+ env->hflags & HF_CPL_MASK,
+ (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
+ (env->a20_mask >> 20) & 1,
+ (env->hflags >> HF_SMM_SHIFT) & 1,
+ cs->halted);
+ } else
+#endif
+ {
+ cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
+ "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
+ "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
+ (uint32_t)env->regs[R_EAX],
+ (uint32_t)env->regs[R_EBX],
+ (uint32_t)env->regs[R_ECX],
+ (uint32_t)env->regs[R_EDX],
+ (uint32_t)env->regs[R_ESI],
+ (uint32_t)env->regs[R_EDI],
+ (uint32_t)env->regs[R_EBP],
+ (uint32_t)env->regs[R_ESP],
+ (uint32_t)env->eip, eflags,
+ eflags & DF_MASK ? 'D' : '-',
+ eflags & CC_O ? 'O' : '-',
+ eflags & CC_S ? 'S' : '-',
+ eflags & CC_Z ? 'Z' : '-',
+ eflags & CC_A ? 'A' : '-',
+ eflags & CC_P ? 'P' : '-',
+ eflags & CC_C ? 'C' : '-',
+ env->hflags & HF_CPL_MASK,
+ (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
+ (env->a20_mask >> 20) & 1,
+ (env->hflags >> HF_SMM_SHIFT) & 1,
+ cs->halted);
+ }
+
+ for(i = 0; i < 6; i++) {
+ cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
+ &env->segs[i]);
+ }
+ cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
+ cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);
+
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_LMA_MASK) {
+ cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
+ env->gdt.base, env->gdt.limit);
+ cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
+ env->idt.base, env->idt.limit);
+ cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
+ (uint32_t)env->cr[0],
+ env->cr[2],
+ env->cr[3],
+ (uint32_t)env->cr[4]);
+ for(i = 0; i < 4; i++)
+ cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
+ cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
+ env->dr[6], env->dr[7]);
+ } else
+#endif
+ {
+ cpu_fprintf(f, "GDT= %08x %08x\n",
+ (uint32_t)env->gdt.base, env->gdt.limit);
+ cpu_fprintf(f, "IDT= %08x %08x\n",
+ (uint32_t)env->idt.base, env->idt.limit);
+ cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
+ (uint32_t)env->cr[0],
+ (uint32_t)env->cr[2],
+ (uint32_t)env->cr[3],
+ (uint32_t)env->cr[4]);
+ for(i = 0; i < 4; i++) {
+ cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
+ }
+ cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
+ env->dr[6], env->dr[7]);
+ }
+ if (flags & CPU_DUMP_CCOP) {
+ if ((unsigned)env->cc_op < CC_OP_NB)
+ snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
+ else
+ snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_CS64_MASK) {
+ cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
+ env->cc_src, env->cc_dst,
+ cc_op_name);
+ } else
+#endif
+ {
+ cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
+ (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
+ cc_op_name);
+ }
+ }
+ cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
+ if (flags & CPU_DUMP_FPU) {
+ int fptag;
+ fptag = 0;
+ for(i = 0; i < 8; i++) {
+ fptag |= ((!env->fptags[i]) << i);
+ }
+ cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
+ env->fpuc,
+ (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
+ env->fpstt,
+ fptag,
+ env->mxcsr);
+ for(i=0;i<8;i++) {
+ CPU_LDoubleU u;
+ u.d = env->fpregs[i].d;
+ cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
+ i, u.l.lower, u.l.upper);
+ if ((i & 1) == 1)
+ cpu_fprintf(f, "\n");
+ else
+ cpu_fprintf(f, " ");
+ }
+ if (env->hflags & HF_CS64_MASK)
+ nb = 16;
+ else
+ nb = 8;
+ for(i=0;i<nb;i++) {
+ cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
+ i,
+ env->xmm_regs[i].ZMM_L(3),
+ env->xmm_regs[i].ZMM_L(2),
+ env->xmm_regs[i].ZMM_L(1),
+ env->xmm_regs[i].ZMM_L(0));
+ if ((i & 1) == 1)
+ cpu_fprintf(f, "\n");
+ else
+ cpu_fprintf(f, " ");
+ }
+ }
+ if (flags & CPU_DUMP_CODE) {
+ target_ulong base = env->segs[R_CS].base + env->eip;
+ target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
+ uint8_t code;
+ char codestr[3];
+
+ cpu_fprintf(f, "Code=");
+ for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
+ if (cpu_memory_rw_debug(cs, base - offs + i, &code, 1, 0) == 0) {
+ snprintf(codestr, sizeof(codestr), "%02x", code);
+ } else {
+ snprintf(codestr, sizeof(codestr), "??");
+ }
+ cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
+ i == offs ? "<" : "", codestr, i == offs ? ">" : "");
+ }
+ cpu_fprintf(f, "\n");
+ }
+}
+
+/***********************************************************/
+/* x86 mmu */
+/* XXX: add PGE support */
+
+void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
+{
+ CPUX86State *env = &cpu->env;
+
+ a20_state = (a20_state != 0);
+ if (a20_state != ((env->a20_mask >> 20) & 1)) {
+ CPUState *cs = CPU(cpu);
+
+ qemu_log_mask(CPU_LOG_MMU, "A20 update: a20=%d\n", a20_state);
+ /* if the cpu is currently executing code, we must unlink it and
+ all the potentially executing TBs */
+ cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);
+
+ /* when a20 is changed, all the MMU mappings are invalid, so
+ we must flush everything */
+ tlb_flush(cs, 1);
+ env->a20_mask = ~(1 << 20) | (a20_state << 20);
+ }
+}
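
The a20_mask computed here is ANDed into every physical address produced by the page-table walks further down (pml4e_addr, pdpe_addr, pde_addr, ...). A tiny worked example of the wrap-around it models, for illustration only:

    #include <stdint.h>
    #include <assert.h>

    static void a20_example(void)
    {
        uint32_t a20_disabled_mask = ~(1u << 20);   /* a20_state == 0 */

        /* With A20 gated off, bit 20 of any physical address is forced
           to 0, so an access at 1 MB wraps back to 0 (8086 behaviour). */
        assert((0x00100000u & a20_disabled_mask) == 0x00000000u);
        assert((0x000ffff0u & a20_disabled_mask) == 0x000ffff0u);
    }
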
+
+void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
+{
+ X86CPU *cpu = x86_env_get_cpu(env);
+ int pe_state;
+
+ qemu_log_mask(CPU_LOG_MMU, "CR0 update: CR0=0x%08x\n", new_cr0);
+ if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
+ (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
+ tlb_flush(CPU(cpu), 1);
+ }
+
+#ifdef TARGET_X86_64
+ if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
+ (env->efer & MSR_EFER_LME)) {
+ /* enter in long mode */
+ /* XXX: generate an exception */
+ if (!(env->cr[4] & CR4_PAE_MASK))
+ return;
+ env->efer |= MSR_EFER_LMA;
+ env->hflags |= HF_LMA_MASK;
+ } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
+ (env->efer & MSR_EFER_LMA)) {
+ /* exit long mode */
+ env->efer &= ~MSR_EFER_LMA;
+ env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
+ env->eip &= 0xffffffff;
+ }
+#endif
+ env->cr[0] = new_cr0 | CR0_ET_MASK;
+
+ /* update PE flag in hidden flags */
+ pe_state = (env->cr[0] & CR0_PE_MASK);
+ env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
+ /* ensure that ADDSEG is always set in real mode */
+ env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
+ /* update FPU flags */
+ env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
+ ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
+}
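
The last statement of cpu_x86_update_cr0() copies three CR0 bits into hflags with a single shift. That works because CR0.MP, CR0.EM and CR0.TS occupy the contiguous bits 1..3 and the HF_MP/HF_EM/HF_TS flags are laid out contiguously starting at HF_MP_SHIFT, so shifting CR0 left by (HF_MP_SHIFT - 1) lines all three up at once. A minimal sketch of the idea (the concrete HF_MP_SHIFT value is taken as a parameter here rather than from cpu.h):

    #include <stdint.h>

    /* Map CR0 bits 1..3 (MP, EM, TS) onto three contiguous hflags
       starting at hf_mp_shift, mirroring the expression above. */
    static uint32_t fpu_hflags_from_cr0(uint32_t cr0, unsigned hf_mp_shift)
    {
        uint32_t mask = 0x7u << hf_mp_shift;        /* HF_MP | HF_EM | HF_TS */
        return (cr0 << (hf_mp_shift - 1)) & mask;
    }
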
+
+/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
+ the PDPT */
+void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
+{
+ X86CPU *cpu = x86_env_get_cpu(env);
+
+ env->cr[3] = new_cr3;
+ if (env->cr[0] & CR0_PG_MASK) {
+ qemu_log_mask(CPU_LOG_MMU,
+ "CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
+ tlb_flush(CPU(cpu), 0);
+ }
+}
+
+void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
+{
+ X86CPU *cpu = x86_env_get_cpu(env);
+ uint32_t hflags;
+
+#if defined(DEBUG_MMU)
+ printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
+#endif
+ if ((new_cr4 ^ env->cr[4]) &
+ (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
+ CR4_SMEP_MASK | CR4_SMAP_MASK)) {
+ tlb_flush(CPU(cpu), 1);
+ }
+
+ /* Clear bits we're going to recompute. */
+ hflags = env->hflags & ~(HF_OSFXSR_MASK | HF_SMAP_MASK);
+
+ /* SSE handling */
+ if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
+ new_cr4 &= ~CR4_OSFXSR_MASK;
+ }
+ if (new_cr4 & CR4_OSFXSR_MASK) {
+ hflags |= HF_OSFXSR_MASK;
+ }
+
+ if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
+ new_cr4 &= ~CR4_SMAP_MASK;
+ }
+ if (new_cr4 & CR4_SMAP_MASK) {
+ hflags |= HF_SMAP_MASK;
+ }
+
+ if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) {
+ new_cr4 &= ~CR4_PKE_MASK;
+ }
+
+ env->cr[4] = new_cr4;
+ env->hflags = hflags;
+
+ cpu_sync_bndcs_hflags(env);
+}
+
+#if defined(CONFIG_USER_ONLY)
+
+int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
+ int is_write, int mmu_idx)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ /* user mode only emulation */
+ is_write &= 1;
+ env->cr[2] = addr;
+ env->error_code = (is_write << PG_ERROR_W_BIT);
+ env->error_code |= PG_ERROR_U_MASK;
+ cs->exception_index = EXCP0E_PAGE;
+ env->exception_is_int = 0;
+ env->exception_next_eip = -1;
+ return 1;
+}
+
+#else
+
+/* return value:
+ * -1 = cannot handle fault
+ * 0 = nothing more to do
+ * 1 = generate PF fault
+ */
+int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
+ int is_write1, int mmu_idx)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ uint64_t ptep, pte;
+ target_ulong pde_addr, pte_addr;
+ int error_code = 0;
+ int is_dirty, prot, page_size, is_write, is_user;
+ hwaddr paddr;
+ uint64_t rsvd_mask = PG_HI_RSVD_MASK;
+ uint32_t page_offset;
+ target_ulong vaddr;
+
+ is_user = mmu_idx == MMU_USER_IDX;
+#if defined(DEBUG_MMU)
+ printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
+ addr, is_write1, is_user, env->eip);
+#endif
+ is_write = is_write1 & 1;
+
+ if (!(env->cr[0] & CR0_PG_MASK)) {
+ pte = addr;
+#ifdef TARGET_X86_64
+ if (!(env->hflags & HF_LMA_MASK)) {
+ /* Without long mode we can only address 32 bits in real mode */
+ pte = (uint32_t)pte;
+ }
+#endif
+ prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ page_size = 4096;
+ goto do_mapping;
+ }
+
+ if (!(env->efer & MSR_EFER_NXE)) {
+ rsvd_mask |= PG_NX_MASK;
+ }
+
+ if (env->cr[4] & CR4_PAE_MASK) {
+ uint64_t pde, pdpe;
+ target_ulong pdpe_addr;
+
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_LMA_MASK) {
+ uint64_t pml4e_addr, pml4e;
+ int32_t sext;
+
+ /* test virtual address sign extension */
+ sext = (int64_t)addr >> 47;
+ if (sext != 0 && sext != -1) {
+ env->error_code = 0;
+ cs->exception_index = EXCP0D_GPF;
+ return 1;
+ }
+
+ pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
+ env->a20_mask;
+ pml4e = x86_ldq_phys(cs, pml4e_addr);
+ if (!(pml4e & PG_PRESENT_MASK)) {
+ goto do_fault;
+ }
+ if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
+ goto do_fault_rsvd;
+ }
+ if (!(pml4e & PG_ACCESSED_MASK)) {
+ pml4e |= PG_ACCESSED_MASK;
+ x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
+ }
+ ptep = pml4e ^ PG_NX_MASK;
+ pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
+ env->a20_mask;
+ pdpe = x86_ldq_phys(cs, pdpe_addr);
+ if (!(pdpe & PG_PRESENT_MASK)) {
+ goto do_fault;
+ }
+ if (pdpe & rsvd_mask) {
+ goto do_fault_rsvd;
+ }
+ ptep &= pdpe ^ PG_NX_MASK;
+ if (!(pdpe & PG_ACCESSED_MASK)) {
+ pdpe |= PG_ACCESSED_MASK;
+ x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
+ }
+ if (pdpe & PG_PSE_MASK) {
+ /* 1 GB page */
+ page_size = 1024 * 1024 * 1024;
+ pte_addr = pdpe_addr;
+ pte = pdpe;
+ goto do_check_protect;
+ }
+ } else
+#endif
+ {
+ /* XXX: load them when cr3 is loaded ? */
+ pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
+ env->a20_mask;
+ pdpe = x86_ldq_phys(cs, pdpe_addr);
+ if (!(pdpe & PG_PRESENT_MASK)) {
+ goto do_fault;
+ }
+ rsvd_mask |= PG_HI_USER_MASK;
+ if (pdpe & (rsvd_mask | PG_NX_MASK)) {
+ goto do_fault_rsvd;
+ }
+ ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
+ }
+
+ pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
+ env->a20_mask;
+ pde = x86_ldq_phys(cs, pde_addr);
+ if (!(pde & PG_PRESENT_MASK)) {
+ goto do_fault;
+ }
+ if (pde & rsvd_mask) {
+ goto do_fault_rsvd;
+ }
+ ptep &= pde ^ PG_NX_MASK;
+ if (pde & PG_PSE_MASK) {
+ /* 2 MB page */
+ page_size = 2048 * 1024;
+ pte_addr = pde_addr;
+ pte = pde;
+ goto do_check_protect;
+ }
+ /* 4 KB page */
+ if (!(pde & PG_ACCESSED_MASK)) {
+ pde |= PG_ACCESSED_MASK;
+ x86_stl_phys_notdirty(cs, pde_addr, pde);
+ }
+ pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
+ env->a20_mask;
+ pte = x86_ldq_phys(cs, pte_addr);
+ if (!(pte & PG_PRESENT_MASK)) {
+ goto do_fault;
+ }
+ if (pte & rsvd_mask) {
+ goto do_fault_rsvd;
+ }
+ /* combine pde and pte nx, user and rw protections */
+ ptep &= pte ^ PG_NX_MASK;
+ page_size = 4096;
+ } else {
+ uint32_t pde;
+
+ /* page directory entry */
+ pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
+ env->a20_mask;
+ pde = x86_ldl_phys(cs, pde_addr);
+ if (!(pde & PG_PRESENT_MASK)) {
+ goto do_fault;
+ }
+ ptep = pde | PG_NX_MASK;
+
+ /* if PSE bit is set, then we use a 4MB page */
+ if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
+ page_size = 4096 * 1024;
+ pte_addr = pde_addr;
+
+ /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
+ * Leave bits 20-13 in place for setting accessed/dirty bits below.
+ */
+ pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
+ rsvd_mask = 0x200000;
+ goto do_check_protect_pse36;
+ }
+
+ if (!(pde & PG_ACCESSED_MASK)) {
+ pde |= PG_ACCESSED_MASK;
+ x86_stl_phys_notdirty(cs, pde_addr, pde);
+ }
+
+ /* page table entry */
+ pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
+ env->a20_mask;
+ pte = x86_ldl_phys(cs, pte_addr);
+ if (!(pte & PG_PRESENT_MASK)) {
+ goto do_fault;
+ }
+ /* combine pde and pte user and rw protections */
+ ptep &= pte | PG_NX_MASK;
+ page_size = 4096;
+ rsvd_mask = 0;
+ }
+
+do_check_protect:
+ rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
+do_check_protect_pse36:
+ if (pte & rsvd_mask) {
+ goto do_fault_rsvd;
+ }
+ ptep ^= PG_NX_MASK;
+
+ /* can the page be put in the TLB? prot will tell us */
+ if (is_user && !(ptep & PG_USER_MASK)) {
+ goto do_fault_protect;
+ }
+
+ prot = 0;
+ if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) {
+ prot |= PAGE_READ;
+ if ((ptep & PG_RW_MASK) || (!is_user && !(env->cr[0] & CR0_WP_MASK))) {
+ prot |= PAGE_WRITE;
+ }
+ }
+ if (!(ptep & PG_NX_MASK) &&
+ (mmu_idx == MMU_USER_IDX ||
+ !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
+ prot |= PAGE_EXEC;
+ }
+ if ((env->cr[4] & CR4_PKE_MASK) && (env->hflags & HF_LMA_MASK) &&
+ (ptep & PG_USER_MASK) && env->pkru) {
+ uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT;
+ uint32_t pkru_ad = (env->pkru >> pk * 2) & 1;
+ uint32_t pkru_wd = (env->pkru >> pk * 2) & 2;
+ uint32_t pkru_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+
+ if (pkru_ad) {
+ pkru_prot &= ~(PAGE_READ | PAGE_WRITE);
+ } else if (pkru_wd && (is_user || env->cr[0] & CR0_WP_MASK)) {
+ pkru_prot &= ~PAGE_WRITE;
+ }
+
+ prot &= pkru_prot;
+ if ((pkru_prot & (1 << is_write1)) == 0) {
+ assert(is_write1 != 2);
+ error_code |= PG_ERROR_PK_MASK;
+ goto do_fault_protect;
+ }
+ }
+
+ if ((prot & (1 << is_write1)) == 0) {
+ goto do_fault_protect;
+ }
+
+ /* yes, it can! */
+ is_dirty = is_write && !(pte & PG_DIRTY_MASK);
+ if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
+ pte |= PG_ACCESSED_MASK;
+ if (is_dirty) {
+ pte |= PG_DIRTY_MASK;
+ }
+ x86_stl_phys_notdirty(cs, pte_addr, pte);
+ }
+
+ if (!(pte & PG_DIRTY_MASK)) {
+ /* only set write access if already dirty... otherwise wait
+ for dirty access */
+ assert(!is_write);
+ prot &= ~PAGE_WRITE;
+ }
+
+ do_mapping:
+ pte = pte & env->a20_mask;
+
+ /* align to page_size */
+ pte &= PG_ADDRESS_MASK & ~(page_size - 1);
+
+ /* Even for 4 MB (or larger) pages, we map only one 4 KB page in the
+ TLB to avoid filling it too fast */
+ vaddr = addr & TARGET_PAGE_MASK;
+ page_offset = vaddr & (page_size - 1);
+ paddr = pte + page_offset;
+
+ assert(prot & (1 << is_write1));
+ tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
+ prot, mmu_idx, page_size);
+ return 0;
+ do_fault_rsvd:
+ error_code |= PG_ERROR_RSVD_MASK;
+ do_fault_protect:
+ error_code |= PG_ERROR_P_MASK;
+ do_fault:
+ error_code |= (is_write << PG_ERROR_W_BIT);
+ if (is_user)
+ error_code |= PG_ERROR_U_MASK;
+ if (is_write1 == 2 &&
+ (((env->efer & MSR_EFER_NXE) &&
+ (env->cr[4] & CR4_PAE_MASK)) ||
+ (env->cr[4] & CR4_SMEP_MASK)))
+ error_code |= PG_ERROR_I_D_MASK;
+ if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
+ /* cr2 is not modified in case of exceptions */
+ x86_stq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
+ addr);
+ } else {
+ env->cr[2] = addr;
+ }
+ env->error_code = error_code;
+ cs->exception_index = EXCP0E_PAGE;
+ return 1;
+}
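
For reference, the long-mode branch of the walk above slices a canonical 48-bit linear address into four 9-bit table indices plus a 12-bit page offset; that is exactly what the (addr >> 39/30/21/12) & 0x1ff expressions compute. A small stand-alone sketch of the decomposition (illustration only):

    #include <stdint.h>

    struct lm_va {
        unsigned pml4, pdpt, pd, pt, offset;
    };

    static struct lm_va split_long_mode_va(uint64_t va)
    {
        struct lm_va s;
        s.pml4   = (va >> 39) & 0x1ff;   /* PML4 index                    */
        s.pdpt   = (va >> 30) & 0x1ff;   /* page-directory-pointer index  */
        s.pd     = (va >> 21) & 0x1ff;   /* page-directory index          */
        s.pt     = (va >> 12) & 0x1ff;   /* page-table index              */
        s.offset = va & 0xfff;           /* offset within a 4 KB page     */
        return s;
    }
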
+
+hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ target_ulong pde_addr, pte_addr;
+ uint64_t pte;
+ uint32_t page_offset;
+ int page_size;
+
+ if (!(env->cr[0] & CR0_PG_MASK)) {
+ pte = addr & env->a20_mask;
+ page_size = 4096;
+ } else if (env->cr[4] & CR4_PAE_MASK) {
+ target_ulong pdpe_addr;
+ uint64_t pde, pdpe;
+
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_LMA_MASK) {
+ uint64_t pml4e_addr, pml4e;
+ int32_t sext;
+
+ /* test virtual address sign extension */
+ sext = (int64_t)addr >> 47;
+ if (sext != 0 && sext != -1) {
+ return -1;
+ }
+ pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
+ env->a20_mask;
+ pml4e = x86_ldq_phys(cs, pml4e_addr);
+ if (!(pml4e & PG_PRESENT_MASK)) {
+ return -1;
+ }
+ pdpe_addr = ((pml4e & PG_ADDRESS_MASK) +
+ (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
+ pdpe = x86_ldq_phys(cs, pdpe_addr);
+ if (!(pdpe & PG_PRESENT_MASK)) {
+ return -1;
+ }
+ if (pdpe & PG_PSE_MASK) {
+ page_size = 1024 * 1024 * 1024;
+ pte = pdpe;
+ goto out;
+ }
+
+ } else
+#endif
+ {
+ pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
+ env->a20_mask;
+ pdpe = x86_ldq_phys(cs, pdpe_addr);
+ if (!(pdpe & PG_PRESENT_MASK))
+ return -1;
+ }
+
+ pde_addr = ((pdpe & PG_ADDRESS_MASK) +
+ (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
+ pde = x86_ldq_phys(cs, pde_addr);
+ if (!(pde & PG_PRESENT_MASK)) {
+ return -1;
+ }
+ if (pde & PG_PSE_MASK) {
+ /* 2 MB page */
+ page_size = 2048 * 1024;
+ pte = pde;
+ } else {
+ /* 4 KB page */
+ pte_addr = ((pde & PG_ADDRESS_MASK) +
+ (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
+ page_size = 4096;
+ pte = x86_ldq_phys(cs, pte_addr);
+ }
+ if (!(pte & PG_PRESENT_MASK)) {
+ return -1;
+ }
+ } else {
+ uint32_t pde;
+
+ /* page directory entry */
+ pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
+ pde = x86_ldl_phys(cs, pde_addr);
+ if (!(pde & PG_PRESENT_MASK))
+ return -1;
+ if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
+ pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
+ page_size = 4096 * 1024;
+ } else {
+ /* page table entry */
+ pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
+ pte = x86_ldl_phys(cs, pte_addr);
+ if (!(pte & PG_PRESENT_MASK)) {
+ return -1;
+ }
+ page_size = 4096;
+ }
+ pte = pte & env->a20_mask;
+ }
+
+#ifdef TARGET_X86_64
+out:
+#endif
+ pte &= PG_ADDRESS_MASK & ~(page_size - 1);
+ page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
+ return pte | page_offset;
+}
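
Both walks handle 4 MB pages with PSE-36 via the expression pde | ((pde & 0x1fe000) << (32 - 13)): on non-PAE page directories, PDE bits 13..20 supply physical address bits 32..39 of a large page. A short illustration of that reassembly (the example PDE value is made up):

    #include <stdint.h>

    /* Physical base of a 4 MB PSE-36 page described by a 32-bit PDE. */
    static uint64_t pse36_phys_base(uint32_t pde)
    {
        return (uint64_t)(pde & 0xffc00000u)                  /* phys bits 31..22 */
             | ((uint64_t)(pde & 0x001fe000u) << (32 - 13));  /* phys bits 39..32 */
    }

    /* e.g. pde = 0xabc00000 | (0x12 << 13) maps to base 0x12abc00000. */
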
+
+typedef struct MCEInjectionParams {
+ Monitor *mon;
+ int bank;
+ uint64_t status;
+ uint64_t mcg_status;
+ uint64_t addr;
+ uint64_t misc;
+ int flags;
+} MCEInjectionParams;
+
+static void do_inject_x86_mce(CPUState *cs, run_on_cpu_data data)
+{
+ MCEInjectionParams *params = data.host_ptr;
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *cenv = &cpu->env;
+ uint64_t *banks = cenv->mce_banks + 4 * params->bank;
+
+ cpu_synchronize_state(cs);
+
+ /*
+ * If there is an MCE exception being processed, ignore this SRAO MCE
+ * unless unconditional injection was requested.
+ */
+ if (!(params->flags & MCE_INJECT_UNCOND_AO)
+ && !(params->status & MCI_STATUS_AR)
+ && (cenv->mcg_status & MCG_STATUS_MCIP)) {
+ return;
+ }
+
+ if (params->status & MCI_STATUS_UC) {
+ /*
+ * if MSR_MCG_CTL is not all 1s, the uncorrected error
+ * reporting is disabled
+ */
+ if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
+ monitor_printf(params->mon,
+ "CPU %d: Uncorrected error reporting disabled\n",
+ cs->cpu_index);
+ return;
+ }
+
+ /*
+ * if MSR_MCi_CTL is not all 1s, the uncorrected error
+ * reporting is disabled for the bank
+ */
+ if (banks[0] != ~(uint64_t)0) {
+ monitor_printf(params->mon,
+ "CPU %d: Uncorrected error reporting disabled for"
+ " bank %d\n",
+ cs->cpu_index, params->bank);
+ return;
+ }
+
+ if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
+ !(cenv->cr[4] & CR4_MCE_MASK)) {
+ monitor_printf(params->mon,
+ "CPU %d: Previous MCE still in progress, raising"
+ " triple fault\n",
+ cs->cpu_index);
+ qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
+ qemu_system_reset_request();
+ return;
+ }
+ if (banks[1] & MCI_STATUS_VAL) {
+ params->status |= MCI_STATUS_OVER;
+ }
+ banks[2] = params->addr;
+ banks[3] = params->misc;
+ cenv->mcg_status = params->mcg_status;
+ banks[1] = params->status;
+ cpu_interrupt(cs, CPU_INTERRUPT_MCE);
+ } else if (!(banks[1] & MCI_STATUS_VAL)
+ || !(banks[1] & MCI_STATUS_UC)) {
+ if (banks[1] & MCI_STATUS_VAL) {
+ params->status |= MCI_STATUS_OVER;
+ }
+ banks[2] = params->addr;
+ banks[3] = params->misc;
+ banks[1] = params->status;
+ } else {
+ banks[1] |= MCI_STATUS_OVER;
+ }
+}
+
+void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
+ uint64_t status, uint64_t mcg_status, uint64_t addr,
+ uint64_t misc, int flags)
+{
+ CPUState *cs = CPU(cpu);
+ CPUX86State *cenv = &cpu->env;
+ MCEInjectionParams params = {
+ .mon = mon,
+ .bank = bank,
+ .status = status,
+ .mcg_status = mcg_status,
+ .addr = addr,
+ .misc = misc,
+ .flags = flags,
+ };
+ unsigned bank_num = cenv->mcg_cap & 0xff;
+
+ if (!cenv->mcg_cap) {
+ monitor_printf(mon, "MCE injection not supported\n");
+ return;
+ }
+ if (bank >= bank_num) {
+ monitor_printf(mon, "Invalid MCE bank number\n");
+ return;
+ }
+ if (!(status & MCI_STATUS_VAL)) {
+ monitor_printf(mon, "Invalid MCE status code\n");
+ return;
+ }
+ if ((flags & MCE_INJECT_BROADCAST)
+ && !cpu_x86_support_mca_broadcast(cenv)) {
+ monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
+ return;
+ }
+
+ run_on_cpu(cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
+ if (flags & MCE_INJECT_BROADCAST) {
+ CPUState *other_cs;
+
+ params.bank = 1;
+ params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
+ params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
+ params.addr = 0;
+ params.misc = 0;
+ CPU_FOREACH(other_cs) {
+ if (other_cs == cs) {
+ continue;
+ }
+ run_on_cpu(other_cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
+ }
+ }
+}
+
+void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
+{
+ X86CPU *cpu = x86_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
+ if (kvm_enabled()) {
+ env->tpr_access_type = access;
+
+ cpu_interrupt(cs, CPU_INTERRUPT_TPR);
+ } else {
+ cpu_restore_state(cs, cs->mem_io_pc);
+
+ apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
+ }
+}
+#endif /* !CONFIG_USER_ONLY */
+
+int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
+ target_ulong *base, unsigned int *limit,
+ unsigned int *flags)
+{
+ X86CPU *cpu = x86_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ SegmentCache *dt;
+ target_ulong ptr;
+ uint32_t e1, e2;
+ int index;
+
+ if (selector & 0x4)
+ dt = &env->ldt;
+ else
+ dt = &env->gdt;
+ index = selector & ~7;
+ ptr = dt->base + index;
+ if ((index + 7) > dt->limit
+ || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
+ || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
+ return 0;
+
+ *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
+ *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
+ if (e2 & DESC_G_MASK)
+ *limit = (*limit << 12) | 0xfff;
+ *flags = e2;
+
+ return 1;
+}
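
As a quick sanity check of the decoding above, the classic flat 32-bit code segment descriptor (e1 = 0x0000ffff, e2 = 0x00cf9a00) yields base 0 and, with the granularity bit set, an effective limit of 0xffffffff:

    #include <stdint.h>
    #include <assert.h>

    static void descr_decode_example(void)
    {
        uint32_t e1 = 0x0000ffff, e2 = 0x00cf9a00;   /* flat 4 GB code segment */
        uint32_t base  = (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
        uint32_t limit = (e1 & 0xffff) | (e2 & 0x000f0000);

        if (e2 & (1u << 23)) {            /* granularity bit (DESC_G_MASK) */
            limit = (limit << 12) | 0xfff;
        }
        assert(base == 0 && limit == 0xffffffff);
    }
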
+
+#if !defined(CONFIG_USER_ONLY)
+void do_cpu_init(X86CPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ CPUX86State *env = &cpu->env;
+ CPUX86State *save = g_new(CPUX86State, 1);
+ int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;
+
+ *save = *env;
+
+ cpu_reset(cs);
+ cs->interrupt_request = sipi;
+ memcpy(&env->start_init_save, &save->start_init_save,
+ offsetof(CPUX86State, end_init_save) -
+ offsetof(CPUX86State, start_init_save));
+ g_free(save);
+
+ if (kvm_enabled()) {
+ kvm_arch_do_init_vcpu(cpu);
+ }
+ apic_init_reset(cpu->apic_state);
+}
+
+void do_cpu_sipi(X86CPU *cpu)
+{
+ apic_sipi(cpu->apic_state);
+}
+#else
+void do_cpu_init(X86CPU *cpu)
+{
+}
+void do_cpu_sipi(X86CPU *cpu)
+{
+}
+#endif
+
+/* Frob eflags into and out of the CPU temporary format. */
+
+void x86_cpu_exec_enter(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
+ env->df = 1 - (2 * ((env->eflags >> 10) & 1));
+ CC_OP = CC_OP_EFLAGS;
+ env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
+}
+
+void x86_cpu_exec_exit(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ env->eflags = cpu_compute_eflags(env);
+}
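
The "frobbed" representation keeps the direction flag as a step value: env->df = 1 - 2*DF is +1 when DF is clear and -1 when DF is set, so string instructions can advance their index registers by df * operand_size without testing EFLAGS. A trivial illustration of the arithmetic (DF is bit 10 of EFLAGS):

    static int df_step(unsigned int eflags)
    {
        return 1 - 2 * ((eflags >> 10) & 1);   /* DF clear -> +1, DF set -> -1 */
    }
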
+
+#ifndef CONFIG_USER_ONLY
+uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ return address_space_ldub(cs->as, addr,
+ cpu_get_mem_attrs(env),
+ NULL);
+}
+
+uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ return address_space_lduw(cs->as, addr,
+ cpu_get_mem_attrs(env),
+ NULL);
+}
+
+uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ return address_space_ldl(cs->as, addr,
+ cpu_get_mem_attrs(env),
+ NULL);
+}
+
+uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ return address_space_ldq(cs->as, addr,
+ cpu_get_mem_attrs(env),
+ NULL);
+}
+
+void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ address_space_stb(cs->as, addr, val,
+ cpu_get_mem_attrs(env),
+ NULL);
+}
+
+void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ address_space_stl_notdirty(cs->as, addr, val,
+ cpu_get_mem_attrs(env),
+ NULL);
+}
+
+void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ address_space_stw(cs->as, addr, val,
+ cpu_get_mem_attrs(env),
+ NULL);
+}
+
+void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ address_space_stl(cs->as, addr, val,
+ cpu_get_mem_attrs(env),
+ NULL);
+}
+
+void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ address_space_stq(cs->as, addr, val,
+ cpu_get_mem_attrs(env),
+ NULL);
+}
+#endif
diff --git a/target/i386/helper.h b/target/i386/helper.h
new file mode 100644
index 0000000000..4e859eba9d
--- /dev/null
+++ b/target/i386/helper.h
@@ -0,0 +1,230 @@
+DEF_HELPER_FLAGS_4(cc_compute_all, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl, int)
+DEF_HELPER_FLAGS_4(cc_compute_c, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl, int)
+
+DEF_HELPER_3(write_eflags, void, env, tl, i32)
+DEF_HELPER_1(read_eflags, tl, env)
+DEF_HELPER_2(divb_AL, void, env, tl)
+DEF_HELPER_2(idivb_AL, void, env, tl)
+DEF_HELPER_2(divw_AX, void, env, tl)
+DEF_HELPER_2(idivw_AX, void, env, tl)
+DEF_HELPER_2(divl_EAX, void, env, tl)
+DEF_HELPER_2(idivl_EAX, void, env, tl)
+#ifdef TARGET_X86_64
+DEF_HELPER_2(divq_EAX, void, env, tl)
+DEF_HELPER_2(idivq_EAX, void, env, tl)
+#endif
+DEF_HELPER_FLAGS_2(cr4_testbit, TCG_CALL_NO_WG, void, env, i32)
+
+DEF_HELPER_FLAGS_2(bndck, TCG_CALL_NO_WG, void, env, i32)
+DEF_HELPER_FLAGS_3(bndldx32, TCG_CALL_NO_WG, i64, env, tl, tl)
+DEF_HELPER_FLAGS_3(bndldx64, TCG_CALL_NO_WG, i64, env, tl, tl)
+DEF_HELPER_FLAGS_5(bndstx32, TCG_CALL_NO_WG, void, env, tl, tl, i64, i64)
+DEF_HELPER_FLAGS_5(bndstx64, TCG_CALL_NO_WG, void, env, tl, tl, i64, i64)
+DEF_HELPER_1(bnd_jmp, void, env)
+
+DEF_HELPER_2(aam, void, env, int)
+DEF_HELPER_2(aad, void, env, int)
+DEF_HELPER_1(aaa, void, env)
+DEF_HELPER_1(aas, void, env)
+DEF_HELPER_1(daa, void, env)
+DEF_HELPER_1(das, void, env)
+
+DEF_HELPER_2(lsl, tl, env, tl)
+DEF_HELPER_2(lar, tl, env, tl)
+DEF_HELPER_2(verr, void, env, tl)
+DEF_HELPER_2(verw, void, env, tl)
+DEF_HELPER_2(lldt, void, env, int)
+DEF_HELPER_2(ltr, void, env, int)
+DEF_HELPER_3(load_seg, void, env, int, int)
+DEF_HELPER_4(ljmp_protected, void, env, int, tl, tl)
+DEF_HELPER_5(lcall_real, void, env, int, tl, int, int)
+DEF_HELPER_5(lcall_protected, void, env, int, tl, int, tl)
+DEF_HELPER_2(iret_real, void, env, int)
+DEF_HELPER_3(iret_protected, void, env, int, int)
+DEF_HELPER_3(lret_protected, void, env, int, int)
+DEF_HELPER_2(read_crN, tl, env, int)
+DEF_HELPER_3(write_crN, void, env, int, tl)
+DEF_HELPER_2(lmsw, void, env, tl)
+DEF_HELPER_1(clts, void, env)
+DEF_HELPER_FLAGS_3(set_dr, TCG_CALL_NO_WG, void, env, int, tl)
+DEF_HELPER_FLAGS_2(get_dr, TCG_CALL_NO_WG, tl, env, int)
+DEF_HELPER_2(invlpg, void, env, tl)
+
+DEF_HELPER_1(sysenter, void, env)
+DEF_HELPER_2(sysexit, void, env, int)
+#ifdef TARGET_X86_64
+DEF_HELPER_2(syscall, void, env, int)
+DEF_HELPER_2(sysret, void, env, int)
+#endif
+DEF_HELPER_2(hlt, void, env, int)
+DEF_HELPER_2(monitor, void, env, tl)
+DEF_HELPER_2(mwait, void, env, int)
+DEF_HELPER_2(pause, void, env, int)
+DEF_HELPER_1(debug, void, env)
+DEF_HELPER_1(reset_rf, void, env)
+DEF_HELPER_3(raise_interrupt, void, env, int, int)
+DEF_HELPER_2(raise_exception, void, env, int)
+DEF_HELPER_1(cli, void, env)
+DEF_HELPER_1(sti, void, env)
+DEF_HELPER_1(clac, void, env)
+DEF_HELPER_1(stac, void, env)
+DEF_HELPER_3(boundw, void, env, tl, int)
+DEF_HELPER_3(boundl, void, env, tl, int)
+DEF_HELPER_1(rsm, void, env)
+DEF_HELPER_2(into, void, env, int)
+DEF_HELPER_2(cmpxchg8b_unlocked, void, env, tl)
+DEF_HELPER_2(cmpxchg8b, void, env, tl)
+#ifdef TARGET_X86_64
+DEF_HELPER_2(cmpxchg16b_unlocked, void, env, tl)
+DEF_HELPER_2(cmpxchg16b, void, env, tl)
+#endif
+DEF_HELPER_1(single_step, void, env)
+DEF_HELPER_1(cpuid, void, env)
+DEF_HELPER_1(rdtsc, void, env)
+DEF_HELPER_1(rdtscp, void, env)
+DEF_HELPER_1(rdpmc, void, env)
+DEF_HELPER_1(rdmsr, void, env)
+DEF_HELPER_1(wrmsr, void, env)
+
+DEF_HELPER_2(check_iob, void, env, i32)
+DEF_HELPER_2(check_iow, void, env, i32)
+DEF_HELPER_2(check_iol, void, env, i32)
+DEF_HELPER_3(outb, void, env, i32, i32)
+DEF_HELPER_2(inb, tl, env, i32)
+DEF_HELPER_3(outw, void, env, i32, i32)
+DEF_HELPER_2(inw, tl, env, i32)
+DEF_HELPER_3(outl, void, env, i32, i32)
+DEF_HELPER_2(inl, tl, env, i32)
+DEF_HELPER_FLAGS_4(bpt_io, TCG_CALL_NO_WG, void, env, i32, i32, tl)
+
+DEF_HELPER_3(svm_check_intercept_param, void, env, i32, i64)
+DEF_HELPER_3(vmexit, void, env, i32, i64)
+DEF_HELPER_4(svm_check_io, void, env, i32, i32, i32)
+DEF_HELPER_3(vmrun, void, env, int, int)
+DEF_HELPER_1(vmmcall, void, env)
+DEF_HELPER_2(vmload, void, env, int)
+DEF_HELPER_2(vmsave, void, env, int)
+DEF_HELPER_1(stgi, void, env)
+DEF_HELPER_1(clgi, void, env)
+DEF_HELPER_1(skinit, void, env)
+DEF_HELPER_2(invlpga, void, env, int)
+
+/* x86 FPU */
+
+DEF_HELPER_2(flds_FT0, void, env, i32)
+DEF_HELPER_2(fldl_FT0, void, env, i64)
+DEF_HELPER_2(fildl_FT0, void, env, s32)
+DEF_HELPER_2(flds_ST0, void, env, i32)
+DEF_HELPER_2(fldl_ST0, void, env, i64)
+DEF_HELPER_2(fildl_ST0, void, env, s32)
+DEF_HELPER_2(fildll_ST0, void, env, s64)
+DEF_HELPER_1(fsts_ST0, i32, env)
+DEF_HELPER_1(fstl_ST0, i64, env)
+DEF_HELPER_1(fist_ST0, s32, env)
+DEF_HELPER_1(fistl_ST0, s32, env)
+DEF_HELPER_1(fistll_ST0, s64, env)
+DEF_HELPER_1(fistt_ST0, s32, env)
+DEF_HELPER_1(fisttl_ST0, s32, env)
+DEF_HELPER_1(fisttll_ST0, s64, env)
+DEF_HELPER_2(fldt_ST0, void, env, tl)
+DEF_HELPER_2(fstt_ST0, void, env, tl)
+DEF_HELPER_1(fpush, void, env)
+DEF_HELPER_1(fpop, void, env)
+DEF_HELPER_1(fdecstp, void, env)
+DEF_HELPER_1(fincstp, void, env)
+DEF_HELPER_2(ffree_STN, void, env, int)
+DEF_HELPER_1(fmov_ST0_FT0, void, env)
+DEF_HELPER_2(fmov_FT0_STN, void, env, int)
+DEF_HELPER_2(fmov_ST0_STN, void, env, int)
+DEF_HELPER_2(fmov_STN_ST0, void, env, int)
+DEF_HELPER_2(fxchg_ST0_STN, void, env, int)
+DEF_HELPER_1(fcom_ST0_FT0, void, env)
+DEF_HELPER_1(fucom_ST0_FT0, void, env)
+DEF_HELPER_1(fcomi_ST0_FT0, void, env)
+DEF_HELPER_1(fucomi_ST0_FT0, void, env)
+DEF_HELPER_1(fadd_ST0_FT0, void, env)
+DEF_HELPER_1(fmul_ST0_FT0, void, env)
+DEF_HELPER_1(fsub_ST0_FT0, void, env)
+DEF_HELPER_1(fsubr_ST0_FT0, void, env)
+DEF_HELPER_1(fdiv_ST0_FT0, void, env)
+DEF_HELPER_1(fdivr_ST0_FT0, void, env)
+DEF_HELPER_2(fadd_STN_ST0, void, env, int)
+DEF_HELPER_2(fmul_STN_ST0, void, env, int)
+DEF_HELPER_2(fsub_STN_ST0, void, env, int)
+DEF_HELPER_2(fsubr_STN_ST0, void, env, int)
+DEF_HELPER_2(fdiv_STN_ST0, void, env, int)
+DEF_HELPER_2(fdivr_STN_ST0, void, env, int)
+DEF_HELPER_1(fchs_ST0, void, env)
+DEF_HELPER_1(fabs_ST0, void, env)
+DEF_HELPER_1(fxam_ST0, void, env)
+DEF_HELPER_1(fld1_ST0, void, env)
+DEF_HELPER_1(fldl2t_ST0, void, env)
+DEF_HELPER_1(fldl2e_ST0, void, env)
+DEF_HELPER_1(fldpi_ST0, void, env)
+DEF_HELPER_1(fldlg2_ST0, void, env)
+DEF_HELPER_1(fldln2_ST0, void, env)
+DEF_HELPER_1(fldz_ST0, void, env)
+DEF_HELPER_1(fldz_FT0, void, env)
+DEF_HELPER_1(fnstsw, i32, env)
+DEF_HELPER_1(fnstcw, i32, env)
+DEF_HELPER_2(fldcw, void, env, i32)
+DEF_HELPER_1(fclex, void, env)
+DEF_HELPER_1(fwait, void, env)
+DEF_HELPER_1(fninit, void, env)
+DEF_HELPER_2(fbld_ST0, void, env, tl)
+DEF_HELPER_2(fbst_ST0, void, env, tl)
+DEF_HELPER_1(f2xm1, void, env)
+DEF_HELPER_1(fyl2x, void, env)
+DEF_HELPER_1(fptan, void, env)
+DEF_HELPER_1(fpatan, void, env)
+DEF_HELPER_1(fxtract, void, env)
+DEF_HELPER_1(fprem1, void, env)
+DEF_HELPER_1(fprem, void, env)
+DEF_HELPER_1(fyl2xp1, void, env)
+DEF_HELPER_1(fsqrt, void, env)
+DEF_HELPER_1(fsincos, void, env)
+DEF_HELPER_1(frndint, void, env)
+DEF_HELPER_1(fscale, void, env)
+DEF_HELPER_1(fsin, void, env)
+DEF_HELPER_1(fcos, void, env)
+DEF_HELPER_3(fstenv, void, env, tl, int)
+DEF_HELPER_3(fldenv, void, env, tl, int)
+DEF_HELPER_3(fsave, void, env, tl, int)
+DEF_HELPER_3(frstor, void, env, tl, int)
+DEF_HELPER_FLAGS_2(fxsave, TCG_CALL_NO_WG, void, env, tl)
+DEF_HELPER_FLAGS_2(fxrstor, TCG_CALL_NO_WG, void, env, tl)
+DEF_HELPER_FLAGS_3(xsave, TCG_CALL_NO_WG, void, env, tl, i64)
+DEF_HELPER_FLAGS_3(xsaveopt, TCG_CALL_NO_WG, void, env, tl, i64)
+DEF_HELPER_FLAGS_3(xrstor, TCG_CALL_NO_WG, void, env, tl, i64)
+DEF_HELPER_FLAGS_2(xgetbv, TCG_CALL_NO_WG, i64, env, i32)
+DEF_HELPER_FLAGS_3(xsetbv, TCG_CALL_NO_WG, void, env, i32, i64)
+DEF_HELPER_FLAGS_2(rdpkru, TCG_CALL_NO_WG, i64, env, i32)
+DEF_HELPER_FLAGS_3(wrpkru, TCG_CALL_NO_WG, void, env, i32, i64)
+
+DEF_HELPER_FLAGS_1(clz, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(ctz, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_2(pdep, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(pext, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+
+/* MMX/SSE */
+
+DEF_HELPER_2(ldmxcsr, void, env, i32)
+DEF_HELPER_1(enter_mmx, void, env)
+DEF_HELPER_1(emms, void, env)
+DEF_HELPER_3(movq, void, env, ptr, ptr)
+
+#define SHIFT 0
+#include "ops_sse_header.h"
+#define SHIFT 1
+#include "ops_sse_header.h"
+
+DEF_HELPER_3(rclb, tl, env, tl, tl)
+DEF_HELPER_3(rclw, tl, env, tl, tl)
+DEF_HELPER_3(rcll, tl, env, tl, tl)
+DEF_HELPER_3(rcrb, tl, env, tl, tl)
+DEF_HELPER_3(rcrw, tl, env, tl, tl)
+DEF_HELPER_3(rcrl, tl, env, tl, tl)
+#ifdef TARGET_X86_64
+DEF_HELPER_3(rclq, tl, env, tl, tl)
+DEF_HELPER_3(rcrq, tl, env, tl, tl)
+#endif
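
Each DEF_HELPER_n(name, ret, args...) entry above declares a TCG helper: n is the argument count, the first type after the name is the return type, and the short type names (env, tl, i32/i64, s32/s64, ptr) stand for CPUX86State *, target_ulong, (un)signed 32/64-bit integers and a host pointer. For example, DEF_HELPER_2(divb_AL, void, env, tl) corresponds to the C definition that appears in int_helper.c below:

    void helper_divb_AL(CPUX86State *env, target_ulong t0);
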
diff --git a/target/i386/hyperv.c b/target/i386/hyperv.c
new file mode 100644
index 0000000000..39a230f119
--- /dev/null
+++ b/target/i386/hyperv.c
@@ -0,0 +1,140 @@
+/*
+ * QEMU KVM Hyper-V support
+ *
+ * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
+ *
+ * Authors:
+ * Andrey Smetanin <asmetanin@virtuozzo.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "hyperv.h"
+#include "standard-headers/asm-x86/hyperv.h"
+
+int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit)
+{
+ CPUX86State *env = &cpu->env;
+
+ switch (exit->type) {
+ case KVM_EXIT_HYPERV_SYNIC:
+ if (!cpu->hyperv_synic) {
+ return -1;
+ }
+
+ /*
+ * For now, just track changes to the SynIC control and msg/evt page MSRs.
+ * When SynIC message/event processing is added in the future, this is
+ * where the message queues will be flushed and the pages remapped.
+ */
+ switch (exit->u.synic.msr) {
+ case HV_X64_MSR_SCONTROL:
+ env->msr_hv_synic_control = exit->u.synic.control;
+ break;
+ case HV_X64_MSR_SIMP:
+ env->msr_hv_synic_msg_page = exit->u.synic.msg_page;
+ break;
+ case HV_X64_MSR_SIEFP:
+ env->msr_hv_synic_evt_page = exit->u.synic.evt_page;
+ break;
+ default:
+ return -1;
+ }
+ return 0;
+ case KVM_EXIT_HYPERV_HCALL: {
+ uint16_t code;
+
+ code = exit->u.hcall.input & 0xffff;
+ switch (code) {
+ case HVCALL_POST_MESSAGE:
+ case HVCALL_SIGNAL_EVENT:
+ default:
+ exit->u.hcall.result = HV_STATUS_INVALID_HYPERCALL_CODE;
+ return 0;
+ }
+ }
+ default:
+ return -1;
+ }
+}
+
+static void kvm_hv_sint_ack_handler(EventNotifier *notifier)
+{
+ HvSintRoute *sint_route = container_of(notifier, HvSintRoute,
+ sint_ack_notifier);
+ event_notifier_test_and_clear(notifier);
+ if (sint_route->sint_ack_clb) {
+ sint_route->sint_ack_clb(sint_route);
+ }
+}
+
+HvSintRoute *kvm_hv_sint_route_create(uint32_t vcpu_id, uint32_t sint,
+ HvSintAckClb sint_ack_clb)
+{
+ HvSintRoute *sint_route;
+ int r, gsi;
+
+ sint_route = g_malloc0(sizeof(*sint_route));
+ r = event_notifier_init(&sint_route->sint_set_notifier, false);
+ if (r) {
+ goto err;
+ }
+
+ r = event_notifier_init(&sint_route->sint_ack_notifier, false);
+ if (r) {
+ goto err_sint_set_notifier;
+ }
+
+ event_notifier_set_handler(&sint_route->sint_ack_notifier, false,
+ kvm_hv_sint_ack_handler);
+
+ gsi = kvm_irqchip_add_hv_sint_route(kvm_state, vcpu_id, sint);
+ if (gsi < 0) {
+ goto err_gsi;
+ }
+
+ r = kvm_irqchip_add_irqfd_notifier_gsi(kvm_state,
+ &sint_route->sint_set_notifier,
+ &sint_route->sint_ack_notifier, gsi);
+ if (r) {
+ goto err_irqfd;
+ }
+ sint_route->gsi = gsi;
+ sint_route->sint_ack_clb = sint_ack_clb;
+ sint_route->vcpu_id = vcpu_id;
+ sint_route->sint = sint;
+
+ return sint_route;
+
+err_irqfd:
+ kvm_irqchip_release_virq(kvm_state, gsi);
+err_gsi:
+ event_notifier_set_handler(&sint_route->sint_ack_notifier, false, NULL);
+ event_notifier_cleanup(&sint_route->sint_ack_notifier);
+err_sint_set_notifier:
+ event_notifier_cleanup(&sint_route->sint_set_notifier);
+err:
+ g_free(sint_route);
+
+ return NULL;
+}
+
+void kvm_hv_sint_route_destroy(HvSintRoute *sint_route)
+{
+ kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state,
+ &sint_route->sint_set_notifier,
+ sint_route->gsi);
+ kvm_irqchip_release_virq(kvm_state, sint_route->gsi);
+ event_notifier_set_handler(&sint_route->sint_ack_notifier, false, NULL);
+ event_notifier_cleanup(&sint_route->sint_ack_notifier);
+ event_notifier_cleanup(&sint_route->sint_set_notifier);
+ g_free(sint_route);
+}
+
+int kvm_hv_sint_route_set_sint(HvSintRoute *sint_route)
+{
+ return event_notifier_set(&sint_route->sint_set_notifier);
+}
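
Taken together, the functions above define the lifecycle of a SynIC interrupt route. A hedged usage sketch based only on those entry points (the vcpu/sint numbers and the callback below are made up for illustration):

    static void example_sint_ack(HvSintRoute *sint_route)
    {
        /* hypothetical callback: runs when the guest acknowledges the SINT */
    }

    static void example_sint_usage(void)
    {
        HvSintRoute *route = kvm_hv_sint_route_create(0 /* vcpu */, 2 /* sint */,
                                                      example_sint_ack);
        if (route) {
            kvm_hv_sint_route_set_sint(route);   /* inject the synthetic interrupt */
            /* ... later ... */
            kvm_hv_sint_route_destroy(route);
        }
    }
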
diff --git a/target/i386/hyperv.h b/target/i386/hyperv.h
new file mode 100644
index 0000000000..0c3b562018
--- /dev/null
+++ b/target/i386/hyperv.h
@@ -0,0 +1,42 @@
+/*
+ * QEMU KVM Hyper-V support
+ *
+ * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
+ *
+ * Authors:
+ * Andrey Smetanin <asmetanin@virtuozzo.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef TARGET_I386_HYPERV_H
+#define TARGET_I386_HYPERV_H
+
+#include "cpu.h"
+#include "sysemu/kvm.h"
+#include "qemu/event_notifier.h"
+
+typedef struct HvSintRoute HvSintRoute;
+typedef void (*HvSintAckClb)(HvSintRoute *sint_route);
+
+struct HvSintRoute {
+ uint32_t sint;
+ uint32_t vcpu_id;
+ int gsi;
+ EventNotifier sint_set_notifier;
+ EventNotifier sint_ack_notifier;
+ HvSintAckClb sint_ack_clb;
+};
+
+int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit);
+
+HvSintRoute *kvm_hv_sint_route_create(uint32_t vcpu_id, uint32_t sint,
+ HvSintAckClb sint_ack_clb);
+
+void kvm_hv_sint_route_destroy(HvSintRoute *sint_route);
+
+int kvm_hv_sint_route_set_sint(HvSintRoute *sint_route);
+
+#endif
diff --git a/target/i386/int_helper.c b/target/i386/int_helper.c
new file mode 100644
index 0000000000..9e873ac150
--- /dev/null
+++ b/target/i386/int_helper.c
@@ -0,0 +1,483 @@
+/*
+ * x86 integer helpers
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "qemu/host-utils.h"
+#include "exec/helper-proto.h"
+
+//#define DEBUG_MULDIV
+
+/* modulo 9 table */
+static const uint8_t rclb_table[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 0, 1, 2, 3, 4, 5, 6,
+ 7, 8, 0, 1, 2, 3, 4, 5,
+ 6, 7, 8, 0, 1, 2, 3, 4,
+};
+
+/* modulo 17 table */
+static const uint8_t rclw_table[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 0, 1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12, 13, 14,
+};
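
The two tables reduce an already-masked rotate count for RCL/RCR through carry: an 8-bit rotate through carry cycles over 9 bits (8 data bits plus CF) and a 16-bit one over 17 bits, so rclb_table[i] == i % 9 and rclw_table[i] == i % 17 for i in 0..31 (for example, an 8-bit RCL by 20 behaves like an RCL by 2). A tiny check of that property (illustration only, needs <assert.h>):

    static void check_rcl_tables(void)
    {
        int i;

        for (i = 0; i < 32; i++) {
            assert(rclb_table[i] == i % 9);    /* 8-bit RCL: 9-bit rotation   */
            assert(rclw_table[i] == i % 17);   /* 16-bit RCL: 17-bit rotation */
        }
    }
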
+
+/* division, flags are undefined */
+
+void helper_divb_AL(CPUX86State *env, target_ulong t0)
+{
+ unsigned int num, den, q, r;
+
+ num = (env->regs[R_EAX] & 0xffff);
+ den = (t0 & 0xff);
+ if (den == 0) {
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+ }
+ q = (num / den);
+ if (q > 0xff) {
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+ }
+ q &= 0xff;
+ r = (num % den) & 0xff;
+ env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | (r << 8) | q;
+}
+
+void helper_idivb_AL(CPUX86State *env, target_ulong t0)
+{
+ int num, den, q, r;
+
+ num = (int16_t)env->regs[R_EAX];
+ den = (int8_t)t0;
+ if (den == 0) {
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+ }
+ q = (num / den);
+ if (q != (int8_t)q) {
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+ }
+ q &= 0xff;
+ r = (num % den) & 0xff;
+ env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | (r << 8) | q;
+}
+
+void helper_divw_AX(CPUX86State *env, target_ulong t0)
+{
+ unsigned int num, den, q, r;
+
+ num = (env->regs[R_EAX] & 0xffff) | ((env->regs[R_EDX] & 0xffff) << 16);
+ den = (t0 & 0xffff);
+ if (den == 0) {
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+ }
+ q = (num / den);
+ if (q > 0xffff) {
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+ }
+ q &= 0xffff;
+ r = (num % den) & 0xffff;
+ env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | q;
+ env->regs[R_EDX] = (env->regs[R_EDX] & ~0xffff) | r;
+}
+
+void helper_idivw_AX(CPUX86State *env, target_ulong t0)
+{
+ int num, den, q, r;
+
+ num = (env->regs[R_EAX] & 0xffff) | ((env->regs[R_EDX] & 0xffff) << 16);
+ den = (int16_t)t0;
+ if (den == 0) {
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+ }
+ q = (num / den);
+ if (q != (int16_t)q) {
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+ }
+ q &= 0xffff;
+ r = (num % den) & 0xffff;
+ env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | q;
+ env->regs[R_EDX] = (env->regs[R_EDX] & ~0xffff) | r;
+}
+
+void helper_divl_EAX(CPUX86State *env, target_ulong t0)
+{
+ unsigned int den, r;
+ uint64_t num, q;
+
+ num = ((uint32_t)env->regs[R_EAX]) | ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32);
+ den = t0;
+ if (den == 0) {
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+ }
+ q = (num / den);
+ r = (num % den);
+ if (q > 0xffffffff) {
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+ }
+ env->regs[R_EAX] = (uint32_t)q;
+ env->regs[R_EDX] = (uint32_t)r;
+}
+
+void helper_idivl_EAX(CPUX86State *env, target_ulong t0)
+{
+ int den, r;
+ int64_t num, q;
+
+ num = ((uint32_t)env->regs[R_EAX]) | ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32);
+ den = t0;
+ if (den == 0) {
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+ }
+ q = (num / den);
+ r = (num % den);
+ if (q != (int32_t)q) {
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+ }
+ env->regs[R_EAX] = (uint32_t)q;
+ env->regs[R_EDX] = (uint32_t)r;
+}
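
One worked example of the 32-bit case, plus a note: on x86 a quotient that does not fit in the destination raises the same #DE exception as division by zero, which is why the overflow paths above reuse EXCP00_DIVZ.

    #include <stdint.h>
    #include <assert.h>

    static void div32_example(void)
    {
        /* DIV r/m32 with EDX:EAX = 100 and divisor 7 */
        uint64_t num = ((uint64_t)0 << 32) | 100;   /* EDX = 0, EAX = 100 */
        uint32_t den = 7;

        assert(num / den == 14);   /* quotient  -> EAX */
        assert(num % den == 2);    /* remainder -> EDX */
    }
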
+
+/* bcd */
+
+/* XXX: exception */
+void helper_aam(CPUX86State *env, int base)
+{
+ int al, ah;
+
+ al = env->regs[R_EAX] & 0xff;
+ ah = al / base;
+ al = al % base;
+ env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | al | (ah << 8);
+ CC_DST = al;
+}
+
+void helper_aad(CPUX86State *env, int base)
+{
+ int al, ah;
+
+ al = env->regs[R_EAX] & 0xff;
+ ah = (env->regs[R_EAX] >> 8) & 0xff;
+ al = ((ah * base) + al) & 0xff;
+ env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | al;
+ CC_DST = al;
+}
+
+void helper_aaa(CPUX86State *env)
+{
+ int icarry;
+ int al, ah, af;
+ int eflags;
+
+ eflags = cpu_cc_compute_all(env, CC_OP);
+ af = eflags & CC_A;
+ al = env->regs[R_EAX] & 0xff;
+ ah = (env->regs[R_EAX] >> 8) & 0xff;
+
+ icarry = (al > 0xf9);
+ if (((al & 0x0f) > 9) || af) {
+ al = (al + 6) & 0x0f;
+ ah = (ah + 1 + icarry) & 0xff;
+ eflags |= CC_C | CC_A;
+ } else {
+ eflags &= ~(CC_C | CC_A);
+ al &= 0x0f;
+ }
+ env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | al | (ah << 8);
+ CC_SRC = eflags;
+}
+
+void helper_aas(CPUX86State *env)
+{
+ int icarry;
+ int al, ah, af;
+ int eflags;
+
+ eflags = cpu_cc_compute_all(env, CC_OP);
+ af = eflags & CC_A;
+ al = env->regs[R_EAX] & 0xff;
+ ah = (env->regs[R_EAX] >> 8) & 0xff;
+
+ icarry = (al < 6);
+ if (((al & 0x0f) > 9) || af) {
+ al = (al - 6) & 0x0f;
+ ah = (ah - 1 - icarry) & 0xff;
+ eflags |= CC_C | CC_A;
+ } else {
+ eflags &= ~(CC_C | CC_A);
+ al &= 0x0f;
+ }
+ env->regs[R_EAX] = (env->regs[R_EAX] & ~0xffff) | al | (ah << 8);
+ CC_SRC = eflags;
+}
+
+void helper_daa(CPUX86State *env)
+{
+ int old_al, al, af, cf;
+ int eflags;
+
+ eflags = cpu_cc_compute_all(env, CC_OP);
+ cf = eflags & CC_C;
+ af = eflags & CC_A;
+ old_al = al = env->regs[R_EAX] & 0xff;
+
+ eflags = 0;
+ if (((al & 0x0f) > 9) || af) {
+ al = (al + 6) & 0xff;
+ eflags |= CC_A;
+ }
+ if ((old_al > 0x99) || cf) {
+ al = (al + 0x60) & 0xff;
+ eflags |= CC_C;
+ }
+ env->regs[R_EAX] = (env->regs[R_EAX] & ~0xff) | al;
+ /* well, speed is not an issue here, so we compute the flags by hand */
+ eflags |= (al == 0) << 6; /* zf */
+ eflags |= parity_table[al]; /* pf */
+ eflags |= (al & 0x80); /* sf */
+ CC_SRC = eflags;
+}
+
+void helper_das(CPUX86State *env)
+{
+ int al, al1, af, cf;
+ int eflags;
+
+ eflags = cpu_cc_compute_all(env, CC_OP);
+ cf = eflags & CC_C;
+ af = eflags & CC_A;
+ al = env->regs[R_EAX] & 0xff;
+
+ eflags = 0;
+ al1 = al;
+ if (((al & 0x0f) > 9) || af) {
+ eflags |= CC_A;
+ if (al < 6 || cf) {
+ eflags |= CC_C;
+ }
+ al = (al - 6) & 0xff;
+ }
+ if ((al1 > 0x99) || cf) {
+ al = (al - 0x60) & 0xff;
+ eflags |= CC_C;
+ }
+ env->regs[R_EAX] = (env->regs[R_EAX] & ~0xff) | al;
+ /* well, speed is not an issue here, so we compute the flags by hand */
+ eflags |= (al == 0) << 6; /* zf */
+ eflags |= parity_table[al]; /* pf */
+ eflags |= (al & 0x80); /* sf */
+ CC_SRC = eflags;
+}
+
+#ifdef TARGET_X86_64
+static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
+{
+ *plow += a;
+ /* carry test */
+ if (*plow < a) {
+ (*phigh)++;
+ }
+ *phigh += b;
+}
+
+static void neg128(uint64_t *plow, uint64_t *phigh)
+{
+ *plow = ~*plow;
+ *phigh = ~*phigh;
+ add128(plow, phigh, 1, 0);
+}
+
+/* return TRUE if overflow */
+static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
+{
+ uint64_t q, r, a1, a0;
+ int i, qb, ab;
+
+ a0 = *plow;
+ a1 = *phigh;
+ if (a1 == 0) {
+ q = a0 / b;
+ r = a0 % b;
+ *plow = q;
+ *phigh = r;
+ } else {
+ if (a1 >= b) {
+ return 1;
+ }
+ /* XXX: use a better algorithm */
+ for (i = 0; i < 64; i++) {
+ ab = a1 >> 63;
+ a1 = (a1 << 1) | (a0 >> 63);
+ if (ab || a1 >= b) {
+ a1 -= b;
+ qb = 1;
+ } else {
+ qb = 0;
+ }
+ a0 = (a0 << 1) | qb;
+ }
+#if defined(DEBUG_MULDIV)
+ printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64
+ ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
+ *phigh, *plow, b, a0, a1);
+#endif
+ *plow = a0;
+ *phigh = a1;
+ }
+ return 0;
+}
+
+/* return TRUE if overflow */
+static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
+{
+ int sa, sb;
+
+ sa = ((int64_t)*phigh < 0);
+ if (sa) {
+ neg128(plow, phigh);
+ }
+ sb = (b < 0);
+ if (sb) {
+ b = -b;
+ }
+ if (div64(plow, phigh, b) != 0) {
+ return 1;
+ }
+ if (sa ^ sb) {
+ if (*plow > (1ULL << 63)) {
+ return 1;
+ }
+ *plow = -*plow;
+ } else {
+ if (*plow >= (1ULL << 63)) {
+ return 1;
+ }
+ }
+ if (sa) {
+ *phigh = -*phigh;
+ }
+ return 0;
+}
+
+void helper_divq_EAX(CPUX86State *env, target_ulong t0)
+{
+ uint64_t r0, r1;
+
+ if (t0 == 0) {
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+ }
+ r0 = env->regs[R_EAX];
+ r1 = env->regs[R_EDX];
+ if (div64(&r0, &r1, t0)) {
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+ }
+ env->regs[R_EAX] = r0;
+ env->regs[R_EDX] = r1;
+}
+
+void helper_idivq_EAX(CPUX86State *env, target_ulong t0)
+{
+ uint64_t r0, r1;
+
+ if (t0 == 0) {
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+ }
+ r0 = env->regs[R_EAX];
+ r1 = env->regs[R_EDX];
+ if (idiv64(&r0, &r1, t0)) {
+ raise_exception_ra(env, EXCP00_DIVZ, GETPC());
+ }
+ env->regs[R_EAX] = r0;
+ env->regs[R_EDX] = r1;
+}
+#endif
+
+#if TARGET_LONG_BITS == 32
+# define ctztl ctz32
+# define clztl clz32
+#else
+# define ctztl ctz64
+# define clztl clz64
+#endif
+
+/* bit operations */
+target_ulong helper_ctz(target_ulong t0)
+{
+ return ctztl(t0);
+}
+
+target_ulong helper_clz(target_ulong t0)
+{
+ return clztl(t0);
+}
+
+target_ulong helper_pdep(target_ulong src, target_ulong mask)
+{
+ target_ulong dest = 0;
+ int i, o;
+
+ for (i = 0; mask != 0; i++) {
+ o = ctztl(mask);
+ mask &= mask - 1;
+ dest |= ((src >> i) & 1) << o;
+ }
+ return dest;
+}
+
+target_ulong helper_pext(target_ulong src, target_ulong mask)
+{
+ target_ulong dest = 0;
+ int i, o;
+
+ for (o = 0; mask != 0; o++) {
+ i = ctztl(mask);
+ mask &= mask - 1;
+ dest |= ((src >> i) & 1) << o;
+ }
+ return dest;
+}
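
helper_pdep() scatters the low bits of src into the bit positions selected by mask, and helper_pext() gathers the selected bits back into a contiguous low field, matching the BMI2 PDEP/PEXT instructions. A small round-trip example using the helpers above (values chosen arbitrarily, assert() used for illustration):

    static void pdep_pext_example(void)
    {
        target_ulong mask = 0x1a;                     /* bits 1, 3 and 4 */
        target_ulong dep  = helper_pdep(0x5, mask);   /* 0b101 -> 0x12   */
        target_ulong ext  = helper_pext(dep, mask);   /* back to 0x5     */

        assert(dep == 0x12 && ext == 0x5);
    }
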
+
+#define SHIFT 0
+#include "shift_helper_template.h"
+#undef SHIFT
+
+#define SHIFT 1
+#include "shift_helper_template.h"
+#undef SHIFT
+
+#define SHIFT 2
+#include "shift_helper_template.h"
+#undef SHIFT
+
+#ifdef TARGET_X86_64
+#define SHIFT 3
+#include "shift_helper_template.h"
+#undef SHIFT
+#endif
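
The repeated #define SHIFT / #include pairs are QEMU's template idiom: shift_helper_template.h expands into one family of shift/rotate helpers per inclusion, with SHIFT selecting the operand width (0 = byte, 1 = word, 2 = long, 3 = quad on 64-bit targets); helper.h does the same with ops_sse_header.h. A stripped-down sketch of the pattern with entirely made-up names:

    /* widen_template.h -- hypothetical template, included once per SHIFT */
    #if SHIFT == 0
    #define DATA_TYPE uint8_t
    #define SUFFIX    b
    #else
    #define DATA_TYPE uint16_t
    #define SUFFIX    w
    #endif

    #define PASTE_(a, b) a##b
    #define PASTE(a, b)  PASTE_(a, b)

    /* expands to double_b() or double_w() depending on SHIFT */
    static DATA_TYPE PASTE(double_, SUFFIX)(DATA_TYPE x)
    {
        return x * 2;
    }

    #undef DATA_TYPE
    #undef SUFFIX
    #undef PASTE
    #undef PASTE_
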
+
+/* Test that BIT is enabled in CR4. If not, raise an illegal opcode
+ exception. This reduces the requirements for rare CR4 bits being
+ mapped into HFLAGS. */
+void helper_cr4_testbit(CPUX86State *env, uint32_t bit)
+{
+ if (unlikely((env->cr[4] & bit) == 0)) {
+ raise_exception_ra(env, EXCP06_ILLOP, GETPC());
+ }
+}
diff --git a/target/i386/kvm-stub.c b/target/i386/kvm-stub.c
new file mode 100644
index 0000000000..bda4dc2f0c
--- /dev/null
+++ b/target/i386/kvm-stub.c
@@ -0,0 +1,42 @@
+/*
+ * QEMU KVM x86 specific function stubs
+ *
+ * Copyright Linaro Limited 2012
+ *
+ * Author: Peter Maydell <peter.maydell@linaro.org>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "kvm_i386.h"
+
+bool kvm_allows_irq0_override(void)
+{
+ return 1;
+}
+
+#ifndef __OPTIMIZE__
+bool kvm_has_smm(void)
+{
+ return 1;
+}
+
+bool kvm_enable_x2apic(void)
+{
+ return false;
+}
+
+/* This function is only called inside conditionals which we
+ * rely on the compiler to optimize out when CONFIG_KVM is not
+ * defined.
+ */
+uint32_t kvm_arch_get_supported_cpuid(KVMState *env, uint32_t function,
+ uint32_t index, int reg)
+{
+ abort();
+}
+#endif
diff --git a/target/i386/kvm.c b/target/i386/kvm.c
new file mode 100644
index 0000000000..f62264a7a8
--- /dev/null
+++ b/target/i386/kvm.c
@@ -0,0 +1,3536 @@
+/*
+ * QEMU KVM support
+ *
+ * Copyright (C) 2006-2008 Qumranet Technologies
+ * Copyright IBM, Corp. 2008
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include <sys/ioctl.h>
+#include <sys/utsname.h>
+
+#include <linux/kvm.h>
+#include <linux/kvm_para.h>
+
+#include "qemu-common.h"
+#include "cpu.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/kvm_int.h"
+#include "kvm_i386.h"
+#include "hyperv.h"
+
+#include "exec/gdbstub.h"
+#include "qemu/host-utils.h"
+#include "qemu/config-file.h"
+#include "qemu/error-report.h"
+#include "hw/i386/pc.h"
+#include "hw/i386/apic.h"
+#include "hw/i386/apic_internal.h"
+#include "hw/i386/apic-msidef.h"
+#include "hw/i386/intel_iommu.h"
+#include "hw/i386/x86-iommu.h"
+
+#include "exec/ioport.h"
+#include "standard-headers/asm-x86/hyperv.h"
+#include "hw/pci/pci.h"
+#include "hw/pci/msi.h"
+#include "migration/migration.h"
+#include "exec/memattrs.h"
+#include "trace.h"
+
+//#define DEBUG_KVM
+
+#ifdef DEBUG_KVM
+#define DPRINTF(fmt, ...) \
+ do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+ do { } while (0)
+#endif
+
+#define MSR_KVM_WALL_CLOCK 0x11
+#define MSR_KVM_SYSTEM_TIME 0x12
+
+/* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus
+ * 255 kvm_msr_entry structs */
+#define MSR_BUF_SIZE 4096
+
+#ifndef BUS_MCEERR_AR
+#define BUS_MCEERR_AR 4
+#endif
+#ifndef BUS_MCEERR_AO
+#define BUS_MCEERR_AO 5
+#endif
+
+const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
+ KVM_CAP_INFO(SET_TSS_ADDR),
+ KVM_CAP_INFO(EXT_CPUID),
+ KVM_CAP_INFO(MP_STATE),
+ KVM_CAP_LAST_INFO
+};
+
+static bool has_msr_star;
+static bool has_msr_hsave_pa;
+static bool has_msr_tsc_aux;
+static bool has_msr_tsc_adjust;
+static bool has_msr_tsc_deadline;
+static bool has_msr_feature_control;
+static bool has_msr_misc_enable;
+static bool has_msr_smbase;
+static bool has_msr_bndcfgs;
+static int lm_capable_kernel;
+static bool has_msr_hv_hypercall;
+static bool has_msr_hv_crash;
+static bool has_msr_hv_reset;
+static bool has_msr_hv_vpindex;
+static bool has_msr_hv_runtime;
+static bool has_msr_hv_synic;
+static bool has_msr_hv_stimer;
+static bool has_msr_xss;
+
+static bool has_msr_architectural_pmu;
+static uint32_t num_architectural_pmu_counters;
+
+static int has_xsave;
+static int has_xcrs;
+static int has_pit_state2;
+
+static bool has_msr_mcg_ext_ctl;
+
+static struct kvm_cpuid2 *cpuid_cache;
+
+int kvm_has_pit_state2(void)
+{
+ return has_pit_state2;
+}
+
+bool kvm_has_smm(void)
+{
+ return kvm_check_extension(kvm_state, KVM_CAP_X86_SMM);
+}
+
+bool kvm_allows_irq0_override(void)
+{
+ return !kvm_irqchip_in_kernel() || kvm_has_gsi_routing();
+}
+
+static bool kvm_x2apic_api_set_flags(uint64_t flags)
+{
+ KVMState *s = KVM_STATE(current_machine->accelerator);
+
+ return !kvm_vm_enable_cap(s, KVM_CAP_X2APIC_API, 0, flags);
+}
+
+#define MEMORIZE(fn, _result) \
+ ({ \
+ static bool _memorized; \
+ \
+ if (_memorized) { \
+ return _result; \
+ } \
+ _memorized = true; \
+ _result = fn; \
+ })
+
+static bool has_x2apic_api;
+
+bool kvm_has_x2apic_api(void)
+{
+ return has_x2apic_api;
+}
+
+bool kvm_enable_x2apic(void)
+{
+ return MEMORIZE(
+ kvm_x2apic_api_set_flags(KVM_X2APIC_API_USE_32BIT_IDS |
+ KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK),
+ has_x2apic_api);
+}
+
+static int kvm_get_tsc(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ struct {
+ struct kvm_msrs info;
+ struct kvm_msr_entry entries[1];
+ } msr_data;
+ int ret;
+
+ if (env->tsc_valid) {
+ return 0;
+ }
+
+ msr_data.info.nmsrs = 1;
+ msr_data.entries[0].index = MSR_IA32_TSC;
+ env->tsc_valid = !runstate_is_running();
+
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
+ if (ret < 0) {
+ return ret;
+ }
+
+ assert(ret == 1);
+ env->tsc = msr_data.entries[0].data;
+ return 0;
+}
+
+static inline void do_kvm_synchronize_tsc(CPUState *cpu, run_on_cpu_data arg)
+{
+ kvm_get_tsc(cpu);
+}
+
+void kvm_synchronize_all_tsc(void)
+{
+ CPUState *cpu;
+
+ if (kvm_enabled()) {
+ CPU_FOREACH(cpu) {
+ run_on_cpu(cpu, do_kvm_synchronize_tsc, RUN_ON_CPU_NULL);
+ }
+ }
+}
+
+static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
+{
+ struct kvm_cpuid2 *cpuid;
+ int r, size;
+
+ size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
+ cpuid = g_malloc0(size);
+ cpuid->nent = max;
+ r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid);
+ if (r == 0 && cpuid->nent >= max) {
+ r = -E2BIG;
+ }
+ if (r < 0) {
+ if (r == -E2BIG) {
+ g_free(cpuid);
+ return NULL;
+ } else {
+ fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
+ strerror(-r));
+ exit(1);
+ }
+ }
+ return cpuid;
+}
+
+/* Run KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough
+ * for all entries.
+ */
+static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s)
+{
+ struct kvm_cpuid2 *cpuid;
+ int max = 1;
+
+ if (cpuid_cache != NULL) {
+ return cpuid_cache;
+ }
+ while ((cpuid = try_get_cpuid(s, max)) == NULL) {
+ max *= 2;
+ }
+ cpuid_cache = cpuid;
+ return cpuid;
+}
+
+static const struct kvm_para_features {
+ int cap;
+ int feature;
+} para_features[] = {
+ { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE },
+ { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY },
+ { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP },
+ { KVM_CAP_ASYNC_PF, KVM_FEATURE_ASYNC_PF },
+};
+
+static int get_para_features(KVMState *s)
+{
+ int i, features = 0;
+
+ for (i = 0; i < ARRAY_SIZE(para_features); i++) {
+ if (kvm_check_extension(s, para_features[i].cap)) {
+ features |= (1 << para_features[i].feature);
+ }
+ }
+
+ return features;
+}
+
+
+/* Return the value of a specific register in a cpuid entry
+ */
+static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
+{
+ uint32_t ret = 0;
+ switch (reg) {
+ case R_EAX:
+ ret = entry->eax;
+ break;
+ case R_EBX:
+ ret = entry->ebx;
+ break;
+ case R_ECX:
+ ret = entry->ecx;
+ break;
+ case R_EDX:
+ ret = entry->edx;
+ break;
+ }
+ return ret;
+}
+
+/* Find the matching entry for function/index in a kvm_cpuid2 struct
+ */
+static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
+ uint32_t function,
+ uint32_t index)
+{
+ int i;
+ for (i = 0; i < cpuid->nent; ++i) {
+ if (cpuid->entries[i].function == function &&
+ cpuid->entries[i].index == index) {
+ return &cpuid->entries[i];
+ }
+ }
+ /* not found: */
+ return NULL;
+}
+
+uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
+ uint32_t index, int reg)
+{
+ struct kvm_cpuid2 *cpuid;
+ uint32_t ret = 0;
+ uint32_t cpuid_1_edx;
+ bool found = false;
+
+ cpuid = get_supported_cpuid(s);
+
+ struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index);
+ if (entry) {
+ found = true;
+ ret = cpuid_entry_get_reg(entry, reg);
+ }
+
+ /* Fixups for the data returned by KVM, below */
+
+ if (function == 1 && reg == R_EDX) {
+ /* KVM before 2.6.30 misreports the following features */
+ ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
+ } else if (function == 1 && reg == R_ECX) {
+ /* We can set the hypervisor flag, even if KVM does not return it on
+ * GET_SUPPORTED_CPUID
+ */
+ ret |= CPUID_EXT_HYPERVISOR;
+ /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it
+ * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER,
+ * and the irqchip is in the kernel.
+ */
+ if (kvm_irqchip_in_kernel() &&
+ kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
+ ret |= CPUID_EXT_TSC_DEADLINE_TIMER;
+ }
+
+ /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled
+ * without the in-kernel irqchip
+ */
+ if (!kvm_irqchip_in_kernel()) {
+ ret &= ~CPUID_EXT_X2APIC;
+ }
+ } else if (function == 6 && reg == R_EAX) {
+ ret |= CPUID_6_EAX_ARAT; /* safe to allow because of emulated APIC */
+ } else if (function == 0x80000001 && reg == R_EDX) {
+ /* On Intel, kvm returns cpuid according to the Intel spec,
+ * so add missing bits according to the AMD spec:
+ */
+ cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
+ ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
+ } else if (function == KVM_CPUID_FEATURES && reg == R_EAX) {
+ /* kvm_pv_unhalt is reported by GET_SUPPORTED_CPUID, but it can't
+ * be enabled without the in-kernel irqchip
+ */
+ if (!kvm_irqchip_in_kernel()) {
+ ret &= ~(1U << KVM_FEATURE_PV_UNHALT);
+ }
+ }
+
+ /* fallback for older kernels */
+ if ((function == KVM_CPUID_FEATURES) && !found) {
+ ret = get_para_features(s);
+ }
+
+ return ret;
+}
+
+typedef struct HWPoisonPage {
+ ram_addr_t ram_addr;
+ QLIST_ENTRY(HWPoisonPage) list;
+} HWPoisonPage;
+
+static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
+ QLIST_HEAD_INITIALIZER(hwpoison_page_list);
+
+static void kvm_unpoison_all(void *param)
+{
+ HWPoisonPage *page, *next_page;
+
+ QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
+ QLIST_REMOVE(page, list);
+ qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
+ g_free(page);
+ }
+}
+
+static void kvm_hwpoison_page_add(ram_addr_t ram_addr)
+{
+ HWPoisonPage *page;
+
+ QLIST_FOREACH(page, &hwpoison_page_list, list) {
+ if (page->ram_addr == ram_addr) {
+ return;
+ }
+ }
+ page = g_new(HWPoisonPage, 1);
+ page->ram_addr = ram_addr;
+ QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
+}
+
+static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
+ int *max_banks)
+{
+ int r;
+
+ r = kvm_check_extension(s, KVM_CAP_MCE);
+ if (r > 0) {
+ *max_banks = r;
+ return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap);
+ }
+ return -ENOSYS;
+}
+
+static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
+{
+ CPUState *cs = CPU(cpu);
+ CPUX86State *env = &cpu->env;
+ uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
+ MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
+ uint64_t mcg_status = MCG_STATUS_MCIP;
+ int flags = 0;
+
+ if (code == BUS_MCEERR_AR) {
+ status |= MCI_STATUS_AR | 0x134;
+ mcg_status |= MCG_STATUS_EIPV;
+ } else {
+ status |= 0xc0;
+ mcg_status |= MCG_STATUS_RIPV;
+ }
+
+ flags = cpu_x86_support_mca_broadcast(env) ? MCE_INJECT_BROADCAST : 0;
+    /* Read back the value of MSR_EXT_MCG_CTL that the guest kernel has set,
+     * so that env->mcg_ext_ctl is up to date.
+ */
+ cpu_synchronize_state(cs);
+ if (env->mcg_ext_ctl & MCG_EXT_CTL_LMCE_EN) {
+ mcg_status |= MCG_STATUS_LMCE;
+ flags = 0;
+ }
+
+ cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr,
+ (MCM_ADDR_PHYS << 6) | 0xc, flags);
+}
+
+static void hardware_memory_error(void)
+{
+ fprintf(stderr, "Hardware memory error!\n");
+ exit(1);
+}
+
+int kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
+{
+ X86CPU *cpu = X86_CPU(c);
+ CPUX86State *env = &cpu->env;
+ ram_addr_t ram_addr;
+ hwaddr paddr;
+
+ if ((env->mcg_cap & MCG_SER_P) && addr
+ && (code == BUS_MCEERR_AR || code == BUS_MCEERR_AO)) {
+ ram_addr = qemu_ram_addr_from_host(addr);
+ if (ram_addr == RAM_ADDR_INVALID ||
+ !kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
+ fprintf(stderr, "Hardware memory error for memory used by "
+ "QEMU itself instead of guest system!\n");
+ /* Hope we are lucky for AO MCE */
+ if (code == BUS_MCEERR_AO) {
+ return 0;
+ } else {
+ hardware_memory_error();
+ }
+ }
+ kvm_hwpoison_page_add(ram_addr);
+ kvm_mce_inject(cpu, paddr, code);
+ } else {
+ if (code == BUS_MCEERR_AO) {
+ return 0;
+ } else if (code == BUS_MCEERR_AR) {
+ hardware_memory_error();
+ } else {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+int kvm_arch_on_sigbus(int code, void *addr)
+{
+ X86CPU *cpu = X86_CPU(first_cpu);
+
+ if ((cpu->env.mcg_cap & MCG_SER_P) && addr && code == BUS_MCEERR_AO) {
+ ram_addr_t ram_addr;
+ hwaddr paddr;
+
+ /* Hope we are lucky for AO MCE */
+ ram_addr = qemu_ram_addr_from_host(addr);
+ if (ram_addr == RAM_ADDR_INVALID ||
+ !kvm_physical_memory_addr_from_host(first_cpu->kvm_state,
+ addr, &paddr)) {
+ fprintf(stderr, "Hardware memory error for memory used by "
+ "QEMU itself instead of guest system!: %p\n", addr);
+ return 0;
+ }
+ kvm_hwpoison_page_add(ram_addr);
+ kvm_mce_inject(X86_CPU(first_cpu), paddr, code);
+ } else {
+ if (code == BUS_MCEERR_AO) {
+ return 0;
+ } else if (code == BUS_MCEERR_AR) {
+ hardware_memory_error();
+ } else {
+ return 1;
+ }
+ }
+ return 0;
+}
+
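+/*
+ * Fallback for kernels without KVM_CAP_VCPU_EVENTS: replay a pending
+ * machine check through the legacy KVM_X86_SET_MCE interface.
+ */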
+static int kvm_inject_mce_oldstyle(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+
+ if (!kvm_has_vcpu_events() && env->exception_injected == EXCP12_MCHK) {
+ unsigned int bank, bank_num = env->mcg_cap & 0xff;
+ struct kvm_x86_mce mce;
+
+ env->exception_injected = -1;
+
+ /*
+ * There must be at least one bank in use if an MCE is pending.
+ * Find it and use its values for the event injection.
+ */
+ for (bank = 0; bank < bank_num; bank++) {
+ if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) {
+ break;
+ }
+ }
+ assert(bank < bank_num);
+
+ mce.bank = bank;
+ mce.status = env->mce_banks[bank * 4 + 1];
+ mce.mcg_status = env->mcg_status;
+ mce.addr = env->mce_banks[bank * 4 + 2];
+ mce.misc = env->mce_banks[bank * 4 + 3];
+
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_X86_SET_MCE, &mce);
+ }
+ return 0;
+}
+
+static void cpu_update_state(void *opaque, int running, RunState state)
+{
+ CPUX86State *env = opaque;
+
+ if (running) {
+ env->tsc_valid = false;
+ }
+}
+
+unsigned long kvm_arch_vcpu_id(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ return cpu->apic_id;
+}
+
+#ifndef KVM_CPUID_SIGNATURE_NEXT
+#define KVM_CPUID_SIGNATURE_NEXT 0x40000100
+#endif
+
+static bool hyperv_hypercall_available(X86CPU *cpu)
+{
+ return cpu->hyperv_vapic ||
+ (cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_RETRY);
+}
+
+static bool hyperv_enabled(X86CPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ return kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0 &&
+ (hyperv_hypercall_available(cpu) ||
+ cpu->hyperv_time ||
+ cpu->hyperv_relaxed_timing ||
+ cpu->hyperv_crash ||
+ cpu->hyperv_reset ||
+ cpu->hyperv_vpindex ||
+ cpu->hyperv_runtime ||
+ cpu->hyperv_synic ||
+ cpu->hyperv_stimer);
+}
+
+static int kvm_arch_set_tsc_khz(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ int r;
+
+ if (!env->tsc_khz) {
+ return 0;
+ }
+
+ r = kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL) ?
+ kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz) :
+ -ENOTSUP;
+ if (r < 0) {
+ /* When KVM_SET_TSC_KHZ fails, it's an error only if the current
+ * TSC frequency doesn't match the one we want.
+ */
+ int cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
+ kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
+ -ENOTSUP;
+ if (cur_freq <= 0 || cur_freq != env->tsc_khz) {
+ error_report("warning: TSC frequency mismatch between "
+ "VM (%" PRId64 " kHz) and host (%d kHz), "
+ "and TSC scaling unavailable",
+ env->tsc_khz, cur_freq);
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+static int hyperv_handle_properties(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ if (cpu->hyperv_time &&
+ kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) <= 0) {
+ cpu->hyperv_time = false;
+ }
+
+ if (cpu->hyperv_relaxed_timing) {
+ env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_HYPERCALL_AVAILABLE;
+ }
+ if (cpu->hyperv_vapic) {
+ env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_HYPERCALL_AVAILABLE;
+ env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_APIC_ACCESS_AVAILABLE;
+ }
+ if (cpu->hyperv_time) {
+ env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_HYPERCALL_AVAILABLE;
+ env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_TIME_REF_COUNT_AVAILABLE;
+        env->features[FEAT_HYPERV_EAX] |= 0x200; /* partition reference TSC MSR available */
+ }
+ if (cpu->hyperv_crash && has_msr_hv_crash) {
+ env->features[FEAT_HYPERV_EDX] |= HV_X64_GUEST_CRASH_MSR_AVAILABLE;
+ }
+ env->features[FEAT_HYPERV_EDX] |= HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
+ if (cpu->hyperv_reset && has_msr_hv_reset) {
+ env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_RESET_AVAILABLE;
+ }
+ if (cpu->hyperv_vpindex && has_msr_hv_vpindex) {
+ env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_VP_INDEX_AVAILABLE;
+ }
+ if (cpu->hyperv_runtime && has_msr_hv_runtime) {
+ env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_VP_RUNTIME_AVAILABLE;
+ }
+ if (cpu->hyperv_synic) {
+ int sint;
+
+ if (!has_msr_hv_synic ||
+ kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_SYNIC, 0)) {
+ fprintf(stderr, "Hyper-V SynIC is not supported by kernel\n");
+ return -ENOSYS;
+ }
+
+ env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_SYNIC_AVAILABLE;
+ env->msr_hv_synic_version = HV_SYNIC_VERSION_1;
+ for (sint = 0; sint < ARRAY_SIZE(env->msr_hv_synic_sint); sint++) {
+ env->msr_hv_synic_sint[sint] = HV_SYNIC_SINT_MASKED;
+ }
+ }
+ if (cpu->hyperv_stimer) {
+ if (!has_msr_hv_stimer) {
+ fprintf(stderr, "Hyper-V timers aren't supported by kernel\n");
+ return -ENOSYS;
+ }
+ env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_SYNTIMER_AVAILABLE;
+ }
+ return 0;
+}
+
+static Error *invtsc_mig_blocker;
+
+#define KVM_MAX_CPUID_ENTRIES 100
+
+int kvm_arch_init_vcpu(CPUState *cs)
+{
+ struct {
+ struct kvm_cpuid2 cpuid;
+ struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES];
+ } QEMU_PACKED cpuid_data;
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ uint32_t limit, i, j, cpuid_i;
+ uint32_t unused;
+ struct kvm_cpuid_entry2 *c;
+ uint32_t signature[3];
+ int kvm_base = KVM_CPUID_SIGNATURE;
+ int r;
+
+ memset(&cpuid_data, 0, sizeof(cpuid_data));
+
+ cpuid_i = 0;
+
+ /* Paravirtualization CPUIDs */
+ if (hyperv_enabled(cpu)) {
+ c = &cpuid_data.entries[cpuid_i++];
+ c->function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
+ if (!cpu->hyperv_vendor_id) {
+ memcpy(signature, "Microsoft Hv", 12);
+ } else {
+ size_t len = strlen(cpu->hyperv_vendor_id);
+
+ if (len > 12) {
+ error_report("hv-vendor-id truncated to 12 characters");
+ len = 12;
+ }
+ memset(signature, 0, 12);
+ memcpy(signature, cpu->hyperv_vendor_id, len);
+ }
+ c->eax = HYPERV_CPUID_MIN;
+ c->ebx = signature[0];
+ c->ecx = signature[1];
+ c->edx = signature[2];
+
+ c = &cpuid_data.entries[cpuid_i++];
+ c->function = HYPERV_CPUID_INTERFACE;
+ memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
+ c->eax = signature[0];
+ c->ebx = 0;
+ c->ecx = 0;
+ c->edx = 0;
+
+ c = &cpuid_data.entries[cpuid_i++];
+ c->function = HYPERV_CPUID_VERSION;
+ c->eax = 0x00001bbc;
+ c->ebx = 0x00060001;
+
+ c = &cpuid_data.entries[cpuid_i++];
+ c->function = HYPERV_CPUID_FEATURES;
+ r = hyperv_handle_properties(cs);
+ if (r) {
+ return r;
+ }
+ c->eax = env->features[FEAT_HYPERV_EAX];
+ c->ebx = env->features[FEAT_HYPERV_EBX];
+ c->edx = env->features[FEAT_HYPERV_EDX];
+
+ c = &cpuid_data.entries[cpuid_i++];
+ c->function = HYPERV_CPUID_ENLIGHTMENT_INFO;
+ if (cpu->hyperv_relaxed_timing) {
+ c->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
+ }
+ if (cpu->hyperv_vapic) {
+ c->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
+ }
+ c->ebx = cpu->hyperv_spinlock_attempts;
+
+ c = &cpuid_data.entries[cpuid_i++];
+ c->function = HYPERV_CPUID_IMPLEMENT_LIMITS;
+ c->eax = 0x40;
+ c->ebx = 0x40;
+
+ kvm_base = KVM_CPUID_SIGNATURE_NEXT;
+ has_msr_hv_hypercall = true;
+ }
+
+ if (cpu->expose_kvm) {
+ memcpy(signature, "KVMKVMKVM\0\0\0", 12);
+ c = &cpuid_data.entries[cpuid_i++];
+ c->function = KVM_CPUID_SIGNATURE | kvm_base;
+ c->eax = KVM_CPUID_FEATURES | kvm_base;
+ c->ebx = signature[0];
+ c->ecx = signature[1];
+ c->edx = signature[2];
+
+ c = &cpuid_data.entries[cpuid_i++];
+ c->function = KVM_CPUID_FEATURES | kvm_base;
+ c->eax = env->features[FEAT_KVM];
+ }
+
+ cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);
+
+ for (i = 0; i <= limit; i++) {
+ if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+ fprintf(stderr, "unsupported level value: 0x%x\n", limit);
+ abort();
+ }
+ c = &cpuid_data.entries[cpuid_i++];
+
+ switch (i) {
+ case 2: {
+ /* Keep reading function 2 till all the input is received */
+ int times;
+
+ c->function = i;
+ c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
+ KVM_CPUID_FLAG_STATE_READ_NEXT;
+ cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
+ times = c->eax & 0xff;
+
+ for (j = 1; j < times; ++j) {
+ if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+ fprintf(stderr, "cpuid_data is full, no space for "
+                            "cpuid(eax:2):eax & 0xff = 0x%x\n", times);
+ abort();
+ }
+ c = &cpuid_data.entries[cpuid_i++];
+ c->function = i;
+ c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
+ cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
+ }
+ break;
+ }
+ case 4:
+ case 0xb:
+ case 0xd:
+ for (j = 0; ; j++) {
+ if (i == 0xd && j == 64) {
+ break;
+ }
+ c->function = i;
+ c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ c->index = j;
+ cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
+
+ if (i == 4 && c->eax == 0) {
+ break;
+ }
+ if (i == 0xb && !(c->ecx & 0xff00)) {
+ break;
+ }
+ if (i == 0xd && c->eax == 0) {
+ continue;
+ }
+ if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+ fprintf(stderr, "cpuid_data is full, no space for "
+ "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
+ abort();
+ }
+ c = &cpuid_data.entries[cpuid_i++];
+ }
+ break;
+ default:
+ c->function = i;
+ c->flags = 0;
+ cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
+ break;
+ }
+ }
+
+ if (limit >= 0x0a) {
+ uint32_t ver;
+
+ cpu_x86_cpuid(env, 0x0a, 0, &ver, &unused, &unused, &unused);
+ if ((ver & 0xff) > 0) {
+ has_msr_architectural_pmu = true;
+ num_architectural_pmu_counters = (ver & 0xff00) >> 8;
+
+ /* Shouldn't be more than 32, since that's the number of bits
+ * available in EBX to tell us _which_ counters are available.
+ * Play it safe.
+ */
+ if (num_architectural_pmu_counters > MAX_GP_COUNTERS) {
+ num_architectural_pmu_counters = MAX_GP_COUNTERS;
+ }
+ }
+ }
+
+ cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);
+
+ for (i = 0x80000000; i <= limit; i++) {
+ if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+ fprintf(stderr, "unsupported xlevel value: 0x%x\n", limit);
+ abort();
+ }
+ c = &cpuid_data.entries[cpuid_i++];
+
+ c->function = i;
+ c->flags = 0;
+ cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
+ }
+
+    /* Call Centaur's CPUID instructions if they are supported. */
+ if (env->cpuid_xlevel2 > 0) {
+ cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);
+
+ for (i = 0xC0000000; i <= limit; i++) {
+ if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+ fprintf(stderr, "unsupported xlevel2 value: 0x%x\n", limit);
+ abort();
+ }
+ c = &cpuid_data.entries[cpuid_i++];
+
+ c->function = i;
+ c->flags = 0;
+ cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
+ }
+ }
+
+ cpuid_data.cpuid.nent = cpuid_i;
+
+    if (((env->cpuid_version >> 8) & 0xF) >= 6
+ && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
+ (CPUID_MCE | CPUID_MCA)
+ && kvm_check_extension(cs->kvm_state, KVM_CAP_MCE) > 0) {
+ uint64_t mcg_cap, unsupported_caps;
+ int banks;
+ int ret;
+
+ ret = kvm_get_mce_cap_supported(cs->kvm_state, &mcg_cap, &banks);
+ if (ret < 0) {
+ fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
+ return ret;
+ }
+
+ if (banks < (env->mcg_cap & MCG_CAP_BANKS_MASK)) {
+ error_report("kvm: Unsupported MCE bank count (QEMU = %d, KVM = %d)",
+ (int)(env->mcg_cap & MCG_CAP_BANKS_MASK), banks);
+ return -ENOTSUP;
+ }
+
+ unsupported_caps = env->mcg_cap & ~(mcg_cap | MCG_CAP_BANKS_MASK);
+ if (unsupported_caps) {
+ if (unsupported_caps & MCG_LMCE_P) {
+ error_report("kvm: LMCE not supported");
+ return -ENOTSUP;
+ }
+ error_report("warning: Unsupported MCG_CAP bits: 0x%" PRIx64,
+ unsupported_caps);
+ }
+
+ env->mcg_cap &= mcg_cap | MCG_CAP_BANKS_MASK;
+ ret = kvm_vcpu_ioctl(cs, KVM_X86_SETUP_MCE, &env->mcg_cap);
+ if (ret < 0) {
+ fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
+ return ret;
+ }
+ }
+
+ qemu_add_vm_change_state_handler(cpu_update_state, env);
+
+ c = cpuid_find_entry(&cpuid_data.cpuid, 1, 0);
+ if (c) {
+ has_msr_feature_control = !!(c->ecx & CPUID_EXT_VMX) ||
+ !!(c->ecx & CPUID_EXT_SMX);
+ }
+
+ if (env->mcg_cap & MCG_LMCE_P) {
+ has_msr_mcg_ext_ctl = has_msr_feature_control = true;
+ }
+
+ c = cpuid_find_entry(&cpuid_data.cpuid, 0x80000007, 0);
+ if (c && (c->edx & 1<<8) && invtsc_mig_blocker == NULL) {
+ /* for migration */
+ error_setg(&invtsc_mig_blocker,
+ "State blocked by non-migratable CPU device"
+ " (invtsc flag)");
+ migrate_add_blocker(invtsc_mig_blocker);
+ /* for savevm */
+ vmstate_x86_cpu.unmigratable = 1;
+ }
+
+ cpuid_data.cpuid.padding = 0;
+ r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data);
+ if (r) {
+ return r;
+ }
+
+ r = kvm_arch_set_tsc_khz(cs);
+ if (r < 0) {
+ return r;
+ }
+
+    /* The vcpu's TSC frequency is either specified by the user, or follows
+     * the value used by KVM if the former is not set. In the latter case,
+     * we query it from KVM and record it in env->tsc_khz, so that the
+     * vcpu's TSC frequency can be migrated later via this field.
+ */
+ if (!env->tsc_khz) {
+ r = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ?
+ kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) :
+ -ENOTSUP;
+ if (r > 0) {
+ env->tsc_khz = r;
+ }
+ }
+
+ if (has_xsave) {
+ env->kvm_xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
+ }
+ cpu->kvm_msr_buf = g_malloc0(MSR_BUF_SIZE);
+
+ if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP)) {
+ has_msr_tsc_aux = false;
+ }
+
+ return 0;
+}
+
+void kvm_arch_reset_vcpu(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+
+ env->exception_injected = -1;
+ env->interrupt_injected = -1;
+ env->xcr0 = 1;
+ if (kvm_irqchip_in_kernel()) {
+ env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE :
+ KVM_MP_STATE_UNINITIALIZED;
+ } else {
+ env->mp_state = KVM_MP_STATE_RUNNABLE;
+ }
+}
+
+void kvm_arch_do_init_vcpu(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+
+ /* APs get directly into wait-for-SIPI state. */
+ if (env->mp_state == KVM_MP_STATE_UNINITIALIZED) {
+ env->mp_state = KVM_MP_STATE_INIT_RECEIVED;
+ }
+}
+
+static int kvm_get_supported_msrs(KVMState *s)
+{
+ static int kvm_supported_msrs;
+ int ret = 0;
+
+ /* first time */
+ if (kvm_supported_msrs == 0) {
+ struct kvm_msr_list msr_list, *kvm_msr_list;
+
+ kvm_supported_msrs = -1;
+
+ /* Obtain MSR list from KVM. These are the MSRs that we must
+ * save/restore */
+ msr_list.nmsrs = 0;
+ ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list);
+ if (ret < 0 && ret != -E2BIG) {
+ return ret;
+ }
+ /* Old kernel modules had a bug and could write beyond the provided
+           memory. Allocate at least 1K to be safe. */
+ kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) +
+ msr_list.nmsrs *
+ sizeof(msr_list.indices[0])));
+
+ kvm_msr_list->nmsrs = msr_list.nmsrs;
+ ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list);
+ if (ret >= 0) {
+ int i;
+
+ for (i = 0; i < kvm_msr_list->nmsrs; i++) {
+ if (kvm_msr_list->indices[i] == MSR_STAR) {
+ has_msr_star = true;
+ continue;
+ }
+ if (kvm_msr_list->indices[i] == MSR_VM_HSAVE_PA) {
+ has_msr_hsave_pa = true;
+ continue;
+ }
+ if (kvm_msr_list->indices[i] == MSR_TSC_AUX) {
+ has_msr_tsc_aux = true;
+ continue;
+ }
+ if (kvm_msr_list->indices[i] == MSR_TSC_ADJUST) {
+ has_msr_tsc_adjust = true;
+ continue;
+ }
+ if (kvm_msr_list->indices[i] == MSR_IA32_TSCDEADLINE) {
+ has_msr_tsc_deadline = true;
+ continue;
+ }
+ if (kvm_msr_list->indices[i] == MSR_IA32_SMBASE) {
+ has_msr_smbase = true;
+ continue;
+ }
+ if (kvm_msr_list->indices[i] == MSR_IA32_MISC_ENABLE) {
+ has_msr_misc_enable = true;
+ continue;
+ }
+ if (kvm_msr_list->indices[i] == MSR_IA32_BNDCFGS) {
+ has_msr_bndcfgs = true;
+ continue;
+ }
+ if (kvm_msr_list->indices[i] == MSR_IA32_XSS) {
+ has_msr_xss = true;
+ continue;
+ }
+ if (kvm_msr_list->indices[i] == HV_X64_MSR_CRASH_CTL) {
+ has_msr_hv_crash = true;
+ continue;
+ }
+ if (kvm_msr_list->indices[i] == HV_X64_MSR_RESET) {
+ has_msr_hv_reset = true;
+ continue;
+ }
+ if (kvm_msr_list->indices[i] == HV_X64_MSR_VP_INDEX) {
+ has_msr_hv_vpindex = true;
+ continue;
+ }
+ if (kvm_msr_list->indices[i] == HV_X64_MSR_VP_RUNTIME) {
+ has_msr_hv_runtime = true;
+ continue;
+ }
+ if (kvm_msr_list->indices[i] == HV_X64_MSR_SCONTROL) {
+ has_msr_hv_synic = true;
+ continue;
+ }
+ if (kvm_msr_list->indices[i] == HV_X64_MSR_STIMER0_CONFIG) {
+ has_msr_hv_stimer = true;
+ continue;
+ }
+ }
+ }
+
+ g_free(kvm_msr_list);
+ }
+
+ return ret;
+}
+
+static Notifier smram_machine_done;
+static KVMMemoryListener smram_listener;
+static AddressSpace smram_address_space;
+static MemoryRegion smram_as_root;
+static MemoryRegion smram_as_mem;
+
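+/*
+ * Build a second guest-physical view in which SMRAM overlays normal system
+ * memory, and register it with KVM as address space 1 (used while in SMM).
+ */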
+static void register_smram_listener(Notifier *n, void *unused)
+{
+ MemoryRegion *smram =
+ (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
+
+ /* Outer container... */
+ memory_region_init(&smram_as_root, OBJECT(kvm_state), "mem-container-smram", ~0ull);
+ memory_region_set_enabled(&smram_as_root, true);
+
+ /* ... with two regions inside: normal system memory with low
+ * priority, and...
+ */
+ memory_region_init_alias(&smram_as_mem, OBJECT(kvm_state), "mem-smram",
+ get_system_memory(), 0, ~0ull);
+ memory_region_add_subregion_overlap(&smram_as_root, 0, &smram_as_mem, 0);
+ memory_region_set_enabled(&smram_as_mem, true);
+
+ if (smram) {
+ /* ... SMRAM with higher priority */
+ memory_region_add_subregion_overlap(&smram_as_root, 0, smram, 10);
+ memory_region_set_enabled(smram, true);
+ }
+
+ address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM");
+ kvm_memory_listener_register(kvm_state, &smram_listener,
+ &smram_address_space, 1);
+}
+
+int kvm_arch_init(MachineState *ms, KVMState *s)
+{
+ uint64_t identity_base = 0xfffbc000;
+ uint64_t shadow_mem;
+ int ret;
+ struct utsname utsname;
+
+#ifdef KVM_CAP_XSAVE
+ has_xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
+#endif
+
+#ifdef KVM_CAP_XCRS
+ has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
+#endif
+
+#ifdef KVM_CAP_PIT_STATE2
+ has_pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
+#endif
+
+ ret = kvm_get_supported_msrs(s);
+ if (ret < 0) {
+ return ret;
+ }
+
+ uname(&utsname);
+ lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;
+
+ /*
+ * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly.
+ * In order to use vm86 mode, an EPT identity map and a TSS are needed.
+ * Since these must be part of guest physical memory, we need to allocate
+ * them, both by setting their start addresses in the kernel and by
+ * creating a corresponding e820 entry. We need 4 pages before the BIOS.
+ *
+ * Older KVM versions may not support setting the identity map base. In
+ * that case we need to stick with the default, i.e. a 256K maximum BIOS
+ * size.
+ */
+ if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) {
+ /* Allows up to 16M BIOSes. */
+ identity_base = 0xfeffc000;
+
+ ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ /* Set TSS base one page after EPT identity map. */
+ ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000);
+ if (ret < 0) {
+ return ret;
+ }
+
+ /* Tell fw_cfg to notify the BIOS to reserve the range. */
+ ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED);
+ if (ret < 0) {
+ fprintf(stderr, "e820_add_entry() table is full\n");
+ return ret;
+ }
+ qemu_register_reset(kvm_unpoison_all, NULL);
+
+ shadow_mem = machine_kvm_shadow_mem(ms);
+ if (shadow_mem != -1) {
+ shadow_mem /= 4096;
+ ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ if (kvm_check_extension(s, KVM_CAP_X86_SMM)) {
+ smram_machine_done.notify = register_smram_listener;
+ qemu_add_machine_init_done_notifier(&smram_machine_done);
+ }
+ return 0;
+}
+
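+/* Synthesize the fixed segment attributes used in virtual-8086 mode:
+ * a present, ring-3, 16-bit read/write data segment.
+ */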
+static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
+{
+ lhs->selector = rhs->selector;
+ lhs->base = rhs->base;
+ lhs->limit = rhs->limit;
+ lhs->type = 3;
+ lhs->present = 1;
+ lhs->dpl = 3;
+ lhs->db = 0;
+ lhs->s = 1;
+ lhs->l = 0;
+ lhs->g = 0;
+ lhs->avl = 0;
+ lhs->unusable = 0;
+}
+
+static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
+{
+ unsigned flags = rhs->flags;
+ lhs->selector = rhs->selector;
+ lhs->base = rhs->base;
+ lhs->limit = rhs->limit;
+ lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
+ lhs->present = (flags & DESC_P_MASK) != 0;
+ lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3;
+ lhs->db = (flags >> DESC_B_SHIFT) & 1;
+ lhs->s = (flags & DESC_S_MASK) != 0;
+ lhs->l = (flags >> DESC_L_SHIFT) & 1;
+ lhs->g = (flags & DESC_G_MASK) != 0;
+ lhs->avl = (flags & DESC_AVL_MASK) != 0;
+ lhs->unusable = !lhs->present;
+ lhs->padding = 0;
+}
+
+static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
+{
+ lhs->selector = rhs->selector;
+ lhs->base = rhs->base;
+ lhs->limit = rhs->limit;
+ if (rhs->unusable) {
+ lhs->flags = 0;
+ } else {
+ lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
+ (rhs->present * DESC_P_MASK) |
+ (rhs->dpl << DESC_DPL_SHIFT) |
+ (rhs->db << DESC_B_SHIFT) |
+ (rhs->s * DESC_S_MASK) |
+ (rhs->l << DESC_L_SHIFT) |
+ (rhs->g * DESC_G_MASK) |
+ (rhs->avl * DESC_AVL_MASK);
+ }
+}
+
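+/* Copy a register between QEMU and KVM representations; 'set' selects the
+ * direction (QEMU -> KVM when non-zero, KVM -> QEMU otherwise).
+ */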
+static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
+{
+ if (set) {
+ *kvm_reg = *qemu_reg;
+ } else {
+ *qemu_reg = *kvm_reg;
+ }
+}
+
+static int kvm_getput_regs(X86CPU *cpu, int set)
+{
+ CPUX86State *env = &cpu->env;
+ struct kvm_regs regs;
+ int ret = 0;
+
+ if (!set) {
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_REGS, &regs);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ kvm_getput_reg(&regs.rax, &env->regs[R_EAX], set);
+ kvm_getput_reg(&regs.rbx, &env->regs[R_EBX], set);
+ kvm_getput_reg(&regs.rcx, &env->regs[R_ECX], set);
+ kvm_getput_reg(&regs.rdx, &env->regs[R_EDX], set);
+ kvm_getput_reg(&regs.rsi, &env->regs[R_ESI], set);
+ kvm_getput_reg(&regs.rdi, &env->regs[R_EDI], set);
+ kvm_getput_reg(&regs.rsp, &env->regs[R_ESP], set);
+ kvm_getput_reg(&regs.rbp, &env->regs[R_EBP], set);
+#ifdef TARGET_X86_64
+ kvm_getput_reg(&regs.r8, &env->regs[8], set);
+ kvm_getput_reg(&regs.r9, &env->regs[9], set);
+ kvm_getput_reg(&regs.r10, &env->regs[10], set);
+ kvm_getput_reg(&regs.r11, &env->regs[11], set);
+ kvm_getput_reg(&regs.r12, &env->regs[12], set);
+ kvm_getput_reg(&regs.r13, &env->regs[13], set);
+ kvm_getput_reg(&regs.r14, &env->regs[14], set);
+ kvm_getput_reg(&regs.r15, &env->regs[15], set);
+#endif
+
+ kvm_getput_reg(&regs.rflags, &env->eflags, set);
+ kvm_getput_reg(&regs.rip, &env->eip, set);
+
+ if (set) {
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_REGS, &regs);
+ }
+
+ return ret;
+}
+
+static int kvm_put_fpu(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ struct kvm_fpu fpu;
+ int i;
+
+ memset(&fpu, 0, sizeof fpu);
+ fpu.fsw = env->fpus & ~(7 << 11);
+ fpu.fsw |= (env->fpstt & 7) << 11;
+ fpu.fcw = env->fpuc;
+ fpu.last_opcode = env->fpop;
+ fpu.last_ip = env->fpip;
+ fpu.last_dp = env->fpdp;
+ for (i = 0; i < 8; ++i) {
+ fpu.ftwx |= (!env->fptags[i]) << i;
+ }
+ memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs);
+ for (i = 0; i < CPU_NB_REGS; i++) {
+ stq_p(&fpu.xmm[i][0], env->xmm_regs[i].ZMM_Q(0));
+ stq_p(&fpu.xmm[i][8], env->xmm_regs[i].ZMM_Q(1));
+ }
+ fpu.mxcsr = env->mxcsr;
+
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_FPU, &fpu);
+}
+
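+/* Offsets below are in 32-bit words within the kvm_xsave region; the
+ * ASSERT_OFFSET checks further down verify that they match X86XSaveArea.
+ */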
+#define XSAVE_FCW_FSW 0
+#define XSAVE_FTW_FOP 1
+#define XSAVE_CWD_RIP 2
+#define XSAVE_CWD_RDP 4
+#define XSAVE_MXCSR 6
+#define XSAVE_ST_SPACE 8
+#define XSAVE_XMM_SPACE 40
+#define XSAVE_XSTATE_BV 128
+#define XSAVE_YMMH_SPACE 144
+#define XSAVE_BNDREGS 240
+#define XSAVE_BNDCSR 256
+#define XSAVE_OPMASK 272
+#define XSAVE_ZMM_Hi256 288
+#define XSAVE_Hi16_ZMM 416
+#define XSAVE_PKRU 672
+
+#define XSAVE_BYTE_OFFSET(word_offset) \
+ ((word_offset) * sizeof(((struct kvm_xsave *)0)->region[0]))
+
+#define ASSERT_OFFSET(word_offset, field) \
+ QEMU_BUILD_BUG_ON(XSAVE_BYTE_OFFSET(word_offset) != \
+ offsetof(X86XSaveArea, field))
+
+ASSERT_OFFSET(XSAVE_FCW_FSW, legacy.fcw);
+ASSERT_OFFSET(XSAVE_FTW_FOP, legacy.ftw);
+ASSERT_OFFSET(XSAVE_CWD_RIP, legacy.fpip);
+ASSERT_OFFSET(XSAVE_CWD_RDP, legacy.fpdp);
+ASSERT_OFFSET(XSAVE_MXCSR, legacy.mxcsr);
+ASSERT_OFFSET(XSAVE_ST_SPACE, legacy.fpregs);
+ASSERT_OFFSET(XSAVE_XMM_SPACE, legacy.xmm_regs);
+ASSERT_OFFSET(XSAVE_XSTATE_BV, header.xstate_bv);
+ASSERT_OFFSET(XSAVE_YMMH_SPACE, avx_state);
+ASSERT_OFFSET(XSAVE_BNDREGS, bndreg_state);
+ASSERT_OFFSET(XSAVE_BNDCSR, bndcsr_state);
+ASSERT_OFFSET(XSAVE_OPMASK, opmask_state);
+ASSERT_OFFSET(XSAVE_ZMM_Hi256, zmm_hi256_state);
+ASSERT_OFFSET(XSAVE_Hi16_ZMM, hi16_zmm_state);
+ASSERT_OFFSET(XSAVE_PKRU, pkru_state);
+
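+/*
+ * Serialize the FPU/SSE/AVX/MPX/AVX-512/PKRU state into the xsave buffer,
+ * falling back to kvm_put_fpu() when XSAVE is not available.
+ */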
+static int kvm_put_xsave(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ X86XSaveArea *xsave = env->kvm_xsave_buf;
+ uint16_t cwd, swd, twd;
+ int i;
+
+ if (!has_xsave) {
+ return kvm_put_fpu(cpu);
+ }
+
+ memset(xsave, 0, sizeof(struct kvm_xsave));
+ twd = 0;
+ swd = env->fpus & ~(7 << 11);
+ swd |= (env->fpstt & 7) << 11;
+ cwd = env->fpuc;
+ for (i = 0; i < 8; ++i) {
+ twd |= (!env->fptags[i]) << i;
+ }
+ xsave->legacy.fcw = cwd;
+ xsave->legacy.fsw = swd;
+ xsave->legacy.ftw = twd;
+ xsave->legacy.fpop = env->fpop;
+ xsave->legacy.fpip = env->fpip;
+ xsave->legacy.fpdp = env->fpdp;
+ memcpy(&xsave->legacy.fpregs, env->fpregs,
+ sizeof env->fpregs);
+ xsave->legacy.mxcsr = env->mxcsr;
+ xsave->header.xstate_bv = env->xstate_bv;
+ memcpy(&xsave->bndreg_state.bnd_regs, env->bnd_regs,
+ sizeof env->bnd_regs);
+ xsave->bndcsr_state.bndcsr = env->bndcs_regs;
+ memcpy(&xsave->opmask_state.opmask_regs, env->opmask_regs,
+ sizeof env->opmask_regs);
+
+ for (i = 0; i < CPU_NB_REGS; i++) {
+ uint8_t *xmm = xsave->legacy.xmm_regs[i];
+ uint8_t *ymmh = xsave->avx_state.ymmh[i];
+ uint8_t *zmmh = xsave->zmm_hi256_state.zmm_hi256[i];
+ stq_p(xmm, env->xmm_regs[i].ZMM_Q(0));
+ stq_p(xmm+8, env->xmm_regs[i].ZMM_Q(1));
+ stq_p(ymmh, env->xmm_regs[i].ZMM_Q(2));
+ stq_p(ymmh+8, env->xmm_regs[i].ZMM_Q(3));
+ stq_p(zmmh, env->xmm_regs[i].ZMM_Q(4));
+ stq_p(zmmh+8, env->xmm_regs[i].ZMM_Q(5));
+ stq_p(zmmh+16, env->xmm_regs[i].ZMM_Q(6));
+ stq_p(zmmh+24, env->xmm_regs[i].ZMM_Q(7));
+ }
+
+#ifdef TARGET_X86_64
+ memcpy(&xsave->hi16_zmm_state.hi16_zmm, &env->xmm_regs[16],
+ 16 * sizeof env->xmm_regs[16]);
+ memcpy(&xsave->pkru_state, &env->pkru, sizeof env->pkru);
+#endif
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave);
+}
+
+static int kvm_put_xcrs(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ struct kvm_xcrs xcrs = {};
+
+ if (!has_xcrs) {
+ return 0;
+ }
+
+ xcrs.nr_xcrs = 1;
+ xcrs.flags = 0;
+ xcrs.xcrs[0].xcr = 0;
+ xcrs.xcrs[0].value = env->xcr0;
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XCRS, &xcrs);
+}
+
+static int kvm_put_sregs(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ struct kvm_sregs sregs;
+
+ memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap));
+ if (env->interrupt_injected >= 0) {
+ sregs.interrupt_bitmap[env->interrupt_injected / 64] |=
+ (uint64_t)1 << (env->interrupt_injected % 64);
+ }
+
+ if ((env->eflags & VM_MASK)) {
+ set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
+ set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
+ set_v8086_seg(&sregs.es, &env->segs[R_ES]);
+ set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
+ set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
+ set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
+ } else {
+ set_seg(&sregs.cs, &env->segs[R_CS]);
+ set_seg(&sregs.ds, &env->segs[R_DS]);
+ set_seg(&sregs.es, &env->segs[R_ES]);
+ set_seg(&sregs.fs, &env->segs[R_FS]);
+ set_seg(&sregs.gs, &env->segs[R_GS]);
+ set_seg(&sregs.ss, &env->segs[R_SS]);
+ }
+
+ set_seg(&sregs.tr, &env->tr);
+ set_seg(&sregs.ldt, &env->ldt);
+
+ sregs.idt.limit = env->idt.limit;
+ sregs.idt.base = env->idt.base;
+ memset(sregs.idt.padding, 0, sizeof sregs.idt.padding);
+ sregs.gdt.limit = env->gdt.limit;
+ sregs.gdt.base = env->gdt.base;
+ memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding);
+
+ sregs.cr0 = env->cr[0];
+ sregs.cr2 = env->cr[2];
+ sregs.cr3 = env->cr[3];
+ sregs.cr4 = env->cr[4];
+
+ sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state);
+ sregs.apic_base = cpu_get_apic_base(cpu->apic_state);
+
+ sregs.efer = env->efer;
+
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
+}
+
+static void kvm_msr_buf_reset(X86CPU *cpu)
+{
+ memset(cpu->kvm_msr_buf, 0, MSR_BUF_SIZE);
+}
+
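+/* Append one MSR entry to the vcpu's pre-allocated MSR buffer; the assert
+ * guards against overflowing MSR_BUF_SIZE.
+ */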
+static void kvm_msr_entry_add(X86CPU *cpu, uint32_t index, uint64_t value)
+{
+ struct kvm_msrs *msrs = cpu->kvm_msr_buf;
+ void *limit = ((void *)msrs) + MSR_BUF_SIZE;
+ struct kvm_msr_entry *entry = &msrs->entries[msrs->nmsrs];
+
+ assert((void *)(entry + 1) <= limit);
+
+ entry->index = index;
+ entry->reserved = 0;
+ entry->data = value;
+ msrs->nmsrs++;
+}
+
+static int kvm_put_one_msr(X86CPU *cpu, int index, uint64_t value)
+{
+ kvm_msr_buf_reset(cpu);
+ kvm_msr_entry_add(cpu, index, value);
+
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
+}
+
+void kvm_put_apicbase(X86CPU *cpu, uint64_t value)
+{
+ int ret;
+
+ ret = kvm_put_one_msr(cpu, MSR_IA32_APICBASE, value);
+ assert(ret == 1);
+}
+
+static int kvm_put_tscdeadline_msr(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ int ret;
+
+ if (!has_msr_tsc_deadline) {
+ return 0;
+ }
+
+ ret = kvm_put_one_msr(cpu, MSR_IA32_TSCDEADLINE, env->tsc_deadline);
+ if (ret < 0) {
+ return ret;
+ }
+
+ assert(ret == 1);
+ return 0;
+}
+
+/*
+ * Provide a separate write service for the feature control MSR in order to
+ * kick the VCPU out of VMXON or even guest mode on reset. This has to be done
+ * before writing any other state because forcibly leaving nested mode
+ * invalidates the VCPU state.
+ */
+static int kvm_put_msr_feature_control(X86CPU *cpu)
+{
+ int ret;
+
+ if (!has_msr_feature_control) {
+ return 0;
+ }
+
+ ret = kvm_put_one_msr(cpu, MSR_IA32_FEATURE_CONTROL,
+ cpu->env.msr_ia32_feature_control);
+ if (ret < 0) {
+ return ret;
+ }
+
+ assert(ret == 1);
+ return 0;
+}
+
+static int kvm_put_msrs(X86CPU *cpu, int level)
+{
+ CPUX86State *env = &cpu->env;
+ int i;
+ int ret;
+
+ kvm_msr_buf_reset(cpu);
+
+ kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, env->sysenter_cs);
+ kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
+ kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
+ kvm_msr_entry_add(cpu, MSR_PAT, env->pat);
+ if (has_msr_star) {
+ kvm_msr_entry_add(cpu, MSR_STAR, env->star);
+ }
+ if (has_msr_hsave_pa) {
+ kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, env->vm_hsave);
+ }
+ if (has_msr_tsc_aux) {
+ kvm_msr_entry_add(cpu, MSR_TSC_AUX, env->tsc_aux);
+ }
+ if (has_msr_tsc_adjust) {
+ kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, env->tsc_adjust);
+ }
+ if (has_msr_misc_enable) {
+ kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE,
+ env->msr_ia32_misc_enable);
+ }
+ if (has_msr_smbase) {
+ kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, env->smbase);
+ }
+ if (has_msr_bndcfgs) {
+ kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, env->msr_bndcfgs);
+ }
+ if (has_msr_xss) {
+ kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss);
+ }
+#ifdef TARGET_X86_64
+ if (lm_capable_kernel) {
+ kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar);
+ kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, env->kernelgsbase);
+ kvm_msr_entry_add(cpu, MSR_FMASK, env->fmask);
+ kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar);
+ }
+#endif
+ /*
+ * The following MSRs have side effects on the guest or are too heavy
+ * for normal writeback. Limit them to reset or full state updates.
+ */
+ if (level >= KVM_PUT_RESET_STATE) {
+ kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc);
+ kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr);
+ kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
+ if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
+ kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr);
+ }
+ if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
+ kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr);
+ }
+ if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
+ kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr);
+ }
+ if (has_msr_architectural_pmu) {
+ /* Stop the counter. */
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
+
+ /* Set the counter values. */
+ for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i,
+ env->msr_fixed_counters[i]);
+ }
+ for (i = 0; i < num_architectural_pmu_counters; i++) {
+ kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i,
+ env->msr_gp_counters[i]);
+ kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i,
+ env->msr_gp_evtsel[i]);
+ }
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS,
+ env->msr_global_status);
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
+ env->msr_global_ovf_ctrl);
+
+ /* Now start the PMU. */
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL,
+ env->msr_fixed_ctr_ctrl);
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL,
+ env->msr_global_ctrl);
+ }
+ if (has_msr_hv_hypercall) {
+ kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID,
+ env->msr_hv_guest_os_id);
+ kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
+ env->msr_hv_hypercall);
+ }
+ if (cpu->hyperv_vapic) {
+ kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE,
+ env->msr_hv_vapic);
+ }
+ if (cpu->hyperv_time) {
+ kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, env->msr_hv_tsc);
+ }
+ if (has_msr_hv_crash) {
+ int j;
+
+            for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++) {
+                kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j,
+                                  env->msr_hv_crash_params[j]);
+            }
+
+ kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL,
+ HV_X64_MSR_CRASH_CTL_NOTIFY);
+ }
+ if (has_msr_hv_runtime) {
+ kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime);
+ }
+ if (cpu->hyperv_synic) {
+ int j;
+
+ kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL,
+ env->msr_hv_synic_control);
+ kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION,
+ env->msr_hv_synic_version);
+ kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP,
+ env->msr_hv_synic_evt_page);
+ kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP,
+ env->msr_hv_synic_msg_page);
+
+ for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) {
+ kvm_msr_entry_add(cpu, HV_X64_MSR_SINT0 + j,
+ env->msr_hv_synic_sint[j]);
+ }
+ }
+ if (has_msr_hv_stimer) {
+ int j;
+
+ for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_config); j++) {
+ kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_CONFIG + j * 2,
+ env->msr_hv_stimer_config[j]);
+ }
+
+ for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_count); j++) {
+ kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_COUNT + j * 2,
+ env->msr_hv_stimer_count[j]);
+ }
+ }
+ if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
+ uint64_t phys_mask = MAKE_64BIT_MASK(0, cpu->phys_bits);
+
+ kvm_msr_entry_add(cpu, MSR_MTRRdefType, env->mtrr_deftype);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, env->mtrr_fixed[0]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, env->mtrr_fixed[1]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]);
+ for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
+ /* The CPU GPs if we write to a bit above the physical limit of
+ * the host CPU (and KVM emulates that)
+ */
+ uint64_t mask = env->mtrr_var[i].mask;
+ mask &= phys_mask;
+
+ kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i),
+ env->mtrr_var[i].base);
+ kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), mask);
+ }
+ }
+
+ /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
+ * kvm_put_msr_feature_control. */
+ }
+ if (env->mcg_cap) {
+ int i;
+
+ kvm_msr_entry_add(cpu, MSR_MCG_STATUS, env->mcg_status);
+ kvm_msr_entry_add(cpu, MSR_MCG_CTL, env->mcg_ctl);
+ if (has_msr_mcg_ext_ctl) {
+ kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, env->mcg_ext_ctl);
+ }
+ for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
+ kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, env->mce_banks[i]);
+ }
+ }
+
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
+ if (ret < 0) {
+ return ret;
+ }
+
+ assert(ret == cpu->kvm_msr_buf->nmsrs);
+ return 0;
+}
+
+
+static int kvm_get_fpu(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ struct kvm_fpu fpu;
+ int i, ret;
+
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_FPU, &fpu);
+ if (ret < 0) {
+ return ret;
+ }
+
+ env->fpstt = (fpu.fsw >> 11) & 7;
+ env->fpus = fpu.fsw;
+ env->fpuc = fpu.fcw;
+ env->fpop = fpu.last_opcode;
+ env->fpip = fpu.last_ip;
+ env->fpdp = fpu.last_dp;
+ for (i = 0; i < 8; ++i) {
+ env->fptags[i] = !((fpu.ftwx >> i) & 1);
+ }
+ memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs);
+ for (i = 0; i < CPU_NB_REGS; i++) {
+ env->xmm_regs[i].ZMM_Q(0) = ldq_p(&fpu.xmm[i][0]);
+ env->xmm_regs[i].ZMM_Q(1) = ldq_p(&fpu.xmm[i][8]);
+ }
+ env->mxcsr = fpu.mxcsr;
+
+ return 0;
+}
+
+static int kvm_get_xsave(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ X86XSaveArea *xsave = env->kvm_xsave_buf;
+ int ret, i;
+ uint16_t cwd, swd, twd;
+
+ if (!has_xsave) {
+ return kvm_get_fpu(cpu);
+ }
+
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XSAVE, xsave);
+ if (ret < 0) {
+ return ret;
+ }
+
+ cwd = xsave->legacy.fcw;
+ swd = xsave->legacy.fsw;
+ twd = xsave->legacy.ftw;
+ env->fpop = xsave->legacy.fpop;
+ env->fpstt = (swd >> 11) & 7;
+ env->fpus = swd;
+ env->fpuc = cwd;
+ for (i = 0; i < 8; ++i) {
+ env->fptags[i] = !((twd >> i) & 1);
+ }
+ env->fpip = xsave->legacy.fpip;
+ env->fpdp = xsave->legacy.fpdp;
+ env->mxcsr = xsave->legacy.mxcsr;
+ memcpy(env->fpregs, &xsave->legacy.fpregs,
+ sizeof env->fpregs);
+ env->xstate_bv = xsave->header.xstate_bv;
+ memcpy(env->bnd_regs, &xsave->bndreg_state.bnd_regs,
+ sizeof env->bnd_regs);
+ env->bndcs_regs = xsave->bndcsr_state.bndcsr;
+ memcpy(env->opmask_regs, &xsave->opmask_state.opmask_regs,
+ sizeof env->opmask_regs);
+
+ for (i = 0; i < CPU_NB_REGS; i++) {
+ uint8_t *xmm = xsave->legacy.xmm_regs[i];
+ uint8_t *ymmh = xsave->avx_state.ymmh[i];
+ uint8_t *zmmh = xsave->zmm_hi256_state.zmm_hi256[i];
+ env->xmm_regs[i].ZMM_Q(0) = ldq_p(xmm);
+ env->xmm_regs[i].ZMM_Q(1) = ldq_p(xmm+8);
+ env->xmm_regs[i].ZMM_Q(2) = ldq_p(ymmh);
+ env->xmm_regs[i].ZMM_Q(3) = ldq_p(ymmh+8);
+ env->xmm_regs[i].ZMM_Q(4) = ldq_p(zmmh);
+ env->xmm_regs[i].ZMM_Q(5) = ldq_p(zmmh+8);
+ env->xmm_regs[i].ZMM_Q(6) = ldq_p(zmmh+16);
+ env->xmm_regs[i].ZMM_Q(7) = ldq_p(zmmh+24);
+ }
+
+#ifdef TARGET_X86_64
+ memcpy(&env->xmm_regs[16], &xsave->hi16_zmm_state.hi16_zmm,
+ 16 * sizeof env->xmm_regs[16]);
+ memcpy(&env->pkru, &xsave->pkru_state, sizeof env->pkru);
+#endif
+ return 0;
+}
+
+static int kvm_get_xcrs(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ int i, ret;
+ struct kvm_xcrs xcrs;
+
+ if (!has_xcrs) {
+ return 0;
+ }
+
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs);
+ if (ret < 0) {
+ return ret;
+ }
+
+ for (i = 0; i < xcrs.nr_xcrs; i++) {
+ /* Only support xcr0 now */
+ if (xcrs.xcrs[i].xcr == 0) {
+ env->xcr0 = xcrs.xcrs[i].value;
+ break;
+ }
+ }
+ return 0;
+}
+
+static int kvm_get_sregs(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ struct kvm_sregs sregs;
+ uint32_t hflags;
+ int bit, i, ret;
+
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
+ if (ret < 0) {
+ return ret;
+ }
+
+ /* There can only be one pending IRQ set in the bitmap at a time, so try
+ to find it and save its number instead (-1 for none). */
+ env->interrupt_injected = -1;
+ for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) {
+ if (sregs.interrupt_bitmap[i]) {
+ bit = ctz64(sregs.interrupt_bitmap[i]);
+ env->interrupt_injected = i * 64 + bit;
+ break;
+ }
+ }
+
+ get_seg(&env->segs[R_CS], &sregs.cs);
+ get_seg(&env->segs[R_DS], &sregs.ds);
+ get_seg(&env->segs[R_ES], &sregs.es);
+ get_seg(&env->segs[R_FS], &sregs.fs);
+ get_seg(&env->segs[R_GS], &sregs.gs);
+ get_seg(&env->segs[R_SS], &sregs.ss);
+
+ get_seg(&env->tr, &sregs.tr);
+ get_seg(&env->ldt, &sregs.ldt);
+
+ env->idt.limit = sregs.idt.limit;
+ env->idt.base = sregs.idt.base;
+ env->gdt.limit = sregs.gdt.limit;
+ env->gdt.base = sregs.gdt.base;
+
+ env->cr[0] = sregs.cr0;
+ env->cr[2] = sregs.cr2;
+ env->cr[3] = sregs.cr3;
+ env->cr[4] = sregs.cr4;
+
+ env->efer = sregs.efer;
+
+ /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */
+
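+/* Recompute the cached hflags from the segment descriptors and control
+ * registers that were just read from KVM. */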
+#define HFLAG_COPY_MASK \
+ ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
+ HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
+ HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
+ HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
+
+ hflags = env->hflags & HFLAG_COPY_MASK;
+ hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
+ hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
+ hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
+ (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
+ hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
+
+ if (env->cr[4] & CR4_OSFXSR_MASK) {
+ hflags |= HF_OSFXSR_MASK;
+ }
+
+ if (env->efer & MSR_EFER_LMA) {
+ hflags |= HF_LMA_MASK;
+ }
+
+ if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
+ hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
+ } else {
+ hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
+ (DESC_B_SHIFT - HF_CS32_SHIFT);
+ hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
+ (DESC_B_SHIFT - HF_SS32_SHIFT);
+ if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
+ !(hflags & HF_CS32_MASK)) {
+ hflags |= HF_ADDSEG_MASK;
+ } else {
+ hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
+ env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
+ }
+ }
+ env->hflags = hflags;
+
+ return 0;
+}
+
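+/*
+ * Mirror kvm_put_msrs(): build the list of MSRs to query, fetch them with a
+ * single KVM_GET_MSRS call, and scatter the results back into env.
+ */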
+static int kvm_get_msrs(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries;
+ int ret, i;
+ uint64_t mtrr_top_bits;
+
+ kvm_msr_buf_reset(cpu);
+
+ kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, 0);
+ kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, 0);
+ kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, 0);
+ kvm_msr_entry_add(cpu, MSR_PAT, 0);
+ if (has_msr_star) {
+ kvm_msr_entry_add(cpu, MSR_STAR, 0);
+ }
+ if (has_msr_hsave_pa) {
+ kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, 0);
+ }
+ if (has_msr_tsc_aux) {
+ kvm_msr_entry_add(cpu, MSR_TSC_AUX, 0);
+ }
+ if (has_msr_tsc_adjust) {
+ kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, 0);
+ }
+ if (has_msr_tsc_deadline) {
+ kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, 0);
+ }
+ if (has_msr_misc_enable) {
+ kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 0);
+ }
+ if (has_msr_smbase) {
+ kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, 0);
+ }
+ if (has_msr_feature_control) {
+ kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL, 0);
+ }
+ if (has_msr_bndcfgs) {
+ kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, 0);
+ }
+ if (has_msr_xss) {
+ kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0);
+ }
+
+
+ if (!env->tsc_valid) {
+ kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0);
+ env->tsc_valid = !runstate_is_running();
+ }
+
+#ifdef TARGET_X86_64
+ if (lm_capable_kernel) {
+ kvm_msr_entry_add(cpu, MSR_CSTAR, 0);
+ kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, 0);
+ kvm_msr_entry_add(cpu, MSR_FMASK, 0);
+ kvm_msr_entry_add(cpu, MSR_LSTAR, 0);
+ }
+#endif
+ kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0);
+ kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0);
+ if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) {
+ kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0);
+ }
+ if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) {
+ kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0);
+ }
+ if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
+ kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
+ }
+ if (has_msr_architectural_pmu) {
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0);
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0);
+ for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
+ kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0);
+ }
+ for (i = 0; i < num_architectural_pmu_counters; i++) {
+ kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0);
+ kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0);
+ }
+ }
+
+ if (env->mcg_cap) {
+ kvm_msr_entry_add(cpu, MSR_MCG_STATUS, 0);
+ kvm_msr_entry_add(cpu, MSR_MCG_CTL, 0);
+ if (has_msr_mcg_ext_ctl) {
+ kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, 0);
+ }
+ for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) {
+ kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, 0);
+ }
+ }
+
+ if (has_msr_hv_hypercall) {
+ kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0);
+ kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0);
+ }
+ if (cpu->hyperv_vapic) {
+ kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0);
+ }
+ if (cpu->hyperv_time) {
+ kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0);
+ }
+ if (has_msr_hv_crash) {
+ int j;
+
+ for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++) {
+ kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0);
+ }
+ }
+ if (has_msr_hv_runtime) {
+ kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, 0);
+ }
+ if (cpu->hyperv_synic) {
+ uint32_t msr;
+
+ kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0);
+ kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, 0);
+ kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 0);
+ kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 0);
+ for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
+ kvm_msr_entry_add(cpu, msr, 0);
+ }
+ }
+ if (has_msr_hv_stimer) {
+ uint32_t msr;
+
+ for (msr = HV_X64_MSR_STIMER0_CONFIG; msr <= HV_X64_MSR_STIMER3_COUNT;
+ msr++) {
+ kvm_msr_entry_add(cpu, msr, 0);
+ }
+ }
+ if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
+ kvm_msr_entry_add(cpu, MSR_MTRRdefType, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, 0);
+ for (i = 0; i < MSR_MTRRcap_VCNT; i++) {
+ kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 0);
+ kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), 0);
+ }
+ }
+
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
+ if (ret < 0) {
+ return ret;
+ }
+
+ assert(ret == cpu->kvm_msr_buf->nmsrs);
+ /*
+ * MTRR masks: Each mask consists of 5 parts
+ * a 10..0: must be zero
+ * b 11 : valid bit
+     * c n-1..12: actual mask bits
+     * d  51..n: reserved, must be zero
+     * e  63..52: reserved, must be zero
+ *
+ * 'n' is the number of physical bits supported by the CPU and is
+ * apparently always <= 52. We know our 'n' but don't know what
+     * the destination's 'n' is; it might be smaller, in which case
+     * it masks (c) on loading. It might be larger, in which case
+     * we fill 'd' so that d..c is consistent irrespective of the 'n'
+ * we're migrating to.
+ */
+
+ if (cpu->fill_mtrr_mask) {
+ QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 52);
+ assert(cpu->phys_bits <= TARGET_PHYS_ADDR_SPACE_BITS);
+ mtrr_top_bits = MAKE_64BIT_MASK(cpu->phys_bits, 52 - cpu->phys_bits);
+ } else {
+ mtrr_top_bits = 0;
+ }
+
+ for (i = 0; i < ret; i++) {
+ uint32_t index = msrs[i].index;
+ switch (index) {
+ case MSR_IA32_SYSENTER_CS:
+ env->sysenter_cs = msrs[i].data;
+ break;
+ case MSR_IA32_SYSENTER_ESP:
+ env->sysenter_esp = msrs[i].data;
+ break;
+ case MSR_IA32_SYSENTER_EIP:
+ env->sysenter_eip = msrs[i].data;
+ break;
+ case MSR_PAT:
+ env->pat = msrs[i].data;
+ break;
+ case MSR_STAR:
+ env->star = msrs[i].data;
+ break;
+#ifdef TARGET_X86_64
+ case MSR_CSTAR:
+ env->cstar = msrs[i].data;
+ break;
+ case MSR_KERNELGSBASE:
+ env->kernelgsbase = msrs[i].data;
+ break;
+ case MSR_FMASK:
+ env->fmask = msrs[i].data;
+ break;
+ case MSR_LSTAR:
+ env->lstar = msrs[i].data;
+ break;
+#endif
+ case MSR_IA32_TSC:
+ env->tsc = msrs[i].data;
+ break;
+ case MSR_TSC_AUX:
+ env->tsc_aux = msrs[i].data;
+ break;
+ case MSR_TSC_ADJUST:
+ env->tsc_adjust = msrs[i].data;
+ break;
+ case MSR_IA32_TSCDEADLINE:
+ env->tsc_deadline = msrs[i].data;
+ break;
+ case MSR_VM_HSAVE_PA:
+ env->vm_hsave = msrs[i].data;
+ break;
+ case MSR_KVM_SYSTEM_TIME:
+ env->system_time_msr = msrs[i].data;
+ break;
+ case MSR_KVM_WALL_CLOCK:
+ env->wall_clock_msr = msrs[i].data;
+ break;
+ case MSR_MCG_STATUS:
+ env->mcg_status = msrs[i].data;
+ break;
+ case MSR_MCG_CTL:
+ env->mcg_ctl = msrs[i].data;
+ break;
+ case MSR_MCG_EXT_CTL:
+ env->mcg_ext_ctl = msrs[i].data;
+ break;
+ case MSR_IA32_MISC_ENABLE:
+ env->msr_ia32_misc_enable = msrs[i].data;
+ break;
+ case MSR_IA32_SMBASE:
+ env->smbase = msrs[i].data;
+ break;
+ case MSR_IA32_FEATURE_CONTROL:
+ env->msr_ia32_feature_control = msrs[i].data;
+ break;
+ case MSR_IA32_BNDCFGS:
+ env->msr_bndcfgs = msrs[i].data;
+ break;
+ case MSR_IA32_XSS:
+ env->xss = msrs[i].data;
+ break;
+ default:
+ if (msrs[i].index >= MSR_MC0_CTL &&
+ msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
+ env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data;
+ }
+ break;
+ case MSR_KVM_ASYNC_PF_EN:
+ env->async_pf_en_msr = msrs[i].data;
+ break;
+ case MSR_KVM_PV_EOI_EN:
+ env->pv_eoi_en_msr = msrs[i].data;
+ break;
+ case MSR_KVM_STEAL_TIME:
+ env->steal_time_msr = msrs[i].data;
+ break;
+ case MSR_CORE_PERF_FIXED_CTR_CTRL:
+ env->msr_fixed_ctr_ctrl = msrs[i].data;
+ break;
+ case MSR_CORE_PERF_GLOBAL_CTRL:
+ env->msr_global_ctrl = msrs[i].data;
+ break;
+ case MSR_CORE_PERF_GLOBAL_STATUS:
+ env->msr_global_status = msrs[i].data;
+ break;
+ case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+ env->msr_global_ovf_ctrl = msrs[i].data;
+ break;
+ case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1:
+ env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data;
+ break;
+ case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1:
+ env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data;
+ break;
+ case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1:
+ env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data;
+ break;
+ case HV_X64_MSR_HYPERCALL:
+ env->msr_hv_hypercall = msrs[i].data;
+ break;
+ case HV_X64_MSR_GUEST_OS_ID:
+ env->msr_hv_guest_os_id = msrs[i].data;
+ break;
+ case HV_X64_MSR_APIC_ASSIST_PAGE:
+ env->msr_hv_vapic = msrs[i].data;
+ break;
+ case HV_X64_MSR_REFERENCE_TSC:
+ env->msr_hv_tsc = msrs[i].data;
+ break;
+ case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
+ env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data;
+ break;
+ case HV_X64_MSR_VP_RUNTIME:
+ env->msr_hv_runtime = msrs[i].data;
+ break;
+ case HV_X64_MSR_SCONTROL:
+ env->msr_hv_synic_control = msrs[i].data;
+ break;
+ case HV_X64_MSR_SVERSION:
+ env->msr_hv_synic_version = msrs[i].data;
+ break;
+ case HV_X64_MSR_SIEFP:
+ env->msr_hv_synic_evt_page = msrs[i].data;
+ break;
+ case HV_X64_MSR_SIMP:
+ env->msr_hv_synic_msg_page = msrs[i].data;
+ break;
+ case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
+ env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data;
+ break;
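+ /*
+ * The synthetic timer MSRs interleave CONFIG and COUNT registers
+ * (STIMER0_CONFIG, STIMER0_COUNT, STIMER1_CONFIG, ...), hence the
+ * division by 2 below to recover the timer index.
+ */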
+ case HV_X64_MSR_STIMER0_CONFIG:
+ case HV_X64_MSR_STIMER1_CONFIG:
+ case HV_X64_MSR_STIMER2_CONFIG:
+ case HV_X64_MSR_STIMER3_CONFIG:
+ env->msr_hv_stimer_config[(index - HV_X64_MSR_STIMER0_CONFIG)/2] =
+ msrs[i].data;
+ break;
+ case HV_X64_MSR_STIMER0_COUNT:
+ case HV_X64_MSR_STIMER1_COUNT:
+ case HV_X64_MSR_STIMER2_COUNT:
+ case HV_X64_MSR_STIMER3_COUNT:
+ env->msr_hv_stimer_count[(index - HV_X64_MSR_STIMER0_COUNT)/2] =
+ msrs[i].data;
+ break;
+ case MSR_MTRRdefType:
+ env->mtrr_deftype = msrs[i].data;
+ break;
+ case MSR_MTRRfix64K_00000:
+ env->mtrr_fixed[0] = msrs[i].data;
+ break;
+ case MSR_MTRRfix16K_80000:
+ env->mtrr_fixed[1] = msrs[i].data;
+ break;
+ case MSR_MTRRfix16K_A0000:
+ env->mtrr_fixed[2] = msrs[i].data;
+ break;
+ case MSR_MTRRfix4K_C0000:
+ env->mtrr_fixed[3] = msrs[i].data;
+ break;
+ case MSR_MTRRfix4K_C8000:
+ env->mtrr_fixed[4] = msrs[i].data;
+ break;
+ case MSR_MTRRfix4K_D0000:
+ env->mtrr_fixed[5] = msrs[i].data;
+ break;
+ case MSR_MTRRfix4K_D8000:
+ env->mtrr_fixed[6] = msrs[i].data;
+ break;
+ case MSR_MTRRfix4K_E0000:
+ env->mtrr_fixed[7] = msrs[i].data;
+ break;
+ case MSR_MTRRfix4K_E8000:
+ env->mtrr_fixed[8] = msrs[i].data;
+ break;
+ case MSR_MTRRfix4K_F0000:
+ env->mtrr_fixed[9] = msrs[i].data;
+ break;
+ case MSR_MTRRfix4K_F8000:
+ env->mtrr_fixed[10] = msrs[i].data;
+ break;
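+ /*
+ * Variable-range MTRRs are exposed as base/mask MSR pairs at
+ * consecutive indices (PhysBase0, PhysMask0, PhysBase1, ...),
+ * which is why the parity check below distinguishes mask from
+ * base registers.
+ */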
+ case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1):
+ if (index & 1) {
+ env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data |
+ mtrr_top_bits;
+ } else {
+ env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data;
+ }
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int kvm_put_mp_state(X86CPU *cpu)
+{
+ struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state };
+
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
+}
+
+static int kvm_get_mp_state(X86CPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ CPUX86State *env = &cpu->env;
+ struct kvm_mp_state mp_state;
+ int ret;
+
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state);
+ if (ret < 0) {
+ return ret;
+ }
+ env->mp_state = mp_state.mp_state;
+ if (kvm_irqchip_in_kernel()) {
+ cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED);
+ }
+ return 0;
+}
+
+static int kvm_get_apic(X86CPU *cpu)
+{
+ DeviceState *apic = cpu->apic_state;
+ struct kvm_lapic_state kapic;
+ int ret;
+
+ if (apic && kvm_irqchip_in_kernel()) {
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic);
+ if (ret < 0) {
+ return ret;
+ }
+
+ kvm_get_apic_state(apic, &kapic);
+ }
+ return 0;
+}
+
+static int kvm_put_vcpu_events(X86CPU *cpu, int level)
+{
+ CPUState *cs = CPU(cpu);
+ CPUX86State *env = &cpu->env;
+ struct kvm_vcpu_events events = {};
+
+ if (!kvm_has_vcpu_events()) {
+ return 0;
+ }
+
+ events.exception.injected = (env->exception_injected >= 0);
+ events.exception.nr = env->exception_injected;
+ events.exception.has_error_code = env->has_error_code;
+ events.exception.error_code = env->error_code;
+ events.exception.pad = 0;
+
+ events.interrupt.injected = (env->interrupt_injected >= 0);
+ events.interrupt.nr = env->interrupt_injected;
+ events.interrupt.soft = env->soft_interrupt;
+
+ events.nmi.injected = env->nmi_injected;
+ events.nmi.pending = env->nmi_pending;
+ events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);
+ events.nmi.pad = 0;
+
+ events.sipi_vector = env->sipi_vector;
+ events.flags = 0;
+
+ if (has_msr_smbase) {
+ events.smi.smm = !!(env->hflags & HF_SMM_MASK);
+ events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK);
+ if (kvm_irqchip_in_kernel()) {
+ /* As soon as these are moved to the kernel, remove them
+ * from cs->interrupt_request.
+ */
+ events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
+ events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
+ cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
+ } else {
+ /* Keep these in cs->interrupt_request. */
+ events.smi.pending = 0;
+ events.smi.latched_init = 0;
+ }
+ events.flags |= KVM_VCPUEVENT_VALID_SMM;
+ }
+
+ if (level >= KVM_PUT_RESET_STATE) {
+ events.flags |=
+ KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR;
+ }
+
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events);
+}
+
+static int kvm_get_vcpu_events(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ struct kvm_vcpu_events events;
+ int ret;
+
+ if (!kvm_has_vcpu_events()) {
+ return 0;
+ }
+
+ memset(&events, 0, sizeof(events));
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events);
+ if (ret < 0) {
+ return ret;
+ }
+ env->exception_injected =
+ events.exception.injected ? events.exception.nr : -1;
+ env->has_error_code = events.exception.has_error_code;
+ env->error_code = events.exception.error_code;
+
+ env->interrupt_injected =
+ events.interrupt.injected ? events.interrupt.nr : -1;
+ env->soft_interrupt = events.interrupt.soft;
+
+ env->nmi_injected = events.nmi.injected;
+ env->nmi_pending = events.nmi.pending;
+ if (events.nmi.masked) {
+ env->hflags2 |= HF2_NMI_MASK;
+ } else {
+ env->hflags2 &= ~HF2_NMI_MASK;
+ }
+
+ if (events.flags & KVM_VCPUEVENT_VALID_SMM) {
+ if (events.smi.smm) {
+ env->hflags |= HF_SMM_MASK;
+ } else {
+ env->hflags &= ~HF_SMM_MASK;
+ }
+ if (events.smi.pending) {
+ cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
+ } else {
+ cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI);
+ }
+ if (events.smi.smm_inside_nmi) {
+ env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
+ } else {
+ env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
+ }
+ if (events.smi.latched_init) {
+ cpu_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
+ } else {
+ cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_INIT);
+ }
+ }
+
+ env->sipi_vector = events.sipi_vector;
+
+ return 0;
+}
+
+static int kvm_guest_debug_workarounds(X86CPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ CPUX86State *env = &cpu->env;
+ int ret = 0;
+ unsigned long reinject_trap = 0;
+
+ if (!kvm_has_vcpu_events()) {
+ if (env->exception_injected == 1) {
+ reinject_trap = KVM_GUESTDBG_INJECT_DB;
+ } else if (env->exception_injected == 3) {
+ reinject_trap = KVM_GUESTDBG_INJECT_BP;
+ }
+ env->exception_injected = -1;
+ }
+
+ /*
+ * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
+ * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
+ * by updating the debug state once again if single-stepping is on.
+ * Another reason to call kvm_update_guest_debug here is a pending debug
+ * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have to
+ * reinject it via SET_GUEST_DEBUG.
+ */
+ if (reinject_trap ||
+ (!kvm_has_robust_singlestep() && cs->singlestep_enabled)) {
+ ret = kvm_update_guest_debug(cs, reinject_trap);
+ }
+ return ret;
+}
+
+static int kvm_put_debugregs(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ struct kvm_debugregs dbgregs;
+ int i;
+
+ if (!kvm_has_debugregs()) {
+ return 0;
+ }
+
+ for (i = 0; i < 4; i++) {
+ dbgregs.db[i] = env->dr[i];
+ }
+ dbgregs.dr6 = env->dr[6];
+ dbgregs.dr7 = env->dr[7];
+ dbgregs.flags = 0;
+
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs);
+}
+
+static int kvm_get_debugregs(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ struct kvm_debugregs dbgregs;
+ int i, ret;
+
+ if (!kvm_has_debugregs()) {
+ return 0;
+ }
+
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs);
+ if (ret < 0) {
+ return ret;
+ }
+ for (i = 0; i < 4; i++) {
+ env->dr[i] = dbgregs.db[i];
+ }
+ env->dr[4] = env->dr[6] = dbgregs.dr6;
+ env->dr[5] = env->dr[7] = dbgregs.dr7;
+
+ return 0;
+}
+
+int kvm_arch_put_registers(CPUState *cpu, int level)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ int ret;
+
+ assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
+
+ if (level >= KVM_PUT_RESET_STATE) {
+ ret = kvm_put_msr_feature_control(x86_cpu);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ if (level == KVM_PUT_FULL_STATE) {
+ /* We don't check for kvm_arch_set_tsc_khz() errors here,
+ * because TSC frequency mismatch shouldn't abort migration,
+ * unless the user explicitly asked for a more strict TSC
+ * setting (e.g. using an explicit "tsc-freq" option).
+ */
+ kvm_arch_set_tsc_khz(cpu);
+ }
+
+ ret = kvm_getput_regs(x86_cpu, 1);
+ if (ret < 0) {
+ return ret;
+ }
+ ret = kvm_put_xsave(x86_cpu);
+ if (ret < 0) {
+ return ret;
+ }
+ ret = kvm_put_xcrs(x86_cpu);
+ if (ret < 0) {
+ return ret;
+ }
+ ret = kvm_put_sregs(x86_cpu);
+ if (ret < 0) {
+ return ret;
+ }
+ /* must be before kvm_put_msrs */
+ ret = kvm_inject_mce_oldstyle(x86_cpu);
+ if (ret < 0) {
+ return ret;
+ }
+ ret = kvm_put_msrs(x86_cpu, level);
+ if (ret < 0) {
+ return ret;
+ }
+ if (level >= KVM_PUT_RESET_STATE) {
+ ret = kvm_put_mp_state(x86_cpu);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ ret = kvm_put_tscdeadline_msr(x86_cpu);
+ if (ret < 0) {
+ return ret;
+ }
+
+ ret = kvm_put_vcpu_events(x86_cpu, level);
+ if (ret < 0) {
+ return ret;
+ }
+ ret = kvm_put_debugregs(x86_cpu);
+ if (ret < 0) {
+ return ret;
+ }
+ /* must be last */
+ ret = kvm_guest_debug_workarounds(x86_cpu);
+ if (ret < 0) {
+ return ret;
+ }
+ return 0;
+}
+
+int kvm_arch_get_registers(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ int ret;
+
+ assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs));
+
+ ret = kvm_getput_regs(cpu, 0);
+ if (ret < 0) {
+ goto out;
+ }
+ ret = kvm_get_xsave(cpu);
+ if (ret < 0) {
+ goto out;
+ }
+ ret = kvm_get_xcrs(cpu);
+ if (ret < 0) {
+ goto out;
+ }
+ ret = kvm_get_sregs(cpu);
+ if (ret < 0) {
+ goto out;
+ }
+ ret = kvm_get_msrs(cpu);
+ if (ret < 0) {
+ goto out;
+ }
+ ret = kvm_get_mp_state(cpu);
+ if (ret < 0) {
+ goto out;
+ }
+ ret = kvm_get_apic(cpu);
+ if (ret < 0) {
+ goto out;
+ }
+ ret = kvm_get_vcpu_events(cpu);
+ if (ret < 0) {
+ goto out;
+ }
+ ret = kvm_get_debugregs(cpu);
+ if (ret < 0) {
+ goto out;
+ }
+ ret = 0;
+ out:
+ cpu_sync_bndcs_hflags(&cpu->env);
+ return ret;
+}
+
+void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+ int ret;
+
+ /* Inject NMI */
+ if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
+ if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
+ qemu_mutex_lock_iothread();
+ cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
+ qemu_mutex_unlock_iothread();
+ DPRINTF("injected NMI\n");
+ ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
+ if (ret < 0) {
+ fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n",
+ strerror(-ret));
+ }
+ }
+ if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
+ qemu_mutex_lock_iothread();
+ cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
+ qemu_mutex_unlock_iothread();
+ DPRINTF("injected SMI\n");
+ ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
+ if (ret < 0) {
+ fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n",
+ strerror(-ret));
+ }
+ }
+ }
+
+ if (!kvm_pic_in_kernel()) {
+ qemu_mutex_lock_iothread();
+ }
+
+ /* Force the VCPU out of its inner loop to process any INIT requests
+ * or (for userspace APIC, but it is cheap to combine the checks here)
+ * pending TPR access reports.
+ */
+ if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
+ if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
+ !(env->hflags & HF_SMM_MASK)) {
+ cpu->exit_request = 1;
+ }
+ if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
+ cpu->exit_request = 1;
+ }
+ }
+
+ if (!kvm_pic_in_kernel()) {
+ /* Try to inject an interrupt if the guest can accept it */
+ if (run->ready_for_interrupt_injection &&
+ (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+ (env->eflags & IF_MASK)) {
+ int irq;
+
+ cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
+ irq = cpu_get_pic_interrupt(env);
+ if (irq >= 0) {
+ struct kvm_interrupt intr;
+
+ intr.irq = irq;
+ DPRINTF("injected interrupt %d\n", irq);
+ ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr);
+ if (ret < 0) {
+ fprintf(stderr,
+ "KVM: injection failed, interrupt lost (%s)\n",
+ strerror(-ret));
+ }
+ }
+ }
+
+ /* If we have an interrupt but the guest is not ready to receive an
+ * interrupt, request an interrupt window exit. This will
+ * cause a return to userspace as soon as the guest is ready to
+ * receive interrupts. */
+ if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
+ run->request_interrupt_window = 1;
+ } else {
+ run->request_interrupt_window = 0;
+ }
+
+ DPRINTF("setting tpr\n");
+ run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);
+
+ qemu_mutex_unlock_iothread();
+ }
+}
+
+MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
+{
+ X86CPU *x86_cpu = X86_CPU(cpu);
+ CPUX86State *env = &x86_cpu->env;
+
+ if (run->flags & KVM_RUN_X86_SMM) {
+ env->hflags |= HF_SMM_MASK;
+ } else {
+ env->hflags &= ~HF_SMM_MASK;
+ }
+ if (run->if_flag) {
+ env->eflags |= IF_MASK;
+ } else {
+ env->eflags &= ~IF_MASK;
+ }
+
+ /* We need to protect the apic state against concurrent accesses from
+ * different threads in case the userspace irqchip is used. */
+ if (!kvm_irqchip_in_kernel()) {
+ qemu_mutex_lock_iothread();
+ }
+ cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
+ cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
+ if (!kvm_irqchip_in_kernel()) {
+ qemu_mutex_unlock_iothread();
+ }
+ return cpu_get_mem_attrs(env);
+}
+
+int kvm_arch_process_async_events(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
+ /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
+ assert(env->mcg_cap);
+
+ cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
+
+ kvm_cpu_synchronize_state(cs);
+
+ if (env->exception_injected == EXCP08_DBLE) {
+ /* this means triple fault */
+ qemu_system_reset_request();
+ cs->exit_request = 1;
+ return 0;
+ }
+ env->exception_injected = EXCP12_MCHK;
+ env->has_error_code = 0;
+
+ cs->halted = 0;
+ if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) {
+ env->mp_state = KVM_MP_STATE_RUNNABLE;
+ }
+ }
+
+ if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
+ !(env->hflags & HF_SMM_MASK)) {
+ kvm_cpu_synchronize_state(cs);
+ do_cpu_init(cpu);
+ }
+
+ if (kvm_irqchip_in_kernel()) {
+ return 0;
+ }
+
+ if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
+ cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
+ apic_poll_irq(cpu->apic_state);
+ }
+ if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+ (env->eflags & IF_MASK)) ||
+ (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
+ cs->halted = 0;
+ }
+ if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
+ kvm_cpu_synchronize_state(cs);
+ do_cpu_sipi(cpu);
+ }
+ if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
+ cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
+ kvm_cpu_synchronize_state(cs);
+ apic_handle_tpr_access_report(cpu->apic_state, env->eip,
+ env->tpr_access_type);
+ }
+
+ return cs->halted;
+}
+
+static int kvm_handle_halt(X86CPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ CPUX86State *env = &cpu->env;
+
+ if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+ (env->eflags & IF_MASK)) &&
+ !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
+ cs->halted = 1;
+ return EXCP_HLT;
+ }
+
+ return 0;
+}
+
+static int kvm_handle_tpr_access(X86CPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ struct kvm_run *run = cs->kvm_run;
+
+ apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip,
+ run->tpr_access.is_write ? TPR_ACCESS_WRITE
+ : TPR_ACCESS_READ);
+ return 1;
+}
+
+int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+ static const uint8_t int3 = 0xcc;
+
+ if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) ||
+ cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) {
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+ uint8_t int3;
+
+ if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0) || int3 != 0xcc ||
+ cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) {
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static struct {
+ target_ulong addr;
+ int len;
+ int type;
+} hw_breakpoint[4];
+
+static int nb_hw_breakpoint;
+
+static int find_hw_breakpoint(target_ulong addr, int len, int type)
+{
+ int n;
+
+ for (n = 0; n < nb_hw_breakpoint; n++) {
+ if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type &&
+ (hw_breakpoint[n].len == len || len == -1)) {
+ return n;
+ }
+ }
+ return -1;
+}
+
+int kvm_arch_insert_hw_breakpoint(target_ulong addr,
+ target_ulong len, int type)
+{
+ switch (type) {
+ case GDB_BREAKPOINT_HW:
+ len = 1;
+ break;
+ case GDB_WATCHPOINT_WRITE:
+ case GDB_WATCHPOINT_ACCESS:
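+ /*
+ * Hardware watchpoints cover 1, 2, 4 or 8 bytes and (except for
+ * the 1-byte case) must be naturally aligned, matching the DR7
+ * LEN encoding used in kvm_arch_update_guest_debug() below.
+ */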
+ switch (len) {
+ case 1:
+ break;
+ case 2:
+ case 4:
+ case 8:
+ if (addr & (len - 1)) {
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ if (nb_hw_breakpoint == 4) {
+ return -ENOBUFS;
+ }
+ if (find_hw_breakpoint(addr, len, type) >= 0) {
+ return -EEXIST;
+ }
+ hw_breakpoint[nb_hw_breakpoint].addr = addr;
+ hw_breakpoint[nb_hw_breakpoint].len = len;
+ hw_breakpoint[nb_hw_breakpoint].type = type;
+ nb_hw_breakpoint++;
+
+ return 0;
+}
+
+int kvm_arch_remove_hw_breakpoint(target_ulong addr,
+ target_ulong len, int type)
+{
+ int n;
+
+ n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
+ if (n < 0) {
+ return -ENOENT;
+ }
+ nb_hw_breakpoint--;
+ hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint];
+
+ return 0;
+}
+
+void kvm_arch_remove_all_hw_breakpoints(void)
+{
+ nb_hw_breakpoint = 0;
+}
+
+static CPUWatchpoint hw_watchpoint;
+
+static int kvm_handle_debug(X86CPU *cpu,
+ struct kvm_debug_exit_arch *arch_info)
+{
+ CPUState *cs = CPU(cpu);
+ CPUX86State *env = &cpu->env;
+ int ret = 0;
+ int n;
+
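+ /*
+ * Exception 1 is #DB. DR6 bit 14 (BS) flags a single-step trap;
+ * DR6 bits 0..3 report which hardware breakpoint triggered, and
+ * the R/W field in DR7 (2 bits per breakpoint at 16 + n*4)
+ * distinguishes execute (0), write (1) and access (3).
+ */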
+ if (arch_info->exception == 1) {
+ if (arch_info->dr6 & (1 << 14)) {
+ if (cs->singlestep_enabled) {
+ ret = EXCP_DEBUG;
+ }
+ } else {
+ for (n = 0; n < 4; n++) {
+ if (arch_info->dr6 & (1 << n)) {
+ switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) {
+ case 0x0:
+ ret = EXCP_DEBUG;
+ break;
+ case 0x1:
+ ret = EXCP_DEBUG;
+ cs->watchpoint_hit = &hw_watchpoint;
+ hw_watchpoint.vaddr = hw_breakpoint[n].addr;
+ hw_watchpoint.flags = BP_MEM_WRITE;
+ break;
+ case 0x3:
+ ret = EXCP_DEBUG;
+ cs->watchpoint_hit = &hw_watchpoint;
+ hw_watchpoint.vaddr = hw_breakpoint[n].addr;
+ hw_watchpoint.flags = BP_MEM_ACCESS;
+ break;
+ }
+ }
+ }
+ }
+ } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) {
+ ret = EXCP_DEBUG;
+ }
+ if (ret == 0) {
+ cpu_synchronize_state(cs);
+ assert(env->exception_injected == -1);
+
+ /* pass to guest */
+ env->exception_injected = arch_info->exception;
+ env->has_error_code = 0;
+ }
+
+ return ret;
+}
+
+void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
+{
+ const uint8_t type_code[] = {
+ [GDB_BREAKPOINT_HW] = 0x0,
+ [GDB_WATCHPOINT_WRITE] = 0x1,
+ [GDB_WATCHPOINT_ACCESS] = 0x3
+ };
+ const uint8_t len_code[] = {
+ [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2
+ };
+ int n;
+
+ if (kvm_sw_breakpoints_active(cpu)) {
+ dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
+ }
+ if (nb_hw_breakpoint > 0) {
+ dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
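+ /*
+ * Build DR7: 0x0600 sets GE plus the always-one reserved bit;
+ * each breakpoint then gets its global-enable bit (2 << (n*2))
+ * and its R/W and LEN fields at bits 16 + n*4 and 18 + n*4.
+ */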
+ dbg->arch.debugreg[7] = 0x0600;
+ for (n = 0; n < nb_hw_breakpoint; n++) {
+ dbg->arch.debugreg[n] = hw_breakpoint[n].addr;
+ dbg->arch.debugreg[7] |= (2 << (n * 2)) |
+ (type_code[hw_breakpoint[n].type] << (16 + n*4)) |
+ ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4));
+ }
+ }
+}
+
+static bool host_supports_vmx(void)
+{
+ uint32_t ecx, unused;
+
+ host_cpuid(1, 0, &unused, &unused, &ecx, &unused);
+ return ecx & CPUID_EXT_VMX;
+}
+
+#define VMX_INVALID_GUEST_STATE 0x80000021
+
+int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ uint64_t code;
+ int ret;
+
+ switch (run->exit_reason) {
+ case KVM_EXIT_HLT:
+ DPRINTF("handle_hlt\n");
+ qemu_mutex_lock_iothread();
+ ret = kvm_handle_halt(cpu);
+ qemu_mutex_unlock_iothread();
+ break;
+ case KVM_EXIT_SET_TPR:
+ ret = 0;
+ break;
+ case KVM_EXIT_TPR_ACCESS:
+ qemu_mutex_lock_iothread();
+ ret = kvm_handle_tpr_access(cpu);
+ qemu_mutex_unlock_iothread();
+ break;
+ case KVM_EXIT_FAIL_ENTRY:
+ code = run->fail_entry.hardware_entry_failure_reason;
+ fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n",
+ code);
+ if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) {
+ fprintf(stderr,
+ "\nIf you're running a guest on an Intel machine without "
+ "unrestricted mode\n"
+ "support, the failure can be most likely due to the guest "
+ "entering an invalid\n"
+ "state for Intel VT. For example, the guest maybe running "
+ "in big real mode\n"
+ "which is not supported on less recent Intel processors."
+ "\n\n");
+ }
+ ret = -1;
+ break;
+ case KVM_EXIT_EXCEPTION:
+ fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n",
+ run->ex.exception, run->ex.error_code);
+ ret = -1;
+ break;
+ case KVM_EXIT_DEBUG:
+ DPRINTF("kvm_exit_debug\n");
+ qemu_mutex_lock_iothread();
+ ret = kvm_handle_debug(cpu, &run->debug.arch);
+ qemu_mutex_unlock_iothread();
+ break;
+ case KVM_EXIT_HYPERV:
+ ret = kvm_hv_handle_exit(cpu, &run->hyperv);
+ break;
+ case KVM_EXIT_IOAPIC_EOI:
+ ioapic_eoi_broadcast(run->eoi.vector);
+ ret = 0;
+ break;
+ default:
+ fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
+bool kvm_arch_stop_on_emulation_error(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ kvm_cpu_synchronize_state(cs);
+ return !(env->cr[0] & CR0_PE_MASK) ||
+ ((env->segs[R_CS].selector & 3) != 3);
+}
+
+void kvm_arch_init_irq_routing(KVMState *s)
+{
+ if (!kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
+ /* If the kernel can't do IRQ routing, the interrupt source
+ * override 0->2 required by HPET cannot be set up.
+ * So we have to disable HPET.
+ */
+ no_hpet = 1;
+ }
+ /* We know at this point that we're using the in-kernel
+ * irqchip, so we can use irqfds, and on x86 we know
+ * we can use msi via irqfd and GSI routing.
+ */
+ kvm_msi_via_irqfd_allowed = true;
+ kvm_gsi_routing_allowed = true;
+
+ if (kvm_irqchip_is_split()) {
+ int i;
+
+ /* If the ioapic is in QEMU and the lapics are in KVM, reserve
+ MSI routes for signaling interrupts to the local apics. */
+ for (i = 0; i < IOAPIC_NUM_PINS; i++) {
+ if (kvm_irqchip_add_msi_route(s, 0, NULL) < 0) {
+ error_report("Could not enable split IRQ mode.");
+ exit(1);
+ }
+ }
+ }
+}
+
+int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
+{
+ int ret;
+ if (machine_kernel_irqchip_split(ms)) {
+ ret = kvm_vm_enable_cap(s, KVM_CAP_SPLIT_IRQCHIP, 0, 24);
+ if (ret) {
+ error_report("Could not enable split irqchip mode: %s",
+ strerror(-ret));
+ exit(1);
+ } else {
+ DPRINTF("Enabled KVM_CAP_SPLIT_IRQCHIP\n");
+ kvm_split_irqchip = true;
+ return 1;
+ }
+ } else {
+ return 0;
+ }
+}
+
+/* Classic KVM device assignment interface. Will remain x86 only. */
+int kvm_device_pci_assign(KVMState *s, PCIHostDeviceAddress *dev_addr,
+ uint32_t flags, uint32_t *dev_id)
+{
+ struct kvm_assigned_pci_dev dev_data = {
+ .segnr = dev_addr->domain,
+ .busnr = dev_addr->bus,
+ .devfn = PCI_DEVFN(dev_addr->slot, dev_addr->function),
+ .flags = flags,
+ };
+ int ret;
+
+ dev_data.assigned_dev_id =
+ (dev_addr->domain << 16) | (dev_addr->bus << 8) | dev_data.devfn;
+
+ ret = kvm_vm_ioctl(s, KVM_ASSIGN_PCI_DEVICE, &dev_data);
+ if (ret < 0) {
+ return ret;
+ }
+
+ *dev_id = dev_data.assigned_dev_id;
+
+ return 0;
+}
+
+int kvm_device_pci_deassign(KVMState *s, uint32_t dev_id)
+{
+ struct kvm_assigned_pci_dev dev_data = {
+ .assigned_dev_id = dev_id,
+ };
+
+ return kvm_vm_ioctl(s, KVM_DEASSIGN_PCI_DEVICE, &dev_data);
+}
+
+static int kvm_assign_irq_internal(KVMState *s, uint32_t dev_id,
+ uint32_t irq_type, uint32_t guest_irq)
+{
+ struct kvm_assigned_irq assigned_irq = {
+ .assigned_dev_id = dev_id,
+ .guest_irq = guest_irq,
+ .flags = irq_type,
+ };
+
+ if (kvm_check_extension(s, KVM_CAP_ASSIGN_DEV_IRQ)) {
+ return kvm_vm_ioctl(s, KVM_ASSIGN_DEV_IRQ, &assigned_irq);
+ } else {
+ return kvm_vm_ioctl(s, KVM_ASSIGN_IRQ, &assigned_irq);
+ }
+}
+
+int kvm_device_intx_assign(KVMState *s, uint32_t dev_id, bool use_host_msi,
+ uint32_t guest_irq)
+{
+ uint32_t irq_type = KVM_DEV_IRQ_GUEST_INTX |
+ (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX);
+
+ return kvm_assign_irq_internal(s, dev_id, irq_type, guest_irq);
+}
+
+int kvm_device_intx_set_mask(KVMState *s, uint32_t dev_id, bool masked)
+{
+ struct kvm_assigned_pci_dev dev_data = {
+ .assigned_dev_id = dev_id,
+ .flags = masked ? KVM_DEV_ASSIGN_MASK_INTX : 0,
+ };
+
+ return kvm_vm_ioctl(s, KVM_ASSIGN_SET_INTX_MASK, &dev_data);
+}
+
+static int kvm_deassign_irq_internal(KVMState *s, uint32_t dev_id,
+ uint32_t type)
+{
+ struct kvm_assigned_irq assigned_irq = {
+ .assigned_dev_id = dev_id,
+ .flags = type,
+ };
+
+ return kvm_vm_ioctl(s, KVM_DEASSIGN_DEV_IRQ, &assigned_irq);
+}
+
+int kvm_device_intx_deassign(KVMState *s, uint32_t dev_id, bool use_host_msi)
+{
+ return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_INTX |
+ (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX));
+}
+
+int kvm_device_msi_assign(KVMState *s, uint32_t dev_id, int virq)
+{
+ return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSI |
+ KVM_DEV_IRQ_GUEST_MSI, virq);
+}
+
+int kvm_device_msi_deassign(KVMState *s, uint32_t dev_id)
+{
+ return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSI |
+ KVM_DEV_IRQ_HOST_MSI);
+}
+
+bool kvm_device_msix_supported(KVMState *s)
+{
+ /* The kernel lacks a corresponding KVM_CAP, so we probe by calling
+ * KVM_ASSIGN_SET_MSIX_NR with an invalid parameter. */
+ return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, NULL) == -EFAULT;
+}
+
+int kvm_device_msix_init_vectors(KVMState *s, uint32_t dev_id,
+ uint32_t nr_vectors)
+{
+ struct kvm_assigned_msix_nr msix_nr = {
+ .assigned_dev_id = dev_id,
+ .entry_nr = nr_vectors,
+ };
+
+ return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, &msix_nr);
+}
+
+int kvm_device_msix_set_vector(KVMState *s, uint32_t dev_id, uint32_t vector,
+ int virq)
+{
+ struct kvm_assigned_msix_entry msix_entry = {
+ .assigned_dev_id = dev_id,
+ .gsi = virq,
+ .entry = vector,
+ };
+
+ return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_ENTRY, &msix_entry);
+}
+
+int kvm_device_msix_assign(KVMState *s, uint32_t dev_id)
+{
+ return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSIX |
+ KVM_DEV_IRQ_GUEST_MSIX, 0);
+}
+
+int kvm_device_msix_deassign(KVMState *s, uint32_t dev_id)
+{
+ return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSIX |
+ KVM_DEV_IRQ_HOST_MSIX);
+}
+
+int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
+ uint64_t address, uint32_t data, PCIDevice *dev)
+{
+ X86IOMMUState *iommu = x86_iommu_get_default();
+
+ if (iommu) {
+ int ret;
+ MSIMessage src, dst;
+ X86IOMMUClass *class = X86_IOMMU_GET_CLASS(iommu);
+
+ src.address = route->u.msi.address_hi;
+ src.address <<= VTD_MSI_ADDR_HI_SHIFT;
+ src.address |= route->u.msi.address_lo;
+ src.data = route->u.msi.data;
+
+ ret = class->int_remap(iommu, &src, &dst, dev ? \
+ pci_requester_id(dev) : \
+ X86_IOMMU_SID_INVALID);
+ if (ret) {
+ trace_kvm_x86_fixup_msi_error(route->gsi);
+ return 1;
+ }
+
+ route->u.msi.address_hi = dst.address >> VTD_MSI_ADDR_HI_SHIFT;
+ route->u.msi.address_lo = dst.address & VTD_MSI_ADDR_LO_MASK;
+ route->u.msi.data = dst.data;
+ }
+
+ return 0;
+}
+
+typedef struct MSIRouteEntry MSIRouteEntry;
+
+struct MSIRouteEntry {
+ PCIDevice *dev; /* Device pointer */
+ int vector; /* MSI/MSIX vector index */
+ int virq; /* Virtual IRQ index */
+ QLIST_ENTRY(MSIRouteEntry) list;
+};
+
+/* List of used GSI routes */
+static QLIST_HEAD(, MSIRouteEntry) msi_route_list = \
+ QLIST_HEAD_INITIALIZER(msi_route_list);
+
+static void kvm_update_msi_routes_all(void *private, bool global,
+ uint32_t index, uint32_t mask)
+{
+ int cnt = 0;
+ MSIRouteEntry *entry;
+ MSIMessage msg;
+ /* TODO: explicit route update */
+ QLIST_FOREACH(entry, &msi_route_list, list) {
+ cnt++;
+ msg = pci_get_msi_message(entry->dev, entry->vector);
+ kvm_irqchip_update_msi_route(kvm_state, entry->virq,
+ msg, entry->dev);
+ }
+ kvm_irqchip_commit_routes(kvm_state);
+ trace_kvm_x86_update_msi_routes(cnt);
+}
+
+int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
+ int vector, PCIDevice *dev)
+{
+ static bool notify_list_inited = false;
+ MSIRouteEntry *entry;
+
+ if (!dev) {
+ /* Routes without a device are (most likely) IOAPIC routes, used
+ * only in split kernel irqchip mode; we only track routes for
+ * PCI devices here. */
+ return 0;
+ }
+
+ entry = g_new0(MSIRouteEntry, 1);
+ entry->dev = dev;
+ entry->vector = vector;
+ entry->virq = route->gsi;
+ QLIST_INSERT_HEAD(&msi_route_list, entry, list);
+
+ trace_kvm_x86_add_msi_route(route->gsi);
+
+ if (!notify_list_inited) {
+ /* The first time we add a route, register ourselves on the
+ * IOMMU's IEC notifier list if needed. */
+ X86IOMMUState *iommu = x86_iommu_get_default();
+ if (iommu) {
+ x86_iommu_iec_register_notifier(iommu,
+ kvm_update_msi_routes_all,
+ NULL);
+ }
+ notify_list_inited = true;
+ }
+ return 0;
+}
+
+int kvm_arch_release_virq_post(int virq)
+{
+ MSIRouteEntry *entry, *next;
+ QLIST_FOREACH_SAFE(entry, &msi_route_list, list, next) {
+ if (entry->virq == virq) {
+ trace_kvm_x86_remove_msi_route(virq);
+ QLIST_REMOVE(entry, list);
+ break;
+ }
+ }
+ return 0;
+}
+
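+ /*
+ * On x86 MSI routes carry the full MSI message, so the data word
+ * alone cannot be mapped back to a GSI; this path is presumably
+ * never reached, hence the abort().
+ */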
+int kvm_arch_msi_data_to_gsi(uint32_t data)
+{
+ abort();
+}
diff --git a/target/i386/kvm_i386.h b/target/i386/kvm_i386.h
new file mode 100644
index 0000000000..76079295b2
--- /dev/null
+++ b/target/i386/kvm_i386.h
@@ -0,0 +1,48 @@
+/*
+ * QEMU KVM support -- x86 specific functions.
+ *
+ * Copyright (c) 2012 Linaro Limited
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_KVM_I386_H
+#define QEMU_KVM_I386_H
+
+#include "sysemu/kvm.h"
+
+#define kvm_apic_in_kernel() (kvm_irqchip_in_kernel())
+
+bool kvm_allows_irq0_override(void);
+bool kvm_has_smm(void);
+void kvm_synchronize_all_tsc(void);
+void kvm_arch_reset_vcpu(X86CPU *cs);
+void kvm_arch_do_init_vcpu(X86CPU *cs);
+
+int kvm_device_pci_assign(KVMState *s, PCIHostDeviceAddress *dev_addr,
+ uint32_t flags, uint32_t *dev_id);
+int kvm_device_pci_deassign(KVMState *s, uint32_t dev_id);
+
+int kvm_device_intx_assign(KVMState *s, uint32_t dev_id,
+ bool use_host_msi, uint32_t guest_irq);
+int kvm_device_intx_set_mask(KVMState *s, uint32_t dev_id, bool masked);
+int kvm_device_intx_deassign(KVMState *s, uint32_t dev_id, bool use_host_msi);
+
+int kvm_device_msi_assign(KVMState *s, uint32_t dev_id, int virq);
+int kvm_device_msi_deassign(KVMState *s, uint32_t dev_id);
+
+bool kvm_device_msix_supported(KVMState *s);
+int kvm_device_msix_init_vectors(KVMState *s, uint32_t dev_id,
+ uint32_t nr_vectors);
+int kvm_device_msix_set_vector(KVMState *s, uint32_t dev_id, uint32_t vector,
+ int virq);
+int kvm_device_msix_assign(KVMState *s, uint32_t dev_id);
+int kvm_device_msix_deassign(KVMState *s, uint32_t dev_id);
+
+void kvm_put_apicbase(X86CPU *cpu, uint64_t value);
+
+bool kvm_enable_x2apic(void);
+bool kvm_has_x2apic_api(void);
+#endif
diff --git a/target/i386/machine.c b/target/i386/machine.c
new file mode 100644
index 0000000000..760f82b6c7
--- /dev/null
+++ b/target/i386/machine.c
@@ -0,0 +1,1047 @@
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "hw/hw.h"
+#include "hw/boards.h"
+#include "hw/i386/pc.h"
+#include "hw/isa/isa.h"
+#include "migration/cpu.h"
+
+#include "sysemu/kvm.h"
+
+#include "qemu/error-report.h"
+
+static const VMStateDescription vmstate_segment = {
+ .name = "segment",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(selector, SegmentCache),
+ VMSTATE_UINTTL(base, SegmentCache),
+ VMSTATE_UINT32(limit, SegmentCache),
+ VMSTATE_UINT32(flags, SegmentCache),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+#define VMSTATE_SEGMENT(_field, _state) { \
+ .name = (stringify(_field)), \
+ .size = sizeof(SegmentCache), \
+ .vmsd = &vmstate_segment, \
+ .flags = VMS_STRUCT, \
+ .offset = offsetof(_state, _field) \
+ + type_check(SegmentCache,typeof_field(_state, _field)) \
+}
+
+#define VMSTATE_SEGMENT_ARRAY(_field, _state, _n) \
+ VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_segment, SegmentCache)
+
+static const VMStateDescription vmstate_xmm_reg = {
+ .name = "xmm_reg",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+#define VMSTATE_XMM_REGS(_field, _state, _start) \
+ VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0, \
+ vmstate_xmm_reg, ZMMReg)
+
+/* YMMH format is the same as XMM, but for bits 128-255 */
+static const VMStateDescription vmstate_ymmh_reg = {
+ .name = "ymmh_reg",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+#define VMSTATE_YMMH_REGS_VARS(_field, _state, _start, _v) \
+ VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, _v, \
+ vmstate_ymmh_reg, ZMMReg)
+
+static const VMStateDescription vmstate_zmmh_reg = {
+ .name = "zmmh_reg",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+#define VMSTATE_ZMMH_REGS_VARS(_field, _state, _start) \
+ VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0, \
+ vmstate_zmmh_reg, ZMMReg)
+
+#ifdef TARGET_X86_64
+static const VMStateDescription vmstate_hi16_zmm_reg = {
+ .name = "hi16_zmm_reg",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
+ VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+#define VMSTATE_Hi16_ZMM_REGS_VARS(_field, _state, _start) \
+ VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0, \
+ vmstate_hi16_zmm_reg, ZMMReg)
+#endif
+
+static const VMStateDescription vmstate_bnd_regs = {
+ .name = "bnd_regs",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(lb, BNDReg),
+ VMSTATE_UINT64(ub, BNDReg),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+#define VMSTATE_BND_REGS(_field, _state, _n) \
+ VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_bnd_regs, BNDReg)
+
+static const VMStateDescription vmstate_mtrr_var = {
+ .name = "mtrr_var",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(base, MTRRVar),
+ VMSTATE_UINT64(mask, MTRRVar),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+#define VMSTATE_MTRR_VARS(_field, _state, _n, _v) \
+ VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_mtrr_var, MTRRVar)
+
+static void put_fpreg_error(QEMUFile *f, void *opaque, size_t size)
+{
+ fprintf(stderr, "call put_fpreg() with invalid arguments\n");
+ exit(0);
+}
+
+/* XXX: add that in a FPU generic layer */
+union x86_longdouble {
+ uint64_t mant;
+ uint16_t exp;
+};
+
+#define MANTD1(fp) (fp & ((1LL << 52) - 1))
+#define EXPBIAS1 1023
+#define EXPD1(fp) ((fp >> 52) & 0x7FF)
+#define SIGND1(fp) ((fp >> 32) & 0x80000000)
+
+static void fp64_to_fp80(union x86_longdouble *p, uint64_t temp)
+{
+ int e;
+ /* mantissa */
+ p->mant = (MANTD1(temp) << 11) | (1LL << 63);
+ /* exponent + sign */
+ e = EXPD1(temp) - EXPBIAS1 + 16383;
+ e |= SIGND1(temp) >> 16;
+ p->exp = e;
+}
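+/*
+ * Illustrative example: for temp == 0x3ff0000000000000 (double 1.0),
+ * MANTD1(temp) == 0, so mant becomes 0x8000000000000000 (explicit
+ * integer bit set) and exp becomes 1023 - 1023 + 16383 == 0x3fff,
+ * i.e. the 80-bit representation of 1.0.
+ */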
+
+static int get_fpreg(QEMUFile *f, void *opaque, size_t size)
+{
+ FPReg *fp_reg = opaque;
+ uint64_t mant;
+ uint16_t exp;
+
+ qemu_get_be64s(f, &mant);
+ qemu_get_be16s(f, &exp);
+ fp_reg->d = cpu_set_fp80(mant, exp);
+ return 0;
+}
+
+static void put_fpreg(QEMUFile *f, void *opaque, size_t size)
+{
+ FPReg *fp_reg = opaque;
+ uint64_t mant;
+ uint16_t exp;
+ /* we save the real CPU data (in case of MMX usage, only 'mant'
+ contains the MMX register) */
+ cpu_get_fp80(&mant, &exp, fp_reg->d);
+ qemu_put_be64s(f, &mant);
+ qemu_put_be16s(f, &exp);
+}
+
+static const VMStateInfo vmstate_fpreg = {
+ .name = "fpreg",
+ .get = get_fpreg,
+ .put = put_fpreg,
+};
+
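+/*
+ * Format 1 streams saved only the 64-bit mantissa; when the guest was
+ * (apparently) using MMX, the value is restored with an all-ones
+ * exponent field, matching how MMX values alias the x87 registers.
+ */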
+static int get_fpreg_1_mmx(QEMUFile *f, void *opaque, size_t size)
+{
+ union x86_longdouble *p = opaque;
+ uint64_t mant;
+
+ qemu_get_be64s(f, &mant);
+ p->mant = mant;
+ p->exp = 0xffff;
+ return 0;
+}
+
+static const VMStateInfo vmstate_fpreg_1_mmx = {
+ .name = "fpreg_1_mmx",
+ .get = get_fpreg_1_mmx,
+ .put = put_fpreg_error,
+};
+
+static int get_fpreg_1_no_mmx(QEMUFile *f, void *opaque, size_t size)
+{
+ union x86_longdouble *p = opaque;
+ uint64_t mant;
+
+ qemu_get_be64s(f, &mant);
+ fp64_to_fp80(p, mant);
+ return 0;
+}
+
+static const VMStateInfo vmstate_fpreg_1_no_mmx = {
+ .name = "fpreg_1_no_mmx",
+ .get = get_fpreg_1_no_mmx,
+ .put = put_fpreg_error,
+};
+
+static bool fpregs_is_0(void *opaque, int version_id)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return (env->fpregs_format_vmstate == 0);
+}
+
+static bool fpregs_is_1_mmx(void *opaque, int version_id)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+ int guess_mmx;
+
+ guess_mmx = ((env->fptag_vmstate == 0xff) &&
+ (env->fpus_vmstate & 0x3800) == 0);
+ return (guess_mmx && (env->fpregs_format_vmstate == 1));
+}
+
+static bool fpregs_is_1_no_mmx(void *opaque, int version_id)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+ int guess_mmx;
+
+ guess_mmx = ((env->fptag_vmstate == 0xff) &&
+ (env->fpus_vmstate & 0x3800) == 0);
+ return (!guess_mmx && (env->fpregs_format_vmstate == 1));
+}
+
+#define VMSTATE_FP_REGS(_field, _state, _n) \
+ VMSTATE_ARRAY_TEST(_field, _state, _n, fpregs_is_0, vmstate_fpreg, FPReg), \
+ VMSTATE_ARRAY_TEST(_field, _state, _n, fpregs_is_1_mmx, vmstate_fpreg_1_mmx, FPReg), \
+ VMSTATE_ARRAY_TEST(_field, _state, _n, fpregs_is_1_no_mmx, vmstate_fpreg_1_no_mmx, FPReg)
+
+static bool version_is_5(void *opaque, int version_id)
+{
+ return version_id == 5;
+}
+
+#ifdef TARGET_X86_64
+static bool less_than_7(void *opaque, int version_id)
+{
+ return version_id < 7;
+}
+
+static int get_uint64_as_uint32(QEMUFile *f, void *pv, size_t size)
+{
+ uint64_t *v = pv;
+ *v = qemu_get_be32(f);
+ return 0;
+}
+
+static void put_uint64_as_uint32(QEMUFile *f, void *pv, size_t size)
+{
+ uint64_t *v = pv;
+ qemu_put_be32(f, *v);
+}
+
+static const VMStateInfo vmstate_hack_uint64_as_uint32 = {
+ .name = "uint64_as_uint32",
+ .get = get_uint64_as_uint32,
+ .put = put_uint64_as_uint32,
+};
+
+#define VMSTATE_HACK_UINT32(_f, _s, _t) \
+ VMSTATE_SINGLE_TEST(_f, _s, _t, 0, vmstate_hack_uint64_as_uint32, uint64_t)
+#endif
+
+static void cpu_pre_save(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+ int i;
+
+ /* FPU */
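+ /*
+ * Fold the top-of-stack pointer back into bits 13..11 of the saved
+ * status word, and pack the per-register tags into one byte where a
+ * set bit means "register valid" (the inverse of env->fptags[]).
+ */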
+ env->fpus_vmstate = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
+ env->fptag_vmstate = 0;
+ for(i = 0; i < 8; i++) {
+ env->fptag_vmstate |= ((!env->fptags[i]) << i);
+ }
+
+ env->fpregs_format_vmstate = 0;
+
+ /*
+ * In real mode the guest segment registers' DPL should be zero.
+ * Older KVM versions were setting it wrongly.
+ * Fixing it here allows live migration to a host with unrestricted
+ * guest support (otherwise the migration would fail with an invalid
+ * guest state error).
+ */
+ if (!(env->cr[0] & CR0_PE_MASK) &&
+ (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
+ env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
+ env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
+ env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
+ env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
+ env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
+ env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
+ }
+
+}
+
+static int cpu_post_load(void *opaque, int version_id)
+{
+ X86CPU *cpu = opaque;
+ CPUState *cs = CPU(cpu);
+ CPUX86State *env = &cpu->env;
+ int i;
+
+ if (env->tsc_khz && env->user_tsc_khz &&
+ env->tsc_khz != env->user_tsc_khz) {
+ error_report("Mismatch between user-specified TSC frequency and "
+ "migrated TSC frequency");
+ return -EINVAL;
+ }
+
+ /*
+ * In real mode the guest segment registers' DPL should be zero.
+ * Older KVM versions were setting it wrongly.
+ * Fixing it here allows live migration from a host without
+ * unrestricted guest support to a host that has it (otherwise the
+ * migration would fail with an invalid guest state error).
+ */
+ if (!(env->cr[0] & CR0_PE_MASK) &&
+ (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
+ env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
+ env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
+ env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
+ env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
+ env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
+ env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
+ }
+
+ /* Older versions of QEMU incorrectly used CS.DPL as the CPL when
+ * running under KVM. This is wrong for conforming code segments.
+ * Luckily, in our implementation the CPL field of hflags is redundant
+ * and we can get the right value from the SS descriptor privilege level.
+ */
+ env->hflags &= ~HF_CPL_MASK;
+ env->hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
+
+ env->fpstt = (env->fpus_vmstate >> 11) & 7;
+ env->fpus = env->fpus_vmstate & ~0x3800;
+ env->fptag_vmstate ^= 0xff;
+ for(i = 0; i < 8; i++) {
+ env->fptags[i] = (env->fptag_vmstate >> i) & 1;
+ }
+ update_fp_status(env);
+
+ cpu_breakpoint_remove_all(cs, BP_CPU);
+ cpu_watchpoint_remove_all(cs, BP_CPU);
+ {
+ /* Indicate all breakpoints disabled, as they are, then
+ let the helper re-enable them. */
+ target_ulong dr7 = env->dr[7];
+ env->dr[7] = dr7 & ~(DR7_GLOBAL_BP_MASK | DR7_LOCAL_BP_MASK);
+ cpu_x86_update_dr7(env, dr7);
+ }
+ tlb_flush(cs, 1);
+
+ if (tcg_enabled()) {
+ cpu_smm_update(cpu);
+ }
+ return 0;
+}
+
+static bool async_pf_msr_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+
+ return cpu->env.async_pf_en_msr != 0;
+}
+
+static bool pv_eoi_msr_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+
+ return cpu->env.pv_eoi_en_msr != 0;
+}
+
+static bool steal_time_msr_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+
+ return cpu->env.steal_time_msr != 0;
+}
+
+static const VMStateDescription vmstate_steal_time_msr = {
+ .name = "cpu/steal_time_msr",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = steal_time_msr_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.steal_time_msr, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_async_pf_msr = {
+ .name = "cpu/async_pf_msr",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = async_pf_msr_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.async_pf_en_msr, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_pv_eoi_msr = {
+ .name = "cpu/async_pv_eoi_msr",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = pv_eoi_msr_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.pv_eoi_en_msr, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool fpop_ip_dp_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->fpop != 0 || env->fpip != 0 || env->fpdp != 0;
+}
+
+static const VMStateDescription vmstate_fpop_ip_dp = {
+ .name = "cpu/fpop_ip_dp",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = fpop_ip_dp_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT16(env.fpop, X86CPU),
+ VMSTATE_UINT64(env.fpip, X86CPU),
+ VMSTATE_UINT64(env.fpdp, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool tsc_adjust_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->tsc_adjust != 0;
+}
+
+static const VMStateDescription vmstate_msr_tsc_adjust = {
+ .name = "cpu/msr_tsc_adjust",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = tsc_adjust_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.tsc_adjust, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool tscdeadline_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->tsc_deadline != 0;
+}
+
+static const VMStateDescription vmstate_msr_tscdeadline = {
+ .name = "cpu/msr_tscdeadline",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = tscdeadline_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.tsc_deadline, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool misc_enable_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->msr_ia32_misc_enable != MSR_IA32_MISC_ENABLE_DEFAULT;
+}
+
+static bool feature_control_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->msr_ia32_feature_control != 0;
+}
+
+static const VMStateDescription vmstate_msr_ia32_misc_enable = {
+ .name = "cpu/msr_ia32_misc_enable",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = misc_enable_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.msr_ia32_misc_enable, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_msr_ia32_feature_control = {
+ .name = "cpu/msr_ia32_feature_control",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = feature_control_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.msr_ia32_feature_control, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool pmu_enable_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+ int i;
+
+ if (env->msr_fixed_ctr_ctrl || env->msr_global_ctrl ||
+ env->msr_global_status || env->msr_global_ovf_ctrl) {
+ return true;
+ }
+ for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
+ if (env->msr_fixed_counters[i]) {
+ return true;
+ }
+ }
+ for (i = 0; i < MAX_GP_COUNTERS; i++) {
+ if (env->msr_gp_counters[i] || env->msr_gp_evtsel[i]) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static const VMStateDescription vmstate_msr_architectural_pmu = {
+ .name = "cpu/msr_architectural_pmu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = pmu_enable_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.msr_fixed_ctr_ctrl, X86CPU),
+ VMSTATE_UINT64(env.msr_global_ctrl, X86CPU),
+ VMSTATE_UINT64(env.msr_global_status, X86CPU),
+ VMSTATE_UINT64(env.msr_global_ovf_ctrl, X86CPU),
+ VMSTATE_UINT64_ARRAY(env.msr_fixed_counters, X86CPU, MAX_FIXED_COUNTERS),
+ VMSTATE_UINT64_ARRAY(env.msr_gp_counters, X86CPU, MAX_GP_COUNTERS),
+ VMSTATE_UINT64_ARRAY(env.msr_gp_evtsel, X86CPU, MAX_GP_COUNTERS),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool mpx_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+ unsigned int i;
+
+ for (i = 0; i < 4; i++) {
+ if (env->bnd_regs[i].lb || env->bnd_regs[i].ub) {
+ return true;
+ }
+ }
+
+ if (env->bndcs_regs.cfgu || env->bndcs_regs.sts) {
+ return true;
+ }
+
+ return !!env->msr_bndcfgs;
+}
+
+static const VMStateDescription vmstate_mpx = {
+ .name = "cpu/mpx",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = mpx_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_BND_REGS(env.bnd_regs, X86CPU, 4),
+ VMSTATE_UINT64(env.bndcs_regs.cfgu, X86CPU),
+ VMSTATE_UINT64(env.bndcs_regs.sts, X86CPU),
+ VMSTATE_UINT64(env.msr_bndcfgs, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool hyperv_hypercall_enable_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->msr_hv_hypercall != 0 || env->msr_hv_guest_os_id != 0;
+}
+
+static const VMStateDescription vmstate_msr_hypercall_hypercall = {
+ .name = "cpu/msr_hyperv_hypercall",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = hyperv_hypercall_enable_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.msr_hv_guest_os_id, X86CPU),
+ VMSTATE_UINT64(env.msr_hv_hypercall, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool hyperv_vapic_enable_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->msr_hv_vapic != 0;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_vapic = {
+ .name = "cpu/msr_hyperv_vapic",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = hyperv_vapic_enable_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.msr_hv_vapic, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool hyperv_time_enable_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->msr_hv_tsc != 0;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_time = {
+ .name = "cpu/msr_hyperv_time",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = hyperv_time_enable_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.msr_hv_tsc, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool hyperv_crash_enable_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+ int i;
+
+ for (i = 0; i < HV_X64_MSR_CRASH_PARAMS; i++) {
+ if (env->msr_hv_crash_params[i]) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_crash = {
+ .name = "cpu/msr_hyperv_crash",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = hyperv_crash_enable_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64_ARRAY(env.msr_hv_crash_params,
+ X86CPU, HV_X64_MSR_CRASH_PARAMS),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool hyperv_runtime_enable_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ if (!cpu->hyperv_runtime) {
+ return false;
+ }
+
+ return env->msr_hv_runtime != 0;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_runtime = {
+ .name = "cpu/msr_hyperv_runtime",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = hyperv_runtime_enable_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.msr_hv_runtime, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool hyperv_synic_enable_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+ int i;
+
+ if (env->msr_hv_synic_control != 0 ||
+ env->msr_hv_synic_evt_page != 0 ||
+ env->msr_hv_synic_msg_page != 0) {
+ return true;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
+ if (env->msr_hv_synic_sint[i] != 0) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_synic = {
+ .name = "cpu/msr_hyperv_synic",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = hyperv_synic_enable_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.msr_hv_synic_control, X86CPU),
+ VMSTATE_UINT64(env.msr_hv_synic_evt_page, X86CPU),
+ VMSTATE_UINT64(env.msr_hv_synic_msg_page, X86CPU),
+ VMSTATE_UINT64_ARRAY(env.msr_hv_synic_sint, X86CPU,
+ HV_SYNIC_SINT_COUNT),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool hyperv_stimer_enable_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(env->msr_hv_stimer_config); i++) {
+ if (env->msr_hv_stimer_config[i] || env->msr_hv_stimer_count[i]) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_stimer = {
+ .name = "cpu/msr_hyperv_stimer",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = hyperv_stimer_enable_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_config,
+ X86CPU, HV_SYNIC_STIMER_COUNT),
+ VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_count,
+ X86CPU, HV_SYNIC_STIMER_COUNT),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool avx512_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+ unsigned int i;
+
+ for (i = 0; i < NB_OPMASK_REGS; i++) {
+ if (env->opmask_regs[i]) {
+ return true;
+ }
+ }
+
+ for (i = 0; i < CPU_NB_REGS; i++) {
+#define ENV_XMM(reg, field) (env->xmm_regs[reg].ZMM_Q(field))
+ if (ENV_XMM(i, 4) || ENV_XMM(i, 6) ||
+ ENV_XMM(i, 5) || ENV_XMM(i, 7)) {
+ return true;
+ }
+#ifdef TARGET_X86_64
+ if (ENV_XMM(i+16, 0) || ENV_XMM(i+16, 1) ||
+ ENV_XMM(i+16, 2) || ENV_XMM(i+16, 3) ||
+ ENV_XMM(i+16, 4) || ENV_XMM(i+16, 5) ||
+ ENV_XMM(i+16, 6) || ENV_XMM(i+16, 7)) {
+ return true;
+ }
+#endif
+ }
+
+ return false;
+}
+
+static const VMStateDescription vmstate_avx512 = {
+ .name = "cpu/avx512",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = avx512_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64_ARRAY(env.opmask_regs, X86CPU, NB_OPMASK_REGS),
+ VMSTATE_ZMMH_REGS_VARS(env.xmm_regs, X86CPU, 0),
+#ifdef TARGET_X86_64
+ VMSTATE_Hi16_ZMM_REGS_VARS(env.xmm_regs, X86CPU, 16),
+#endif
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool xss_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->xss != 0;
+}
+
+static const VMStateDescription vmstate_xss = {
+ .name = "cpu/xss",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = xss_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.xss, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+#ifdef TARGET_X86_64
+static bool pkru_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->pkru != 0;
+}
+
+static const VMStateDescription vmstate_pkru = {
+ .name = "cpu/pkru",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = pkru_needed,
+ .fields = (VMStateField[]){
+ VMSTATE_UINT32(env.pkru, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+#endif
+
+static bool tsc_khz_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+ MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
+ PCMachineClass *pcmc = PC_MACHINE_CLASS(mc);
+ return env->tsc_khz && pcmc->save_tsc_khz;
+}
+
+static const VMStateDescription vmstate_tsc_khz = {
+ .name = "cpu/tsc_khz",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = tsc_khz_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_INT64(env.tsc_khz, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool mcg_ext_ctl_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+ return cpu->enable_lmce && env->mcg_ext_ctl;
+}
+
+static const VMStateDescription vmstate_mcg_ext_ctl = {
+ .name = "cpu/mcg_ext_ctl",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = mcg_ext_ctl_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.mcg_ext_ctl, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+VMStateDescription vmstate_x86_cpu = {
+ .name = "cpu",
+ .version_id = 12,
+ .minimum_version_id = 3,
+ .pre_save = cpu_pre_save,
+ .post_load = cpu_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINTTL_ARRAY(env.regs, X86CPU, CPU_NB_REGS),
+ VMSTATE_UINTTL(env.eip, X86CPU),
+ VMSTATE_UINTTL(env.eflags, X86CPU),
+ VMSTATE_UINT32(env.hflags, X86CPU),
+ /* FPU */
+ VMSTATE_UINT16(env.fpuc, X86CPU),
+ VMSTATE_UINT16(env.fpus_vmstate, X86CPU),
+ VMSTATE_UINT16(env.fptag_vmstate, X86CPU),
+ VMSTATE_UINT16(env.fpregs_format_vmstate, X86CPU),
+ VMSTATE_FP_REGS(env.fpregs, X86CPU, 8),
+
+ VMSTATE_SEGMENT_ARRAY(env.segs, X86CPU, 6),
+ VMSTATE_SEGMENT(env.ldt, X86CPU),
+ VMSTATE_SEGMENT(env.tr, X86CPU),
+ VMSTATE_SEGMENT(env.gdt, X86CPU),
+ VMSTATE_SEGMENT(env.idt, X86CPU),
+
+ VMSTATE_UINT32(env.sysenter_cs, X86CPU),
+#ifdef TARGET_X86_64
+ /* Hack: In v7 size changed from 32 to 64 bits on x86_64 */
+ VMSTATE_HACK_UINT32(env.sysenter_esp, X86CPU, less_than_7),
+ VMSTATE_HACK_UINT32(env.sysenter_eip, X86CPU, less_than_7),
+ VMSTATE_UINTTL_V(env.sysenter_esp, X86CPU, 7),
+ VMSTATE_UINTTL_V(env.sysenter_eip, X86CPU, 7),
+#else
+ VMSTATE_UINTTL(env.sysenter_esp, X86CPU),
+ VMSTATE_UINTTL(env.sysenter_eip, X86CPU),
+#endif
+
+ VMSTATE_UINTTL(env.cr[0], X86CPU),
+ VMSTATE_UINTTL(env.cr[2], X86CPU),
+ VMSTATE_UINTTL(env.cr[3], X86CPU),
+ VMSTATE_UINTTL(env.cr[4], X86CPU),
+ VMSTATE_UINTTL_ARRAY(env.dr, X86CPU, 8),
+ /* MMU */
+ VMSTATE_INT32(env.a20_mask, X86CPU),
+ /* XMM */
+ VMSTATE_UINT32(env.mxcsr, X86CPU),
+ VMSTATE_XMM_REGS(env.xmm_regs, X86CPU, 0),
+
+#ifdef TARGET_X86_64
+ VMSTATE_UINT64(env.efer, X86CPU),
+ VMSTATE_UINT64(env.star, X86CPU),
+ VMSTATE_UINT64(env.lstar, X86CPU),
+ VMSTATE_UINT64(env.cstar, X86CPU),
+ VMSTATE_UINT64(env.fmask, X86CPU),
+ VMSTATE_UINT64(env.kernelgsbase, X86CPU),
+#endif
+ VMSTATE_UINT32_V(env.smbase, X86CPU, 4),
+
+ VMSTATE_UINT64_V(env.pat, X86CPU, 5),
+ VMSTATE_UINT32_V(env.hflags2, X86CPU, 5),
+
+ VMSTATE_UINT32_TEST(parent_obj.halted, X86CPU, version_is_5),
+ VMSTATE_UINT64_V(env.vm_hsave, X86CPU, 5),
+ VMSTATE_UINT64_V(env.vm_vmcb, X86CPU, 5),
+ VMSTATE_UINT64_V(env.tsc_offset, X86CPU, 5),
+ VMSTATE_UINT64_V(env.intercept, X86CPU, 5),
+ VMSTATE_UINT16_V(env.intercept_cr_read, X86CPU, 5),
+ VMSTATE_UINT16_V(env.intercept_cr_write, X86CPU, 5),
+ VMSTATE_UINT16_V(env.intercept_dr_read, X86CPU, 5),
+ VMSTATE_UINT16_V(env.intercept_dr_write, X86CPU, 5),
+ VMSTATE_UINT32_V(env.intercept_exceptions, X86CPU, 5),
+ VMSTATE_UINT8_V(env.v_tpr, X86CPU, 5),
+ /* MTRRs */
+ VMSTATE_UINT64_ARRAY_V(env.mtrr_fixed, X86CPU, 11, 8),
+ VMSTATE_UINT64_V(env.mtrr_deftype, X86CPU, 8),
+ VMSTATE_MTRR_VARS(env.mtrr_var, X86CPU, MSR_MTRRcap_VCNT, 8),
+ /* KVM-related states */
+ VMSTATE_INT32_V(env.interrupt_injected, X86CPU, 9),
+ VMSTATE_UINT32_V(env.mp_state, X86CPU, 9),
+ VMSTATE_UINT64_V(env.tsc, X86CPU, 9),
+ VMSTATE_INT32_V(env.exception_injected, X86CPU, 11),
+ VMSTATE_UINT8_V(env.soft_interrupt, X86CPU, 11),
+ VMSTATE_UINT8_V(env.nmi_injected, X86CPU, 11),
+ VMSTATE_UINT8_V(env.nmi_pending, X86CPU, 11),
+ VMSTATE_UINT8_V(env.has_error_code, X86CPU, 11),
+ VMSTATE_UINT32_V(env.sipi_vector, X86CPU, 11),
+ /* MCE */
+ VMSTATE_UINT64_V(env.mcg_cap, X86CPU, 10),
+ VMSTATE_UINT64_V(env.mcg_status, X86CPU, 10),
+ VMSTATE_UINT64_V(env.mcg_ctl, X86CPU, 10),
+ VMSTATE_UINT64_ARRAY_V(env.mce_banks, X86CPU, MCE_BANKS_DEF * 4, 10),
+ /* rdtscp */
+ VMSTATE_UINT64_V(env.tsc_aux, X86CPU, 11),
+ /* KVM pvclock msr */
+ VMSTATE_UINT64_V(env.system_time_msr, X86CPU, 11),
+ VMSTATE_UINT64_V(env.wall_clock_msr, X86CPU, 11),
+ /* XSAVE related fields */
+ VMSTATE_UINT64_V(env.xcr0, X86CPU, 12),
+ VMSTATE_UINT64_V(env.xstate_bv, X86CPU, 12),
+ VMSTATE_YMMH_REGS_VARS(env.xmm_regs, X86CPU, 0, 12),
+ VMSTATE_END_OF_LIST()
+ /* The above list is not sorted w.r.t. version numbers, watch out! */
+ },
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_async_pf_msr,
+ &vmstate_pv_eoi_msr,
+ &vmstate_steal_time_msr,
+ &vmstate_fpop_ip_dp,
+ &vmstate_msr_tsc_adjust,
+ &vmstate_msr_tscdeadline,
+ &vmstate_msr_ia32_misc_enable,
+ &vmstate_msr_ia32_feature_control,
+ &vmstate_msr_architectural_pmu,
+ &vmstate_mpx,
+ &vmstate_msr_hypercall_hypercall,
+ &vmstate_msr_hyperv_vapic,
+ &vmstate_msr_hyperv_time,
+ &vmstate_msr_hyperv_crash,
+ &vmstate_msr_hyperv_runtime,
+ &vmstate_msr_hyperv_synic,
+ &vmstate_msr_hyperv_stimer,
+ &vmstate_avx512,
+ &vmstate_xss,
+ &vmstate_tsc_khz,
+#ifdef TARGET_X86_64
+ &vmstate_pkru,
+#endif
+ &vmstate_mcg_ext_ctl,
+ NULL
+ }
+};
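
For orientation, here is a minimal stand-alone sketch (plain C, with invented ToyCPU/ToySubsection names, not the QEMU VMState API) of the optional-subsection pattern used above: each subsection carries a needed() predicate and is written to the stream only when the state it covers is non-default, so older destinations that never learned about the field can still accept the migration stream.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-ins for the CPU state and one optional subsection. */
typedef struct ToyCPU {
    uint64_t xss;   /* only worth sending when non-zero, like cpu/xss */
} ToyCPU;

typedef struct ToySubsection {
    const char *name;
    int (*needed)(const ToyCPU *cpu);            /* cf. xss_needed() */
    void (*save)(const ToyCPU *cpu, FILE *out);
} ToySubsection;

static int toy_xss_needed(const ToyCPU *cpu) { return cpu->xss != 0; }

static void toy_xss_save(const ToyCPU *cpu, FILE *out)
{
    fprintf(out, "cpu/xss=%" PRIx64 "\n", cpu->xss);
}

static const ToySubsection subsections[] = {
    { "cpu/xss", toy_xss_needed, toy_xss_save },
};

/* Emit only the subsections whose predicate fires, much as the real
 * vmstate code consults .needed before writing a subsection. */
static void toy_save(const ToyCPU *cpu, FILE *out)
{
    for (size_t i = 0; i < sizeof(subsections) / sizeof(subsections[0]); i++) {
        if (subsections[i].needed(cpu)) {
            subsections[i].save(cpu, out);
        }
    }
}

int main(void)
{
    ToyCPU cpu = { .xss = 0 };
    toy_save(&cpu, stdout);     /* prints nothing: state is default */
    cpu.xss = 0x100;
    toy_save(&cpu, stdout);     /* prints cpu/xss=100 */
    return 0;
}
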
diff --git a/target/i386/mem_helper.c b/target/i386/mem_helper.c
new file mode 100644
index 0000000000..70f67668ab
--- /dev/null
+++ b/target/i386/mem_helper.c
@@ -0,0 +1,215 @@
+/*
+ * x86 memory access helpers
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "qemu/int128.h"
+#include "tcg.h"
+
+void helper_cmpxchg8b_unlocked(CPUX86State *env, target_ulong a0)
+{
+ uintptr_t ra = GETPC();
+ uint64_t oldv, cmpv, newv;
+ int eflags;
+
+ eflags = cpu_cc_compute_all(env, CC_OP);
+
+ cmpv = deposit64(env->regs[R_EAX], 32, 32, env->regs[R_EDX]);
+ newv = deposit64(env->regs[R_EBX], 32, 32, env->regs[R_ECX]);
+
+ oldv = cpu_ldq_data_ra(env, a0, ra);
+ newv = (cmpv == oldv ? newv : oldv);
+ /* always do the store */
+ cpu_stq_data_ra(env, a0, newv, ra);
+
+ if (oldv == cmpv) {
+ eflags |= CC_Z;
+ } else {
+ env->regs[R_EAX] = (uint32_t)oldv;
+ env->regs[R_EDX] = (uint32_t)(oldv >> 32);
+ eflags &= ~CC_Z;
+ }
+ CC_SRC = eflags;
+}
+
+void helper_cmpxchg8b(CPUX86State *env, target_ulong a0)
+{
+#ifdef CONFIG_ATOMIC64
+ uint64_t oldv, cmpv, newv;
+ int eflags;
+
+ eflags = cpu_cc_compute_all(env, CC_OP);
+
+ cmpv = deposit64(env->regs[R_EAX], 32, 32, env->regs[R_EDX]);
+ newv = deposit64(env->regs[R_EBX], 32, 32, env->regs[R_ECX]);
+
+#ifdef CONFIG_USER_ONLY
+ {
+ uint64_t *haddr = g2h(a0);
+ cmpv = cpu_to_le64(cmpv);
+ newv = cpu_to_le64(newv);
+ oldv = atomic_cmpxchg__nocheck(haddr, cmpv, newv);
+ oldv = le64_to_cpu(oldv);
+ }
+#else
+ {
+ uintptr_t ra = GETPC();
+ int mem_idx = cpu_mmu_index(env, false);
+ TCGMemOpIdx oi = make_memop_idx(MO_TEQ, mem_idx);
+ oldv = helper_atomic_cmpxchgq_le_mmu(env, a0, cmpv, newv, oi, ra);
+ }
+#endif
+
+ if (oldv == cmpv) {
+ eflags |= CC_Z;
+ } else {
+ env->regs[R_EAX] = (uint32_t)oldv;
+ env->regs[R_EDX] = (uint32_t)(oldv >> 32);
+ eflags &= ~CC_Z;
+ }
+ CC_SRC = eflags;
+#else
+ cpu_loop_exit_atomic(ENV_GET_CPU(env), GETPC());
+#endif /* CONFIG_ATOMIC64 */
+}
+
+#ifdef TARGET_X86_64
+void helper_cmpxchg16b_unlocked(CPUX86State *env, target_ulong a0)
+{
+ uintptr_t ra = GETPC();
+ Int128 oldv, cmpv, newv;
+ uint64_t o0, o1;
+ int eflags;
+ bool success;
+
+ if ((a0 & 0xf) != 0) {
+ raise_exception_ra(env, EXCP0D_GPF, GETPC());
+ }
+ eflags = cpu_cc_compute_all(env, CC_OP);
+
+ cmpv = int128_make128(env->regs[R_EAX], env->regs[R_EDX]);
+ newv = int128_make128(env->regs[R_EBX], env->regs[R_ECX]);
+
+ o0 = cpu_ldq_data_ra(env, a0 + 0, ra);
+ o1 = cpu_ldq_data_ra(env, a0 + 8, ra);
+
+ oldv = int128_make128(o0, o1);
+ success = int128_eq(oldv, cmpv);
+ if (!success) {
+ newv = oldv;
+ }
+
+ cpu_stq_data_ra(env, a0 + 0, int128_getlo(newv), ra);
+ cpu_stq_data_ra(env, a0 + 8, int128_gethi(newv), ra);
+
+ if (success) {
+ eflags |= CC_Z;
+ } else {
+ env->regs[R_EAX] = int128_getlo(oldv);
+ env->regs[R_EDX] = int128_gethi(oldv);
+ eflags &= ~CC_Z;
+ }
+ CC_SRC = eflags;
+}
+
+void helper_cmpxchg16b(CPUX86State *env, target_ulong a0)
+{
+ uintptr_t ra = GETPC();
+
+ if ((a0 & 0xf) != 0) {
+ raise_exception_ra(env, EXCP0D_GPF, ra);
+ } else {
+#ifndef CONFIG_ATOMIC128
+ cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
+#else
+ int eflags = cpu_cc_compute_all(env, CC_OP);
+
+ Int128 cmpv = int128_make128(env->regs[R_EAX], env->regs[R_EDX]);
+ Int128 newv = int128_make128(env->regs[R_EBX], env->regs[R_ECX]);
+
+ int mem_idx = cpu_mmu_index(env, false);
+ TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx);
+ Int128 oldv = helper_atomic_cmpxchgo_le_mmu(env, a0, cmpv,
+ newv, oi, ra);
+
+ if (int128_eq(oldv, cmpv)) {
+ eflags |= CC_Z;
+ } else {
+ env->regs[R_EAX] = int128_getlo(oldv);
+ env->regs[R_EDX] = int128_gethi(oldv);
+ eflags &= ~CC_Z;
+ }
+ CC_SRC = eflags;
+#endif
+ }
+}
+#endif
+
+void helper_boundw(CPUX86State *env, target_ulong a0, int v)
+{
+ int low, high;
+
+ low = cpu_ldsw_data_ra(env, a0, GETPC());
+ high = cpu_ldsw_data_ra(env, a0 + 2, GETPC());
+ v = (int16_t)v;
+ if (v < low || v > high) {
+ if (env->hflags & HF_MPX_EN_MASK) {
+ env->bndcs_regs.sts = 0;
+ }
+ raise_exception_ra(env, EXCP05_BOUND, GETPC());
+ }
+}
+
+void helper_boundl(CPUX86State *env, target_ulong a0, int v)
+{
+ int low, high;
+
+ low = cpu_ldl_data_ra(env, a0, GETPC());
+ high = cpu_ldl_data_ra(env, a0 + 4, GETPC());
+ if (v < low || v > high) {
+ if (env->hflags & HF_MPX_EN_MASK) {
+ env->bndcs_regs.sts = 0;
+ }
+ raise_exception_ra(env, EXCP05_BOUND, GETPC());
+ }
+}
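
As a quick illustration of the two helpers above, here is a hedged, stand-alone model of the BOUND comparison: the operand must fall inside a signed [low, high] pair read from memory, otherwise #BR (EXCP05_BOUND) is raised. The exception is modelled as a return code and the function name is invented.

#include <stdint.h>
#include <stdio.h>

/* One BOUND check: in range -> 0, out of range -> would raise #BR. */
static int bound_check32(int32_t v, int32_t low, int32_t high)
{
    return (v < low || v > high) ? -1 : 0;
}

int main(void)
{
    printf("%d\n", bound_check32(5, 0, 10));   /* 0: in range */
    printf("%d\n", bound_check32(-3, 0, 10));  /* -1: #BR */
    return 0;
}
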
+
+#if !defined(CONFIG_USER_ONLY)
+/* Try to fill the TLB; raise an exception on failure. If retaddr is
+ * NULL, the function was called from C code (i.e. not from generated
+ * code or from helper.c).
+ */
+/* XXX: fix it to restore all registers */
+void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ int ret;
+
+ ret = x86_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
+ if (ret) {
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ raise_exception_err_ra(env, cs->exception_index, env->error_code, retaddr);
+ }
+}
+#endif
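
To make the data flow of helper_cmpxchg8b_unlocked() above easier to follow, here is a small self-contained model; local variables stand in for guest registers and memory, and the function name is invented rather than taken from QEMU.

#include <stdint.h>
#include <stdio.h>

/* Toy CMPXCHG8B: compare EDX:EAX with the 64-bit memory operand; on a
 * match store ECX:EBX and report ZF set, otherwise load the old value
 * into EDX:EAX and report ZF clear.  The store happens in both cases,
 * mirroring the "always do the store" behaviour of the helper. */
static int cmpxchg8b(uint64_t *mem, uint32_t *eax, uint32_t *edx,
                     uint32_t ebx, uint32_t ecx)
{
    uint64_t cmpv = ((uint64_t)*edx << 32) | *eax;
    uint64_t newv = ((uint64_t)ecx << 32) | ebx;
    uint64_t oldv = *mem;

    if (oldv == cmpv) {
        *mem = newv;              /* success: memory gets ECX:EBX */
        return 1;                 /* ZF set */
    }
    *mem = oldv;                  /* store the same value back */
    *eax = (uint32_t)oldv;        /* failure: EDX:EAX gets the old value */
    *edx = (uint32_t)(oldv >> 32);
    return 0;                     /* ZF clear */
}

int main(void)
{
    uint64_t mem = 0x1122334455667788ULL;
    uint32_t eax = 0x55667788, edx = 0x11223344;

    printf("zf=%d mem=%016llx\n",
           cmpxchg8b(&mem, &eax, &edx, 0xdeadbeef, 0xcafebabe),
           (unsigned long long)mem);
    return 0;
}
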
diff --git a/target/i386/misc_helper.c b/target/i386/misc_helper.c
new file mode 100644
index 0000000000..3f666b4b87
--- /dev/null
+++ b/target/i386/misc_helper.c
@@ -0,0 +1,639 @@
+/*
+ * x86 misc helpers
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "exec/address-spaces.h"
+
+void helper_outb(CPUX86State *env, uint32_t port, uint32_t data)
+{
+#ifdef CONFIG_USER_ONLY
+ fprintf(stderr, "outb: port=0x%04x, data=%02x\n", port, data);
+#else
+ address_space_stb(&address_space_io, port, data,
+ cpu_get_mem_attrs(env), NULL);
+#endif
+}
+
+target_ulong helper_inb(CPUX86State *env, uint32_t port)
+{
+#ifdef CONFIG_USER_ONLY
+ fprintf(stderr, "inb: port=0x%04x\n", port);
+ return 0;
+#else
+ return address_space_ldub(&address_space_io, port,
+ cpu_get_mem_attrs(env), NULL);
+#endif
+}
+
+void helper_outw(CPUX86State *env, uint32_t port, uint32_t data)
+{
+#ifdef CONFIG_USER_ONLY
+ fprintf(stderr, "outw: port=0x%04x, data=%04x\n", port, data);
+#else
+ address_space_stw(&address_space_io, port, data,
+ cpu_get_mem_attrs(env), NULL);
+#endif
+}
+
+target_ulong helper_inw(CPUX86State *env, uint32_t port)
+{
+#ifdef CONFIG_USER_ONLY
+ fprintf(stderr, "inw: port=0x%04x\n", port);
+ return 0;
+#else
+ return address_space_lduw(&address_space_io, port,
+ cpu_get_mem_attrs(env), NULL);
+#endif
+}
+
+void helper_outl(CPUX86State *env, uint32_t port, uint32_t data)
+{
+#ifdef CONFIG_USER_ONLY
+ fprintf(stderr, "outw: port=0x%04x, data=%08x\n", port, data);
+#else
+ address_space_stl(&address_space_io, port, data,
+ cpu_get_mem_attrs(env), NULL);
+#endif
+}
+
+target_ulong helper_inl(CPUX86State *env, uint32_t port)
+{
+#ifdef CONFIG_USER_ONLY
+ fprintf(stderr, "inl: port=0x%04x\n", port);
+ return 0;
+#else
+ return address_space_ldl(&address_space_io, port,
+ cpu_get_mem_attrs(env), NULL);
+#endif
+}
+
+void helper_into(CPUX86State *env, int next_eip_addend)
+{
+ int eflags;
+
+ eflags = cpu_cc_compute_all(env, CC_OP);
+ if (eflags & CC_O) {
+ raise_interrupt(env, EXCP04_INTO, 1, 0, next_eip_addend);
+ }
+}
+
+void helper_cpuid(CPUX86State *env)
+{
+ uint32_t eax, ebx, ecx, edx;
+
+ cpu_svm_check_intercept_param(env, SVM_EXIT_CPUID, 0);
+
+ cpu_x86_cpuid(env, (uint32_t)env->regs[R_EAX], (uint32_t)env->regs[R_ECX],
+ &eax, &ebx, &ecx, &edx);
+ env->regs[R_EAX] = eax;
+ env->regs[R_EBX] = ebx;
+ env->regs[R_ECX] = ecx;
+ env->regs[R_EDX] = edx;
+}
+
+#if defined(CONFIG_USER_ONLY)
+target_ulong helper_read_crN(CPUX86State *env, int reg)
+{
+ return 0;
+}
+
+void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
+{
+}
+#else
+target_ulong helper_read_crN(CPUX86State *env, int reg)
+{
+ target_ulong val;
+
+ cpu_svm_check_intercept_param(env, SVM_EXIT_READ_CR0 + reg, 0);
+ switch (reg) {
+ default:
+ val = env->cr[reg];
+ break;
+ case 8:
+ if (!(env->hflags2 & HF2_VINTR_MASK)) {
+ val = cpu_get_apic_tpr(x86_env_get_cpu(env)->apic_state);
+ } else {
+ val = env->v_tpr;
+ }
+ break;
+ }
+ return val;
+}
+
+void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
+{
+ cpu_svm_check_intercept_param(env, SVM_EXIT_WRITE_CR0 + reg, 0);
+ switch (reg) {
+ case 0:
+ cpu_x86_update_cr0(env, t0);
+ break;
+ case 3:
+ cpu_x86_update_cr3(env, t0);
+ break;
+ case 4:
+ cpu_x86_update_cr4(env, t0);
+ break;
+ case 8:
+ if (!(env->hflags2 & HF2_VINTR_MASK)) {
+ cpu_set_apic_tpr(x86_env_get_cpu(env)->apic_state, t0);
+ }
+ env->v_tpr = t0 & 0x0f;
+ break;
+ default:
+ env->cr[reg] = t0;
+ break;
+ }
+}
+#endif
+
+void helper_lmsw(CPUX86State *env, target_ulong t0)
+{
+ /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
+ if already set to one. */
+ t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
+ helper_write_crN(env, 0, t0);
+}
+
+void helper_invlpg(CPUX86State *env, target_ulong addr)
+{
+ X86CPU *cpu = x86_env_get_cpu(env);
+
+ cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPG, 0);
+ tlb_flush_page(CPU(cpu), addr);
+}
+
+void helper_rdtsc(CPUX86State *env)
+{
+ uint64_t val;
+
+ if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
+ raise_exception_ra(env, EXCP0D_GPF, GETPC());
+ }
+ cpu_svm_check_intercept_param(env, SVM_EXIT_RDTSC, 0);
+
+ val = cpu_get_tsc(env) + env->tsc_offset;
+ env->regs[R_EAX] = (uint32_t)(val);
+ env->regs[R_EDX] = (uint32_t)(val >> 32);
+}
+
+void helper_rdtscp(CPUX86State *env)
+{
+ helper_rdtsc(env);
+ env->regs[R_ECX] = (uint32_t)(env->tsc_aux);
+}
+
+void helper_rdpmc(CPUX86State *env)
+{
+ if (((env->cr[4] & CR4_PCE_MASK) == 0) &&
+ ((env->hflags & HF_CPL_MASK) != 0)) {
+ raise_exception_ra(env, EXCP0D_GPF, GETPC());
+ }
+ cpu_svm_check_intercept_param(env, SVM_EXIT_RDPMC, 0);
+
+ /* currently unimplemented */
+ qemu_log_mask(LOG_UNIMP, "x86: unimplemented rdpmc\n");
+ raise_exception_err(env, EXCP06_ILLOP, 0);
+}
+
+#if defined(CONFIG_USER_ONLY)
+void helper_wrmsr(CPUX86State *env)
+{
+}
+
+void helper_rdmsr(CPUX86State *env)
+{
+}
+#else
+void helper_wrmsr(CPUX86State *env)
+{
+ uint64_t val;
+
+ cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 1);
+
+ val = ((uint32_t)env->regs[R_EAX]) |
+ ((uint64_t)((uint32_t)env->regs[R_EDX]) << 32);
+
+ switch ((uint32_t)env->regs[R_ECX]) {
+ case MSR_IA32_SYSENTER_CS:
+ env->sysenter_cs = val & 0xffff;
+ break;
+ case MSR_IA32_SYSENTER_ESP:
+ env->sysenter_esp = val;
+ break;
+ case MSR_IA32_SYSENTER_EIP:
+ env->sysenter_eip = val;
+ break;
+ case MSR_IA32_APICBASE:
+ cpu_set_apic_base(x86_env_get_cpu(env)->apic_state, val);
+ break;
+ case MSR_EFER:
+ {
+ uint64_t update_mask;
+
+ update_mask = 0;
+ if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_SYSCALL) {
+ update_mask |= MSR_EFER_SCE;
+ }
+ if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
+ update_mask |= MSR_EFER_LME;
+ }
+ if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_FFXSR) {
+ update_mask |= MSR_EFER_FFXSR;
+ }
+ if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_NX) {
+ update_mask |= MSR_EFER_NXE;
+ }
+ if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
+ update_mask |= MSR_EFER_SVME;
+ }
+ cpu_load_efer(env, (env->efer & ~update_mask) |
+ (val & update_mask));
+ }
+ break;
+ case MSR_STAR:
+ env->star = val;
+ break;
+ case MSR_PAT:
+ env->pat = val;
+ break;
+ case MSR_VM_HSAVE_PA:
+ env->vm_hsave = val;
+ break;
+#ifdef TARGET_X86_64
+ case MSR_LSTAR:
+ env->lstar = val;
+ break;
+ case MSR_CSTAR:
+ env->cstar = val;
+ break;
+ case MSR_FMASK:
+ env->fmask = val;
+ break;
+ case MSR_FSBASE:
+ env->segs[R_FS].base = val;
+ break;
+ case MSR_GSBASE:
+ env->segs[R_GS].base = val;
+ break;
+ case MSR_KERNELGSBASE:
+ env->kernelgsbase = val;
+ break;
+#endif
+ case MSR_MTRRphysBase(0):
+ case MSR_MTRRphysBase(1):
+ case MSR_MTRRphysBase(2):
+ case MSR_MTRRphysBase(3):
+ case MSR_MTRRphysBase(4):
+ case MSR_MTRRphysBase(5):
+ case MSR_MTRRphysBase(6):
+ case MSR_MTRRphysBase(7):
+ env->mtrr_var[((uint32_t)env->regs[R_ECX] -
+ MSR_MTRRphysBase(0)) / 2].base = val;
+ break;
+ case MSR_MTRRphysMask(0):
+ case MSR_MTRRphysMask(1):
+ case MSR_MTRRphysMask(2):
+ case MSR_MTRRphysMask(3):
+ case MSR_MTRRphysMask(4):
+ case MSR_MTRRphysMask(5):
+ case MSR_MTRRphysMask(6):
+ case MSR_MTRRphysMask(7):
+ env->mtrr_var[((uint32_t)env->regs[R_ECX] -
+ MSR_MTRRphysMask(0)) / 2].mask = val;
+ break;
+ case MSR_MTRRfix64K_00000:
+ env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
+ MSR_MTRRfix64K_00000] = val;
+ break;
+ case MSR_MTRRfix16K_80000:
+ case MSR_MTRRfix16K_A0000:
+ env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
+ MSR_MTRRfix16K_80000 + 1] = val;
+ break;
+ case MSR_MTRRfix4K_C0000:
+ case MSR_MTRRfix4K_C8000:
+ case MSR_MTRRfix4K_D0000:
+ case MSR_MTRRfix4K_D8000:
+ case MSR_MTRRfix4K_E0000:
+ case MSR_MTRRfix4K_E8000:
+ case MSR_MTRRfix4K_F0000:
+ case MSR_MTRRfix4K_F8000:
+ env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
+ MSR_MTRRfix4K_C0000 + 3] = val;
+ break;
+ case MSR_MTRRdefType:
+ env->mtrr_deftype = val;
+ break;
+ case MSR_MCG_STATUS:
+ env->mcg_status = val;
+ break;
+ case MSR_MCG_CTL:
+ if ((env->mcg_cap & MCG_CTL_P)
+ && (val == 0 || val == ~(uint64_t)0)) {
+ env->mcg_ctl = val;
+ }
+ break;
+ case MSR_TSC_AUX:
+ env->tsc_aux = val;
+ break;
+ case MSR_IA32_MISC_ENABLE:
+ env->msr_ia32_misc_enable = val;
+ break;
+ case MSR_IA32_BNDCFGS:
+ /* FIXME: #GP if reserved bits are set. */
+ /* FIXME: Extend highest implemented bit of linear address. */
+ env->msr_bndcfgs = val;
+ cpu_sync_bndcs_hflags(env);
+ break;
+ default:
+ if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
+ && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
+ (4 * env->mcg_cap & 0xff)) {
+ uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
+ if ((offset & 0x3) != 0
+ || (val == 0 || val == ~(uint64_t)0)) {
+ env->mce_banks[offset] = val;
+ }
+ break;
+ }
+ /* XXX: exception? */
+ break;
+ }
+}
+
+void helper_rdmsr(CPUX86State *env)
+{
+ uint64_t val;
+
+ cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0);
+
+ switch ((uint32_t)env->regs[R_ECX]) {
+ case MSR_IA32_SYSENTER_CS:
+ val = env->sysenter_cs;
+ break;
+ case MSR_IA32_SYSENTER_ESP:
+ val = env->sysenter_esp;
+ break;
+ case MSR_IA32_SYSENTER_EIP:
+ val = env->sysenter_eip;
+ break;
+ case MSR_IA32_APICBASE:
+ val = cpu_get_apic_base(x86_env_get_cpu(env)->apic_state);
+ break;
+ case MSR_EFER:
+ val = env->efer;
+ break;
+ case MSR_STAR:
+ val = env->star;
+ break;
+ case MSR_PAT:
+ val = env->pat;
+ break;
+ case MSR_VM_HSAVE_PA:
+ val = env->vm_hsave;
+ break;
+ case MSR_IA32_PERF_STATUS:
+ /* tsc_increment_by_tick */
+ val = 1000ULL;
+ /* CPU multiplier */
+ val |= (((uint64_t)4ULL) << 40);
+ break;
+#ifdef TARGET_X86_64
+ case MSR_LSTAR:
+ val = env->lstar;
+ break;
+ case MSR_CSTAR:
+ val = env->cstar;
+ break;
+ case MSR_FMASK:
+ val = env->fmask;
+ break;
+ case MSR_FSBASE:
+ val = env->segs[R_FS].base;
+ break;
+ case MSR_GSBASE:
+ val = env->segs[R_GS].base;
+ break;
+ case MSR_KERNELGSBASE:
+ val = env->kernelgsbase;
+ break;
+ case MSR_TSC_AUX:
+ val = env->tsc_aux;
+ break;
+#endif
+ case MSR_MTRRphysBase(0):
+ case MSR_MTRRphysBase(1):
+ case MSR_MTRRphysBase(2):
+ case MSR_MTRRphysBase(3):
+ case MSR_MTRRphysBase(4):
+ case MSR_MTRRphysBase(5):
+ case MSR_MTRRphysBase(6):
+ case MSR_MTRRphysBase(7):
+ val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
+ MSR_MTRRphysBase(0)) / 2].base;
+ break;
+ case MSR_MTRRphysMask(0):
+ case MSR_MTRRphysMask(1):
+ case MSR_MTRRphysMask(2):
+ case MSR_MTRRphysMask(3):
+ case MSR_MTRRphysMask(4):
+ case MSR_MTRRphysMask(5):
+ case MSR_MTRRphysMask(6):
+ case MSR_MTRRphysMask(7):
+ val = env->mtrr_var[((uint32_t)env->regs[R_ECX] -
+ MSR_MTRRphysMask(0)) / 2].mask;
+ break;
+ case MSR_MTRRfix64K_00000:
+ val = env->mtrr_fixed[0];
+ break;
+ case MSR_MTRRfix16K_80000:
+ case MSR_MTRRfix16K_A0000:
+ val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
+ MSR_MTRRfix16K_80000 + 1];
+ break;
+ case MSR_MTRRfix4K_C0000:
+ case MSR_MTRRfix4K_C8000:
+ case MSR_MTRRfix4K_D0000:
+ case MSR_MTRRfix4K_D8000:
+ case MSR_MTRRfix4K_E0000:
+ case MSR_MTRRfix4K_E8000:
+ case MSR_MTRRfix4K_F0000:
+ case MSR_MTRRfix4K_F8000:
+ val = env->mtrr_fixed[(uint32_t)env->regs[R_ECX] -
+ MSR_MTRRfix4K_C0000 + 3];
+ break;
+ case MSR_MTRRdefType:
+ val = env->mtrr_deftype;
+ break;
+ case MSR_MTRRcap:
+ if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
+ val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT |
+ MSR_MTRRcap_WC_SUPPORTED;
+ } else {
+ /* XXX: exception? */
+ val = 0;
+ }
+ break;
+ case MSR_MCG_CAP:
+ val = env->mcg_cap;
+ break;
+ case MSR_MCG_CTL:
+ if (env->mcg_cap & MCG_CTL_P) {
+ val = env->mcg_ctl;
+ } else {
+ val = 0;
+ }
+ break;
+ case MSR_MCG_STATUS:
+ val = env->mcg_status;
+ break;
+ case MSR_IA32_MISC_ENABLE:
+ val = env->msr_ia32_misc_enable;
+ break;
+ case MSR_IA32_BNDCFGS:
+ val = env->msr_bndcfgs;
+ break;
+ default:
+ if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
+ && (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
+ (4 * env->mcg_cap & 0xff)) {
+ uint32_t offset = (uint32_t)env->regs[R_ECX] - MSR_MC0_CTL;
+ val = env->mce_banks[offset];
+ break;
+ }
+ /* XXX: exception? */
+ val = 0;
+ break;
+ }
+ env->regs[R_EAX] = (uint32_t)(val);
+ env->regs[R_EDX] = (uint32_t)(val >> 32);
+}
+#endif
+
+static void do_pause(X86CPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+
+ /* Just let another CPU run. */
+ cs->exception_index = EXCP_INTERRUPT;
+ cpu_loop_exit(cs);
+}
+
+static void do_hlt(X86CPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ CPUX86State *env = &cpu->env;
+
+ env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
+ cs->halted = 1;
+ cs->exception_index = EXCP_HLT;
+ cpu_loop_exit(cs);
+}
+
+void helper_hlt(CPUX86State *env, int next_eip_addend)
+{
+ X86CPU *cpu = x86_env_get_cpu(env);
+
+ cpu_svm_check_intercept_param(env, SVM_EXIT_HLT, 0);
+ env->eip += next_eip_addend;
+
+ do_hlt(cpu);
+}
+
+void helper_monitor(CPUX86State *env, target_ulong ptr)
+{
+ if ((uint32_t)env->regs[R_ECX] != 0) {
+ raise_exception_ra(env, EXCP0D_GPF, GETPC());
+ }
+ /* XXX: store address? */
+ cpu_svm_check_intercept_param(env, SVM_EXIT_MONITOR, 0);
+}
+
+void helper_mwait(CPUX86State *env, int next_eip_addend)
+{
+ CPUState *cs;
+ X86CPU *cpu;
+
+ if ((uint32_t)env->regs[R_ECX] != 0) {
+ raise_exception_ra(env, EXCP0D_GPF, GETPC());
+ }
+ cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0);
+ env->eip += next_eip_addend;
+
+ cpu = x86_env_get_cpu(env);
+ cs = CPU(cpu);
+ /* XXX: not complete but not completely erroneous */
+ if (cs->cpu_index != 0 || CPU_NEXT(cs) != NULL) {
+ do_pause(cpu);
+ } else {
+ do_hlt(cpu);
+ }
+}
+
+void helper_pause(CPUX86State *env, int next_eip_addend)
+{
+ X86CPU *cpu = x86_env_get_cpu(env);
+
+ cpu_svm_check_intercept_param(env, SVM_EXIT_PAUSE, 0);
+ env->eip += next_eip_addend;
+
+ do_pause(cpu);
+}
+
+void helper_debug(CPUX86State *env)
+{
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+
+ cs->exception_index = EXCP_DEBUG;
+ cpu_loop_exit(cs);
+}
+
+uint64_t helper_rdpkru(CPUX86State *env, uint32_t ecx)
+{
+ if ((env->cr[4] & CR4_PKE_MASK) == 0) {
+ raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
+ }
+ if (ecx != 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+ }
+
+ return env->pkru;
+}
+
+void helper_wrpkru(CPUX86State *env, uint32_t ecx, uint64_t val)
+{
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+
+ if ((env->cr[4] & CR4_PKE_MASK) == 0) {
+ raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
+ }
+ if (ecx != 0 || (val & 0xFFFFFFFF00000000ull)) {
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+ }
+
+ env->pkru = val;
+ tlb_flush(cs, 1);
+}
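
A tiny stand-alone sketch of the EDX:EAX packing that helper_wrmsr() and helper_rdmsr() perform around the big switch statements above; plain C with invented function names, not the QEMU helpers themselves.

#include <stdint.h>
#include <stdio.h>

/* WRMSR assembles the 64-bit MSR value from EDX:EAX ... */
static uint64_t wrmsr_value(uint32_t eax, uint32_t edx)
{
    return (uint64_t)eax | ((uint64_t)edx << 32);
}

/* ... and RDMSR splits it back into the two 32-bit halves. */
static void rdmsr_value(uint64_t val, uint32_t *eax, uint32_t *edx)
{
    *eax = (uint32_t)val;
    *edx = (uint32_t)(val >> 32);
}

int main(void)
{
    uint32_t eax, edx;
    uint64_t v = wrmsr_value(0x89abcdefu, 0x01234567u);

    rdmsr_value(v, &eax, &edx);
    printf("val=%016llx eax=%08x edx=%08x\n",
           (unsigned long long)v, eax, edx);
    return 0;
}
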
diff --git a/target/i386/monitor.c b/target/i386/monitor.c
new file mode 100644
index 0000000000..9a3b4d746e
--- /dev/null
+++ b/target/i386/monitor.c
@@ -0,0 +1,513 @@
+/*
+ * QEMU monitor
+ *
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "monitor/monitor.h"
+#include "monitor/hmp-target.h"
+#include "hw/i386/pc.h"
+#include "sysemu/kvm.h"
+#include "hmp.h"
+
+
+static void print_pte(Monitor *mon, hwaddr addr,
+ hwaddr pte,
+ hwaddr mask)
+{
+#ifdef TARGET_X86_64
+ if (addr & (1ULL << 47)) {
+ addr |= -1LL << 48;
+ }
+#endif
+ monitor_printf(mon, TARGET_FMT_plx ": " TARGET_FMT_plx
+ " %c%c%c%c%c%c%c%c%c\n",
+ addr,
+ pte & mask,
+ pte & PG_NX_MASK ? 'X' : '-',
+ pte & PG_GLOBAL_MASK ? 'G' : '-',
+ pte & PG_PSE_MASK ? 'P' : '-',
+ pte & PG_DIRTY_MASK ? 'D' : '-',
+ pte & PG_ACCESSED_MASK ? 'A' : '-',
+ pte & PG_PCD_MASK ? 'C' : '-',
+ pte & PG_PWT_MASK ? 'T' : '-',
+ pte & PG_USER_MASK ? 'U' : '-',
+ pte & PG_RW_MASK ? 'W' : '-');
+}
+
+static void tlb_info_32(Monitor *mon, CPUArchState *env)
+{
+ unsigned int l1, l2;
+ uint32_t pgd, pde, pte;
+
+ pgd = env->cr[3] & ~0xfff;
+ for (l1 = 0; l1 < 1024; l1++) {
+ cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
+ pde = le32_to_cpu(pde);
+ if (pde & PG_PRESENT_MASK) {
+ if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
+ /* 4M pages */
+ print_pte(mon, (l1 << 22), pde, ~((1 << 21) - 1));
+ } else {
+ for (l2 = 0; l2 < 1024; l2++) {
+ cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
+ pte = le32_to_cpu(pte);
+ if (pte & PG_PRESENT_MASK) {
+ print_pte(mon, (l1 << 22) + (l2 << 12),
+ pte & ~PG_PSE_MASK,
+ ~0xfff);
+ }
+ }
+ }
+ }
+ }
+}
+
+static void tlb_info_pae32(Monitor *mon, CPUArchState *env)
+{
+ unsigned int l1, l2, l3;
+ uint64_t pdpe, pde, pte;
+ uint64_t pdp_addr, pd_addr, pt_addr;
+
+ pdp_addr = env->cr[3] & ~0x1f;
+ for (l1 = 0; l1 < 4; l1++) {
+ cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
+ pdpe = le64_to_cpu(pdpe);
+ if (pdpe & PG_PRESENT_MASK) {
+ pd_addr = pdpe & 0x3fffffffff000ULL;
+ for (l2 = 0; l2 < 512; l2++) {
+ cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
+ pde = le64_to_cpu(pde);
+ if (pde & PG_PRESENT_MASK) {
+ if (pde & PG_PSE_MASK) {
+ /* 2M pages with PAE, CR4.PSE is ignored */
+ print_pte(mon, (l1 << 30) + (l2 << 21), pde,
+ ~((hwaddr)(1 << 20) - 1));
+ } else {
+ pt_addr = pde & 0x3fffffffff000ULL;
+ for (l3 = 0; l3 < 512; l3++) {
+ cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
+ pte = le64_to_cpu(pte);
+ if (pte & PG_PRESENT_MASK) {
+ print_pte(mon, (l1 << 30) + (l2 << 21)
+ + (l3 << 12),
+ pte & ~PG_PSE_MASK,
+ ~(hwaddr)0xfff);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+#ifdef TARGET_X86_64
+static void tlb_info_64(Monitor *mon, CPUArchState *env)
+{
+ uint64_t l1, l2, l3, l4;
+ uint64_t pml4e, pdpe, pde, pte;
+ uint64_t pml4_addr, pdp_addr, pd_addr, pt_addr;
+
+ pml4_addr = env->cr[3] & 0x3fffffffff000ULL;
+ for (l1 = 0; l1 < 512; l1++) {
+ cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
+ pml4e = le64_to_cpu(pml4e);
+ if (pml4e & PG_PRESENT_MASK) {
+ pdp_addr = pml4e & 0x3fffffffff000ULL;
+ for (l2 = 0; l2 < 512; l2++) {
+ cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
+ pdpe = le64_to_cpu(pdpe);
+ if (pdpe & PG_PRESENT_MASK) {
+ if (pdpe & PG_PSE_MASK) {
+ /* 1G pages, CR4.PSE is ignored */
+ print_pte(mon, (l1 << 39) + (l2 << 30), pdpe,
+ 0x3ffffc0000000ULL);
+ } else {
+ pd_addr = pdpe & 0x3fffffffff000ULL;
+ for (l3 = 0; l3 < 512; l3++) {
+ cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
+ pde = le64_to_cpu(pde);
+ if (pde & PG_PRESENT_MASK) {
+ if (pde & PG_PSE_MASK) {
+ /* 2M pages, CR4.PSE is ignored */
+ print_pte(mon, (l1 << 39) + (l2 << 30) +
+ (l3 << 21), pde,
+ 0x3ffffffe00000ULL);
+ } else {
+ pt_addr = pde & 0x3fffffffff000ULL;
+ for (l4 = 0; l4 < 512; l4++) {
+ cpu_physical_memory_read(pt_addr
+ + l4 * 8,
+ &pte, 8);
+ pte = le64_to_cpu(pte);
+ if (pte & PG_PRESENT_MASK) {
+ print_pte(mon, (l1 << 39) +
+ (l2 << 30) +
+ (l3 << 21) + (l4 << 12),
+ pte & ~PG_PSE_MASK,
+ 0x3fffffffff000ULL);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+#endif /* TARGET_X86_64 */
+
+void hmp_info_tlb(Monitor *mon, const QDict *qdict)
+{
+ CPUArchState *env;
+
+ env = mon_get_cpu_env();
+
+ if (!(env->cr[0] & CR0_PG_MASK)) {
+ monitor_printf(mon, "PG disabled\n");
+ return;
+ }
+ if (env->cr[4] & CR4_PAE_MASK) {
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_LMA_MASK) {
+ tlb_info_64(mon, env);
+ } else
+#endif
+ {
+ tlb_info_pae32(mon, env);
+ }
+ } else {
+ tlb_info_32(mon, env);
+ }
+}
+
+static void mem_print(Monitor *mon, hwaddr *pstart,
+ int *plast_prot,
+ hwaddr end, int prot)
+{
+ int prot1;
+ prot1 = *plast_prot;
+ if (prot != prot1) {
+ if (*pstart != -1) {
+ monitor_printf(mon, TARGET_FMT_plx "-" TARGET_FMT_plx " "
+ TARGET_FMT_plx " %c%c%c\n",
+ *pstart, end, end - *pstart,
+ prot1 & PG_USER_MASK ? 'u' : '-',
+ 'r',
+ prot1 & PG_RW_MASK ? 'w' : '-');
+ }
+ if (prot != 0) {
+ *pstart = end;
+ } else {
+ *pstart = -1;
+ }
+ *plast_prot = prot;
+ }
+}
+
+static void mem_info_32(Monitor *mon, CPUArchState *env)
+{
+ unsigned int l1, l2;
+ int prot, last_prot;
+ uint32_t pgd, pde, pte;
+ hwaddr start, end;
+
+ pgd = env->cr[3] & ~0xfff;
+ last_prot = 0;
+ start = -1;
+ for (l1 = 0; l1 < 1024; l1++) {
+ cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
+ pde = le32_to_cpu(pde);
+ end = l1 << 22;
+ if (pde & PG_PRESENT_MASK) {
+ if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
+ prot = pde & (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
+ mem_print(mon, &start, &last_prot, end, prot);
+ } else {
+ for (l2 = 0; l2 < 1024; l2++) {
+ cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
+ pte = le32_to_cpu(pte);
+ end = (l1 << 22) + (l2 << 12);
+ if (pte & PG_PRESENT_MASK) {
+ prot = pte & pde &
+ (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
+ } else {
+ prot = 0;
+ }
+ mem_print(mon, &start, &last_prot, end, prot);
+ }
+ }
+ } else {
+ prot = 0;
+ mem_print(mon, &start, &last_prot, end, prot);
+ }
+ }
+ /* Flush last range */
+ mem_print(mon, &start, &last_prot, (hwaddr)1 << 32, 0);
+}
+
+static void mem_info_pae32(Monitor *mon, CPUArchState *env)
+{
+ unsigned int l1, l2, l3;
+ int prot, last_prot;
+ uint64_t pdpe, pde, pte;
+ uint64_t pdp_addr, pd_addr, pt_addr;
+ hwaddr start, end;
+
+ pdp_addr = env->cr[3] & ~0x1f;
+ last_prot = 0;
+ start = -1;
+ for (l1 = 0; l1 < 4; l1++) {
+ cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
+ pdpe = le64_to_cpu(pdpe);
+ end = l1 << 30;
+ if (pdpe & PG_PRESENT_MASK) {
+ pd_addr = pdpe & 0x3fffffffff000ULL;
+ for (l2 = 0; l2 < 512; l2++) {
+ cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
+ pde = le64_to_cpu(pde);
+ end = (l1 << 30) + (l2 << 21);
+ if (pde & PG_PRESENT_MASK) {
+ if (pde & PG_PSE_MASK) {
+ prot = pde & (PG_USER_MASK | PG_RW_MASK |
+ PG_PRESENT_MASK);
+ mem_print(mon, &start, &last_prot, end, prot);
+ } else {
+ pt_addr = pde & 0x3fffffffff000ULL;
+ for (l3 = 0; l3 < 512; l3++) {
+ cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
+ pte = le64_to_cpu(pte);
+ end = (l1 << 30) + (l2 << 21) + (l3 << 12);
+ if (pte & PG_PRESENT_MASK) {
+ prot = pte & pde & (PG_USER_MASK | PG_RW_MASK |
+ PG_PRESENT_MASK);
+ } else {
+ prot = 0;
+ }
+ mem_print(mon, &start, &last_prot, end, prot);
+ }
+ }
+ } else {
+ prot = 0;
+ mem_print(mon, &start, &last_prot, end, prot);
+ }
+ }
+ } else {
+ prot = 0;
+ mem_print(mon, &start, &last_prot, end, prot);
+ }
+ }
+ /* Flush last range */
+ mem_print(mon, &start, &last_prot, (hwaddr)1 << 32, 0);
+}
+
+
+#ifdef TARGET_X86_64
+static void mem_info_64(Monitor *mon, CPUArchState *env)
+{
+ int prot, last_prot;
+ uint64_t l1, l2, l3, l4;
+ uint64_t pml4e, pdpe, pde, pte;
+ uint64_t pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;
+
+ pml4_addr = env->cr[3] & 0x3fffffffff000ULL;
+ last_prot = 0;
+ start = -1;
+ for (l1 = 0; l1 < 512; l1++) {
+ cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
+ pml4e = le64_to_cpu(pml4e);
+ end = l1 << 39;
+ if (pml4e & PG_PRESENT_MASK) {
+ pdp_addr = pml4e & 0x3fffffffff000ULL;
+ for (l2 = 0; l2 < 512; l2++) {
+ cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
+ pdpe = le64_to_cpu(pdpe);
+ end = (l1 << 39) + (l2 << 30);
+ if (pdpe & PG_PRESENT_MASK) {
+ if (pdpe & PG_PSE_MASK) {
+ prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
+ PG_PRESENT_MASK);
+ prot &= pml4e;
+ mem_print(mon, &start, &last_prot, end, prot);
+ } else {
+ pd_addr = pdpe & 0x3fffffffff000ULL;
+ for (l3 = 0; l3 < 512; l3++) {
+ cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
+ pde = le64_to_cpu(pde);
+ end = (l1 << 39) + (l2 << 30) + (l3 << 21);
+ if (pde & PG_PRESENT_MASK) {
+ if (pde & PG_PSE_MASK) {
+ prot = pde & (PG_USER_MASK | PG_RW_MASK |
+ PG_PRESENT_MASK);
+ prot &= pml4e & pdpe;
+ mem_print(mon, &start, &last_prot, end, prot);
+ } else {
+ pt_addr = pde & 0x3fffffffff000ULL;
+ for (l4 = 0; l4 < 512; l4++) {
+ cpu_physical_memory_read(pt_addr
+ + l4 * 8,
+ &pte, 8);
+ pte = le64_to_cpu(pte);
+ end = (l1 << 39) + (l2 << 30) +
+ (l3 << 21) + (l4 << 12);
+ if (pte & PG_PRESENT_MASK) {
+ prot = pte & (PG_USER_MASK | PG_RW_MASK |
+ PG_PRESENT_MASK);
+ prot &= pml4e & pdpe & pde;
+ } else {
+ prot = 0;
+ }
+ mem_print(mon, &start, &last_prot, end, prot);
+ }
+ }
+ } else {
+ prot = 0;
+ mem_print(mon, &start, &last_prot, end, prot);
+ }
+ }
+ }
+ } else {
+ prot = 0;
+ mem_print(mon, &start, &last_prot, end, prot);
+ }
+ }
+ } else {
+ prot = 0;
+ mem_print(mon, &start, &last_prot, end, prot);
+ }
+ }
+ /* Flush last range */
+ mem_print(mon, &start, &last_prot, (hwaddr)1 << 48, 0);
+}
+#endif /* TARGET_X86_64 */
+
+void hmp_info_mem(Monitor *mon, const QDict *qdict)
+{
+ CPUArchState *env;
+
+ env = mon_get_cpu_env();
+
+ if (!(env->cr[0] & CR0_PG_MASK)) {
+ monitor_printf(mon, "PG disabled\n");
+ return;
+ }
+ if (env->cr[4] & CR4_PAE_MASK) {
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_LMA_MASK) {
+ mem_info_64(mon, env);
+ } else
+#endif
+ {
+ mem_info_pae32(mon, env);
+ }
+ } else {
+ mem_info_32(mon, env);
+ }
+}
+
+void hmp_mce(Monitor *mon, const QDict *qdict)
+{
+ X86CPU *cpu;
+ CPUState *cs;
+ int cpu_index = qdict_get_int(qdict, "cpu_index");
+ int bank = qdict_get_int(qdict, "bank");
+ uint64_t status = qdict_get_int(qdict, "status");
+ uint64_t mcg_status = qdict_get_int(qdict, "mcg_status");
+ uint64_t addr = qdict_get_int(qdict, "addr");
+ uint64_t misc = qdict_get_int(qdict, "misc");
+ int flags = MCE_INJECT_UNCOND_AO;
+
+ if (qdict_get_try_bool(qdict, "broadcast", false)) {
+ flags |= MCE_INJECT_BROADCAST;
+ }
+ cs = qemu_get_cpu(cpu_index);
+ if (cs != NULL) {
+ cpu = X86_CPU(cs);
+ cpu_x86_inject_mce(mon, cpu, bank, status, mcg_status, addr, misc,
+ flags);
+ }
+}
+
+static target_long monitor_get_pc(const struct MonitorDef *md, int val)
+{
+ CPUArchState *env = mon_get_cpu_env();
+ return env->eip + env->segs[R_CS].base;
+}
+
+const MonitorDef monitor_defs[] = {
+#define SEG(name, seg) \
+ { name, offsetof(CPUX86State, segs[seg].selector), NULL, MD_I32 },\
+ { name ".base", offsetof(CPUX86State, segs[seg].base) },\
+ { name ".limit", offsetof(CPUX86State, segs[seg].limit), NULL, MD_I32 },
+
+ { "eax", offsetof(CPUX86State, regs[0]) },
+ { "ecx", offsetof(CPUX86State, regs[1]) },
+ { "edx", offsetof(CPUX86State, regs[2]) },
+ { "ebx", offsetof(CPUX86State, regs[3]) },
+ { "esp|sp", offsetof(CPUX86State, regs[4]) },
+ { "ebp|fp", offsetof(CPUX86State, regs[5]) },
+ { "esi", offsetof(CPUX86State, regs[6]) },
+ { "edi", offsetof(CPUX86State, regs[7]) },
+#ifdef TARGET_X86_64
+ { "r8", offsetof(CPUX86State, regs[8]) },
+ { "r9", offsetof(CPUX86State, regs[9]) },
+ { "r10", offsetof(CPUX86State, regs[10]) },
+ { "r11", offsetof(CPUX86State, regs[11]) },
+ { "r12", offsetof(CPUX86State, regs[12]) },
+ { "r13", offsetof(CPUX86State, regs[13]) },
+ { "r14", offsetof(CPUX86State, regs[14]) },
+ { "r15", offsetof(CPUX86State, regs[15]) },
+#endif
+ { "eflags", offsetof(CPUX86State, eflags) },
+ { "eip", offsetof(CPUX86State, eip) },
+ SEG("cs", R_CS)
+ SEG("ds", R_DS)
+ SEG("es", R_ES)
+ SEG("ss", R_SS)
+ SEG("fs", R_FS)
+ SEG("gs", R_GS)
+ { "pc", 0, monitor_get_pc, },
+ { NULL },
+};
+
+const MonitorDef *target_monitor_defs(void)
+{
+ return monitor_defs;
+}
+
+void hmp_info_local_apic(Monitor *mon, const QDict *qdict)
+{
+ x86_cpu_dump_local_apic_state(mon_get_cpu(), (FILE *)mon, monitor_fprintf,
+ CPU_DUMP_FPU);
+}
+
+void hmp_info_io_apic(Monitor *mon, const QDict *qdict)
+{
+ if (kvm_irqchip_in_kernel() &&
+ !kvm_irqchip_is_split()) {
+ kvm_ioapic_dump_state(mon, qdict);
+ } else {
+ ioapic_dump_state(mon, qdict);
+ }
+}
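
The walkers above all rebuild virtual addresses from the table indexes they iterate over. Here is a minimal sketch of the 32-bit two-level case used by tlb_info_32() and mem_info_32(): the PDE index covers bits 31..22 and the PTE index bits 21..12, and a PSE (4 MiB) PDE stops the walk one level early. Plain C, invented function name.

#include <stdint.h>
#include <stdio.h>

/* Recompose a virtual address from page-directory/page-table indexes,
 * exactly the (l1 << 22) + (l2 << 12) arithmetic in the walkers above. */
static uint32_t va_from_indexes(uint32_t l1, uint32_t l2)
{
    return (l1 << 22) + (l2 << 12);
}

int main(void)
{
    /* e.g. PDE index 0x300, PTE index 0x42 -> 0xc0042000 */
    printf("va=%08x\n", va_from_indexes(0x300, 0x42));
    /* a 4 MiB PSE page only needs the PDE index */
    printf("4MiB page at PDE %u starts at %08x\n", 5u, 5u << 22);
    return 0;
}
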
diff --git a/target/i386/mpx_helper.c b/target/i386/mpx_helper.c
new file mode 100644
index 0000000000..7e44820659
--- /dev/null
+++ b/target/i386/mpx_helper.c
@@ -0,0 +1,168 @@
+/*
+ * x86 MPX helpers
+ *
+ * Copyright (c) 2015 Red Hat, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/cpu_ldst.h"
+#include "exec/exec-all.h"
+
+
+void cpu_sync_bndcs_hflags(CPUX86State *env)
+{
+ uint32_t hflags = env->hflags;
+ uint32_t hflags2 = env->hflags2;
+ uint32_t bndcsr;
+
+ if ((hflags & HF_CPL_MASK) == 3) {
+ bndcsr = env->bndcs_regs.cfgu;
+ } else {
+ bndcsr = env->msr_bndcfgs;
+ }
+
+ if ((env->cr[4] & CR4_OSXSAVE_MASK)
+ && (env->xcr0 & XSTATE_BNDCSR_MASK)
+ && (bndcsr & BNDCFG_ENABLE)) {
+ hflags |= HF_MPX_EN_MASK;
+ } else {
+ hflags &= ~HF_MPX_EN_MASK;
+ }
+
+ if (bndcsr & BNDCFG_BNDPRESERVE) {
+ hflags2 |= HF2_MPX_PR_MASK;
+ } else {
+ hflags2 &= ~HF2_MPX_PR_MASK;
+ }
+
+ env->hflags = hflags;
+ env->hflags2 = hflags2;
+}
+
+void helper_bndck(CPUX86State *env, uint32_t fail)
+{
+ if (unlikely(fail)) {
+ env->bndcs_regs.sts = 1;
+ raise_exception_ra(env, EXCP05_BOUND, GETPC());
+ }
+}
+
+static uint64_t lookup_bte64(CPUX86State *env, uint64_t base, uintptr_t ra)
+{
+ uint64_t bndcsr, bde, bt;
+
+ if ((env->hflags & HF_CPL_MASK) == 3) {
+ bndcsr = env->bndcs_regs.cfgu;
+ } else {
+ bndcsr = env->msr_bndcfgs;
+ }
+
+ bde = (extract64(base, 20, 28) << 3) + (extract64(bndcsr, 20, 44) << 12);
+ bt = cpu_ldq_data_ra(env, bde, ra);
+ if ((bt & 1) == 0) {
+ env->bndcs_regs.sts = bde | 2;
+ raise_exception_ra(env, EXCP05_BOUND, ra);
+ }
+
+ return (extract64(base, 3, 17) << 5) + (bt & ~7);
+}
+
+static uint32_t lookup_bte32(CPUX86State *env, uint32_t base, uintptr_t ra)
+{
+ uint32_t bndcsr, bde, bt;
+
+ if ((env->hflags & HF_CPL_MASK) == 3) {
+ bndcsr = env->bndcs_regs.cfgu;
+ } else {
+ bndcsr = env->msr_bndcfgs;
+ }
+
+ bde = (extract32(base, 12, 20) << 2) + (bndcsr & TARGET_PAGE_MASK);
+ bt = cpu_ldl_data_ra(env, bde, ra);
+ if ((bt & 1) == 0) {
+ env->bndcs_regs.sts = bde | 2;
+ raise_exception_ra(env, EXCP05_BOUND, ra);
+ }
+
+ return (extract32(base, 2, 10) << 4) + (bt & ~3);
+}
+
+uint64_t helper_bndldx64(CPUX86State *env, target_ulong base, target_ulong ptr)
+{
+ uintptr_t ra = GETPC();
+ uint64_t bte, lb, ub, pt;
+
+ bte = lookup_bte64(env, base, ra);
+ lb = cpu_ldq_data_ra(env, bte, ra);
+ ub = cpu_ldq_data_ra(env, bte + 8, ra);
+ pt = cpu_ldq_data_ra(env, bte + 16, ra);
+
+ if (pt != ptr) {
+ lb = ub = 0;
+ }
+ env->mmx_t0.MMX_Q(0) = ub;
+ return lb;
+}
+
+uint64_t helper_bndldx32(CPUX86State *env, target_ulong base, target_ulong ptr)
+{
+ uintptr_t ra = GETPC();
+ uint32_t bte, lb, ub, pt;
+
+ bte = lookup_bte32(env, base, ra);
+ lb = cpu_ldl_data_ra(env, bte, ra);
+ ub = cpu_ldl_data_ra(env, bte + 4, ra);
+ pt = cpu_ldl_data_ra(env, bte + 8, ra);
+
+ if (pt != ptr) {
+ lb = ub = 0;
+ }
+ return ((uint64_t)ub << 32) | lb;
+}
+
+void helper_bndstx64(CPUX86State *env, target_ulong base, target_ulong ptr,
+ uint64_t lb, uint64_t ub)
+{
+ uintptr_t ra = GETPC();
+ uint64_t bte;
+
+ bte = lookup_bte64(env, base, ra);
+ cpu_stq_data_ra(env, bte, lb, ra);
+ cpu_stq_data_ra(env, bte + 8, ub, ra);
+ cpu_stq_data_ra(env, bte + 16, ptr, ra);
+}
+
+void helper_bndstx32(CPUX86State *env, target_ulong base, target_ulong ptr,
+ uint64_t lb, uint64_t ub)
+{
+ uintptr_t ra = GETPC();
+ uint32_t bte;
+
+ bte = lookup_bte32(env, base, ra);
+ cpu_stl_data_ra(env, bte, lb, ra);
+ cpu_stl_data_ra(env, bte + 4, ub, ra);
+ cpu_stl_data_ra(env, bte + 8, ptr, ra);
+}
+
+void helper_bnd_jmp(CPUX86State *env)
+{
+ if (!(env->hflags2 & HF2_MPX_PR_MASK)) {
+ memset(env->bnd_regs, 0, sizeof(env->bnd_regs));
+ env->hflags &= ~HF_MPX_IU_MASK;
+ }
+}
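
The index arithmetic in lookup_bte64() above is easier to see in isolation. The sketch below shows only the bit-field split of the pointer's linear address (directory index from bits 47..20, table index from bits 19..3); the real helper additionally adds the BNDCFG base and scales each index by the entry size. Plain C with an invented extract_bits() standing in for extract64().

#include <stdint.h>
#include <stdio.h>

/* Pull len bits starting at bit pos out of a 64-bit value. */
static uint64_t extract_bits(uint64_t v, int pos, int len)
{
    return (v >> pos) & ((1ULL << len) - 1);
}

int main(void)
{
    uint64_t base = 0x00007f1234567890ULL;
    uint64_t bd_index = extract_bits(base, 20, 28); /* bound-directory index */
    uint64_t bt_index = extract_bits(base, 3, 17);  /* bound-table index */

    printf("bd_index=%llx bt_index=%llx\n",
           (unsigned long long)bd_index, (unsigned long long)bt_index);
    return 0;
}
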
diff --git a/target/i386/ops_sse.h b/target/i386/ops_sse.h
new file mode 100644
index 0000000000..7a98f53864
--- /dev/null
+++ b/target/i386/ops_sse.h
@@ -0,0 +1,2296 @@
+/*
+ * MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4/PNI support
+ *
+ * Copyright (c) 2005 Fabrice Bellard
+ * Copyright (c) 2008 Intel Corporation <andrew.zaborowski@intel.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "crypto/aes.h"
+
+#if SHIFT == 0
+#define Reg MMXReg
+#define XMM_ONLY(...)
+#define B(n) MMX_B(n)
+#define W(n) MMX_W(n)
+#define L(n) MMX_L(n)
+#define Q(n) MMX_Q(n)
+#define SUFFIX _mmx
+#else
+#define Reg ZMMReg
+#define XMM_ONLY(...) __VA_ARGS__
+#define B(n) ZMM_B(n)
+#define W(n) ZMM_W(n)
+#define L(n) ZMM_L(n)
+#define Q(n) ZMM_Q(n)
+#define SUFFIX _xmm
+#endif
+
+void glue(helper_psrlw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ int shift;
+
+ if (s->Q(0) > 15) {
+ d->Q(0) = 0;
+#if SHIFT == 1
+ d->Q(1) = 0;
+#endif
+ } else {
+ shift = s->B(0);
+ d->W(0) >>= shift;
+ d->W(1) >>= shift;
+ d->W(2) >>= shift;
+ d->W(3) >>= shift;
+#if SHIFT == 1
+ d->W(4) >>= shift;
+ d->W(5) >>= shift;
+ d->W(6) >>= shift;
+ d->W(7) >>= shift;
+#endif
+ }
+}
+
+void glue(helper_psraw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ int shift;
+
+ if (s->Q(0) > 15) {
+ shift = 15;
+ } else {
+ shift = s->B(0);
+ }
+ d->W(0) = (int16_t)d->W(0) >> shift;
+ d->W(1) = (int16_t)d->W(1) >> shift;
+ d->W(2) = (int16_t)d->W(2) >> shift;
+ d->W(3) = (int16_t)d->W(3) >> shift;
+#if SHIFT == 1
+ d->W(4) = (int16_t)d->W(4) >> shift;
+ d->W(5) = (int16_t)d->W(5) >> shift;
+ d->W(6) = (int16_t)d->W(6) >> shift;
+ d->W(7) = (int16_t)d->W(7) >> shift;
+#endif
+}
+
+void glue(helper_psllw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ int shift;
+
+ if (s->Q(0) > 15) {
+ d->Q(0) = 0;
+#if SHIFT == 1
+ d->Q(1) = 0;
+#endif
+ } else {
+ shift = s->B(0);
+ d->W(0) <<= shift;
+ d->W(1) <<= shift;
+ d->W(2) <<= shift;
+ d->W(3) <<= shift;
+#if SHIFT == 1
+ d->W(4) <<= shift;
+ d->W(5) <<= shift;
+ d->W(6) <<= shift;
+ d->W(7) <<= shift;
+#endif
+ }
+}
+
+void glue(helper_psrld, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ int shift;
+
+ if (s->Q(0) > 31) {
+ d->Q(0) = 0;
+#if SHIFT == 1
+ d->Q(1) = 0;
+#endif
+ } else {
+ shift = s->B(0);
+ d->L(0) >>= shift;
+ d->L(1) >>= shift;
+#if SHIFT == 1
+ d->L(2) >>= shift;
+ d->L(3) >>= shift;
+#endif
+ }
+}
+
+void glue(helper_psrad, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ int shift;
+
+ if (s->Q(0) > 31) {
+ shift = 31;
+ } else {
+ shift = s->B(0);
+ }
+ d->L(0) = (int32_t)d->L(0) >> shift;
+ d->L(1) = (int32_t)d->L(1) >> shift;
+#if SHIFT == 1
+ d->L(2) = (int32_t)d->L(2) >> shift;
+ d->L(3) = (int32_t)d->L(3) >> shift;
+#endif
+}
+
+void glue(helper_pslld, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ int shift;
+
+ if (s->Q(0) > 31) {
+ d->Q(0) = 0;
+#if SHIFT == 1
+ d->Q(1) = 0;
+#endif
+ } else {
+ shift = s->B(0);
+ d->L(0) <<= shift;
+ d->L(1) <<= shift;
+#if SHIFT == 1
+ d->L(2) <<= shift;
+ d->L(3) <<= shift;
+#endif
+ }
+}
+
+void glue(helper_psrlq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ int shift;
+
+ if (s->Q(0) > 63) {
+ d->Q(0) = 0;
+#if SHIFT == 1
+ d->Q(1) = 0;
+#endif
+ } else {
+ shift = s->B(0);
+ d->Q(0) >>= shift;
+#if SHIFT == 1
+ d->Q(1) >>= shift;
+#endif
+ }
+}
+
+void glue(helper_psllq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ int shift;
+
+ if (s->Q(0) > 63) {
+ d->Q(0) = 0;
+#if SHIFT == 1
+ d->Q(1) = 0;
+#endif
+ } else {
+ shift = s->B(0);
+ d->Q(0) <<= shift;
+#if SHIFT == 1
+ d->Q(1) <<= shift;
+#endif
+ }
+}
+
+#if SHIFT == 1
+void glue(helper_psrldq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ int shift, i;
+
+ shift = s->L(0);
+ if (shift > 16) {
+ shift = 16;
+ }
+ for (i = 0; i < 16 - shift; i++) {
+ d->B(i) = d->B(i + shift);
+ }
+ for (i = 16 - shift; i < 16; i++) {
+ d->B(i) = 0;
+ }
+}
+
+void glue(helper_pslldq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ int shift, i;
+
+ shift = s->L(0);
+ if (shift > 16) {
+ shift = 16;
+ }
+ for (i = 15; i >= shift; i--) {
+ d->B(i) = d->B(i - shift);
+ }
+ for (i = 0; i < shift; i++) {
+ d->B(i) = 0;
+ }
+}
+#endif
+
+#define SSE_HELPER_B(name, F) \
+ void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \
+ { \
+ d->B(0) = F(d->B(0), s->B(0)); \
+ d->B(1) = F(d->B(1), s->B(1)); \
+ d->B(2) = F(d->B(2), s->B(2)); \
+ d->B(3) = F(d->B(3), s->B(3)); \
+ d->B(4) = F(d->B(4), s->B(4)); \
+ d->B(5) = F(d->B(5), s->B(5)); \
+ d->B(6) = F(d->B(6), s->B(6)); \
+ d->B(7) = F(d->B(7), s->B(7)); \
+ XMM_ONLY( \
+ d->B(8) = F(d->B(8), s->B(8)); \
+ d->B(9) = F(d->B(9), s->B(9)); \
+ d->B(10) = F(d->B(10), s->B(10)); \
+ d->B(11) = F(d->B(11), s->B(11)); \
+ d->B(12) = F(d->B(12), s->B(12)); \
+ d->B(13) = F(d->B(13), s->B(13)); \
+ d->B(14) = F(d->B(14), s->B(14)); \
+ d->B(15) = F(d->B(15), s->B(15)); \
+ ) \
+ }
+
+#define SSE_HELPER_W(name, F) \
+ void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \
+ { \
+ d->W(0) = F(d->W(0), s->W(0)); \
+ d->W(1) = F(d->W(1), s->W(1)); \
+ d->W(2) = F(d->W(2), s->W(2)); \
+ d->W(3) = F(d->W(3), s->W(3)); \
+ XMM_ONLY( \
+ d->W(4) = F(d->W(4), s->W(4)); \
+ d->W(5) = F(d->W(5), s->W(5)); \
+ d->W(6) = F(d->W(6), s->W(6)); \
+ d->W(7) = F(d->W(7), s->W(7)); \
+ ) \
+ }
+
+#define SSE_HELPER_L(name, F) \
+ void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \
+ { \
+ d->L(0) = F(d->L(0), s->L(0)); \
+ d->L(1) = F(d->L(1), s->L(1)); \
+ XMM_ONLY( \
+ d->L(2) = F(d->L(2), s->L(2)); \
+ d->L(3) = F(d->L(3), s->L(3)); \
+ ) \
+ }
+
+#define SSE_HELPER_Q(name, F) \
+ void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \
+ { \
+ d->Q(0) = F(d->Q(0), s->Q(0)); \
+ XMM_ONLY( \
+ d->Q(1) = F(d->Q(1), s->Q(1)); \
+ ) \
+ }
+
+#if SHIFT == 0
+static inline int satub(int x)
+{
+ if (x < 0) {
+ return 0;
+ } else if (x > 255) {
+ return 255;
+ } else {
+ return x;
+ }
+}
+
+static inline int satuw(int x)
+{
+ if (x < 0) {
+ return 0;
+ } else if (x > 65535) {
+ return 65535;
+ } else {
+ return x;
+ }
+}
+
+static inline int satsb(int x)
+{
+ if (x < -128) {
+ return -128;
+ } else if (x > 127) {
+ return 127;
+ } else {
+ return x;
+ }
+}
+
+static inline int satsw(int x)
+{
+ if (x < -32768) {
+ return -32768;
+ } else if (x > 32767) {
+ return 32767;
+ } else {
+ return x;
+ }
+}
+
+#define FADD(a, b) ((a) + (b))
+#define FADDUB(a, b) satub((a) + (b))
+#define FADDUW(a, b) satuw((a) + (b))
+#define FADDSB(a, b) satsb((int8_t)(a) + (int8_t)(b))
+#define FADDSW(a, b) satsw((int16_t)(a) + (int16_t)(b))
+
+#define FSUB(a, b) ((a) - (b))
+#define FSUBUB(a, b) satub((a) - (b))
+#define FSUBUW(a, b) satuw((a) - (b))
+#define FSUBSB(a, b) satsb((int8_t)(a) - (int8_t)(b))
+#define FSUBSW(a, b) satsw((int16_t)(a) - (int16_t)(b))
+#define FMINUB(a, b) (((a) < (b)) ? (a) : (b))
+#define FMINSW(a, b) (((int16_t)(a) < (int16_t)(b)) ? (a) : (b))
+#define FMAXUB(a, b) (((a) > (b)) ? (a) : (b))
+#define FMAXSW(a, b) (((int16_t)(a) > (int16_t)(b)) ? (a) : (b))
+
+#define FAND(a, b) ((a) & (b))
+#define FANDN(a, b) ((~(a)) & (b))
+#define FOR(a, b) ((a) | (b))
+#define FXOR(a, b) ((a) ^ (b))
+
+#define FCMPGTB(a, b) ((int8_t)(a) > (int8_t)(b) ? -1 : 0)
+#define FCMPGTW(a, b) ((int16_t)(a) > (int16_t)(b) ? -1 : 0)
+#define FCMPGTL(a, b) ((int32_t)(a) > (int32_t)(b) ? -1 : 0)
+#define FCMPEQ(a, b) ((a) == (b) ? -1 : 0)
+
+#define FMULLW(a, b) ((a) * (b))
+#define FMULHRW(a, b) (((int16_t)(a) * (int16_t)(b) + 0x8000) >> 16)
+#define FMULHUW(a, b) ((a) * (b) >> 16)
+#define FMULHW(a, b) ((int16_t)(a) * (int16_t)(b) >> 16)
+
+#define FAVG(a, b) (((a) + (b) + 1) >> 1)
+#endif
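
As an example of how the per-lane macros above combine with the SSE_HELPER_* expanders, here is a stand-alone sketch of a single PADDUSB lane (the FADDUB/satub path): widen, add, then clamp to the unsigned byte range. Only one lane is shown; the real helper processes 8 MMX or 16 XMM lanes, and the function name is invented.

#include <stdint.h>
#include <stdio.h>

/* One unsigned-saturating byte add, as satub((a) + (b)) does above. */
static uint8_t paddusb_lane(uint8_t a, uint8_t b)
{
    int x = a + b;                       /* widen so the sum cannot wrap */
    return x > 255 ? 255 : (uint8_t)x;   /* clamp to 0..255 */
}

int main(void)
{
    printf("0xf0 + 0x20 -> 0x%02x\n", paddusb_lane(0xf0, 0x20)); /* 0xff */
    printf("0x10 + 0x20 -> 0x%02x\n", paddusb_lane(0x10, 0x20)); /* 0x30 */
    return 0;
}
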
+
+SSE_HELPER_B(helper_paddb, FADD)
+SSE_HELPER_W(helper_paddw, FADD)
+SSE_HELPER_L(helper_paddl, FADD)
+SSE_HELPER_Q(helper_paddq, FADD)
+
+SSE_HELPER_B(helper_psubb, FSUB)
+SSE_HELPER_W(helper_psubw, FSUB)
+SSE_HELPER_L(helper_psubl, FSUB)
+SSE_HELPER_Q(helper_psubq, FSUB)
+
+SSE_HELPER_B(helper_paddusb, FADDUB)
+SSE_HELPER_B(helper_paddsb, FADDSB)
+SSE_HELPER_B(helper_psubusb, FSUBUB)
+SSE_HELPER_B(helper_psubsb, FSUBSB)
+
+SSE_HELPER_W(helper_paddusw, FADDUW)
+SSE_HELPER_W(helper_paddsw, FADDSW)
+SSE_HELPER_W(helper_psubusw, FSUBUW)
+SSE_HELPER_W(helper_psubsw, FSUBSW)
+
+SSE_HELPER_B(helper_pminub, FMINUB)
+SSE_HELPER_B(helper_pmaxub, FMAXUB)
+
+SSE_HELPER_W(helper_pminsw, FMINSW)
+SSE_HELPER_W(helper_pmaxsw, FMAXSW)
+
+SSE_HELPER_Q(helper_pand, FAND)
+SSE_HELPER_Q(helper_pandn, FANDN)
+SSE_HELPER_Q(helper_por, FOR)
+SSE_HELPER_Q(helper_pxor, FXOR)
+
+SSE_HELPER_B(helper_pcmpgtb, FCMPGTB)
+SSE_HELPER_W(helper_pcmpgtw, FCMPGTW)
+SSE_HELPER_L(helper_pcmpgtl, FCMPGTL)
+
+SSE_HELPER_B(helper_pcmpeqb, FCMPEQ)
+SSE_HELPER_W(helper_pcmpeqw, FCMPEQ)
+SSE_HELPER_L(helper_pcmpeql, FCMPEQ)
+
+SSE_HELPER_W(helper_pmullw, FMULLW)
+#if SHIFT == 0
+SSE_HELPER_W(helper_pmulhrw, FMULHRW)
+#endif
+SSE_HELPER_W(helper_pmulhuw, FMULHUW)
+SSE_HELPER_W(helper_pmulhw, FMULHW)
+
+SSE_HELPER_B(helper_pavgb, FAVG)
+SSE_HELPER_W(helper_pavgw, FAVG)
+
+void glue(helper_pmuludq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ d->Q(0) = (uint64_t)s->L(0) * (uint64_t)d->L(0);
+#if SHIFT == 1
+ d->Q(1) = (uint64_t)s->L(2) * (uint64_t)d->L(2);
+#endif
+}
+
+void glue(helper_pmaddwd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ int i;
+
+ for (i = 0; i < (2 << SHIFT); i++) {
+ d->L(i) = (int16_t)s->W(2 * i) * (int16_t)d->W(2 * i) +
+ (int16_t)s->W(2 * i + 1) * (int16_t)d->W(2 * i + 1);
+ }
+}
+
+#if SHIFT == 0
+static inline int abs1(int a)
+{
+ if (a < 0) {
+ return -a;
+ } else {
+ return a;
+ }
+}
+#endif
+void glue(helper_psadbw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ unsigned int val;
+
+ val = 0;
+ val += abs1(d->B(0) - s->B(0));
+ val += abs1(d->B(1) - s->B(1));
+ val += abs1(d->B(2) - s->B(2));
+ val += abs1(d->B(3) - s->B(3));
+ val += abs1(d->B(4) - s->B(4));
+ val += abs1(d->B(5) - s->B(5));
+ val += abs1(d->B(6) - s->B(6));
+ val += abs1(d->B(7) - s->B(7));
+ d->Q(0) = val;
+#if SHIFT == 1
+ val = 0;
+ val += abs1(d->B(8) - s->B(8));
+ val += abs1(d->B(9) - s->B(9));
+ val += abs1(d->B(10) - s->B(10));
+ val += abs1(d->B(11) - s->B(11));
+ val += abs1(d->B(12) - s->B(12));
+ val += abs1(d->B(13) - s->B(13));
+ val += abs1(d->B(14) - s->B(14));
+ val += abs1(d->B(15) - s->B(15));
+ d->Q(1) = val;
+#endif
+}
+
+void glue(helper_maskmov, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+ target_ulong a0)
+{
+ int i;
+
+ for (i = 0; i < (8 << SHIFT); i++) {
+ if (s->B(i) & 0x80) {
+ cpu_stb_data_ra(env, a0 + i, d->B(i), GETPC());
+ }
+ }
+}
+
+void glue(helper_movl_mm_T0, SUFFIX)(Reg *d, uint32_t val)
+{
+ d->L(0) = val;
+ d->L(1) = 0;
+#if SHIFT == 1
+ d->Q(1) = 0;
+#endif
+}
+
+#ifdef TARGET_X86_64
+void glue(helper_movq_mm_T0, SUFFIX)(Reg *d, uint64_t val)
+{
+ d->Q(0) = val;
+#if SHIFT == 1
+ d->Q(1) = 0;
+#endif
+}
+#endif
+
+#if SHIFT == 0
+void glue(helper_pshufw, SUFFIX)(Reg *d, Reg *s, int order)
+{
+ Reg r;
+
+ r.W(0) = s->W(order & 3);
+ r.W(1) = s->W((order >> 2) & 3);
+ r.W(2) = s->W((order >> 4) & 3);
+ r.W(3) = s->W((order >> 6) & 3);
+ *d = r;
+}
+#else
+void helper_shufps(Reg *d, Reg *s, int order)
+{
+ Reg r;
+
+ r.L(0) = d->L(order & 3);
+ r.L(1) = d->L((order >> 2) & 3);
+ r.L(2) = s->L((order >> 4) & 3);
+ r.L(3) = s->L((order >> 6) & 3);
+ *d = r;
+}
+
+void helper_shufpd(Reg *d, Reg *s, int order)
+{
+ Reg r;
+
+ r.Q(0) = d->Q(order & 1);
+ r.Q(1) = s->Q((order >> 1) & 1);
+ *d = r;
+}
+
+void glue(helper_pshufd, SUFFIX)(Reg *d, Reg *s, int order)
+{
+ Reg r;
+
+ r.L(0) = s->L(order & 3);
+ r.L(1) = s->L((order >> 2) & 3);
+ r.L(2) = s->L((order >> 4) & 3);
+ r.L(3) = s->L((order >> 6) & 3);
+ *d = r;
+}
+
+void glue(helper_pshuflw, SUFFIX)(Reg *d, Reg *s, int order)
+{
+ Reg r;
+
+ r.W(0) = s->W(order & 3);
+ r.W(1) = s->W((order >> 2) & 3);
+ r.W(2) = s->W((order >> 4) & 3);
+ r.W(3) = s->W((order >> 6) & 3);
+ r.Q(1) = s->Q(1);
+ *d = r;
+}
+
+void glue(helper_pshufhw, SUFFIX)(Reg *d, Reg *s, int order)
+{
+ Reg r;
+
+ r.Q(0) = s->Q(0);
+ r.W(4) = s->W(4 + (order & 3));
+ r.W(5) = s->W(4 + ((order >> 2) & 3));
+ r.W(6) = s->W(4 + ((order >> 4) & 3));
+ r.W(7) = s->W(4 + ((order >> 6) & 3));
+ *d = r;
+}
+#endif
+
+#if SHIFT == 1
+/* FPU ops */
+/* XXX: not accurate */
+
+#define SSE_HELPER_S(name, F) \
+ void helper_ ## name ## ps(CPUX86State *env, Reg *d, Reg *s) \
+ { \
+ d->ZMM_S(0) = F(32, d->ZMM_S(0), s->ZMM_S(0)); \
+ d->ZMM_S(1) = F(32, d->ZMM_S(1), s->ZMM_S(1)); \
+ d->ZMM_S(2) = F(32, d->ZMM_S(2), s->ZMM_S(2)); \
+ d->ZMM_S(3) = F(32, d->ZMM_S(3), s->ZMM_S(3)); \
+ } \
+ \
+ void helper_ ## name ## ss(CPUX86State *env, Reg *d, Reg *s) \
+ { \
+ d->ZMM_S(0) = F(32, d->ZMM_S(0), s->ZMM_S(0)); \
+ } \
+ \
+ void helper_ ## name ## pd(CPUX86State *env, Reg *d, Reg *s) \
+ { \
+ d->ZMM_D(0) = F(64, d->ZMM_D(0), s->ZMM_D(0)); \
+ d->ZMM_D(1) = F(64, d->ZMM_D(1), s->ZMM_D(1)); \
+ } \
+ \
+ void helper_ ## name ## sd(CPUX86State *env, Reg *d, Reg *s) \
+ { \
+ d->ZMM_D(0) = F(64, d->ZMM_D(0), s->ZMM_D(0)); \
+ }
+
+#define FPU_ADD(size, a, b) float ## size ## _add(a, b, &env->sse_status)
+#define FPU_SUB(size, a, b) float ## size ## _sub(a, b, &env->sse_status)
+#define FPU_MUL(size, a, b) float ## size ## _mul(a, b, &env->sse_status)
+#define FPU_DIV(size, a, b) float ## size ## _div(a, b, &env->sse_status)
+#define FPU_SQRT(size, a, b) float ## size ## _sqrt(b, &env->sse_status)
+
+/* Note that the choice of comparison op here is important to get the
+ * special cases right: for min and max Intel specifies that (-0,0),
+ * (NaN, anything) and (anything, NaN) return the second argument.
+ */
+#define FPU_MIN(size, a, b) \
+ (float ## size ## _lt(a, b, &env->sse_status) ? (a) : (b))
+#define FPU_MAX(size, a, b) \
+ (float ## size ## _lt(b, a, &env->sse_status) ? (a) : (b))
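+/* With this definition FPU_MIN(-0, +0), FPU_MIN(NaN, x) and FPU_MIN(x, NaN)
+ * all evaluate to the second argument b, as required by the note above. */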
+
+SSE_HELPER_S(add, FPU_ADD)
+SSE_HELPER_S(sub, FPU_SUB)
+SSE_HELPER_S(mul, FPU_MUL)
+SSE_HELPER_S(div, FPU_DIV)
+SSE_HELPER_S(min, FPU_MIN)
+SSE_HELPER_S(max, FPU_MAX)
+SSE_HELPER_S(sqrt, FPU_SQRT)
+
+
+/* float to float conversions */
+void helper_cvtps2pd(CPUX86State *env, Reg *d, Reg *s)
+{
+ float32 s0, s1;
+
+ s0 = s->ZMM_S(0);
+ s1 = s->ZMM_S(1);
+ d->ZMM_D(0) = float32_to_float64(s0, &env->sse_status);
+ d->ZMM_D(1) = float32_to_float64(s1, &env->sse_status);
+}
+
+void helper_cvtpd2ps(CPUX86State *env, Reg *d, Reg *s)
+{
+ d->ZMM_S(0) = float64_to_float32(s->ZMM_D(0), &env->sse_status);
+ d->ZMM_S(1) = float64_to_float32(s->ZMM_D(1), &env->sse_status);
+ d->Q(1) = 0;
+}
+
+void helper_cvtss2sd(CPUX86State *env, Reg *d, Reg *s)
+{
+ d->ZMM_D(0) = float32_to_float64(s->ZMM_S(0), &env->sse_status);
+}
+
+void helper_cvtsd2ss(CPUX86State *env, Reg *d, Reg *s)
+{
+ d->ZMM_S(0) = float64_to_float32(s->ZMM_D(0), &env->sse_status);
+}
+
+/* integer to float */
+void helper_cvtdq2ps(CPUX86State *env, Reg *d, Reg *s)
+{
+ d->ZMM_S(0) = int32_to_float32(s->ZMM_L(0), &env->sse_status);
+ d->ZMM_S(1) = int32_to_float32(s->ZMM_L(1), &env->sse_status);
+ d->ZMM_S(2) = int32_to_float32(s->ZMM_L(2), &env->sse_status);
+ d->ZMM_S(3) = int32_to_float32(s->ZMM_L(3), &env->sse_status);
+}
+
+void helper_cvtdq2pd(CPUX86State *env, Reg *d, Reg *s)
+{
+ int32_t l0, l1;
+
+ l0 = (int32_t)s->ZMM_L(0);
+ l1 = (int32_t)s->ZMM_L(1);
+ d->ZMM_D(0) = int32_to_float64(l0, &env->sse_status);
+ d->ZMM_D(1) = int32_to_float64(l1, &env->sse_status);
+}
+
+void helper_cvtpi2ps(CPUX86State *env, ZMMReg *d, MMXReg *s)
+{
+ d->ZMM_S(0) = int32_to_float32(s->MMX_L(0), &env->sse_status);
+ d->ZMM_S(1) = int32_to_float32(s->MMX_L(1), &env->sse_status);
+}
+
+void helper_cvtpi2pd(CPUX86State *env, ZMMReg *d, MMXReg *s)
+{
+ d->ZMM_D(0) = int32_to_float64(s->MMX_L(0), &env->sse_status);
+ d->ZMM_D(1) = int32_to_float64(s->MMX_L(1), &env->sse_status);
+}
+
+void helper_cvtsi2ss(CPUX86State *env, ZMMReg *d, uint32_t val)
+{
+ d->ZMM_S(0) = int32_to_float32(val, &env->sse_status);
+}
+
+void helper_cvtsi2sd(CPUX86State *env, ZMMReg *d, uint32_t val)
+{
+ d->ZMM_D(0) = int32_to_float64(val, &env->sse_status);
+}
+
+#ifdef TARGET_X86_64
+void helper_cvtsq2ss(CPUX86State *env, ZMMReg *d, uint64_t val)
+{
+ d->ZMM_S(0) = int64_to_float32(val, &env->sse_status);
+}
+
+void helper_cvtsq2sd(CPUX86State *env, ZMMReg *d, uint64_t val)
+{
+ d->ZMM_D(0) = int64_to_float64(val, &env->sse_status);
+}
+#endif
+
+/* float to integer */
+void helper_cvtps2dq(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+ d->ZMM_L(0) = float32_to_int32(s->ZMM_S(0), &env->sse_status);
+ d->ZMM_L(1) = float32_to_int32(s->ZMM_S(1), &env->sse_status);
+ d->ZMM_L(2) = float32_to_int32(s->ZMM_S(2), &env->sse_status);
+ d->ZMM_L(3) = float32_to_int32(s->ZMM_S(3), &env->sse_status);
+}
+
+void helper_cvtpd2dq(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+ d->ZMM_L(0) = float64_to_int32(s->ZMM_D(0), &env->sse_status);
+ d->ZMM_L(1) = float64_to_int32(s->ZMM_D(1), &env->sse_status);
+ d->ZMM_Q(1) = 0;
+}
+
+void helper_cvtps2pi(CPUX86State *env, MMXReg *d, ZMMReg *s)
+{
+ d->MMX_L(0) = float32_to_int32(s->ZMM_S(0), &env->sse_status);
+ d->MMX_L(1) = float32_to_int32(s->ZMM_S(1), &env->sse_status);
+}
+
+void helper_cvtpd2pi(CPUX86State *env, MMXReg *d, ZMMReg *s)
+{
+ d->MMX_L(0) = float64_to_int32(s->ZMM_D(0), &env->sse_status);
+ d->MMX_L(1) = float64_to_int32(s->ZMM_D(1), &env->sse_status);
+}
+
+int32_t helper_cvtss2si(CPUX86State *env, ZMMReg *s)
+{
+ return float32_to_int32(s->ZMM_S(0), &env->sse_status);
+}
+
+int32_t helper_cvtsd2si(CPUX86State *env, ZMMReg *s)
+{
+ return float64_to_int32(s->ZMM_D(0), &env->sse_status);
+}
+
+#ifdef TARGET_X86_64
+int64_t helper_cvtss2sq(CPUX86State *env, ZMMReg *s)
+{
+ return float32_to_int64(s->ZMM_S(0), &env->sse_status);
+}
+
+int64_t helper_cvtsd2sq(CPUX86State *env, ZMMReg *s)
+{
+ return float64_to_int64(s->ZMM_D(0), &env->sse_status);
+}
+#endif
+
+/* float to integer truncated */
+void helper_cvttps2dq(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+ d->ZMM_L(0) = float32_to_int32_round_to_zero(s->ZMM_S(0), &env->sse_status);
+ d->ZMM_L(1) = float32_to_int32_round_to_zero(s->ZMM_S(1), &env->sse_status);
+ d->ZMM_L(2) = float32_to_int32_round_to_zero(s->ZMM_S(2), &env->sse_status);
+ d->ZMM_L(3) = float32_to_int32_round_to_zero(s->ZMM_S(3), &env->sse_status);
+}
+
+void helper_cvttpd2dq(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+ d->ZMM_L(0) = float64_to_int32_round_to_zero(s->ZMM_D(0), &env->sse_status);
+ d->ZMM_L(1) = float64_to_int32_round_to_zero(s->ZMM_D(1), &env->sse_status);
+ d->ZMM_Q(1) = 0;
+}
+
+void helper_cvttps2pi(CPUX86State *env, MMXReg *d, ZMMReg *s)
+{
+ d->MMX_L(0) = float32_to_int32_round_to_zero(s->ZMM_S(0), &env->sse_status);
+ d->MMX_L(1) = float32_to_int32_round_to_zero(s->ZMM_S(1), &env->sse_status);
+}
+
+void helper_cvttpd2pi(CPUX86State *env, MMXReg *d, ZMMReg *s)
+{
+ d->MMX_L(0) = float64_to_int32_round_to_zero(s->ZMM_D(0), &env->sse_status);
+ d->MMX_L(1) = float64_to_int32_round_to_zero(s->ZMM_D(1), &env->sse_status);
+}
+
+int32_t helper_cvttss2si(CPUX86State *env, ZMMReg *s)
+{
+ return float32_to_int32_round_to_zero(s->ZMM_S(0), &env->sse_status);
+}
+
+int32_t helper_cvttsd2si(CPUX86State *env, ZMMReg *s)
+{
+ return float64_to_int32_round_to_zero(s->ZMM_D(0), &env->sse_status);
+}
+
+#ifdef TARGET_X86_64
+int64_t helper_cvttss2sq(CPUX86State *env, ZMMReg *s)
+{
+ return float32_to_int64_round_to_zero(s->ZMM_S(0), &env->sse_status);
+}
+
+int64_t helper_cvttsd2sq(CPUX86State *env, ZMMReg *s)
+{
+ return float64_to_int64_round_to_zero(s->ZMM_D(0), &env->sse_status);
+}
+#endif
+
+void helper_rsqrtps(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+ d->ZMM_S(0) = float32_div(float32_one,
+ float32_sqrt(s->ZMM_S(0), &env->sse_status),
+ &env->sse_status);
+ d->ZMM_S(1) = float32_div(float32_one,
+ float32_sqrt(s->ZMM_S(1), &env->sse_status),
+ &env->sse_status);
+ d->ZMM_S(2) = float32_div(float32_one,
+ float32_sqrt(s->ZMM_S(2), &env->sse_status),
+ &env->sse_status);
+ d->ZMM_S(3) = float32_div(float32_one,
+ float32_sqrt(s->ZMM_S(3), &env->sse_status),
+ &env->sse_status);
+}
+
+void helper_rsqrtss(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+ d->ZMM_S(0) = float32_div(float32_one,
+ float32_sqrt(s->ZMM_S(0), &env->sse_status),
+ &env->sse_status);
+}
+
+void helper_rcpps(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+ d->ZMM_S(0) = float32_div(float32_one, s->ZMM_S(0), &env->sse_status);
+ d->ZMM_S(1) = float32_div(float32_one, s->ZMM_S(1), &env->sse_status);
+ d->ZMM_S(2) = float32_div(float32_one, s->ZMM_S(2), &env->sse_status);
+ d->ZMM_S(3) = float32_div(float32_one, s->ZMM_S(3), &env->sse_status);
+}
+
+void helper_rcpss(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+ d->ZMM_S(0) = float32_div(float32_one, s->ZMM_S(0), &env->sse_status);
+}
+
+static inline uint64_t helper_extrq(uint64_t src, int shift, int len)
+{
+ uint64_t mask;
+
+ if (len == 0) {
+ mask = ~0LL;
+ } else {
+ mask = (1ULL << len) - 1;
+ }
+ return (src >> shift) & mask;
+}
+
+void helper_extrq_r(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+ d->ZMM_Q(0) = helper_extrq(d->ZMM_Q(0), s->ZMM_B(1), s->ZMM_B(0));
+}
+
+void helper_extrq_i(CPUX86State *env, ZMMReg *d, int index, int length)
+{
+ d->ZMM_Q(0) = helper_extrq(d->ZMM_Q(0), index, length);
+}
+
+static inline uint64_t helper_insertq(uint64_t src, int shift, int len)
+{
+ uint64_t mask;
+
+ if (len == 0) {
+ mask = ~0ULL;
+ } else {
+ mask = (1ULL << len) - 1;
+ }
+ return (src & ~(mask << shift)) | ((src & mask) << shift);
+}
+
+void helper_insertq_r(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+ d->ZMM_Q(0) = helper_insertq(s->ZMM_Q(0), s->ZMM_B(9), s->ZMM_B(8));
+}
+
+void helper_insertq_i(CPUX86State *env, ZMMReg *d, int index, int length)
+{
+ d->ZMM_Q(0) = helper_insertq(d->ZMM_Q(0), index, length);
+}
+
+void helper_haddps(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+ ZMMReg r;
+
+ r.ZMM_S(0) = float32_add(d->ZMM_S(0), d->ZMM_S(1), &env->sse_status);
+ r.ZMM_S(1) = float32_add(d->ZMM_S(2), d->ZMM_S(3), &env->sse_status);
+ r.ZMM_S(2) = float32_add(s->ZMM_S(0), s->ZMM_S(1), &env->sse_status);
+ r.ZMM_S(3) = float32_add(s->ZMM_S(2), s->ZMM_S(3), &env->sse_status);
+ *d = r;
+}
+
+void helper_haddpd(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+ ZMMReg r;
+
+ r.ZMM_D(0) = float64_add(d->ZMM_D(0), d->ZMM_D(1), &env->sse_status);
+ r.ZMM_D(1) = float64_add(s->ZMM_D(0), s->ZMM_D(1), &env->sse_status);
+ *d = r;
+}
+
+void helper_hsubps(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+ ZMMReg r;
+
+ r.ZMM_S(0) = float32_sub(d->ZMM_S(0), d->ZMM_S(1), &env->sse_status);
+ r.ZMM_S(1) = float32_sub(d->ZMM_S(2), d->ZMM_S(3), &env->sse_status);
+ r.ZMM_S(2) = float32_sub(s->ZMM_S(0), s->ZMM_S(1), &env->sse_status);
+ r.ZMM_S(3) = float32_sub(s->ZMM_S(2), s->ZMM_S(3), &env->sse_status);
+ *d = r;
+}
+
+void helper_hsubpd(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+ ZMMReg r;
+
+ r.ZMM_D(0) = float64_sub(d->ZMM_D(0), d->ZMM_D(1), &env->sse_status);
+ r.ZMM_D(1) = float64_sub(s->ZMM_D(0), s->ZMM_D(1), &env->sse_status);
+ *d = r;
+}
+
+void helper_addsubps(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+ d->ZMM_S(0) = float32_sub(d->ZMM_S(0), s->ZMM_S(0), &env->sse_status);
+ d->ZMM_S(1) = float32_add(d->ZMM_S(1), s->ZMM_S(1), &env->sse_status);
+ d->ZMM_S(2) = float32_sub(d->ZMM_S(2), s->ZMM_S(2), &env->sse_status);
+ d->ZMM_S(3) = float32_add(d->ZMM_S(3), s->ZMM_S(3), &env->sse_status);
+}
+
+void helper_addsubpd(CPUX86State *env, ZMMReg *d, ZMMReg *s)
+{
+ d->ZMM_D(0) = float64_sub(d->ZMM_D(0), s->ZMM_D(0), &env->sse_status);
+ d->ZMM_D(1) = float64_add(d->ZMM_D(1), s->ZMM_D(1), &env->sse_status);
+}
+
+/* XXX: unordered */
+#define SSE_HELPER_CMP(name, F) \
+ void helper_ ## name ## ps(CPUX86State *env, Reg *d, Reg *s) \
+ { \
+ d->ZMM_L(0) = F(32, d->ZMM_S(0), s->ZMM_S(0)); \
+ d->ZMM_L(1) = F(32, d->ZMM_S(1), s->ZMM_S(1)); \
+ d->ZMM_L(2) = F(32, d->ZMM_S(2), s->ZMM_S(2)); \
+ d->ZMM_L(3) = F(32, d->ZMM_S(3), s->ZMM_S(3)); \
+ } \
+ \
+ void helper_ ## name ## ss(CPUX86State *env, Reg *d, Reg *s) \
+ { \
+ d->ZMM_L(0) = F(32, d->ZMM_S(0), s->ZMM_S(0)); \
+ } \
+ \
+ void helper_ ## name ## pd(CPUX86State *env, Reg *d, Reg *s) \
+ { \
+ d->ZMM_Q(0) = F(64, d->ZMM_D(0), s->ZMM_D(0)); \
+ d->ZMM_Q(1) = F(64, d->ZMM_D(1), s->ZMM_D(1)); \
+ } \
+ \
+ void helper_ ## name ## sd(CPUX86State *env, Reg *d, Reg *s) \
+ { \
+ d->ZMM_Q(0) = F(64, d->ZMM_D(0), s->ZMM_D(0)); \
+ }
+
+#define FPU_CMPEQ(size, a, b) \
+ (float ## size ## _eq_quiet(a, b, &env->sse_status) ? -1 : 0)
+#define FPU_CMPLT(size, a, b) \
+ (float ## size ## _lt(a, b, &env->sse_status) ? -1 : 0)
+#define FPU_CMPLE(size, a, b) \
+ (float ## size ## _le(a, b, &env->sse_status) ? -1 : 0)
+#define FPU_CMPUNORD(size, a, b) \
+ (float ## size ## _unordered_quiet(a, b, &env->sse_status) ? -1 : 0)
+#define FPU_CMPNEQ(size, a, b) \
+ (float ## size ## _eq_quiet(a, b, &env->sse_status) ? 0 : -1)
+#define FPU_CMPNLT(size, a, b) \
+ (float ## size ## _lt(a, b, &env->sse_status) ? 0 : -1)
+#define FPU_CMPNLE(size, a, b) \
+ (float ## size ## _le(a, b, &env->sse_status) ? 0 : -1)
+#define FPU_CMPORD(size, a, b) \
+ (float ## size ## _unordered_quiet(a, b, &env->sse_status) ? 0 : -1)
+
+SSE_HELPER_CMP(cmpeq, FPU_CMPEQ)
+SSE_HELPER_CMP(cmplt, FPU_CMPLT)
+SSE_HELPER_CMP(cmple, FPU_CMPLE)
+SSE_HELPER_CMP(cmpunord, FPU_CMPUNORD)
+SSE_HELPER_CMP(cmpneq, FPU_CMPNEQ)
+SSE_HELPER_CMP(cmpnlt, FPU_CMPNLT)
+SSE_HELPER_CMP(cmpnle, FPU_CMPNLE)
+SSE_HELPER_CMP(cmpord, FPU_CMPORD)
+
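+/* float{32,64}_compare() returns -1 (less), 0 (equal), 1 (greater) or
+ * 2 (unordered); indexing with 'ret + 1' yields the ZF/PF/CF pattern that
+ * (U)COMISS/(U)COMISD produce for each relation. */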
+static const int comis_eflags[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
+
+void helper_ucomiss(CPUX86State *env, Reg *d, Reg *s)
+{
+ int ret;
+ float32 s0, s1;
+
+ s0 = d->ZMM_S(0);
+ s1 = s->ZMM_S(0);
+ ret = float32_compare_quiet(s0, s1, &env->sse_status);
+ CC_SRC = comis_eflags[ret + 1];
+}
+
+void helper_comiss(CPUX86State *env, Reg *d, Reg *s)
+{
+ int ret;
+ float32 s0, s1;
+
+ s0 = d->ZMM_S(0);
+ s1 = s->ZMM_S(0);
+ ret = float32_compare(s0, s1, &env->sse_status);
+ CC_SRC = comis_eflags[ret + 1];
+}
+
+void helper_ucomisd(CPUX86State *env, Reg *d, Reg *s)
+{
+ int ret;
+ float64 d0, d1;
+
+ d0 = d->ZMM_D(0);
+ d1 = s->ZMM_D(0);
+ ret = float64_compare_quiet(d0, d1, &env->sse_status);
+ CC_SRC = comis_eflags[ret + 1];
+}
+
+void helper_comisd(CPUX86State *env, Reg *d, Reg *s)
+{
+ int ret;
+ float64 d0, d1;
+
+ d0 = d->ZMM_D(0);
+ d1 = s->ZMM_D(0);
+ ret = float64_compare(d0, d1, &env->sse_status);
+ CC_SRC = comis_eflags[ret + 1];
+}
+
+uint32_t helper_movmskps(CPUX86State *env, Reg *s)
+{
+ int b0, b1, b2, b3;
+
+ b0 = s->ZMM_L(0) >> 31;
+ b1 = s->ZMM_L(1) >> 31;
+ b2 = s->ZMM_L(2) >> 31;
+ b3 = s->ZMM_L(3) >> 31;
+ return b0 | (b1 << 1) | (b2 << 2) | (b3 << 3);
+}
+
+uint32_t helper_movmskpd(CPUX86State *env, Reg *s)
+{
+ int b0, b1;
+
+ b0 = s->ZMM_L(1) >> 31;
+ b1 = s->ZMM_L(3) >> 31;
+ return b0 | (b1 << 1);
+}
+
+#endif
+
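+/* PMOVMSKB: gather the sign bit of every byte; shifting byte i right by
+ * (7 - i), or left by (i - 7), moves its bit 7 to bit i of the result. */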
+uint32_t glue(helper_pmovmskb, SUFFIX)(CPUX86State *env, Reg *s)
+{
+ uint32_t val;
+
+ val = 0;
+ val |= (s->B(0) >> 7);
+ val |= (s->B(1) >> 6) & 0x02;
+ val |= (s->B(2) >> 5) & 0x04;
+ val |= (s->B(3) >> 4) & 0x08;
+ val |= (s->B(4) >> 3) & 0x10;
+ val |= (s->B(5) >> 2) & 0x20;
+ val |= (s->B(6) >> 1) & 0x40;
+ val |= (s->B(7)) & 0x80;
+#if SHIFT == 1
+ val |= (s->B(8) << 1) & 0x0100;
+ val |= (s->B(9) << 2) & 0x0200;
+ val |= (s->B(10) << 3) & 0x0400;
+ val |= (s->B(11) << 4) & 0x0800;
+ val |= (s->B(12) << 5) & 0x1000;
+ val |= (s->B(13) << 6) & 0x2000;
+ val |= (s->B(14) << 7) & 0x4000;
+ val |= (s->B(15) << 8) & 0x8000;
+#endif
+ return val;
+}
+
+void glue(helper_packsswb, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ Reg r;
+
+ r.B(0) = satsb((int16_t)d->W(0));
+ r.B(1) = satsb((int16_t)d->W(1));
+ r.B(2) = satsb((int16_t)d->W(2));
+ r.B(3) = satsb((int16_t)d->W(3));
+#if SHIFT == 1
+ r.B(4) = satsb((int16_t)d->W(4));
+ r.B(5) = satsb((int16_t)d->W(5));
+ r.B(6) = satsb((int16_t)d->W(6));
+ r.B(7) = satsb((int16_t)d->W(7));
+#endif
+ r.B((4 << SHIFT) + 0) = satsb((int16_t)s->W(0));
+ r.B((4 << SHIFT) + 1) = satsb((int16_t)s->W(1));
+ r.B((4 << SHIFT) + 2) = satsb((int16_t)s->W(2));
+ r.B((4 << SHIFT) + 3) = satsb((int16_t)s->W(3));
+#if SHIFT == 1
+ r.B(12) = satsb((int16_t)s->W(4));
+ r.B(13) = satsb((int16_t)s->W(5));
+ r.B(14) = satsb((int16_t)s->W(6));
+ r.B(15) = satsb((int16_t)s->W(7));
+#endif
+ *d = r;
+}
+
+void glue(helper_packuswb, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ Reg r;
+
+ r.B(0) = satub((int16_t)d->W(0));
+ r.B(1) = satub((int16_t)d->W(1));
+ r.B(2) = satub((int16_t)d->W(2));
+ r.B(3) = satub((int16_t)d->W(3));
+#if SHIFT == 1
+ r.B(4) = satub((int16_t)d->W(4));
+ r.B(5) = satub((int16_t)d->W(5));
+ r.B(6) = satub((int16_t)d->W(6));
+ r.B(7) = satub((int16_t)d->W(7));
+#endif
+ r.B((4 << SHIFT) + 0) = satub((int16_t)s->W(0));
+ r.B((4 << SHIFT) + 1) = satub((int16_t)s->W(1));
+ r.B((4 << SHIFT) + 2) = satub((int16_t)s->W(2));
+ r.B((4 << SHIFT) + 3) = satub((int16_t)s->W(3));
+#if SHIFT == 1
+ r.B(12) = satub((int16_t)s->W(4));
+ r.B(13) = satub((int16_t)s->W(5));
+ r.B(14) = satub((int16_t)s->W(6));
+ r.B(15) = satub((int16_t)s->W(7));
+#endif
+ *d = r;
+}
+
+void glue(helper_packssdw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ Reg r;
+
+ r.W(0) = satsw(d->L(0));
+ r.W(1) = satsw(d->L(1));
+#if SHIFT == 1
+ r.W(2) = satsw(d->L(2));
+ r.W(3) = satsw(d->L(3));
+#endif
+ r.W((2 << SHIFT) + 0) = satsw(s->L(0));
+ r.W((2 << SHIFT) + 1) = satsw(s->L(1));
+#if SHIFT == 1
+ r.W(6) = satsw(s->L(2));
+ r.W(7) = satsw(s->L(3));
+#endif
+ *d = r;
+}
+
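+/* PUNPCKL/PUNPCKH family: interleave elements taken from the low
+ * (base == 0) or high (base == 1) half of the destination and source. */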
+#define UNPCK_OP(base_name, base) \
+ \
+ void glue(helper_punpck ## base_name ## bw, SUFFIX)(CPUX86State *env,\
+ Reg *d, Reg *s) \
+ { \
+ Reg r; \
+ \
+ r.B(0) = d->B((base << (SHIFT + 2)) + 0); \
+ r.B(1) = s->B((base << (SHIFT + 2)) + 0); \
+ r.B(2) = d->B((base << (SHIFT + 2)) + 1); \
+ r.B(3) = s->B((base << (SHIFT + 2)) + 1); \
+ r.B(4) = d->B((base << (SHIFT + 2)) + 2); \
+ r.B(5) = s->B((base << (SHIFT + 2)) + 2); \
+ r.B(6) = d->B((base << (SHIFT + 2)) + 3); \
+ r.B(7) = s->B((base << (SHIFT + 2)) + 3); \
+ XMM_ONLY( \
+ r.B(8) = d->B((base << (SHIFT + 2)) + 4); \
+ r.B(9) = s->B((base << (SHIFT + 2)) + 4); \
+ r.B(10) = d->B((base << (SHIFT + 2)) + 5); \
+ r.B(11) = s->B((base << (SHIFT + 2)) + 5); \
+ r.B(12) = d->B((base << (SHIFT + 2)) + 6); \
+ r.B(13) = s->B((base << (SHIFT + 2)) + 6); \
+ r.B(14) = d->B((base << (SHIFT + 2)) + 7); \
+ r.B(15) = s->B((base << (SHIFT + 2)) + 7); \
+ ) \
+ *d = r; \
+ } \
+ \
+ void glue(helper_punpck ## base_name ## wd, SUFFIX)(CPUX86State *env,\
+ Reg *d, Reg *s) \
+ { \
+ Reg r; \
+ \
+ r.W(0) = d->W((base << (SHIFT + 1)) + 0); \
+ r.W(1) = s->W((base << (SHIFT + 1)) + 0); \
+ r.W(2) = d->W((base << (SHIFT + 1)) + 1); \
+ r.W(3) = s->W((base << (SHIFT + 1)) + 1); \
+ XMM_ONLY( \
+ r.W(4) = d->W((base << (SHIFT + 1)) + 2); \
+ r.W(5) = s->W((base << (SHIFT + 1)) + 2); \
+ r.W(6) = d->W((base << (SHIFT + 1)) + 3); \
+ r.W(7) = s->W((base << (SHIFT + 1)) + 3); \
+ ) \
+ *d = r; \
+ } \
+ \
+ void glue(helper_punpck ## base_name ## dq, SUFFIX)(CPUX86State *env,\
+ Reg *d, Reg *s) \
+ { \
+ Reg r; \
+ \
+ r.L(0) = d->L((base << SHIFT) + 0); \
+ r.L(1) = s->L((base << SHIFT) + 0); \
+ XMM_ONLY( \
+ r.L(2) = d->L((base << SHIFT) + 1); \
+ r.L(3) = s->L((base << SHIFT) + 1); \
+ ) \
+ *d = r; \
+ } \
+ \
+ XMM_ONLY( \
+ void glue(helper_punpck ## base_name ## qdq, SUFFIX)(CPUX86State \
+ *env, \
+ Reg *d, \
+ Reg *s) \
+ { \
+ Reg r; \
+ \
+ r.Q(0) = d->Q(base); \
+ r.Q(1) = s->Q(base); \
+ *d = r; \
+ } \
+ )
+
+UNPCK_OP(l, 0)
+UNPCK_OP(h, 1)
+
+/* 3DNow! float ops */
+#if SHIFT == 0
+void helper_pi2fd(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ d->MMX_S(0) = int32_to_float32(s->MMX_L(0), &env->mmx_status);
+ d->MMX_S(1) = int32_to_float32(s->MMX_L(1), &env->mmx_status);
+}
+
+void helper_pi2fw(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ d->MMX_S(0) = int32_to_float32((int16_t)s->MMX_W(0), &env->mmx_status);
+ d->MMX_S(1) = int32_to_float32((int16_t)s->MMX_W(2), &env->mmx_status);
+}
+
+void helper_pf2id(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ d->MMX_L(0) = float32_to_int32_round_to_zero(s->MMX_S(0), &env->mmx_status);
+ d->MMX_L(1) = float32_to_int32_round_to_zero(s->MMX_S(1), &env->mmx_status);
+}
+
+void helper_pf2iw(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ d->MMX_L(0) = satsw(float32_to_int32_round_to_zero(s->MMX_S(0),
+ &env->mmx_status));
+ d->MMX_L(1) = satsw(float32_to_int32_round_to_zero(s->MMX_S(1),
+ &env->mmx_status));
+}
+
+void helper_pfacc(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ MMXReg r;
+
+ r.MMX_S(0) = float32_add(d->MMX_S(0), d->MMX_S(1), &env->mmx_status);
+ r.MMX_S(1) = float32_add(s->MMX_S(0), s->MMX_S(1), &env->mmx_status);
+ *d = r;
+}
+
+void helper_pfadd(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ d->MMX_S(0) = float32_add(d->MMX_S(0), s->MMX_S(0), &env->mmx_status);
+ d->MMX_S(1) = float32_add(d->MMX_S(1), s->MMX_S(1), &env->mmx_status);
+}
+
+void helper_pfcmpeq(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ d->MMX_L(0) = float32_eq_quiet(d->MMX_S(0), s->MMX_S(0),
+ &env->mmx_status) ? -1 : 0;
+ d->MMX_L(1) = float32_eq_quiet(d->MMX_S(1), s->MMX_S(1),
+ &env->mmx_status) ? -1 : 0;
+}
+
+void helper_pfcmpge(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ d->MMX_L(0) = float32_le(s->MMX_S(0), d->MMX_S(0),
+ &env->mmx_status) ? -1 : 0;
+ d->MMX_L(1) = float32_le(s->MMX_S(1), d->MMX_S(1),
+ &env->mmx_status) ? -1 : 0;
+}
+
+void helper_pfcmpgt(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ d->MMX_L(0) = float32_lt(s->MMX_S(0), d->MMX_S(0),
+ &env->mmx_status) ? -1 : 0;
+ d->MMX_L(1) = float32_lt(s->MMX_S(1), d->MMX_S(1),
+ &env->mmx_status) ? -1 : 0;
+}
+
+void helper_pfmax(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ if (float32_lt(d->MMX_S(0), s->MMX_S(0), &env->mmx_status)) {
+ d->MMX_S(0) = s->MMX_S(0);
+ }
+ if (float32_lt(d->MMX_S(1), s->MMX_S(1), &env->mmx_status)) {
+ d->MMX_S(1) = s->MMX_S(1);
+ }
+}
+
+void helper_pfmin(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ if (float32_lt(s->MMX_S(0), d->MMX_S(0), &env->mmx_status)) {
+ d->MMX_S(0) = s->MMX_S(0);
+ }
+ if (float32_lt(s->MMX_S(1), d->MMX_S(1), &env->mmx_status)) {
+ d->MMX_S(1) = s->MMX_S(1);
+ }
+}
+
+void helper_pfmul(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ d->MMX_S(0) = float32_mul(d->MMX_S(0), s->MMX_S(0), &env->mmx_status);
+ d->MMX_S(1) = float32_mul(d->MMX_S(1), s->MMX_S(1), &env->mmx_status);
+}
+
+void helper_pfnacc(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ MMXReg r;
+
+ r.MMX_S(0) = float32_sub(d->MMX_S(0), d->MMX_S(1), &env->mmx_status);
+ r.MMX_S(1) = float32_sub(s->MMX_S(0), s->MMX_S(1), &env->mmx_status);
+ *d = r;
+}
+
+void helper_pfpnacc(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ MMXReg r;
+
+ r.MMX_S(0) = float32_sub(d->MMX_S(0), d->MMX_S(1), &env->mmx_status);
+ r.MMX_S(1) = float32_add(s->MMX_S(0), s->MMX_S(1), &env->mmx_status);
+ *d = r;
+}
+
+void helper_pfrcp(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ d->MMX_S(0) = float32_div(float32_one, s->MMX_S(0), &env->mmx_status);
+ d->MMX_S(1) = d->MMX_S(0);
+}
+
+void helper_pfrsqrt(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ d->MMX_L(1) = s->MMX_L(0) & 0x7fffffff;
+ d->MMX_S(1) = float32_div(float32_one,
+ float32_sqrt(d->MMX_S(1), &env->mmx_status),
+ &env->mmx_status);
+ d->MMX_L(1) |= s->MMX_L(0) & 0x80000000;
+ d->MMX_L(0) = d->MMX_L(1);
+}
+
+void helper_pfsub(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ d->MMX_S(0) = float32_sub(d->MMX_S(0), s->MMX_S(0), &env->mmx_status);
+ d->MMX_S(1) = float32_sub(d->MMX_S(1), s->MMX_S(1), &env->mmx_status);
+}
+
+void helper_pfsubr(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ d->MMX_S(0) = float32_sub(s->MMX_S(0), d->MMX_S(0), &env->mmx_status);
+ d->MMX_S(1) = float32_sub(s->MMX_S(1), d->MMX_S(1), &env->mmx_status);
+}
+
+void helper_pswapd(CPUX86State *env, MMXReg *d, MMXReg *s)
+{
+ MMXReg r;
+
+ r.MMX_L(0) = s->MMX_L(1);
+ r.MMX_L(1) = s->MMX_L(0);
+ *d = r;
+}
+#endif
+
+/* SSSE3 op helpers */
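+/* PSHUFB: a result byte is zero when bit 7 of the corresponding source
+ * (selector) byte is set, otherwise it is the destination byte indexed by
+ * the selector's low bits. */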
+void glue(helper_pshufb, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ int i;
+ Reg r;
+
+ for (i = 0; i < (8 << SHIFT); i++) {
+ r.B(i) = (s->B(i) & 0x80) ? 0 : (d->B(s->B(i) & ((8 << SHIFT) - 1)));
+ }
+
+ *d = r;
+}
+
+void glue(helper_phaddw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ d->W(0) = (int16_t)d->W(0) + (int16_t)d->W(1);
+ d->W(1) = (int16_t)d->W(2) + (int16_t)d->W(3);
+ XMM_ONLY(d->W(2) = (int16_t)d->W(4) + (int16_t)d->W(5));
+ XMM_ONLY(d->W(3) = (int16_t)d->W(6) + (int16_t)d->W(7));
+ d->W((2 << SHIFT) + 0) = (int16_t)s->W(0) + (int16_t)s->W(1);
+ d->W((2 << SHIFT) + 1) = (int16_t)s->W(2) + (int16_t)s->W(3);
+ XMM_ONLY(d->W(6) = (int16_t)s->W(4) + (int16_t)s->W(5));
+ XMM_ONLY(d->W(7) = (int16_t)s->W(6) + (int16_t)s->W(7));
+}
+
+void glue(helper_phaddd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ d->L(0) = (int32_t)d->L(0) + (int32_t)d->L(1);
+ XMM_ONLY(d->L(1) = (int32_t)d->L(2) + (int32_t)d->L(3));
+ d->L((1 << SHIFT) + 0) = (int32_t)s->L(0) + (int32_t)s->L(1);
+ XMM_ONLY(d->L(3) = (int32_t)s->L(2) + (int32_t)s->L(3));
+}
+
+void glue(helper_phaddsw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ d->W(0) = satsw((int16_t)d->W(0) + (int16_t)d->W(1));
+ d->W(1) = satsw((int16_t)d->W(2) + (int16_t)d->W(3));
+ XMM_ONLY(d->W(2) = satsw((int16_t)d->W(4) + (int16_t)d->W(5)));
+ XMM_ONLY(d->W(3) = satsw((int16_t)d->W(6) + (int16_t)d->W(7)));
+ d->W((2 << SHIFT) + 0) = satsw((int16_t)s->W(0) + (int16_t)s->W(1));
+ d->W((2 << SHIFT) + 1) = satsw((int16_t)s->W(2) + (int16_t)s->W(3));
+ XMM_ONLY(d->W(6) = satsw((int16_t)s->W(4) + (int16_t)s->W(5)));
+ XMM_ONLY(d->W(7) = satsw((int16_t)s->W(6) + (int16_t)s->W(7)));
+}
+
+void glue(helper_pmaddubsw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ d->W(0) = satsw((int8_t)s->B(0) * (uint8_t)d->B(0) +
+ (int8_t)s->B(1) * (uint8_t)d->B(1));
+ d->W(1) = satsw((int8_t)s->B(2) * (uint8_t)d->B(2) +
+ (int8_t)s->B(3) * (uint8_t)d->B(3));
+ d->W(2) = satsw((int8_t)s->B(4) * (uint8_t)d->B(4) +
+ (int8_t)s->B(5) * (uint8_t)d->B(5));
+ d->W(3) = satsw((int8_t)s->B(6) * (uint8_t)d->B(6) +
+ (int8_t)s->B(7) * (uint8_t)d->B(7));
+#if SHIFT == 1
+ d->W(4) = satsw((int8_t)s->B(8) * (uint8_t)d->B(8) +
+ (int8_t)s->B(9) * (uint8_t)d->B(9));
+ d->W(5) = satsw((int8_t)s->B(10) * (uint8_t)d->B(10) +
+ (int8_t)s->B(11) * (uint8_t)d->B(11));
+ d->W(6) = satsw((int8_t)s->B(12) * (uint8_t)d->B(12) +
+ (int8_t)s->B(13) * (uint8_t)d->B(13));
+ d->W(7) = satsw((int8_t)s->B(14) * (uint8_t)d->B(14) +
+ (int8_t)s->B(15) * (uint8_t)d->B(15));
+#endif
+}
+
+void glue(helper_phsubw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ d->W(0) = (int16_t)d->W(0) - (int16_t)d->W(1);
+ d->W(1) = (int16_t)d->W(2) - (int16_t)d->W(3);
+ XMM_ONLY(d->W(2) = (int16_t)d->W(4) - (int16_t)d->W(5));
+ XMM_ONLY(d->W(3) = (int16_t)d->W(6) - (int16_t)d->W(7));
+ d->W((2 << SHIFT) + 0) = (int16_t)s->W(0) - (int16_t)s->W(1);
+ d->W((2 << SHIFT) + 1) = (int16_t)s->W(2) - (int16_t)s->W(3);
+ XMM_ONLY(d->W(6) = (int16_t)s->W(4) - (int16_t)s->W(5));
+ XMM_ONLY(d->W(7) = (int16_t)s->W(6) - (int16_t)s->W(7));
+}
+
+void glue(helper_phsubd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ d->L(0) = (int32_t)d->L(0) - (int32_t)d->L(1);
+ XMM_ONLY(d->L(1) = (int32_t)d->L(2) - (int32_t)d->L(3));
+ d->L((1 << SHIFT) + 0) = (int32_t)s->L(0) - (int32_t)s->L(1);
+ XMM_ONLY(d->L(3) = (int32_t)s->L(2) - (int32_t)s->L(3));
+}
+
+void glue(helper_phsubsw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ d->W(0) = satsw((int16_t)d->W(0) - (int16_t)d->W(1));
+ d->W(1) = satsw((int16_t)d->W(2) - (int16_t)d->W(3));
+ XMM_ONLY(d->W(2) = satsw((int16_t)d->W(4) - (int16_t)d->W(5)));
+ XMM_ONLY(d->W(3) = satsw((int16_t)d->W(6) - (int16_t)d->W(7)));
+ d->W((2 << SHIFT) + 0) = satsw((int16_t)s->W(0) - (int16_t)s->W(1));
+ d->W((2 << SHIFT) + 1) = satsw((int16_t)s->W(2) - (int16_t)s->W(3));
+ XMM_ONLY(d->W(6) = satsw((int16_t)s->W(4) - (int16_t)s->W(5)));
+ XMM_ONLY(d->W(7) = satsw((int16_t)s->W(6) - (int16_t)s->W(7)));
+}
+
+#define FABSB(_, x) (x > INT8_MAX ? -(int8_t)x : x)
+#define FABSW(_, x) (x > INT16_MAX ? -(int16_t)x : x)
+#define FABSL(_, x) (x > INT32_MAX ? -(int32_t)x : x)
+SSE_HELPER_B(helper_pabsb, FABSB)
+SSE_HELPER_W(helper_pabsw, FABSW)
+SSE_HELPER_L(helper_pabsd, FABSL)
+
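+/* PMULHRSW: signed 16x16 multiply, add the rounding constant 0x4000 and
+ * keep bits 30:15 of the product. */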
+#define FMULHRSW(d, s) (((int16_t) d * (int16_t)s + 0x4000) >> 15)
+SSE_HELPER_W(helper_pmulhrsw, FMULHRSW)
+
+#define FSIGNB(d, s) (s <= INT8_MAX ? s ? d : 0 : -(int8_t)d)
+#define FSIGNW(d, s) (s <= INT16_MAX ? s ? d : 0 : -(int16_t)d)
+#define FSIGNL(d, s) (s <= INT32_MAX ? s ? d : 0 : -(int32_t)d)
+SSE_HELPER_B(helper_psignb, FSIGNB)
+SSE_HELPER_W(helper_psignw, FSIGNW)
+SSE_HELPER_L(helper_psignd, FSIGNL)
+
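+/* PALIGNR: concatenate destination (high half) and source (low half),
+ * shift the combined value right by 'shift' bytes and keep the low part. */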
+void glue(helper_palignr, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+ int32_t shift)
+{
+ Reg r;
+
+ /* XXX could be checked during translation */
+ if (shift >= (16 << SHIFT)) {
+ r.Q(0) = 0;
+ XMM_ONLY(r.Q(1) = 0);
+ } else {
+ shift <<= 3;
+#define SHR(v, i) (i < 64 && i > -64 ? i > 0 ? v >> (i) : (v << -(i)) : 0)
+#if SHIFT == 0
+ r.Q(0) = SHR(s->Q(0), shift - 0) |
+ SHR(d->Q(0), shift - 64);
+#else
+ r.Q(0) = SHR(s->Q(0), shift - 0) |
+ SHR(s->Q(1), shift - 64) |
+ SHR(d->Q(0), shift - 128) |
+ SHR(d->Q(1), shift - 192);
+ r.Q(1) = SHR(s->Q(0), shift + 64) |
+ SHR(s->Q(1), shift - 0) |
+ SHR(d->Q(0), shift - 64) |
+ SHR(d->Q(1), shift - 128);
+#endif
+#undef SHR
+ }
+
+ *d = r;
+}
+
+#define XMM0 (env->xmm_regs[0])
+
+#if SHIFT == 1
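+/* The SSE4.1 variable blends take their selector from the implicit third
+ * operand XMM0, referenced through the macro above. */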
+#define SSE_HELPER_V(name, elem, num, F) \
+ void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \
+ { \
+ d->elem(0) = F(d->elem(0), s->elem(0), XMM0.elem(0)); \
+ d->elem(1) = F(d->elem(1), s->elem(1), XMM0.elem(1)); \
+ if (num > 2) { \
+ d->elem(2) = F(d->elem(2), s->elem(2), XMM0.elem(2)); \
+ d->elem(3) = F(d->elem(3), s->elem(3), XMM0.elem(3)); \
+ if (num > 4) { \
+ d->elem(4) = F(d->elem(4), s->elem(4), XMM0.elem(4)); \
+ d->elem(5) = F(d->elem(5), s->elem(5), XMM0.elem(5)); \
+ d->elem(6) = F(d->elem(6), s->elem(6), XMM0.elem(6)); \
+ d->elem(7) = F(d->elem(7), s->elem(7), XMM0.elem(7)); \
+ if (num > 8) { \
+ d->elem(8) = F(d->elem(8), s->elem(8), XMM0.elem(8)); \
+ d->elem(9) = F(d->elem(9), s->elem(9), XMM0.elem(9)); \
+ d->elem(10) = F(d->elem(10), s->elem(10), XMM0.elem(10)); \
+ d->elem(11) = F(d->elem(11), s->elem(11), XMM0.elem(11)); \
+ d->elem(12) = F(d->elem(12), s->elem(12), XMM0.elem(12)); \
+ d->elem(13) = F(d->elem(13), s->elem(13), XMM0.elem(13)); \
+ d->elem(14) = F(d->elem(14), s->elem(14), XMM0.elem(14)); \
+ d->elem(15) = F(d->elem(15), s->elem(15), XMM0.elem(15)); \
+ } \
+ } \
+ } \
+ }
+
+#define SSE_HELPER_I(name, elem, num, F) \
+ void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t imm) \
+ { \
+ d->elem(0) = F(d->elem(0), s->elem(0), ((imm >> 0) & 1)); \
+ d->elem(1) = F(d->elem(1), s->elem(1), ((imm >> 1) & 1)); \
+ if (num > 2) { \
+ d->elem(2) = F(d->elem(2), s->elem(2), ((imm >> 2) & 1)); \
+ d->elem(3) = F(d->elem(3), s->elem(3), ((imm >> 3) & 1)); \
+ if (num > 4) { \
+ d->elem(4) = F(d->elem(4), s->elem(4), ((imm >> 4) & 1)); \
+ d->elem(5) = F(d->elem(5), s->elem(5), ((imm >> 5) & 1)); \
+ d->elem(6) = F(d->elem(6), s->elem(6), ((imm >> 6) & 1)); \
+ d->elem(7) = F(d->elem(7), s->elem(7), ((imm >> 7) & 1)); \
+ if (num > 8) { \
+ d->elem(8) = F(d->elem(8), s->elem(8), ((imm >> 8) & 1)); \
+ d->elem(9) = F(d->elem(9), s->elem(9), ((imm >> 9) & 1)); \
+ d->elem(10) = F(d->elem(10), s->elem(10), \
+ ((imm >> 10) & 1)); \
+ d->elem(11) = F(d->elem(11), s->elem(11), \
+ ((imm >> 11) & 1)); \
+ d->elem(12) = F(d->elem(12), s->elem(12), \
+ ((imm >> 12) & 1)); \
+ d->elem(13) = F(d->elem(13), s->elem(13), \
+ ((imm >> 13) & 1)); \
+ d->elem(14) = F(d->elem(14), s->elem(14), \
+ ((imm >> 14) & 1)); \
+ d->elem(15) = F(d->elem(15), s->elem(15), \
+ ((imm >> 15) & 1)); \
+ } \
+ } \
+ } \
+ }
+
+/* SSE4.1 op helpers */
+#define FBLENDVB(d, s, m) ((m & 0x80) ? s : d)
+#define FBLENDVPS(d, s, m) ((m & 0x80000000) ? s : d)
+#define FBLENDVPD(d, s, m) ((m & 0x8000000000000000LL) ? s : d)
+SSE_HELPER_V(helper_pblendvb, B, 16, FBLENDVB)
+SSE_HELPER_V(helper_blendvps, L, 4, FBLENDVPS)
+SSE_HELPER_V(helper_blendvpd, Q, 2, FBLENDVPD)
+
+void glue(helper_ptest, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ uint64_t zf = (s->Q(0) & d->Q(0)) | (s->Q(1) & d->Q(1));
+ uint64_t cf = (s->Q(0) & ~d->Q(0)) | (s->Q(1) & ~d->Q(1));
+
+ CC_SRC = (zf ? 0 : CC_Z) | (cf ? 0 : CC_C);
+}
+
+#define SSE_HELPER_F(name, elem, num, F) \
+ void glue(name, SUFFIX)(CPUX86State *env, Reg *d, Reg *s) \
+ { \
+ d->elem(0) = F(0); \
+ d->elem(1) = F(1); \
+ if (num > 2) { \
+ d->elem(2) = F(2); \
+ d->elem(3) = F(3); \
+ if (num > 4) { \
+ d->elem(4) = F(4); \
+ d->elem(5) = F(5); \
+ d->elem(6) = F(6); \
+ d->elem(7) = F(7); \
+ } \
+ } \
+ }
+
+SSE_HELPER_F(helper_pmovsxbw, W, 8, (int8_t) s->B)
+SSE_HELPER_F(helper_pmovsxbd, L, 4, (int8_t) s->B)
+SSE_HELPER_F(helper_pmovsxbq, Q, 2, (int8_t) s->B)
+SSE_HELPER_F(helper_pmovsxwd, L, 4, (int16_t) s->W)
+SSE_HELPER_F(helper_pmovsxwq, Q, 2, (int16_t) s->W)
+SSE_HELPER_F(helper_pmovsxdq, Q, 2, (int32_t) s->L)
+SSE_HELPER_F(helper_pmovzxbw, W, 8, s->B)
+SSE_HELPER_F(helper_pmovzxbd, L, 4, s->B)
+SSE_HELPER_F(helper_pmovzxbq, Q, 2, s->B)
+SSE_HELPER_F(helper_pmovzxwd, L, 4, s->W)
+SSE_HELPER_F(helper_pmovzxwq, Q, 2, s->W)
+SSE_HELPER_F(helper_pmovzxdq, Q, 2, s->L)
+
+void glue(helper_pmuldq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ d->Q(0) = (int64_t)(int32_t) d->L(0) * (int32_t) s->L(0);
+ d->Q(1) = (int64_t)(int32_t) d->L(2) * (int32_t) s->L(2);
+}
+
+#define FCMPEQQ(d, s) (d == s ? -1 : 0)
+SSE_HELPER_Q(helper_pcmpeqq, FCMPEQQ)
+
+void glue(helper_packusdw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ d->W(0) = satuw((int32_t) d->L(0));
+ d->W(1) = satuw((int32_t) d->L(1));
+ d->W(2) = satuw((int32_t) d->L(2));
+ d->W(3) = satuw((int32_t) d->L(3));
+ d->W(4) = satuw((int32_t) s->L(0));
+ d->W(5) = satuw((int32_t) s->L(1));
+ d->W(6) = satuw((int32_t) s->L(2));
+ d->W(7) = satuw((int32_t) s->L(3));
+}
+
+#define FMINSB(d, s) MIN((int8_t)d, (int8_t)s)
+#define FMINSD(d, s) MIN((int32_t)d, (int32_t)s)
+#define FMAXSB(d, s) MAX((int8_t)d, (int8_t)s)
+#define FMAXSD(d, s) MAX((int32_t)d, (int32_t)s)
+SSE_HELPER_B(helper_pminsb, FMINSB)
+SSE_HELPER_L(helper_pminsd, FMINSD)
+SSE_HELPER_W(helper_pminuw, MIN)
+SSE_HELPER_L(helper_pminud, MIN)
+SSE_HELPER_B(helper_pmaxsb, FMAXSB)
+SSE_HELPER_L(helper_pmaxsd, FMAXSD)
+SSE_HELPER_W(helper_pmaxuw, MAX)
+SSE_HELPER_L(helper_pmaxud, MAX)
+
+#define FMULLD(d, s) ((int32_t)d * (int32_t)s)
+SSE_HELPER_L(helper_pmulld, FMULLD)
+
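+/* PHMINPOSUW: locate the minimum unsigned word of the source, store it in
+ * word 0 and its index in word 1, and zero the remaining words. */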
+void glue(helper_phminposuw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ int idx = 0;
+
+ if (s->W(1) < s->W(idx)) {
+ idx = 1;
+ }
+ if (s->W(2) < s->W(idx)) {
+ idx = 2;
+ }
+ if (s->W(3) < s->W(idx)) {
+ idx = 3;
+ }
+ if (s->W(4) < s->W(idx)) {
+ idx = 4;
+ }
+ if (s->W(5) < s->W(idx)) {
+ idx = 5;
+ }
+ if (s->W(6) < s->W(idx)) {
+ idx = 6;
+ }
+ if (s->W(7) < s->W(idx)) {
+ idx = 7;
+ }
+
+ d->Q(1) = 0;
+ d->L(1) = 0;
+ d->W(1) = idx;
+ d->W(0) = s->W(idx);
+}
+
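+/* ROUNDPS/PD/SS/SD immediate: bit 2 set means use the current MXCSR
+ * rounding mode, otherwise bits 1:0 select it; bit 3 (suppress the
+ * precision exception) is left unimplemented below. */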
+void glue(helper_roundps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+ uint32_t mode)
+{
+ signed char prev_rounding_mode;
+
+ prev_rounding_mode = env->sse_status.float_rounding_mode;
+ if (!(mode & (1 << 2))) {
+ switch (mode & 3) {
+ case 0:
+ set_float_rounding_mode(float_round_nearest_even, &env->sse_status);
+ break;
+ case 1:
+ set_float_rounding_mode(float_round_down, &env->sse_status);
+ break;
+ case 2:
+ set_float_rounding_mode(float_round_up, &env->sse_status);
+ break;
+ case 3:
+ set_float_rounding_mode(float_round_to_zero, &env->sse_status);
+ break;
+ }
+ }
+
+ d->ZMM_S(0) = float32_round_to_int(s->ZMM_S(0), &env->sse_status);
+ d->ZMM_S(1) = float32_round_to_int(s->ZMM_S(1), &env->sse_status);
+ d->ZMM_S(2) = float32_round_to_int(s->ZMM_S(2), &env->sse_status);
+ d->ZMM_S(3) = float32_round_to_int(s->ZMM_S(3), &env->sse_status);
+
+#if 0 /* TODO */
+ if (mode & (1 << 3)) {
+ set_float_exception_flags(get_float_exception_flags(&env->sse_status) &
+ ~float_flag_inexact,
+ &env->sse_status);
+ }
+#endif
+ env->sse_status.float_rounding_mode = prev_rounding_mode;
+}
+
+void glue(helper_roundpd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+ uint32_t mode)
+{
+ signed char prev_rounding_mode;
+
+ prev_rounding_mode = env->sse_status.float_rounding_mode;
+ if (!(mode & (1 << 2))) {
+ switch (mode & 3) {
+ case 0:
+ set_float_rounding_mode(float_round_nearest_even, &env->sse_status);
+ break;
+ case 1:
+ set_float_rounding_mode(float_round_down, &env->sse_status);
+ break;
+ case 2:
+ set_float_rounding_mode(float_round_up, &env->sse_status);
+ break;
+ case 3:
+ set_float_rounding_mode(float_round_to_zero, &env->sse_status);
+ break;
+ }
+ }
+
+ d->ZMM_D(0) = float64_round_to_int(s->ZMM_D(0), &env->sse_status);
+ d->ZMM_D(1) = float64_round_to_int(s->ZMM_D(1), &env->sse_status);
+
+#if 0 /* TODO */
+ if (mode & (1 << 3)) {
+ set_float_exception_flags(get_float_exception_flags(&env->sse_status) &
+ ~float_flag_inexact,
+ &env->sse_status);
+ }
+#endif
+ env->sse_status.float_rounding_mode = prev_rounding_mode;
+}
+
+void glue(helper_roundss, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+ uint32_t mode)
+{
+ signed char prev_rounding_mode;
+
+ prev_rounding_mode = env->sse_status.float_rounding_mode;
+ if (!(mode & (1 << 2))) {
+ switch (mode & 3) {
+ case 0:
+ set_float_rounding_mode(float_round_nearest_even, &env->sse_status);
+ break;
+ case 1:
+ set_float_rounding_mode(float_round_down, &env->sse_status);
+ break;
+ case 2:
+ set_float_rounding_mode(float_round_up, &env->sse_status);
+ break;
+ case 3:
+ set_float_rounding_mode(float_round_to_zero, &env->sse_status);
+ break;
+ }
+ }
+
+ d->ZMM_S(0) = float32_round_to_int(s->ZMM_S(0), &env->sse_status);
+
+#if 0 /* TODO */
+ if (mode & (1 << 3)) {
+ set_float_exception_flags(get_float_exception_flags(&env->sse_status) &
+ ~float_flag_inexact,
+ &env->sse_status);
+ }
+#endif
+ env->sse_status.float_rounding_mode = prev_rounding_mode;
+}
+
+void glue(helper_roundsd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+ uint32_t mode)
+{
+ signed char prev_rounding_mode;
+
+ prev_rounding_mode = env->sse_status.float_rounding_mode;
+ if (!(mode & (1 << 2))) {
+ switch (mode & 3) {
+ case 0:
+ set_float_rounding_mode(float_round_nearest_even, &env->sse_status);
+ break;
+ case 1:
+ set_float_rounding_mode(float_round_down, &env->sse_status);
+ break;
+ case 2:
+ set_float_rounding_mode(float_round_up, &env->sse_status);
+ break;
+ case 3:
+ set_float_rounding_mode(float_round_to_zero, &env->sse_status);
+ break;
+ }
+ }
+
+ d->ZMM_D(0) = float64_round_to_int(s->ZMM_D(0), &env->sse_status);
+
+#if 0 /* TODO */
+ if (mode & (1 << 3)) {
+ set_float_exception_flags(get_float_exception_flags(&env->sse_status) &
+ ~float_flag_inexact,
+ &env->sse_status);
+ }
+#endif
+ env->sse_status.float_rounding_mode = prev_rounding_mode;
+}
+
+#define FBLENDP(d, s, m) (m ? s : d)
+SSE_HELPER_I(helper_blendps, L, 4, FBLENDP)
+SSE_HELPER_I(helper_blendpd, Q, 2, FBLENDP)
+SSE_HELPER_I(helper_pblendw, W, 8, FBLENDP)
+
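+/* DPPS/DPPD: the high nibble of the mask selects which products enter the
+ * dot product, the low nibble selects which result lanes receive it (the
+ * others are zeroed). */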
+void glue(helper_dpps, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t mask)
+{
+ float32 iresult = float32_zero;
+
+ if (mask & (1 << 4)) {
+ iresult = float32_add(iresult,
+ float32_mul(d->ZMM_S(0), s->ZMM_S(0),
+ &env->sse_status),
+ &env->sse_status);
+ }
+ if (mask & (1 << 5)) {
+ iresult = float32_add(iresult,
+ float32_mul(d->ZMM_S(1), s->ZMM_S(1),
+ &env->sse_status),
+ &env->sse_status);
+ }
+ if (mask & (1 << 6)) {
+ iresult = float32_add(iresult,
+ float32_mul(d->ZMM_S(2), s->ZMM_S(2),
+ &env->sse_status),
+ &env->sse_status);
+ }
+ if (mask & (1 << 7)) {
+ iresult = float32_add(iresult,
+ float32_mul(d->ZMM_S(3), s->ZMM_S(3),
+ &env->sse_status),
+ &env->sse_status);
+ }
+ d->ZMM_S(0) = (mask & (1 << 0)) ? iresult : float32_zero;
+ d->ZMM_S(1) = (mask & (1 << 1)) ? iresult : float32_zero;
+ d->ZMM_S(2) = (mask & (1 << 2)) ? iresult : float32_zero;
+ d->ZMM_S(3) = (mask & (1 << 3)) ? iresult : float32_zero;
+}
+
+void glue(helper_dppd, SUFFIX)(CPUX86State *env, Reg *d, Reg *s, uint32_t mask)
+{
+ float64 iresult = float64_zero;
+
+ if (mask & (1 << 4)) {
+ iresult = float64_add(iresult,
+ float64_mul(d->ZMM_D(0), s->ZMM_D(0),
+ &env->sse_status),
+ &env->sse_status);
+ }
+ if (mask & (1 << 5)) {
+ iresult = float64_add(iresult,
+ float64_mul(d->ZMM_D(1), s->ZMM_D(1),
+ &env->sse_status),
+ &env->sse_status);
+ }
+ d->ZMM_D(0) = (mask & (1 << 0)) ? iresult : float64_zero;
+ d->ZMM_D(1) = (mask & (1 << 1)) ? iresult : float64_zero;
+}
+
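+/* MPSADBW: eight sums of absolute differences of 4-byte groups; offset
+ * bits 1:0 select the 4-byte source block, bit 2 the starting byte of the
+ * sliding destination window. */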
+void glue(helper_mpsadbw, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+ uint32_t offset)
+{
+ int s0 = (offset & 3) << 2;
+ int d0 = (offset & 4) << 0;
+ int i;
+ Reg r;
+
+ for (i = 0; i < 8; i++, d0++) {
+ r.W(i) = 0;
+ r.W(i) += abs1(d->B(d0 + 0) - s->B(s0 + 0));
+ r.W(i) += abs1(d->B(d0 + 1) - s->B(s0 + 1));
+ r.W(i) += abs1(d->B(d0 + 2) - s->B(s0 + 2));
+ r.W(i) += abs1(d->B(d0 + 3) - s->B(s0 + 3));
+ }
+
+ *d = r;
+}
+
+/* SSE4.2 op helpers */
+#define FCMPGTQ(d, s) ((int64_t)d > (int64_t)s ? -1 : 0)
+SSE_HELPER_Q(helper_pcmpgtq, FCMPGTQ)
+
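+/* Explicit-length forms: the element counts come from EAX/EDX (taken as an
+ * absolute value), clamped to 8 words or 16 bytes. */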
+static inline int pcmp_elen(CPUX86State *env, int reg, uint32_t ctrl)
+{
+ int val;
+
+ /* Presence of REX.W is indicated by a bit above bit 7 being set in ctrl */
+ if (ctrl >> 8) {
+ val = abs1((int64_t)env->regs[reg]);
+ } else {
+ val = abs1((int32_t)env->regs[reg]);
+ }
+
+ if (ctrl & 1) {
+ if (val > 8) {
+ return 8;
+ }
+ } else {
+ if (val > 16) {
+ return 16;
+ }
+ }
+ return val;
+}
+
+static inline int pcmp_ilen(Reg *r, uint8_t ctrl)
+{
+ int val = 0;
+
+ if (ctrl & 1) {
+ while (val < 8 && r->W(val)) {
+ val++;
+ }
+ } else {
+ while (val < 16 && r->B(val)) {
+ val++;
+ }
+ }
+
+ return val;
+}
+
+static inline int pcmp_val(Reg *r, uint8_t ctrl, int i)
+{
+ switch ((ctrl >> 0) & 3) {
+ case 0:
+ return r->B(i);
+ case 1:
+ return r->W(i);
+ case 2:
+ return (int8_t)r->B(i);
+ case 3:
+ default:
+ return (int16_t)r->W(i);
+ }
+}
+
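+/* Common helper for the SSE4.2 string compares: ctrl bits 1:0 select the
+ * element format, bits 3:2 the aggregation operation (equal any, ranges,
+ * equal each, equal ordered) and bits 5:4 the polarity of the result
+ * mask; ZF/SF/CF/OF are accumulated in CC_SRC. */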
+static inline unsigned pcmpxstrx(CPUX86State *env, Reg *d, Reg *s,
+ int8_t ctrl, int valids, int validd)
+{
+ unsigned int res = 0;
+ int v;
+ int j, i;
+ int upper = (ctrl & 1) ? 7 : 15;
+
+ valids--;
+ validd--;
+
+ CC_SRC = (valids < upper ? CC_Z : 0) | (validd < upper ? CC_S : 0);
+
+ switch ((ctrl >> 2) & 3) {
+ case 0:
+ for (j = valids; j >= 0; j--) {
+ res <<= 1;
+ v = pcmp_val(s, ctrl, j);
+ for (i = validd; i >= 0; i--) {
+ res |= (v == pcmp_val(d, ctrl, i));
+ }
+ }
+ break;
+ case 1:
+ for (j = valids; j >= 0; j--) {
+ res <<= 1;
+ v = pcmp_val(s, ctrl, j);
+ for (i = ((validd - 1) | 1); i >= 0; i -= 2) {
+ res |= (pcmp_val(d, ctrl, i - 0) >= v &&
+ pcmp_val(d, ctrl, i - 1) <= v);
+ }
+ }
+ break;
+ case 2:
+ res = (1 << (upper - MAX(valids, validd))) - 1;
+ res <<= MAX(valids, validd) - MIN(valids, validd);
+ for (i = MIN(valids, validd); i >= 0; i--) {
+ res <<= 1;
+ v = pcmp_val(s, ctrl, i);
+ res |= (v == pcmp_val(d, ctrl, i));
+ }
+ break;
+ case 3:
+ for (j = valids; j >= 0; j--) {
+ res <<= 1;
+ v = 1;
+ for (i = MIN(valids - j, validd); i >= 0; i--) {
+ v &= (pcmp_val(s, ctrl, i + j) == pcmp_val(d, ctrl, i));
+ }
+ res |= v;
+ }
+ break;
+ }
+
+ switch ((ctrl >> 4) & 3) {
+ case 1:
+ res ^= (2 << upper) - 1;
+ break;
+ case 3:
+ res ^= (1 << (valids + 1)) - 1;
+ break;
+ }
+
+ if (res) {
+ CC_SRC |= CC_C;
+ }
+ if (res & 1) {
+ CC_SRC |= CC_O;
+ }
+
+ return res;
+}
+
+void glue(helper_pcmpestri, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+ uint32_t ctrl)
+{
+ unsigned int res = pcmpxstrx(env, d, s, ctrl,
+ pcmp_elen(env, R_EDX, ctrl),
+ pcmp_elen(env, R_EAX, ctrl));
+
+ if (res) {
+ env->regs[R_ECX] = (ctrl & (1 << 6)) ? 31 - clz32(res) : ctz32(res);
+ } else {
+ env->regs[R_ECX] = 16 >> (ctrl & (1 << 0));
+ }
+}
+
+void glue(helper_pcmpestrm, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+ uint32_t ctrl)
+{
+ int i;
+ unsigned int res = pcmpxstrx(env, d, s, ctrl,
+ pcmp_elen(env, R_EDX, ctrl),
+ pcmp_elen(env, R_EAX, ctrl));
+
+ if ((ctrl >> 6) & 1) {
+ if (ctrl & 1) {
+ for (i = 0; i < 8; i++, res >>= 1) {
+ env->xmm_regs[0].W(i) = (res & 1) ? ~0 : 0;
+ }
+ } else {
+ for (i = 0; i < 16; i++, res >>= 1) {
+ env->xmm_regs[0].B(i) = (res & 1) ? ~0 : 0;
+ }
+ }
+ } else {
+ env->xmm_regs[0].Q(1) = 0;
+ env->xmm_regs[0].Q(0) = res;
+ }
+}
+
+void glue(helper_pcmpistri, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+ uint32_t ctrl)
+{
+ unsigned int res = pcmpxstrx(env, d, s, ctrl,
+ pcmp_ilen(s, ctrl),
+ pcmp_ilen(d, ctrl));
+
+ if (res) {
+ env->regs[R_ECX] = (ctrl & (1 << 6)) ? 31 - clz32(res) : ctz32(res);
+ } else {
+ env->regs[R_ECX] = 16 >> (ctrl & (1 << 0));
+ }
+}
+
+void glue(helper_pcmpistrm, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+ uint32_t ctrl)
+{
+ int i;
+ unsigned int res = pcmpxstrx(env, d, s, ctrl,
+ pcmp_ilen(s, ctrl),
+ pcmp_ilen(d, ctrl));
+
+ if ((ctrl >> 6) & 1) {
+ if (ctrl & 1) {
+ for (i = 0; i < 8; i++, res >>= 1) {
+ env->xmm_regs[0].W(i) = (res & 1) ? ~0 : 0;
+ }
+ } else {
+ for (i = 0; i < 16; i++, res >>= 1) {
+ env->xmm_regs[0].B(i) = (res & 1) ? ~0 : 0;
+ }
+ }
+ } else {
+ env->xmm_regs[0].Q(1) = 0;
+ env->xmm_regs[0].Q(0) = res;
+ }
+}
+
+#define CRCPOLY 0x1edc6f41
+#define CRCPOLY_BITREV 0x82f63b78
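+/* The CRC32 instruction computes CRC-32C (Castagnoli); the bit-reversed
+ * polynomial is processed one bit at a time, with 'len' giving the width
+ * of 'msg' in bits. */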
+target_ulong helper_crc32(uint32_t crc1, target_ulong msg, uint32_t len)
+{
+ target_ulong crc = (msg & ((target_ulong) -1 >>
+ (TARGET_LONG_BITS - len))) ^ crc1;
+
+ while (len--) {
+ crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_BITREV : 0);
+ }
+
+ return crc;
+}
+
+#define POPMASK(i) ((target_ulong) -1 / ((1LL << (1 << i)) + 1))
+#define POPCOUNT(n, i) ((n & POPMASK(i)) + ((n >> (1 << i)) & POPMASK(i)))
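+/* POPCNT via the usual parallel (SWAR) reduction: step i sums adjacent
+ * fields of 2^i bits. 'type' is the operand size (1: 16-bit, 2: 32-bit,
+ * otherwise the full register width). */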
+target_ulong helper_popcnt(CPUX86State *env, target_ulong n, uint32_t type)
+{
+ CC_SRC = n ? 0 : CC_Z;
+
+ n = POPCOUNT(n, 0);
+ n = POPCOUNT(n, 1);
+ n = POPCOUNT(n, 2);
+ n = POPCOUNT(n, 3);
+ if (type == 1) {
+ return n & 0xff;
+ }
+
+ n = POPCOUNT(n, 4);
+#ifndef TARGET_X86_64
+ return n;
+#else
+ if (type == 2) {
+ return n & 0xff;
+ }
+
+ return POPCOUNT(n, 5);
+#endif
+}
+
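+/* PCLMULQDQ: carry-less (GF(2)) multiplication of the 64-bit halves
+ * selected by ctrl bit 0 (destination) and bit 4 (source), computed with
+ * a shift-and-xor loop into a 128-bit result. */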
+void glue(helper_pclmulqdq, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+ uint32_t ctrl)
+{
+ uint64_t ah, al, b, resh, resl;
+
+ ah = 0;
+ al = d->Q((ctrl & 1) != 0);
+ b = s->Q((ctrl & 16) != 0);
+ resh = resl = 0;
+
+ while (b) {
+ if (b & 1) {
+ resl ^= al;
+ resh ^= ah;
+ }
+ ah = (ah << 1) | (al >> 63);
+ al <<= 1;
+ b >>= 1;
+ }
+
+ d->Q(0) = resl;
+ d->Q(1) = resh;
+}
+
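+/* AES-NI round helpers, built on QEMU's shared AES lookup tables: the
+ * (inverse) ShiftRows permutation comes from AES_shifts / AES_ishifts,
+ * SubBytes and MixColumns are folded into the AES_Te / AES_Td tables, and
+ * the round key is xored in last. */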
+void glue(helper_aesdec, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ int i;
+ Reg st = *d;
+ Reg rk = *s;
+
+ for (i = 0 ; i < 4 ; i++) {
+ d->L(i) = rk.L(i) ^ bswap32(AES_Td0[st.B(AES_ishifts[4*i+0])] ^
+ AES_Td1[st.B(AES_ishifts[4*i+1])] ^
+ AES_Td2[st.B(AES_ishifts[4*i+2])] ^
+ AES_Td3[st.B(AES_ishifts[4*i+3])]);
+ }
+}
+
+void glue(helper_aesdeclast, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ int i;
+ Reg st = *d;
+ Reg rk = *s;
+
+ for (i = 0; i < 16; i++) {
+ d->B(i) = rk.B(i) ^ (AES_isbox[st.B(AES_ishifts[i])]);
+ }
+}
+
+void glue(helper_aesenc, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ int i;
+ Reg st = *d;
+ Reg rk = *s;
+
+ for (i = 0 ; i < 4 ; i++) {
+ d->L(i) = rk.L(i) ^ bswap32(AES_Te0[st.B(AES_shifts[4*i+0])] ^
+ AES_Te1[st.B(AES_shifts[4*i+1])] ^
+ AES_Te2[st.B(AES_shifts[4*i+2])] ^
+ AES_Te3[st.B(AES_shifts[4*i+3])]);
+ }
+}
+
+void glue(helper_aesenclast, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ int i;
+ Reg st = *d;
+ Reg rk = *s;
+
+ for (i = 0; i < 16; i++) {
+ d->B(i) = rk.B(i) ^ (AES_sbox[st.B(AES_shifts[i])]);
+ }
+
+}
+
+void glue(helper_aesimc, SUFFIX)(CPUX86State *env, Reg *d, Reg *s)
+{
+ int i;
+ Reg tmp = *s;
+
+ for (i = 0 ; i < 4 ; i++) {
+ d->L(i) = bswap32(AES_imc[tmp.B(4*i+0)][0] ^
+ AES_imc[tmp.B(4*i+1)][1] ^
+ AES_imc[tmp.B(4*i+2)][2] ^
+ AES_imc[tmp.B(4*i+3)][3]);
+ }
+}
+
+void glue(helper_aeskeygenassist, SUFFIX)(CPUX86State *env, Reg *d, Reg *s,
+ uint32_t ctrl)
+{
+ int i;
+ Reg tmp = *s;
+
+ for (i = 0 ; i < 4 ; i++) {
+ d->B(i) = AES_sbox[tmp.B(i + 4)];
+ d->B(i + 8) = AES_sbox[tmp.B(i + 12)];
+ }
+ d->L(1) = (d->L(0) << 24 | d->L(0) >> 8) ^ ctrl;
+ d->L(3) = (d->L(2) << 24 | d->L(2) >> 8) ^ ctrl;
+}
+#endif
+
+#undef SHIFT
+#undef XMM_ONLY
+#undef Reg
+#undef B
+#undef W
+#undef L
+#undef Q
+#undef SUFFIX
diff --git a/target/i386/ops_sse_header.h b/target/i386/ops_sse_header.h
new file mode 100644
index 0000000000..64c5857cf4
--- /dev/null
+++ b/target/i386/ops_sse_header.h
@@ -0,0 +1,360 @@
+/*
+ * MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4/PNI support
+ *
+ * Copyright (c) 2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#if SHIFT == 0
+#define Reg MMXReg
+#define SUFFIX _mmx
+#else
+#define Reg ZMMReg
+#define SUFFIX _xmm
+#endif
+
+#define dh_alias_Reg ptr
+#define dh_alias_ZMMReg ptr
+#define dh_alias_MMXReg ptr
+#define dh_ctype_Reg Reg *
+#define dh_ctype_ZMMReg ZMMReg *
+#define dh_ctype_MMXReg MMXReg *
+#define dh_is_signed_Reg dh_is_signed_ptr
+#define dh_is_signed_ZMMReg dh_is_signed_ptr
+#define dh_is_signed_MMXReg dh_is_signed_ptr
+
+DEF_HELPER_3(glue(psrlw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(psraw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(psllw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(psrld, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(psrad, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pslld, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(psrlq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(psllq, SUFFIX), void, env, Reg, Reg)
+
+#if SHIFT == 1
+DEF_HELPER_3(glue(psrldq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pslldq, SUFFIX), void, env, Reg, Reg)
+#endif
+
+#define SSE_HELPER_B(name, F)\
+ DEF_HELPER_3(glue(name, SUFFIX), void, env, Reg, Reg)
+
+#define SSE_HELPER_W(name, F)\
+ DEF_HELPER_3(glue(name, SUFFIX), void, env, Reg, Reg)
+
+#define SSE_HELPER_L(name, F)\
+ DEF_HELPER_3(glue(name, SUFFIX), void, env, Reg, Reg)
+
+#define SSE_HELPER_Q(name, F)\
+ DEF_HELPER_3(glue(name, SUFFIX), void, env, Reg, Reg)
+
+SSE_HELPER_B(paddb, FADD)
+SSE_HELPER_W(paddw, FADD)
+SSE_HELPER_L(paddl, FADD)
+SSE_HELPER_Q(paddq, FADD)
+
+SSE_HELPER_B(psubb, FSUB)
+SSE_HELPER_W(psubw, FSUB)
+SSE_HELPER_L(psubl, FSUB)
+SSE_HELPER_Q(psubq, FSUB)
+
+SSE_HELPER_B(paddusb, FADDUB)
+SSE_HELPER_B(paddsb, FADDSB)
+SSE_HELPER_B(psubusb, FSUBUB)
+SSE_HELPER_B(psubsb, FSUBSB)
+
+SSE_HELPER_W(paddusw, FADDUW)
+SSE_HELPER_W(paddsw, FADDSW)
+SSE_HELPER_W(psubusw, FSUBUW)
+SSE_HELPER_W(psubsw, FSUBSW)
+
+SSE_HELPER_B(pminub, FMINUB)
+SSE_HELPER_B(pmaxub, FMAXUB)
+
+SSE_HELPER_W(pminsw, FMINSW)
+SSE_HELPER_W(pmaxsw, FMAXSW)
+
+SSE_HELPER_Q(pand, FAND)
+SSE_HELPER_Q(pandn, FANDN)
+SSE_HELPER_Q(por, FOR)
+SSE_HELPER_Q(pxor, FXOR)
+
+SSE_HELPER_B(pcmpgtb, FCMPGTB)
+SSE_HELPER_W(pcmpgtw, FCMPGTW)
+SSE_HELPER_L(pcmpgtl, FCMPGTL)
+
+SSE_HELPER_B(pcmpeqb, FCMPEQ)
+SSE_HELPER_W(pcmpeqw, FCMPEQ)
+SSE_HELPER_L(pcmpeql, FCMPEQ)
+
+SSE_HELPER_W(pmullw, FMULLW)
+#if SHIFT == 0
+SSE_HELPER_W(pmulhrw, FMULHRW)
+#endif
+SSE_HELPER_W(pmulhuw, FMULHUW)
+SSE_HELPER_W(pmulhw, FMULHW)
+
+SSE_HELPER_B(pavgb, FAVG)
+SSE_HELPER_W(pavgw, FAVG)
+
+DEF_HELPER_3(glue(pmuludq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmaddwd, SUFFIX), void, env, Reg, Reg)
+
+DEF_HELPER_3(glue(psadbw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_4(glue(maskmov, SUFFIX), void, env, Reg, Reg, tl)
+DEF_HELPER_2(glue(movl_mm_T0, SUFFIX), void, Reg, i32)
+#ifdef TARGET_X86_64
+DEF_HELPER_2(glue(movq_mm_T0, SUFFIX), void, Reg, i64)
+#endif
+
+#if SHIFT == 0
+DEF_HELPER_3(glue(pshufw, SUFFIX), void, Reg, Reg, int)
+#else
+DEF_HELPER_3(shufps, void, Reg, Reg, int)
+DEF_HELPER_3(shufpd, void, Reg, Reg, int)
+DEF_HELPER_3(glue(pshufd, SUFFIX), void, Reg, Reg, int)
+DEF_HELPER_3(glue(pshuflw, SUFFIX), void, Reg, Reg, int)
+DEF_HELPER_3(glue(pshufhw, SUFFIX), void, Reg, Reg, int)
+#endif
+
+#if SHIFT == 1
+/* FPU ops */
+/* XXX: not accurate */
+
+#define SSE_HELPER_S(name, F) \
+ DEF_HELPER_3(name ## ps, void, env, Reg, Reg) \
+ DEF_HELPER_3(name ## ss, void, env, Reg, Reg) \
+ DEF_HELPER_3(name ## pd, void, env, Reg, Reg) \
+ DEF_HELPER_3(name ## sd, void, env, Reg, Reg)
+
+SSE_HELPER_S(add, FPU_ADD)
+SSE_HELPER_S(sub, FPU_SUB)
+SSE_HELPER_S(mul, FPU_MUL)
+SSE_HELPER_S(div, FPU_DIV)
+SSE_HELPER_S(min, FPU_MIN)
+SSE_HELPER_S(max, FPU_MAX)
+SSE_HELPER_S(sqrt, FPU_SQRT)
+
+
+DEF_HELPER_3(cvtps2pd, void, env, Reg, Reg)
+DEF_HELPER_3(cvtpd2ps, void, env, Reg, Reg)
+DEF_HELPER_3(cvtss2sd, void, env, Reg, Reg)
+DEF_HELPER_3(cvtsd2ss, void, env, Reg, Reg)
+DEF_HELPER_3(cvtdq2ps, void, env, Reg, Reg)
+DEF_HELPER_3(cvtdq2pd, void, env, Reg, Reg)
+DEF_HELPER_3(cvtpi2ps, void, env, ZMMReg, MMXReg)
+DEF_HELPER_3(cvtpi2pd, void, env, ZMMReg, MMXReg)
+DEF_HELPER_3(cvtsi2ss, void, env, ZMMReg, i32)
+DEF_HELPER_3(cvtsi2sd, void, env, ZMMReg, i32)
+
+#ifdef TARGET_X86_64
+DEF_HELPER_3(cvtsq2ss, void, env, ZMMReg, i64)
+DEF_HELPER_3(cvtsq2sd, void, env, ZMMReg, i64)
+#endif
+
+DEF_HELPER_3(cvtps2dq, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(cvtpd2dq, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(cvtps2pi, void, env, MMXReg, ZMMReg)
+DEF_HELPER_3(cvtpd2pi, void, env, MMXReg, ZMMReg)
+DEF_HELPER_2(cvtss2si, s32, env, ZMMReg)
+DEF_HELPER_2(cvtsd2si, s32, env, ZMMReg)
+#ifdef TARGET_X86_64
+DEF_HELPER_2(cvtss2sq, s64, env, ZMMReg)
+DEF_HELPER_2(cvtsd2sq, s64, env, ZMMReg)
+#endif
+
+DEF_HELPER_3(cvttps2dq, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(cvttpd2dq, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(cvttps2pi, void, env, MMXReg, ZMMReg)
+DEF_HELPER_3(cvttpd2pi, void, env, MMXReg, ZMMReg)
+DEF_HELPER_2(cvttss2si, s32, env, ZMMReg)
+DEF_HELPER_2(cvttsd2si, s32, env, ZMMReg)
+#ifdef TARGET_X86_64
+DEF_HELPER_2(cvttss2sq, s64, env, ZMMReg)
+DEF_HELPER_2(cvttsd2sq, s64, env, ZMMReg)
+#endif
+
+DEF_HELPER_3(rsqrtps, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(rsqrtss, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(rcpps, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(rcpss, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(extrq_r, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_4(extrq_i, void, env, ZMMReg, int, int)
+DEF_HELPER_3(insertq_r, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_4(insertq_i, void, env, ZMMReg, int, int)
+DEF_HELPER_3(haddps, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(haddpd, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(hsubps, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(hsubpd, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(addsubps, void, env, ZMMReg, ZMMReg)
+DEF_HELPER_3(addsubpd, void, env, ZMMReg, ZMMReg)
+
+#define SSE_HELPER_CMP(name, F) \
+ DEF_HELPER_3(name ## ps, void, env, Reg, Reg) \
+ DEF_HELPER_3(name ## ss, void, env, Reg, Reg) \
+ DEF_HELPER_3(name ## pd, void, env, Reg, Reg) \
+ DEF_HELPER_3(name ## sd, void, env, Reg, Reg)
+
+SSE_HELPER_CMP(cmpeq, FPU_CMPEQ)
+SSE_HELPER_CMP(cmplt, FPU_CMPLT)
+SSE_HELPER_CMP(cmple, FPU_CMPLE)
+SSE_HELPER_CMP(cmpunord, FPU_CMPUNORD)
+SSE_HELPER_CMP(cmpneq, FPU_CMPNEQ)
+SSE_HELPER_CMP(cmpnlt, FPU_CMPNLT)
+SSE_HELPER_CMP(cmpnle, FPU_CMPNLE)
+SSE_HELPER_CMP(cmpord, FPU_CMPORD)
+
+DEF_HELPER_3(ucomiss, void, env, Reg, Reg)
+DEF_HELPER_3(comiss, void, env, Reg, Reg)
+DEF_HELPER_3(ucomisd, void, env, Reg, Reg)
+DEF_HELPER_3(comisd, void, env, Reg, Reg)
+DEF_HELPER_2(movmskps, i32, env, Reg)
+DEF_HELPER_2(movmskpd, i32, env, Reg)
+#endif
+
+DEF_HELPER_2(glue(pmovmskb, SUFFIX), i32, env, Reg)
+DEF_HELPER_3(glue(packsswb, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(packuswb, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(packssdw, SUFFIX), void, env, Reg, Reg)
+#define UNPCK_OP(base_name, base) \
+ DEF_HELPER_3(glue(punpck ## base_name ## bw, SUFFIX), void, env, Reg, Reg) \
+ DEF_HELPER_3(glue(punpck ## base_name ## wd, SUFFIX), void, env, Reg, Reg) \
+ DEF_HELPER_3(glue(punpck ## base_name ## dq, SUFFIX), void, env, Reg, Reg)
+
+UNPCK_OP(l, 0)
+UNPCK_OP(h, 1)
+
+#if SHIFT == 1
+DEF_HELPER_3(glue(punpcklqdq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(punpckhqdq, SUFFIX), void, env, Reg, Reg)
+#endif
+
+/* 3DNow! float ops */
+#if SHIFT == 0
+DEF_HELPER_3(pi2fd, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pi2fw, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pf2id, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pf2iw, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfacc, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfadd, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfcmpeq, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfcmpge, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfcmpgt, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfmax, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfmin, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfmul, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfnacc, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfpnacc, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfrcp, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfrsqrt, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfsub, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pfsubr, void, env, MMXReg, MMXReg)
+DEF_HELPER_3(pswapd, void, env, MMXReg, MMXReg)
+#endif
+
+/* SSSE3 op helpers */
+DEF_HELPER_3(glue(phaddw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(phaddd, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(phaddsw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(phsubw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(phsubd, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(phsubsw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pabsb, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pabsw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pabsd, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmaddubsw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmulhrsw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pshufb, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(psignb, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(psignw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(psignd, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_4(glue(palignr, SUFFIX), void, env, Reg, Reg, s32)
+
+/* SSE4.1 op helpers */
+#if SHIFT == 1
+DEF_HELPER_3(glue(pblendvb, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(blendvps, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(blendvpd, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(ptest, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmovsxbw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmovsxbd, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmovsxbq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmovsxwd, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmovsxwq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmovsxdq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmovzxbw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmovzxbd, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmovzxbq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmovzxwd, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmovzxwq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmovzxdq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmuldq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pcmpeqq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(packusdw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pminsb, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pminsd, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pminuw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pminud, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmaxsb, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmaxsd, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmaxuw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmaxud, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(pmulld, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(phminposuw, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_4(glue(roundps, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(roundpd, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(roundss, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(roundsd, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(blendps, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(blendpd, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(pblendw, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(dpps, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(dppd, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(mpsadbw, SUFFIX), void, env, Reg, Reg, i32)
+#endif
+
+/* SSE4.2 op helpers */
+#if SHIFT == 1
+DEF_HELPER_3(glue(pcmpgtq, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_4(glue(pcmpestri, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(pcmpestrm, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(pcmpistri, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(pcmpistrm, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_3(crc32, tl, i32, tl, i32)
+DEF_HELPER_3(popcnt, tl, env, tl, i32)
+#endif
+
+/* AES-NI op helpers */
+#if SHIFT == 1
+DEF_HELPER_3(glue(aesdec, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(aesdeclast, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(aesenc, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(aesenclast, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_3(glue(aesimc, SUFFIX), void, env, Reg, Reg)
+DEF_HELPER_4(glue(aeskeygenassist, SUFFIX), void, env, Reg, Reg, i32)
+DEF_HELPER_4(glue(pclmulqdq, SUFFIX), void, env, Reg, Reg, i32)
+#endif
+
+#undef SHIFT
+#undef Reg
+#undef SUFFIX
+
+#undef SSE_HELPER_B
+#undef SSE_HELPER_W
+#undef SSE_HELPER_L
+#undef SSE_HELPER_Q
+#undef SSE_HELPER_S
+#undef SSE_HELPER_CMP
+#undef UNPCK_OP
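+
+/*
+ * Note: this header is meant to be included more than once, presumably once
+ * per operand width with SHIFT provided by the includer (e.g. 0 for the MMX
+ * variants and 1 for the SSE variants), while Reg and SUFFIX are derived
+ * from SHIFT earlier in this header (not shown here); all three are
+ * #undef'd above so that the next inclusion can redefine them.
+ */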
diff --git a/target/i386/seg_helper.c b/target/i386/seg_helper.c
new file mode 100644
index 0000000000..fb79f3180d
--- /dev/null
+++ b/target/i386/seg_helper.c
@@ -0,0 +1,2568 @@
+/*
+ * x86 segmentation related helpers:
+ * TSS, interrupts, system calls, jumps and call/task gates, descriptors
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "qemu/log.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "exec/log.h"
+
+//#define DEBUG_PCALL
+
+#ifdef DEBUG_PCALL
+# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
+# define LOG_PCALL_STATE(cpu) \
+ log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
+#else
+# define LOG_PCALL(...) do { } while (0)
+# define LOG_PCALL_STATE(cpu) do { } while (0)
+#endif
+
+#ifdef CONFIG_USER_ONLY
+#define MEMSUFFIX _kernel
+#define DATA_SIZE 1
+#include "exec/cpu_ldst_useronly_template.h"
+
+#define DATA_SIZE 2
+#include "exec/cpu_ldst_useronly_template.h"
+
+#define DATA_SIZE 4
+#include "exec/cpu_ldst_useronly_template.h"
+
+#define DATA_SIZE 8
+#include "exec/cpu_ldst_useronly_template.h"
+#undef MEMSUFFIX
+#else
+#define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
+#define MEMSUFFIX _kernel
+#define DATA_SIZE 1
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 2
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 4
+#include "exec/cpu_ldst_template.h"
+
+#define DATA_SIZE 8
+#include "exec/cpu_ldst_template.h"
+#undef CPU_MMU_INDEX
+#undef MEMSUFFIX
+#endif
+
+/* return non-zero on error */
+static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
+ uint32_t *e2_ptr, int selector,
+ uintptr_t retaddr)
+{
+ SegmentCache *dt;
+ int index;
+ target_ulong ptr;
+
+ if (selector & 0x4) {
+ dt = &env->ldt;
+ } else {
+ dt = &env->gdt;
+ }
+ index = selector & ~7;
+ if ((index + 7) > dt->limit) {
+ return -1;
+ }
+ ptr = dt->base + index;
+ *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
+ *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
+ return 0;
+}
+
+static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
+ uint32_t *e2_ptr, int selector)
+{
+ return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
+}
+
+static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
+{
+ unsigned int limit;
+
+ limit = (e1 & 0xffff) | (e2 & 0x000f0000);
+ if (e2 & DESC_G_MASK) {
+ limit = (limit << 12) | 0xfff;
+ }
+ return limit;
+}
+
+static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
+{
+ return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
+}
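+
+/*
+ * Worked example of the descriptor decoding above (illustrative only): for
+ * the usual flat 4 GiB segment with e1 = 0x0000ffff and e2 = 0x00cf9a00,
+ * get_seg_base() yields 0 and get_seg_limit() yields a raw limit of 0xfffff
+ * which, with DESC_G_MASK set, is scaled to (0xfffff << 12) | 0xfff =
+ * 0xffffffff.
+ */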
+
+static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
+ uint32_t e2)
+{
+ sc->base = get_seg_base(e1, e2);
+ sc->limit = get_seg_limit(e1, e2);
+ sc->flags = e2;
+}
+
+/* init the segment cache in vm86 mode. */
+static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
+{
+ selector &= 0xffff;
+
+ cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_A_MASK | (3 << DESC_DPL_SHIFT));
+}
+
+static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
+ uint32_t *esp_ptr, int dpl,
+ uintptr_t retaddr)
+{
+ X86CPU *cpu = x86_env_get_cpu(env);
+ int type, index, shift;
+
+#if 0
+ {
+ int i;
+ printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
+ for (i = 0; i < env->tr.limit; i++) {
+ printf("%02x ", env->tr.base[i]);
+ if ((i & 7) == 7) {
+ printf("\n");
+ }
+ }
+ printf("\n");
+ }
+#endif
+
+ if (!(env->tr.flags & DESC_P_MASK)) {
+ cpu_abort(CPU(cpu), "invalid tss");
+ }
+ type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
+ if ((type & 7) != 1) {
+ cpu_abort(CPU(cpu), "invalid tss type");
+ }
+ shift = type >> 3;
+ index = (dpl * 4 + 2) << shift;
+ if (index + (4 << shift) - 1 > env->tr.limit) {
+ raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
+ }
+ if (shift == 0) {
+ *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
+ *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
+ } else {
+ *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
+ *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
+ }
+}
+
+static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl,
+ uintptr_t retaddr)
+{
+ uint32_t e1, e2;
+ int rpl, dpl;
+
+ if ((selector & 0xfffc) != 0) {
+ if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
+ raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
+ }
+ if (!(e2 & DESC_S_MASK)) {
+ raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
+ }
+ rpl = selector & 3;
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ if (seg_reg == R_CS) {
+ if (!(e2 & DESC_CS_MASK)) {
+ raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
+ }
+ if (dpl != rpl) {
+ raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
+ }
+ } else if (seg_reg == R_SS) {
+ /* SS must be writable data */
+ if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
+ raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
+ }
+ if (dpl != cpl || dpl != rpl) {
+ raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
+ }
+ } else {
+ /* a non-readable (execute-only) code segment is not allowed */
+ if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
+ raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
+ }
+ /* for data or non-conforming code segments, check the access rights */
+ if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
+ if (dpl < cpl || dpl < rpl) {
+ raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
+ }
+ }
+ }
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
+ }
+ cpu_x86_load_seg_cache(env, seg_reg, selector,
+ get_seg_base(e1, e2),
+ get_seg_limit(e1, e2),
+ e2);
+ } else {
+ if (seg_reg == R_SS || seg_reg == R_CS) {
+ raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
+ }
+ }
+}
+
+#define SWITCH_TSS_JMP 0
+#define SWITCH_TSS_IRET 1
+#define SWITCH_TSS_CALL 2
+
+/* XXX: restore CPU state in registers (PowerPC case) */
+static void switch_tss_ra(CPUX86State *env, int tss_selector,
+ uint32_t e1, uint32_t e2, int source,
+ uint32_t next_eip, uintptr_t retaddr)
+{
+ int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
+ target_ulong tss_base;
+ uint32_t new_regs[8], new_segs[6];
+ uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
+ uint32_t old_eflags, eflags_mask;
+ SegmentCache *dt;
+ int index;
+ target_ulong ptr;
+
+ type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
+ LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
+ source);
+
+ /* if task gate, we read the TSS segment and we load it */
+ if (type == 5) {
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
+ }
+ tss_selector = e1 >> 16;
+ if (tss_selector & 4) {
+ raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
+ }
+ if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
+ }
+ if (e2 & DESC_S_MASK) {
+ raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
+ }
+ type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
+ if ((type & 7) != 1) {
+ raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
+ }
+ }
+
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
+ }
+
+ if (type & 8) {
+ tss_limit_max = 103;
+ } else {
+ tss_limit_max = 43;
+ }
+ tss_limit = get_seg_limit(e1, e2);
+ tss_base = get_seg_base(e1, e2);
+ if ((tss_selector & 4) != 0 ||
+ tss_limit < tss_limit_max) {
+ raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
+ }
+ old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
+ if (old_type & 8) {
+ old_tss_limit_max = 103;
+ } else {
+ old_tss_limit_max = 43;
+ }
+
+ /* read all the registers from the new TSS */
+ if (type & 8) {
+ /* 32 bit */
+ new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
+ new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
+ new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
+ for (i = 0; i < 8; i++) {
+ new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
+ retaddr);
+ }
+ for (i = 0; i < 6; i++) {
+ new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
+ retaddr);
+ }
+ new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
+ new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
+ } else {
+ /* 16 bit */
+ new_cr3 = 0;
+ new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
+ new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
+ for (i = 0; i < 8; i++) {
+ new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2),
+ retaddr) | 0xffff0000;
+ }
+ for (i = 0; i < 4; i++) {
+ new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 4),
+ retaddr);
+ }
+ new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
+ new_segs[R_FS] = 0;
+ new_segs[R_GS] = 0;
+ new_trap = 0;
+ }
+ /* XXX: avoid a compiler warning, see
+ http://support.amd.com/us/Processor_TechDocs/24593.pdf
+ chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
+ (void)new_trap;
+
+ /* NOTE: we must avoid memory exceptions during the task switch,
+ so we make dummy accesses beforehand */
+ /* XXX: it can still fail in some cases, so a bigger hack is
+ necessary to validate the TLB after having done the accesses */
+
+ v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
+ v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
+ cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
+ cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);
+
+ /* clear the busy bit of the old TSS (the switch is still restartable here) */
+ if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
+ target_ulong ptr;
+ uint32_t e2;
+
+ ptr = env->gdt.base + (env->tr.selector & ~7);
+ e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
+ e2 &= ~DESC_TSS_BUSY_MASK;
+ cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
+ }
+ old_eflags = cpu_compute_eflags(env);
+ if (source == SWITCH_TSS_IRET) {
+ old_eflags &= ~NT_MASK;
+ }
+
+ /* save the current state in the old TSS */
+ if (type & 8) {
+ /* 32 bit */
+ cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
+ cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
+ cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
+ cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
+ cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
+ cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
+ cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
+ cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
+ cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
+ cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
+ for (i = 0; i < 6; i++) {
+ cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
+ env->segs[i].selector, retaddr);
+ }
+ } else {
+ /* 16 bit */
+ cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
+ cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
+ cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
+ cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
+ cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
+ cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
+ cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
+ cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
+ cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
+ cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
+ for (i = 0; i < 4; i++) {
+ cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 4),
+ env->segs[i].selector, retaddr);
+ }
+ }
+
+ /* from now on, if an exception occurs, it will occur in the new
+ task's context */
+
+ if (source == SWITCH_TSS_CALL) {
+ cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
+ new_eflags |= NT_MASK;
+ }
+
+ /* set busy bit */
+ if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
+ target_ulong ptr;
+ uint32_t e2;
+
+ ptr = env->gdt.base + (tss_selector & ~7);
+ e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
+ e2 |= DESC_TSS_BUSY_MASK;
+ cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
+ }
+
+ /* set the new CPU state */
+ /* from this point on, any exception that occurs can cause problems */
+ env->cr[0] |= CR0_TS_MASK;
+ env->hflags |= HF_TS_MASK;
+ env->tr.selector = tss_selector;
+ env->tr.base = tss_base;
+ env->tr.limit = tss_limit;
+ env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
+
+ if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
+ cpu_x86_update_cr3(env, new_cr3);
+ }
+
+ /* load all registers without raising exceptions, then reload the
+ segments below, which may fault */
+ env->eip = new_eip;
+ eflags_mask = TF_MASK | AC_MASK | ID_MASK |
+ IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
+ if (!(type & 8)) {
+ eflags_mask &= 0xffff;
+ }
+ cpu_load_eflags(env, new_eflags, eflags_mask);
+ /* XXX: what to do in 16 bit case? */
+ env->regs[R_EAX] = new_regs[0];
+ env->regs[R_ECX] = new_regs[1];
+ env->regs[R_EDX] = new_regs[2];
+ env->regs[R_EBX] = new_regs[3];
+ env->regs[R_ESP] = new_regs[4];
+ env->regs[R_EBP] = new_regs[5];
+ env->regs[R_ESI] = new_regs[6];
+ env->regs[R_EDI] = new_regs[7];
+ if (new_eflags & VM_MASK) {
+ for (i = 0; i < 6; i++) {
+ load_seg_vm(env, i, new_segs[i]);
+ }
+ } else {
+ /* load only the selectors first, as loading the full descriptors may trigger exceptions */
+ for (i = 0; i < 6; i++) {
+ cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
+ }
+ }
+
+ env->ldt.selector = new_ldt & ~4;
+ env->ldt.base = 0;
+ env->ldt.limit = 0;
+ env->ldt.flags = 0;
+
+ /* load the LDT */
+ if (new_ldt & 4) {
+ raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
+ }
+
+ if ((new_ldt & 0xfffc) != 0) {
+ dt = &env->gdt;
+ index = new_ldt & ~7;
+ if ((index + 7) > dt->limit) {
+ raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
+ }
+ ptr = dt->base + index;
+ e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
+ e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
+ if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
+ raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
+ }
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
+ }
+ load_seg_cache_raw_dt(&env->ldt, e1, e2);
+ }
+
+ /* load the segments */
+ if (!(new_eflags & VM_MASK)) {
+ int cpl = new_segs[R_CS] & 3;
+ tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
+ tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
+ tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
+ tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
+ tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
+ tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
+ }
+
+ /* check that env->eip is in the CS segment limits */
+ if (new_eip > env->segs[R_CS].limit) {
+ /* XXX: different exception if CALL? */
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
+ }
+
+#ifndef CONFIG_USER_ONLY
+ /* reset local breakpoints */
+ if (env->dr[7] & DR7_LOCAL_BP_MASK) {
+ cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
+ }
+#endif
+}
+
+static void switch_tss(CPUX86State *env, int tss_selector,
+ uint32_t e1, uint32_t e2, int source,
+ uint32_t next_eip)
+{
+ switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
+}
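+
+/*
+ * For reference, the TSS offsets used by switch_tss_ra() above are, for the
+ * 32-bit TSS: 0x1c CR3, 0x20 EIP, 0x24 EFLAGS, 0x28-0x44 EAX..EDI,
+ * 0x48 + i * 4 the segment selectors, 0x60 the LDT selector and 0x64 the
+ * trap word (read but currently unused); the 16-bit TSS uses 0x0e IP,
+ * 0x10 FLAGS, 0x12 + i * 2 the registers, 0x22 + i * 4 the selectors and
+ * 0x2a the LDT selector.
+ */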
+
+static inline unsigned int get_sp_mask(unsigned int e2)
+{
+ if (e2 & DESC_B_MASK) {
+ return 0xffffffff;
+ } else {
+ return 0xffff;
+ }
+}
+
+static int exception_has_error_code(int intno)
+{
+ switch (intno) {
+ case 8: /* #DF - double fault */
+ case 10: /* #TS - invalid TSS */
+ case 11: /* #NP - segment not present */
+ case 12: /* #SS - stack fault */
+ case 13: /* #GP - general protection fault */
+ case 14: /* #PF - page fault */
+ case 17: /* #AC - alignment check */
+ return 1;
+ }
+ return 0;
+}
+
+#ifdef TARGET_X86_64
+#define SET_ESP(val, sp_mask) \
+ do { \
+ if ((sp_mask) == 0xffff) { \
+ env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | \
+ ((val) & 0xffff); \
+ } else if ((sp_mask) == 0xffffffffLL) { \
+ env->regs[R_ESP] = (uint32_t)(val); \
+ } else { \
+ env->regs[R_ESP] = (val); \
+ } \
+ } while (0)
+#else
+#define SET_ESP(val, sp_mask) \
+ do { \
+ env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) | \
+ ((val) & (sp_mask)); \
+ } while (0)
+#endif
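+
+/*
+ * SET_ESP only updates the bits of ESP/RSP selected by the stack-size mask:
+ * with a 16-bit stack (sp_mask == 0xffff) only SP changes and the upper bits
+ * are preserved, while a 32-bit stack zero-extends the value into RSP on
+ * 64-bit targets.
+ */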
+
+/* On 64-bit targets this addition can overflow, so this segment-addition
+ * macro is used to trim the value to 32 bits whenever needed. */
+#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
+
+/* XXX: add an is_user flag to have proper security support */
+#define PUSHW_RA(ssp, sp, sp_mask, val, ra) \
+ { \
+ sp -= 2; \
+ cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
+ }
+
+#define PUSHL_RA(ssp, sp, sp_mask, val, ra) \
+ { \
+ sp -= 4; \
+ cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
+ }
+
+#define POPW_RA(ssp, sp, sp_mask, val, ra) \
+ { \
+ val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
+ sp += 2; \
+ }
+
+#define POPL_RA(ssp, sp, sp_mask, val, ra) \
+ { \
+ val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
+ sp += 4; \
+ }
+
+#define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
+#define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
+#define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
+#define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
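+
+/*
+ * Illustrative use of the macros above: with a 16-bit stack segment
+ * (sp_mask == 0xffff), PUSHL(ssp, sp, 0xffff, val) decrements sp by 4 and
+ * stores val at ssp + (sp & 0xffff), so the access wraps inside the 64 KiB
+ * stack segment; sp itself is only written back via SET_ESP().
+ */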
+
+/* protected mode interrupt */
+static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
+ int error_code, unsigned int next_eip,
+ int is_hw)
+{
+ SegmentCache *dt;
+ target_ulong ptr, ssp;
+ int type, dpl, selector, ss_dpl, cpl;
+ int has_error_code, new_stack, shift;
+ uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
+ uint32_t old_eip, sp_mask;
+ int vm86 = env->eflags & VM_MASK;
+
+ has_error_code = 0;
+ if (!is_int && !is_hw) {
+ has_error_code = exception_has_error_code(intno);
+ }
+ if (is_int) {
+ old_eip = next_eip;
+ } else {
+ old_eip = env->eip;
+ }
+
+ dt = &env->idt;
+ if (intno * 8 + 7 > dt->limit) {
+ raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
+ }
+ ptr = dt->base + intno * 8;
+ e1 = cpu_ldl_kernel(env, ptr);
+ e2 = cpu_ldl_kernel(env, ptr + 4);
+ /* check gate type */
+ type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
+ switch (type) {
+ case 5: /* task gate */
+ /* must do that check here to return the correct error code */
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
+ }
+ switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
+ if (has_error_code) {
+ int type;
+ uint32_t mask;
+
+ /* push the error code */
+ type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
+ shift = type >> 3;
+ if (env->segs[R_SS].flags & DESC_B_MASK) {
+ mask = 0xffffffff;
+ } else {
+ mask = 0xffff;
+ }
+ esp = (env->regs[R_ESP] - (2 << shift)) & mask;
+ ssp = env->segs[R_SS].base + esp;
+ if (shift) {
+ cpu_stl_kernel(env, ssp, error_code);
+ } else {
+ cpu_stw_kernel(env, ssp, error_code);
+ }
+ SET_ESP(esp, mask);
+ }
+ return;
+ case 6: /* 286 interrupt gate */
+ case 7: /* 286 trap gate */
+ case 14: /* 386 interrupt gate */
+ case 15: /* 386 trap gate */
+ break;
+ default:
+ raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
+ break;
+ }
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ cpl = env->hflags & HF_CPL_MASK;
+ /* check privilege if software int */
+ if (is_int && dpl < cpl) {
+ raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
+ }
+ /* check valid bit */
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
+ }
+ selector = e1 >> 16;
+ offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
+ if ((selector & 0xfffc) == 0) {
+ raise_exception_err(env, EXCP0D_GPF, 0);
+ }
+ if (load_segment(env, &e1, &e2, selector) != 0) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ if (dpl > cpl) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
+ }
+ if (!(e2 & DESC_C_MASK) && dpl < cpl) {
+ /* to inner privilege */
+ get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
+ if ((ss & 0xfffc) == 0) {
+ raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
+ }
+ if ((ss & 3) != dpl) {
+ raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
+ }
+ if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
+ raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
+ }
+ ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
+ if (ss_dpl != dpl) {
+ raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
+ }
+ if (!(ss_e2 & DESC_S_MASK) ||
+ (ss_e2 & DESC_CS_MASK) ||
+ !(ss_e2 & DESC_W_MASK)) {
+ raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
+ }
+ if (!(ss_e2 & DESC_P_MASK)) {
+ raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
+ }
+ new_stack = 1;
+ sp_mask = get_sp_mask(ss_e2);
+ ssp = get_seg_base(ss_e1, ss_e2);
+ } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
+ /* to same privilege */
+ if (vm86) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ new_stack = 0;
+ sp_mask = get_sp_mask(env->segs[R_SS].flags);
+ ssp = env->segs[R_SS].base;
+ esp = env->regs[R_ESP];
+ dpl = cpl;
+ } else {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ new_stack = 0; /* avoid warning */
+ sp_mask = 0; /* avoid warning */
+ ssp = 0; /* avoid warning */
+ esp = 0; /* avoid warning */
+ }
+
+ shift = type >> 3;
+
+#if 0
+ /* XXX: check that enough room is available */
+ push_size = 6 + (new_stack << 2) + (has_error_code << 1);
+ if (vm86) {
+ push_size += 8;
+ }
+ push_size <<= shift;
+#endif
+ if (shift == 1) {
+ if (new_stack) {
+ if (vm86) {
+ PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
+ PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
+ PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
+ PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
+ }
+ PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
+ PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
+ }
+ PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
+ PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
+ PUSHL(ssp, esp, sp_mask, old_eip);
+ if (has_error_code) {
+ PUSHL(ssp, esp, sp_mask, error_code);
+ }
+ } else {
+ if (new_stack) {
+ if (vm86) {
+ PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
+ PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
+ PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
+ PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
+ }
+ PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
+ PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
+ }
+ PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
+ PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
+ PUSHW(ssp, esp, sp_mask, old_eip);
+ if (has_error_code) {
+ PUSHW(ssp, esp, sp_mask, error_code);
+ }
+ }
+
+ /* interrupt gates clear the IF flag */
+ if ((type & 1) == 0) {
+ env->eflags &= ~IF_MASK;
+ }
+ env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
+
+ if (new_stack) {
+ if (vm86) {
+ cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
+ cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
+ cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
+ cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
+ }
+ ss = (ss & ~3) | dpl;
+ cpu_x86_load_seg_cache(env, R_SS, ss,
+ ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
+ }
+ SET_ESP(esp, sp_mask);
+
+ selector = (selector & ~3) | dpl;
+ cpu_x86_load_seg_cache(env, R_CS, selector,
+ get_seg_base(e1, e2),
+ get_seg_limit(e1, e2),
+ e2);
+ env->eip = offset;
+}
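+
+/*
+ * Stack frame built above for a 32-bit gate, from higher to lower addresses
+ * (16-bit gates push word-sized slots instead):
+ *   GS, FS, DS, ES     only when interrupting vm86 code
+ *   SS, ESP            only when switching to an inner-privilege stack
+ *   EFLAGS, CS, EIP
+ *   error code         only for exceptions that define one
+ */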
+
+#ifdef TARGET_X86_64
+
+#define PUSHQ_RA(sp, val, ra) \
+ { \
+ sp -= 8; \
+ cpu_stq_kernel_ra(env, sp, (val), ra); \
+ }
+
+#define POPQ_RA(sp, val, ra) \
+ { \
+ val = cpu_ldq_kernel_ra(env, sp, ra); \
+ sp += 8; \
+ }
+
+#define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
+#define POPQ(sp, val) POPQ_RA(sp, val, 0)
+
+static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
+{
+ X86CPU *cpu = x86_env_get_cpu(env);
+ int index;
+
+#if 0
+ printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
+ env->tr.base, env->tr.limit);
+#endif
+
+ if (!(env->tr.flags & DESC_P_MASK)) {
+ cpu_abort(CPU(cpu), "invalid tss");
+ }
+ index = 8 * level + 4;
+ if ((index + 7) > env->tr.limit) {
+ raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
+ }
+ return cpu_ldq_kernel(env, env->tr.base + index);
+}
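+
+/*
+ * The 64-bit TSS stack pointers are read at offset 8 * level + 4, i.e.
+ * RSP0/RSP1/RSP2 at 0x04/0x0c/0x14 and, via level = ist + 3, IST1..IST7
+ * starting at 0x24.
+ */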
+
+/* 64 bit interrupt */
+static void do_interrupt64(CPUX86State *env, int intno, int is_int,
+ int error_code, target_ulong next_eip, int is_hw)
+{
+ SegmentCache *dt;
+ target_ulong ptr;
+ int type, dpl, selector, cpl, ist;
+ int has_error_code, new_stack;
+ uint32_t e1, e2, e3, ss;
+ target_ulong old_eip, esp, offset;
+
+ has_error_code = 0;
+ if (!is_int && !is_hw) {
+ has_error_code = exception_has_error_code(intno);
+ }
+ if (is_int) {
+ old_eip = next_eip;
+ } else {
+ old_eip = env->eip;
+ }
+
+ dt = &env->idt;
+ if (intno * 16 + 15 > dt->limit) {
+ raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
+ }
+ ptr = dt->base + intno * 16;
+ e1 = cpu_ldl_kernel(env, ptr);
+ e2 = cpu_ldl_kernel(env, ptr + 4);
+ e3 = cpu_ldl_kernel(env, ptr + 8);
+ /* check gate type */
+ type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
+ switch (type) {
+ case 14: /* 386 interrupt gate */
+ case 15: /* 386 trap gate */
+ break;
+ default:
+ raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
+ break;
+ }
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ cpl = env->hflags & HF_CPL_MASK;
+ /* check privilege if software int */
+ if (is_int && dpl < cpl) {
+ raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
+ }
+ /* check valid bit */
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
+ }
+ selector = e1 >> 16;
+ offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
+ ist = e2 & 7;
+ if ((selector & 0xfffc) == 0) {
+ raise_exception_err(env, EXCP0D_GPF, 0);
+ }
+
+ if (load_segment(env, &e1, &e2, selector) != 0) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ if (dpl > cpl) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
+ }
+ if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
+ /* to inner privilege */
+ new_stack = 1;
+ esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
+ ss = 0;
+ } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
+ /* to same privilege */
+ if (env->eflags & VM_MASK) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ new_stack = 0;
+ esp = env->regs[R_ESP];
+ dpl = cpl;
+ } else {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ new_stack = 0; /* avoid warning */
+ esp = 0; /* avoid warning */
+ }
+ esp &= ~0xfLL; /* align stack */
+
+ PUSHQ(esp, env->segs[R_SS].selector);
+ PUSHQ(esp, env->regs[R_ESP]);
+ PUSHQ(esp, cpu_compute_eflags(env));
+ PUSHQ(esp, env->segs[R_CS].selector);
+ PUSHQ(esp, old_eip);
+ if (has_error_code) {
+ PUSHQ(esp, error_code);
+ }
+
+ /* interrupt gates clear the IF flag */
+ if ((type & 1) == 0) {
+ env->eflags &= ~IF_MASK;
+ }
+ env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
+
+ if (new_stack) {
+ ss = 0 | dpl;
+ cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
+ }
+ env->regs[R_ESP] = esp;
+
+ selector = (selector & ~3) | dpl;
+ cpu_x86_load_seg_cache(env, R_CS, selector,
+ get_seg_base(e1, e2),
+ get_seg_limit(e1, e2),
+ e2);
+ env->eip = offset;
+}
+#endif
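+
+/*
+ * Unlike the 32-bit path, the 64-bit frame above always pushes SS:RSP, the
+ * stack is first aligned down to 16 bytes, and a null SS selector (with the
+ * new DPL as its RPL) is loaded when switching to an inner stack.
+ */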
+
+#ifdef TARGET_X86_64
+#if defined(CONFIG_USER_ONLY)
+void helper_syscall(CPUX86State *env, int next_eip_addend)
+{
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+
+ cs->exception_index = EXCP_SYSCALL;
+ env->exception_next_eip = env->eip + next_eip_addend;
+ cpu_loop_exit(cs);
+}
+#else
+void helper_syscall(CPUX86State *env, int next_eip_addend)
+{
+ int selector;
+
+ if (!(env->efer & MSR_EFER_SCE)) {
+ raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
+ }
+ selector = (env->star >> 32) & 0xffff;
+ if (env->hflags & HF_LMA_MASK) {
+ int code64;
+
+ env->regs[R_ECX] = env->eip + next_eip_addend;
+ env->regs[11] = cpu_compute_eflags(env);
+
+ code64 = env->hflags & HF_CS64_MASK;
+
+ env->eflags &= ~env->fmask;
+ cpu_load_eflags(env, env->eflags, 0);
+ cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_P_MASK |
+ DESC_S_MASK |
+ DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
+ DESC_L_MASK);
+ cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK |
+ DESC_W_MASK | DESC_A_MASK);
+ if (code64) {
+ env->eip = env->lstar;
+ } else {
+ env->eip = env->cstar;
+ }
+ } else {
+ env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);
+
+ env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
+ cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK |
+ DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
+ cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK |
+ DESC_W_MASK | DESC_A_MASK);
+ env->eip = (uint32_t)env->star;
+ }
+}
+#endif
+#endif
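+
+/*
+ * The selector arithmetic in helper_syscall() above and helper_sysret()
+ * below follows the MSR_STAR layout: bits 47:32 give the SYSCALL CS (SS is
+ * that value + 8) and bits 63:48 the SYSRET base (CS = base | 3, or
+ * base + 16 for a 64-bit SYSRET; SS = base + 8), with the entry point taken
+ * from LSTAR/CSTAR in long mode and from the low 32 bits of STAR otherwise.
+ */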
+
+#ifdef TARGET_X86_64
+void helper_sysret(CPUX86State *env, int dflag)
+{
+ int cpl, selector;
+
+ if (!(env->efer & MSR_EFER_SCE)) {
+ raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
+ }
+ cpl = env->hflags & HF_CPL_MASK;
+ if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+ }
+ selector = (env->star >> 48) & 0xffff;
+ if (env->hflags & HF_LMA_MASK) {
+ cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
+ | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
+ NT_MASK);
+ if (dflag == 2) {
+ cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_P_MASK |
+ DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+ DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
+ DESC_L_MASK);
+ env->eip = env->regs[R_ECX];
+ } else {
+ cpu_x86_load_seg_cache(env, R_CS, selector | 3,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+ DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
+ env->eip = (uint32_t)env->regs[R_ECX];
+ }
+ cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+ DESC_W_MASK | DESC_A_MASK);
+ } else {
+ env->eflags |= IF_MASK;
+ cpu_x86_load_seg_cache(env, R_CS, selector | 3,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+ DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
+ env->eip = (uint32_t)env->regs[R_ECX];
+ cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+ DESC_W_MASK | DESC_A_MASK);
+ }
+}
+#endif
+
+/* real mode interrupt */
+static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
+ int error_code, unsigned int next_eip)
+{
+ SegmentCache *dt;
+ target_ulong ptr, ssp;
+ int selector;
+ uint32_t offset, esp;
+ uint32_t old_cs, old_eip;
+
+ /* real mode (simpler!) */
+ dt = &env->idt;
+ if (intno * 4 + 3 > dt->limit) {
+ raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
+ }
+ ptr = dt->base + intno * 4;
+ offset = cpu_lduw_kernel(env, ptr);
+ selector = cpu_lduw_kernel(env, ptr + 2);
+ esp = env->regs[R_ESP];
+ ssp = env->segs[R_SS].base;
+ if (is_int) {
+ old_eip = next_eip;
+ } else {
+ old_eip = env->eip;
+ }
+ old_cs = env->segs[R_CS].selector;
+ /* XXX: use SS segment size? */
+ PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
+ PUSHW(ssp, esp, 0xffff, old_cs);
+ PUSHW(ssp, esp, 0xffff, old_eip);
+
+ /* update processor state */
+ env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
+ env->eip = offset;
+ env->segs[R_CS].selector = selector;
+ env->segs[R_CS].base = (selector << 4);
+ env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
+}
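+
+/*
+ * Real-mode vectors are read from 4-byte IVT entries at idt.base +
+ * intno * 4 (a 16-bit offset followed by a 16-bit segment), and only FLAGS,
+ * CS and IP are pushed on the 16-bit stack.
+ */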
+
+#if defined(CONFIG_USER_ONLY)
+/* fake user mode interrupt. is_int is TRUE if coming from the int
+ * instruction. next_eip is the env->eip value AFTER the interrupt
+ * instruction. It is only relevant if is_int is TRUE or if intno
+ * is EXCP_SYSCALL.
+ */
+static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
+ int error_code, target_ulong next_eip)
+{
+ if (is_int) {
+ SegmentCache *dt;
+ target_ulong ptr;
+ int dpl, cpl, shift;
+ uint32_t e2;
+
+ dt = &env->idt;
+ if (env->hflags & HF_LMA_MASK) {
+ shift = 4;
+ } else {
+ shift = 3;
+ }
+ ptr = dt->base + (intno << shift);
+ e2 = cpu_ldl_kernel(env, ptr + 4);
+
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ cpl = env->hflags & HF_CPL_MASK;
+ /* check privilege if software int */
+ if (dpl < cpl) {
+ raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
+ }
+ }
+
+ /* Since we emulate only user space, we cannot do more than exit
+ the emulation with the appropriate exception and error code. So
+ update EIP for INT 0x80 and EXCP_SYSCALL. */
+ if (is_int || intno == EXCP_SYSCALL) {
+ env->eip = next_eip;
+ }
+}
+
+#else
+
+static void handle_even_inj(CPUX86State *env, int intno, int is_int,
+ int error_code, int is_hw, int rm)
+{
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+ uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+ control.event_inj));
+
+ if (!(event_inj & SVM_EVTINJ_VALID)) {
+ int type;
+
+ if (is_int) {
+ type = SVM_EVTINJ_TYPE_SOFT;
+ } else {
+ type = SVM_EVTINJ_TYPE_EXEPT;
+ }
+ event_inj = intno | type | SVM_EVTINJ_VALID;
+ if (!rm && exception_has_error_code(intno)) {
+ event_inj |= SVM_EVTINJ_VALID_ERR;
+ x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+ control.event_inj_err),
+ error_code);
+ }
+ x86_stl_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
+ event_inj);
+ }
+}
+#endif
+
+/*
+ * Begin execution of an interrupt. is_int is TRUE if coming from
+ * the int instruction. next_eip is the env->eip value AFTER the interrupt
+ * instruction. It is only relevant if is_int is TRUE.
+ */
+static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
+ int error_code, target_ulong next_eip, int is_hw)
+{
+ CPUX86State *env = &cpu->env;
+
+ if (qemu_loglevel_mask(CPU_LOG_INT)) {
+ if ((env->cr[0] & CR0_PE_MASK)) {
+ static int count;
+
+ qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
+ " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
+ count, intno, error_code, is_int,
+ env->hflags & HF_CPL_MASK,
+ env->segs[R_CS].selector, env->eip,
+ (int)env->segs[R_CS].base + env->eip,
+ env->segs[R_SS].selector, env->regs[R_ESP]);
+ if (intno == 0x0e) {
+ qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
+ } else {
+ qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
+ }
+ qemu_log("\n");
+ log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
+#if 0
+ {
+ int i;
+ target_ulong ptr;
+
+ qemu_log(" code=");
+ ptr = env->segs[R_CS].base + env->eip;
+ for (i = 0; i < 16; i++) {
+ qemu_log(" %02x", ldub(ptr + i));
+ }
+ qemu_log("\n");
+ }
+#endif
+ count++;
+ }
+ }
+ if (env->cr[0] & CR0_PE_MASK) {
+#if !defined(CONFIG_USER_ONLY)
+ if (env->hflags & HF_SVMI_MASK) {
+ handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
+ }
+#endif
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_LMA_MASK) {
+ do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
+ } else
+#endif
+ {
+ do_interrupt_protected(env, intno, is_int, error_code, next_eip,
+ is_hw);
+ }
+ } else {
+#if !defined(CONFIG_USER_ONLY)
+ if (env->hflags & HF_SVMI_MASK) {
+ handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
+ }
+#endif
+ do_interrupt_real(env, intno, is_int, error_code, next_eip);
+ }
+
+#if !defined(CONFIG_USER_ONLY)
+ if (env->hflags & HF_SVMI_MASK) {
+ CPUState *cs = CPU(cpu);
+ uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
+ offsetof(struct vmcb,
+ control.event_inj));
+
+ x86_stl_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
+ event_inj & ~SVM_EVTINJ_VALID);
+ }
+#endif
+}
+
+void x86_cpu_do_interrupt(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+#if defined(CONFIG_USER_ONLY)
+ /* in user-mode-only emulation, we simulate a fake exception which
+ will be handled outside the CPU execution loop */
+ do_interrupt_user(env, cs->exception_index,
+ env->exception_is_int,
+ env->error_code,
+ env->exception_next_eip);
+ /* successfully delivered */
+ env->old_exception = -1;
+#else
+ /* simulate a real CPU exception. On i386, delivering it can itself
+ trigger new exceptions, but double and triple faults are not
+ handled yet. */
+ do_interrupt_all(cpu, cs->exception_index,
+ env->exception_is_int,
+ env->error_code,
+ env->exception_next_eip, 0);
+ /* successfully delivered */
+ env->old_exception = -1;
+#endif
+}
+
+void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
+{
+ do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
+}
+
+bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ bool ret = false;
+
+#if !defined(CONFIG_USER_ONLY)
+ if (interrupt_request & CPU_INTERRUPT_POLL) {
+ cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
+ apic_poll_irq(cpu->apic_state);
+ /* Don't process multiple interrupt requests in a single call.
+ This is required to make icount-driven execution deterministic. */
+ return true;
+ }
+#endif
+ if (interrupt_request & CPU_INTERRUPT_SIPI) {
+ do_cpu_sipi(cpu);
+ } else if (env->hflags2 & HF2_GIF_MASK) {
+ if ((interrupt_request & CPU_INTERRUPT_SMI) &&
+ !(env->hflags & HF_SMM_MASK)) {
+ cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0);
+ cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
+ do_smm_enter(cpu);
+ ret = true;
+ } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
+ !(env->hflags2 & HF2_NMI_MASK)) {
+ cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
+ env->hflags2 |= HF2_NMI_MASK;
+ do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
+ ret = true;
+ } else if (interrupt_request & CPU_INTERRUPT_MCE) {
+ cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
+ do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
+ ret = true;
+ } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
+ (((env->hflags2 & HF2_VINTR_MASK) &&
+ (env->hflags2 & HF2_HIF_MASK)) ||
+ (!(env->hflags2 & HF2_VINTR_MASK) &&
+ (env->eflags & IF_MASK &&
+ !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
+ int intno;
+ cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0);
+ cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
+ CPU_INTERRUPT_VIRQ);
+ intno = cpu_get_pic_interrupt(env);
+ qemu_log_mask(CPU_LOG_TB_IN_ASM,
+ "Servicing hardware INT=0x%02x\n", intno);
+ do_interrupt_x86_hardirq(env, intno, 1);
+ /* ensure that no TB jump will be modified, as the
+ program flow has changed */
+ ret = true;
+#if !defined(CONFIG_USER_ONLY)
+ } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
+ (env->eflags & IF_MASK) &&
+ !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
+ int intno;
+ /* FIXME: this should respect TPR */
+ cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0);
+ intno = x86_ldl_phys(cs, env->vm_vmcb
+ + offsetof(struct vmcb, control.int_vector));
+ qemu_log_mask(CPU_LOG_TB_IN_ASM,
+ "Servicing virtual hardware INT=0x%02x\n", intno);
+ do_interrupt_x86_hardirq(env, intno, 1);
+ cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
+ ret = true;
+#endif
+ }
+ }
+
+ return ret;
+}
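+
+/*
+ * The chain of checks above gives the delivery priority used here: POLL,
+ * then SIPI, then (with GIF set) SMI, NMI, MCE, external interrupts and
+ * finally virtual interrupts; at most one request is handled per call.
+ */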
+
+void helper_lldt(CPUX86State *env, int selector)
+{
+ SegmentCache *dt;
+ uint32_t e1, e2;
+ int index, entry_limit;
+ target_ulong ptr;
+
+ selector &= 0xffff;
+ if ((selector & 0xfffc) == 0) {
+ /* XXX: NULL selector case: invalid LDT */
+ env->ldt.base = 0;
+ env->ldt.limit = 0;
+ } else {
+ if (selector & 0x4) {
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+ }
+ dt = &env->gdt;
+ index = selector & ~7;
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_LMA_MASK) {
+ entry_limit = 15;
+ } else
+#endif
+ {
+ entry_limit = 7;
+ }
+ if ((index + entry_limit) > dt->limit) {
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+ }
+ ptr = dt->base + index;
+ e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
+ e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
+ if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+ }
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
+ }
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_LMA_MASK) {
+ uint32_t e3;
+
+ e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
+ load_seg_cache_raw_dt(&env->ldt, e1, e2);
+ env->ldt.base |= (target_ulong)e3 << 32;
+ } else
+#endif
+ {
+ load_seg_cache_raw_dt(&env->ldt, e1, e2);
+ }
+ }
+ env->ldt.selector = selector;
+}
+
+void helper_ltr(CPUX86State *env, int selector)
+{
+ SegmentCache *dt;
+ uint32_t e1, e2;
+ int index, type, entry_limit;
+ target_ulong ptr;
+
+ selector &= 0xffff;
+ if ((selector & 0xfffc) == 0) {
+ /* NULL selector case: invalid TR */
+ env->tr.base = 0;
+ env->tr.limit = 0;
+ env->tr.flags = 0;
+ } else {
+ if (selector & 0x4) {
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+ }
+ dt = &env->gdt;
+ index = selector & ~7;
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_LMA_MASK) {
+ entry_limit = 15;
+ } else
+#endif
+ {
+ entry_limit = 7;
+ }
+ if ((index + entry_limit) > dt->limit) {
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+ }
+ ptr = dt->base + index;
+ e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
+ e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
+ type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
+ if ((e2 & DESC_S_MASK) ||
+ (type != 1 && type != 9)) {
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+ }
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
+ }
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_LMA_MASK) {
+ uint32_t e3, e4;
+
+ e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
+ e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
+ if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+ }
+ load_seg_cache_raw_dt(&env->tr, e1, e2);
+ env->tr.base |= (target_ulong)e3 << 32;
+ } else
+#endif
+ {
+ load_seg_cache_raw_dt(&env->tr, e1, e2);
+ }
+ e2 |= DESC_TSS_BUSY_MASK;
+ cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
+ }
+ env->tr.selector = selector;
+}
+
+/* only valid in protected mode and outside VM86; seg_reg must not be R_CS */
+void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
+{
+ uint32_t e1, e2;
+ int cpl, dpl, rpl;
+ SegmentCache *dt;
+ int index;
+ target_ulong ptr;
+
+ selector &= 0xffff;
+ cpl = env->hflags & HF_CPL_MASK;
+ if ((selector & 0xfffc) == 0) {
+ /* null selector case */
+ if (seg_reg == R_SS
+#ifdef TARGET_X86_64
+ && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
+#endif
+ ) {
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+ }
+ cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
+ } else {
+
+ if (selector & 0x4) {
+ dt = &env->ldt;
+ } else {
+ dt = &env->gdt;
+ }
+ index = selector & ~7;
+ if ((index + 7) > dt->limit) {
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+ }
+ ptr = dt->base + index;
+ e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
+ e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
+
+ if (!(e2 & DESC_S_MASK)) {
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+ }
+ rpl = selector & 3;
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ if (seg_reg == R_SS) {
+ /* must be writable segment */
+ if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+ }
+ if (rpl != cpl || dpl != cpl) {
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+ }
+ } else {
+ /* must be readable segment */
+ if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+ }
+
+ if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
+ /* if not a conforming code segment, check the access rights */
+ if (dpl < cpl || dpl < rpl) {
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+ }
+ }
+ }
+
+ if (!(e2 & DESC_P_MASK)) {
+ if (seg_reg == R_SS) {
+ raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
+ } else {
+ raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
+ }
+ }
+
+ /* set the access bit if not already set */
+ if (!(e2 & DESC_A_MASK)) {
+ e2 |= DESC_A_MASK;
+ cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
+ }
+
+ cpu_x86_load_seg_cache(env, seg_reg, selector,
+ get_seg_base(e1, e2),
+ get_seg_limit(e1, e2),
+ e2);
+#if 0
+ qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
+ selector, (unsigned long)sc->base, sc->limit, sc->flags);
+#endif
+ }
+}
+
+/* protected mode jump */
+void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
+ target_ulong next_eip)
+{
+ int gate_cs, type;
+ uint32_t e1, e2, cpl, dpl, rpl, limit;
+
+ if ((new_cs & 0xfffc) == 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+ }
+ if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ }
+ cpl = env->hflags & HF_CPL_MASK;
+ if (e2 & DESC_S_MASK) {
+ if (!(e2 & DESC_CS_MASK)) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ }
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ if (e2 & DESC_C_MASK) {
+ /* conforming code segment */
+ if (dpl > cpl) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ }
+ } else {
+ /* non conforming code segment */
+ rpl = new_cs & 3;
+ if (rpl > cpl) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ }
+ if (dpl != cpl) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ }
+ }
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
+ }
+ limit = get_seg_limit(e1, e2);
+ if (new_eip > limit &&
+ !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ }
+ cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
+ get_seg_base(e1, e2), limit, e2);
+ env->eip = new_eip;
+ } else {
+ /* jump to call or task gate */
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ rpl = new_cs & 3;
+ cpl = env->hflags & HF_CPL_MASK;
+ type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
+ switch (type) {
+ case 1: /* 286 TSS */
+ case 9: /* 386 TSS */
+ case 5: /* task gate */
+ if (dpl < cpl || dpl < rpl) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ }
+ switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
+ break;
+ case 4: /* 286 call gate */
+ case 12: /* 386 call gate */
+ if ((dpl < cpl) || (dpl < rpl)) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ }
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
+ }
+ gate_cs = e1 >> 16;
+ new_eip = (e1 & 0xffff);
+ if (type == 12) {
+ new_eip |= (e2 & 0xffff0000);
+ }
+ if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
+ }
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ /* must be code segment */
+ if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
+ (DESC_S_MASK | DESC_CS_MASK))) {
+ raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
+ }
+ if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
+ (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
+ raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
+ }
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
+ }
+ limit = get_seg_limit(e1, e2);
+ if (new_eip > limit) {
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+ }
+ cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
+ get_seg_base(e1, e2), limit, e2);
+ env->eip = new_eip;
+ break;
+ default:
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ break;
+ }
+ }
+}
+
+/* real mode call */
+void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
+ int shift, int next_eip)
+{
+ int new_eip;
+ uint32_t esp, esp_mask;
+ target_ulong ssp;
+
+ new_eip = new_eip1;
+ esp = env->regs[R_ESP];
+ esp_mask = get_sp_mask(env->segs[R_SS].flags);
+ ssp = env->segs[R_SS].base;
+ if (shift) {
+ PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
+ PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
+ } else {
+ PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
+ PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
+ }
+
+ SET_ESP(esp, esp_mask);
+ env->eip = new_eip;
+ env->segs[R_CS].selector = new_cs;
+ env->segs[R_CS].base = (new_cs << 4);
+}
+
+/* protected mode call */
+void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
+ int shift, target_ulong next_eip)
+{
+ int new_stack, i;
+ uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
+ uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
+ uint32_t val, limit, old_sp_mask;
+ target_ulong ssp, old_ssp;
+
+ LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
+ LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
+ if ((new_cs & 0xfffc) == 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+ }
+ if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ }
+ cpl = env->hflags & HF_CPL_MASK;
+ LOG_PCALL("desc=%08x:%08x\n", e1, e2);
+ if (e2 & DESC_S_MASK) {
+ if (!(e2 & DESC_CS_MASK)) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ }
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ if (e2 & DESC_C_MASK) {
+ /* conforming code segment */
+ if (dpl > cpl) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ }
+ } else {
+ /* non conforming code segment */
+ rpl = new_cs & 3;
+ if (rpl > cpl) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ }
+ if (dpl != cpl) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ }
+ }
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
+ }
+
+#ifdef TARGET_X86_64
+ /* XXX: check 16/32 bit cases in long mode */
+ if (shift == 2) {
+ target_ulong rsp;
+
+ /* 64 bit case */
+ rsp = env->regs[R_ESP];
+ PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
+ PUSHQ_RA(rsp, next_eip, GETPC());
+ /* from this point, not restartable */
+ env->regs[R_ESP] = rsp;
+ cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
+ get_seg_base(e1, e2),
+ get_seg_limit(e1, e2), e2);
+ env->eip = new_eip;
+ } else
+#endif
+ {
+ sp = env->regs[R_ESP];
+ sp_mask = get_sp_mask(env->segs[R_SS].flags);
+ ssp = env->segs[R_SS].base;
+ if (shift) {
+ PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
+ PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
+ } else {
+ PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
+ PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
+ }
+
+ limit = get_seg_limit(e1, e2);
+ if (new_eip > limit) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ }
+ /* from this point, not restartable */
+ SET_ESP(sp, sp_mask);
+ cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
+ get_seg_base(e1, e2), limit, e2);
+ env->eip = new_eip;
+ }
+ } else {
+ /* check gate type */
+ type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ rpl = new_cs & 3;
+ switch (type) {
+ case 1: /* available 286 TSS */
+ case 9: /* available 386 TSS */
+ case 5: /* task gate */
+ if (dpl < cpl || dpl < rpl) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ }
+ switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
+ return;
+ case 4: /* 286 call gate */
+ case 12: /* 386 call gate */
+ break;
+ default:
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ break;
+ }
+ shift = type >> 3;
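+        /* type 4 (286 gate) >> 3 == 0, type 12 (386 gate) >> 3 == 1,
+           selecting 16-bit vs 32-bit pushes below */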
+
+ if (dpl < cpl || dpl < rpl) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
+ }
+ /* check valid bit */
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
+ }
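+        /* decode the call gate: e1 holds the target selector (bits 16-31)
+           and the low 16 bits of the offset, e2 holds the high 16 bits of
+           the offset and the parameter count (bits 0-4) */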
+ selector = e1 >> 16;
+ offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
+ param_count = e2 & 0x1f;
+ if ((selector & 0xfffc) == 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+ }
+
+ if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+ }
+ if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+ }
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ if (dpl > cpl) {
+ raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
+ }
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
+ }
+
+ if (!(e2 & DESC_C_MASK) && dpl < cpl) {
+ /* to inner privilege */
+ get_ss_esp_from_tss(env, &ss, &sp, dpl, GETPC());
+ LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
+ TARGET_FMT_lx "\n", ss, sp, param_count,
+ env->regs[R_ESP]);
+ if ((ss & 0xfffc) == 0) {
+ raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
+ }
+ if ((ss & 3) != dpl) {
+ raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
+ }
+ if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
+ raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
+ }
+ ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
+ if (ss_dpl != dpl) {
+ raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
+ }
+ if (!(ss_e2 & DESC_S_MASK) ||
+ (ss_e2 & DESC_CS_MASK) ||
+ !(ss_e2 & DESC_W_MASK)) {
+ raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
+ }
+ if (!(ss_e2 & DESC_P_MASK)) {
+ raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
+ }
+
+ /* push_size = ((param_count * 2) + 8) << shift; */
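+            /* switch to the inner stack taken from the TSS and copy
+               param_count parameters from the caller's stack; the old
+               SS:ESP are pushed first so that a far return can restore
+               them */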
+
+ old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
+ old_ssp = env->segs[R_SS].base;
+
+ sp_mask = get_sp_mask(ss_e2);
+ ssp = get_seg_base(ss_e1, ss_e2);
+ if (shift) {
+ PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
+ PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
+ for (i = param_count - 1; i >= 0; i--) {
+ val = cpu_ldl_kernel_ra(env, old_ssp +
+ ((env->regs[R_ESP] + i * 4) &
+ old_sp_mask), GETPC());
+ PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
+ }
+ } else {
+ PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
+ PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
+ for (i = param_count - 1; i >= 0; i--) {
+ val = cpu_lduw_kernel_ra(env, old_ssp +
+ ((env->regs[R_ESP] + i * 2) &
+ old_sp_mask), GETPC());
+ PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
+ }
+ }
+ new_stack = 1;
+ } else {
+ /* to same privilege */
+ sp = env->regs[R_ESP];
+ sp_mask = get_sp_mask(env->segs[R_SS].flags);
+ ssp = env->segs[R_SS].base;
+ /* push_size = (4 << shift); */
+ new_stack = 0;
+ }
+
+ if (shift) {
+ PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
+ PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
+ } else {
+ PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
+ PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
+ }
+
+ /* from this point, not restartable */
+
+ if (new_stack) {
+ ss = (ss & ~3) | dpl;
+ cpu_x86_load_seg_cache(env, R_SS, ss,
+ ssp,
+ get_seg_limit(ss_e1, ss_e2),
+ ss_e2);
+ }
+
+ selector = (selector & ~3) | dpl;
+ cpu_x86_load_seg_cache(env, R_CS, selector,
+ get_seg_base(e1, e2),
+ get_seg_limit(e1, e2),
+ e2);
+ SET_ESP(sp, sp_mask);
+ env->eip = offset;
+ }
+}
+
+/* real and vm86 mode iret */
+void helper_iret_real(CPUX86State *env, int shift)
+{
+ uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
+ target_ulong ssp;
+ int eflags_mask;
+
+    sp_mask = 0xffff; /* XXX: use SS segment size? */
+ sp = env->regs[R_ESP];
+ ssp = env->segs[R_SS].base;
+ if (shift == 1) {
+ /* 32 bits */
+ POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
+ POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
+ new_cs &= 0xffff;
+ POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
+ } else {
+ /* 16 bits */
+ POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
+ POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
+ POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
+ }
+ env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
+ env->segs[R_CS].selector = new_cs;
+ env->segs[R_CS].base = (new_cs << 4);
+ env->eip = new_eip;
+ if (env->eflags & VM_MASK) {
+ eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
+ NT_MASK;
+ } else {
+ eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
+ RF_MASK | NT_MASK;
+ }
+ if (shift == 0) {
+ eflags_mask &= 0xffff;
+ }
+ cpu_load_eflags(env, new_eflags, eflags_mask);
+ env->hflags2 &= ~HF2_NMI_MASK;
+}
+
+static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
+{
+ int dpl;
+ uint32_t e2;
+
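+    /* when returning to an outer privilege level, segment registers holding
+       a data or non-conforming code segment whose DPL is below the new CPL
+       are cleared */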
+    /* XXX: on x86_64, FS and GS are not nullified because they may still
+       contain a valid base; it is unclear how a real x86_64 CPU behaves
+       here */
+ if ((seg_reg == R_FS || seg_reg == R_GS) &&
+ (env->segs[seg_reg].selector & 0xfffc) == 0) {
+ return;
+ }
+
+ e2 = env->segs[seg_reg].flags;
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
+        /* data or non-conforming code segment */
+ if (dpl < cpl) {
+ cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
+ }
+ }
+}
+
+/* protected mode iret */
+static inline void helper_ret_protected(CPUX86State *env, int shift,
+ int is_iret, int addend,
+ uintptr_t retaddr)
+{
+ uint32_t new_cs, new_eflags, new_ss;
+ uint32_t new_es, new_ds, new_fs, new_gs;
+ uint32_t e1, e2, ss_e1, ss_e2;
+ int cpl, dpl, rpl, eflags_mask, iopl;
+ target_ulong ssp, sp, new_eip, new_esp, sp_mask;
+
+#ifdef TARGET_X86_64
+ if (shift == 2) {
+ sp_mask = -1;
+ } else
+#endif
+ {
+ sp_mask = get_sp_mask(env->segs[R_SS].flags);
+ }
+ sp = env->regs[R_ESP];
+ ssp = env->segs[R_SS].base;
+ new_eflags = 0; /* avoid warning */
+#ifdef TARGET_X86_64
+ if (shift == 2) {
+ POPQ_RA(sp, new_eip, retaddr);
+ POPQ_RA(sp, new_cs, retaddr);
+ new_cs &= 0xffff;
+ if (is_iret) {
+ POPQ_RA(sp, new_eflags, retaddr);
+ }
+ } else
+#endif
+ {
+ if (shift == 1) {
+ /* 32 bits */
+ POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
+ POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
+ new_cs &= 0xffff;
+ if (is_iret) {
+ POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
+ if (new_eflags & VM_MASK) {
+ goto return_to_vm86;
+ }
+ }
+ } else {
+ /* 16 bits */
+ POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
+ POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
+ if (is_iret) {
+ POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
+ }
+ }
+ }
+ LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
+ new_cs, new_eip, shift, addend);
+ LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
+ if ((new_cs & 0xfffc) == 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
+ }
+ if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
+ }
+ if (!(e2 & DESC_S_MASK) ||
+ !(e2 & DESC_CS_MASK)) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
+ }
+ cpl = env->hflags & HF_CPL_MASK;
+ rpl = new_cs & 3;
+ if (rpl < cpl) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
+ }
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ if (e2 & DESC_C_MASK) {
+ if (dpl > rpl) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
+ }
+ } else {
+ if (dpl != rpl) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
+ }
+ }
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
+ }
+
+ sp += addend;
+ if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
+ ((env->hflags & HF_CS64_MASK) && !is_iret))) {
+ /* return to same privilege level */
+ cpu_x86_load_seg_cache(env, R_CS, new_cs,
+ get_seg_base(e1, e2),
+ get_seg_limit(e1, e2),
+ e2);
+ } else {
+ /* return to different privilege level */
+#ifdef TARGET_X86_64
+ if (shift == 2) {
+ POPQ_RA(sp, new_esp, retaddr);
+ POPQ_RA(sp, new_ss, retaddr);
+ new_ss &= 0xffff;
+ } else
+#endif
+ {
+ if (shift == 1) {
+ /* 32 bits */
+ POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
+ POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
+ new_ss &= 0xffff;
+ } else {
+ /* 16 bits */
+ POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
+ POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
+ }
+ }
+ LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
+ new_ss, new_esp);
+ if ((new_ss & 0xfffc) == 0) {
+#ifdef TARGET_X86_64
+ /* NULL ss is allowed in long mode if cpl != 3 */
+ /* XXX: test CS64? */
+ if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
+ cpu_x86_load_seg_cache(env, R_SS, new_ss,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
+ DESC_W_MASK | DESC_A_MASK);
+ ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
+ } else
+#endif
+ {
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
+ }
+ } else {
+ if ((new_ss & 3) != rpl) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
+ }
+ if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
+ }
+ if (!(ss_e2 & DESC_S_MASK) ||
+ (ss_e2 & DESC_CS_MASK) ||
+ !(ss_e2 & DESC_W_MASK)) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
+ }
+ dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
+ if (dpl != rpl) {
+ raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
+ }
+ if (!(ss_e2 & DESC_P_MASK)) {
+ raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
+ }
+ cpu_x86_load_seg_cache(env, R_SS, new_ss,
+ get_seg_base(ss_e1, ss_e2),
+ get_seg_limit(ss_e1, ss_e2),
+ ss_e2);
+ }
+
+ cpu_x86_load_seg_cache(env, R_CS, new_cs,
+ get_seg_base(e1, e2),
+ get_seg_limit(e1, e2),
+ e2);
+ sp = new_esp;
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_CS64_MASK) {
+ sp_mask = -1;
+ } else
+#endif
+ {
+ sp_mask = get_sp_mask(ss_e2);
+ }
+
+ /* validate data segments */
+ validate_seg(env, R_ES, rpl);
+ validate_seg(env, R_DS, rpl);
+ validate_seg(env, R_FS, rpl);
+ validate_seg(env, R_GS, rpl);
+
+ sp += addend;
+ }
+ SET_ESP(sp, sp_mask);
+ env->eip = new_eip;
+ if (is_iret) {
+ /* NOTE: 'cpl' is the _old_ CPL */
+ eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
+ if (cpl == 0) {
+ eflags_mask |= IOPL_MASK;
+ }
+ iopl = (env->eflags >> IOPL_SHIFT) & 3;
+ if (cpl <= iopl) {
+ eflags_mask |= IF_MASK;
+ }
+ if (shift == 0) {
+ eflags_mask &= 0xffff;
+ }
+ cpu_load_eflags(env, new_eflags, eflags_mask);
+ }
+ return;
+
+ return_to_vm86:
+ POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
+ POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
+ POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
+ POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
+ POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
+ POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);
+
+ /* modify processor state */
+ cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
+ IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
+ VIP_MASK);
+ load_seg_vm(env, R_CS, new_cs & 0xffff);
+ load_seg_vm(env, R_SS, new_ss & 0xffff);
+ load_seg_vm(env, R_ES, new_es & 0xffff);
+ load_seg_vm(env, R_DS, new_ds & 0xffff);
+ load_seg_vm(env, R_FS, new_fs & 0xffff);
+ load_seg_vm(env, R_GS, new_gs & 0xffff);
+
+ env->eip = new_eip & 0xffff;
+ env->regs[R_ESP] = new_esp;
+}
+
+void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
+{
+ int tss_selector, type;
+ uint32_t e1, e2;
+
+ /* specific case for TSS */
+ if (env->eflags & NT_MASK) {
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_LMA_MASK) {
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+ }
+#endif
+ tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
+ if (tss_selector & 4) {
+ raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
+ }
+ if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
+ raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
+ }
+ type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
+        /* NOTE: masking the type with 0x17 maps both busy TSS types
+           (286 type 3 and 386 type 11) to 3, so a single comparison
+           checks for a busy TSS */
+ if (type != 3) {
+ raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
+ }
+ switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
+ } else {
+ helper_ret_protected(env, shift, 1, 0, GETPC());
+ }
+ env->hflags2 &= ~HF2_NMI_MASK;
+}
+
+void helper_lret_protected(CPUX86State *env, int shift, int addend)
+{
+ helper_ret_protected(env, shift, 0, addend, GETPC());
+}
+
+void helper_sysenter(CPUX86State *env)
+{
+ if (env->sysenter_cs == 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+ }
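+    /* SYSENTER loads flat CPL-0 code and stack segments derived from
+       IA32_SYSENTER_CS (CS = MSR value, SS = MSR value + 8) and continues
+       at SYSENTER_EIP with SYSENTER_ESP as the new stack pointer */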
+ env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
+
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_LMA_MASK) {
+ cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK |
+ DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
+ DESC_L_MASK);
+ } else
+#endif
+ {
+ cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK |
+ DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
+ }
+ cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK |
+ DESC_W_MASK | DESC_A_MASK);
+ env->regs[R_ESP] = env->sysenter_esp;
+ env->eip = env->sysenter_eip;
+}
+
+void helper_sysexit(CPUX86State *env, int dflag)
+{
+ int cpl;
+
+ cpl = env->hflags & HF_CPL_MASK;
+ if (env->sysenter_cs == 0 || cpl != 0) {
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
+ }
+#ifdef TARGET_X86_64
+ if (dflag == 2) {
+ cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
+ 3, 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+ DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
+ DESC_L_MASK);
+ cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
+ 3, 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+ DESC_W_MASK | DESC_A_MASK);
+ } else
+#endif
+ {
+ cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
+ 3, 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+ DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
+ cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
+ 3, 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+ DESC_W_MASK | DESC_A_MASK);
+ }
+ env->regs[R_ESP] = env->regs[R_ECX];
+ env->eip = env->regs[R_EDX];
+}
+
+target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
+{
+ unsigned int limit;
+ uint32_t e1, e2, eflags, selector;
+ int rpl, dpl, cpl, type;
+
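+    /* LSL: on success return the segment limit and set ZF in CC_SRC;
+       on failure return 0 and clear ZF */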
+ selector = selector1 & 0xffff;
+ eflags = cpu_cc_compute_all(env, CC_OP);
+ if ((selector & 0xfffc) == 0) {
+ goto fail;
+ }
+ if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
+ goto fail;
+ }
+ rpl = selector & 3;
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ cpl = env->hflags & HF_CPL_MASK;
+ if (e2 & DESC_S_MASK) {
+ if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
+ /* conforming */
+ } else {
+ if (dpl < cpl || dpl < rpl) {
+ goto fail;
+ }
+ }
+ } else {
+ type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
+ switch (type) {
+ case 1:
+ case 2:
+ case 3:
+ case 9:
+ case 11:
+ break;
+ default:
+ goto fail;
+ }
+ if (dpl < cpl || dpl < rpl) {
+ fail:
+ CC_SRC = eflags & ~CC_Z;
+ return 0;
+ }
+ }
+ limit = get_seg_limit(e1, e2);
+ CC_SRC = eflags | CC_Z;
+ return limit;
+}
+
+target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
+{
+ uint32_t e1, e2, eflags, selector;
+ int rpl, dpl, cpl, type;
+
+ selector = selector1 & 0xffff;
+ eflags = cpu_cc_compute_all(env, CC_OP);
+ if ((selector & 0xfffc) == 0) {
+ goto fail;
+ }
+ if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
+ goto fail;
+ }
+ rpl = selector & 3;
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ cpl = env->hflags & HF_CPL_MASK;
+ if (e2 & DESC_S_MASK) {
+ if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
+ /* conforming */
+ } else {
+ if (dpl < cpl || dpl < rpl) {
+ goto fail;
+ }
+ }
+ } else {
+ type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
+ switch (type) {
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 9:
+ case 11:
+ case 12:
+ break;
+ default:
+ goto fail;
+ }
+ if (dpl < cpl || dpl < rpl) {
+ fail:
+ CC_SRC = eflags & ~CC_Z;
+ return 0;
+ }
+ }
+ CC_SRC = eflags | CC_Z;
+ return e2 & 0x00f0ff00;
+}
+
+void helper_verr(CPUX86State *env, target_ulong selector1)
+{
+ uint32_t e1, e2, eflags, selector;
+ int rpl, dpl, cpl;
+
+ selector = selector1 & 0xffff;
+ eflags = cpu_cc_compute_all(env, CC_OP);
+ if ((selector & 0xfffc) == 0) {
+ goto fail;
+ }
+ if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
+ goto fail;
+ }
+ if (!(e2 & DESC_S_MASK)) {
+ goto fail;
+ }
+ rpl = selector & 3;
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ cpl = env->hflags & HF_CPL_MASK;
+ if (e2 & DESC_CS_MASK) {
+ if (!(e2 & DESC_R_MASK)) {
+ goto fail;
+ }
+ if (!(e2 & DESC_C_MASK)) {
+ if (dpl < cpl || dpl < rpl) {
+ goto fail;
+ }
+ }
+ } else {
+ if (dpl < cpl || dpl < rpl) {
+ fail:
+ CC_SRC = eflags & ~CC_Z;
+ return;
+ }
+ }
+ CC_SRC = eflags | CC_Z;
+}
+
+void helper_verw(CPUX86State *env, target_ulong selector1)
+{
+ uint32_t e1, e2, eflags, selector;
+ int rpl, dpl, cpl;
+
+ selector = selector1 & 0xffff;
+ eflags = cpu_cc_compute_all(env, CC_OP);
+ if ((selector & 0xfffc) == 0) {
+ goto fail;
+ }
+ if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
+ goto fail;
+ }
+ if (!(e2 & DESC_S_MASK)) {
+ goto fail;
+ }
+ rpl = selector & 3;
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ cpl = env->hflags & HF_CPL_MASK;
+ if (e2 & DESC_CS_MASK) {
+ goto fail;
+ } else {
+ if (dpl < cpl || dpl < rpl) {
+ goto fail;
+ }
+ if (!(e2 & DESC_W_MASK)) {
+ fail:
+ CC_SRC = eflags & ~CC_Z;
+ return;
+ }
+ }
+ CC_SRC = eflags | CC_Z;
+}
+
+#if defined(CONFIG_USER_ONLY)
+void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
+{
+ if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
+ int dpl = (env->eflags & VM_MASK) ? 3 : 0;
+ selector &= 0xffff;
+ cpu_x86_load_seg_cache(env, seg_reg, selector,
+ (selector << 4), 0xffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
+ } else {
+ helper_load_seg(env, seg_reg, selector);
+ }
+}
+#endif
+
+/* check if Port I/O is allowed in TSS */
+static inline void check_io(CPUX86State *env, int addr, int size,
+ uintptr_t retaddr)
+{
+ int io_offset, val, mask;
+
+    /* the TSS must be a valid 32-bit TSS */
+ if (!(env->tr.flags & DESC_P_MASK) ||
+ ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
+ env->tr.limit < 103) {
+ goto fail;
+ }
+ io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
+ io_offset += (addr >> 3);
+    /* Note: the checked bits may span a byte boundary, so two bytes are
+       read */
+ if ((io_offset + 1) > env->tr.limit) {
+ goto fail;
+ }
+ val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
+ val >>= (addr & 7);
+ mask = (1 << size) - 1;
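+    /* e.g. a 2-byte access to port 0x3f9 reads the bitmap word at offset
+       0x3f9 >> 3 = 0x7f; val is shifted right by 1 and bits 0-1 of the
+       result (ports 0x3f9 and 0x3fa) must both be clear */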
+ /* all bits must be zero to allow the I/O */
+ if ((val & mask) != 0) {
+ fail:
+ raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
+ }
+}
+
+void helper_check_iob(CPUX86State *env, uint32_t t0)
+{
+ check_io(env, t0, 1, GETPC());
+}
+
+void helper_check_iow(CPUX86State *env, uint32_t t0)
+{
+ check_io(env, t0, 2, GETPC());
+}
+
+void helper_check_iol(CPUX86State *env, uint32_t t0)
+{
+ check_io(env, t0, 4, GETPC());
+}
diff --git a/target/i386/shift_helper_template.h b/target/i386/shift_helper_template.h
new file mode 100644
index 0000000000..cf91a2d284
--- /dev/null
+++ b/target/i386/shift_helper_template.h
@@ -0,0 +1,108 @@
+/*
+ * x86 shift helpers
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DATA_BITS (1 << (3 + SHIFT))
+#define SHIFT_MASK (DATA_BITS - 1)
+#if DATA_BITS <= 32
+#define SHIFT1_MASK 0x1f
+#else
+#define SHIFT1_MASK 0x3f
+#endif
+
+#if DATA_BITS == 8
+#define SUFFIX b
+#define DATA_MASK 0xff
+#elif DATA_BITS == 16
+#define SUFFIX w
+#define DATA_MASK 0xffff
+#elif DATA_BITS == 32
+#define SUFFIX l
+#define DATA_MASK 0xffffffff
+#elif DATA_BITS == 64
+#define SUFFIX q
+#define DATA_MASK 0xffffffffffffffffULL
+#else
+#error unhandled operand size
+#endif
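+
+/* This header is included once per operand size, with SHIFT selecting the
+   8/16/32/64-bit variant (helper suffix b/w/l/q). */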
+
+target_ulong glue(helper_rcl, SUFFIX)(CPUX86State *env, target_ulong t0,
+ target_ulong t1)
+{
+ int count, eflags;
+ target_ulong src;
+ target_long res;
+
+ count = t1 & SHIFT1_MASK;
+#if DATA_BITS == 16
+ count = rclw_table[count];
+#elif DATA_BITS == 8
+ count = rclb_table[count];
+#endif
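+    /* for 8/16-bit operands the rotate count is reduced modulo
+       DATA_BITS + 1 via the precomputed rcl tables */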
+ if (count) {
+ eflags = env->cc_src;
+ t0 &= DATA_MASK;
+ src = t0;
+ res = (t0 << count) | ((target_ulong)(eflags & CC_C) << (count - 1));
+ if (count > 1) {
+ res |= t0 >> (DATA_BITS + 1 - count);
+ }
+ t0 = res;
+ env->cc_src = (eflags & ~(CC_C | CC_O)) |
+ (lshift(src ^ t0, 11 - (DATA_BITS - 1)) & CC_O) |
+ ((src >> (DATA_BITS - count)) & CC_C);
+ }
+ return t0;
+}
+
+target_ulong glue(helper_rcr, SUFFIX)(CPUX86State *env, target_ulong t0,
+ target_ulong t1)
+{
+ int count, eflags;
+ target_ulong src;
+ target_long res;
+
+ count = t1 & SHIFT1_MASK;
+#if DATA_BITS == 16
+ count = rclw_table[count];
+#elif DATA_BITS == 8
+ count = rclb_table[count];
+#endif
+ if (count) {
+ eflags = env->cc_src;
+ t0 &= DATA_MASK;
+ src = t0;
+ res = (t0 >> count) |
+ ((target_ulong)(eflags & CC_C) << (DATA_BITS - count));
+ if (count > 1) {
+ res |= t0 << (DATA_BITS + 1 - count);
+ }
+ t0 = res;
+ env->cc_src = (eflags & ~(CC_C | CC_O)) |
+ (lshift(src ^ t0, 11 - (DATA_BITS - 1)) & CC_O) |
+ ((src >> (count - 1)) & CC_C);
+ }
+ return t0;
+}
+
+#undef DATA_BITS
+#undef SHIFT_MASK
+#undef SHIFT1_MASK
+#undef DATA_TYPE
+#undef DATA_MASK
+#undef SUFFIX
diff --git a/target/i386/smm_helper.c b/target/i386/smm_helper.c
new file mode 100644
index 0000000000..4dd6a2c544
--- /dev/null
+++ b/target/i386/smm_helper.c
@@ -0,0 +1,342 @@
+/*
+ * x86 SMM helpers
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/log.h"
+
+/* SMM support */
+
+#if defined(CONFIG_USER_ONLY)
+
+void do_smm_enter(X86CPU *cpu)
+{
+}
+
+void helper_rsm(CPUX86State *env)
+{
+}
+
+#else
+
+#ifdef TARGET_X86_64
+#define SMM_REVISION_ID 0x00020064
+#else
+#define SMM_REVISION_ID 0x00020000
+#endif
+
+void cpu_smm_update(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ bool smm_enabled = (env->hflags & HF_SMM_MASK);
+
+ if (cpu->smram) {
+ memory_region_set_enabled(cpu->smram, smm_enabled);
+ }
+}
+
+void do_smm_enter(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+ target_ulong sm_state;
+ SegmentCache *dt;
+ int i, offset;
+
+ qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
+ log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP);
+
+ env->hflags |= HF_SMM_MASK;
+ if (env->hflags2 & HF2_NMI_MASK) {
+ env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK;
+ } else {
+ env->hflags2 |= HF2_NMI_MASK;
+ }
+ cpu_smm_update(cpu);
+
+ sm_state = env->smbase + 0x8000;
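+    /* the register state save map occupies SMBASE + 0xfe00 .. 0xffff;
+       sm_state points at SMBASE + 0x8000, so the offsets below are
+       relative to that base */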
+
+#ifdef TARGET_X86_64
+ for (i = 0; i < 6; i++) {
+ dt = &env->segs[i];
+ offset = 0x7e00 + i * 16;
+ x86_stw_phys(cs, sm_state + offset, dt->selector);
+ x86_stw_phys(cs, sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
+ x86_stl_phys(cs, sm_state + offset + 4, dt->limit);
+ x86_stq_phys(cs, sm_state + offset + 8, dt->base);
+ }
+
+ x86_stq_phys(cs, sm_state + 0x7e68, env->gdt.base);
+ x86_stl_phys(cs, sm_state + 0x7e64, env->gdt.limit);
+
+ x86_stw_phys(cs, sm_state + 0x7e70, env->ldt.selector);
+ x86_stq_phys(cs, sm_state + 0x7e78, env->ldt.base);
+ x86_stl_phys(cs, sm_state + 0x7e74, env->ldt.limit);
+ x86_stw_phys(cs, sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
+
+ x86_stq_phys(cs, sm_state + 0x7e88, env->idt.base);
+ x86_stl_phys(cs, sm_state + 0x7e84, env->idt.limit);
+
+ x86_stw_phys(cs, sm_state + 0x7e90, env->tr.selector);
+ x86_stq_phys(cs, sm_state + 0x7e98, env->tr.base);
+ x86_stl_phys(cs, sm_state + 0x7e94, env->tr.limit);
+ x86_stw_phys(cs, sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
+
+ /* ??? Vol 1, 16.5.6 Intel MPX and SMM says that IA32_BNDCFGS
+ is saved at offset 7ED0. Vol 3, 34.4.1.1, Table 32-2, has
+ 7EA0-7ED7 as "reserved". What's this, and what's really
+ supposed to happen? */
+ x86_stq_phys(cs, sm_state + 0x7ed0, env->efer);
+
+ x86_stq_phys(cs, sm_state + 0x7ff8, env->regs[R_EAX]);
+ x86_stq_phys(cs, sm_state + 0x7ff0, env->regs[R_ECX]);
+ x86_stq_phys(cs, sm_state + 0x7fe8, env->regs[R_EDX]);
+ x86_stq_phys(cs, sm_state + 0x7fe0, env->regs[R_EBX]);
+ x86_stq_phys(cs, sm_state + 0x7fd8, env->regs[R_ESP]);
+ x86_stq_phys(cs, sm_state + 0x7fd0, env->regs[R_EBP]);
+ x86_stq_phys(cs, sm_state + 0x7fc8, env->regs[R_ESI]);
+ x86_stq_phys(cs, sm_state + 0x7fc0, env->regs[R_EDI]);
+ for (i = 8; i < 16; i++) {
+ x86_stq_phys(cs, sm_state + 0x7ff8 - i * 8, env->regs[i]);
+ }
+ x86_stq_phys(cs, sm_state + 0x7f78, env->eip);
+ x86_stl_phys(cs, sm_state + 0x7f70, cpu_compute_eflags(env));
+ x86_stl_phys(cs, sm_state + 0x7f68, env->dr[6]);
+ x86_stl_phys(cs, sm_state + 0x7f60, env->dr[7]);
+
+ x86_stl_phys(cs, sm_state + 0x7f48, env->cr[4]);
+ x86_stq_phys(cs, sm_state + 0x7f50, env->cr[3]);
+ x86_stl_phys(cs, sm_state + 0x7f58, env->cr[0]);
+
+ x86_stl_phys(cs, sm_state + 0x7efc, SMM_REVISION_ID);
+ x86_stl_phys(cs, sm_state + 0x7f00, env->smbase);
+#else
+ x86_stl_phys(cs, sm_state + 0x7ffc, env->cr[0]);
+ x86_stl_phys(cs, sm_state + 0x7ff8, env->cr[3]);
+ x86_stl_phys(cs, sm_state + 0x7ff4, cpu_compute_eflags(env));
+ x86_stl_phys(cs, sm_state + 0x7ff0, env->eip);
+ x86_stl_phys(cs, sm_state + 0x7fec, env->regs[R_EDI]);
+ x86_stl_phys(cs, sm_state + 0x7fe8, env->regs[R_ESI]);
+ x86_stl_phys(cs, sm_state + 0x7fe4, env->regs[R_EBP]);
+ x86_stl_phys(cs, sm_state + 0x7fe0, env->regs[R_ESP]);
+ x86_stl_phys(cs, sm_state + 0x7fdc, env->regs[R_EBX]);
+ x86_stl_phys(cs, sm_state + 0x7fd8, env->regs[R_EDX]);
+ x86_stl_phys(cs, sm_state + 0x7fd4, env->regs[R_ECX]);
+ x86_stl_phys(cs, sm_state + 0x7fd0, env->regs[R_EAX]);
+ x86_stl_phys(cs, sm_state + 0x7fcc, env->dr[6]);
+ x86_stl_phys(cs, sm_state + 0x7fc8, env->dr[7]);
+
+ x86_stl_phys(cs, sm_state + 0x7fc4, env->tr.selector);
+ x86_stl_phys(cs, sm_state + 0x7f64, env->tr.base);
+ x86_stl_phys(cs, sm_state + 0x7f60, env->tr.limit);
+ x86_stl_phys(cs, sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
+
+ x86_stl_phys(cs, sm_state + 0x7fc0, env->ldt.selector);
+ x86_stl_phys(cs, sm_state + 0x7f80, env->ldt.base);
+ x86_stl_phys(cs, sm_state + 0x7f7c, env->ldt.limit);
+ x86_stl_phys(cs, sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
+
+ x86_stl_phys(cs, sm_state + 0x7f74, env->gdt.base);
+ x86_stl_phys(cs, sm_state + 0x7f70, env->gdt.limit);
+
+ x86_stl_phys(cs, sm_state + 0x7f58, env->idt.base);
+ x86_stl_phys(cs, sm_state + 0x7f54, env->idt.limit);
+
+ for (i = 0; i < 6; i++) {
+ dt = &env->segs[i];
+ if (i < 3) {
+ offset = 0x7f84 + i * 12;
+ } else {
+ offset = 0x7f2c + (i - 3) * 12;
+ }
+ x86_stl_phys(cs, sm_state + 0x7fa8 + i * 4, dt->selector);
+ x86_stl_phys(cs, sm_state + offset + 8, dt->base);
+ x86_stl_phys(cs, sm_state + offset + 4, dt->limit);
+ x86_stl_phys(cs, sm_state + offset, (dt->flags >> 8) & 0xf0ff);
+ }
+ x86_stl_phys(cs, sm_state + 0x7f14, env->cr[4]);
+
+ x86_stl_phys(cs, sm_state + 0x7efc, SMM_REVISION_ID);
+ x86_stl_phys(cs, sm_state + 0x7ef8, env->smbase);
+#endif
+ /* init SMM cpu state */
+
+#ifdef TARGET_X86_64
+ cpu_load_efer(env, 0);
+#endif
+ cpu_load_eflags(env, 0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C |
+ DF_MASK));
+ env->eip = 0x00008000;
+ cpu_x86_update_cr0(env,
+ env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK |
+ CR0_PG_MASK));
+ cpu_x86_update_cr4(env, 0);
+ env->dr[7] = 0x00000400;
+
+ cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
+ 0xffffffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_G_MASK | DESC_A_MASK);
+ cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_G_MASK | DESC_A_MASK);
+ cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_G_MASK | DESC_A_MASK);
+ cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_G_MASK | DESC_A_MASK);
+ cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_G_MASK | DESC_A_MASK);
+ cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff,
+ DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
+ DESC_G_MASK | DESC_A_MASK);
+}
+
+void helper_rsm(CPUX86State *env)
+{
+ X86CPU *cpu = x86_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ target_ulong sm_state;
+ int i, offset;
+ uint32_t val;
+
+ sm_state = env->smbase + 0x8000;
+#ifdef TARGET_X86_64
+ cpu_load_efer(env, x86_ldq_phys(cs, sm_state + 0x7ed0));
+
+ env->gdt.base = x86_ldq_phys(cs, sm_state + 0x7e68);
+ env->gdt.limit = x86_ldl_phys(cs, sm_state + 0x7e64);
+
+ env->ldt.selector = x86_lduw_phys(cs, sm_state + 0x7e70);
+ env->ldt.base = x86_ldq_phys(cs, sm_state + 0x7e78);
+ env->ldt.limit = x86_ldl_phys(cs, sm_state + 0x7e74);
+ env->ldt.flags = (x86_lduw_phys(cs, sm_state + 0x7e72) & 0xf0ff) << 8;
+
+ env->idt.base = x86_ldq_phys(cs, sm_state + 0x7e88);
+ env->idt.limit = x86_ldl_phys(cs, sm_state + 0x7e84);
+
+ env->tr.selector = x86_lduw_phys(cs, sm_state + 0x7e90);
+ env->tr.base = x86_ldq_phys(cs, sm_state + 0x7e98);
+ env->tr.limit = x86_ldl_phys(cs, sm_state + 0x7e94);
+ env->tr.flags = (x86_lduw_phys(cs, sm_state + 0x7e92) & 0xf0ff) << 8;
+
+ env->regs[R_EAX] = x86_ldq_phys(cs, sm_state + 0x7ff8);
+ env->regs[R_ECX] = x86_ldq_phys(cs, sm_state + 0x7ff0);
+ env->regs[R_EDX] = x86_ldq_phys(cs, sm_state + 0x7fe8);
+ env->regs[R_EBX] = x86_ldq_phys(cs, sm_state + 0x7fe0);
+ env->regs[R_ESP] = x86_ldq_phys(cs, sm_state + 0x7fd8);
+ env->regs[R_EBP] = x86_ldq_phys(cs, sm_state + 0x7fd0);
+ env->regs[R_ESI] = x86_ldq_phys(cs, sm_state + 0x7fc8);
+ env->regs[R_EDI] = x86_ldq_phys(cs, sm_state + 0x7fc0);
+ for (i = 8; i < 16; i++) {
+ env->regs[i] = x86_ldq_phys(cs, sm_state + 0x7ff8 - i * 8);
+ }
+ env->eip = x86_ldq_phys(cs, sm_state + 0x7f78);
+ cpu_load_eflags(env, x86_ldl_phys(cs, sm_state + 0x7f70),
+ ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
+ env->dr[6] = x86_ldl_phys(cs, sm_state + 0x7f68);
+ env->dr[7] = x86_ldl_phys(cs, sm_state + 0x7f60);
+
+ cpu_x86_update_cr4(env, x86_ldl_phys(cs, sm_state + 0x7f48));
+ cpu_x86_update_cr3(env, x86_ldq_phys(cs, sm_state + 0x7f50));
+ cpu_x86_update_cr0(env, x86_ldl_phys(cs, sm_state + 0x7f58));
+
+ for (i = 0; i < 6; i++) {
+ offset = 0x7e00 + i * 16;
+ cpu_x86_load_seg_cache(env, i,
+ x86_lduw_phys(cs, sm_state + offset),
+ x86_ldq_phys(cs, sm_state + offset + 8),
+ x86_ldl_phys(cs, sm_state + offset + 4),
+ (x86_lduw_phys(cs, sm_state + offset + 2) &
+ 0xf0ff) << 8);
+ }
+
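+    /* bit 17 of the saved SMM revision ID advertises SMBASE relocation;
+       only in that case is the SMBASE field from the save area honoured */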
+ val = x86_ldl_phys(cs, sm_state + 0x7efc); /* revision ID */
+ if (val & 0x20000) {
+ env->smbase = x86_ldl_phys(cs, sm_state + 0x7f00);
+ }
+#else
+ cpu_x86_update_cr0(env, x86_ldl_phys(cs, sm_state + 0x7ffc));
+ cpu_x86_update_cr3(env, x86_ldl_phys(cs, sm_state + 0x7ff8));
+ cpu_load_eflags(env, x86_ldl_phys(cs, sm_state + 0x7ff4),
+ ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
+ env->eip = x86_ldl_phys(cs, sm_state + 0x7ff0);
+ env->regs[R_EDI] = x86_ldl_phys(cs, sm_state + 0x7fec);
+ env->regs[R_ESI] = x86_ldl_phys(cs, sm_state + 0x7fe8);
+ env->regs[R_EBP] = x86_ldl_phys(cs, sm_state + 0x7fe4);
+ env->regs[R_ESP] = x86_ldl_phys(cs, sm_state + 0x7fe0);
+ env->regs[R_EBX] = x86_ldl_phys(cs, sm_state + 0x7fdc);
+ env->regs[R_EDX] = x86_ldl_phys(cs, sm_state + 0x7fd8);
+ env->regs[R_ECX] = x86_ldl_phys(cs, sm_state + 0x7fd4);
+ env->regs[R_EAX] = x86_ldl_phys(cs, sm_state + 0x7fd0);
+ env->dr[6] = x86_ldl_phys(cs, sm_state + 0x7fcc);
+ env->dr[7] = x86_ldl_phys(cs, sm_state + 0x7fc8);
+
+ env->tr.selector = x86_ldl_phys(cs, sm_state + 0x7fc4) & 0xffff;
+ env->tr.base = x86_ldl_phys(cs, sm_state + 0x7f64);
+ env->tr.limit = x86_ldl_phys(cs, sm_state + 0x7f60);
+ env->tr.flags = (x86_ldl_phys(cs, sm_state + 0x7f5c) & 0xf0ff) << 8;
+
+ env->ldt.selector = x86_ldl_phys(cs, sm_state + 0x7fc0) & 0xffff;
+ env->ldt.base = x86_ldl_phys(cs, sm_state + 0x7f80);
+ env->ldt.limit = x86_ldl_phys(cs, sm_state + 0x7f7c);
+ env->ldt.flags = (x86_ldl_phys(cs, sm_state + 0x7f78) & 0xf0ff) << 8;
+
+ env->gdt.base = x86_ldl_phys(cs, sm_state + 0x7f74);
+ env->gdt.limit = x86_ldl_phys(cs, sm_state + 0x7f70);
+
+ env->idt.base = x86_ldl_phys(cs, sm_state + 0x7f58);
+ env->idt.limit = x86_ldl_phys(cs, sm_state + 0x7f54);
+
+ for (i = 0; i < 6; i++) {
+ if (i < 3) {
+ offset = 0x7f84 + i * 12;
+ } else {
+ offset = 0x7f2c + (i - 3) * 12;
+ }
+ cpu_x86_load_seg_cache(env, i,
+ x86_ldl_phys(cs,
+ sm_state + 0x7fa8 + i * 4) & 0xffff,
+ x86_ldl_phys(cs, sm_state + offset + 8),
+ x86_ldl_phys(cs, sm_state + offset + 4),
+ (x86_ldl_phys(cs,
+ sm_state + offset) & 0xf0ff) << 8);
+ }
+ cpu_x86_update_cr4(env, x86_ldl_phys(cs, sm_state + 0x7f14));
+
+ val = x86_ldl_phys(cs, sm_state + 0x7efc); /* revision ID */
+ if (val & 0x20000) {
+ env->smbase = x86_ldl_phys(cs, sm_state + 0x7ef8);
+ }
+#endif
+ if ((env->hflags2 & HF2_SMM_INSIDE_NMI_MASK) == 0) {
+ env->hflags2 &= ~HF2_NMI_MASK;
+ }
+ env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK;
+ env->hflags &= ~HF_SMM_MASK;
+ cpu_smm_update(cpu);
+
+ qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
+ log_cpu_state_mask(CPU_LOG_INT, CPU(cpu), CPU_DUMP_CCOP);
+}
+
+#endif /* !CONFIG_USER_ONLY */
diff --git a/target/i386/svm.h b/target/i386/svm.h
new file mode 100644
index 0000000000..922c8fd39c
--- /dev/null
+++ b/target/i386/svm.h
@@ -0,0 +1,222 @@
+#ifndef SVM_H
+#define SVM_H
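+
+/* Definitions and VMCB layout (control area and state save area) for
+   AMD SVM, used by the SVM helpers. */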
+
+#define TLB_CONTROL_DO_NOTHING 0
+#define TLB_CONTROL_FLUSH_ALL_ASID 1
+
+#define V_TPR_MASK 0x0f
+
+#define V_IRQ_SHIFT 8
+#define V_IRQ_MASK (1 << V_IRQ_SHIFT)
+
+#define V_INTR_PRIO_SHIFT 16
+#define V_INTR_PRIO_MASK (0x0f << V_INTR_PRIO_SHIFT)
+
+#define V_IGN_TPR_SHIFT 20
+#define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT)
+
+#define V_INTR_MASKING_SHIFT 24
+#define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT)
+
+#define SVM_INTERRUPT_SHADOW_MASK 1
+
+#define SVM_IOIO_STR_SHIFT 2
+#define SVM_IOIO_REP_SHIFT 3
+#define SVM_IOIO_SIZE_SHIFT 4
+#define SVM_IOIO_ASIZE_SHIFT 7
+
+#define SVM_IOIO_TYPE_MASK 1
+#define SVM_IOIO_STR_MASK (1 << SVM_IOIO_STR_SHIFT)
+#define SVM_IOIO_REP_MASK (1 << SVM_IOIO_REP_SHIFT)
+#define SVM_IOIO_SIZE_MASK (7 << SVM_IOIO_SIZE_SHIFT)
+#define SVM_IOIO_ASIZE_MASK (7 << SVM_IOIO_ASIZE_SHIFT)
+
+#define SVM_EVTINJ_VEC_MASK 0xff
+
+#define SVM_EVTINJ_TYPE_SHIFT 8
+#define SVM_EVTINJ_TYPE_MASK (7 << SVM_EVTINJ_TYPE_SHIFT)
+
+#define SVM_EVTINJ_TYPE_INTR (0 << SVM_EVTINJ_TYPE_SHIFT)
+#define SVM_EVTINJ_TYPE_NMI (2 << SVM_EVTINJ_TYPE_SHIFT)
+#define SVM_EVTINJ_TYPE_EXEPT (3 << SVM_EVTINJ_TYPE_SHIFT)
+#define SVM_EVTINJ_TYPE_SOFT (4 << SVM_EVTINJ_TYPE_SHIFT)
+
+#define SVM_EVTINJ_VALID (1 << 31)
+#define SVM_EVTINJ_VALID_ERR (1 << 11)
+
+#define SVM_EXITINTINFO_VEC_MASK SVM_EVTINJ_VEC_MASK
+
+#define SVM_EXITINTINFO_TYPE_INTR SVM_EVTINJ_TYPE_INTR
+#define SVM_EXITINTINFO_TYPE_NMI SVM_EVTINJ_TYPE_NMI
+#define SVM_EXITINTINFO_TYPE_EXEPT SVM_EVTINJ_TYPE_EXEPT
+#define SVM_EXITINTINFO_TYPE_SOFT SVM_EVTINJ_TYPE_SOFT
+
+#define SVM_EXITINTINFO_VALID SVM_EVTINJ_VALID
+#define SVM_EXITINTINFO_VALID_ERR SVM_EVTINJ_VALID_ERR
+
+#define SVM_EXIT_READ_CR0 0x000
+#define SVM_EXIT_READ_CR3 0x003
+#define SVM_EXIT_READ_CR4 0x004
+#define SVM_EXIT_READ_CR8 0x008
+#define SVM_EXIT_WRITE_CR0 0x010
+#define SVM_EXIT_WRITE_CR3 0x013
+#define SVM_EXIT_WRITE_CR4 0x014
+#define SVM_EXIT_WRITE_CR8 0x018
+#define SVM_EXIT_READ_DR0 0x020
+#define SVM_EXIT_READ_DR1 0x021
+#define SVM_EXIT_READ_DR2 0x022
+#define SVM_EXIT_READ_DR3 0x023
+#define SVM_EXIT_READ_DR4 0x024
+#define SVM_EXIT_READ_DR5 0x025
+#define SVM_EXIT_READ_DR6 0x026
+#define SVM_EXIT_READ_DR7 0x027
+#define SVM_EXIT_WRITE_DR0 0x030
+#define SVM_EXIT_WRITE_DR1 0x031
+#define SVM_EXIT_WRITE_DR2 0x032
+#define SVM_EXIT_WRITE_DR3 0x033
+#define SVM_EXIT_WRITE_DR4 0x034
+#define SVM_EXIT_WRITE_DR5 0x035
+#define SVM_EXIT_WRITE_DR6 0x036
+#define SVM_EXIT_WRITE_DR7 0x037
+#define SVM_EXIT_EXCP_BASE 0x040
+#define SVM_EXIT_INTR 0x060
+#define SVM_EXIT_NMI 0x061
+#define SVM_EXIT_SMI 0x062
+#define SVM_EXIT_INIT 0x063
+#define SVM_EXIT_VINTR 0x064
+#define SVM_EXIT_CR0_SEL_WRITE 0x065
+#define SVM_EXIT_IDTR_READ 0x066
+#define SVM_EXIT_GDTR_READ 0x067
+#define SVM_EXIT_LDTR_READ 0x068
+#define SVM_EXIT_TR_READ 0x069
+#define SVM_EXIT_IDTR_WRITE 0x06a
+#define SVM_EXIT_GDTR_WRITE 0x06b
+#define SVM_EXIT_LDTR_WRITE 0x06c
+#define SVM_EXIT_TR_WRITE 0x06d
+#define SVM_EXIT_RDTSC 0x06e
+#define SVM_EXIT_RDPMC 0x06f
+#define SVM_EXIT_PUSHF 0x070
+#define SVM_EXIT_POPF 0x071
+#define SVM_EXIT_CPUID 0x072
+#define SVM_EXIT_RSM 0x073
+#define SVM_EXIT_IRET 0x074
+#define SVM_EXIT_SWINT 0x075
+#define SVM_EXIT_INVD 0x076
+#define SVM_EXIT_PAUSE 0x077
+#define SVM_EXIT_HLT 0x078
+#define SVM_EXIT_INVLPG 0x079
+#define SVM_EXIT_INVLPGA 0x07a
+#define SVM_EXIT_IOIO 0x07b
+#define SVM_EXIT_MSR 0x07c
+#define SVM_EXIT_TASK_SWITCH 0x07d
+#define SVM_EXIT_FERR_FREEZE 0x07e
+#define SVM_EXIT_SHUTDOWN 0x07f
+#define SVM_EXIT_VMRUN 0x080
+#define SVM_EXIT_VMMCALL 0x081
+#define SVM_EXIT_VMLOAD 0x082
+#define SVM_EXIT_VMSAVE 0x083
+#define SVM_EXIT_STGI 0x084
+#define SVM_EXIT_CLGI 0x085
+#define SVM_EXIT_SKINIT 0x086
+#define SVM_EXIT_RDTSCP 0x087
+#define SVM_EXIT_ICEBP 0x088
+#define SVM_EXIT_WBINVD 0x089
+/* only listed in the documentation; possibly incorrect */
+#define SVM_EXIT_MONITOR 0x08a
+#define SVM_EXIT_MWAIT 0x08b
+#define SVM_EXIT_NPF 0x400
+
+#define SVM_EXIT_ERR -1
+
+#define SVM_CR0_SELECTIVE_MASK (1 << 3 | 1) /* TS and MP */
+
+struct QEMU_PACKED vmcb_control_area {
+ uint16_t intercept_cr_read;
+ uint16_t intercept_cr_write;
+ uint16_t intercept_dr_read;
+ uint16_t intercept_dr_write;
+ uint32_t intercept_exceptions;
+ uint64_t intercept;
+ uint8_t reserved_1[44];
+ uint64_t iopm_base_pa;
+ uint64_t msrpm_base_pa;
+ uint64_t tsc_offset;
+ uint32_t asid;
+ uint8_t tlb_ctl;
+ uint8_t reserved_2[3];
+ uint32_t int_ctl;
+ uint32_t int_vector;
+ uint32_t int_state;
+ uint8_t reserved_3[4];
+ uint64_t exit_code;
+ uint64_t exit_info_1;
+ uint64_t exit_info_2;
+ uint32_t exit_int_info;
+ uint32_t exit_int_info_err;
+ uint64_t nested_ctl;
+ uint8_t reserved_4[16];
+ uint32_t event_inj;
+ uint32_t event_inj_err;
+ uint64_t nested_cr3;
+ uint64_t lbr_ctl;
+ uint8_t reserved_5[832];
+};
+
+struct QEMU_PACKED vmcb_seg {
+ uint16_t selector;
+ uint16_t attrib;
+ uint32_t limit;
+ uint64_t base;
+};
+
+struct QEMU_PACKED vmcb_save_area {
+ struct vmcb_seg es;
+ struct vmcb_seg cs;
+ struct vmcb_seg ss;
+ struct vmcb_seg ds;
+ struct vmcb_seg fs;
+ struct vmcb_seg gs;
+ struct vmcb_seg gdtr;
+ struct vmcb_seg ldtr;
+ struct vmcb_seg idtr;
+ struct vmcb_seg tr;
+ uint8_t reserved_1[43];
+ uint8_t cpl;
+ uint8_t reserved_2[4];
+ uint64_t efer;
+ uint8_t reserved_3[112];
+ uint64_t cr4;
+ uint64_t cr3;
+ uint64_t cr0;
+ uint64_t dr7;
+ uint64_t dr6;
+ uint64_t rflags;
+ uint64_t rip;
+ uint8_t reserved_4[88];
+ uint64_t rsp;
+ uint8_t reserved_5[24];
+ uint64_t rax;
+ uint64_t star;
+ uint64_t lstar;
+ uint64_t cstar;
+ uint64_t sfmask;
+ uint64_t kernel_gs_base;
+ uint64_t sysenter_cs;
+ uint64_t sysenter_esp;
+ uint64_t sysenter_eip;
+ uint64_t cr2;
+ uint8_t reserved_6[32];
+ uint64_t g_pat;
+ uint64_t dbgctl;
+ uint64_t br_from;
+ uint64_t br_to;
+ uint64_t last_excp_from;
+ uint64_t last_excp_to;
+};
+
+struct QEMU_PACKED vmcb {
+ struct vmcb_control_area control;
+ struct vmcb_save_area save;
+};
+
+#endif
diff --git a/target/i386/svm_helper.c b/target/i386/svm_helper.c
new file mode 100644
index 0000000000..782b3f12f0
--- /dev/null
+++ b/target/i386/svm_helper.c
@@ -0,0 +1,774 @@
+/*
+ * x86 SVM helpers
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/cpu-all.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+
+/* Secure Virtual Machine helpers */
+
+#if defined(CONFIG_USER_ONLY)
+
+void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
+{
+}
+
+void helper_vmmcall(CPUX86State *env)
+{
+}
+
+void helper_vmload(CPUX86State *env, int aflag)
+{
+}
+
+void helper_vmsave(CPUX86State *env, int aflag)
+{
+}
+
+void helper_stgi(CPUX86State *env)
+{
+}
+
+void helper_clgi(CPUX86State *env)
+{
+}
+
+void helper_skinit(CPUX86State *env)
+{
+}
+
+void helper_invlpga(CPUX86State *env, int aflag)
+{
+}
+
+void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
+{
+}
+
+void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1)
+{
+}
+
+void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
+ uint64_t param)
+{
+}
+
+void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
+ uint64_t param)
+{
+}
+
+void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
+ uint32_t next_eip_addend)
+{
+}
+#else
+
+static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
+ const SegmentCache *sc)
+{
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+
+ x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
+ sc->selector);
+ x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
+ sc->base);
+ x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
+ sc->limit);
+ x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
+ ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
+}
+
+static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
+ SegmentCache *sc)
+{
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+ unsigned int flags;
+
+ sc->selector = x86_lduw_phys(cs,
+ addr + offsetof(struct vmcb_seg, selector));
+ sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
+ sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
+ flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
+ sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
+}
+
+static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
+ int seg_reg)
+{
+ SegmentCache sc1, *sc = &sc1;
+
+ svm_load_seg(env, addr, sc);
+ cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
+ sc->base, sc->limit, sc->flags);
+}
+
+void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
+{
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+ target_ulong addr;
+ uint32_t event_inj;
+ uint32_t int_ctl;
+
+ cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0);
+
+ if (aflag == 2) {
+ addr = env->regs[R_EAX];
+ } else {
+ addr = (uint32_t)env->regs[R_EAX];
+ }
+
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
+
+ env->vm_vmcb = addr;
+
+ /* save the current CPU state in the hsave page */
+ x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
+ env->gdt.base);
+ x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
+ env->gdt.limit);
+
+ x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
+ env->idt.base);
+ x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
+ env->idt.limit);
+
+ x86_stq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
+ x86_stq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
+ x86_stq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
+ x86_stq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
+ x86_stq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
+ x86_stq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
+
+ x86_stq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
+ x86_stq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.rflags),
+ cpu_compute_eflags(env));
+
+ svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
+ &env->segs[R_ES]);
+ svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
+ &env->segs[R_CS]);
+ svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
+ &env->segs[R_SS]);
+ svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
+ &env->segs[R_DS]);
+
+ x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
+ env->eip + next_eip_addend);
+ x86_stq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
+ x86_stq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
+
+ /* load the interception bitmaps so we do not need to access the
+ vmcb in svm mode */
+ env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+ control.intercept));
+ env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
+ offsetof(struct vmcb,
+ control.intercept_cr_read));
+ env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
+ offsetof(struct vmcb,
+ control.intercept_cr_write));
+ env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
+ offsetof(struct vmcb,
+ control.intercept_dr_read));
+ env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
+ offsetof(struct vmcb,
+ control.intercept_dr_write));
+ env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
+ offsetof(struct vmcb,
+ control.intercept_exceptions
+ ));
+
+ /* enable intercepts */
+ env->hflags |= HF_SVMI_MASK;
+
+ env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
+ offsetof(struct vmcb, control.tsc_offset));
+
+ env->gdt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+ save.gdtr.base));
+ env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+ save.gdtr.limit));
+
+ env->idt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+ save.idtr.base));
+ env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+ save.idtr.limit));
+
+ /* clear exit_info_2 so we behave like the real hardware */
+ x86_stq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
+
+ cpu_x86_update_cr0(env, x86_ldq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb,
+ save.cr0)));
+ cpu_x86_update_cr4(env, x86_ldq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb,
+ save.cr4)));
+ cpu_x86_update_cr3(env, x86_ldq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb,
+ save.cr3)));
+ env->cr[2] = x86_ldq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.cr2));
+ int_ctl = x86_ldl_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
+ env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
+ if (int_ctl & V_INTR_MASKING_MASK) {
+ env->v_tpr = int_ctl & V_TPR_MASK;
+ env->hflags2 |= HF2_VINTR_MASK;
+ if (env->eflags & IF_MASK) {
+ env->hflags2 |= HF2_HIF_MASK;
+ }
+ }
+
+ cpu_load_efer(env,
+ x86_ldq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.efer)));
+ env->eflags = 0;
+ cpu_load_eflags(env, x86_ldq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb,
+ save.rflags)),
+ ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
+
+ svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
+ R_ES);
+ svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
+ R_CS);
+ svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
+ R_SS);
+ svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
+ R_DS);
+
+ env->eip = x86_ldq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.rip));
+
+ env->regs[R_ESP] = x86_ldq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.rsp));
+ env->regs[R_EAX] = x86_ldq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.rax));
+ env->dr[7] = x86_ldq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.dr7));
+ env->dr[6] = x86_ldq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.dr6));
+
+ /* FIXME: guest state consistency checks */
+
+ switch (x86_ldub_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
+ case TLB_CONTROL_DO_NOTHING:
+ break;
+ case TLB_CONTROL_FLUSH_ALL_ASID:
+ /* FIXME: this is not 100% correct but should work for now */
+ tlb_flush(cs, 1);
+ break;
+ }
+
+ env->hflags2 |= HF2_GIF_MASK;
+
+ if (int_ctl & V_IRQ_MASK) {
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+
+ cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
+ }
+
+ /* maybe we need to inject an event */
+ event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+ control.event_inj));
+ if (event_inj & SVM_EVTINJ_VALID) {
+ uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
+ uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
+ uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
+ offsetof(struct vmcb,
+ control.event_inj_err));
+
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
+ /* FIXME: need to implement valid_err */
+ switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
+ case SVM_EVTINJ_TYPE_INTR:
+ cs->exception_index = vector;
+ env->error_code = event_inj_err;
+ env->exception_is_int = 0;
+ env->exception_next_eip = -1;
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
+ /* XXX: is it always correct? */
+ do_interrupt_x86_hardirq(env, vector, 1);
+ break;
+ case SVM_EVTINJ_TYPE_NMI:
+ cs->exception_index = EXCP02_NMI;
+ env->error_code = event_inj_err;
+ env->exception_is_int = 0;
+ env->exception_next_eip = env->eip;
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
+ cpu_loop_exit(cs);
+ break;
+ case SVM_EVTINJ_TYPE_EXEPT:
+ cs->exception_index = vector;
+ env->error_code = event_inj_err;
+ env->exception_is_int = 0;
+ env->exception_next_eip = -1;
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
+ cpu_loop_exit(cs);
+ break;
+ case SVM_EVTINJ_TYPE_SOFT:
+ cs->exception_index = vector;
+ env->error_code = event_inj_err;
+ env->exception_is_int = 1;
+ env->exception_next_eip = env->eip;
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
+ cpu_loop_exit(cs);
+ break;
+ }
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
+ env->error_code);
+ }
+}
+
+void helper_vmmcall(CPUX86State *env)
+{
+ cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0);
+ raise_exception(env, EXCP06_ILLOP);
+}
+
+void helper_vmload(CPUX86State *env, int aflag)
+{
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+ target_ulong addr;
+
+ cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0);
+
+ if (aflag == 2) {
+ addr = env->regs[R_EAX];
+ } else {
+ addr = (uint32_t)env->regs[R_EAX];
+ }
+
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
+ "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
+ addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
+ save.fs.base)),
+ env->segs[R_FS].base);
+
+ svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
+ svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
+ svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
+ svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);
+
+#ifdef TARGET_X86_64
+ env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
+ save.kernel_gs_base));
+ env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
+ env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
+ env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
+#endif
+ env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
+ env->sysenter_cs = x86_ldq_phys(cs,
+ addr + offsetof(struct vmcb, save.sysenter_cs));
+ env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
+ save.sysenter_esp));
+ env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
+ save.sysenter_eip));
+}
+
+void helper_vmsave(CPUX86State *env, int aflag)
+{
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+ target_ulong addr;
+
+ cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0);
+
+ if (aflag == 2) {
+ addr = env->regs[R_EAX];
+ } else {
+ addr = (uint32_t)env->regs[R_EAX];
+ }
+
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
+ "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
+ addr, x86_ldq_phys(cs,
+ addr + offsetof(struct vmcb, save.fs.base)),
+ env->segs[R_FS].base);
+
+ svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
+ &env->segs[R_FS]);
+ svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
+ &env->segs[R_GS]);
+ svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
+ &env->tr);
+ svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
+ &env->ldt);
+
+#ifdef TARGET_X86_64
+ x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
+ env->kernelgsbase);
+ x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
+ x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
+ x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
+#endif
+ x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
+ x86_stq_phys(cs,
+ addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
+ x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
+ env->sysenter_esp);
+ x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
+ env->sysenter_eip);
+}
+
+void helper_stgi(CPUX86State *env)
+{
+ cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0);
+ env->hflags2 |= HF2_GIF_MASK;
+}
+
+void helper_clgi(CPUX86State *env)
+{
+ cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0);
+ env->hflags2 &= ~HF2_GIF_MASK;
+}
+
+void helper_skinit(CPUX86State *env)
+{
+ cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0);
+ /* XXX: not implemented */
+ raise_exception(env, EXCP06_ILLOP);
+}
+
+void helper_invlpga(CPUX86State *env, int aflag)
+{
+ X86CPU *cpu = x86_env_get_cpu(env);
+ target_ulong addr;
+
+ cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0);
+
+ if (aflag == 2) {
+ addr = env->regs[R_EAX];
+ } else {
+ addr = (uint32_t)env->regs[R_EAX];
+ }
+
+    /* XXX: could use the ASID to check whether the flush is actually
+       needed */
+ tlb_flush_page(CPU(cpu), addr);
+}
+
+void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
+ uint64_t param)
+{
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+
+ if (likely(!(env->hflags & HF_SVMI_MASK))) {
+ return;
+ }
+ switch (type) {
+ case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
+ if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
+ helper_vmexit(env, type, param);
+ }
+ break;
+ case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
+ if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
+ helper_vmexit(env, type, param);
+ }
+ break;
+ case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
+ if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
+ helper_vmexit(env, type, param);
+ }
+ break;
+ case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
+ if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
+ helper_vmexit(env, type, param);
+ }
+ break;
+ case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
+ if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
+ helper_vmexit(env, type, param);
+ }
+ break;
+ case SVM_EXIT_MSR:
+ if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
+ /* FIXME: this should be read in at vmrun (faster this way?) */
+ uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
+ offsetof(struct vmcb,
+ control.msrpm_base_pa));
+ uint32_t t0, t1;
+
+ switch ((uint32_t)env->regs[R_ECX]) {
+ case 0 ... 0x1fff:
+ t0 = (env->regs[R_ECX] * 2) % 8;
+ t1 = (env->regs[R_ECX] * 2) / 8;
+ break;
+ case 0xc0000000 ... 0xc0001fff:
+ t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
+ t1 = (t0 / 8);
+ t0 %= 8;
+ break;
+ case 0xc0010000 ... 0xc0011fff:
+ t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
+ t1 = (t0 / 8);
+ t0 %= 8;
+ break;
+ default:
+ helper_vmexit(env, type, param);
+ t0 = 0;
+ t1 = 0;
+ break;
+ }
+ if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
+ helper_vmexit(env, type, param);
+ }
+ }
+ break;
+ default:
+ if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
+ helper_vmexit(env, type, param);
+ }
+ break;
+ }
+}
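+
+/*
+ * Worked example for the MSR permission bitmap lookup above (illustrative
+ * only, values chosen for the example): an access to MSR 0xc0000080 (EFER)
+ * falls in the 0xc0000000 range, so
+ *     t0 = (8192 + 0x80) * 2 = 16640,  t1 = 16640 / 8 = 0x820,  t0 %= 8 = 0.
+ * The byte at msrpm_base_pa + 0x820 is then tested against (1 << param) << 0,
+ * i.e. bit 0 for param == 0 and bit 1 for param == 1, matching the AMD
+ * layout of one read-intercept and one write-intercept bit per MSR.
+ */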
+
+void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
+ uint64_t param)
+{
+ helper_svm_check_intercept_param(env, type, param);
+}
+
+void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
+ uint32_t next_eip_addend)
+{
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+
+ if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
+ /* FIXME: this should be read in at vmrun (faster this way?) */
+ uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
+ offsetof(struct vmcb, control.iopm_base_pa));
+ uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
+
+ if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
+ /* next env->eip */
+ x86_stq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
+ env->eip + next_eip_addend);
+ helper_vmexit(env, SVM_EXIT_IOIO, param | (port << 16));
+ }
+ }
+}
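+
+/*
+ * Worked example for the IOPM test above (illustrative only): for a
+ * word-sized access to port 0x1f1, gen_check_io() encodes the size in bits
+ * 4..6 of param, so (param >> 4) & 7 == 2 and mask == 3 (one bit per byte
+ * of the access).  With port / 8 == 0x3e and port & 7 == 1, the 16-bit word
+ * loaded from iopm_base_pa + 0x3e is tested against 3 << 1; using a 16-bit
+ * load lets the check straddle a byte boundary of the bitmap.
+ */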
+
+/* Note: currently only 32 bits of exit_code are used */
+void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
+{
+ CPUState *cs = CPU(x86_env_get_cpu(env));
+ uint32_t int_ctl;
+
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
+ PRIx64 ", " TARGET_FMT_lx ")!\n",
+ exit_code, exit_info_1,
+ x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+ control.exit_info_2)),
+ env->eip);
+
+ if (env->hflags & HF_INHIBIT_IRQ_MASK) {
+ x86_stl_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.int_state),
+ SVM_INTERRUPT_SHADOW_MASK);
+ env->hflags &= ~HF_INHIBIT_IRQ_MASK;
+ } else {
+ x86_stl_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
+ }
+
+ /* Save the VM state in the vmcb */
+ svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
+ &env->segs[R_ES]);
+ svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
+ &env->segs[R_CS]);
+ svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
+ &env->segs[R_SS]);
+ svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
+ &env->segs[R_DS]);
+
+ x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
+ env->gdt.base);
+ x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
+ env->gdt.limit);
+
+ x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
+ env->idt.base);
+ x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
+ env->idt.limit);
+
+ x86_stq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
+ x86_stq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
+ x86_stq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
+ x86_stq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
+ x86_stq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
+
+ int_ctl = x86_ldl_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
+ int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
+ int_ctl |= env->v_tpr & V_TPR_MASK;
+ if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
+ int_ctl |= V_IRQ_MASK;
+ }
+ x86_stl_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
+
+ x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
+ cpu_compute_eflags(env));
+ x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
+ env->eip);
+ x86_stq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
+ x86_stq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
+ x86_stq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
+ x86_stq_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
+ x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
+ env->hflags & HF_CPL_MASK);
+
+ /* Reload the host state from vm_hsave */
+ env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
+ env->hflags &= ~HF_SVMI_MASK;
+ env->intercept = 0;
+ env->intercept_exceptions = 0;
+ cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
+ env->tsc_offset = 0;
+
+ env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
+ save.gdtr.base));
+ env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
+ save.gdtr.limit));
+
+ env->idt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
+ save.idtr.base));
+ env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
+ save.idtr.limit));
+
+ cpu_x86_update_cr0(env, x86_ldq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb,
+ save.cr0)) |
+ CR0_PE_MASK);
+ cpu_x86_update_cr4(env, x86_ldq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb,
+ save.cr4)));
+ cpu_x86_update_cr3(env, x86_ldq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb,
+ save.cr3)));
+ /* we need to set the efer after the crs so the hidden flags get
+ set properly */
+ cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
+ save.efer)));
+ env->eflags = 0;
+ cpu_load_eflags(env, x86_ldq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb,
+ save.rflags)),
+ ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
+ VM_MASK));
+
+ svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
+ R_ES);
+ svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
+ R_CS);
+ svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
+ R_SS);
+ svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
+ R_DS);
+
+ env->eip = x86_ldq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.rip));
+ env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
+ offsetof(struct vmcb, save.rsp));
+ env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
+ offsetof(struct vmcb, save.rax));
+
+ env->dr[6] = x86_ldq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.dr6));
+ env->dr[7] = x86_ldq_phys(cs,
+ env->vm_hsave + offsetof(struct vmcb, save.dr7));
+
+ /* other setups */
+ x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
+ exit_code);
+ x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
+ exit_info_1);
+
+ x86_stl_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
+ x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+ control.event_inj)));
+ x86_stl_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
+ x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
+ control.event_inj_err)));
+ x86_stl_phys(cs,
+ env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
+
+ env->hflags2 &= ~HF2_GIF_MASK;
+ /* FIXME: Resets the current ASID register to zero (host ASID). */
+
+ /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
+
+ /* Clears the TSC_OFFSET inside the processor. */
+
+ /* If the host is in PAE mode, the processor reloads the host's PDPEs
+ from the page table indicated by the host's CR3. If the PDPEs contain
+ illegal state, the processor causes a shutdown. */
+
+ /* Disables all breakpoints in the host DR7 register. */
+
+ /* Checks the reloaded host state for consistency. */
+
+ /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
+ host's code segment or non-canonical (in the case of long mode), a
+ #GP fault is delivered inside the host. */
+
+ /* remove any pending exception */
+ cs->exception_index = -1;
+ env->error_code = 0;
+ env->old_exception = -1;
+
+ cpu_loop_exit(cs);
+}
+
+void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
+{
+ helper_vmexit(env, exit_code, exit_info_1);
+}
+
+#endif
diff --git a/target/i386/trace-events b/target/i386/trace-events
new file mode 100644
index 0000000000..de6a1cf0cb
--- /dev/null
+++ b/target/i386/trace-events
@@ -0,0 +1,7 @@
+# See docs/tracing.txt for syntax documentation.
+
+# target/i386/kvm.c
+kvm_x86_fixup_msi_error(uint32_t gsi) "VT-d failed to remap interrupt for GSI %" PRIu32
+kvm_x86_add_msi_route(int virq) "Adding route entry for virq %d"
+kvm_x86_remove_msi_route(int virq) "Removing route entry for virq %d"
+kvm_x86_update_msi_routes(int num) "Updated %d MSI routes"
diff --git a/target/i386/translate.c b/target/i386/translate.c
new file mode 100644
index 0000000000..324103c885
--- /dev/null
+++ b/target/i386/translate.c
@@ -0,0 +1,8502 @@
+/*
+ * i386 translation
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+
+#include "qemu/host-utils.h"
+#include "cpu.h"
+#include "disas/disas.h"
+#include "exec/exec-all.h"
+#include "tcg-op.h"
+#include "exec/cpu_ldst.h"
+
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+
+#include "trace-tcg.h"
+#include "exec/log.h"
+
+
+#define PREFIX_REPZ 0x01
+#define PREFIX_REPNZ 0x02
+#define PREFIX_LOCK 0x04
+#define PREFIX_DATA 0x08
+#define PREFIX_ADR 0x10
+#define PREFIX_VEX 0x20
+
+#ifdef TARGET_X86_64
+#define CODE64(s) ((s)->code64)
+#define REX_X(s) ((s)->rex_x)
+#define REX_B(s) ((s)->rex_b)
+#else
+#define CODE64(s) 0
+#define REX_X(s) 0
+#define REX_B(s) 0
+#endif
+
+#ifdef TARGET_X86_64
+# define ctztl ctz64
+# define clztl clz64
+#else
+# define ctztl ctz32
+# define clztl clz32
+#endif
+
+/* For a switch indexed by MODRM, match all memory operands for a given OP. */
+#define CASE_MODRM_MEM_OP(OP) \
+ case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
+ case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
+ case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7
+
+#define CASE_MODRM_OP(OP) \
+ case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
+ case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
+ case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
+ case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
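+
+/*
+ * For illustration (not part of the generated code): CASE_MODRM_MEM_OP(2)
+ * expands to the case ranges 0x10...0x17, 0x50...0x57 and 0x90...0x97,
+ * i.e. every ModRM byte whose reg field is /2 and whose mod field selects
+ * a memory operand; CASE_MODRM_OP(2) additionally covers 0xd0...0xd7
+ * (register operands, mod == 3).
+ */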
+
+//#define MACRO_TEST 1
+
+/* global register indexes */
+static TCGv_env cpu_env;
+static TCGv cpu_A0;
+static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT;
+static TCGv_i32 cpu_cc_op;
+static TCGv cpu_regs[CPU_NB_REGS];
+static TCGv cpu_seg_base[6];
+static TCGv_i64 cpu_bndl[4];
+static TCGv_i64 cpu_bndu[4];
+/* local temps */
+static TCGv cpu_T0, cpu_T1;
+/* local register indexes (only used inside old micro ops) */
+static TCGv cpu_tmp0, cpu_tmp4;
+static TCGv_ptr cpu_ptr0, cpu_ptr1;
+static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
+static TCGv_i64 cpu_tmp1_i64;
+
+#include "exec/gen-icount.h"
+
+#ifdef TARGET_X86_64
+static int x86_64_hregs;
+#endif
+
+typedef struct DisasContext {
+ /* current insn context */
+ int override; /* -1 if no override */
+ int prefix;
+ TCGMemOp aflag;
+ TCGMemOp dflag;
+ target_ulong pc_start;
+ target_ulong pc; /* pc = eip + cs_base */
+ int is_jmp; /* 1 means jump (stop translation), 2 means CPU
+ static state change (stop translation) */
+ /* current block context */
+ target_ulong cs_base; /* base of CS segment */
+ int pe; /* protected mode */
+ int code32; /* 32 bit code segment */
+#ifdef TARGET_X86_64
+ int lma; /* long mode active */
+ int code64; /* 64 bit code segment */
+ int rex_x, rex_b;
+#endif
+ int vex_l; /* vex vector length */
+ int vex_v; /* vex vvvv register, without 1's complement. */
+ int ss32; /* 32 bit stack segment */
+ CCOp cc_op; /* current CC operation */
+ bool cc_op_dirty;
+ int addseg; /* non zero if any of DS/ES/SS has a non zero base */
+ int f_st; /* currently unused */
+ int vm86; /* vm86 mode */
+ int cpl;
+ int iopl;
+ int tf; /* TF cpu flag */
+ int singlestep_enabled; /* "hardware" single step enabled */
+ int jmp_opt; /* use direct block chaining for direct jumps */
+ int repz_opt; /* optimize jumps within repz instructions */
+ int mem_index; /* select memory access functions */
+ uint64_t flags; /* all execution flags */
+ struct TranslationBlock *tb;
+ int popl_esp_hack; /* for correct popl with esp base handling */
+ int rip_offset; /* only used in x86_64, but left for simplicity */
+ int cpuid_features;
+ int cpuid_ext_features;
+ int cpuid_ext2_features;
+ int cpuid_ext3_features;
+ int cpuid_7_0_ebx_features;
+ int cpuid_xsave_features;
+} DisasContext;
+
+static void gen_eob(DisasContext *s);
+static void gen_jmp(DisasContext *s, target_ulong eip);
+static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
+static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d);
+
+/* i386 arith/logic operations */
+enum {
+ OP_ADDL,
+ OP_ORL,
+ OP_ADCL,
+ OP_SBBL,
+ OP_ANDL,
+ OP_SUBL,
+ OP_XORL,
+ OP_CMPL,
+};
+
+/* i386 shift ops */
+enum {
+ OP_ROL,
+ OP_ROR,
+ OP_RCL,
+ OP_RCR,
+ OP_SHL,
+ OP_SHR,
+ OP_SHL1, /* undocumented */
+ OP_SAR = 7,
+};
+
+enum {
+ JCC_O,
+ JCC_B,
+ JCC_Z,
+ JCC_BE,
+ JCC_S,
+ JCC_P,
+ JCC_L,
+ JCC_LE,
+};
+
+enum {
+ /* I386 int registers */
+ OR_EAX, /* MUST be even numbered */
+ OR_ECX,
+ OR_EDX,
+ OR_EBX,
+ OR_ESP,
+ OR_EBP,
+ OR_ESI,
+ OR_EDI,
+
+ OR_TMP0 = 16, /* temporary operand register */
+ OR_TMP1,
+ OR_A0, /* temporary register used when doing address evaluation */
+};
+
+enum {
+ USES_CC_DST = 1,
+ USES_CC_SRC = 2,
+ USES_CC_SRC2 = 4,
+ USES_CC_SRCT = 8,
+};
+
+/* Bit set if the global variable is live after setting CC_OP to X. */
+static const uint8_t cc_op_live[CC_OP_NB] = {
+ [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
+ [CC_OP_EFLAGS] = USES_CC_SRC,
+ [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
+ [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
+ [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
+ [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
+ [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
+ [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
+ [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
+ [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
+ [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
+ [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
+ [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
+ [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
+ [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
+ [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
+ [CC_OP_CLR] = 0,
+};
+
+static void set_cc_op(DisasContext *s, CCOp op)
+{
+ int dead;
+
+ if (s->cc_op == op) {
+ return;
+ }
+
+ /* Discard CC computation that will no longer be used. */
+ dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
+ if (dead & USES_CC_DST) {
+ tcg_gen_discard_tl(cpu_cc_dst);
+ }
+ if (dead & USES_CC_SRC) {
+ tcg_gen_discard_tl(cpu_cc_src);
+ }
+ if (dead & USES_CC_SRC2) {
+ tcg_gen_discard_tl(cpu_cc_src2);
+ }
+ if (dead & USES_CC_SRCT) {
+ tcg_gen_discard_tl(cpu_cc_srcT);
+ }
+
+ if (op == CC_OP_DYNAMIC) {
+ /* The DYNAMIC setting is translator only, and should never be
+ stored. Thus we always consider it clean. */
+ s->cc_op_dirty = false;
+ } else {
+ /* Discard any computed CC_OP value (see shifts). */
+ if (s->cc_op == CC_OP_DYNAMIC) {
+ tcg_gen_discard_i32(cpu_cc_op);
+ }
+ s->cc_op_dirty = true;
+ }
+ s->cc_op = op;
+}
+
+static void gen_update_cc_op(DisasContext *s)
+{
+ if (s->cc_op_dirty) {
+ tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
+ s->cc_op_dirty = false;
+ }
+}
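+
+/*
+ * Example of the lazy flag handling above (illustrative): after a 32-bit ADD
+ * the translator sets cc_op to CC_OP_ADDL, so only cpu_cc_dst and cpu_cc_src
+ * are live per cc_op_live[].  If the next instruction switches to
+ * CC_OP_LOGICL, set_cc_op() discards cpu_cc_src, and cpu_cc_op itself is
+ * only written back by gen_update_cc_op() while the value is still dirty.
+ */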
+
+#ifdef TARGET_X86_64
+
+#define NB_OP_SIZES 4
+
+#else /* !TARGET_X86_64 */
+
+#define NB_OP_SIZES 3
+
+#endif /* !TARGET_X86_64 */
+
+#if defined(HOST_WORDS_BIGENDIAN)
+#define REG_B_OFFSET (sizeof(target_ulong) - 1)
+#define REG_H_OFFSET (sizeof(target_ulong) - 2)
+#define REG_W_OFFSET (sizeof(target_ulong) - 2)
+#define REG_L_OFFSET (sizeof(target_ulong) - 4)
+#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
+#else
+#define REG_B_OFFSET 0
+#define REG_H_OFFSET 1
+#define REG_W_OFFSET 0
+#define REG_L_OFFSET 0
+#define REG_LH_OFFSET 4
+#endif
+
+/* In instruction encodings for byte register accesses the
+ * register number usually indicates "low 8 bits of register N";
+ * however there are some special cases where N in 4..7 indicates
+ * [AH, CH, DH, BH], i.e. "bits 15..8 of register N-4". Return
+ * true for this special case, false otherwise.
+ */
+static inline bool byte_reg_is_xH(int reg)
+{
+ if (reg < 4) {
+ return false;
+ }
+#ifdef TARGET_X86_64
+ if (reg >= 8 || x86_64_hregs) {
+ return false;
+ }
+#endif
+ return true;
+}
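+
+/*
+ * Example (illustrative): without a REX prefix, reg == 4 in a byte insn
+ * selects AH, so byte_reg_is_xH() returns true and the value is deposited
+ * into bits 15..8 of EAX; with a REX prefix (which sets x86_64_hregs) the
+ * same encoding selects SPL instead, so the function returns false.
+ */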
+
+/* Select the size of a push/pop operation. */
+static inline TCGMemOp mo_pushpop(DisasContext *s, TCGMemOp ot)
+{
+ if (CODE64(s)) {
+ return ot == MO_16 ? MO_16 : MO_64;
+ } else {
+ return ot;
+ }
+}
+
+/* Select the size of the stack pointer. */
+static inline TCGMemOp mo_stacksize(DisasContext *s)
+{
+ return CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16;
+}
+
+/* Select only size 64 else 32. Used for SSE operand sizes. */
+static inline TCGMemOp mo_64_32(TCGMemOp ot)
+{
+#ifdef TARGET_X86_64
+ return ot == MO_64 ? MO_64 : MO_32;
+#else
+ return MO_32;
+#endif
+}
+
+/* Select size 8 if lsb of B is clear, else OT. Used for decoding
+ byte vs word opcodes. */
+static inline TCGMemOp mo_b_d(int b, TCGMemOp ot)
+{
+ return b & 1 ? ot : MO_8;
+}
+
+/* Select size 8 if lsb of B is clear, else OT capped at 32.
+ Used for decoding operand size of port opcodes. */
+static inline TCGMemOp mo_b_d32(int b, TCGMemOp ot)
+{
+ return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
+}
+
+static void gen_op_mov_reg_v(TCGMemOp ot, int reg, TCGv t0)
+{
+ switch (ot) {
+ case MO_8:
+ if (!byte_reg_is_xH(reg)) {
+ tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
+ } else {
+ tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
+ }
+ break;
+ case MO_16:
+ tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
+ break;
+ case MO_32:
+ /* For x86_64, this sets the higher half of the register to zero.
+ For i386, this is equivalent to a mov. */
+ tcg_gen_ext32u_tl(cpu_regs[reg], t0);
+ break;
+#ifdef TARGET_X86_64
+ case MO_64:
+ tcg_gen_mov_tl(cpu_regs[reg], t0);
+ break;
+#endif
+ default:
+ tcg_abort();
+ }
+}
+
+static inline void gen_op_mov_v_reg(TCGMemOp ot, TCGv t0, int reg)
+{
+ if (ot == MO_8 && byte_reg_is_xH(reg)) {
+ tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
+ tcg_gen_ext8u_tl(t0, t0);
+ } else {
+ tcg_gen_mov_tl(t0, cpu_regs[reg]);
+ }
+}
+
+static void gen_add_A0_im(DisasContext *s, int val)
+{
+ tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
+ if (!CODE64(s)) {
+ tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
+ }
+}
+
+static inline void gen_op_jmp_v(TCGv dest)
+{
+ tcg_gen_st_tl(dest, cpu_env, offsetof(CPUX86State, eip));
+}
+
+static inline void gen_op_add_reg_im(TCGMemOp size, int reg, int32_t val)
+{
+ tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
+ gen_op_mov_reg_v(size, reg, cpu_tmp0);
+}
+
+static inline void gen_op_add_reg_T0(TCGMemOp size, int reg)
+{
+ tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T0);
+ gen_op_mov_reg_v(size, reg, cpu_tmp0);
+}
+
+static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
+{
+ tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
+}
+
+static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
+{
+ tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
+}
+
+static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
+{
+ if (d == OR_TMP0) {
+ gen_op_st_v(s, idx, cpu_T0, cpu_A0);
+ } else {
+ gen_op_mov_reg_v(idx, d, cpu_T0);
+ }
+}
+
+static inline void gen_jmp_im(target_ulong pc)
+{
+ tcg_gen_movi_tl(cpu_tmp0, pc);
+ gen_op_jmp_v(cpu_tmp0);
+}
+
+/* Compute SEG:REG into A0. SEG is selected from the override segment
+ (OVR_SEG) and the default segment (DEF_SEG). OVR_SEG may be -1 to
+ indicate no override. */
+static void gen_lea_v_seg(DisasContext *s, TCGMemOp aflag, TCGv a0,
+ int def_seg, int ovr_seg)
+{
+ switch (aflag) {
+#ifdef TARGET_X86_64
+ case MO_64:
+ if (ovr_seg < 0) {
+ tcg_gen_mov_tl(cpu_A0, a0);
+ return;
+ }
+ break;
+#endif
+ case MO_32:
+ /* 32 bit address */
+ if (ovr_seg < 0 && s->addseg) {
+ ovr_seg = def_seg;
+ }
+ if (ovr_seg < 0) {
+ tcg_gen_ext32u_tl(cpu_A0, a0);
+ return;
+ }
+ break;
+ case MO_16:
+ /* 16 bit address */
+ tcg_gen_ext16u_tl(cpu_A0, a0);
+ a0 = cpu_A0;
+ if (ovr_seg < 0) {
+ if (s->addseg) {
+ ovr_seg = def_seg;
+ } else {
+ return;
+ }
+ }
+ break;
+ default:
+ tcg_abort();
+ }
+
+ if (ovr_seg >= 0) {
+ TCGv seg = cpu_seg_base[ovr_seg];
+
+ if (aflag == MO_64) {
+ tcg_gen_add_tl(cpu_A0, a0, seg);
+ } else if (CODE64(s)) {
+ tcg_gen_ext32u_tl(cpu_A0, a0);
+ tcg_gen_add_tl(cpu_A0, cpu_A0, seg);
+ } else {
+ tcg_gen_add_tl(cpu_A0, a0, seg);
+ tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
+ }
+ }
+}
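+
+/*
+ * Worked example for the 16-bit case above (illustrative values): for a
+ * DS-relative access with BX = 0xffff and a DS base of 0x1000 (so addseg is
+ * set), a0 is first zero-extended to 0xffff, the segment base is added and
+ * the sum is zero-extended to 32 bits, giving A0 = 0x10fff.
+ */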
+
+static inline void gen_string_movl_A0_ESI(DisasContext *s)
+{
+ gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);
+}
+
+static inline void gen_string_movl_A0_EDI(DisasContext *s)
+{
+ gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);
+}
+
+static inline void gen_op_movl_T0_Dshift(TCGMemOp ot)
+{
+ tcg_gen_ld32s_tl(cpu_T0, cpu_env, offsetof(CPUX86State, df));
+ tcg_gen_shli_tl(cpu_T0, cpu_T0, ot);
+};
+
+static TCGv gen_ext_tl(TCGv dst, TCGv src, TCGMemOp size, bool sign)
+{
+ switch (size) {
+ case MO_8:
+ if (sign) {
+ tcg_gen_ext8s_tl(dst, src);
+ } else {
+ tcg_gen_ext8u_tl(dst, src);
+ }
+ return dst;
+ case MO_16:
+ if (sign) {
+ tcg_gen_ext16s_tl(dst, src);
+ } else {
+ tcg_gen_ext16u_tl(dst, src);
+ }
+ return dst;
+#ifdef TARGET_X86_64
+ case MO_32:
+ if (sign) {
+ tcg_gen_ext32s_tl(dst, src);
+ } else {
+ tcg_gen_ext32u_tl(dst, src);
+ }
+ return dst;
+#endif
+ default:
+ return src;
+ }
+}
+
+static void gen_extu(TCGMemOp ot, TCGv reg)
+{
+ gen_ext_tl(reg, reg, ot, false);
+}
+
+static void gen_exts(TCGMemOp ot, TCGv reg)
+{
+ gen_ext_tl(reg, reg, ot, true);
+}
+
+static inline void gen_op_jnz_ecx(TCGMemOp size, TCGLabel *label1)
+{
+ tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
+ gen_extu(size, cpu_tmp0);
+ tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
+}
+
+static inline void gen_op_jz_ecx(TCGMemOp size, TCGLabel *label1)
+{
+ tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
+ gen_extu(size, cpu_tmp0);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
+}
+
+static void gen_helper_in_func(TCGMemOp ot, TCGv v, TCGv_i32 n)
+{
+ switch (ot) {
+ case MO_8:
+ gen_helper_inb(v, cpu_env, n);
+ break;
+ case MO_16:
+ gen_helper_inw(v, cpu_env, n);
+ break;
+ case MO_32:
+ gen_helper_inl(v, cpu_env, n);
+ break;
+ default:
+ tcg_abort();
+ }
+}
+
+static void gen_helper_out_func(TCGMemOp ot, TCGv_i32 v, TCGv_i32 n)
+{
+ switch (ot) {
+ case MO_8:
+ gen_helper_outb(cpu_env, v, n);
+ break;
+ case MO_16:
+ gen_helper_outw(cpu_env, v, n);
+ break;
+ case MO_32:
+ gen_helper_outl(cpu_env, v, n);
+ break;
+ default:
+ tcg_abort();
+ }
+}
+
+static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
+ uint32_t svm_flags)
+{
+ target_ulong next_eip;
+
+ if (s->pe && (s->cpl > s->iopl || s->vm86)) {
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+ switch (ot) {
+ case MO_8:
+ gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
+ break;
+ case MO_16:
+ gen_helper_check_iow(cpu_env, cpu_tmp2_i32);
+ break;
+ case MO_32:
+ gen_helper_check_iol(cpu_env, cpu_tmp2_i32);
+ break;
+ default:
+ tcg_abort();
+ }
+ }
+ if (s->flags & HF_SVMI_MASK) {
+ gen_update_cc_op(s);
+ gen_jmp_im(cur_eip);
+ svm_flags |= (1 << (4 + ot));
+ next_eip = s->pc - s->cs_base;
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+ gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
+ tcg_const_i32(svm_flags),
+ tcg_const_i32(next_eip - cur_eip));
+ }
+}
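+
+/*
+ * Illustrative note on the svm_flags encoding above: ot is MO_8/MO_16/MO_32
+ * (0/1/2), so (1 << (4 + ot)) sets bit 4 for byte, bit 5 for word and bit 6
+ * for dword accesses; helper_svm_check_io() recovers the size from those
+ * bits.  next_eip - cur_eip is passed so the helper can store the address of
+ * the following instruction in exit_info_2 before a SVM_EXIT_IOIO vmexit.
+ */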
+
+static inline void gen_movs(DisasContext *s, TCGMemOp ot)
+{
+ gen_string_movl_A0_ESI(s);
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+ gen_string_movl_A0_EDI(s);
+ gen_op_st_v(s, ot, cpu_T0, cpu_A0);
+ gen_op_movl_T0_Dshift(ot);
+ gen_op_add_reg_T0(s->aflag, R_ESI);
+ gen_op_add_reg_T0(s->aflag, R_EDI);
+}
+
+static void gen_op_update1_cc(void)
+{
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+}
+
+static void gen_op_update2_cc(void)
+{
+ tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+}
+
+static void gen_op_update3_cc(TCGv reg)
+{
+ tcg_gen_mov_tl(cpu_cc_src2, reg);
+ tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+}
+
+static inline void gen_op_testl_T0_T1_cc(void)
+{
+ tcg_gen_and_tl(cpu_cc_dst, cpu_T0, cpu_T1);
+}
+
+static void gen_op_update_neg_cc(void)
+{
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+ tcg_gen_neg_tl(cpu_cc_src, cpu_T0);
+ tcg_gen_movi_tl(cpu_cc_srcT, 0);
+}
+
+/* compute all eflags to cc_src */
+static void gen_compute_eflags(DisasContext *s)
+{
+ TCGv zero, dst, src1, src2;
+ int live, dead;
+
+ if (s->cc_op == CC_OP_EFLAGS) {
+ return;
+ }
+ if (s->cc_op == CC_OP_CLR) {
+ tcg_gen_movi_tl(cpu_cc_src, CC_Z | CC_P);
+ set_cc_op(s, CC_OP_EFLAGS);
+ return;
+ }
+
+ TCGV_UNUSED(zero);
+ dst = cpu_cc_dst;
+ src1 = cpu_cc_src;
+ src2 = cpu_cc_src2;
+
+ /* Take care to not read values that are not live. */
+ live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
+ dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
+ if (dead) {
+ zero = tcg_const_tl(0);
+ if (dead & USES_CC_DST) {
+ dst = zero;
+ }
+ if (dead & USES_CC_SRC) {
+ src1 = zero;
+ }
+ if (dead & USES_CC_SRC2) {
+ src2 = zero;
+ }
+ }
+
+ gen_update_cc_op(s);
+ gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
+ set_cc_op(s, CC_OP_EFLAGS);
+
+ if (dead) {
+ tcg_temp_free(zero);
+ }
+}
+
+typedef struct CCPrepare {
+ TCGCond cond;
+ TCGv reg;
+ TCGv reg2;
+ target_ulong imm;
+ target_ulong mask;
+ bool use_reg2;
+ bool no_setcond;
+} CCPrepare;
+
+/* compute eflags.C to reg */
+static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
+{
+ TCGv t0, t1;
+ int size, shift;
+
+ switch (s->cc_op) {
+ case CC_OP_SUBB ... CC_OP_SUBQ:
+ /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
+ size = s->cc_op - CC_OP_SUBB;
+ t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
+ /* If no temporary was used, be careful not to alias t1 and t0. */
+ t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg;
+ tcg_gen_mov_tl(t0, cpu_cc_srcT);
+ gen_extu(size, t0);
+ goto add_sub;
+
+ case CC_OP_ADDB ... CC_OP_ADDQ:
+ /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
+ size = s->cc_op - CC_OP_ADDB;
+ t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
+ t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
+ add_sub:
+ return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
+ .reg2 = t1, .mask = -1, .use_reg2 = true };
+
+ case CC_OP_LOGICB ... CC_OP_LOGICQ:
+ case CC_OP_CLR:
+ return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
+
+ case CC_OP_INCB ... CC_OP_INCQ:
+ case CC_OP_DECB ... CC_OP_DECQ:
+ return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
+ .mask = -1, .no_setcond = true };
+
+ case CC_OP_SHLB ... CC_OP_SHLQ:
+ /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
+ size = s->cc_op - CC_OP_SHLB;
+ shift = (8 << size) - 1;
+ return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
+ .mask = (target_ulong)1 << shift };
+
+ case CC_OP_MULB ... CC_OP_MULQ:
+ return (CCPrepare) { .cond = TCG_COND_NE,
+ .reg = cpu_cc_src, .mask = -1 };
+
+ case CC_OP_BMILGB ... CC_OP_BMILGQ:
+ size = s->cc_op - CC_OP_BMILGB;
+ t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
+ return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
+
+ case CC_OP_ADCX:
+ case CC_OP_ADCOX:
+ return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
+ .mask = -1, .no_setcond = true };
+
+ case CC_OP_EFLAGS:
+ case CC_OP_SARB ... CC_OP_SARQ:
+ /* CC_SRC & 1 */
+ return (CCPrepare) { .cond = TCG_COND_NE,
+ .reg = cpu_cc_src, .mask = CC_C };
+
+ default:
+ /* The need to compute only C from CC_OP_DYNAMIC is important
+ in efficiently implementing e.g. INC at the start of a TB. */
+ gen_update_cc_op(s);
+ gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
+ cpu_cc_src2, cpu_cc_op);
+ return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
+ .mask = -1, .no_setcond = true };
+ }
+}
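+
+/*
+ * Worked example for the CC_OP_SUB case above (illustrative): an 8-bit
+ * "sub" of 0x20 from 0x10 leaves CC_SRCT = 0x10, CC_SRC = 0x20 and
+ * CC_DST = 0xf0, so the prepared condition 0x10 <u 0x20 is true and
+ * eflags.C is reported as set, as on real hardware.
+ */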
+
+/* compute eflags.P to reg */
+static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
+{
+ gen_compute_eflags(s);
+ return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
+ .mask = CC_P };
+}
+
+/* compute eflags.S to reg */
+static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
+{
+ switch (s->cc_op) {
+ case CC_OP_DYNAMIC:
+ gen_compute_eflags(s);
+ /* FALLTHRU */
+ case CC_OP_EFLAGS:
+ case CC_OP_ADCX:
+ case CC_OP_ADOX:
+ case CC_OP_ADCOX:
+ return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
+ .mask = CC_S };
+ case CC_OP_CLR:
+ return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
+ default:
+ {
+ TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
+ TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
+ return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
+ }
+ }
+}
+
+/* compute eflags.O to reg */
+static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
+{
+ switch (s->cc_op) {
+ case CC_OP_ADOX:
+ case CC_OP_ADCOX:
+ return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
+ .mask = -1, .no_setcond = true };
+ case CC_OP_CLR:
+ return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
+ default:
+ gen_compute_eflags(s);
+ return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
+ .mask = CC_O };
+ }
+}
+
+/* compute eflags.Z to reg */
+static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
+{
+ switch (s->cc_op) {
+ case CC_OP_DYNAMIC:
+ gen_compute_eflags(s);
+ /* FALLTHRU */
+ case CC_OP_EFLAGS:
+ case CC_OP_ADCX:
+ case CC_OP_ADOX:
+ case CC_OP_ADCOX:
+ return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
+ .mask = CC_Z };
+ case CC_OP_CLR:
+ return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
+ default:
+ {
+ TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
+ TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
+ return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
+ }
+ }
+}
+
+/* perform a conditional store into register 'reg' according to jump opcode
+ value 'b'. In the fast case, T0 is guaranteed not to be used. */
+static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
+{
+ int inv, jcc_op, cond;
+ TCGMemOp size;
+ CCPrepare cc;
+ TCGv t0;
+
+ inv = b & 1;
+ jcc_op = (b >> 1) & 7;
+
+ switch (s->cc_op) {
+ case CC_OP_SUBB ... CC_OP_SUBQ:
+ /* We optimize relational operators for the cmp/jcc case. */
+ size = s->cc_op - CC_OP_SUBB;
+ switch (jcc_op) {
+ case JCC_BE:
+ tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
+ gen_extu(size, cpu_tmp4);
+ t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
+ cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = cpu_tmp4,
+ .reg2 = t0, .mask = -1, .use_reg2 = true };
+ break;
+
+ case JCC_L:
+ cond = TCG_COND_LT;
+ goto fast_jcc_l;
+ case JCC_LE:
+ cond = TCG_COND_LE;
+ fast_jcc_l:
+ tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
+ gen_exts(size, cpu_tmp4);
+ t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, true);
+ cc = (CCPrepare) { .cond = cond, .reg = cpu_tmp4,
+ .reg2 = t0, .mask = -1, .use_reg2 = true };
+ break;
+
+ default:
+ goto slow_jcc;
+ }
+ break;
+
+ default:
+ slow_jcc:
+ /* This actually generates good code for JC, JZ and JS. */
+ switch (jcc_op) {
+ case JCC_O:
+ cc = gen_prepare_eflags_o(s, reg);
+ break;
+ case JCC_B:
+ cc = gen_prepare_eflags_c(s, reg);
+ break;
+ case JCC_Z:
+ cc = gen_prepare_eflags_z(s, reg);
+ break;
+ case JCC_BE:
+ gen_compute_eflags(s);
+ cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
+ .mask = CC_Z | CC_C };
+ break;
+ case JCC_S:
+ cc = gen_prepare_eflags_s(s, reg);
+ break;
+ case JCC_P:
+ cc = gen_prepare_eflags_p(s, reg);
+ break;
+ case JCC_L:
+ gen_compute_eflags(s);
+ if (TCGV_EQUAL(reg, cpu_cc_src)) {
+ reg = cpu_tmp0;
+ }
+ tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
+ tcg_gen_xor_tl(reg, reg, cpu_cc_src);
+ cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
+ .mask = CC_S };
+ break;
+ default:
+ case JCC_LE:
+ gen_compute_eflags(s);
+ if (TCGV_EQUAL(reg, cpu_cc_src)) {
+ reg = cpu_tmp0;
+ }
+ tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
+ tcg_gen_xor_tl(reg, reg, cpu_cc_src);
+ cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
+ .mask = CC_S | CC_Z };
+ break;
+ }
+ break;
+ }
+
+ if (inv) {
+ cc.cond = tcg_invert_cond(cc.cond);
+ }
+ return cc;
+}
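+
+/*
+ * Note on the slow_jcc JCC_L/JCC_LE paths above (illustrative): in EFLAGS,
+ * SF is bit 7 (CC_S == 0x80) and OF is bit 11 (CC_O == 0x800), so shifting
+ * the computed flags right by 4 aligns O with S and the XOR yields SF != OF,
+ * which is exactly the signed "less than" condition.
+ */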
+
+static void gen_setcc1(DisasContext *s, int b, TCGv reg)
+{
+ CCPrepare cc = gen_prepare_cc(s, b, reg);
+
+ if (cc.no_setcond) {
+ if (cc.cond == TCG_COND_EQ) {
+ tcg_gen_xori_tl(reg, cc.reg, 1);
+ } else {
+ tcg_gen_mov_tl(reg, cc.reg);
+ }
+ return;
+ }
+
+ if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
+ cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
+ tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
+ tcg_gen_andi_tl(reg, reg, 1);
+ return;
+ }
+ if (cc.mask != -1) {
+ tcg_gen_andi_tl(reg, cc.reg, cc.mask);
+ cc.reg = reg;
+ }
+ if (cc.use_reg2) {
+ tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
+ } else {
+ tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
+ }
+}
+
+static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
+{
+ gen_setcc1(s, JCC_B << 1, reg);
+}
+
+/* generate a conditional jump to label 'l1' according to jump opcode
+ value 'b'. In the fast case, T0 is guaranteed not to be used. */
+static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
+{
+ CCPrepare cc = gen_prepare_cc(s, b, cpu_T0);
+
+ if (cc.mask != -1) {
+ tcg_gen_andi_tl(cpu_T0, cc.reg, cc.mask);
+ cc.reg = cpu_T0;
+ }
+ if (cc.use_reg2) {
+ tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
+ } else {
+ tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
+ }
+}
+
+/* Generate a conditional jump to label 'l1' according to jump opcode
+ value 'b'. In the fast case, T0 is guaranteed not to be used.
+ A translation block must end soon. */
+static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
+{
+ CCPrepare cc = gen_prepare_cc(s, b, cpu_T0);
+
+ gen_update_cc_op(s);
+ if (cc.mask != -1) {
+ tcg_gen_andi_tl(cpu_T0, cc.reg, cc.mask);
+ cc.reg = cpu_T0;
+ }
+ set_cc_op(s, CC_OP_DYNAMIC);
+ if (cc.use_reg2) {
+ tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
+ } else {
+ tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
+ }
+}
+
+/* XXX: does not work with gdbstub "ice" single step - not a
+ serious problem */
+static TCGLabel *gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ gen_op_jnz_ecx(s->aflag, l1);
+ gen_set_label(l2);
+ gen_jmp_tb(s, next_eip, 1);
+ gen_set_label(l1);
+ return l2;
+}
+
+static inline void gen_stos(DisasContext *s, TCGMemOp ot)
+{
+ gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
+ gen_string_movl_A0_EDI(s);
+ gen_op_st_v(s, ot, cpu_T0, cpu_A0);
+ gen_op_movl_T0_Dshift(ot);
+ gen_op_add_reg_T0(s->aflag, R_EDI);
+}
+
+static inline void gen_lods(DisasContext *s, TCGMemOp ot)
+{
+ gen_string_movl_A0_ESI(s);
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+ gen_op_mov_reg_v(ot, R_EAX, cpu_T0);
+ gen_op_movl_T0_Dshift(ot);
+ gen_op_add_reg_T0(s->aflag, R_ESI);
+}
+
+static inline void gen_scas(DisasContext *s, TCGMemOp ot)
+{
+ gen_string_movl_A0_EDI(s);
+ gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
+ gen_op(s, OP_CMPL, ot, R_EAX);
+ gen_op_movl_T0_Dshift(ot);
+ gen_op_add_reg_T0(s->aflag, R_EDI);
+}
+
+static inline void gen_cmps(DisasContext *s, TCGMemOp ot)
+{
+ gen_string_movl_A0_EDI(s);
+ gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
+ gen_string_movl_A0_ESI(s);
+ gen_op(s, OP_CMPL, ot, OR_TMP0);
+ gen_op_movl_T0_Dshift(ot);
+ gen_op_add_reg_T0(s->aflag, R_ESI);
+ gen_op_add_reg_T0(s->aflag, R_EDI);
+}
+
+static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
+{
+ if (s->flags & HF_IOBPT_MASK) {
+ TCGv_i32 t_size = tcg_const_i32(1 << ot);
+ TCGv t_next = tcg_const_tl(s->pc - s->cs_base);
+
+ gen_helper_bpt_io(cpu_env, t_port, t_size, t_next);
+ tcg_temp_free_i32(t_size);
+ tcg_temp_free(t_next);
+ }
+}
+
+
+static inline void gen_ins(DisasContext *s, TCGMemOp ot)
+{
+ if (s->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_string_movl_A0_EDI(s);
+ /* Note: we must do this dummy write first to be restartable in
+ case of a page fault. */
+ tcg_gen_movi_tl(cpu_T0, 0);
+ gen_op_st_v(s, ot, cpu_T0, cpu_A0);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
+ tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
+ gen_helper_in_func(ot, cpu_T0, cpu_tmp2_i32);
+ gen_op_st_v(s, ot, cpu_T0, cpu_A0);
+ gen_op_movl_T0_Dshift(ot);
+ gen_op_add_reg_T0(s->aflag, R_EDI);
+ gen_bpt_io(s, cpu_tmp2_i32, ot);
+ if (s->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_end();
+ }
+}
+
+static inline void gen_outs(DisasContext *s, TCGMemOp ot)
+{
+ if (s->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_string_movl_A0_ESI(s);
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
+ tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
+ tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T0);
+ gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
+ gen_op_movl_T0_Dshift(ot);
+ gen_op_add_reg_T0(s->aflag, R_ESI);
+ gen_bpt_io(s, cpu_tmp2_i32, ot);
+ if (s->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_end();
+ }
+}
+
+/* Same method as Valgrind: we generate jumps to the current or next
+ instruction. */
+#define GEN_REPZ(op) \
+static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \
+ target_ulong cur_eip, target_ulong next_eip) \
+{ \
+ TCGLabel *l2; \
+ gen_update_cc_op(s); \
+ l2 = gen_jz_ecx_string(s, next_eip); \
+ gen_ ## op(s, ot); \
+ gen_op_add_reg_im(s->aflag, R_ECX, -1); \
+ /* a loop would cause two single step exceptions if ECX == 1 \
+ before the rep string insn */ \
+ if (s->repz_opt) \
+ gen_op_jz_ecx(s->aflag, l2); \
+ gen_jmp(s, cur_eip); \
+}
+
+#define GEN_REPZ2(op) \
+static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \
+ target_ulong cur_eip, \
+ target_ulong next_eip, \
+ int nz) \
+{ \
+ TCGLabel *l2; \
+ gen_update_cc_op(s); \
+ l2 = gen_jz_ecx_string(s, next_eip); \
+ gen_ ## op(s, ot); \
+ gen_op_add_reg_im(s->aflag, R_ECX, -1); \
+ gen_update_cc_op(s); \
+ gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); \
+ if (s->repz_opt) \
+ gen_op_jz_ecx(s->aflag, l2); \
+ gen_jmp(s, cur_eip); \
+}
+
+GEN_REPZ(movs)
+GEN_REPZ(stos)
+GEN_REPZ(lods)
+GEN_REPZ(ins)
+GEN_REPZ(outs)
+GEN_REPZ2(scas)
+GEN_REPZ2(cmps)
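+
+/*
+ * For illustration: GEN_REPZ(movs) above defines gen_repz_movs(), which
+ * emits "if ECX == 0 jump to next_eip; do one gen_movs(); ECX--; optionally
+ * exit early if ECX reached 0; jump back to cur_eip".  Each iteration
+ * therefore re-enters the translated instruction rather than looping inside
+ * a single translation block.
+ */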
+
+static void gen_helper_fp_arith_ST0_FT0(int op)
+{
+ switch (op) {
+ case 0:
+ gen_helper_fadd_ST0_FT0(cpu_env);
+ break;
+ case 1:
+ gen_helper_fmul_ST0_FT0(cpu_env);
+ break;
+ case 2:
+ gen_helper_fcom_ST0_FT0(cpu_env);
+ break;
+ case 3:
+ gen_helper_fcom_ST0_FT0(cpu_env);
+ break;
+ case 4:
+ gen_helper_fsub_ST0_FT0(cpu_env);
+ break;
+ case 5:
+ gen_helper_fsubr_ST0_FT0(cpu_env);
+ break;
+ case 6:
+ gen_helper_fdiv_ST0_FT0(cpu_env);
+ break;
+ case 7:
+ gen_helper_fdivr_ST0_FT0(cpu_env);
+ break;
+ }
+}
+
+/* NOTE the exception in the "r" op ordering: fsub/fsubr and fdiv/fdivr
+ are swapped compared to the ST0_FT0 variants above. */
+static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
+{
+ TCGv_i32 tmp = tcg_const_i32(opreg);
+ switch (op) {
+ case 0:
+ gen_helper_fadd_STN_ST0(cpu_env, tmp);
+ break;
+ case 1:
+ gen_helper_fmul_STN_ST0(cpu_env, tmp);
+ break;
+ case 4:
+ gen_helper_fsubr_STN_ST0(cpu_env, tmp);
+ break;
+ case 5:
+ gen_helper_fsub_STN_ST0(cpu_env, tmp);
+ break;
+ case 6:
+ gen_helper_fdivr_STN_ST0(cpu_env, tmp);
+ break;
+ case 7:
+ gen_helper_fdiv_STN_ST0(cpu_env, tmp);
+ break;
+ }
+}
+
+/* if d == OR_TMP0, it means memory operand (address in A0) */
+static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d)
+{
+ if (d != OR_TMP0) {
+ gen_op_mov_v_reg(ot, cpu_T0, d);
+ } else if (!(s1->prefix & PREFIX_LOCK)) {
+ gen_op_ld_v(s1, ot, cpu_T0, cpu_A0);
+ }
+ switch (op) {
+ case OP_ADCL:
+ gen_compute_eflags_c(s1, cpu_tmp4);
+ if (s1->prefix & PREFIX_LOCK) {
+ tcg_gen_add_tl(cpu_T0, cpu_tmp4, cpu_T1);
+ tcg_gen_atomic_add_fetch_tl(cpu_T0, cpu_A0, cpu_T0,
+ s1->mem_index, ot | MO_LE);
+ } else {
+ tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
+ tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_tmp4);
+ gen_op_st_rm_T0_A0(s1, ot, d);
+ }
+ gen_op_update3_cc(cpu_tmp4);
+ set_cc_op(s1, CC_OP_ADCB + ot);
+ break;
+ case OP_SBBL:
+ gen_compute_eflags_c(s1, cpu_tmp4);
+ if (s1->prefix & PREFIX_LOCK) {
+ tcg_gen_add_tl(cpu_T0, cpu_T1, cpu_tmp4);
+ tcg_gen_neg_tl(cpu_T0, cpu_T0);
+ tcg_gen_atomic_add_fetch_tl(cpu_T0, cpu_A0, cpu_T0,
+ s1->mem_index, ot | MO_LE);
+ } else {
+ tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_T1);
+ tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_tmp4);
+ gen_op_st_rm_T0_A0(s1, ot, d);
+ }
+ gen_op_update3_cc(cpu_tmp4);
+ set_cc_op(s1, CC_OP_SBBB + ot);
+ break;
+ case OP_ADDL:
+ if (s1->prefix & PREFIX_LOCK) {
+ tcg_gen_atomic_add_fetch_tl(cpu_T0, cpu_A0, cpu_T1,
+ s1->mem_index, ot | MO_LE);
+ } else {
+ tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
+ gen_op_st_rm_T0_A0(s1, ot, d);
+ }
+ gen_op_update2_cc();
+ set_cc_op(s1, CC_OP_ADDB + ot);
+ break;
+ case OP_SUBL:
+ if (s1->prefix & PREFIX_LOCK) {
+ tcg_gen_neg_tl(cpu_T0, cpu_T1);
+ tcg_gen_atomic_fetch_add_tl(cpu_cc_srcT, cpu_A0, cpu_T0,
+ s1->mem_index, ot | MO_LE);
+ tcg_gen_sub_tl(cpu_T0, cpu_cc_srcT, cpu_T1);
+ } else {
+ tcg_gen_mov_tl(cpu_cc_srcT, cpu_T0);
+ tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_T1);
+ gen_op_st_rm_T0_A0(s1, ot, d);
+ }
+ gen_op_update2_cc();
+ set_cc_op(s1, CC_OP_SUBB + ot);
+ break;
+ default:
+ case OP_ANDL:
+ if (s1->prefix & PREFIX_LOCK) {
+ tcg_gen_atomic_and_fetch_tl(cpu_T0, cpu_A0, cpu_T1,
+ s1->mem_index, ot | MO_LE);
+ } else {
+ tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
+ gen_op_st_rm_T0_A0(s1, ot, d);
+ }
+ gen_op_update1_cc();
+ set_cc_op(s1, CC_OP_LOGICB + ot);
+ break;
+ case OP_ORL:
+ if (s1->prefix & PREFIX_LOCK) {
+ tcg_gen_atomic_or_fetch_tl(cpu_T0, cpu_A0, cpu_T1,
+ s1->mem_index, ot | MO_LE);
+ } else {
+ tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_T1);
+ gen_op_st_rm_T0_A0(s1, ot, d);
+ }
+ gen_op_update1_cc();
+ set_cc_op(s1, CC_OP_LOGICB + ot);
+ break;
+ case OP_XORL:
+ if (s1->prefix & PREFIX_LOCK) {
+ tcg_gen_atomic_xor_fetch_tl(cpu_T0, cpu_A0, cpu_T1,
+ s1->mem_index, ot | MO_LE);
+ } else {
+ tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_T1);
+ gen_op_st_rm_T0_A0(s1, ot, d);
+ }
+ gen_op_update1_cc();
+ set_cc_op(s1, CC_OP_LOGICB + ot);
+ break;
+ case OP_CMPL:
+ tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
+ tcg_gen_mov_tl(cpu_cc_srcT, cpu_T0);
+ tcg_gen_sub_tl(cpu_cc_dst, cpu_T0, cpu_T1);
+ set_cc_op(s1, CC_OP_SUBB + ot);
+ break;
+ }
+}
+
+/* if d == OR_TMP0, it means memory operand (address in A0) */
+static void gen_inc(DisasContext *s1, TCGMemOp ot, int d, int c)
+{
+ if (s1->prefix & PREFIX_LOCK) {
+ tcg_gen_movi_tl(cpu_T0, c > 0 ? 1 : -1);
+ tcg_gen_atomic_add_fetch_tl(cpu_T0, cpu_A0, cpu_T0,
+ s1->mem_index, ot | MO_LE);
+ } else {
+ if (d != OR_TMP0) {
+ gen_op_mov_v_reg(ot, cpu_T0, d);
+ } else {
+ gen_op_ld_v(s1, ot, cpu_T0, cpu_A0);
+ }
+ tcg_gen_addi_tl(cpu_T0, cpu_T0, (c > 0 ? 1 : -1));
+ gen_op_st_rm_T0_A0(s1, ot, d);
+ }
+
+ gen_compute_eflags_c(s1, cpu_cc_src);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+ set_cc_op(s1, (c > 0 ? CC_OP_INCB : CC_OP_DECB) + ot);
+}
+
+static void gen_shift_flags(DisasContext *s, TCGMemOp ot, TCGv result,
+ TCGv shm1, TCGv count, bool is_right)
+{
+ TCGv_i32 z32, s32, oldop;
+ TCGv z_tl;
+
+ /* Store the results into the CC variables. If we know that the
+ variable must be dead, store unconditionally. Otherwise we must
+ not disrupt the current contents. */
+ z_tl = tcg_const_tl(0);
+ if (cc_op_live[s->cc_op] & USES_CC_DST) {
+ tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
+ result, cpu_cc_dst);
+ } else {
+ tcg_gen_mov_tl(cpu_cc_dst, result);
+ }
+ if (cc_op_live[s->cc_op] & USES_CC_SRC) {
+ tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
+ shm1, cpu_cc_src);
+ } else {
+ tcg_gen_mov_tl(cpu_cc_src, shm1);
+ }
+ tcg_temp_free(z_tl);
+
+ /* Get the two potential CC_OP values into temporaries. */
+ tcg_gen_movi_i32(cpu_tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
+ if (s->cc_op == CC_OP_DYNAMIC) {
+ oldop = cpu_cc_op;
+ } else {
+ tcg_gen_movi_i32(cpu_tmp3_i32, s->cc_op);
+ oldop = cpu_tmp3_i32;
+ }
+
+ /* Conditionally store the CC_OP value. */
+ z32 = tcg_const_i32(0);
+ s32 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(s32, count);
+ tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, cpu_tmp2_i32, oldop);
+ tcg_temp_free_i32(z32);
+ tcg_temp_free_i32(s32);
+
+ /* The CC_OP value is no longer predictable. */
+ set_cc_op(s, CC_OP_DYNAMIC);
+}
+
+static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
+ int is_right, int is_arith)
+{
+ target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
+
+ /* load */
+ if (op1 == OR_TMP0) {
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+ } else {
+ gen_op_mov_v_reg(ot, cpu_T0, op1);
+ }
+
+ tcg_gen_andi_tl(cpu_T1, cpu_T1, mask);
+ tcg_gen_subi_tl(cpu_tmp0, cpu_T1, 1);
+
+ if (is_right) {
+ if (is_arith) {
+ gen_exts(ot, cpu_T0);
+ tcg_gen_sar_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
+ tcg_gen_sar_tl(cpu_T0, cpu_T0, cpu_T1);
+ } else {
+ gen_extu(ot, cpu_T0);
+ tcg_gen_shr_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
+ tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_T1);
+ }
+ } else {
+ tcg_gen_shl_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
+ tcg_gen_shl_tl(cpu_T0, cpu_T0, cpu_T1);
+ }
+
+ /* store */
+ gen_op_st_rm_T0_A0(s, ot, op1);
+
+ gen_shift_flags(s, ot, cpu_T0, cpu_tmp0, cpu_T1, is_right);
+}
+
+static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
+ int is_right, int is_arith)
+{
+ int mask = (ot == MO_64 ? 0x3f : 0x1f);
+
+ /* load */
+ if (op1 == OR_TMP0) {
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+ } else {
+ gen_op_mov_v_reg(ot, cpu_T0, op1);
+ }
+
+ op2 &= mask;
+ if (op2 != 0) {
+ if (is_right) {
+ if (is_arith) {
+ gen_exts(ot, cpu_T0);
+ tcg_gen_sari_tl(cpu_tmp4, cpu_T0, op2 - 1);
+ tcg_gen_sari_tl(cpu_T0, cpu_T0, op2);
+ } else {
+ gen_extu(ot, cpu_T0);
+ tcg_gen_shri_tl(cpu_tmp4, cpu_T0, op2 - 1);
+ tcg_gen_shri_tl(cpu_T0, cpu_T0, op2);
+ }
+ } else {
+ tcg_gen_shli_tl(cpu_tmp4, cpu_T0, op2 - 1);
+ tcg_gen_shli_tl(cpu_T0, cpu_T0, op2);
+ }
+ }
+
+ /* store */
+ gen_op_st_rm_T0_A0(s, ot, op1);
+
+ /* update eflags if non zero shift */
+ if (op2 != 0) {
+ tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+ set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
+ }
+}
+
+static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right)
+{
+ target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
+ TCGv_i32 t0, t1;
+
+ /* load */
+ if (op1 == OR_TMP0) {
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+ } else {
+ gen_op_mov_v_reg(ot, cpu_T0, op1);
+ }
+
+ tcg_gen_andi_tl(cpu_T1, cpu_T1, mask);
+
+ switch (ot) {
+ case MO_8:
+ /* Replicate the 8-bit input so that a 32-bit rotate works. */
+ tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
+ tcg_gen_muli_tl(cpu_T0, cpu_T0, 0x01010101);
+ goto do_long;
+ case MO_16:
+ /* Replicate the 16-bit input so that a 32-bit rotate works. */
+ tcg_gen_deposit_tl(cpu_T0, cpu_T0, cpu_T0, 16, 16);
+ goto do_long;
+ do_long:
+#ifdef TARGET_X86_64
+ case MO_32:
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+ tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
+ if (is_right) {
+ tcg_gen_rotr_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
+ } else {
+ tcg_gen_rotl_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
+ }
+ tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
+ break;
+#endif
+ default:
+ if (is_right) {
+ tcg_gen_rotr_tl(cpu_T0, cpu_T0, cpu_T1);
+ } else {
+ tcg_gen_rotl_tl(cpu_T0, cpu_T0, cpu_T1);
+ }
+ break;
+ }
+
+ /* store */
+ gen_op_st_rm_T0_A0(s, ot, op1);
+
+ /* We'll need the flags computed into CC_SRC. */
+ gen_compute_eflags(s);
+
+ /* The value that was "rotated out" is now present at the other end
+ of the word. Compute C into CC_DST and O into CC_SRC2. Note that
+ since we've computed the flags into CC_SRC, these variables are
+ currently dead. */
+ if (is_right) {
+ tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask - 1);
+ tcg_gen_shri_tl(cpu_cc_dst, cpu_T0, mask);
+ tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
+ } else {
+ tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask);
+ tcg_gen_andi_tl(cpu_cc_dst, cpu_T0, 1);
+ }
+ tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
+ tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
+
+ /* Now conditionally store the new CC_OP value. If the shift count
+ is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
+ Otherwise reuse CC_OP_ADCOX which has the C and O flags split out
+ exactly as we computed above. */
+ t0 = tcg_const_i32(0);
+ t1 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t1, cpu_T1);
+ tcg_gen_movi_i32(cpu_tmp2_i32, CC_OP_ADCOX);
+ tcg_gen_movi_i32(cpu_tmp3_i32, CC_OP_EFLAGS);
+ tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
+ cpu_tmp2_i32, cpu_tmp3_i32);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+
+ /* The CC_OP value is no longer predictable. */
+ set_cc_op(s, CC_OP_DYNAMIC);
+}
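+
+/*
+ * Worked example for the flag recovery above (illustrative): a 32-bit ROL of
+ * 0x80000000 by 1 produces 0x00000001, so CC_DST (the carry) is bit 0 == 1,
+ * the new MSB is 0, and CC_SRC2 (overflow) is 0 ^ 1 == 1; this matches the
+ * architectural OF = CF XOR MSB(result) for a left rotate by one.
+ */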
+
+static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
+ int is_right)
+{
+ int mask = (ot == MO_64 ? 0x3f : 0x1f);
+ int shift;
+
+ /* load */
+ if (op1 == OR_TMP0) {
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+ } else {
+ gen_op_mov_v_reg(ot, cpu_T0, op1);
+ }
+
+ op2 &= mask;
+ if (op2 != 0) {
+ switch (ot) {
+#ifdef TARGET_X86_64
+ case MO_32:
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+ if (is_right) {
+ tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
+ } else {
+ tcg_gen_rotli_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
+ }
+ tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
+ break;
+#endif
+ default:
+ if (is_right) {
+ tcg_gen_rotri_tl(cpu_T0, cpu_T0, op2);
+ } else {
+ tcg_gen_rotli_tl(cpu_T0, cpu_T0, op2);
+ }
+ break;
+ case MO_8:
+ mask = 7;
+ goto do_shifts;
+ case MO_16:
+ mask = 15;
+ do_shifts:
+ shift = op2 & mask;
+ if (is_right) {
+ shift = mask + 1 - shift;
+ }
+ gen_extu(ot, cpu_T0);
+ tcg_gen_shli_tl(cpu_tmp0, cpu_T0, shift);
+ tcg_gen_shri_tl(cpu_T0, cpu_T0, mask + 1 - shift);
+ tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_tmp0);
+ break;
+ }
+ }
+
+ /* store */
+ gen_op_st_rm_T0_A0(s, ot, op1);
+
+ if (op2 != 0) {
+ /* Compute the flags into CC_SRC. */
+ gen_compute_eflags(s);
+
+ /* The value that was "rotated out" is now present at the other end
+ of the word. Compute C into CC_DST and O into CC_SRC2. Note that
+ since we've computed the flags into CC_SRC, these variables are
+ currently dead. */
+ if (is_right) {
+ tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask - 1);
+ tcg_gen_shri_tl(cpu_cc_dst, cpu_T0, mask);
+ tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
+ } else {
+ tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask);
+ tcg_gen_andi_tl(cpu_cc_dst, cpu_T0, 1);
+ }
+ tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
+ tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
+ set_cc_op(s, CC_OP_ADCOX);
+ }
+}
+
+/* XXX: add faster immediate = 1 case */
+static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
+ int is_right)
+{
+ gen_compute_eflags(s);
+ assert(s->cc_op == CC_OP_EFLAGS);
+
+ /* load */
+ if (op1 == OR_TMP0) {
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+ } else {
+ gen_op_mov_v_reg(ot, cpu_T0, op1);
+ }
+
+ if (is_right) {
+ switch (ot) {
+ case MO_8:
+ gen_helper_rcrb(cpu_T0, cpu_env, cpu_T0, cpu_T1);
+ break;
+ case MO_16:
+ gen_helper_rcrw(cpu_T0, cpu_env, cpu_T0, cpu_T1);
+ break;
+ case MO_32:
+ gen_helper_rcrl(cpu_T0, cpu_env, cpu_T0, cpu_T1);
+ break;
+#ifdef TARGET_X86_64
+ case MO_64:
+ gen_helper_rcrq(cpu_T0, cpu_env, cpu_T0, cpu_T1);
+ break;
+#endif
+ default:
+ tcg_abort();
+ }
+ } else {
+ switch (ot) {
+ case MO_8:
+ gen_helper_rclb(cpu_T0, cpu_env, cpu_T0, cpu_T1);
+ break;
+ case MO_16:
+ gen_helper_rclw(cpu_T0, cpu_env, cpu_T0, cpu_T1);
+ break;
+ case MO_32:
+ gen_helper_rcll(cpu_T0, cpu_env, cpu_T0, cpu_T1);
+ break;
+#ifdef TARGET_X86_64
+ case MO_64:
+ gen_helper_rclq(cpu_T0, cpu_env, cpu_T0, cpu_T1);
+ break;
+#endif
+ default:
+ tcg_abort();
+ }
+ }
+ /* store */
+ gen_op_st_rm_T0_A0(s, ot, op1);
+}
+
+/* XXX: add faster immediate case */
+static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
+ bool is_right, TCGv count_in)
+{
+ target_ulong mask = (ot == MO_64 ? 63 : 31);
+ TCGv count;
+
+ /* load */
+ if (op1 == OR_TMP0) {
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+ } else {
+ gen_op_mov_v_reg(ot, cpu_T0, op1);
+ }
+
+ count = tcg_temp_new();
+ tcg_gen_andi_tl(count, count_in, mask);
+
+ switch (ot) {
+ case MO_16:
+ /* Note: we implement the Intel behaviour for shift count > 16.
+ This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A
+ portion by constructing it as a 32-bit value. */
+ if (is_right) {
+ tcg_gen_deposit_tl(cpu_tmp0, cpu_T0, cpu_T1, 16, 16);
+ tcg_gen_mov_tl(cpu_T1, cpu_T0);
+ tcg_gen_mov_tl(cpu_T0, cpu_tmp0);
+ } else {
+ tcg_gen_deposit_tl(cpu_T1, cpu_T0, cpu_T1, 16, 16);
+ }
+ /* FALLTHRU */
+#ifdef TARGET_X86_64
+ case MO_32:
+ /* Concatenate the two 32-bit values and use a 64-bit shift. */
+ tcg_gen_subi_tl(cpu_tmp0, count, 1);
+ if (is_right) {
+ tcg_gen_concat_tl_i64(cpu_T0, cpu_T0, cpu_T1);
+ tcg_gen_shr_i64(cpu_tmp0, cpu_T0, cpu_tmp0);
+ tcg_gen_shr_i64(cpu_T0, cpu_T0, count);
+ } else {
+ tcg_gen_concat_tl_i64(cpu_T0, cpu_T1, cpu_T0);
+ tcg_gen_shl_i64(cpu_tmp0, cpu_T0, cpu_tmp0);
+ tcg_gen_shl_i64(cpu_T0, cpu_T0, count);
+ tcg_gen_shri_i64(cpu_tmp0, cpu_tmp0, 32);
+ tcg_gen_shri_i64(cpu_T0, cpu_T0, 32);
+ }
+ break;
+#endif
+ default:
+ tcg_gen_subi_tl(cpu_tmp0, count, 1);
+ if (is_right) {
+ tcg_gen_shr_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
+
+ tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
+ tcg_gen_shr_tl(cpu_T0, cpu_T0, count);
+ tcg_gen_shl_tl(cpu_T1, cpu_T1, cpu_tmp4);
+ } else {
+ tcg_gen_shl_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
+ if (ot == MO_16) {
+ /* Only needed if count > 16, for Intel behaviour. */
+ tcg_gen_subfi_tl(cpu_tmp4, 33, count);
+ tcg_gen_shr_tl(cpu_tmp4, cpu_T1, cpu_tmp4);
+ tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, cpu_tmp4);
+ }
+
+ tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
+ tcg_gen_shl_tl(cpu_T0, cpu_T0, count);
+ tcg_gen_shr_tl(cpu_T1, cpu_T1, cpu_tmp4);
+ }
+ tcg_gen_movi_tl(cpu_tmp4, 0);
+ tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T1, count, cpu_tmp4,
+ cpu_tmp4, cpu_T1);
+ tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_T1);
+ break;
+ }
+
+ /* store */
+ gen_op_st_rm_T0_A0(s, ot, op1);
+
+ gen_shift_flags(s, ot, cpu_T0, cpu_tmp0, count, is_right);
+ tcg_temp_free(count);
+}
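+
+/*
+ * Worked example for the 16-bit case above (illustrative): "shrd ax, bx, 4"
+ * with AX = 0x1234 and BX = 0xabcd builds the 32-bit value 0xabcd1234 via
+ * the deposit, shifts it right by 4 to 0x0abcd123, and stores the low 16
+ * bits, giving AX = 0xd123.
+ */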
+
+static void gen_shift(DisasContext *s1, int op, TCGMemOp ot, int d, int s)
+{
+ if (s != OR_TMP1) {
+ gen_op_mov_v_reg(ot, cpu_T1, s);
+ }
+ switch (op) {
+ case OP_ROL:
+ gen_rot_rm_T1(s1, ot, d, 0);
+ break;
+ case OP_ROR:
+ gen_rot_rm_T1(s1, ot, d, 1);
+ break;
+ case OP_SHL:
+ case OP_SHL1:
+ gen_shift_rm_T1(s1, ot, d, 0, 0);
+ break;
+ case OP_SHR:
+ gen_shift_rm_T1(s1, ot, d, 1, 0);
+ break;
+ case OP_SAR:
+ gen_shift_rm_T1(s1, ot, d, 1, 1);
+ break;
+ case OP_RCL:
+ gen_rotc_rm_T1(s1, ot, d, 0);
+ break;
+ case OP_RCR:
+ gen_rotc_rm_T1(s1, ot, d, 1);
+ break;
+ }
+}
+
+static void gen_shifti(DisasContext *s1, int op, TCGMemOp ot, int d, int c)
+{
+ switch (op) {
+ case OP_ROL:
+ gen_rot_rm_im(s1, ot, d, c, 0);
+ break;
+ case OP_ROR:
+ gen_rot_rm_im(s1, ot, d, c, 1);
+ break;
+ case OP_SHL:
+ case OP_SHL1:
+ gen_shift_rm_im(s1, ot, d, c, 0, 0);
+ break;
+ case OP_SHR:
+ gen_shift_rm_im(s1, ot, d, c, 1, 0);
+ break;
+ case OP_SAR:
+ gen_shift_rm_im(s1, ot, d, c, 1, 1);
+ break;
+ default:
+ /* currently not optimized */
+ tcg_gen_movi_tl(cpu_T1, c);
+ gen_shift(s1, op, ot, d, OR_TMP1);
+ break;
+ }
+}
+
+/* Decompose an address. */
+
+typedef struct AddressParts {
+ int def_seg;
+ int base;
+ int index;
+ int scale;
+ target_long disp;
+} AddressParts;
+
+static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
+ int modrm)
+{
+ int def_seg, base, index, scale, mod, rm;
+ target_long disp;
+ bool havesib;
+
+ def_seg = R_DS;
+ index = -1;
+ scale = 0;
+ disp = 0;
+
+ mod = (modrm >> 6) & 3;
+ rm = modrm & 7;
+ base = rm | REX_B(s);
+
+ if (mod == 3) {
+ /* Normally filtered out earlier, but including this path
+ simplifies multi-byte nop, as well as bndcl, bndcu, bndcn. */
+ goto done;
+ }
+
+ switch (s->aflag) {
+ case MO_64:
+ case MO_32:
+ havesib = 0;
+ if (rm == 4) {
+ int code = cpu_ldub_code(env, s->pc++);
+ scale = (code >> 6) & 3;
+ index = ((code >> 3) & 7) | REX_X(s);
+ if (index == 4) {
+ index = -1; /* no index */
+ }
+ base = (code & 7) | REX_B(s);
+ havesib = 1;
+ }
+
+ switch (mod) {
+ case 0:
+ if ((base & 7) == 5) {
+ base = -1;
+ disp = (int32_t)cpu_ldl_code(env, s->pc);
+ s->pc += 4;
+ if (CODE64(s) && !havesib) {
+ base = -2;
+ disp += s->pc + s->rip_offset;
+ }
+ }
+ break;
+ case 1:
+ disp = (int8_t)cpu_ldub_code(env, s->pc++);
+ break;
+ default:
+ case 2:
+ disp = (int32_t)cpu_ldl_code(env, s->pc);
+ s->pc += 4;
+ break;
+ }
+
+ /* For correct popl handling with esp. */
+ if (base == R_ESP && s->popl_esp_hack) {
+ disp += s->popl_esp_hack;
+ }
+ if (base == R_EBP || base == R_ESP) {
+ def_seg = R_SS;
+ }
+ break;
+
+ case MO_16:
+ if (mod == 0) {
+ if (rm == 6) {
+ base = -1;
+ disp = cpu_lduw_code(env, s->pc);
+ s->pc += 2;
+ break;
+ }
+ } else if (mod == 1) {
+ disp = (int8_t)cpu_ldub_code(env, s->pc++);
+ } else {
+ disp = (int16_t)cpu_lduw_code(env, s->pc);
+ s->pc += 2;
+ }
+
+ switch (rm) {
+ case 0:
+ base = R_EBX;
+ index = R_ESI;
+ break;
+ case 1:
+ base = R_EBX;
+ index = R_EDI;
+ break;
+ case 2:
+ base = R_EBP;
+ index = R_ESI;
+ def_seg = R_SS;
+ break;
+ case 3:
+ base = R_EBP;
+ index = R_EDI;
+ def_seg = R_SS;
+ break;
+ case 4:
+ base = R_ESI;
+ break;
+ case 5:
+ base = R_EDI;
+ break;
+ case 6:
+ base = R_EBP;
+ def_seg = R_SS;
+ break;
+ default:
+ case 7:
+ base = R_EBX;
+ break;
+ }
+ break;
+
+ default:
+ tcg_abort();
+ }
+
+ done:
+ return (AddressParts){ def_seg, base, index, scale, disp };
+}
+
+/* Compute the address, with a minimum number of TCG ops. */
+static TCGv gen_lea_modrm_1(AddressParts a)
+{
+ TCGv ea;
+
+ TCGV_UNUSED(ea);
+ if (a.index >= 0) {
+ if (a.scale == 0) {
+ ea = cpu_regs[a.index];
+ } else {
+ tcg_gen_shli_tl(cpu_A0, cpu_regs[a.index], a.scale);
+ ea = cpu_A0;
+ }
+ if (a.base >= 0) {
+ tcg_gen_add_tl(cpu_A0, ea, cpu_regs[a.base]);
+ ea = cpu_A0;
+ }
+ } else if (a.base >= 0) {
+ ea = cpu_regs[a.base];
+ }
+ if (TCGV_IS_UNUSED(ea)) {
+ tcg_gen_movi_tl(cpu_A0, a.disp);
+ ea = cpu_A0;
+ } else if (a.disp != 0) {
+ tcg_gen_addi_tl(cpu_A0, ea, a.disp);
+ ea = cpu_A0;
+ }
+
+ return ea;
+}
+
+static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
+{
+ AddressParts a = gen_lea_modrm_0(env, s, modrm);
+ TCGv ea = gen_lea_modrm_1(a);
+ gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
+}
+
+static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
+{
+ (void)gen_lea_modrm_0(env, s, modrm);
+}
+
+/* Used for BNDCL, BNDCU, BNDCN. */
+static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm,
+ TCGCond cond, TCGv_i64 bndv)
+{
+ TCGv ea = gen_lea_modrm_1(gen_lea_modrm_0(env, s, modrm));
+
+ tcg_gen_extu_tl_i64(cpu_tmp1_i64, ea);
+ if (!CODE64(s)) {
+ tcg_gen_ext32u_i64(cpu_tmp1_i64, cpu_tmp1_i64);
+ }
+ tcg_gen_setcond_i64(cond, cpu_tmp1_i64, cpu_tmp1_i64, bndv);
+ tcg_gen_extrl_i64_i32(cpu_tmp2_i32, cpu_tmp1_i64);
+ gen_helper_bndck(cpu_env, cpu_tmp2_i32);
+}
+
+/* used for LEA and MOV AX, mem */
+static void gen_add_A0_ds_seg(DisasContext *s)
+{
+ gen_lea_v_seg(s, s->aflag, cpu_A0, R_DS, s->override);
+}
+
+/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
+ OR_TMP0 */
+static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
+ TCGMemOp ot, int reg, int is_store)
+{
+ int mod, rm;
+
+ mod = (modrm >> 6) & 3;
+ rm = (modrm & 7) | REX_B(s);
+ if (mod == 3) {
+ if (is_store) {
+ if (reg != OR_TMP0)
+ gen_op_mov_v_reg(ot, cpu_T0, reg);
+ gen_op_mov_reg_v(ot, rm, cpu_T0);
+ } else {
+ gen_op_mov_v_reg(ot, cpu_T0, rm);
+ if (reg != OR_TMP0)
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
+ }
+ } else {
+ gen_lea_modrm(env, s, modrm);
+ if (is_store) {
+ if (reg != OR_TMP0)
+ gen_op_mov_v_reg(ot, cpu_T0, reg);
+ gen_op_st_v(s, ot, cpu_T0, cpu_A0);
+ } else {
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+ if (reg != OR_TMP0)
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
+ }
+ }
+}
+
+static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, TCGMemOp ot)
+{
+ uint32_t ret;
+
+ switch (ot) {
+ case MO_8:
+ ret = cpu_ldub_code(env, s->pc);
+ s->pc++;
+ break;
+ case MO_16:
+ ret = cpu_lduw_code(env, s->pc);
+ s->pc += 2;
+ break;
+ case MO_32:
+#ifdef TARGET_X86_64
+ case MO_64:
+#endif
+ ret = cpu_ldl_code(env, s->pc);
+ s->pc += 4;
+ break;
+ default:
+ tcg_abort();
+ }
+ return ret;
+}
+
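+/* Byte count of the immediate for a given operand size; 64-bit
+ operand size still fetches only a 4-byte immediate here. */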
+static inline int insn_const_size(TCGMemOp ot)
+{
+ if (ot <= MO_32) {
+ return 1 << ot;
+ } else {
+ return 4;
+ }
+}
+
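+/* Direct block chaining (goto_tb) is only allowed when the target PC
+ stays on the same guest page as the start of this TB or as the
+ current instruction, so page-level TB invalidation still works; the
+ restriction does not apply to user-mode emulation. */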
+static inline bool use_goto_tb(DisasContext *s, target_ulong pc)
+{
+#ifndef CONFIG_USER_ONLY
+ return (pc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
+ (pc & TARGET_PAGE_MASK) == (s->pc_start & TARGET_PAGE_MASK);
+#else
+ return true;
+#endif
+}
+
+static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
+{
+ target_ulong pc = s->cs_base + eip;
+
+ if (use_goto_tb(s, pc)) {
+ /* jump to same page: we can use a direct jump */
+ tcg_gen_goto_tb(tb_num);
+ gen_jmp_im(eip);
+ tcg_gen_exit_tb((uintptr_t)s->tb + tb_num);
+ } else {
+ /* jump to another page: currently not optimized */
+ gen_jmp_im(eip);
+ gen_eob(s);
+ }
+}
+
+static inline void gen_jcc(DisasContext *s, int b,
+ target_ulong val, target_ulong next_eip)
+{
+ TCGLabel *l1, *l2;
+
+ if (s->jmp_opt) {
+ l1 = gen_new_label();
+ gen_jcc1(s, b, l1);
+
+ gen_goto_tb(s, 0, next_eip);
+
+ gen_set_label(l1);
+ gen_goto_tb(s, 1, val);
+ s->is_jmp = DISAS_TB_JUMP;
+ } else {
+ l1 = gen_new_label();
+ l2 = gen_new_label();
+ gen_jcc1(s, b, l1);
+
+ gen_jmp_im(next_eip);
+ tcg_gen_br(l2);
+
+ gen_set_label(l1);
+ gen_jmp_im(val);
+ gen_set_label(l2);
+ gen_eob(s);
+ }
+}
+
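+/* CMOVcc: the source operand is always loaded, even when the
+ condition is false (matching hardware behaviour); a movcond then
+ selects between the loaded value and the old register contents. */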
+static void gen_cmovcc1(CPUX86State *env, DisasContext *s, TCGMemOp ot, int b,
+ int modrm, int reg)
+{
+ CCPrepare cc;
+
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+
+ cc = gen_prepare_cc(s, b, cpu_T1);
+ if (cc.mask != -1) {
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cc.reg, cc.mask);
+ cc.reg = t0;
+ }
+ if (!cc.use_reg2) {
+ cc.reg2 = tcg_const_tl(cc.imm);
+ }
+
+ tcg_gen_movcond_tl(cc.cond, cpu_T0, cc.reg, cc.reg2,
+ cpu_T0, cpu_regs[reg]);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
+
+ if (cc.mask != -1) {
+ tcg_temp_free(cc.reg);
+ }
+ if (!cc.use_reg2) {
+ tcg_temp_free(cc.reg2);
+ }
+}
+
+static inline void gen_op_movl_T0_seg(int seg_reg)
+{
+ tcg_gen_ld32u_tl(cpu_T0, cpu_env,
+ offsetof(CPUX86State,segs[seg_reg].selector));
+}
+
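+/* Real mode / vm86 segment load: there is no descriptor table, the
+ segment base is simply the 16-bit selector shifted left by four. */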
+static inline void gen_op_movl_seg_T0_vm(int seg_reg)
+{
+ tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
+ tcg_gen_st32_tl(cpu_T0, cpu_env,
+ offsetof(CPUX86State,segs[seg_reg].selector));
+ tcg_gen_shli_tl(cpu_seg_base[seg_reg], cpu_T0, 4);
+}
+
+/* move T0 to seg_reg and compute if the CPU state may change. Never
+ call this function with seg_reg == R_CS */
+static void gen_movl_seg_T0(DisasContext *s, int seg_reg)
+{
+ if (s->pe && !s->vm86) {
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+ gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32);
+ /* abort translation because the addseg value may change or
+ because ss32 may change. For R_SS, translation must always
+ stop, as special handling is needed to disable hardware
+ interrupts for the next instruction */
+ if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
+ s->is_jmp = DISAS_TB_JUMP;
+ } else {
+ gen_op_movl_seg_T0_vm(seg_reg);
+ if (seg_reg == R_SS)
+ s->is_jmp = DISAS_TB_JUMP;
+ }
+}
+
+static inline int svm_is_rep(int prefixes)
+{
+ return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
+}
+
+static inline void
+gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
+ uint32_t type, uint64_t param)
+{
+ /* no SVM activated; fast case */
+ if (likely(!(s->flags & HF_SVMI_MASK)))
+ return;
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_helper_svm_check_intercept_param(cpu_env, tcg_const_i32(type),
+ tcg_const_i64(param));
+}
+
+static inline void
+gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
+{
+ gen_svm_check_intercept_param(s, pc_start, type, 0);
+}
+
+static inline void gen_stack_update(DisasContext *s, int addend)
+{
+ gen_op_add_reg_im(mo_stacksize(s), R_ESP, addend);
+}
+
+/* Generate a push. It depends on ss32, addseg and dflag. */
+static void gen_push_v(DisasContext *s, TCGv val)
+{
+ TCGMemOp d_ot = mo_pushpop(s, s->dflag);
+ TCGMemOp a_ot = mo_stacksize(s);
+ int size = 1 << d_ot;
+ TCGv new_esp = cpu_A0;
+
+ tcg_gen_subi_tl(cpu_A0, cpu_regs[R_ESP], size);
+
+ if (!CODE64(s)) {
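+ /* With ADDSEG, gen_lea_v_seg below folds the SS base into cpu_A0,
+ so keep the raw new ESP in a scratch temp for the final
+ register write-back. */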
+ if (s->addseg) {
+ new_esp = cpu_tmp4;
+ tcg_gen_mov_tl(new_esp, cpu_A0);
+ }
+ gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
+ }
+
+ gen_op_st_v(s, d_ot, val, cpu_A0);
+ gen_op_mov_reg_v(a_ot, R_ESP, new_esp);
+}
+
+/* two step pop is necessary for precise exceptions */
+static TCGMemOp gen_pop_T0(DisasContext *s)
+{
+ TCGMemOp d_ot = mo_pushpop(s, s->dflag);
+
+ gen_lea_v_seg(s, mo_stacksize(s), cpu_regs[R_ESP], R_SS, -1);
+ gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);
+
+ return d_ot;
+}
+
+static inline void gen_pop_update(DisasContext *s, TCGMemOp ot)
+{
+ gen_stack_update(s, 1 << ot);
+}
+
+static inline void gen_stack_A0(DisasContext *s)
+{
+ gen_lea_v_seg(s, s->ss32 ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1);
+}
+
+static void gen_pusha(DisasContext *s)
+{
+ TCGMemOp s_ot = s->ss32 ? MO_32 : MO_16;
+ TCGMemOp d_ot = s->dflag;
+ int size = 1 << d_ot;
+ int i;
+
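+ /* PUSHA pushes EAX first (it ends up at the highest address) down
+ to EDI; iterate from the lowest slot so each store is a fixed
+ offset below the original ESP. */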
+ for (i = 0; i < 8; i++) {
+ tcg_gen_addi_tl(cpu_A0, cpu_regs[R_ESP], (i - 8) * size);
+ gen_lea_v_seg(s, s_ot, cpu_A0, R_SS, -1);
+ gen_op_st_v(s, d_ot, cpu_regs[7 - i], cpu_A0);
+ }
+
+ gen_stack_update(s, -8 * size);
+}
+
+static void gen_popa(DisasContext *s)
+{
+ TCGMemOp s_ot = s->ss32 ? MO_32 : MO_16;
+ TCGMemOp d_ot = s->dflag;
+ int size = 1 << d_ot;
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ /* ESP is not reloaded */
+ if (7 - i == R_ESP) {
+ continue;
+ }
+ tcg_gen_addi_tl(cpu_A0, cpu_regs[R_ESP], i * size);
+ gen_lea_v_seg(s, s_ot, cpu_A0, R_SS, -1);
+ gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);
+ gen_op_mov_reg_v(d_ot, 7 - i, cpu_T0);
+ }
+
+ gen_stack_update(s, 8 * size);
+}
+
+static void gen_enter(DisasContext *s, int esp_addend, int level)
+{
+ TCGMemOp d_ot = mo_pushpop(s, s->dflag);
+ TCGMemOp a_ot = CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16;
+ int size = 1 << d_ot;
+
+ /* Push BP; compute FrameTemp into T1. */
+ tcg_gen_subi_tl(cpu_T1, cpu_regs[R_ESP], size);
+ gen_lea_v_seg(s, a_ot, cpu_T1, R_SS, -1);
+ gen_op_st_v(s, d_ot, cpu_regs[R_EBP], cpu_A0);
+
+ level &= 31;
+ if (level != 0) {
+ int i;
+
+ /* Copy level-1 pointers from the previous frame. */
+ for (i = 1; i < level; ++i) {
+ tcg_gen_subi_tl(cpu_A0, cpu_regs[R_EBP], size * i);
+ gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
+ gen_op_ld_v(s, d_ot, cpu_tmp0, cpu_A0);
+
+ tcg_gen_subi_tl(cpu_A0, cpu_T1, size * i);
+ gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
+ gen_op_st_v(s, d_ot, cpu_tmp0, cpu_A0);
+ }
+
+ /* Push the current FrameTemp as the last level. */
+ tcg_gen_subi_tl(cpu_A0, cpu_T1, size * level);
+ gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
+ gen_op_st_v(s, d_ot, cpu_T1, cpu_A0);
+ }
+
+ /* Copy the FrameTemp value to EBP. */
+ gen_op_mov_reg_v(a_ot, R_EBP, cpu_T1);
+
+ /* Compute the final value of ESP. */
+ tcg_gen_subi_tl(cpu_T1, cpu_T1, esp_addend + size * level);
+ gen_op_mov_reg_v(a_ot, R_ESP, cpu_T1);
+}
+
+static void gen_leave(DisasContext *s)
+{
+ TCGMemOp d_ot = mo_pushpop(s, s->dflag);
+ TCGMemOp a_ot = mo_stacksize(s);
+
+ gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1);
+ gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);
+
+ tcg_gen_addi_tl(cpu_T1, cpu_regs[R_EBP], 1 << d_ot);
+
+ gen_op_mov_reg_v(d_ot, R_EBP, cpu_T0);
+ gen_op_mov_reg_v(a_ot, R_ESP, cpu_T1);
+}
+
+static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
+{
+ gen_update_cc_op(s);
+ gen_jmp_im(cur_eip);
+ gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
+ s->is_jmp = DISAS_TB_JUMP;
+}
+
+/* Generate #UD for the current instruction. The assumption here is that
+ the instruction is known, but it isn't allowed in the current cpu mode. */
+static void gen_illegal_opcode(DisasContext *s)
+{
+ gen_exception(s, EXCP06_ILLOP, s->pc_start - s->cs_base);
+}
+
+/* Similarly, except that the assumption here is that we don't decode
+ the instruction at all -- either a missing opcode, an unimplemented
+ feature, or just a bogus instruction stream. */
+static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
+{
+ gen_illegal_opcode(s);
+
+ if (qemu_loglevel_mask(LOG_UNIMP)) {
+ target_ulong pc = s->pc_start, end = s->pc;
+ qemu_log_lock();
+ qemu_log("ILLOPC: " TARGET_FMT_lx ":", pc);
+ for (; pc < end; ++pc) {
+ qemu_log(" %02x", cpu_ldub_code(env, pc));
+ }
+ qemu_log("\n");
+ qemu_log_unlock();
+ }
+}
+
+/* an interrupt is different from an exception because of the
+ privilege checks */
+static void gen_interrupt(DisasContext *s, int intno,
+ target_ulong cur_eip, target_ulong next_eip)
+{
+ gen_update_cc_op(s);
+ gen_jmp_im(cur_eip);
+ gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
+ tcg_const_i32(next_eip - cur_eip));
+ s->is_jmp = DISAS_TB_JUMP;
+}
+
+static void gen_debug(DisasContext *s, target_ulong cur_eip)
+{
+ gen_update_cc_op(s);
+ gen_jmp_im(cur_eip);
+ gen_helper_debug(cpu_env);
+ s->is_jmp = DISAS_TB_JUMP;
+}
+
+static void gen_set_hflag(DisasContext *s, uint32_t mask)
+{
+ if ((s->flags & mask) == 0) {
+ TCGv_i32 t = tcg_temp_new_i32();
+ tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
+ tcg_gen_ori_i32(t, t, mask);
+ tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
+ tcg_temp_free_i32(t);
+ s->flags |= mask;
+ }
+}
+
+static void gen_reset_hflag(DisasContext *s, uint32_t mask)
+{
+ if (s->flags & mask) {
+ TCGv_i32 t = tcg_temp_new_i32();
+ tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
+ tcg_gen_andi_i32(t, t, ~mask);
+ tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
+ tcg_temp_free_i32(t);
+ s->flags &= ~mask;
+ }
+}
+
+/* Clear BND registers during legacy branches. */
+static void gen_bnd_jmp(DisasContext *s)
+{
+ /* Clear the registers only if BND prefix is missing, MPX is enabled,
+ and if the BNDREGs are known to be in use (non-zero) already.
+ The helper itself will check BNDPRESERVE at runtime. */
+ if ((s->prefix & PREFIX_REPNZ) == 0
+ && (s->flags & HF_MPX_EN_MASK) != 0
+ && (s->flags & HF_MPX_IU_MASK) != 0) {
+ gen_helper_bnd_jmp(cpu_env);
+ }
+}
+
+/* Generate an end of block. Trace exception is also generated if needed.
+ If INHIBIT is set, set HF_INHIBIT_IRQ_MASK if it isn't already set. */
+static void gen_eob_inhibit_irq(DisasContext *s, bool inhibit)
+{
+ gen_update_cc_op(s);
+
+ /* If several instructions disable interrupts, only the first does it. */
+ if (inhibit && !(s->flags & HF_INHIBIT_IRQ_MASK)) {
+ gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
+ } else {
+ gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
+ }
+
+ if (s->tb->flags & HF_RF_MASK) {
+ gen_helper_reset_rf(cpu_env);
+ }
+ if (s->singlestep_enabled) {
+ gen_helper_debug(cpu_env);
+ } else if (s->tf) {
+ gen_helper_single_step(cpu_env);
+ } else {
+ tcg_gen_exit_tb(0);
+ }
+ s->is_jmp = DISAS_TB_JUMP;
+}
+
+/* End of block, resetting the inhibit irq flag. */
+static void gen_eob(DisasContext *s)
+{
+ gen_eob_inhibit_irq(s, false);
+}
+
+/* generate a jump to eip. No segment change must happen before,
+ as a direct call to the next block may occur */
+static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
+{
+ gen_update_cc_op(s);
+ set_cc_op(s, CC_OP_DYNAMIC);
+ if (s->jmp_opt) {
+ gen_goto_tb(s, tb_num, eip);
+ s->is_jmp = DISAS_TB_JUMP;
+ } else {
+ gen_jmp_im(eip);
+ gen_eob(s);
+ }
+}
+
+static void gen_jmp(DisasContext *s, target_ulong eip)
+{
+ gen_jmp_tb(s, eip, 0);
+}
+
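+/* Helpers moving 64-bit ("q") and 128-bit ("o") values between guest
+ memory addressed by A0 and fields of CPUX86State (MMX/XMM registers). */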
+static inline void gen_ldq_env_A0(DisasContext *s, int offset)
+{
+ tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
+ tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
+}
+
+static inline void gen_stq_env_A0(DisasContext *s, int offset)
+{
+ tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
+ tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
+}
+
+static inline void gen_ldo_env_A0(DisasContext *s, int offset)
+{
+ int mem_index = s->mem_index;
+ tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
+ tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
+ tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
+ tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
+ tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
+}
+
+static inline void gen_sto_env_A0(DisasContext *s, int offset)
+{
+ int mem_index = s->mem_index;
+ tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
+ tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
+ tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
+ tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
+ tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
+}
+
+static inline void gen_op_movo(int d_offset, int s_offset)
+{
+ tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(0)));
+ tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(0)));
+ tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(1)));
+ tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(1)));
+}
+
+static inline void gen_op_movq(int d_offset, int s_offset)
+{
+ tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
+ tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
+}
+
+static inline void gen_op_movl(int d_offset, int s_offset)
+{
+ tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
+ tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
+}
+
+static inline void gen_op_movq_env_0(int d_offset)
+{
+ tcg_gen_movi_i64(cpu_tmp1_i64, 0);
+ tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
+}
+
+typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
+typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
+typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
+typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
+typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
+typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
+ TCGv_i32 val);
+typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
+typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
+ TCGv val);
+
+#define SSE_SPECIAL ((void *)1)
+#define SSE_DUMMY ((void *)2)
+
+#define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
+#define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
+ gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
+
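+/* The second index of sse_op_table1 selects the mandatory prefix:
+ 0 = none, 1 = 0x66, 2 = 0xF3, 3 = 0xF2 (this matches the b1 value
+ computed at the top of gen_sse). */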
+static const SSEFunc_0_epp sse_op_table1[256][4] = {
+ /* 3DNow! extensions */
+ [0x0e] = { SSE_DUMMY }, /* femms */
+ [0x0f] = { SSE_DUMMY }, /* pf... */
+ /* pure SSE operations */
+ [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
+ [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
+ [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
+ [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
+ [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
+ [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
+ [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
+ [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
+
+ [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
+ [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
+ [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
+ [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
+ [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttss2si, cvttsd2si */
+ [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtss2si, cvtsd2si */
+ [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
+ [0x2f] = { gen_helper_comiss, gen_helper_comisd },
+ [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
+ [0x51] = SSE_FOP(sqrt),
+ [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
+ [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
+ [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
+ [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
+ [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
+ [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
+ [0x58] = SSE_FOP(add),
+ [0x59] = SSE_FOP(mul),
+ [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
+ gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
+ [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
+ [0x5c] = SSE_FOP(sub),
+ [0x5d] = SSE_FOP(min),
+ [0x5e] = SSE_FOP(div),
+ [0x5f] = SSE_FOP(max),
+
+ [0xc2] = SSE_FOP(cmpeq),
+ [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
+ (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */
+
+ /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX. */
+ [0x38] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
+ [0x3a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
+
+ /* MMX ops and their SSE extensions */
+ [0x60] = MMX_OP2(punpcklbw),
+ [0x61] = MMX_OP2(punpcklwd),
+ [0x62] = MMX_OP2(punpckldq),
+ [0x63] = MMX_OP2(packsswb),
+ [0x64] = MMX_OP2(pcmpgtb),
+ [0x65] = MMX_OP2(pcmpgtw),
+ [0x66] = MMX_OP2(pcmpgtl),
+ [0x67] = MMX_OP2(packuswb),
+ [0x68] = MMX_OP2(punpckhbw),
+ [0x69] = MMX_OP2(punpckhwd),
+ [0x6a] = MMX_OP2(punpckhdq),
+ [0x6b] = MMX_OP2(packssdw),
+ [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
+ [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
+ [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
+ [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
+ [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
+ (SSEFunc_0_epp)gen_helper_pshufd_xmm,
+ (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
+ (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
+ [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
+ [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
+ [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
+ [0x74] = MMX_OP2(pcmpeqb),
+ [0x75] = MMX_OP2(pcmpeqw),
+ [0x76] = MMX_OP2(pcmpeql),
+ [0x77] = { SSE_DUMMY }, /* emms */
+ [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
+ [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
+ [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
+ [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
+ [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, , movq */
+ [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
+ [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
+ [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
+ [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
+ [0xd1] = MMX_OP2(psrlw),
+ [0xd2] = MMX_OP2(psrld),
+ [0xd3] = MMX_OP2(psrlq),
+ [0xd4] = MMX_OP2(paddq),
+ [0xd5] = MMX_OP2(pmullw),
+ [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
+ [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
+ [0xd8] = MMX_OP2(psubusb),
+ [0xd9] = MMX_OP2(psubusw),
+ [0xda] = MMX_OP2(pminub),
+ [0xdb] = MMX_OP2(pand),
+ [0xdc] = MMX_OP2(paddusb),
+ [0xdd] = MMX_OP2(paddusw),
+ [0xde] = MMX_OP2(pmaxub),
+ [0xdf] = MMX_OP2(pandn),
+ [0xe0] = MMX_OP2(pavgb),
+ [0xe1] = MMX_OP2(psraw),
+ [0xe2] = MMX_OP2(psrad),
+ [0xe3] = MMX_OP2(pavgw),
+ [0xe4] = MMX_OP2(pmulhuw),
+ [0xe5] = MMX_OP2(pmulhw),
+ [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
+ [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
+ [0xe8] = MMX_OP2(psubsb),
+ [0xe9] = MMX_OP2(psubsw),
+ [0xea] = MMX_OP2(pminsw),
+ [0xeb] = MMX_OP2(por),
+ [0xec] = MMX_OP2(paddsb),
+ [0xed] = MMX_OP2(paddsw),
+ [0xee] = MMX_OP2(pmaxsw),
+ [0xef] = MMX_OP2(pxor),
+ [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
+ [0xf1] = MMX_OP2(psllw),
+ [0xf2] = MMX_OP2(pslld),
+ [0xf3] = MMX_OP2(psllq),
+ [0xf4] = MMX_OP2(pmuludq),
+ [0xf5] = MMX_OP2(pmaddwd),
+ [0xf6] = MMX_OP2(psadbw),
+ [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
+ (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
+ [0xf8] = MMX_OP2(psubb),
+ [0xf9] = MMX_OP2(psubw),
+ [0xfa] = MMX_OP2(psubl),
+ [0xfb] = MMX_OP2(psubq),
+ [0xfc] = MMX_OP2(paddb),
+ [0xfd] = MMX_OP2(paddw),
+ [0xfe] = MMX_OP2(paddl),
+};
+
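+/* Immediate-form MMX/SSE shifts (opcodes 0x71..0x73): the row is
+ ((b - 1) & 3) * 8 plus the /r field of the ModRM byte, the column
+ selects the MMX (0) or XMM (1) form. */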
+static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
+ [0 + 2] = MMX_OP2(psrlw),
+ [0 + 4] = MMX_OP2(psraw),
+ [0 + 6] = MMX_OP2(psllw),
+ [8 + 2] = MMX_OP2(psrld),
+ [8 + 4] = MMX_OP2(psrad),
+ [8 + 6] = MMX_OP2(pslld),
+ [16 + 2] = MMX_OP2(psrlq),
+ [16 + 3] = { NULL, gen_helper_psrldq_xmm },
+ [16 + 6] = MMX_OP2(psllq),
+ [16 + 7] = { NULL, gen_helper_pslldq_xmm },
+};
+
+static const SSEFunc_0_epi sse_op_table3ai[] = {
+ gen_helper_cvtsi2ss,
+ gen_helper_cvtsi2sd
+};
+
+#ifdef TARGET_X86_64
+static const SSEFunc_0_epl sse_op_table3aq[] = {
+ gen_helper_cvtsq2ss,
+ gen_helper_cvtsq2sd
+};
+#endif
+
+static const SSEFunc_i_ep sse_op_table3bi[] = {
+ gen_helper_cvttss2si,
+ gen_helper_cvtss2si,
+ gen_helper_cvttsd2si,
+ gen_helper_cvtsd2si
+};
+
+#ifdef TARGET_X86_64
+static const SSEFunc_l_ep sse_op_table3bq[] = {
+ gen_helper_cvttss2sq,
+ gen_helper_cvtss2sq,
+ gen_helper_cvttsd2sq,
+ gen_helper_cvtsd2sq
+};
+#endif
+
+static const SSEFunc_0_epp sse_op_table4[8][4] = {
+ SSE_FOP(cmpeq),
+ SSE_FOP(cmplt),
+ SSE_FOP(cmple),
+ SSE_FOP(cmpunord),
+ SSE_FOP(cmpneq),
+ SSE_FOP(cmpnlt),
+ SSE_FOP(cmpnle),
+ SSE_FOP(cmpord),
+};
+
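+/* 3DNow! operations, indexed by the instruction's imm8 suffix byte. */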
+static const SSEFunc_0_epp sse_op_table5[256] = {
+ [0x0c] = gen_helper_pi2fw,
+ [0x0d] = gen_helper_pi2fd,
+ [0x1c] = gen_helper_pf2iw,
+ [0x1d] = gen_helper_pf2id,
+ [0x8a] = gen_helper_pfnacc,
+ [0x8e] = gen_helper_pfpnacc,
+ [0x90] = gen_helper_pfcmpge,
+ [0x94] = gen_helper_pfmin,
+ [0x96] = gen_helper_pfrcp,
+ [0x97] = gen_helper_pfrsqrt,
+ [0x9a] = gen_helper_pfsub,
+ [0x9e] = gen_helper_pfadd,
+ [0xa0] = gen_helper_pfcmpgt,
+ [0xa4] = gen_helper_pfmax,
+ [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
+ [0xa7] = gen_helper_movq, /* pfrsqit1 */
+ [0xaa] = gen_helper_pfsubr,
+ [0xae] = gen_helper_pfacc,
+ [0xb0] = gen_helper_pfcmpeq,
+ [0xb4] = gen_helper_pfmul,
+ [0xb6] = gen_helper_movq, /* pfrcpit2 */
+ [0xb7] = gen_helper_pmulhrw_mmx,
+ [0xbb] = gen_helper_pswapd,
+ [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
+};
+
+struct SSEOpHelper_epp {
+ SSEFunc_0_epp op[2];
+ uint32_t ext_mask;
+};
+
+struct SSEOpHelper_eppi {
+ SSEFunc_0_eppi op[2];
+ uint32_t ext_mask;
+};
+
+#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
+#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
+#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
+#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
+#define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \
+ CPUID_EXT_PCLMULQDQ }
+#define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES }
+
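+/* Tables for the 0F 38 (no immediate) and 0F 3A (imm8) opcode maps.
+ op[0] is the MMX form, op[1] the XMM (0x66-prefixed) form; ext_mask
+ names the CPUID feature bit that must be present. */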
+static const struct SSEOpHelper_epp sse_op_table6[256] = {
+ [0x00] = SSSE3_OP(pshufb),
+ [0x01] = SSSE3_OP(phaddw),
+ [0x02] = SSSE3_OP(phaddd),
+ [0x03] = SSSE3_OP(phaddsw),
+ [0x04] = SSSE3_OP(pmaddubsw),
+ [0x05] = SSSE3_OP(phsubw),
+ [0x06] = SSSE3_OP(phsubd),
+ [0x07] = SSSE3_OP(phsubsw),
+ [0x08] = SSSE3_OP(psignb),
+ [0x09] = SSSE3_OP(psignw),
+ [0x0a] = SSSE3_OP(psignd),
+ [0x0b] = SSSE3_OP(pmulhrsw),
+ [0x10] = SSE41_OP(pblendvb),
+ [0x14] = SSE41_OP(blendvps),
+ [0x15] = SSE41_OP(blendvpd),
+ [0x17] = SSE41_OP(ptest),
+ [0x1c] = SSSE3_OP(pabsb),
+ [0x1d] = SSSE3_OP(pabsw),
+ [0x1e] = SSSE3_OP(pabsd),
+ [0x20] = SSE41_OP(pmovsxbw),
+ [0x21] = SSE41_OP(pmovsxbd),
+ [0x22] = SSE41_OP(pmovsxbq),
+ [0x23] = SSE41_OP(pmovsxwd),
+ [0x24] = SSE41_OP(pmovsxwq),
+ [0x25] = SSE41_OP(pmovsxdq),
+ [0x28] = SSE41_OP(pmuldq),
+ [0x29] = SSE41_OP(pcmpeqq),
+ [0x2a] = SSE41_SPECIAL, /* movntdqa */
+ [0x2b] = SSE41_OP(packusdw),
+ [0x30] = SSE41_OP(pmovzxbw),
+ [0x31] = SSE41_OP(pmovzxbd),
+ [0x32] = SSE41_OP(pmovzxbq),
+ [0x33] = SSE41_OP(pmovzxwd),
+ [0x34] = SSE41_OP(pmovzxwq),
+ [0x35] = SSE41_OP(pmovzxdq),
+ [0x37] = SSE42_OP(pcmpgtq),
+ [0x38] = SSE41_OP(pminsb),
+ [0x39] = SSE41_OP(pminsd),
+ [0x3a] = SSE41_OP(pminuw),
+ [0x3b] = SSE41_OP(pminud),
+ [0x3c] = SSE41_OP(pmaxsb),
+ [0x3d] = SSE41_OP(pmaxsd),
+ [0x3e] = SSE41_OP(pmaxuw),
+ [0x3f] = SSE41_OP(pmaxud),
+ [0x40] = SSE41_OP(pmulld),
+ [0x41] = SSE41_OP(phminposuw),
+ [0xdb] = AESNI_OP(aesimc),
+ [0xdc] = AESNI_OP(aesenc),
+ [0xdd] = AESNI_OP(aesenclast),
+ [0xde] = AESNI_OP(aesdec),
+ [0xdf] = AESNI_OP(aesdeclast),
+};
+
+static const struct SSEOpHelper_eppi sse_op_table7[256] = {
+ [0x08] = SSE41_OP(roundps),
+ [0x09] = SSE41_OP(roundpd),
+ [0x0a] = SSE41_OP(roundss),
+ [0x0b] = SSE41_OP(roundsd),
+ [0x0c] = SSE41_OP(blendps),
+ [0x0d] = SSE41_OP(blendpd),
+ [0x0e] = SSE41_OP(pblendw),
+ [0x0f] = SSSE3_OP(palignr),
+ [0x14] = SSE41_SPECIAL, /* pextrb */
+ [0x15] = SSE41_SPECIAL, /* pextrw */
+ [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
+ [0x17] = SSE41_SPECIAL, /* extractps */
+ [0x20] = SSE41_SPECIAL, /* pinsrb */
+ [0x21] = SSE41_SPECIAL, /* insertps */
+ [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
+ [0x40] = SSE41_OP(dpps),
+ [0x41] = SSE41_OP(dppd),
+ [0x42] = SSE41_OP(mpsadbw),
+ [0x44] = PCLMULQDQ_OP(pclmulqdq),
+ [0x60] = SSE42_OP(pcmpestrm),
+ [0x61] = SSE42_OP(pcmpestri),
+ [0x62] = SSE42_OP(pcmpistrm),
+ [0x63] = SSE42_OP(pcmpistri),
+ [0xdf] = AESNI_OP(aeskeygenassist),
+};
+
+static void gen_sse(CPUX86State *env, DisasContext *s, int b,
+ target_ulong pc_start, int rex_r)
+{
+ int b1, op1_offset, op2_offset, is_xmm, val;
+ int modrm, mod, rm, reg;
+ SSEFunc_0_epp sse_fn_epp;
+ SSEFunc_0_eppi sse_fn_eppi;
+ SSEFunc_0_ppi sse_fn_ppi;
+ SSEFunc_0_eppt sse_fn_eppt;
+ TCGMemOp ot;
+
+ b &= 0xff;
+ if (s->prefix & PREFIX_DATA)
+ b1 = 1;
+ else if (s->prefix & PREFIX_REPZ)
+ b1 = 2;
+ else if (s->prefix & PREFIX_REPNZ)
+ b1 = 3;
+ else
+ b1 = 0;
+ sse_fn_epp = sse_op_table1[b][b1];
+ if (!sse_fn_epp) {
+ goto unknown_op;
+ }
+ if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
+ is_xmm = 1;
+ } else {
+ if (b1 == 0) {
+ /* MMX case */
+ is_xmm = 0;
+ } else {
+ is_xmm = 1;
+ }
+ }
+ /* simple MMX/SSE operation */
+ if (s->flags & HF_TS_MASK) {
+ gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
+ return;
+ }
+ if (s->flags & HF_EM_MASK) {
+ illegal_op:
+ gen_illegal_opcode(s);
+ return;
+ }
+ if (is_xmm
+ && !(s->flags & HF_OSFXSR_MASK)
+ && ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))) {
+ goto unknown_op;
+ }
+ if (b == 0x0e) {
+ if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) {
+ /* If we were fully decoding this we might use illegal_op. */
+ goto unknown_op;
+ }
+ /* femms */
+ gen_helper_emms(cpu_env);
+ return;
+ }
+ if (b == 0x77) {
+ /* emms */
+ gen_helper_emms(cpu_env);
+ return;
+ }
+ /* prepare MMX state (XXX: optimize by storing fptt and fptags in
+ the static cpu state) */
+ if (!is_xmm) {
+ gen_helper_enter_mmx(cpu_env);
+ }
+
+ modrm = cpu_ldub_code(env, s->pc++);
+ reg = ((modrm >> 3) & 7);
+ if (is_xmm)
+ reg |= rex_r;
+ mod = (modrm >> 6) & 3;
+ if (sse_fn_epp == SSE_SPECIAL) {
+ b |= (b1 << 8);
+ switch(b) {
+ case 0x0e7: /* movntq */
+ if (mod == 3) {
+ goto illegal_op;
+ }
+ gen_lea_modrm(env, s, modrm);
+ gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
+ break;
+ case 0x1e7: /* movntdq */
+ case 0x02b: /* movntps */
+ case 0x12b: /* movntpd */
+ if (mod == 3)
+ goto illegal_op;
+ gen_lea_modrm(env, s, modrm);
+ gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
+ break;
+ case 0x3f0: /* lddqu */
+ if (mod == 3)
+ goto illegal_op;
+ gen_lea_modrm(env, s, modrm);
+ gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
+ break;
+ case 0x22b: /* movntss */
+ case 0x32b: /* movntsd */
+ if (mod == 3)
+ goto illegal_op;
+ gen_lea_modrm(env, s, modrm);
+ if (b1 & 1) {
+ gen_stq_env_A0(s, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_Q(0)));
+ } else {
+ tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_L(0)));
+ gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
+ }
+ break;
+ case 0x6e: /* movd mm, ea */
+#ifdef TARGET_X86_64
+ if (s->dflag == MO_64) {
+ gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
+ tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
+ } else
+#endif
+ {
+ gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
+ tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
+ offsetof(CPUX86State,fpregs[reg].mmx));
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+ gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
+ }
+ break;
+ case 0x16e: /* movd xmm, ea */
+#ifdef TARGET_X86_64
+ if (s->dflag == MO_64) {
+ gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
+ tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
+ offsetof(CPUX86State,xmm_regs[reg]));
+ gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T0);
+ } else
+#endif
+ {
+ gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
+ tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
+ offsetof(CPUX86State,xmm_regs[reg]));
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+ gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
+ }
+ break;
+ case 0x6f: /* movq mm, ea */
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ gen_ldq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
+ } else {
+ rm = (modrm & 7);
+ tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
+ offsetof(CPUX86State,fpregs[rm].mmx));
+ tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
+ offsetof(CPUX86State,fpregs[reg].mmx));
+ }
+ break;
+ case 0x010: /* movups */
+ case 0x110: /* movupd */
+ case 0x028: /* movaps */
+ case 0x128: /* movapd */
+ case 0x16f: /* movdqa xmm, ea */
+ case 0x26f: /* movdqu xmm, ea */
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
+ offsetof(CPUX86State,xmm_regs[rm]));
+ }
+ break;
+ case 0x210: /* movss xmm, ea */
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
+ tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
+ tcg_gen_movi_tl(cpu_T0, 0);
+ tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)));
+ tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
+ tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
+ offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)));
+ }
+ break;
+ case 0x310: /* movsd xmm, ea */
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ gen_ldq_env_A0(s, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_Q(0)));
+ tcg_gen_movi_tl(cpu_T0, 0);
+ tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
+ tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
+ offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
+ }
+ break;
+ case 0x012: /* movlps */
+ case 0x112: /* movlpd */
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ gen_ldq_env_A0(s, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_Q(0)));
+ } else {
+ /* movhlps */
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
+ offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(1)));
+ }
+ break;
+ case 0x212: /* movsldup */
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
+ offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)));
+ gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)),
+ offsetof(CPUX86State,xmm_regs[rm].ZMM_L(2)));
+ }
+ gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)),
+ offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
+ gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)),
+ offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
+ break;
+ case 0x312: /* movddup */
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ gen_ldq_env_A0(s, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_Q(0)));
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
+ offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
+ }
+ gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)),
+ offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
+ break;
+ case 0x016: /* movhps */
+ case 0x116: /* movhpd */
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ gen_ldq_env_A0(s, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_Q(1)));
+ } else {
+ /* movlhps */
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)),
+ offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
+ }
+ break;
+ case 0x216: /* movshdup */
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)),
+ offsetof(CPUX86State,xmm_regs[rm].ZMM_L(1)));
+ gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)),
+ offsetof(CPUX86State,xmm_regs[rm].ZMM_L(3)));
+ }
+ gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
+ offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)));
+ gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)),
+ offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
+ break;
+ case 0x178:
+ case 0x378:
+ {
+ int bit_index, field_length;
+
+ if (b1 == 1 && reg != 0)
+ goto illegal_op;
+ field_length = cpu_ldub_code(env, s->pc++) & 0x3F;
+ bit_index = cpu_ldub_code(env, s->pc++) & 0x3F;
+ tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
+ offsetof(CPUX86State,xmm_regs[reg]));
+ if (b1 == 1)
+ gen_helper_extrq_i(cpu_env, cpu_ptr0,
+ tcg_const_i32(bit_index),
+ tcg_const_i32(field_length));
+ else
+ gen_helper_insertq_i(cpu_env, cpu_ptr0,
+ tcg_const_i32(bit_index),
+ tcg_const_i32(field_length));
+ }
+ break;
+ case 0x7e: /* movd ea, mm */
+#ifdef TARGET_X86_64
+ if (s->dflag == MO_64) {
+ tcg_gen_ld_i64(cpu_T0, cpu_env,
+ offsetof(CPUX86State,fpregs[reg].mmx));
+ gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
+ } else
+#endif
+ {
+ tcg_gen_ld32u_tl(cpu_T0, cpu_env,
+ offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
+ gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
+ }
+ break;
+ case 0x17e: /* movd ea, xmm */
+#ifdef TARGET_X86_64
+ if (s->dflag == MO_64) {
+ tcg_gen_ld_i64(cpu_T0, cpu_env,
+ offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
+ gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
+ } else
+#endif
+ {
+ tcg_gen_ld32u_tl(cpu_T0, cpu_env,
+ offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
+ gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
+ }
+ break;
+ case 0x27e: /* movq xmm, ea */
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ gen_ldq_env_A0(s, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_Q(0)));
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
+ offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
+ }
+ gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)));
+ break;
+ case 0x7f: /* movq ea, mm */
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
+ } else {
+ rm = (modrm & 7);
+ gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
+ offsetof(CPUX86State,fpregs[reg].mmx));
+ }
+ break;
+ case 0x011: /* movups */
+ case 0x111: /* movupd */
+ case 0x029: /* movaps */
+ case 0x129: /* movapd */
+ case 0x17f: /* movdqa ea, xmm */
+ case 0x27f: /* movdqu ea, xmm */
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
+ offsetof(CPUX86State,xmm_regs[reg]));
+ }
+ break;
+ case 0x211: /* movss ea, xmm */
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
+ gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)),
+ offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
+ }
+ break;
+ case 0x311: /* movsd ea, xmm */
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ gen_stq_env_A0(s, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_Q(0)));
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)),
+ offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
+ }
+ break;
+ case 0x013: /* movlps */
+ case 0x113: /* movlpd */
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ gen_stq_env_A0(s, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_Q(0)));
+ } else {
+ goto illegal_op;
+ }
+ break;
+ case 0x017: /* movhps */
+ case 0x117: /* movhpd */
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ gen_stq_env_A0(s, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_Q(1)));
+ } else {
+ goto illegal_op;
+ }
+ break;
+ case 0x71: /* shift mm, im */
+ case 0x72:
+ case 0x73:
+ case 0x171: /* shift xmm, im */
+ case 0x172:
+ case 0x173:
+ if (b1 >= 2) {
+ goto unknown_op;
+ }
+ val = cpu_ldub_code(env, s->pc++);
+ if (is_xmm) {
+ tcg_gen_movi_tl(cpu_T0, val);
+ tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
+ tcg_gen_movi_tl(cpu_T0, 0);
+ tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(1)));
+ op1_offset = offsetof(CPUX86State,xmm_t0);
+ } else {
+ tcg_gen_movi_tl(cpu_T0, val);
+ tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
+ tcg_gen_movi_tl(cpu_T0, 0);
+ tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
+ op1_offset = offsetof(CPUX86State,mmx_t0);
+ }
+ sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
+ (((modrm >> 3)) & 7)][b1];
+ if (!sse_fn_epp) {
+ goto unknown_op;
+ }
+ if (is_xmm) {
+ rm = (modrm & 7) | REX_B(s);
+ op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
+ } else {
+ rm = (modrm & 7);
+ op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
+ }
+ tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
+ tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
+ sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
+ break;
+ case 0x050: /* movmskps */
+ rm = (modrm & 7) | REX_B(s);
+ tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
+ offsetof(CPUX86State,xmm_regs[rm]));
+ gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
+ tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
+ break;
+ case 0x150: /* movmskpd */
+ rm = (modrm & 7) | REX_B(s);
+ tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
+ offsetof(CPUX86State,xmm_regs[rm]));
+ gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
+ tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
+ break;
+ case 0x02a: /* cvtpi2ps */
+ case 0x12a: /* cvtpi2pd */
+ gen_helper_enter_mmx(cpu_env);
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ op2_offset = offsetof(CPUX86State,mmx_t0);
+ gen_ldq_env_A0(s, op2_offset);
+ } else {
+ rm = (modrm & 7);
+ op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
+ }
+ op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
+ tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
+ tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
+ switch(b >> 8) {
+ case 0x0:
+ gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);
+ break;
+ default:
+ case 0x1:
+ gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);
+ break;
+ }
+ break;
+ case 0x22a: /* cvtsi2ss */
+ case 0x32a: /* cvtsi2sd */
+ ot = mo_64_32(s->dflag);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+ op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
+ tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
+ if (ot == MO_32) {
+ SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+ sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
+ } else {
+#ifdef TARGET_X86_64
+ SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
+ sse_fn_epl(cpu_env, cpu_ptr0, cpu_T0);
+#else
+ goto illegal_op;
+#endif
+ }
+ break;
+ case 0x02c: /* cvttps2pi */
+ case 0x12c: /* cvttpd2pi */
+ case 0x02d: /* cvtps2pi */
+ case 0x12d: /* cvtpd2pi */
+ gen_helper_enter_mmx(cpu_env);
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ op2_offset = offsetof(CPUX86State,xmm_t0);
+ gen_ldo_env_A0(s, op2_offset);
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
+ }
+ op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
+ tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
+ tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
+ switch(b) {
+ case 0x02c:
+ gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
+ break;
+ case 0x12c:
+ gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
+ break;
+ case 0x02d:
+ gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
+ break;
+ case 0x12d:
+ gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
+ break;
+ }
+ break;
+ case 0x22c: /* cvttss2si */
+ case 0x32c: /* cvttsd2si */
+ case 0x22d: /* cvtss2si */
+ case 0x32d: /* cvtsd2si */
+ ot = mo_64_32(s->dflag);
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ if ((b >> 8) & 1) {
+ gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_Q(0)));
+ } else {
+ gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
+ tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
+ }
+ op2_offset = offsetof(CPUX86State,xmm_t0);
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
+ }
+ tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
+ if (ot == MO_32) {
+ SSEFunc_i_ep sse_fn_i_ep =
+ sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
+ sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
+ tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
+ } else {
+#ifdef TARGET_X86_64
+ SSEFunc_l_ep sse_fn_l_ep =
+ sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
+ sse_fn_l_ep(cpu_T0, cpu_env, cpu_ptr0);
+#else
+ goto illegal_op;
+#endif
+ }
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
+ break;
+ case 0xc4: /* pinsrw */
+ case 0x1c4:
+ s->rip_offset = 1;
+ gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
+ val = cpu_ldub_code(env, s->pc++);
+ if (b1) {
+ val &= 7;
+ tcg_gen_st16_tl(cpu_T0, cpu_env,
+ offsetof(CPUX86State,xmm_regs[reg].ZMM_W(val)));
+ } else {
+ val &= 3;
+ tcg_gen_st16_tl(cpu_T0, cpu_env,
+ offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
+ }
+ break;
+ case 0xc5: /* pextrw */
+ case 0x1c5:
+ if (mod != 3)
+ goto illegal_op;
+ ot = mo_64_32(s->dflag);
+ val = cpu_ldub_code(env, s->pc++);
+ if (b1) {
+ val &= 7;
+ rm = (modrm & 7) | REX_B(s);
+ tcg_gen_ld16u_tl(cpu_T0, cpu_env,
+ offsetof(CPUX86State,xmm_regs[rm].ZMM_W(val)));
+ } else {
+ val &= 3;
+ rm = (modrm & 7);
+ tcg_gen_ld16u_tl(cpu_T0, cpu_env,
+ offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
+ }
+ reg = ((modrm >> 3) & 7) | rex_r;
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
+ break;
+ case 0x1d6: /* movq ea, xmm */
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ gen_stq_env_A0(s, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_Q(0)));
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)),
+ offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
+ gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(1)));
+ }
+ break;
+ case 0x2d6: /* movq2dq */
+ gen_helper_enter_mmx(cpu_env);
+ rm = (modrm & 7);
+ gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
+ offsetof(CPUX86State,fpregs[rm].mmx));
+ gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)));
+ break;
+ case 0x3d6: /* movdq2q */
+ gen_helper_enter_mmx(cpu_env);
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
+ offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
+ break;
+ case 0xd7: /* pmovmskb */
+ case 0x1d7:
+ if (mod != 3)
+ goto illegal_op;
+ if (b1) {
+ rm = (modrm & 7) | REX_B(s);
+ tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
+ gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
+ } else {
+ rm = (modrm & 7);
+ tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
+ gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
+ }
+ reg = ((modrm >> 3) & 7) | rex_r;
+ tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
+ break;
+
+ case 0x138:
+ case 0x038:
+ b = modrm;
+ if ((b & 0xf0) == 0xf0) {
+ goto do_0f_38_fx;
+ }
+ modrm = cpu_ldub_code(env, s->pc++);
+ rm = modrm & 7;
+ reg = ((modrm >> 3) & 7) | rex_r;
+ mod = (modrm >> 6) & 3;
+ if (b1 >= 2) {
+ goto unknown_op;
+ }
+
+ sse_fn_epp = sse_op_table6[b].op[b1];
+ if (!sse_fn_epp) {
+ goto unknown_op;
+ }
+ if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
+ goto illegal_op;
+
+ if (b1) {
+ op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
+ if (mod == 3) {
+ op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
+ } else {
+ op2_offset = offsetof(CPUX86State,xmm_t0);
+ gen_lea_modrm(env, s, modrm);
+ switch (b) {
+ case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
+ case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
+ case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
+ gen_ldq_env_A0(s, op2_offset +
+ offsetof(ZMMReg, ZMM_Q(0)));
+ break;
+ case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
+ case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
+ tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
+ s->mem_index, MO_LEUL);
+ tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
+ offsetof(ZMMReg, ZMM_L(0)));
+ break;
+ case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
+ tcg_gen_qemu_ld_tl(cpu_tmp0, cpu_A0,
+ s->mem_index, MO_LEUW);
+ tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
+ offsetof(ZMMReg, ZMM_W(0)));
+ break;
+ case 0x2a: /* movntdqa */
+ gen_ldo_env_A0(s, op1_offset);
+ return;
+ default:
+ gen_ldo_env_A0(s, op2_offset);
+ }
+ }
+ } else {
+ op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
+ if (mod == 3) {
+ op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
+ } else {
+ op2_offset = offsetof(CPUX86State,mmx_t0);
+ gen_lea_modrm(env, s, modrm);
+ gen_ldq_env_A0(s, op2_offset);
+ }
+ }
+ if (sse_fn_epp == SSE_SPECIAL) {
+ goto unknown_op;
+ }
+
+ tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
+ tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
+ sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
+
+ if (b == 0x17) {
+ set_cc_op(s, CC_OP_EFLAGS);
+ }
+ break;
+
+ case 0x238:
+ case 0x338:
+ do_0f_38_fx:
+ /* Various integer extensions at 0f 38 f[0-f]. */
+ b = modrm | (b1 << 8);
+ modrm = cpu_ldub_code(env, s->pc++);
+ reg = ((modrm >> 3) & 7) | rex_r;
+
+ switch (b) {
+ case 0x3f0: /* crc32 Gd,Eb */
+ case 0x3f1: /* crc32 Gd,Ey */
+ do_crc32:
+ if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) {
+ goto illegal_op;
+ }
+ if ((b & 0xff) == 0xf0) {
+ ot = MO_8;
+ } else if (s->dflag != MO_64) {
+ ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
+ } else {
+ ot = MO_64;
+ }
+
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[reg]);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+ gen_helper_crc32(cpu_T0, cpu_tmp2_i32,
+ cpu_T0, tcg_const_i32(8 << ot));
+
+ ot = mo_64_32(s->dflag);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
+ break;
+
+ case 0x1f0: /* crc32 or movbe */
+ case 0x1f1:
+ /* For these insns, the f3 prefix is supposed to have priority
+ over the 66 prefix, but that is not what the b1 computation
+ above implements. */
+ if (s->prefix & PREFIX_REPNZ) {
+ goto do_crc32;
+ }
+ /* FALLTHRU */
+ case 0x0f0: /* movbe Gy,My */
+ case 0x0f1: /* movbe My,Gy */
+ if (!(s->cpuid_ext_features & CPUID_EXT_MOVBE)) {
+ goto illegal_op;
+ }
+ if (s->dflag != MO_64) {
+ ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
+ } else {
+ ot = MO_64;
+ }
+
+ gen_lea_modrm(env, s, modrm);
+ if ((b & 1) == 0) {
+ tcg_gen_qemu_ld_tl(cpu_T0, cpu_A0,
+ s->mem_index, ot | MO_BE);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
+ } else {
+ tcg_gen_qemu_st_tl(cpu_regs[reg], cpu_A0,
+ s->mem_index, ot | MO_BE);
+ }
+ break;
+
+ case 0x0f2: /* andn Gy, By, Ey */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
+ || !(s->prefix & PREFIX_VEX)
+ || s->vex_l != 0) {
+ goto illegal_op;
+ }
+ ot = mo_64_32(s->dflag);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+ tcg_gen_andc_tl(cpu_T0, cpu_regs[s->vex_v], cpu_T0);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
+ gen_op_update1_cc();
+ set_cc_op(s, CC_OP_LOGICB + ot);
+ break;
+
+ case 0x0f7: /* bextr Gy, Ey, By */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
+ || !(s->prefix & PREFIX_VEX)
+ || s->vex_l != 0) {
+ goto illegal_op;
+ }
+ ot = mo_64_32(s->dflag);
+ {
+ TCGv bound, zero;
+
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+ /* Extract START, and shift the operand.
+ Shifts larger than operand size get zeros. */
+ tcg_gen_ext8u_tl(cpu_A0, cpu_regs[s->vex_v]);
+ tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_A0);
+
+ bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
+ zero = tcg_const_tl(0);
+ tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T0, cpu_A0, bound,
+ cpu_T0, zero);
+ tcg_temp_free(zero);
+
+ /* Extract the LEN into a mask. Lengths larger than
+ operand size get all ones. */
+ tcg_gen_shri_tl(cpu_A0, cpu_regs[s->vex_v], 8);
+ tcg_gen_ext8u_tl(cpu_A0, cpu_A0);
+ tcg_gen_movcond_tl(TCG_COND_LEU, cpu_A0, cpu_A0, bound,
+ cpu_A0, bound);
+ tcg_temp_free(bound);
+ tcg_gen_movi_tl(cpu_T1, 1);
+ tcg_gen_shl_tl(cpu_T1, cpu_T1, cpu_A0);
+ tcg_gen_subi_tl(cpu_T1, cpu_T1, 1);
+ tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
+
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
+ gen_op_update1_cc();
+ set_cc_op(s, CC_OP_LOGICB + ot);
+ }
+ break;
+
+ case 0x0f5: /* bzhi Gy, Ey, By */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
+ || !(s->prefix & PREFIX_VEX)
+ || s->vex_l != 0) {
+ goto illegal_op;
+ }
+ ot = mo_64_32(s->dflag);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+ tcg_gen_ext8u_tl(cpu_T1, cpu_regs[s->vex_v]);
+ {
+ TCGv bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
+ /* Note that since we're using BMILG (in order to get O
+ cleared) we need to store the inverse into C. */
+ tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src,
+ cpu_T1, bound);
+ tcg_gen_movcond_tl(TCG_COND_GT, cpu_T1, cpu_T1,
+ bound, bound, cpu_T1);
+ tcg_temp_free(bound);
+ }
+ tcg_gen_movi_tl(cpu_A0, -1);
+ tcg_gen_shl_tl(cpu_A0, cpu_A0, cpu_T1);
+ tcg_gen_andc_tl(cpu_T0, cpu_T0, cpu_A0);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
+ gen_op_update1_cc();
+ set_cc_op(s, CC_OP_BMILGB + ot);
+ break;
+
+ case 0x3f6: /* mulx By, Gy, rdx, Ey */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
+ || !(s->prefix & PREFIX_VEX)
+ || s->vex_l != 0) {
+ goto illegal_op;
+ }
+ ot = mo_64_32(s->dflag);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+ switch (ot) {
+ default:
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+ tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EDX]);
+ tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
+ cpu_tmp2_i32, cpu_tmp3_i32);
+ tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], cpu_tmp2_i32);
+ tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp3_i32);
+ break;
+#ifdef TARGET_X86_64
+ case MO_64:
+ tcg_gen_mulu2_i64(cpu_T0, cpu_T1,
+ cpu_T0, cpu_regs[R_EDX]);
+ tcg_gen_mov_i64(cpu_regs[s->vex_v], cpu_T0);
+ tcg_gen_mov_i64(cpu_regs[reg], cpu_T1);
+ break;
+#endif
+ }
+ break;
+
+ case 0x3f5: /* pdep Gy, By, Ey */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
+ || !(s->prefix & PREFIX_VEX)
+ || s->vex_l != 0) {
+ goto illegal_op;
+ }
+ ot = mo_64_32(s->dflag);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+ /* Note that by zero-extending the mask operand, we
+ automatically handle zero-extending the result. */
+ if (ot == MO_64) {
+ tcg_gen_mov_tl(cpu_T1, cpu_regs[s->vex_v]);
+ } else {
+ tcg_gen_ext32u_tl(cpu_T1, cpu_regs[s->vex_v]);
+ }
+ gen_helper_pdep(cpu_regs[reg], cpu_T0, cpu_T1);
+ break;
+
+ case 0x2f5: /* pext Gy, By, Ey */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
+ || !(s->prefix & PREFIX_VEX)
+ || s->vex_l != 0) {
+ goto illegal_op;
+ }
+ ot = mo_64_32(s->dflag);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+ /* Note that by zero-extending the mask operand, we
+ automatically handle zero-extending the result. */
+ if (ot == MO_64) {
+ tcg_gen_mov_tl(cpu_T1, cpu_regs[s->vex_v]);
+ } else {
+ tcg_gen_ext32u_tl(cpu_T1, cpu_regs[s->vex_v]);
+ }
+ gen_helper_pext(cpu_regs[reg], cpu_T0, cpu_T1);
+ break;
+
+ case 0x1f6: /* adcx Gy, Ey */
+ case 0x2f6: /* adox Gy, Ey */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) {
+ goto illegal_op;
+ } else {
+ TCGv carry_in, carry_out, zero;
+ int end_op;
+
+ ot = mo_64_32(s->dflag);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+
+ /* Re-use the carry-out from a previous round. */
+ TCGV_UNUSED(carry_in);
+ carry_out = (b == 0x1f6 ? cpu_cc_dst : cpu_cc_src2);
+ switch (s->cc_op) {
+ case CC_OP_ADCX:
+ if (b == 0x1f6) {
+ carry_in = cpu_cc_dst;
+ end_op = CC_OP_ADCX;
+ } else {
+ end_op = CC_OP_ADCOX;
+ }
+ break;
+ case CC_OP_ADOX:
+ if (b == 0x1f6) {
+ end_op = CC_OP_ADCOX;
+ } else {
+ carry_in = cpu_cc_src2;
+ end_op = CC_OP_ADOX;
+ }
+ break;
+ case CC_OP_ADCOX:
+ end_op = CC_OP_ADCOX;
+ carry_in = carry_out;
+ break;
+ default:
+ end_op = (b == 0x1f6 ? CC_OP_ADCX : CC_OP_ADOX);
+ break;
+ }
+ /* If we can't reuse carry-out, get it out of EFLAGS. */
+ if (TCGV_IS_UNUSED(carry_in)) {
+ if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) {
+ gen_compute_eflags(s);
+ }
+ carry_in = cpu_tmp0;
+ tcg_gen_shri_tl(carry_in, cpu_cc_src,
+ ctz32(b == 0x1f6 ? CC_C : CC_O));
+ tcg_gen_andi_tl(carry_in, carry_in, 1);
+ }
+
+ switch (ot) {
+#ifdef TARGET_X86_64
+ case MO_32:
+ /* If we know TL is 64-bit, and we want a 32-bit
+ result, just do everything in 64-bit arithmetic. */
+ tcg_gen_ext32u_i64(cpu_regs[reg], cpu_regs[reg]);
+ tcg_gen_ext32u_i64(cpu_T0, cpu_T0);
+ tcg_gen_add_i64(cpu_T0, cpu_T0, cpu_regs[reg]);
+ tcg_gen_add_i64(cpu_T0, cpu_T0, carry_in);
+ tcg_gen_ext32u_i64(cpu_regs[reg], cpu_T0);
+ tcg_gen_shri_i64(carry_out, cpu_T0, 32);
+ break;
+#endif
+ default:
+ /* Otherwise compute the carry-out in two steps. */
+ zero = tcg_const_tl(0);
+ tcg_gen_add2_tl(cpu_T0, carry_out,
+ cpu_T0, zero,
+ carry_in, zero);
+ tcg_gen_add2_tl(cpu_regs[reg], carry_out,
+ cpu_regs[reg], carry_out,
+ cpu_T0, zero);
+ tcg_temp_free(zero);
+ break;
+ }
+ set_cc_op(s, end_op);
+ }
+ break;
+
+ case 0x1f7: /* shlx Gy, Ey, By */
+ case 0x2f7: /* sarx Gy, Ey, By */
+ case 0x3f7: /* shrx Gy, Ey, By */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
+ || !(s->prefix & PREFIX_VEX)
+ || s->vex_l != 0) {
+ goto illegal_op;
+ }
+ ot = mo_64_32(s->dflag);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+ if (ot == MO_64) {
+ tcg_gen_andi_tl(cpu_T1, cpu_regs[s->vex_v], 63);
+ } else {
+ tcg_gen_andi_tl(cpu_T1, cpu_regs[s->vex_v], 31);
+ }
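+ /* The count is masked to the operand width; for the 32-bit sarx/shrx
+ forms the source is extended first so the target-long shift yields
+ the correct 32-bit result. */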
+ if (b == 0x1f7) {
+ tcg_gen_shl_tl(cpu_T0, cpu_T0, cpu_T1);
+ } else if (b == 0x2f7) {
+ if (ot != MO_64) {
+ tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
+ }
+ tcg_gen_sar_tl(cpu_T0, cpu_T0, cpu_T1);
+ } else {
+ if (ot != MO_64) {
+ tcg_gen_ext32u_tl(cpu_T0, cpu_T0);
+ }
+ tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_T1);
+ }
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
+ break;
+
+ case 0x0f3:
+ case 0x1f3:
+ case 0x2f3:
+ case 0x3f3: /* Group 17 */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
+ || !(s->prefix & PREFIX_VEX)
+ || s->vex_l != 0) {
+ goto illegal_op;
+ }
+ ot = mo_64_32(s->dflag);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+
+ switch (reg & 7) {
+ case 1: /* blsr By,Ey */
+ tcg_gen_neg_tl(cpu_T1, cpu_T0);
+ tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
+ gen_op_mov_reg_v(ot, s->vex_v, cpu_T0);
+ gen_op_update2_cc();
+ set_cc_op(s, CC_OP_BMILGB + ot);
+ break;
+
+ case 2: /* blsmsk By,Ey */
+ tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
+ tcg_gen_subi_tl(cpu_T0, cpu_T0, 1);
+ tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_cc_src);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+ set_cc_op(s, CC_OP_BMILGB + ot);
+ break;
+
+ case 3: /* blsi By, Ey */
+ tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
+ tcg_gen_subi_tl(cpu_T0, cpu_T0, 1);
+ tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_cc_src);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+ set_cc_op(s, CC_OP_BMILGB + ot);
+ break;
+
+ default:
+ goto unknown_op;
+ }
+ break;
+
+ default:
+ goto unknown_op;
+ }
+ break;
+
+ case 0x03a:
+ case 0x13a:
+ b = modrm;
+ modrm = cpu_ldub_code(env, s->pc++);
+ rm = modrm & 7;
+ reg = ((modrm >> 3) & 7) | rex_r;
+ mod = (modrm >> 6) & 3;
+ if (b1 >= 2) {
+ goto unknown_op;
+ }
+
+ sse_fn_eppi = sse_op_table7[b].op[b1];
+ if (!sse_fn_eppi) {
+ goto unknown_op;
+ }
+ if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
+ goto illegal_op;
+
+ if (sse_fn_eppi == SSE_SPECIAL) {
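+ /* The SSE4.1 extract/insert forms move data between an XMM lane and
+ a GPR or memory under control of an imm8; they are handled inline
+ rather than through a helper. */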
+ ot = mo_64_32(s->dflag);
+ rm = (modrm & 7) | REX_B(s);
+ if (mod != 3)
+ gen_lea_modrm(env, s, modrm);
+ reg = ((modrm >> 3) & 7) | rex_r;
+ val = cpu_ldub_code(env, s->pc++);
+ switch (b) {
+ case 0x14: /* pextrb */
+ tcg_gen_ld8u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_B(val & 15)));
+ if (mod == 3) {
+ gen_op_mov_reg_v(ot, rm, cpu_T0);
+ } else {
+ tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
+ s->mem_index, MO_UB);
+ }
+ break;
+ case 0x15: /* pextrw */
+ tcg_gen_ld16u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_W(val & 7)));
+ if (mod == 3) {
+ gen_op_mov_reg_v(ot, rm, cpu_T0);
+ } else {
+ tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
+ s->mem_index, MO_LEUW);
+ }
+ break;
+ case 0x16:
+ if (ot == MO_32) { /* pextrd */
+ tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
+ offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_L(val & 3)));
+ if (mod == 3) {
+ tcg_gen_extu_i32_tl(cpu_regs[rm], cpu_tmp2_i32);
+ } else {
+ tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
+ s->mem_index, MO_LEUL);
+ }
+ } else { /* pextrq */
+#ifdef TARGET_X86_64
+ tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
+ offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_Q(val & 1)));
+ if (mod == 3) {
+ tcg_gen_mov_i64(cpu_regs[rm], cpu_tmp1_i64);
+ } else {
+ tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
+ s->mem_index, MO_LEQ);
+ }
+#else
+ goto illegal_op;
+#endif
+ }
+ break;
+ case 0x17: /* extractps */
+ tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_L(val & 3)));
+ if (mod == 3) {
+ gen_op_mov_reg_v(ot, rm, cpu_T0);
+ } else {
+ tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
+ s->mem_index, MO_LEUL);
+ }
+ break;
+ case 0x20: /* pinsrb */
+ if (mod == 3) {
+ gen_op_mov_v_reg(MO_32, cpu_T0, rm);
+ } else {
+ tcg_gen_qemu_ld_tl(cpu_T0, cpu_A0,
+ s->mem_index, MO_UB);
+ }
+ tcg_gen_st8_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_B(val & 15)));
+ break;
+ case 0x21: /* insertps */
+ if (mod == 3) {
+ tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
+ offsetof(CPUX86State,xmm_regs[rm]
+ .ZMM_L((val >> 6) & 3)));
+ } else {
+ tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
+ s->mem_index, MO_LEUL);
+ }
+ tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
+ offsetof(CPUX86State,xmm_regs[reg]
+ .ZMM_L((val >> 4) & 3)));
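+ /* imm8 bits 0-3 form the zero mask: clear the selected destination dwords. */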
+ if ((val >> 0) & 1)
+ tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
+ cpu_env, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_L(0)));
+ if ((val >> 1) & 1)
+ tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
+ cpu_env, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_L(1)));
+ if ((val >> 2) & 1)
+ tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
+ cpu_env, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_L(2)));
+ if ((val >> 3) & 1)
+ tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
+ cpu_env, offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_L(3)));
+ break;
+ case 0x22:
+ if (ot == MO_32) { /* pinsrd */
+ if (mod == 3) {
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[rm]);
+ } else {
+ tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
+ s->mem_index, MO_LEUL);
+ }
+ tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
+ offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_L(val & 3)));
+ } else { /* pinsrq */
+#ifdef TARGET_X86_64
+ if (mod == 3) {
+ gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
+ } else {
+ tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
+ s->mem_index, MO_LEQ);
+ }
+ tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
+ offsetof(CPUX86State,
+ xmm_regs[reg].ZMM_Q(val & 1)));
+#else
+ goto illegal_op;
+#endif
+ }
+ break;
+ }
+ return;
+ }
+
+ if (b1) {
+ op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
+ if (mod == 3) {
+ op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
+ } else {
+ op2_offset = offsetof(CPUX86State,xmm_t0);
+ gen_lea_modrm(env, s, modrm);
+ gen_ldo_env_A0(s, op2_offset);
+ }
+ } else {
+ op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
+ if (mod == 3) {
+ op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
+ } else {
+ op2_offset = offsetof(CPUX86State,mmx_t0);
+ gen_lea_modrm(env, s, modrm);
+ gen_ldq_env_A0(s, op2_offset);
+ }
+ }
+ val = cpu_ldub_code(env, s->pc++);
+
+ if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
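+ /* The pcmp[ei]str[im] insns update EFLAGS directly. */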
+ set_cc_op(s, CC_OP_EFLAGS);
+
+ if (s->dflag == MO_64) {
+ /* The helper must use the entire 64-bit gp registers. */
+ val |= 1 << 8;
+ }
+ }
+
+ tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
+ tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
+ sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
+ break;
+
+ case 0x33a:
+ /* Various integer extensions at 0f 3a f[0-f]. */
+ b = modrm | (b1 << 8);
+ modrm = cpu_ldub_code(env, s->pc++);
+ reg = ((modrm >> 3) & 7) | rex_r;
+
+ switch (b) {
+ case 0x3f0: /* rorx Gy,Ey, Ib */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
+ || !(s->prefix & PREFIX_VEX)
+ || s->vex_l != 0) {
+ goto illegal_op;
+ }
+ ot = mo_64_32(s->dflag);
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
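+ /* rorx rotates by an immediate and, unlike ror, leaves the flags untouched. */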
+ b = cpu_ldub_code(env, s->pc++);
+ if (ot == MO_64) {
+ tcg_gen_rotri_tl(cpu_T0, cpu_T0, b & 63);
+ } else {
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+ tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, b & 31);
+ tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
+ }
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
+ break;
+
+ default:
+ goto unknown_op;
+ }
+ break;
+
+ default:
+ unknown_op:
+ gen_unknown_opcode(env, s);
+ return;
+ }
+ } else {
+ /* generic MMX or SSE operation */
+ switch(b) {
+ case 0x70: /* pshufx insn */
+ case 0xc6: /* shufp[sd] insn */
+ case 0xc2: /* compare insns */
+ s->rip_offset = 1;
+ break;
+ default:
+ break;
+ }
+ if (is_xmm) {
+ op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
+ if (mod != 3) {
+ int sz = 4;
+
+ gen_lea_modrm(env, s, modrm);
+ op2_offset = offsetof(CPUX86State,xmm_t0);
+
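+ /* Scalar and comis/ucomis forms access only 32 or 64 bits of memory;
+ size the load accordingly instead of fetching a full 128-bit vector. */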
+ switch (b) {
+ case 0x50 ... 0x5a:
+ case 0x5c ... 0x5f:
+ case 0xc2:
+ /* Most sse scalar operations. */
+ if (b1 == 2) {
+ sz = 2;
+ } else if (b1 == 3) {
+ sz = 3;
+ }
+ break;
+
+ case 0x2e: /* ucomis[sd] */
+ case 0x2f: /* comis[sd] */
+ if (b1 == 0) {
+ sz = 2;
+ } else {
+ sz = 3;
+ }
+ break;
+ }
+
+ switch (sz) {
+ case 2:
+ /* 32 bit access */
+ gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
+ tcg_gen_st32_tl(cpu_T0, cpu_env,
+ offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
+ break;
+ case 3:
+ /* 64 bit access */
+ gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_D(0)));
+ break;
+ default:
+ /* 128 bit access */
+ gen_ldo_env_A0(s, op2_offset);
+ break;
+ }
+ } else {
+ rm = (modrm & 7) | REX_B(s);
+ op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
+ }
+ } else {
+ op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ op2_offset = offsetof(CPUX86State,mmx_t0);
+ gen_ldq_env_A0(s, op2_offset);
+ } else {
+ rm = (modrm & 7);
+ op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
+ }
+ }
+ switch(b) {
+ case 0x0f: /* 3DNow! data insns */
+ val = cpu_ldub_code(env, s->pc++);
+ sse_fn_epp = sse_op_table5[val];
+ if (!sse_fn_epp) {
+ goto unknown_op;
+ }
+ if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) {
+ goto illegal_op;
+ }
+ tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
+ tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
+ sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
+ break;
+ case 0x70: /* pshufx insn */
+ case 0xc6: /* shufp[sd] insn */
+ val = cpu_ldub_code(env, s->pc++);
+ tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
+ tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
+ /* XXX: introduce a new table? */
+ sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
+ sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
+ break;
+ case 0xc2:
+ /* compare insns */
+ val = cpu_ldub_code(env, s->pc++);
+ if (val >= 8)
+ goto unknown_op;
+ sse_fn_epp = sse_op_table4[val][b1];
+
+ tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
+ tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
+ sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
+ break;
+ case 0xf7:
+ /* maskmov : we must prepare A0 */
+ if (mod != 3)
+ goto illegal_op;
+ tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EDI]);
+ gen_extu(s->aflag, cpu_A0);
+ gen_add_A0_ds_seg(s);
+
+ tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
+ tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
+ /* XXX: introduce a new table? */
+ sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
+ sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
+ break;
+ default:
+ tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
+ tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
+ sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
+ break;
+ }
+ if (b == 0x2e || b == 0x2f) {
+ set_cc_op(s, CC_OP_EFLAGS);
+ }
+ }
+}
+
+/* convert one instruction. s->is_jmp is set if the translation must
+ be stopped. Return the next pc value */
+static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
+ target_ulong pc_start)
+{
+ int b, prefixes;
+ int shift;
+ TCGMemOp ot, aflag, dflag;
+ int modrm, reg, rm, mod, op, opreg, val;
+ target_ulong next_eip, tval;
+ int rex_w, rex_r;
+
+ s->pc_start = s->pc = pc_start;
+ prefixes = 0;
+ s->override = -1;
+ rex_w = -1;
+ rex_r = 0;
+#ifdef TARGET_X86_64
+ s->rex_x = 0;
+ s->rex_b = 0;
+ x86_64_hregs = 0;
+#endif
+ s->rip_offset = 0; /* for relative ip address */
+ s->vex_l = 0;
+ s->vex_v = 0;
+ next_byte:
+ b = cpu_ldub_code(env, s->pc);
+ s->pc++;
+ /* Collect prefixes. */
+ switch (b) {
+ case 0xf3:
+ prefixes |= PREFIX_REPZ;
+ goto next_byte;
+ case 0xf2:
+ prefixes |= PREFIX_REPNZ;
+ goto next_byte;
+ case 0xf0:
+ prefixes |= PREFIX_LOCK;
+ goto next_byte;
+ case 0x2e:
+ s->override = R_CS;
+ goto next_byte;
+ case 0x36:
+ s->override = R_SS;
+ goto next_byte;
+ case 0x3e:
+ s->override = R_DS;
+ goto next_byte;
+ case 0x26:
+ s->override = R_ES;
+ goto next_byte;
+ case 0x64:
+ s->override = R_FS;
+ goto next_byte;
+ case 0x65:
+ s->override = R_GS;
+ goto next_byte;
+ case 0x66:
+ prefixes |= PREFIX_DATA;
+ goto next_byte;
+ case 0x67:
+ prefixes |= PREFIX_ADR;
+ goto next_byte;
+#ifdef TARGET_X86_64
+ case 0x40 ... 0x4f:
+ if (CODE64(s)) {
+ /* REX prefix */
+ rex_w = (b >> 3) & 1;
+ rex_r = (b & 0x4) << 1;
+ s->rex_x = (b & 0x2) << 2;
+ REX_B(s) = (b & 0x1) << 3;
+ x86_64_hregs = 1; /* select uniform byte register addressing */
+ goto next_byte;
+ }
+ break;
+#endif
+ case 0xc5: /* 2-byte VEX */
+ case 0xc4: /* 3-byte VEX */
+ /* VEX prefixes are only recognised in 32-bit and 64-bit code outside
+ vm86 mode; otherwise 0xc4/0xc5 are the LES/LDS opcodes. */
+ if (s->code32 && !s->vm86) {
+ static const int pp_prefix[4] = {
+ 0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
+ };
+ int vex3, vex2 = cpu_ldub_code(env, s->pc);
+
+ if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
+ /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
+ otherwise the instruction is LES or LDS. */
+ break;
+ }
+ s->pc++;
+
+ /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
+ if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ
+ | PREFIX_LOCK | PREFIX_DATA)) {
+ goto illegal_op;
+ }
+#ifdef TARGET_X86_64
+ if (x86_64_hregs) {
+ goto illegal_op;
+ }
+#endif
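+ /* The VEX payload stores R/X/B and vvvv complemented; undo the inversion here. */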
+ rex_r = (~vex2 >> 4) & 8;
+ if (b == 0xc5) {
+ vex3 = vex2;
+ b = cpu_ldub_code(env, s->pc++);
+ } else {
+#ifdef TARGET_X86_64
+ s->rex_x = (~vex2 >> 3) & 8;
+ s->rex_b = (~vex2 >> 2) & 8;
+#endif
+ vex3 = cpu_ldub_code(env, s->pc++);
+ rex_w = (vex3 >> 7) & 1;
+ switch (vex2 & 0x1f) {
+ case 0x01: /* Implied 0f leading opcode bytes. */
+ b = cpu_ldub_code(env, s->pc++) | 0x100;
+ break;
+ case 0x02: /* Implied 0f 38 leading opcode bytes. */
+ b = 0x138;
+ break;
+ case 0x03: /* Implied 0f 3a leading opcode bytes. */
+ b = 0x13a;
+ break;
+ default: /* Reserved for future use. */
+ goto unknown_op;
+ }
+ }
+ s->vex_v = (~vex3 >> 3) & 0xf;
+ s->vex_l = (vex3 >> 2) & 1;
+ prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX;
+ }
+ break;
+ }
+
+ /* Post-process prefixes. */
+ if (CODE64(s)) {
+ /* In 64-bit mode, the default data size is 32-bit. Select 64-bit
+ data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
+ over 0x66 if both are present. */
+ dflag = (rex_w > 0 ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
+ /* In 64-bit mode, 0x67 selects 32-bit addressing. */
+ aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
+ } else {
+ /* In 16/32-bit mode, 0x66 selects the opposite data size. */
+ if (s->code32 ^ ((prefixes & PREFIX_DATA) != 0)) {
+ dflag = MO_32;
+ } else {
+ dflag = MO_16;
+ }
+ /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
+ if (s->code32 ^ ((prefixes & PREFIX_ADR) != 0)) {
+ aflag = MO_32;
+ } else {
+ aflag = MO_16;
+ }
+ }
+
+ s->prefix = prefixes;
+ s->aflag = aflag;
+ s->dflag = dflag;
+
+ /* now check op code */
+ reswitch:
+ switch(b) {
+ case 0x0f:
+ /**************************/
+ /* extended op code */
+ b = cpu_ldub_code(env, s->pc++) | 0x100;
+ goto reswitch;
+
+ /**************************/
+ /* arith & logic */
+ case 0x00 ... 0x05:
+ case 0x08 ... 0x0d:
+ case 0x10 ... 0x15:
+ case 0x18 ... 0x1d:
+ case 0x20 ... 0x25:
+ case 0x28 ... 0x2d:
+ case 0x30 ... 0x35:
+ case 0x38 ... 0x3d:
+ {
+ int op, f, val;
+ op = (b >> 3) & 7;
+ f = (b >> 1) & 3;
+
+ ot = mo_b_d(b, dflag);
+
+ switch(f) {
+ case 0: /* OP Ev, Gv */
+ modrm = cpu_ldub_code(env, s->pc++);
+ reg = ((modrm >> 3) & 7) | rex_r;
+ mod = (modrm >> 6) & 3;
+ rm = (modrm & 7) | REX_B(s);
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ opreg = OR_TMP0;
+ } else if (op == OP_XORL && rm == reg) {
+ xor_zero:
+ /* xor reg, reg optimisation */
+ set_cc_op(s, CC_OP_CLR);
+ tcg_gen_movi_tl(cpu_T0, 0);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
+ break;
+ } else {
+ opreg = rm;
+ }
+ gen_op_mov_v_reg(ot, cpu_T1, reg);
+ gen_op(s, op, ot, opreg);
+ break;
+ case 1: /* OP Gv, Ev */
+ modrm = cpu_ldub_code(env, s->pc++);
+ mod = (modrm >> 6) & 3;
+ reg = ((modrm >> 3) & 7) | rex_r;
+ rm = (modrm & 7) | REX_B(s);
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
+ } else if (op == OP_XORL && rm == reg) {
+ goto xor_zero;
+ } else {
+ gen_op_mov_v_reg(ot, cpu_T1, rm);
+ }
+ gen_op(s, op, ot, reg);
+ break;
+ case 2: /* OP A, Iv */
+ val = insn_get(env, s, ot);
+ tcg_gen_movi_tl(cpu_T1, val);
+ gen_op(s, op, ot, OR_EAX);
+ break;
+ }
+ }
+ break;
+
+ case 0x82:
+ if (CODE64(s))
+ goto illegal_op;
+ case 0x80: /* GRP1 */
+ case 0x81:
+ case 0x83:
+ {
+ int val;
+
+ ot = mo_b_d(b, dflag);
+
+ modrm = cpu_ldub_code(env, s->pc++);
+ mod = (modrm >> 6) & 3;
+ rm = (modrm & 7) | REX_B(s);
+ op = (modrm >> 3) & 7;
+
+ if (mod != 3) {
+ if (b == 0x83)
+ s->rip_offset = 1;
+ else
+ s->rip_offset = insn_const_size(ot);
+ gen_lea_modrm(env, s, modrm);
+ opreg = OR_TMP0;
+ } else {
+ opreg = rm;
+ }
+
+ switch(b) {
+ default:
+ case 0x80:
+ case 0x81:
+ case 0x82:
+ val = insn_get(env, s, ot);
+ break;
+ case 0x83:
+ val = (int8_t)insn_get(env, s, MO_8);
+ break;
+ }
+ tcg_gen_movi_tl(cpu_T1, val);
+ gen_op(s, op, ot, opreg);
+ }
+ break;
+
+ /**************************/
+ /* inc, dec, and other misc arith */
+ case 0x40 ... 0x47: /* inc Gv */
+ ot = dflag;
+ gen_inc(s, ot, OR_EAX + (b & 7), 1);
+ break;
+ case 0x48 ... 0x4f: /* dec Gv */
+ ot = dflag;
+ gen_inc(s, ot, OR_EAX + (b & 7), -1);
+ break;
+ case 0xf6: /* GRP3 */
+ case 0xf7:
+ ot = mo_b_d(b, dflag);
+
+ modrm = cpu_ldub_code(env, s->pc++);
+ mod = (modrm >> 6) & 3;
+ rm = (modrm & 7) | REX_B(s);
+ op = (modrm >> 3) & 7;
+ if (mod != 3) {
+ if (op == 0) {
+ s->rip_offset = insn_const_size(ot);
+ }
+ gen_lea_modrm(env, s, modrm);
+ /* For those below that handle locked memory, don't load here. */
+ if (!(s->prefix & PREFIX_LOCK)
+ || op != 2) {
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+ }
+ } else {
+ gen_op_mov_v_reg(ot, cpu_T0, rm);
+ }
+
+ switch(op) {
+ case 0: /* test */
+ val = insn_get(env, s, ot);
+ tcg_gen_movi_tl(cpu_T1, val);
+ gen_op_testl_T0_T1_cc();
+ set_cc_op(s, CC_OP_LOGICB + ot);
+ break;
+ case 2: /* not */
+ if (s->prefix & PREFIX_LOCK) {
+ if (mod == 3) {
+ goto illegal_op;
+ }
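+ /* Locked NOT is implemented as an atomic XOR with all-ones. */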
+ tcg_gen_movi_tl(cpu_T0, ~0);
+ tcg_gen_atomic_xor_fetch_tl(cpu_T0, cpu_A0, cpu_T0,
+ s->mem_index, ot | MO_LE);
+ } else {
+ tcg_gen_not_tl(cpu_T0, cpu_T0);
+ if (mod != 3) {
+ gen_op_st_v(s, ot, cpu_T0, cpu_A0);
+ } else {
+ gen_op_mov_reg_v(ot, rm, cpu_T0);
+ }
+ }
+ break;
+ case 3: /* neg */
+ if (s->prefix & PREFIX_LOCK) {
+ TCGLabel *label1;
+ TCGv a0, t0, t1, t2;
+
+ if (mod == 3) {
+ goto illegal_op;
+ }
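+ /* Locked NEG: retry a compare-and-swap of the negated value until the
+ memory word has not changed between the load and the store. */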
+ a0 = tcg_temp_local_new();
+ t0 = tcg_temp_local_new();
+ label1 = gen_new_label();
+
+ tcg_gen_mov_tl(a0, cpu_A0);
+ tcg_gen_mov_tl(t0, cpu_T0);
+
+ gen_set_label(label1);
+ t1 = tcg_temp_new();
+ t2 = tcg_temp_new();
+ tcg_gen_mov_tl(t2, t0);
+ tcg_gen_neg_tl(t1, t0);
+ tcg_gen_atomic_cmpxchg_tl(t0, a0, t0, t1,
+ s->mem_index, ot | MO_LE);
+ tcg_temp_free(t1);
+ tcg_gen_brcond_tl(TCG_COND_NE, t0, t2, label1);
+
+ tcg_temp_free(t2);
+ tcg_temp_free(a0);
+ tcg_gen_mov_tl(cpu_T0, t0);
+ tcg_temp_free(t0);
+ } else {
+ tcg_gen_neg_tl(cpu_T0, cpu_T0);
+ if (mod != 3) {
+ gen_op_st_v(s, ot, cpu_T0, cpu_A0);
+ } else {
+ gen_op_mov_reg_v(ot, rm, cpu_T0);
+ }
+ }
+ gen_op_update_neg_cc();
+ set_cc_op(s, CC_OP_SUBB + ot);
+ break;
+ case 4: /* mul */
+ switch(ot) {
+ case MO_8:
+ gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX);
+ tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
+ tcg_gen_ext8u_tl(cpu_T1, cpu_T1);
+ /* XXX: use 32 bit mul which could be faster */
+ tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
+ gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+ tcg_gen_andi_tl(cpu_cc_src, cpu_T0, 0xff00);
+ set_cc_op(s, CC_OP_MULB);
+ break;
+ case MO_16:
+ gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX);
+ tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
+ tcg_gen_ext16u_tl(cpu_T1, cpu_T1);
+ /* XXX: use 32 bit mul which could be faster */
+ tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
+ gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+ tcg_gen_shri_tl(cpu_T0, cpu_T0, 16);
+ gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
+ tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
+ set_cc_op(s, CC_OP_MULW);
+ break;
+ default:
+ case MO_32:
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+ tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
+ tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
+ cpu_tmp2_i32, cpu_tmp3_i32);
+ tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
+ tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
+ tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
+ set_cc_op(s, CC_OP_MULL);
+ break;
+#ifdef TARGET_X86_64
+ case MO_64:
+ tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
+ cpu_T0, cpu_regs[R_EAX]);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
+ tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
+ set_cc_op(s, CC_OP_MULQ);
+ break;
+#endif
+ }
+ break;
+ case 5: /* imul */
+ switch(ot) {
+ case MO_8:
+ gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX);
+ tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
+ tcg_gen_ext8s_tl(cpu_T1, cpu_T1);
+ /* XXX: use 32 bit mul which could be faster */
+ tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
+ gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+ tcg_gen_ext8s_tl(cpu_tmp0, cpu_T0);
+ tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
+ set_cc_op(s, CC_OP_MULB);
+ break;
+ case MO_16:
+ gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX);
+ tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
+ tcg_gen_ext16s_tl(cpu_T1, cpu_T1);
+ /* XXX: use 32 bit mul which could be faster */
+ tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
+ gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+ tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0);
+ tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
+ tcg_gen_shri_tl(cpu_T0, cpu_T0, 16);
+ gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
+ set_cc_op(s, CC_OP_MULW);
+ break;
+ default:
+ case MO_32:
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+ tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
+ tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
+ cpu_tmp2_i32, cpu_tmp3_i32);
+ tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
+ tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
+ tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
+ tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
+ tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
+ set_cc_op(s, CC_OP_MULL);
+ break;
+#ifdef TARGET_X86_64
+ case MO_64:
+ tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
+ cpu_T0, cpu_regs[R_EAX]);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
+ tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
+ tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
+ set_cc_op(s, CC_OP_MULQ);
+ break;
+#endif
+ }
+ break;
+ case 6: /* div */
+ switch(ot) {
+ case MO_8:
+ gen_helper_divb_AL(cpu_env, cpu_T0);
+ break;
+ case MO_16:
+ gen_helper_divw_AX(cpu_env, cpu_T0);
+ break;
+ default:
+ case MO_32:
+ gen_helper_divl_EAX(cpu_env, cpu_T0);
+ break;
+#ifdef TARGET_X86_64
+ case MO_64:
+ gen_helper_divq_EAX(cpu_env, cpu_T0);
+ break;
+#endif
+ }
+ break;
+ case 7: /* idiv */
+ switch(ot) {
+ case MO_8:
+ gen_helper_idivb_AL(cpu_env, cpu_T0);
+ break;
+ case MO_16:
+ gen_helper_idivw_AX(cpu_env, cpu_T0);
+ break;
+ default:
+ case MO_32:
+ gen_helper_idivl_EAX(cpu_env, cpu_T0);
+ break;
+#ifdef TARGET_X86_64
+ case MO_64:
+ gen_helper_idivq_EAX(cpu_env, cpu_T0);
+ break;
+#endif
+ }
+ break;
+ default:
+ goto unknown_op;
+ }
+ break;
+
+ case 0xfe: /* GRP4 */
+ case 0xff: /* GRP5 */
+ ot = mo_b_d(b, dflag);
+
+ modrm = cpu_ldub_code(env, s->pc++);
+ mod = (modrm >> 6) & 3;
+ rm = (modrm & 7) | REX_B(s);
+ op = (modrm >> 3) & 7;
+ if (op >= 2 && b == 0xfe) {
+ goto unknown_op;
+ }
+ if (CODE64(s)) {
+ if (op == 2 || op == 4) {
+ /* operand size for jumps is 64 bit */
+ ot = MO_64;
+ } else if (op == 3 || op == 5) {
+ ot = dflag != MO_16 ? MO_32 + (rex_w == 1) : MO_16;
+ } else if (op == 6) {
+ /* default push size is 64 bit */
+ ot = mo_pushpop(s, dflag);
+ }
+ }
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ if (op >= 2 && op != 3 && op != 5)
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+ } else {
+ gen_op_mov_v_reg(ot, cpu_T0, rm);
+ }
+
+ switch(op) {
+ case 0: /* inc Ev */
+ if (mod != 3)
+ opreg = OR_TMP0;
+ else
+ opreg = rm;
+ gen_inc(s, ot, opreg, 1);
+ break;
+ case 1: /* dec Ev */
+ if (mod != 3)
+ opreg = OR_TMP0;
+ else
+ opreg = rm;
+ gen_inc(s, ot, opreg, -1);
+ break;
+ case 2: /* call Ev */
+ /* XXX: optimize if memory (no 'and' is necessary) */
+ if (dflag == MO_16) {
+ tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
+ }
+ next_eip = s->pc - s->cs_base;
+ tcg_gen_movi_tl(cpu_T1, next_eip);
+ gen_push_v(s, cpu_T1);
+ gen_op_jmp_v(cpu_T0);
+ gen_bnd_jmp(s);
+ gen_eob(s);
+ break;
+ case 3: /* lcall Ev */
+ gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
+ gen_add_A0_im(s, 1 << ot);
+ gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
+ do_lcall:
+ if (s->pe && !s->vm86) {
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+ gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T1,
+ tcg_const_i32(dflag - 1),
+ tcg_const_tl(s->pc - s->cs_base));
+ } else {
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+ gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T1,
+ tcg_const_i32(dflag - 1),
+ tcg_const_i32(s->pc - s->cs_base));
+ }
+ gen_eob(s);
+ break;
+ case 4: /* jmp Ev */
+ if (dflag == MO_16) {
+ tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
+ }
+ gen_op_jmp_v(cpu_T0);
+ gen_bnd_jmp(s);
+ gen_eob(s);
+ break;
+ case 5: /* ljmp Ev */
+ gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
+ gen_add_A0_im(s, 1 << ot);
+ gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
+ do_ljmp:
+ if (s->pe && !s->vm86) {
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+ gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T1,
+ tcg_const_tl(s->pc - s->cs_base));
+ } else {
+ gen_op_movl_seg_T0_vm(R_CS);
+ gen_op_jmp_v(cpu_T1);
+ }
+ gen_eob(s);
+ break;
+ case 6: /* push Ev */
+ gen_push_v(s, cpu_T0);
+ break;
+ default:
+ goto unknown_op;
+ }
+ break;
+
+ case 0x84: /* test Ev, Gv */
+ case 0x85:
+ ot = mo_b_d(b, dflag);
+
+ modrm = cpu_ldub_code(env, s->pc++);
+ reg = ((modrm >> 3) & 7) | rex_r;
+
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+ gen_op_mov_v_reg(ot, cpu_T1, reg);
+ gen_op_testl_T0_T1_cc();
+ set_cc_op(s, CC_OP_LOGICB + ot);
+ break;
+
+ case 0xa8: /* test eAX, Iv */
+ case 0xa9:
+ ot = mo_b_d(b, dflag);
+ val = insn_get(env, s, ot);
+
+ gen_op_mov_v_reg(ot, cpu_T0, OR_EAX);
+ tcg_gen_movi_tl(cpu_T1, val);
+ gen_op_testl_T0_T1_cc();
+ set_cc_op(s, CC_OP_LOGICB + ot);
+ break;
+
+ case 0x98: /* CWDE/CBW */
+ switch (dflag) {
+#ifdef TARGET_X86_64
+ case MO_64:
+ gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
+ tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
+ gen_op_mov_reg_v(MO_64, R_EAX, cpu_T0);
+ break;
+#endif
+ case MO_32:
+ gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX);
+ tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
+ gen_op_mov_reg_v(MO_32, R_EAX, cpu_T0);
+ break;
+ case MO_16:
+ gen_op_mov_v_reg(MO_8, cpu_T0, R_EAX);
+ tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
+ gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
+ break;
+ default:
+ tcg_abort();
+ }
+ break;
+ case 0x99: /* CDQ/CWD */
+ switch (dflag) {
+#ifdef TARGET_X86_64
+ case MO_64:
+ gen_op_mov_v_reg(MO_64, cpu_T0, R_EAX);
+ tcg_gen_sari_tl(cpu_T0, cpu_T0, 63);
+ gen_op_mov_reg_v(MO_64, R_EDX, cpu_T0);
+ break;
+#endif
+ case MO_32:
+ gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
+ tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
+ tcg_gen_sari_tl(cpu_T0, cpu_T0, 31);
+ gen_op_mov_reg_v(MO_32, R_EDX, cpu_T0);
+ break;
+ case MO_16:
+ gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX);
+ tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
+ tcg_gen_sari_tl(cpu_T0, cpu_T0, 15);
+ gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
+ break;
+ default:
+ tcg_abort();
+ }
+ break;
+ case 0x1af: /* imul Gv, Ev */
+ case 0x69: /* imul Gv, Ev, I */
+ case 0x6b:
+ ot = dflag;
+ modrm = cpu_ldub_code(env, s->pc++);
+ reg = ((modrm >> 3) & 7) | rex_r;
+ if (b == 0x69)
+ s->rip_offset = insn_const_size(ot);
+ else if (b == 0x6b)
+ s->rip_offset = 1;
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+ if (b == 0x69) {
+ val = insn_get(env, s, ot);
+ tcg_gen_movi_tl(cpu_T1, val);
+ } else if (b == 0x6b) {
+ val = (int8_t)insn_get(env, s, MO_8);
+ tcg_gen_movi_tl(cpu_T1, val);
+ } else {
+ gen_op_mov_v_reg(ot, cpu_T1, reg);
+ }
+ switch (ot) {
+#ifdef TARGET_X86_64
+ case MO_64:
+ tcg_gen_muls2_i64(cpu_regs[reg], cpu_T1, cpu_T0, cpu_T1);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
+ tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
+ tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T1);
+ break;
+#endif
+ case MO_32:
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+ tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
+ tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
+ cpu_tmp2_i32, cpu_tmp3_i32);
+ tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
+ tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
+ tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
+ tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
+ break;
+ default:
+ tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
+ tcg_gen_ext16s_tl(cpu_T1, cpu_T1);
+ /* XXX: use 32 bit mul which could be faster */
+ tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+ tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0);
+ tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
+ break;
+ }
+ set_cc_op(s, CC_OP_MULB + ot);
+ break;
+ case 0x1c0:
+ case 0x1c1: /* xadd Ev, Gv */
+ ot = mo_b_d(b, dflag);
+ modrm = cpu_ldub_code(env, s->pc++);
+ reg = ((modrm >> 3) & 7) | rex_r;
+ mod = (modrm >> 6) & 3;
+ gen_op_mov_v_reg(ot, cpu_T0, reg);
+ if (mod == 3) {
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_mov_v_reg(ot, cpu_T1, rm);
+ tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
+ gen_op_mov_reg_v(ot, reg, cpu_T1);
+ gen_op_mov_reg_v(ot, rm, cpu_T0);
+ } else {
+ gen_lea_modrm(env, s, modrm);
+ if (s->prefix & PREFIX_LOCK) {
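+ /* fetch_add returns the old memory value in T1; recompute the sum
+ locally for the flags. */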
+ tcg_gen_atomic_fetch_add_tl(cpu_T1, cpu_A0, cpu_T0,
+ s->mem_index, ot | MO_LE);
+ tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
+ } else {
+ gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
+ tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
+ gen_op_st_v(s, ot, cpu_T0, cpu_A0);
+ }
+ gen_op_mov_reg_v(ot, reg, cpu_T1);
+ }
+ gen_op_update2_cc();
+ set_cc_op(s, CC_OP_ADDB + ot);
+ break;
+ case 0x1b0:
+ case 0x1b1: /* cmpxchg Ev, Gv */
+ {
+ TCGv oldv, newv, cmpv;
+
+ ot = mo_b_d(b, dflag);
+ modrm = cpu_ldub_code(env, s->pc++);
+ reg = ((modrm >> 3) & 7) | rex_r;
+ mod = (modrm >> 6) & 3;
+ oldv = tcg_temp_new();
+ newv = tcg_temp_new();
+ cmpv = tcg_temp_new();
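+ /* rAX holds the compare value, the ModRM reg field the replacement. */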
+ gen_op_mov_v_reg(ot, newv, reg);
+ tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]);
+
+ if (s->prefix & PREFIX_LOCK) {
+ if (mod == 3) {
+ goto illegal_op;
+ }
+ gen_lea_modrm(env, s, modrm);
+ tcg_gen_atomic_cmpxchg_tl(oldv, cpu_A0, cmpv, newv,
+ s->mem_index, ot | MO_LE);
+ gen_op_mov_reg_v(ot, R_EAX, oldv);
+ } else {
+ if (mod == 3) {
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_mov_v_reg(ot, oldv, rm);
+ } else {
+ gen_lea_modrm(env, s, modrm);
+ gen_op_ld_v(s, ot, oldv, cpu_A0);
+ rm = 0; /* avoid warning */
+ }
+ gen_extu(ot, oldv);
+ gen_extu(ot, cmpv);
+ /* store value = (old == cmp ? new : old); */
+ tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv);
+ if (mod == 3) {
+ gen_op_mov_reg_v(ot, R_EAX, oldv);
+ gen_op_mov_reg_v(ot, rm, newv);
+ } else {
+ /* Perform an unconditional store cycle like a physical CPU;
+ it must happen before the accumulator is changed so that the
+ instruction can be restarted safely if the store faults. */
+ gen_op_st_v(s, ot, newv, cpu_A0);
+ gen_op_mov_reg_v(ot, R_EAX, oldv);
+ }
+ }
+ tcg_gen_mov_tl(cpu_cc_src, oldv);
+ tcg_gen_mov_tl(cpu_cc_srcT, cmpv);
+ tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv);
+ set_cc_op(s, CC_OP_SUBB + ot);
+ tcg_temp_free(oldv);
+ tcg_temp_free(newv);
+ tcg_temp_free(cmpv);
+ }
+ break;
+ case 0x1c7: /* cmpxchg8b */
+ modrm = cpu_ldub_code(env, s->pc++);
+ mod = (modrm >> 6) & 3;
+ if ((mod == 3) || ((modrm & 0x38) != 0x8))
+ goto illegal_op;
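+ /* The locked helper is only needed when a LOCK prefix is present and
+ other vCPUs may run in parallel; otherwise the unlocked variant suffices. */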
+#ifdef TARGET_X86_64
+ if (dflag == MO_64) {
+ if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
+ goto illegal_op;
+ gen_lea_modrm(env, s, modrm);
+ if ((s->prefix & PREFIX_LOCK) && parallel_cpus) {
+ gen_helper_cmpxchg16b(cpu_env, cpu_A0);
+ } else {
+ gen_helper_cmpxchg16b_unlocked(cpu_env, cpu_A0);
+ }
+ } else
+#endif
+ {
+ if (!(s->cpuid_features & CPUID_CX8))
+ goto illegal_op;
+ gen_lea_modrm(env, s, modrm);
+ if ((s->prefix & PREFIX_LOCK) && parallel_cpus) {
+ gen_helper_cmpxchg8b(cpu_env, cpu_A0);
+ } else {
+ gen_helper_cmpxchg8b_unlocked(cpu_env, cpu_A0);
+ }
+ }
+ set_cc_op(s, CC_OP_EFLAGS);
+ break;
+
+ /**************************/
+ /* push/pop */
+ case 0x50 ... 0x57: /* push */
+ gen_op_mov_v_reg(MO_32, cpu_T0, (b & 7) | REX_B(s));
+ gen_push_v(s, cpu_T0);
+ break;
+ case 0x58 ... 0x5f: /* pop */
+ ot = gen_pop_T0(s);
+ /* NOTE: order is important for pop %sp */
+ gen_pop_update(s, ot);
+ gen_op_mov_reg_v(ot, (b & 7) | REX_B(s), cpu_T0);
+ break;
+ case 0x60: /* pusha */
+ if (CODE64(s))
+ goto illegal_op;
+ gen_pusha(s);
+ break;
+ case 0x61: /* popa */
+ if (CODE64(s))
+ goto illegal_op;
+ gen_popa(s);
+ break;
+ case 0x68: /* push Iv */
+ case 0x6a:
+ ot = mo_pushpop(s, dflag);
+ if (b == 0x68)
+ val = insn_get(env, s, ot);
+ else
+ val = (int8_t)insn_get(env, s, MO_8);
+ tcg_gen_movi_tl(cpu_T0, val);
+ gen_push_v(s, cpu_T0);
+ break;
+ case 0x8f: /* pop Ev */
+ modrm = cpu_ldub_code(env, s->pc++);
+ mod = (modrm >> 6) & 3;
+ ot = gen_pop_T0(s);
+ if (mod == 3) {
+ /* NOTE: order is important for pop %sp */
+ gen_pop_update(s, ot);
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_mov_reg_v(ot, rm, cpu_T0);
+ } else {
+ /* NOTE: order is important too for MMU exceptions */
+ s->popl_esp_hack = 1 << ot;
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
+ s->popl_esp_hack = 0;
+ gen_pop_update(s, ot);
+ }
+ break;
+ case 0xc8: /* enter */
+ {
+ int level;
+ val = cpu_lduw_code(env, s->pc);
+ s->pc += 2;
+ level = cpu_ldub_code(env, s->pc++);
+ gen_enter(s, val, level);
+ }
+ break;
+ case 0xc9: /* leave */
+ gen_leave(s);
+ break;
+ case 0x06: /* push es */
+ case 0x0e: /* push cs */
+ case 0x16: /* push ss */
+ case 0x1e: /* push ds */
+ if (CODE64(s))
+ goto illegal_op;
+ gen_op_movl_T0_seg(b >> 3);
+ gen_push_v(s, cpu_T0);
+ break;
+ case 0x1a0: /* push fs */
+ case 0x1a8: /* push gs */
+ gen_op_movl_T0_seg((b >> 3) & 7);
+ gen_push_v(s, cpu_T0);
+ break;
+ case 0x07: /* pop es */
+ case 0x17: /* pop ss */
+ case 0x1f: /* pop ds */
+ if (CODE64(s))
+ goto illegal_op;
+ reg = b >> 3;
+ ot = gen_pop_T0(s);
+ gen_movl_seg_T0(s, reg);
+ gen_pop_update(s, ot);
+ /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */
+ if (s->is_jmp) {
+ gen_jmp_im(s->pc - s->cs_base);
+ if (reg == R_SS) {
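+ /* A load of SS inhibits interrupts and traps until after the next instruction. */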
+ s->tf = 0;
+ gen_eob_inhibit_irq(s, true);
+ } else {
+ gen_eob(s);
+ }
+ }
+ break;
+ case 0x1a1: /* pop fs */
+ case 0x1a9: /* pop gs */
+ ot = gen_pop_T0(s);
+ gen_movl_seg_T0(s, (b >> 3) & 7);
+ gen_pop_update(s, ot);
+ if (s->is_jmp) {
+ gen_jmp_im(s->pc - s->cs_base);
+ gen_eob(s);
+ }
+ break;
+
+ /**************************/
+ /* mov */
+ case 0x88:
+ case 0x89: /* mov Gv, Ev */
+ ot = mo_b_d(b, dflag);
+ modrm = cpu_ldub_code(env, s->pc++);
+ reg = ((modrm >> 3) & 7) | rex_r;
+
+ /* generate a generic store */
+ gen_ldst_modrm(env, s, modrm, ot, reg, 1);
+ break;
+ case 0xc6:
+ case 0xc7: /* mov Ev, Iv */
+ ot = mo_b_d(b, dflag);
+ modrm = cpu_ldub_code(env, s->pc++);
+ mod = (modrm >> 6) & 3;
+ if (mod != 3) {
+ s->rip_offset = insn_const_size(ot);
+ gen_lea_modrm(env, s, modrm);
+ }
+ val = insn_get(env, s, ot);
+ tcg_gen_movi_tl(cpu_T0, val);
+ if (mod != 3) {
+ gen_op_st_v(s, ot, cpu_T0, cpu_A0);
+ } else {
+ gen_op_mov_reg_v(ot, (modrm & 7) | REX_B(s), cpu_T0);
+ }
+ break;
+ case 0x8a:
+ case 0x8b: /* mov Ev, Gv */
+ ot = mo_b_d(b, dflag);
+ modrm = cpu_ldub_code(env, s->pc++);
+ reg = ((modrm >> 3) & 7) | rex_r;
+
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
+ break;
+ case 0x8e: /* mov seg, Gv */
+ modrm = cpu_ldub_code(env, s->pc++);
+ reg = (modrm >> 3) & 7;
+ if (reg >= 6 || reg == R_CS)
+ goto illegal_op;
+ gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
+ gen_movl_seg_T0(s, reg);
+ /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */
+ if (s->is_jmp) {
+ gen_jmp_im(s->pc - s->cs_base);
+ if (reg == R_SS) {
+ s->tf = 0;
+ gen_eob_inhibit_irq(s, true);
+ } else {
+ gen_eob(s);
+ }
+ }
+ break;
+ case 0x8c: /* mov Gv, seg */
+ modrm = cpu_ldub_code(env, s->pc++);
+ reg = (modrm >> 3) & 7;
+ mod = (modrm >> 6) & 3;
+ if (reg >= 6)
+ goto illegal_op;
+ gen_op_movl_T0_seg(reg);
+ ot = mod == 3 ? dflag : MO_16;
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
+ break;
+
+ case 0x1b6: /* movzbS Gv, Eb */
+ case 0x1b7: /* movzwS Gv, Ew */
+ case 0x1be: /* movsbS Gv, Eb */
+ case 0x1bf: /* movswS Gv, Ew */
+ {
+ TCGMemOp d_ot;
+ TCGMemOp s_ot;
+
+ /* d_ot is the size of destination */
+ d_ot = dflag;
+ /* ot is the size of source */
+ ot = (b & 1) + MO_8;
+ /* s_ot is the sign+size of source */
+ s_ot = b & 8 ? MO_SIGN | ot : ot;
+
+ modrm = cpu_ldub_code(env, s->pc++);
+ reg = ((modrm >> 3) & 7) | rex_r;
+ mod = (modrm >> 6) & 3;
+ rm = (modrm & 7) | REX_B(s);
+
+ if (mod == 3) {
+ gen_op_mov_v_reg(ot, cpu_T0, rm);
+ switch (s_ot) {
+ case MO_UB:
+ tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
+ break;
+ case MO_SB:
+ tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
+ break;
+ case MO_UW:
+ tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
+ break;
+ default:
+ case MO_SW:
+ tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
+ break;
+ }
+ gen_op_mov_reg_v(d_ot, reg, cpu_T0);
+ } else {
+ gen_lea_modrm(env, s, modrm);
+ gen_op_ld_v(s, s_ot, cpu_T0, cpu_A0);
+ gen_op_mov_reg_v(d_ot, reg, cpu_T0);
+ }
+ }
+ break;
+
+ case 0x8d: /* lea */
+ modrm = cpu_ldub_code(env, s->pc++);
+ mod = (modrm >> 6) & 3;
+ if (mod == 3)
+ goto illegal_op;
+ reg = ((modrm >> 3) & 7) | rex_r;
+ {
+ AddressParts a = gen_lea_modrm_0(env, s, modrm);
+ TCGv ea = gen_lea_modrm_1(a);
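+ /* No segment base is applied: LEA only computes the effective address. */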
+ gen_lea_v_seg(s, s->aflag, ea, -1, -1);
+ gen_op_mov_reg_v(dflag, reg, cpu_A0);
+ }
+ break;
+
+ case 0xa0: /* mov EAX, Ov */
+ case 0xa1:
+ case 0xa2: /* mov Ov, EAX */
+ case 0xa3:
+ {
+ target_ulong offset_addr;
+
+ ot = mo_b_d(b, dflag);
+ switch (s->aflag) {
+#ifdef TARGET_X86_64
+ case MO_64:
+ offset_addr = cpu_ldq_code(env, s->pc);
+ s->pc += 8;
+ break;
+#endif
+ default:
+ offset_addr = insn_get(env, s, s->aflag);
+ break;
+ }
+ tcg_gen_movi_tl(cpu_A0, offset_addr);
+ gen_add_A0_ds_seg(s);
+ if ((b & 2) == 0) {
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+ gen_op_mov_reg_v(ot, R_EAX, cpu_T0);
+ } else {
+ gen_op_mov_v_reg(ot, cpu_T0, R_EAX);
+ gen_op_st_v(s, ot, cpu_T0, cpu_A0);
+ }
+ }
+ break;
+ case 0xd7: /* xlat */
+ tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EBX]);
+ tcg_gen_ext8u_tl(cpu_T0, cpu_regs[R_EAX]);
+ tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T0);
+ gen_extu(s->aflag, cpu_A0);
+ gen_add_A0_ds_seg(s);
+ gen_op_ld_v(s, MO_8, cpu_T0, cpu_A0);
+ gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0);
+ break;
+ case 0xb0 ... 0xb7: /* mov R, Ib */
+ val = insn_get(env, s, MO_8);
+ tcg_gen_movi_tl(cpu_T0, val);
+ gen_op_mov_reg_v(MO_8, (b & 7) | REX_B(s), cpu_T0);
+ break;
+ case 0xb8 ... 0xbf: /* mov R, Iv */
+#ifdef TARGET_X86_64
+ if (dflag == MO_64) {
+ uint64_t tmp;
+ /* 64 bit case */
+ tmp = cpu_ldq_code(env, s->pc);
+ s->pc += 8;
+ reg = (b & 7) | REX_B(s);
+ tcg_gen_movi_tl(cpu_T0, tmp);
+ gen_op_mov_reg_v(MO_64, reg, cpu_T0);
+ } else
+#endif
+ {
+ ot = dflag;
+ val = insn_get(env, s, ot);
+ reg = (b & 7) | REX_B(s);
+ tcg_gen_movi_tl(cpu_T0, val);
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
+ }
+ break;
+
+ case 0x91 ... 0x97: /* xchg R, EAX */
+ do_xchg_reg_eax:
+ ot = dflag;
+ reg = (b & 7) | REX_B(s);
+ rm = R_EAX;
+ goto do_xchg_reg;
+ case 0x86:
+ case 0x87: /* xchg Ev, Gv */
+ ot = mo_b_d(b, dflag);
+ modrm = cpu_ldub_code(env, s->pc++);
+ reg = ((modrm >> 3) & 7) | rex_r;
+ mod = (modrm >> 6) & 3;
+ if (mod == 3) {
+ rm = (modrm & 7) | REX_B(s);
+ do_xchg_reg:
+ gen_op_mov_v_reg(ot, cpu_T0, reg);
+ gen_op_mov_v_reg(ot, cpu_T1, rm);
+ gen_op_mov_reg_v(ot, rm, cpu_T0);
+ gen_op_mov_reg_v(ot, reg, cpu_T1);
+ } else {
+ gen_lea_modrm(env, s, modrm);
+ gen_op_mov_v_reg(ot, cpu_T0, reg);
+ /* for xchg, lock is implicit */
+ tcg_gen_atomic_xchg_tl(cpu_T1, cpu_A0, cpu_T0,
+ s->mem_index, ot | MO_LE);
+ gen_op_mov_reg_v(ot, reg, cpu_T1);
+ }
+ break;
+ case 0xc4: /* les Gv */
+ /* In CODE64 this is VEX3; see above. */
+ op = R_ES;
+ goto do_lxx;
+ case 0xc5: /* lds Gv */
+ /* In CODE64 this is VEX2; see above. */
+ op = R_DS;
+ goto do_lxx;
+ case 0x1b2: /* lss Gv */
+ op = R_SS;
+ goto do_lxx;
+ case 0x1b4: /* lfs Gv */
+ op = R_FS;
+ goto do_lxx;
+ case 0x1b5: /* lgs Gv */
+ op = R_GS;
+ do_lxx:
+ ot = dflag != MO_16 ? MO_32 : MO_16;
+ modrm = cpu_ldub_code(env, s->pc++);
+ reg = ((modrm >> 3) & 7) | rex_r;
+ mod = (modrm >> 6) & 3;
+ if (mod == 3)
+ goto illegal_op;
+ gen_lea_modrm(env, s, modrm);
+ gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
+ gen_add_A0_im(s, 1 << ot);
+ /* load the segment first to handle exceptions properly */
+ gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
+ gen_movl_seg_T0(s, op);
+ /* then put the data */
+ gen_op_mov_reg_v(ot, reg, cpu_T1);
+ if (s->is_jmp) {
+ gen_jmp_im(s->pc - s->cs_base);
+ gen_eob(s);
+ }
+ break;
+
+ /************************/
+ /* shifts */
+ case 0xc0:
+ case 0xc1:
+ /* shift Ev,Ib */
+ shift = 2;
+ grp2:
+ {
+ ot = mo_b_d(b, dflag);
+ modrm = cpu_ldub_code(env, s->pc++);
+ mod = (modrm >> 6) & 3;
+ op = (modrm >> 3) & 7;
+
+ if (mod != 3) {
+ if (shift == 2) {
+ s->rip_offset = 1;
+ }
+ gen_lea_modrm(env, s, modrm);
+ opreg = OR_TMP0;
+ } else {
+ opreg = (modrm & 7) | REX_B(s);
+ }
+
+ /* simpler op */
+ if (shift == 0) {
+ gen_shift(s, op, ot, opreg, OR_ECX);
+ } else {
+ if (shift == 2) {
+ shift = cpu_ldub_code(env, s->pc++);
+ }
+ gen_shifti(s, op, ot, opreg, shift);
+ }
+ }
+ break;
+ case 0xd0:
+ case 0xd1:
+ /* shift Ev,1 */
+ shift = 1;
+ goto grp2;
+ case 0xd2:
+ case 0xd3:
+ /* shift Ev,cl */
+ shift = 0;
+ goto grp2;
+
+ case 0x1a4: /* shld imm */
+ op = 0;
+ shift = 1;
+ goto do_shiftd;
+ case 0x1a5: /* shld cl */
+ op = 0;
+ shift = 0;
+ goto do_shiftd;
+ case 0x1ac: /* shrd imm */
+ op = 1;
+ shift = 1;
+ goto do_shiftd;
+ case 0x1ad: /* shrd cl */
+ op = 1;
+ shift = 0;
+ do_shiftd:
+ ot = dflag;
+ modrm = cpu_ldub_code(env, s->pc++);
+ mod = (modrm >> 6) & 3;
+ rm = (modrm & 7) | REX_B(s);
+ reg = ((modrm >> 3) & 7) | rex_r;
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ opreg = OR_TMP0;
+ } else {
+ opreg = rm;
+ }
+ gen_op_mov_v_reg(ot, cpu_T1, reg);
+
+ if (shift) {
+ TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
+ gen_shiftd_rm_T1(s, ot, opreg, op, imm);
+ tcg_temp_free(imm);
+ } else {
+ gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
+ }
+ break;
+
+ /************************/
+ /* floats */
+ case 0xd8 ... 0xdf:
+ if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
+ /* if CR0.EM or CR0.TS are set, generate an FPU exception */
+ /* XXX: what to do if illegal op? */
+ gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
+ break;
+ }
+ modrm = cpu_ldub_code(env, s->pc++);
+ mod = (modrm >> 6) & 3;
+ rm = modrm & 7;
+ op = ((b & 7) << 3) | ((modrm >> 3) & 7);
+ if (mod != 3) {
+ /* memory op */
+ gen_lea_modrm(env, s, modrm);
+ switch(op) {
+ case 0x00 ... 0x07: /* fxxxs */
+ case 0x10 ... 0x17: /* fixxxl */
+ case 0x20 ... 0x27: /* fxxxl */
+ case 0x30 ... 0x37: /* fixxx */
+ {
+ int op1;
+ op1 = op & 7;
+
+ switch(op >> 4) {
+ case 0:
+ tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
+ s->mem_index, MO_LEUL);
+ gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
+ break;
+ case 1:
+ tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
+ s->mem_index, MO_LEUL);
+ gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
+ break;
+ case 2:
+ tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
+ s->mem_index, MO_LEQ);
+ gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
+ break;
+ case 3:
+ default:
+ tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
+ s->mem_index, MO_LESW);
+ gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
+ break;
+ }
+
+ gen_helper_fp_arith_ST0_FT0(op1);
+ if (op1 == 3) {
+ /* fcomp needs pop */
+ gen_helper_fpop(cpu_env);
+ }
+ }
+ break;
+ case 0x08: /* flds */
+ case 0x0a: /* fsts */
+ case 0x0b: /* fstps */
+ case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
+ case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
+ case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
+ switch(op & 7) {
+ case 0:
+ switch(op >> 4) {
+ case 0:
+ tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
+ s->mem_index, MO_LEUL);
+ gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
+ break;
+ case 1:
+ tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
+ s->mem_index, MO_LEUL);
+ gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
+ break;
+ case 2:
+ tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
+ s->mem_index, MO_LEQ);
+ gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
+ break;
+ case 3:
+ default:
+ tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
+ s->mem_index, MO_LESW);
+ gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
+ break;
+ }
+ break;
+ case 1:
+ /* XXX: the corresponding CPUID bit must be tested! */
+ switch(op >> 4) {
+ case 1:
+ gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
+ tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
+ s->mem_index, MO_LEUL);
+ break;
+ case 2:
+ gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
+ tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
+ s->mem_index, MO_LEQ);
+ break;
+ case 3:
+ default:
+ gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
+ tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
+ s->mem_index, MO_LEUW);
+ break;
+ }
+ gen_helper_fpop(cpu_env);
+ break;
+ default:
+ switch(op >> 4) {
+ case 0:
+ gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
+ tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
+ s->mem_index, MO_LEUL);
+ break;
+ case 1:
+ gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
+ tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
+ s->mem_index, MO_LEUL);
+ break;
+ case 2:
+ gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
+ tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
+ s->mem_index, MO_LEQ);
+ break;
+ case 3:
+ default:
+ gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
+ tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
+ s->mem_index, MO_LEUW);
+ break;
+ }
+ if ((op & 7) == 3)
+ gen_helper_fpop(cpu_env);
+ break;
+ }
+ break;
+ case 0x0c: /* fldenv mem */
+ gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
+ break;
+ case 0x0d: /* fldcw mem */
+ tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
+ s->mem_index, MO_LEUW);
+ gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
+ break;
+ case 0x0e: /* fnstenv mem */
+ gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
+ break;
+ case 0x0f: /* fnstcw mem */
+ gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
+ tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
+ s->mem_index, MO_LEUW);
+ break;
+ case 0x1d: /* fldt mem */
+ gen_helper_fldt_ST0(cpu_env, cpu_A0);
+ break;
+ case 0x1f: /* fstpt mem */
+ gen_helper_fstt_ST0(cpu_env, cpu_A0);
+ gen_helper_fpop(cpu_env);
+ break;
+ case 0x2c: /* frstor mem */
+ gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
+ break;
+ case 0x2e: /* fnsave mem */
+ gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
+ break;
+ case 0x2f: /* fnstsw mem */
+ gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
+ tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
+ s->mem_index, MO_LEUW);
+ break;
+ case 0x3c: /* fbld */
+ gen_helper_fbld_ST0(cpu_env, cpu_A0);
+ break;
+ case 0x3e: /* fbstp */
+ gen_helper_fbst_ST0(cpu_env, cpu_A0);
+ gen_helper_fpop(cpu_env);
+ break;
+ case 0x3d: /* fildll */
+ tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
+ gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
+ break;
+ case 0x3f: /* fistpll */
+ gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
+ tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
+ gen_helper_fpop(cpu_env);
+ break;
+ default:
+ goto unknown_op;
+ }
+ } else {
+ /* register float ops */
+ opreg = rm;
+
+ switch(op) {
+ case 0x08: /* fld sti */
+ gen_helper_fpush(cpu_env);
+ gen_helper_fmov_ST0_STN(cpu_env,
+ tcg_const_i32((opreg + 1) & 7));
+ break;
+ case 0x09: /* fxchg sti */
+ case 0x29: /* fxchg4 sti, undocumented op */
+ case 0x39: /* fxchg7 sti, undocumented op */
+ gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
+ break;
+ case 0x0a: /* grp d9/2 */
+ switch(rm) {
+ case 0: /* fnop */
+ /* check exceptions (FreeBSD FPU probe) */
+ gen_helper_fwait(cpu_env);
+ break;
+ default:
+ goto unknown_op;
+ }
+ break;
+ case 0x0c: /* grp d9/4 */
+ switch(rm) {
+ case 0: /* fchs */
+ gen_helper_fchs_ST0(cpu_env);
+ break;
+ case 1: /* fabs */
+ gen_helper_fabs_ST0(cpu_env);
+ break;
+ case 4: /* ftst */
+ gen_helper_fldz_FT0(cpu_env);
+ gen_helper_fcom_ST0_FT0(cpu_env);
+ break;
+ case 5: /* fxam */
+ gen_helper_fxam_ST0(cpu_env);
+ break;
+ default:
+ goto unknown_op;
+ }
+ break;
+ case 0x0d: /* grp d9/5 */
+ {
+ switch(rm) {
+ case 0:
+ gen_helper_fpush(cpu_env);
+ gen_helper_fld1_ST0(cpu_env);
+ break;
+ case 1:
+ gen_helper_fpush(cpu_env);
+ gen_helper_fldl2t_ST0(cpu_env);
+ break;
+ case 2:
+ gen_helper_fpush(cpu_env);
+ gen_helper_fldl2e_ST0(cpu_env);
+ break;
+ case 3:
+ gen_helper_fpush(cpu_env);
+ gen_helper_fldpi_ST0(cpu_env);
+ break;
+ case 4:
+ gen_helper_fpush(cpu_env);
+ gen_helper_fldlg2_ST0(cpu_env);
+ break;
+ case 5:
+ gen_helper_fpush(cpu_env);
+ gen_helper_fldln2_ST0(cpu_env);
+ break;
+ case 6:
+ gen_helper_fpush(cpu_env);
+ gen_helper_fldz_ST0(cpu_env);
+ break;
+ default:
+ goto unknown_op;
+ }
+ }
+ break;
+ case 0x0e: /* grp d9/6 */
+ switch(rm) {
+ case 0: /* f2xm1 */
+ gen_helper_f2xm1(cpu_env);
+ break;
+ case 1: /* fyl2x */
+ gen_helper_fyl2x(cpu_env);
+ break;
+ case 2: /* fptan */
+ gen_helper_fptan(cpu_env);
+ break;
+ case 3: /* fpatan */
+ gen_helper_fpatan(cpu_env);
+ break;
+ case 4: /* fxtract */
+ gen_helper_fxtract(cpu_env);
+ break;
+ case 5: /* fprem1 */
+ gen_helper_fprem1(cpu_env);
+ break;
+ case 6: /* fdecstp */
+ gen_helper_fdecstp(cpu_env);
+ break;
+ default:
+ case 7: /* fincstp */
+ gen_helper_fincstp(cpu_env);
+ break;
+ }
+ break;
+ case 0x0f: /* grp d9/7 */
+ switch(rm) {
+ case 0: /* fprem */
+ gen_helper_fprem(cpu_env);
+ break;
+ case 1: /* fyl2xp1 */
+ gen_helper_fyl2xp1(cpu_env);
+ break;
+ case 2: /* fsqrt */
+ gen_helper_fsqrt(cpu_env);
+ break;
+ case 3: /* fsincos */
+ gen_helper_fsincos(cpu_env);
+ break;
+ case 5: /* fscale */
+ gen_helper_fscale(cpu_env);
+ break;
+ case 4: /* frndint */
+ gen_helper_frndint(cpu_env);
+ break;
+ case 6: /* fsin */
+ gen_helper_fsin(cpu_env);
+ break;
+ default:
+ case 7: /* fcos */
+ gen_helper_fcos(cpu_env);
+ break;
+ }
+ break;
+ case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
+ case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
+ case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
+ {
+ int op1;
+
+ op1 = op & 7;
+ if (op >= 0x20) {
+ gen_helper_fp_arith_STN_ST0(op1, opreg);
+ if (op >= 0x30)
+ gen_helper_fpop(cpu_env);
+ } else {
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fp_arith_ST0_FT0(op1);
+ }
+ }
+ break;
+ case 0x02: /* fcom */
+ case 0x22: /* fcom2, undocumented op */
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fcom_ST0_FT0(cpu_env);
+ break;
+ case 0x03: /* fcomp */
+ case 0x23: /* fcomp3, undocumented op */
+ case 0x32: /* fcomp5, undocumented op */
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fcom_ST0_FT0(cpu_env);
+ gen_helper_fpop(cpu_env);
+ break;
+ case 0x15: /* da/5 */
+ switch(rm) {
+ case 1: /* fucompp */
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
+ gen_helper_fucom_ST0_FT0(cpu_env);
+ gen_helper_fpop(cpu_env);
+ gen_helper_fpop(cpu_env);
+ break;
+ default:
+ goto unknown_op;
+ }
+ break;
+ case 0x1c:
+ switch(rm) {
+ case 0: /* feni (287 only, just do nop here) */
+ break;
+ case 1: /* fdisi (287 only, just do nop here) */
+ break;
+ case 2: /* fclex */
+ gen_helper_fclex(cpu_env);
+ break;
+ case 3: /* fninit */
+ gen_helper_fninit(cpu_env);
+ break;
+ case 4: /* fsetpm (287 only, just do nop here) */
+ break;
+ default:
+ goto unknown_op;
+ }
+ break;
+ case 0x1d: /* fucomi */
+ if (!(s->cpuid_features & CPUID_CMOV)) {
+ goto illegal_op;
+ }
+ gen_update_cc_op(s);
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fucomi_ST0_FT0(cpu_env);
+ set_cc_op(s, CC_OP_EFLAGS);
+ break;
+ case 0x1e: /* fcomi */
+ if (!(s->cpuid_features & CPUID_CMOV)) {
+ goto illegal_op;
+ }
+ gen_update_cc_op(s);
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fcomi_ST0_FT0(cpu_env);
+ set_cc_op(s, CC_OP_EFLAGS);
+ break;
+ case 0x28: /* ffree sti */
+ gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
+ break;
+ case 0x2a: /* fst sti */
+ gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
+ break;
+ case 0x2b: /* fstp sti */
+ case 0x0b: /* fstp1 sti, undocumented op */
+ case 0x3a: /* fstp8 sti, undocumented op */
+ case 0x3b: /* fstp9 sti, undocumented op */
+ gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fpop(cpu_env);
+ break;
+ case 0x2c: /* fucom st(i) */
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fucom_ST0_FT0(cpu_env);
+ break;
+ case 0x2d: /* fucomp st(i) */
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fucom_ST0_FT0(cpu_env);
+ gen_helper_fpop(cpu_env);
+ break;
+ case 0x33: /* de/3 */
+ switch(rm) {
+ case 1: /* fcompp */
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
+ gen_helper_fcom_ST0_FT0(cpu_env);
+ gen_helper_fpop(cpu_env);
+ gen_helper_fpop(cpu_env);
+ break;
+ default:
+ goto unknown_op;
+ }
+ break;
+ case 0x38: /* ffreep sti, undocumented op */
+ gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fpop(cpu_env);
+ break;
+ case 0x3c: /* df/4 */
+ switch(rm) {
+ case 0:
+ gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
+ tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
+ gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
+ break;
+ default:
+ goto unknown_op;
+ }
+ break;
+ case 0x3d: /* fucomip */
+ if (!(s->cpuid_features & CPUID_CMOV)) {
+ goto illegal_op;
+ }
+ gen_update_cc_op(s);
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fucomi_ST0_FT0(cpu_env);
+ gen_helper_fpop(cpu_env);
+ set_cc_op(s, CC_OP_EFLAGS);
+ break;
+ case 0x3e: /* fcomip */
+ if (!(s->cpuid_features & CPUID_CMOV)) {
+ goto illegal_op;
+ }
+ gen_update_cc_op(s);
+ gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_helper_fcomi_ST0_FT0(cpu_env);
+ gen_helper_fpop(cpu_env);
+ set_cc_op(s, CC_OP_EFLAGS);
+ break;
+ case 0x10 ... 0x13: /* fcmovxx */
+ case 0x18 ... 0x1b:
+ {
+ int op1;
+ TCGLabel *l1;
+ static const uint8_t fcmov_cc[8] = {
+ (JCC_B << 1),
+ (JCC_Z << 1),
+ (JCC_BE << 1),
+ (JCC_P << 1),
+ };
+
+ if (!(s->cpuid_features & CPUID_CMOV)) {
+ goto illegal_op;
+ }
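+ /* Branch over the fmov when the fcmov condition is false. */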
+ op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
+ l1 = gen_new_label();
+ gen_jcc1_noeob(s, op1, l1);
+ gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
+ gen_set_label(l1);
+ }
+ break;
+ default:
+ goto unknown_op;
+ }
+ }
+ break;
+ /************************/
+ /* string ops */
+
+ case 0xa4: /* movsS */
+ case 0xa5:
+ ot = mo_b_d(b, dflag);
+ if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
+ gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
+ } else {
+ gen_movs(s, ot);
+ }
+ break;
+
+ case 0xaa: /* stosS */
+ case 0xab:
+ ot = mo_b_d(b, dflag);
+ if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
+ gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
+ } else {
+ gen_stos(s, ot);
+ }
+ break;
+ case 0xac: /* lodsS */
+ case 0xad:
+ ot = mo_b_d(b, dflag);
+ if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
+ gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
+ } else {
+ gen_lods(s, ot);
+ }
+ break;
+ case 0xae: /* scasS */
+ case 0xaf:
+ ot = mo_b_d(b, dflag);
+ if (prefixes & PREFIX_REPNZ) {
+ gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
+ } else if (prefixes & PREFIX_REPZ) {
+ gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
+ } else {
+ gen_scas(s, ot);
+ }
+ break;
+
+ case 0xa6: /* cmpsS */
+ case 0xa7:
+ ot = mo_b_d(b, dflag);
+ if (prefixes & PREFIX_REPNZ) {
+ gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
+ } else if (prefixes & PREFIX_REPZ) {
+ gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
+ } else {
+ gen_cmps(s, ot);
+ }
+ break;
+ case 0x6c: /* insS */
+ case 0x6d:
+ ot = mo_b_d32(b, dflag);
+ tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
+ gen_check_io(s, ot, pc_start - s->cs_base,
+ SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
+ if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
+ gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
+ } else {
+ gen_ins(s, ot);
+ if (s->tb->cflags & CF_USE_ICOUNT) {
+ gen_jmp(s, s->pc - s->cs_base);
+ }
+ }
+ break;
+ case 0x6e: /* outsS */
+ case 0x6f:
+ ot = mo_b_d32(b, dflag);
+ tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
+ gen_check_io(s, ot, pc_start - s->cs_base,
+ svm_is_rep(prefixes) | 4);
+ if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
+ gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
+ } else {
+ gen_outs(s, ot);
+ if (s->tb->cflags & CF_USE_ICOUNT) {
+ gen_jmp(s, s->pc - s->cs_base);
+ }
+ }
+ break;
+
+ /************************/
+ /* port I/O */
+
+ case 0xe4:
+ case 0xe5:
+ ot = mo_b_d32(b, dflag);
+ val = cpu_ldub_code(env, s->pc++);
+ tcg_gen_movi_tl(cpu_T0, val);
+ gen_check_io(s, ot, pc_start - s->cs_base,
+ SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
+ if (s->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ tcg_gen_movi_i32(cpu_tmp2_i32, val);
+ gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
+ gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
+ gen_bpt_io(s, cpu_tmp2_i32, ot);
+ if (s->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_end();
+ gen_jmp(s, s->pc - s->cs_base);
+ }
+ break;
+ case 0xe6:
+ case 0xe7:
+ ot = mo_b_d32(b, dflag);
+ val = cpu_ldub_code(env, s->pc++);
+ tcg_gen_movi_tl(cpu_T0, val);
+ gen_check_io(s, ot, pc_start - s->cs_base,
+ svm_is_rep(prefixes));
+ gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
+
+ if (s->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ tcg_gen_movi_i32(cpu_tmp2_i32, val);
+ tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
+ gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
+ gen_bpt_io(s, cpu_tmp2_i32, ot);
+ if (s->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_end();
+ gen_jmp(s, s->pc - s->cs_base);
+ }
+ break;
+ case 0xec:
+ case 0xed:
+ ot = mo_b_d32(b, dflag);
+ tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
+ gen_check_io(s, ot, pc_start - s->cs_base,
+ SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
+ if (s->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+ gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
+ gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
+ gen_bpt_io(s, cpu_tmp2_i32, ot);
+ if (s->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_end();
+ gen_jmp(s, s->pc - s->cs_base);
+ }
+ break;
+ case 0xee:
+ case 0xef:
+ ot = mo_b_d32(b, dflag);
+ tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
+ gen_check_io(s, ot, pc_start - s->cs_base,
+ svm_is_rep(prefixes));
+ gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
+
+ if (s->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+ tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
+ gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
+ gen_bpt_io(s, cpu_tmp2_i32, ot);
+ if (s->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_end();
+ gen_jmp(s, s->pc - s->cs_base);
+ }
+ break;
+
+ /************************/
+ /* control */
+ case 0xc2: /* ret im */
+ val = cpu_ldsw_code(env, s->pc);
+ s->pc += 2;
+ ot = gen_pop_T0(s);
+ gen_stack_update(s, val + (1 << ot));
+ /* Note that gen_pop_T0 uses a zero-extending load. */
+ gen_op_jmp_v(cpu_T0);
+ gen_bnd_jmp(s);
+ gen_eob(s);
+ break;
+ case 0xc3: /* ret */
+ ot = gen_pop_T0(s);
+ gen_pop_update(s, ot);
+ /* Note that gen_pop_T0 uses a zero-extending load. */
+ gen_op_jmp_v(cpu_T0);
+ gen_bnd_jmp(s);
+ gen_eob(s);
+ break;
+ case 0xca: /* lret im */
+ val = cpu_ldsw_code(env, s->pc);
+ s->pc += 2;
+ do_lret:
+ if (s->pe && !s->vm86) {
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_helper_lret_protected(cpu_env, tcg_const_i32(dflag - 1),
+ tcg_const_i32(val));
+ } else {
+ gen_stack_A0(s);
+ /* pop offset */
+ gen_op_ld_v(s, dflag, cpu_T0, cpu_A0);
+ /* NOTE: keeping EIP updated is not a problem in case of
+ exception */
+ gen_op_jmp_v(cpu_T0);
+ /* pop selector */
+ gen_add_A0_im(s, 1 << dflag);
+ gen_op_ld_v(s, dflag, cpu_T0, cpu_A0);
+ gen_op_movl_seg_T0_vm(R_CS);
+ /* add stack offset */
+ gen_stack_update(s, val + (2 << dflag));
+ }
+ gen_eob(s);
+ break;
+ case 0xcb: /* lret */
+ val = 0;
+ goto do_lret;
+ case 0xcf: /* iret */
+ gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
+ if (!s->pe) {
+ /* real mode */
+ gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
+ set_cc_op(s, CC_OP_EFLAGS);
+ } else if (s->vm86) {
+ if (s->iopl != 3) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ } else {
+ gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
+ set_cc_op(s, CC_OP_EFLAGS);
+ }
+ } else {
+ gen_helper_iret_protected(cpu_env, tcg_const_i32(dflag - 1),
+ tcg_const_i32(s->pc - s->cs_base));
+ set_cc_op(s, CC_OP_EFLAGS);
+ }
+ gen_eob(s);
+ break;
+ case 0xe8: /* call im */
+ {
+ if (dflag != MO_16) {
+ tval = (int32_t)insn_get(env, s, MO_32);
+ } else {
+ tval = (int16_t)insn_get(env, s, MO_16);
+ }
+ next_eip = s->pc - s->cs_base;
+ tval += next_eip;
+ if (dflag == MO_16) {
+ tval &= 0xffff;
+ } else if (!CODE64(s)) {
+ tval &= 0xffffffff;
+ }
+ tcg_gen_movi_tl(cpu_T0, next_eip);
+ gen_push_v(s, cpu_T0);
+ gen_bnd_jmp(s);
+ gen_jmp(s, tval);
+ }
+ break;
+ case 0x9a: /* lcall im */
+ {
+ unsigned int selector, offset;
+
+ if (CODE64(s))
+ goto illegal_op;
+ ot = dflag;
+ offset = insn_get(env, s, ot);
+ selector = insn_get(env, s, MO_16);
+
+ tcg_gen_movi_tl(cpu_T0, selector);
+ tcg_gen_movi_tl(cpu_T1, offset);
+ }
+ goto do_lcall;
+ case 0xe9: /* jmp im */
+ if (dflag != MO_16) {
+ tval = (int32_t)insn_get(env, s, MO_32);
+ } else {
+ tval = (int16_t)insn_get(env, s, MO_16);
+ }
+ tval += s->pc - s->cs_base;
+ if (dflag == MO_16) {
+ tval &= 0xffff;
+ } else if (!CODE64(s)) {
+ tval &= 0xffffffff;
+ }
+ gen_bnd_jmp(s);
+ gen_jmp(s, tval);
+ break;
+ case 0xea: /* ljmp im */
+ {
+ unsigned int selector, offset;
+
+ if (CODE64(s))
+ goto illegal_op;
+ ot = dflag;
+ offset = insn_get(env, s, ot);
+ selector = insn_get(env, s, MO_16);
+
+ tcg_gen_movi_tl(cpu_T0, selector);
+ tcg_gen_movi_tl(cpu_T1, offset);
+ }
+ goto do_ljmp;
+ case 0xeb: /* jmp Jb */
+ tval = (int8_t)insn_get(env, s, MO_8);
+ tval += s->pc - s->cs_base;
+ if (dflag == MO_16) {
+ tval &= 0xffff;
+ }
+ gen_jmp(s, tval);
+ break;
+ case 0x70 ... 0x7f: /* jcc Jb */
+ tval = (int8_t)insn_get(env, s, MO_8);
+ goto do_jcc;
+ case 0x180 ... 0x18f: /* jcc Jv */
+ if (dflag != MO_16) {
+ tval = (int32_t)insn_get(env, s, MO_32);
+ } else {
+ tval = (int16_t)insn_get(env, s, MO_16);
+ }
+ do_jcc:
+ next_eip = s->pc - s->cs_base;
+ tval += next_eip;
+ if (dflag == MO_16) {
+ tval &= 0xffff;
+ }
+ gen_bnd_jmp(s);
+ gen_jcc(s, b, tval, next_eip);
+ break;
+
+ case 0x190 ... 0x19f: /* setcc Gv */
+ modrm = cpu_ldub_code(env, s->pc++);
+ gen_setcc1(s, b, cpu_T0);
+ gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
+ break;
+ case 0x140 ... 0x14f: /* cmov Gv, Ev */
+ if (!(s->cpuid_features & CPUID_CMOV)) {
+ goto illegal_op;
+ }
+ ot = dflag;
+ modrm = cpu_ldub_code(env, s->pc++);
+ reg = ((modrm >> 3) & 7) | rex_r;
+ gen_cmovcc1(env, s, ot, b, modrm, reg);
+ break;
+
+ /************************/
+ /* flags */
+ case 0x9c: /* pushf */
+ gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
+ if (s->vm86 && s->iopl != 3) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ } else {
+ gen_update_cc_op(s);
+ gen_helper_read_eflags(cpu_T0, cpu_env);
+ gen_push_v(s, cpu_T0);
+ }
+ break;
+ case 0x9d: /* popf */
+ gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
+ if (s->vm86 && s->iopl != 3) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ } else {
+ ot = gen_pop_T0(s);
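+ /* The set of writable EFLAGS bits depends on privilege:
+ CPL 0 may change IOPL and IF, CPL <= IOPL may change IF
+ but not IOPL, and otherwise neither may change. */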
+ if (s->cpl == 0) {
+ if (dflag != MO_16) {
+ gen_helper_write_eflags(cpu_env, cpu_T0,
+ tcg_const_i32((TF_MASK | AC_MASK |
+ ID_MASK | NT_MASK |
+ IF_MASK |
+ IOPL_MASK)));
+ } else {
+ gen_helper_write_eflags(cpu_env, cpu_T0,
+ tcg_const_i32((TF_MASK | AC_MASK |
+ ID_MASK | NT_MASK |
+ IF_MASK | IOPL_MASK)
+ & 0xffff));
+ }
+ } else {
+ if (s->cpl <= s->iopl) {
+ if (dflag != MO_16) {
+ gen_helper_write_eflags(cpu_env, cpu_T0,
+ tcg_const_i32((TF_MASK |
+ AC_MASK |
+ ID_MASK |
+ NT_MASK |
+ IF_MASK)));
+ } else {
+ gen_helper_write_eflags(cpu_env, cpu_T0,
+ tcg_const_i32((TF_MASK |
+ AC_MASK |
+ ID_MASK |
+ NT_MASK |
+ IF_MASK)
+ & 0xffff));
+ }
+ } else {
+ if (dflag != MO_16) {
+ gen_helper_write_eflags(cpu_env, cpu_T0,
+ tcg_const_i32((TF_MASK | AC_MASK |
+ ID_MASK | NT_MASK)));
+ } else {
+ gen_helper_write_eflags(cpu_env, cpu_T0,
+ tcg_const_i32((TF_MASK | AC_MASK |
+ ID_MASK | NT_MASK)
+ & 0xffff));
+ }
+ }
+ }
+ gen_pop_update(s, ot);
+ set_cc_op(s, CC_OP_EFLAGS);
+ /* abort translation because TF/AC flag may change */
+ gen_jmp_im(s->pc - s->cs_base);
+ gen_eob(s);
+ }
+ break;
+ case 0x9e: /* sahf */
+ if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
+ goto illegal_op;
+ gen_op_mov_v_reg(MO_8, cpu_T0, R_AH);
+ gen_compute_eflags(s);
+ tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
+ tcg_gen_andi_tl(cpu_T0, cpu_T0, CC_S | CC_Z | CC_A | CC_P | CC_C);
+ tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T0);
+ break;
+ case 0x9f: /* lahf */
+ if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
+ goto illegal_op;
+ gen_compute_eflags(s);
+ /* Note: gen_compute_eflags() only gives the condition codes */
+ tcg_gen_ori_tl(cpu_T0, cpu_cc_src, 0x02);
+ gen_op_mov_reg_v(MO_8, R_AH, cpu_T0);
+ break;
+ case 0xf5: /* cmc */
+ gen_compute_eflags(s);
+ tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
+ break;
+ case 0xf8: /* clc */
+ gen_compute_eflags(s);
+ tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
+ break;
+ case 0xf9: /* stc */
+ gen_compute_eflags(s);
+ tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
+ break;
+ case 0xfc: /* cld */
+ tcg_gen_movi_i32(cpu_tmp2_i32, 1);
+ tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
+ break;
+ case 0xfd: /* std */
+ tcg_gen_movi_i32(cpu_tmp2_i32, -1);
+ tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
+ break;
+
+ /************************/
+ /* bit operations */
+ case 0x1ba: /* bt/bts/btr/btc Gv, im */
+ ot = dflag;
+ modrm = cpu_ldub_code(env, s->pc++);
+ op = (modrm >> 3) & 7;
+ mod = (modrm >> 6) & 3;
+ rm = (modrm & 7) | REX_B(s);
+ if (mod != 3) {
+ s->rip_offset = 1;
+ gen_lea_modrm(env, s, modrm);
+ if (!(s->prefix & PREFIX_LOCK)) {
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+ }
+ } else {
+ gen_op_mov_v_reg(ot, cpu_T0, rm);
+ }
+ /* load shift */
+ val = cpu_ldub_code(env, s->pc++);
+ tcg_gen_movi_tl(cpu_T1, val);
+ if (op < 4)
+ goto unknown_op;
+ op -= 4;
+ goto bt_op;
+ case 0x1a3: /* bt Gv, Ev */
+ op = 0;
+ goto do_btx;
+ case 0x1ab: /* bts */
+ op = 1;
+ goto do_btx;
+ case 0x1b3: /* btr */
+ op = 2;
+ goto do_btx;
+ case 0x1bb: /* btc */
+ op = 3;
+ do_btx:
+ ot = dflag;
+ modrm = cpu_ldub_code(env, s->pc++);
+ reg = ((modrm >> 3) & 7) | rex_r;
+ mod = (modrm >> 6) & 3;
+ rm = (modrm & 7) | REX_B(s);
+ gen_op_mov_v_reg(MO_32, cpu_T1, reg);
+ if (mod != 3) {
+ AddressParts a = gen_lea_modrm_0(env, s, modrm);
+ /* Memory operand: the bit offset may index beyond the
+ addressed operand, so add (bit_offset >> (3 + ot)) << ot
+ bytes of displacement. */
+ gen_exts(ot, cpu_T1);
+ tcg_gen_sari_tl(cpu_tmp0, cpu_T1, 3 + ot);
+ tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
+ tcg_gen_add_tl(cpu_A0, gen_lea_modrm_1(a), cpu_tmp0);
+ gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
+ if (!(s->prefix & PREFIX_LOCK)) {
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+ }
+ } else {
+ gen_op_mov_v_reg(ot, cpu_T0, rm);
+ }
+ bt_op:
+ tcg_gen_andi_tl(cpu_T1, cpu_T1, (1 << (3 + ot)) - 1);
+ tcg_gen_movi_tl(cpu_tmp0, 1);
+ tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T1);
+ if (s->prefix & PREFIX_LOCK) {
+ switch (op) {
+ case 0: /* bt */
+ /* Needs no atomic ops; we suppressed the normal
+ memory load for LOCK above so do it now. */
+ gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
+ break;
+ case 1: /* bts */
+ tcg_gen_atomic_fetch_or_tl(cpu_T0, cpu_A0, cpu_tmp0,
+ s->mem_index, ot | MO_LE);
+ break;
+ case 2: /* btr */
+ tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
+ tcg_gen_atomic_fetch_and_tl(cpu_T0, cpu_A0, cpu_tmp0,
+ s->mem_index, ot | MO_LE);
+ break;
+ default:
+ case 3: /* btc */
+ tcg_gen_atomic_fetch_xor_tl(cpu_T0, cpu_A0, cpu_tmp0,
+ s->mem_index, ot | MO_LE);
+ break;
+ }
+ tcg_gen_shr_tl(cpu_tmp4, cpu_T0, cpu_T1);
+ } else {
+ tcg_gen_shr_tl(cpu_tmp4, cpu_T0, cpu_T1);
+ switch (op) {
+ case 0: /* bt */
+ /* Data already loaded; nothing to do. */
+ break;
+ case 1: /* bts */
+ tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_tmp0);
+ break;
+ case 2: /* btr */
+ tcg_gen_andc_tl(cpu_T0, cpu_T0, cpu_tmp0);
+ break;
+ default:
+ case 3: /* btc */
+ tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_tmp0);
+ break;
+ }
+ if (op != 0) {
+ if (mod != 3) {
+ gen_op_st_v(s, ot, cpu_T0, cpu_A0);
+ } else {
+ gen_op_mov_reg_v(ot, rm, cpu_T0);
+ }
+ }
+ }
+
+ /* Delay all CC updates until after the store above. Note that
+ C is the result of the test, Z is unchanged, and the others
+ are all undefined. */
+ switch (s->cc_op) {
+ case CC_OP_MULB ... CC_OP_MULQ:
+ case CC_OP_ADDB ... CC_OP_ADDQ:
+ case CC_OP_ADCB ... CC_OP_ADCQ:
+ case CC_OP_SUBB ... CC_OP_SUBQ:
+ case CC_OP_SBBB ... CC_OP_SBBQ:
+ case CC_OP_LOGICB ... CC_OP_LOGICQ:
+ case CC_OP_INCB ... CC_OP_INCQ:
+ case CC_OP_DECB ... CC_OP_DECQ:
+ case CC_OP_SHLB ... CC_OP_SHLQ:
+ case CC_OP_SARB ... CC_OP_SARQ:
+ case CC_OP_BMILGB ... CC_OP_BMILGQ:
+ /* Z was going to be computed from the non-zero status of CC_DST.
+ We can get that same Z value (and the new C value) by leaving
+ CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
+ same width. */
+ tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
+ set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
+ break;
+ default:
+ /* Otherwise, generate EFLAGS and replace the C bit. */
+ gen_compute_eflags(s);
+ tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, cpu_tmp4,
+ ctz32(CC_C), 1);
+ break;
+ }
+ break;
+ case 0x1bc: /* bsf / tzcnt */
+ case 0x1bd: /* bsr / lzcnt */
+ ot = dflag;
+ modrm = cpu_ldub_code(env, s->pc++);
+ reg = ((modrm >> 3) & 7) | rex_r;
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+ gen_extu(ot, cpu_T0);
+
+ /* Note that lzcnt and tzcnt are in different extensions. */
+ if ((prefixes & PREFIX_REPZ)
+ && (b & 1
+ ? s->cpuid_ext3_features & CPUID_EXT3_ABM
+ : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
+ int size = 8 << ot;
+ tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
+ if (b & 1) {
+ /* For lzcnt, reduce the target_ulong result by the
+ number of zeros that we expect to find at the top. */
+ gen_helper_clz(cpu_T0, cpu_T0);
+ tcg_gen_subi_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - size);
+ } else {
+ /* For tzcnt, a zero input must return the operand size:
+ force all bits outside the operand size to 1. */
+ target_ulong mask = (target_ulong)-2 << (size - 1);
+ tcg_gen_ori_tl(cpu_T0, cpu_T0, mask);
+ gen_helper_ctz(cpu_T0, cpu_T0);
+ }
+ /* For lzcnt/tzcnt, C and Z bits are defined and are
+ related to the result. */
+ gen_op_update1_cc();
+ set_cc_op(s, CC_OP_BMILGB + ot);
+ } else {
+ /* For bsr/bsf, only the Z bit is defined and it is related
+ to the input and not the result. */
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
+ set_cc_op(s, CC_OP_LOGICB + ot);
+ if (b & 1) {
+ /* For bsr, return the bit index of the first 1 bit,
+ not the count of leading zeros. */
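+ /* For a non-zero input, clz ^ (TARGET_LONG_BITS - 1) equals
+ (TARGET_LONG_BITS - 1) - clz, i.e. the index of the highest
+ set bit; the zero-input case is patched up below. */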
+ gen_helper_clz(cpu_T0, cpu_T0);
+ tcg_gen_xori_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - 1);
+ } else {
+ gen_helper_ctz(cpu_T0, cpu_T0);
+ }
+ /* ??? The manual says that the output is undefined when the
+ input is zero, but real hardware leaves it unchanged, and
+ real programs appear to depend on that. */
+ tcg_gen_movi_tl(cpu_tmp0, 0);
+ tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T0, cpu_cc_dst, cpu_tmp0,
+ cpu_regs[reg], cpu_T0);
+ }
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
+ break;
+ /************************/
+ /* bcd */
+ case 0x27: /* daa */
+ if (CODE64(s))
+ goto illegal_op;
+ gen_update_cc_op(s);
+ gen_helper_daa(cpu_env);
+ set_cc_op(s, CC_OP_EFLAGS);
+ break;
+ case 0x2f: /* das */
+ if (CODE64(s))
+ goto illegal_op;
+ gen_update_cc_op(s);
+ gen_helper_das(cpu_env);
+ set_cc_op(s, CC_OP_EFLAGS);
+ break;
+ case 0x37: /* aaa */
+ if (CODE64(s))
+ goto illegal_op;
+ gen_update_cc_op(s);
+ gen_helper_aaa(cpu_env);
+ set_cc_op(s, CC_OP_EFLAGS);
+ break;
+ case 0x3f: /* aas */
+ if (CODE64(s))
+ goto illegal_op;
+ gen_update_cc_op(s);
+ gen_helper_aas(cpu_env);
+ set_cc_op(s, CC_OP_EFLAGS);
+ break;
+ case 0xd4: /* aam */
+ if (CODE64(s))
+ goto illegal_op;
+ val = cpu_ldub_code(env, s->pc++);
+ if (val == 0) {
+ gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
+ } else {
+ gen_helper_aam(cpu_env, tcg_const_i32(val));
+ set_cc_op(s, CC_OP_LOGICB);
+ }
+ break;
+ case 0xd5: /* aad */
+ if (CODE64(s))
+ goto illegal_op;
+ val = cpu_ldub_code(env, s->pc++);
+ gen_helper_aad(cpu_env, tcg_const_i32(val));
+ set_cc_op(s, CC_OP_LOGICB);
+ break;
+ /************************/
+ /* misc */
+ case 0x90: /* nop */
+ /* XXX: correct lock test for all insn */
+ if (prefixes & PREFIX_LOCK) {
+ goto illegal_op;
+ }
+ /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
+ if (REX_B(s)) {
+ goto do_xchg_reg_eax;
+ }
+ if (prefixes & PREFIX_REPZ) {
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_helper_pause(cpu_env, tcg_const_i32(s->pc - pc_start));
+ s->is_jmp = DISAS_TB_JUMP;
+ }
+ break;
+ case 0x9b: /* fwait */
+ if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
+ (HF_MP_MASK | HF_TS_MASK)) {
+ gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
+ } else {
+ gen_helper_fwait(cpu_env);
+ }
+ break;
+ case 0xcc: /* int3 */
+ gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
+ break;
+ case 0xcd: /* int N */
+ val = cpu_ldub_code(env, s->pc++);
+ if (s->vm86 && s->iopl != 3) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ } else {
+ gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
+ }
+ break;
+ case 0xce: /* into */
+ if (CODE64(s))
+ goto illegal_op;
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
+ break;
+#ifdef WANT_ICEBP
+ case 0xf1: /* icebp (undocumented, exits to external debugger) */
+ gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
+#if 1
+ gen_debug(s, pc_start - s->cs_base);
+#else
+ /* start debug */
+ tb_flush(CPU(x86_env_get_cpu(env)));
+ qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
+#endif
+ break;
+#endif
+ case 0xfa: /* cli */
+ if (!s->vm86) {
+ if (s->cpl <= s->iopl) {
+ gen_helper_cli(cpu_env);
+ } else {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ }
+ } else {
+ if (s->iopl == 3) {
+ gen_helper_cli(cpu_env);
+ } else {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ }
+ }
+ break;
+ case 0xfb: /* sti */
+ if (s->vm86 ? s->iopl == 3 : s->cpl <= s->iopl) {
+ gen_helper_sti(cpu_env);
+ /* interrupts are enabled only one insn after sti */
+ gen_jmp_im(s->pc - s->cs_base);
+ gen_eob_inhibit_irq(s, true);
+ } else {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ }
+ break;
+ case 0x62: /* bound */
+ if (CODE64(s))
+ goto illegal_op;
+ ot = dflag;
+ modrm = cpu_ldub_code(env, s->pc++);
+ reg = (modrm >> 3) & 7;
+ mod = (modrm >> 6) & 3;
+ if (mod == 3)
+ goto illegal_op;
+ gen_op_mov_v_reg(ot, cpu_T0, reg);
+ gen_lea_modrm(env, s, modrm);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+ if (ot == MO_16) {
+ gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
+ } else {
+ gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32);
+ }
+ break;
+ case 0x1c8 ... 0x1cf: /* bswap reg */
+ reg = (b & 7) | REX_B(s);
+#ifdef TARGET_X86_64
+ if (dflag == MO_64) {
+ gen_op_mov_v_reg(MO_64, cpu_T0, reg);
+ tcg_gen_bswap64_i64(cpu_T0, cpu_T0);
+ gen_op_mov_reg_v(MO_64, reg, cpu_T0);
+ } else
+#endif
+ {
+ gen_op_mov_v_reg(MO_32, cpu_T0, reg);
+ tcg_gen_ext32u_tl(cpu_T0, cpu_T0);
+ tcg_gen_bswap32_tl(cpu_T0, cpu_T0);
+ gen_op_mov_reg_v(MO_32, reg, cpu_T0);
+ }
+ break;
+ case 0xd6: /* salc */
+ if (CODE64(s))
+ goto illegal_op;
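+ /* Undocumented: set AL to 0xff if CF is set, 0x00 otherwise. */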
+ gen_compute_eflags_c(s, cpu_T0);
+ tcg_gen_neg_tl(cpu_T0, cpu_T0);
+ gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0);
+ break;
+ case 0xe0: /* loopnz */
+ case 0xe1: /* loopz */
+ case 0xe2: /* loop */
+ case 0xe3: /* jecxz */
+ {
+ TCGLabel *l1, *l2, *l3;
+
+ tval = (int8_t)insn_get(env, s, MO_8);
+ next_eip = s->pc - s->cs_base;
+ tval += next_eip;
+ if (dflag == MO_16) {
+ tval &= 0xffff;
+ }
+
+ l1 = gen_new_label();
+ l2 = gen_new_label();
+ l3 = gen_new_label();
+ b &= 3;
+ switch(b) {
+ case 0: /* loopnz */
+ case 1: /* loopz */
+ gen_op_add_reg_im(s->aflag, R_ECX, -1);
+ gen_op_jz_ecx(s->aflag, l3);
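+ /* Take the branch only if ZF matches: set for loopz,
+ clear for loopnz (the polarity comes from b ^ 1). */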
+ gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
+ break;
+ case 2: /* loop */
+ gen_op_add_reg_im(s->aflag, R_ECX, -1);
+ gen_op_jnz_ecx(s->aflag, l1);
+ break;
+ default:
+ case 3: /* jcxz */
+ gen_op_jz_ecx(s->aflag, l1);
+ break;
+ }
+
+ gen_set_label(l3);
+ gen_jmp_im(next_eip);
+ tcg_gen_br(l2);
+
+ gen_set_label(l1);
+ gen_jmp_im(tval);
+ gen_set_label(l2);
+ gen_eob(s);
+ }
+ break;
+ case 0x130: /* wrmsr */
+ case 0x132: /* rdmsr */
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ } else {
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ if (b & 2) {
+ gen_helper_rdmsr(cpu_env);
+ } else {
+ gen_helper_wrmsr(cpu_env);
+ }
+ }
+ break;
+ case 0x131: /* rdtsc */
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ if (s->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_rdtsc(cpu_env);
+ if (s->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_end();
+ gen_jmp(s, s->pc - s->cs_base);
+ }
+ break;
+ case 0x133: /* rdpmc */
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_helper_rdpmc(cpu_env);
+ break;
+ case 0x134: /* sysenter */
+ /* On Intel CPUs, SYSENTER is valid even in 64-bit mode */
+ if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
+ goto illegal_op;
+ if (!s->pe) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ } else {
+ gen_helper_sysenter(cpu_env);
+ gen_eob(s);
+ }
+ break;
+ case 0x135: /* sysexit */
+ /* On Intel CPUs, SYSEXIT is valid even in 64-bit mode */
+ if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
+ goto illegal_op;
+ if (!s->pe) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ } else {
+ gen_helper_sysexit(cpu_env, tcg_const_i32(dflag - 1));
+ gen_eob(s);
+ }
+ break;
+#ifdef TARGET_X86_64
+ case 0x105: /* syscall */
+ /* XXX: is it usable in real mode? */
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
+ gen_eob(s);
+ break;
+ case 0x107: /* sysret */
+ if (!s->pe) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ } else {
+ gen_helper_sysret(cpu_env, tcg_const_i32(dflag - 1));
+ /* condition codes are modified only in long mode */
+ if (s->lma) {
+ set_cc_op(s, CC_OP_EFLAGS);
+ }
+ gen_eob(s);
+ }
+ break;
+#endif
+ case 0x1a2: /* cpuid */
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_helper_cpuid(cpu_env);
+ break;
+ case 0xf4: /* hlt */
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ } else {
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
+ s->is_jmp = DISAS_TB_JUMP;
+ }
+ break;
+ case 0x100:
+ modrm = cpu_ldub_code(env, s->pc++);
+ mod = (modrm >> 6) & 3;
+ op = (modrm >> 3) & 7;
+ switch(op) {
+ case 0: /* sldt */
+ if (!s->pe || s->vm86)
+ goto illegal_op;
+ gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
+ tcg_gen_ld32u_tl(cpu_T0, cpu_env,
+ offsetof(CPUX86State, ldt.selector));
+ ot = mod == 3 ? dflag : MO_16;
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
+ break;
+ case 2: /* lldt */
+ if (!s->pe || s->vm86)
+ goto illegal_op;
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ } else {
+ gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
+ gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+ gen_helper_lldt(cpu_env, cpu_tmp2_i32);
+ }
+ break;
+ case 1: /* str */
+ if (!s->pe || s->vm86)
+ goto illegal_op;
+ gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
+ tcg_gen_ld32u_tl(cpu_T0, cpu_env,
+ offsetof(CPUX86State, tr.selector));
+ ot = mod == 3 ? dflag : MO_16;
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
+ break;
+ case 3: /* ltr */
+ if (!s->pe || s->vm86)
+ goto illegal_op;
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ } else {
+ gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
+ gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
+ gen_helper_ltr(cpu_env, cpu_tmp2_i32);
+ }
+ break;
+ case 4: /* verr */
+ case 5: /* verw */
+ if (!s->pe || s->vm86)
+ goto illegal_op;
+ gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
+ gen_update_cc_op(s);
+ if (op == 4) {
+ gen_helper_verr(cpu_env, cpu_T0);
+ } else {
+ gen_helper_verw(cpu_env, cpu_T0);
+ }
+ set_cc_op(s, CC_OP_EFLAGS);
+ break;
+ default:
+ goto unknown_op;
+ }
+ break;
+
+ case 0x101:
+ modrm = cpu_ldub_code(env, s->pc++);
+ switch (modrm) {
+ CASE_MODRM_MEM_OP(0): /* sgdt */
+ gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
+ gen_lea_modrm(env, s, modrm);
+ tcg_gen_ld32u_tl(cpu_T0,
+ cpu_env, offsetof(CPUX86State, gdt.limit));
+ gen_op_st_v(s, MO_16, cpu_T0, cpu_A0);
+ gen_add_A0_im(s, 2);
+ tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base));
+ if (dflag == MO_16) {
+ tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
+ }
+ gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
+ break;
+
+ case 0xc8: /* monitor */
+ if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) {
+ goto illegal_op;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EAX]);
+ gen_extu(s->aflag, cpu_A0);
+ gen_add_A0_ds_seg(s);
+ gen_helper_monitor(cpu_env, cpu_A0);
+ break;
+
+ case 0xc9: /* mwait */
+ if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) {
+ goto illegal_op;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
+ gen_eob(s);
+ break;
+
+ case 0xca: /* clac */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
+ || s->cpl != 0) {
+ goto illegal_op;
+ }
+ gen_helper_clac(cpu_env);
+ gen_jmp_im(s->pc - s->cs_base);
+ gen_eob(s);
+ break;
+
+ case 0xcb: /* stac */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
+ || s->cpl != 0) {
+ goto illegal_op;
+ }
+ gen_helper_stac(cpu_env);
+ gen_jmp_im(s->pc - s->cs_base);
+ gen_eob(s);
+ break;
+
+ CASE_MODRM_MEM_OP(1): /* sidt */
+ gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
+ gen_lea_modrm(env, s, modrm);
+ tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.limit));
+ gen_op_st_v(s, MO_16, cpu_T0, cpu_A0);
+ gen_add_A0_im(s, 2);
+ tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base));
+ if (dflag == MO_16) {
+ tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
+ }
+ gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
+ break;
+
+ case 0xd0: /* xgetbv */
+ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
+ || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
+ | PREFIX_REPZ | PREFIX_REPNZ))) {
+ goto illegal_op;
+ }
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
+ gen_helper_xgetbv(cpu_tmp1_i64, cpu_env, cpu_tmp2_i32);
+ tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64);
+ break;
+
+ case 0xd1: /* xsetbv */
+ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
+ || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
+ | PREFIX_REPZ | PREFIX_REPNZ))) {
+ goto illegal_op;
+ }
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ break;
+ }
+ tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
+ cpu_regs[R_EDX]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
+ gen_helper_xsetbv(cpu_env, cpu_tmp2_i32, cpu_tmp1_i64);
+ /* End TB because translation flags may change. */
+ gen_jmp_im(s->pc - s->cs_base);
+ gen_eob(s);
+ break;
+
+ case 0xd8: /* VMRUN */
+ if (!(s->flags & HF_SVME_MASK) || !s->pe) {
+ goto illegal_op;
+ }
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ break;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1),
+ tcg_const_i32(s->pc - pc_start));
+ tcg_gen_exit_tb(0);
+ s->is_jmp = DISAS_TB_JUMP;
+ break;
+
+ case 0xd9: /* VMMCALL */
+ if (!(s->flags & HF_SVME_MASK)) {
+ goto illegal_op;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_helper_vmmcall(cpu_env);
+ break;
+
+ case 0xda: /* VMLOAD */
+ if (!(s->flags & HF_SVME_MASK) || !s->pe) {
+ goto illegal_op;
+ }
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ break;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1));
+ break;
+
+ case 0xdb: /* VMSAVE */
+ if (!(s->flags & HF_SVME_MASK) || !s->pe) {
+ goto illegal_op;
+ }
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ break;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1));
+ break;
+
+ case 0xdc: /* STGI */
+ if ((!(s->flags & HF_SVME_MASK)
+ && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
+ || !s->pe) {
+ goto illegal_op;
+ }
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ break;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_helper_stgi(cpu_env);
+ break;
+
+ case 0xdd: /* CLGI */
+ if (!(s->flags & HF_SVME_MASK) || !s->pe) {
+ goto illegal_op;
+ }
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ break;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_helper_clgi(cpu_env);
+ break;
+
+ case 0xde: /* SKINIT */
+ if ((!(s->flags & HF_SVME_MASK)
+ && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
+ || !s->pe) {
+ goto illegal_op;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_helper_skinit(cpu_env);
+ break;
+
+ case 0xdf: /* INVLPGA */
+ if (!(s->flags & HF_SVME_MASK) || !s->pe) {
+ goto illegal_op;
+ }
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ break;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag - 1));
+ break;
+
+ CASE_MODRM_MEM_OP(2): /* lgdt */
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ break;
+ }
+ gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_WRITE);
+ gen_lea_modrm(env, s, modrm);
+ gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
+ gen_add_A0_im(s, 2);
+ gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
+ if (dflag == MO_16) {
+ tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
+ }
+ tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base));
+ tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, gdt.limit));
+ break;
+
+ CASE_MODRM_MEM_OP(3): /* lidt */
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ break;
+ }
+ gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_WRITE);
+ gen_lea_modrm(env, s, modrm);
+ gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
+ gen_add_A0_im(s, 2);
+ gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
+ if (dflag == MO_16) {
+ tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
+ }
+ tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base));
+ tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, idt.limit));
+ break;
+
+ CASE_MODRM_OP(4): /* smsw */
+ gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
+ tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, cr[0]));
+ if (CODE64(s)) {
+ mod = (modrm >> 6) & 3;
+ ot = (mod != 3 ? MO_16 : s->dflag);
+ } else {
+ ot = MO_16;
+ }
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
+ break;
+ case 0xee: /* rdpkru */
+ if (prefixes & PREFIX_LOCK) {
+ goto illegal_op;
+ }
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
+ gen_helper_rdpkru(cpu_tmp1_i64, cpu_env, cpu_tmp2_i32);
+ tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64);
+ break;
+ case 0xef: /* wrpkru */
+ if (prefixes & PREFIX_LOCK) {
+ goto illegal_op;
+ }
+ tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
+ cpu_regs[R_EDX]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
+ gen_helper_wrpkru(cpu_env, cpu_tmp2_i32, cpu_tmp1_i64);
+ break;
+ CASE_MODRM_OP(6): /* lmsw */
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ break;
+ }
+ gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
+ gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
+ gen_helper_lmsw(cpu_env, cpu_T0);
+ gen_jmp_im(s->pc - s->cs_base);
+ gen_eob(s);
+ break;
+
+ CASE_MODRM_MEM_OP(7): /* invlpg */
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ break;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_lea_modrm(env, s, modrm);
+ gen_helper_invlpg(cpu_env, cpu_A0);
+ gen_jmp_im(s->pc - s->cs_base);
+ gen_eob(s);
+ break;
+
+ case 0xf8: /* swapgs */
+#ifdef TARGET_X86_64
+ if (CODE64(s)) {
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ } else {
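+ /* Exchange the GS segment base with the kernel GS base. */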
+ tcg_gen_mov_tl(cpu_T0, cpu_seg_base[R_GS]);
+ tcg_gen_ld_tl(cpu_seg_base[R_GS], cpu_env,
+ offsetof(CPUX86State, kernelgsbase));
+ tcg_gen_st_tl(cpu_T0, cpu_env,
+ offsetof(CPUX86State, kernelgsbase));
+ }
+ break;
+ }
+#endif
+ goto illegal_op;
+
+ case 0xf9: /* rdtscp */
+ if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
+ goto illegal_op;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ if (s->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_rdtscp(cpu_env);
+ if (s->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_end();
+ gen_jmp(s, s->pc - s->cs_base);
+ }
+ break;
+
+ default:
+ goto unknown_op;
+ }
+ break;
+
+ case 0x108: /* invd */
+ case 0x109: /* wbinvd */
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ } else {
+ gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
+ /* nothing to do */
+ }
+ break;
+ case 0x63: /* arpl or movslS (x86_64) */
+#ifdef TARGET_X86_64
+ if (CODE64(s)) {
+ int d_ot;
+ /* d_ot is the size of destination */
+ d_ot = dflag;
+
+ modrm = cpu_ldub_code(env, s->pc++);
+ reg = ((modrm >> 3) & 7) | rex_r;
+ mod = (modrm >> 6) & 3;
+ rm = (modrm & 7) | REX_B(s);
+
+ if (mod == 3) {
+ gen_op_mov_v_reg(MO_32, cpu_T0, rm);
+ /* sign extend */
+ if (d_ot == MO_64) {
+ tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
+ }
+ gen_op_mov_reg_v(d_ot, reg, cpu_T0);
+ } else {
+ gen_lea_modrm(env, s, modrm);
+ gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T0, cpu_A0);
+ gen_op_mov_reg_v(d_ot, reg, cpu_T0);
+ }
+ } else
+#endif
+ {
+ TCGLabel *label1;
+ TCGv t0, t1, t2, a0;
+
+ if (!s->pe || s->vm86)
+ goto illegal_op;
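+ /* ARPL: if the destination selector's RPL (bits 1:0) is below
+ the source's, raise it to the source RPL and set ZF;
+ otherwise leave it alone and clear ZF. */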
+ t0 = tcg_temp_local_new();
+ t1 = tcg_temp_local_new();
+ t2 = tcg_temp_local_new();
+ ot = MO_16;
+ modrm = cpu_ldub_code(env, s->pc++);
+ reg = (modrm >> 3) & 7;
+ mod = (modrm >> 6) & 3;
+ rm = modrm & 7;
+ if (mod != 3) {
+ gen_lea_modrm(env, s, modrm);
+ gen_op_ld_v(s, ot, t0, cpu_A0);
+ a0 = tcg_temp_local_new();
+ tcg_gen_mov_tl(a0, cpu_A0);
+ } else {
+ gen_op_mov_v_reg(ot, t0, rm);
+ TCGV_UNUSED(a0);
+ }
+ gen_op_mov_v_reg(ot, t1, reg);
+ tcg_gen_andi_tl(cpu_tmp0, t0, 3);
+ tcg_gen_andi_tl(t1, t1, 3);
+ tcg_gen_movi_tl(t2, 0);
+ label1 = gen_new_label();
+ tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
+ tcg_gen_andi_tl(t0, t0, ~3);
+ tcg_gen_or_tl(t0, t0, t1);
+ tcg_gen_movi_tl(t2, CC_Z);
+ gen_set_label(label1);
+ if (mod != 3) {
+ gen_op_st_v(s, ot, t0, a0);
+ tcg_temp_free(a0);
+ } else {
+ gen_op_mov_reg_v(ot, rm, t0);
+ }
+ gen_compute_eflags(s);
+ tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
+ tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ }
+ break;
+ case 0x102: /* lar */
+ case 0x103: /* lsl */
+ {
+ TCGLabel *label1;
+ TCGv t0;
+ if (!s->pe || s->vm86)
+ goto illegal_op;
+ ot = dflag != MO_16 ? MO_32 : MO_16;
+ modrm = cpu_ldub_code(env, s->pc++);
+ reg = ((modrm >> 3) & 7) | rex_r;
+ gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
+ t0 = tcg_temp_local_new();
+ gen_update_cc_op(s);
+ if (b == 0x102) {
+ gen_helper_lar(t0, cpu_env, cpu_T0);
+ } else {
+ gen_helper_lsl(t0, cpu_env, cpu_T0);
+ }
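+ /* The helpers report success by setting ZF in cc_src;
+ the destination register is written only in that case. */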
+ tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
+ label1 = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
+ gen_op_mov_reg_v(ot, reg, t0);
+ gen_set_label(label1);
+ set_cc_op(s, CC_OP_EFLAGS);
+ tcg_temp_free(t0);
+ }
+ break;
+ case 0x118:
+ modrm = cpu_ldub_code(env, s->pc++);
+ mod = (modrm >> 6) & 3;
+ op = (modrm >> 3) & 7;
+ switch(op) {
+ case 0: /* prefetchnta */
+ case 1: /* prefetcht0 */
+ case 2: /* prefetcht1 */
+ case 3: /* prefetcht2 */
+ if (mod == 3)
+ goto illegal_op;
+ gen_nop_modrm(env, s, modrm);
+ /* nothing more to do */
+ break;
+ default: /* nop (multi byte) */
+ gen_nop_modrm(env, s, modrm);
+ break;
+ }
+ break;
+ case 0x11a:
+ modrm = cpu_ldub_code(env, s->pc++);
+ if (s->flags & HF_MPX_EN_MASK) {
+ mod = (modrm >> 6) & 3;
+ reg = ((modrm >> 3) & 7) | rex_r;
+ if (prefixes & PREFIX_REPZ) {
+ /* bndcl */
+ if (reg >= 4
+ || (prefixes & PREFIX_LOCK)
+ || s->aflag == MO_16) {
+ goto illegal_op;
+ }
+ gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
+ } else if (prefixes & PREFIX_REPNZ) {
+ /* bndcu */
+ if (reg >= 4
+ || (prefixes & PREFIX_LOCK)
+ || s->aflag == MO_16) {
+ goto illegal_op;
+ }
+ TCGv_i64 notu = tcg_temp_new_i64();
+ tcg_gen_not_i64(notu, cpu_bndu[reg]);
+ gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
+ tcg_temp_free_i64(notu);
+ } else if (prefixes & PREFIX_DATA) {
+ /* bndmov -- from reg/mem */
+ if (reg >= 4 || s->aflag == MO_16) {
+ goto illegal_op;
+ }
+ if (mod == 3) {
+ int reg2 = (modrm & 7) | REX_B(s);
+ if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
+ goto illegal_op;
+ }
+ if (s->flags & HF_MPX_IU_MASK) {
+ tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
+ tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
+ }
+ } else {
+ gen_lea_modrm(env, s, modrm);
+ if (CODE64(s)) {
+ tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0,
+ s->mem_index, MO_LEQ);
+ tcg_gen_addi_tl(cpu_A0, cpu_A0, 8);
+ tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0,
+ s->mem_index, MO_LEQ);
+ } else {
+ tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0,
+ s->mem_index, MO_LEUL);
+ tcg_gen_addi_tl(cpu_A0, cpu_A0, 4);
+ tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0,
+ s->mem_index, MO_LEUL);
+ }
+ /* bnd registers are now in-use */
+ gen_set_hflag(s, HF_MPX_IU_MASK);
+ }
+ } else if (mod != 3) {
+ /* bndldx */
+ AddressParts a = gen_lea_modrm_0(env, s, modrm);
+ if (reg >= 4
+ || (prefixes & PREFIX_LOCK)
+ || s->aflag == MO_16
+ || a.base < -1) {
+ goto illegal_op;
+ }
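+ /* A0 = base + displacement (plus segment base) and T0 holds
+ the index register value; the helper uses them for the
+ bound-table lookup. */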
+ if (a.base >= 0) {
+ tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp);
+ } else {
+ tcg_gen_movi_tl(cpu_A0, 0);
+ }
+ gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
+ if (a.index >= 0) {
+ tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]);
+ } else {
+ tcg_gen_movi_tl(cpu_T0, 0);
+ }
+ if (CODE64(s)) {
+ gen_helper_bndldx64(cpu_bndl[reg], cpu_env, cpu_A0, cpu_T0);
+ tcg_gen_ld_i64(cpu_bndu[reg], cpu_env,
+ offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
+ } else {
+ gen_helper_bndldx32(cpu_bndu[reg], cpu_env, cpu_A0, cpu_T0);
+ tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
+ tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
+ }
+ gen_set_hflag(s, HF_MPX_IU_MASK);
+ }
+ }
+ gen_nop_modrm(env, s, modrm);
+ break;
+ case 0x11b:
+ modrm = cpu_ldub_code(env, s->pc++);
+ if (s->flags & HF_MPX_EN_MASK) {
+ mod = (modrm >> 6) & 3;
+ reg = ((modrm >> 3) & 7) | rex_r;
+ if (mod != 3 && (prefixes & PREFIX_REPZ)) {
+ /* bndmk */
+ if (reg >= 4
+ || (prefixes & PREFIX_LOCK)
+ || s->aflag == MO_16) {
+ goto illegal_op;
+ }
+ AddressParts a = gen_lea_modrm_0(env, s, modrm);
+ if (a.base >= 0) {
+ tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
+ if (!CODE64(s)) {
+ tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
+ }
+ } else if (a.base == -1) {
+ /* no base register: the lower bound is 0 */
+ tcg_gen_movi_i64(cpu_bndl[reg], 0);
+ } else {
+ /* rip-relative generates #ud */
+ goto illegal_op;
+ }
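+ /* The upper bound is stored in one's complement form,
+ matching the architectural BND register representation. */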
+ tcg_gen_not_tl(cpu_A0, gen_lea_modrm_1(a));
+ if (!CODE64(s)) {
+ tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
+ }
+ tcg_gen_extu_tl_i64(cpu_bndu[reg], cpu_A0);
+ /* bnd registers are now in-use */
+ gen_set_hflag(s, HF_MPX_IU_MASK);
+ break;
+ } else if (prefixes & PREFIX_REPNZ) {
+ /* bndcn */
+ if (reg >= 4
+ || (prefixes & PREFIX_LOCK)
+ || s->aflag == MO_16) {
+ goto illegal_op;
+ }
+ gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
+ } else if (prefixes & PREFIX_DATA) {
+ /* bndmov -- to reg/mem */
+ if (reg >= 4 || s->aflag == MO_16) {
+ goto illegal_op;
+ }
+ if (mod == 3) {
+ int reg2 = (modrm & 7) | REX_B(s);
+ if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
+ goto illegal_op;
+ }
+ if (s->flags & HF_MPX_IU_MASK) {
+ tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
+ tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
+ }
+ } else {
+ gen_lea_modrm(env, s, modrm);
+ if (CODE64(s)) {
+ tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0,
+ s->mem_index, MO_LEQ);
+ tcg_gen_addi_tl(cpu_A0, cpu_A0, 8);
+ tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0,
+ s->mem_index, MO_LEQ);
+ } else {
+ tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0,
+ s->mem_index, MO_LEUL);
+ tcg_gen_addi_tl(cpu_A0, cpu_A0, 4);
+ tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0,
+ s->mem_index, MO_LEUL);
+ }
+ }
+ } else if (mod != 3) {
+ /* bndstx */
+ AddressParts a = gen_lea_modrm_0(env, s, modrm);
+ if (reg >= 4
+ || (prefixes & PREFIX_LOCK)
+ || s->aflag == MO_16
+ || a.base < -1) {
+ goto illegal_op;
+ }
+ if (a.base >= 0) {
+ tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp);
+ } else {
+ tcg_gen_movi_tl(cpu_A0, 0);
+ }
+ gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
+ if (a.index >= 0) {
+ tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]);
+ } else {
+ tcg_gen_movi_tl(cpu_T0, 0);
+ }
+ if (CODE64(s)) {
+ gen_helper_bndstx64(cpu_env, cpu_A0, cpu_T0,
+ cpu_bndl[reg], cpu_bndu[reg]);
+ } else {
+ gen_helper_bndstx32(cpu_env, cpu_A0, cpu_T0,
+ cpu_bndl[reg], cpu_bndu[reg]);
+ }
+ }
+ }
+ gen_nop_modrm(env, s, modrm);
+ break;
+ case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
+ modrm = cpu_ldub_code(env, s->pc++);
+ gen_nop_modrm(env, s, modrm);
+ break;
+ case 0x120: /* mov reg, crN */
+ case 0x122: /* mov crN, reg */
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ } else {
+ modrm = cpu_ldub_code(env, s->pc++);
+ /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
+ * AMD documentation (24594.pdf) and testing of
+ * Intel 386 and 486 processors all show that the mod bits
+ * are assumed to be 1's, regardless of actual values.
+ */
+ rm = (modrm & 7) | REX_B(s);
+ reg = ((modrm >> 3) & 7) | rex_r;
+ if (CODE64(s))
+ ot = MO_64;
+ else
+ ot = MO_32;
+ if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
+ (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
+ reg = 8;
+ }
+ switch(reg) {
+ case 0:
+ case 2:
+ case 3:
+ case 4:
+ case 8:
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ if (b & 2) {
+ gen_op_mov_v_reg(ot, cpu_T0, rm);
+ gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
+ cpu_T0);
+ gen_jmp_im(s->pc - s->cs_base);
+ gen_eob(s);
+ } else {
+ gen_helper_read_crN(cpu_T0, cpu_env, tcg_const_i32(reg));
+ gen_op_mov_reg_v(ot, rm, cpu_T0);
+ }
+ break;
+ default:
+ goto unknown_op;
+ }
+ }
+ break;
+ case 0x121: /* mov reg, drN */
+ case 0x123: /* mov drN, reg */
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ } else {
+ modrm = cpu_ldub_code(env, s->pc++);
+ /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
+ * AMD documentation (24594.pdf) and testing of
+ * Intel 386 and 486 processors all show that the mod bits
+ * are assumed to be 1's, regardless of actual values.
+ */
+ rm = (modrm & 7) | REX_B(s);
+ reg = ((modrm >> 3) & 7) | rex_r;
+ if (CODE64(s))
+ ot = MO_64;
+ else
+ ot = MO_32;
+ if (reg >= 8) {
+ goto illegal_op;
+ }
+ if (b & 2) {
+ gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
+ gen_op_mov_v_reg(ot, cpu_T0, rm);
+ tcg_gen_movi_i32(cpu_tmp2_i32, reg);
+ gen_helper_set_dr(cpu_env, cpu_tmp2_i32, cpu_T0);
+ gen_jmp_im(s->pc - s->cs_base);
+ gen_eob(s);
+ } else {
+ gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
+ tcg_gen_movi_i32(cpu_tmp2_i32, reg);
+ gen_helper_get_dr(cpu_T0, cpu_env, cpu_tmp2_i32);
+ gen_op_mov_reg_v(ot, rm, cpu_T0);
+ }
+ }
+ break;
+ case 0x106: /* clts */
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ } else {
+ gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
+ gen_helper_clts(cpu_env);
+ /* abort block because static cpu state changed */
+ gen_jmp_im(s->pc - s->cs_base);
+ gen_eob(s);
+ }
+ break;
+ /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
+ case 0x1c3: /* MOVNTI reg, mem */
+ if (!(s->cpuid_features & CPUID_SSE2))
+ goto illegal_op;
+ ot = mo_64_32(dflag);
+ modrm = cpu_ldub_code(env, s->pc++);
+ mod = (modrm >> 6) & 3;
+ if (mod == 3)
+ goto illegal_op;
+ reg = ((modrm >> 3) & 7) | rex_r;
+ /* generate a generic store */
+ gen_ldst_modrm(env, s, modrm, ot, reg, 1);
+ break;
+ case 0x1ae:
+ modrm = cpu_ldub_code(env, s->pc++);
+ switch (modrm) {
+ CASE_MODRM_MEM_OP(0): /* fxsave */
+ if (!(s->cpuid_features & CPUID_FXSR)
+ || (prefixes & PREFIX_LOCK)) {
+ goto illegal_op;
+ }
+ if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
+ gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
+ break;
+ }
+ gen_lea_modrm(env, s, modrm);
+ gen_helper_fxsave(cpu_env, cpu_A0);
+ break;
+
+ CASE_MODRM_MEM_OP(1): /* fxrstor */
+ if (!(s->cpuid_features & CPUID_FXSR)
+ || (prefixes & PREFIX_LOCK)) {
+ goto illegal_op;
+ }
+ if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
+ gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
+ break;
+ }
+ gen_lea_modrm(env, s, modrm);
+ gen_helper_fxrstor(cpu_env, cpu_A0);
+ break;
+
+ CASE_MODRM_MEM_OP(2): /* ldmxcsr */
+ if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
+ goto illegal_op;
+ }
+ if (s->flags & HF_TS_MASK) {
+ gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
+ break;
+ }
+ gen_lea_modrm(env, s, modrm);
+ tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL);
+ gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
+ break;
+
+ CASE_MODRM_MEM_OP(3): /* stmxcsr */
+ if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
+ goto illegal_op;
+ }
+ if (s->flags & HF_TS_MASK) {
+ gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
+ break;
+ }
+ gen_lea_modrm(env, s, modrm);
+ tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, mxcsr));
+ gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
+ break;
+
+ CASE_MODRM_MEM_OP(4): /* xsave */
+ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
+ || (prefixes & (PREFIX_LOCK | PREFIX_DATA
+ | PREFIX_REPZ | PREFIX_REPNZ))) {
+ goto illegal_op;
+ }
+ gen_lea_modrm(env, s, modrm);
+ tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
+ cpu_regs[R_EDX]);
+ gen_helper_xsave(cpu_env, cpu_A0, cpu_tmp1_i64);
+ break;
+
+ CASE_MODRM_MEM_OP(5): /* xrstor */
+ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
+ || (prefixes & (PREFIX_LOCK | PREFIX_DATA
+ | PREFIX_REPZ | PREFIX_REPNZ))) {
+ goto illegal_op;
+ }
+ gen_lea_modrm(env, s, modrm);
+ tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
+ cpu_regs[R_EDX]);
+ gen_helper_xrstor(cpu_env, cpu_A0, cpu_tmp1_i64);
+ /* XRSTOR is how MPX is enabled, which changes how
+ we translate. Thus we need to end the TB. */
+ gen_update_cc_op(s);
+ gen_jmp_im(s->pc - s->cs_base);
+ gen_eob(s);
+ break;
+
+ CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
+ if (prefixes & PREFIX_LOCK) {
+ goto illegal_op;
+ }
+ if (prefixes & PREFIX_DATA) {
+ /* clwb */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
+ goto illegal_op;
+ }
+ gen_nop_modrm(env, s, modrm);
+ } else {
+ /* xsaveopt */
+ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
+ || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
+ || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
+ goto illegal_op;
+ }
+ gen_lea_modrm(env, s, modrm);
+ tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
+ cpu_regs[R_EDX]);
+ gen_helper_xsaveopt(cpu_env, cpu_A0, cpu_tmp1_i64);
+ }
+ break;
+
+ CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
+ if (prefixes & PREFIX_LOCK) {
+ goto illegal_op;
+ }
+ if (prefixes & PREFIX_DATA) {
+ /* clflushopt */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
+ goto illegal_op;
+ }
+ } else {
+ /* clflush */
+ if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
+ || !(s->cpuid_features & CPUID_CLFLUSH)) {
+ goto illegal_op;
+ }
+ }
+ gen_nop_modrm(env, s, modrm);
+ break;
+
+ case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
+ case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
+ case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
+ case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
+ if (CODE64(s)
+ && (prefixes & PREFIX_REPZ)
+ && !(prefixes & PREFIX_LOCK)
+ && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
+ TCGv base, treg, src, dst;
+
+ /* Preserve hflags bits by testing CR4 at runtime. */
+ tcg_gen_movi_i32(cpu_tmp2_i32, CR4_FSGSBASE_MASK);
+ gen_helper_cr4_testbit(cpu_env, cpu_tmp2_i32);
+
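+ /* Bit 3 of the modrm byte selects GS vs FS and bit 4
+ selects write vs read of the segment base. */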
+ base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
+ treg = cpu_regs[(modrm & 7) | REX_B(s)];
+
+ if (modrm & 0x10) {
+ /* wr*base */
+ dst = base, src = treg;
+ } else {
+ /* rd*base */
+ dst = treg, src = base;
+ }
+
+ if (s->dflag == MO_32) {
+ tcg_gen_ext32u_tl(dst, src);
+ } else {
+ tcg_gen_mov_tl(dst, src);
+ }
+ break;
+ }
+ goto unknown_op;
+
+ case 0xf8: /* sfence / pcommit */
+ if (prefixes & PREFIX_DATA) {
+ /* pcommit */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT)
+ || (prefixes & PREFIX_LOCK)) {
+ goto illegal_op;
+ }
+ break;
+ }
+ /* fallthru */
+ case 0xf9 ... 0xff: /* sfence */
+ if (!(s->cpuid_features & CPUID_SSE)
+ || (prefixes & PREFIX_LOCK)) {
+ goto illegal_op;
+ }
+ tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
+ break;
+ case 0xe8 ... 0xef: /* lfence */
+ if (!(s->cpuid_features & CPUID_SSE)
+ || (prefixes & PREFIX_LOCK)) {
+ goto illegal_op;
+ }
+ tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);
+ break;
+ case 0xf0 ... 0xf7: /* mfence */
+ if (!(s->cpuid_features & CPUID_SSE2)
+ || (prefixes & PREFIX_LOCK)) {
+ goto illegal_op;
+ }
+ tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
+ break;
+
+ default:
+ goto unknown_op;
+ }
+ break;
+
+ case 0x10d: /* 3DNow! prefetch(w) */
+ modrm = cpu_ldub_code(env, s->pc++);
+ mod = (modrm >> 6) & 3;
+ if (mod == 3)
+ goto illegal_op;
+ gen_nop_modrm(env, s, modrm);
+ break;
+ case 0x1aa: /* rsm */
+ gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
+ if (!(s->flags & HF_SMM_MASK))
+ goto illegal_op;
+ gen_update_cc_op(s);
+ gen_jmp_im(s->pc - s->cs_base);
+ gen_helper_rsm(cpu_env);
+ gen_eob(s);
+ break;
+ case 0x1b8: /* SSE4.2 popcnt */
+ if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
+ PREFIX_REPZ)
+ goto illegal_op;
+ if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
+ goto illegal_op;
+
+ modrm = cpu_ldub_code(env, s->pc++);
+ reg = ((modrm >> 3) & 7) | rex_r;
+
+ if (s->prefix & PREFIX_DATA) {
+ ot = MO_16;
+ } else {
+ ot = mo_64_32(dflag);
+ }
+
+ gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
+ gen_helper_popcnt(cpu_T0, cpu_env, cpu_T0, tcg_const_i32(ot));
+ gen_op_mov_reg_v(ot, reg, cpu_T0);
+
+ set_cc_op(s, CC_OP_EFLAGS);
+ break;
+ case 0x10e ... 0x10f:
+ /* 3DNow! instructions, ignore prefixes */
+ s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
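+ /* fall through */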
+ case 0x110 ... 0x117:
+ case 0x128 ... 0x12f:
+ case 0x138 ... 0x13a:
+ case 0x150 ... 0x179:
+ case 0x17c ... 0x17f:
+ case 0x1c2:
+ case 0x1c4 ... 0x1c6:
+ case 0x1d0 ... 0x1fe:
+ gen_sse(env, s, b, pc_start, rex_r);
+ break;
+ default:
+ goto unknown_op;
+ }
+ return s->pc;
+ illegal_op:
+ gen_illegal_opcode(s);
+ return s->pc;
+ unknown_op:
+ gen_unknown_opcode(env, s);
+ return s->pc;
+}
+
+void tcg_x86_init(void)
+{
+ static const char reg_names[CPU_NB_REGS][4] = {
+#ifdef TARGET_X86_64
+ [R_EAX] = "rax",
+ [R_EBX] = "rbx",
+ [R_ECX] = "rcx",
+ [R_EDX] = "rdx",
+ [R_ESI] = "rsi",
+ [R_EDI] = "rdi",
+ [R_EBP] = "rbp",
+ [R_ESP] = "rsp",
+ [8] = "r8",
+ [9] = "r9",
+ [10] = "r10",
+ [11] = "r11",
+ [12] = "r12",
+ [13] = "r13",
+ [14] = "r14",
+ [15] = "r15",
+#else
+ [R_EAX] = "eax",
+ [R_EBX] = "ebx",
+ [R_ECX] = "ecx",
+ [R_EDX] = "edx",
+ [R_ESI] = "esi",
+ [R_EDI] = "edi",
+ [R_EBP] = "ebp",
+ [R_ESP] = "esp",
+#endif
+ };
+ static const char seg_base_names[6][8] = {
+ [R_CS] = "cs_base",
+ [R_DS] = "ds_base",
+ [R_ES] = "es_base",
+ [R_FS] = "fs_base",
+ [R_GS] = "gs_base",
+ [R_SS] = "ss_base",
+ };
+ static const char bnd_regl_names[4][8] = {
+ "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
+ };
+ static const char bnd_regu_names[4][8] = {
+ "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
+ };
+ int i;
+ static bool initialized;
+
+ if (initialized) {
+ return;
+ }
+ initialized = true;
+
+ cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+ tcg_ctx.tcg_env = cpu_env;
+ cpu_cc_op = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUX86State, cc_op), "cc_op");
+ cpu_cc_dst = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_dst),
+ "cc_dst");
+ cpu_cc_src = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src),
+ "cc_src");
+ cpu_cc_src2 = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src2),
+ "cc_src2");
+
+ for (i = 0; i < CPU_NB_REGS; ++i) {
+ cpu_regs[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUX86State, regs[i]),
+ reg_names[i]);
+ }
+
+ for (i = 0; i < 6; ++i) {
+ cpu_seg_base[i]
+ = tcg_global_mem_new(cpu_env,
+ offsetof(CPUX86State, segs[i].base),
+ seg_base_names[i]);
+ }
+
+ for (i = 0; i < 4; ++i) {
+ cpu_bndl[i]
+ = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUX86State, bnd_regs[i].lb),
+ bnd_regl_names[i]);
+ cpu_bndu[i]
+ = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUX86State, bnd_regs[i].ub),
+ bnd_regu_names[i]);
+ }
+}
+
+/* generate intermediate code for basic block 'tb'. */
+void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
+{
+ X86CPU *cpu = x86_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ DisasContext dc1, *dc = &dc1;
+ target_ulong pc_ptr;
+ uint32_t flags;
+ target_ulong pc_start;
+ target_ulong cs_base;
+ int num_insns;
+ int max_insns;
+
+ /* generate intermediate code */
+ pc_start = tb->pc;
+ cs_base = tb->cs_base;
+ flags = tb->flags;
+
+ dc->pe = (flags >> HF_PE_SHIFT) & 1;
+ dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
+ dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
+ dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
+ dc->f_st = 0;
+ dc->vm86 = (flags >> VM_SHIFT) & 1;
+ dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
+ dc->iopl = (flags >> IOPL_SHIFT) & 3;
+ dc->tf = (flags >> TF_SHIFT) & 1;
+ dc->singlestep_enabled = cs->singlestep_enabled;
+ dc->cc_op = CC_OP_DYNAMIC;
+ dc->cc_op_dirty = false;
+ dc->cs_base = cs_base;
+ dc->tb = tb;
+ dc->popl_esp_hack = 0;
+ /* select memory access functions */
+ dc->mem_index = 0;
+#ifdef CONFIG_SOFTMMU
+ dc->mem_index = cpu_mmu_index(env, false);
+#endif
+ dc->cpuid_features = env->features[FEAT_1_EDX];
+ dc->cpuid_ext_features = env->features[FEAT_1_ECX];
+ dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
+ dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
+ dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
+ dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
+#ifdef TARGET_X86_64
+ dc->lma = (flags >> HF_LMA_SHIFT) & 1;
+ dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
+#endif
+ dc->flags = flags;
+ dc->jmp_opt = !(dc->tf || cs->singlestep_enabled ||
+ (flags & HF_INHIBIT_IRQ_MASK));
+ /* Do not optimize repz jumps at all in icount mode, because
+ rep movsS instructions are executed along different paths
+ in the repz_opt and !repz_opt cases. The optimized path
+ used to be taken always except in single-step mode, so
+ disabling it here makes the control flow identical in
+ normal and single-step execution. As a result there is no
+ jump optimization for repz in record/replay mode, and there
+ is always an additional step for ecx=0 when icount is
+ enabled.
+ */
+ dc->repz_opt = !dc->jmp_opt && !(tb->cflags & CF_USE_ICOUNT);
+#if 0
+ /* check addseg logic */
+ if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
+ printf("ERROR addseg\n");
+#endif
+
+ cpu_T0 = tcg_temp_new();
+ cpu_T1 = tcg_temp_new();
+ cpu_A0 = tcg_temp_new();
+
+ cpu_tmp0 = tcg_temp_new();
+ cpu_tmp1_i64 = tcg_temp_new_i64();
+ cpu_tmp2_i32 = tcg_temp_new_i32();
+ cpu_tmp3_i32 = tcg_temp_new_i32();
+ cpu_tmp4 = tcg_temp_new();
+ cpu_ptr0 = tcg_temp_new_ptr();
+ cpu_ptr1 = tcg_temp_new_ptr();
+ cpu_cc_srcT = tcg_temp_local_new();
+
+ dc->is_jmp = DISAS_NEXT;
+ pc_ptr = pc_start;
+ num_insns = 0;
+ max_insns = tb->cflags & CF_COUNT_MASK;
+ if (max_insns == 0) {
+ max_insns = CF_COUNT_MASK;
+ }
+ if (max_insns > TCG_MAX_INSNS) {
+ max_insns = TCG_MAX_INSNS;
+ }
+
+ gen_tb_start(tb);
+ for(;;) {
+ tcg_gen_insn_start(pc_ptr, dc->cc_op);
+ num_insns++;
+
+ /* If RF is set, suppress an internally generated breakpoint. */
+ if (unlikely(cpu_breakpoint_test(cs, pc_ptr,
+ tb->flags & HF_RF_MASK
+ ? BP_GDB : BP_ANY))) {
+ gen_debug(dc, pc_ptr - dc->cs_base);
+ /* The address covered by the breakpoint must be included in
+ [tb->pc, tb->pc + tb->size) in order for it to be properly
+ cleared -- thus we increment the PC here so that the logic
+ setting tb->size below does the right thing. */
+ pc_ptr += 1;
+ goto done_generating;
+ }
+ if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ gen_io_start();
+ }
+
+ pc_ptr = disas_insn(env, dc, pc_ptr);
+ /* stop translation if indicated */
+ if (dc->is_jmp)
+ break;
+ /* in single step mode, we generate only one instruction and
+ then generate an exception */
+ /* if irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
+ the flag and abort the translation to give the irqs a
+ chance to happen */
+ if (dc->tf || dc->singlestep_enabled ||
+ (flags & HF_INHIBIT_IRQ_MASK)) {
+ gen_jmp_im(pc_ptr - dc->cs_base);
+ gen_eob(dc);
+ break;
+ }
+ /* Do not cross the boundary of the pages in icount mode,
+ it can cause an exception. Do it only when boundary is
+ crossed by the first instruction in the block.
+ If current instruction already crossed the bound - it's ok,
+ because an exception hasn't stopped this code.
+ */
+ if ((tb->cflags & CF_USE_ICOUNT)
+ && ((pc_ptr & TARGET_PAGE_MASK)
+ != ((pc_ptr + TARGET_MAX_INSN_SIZE - 1) & TARGET_PAGE_MASK)
+ || (pc_ptr & ~TARGET_PAGE_MASK) == 0)) {
+ gen_jmp_im(pc_ptr - dc->cs_base);
+ gen_eob(dc);
+ break;
+ }
+ /* if too long translation, stop generation too */
+ if (tcg_op_buf_full() ||
+ (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
+ num_insns >= max_insns) {
+ gen_jmp_im(pc_ptr - dc->cs_base);
+ gen_eob(dc);
+ break;
+ }
+ if (singlestep) {
+ gen_jmp_im(pc_ptr - dc->cs_base);
+ gen_eob(dc);
+ break;
+ }
+ }
+ if (tb->cflags & CF_LAST_IO)
+ gen_io_end();
+done_generating:
+ gen_tb_end(tb, num_insns);
+
+#ifdef DEBUG_DISAS
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
+ && qemu_log_in_addr_range(pc_start)) {
+ int disas_flags;
+ qemu_log_lock();
+ qemu_log("----------------\n");
+ qemu_log("IN: %s\n", lookup_symbol(pc_start));
+#ifdef TARGET_X86_64
+ if (dc->code64)
+ disas_flags = 2;
+ else
+#endif
+ disas_flags = !dc->code32;
+ log_target_disas(cs, pc_start, pc_ptr - pc_start, disas_flags);
+ qemu_log("\n");
+ qemu_log_unlock();
+ }
+#endif
+
+ tb->size = pc_ptr - pc_start;
+ tb->icount = num_insns;
+}
+
+void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ int cc_op = data[1];
+ env->eip = data[0] - tb->cs_base;
+ if (cc_op != CC_OP_DYNAMIC) {
+ env->cc_op = cc_op;
+ }
+}
diff --git a/target/lm32/Makefile.objs b/target/lm32/Makefile.objs
new file mode 100644
index 0000000000..c3e1bd6bd6
--- /dev/null
+++ b/target/lm32/Makefile.objs
@@ -0,0 +1,4 @@
+obj-y += translate.o op_helper.o helper.o cpu.o
+obj-y += gdbstub.o
+obj-y += lm32-semi.o
+obj-$(CONFIG_SOFTMMU) += machine.o
diff --git a/target/lm32/README b/target/lm32/README
new file mode 100644
index 0000000000..ba3508a711
--- /dev/null
+++ b/target/lm32/README
@@ -0,0 +1,45 @@
+LatticeMico32 target
+--------------------
+
+General
+-------
+All opcodes including the JUART CSRs are supported.
+
+
+JTAG UART
+---------
+JTAG UART is routed to a serial console device. For the current boards it
+is the second one, i.e. to enable it in the QEMU virtual console window use
+the following command line parameters:
+ -serial vc -serial vc
+This will make serial0 (the lm32_uart) and serial1 (the JTAG UART)
+available as virtual consoles.
+
+
+Semihosting
+-----------
+Semihosting on this target is supported. Some system calls like read, write
+and exit are executed on the host if semihosting is enabled. See
+target/lm32/lm32-semi.c for all supported system calls. Emulation-aware
+programs can use this mechanism to shut down the virtual machine and print to
+the host console. See the tcg tests for an example.
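+
+For illustration only, a guest can terminate the VM by following the register
+convention implemented in lm32-semi.c (syscall number in r8, arguments in
+r1-r3, return value in r1). This sketch loads TARGET_SYS_exit (1) into r8 and
+an exit code of 0 into r1:
+ mvi r8, 1
+ mvi r1, 0
+ scall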
+
+
+Special instructions
+--------------------
+The translation recognizes one special instruction to halt the cpu:
+ and r0, r0, r0
+On real hardware this instruction is a nop. It is not used by GCC and
+should (hopefully) not be used within hand-crafted assembly.
+Insert this instruction in your idle loop to reduce the cpu load on the
+host.
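+A hypothetical idle loop built around it could look like:
+ idle:
+  and r0, r0, r0
+  bi idle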
+
+
+Ignoring the MSB of the address bus
+-----------------------------------
+Some SoCs ignore the MSB on the address bus, thus creating a shadow memory
+area. As a general rule, 0x00000000-0x7fffffff is cached, whereas
+0x80000000-0xffffffff is not cached and used to access IO devices. This
+behaviour can be enabled with:
+ cpu_lm32_set_phys_msb_ignore(env, 1);
+
diff --git a/target/lm32/TODO b/target/lm32/TODO
new file mode 100644
index 0000000000..e163c42ebe
--- /dev/null
+++ b/target/lm32/TODO
@@ -0,0 +1 @@
+* linux-user emulation
diff --git a/target/lm32/cpu-qom.h b/target/lm32/cpu-qom.h
new file mode 100644
index 0000000000..b423d2564b
--- /dev/null
+++ b/target/lm32/cpu-qom.h
@@ -0,0 +1,52 @@
+/*
+ * QEMU LatticeMico32 CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+#ifndef QEMU_LM32_CPU_QOM_H
+#define QEMU_LM32_CPU_QOM_H
+
+#include "qom/cpu.h"
+
+#define TYPE_LM32_CPU "lm32-cpu"
+
+#define LM32_CPU_CLASS(klass) \
+ OBJECT_CLASS_CHECK(LM32CPUClass, (klass), TYPE_LM32_CPU)
+#define LM32_CPU(obj) \
+ OBJECT_CHECK(LM32CPU, (obj), TYPE_LM32_CPU)
+#define LM32_CPU_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(LM32CPUClass, (obj), TYPE_LM32_CPU)
+
+/**
+ * LM32CPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ *
+ * A LatticeMico32 CPU model.
+ */
+typedef struct LM32CPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+ void (*parent_reset)(CPUState *cpu);
+} LM32CPUClass;
+
+typedef struct LM32CPU LM32CPU;
+
+#endif
diff --git a/target/lm32/cpu.c b/target/lm32/cpu.c
new file mode 100644
index 0000000000..8d939a7779
--- /dev/null
+++ b/target/lm32/cpu.c
@@ -0,0 +1,328 @@
+/*
+ * QEMU LatticeMico32 CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "qemu-common.h"
+#include "exec/exec-all.h"
+
+
+static void lm32_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ LM32CPU *cpu = LM32_CPU(cs);
+
+ cpu->env.pc = value;
+}
+
+/* Sort alphabetically by type name. */
+static gint lm32_cpu_list_compare(gconstpointer a, gconstpointer b)
+{
+ ObjectClass *class_a = (ObjectClass *)a;
+ ObjectClass *class_b = (ObjectClass *)b;
+ const char *name_a, *name_b;
+
+ name_a = object_class_get_name(class_a);
+ name_b = object_class_get_name(class_b);
+ return strcmp(name_a, name_b);
+}
+
+static void lm32_cpu_list_entry(gpointer data, gpointer user_data)
+{
+ ObjectClass *oc = data;
+ CPUListState *s = user_data;
+ const char *typename = object_class_get_name(oc);
+ char *name;
+
+ name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_LM32_CPU));
+ (*s->cpu_fprintf)(s->file, " %s\n", name);
+ g_free(name);
+}
+
+
+void lm32_cpu_list(FILE *f, fprintf_function cpu_fprintf)
+{
+ CPUListState s = {
+ .file = f,
+ .cpu_fprintf = cpu_fprintf,
+ };
+ GSList *list;
+
+ list = object_class_get_list(TYPE_LM32_CPU, false);
+ list = g_slist_sort(list, lm32_cpu_list_compare);
+ (*cpu_fprintf)(f, "Available CPUs:\n");
+ g_slist_foreach(list, lm32_cpu_list_entry, &s);
+ g_slist_free(list);
+}
+
+static void lm32_cpu_init_cfg_reg(LM32CPU *cpu)
+{
+ CPULM32State *env = &cpu->env;
+ uint32_t cfg = 0;
+
+ if (cpu->features & LM32_FEATURE_MULTIPLY) {
+ cfg |= CFG_M;
+ }
+
+ if (cpu->features & LM32_FEATURE_DIVIDE) {
+ cfg |= CFG_D;
+ }
+
+ if (cpu->features & LM32_FEATURE_SHIFT) {
+ cfg |= CFG_S;
+ }
+
+ if (cpu->features & LM32_FEATURE_SIGN_EXTEND) {
+ cfg |= CFG_X;
+ }
+
+ if (cpu->features & LM32_FEATURE_I_CACHE) {
+ cfg |= CFG_IC;
+ }
+
+ if (cpu->features & LM32_FEATURE_D_CACHE) {
+ cfg |= CFG_DC;
+ }
+
+ if (cpu->features & LM32_FEATURE_CYCLE_COUNT) {
+ cfg |= CFG_CC;
+ }
+
+ cfg |= (cpu->num_interrupts << CFG_INT_SHIFT);
+ cfg |= (cpu->num_breakpoints << CFG_BP_SHIFT);
+ cfg |= (cpu->num_watchpoints << CFG_WP_SHIFT);
+ cfg |= (cpu->revision << CFG_REV_SHIFT);
+
+ env->cfg = cfg;
+}
+
+static bool lm32_cpu_has_work(CPUState *cs)
+{
+ return cs->interrupt_request & CPU_INTERRUPT_HARD;
+}
+
+/* CPUClass::reset() */
+static void lm32_cpu_reset(CPUState *s)
+{
+ LM32CPU *cpu = LM32_CPU(s);
+ LM32CPUClass *lcc = LM32_CPU_GET_CLASS(cpu);
+ CPULM32State *env = &cpu->env;
+
+ lcc->parent_reset(s);
+
+ /* reset cpu state */
+ memset(env, 0, offsetof(CPULM32State, eba));
+
+ lm32_cpu_init_cfg_reg(cpu);
+ tlb_flush(s, 1);
+}
+
+static void lm32_cpu_disas_set_info(CPUState *cpu, disassemble_info *info)
+{
+ info->mach = bfd_mach_lm32;
+ info->print_insn = print_insn_lm32;
+}
+
+static void lm32_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ LM32CPUClass *lcc = LM32_CPU_GET_CLASS(dev);
+ Error *local_err = NULL;
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ cpu_reset(cs);
+
+ qemu_init_vcpu(cs);
+
+ lcc->parent_realize(dev, errp);
+}
+
+static void lm32_cpu_initfn(Object *obj)
+{
+ CPUState *cs = CPU(obj);
+ LM32CPU *cpu = LM32_CPU(obj);
+ CPULM32State *env = &cpu->env;
+ static bool tcg_initialized;
+
+ cs->env_ptr = env;
+
+ env->flags = 0;
+
+ if (tcg_enabled() && !tcg_initialized) {
+ tcg_initialized = true;
+ lm32_translate_init();
+ }
+}
+
+static void lm32_basic_cpu_initfn(Object *obj)
+{
+ LM32CPU *cpu = LM32_CPU(obj);
+
+ cpu->revision = 3;
+ cpu->num_interrupts = 32;
+ cpu->num_breakpoints = 4;
+ cpu->num_watchpoints = 4;
+ cpu->features = LM32_FEATURE_SHIFT
+ | LM32_FEATURE_SIGN_EXTEND
+ | LM32_FEATURE_CYCLE_COUNT;
+}
+
+static void lm32_standard_cpu_initfn(Object *obj)
+{
+ LM32CPU *cpu = LM32_CPU(obj);
+
+ cpu->revision = 3;
+ cpu->num_interrupts = 32;
+ cpu->num_breakpoints = 4;
+ cpu->num_watchpoints = 4;
+ cpu->features = LM32_FEATURE_MULTIPLY
+ | LM32_FEATURE_DIVIDE
+ | LM32_FEATURE_SHIFT
+ | LM32_FEATURE_SIGN_EXTEND
+ | LM32_FEATURE_I_CACHE
+ | LM32_FEATURE_CYCLE_COUNT;
+}
+
+static void lm32_full_cpu_initfn(Object *obj)
+{
+ LM32CPU *cpu = LM32_CPU(obj);
+
+ cpu->revision = 3;
+ cpu->num_interrupts = 32;
+ cpu->num_breakpoints = 4;
+ cpu->num_watchpoints = 4;
+ cpu->features = LM32_FEATURE_MULTIPLY
+ | LM32_FEATURE_DIVIDE
+ | LM32_FEATURE_SHIFT
+ | LM32_FEATURE_SIGN_EXTEND
+ | LM32_FEATURE_I_CACHE
+ | LM32_FEATURE_D_CACHE
+ | LM32_FEATURE_CYCLE_COUNT;
+}
+
+typedef struct LM32CPUInfo {
+ const char *name;
+ void (*initfn)(Object *obj);
+} LM32CPUInfo;
+
+static const LM32CPUInfo lm32_cpus[] = {
+ {
+ .name = "lm32-basic",
+ .initfn = lm32_basic_cpu_initfn,
+ },
+ {
+ .name = "lm32-standard",
+ .initfn = lm32_standard_cpu_initfn,
+ },
+ {
+ .name = "lm32-full",
+ .initfn = lm32_full_cpu_initfn,
+ },
+};
+
+static ObjectClass *lm32_cpu_class_by_name(const char *cpu_model)
+{
+ ObjectClass *oc;
+ char *typename;
+
+ if (cpu_model == NULL) {
+ return NULL;
+ }
+
+ typename = g_strdup_printf("%s-" TYPE_LM32_CPU, cpu_model);
+ oc = object_class_by_name(typename);
+ g_free(typename);
+ if (oc != NULL && (!object_class_dynamic_cast(oc, TYPE_LM32_CPU) ||
+ object_class_is_abstract(oc))) {
+ oc = NULL;
+ }
+ return oc;
+}
+
+static void lm32_cpu_class_init(ObjectClass *oc, void *data)
+{
+ LM32CPUClass *lcc = LM32_CPU_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(oc);
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ lcc->parent_realize = dc->realize;
+ dc->realize = lm32_cpu_realizefn;
+
+ lcc->parent_reset = cc->reset;
+ cc->reset = lm32_cpu_reset;
+
+ cc->class_by_name = lm32_cpu_class_by_name;
+ cc->has_work = lm32_cpu_has_work;
+ cc->do_interrupt = lm32_cpu_do_interrupt;
+ cc->cpu_exec_interrupt = lm32_cpu_exec_interrupt;
+ cc->dump_state = lm32_cpu_dump_state;
+ cc->set_pc = lm32_cpu_set_pc;
+ cc->gdb_read_register = lm32_cpu_gdb_read_register;
+ cc->gdb_write_register = lm32_cpu_gdb_write_register;
+#ifdef CONFIG_USER_ONLY
+ cc->handle_mmu_fault = lm32_cpu_handle_mmu_fault;
+#else
+ cc->get_phys_page_debug = lm32_cpu_get_phys_page_debug;
+ cc->vmsd = &vmstate_lm32_cpu;
+#endif
+ cc->gdb_num_core_regs = 32 + 7;
+ cc->gdb_stop_before_watchpoint = true;
+ cc->debug_excp_handler = lm32_debug_excp_handler;
+ cc->disas_set_info = lm32_cpu_disas_set_info;
+}
+
+static void lm32_register_cpu_type(const LM32CPUInfo *info)
+{
+ TypeInfo type_info = {
+ .parent = TYPE_LM32_CPU,
+ .instance_init = info->initfn,
+ };
+
+ type_info.name = g_strdup_printf("%s-" TYPE_LM32_CPU, info->name);
+ type_register(&type_info);
+ g_free((void *)type_info.name);
+}
+
+static const TypeInfo lm32_cpu_type_info = {
+ .name = TYPE_LM32_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(LM32CPU),
+ .instance_init = lm32_cpu_initfn,
+ .abstract = true,
+ .class_size = sizeof(LM32CPUClass),
+ .class_init = lm32_cpu_class_init,
+};
+
+static void lm32_cpu_register_types(void)
+{
+ int i;
+
+ type_register_static(&lm32_cpu_type_info);
+ for (i = 0; i < ARRAY_SIZE(lm32_cpus); i++) {
+ lm32_register_cpu_type(&lm32_cpus[i]);
+ }
+}
+
+type_init(lm32_cpu_register_types)
diff --git a/target/lm32/cpu.h b/target/lm32/cpu.h
new file mode 100644
index 0000000000..d8a3515244
--- /dev/null
+++ b/target/lm32/cpu.h
@@ -0,0 +1,274 @@
+/*
+ * LatticeMico32 virtual CPU header.
+ *
+ * Copyright (c) 2010 Michael Walle <michael@walle.cc>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef LM32_CPU_H
+#define LM32_CPU_H
+
+#define TARGET_LONG_BITS 32
+
+#define CPUArchState struct CPULM32State
+
+#include "qemu-common.h"
+#include "cpu-qom.h"
+#include "exec/cpu-defs.h"
+struct CPULM32State;
+typedef struct CPULM32State CPULM32State;
+
+#define NB_MMU_MODES 1
+#define TARGET_PAGE_BITS 12
+static inline int cpu_mmu_index(CPULM32State *env, bool ifetch)
+{
+ return 0;
+}
+
+#define TARGET_PHYS_ADDR_SPACE_BITS 32
+#define TARGET_VIRT_ADDR_SPACE_BITS 32
+
+/* Exceptions indices */
+enum {
+ EXCP_RESET = 0,
+ EXCP_BREAKPOINT,
+ EXCP_INSN_BUS_ERROR,
+ EXCP_WATCHPOINT,
+ EXCP_DATA_BUS_ERROR,
+ EXCP_DIVIDE_BY_ZERO,
+ EXCP_IRQ,
+ EXCP_SYSTEMCALL
+};
+
+/* Registers */
+enum {
+ R_R0 = 0, R_R1, R_R2, R_R3, R_R4, R_R5, R_R6, R_R7, R_R8, R_R9, R_R10,
+ R_R11, R_R12, R_R13, R_R14, R_R15, R_R16, R_R17, R_R18, R_R19, R_R20,
+ R_R21, R_R22, R_R23, R_R24, R_R25, R_R26, R_R27, R_R28, R_R29, R_R30,
+ R_R31
+};
+
+/* Register aliases */
+enum {
+ R_GP = R_R26,
+ R_FP = R_R27,
+ R_SP = R_R28,
+ R_RA = R_R29,
+ R_EA = R_R30,
+ R_BA = R_R31
+};
+
+/* IE flags */
+enum {
+ IE_IE = (1<<0),
+ IE_EIE = (1<<1),
+ IE_BIE = (1<<2),
+};
+
+/* DC flags */
+enum {
+ DC_SS = (1<<0),
+ DC_RE = (1<<1),
+ DC_C0 = (1<<2),
+ DC_C1 = (1<<3),
+ DC_C2 = (1<<4),
+ DC_C3 = (1<<5),
+};
+
+/* CFG mask */
+enum {
+ CFG_M = (1<<0),
+ CFG_D = (1<<1),
+ CFG_S = (1<<2),
+ CFG_U = (1<<3),
+ CFG_X = (1<<4),
+ CFG_CC = (1<<5),
+ CFG_IC = (1<<6),
+ CFG_DC = (1<<7),
+ CFG_G = (1<<8),
+ CFG_H = (1<<9),
+ CFG_R = (1<<10),
+ CFG_J = (1<<11),
+ CFG_INT_SHIFT = 12,
+ CFG_BP_SHIFT = 18,
+ CFG_WP_SHIFT = 22,
+ CFG_REV_SHIFT = 26,
+};
+
+/* CSRs */
+enum {
+ CSR_IE = 0x00,
+ CSR_IM = 0x01,
+ CSR_IP = 0x02,
+ CSR_ICC = 0x03,
+ CSR_DCC = 0x04,
+ CSR_CC = 0x05,
+ CSR_CFG = 0x06,
+ CSR_EBA = 0x07,
+ CSR_DC = 0x08,
+ CSR_DEBA = 0x09,
+ CSR_JTX = 0x0e,
+ CSR_JRX = 0x0f,
+ CSR_BP0 = 0x10,
+ CSR_BP1 = 0x11,
+ CSR_BP2 = 0x12,
+ CSR_BP3 = 0x13,
+ CSR_WP0 = 0x18,
+ CSR_WP1 = 0x19,
+ CSR_WP2 = 0x1a,
+ CSR_WP3 = 0x1b,
+};
+
+enum {
+ LM32_FEATURE_MULTIPLY = 1,
+ LM32_FEATURE_DIVIDE = 2,
+ LM32_FEATURE_SHIFT = 4,
+ LM32_FEATURE_SIGN_EXTEND = 8,
+ LM32_FEATURE_I_CACHE = 16,
+ LM32_FEATURE_D_CACHE = 32,
+ LM32_FEATURE_CYCLE_COUNT = 64,
+};
+
+enum {
+ LM32_FLAG_IGNORE_MSB = 1,
+};
+
+struct CPULM32State {
+ /* general registers */
+ uint32_t regs[32];
+
+ /* special registers */
+ uint32_t pc; /* program counter */
+ uint32_t ie; /* interrupt enable */
+ uint32_t icc; /* instruction cache control */
+ uint32_t dcc; /* data cache control */
+ uint32_t cc; /* cycle counter */
+ uint32_t cfg; /* configuration */
+
+ /* debug registers */
+ uint32_t dc; /* debug control */
+ uint32_t bp[4]; /* breakpoints */
+ uint32_t wp[4]; /* watchpoints */
+
+ struct CPUBreakpoint *cpu_breakpoint[4];
+ struct CPUWatchpoint *cpu_watchpoint[4];
+
+ CPU_COMMON
+
+ /* Fields from here on are preserved across CPU reset. */
+ uint32_t eba; /* exception base address */
+ uint32_t deba; /* debug exception base address */
+
+ /* interrupt controller handle for callbacks */
+ DeviceState *pic_state;
+ /* JTAG UART handle for callbacks */
+ DeviceState *juart_state;
+
+ /* processor core features */
+ uint32_t flags;
+
+};
+
+/**
+ * LM32CPU:
+ * @env: #CPULM32State
+ *
+ * A LatticeMico32 CPU.
+ */
+struct LM32CPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPULM32State env;
+
+ uint32_t revision;
+ uint8_t num_interrupts;
+ uint8_t num_breakpoints;
+ uint8_t num_watchpoints;
+ uint32_t features;
+};
+
+static inline LM32CPU *lm32_env_get_cpu(CPULM32State *env)
+{
+ return container_of(env, LM32CPU, env);
+}
+
+#define ENV_GET_CPU(e) CPU(lm32_env_get_cpu(e))
+
+#define ENV_OFFSET offsetof(LM32CPU, env)
+
+#ifndef CONFIG_USER_ONLY
+extern const struct VMStateDescription vmstate_lm32_cpu;
+#endif
+
+void lm32_cpu_do_interrupt(CPUState *cpu);
+bool lm32_cpu_exec_interrupt(CPUState *cs, int int_req);
+void lm32_cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
+ int flags);
+hwaddr lm32_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+int lm32_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
+int lm32_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+
+typedef enum {
+ LM32_WP_DISABLED = 0,
+ LM32_WP_READ,
+ LM32_WP_WRITE,
+ LM32_WP_READ_WRITE,
+} lm32_wp_t;
+
+static inline lm32_wp_t lm32_wp_type(uint32_t dc, int idx)
+{
+ assert(idx < 4);
+ return (dc >> (idx+1)*2) & 0x3;
+}
+
+LM32CPU *cpu_lm32_init(const char *cpu_model);
+/* You can call this signal handler from your SIGBUS and SIGSEGV
+ signal handlers to inform the virtual CPU of exceptions. Non-zero
+ is returned if the signal was handled by the virtual CPU. */
+int cpu_lm32_signal_handler(int host_signum, void *pinfo,
+ void *puc);
+void lm32_cpu_list(FILE *f, fprintf_function cpu_fprintf);
+void lm32_translate_init(void);
+void cpu_lm32_set_phys_msb_ignore(CPULM32State *env, int value);
+void QEMU_NORETURN raise_exception(CPULM32State *env, int index);
+void lm32_debug_excp_handler(CPUState *cs);
+void lm32_breakpoint_insert(CPULM32State *env, int index, target_ulong address);
+void lm32_breakpoint_remove(CPULM32State *env, int index);
+void lm32_watchpoint_insert(CPULM32State *env, int index, target_ulong address,
+ lm32_wp_t wp_type);
+void lm32_watchpoint_remove(CPULM32State *env, int index);
+bool lm32_cpu_do_semihosting(CPUState *cs);
+
+#define cpu_init(cpu_model) CPU(cpu_lm32_init(cpu_model))
+
+#define cpu_list lm32_cpu_list
+#define cpu_signal_handler cpu_lm32_signal_handler
+
+int lm32_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
+ int mmu_idx);
+
+#include "exec/cpu-all.h"
+
+static inline void cpu_get_tb_cpu_state(CPULM32State *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *flags)
+{
+ *pc = env->pc;
+ *cs_base = 0;
+ *flags = 0;
+}
+
+#endif
diff --git a/target/lm32/gdbstub.c b/target/lm32/gdbstub.c
new file mode 100644
index 0000000000..cf929dd392
--- /dev/null
+++ b/target/lm32/gdbstub.c
@@ -0,0 +1,93 @@
+/*
+ * LM32 gdb server stub
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+#include "hw/lm32/lm32_pic.h"
+
+int lm32_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ LM32CPU *cpu = LM32_CPU(cs);
+ CPULM32State *env = &cpu->env;
+
+ if (n < 32) {
+ return gdb_get_reg32(mem_buf, env->regs[n]);
+ } else {
+ switch (n) {
+ case 32:
+ return gdb_get_reg32(mem_buf, env->pc);
+ /* FIXME: put in right exception ID */
+ case 33:
+ return gdb_get_reg32(mem_buf, 0);
+ case 34:
+ return gdb_get_reg32(mem_buf, env->eba);
+ case 35:
+ return gdb_get_reg32(mem_buf, env->deba);
+ case 36:
+ return gdb_get_reg32(mem_buf, env->ie);
+ case 37:
+ return gdb_get_reg32(mem_buf, lm32_pic_get_im(env->pic_state));
+ case 38:
+ return gdb_get_reg32(mem_buf, lm32_pic_get_ip(env->pic_state));
+ }
+ }
+ return 0;
+}
+
+int lm32_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ LM32CPU *cpu = LM32_CPU(cs);
+ CPUClass *cc = CPU_GET_CLASS(cs);
+ CPULM32State *env = &cpu->env;
+ uint32_t tmp;
+
+ if (n > cc->gdb_num_core_regs) {
+ return 0;
+ }
+
+ tmp = ldl_p(mem_buf);
+
+ if (n < 32) {
+ env->regs[n] = tmp;
+ } else {
+ switch (n) {
+ case 32:
+ env->pc = tmp;
+ break;
+ case 34:
+ env->eba = tmp;
+ break;
+ case 35:
+ env->deba = tmp;
+ break;
+ case 36:
+ env->ie = tmp;
+ break;
+ case 37:
+ lm32_pic_set_im(env->pic_state, tmp);
+ break;
+ case 38:
+ lm32_pic_set_ip(env->pic_state, tmp);
+ break;
+ }
+ }
+ return 4;
+}
diff --git a/target/lm32/helper.c b/target/lm32/helper.c
new file mode 100644
index 0000000000..891da18c30
--- /dev/null
+++ b/target/lm32/helper.c
@@ -0,0 +1,237 @@
+/*
+ * LatticeMico32 helper routines.
+ *
+ * Copyright (c) 2010-2014 Michael Walle <michael@walle.cc>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "qemu/host-utils.h"
+#include "sysemu/sysemu.h"
+#include "exec/semihost.h"
+#include "exec/log.h"
+
+int lm32_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
+ int mmu_idx)
+{
+ LM32CPU *cpu = LM32_CPU(cs);
+ CPULM32State *env = &cpu->env;
+ int prot;
+
+ address &= TARGET_PAGE_MASK;
+ prot = PAGE_BITS;
+ if (env->flags & LM32_FLAG_IGNORE_MSB) {
+ tlb_set_page(cs, address, address & 0x7fffffff, prot, mmu_idx,
+ TARGET_PAGE_SIZE);
+ } else {
+ tlb_set_page(cs, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
+ }
+
+ return 0;
+}
+
+hwaddr lm32_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+ LM32CPU *cpu = LM32_CPU(cs);
+
+ addr &= TARGET_PAGE_MASK;
+ if (cpu->env.flags & LM32_FLAG_IGNORE_MSB) {
+ return addr & 0x7fffffff;
+ } else {
+ return addr;
+ }
+}
+
+void lm32_breakpoint_insert(CPULM32State *env, int idx, target_ulong address)
+{
+ LM32CPU *cpu = lm32_env_get_cpu(env);
+
+ cpu_breakpoint_insert(CPU(cpu), address, BP_CPU,
+ &env->cpu_breakpoint[idx]);
+}
+
+void lm32_breakpoint_remove(CPULM32State *env, int idx)
+{
+ LM32CPU *cpu = lm32_env_get_cpu(env);
+
+ if (!env->cpu_breakpoint[idx]) {
+ return;
+ }
+
+ cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[idx]);
+ env->cpu_breakpoint[idx] = NULL;
+}
+
+void lm32_watchpoint_insert(CPULM32State *env, int idx, target_ulong address,
+ lm32_wp_t wp_type)
+{
+ LM32CPU *cpu = lm32_env_get_cpu(env);
+ int flags = 0;
+
+ switch (wp_type) {
+ case LM32_WP_DISABLED:
+ /* nothing to do */
+ break;
+ case LM32_WP_READ:
+ flags = BP_CPU | BP_STOP_BEFORE_ACCESS | BP_MEM_READ;
+ break;
+ case LM32_WP_WRITE:
+ flags = BP_CPU | BP_STOP_BEFORE_ACCESS | BP_MEM_WRITE;
+ break;
+ case LM32_WP_READ_WRITE:
+ flags = BP_CPU | BP_STOP_BEFORE_ACCESS | BP_MEM_ACCESS;
+ break;
+ }
+
+ if (flags != 0) {
+ cpu_watchpoint_insert(CPU(cpu), address, 1, flags,
+ &env->cpu_watchpoint[idx]);
+ }
+}
+
+void lm32_watchpoint_remove(CPULM32State *env, int idx)
+{
+ LM32CPU *cpu = lm32_env_get_cpu(env);
+
+ if (!env->cpu_watchpoint[idx]) {
+ return;
+ }
+
+ cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[idx]);
+ env->cpu_watchpoint[idx] = NULL;
+}
+
+static bool check_watchpoints(CPULM32State *env)
+{
+ LM32CPU *cpu = lm32_env_get_cpu(env);
+ int i;
+
+ for (i = 0; i < cpu->num_watchpoints; i++) {
+ if (env->cpu_watchpoint[i] &&
+ env->cpu_watchpoint[i]->flags & BP_WATCHPOINT_HIT) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void lm32_debug_excp_handler(CPUState *cs)
+{
+ LM32CPU *cpu = LM32_CPU(cs);
+ CPULM32State *env = &cpu->env;
+ CPUBreakpoint *bp;
+
+ if (cs->watchpoint_hit) {
+ if (cs->watchpoint_hit->flags & BP_CPU) {
+ cs->watchpoint_hit = NULL;
+ if (check_watchpoints(env)) {
+ raise_exception(env, EXCP_WATCHPOINT);
+ } else {
+ cpu_loop_exit_noexc(cs);
+ }
+ }
+ } else {
+ QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
+ if (bp->pc == env->pc) {
+ if (bp->flags & BP_CPU) {
+ raise_exception(env, EXCP_BREAKPOINT);
+ }
+ break;
+ }
+ }
+ }
+}
+
+void lm32_cpu_do_interrupt(CPUState *cs)
+{
+ LM32CPU *cpu = LM32_CPU(cs);
+ CPULM32State *env = &cpu->env;
+
+ qemu_log_mask(CPU_LOG_INT,
+ "exception at pc=%x type=%x\n", env->pc, cs->exception_index);
+
+ switch (cs->exception_index) {
+ case EXCP_SYSTEMCALL:
+ if (unlikely(semihosting_enabled())) {
+ /* lm32_cpu_do_semihosting() returns true if the call was
+ * handled. Otherwise do the normal exception handling. */
+ if (lm32_cpu_do_semihosting(cs)) {
+ env->pc += 4;
+ break;
+ }
+ }
+ /* fall through */
+ case EXCP_INSN_BUS_ERROR:
+ case EXCP_DATA_BUS_ERROR:
+ case EXCP_DIVIDE_BY_ZERO:
+ case EXCP_IRQ:
+ /* non-debug exceptions */
+ env->regs[R_EA] = env->pc;
+ env->ie |= (env->ie & IE_IE) ? IE_EIE : 0;
+ env->ie &= ~IE_IE;
+ if (env->dc & DC_RE) {
+ env->pc = env->deba + (cs->exception_index * 32);
+ } else {
+ env->pc = env->eba + (cs->exception_index * 32);
+ }
+ log_cpu_state_mask(CPU_LOG_INT, cs, 0);
+ break;
+ case EXCP_BREAKPOINT:
+ case EXCP_WATCHPOINT:
+ /* debug exceptions */
+ env->regs[R_BA] = env->pc;
+ env->ie |= (env->ie & IE_IE) ? IE_BIE : 0;
+ env->ie &= ~IE_IE;
+ env->pc = env->deba + (cs->exception_index * 32);
+ log_cpu_state_mask(CPU_LOG_INT, cs, 0);
+ break;
+ default:
+ cpu_abort(cs, "unhandled exception type=%d\n",
+ cs->exception_index);
+ break;
+ }
+}
+
+bool lm32_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ LM32CPU *cpu = LM32_CPU(cs);
+ CPULM32State *env = &cpu->env;
+
+ if ((interrupt_request & CPU_INTERRUPT_HARD) && (env->ie & IE_IE)) {
+ cs->exception_index = EXCP_IRQ;
+ lm32_cpu_do_interrupt(cs);
+ return true;
+ }
+ return false;
+}
+
+LM32CPU *cpu_lm32_init(const char *cpu_model)
+{
+ return LM32_CPU(cpu_generic_init(TYPE_LM32_CPU, cpu_model));
+}
+
+/* Some SoCs ignore the MSB on the address bus, thus creating a shadow memory
+ * area. As a general rule, 0x00000000-0x7fffffff is cached, whereas
+ * 0x80000000-0xffffffff is not cached and used to access IO devices. */
+void cpu_lm32_set_phys_msb_ignore(CPULM32State *env, int value)
+{
+ if (value) {
+ env->flags |= LM32_FLAG_IGNORE_MSB;
+ } else {
+ env->flags &= ~LM32_FLAG_IGNORE_MSB;
+ }
+}
diff --git a/target/lm32/helper.h b/target/lm32/helper.h
new file mode 100644
index 0000000000..445578c439
--- /dev/null
+++ b/target/lm32/helper.h
@@ -0,0 +1,14 @@
+DEF_HELPER_2(raise_exception, void, env, i32)
+DEF_HELPER_1(hlt, void, env)
+DEF_HELPER_3(wcsr_bp, void, env, i32, i32)
+DEF_HELPER_3(wcsr_wp, void, env, i32, i32)
+DEF_HELPER_2(wcsr_dc, void, env, i32)
+DEF_HELPER_2(wcsr_im, void, env, i32)
+DEF_HELPER_2(wcsr_ip, void, env, i32)
+DEF_HELPER_2(wcsr_jtx, void, env, i32)
+DEF_HELPER_2(wcsr_jrx, void, env, i32)
+DEF_HELPER_1(rcsr_im, i32, env)
+DEF_HELPER_1(rcsr_ip, i32, env)
+DEF_HELPER_1(rcsr_jtx, i32, env)
+DEF_HELPER_1(rcsr_jrx, i32, env)
+DEF_HELPER_1(ill, void, env)
diff --git a/target/lm32/lm32-semi.c b/target/lm32/lm32-semi.c
new file mode 100644
index 0000000000..6a11a6299a
--- /dev/null
+++ b/target/lm32/lm32-semi.c
@@ -0,0 +1,212 @@
+/*
+ * Lattice Mico32 semihosting syscall interface
+ *
+ * Copyright (c) 2014 Michael Walle <michael@walle.cc>
+ *
+ * Based on target/m68k/m68k-semi.c, which is
+ * Copyright (c) 2005-2007 CodeSourcery.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "qemu/log.h"
+#include "exec/softmmu-semi.h"
+
+enum {
+ TARGET_SYS_exit = 1,
+ TARGET_SYS_open = 2,
+ TARGET_SYS_close = 3,
+ TARGET_SYS_read = 4,
+ TARGET_SYS_write = 5,
+ TARGET_SYS_lseek = 6,
+ TARGET_SYS_fstat = 10,
+ TARGET_SYS_stat = 15,
+};
+
+enum {
+ NEWLIB_O_RDONLY = 0x0,
+ NEWLIB_O_WRONLY = 0x1,
+ NEWLIB_O_RDWR = 0x2,
+ NEWLIB_O_APPEND = 0x8,
+ NEWLIB_O_CREAT = 0x200,
+ NEWLIB_O_TRUNC = 0x400,
+ NEWLIB_O_EXCL = 0x800,
+};
+
+static int translate_openflags(int flags)
+{
+ int hf;
+
+ if (flags & NEWLIB_O_WRONLY) {
+ hf = O_WRONLY;
+ } else if (flags & NEWLIB_O_RDWR) {
+ hf = O_RDWR;
+ } else {
+ hf = O_RDONLY;
+ }
+
+ if (flags & NEWLIB_O_APPEND) {
+ hf |= O_APPEND;
+ }
+
+ if (flags & NEWLIB_O_CREAT) {
+ hf |= O_CREAT;
+ }
+
+ if (flags & NEWLIB_O_TRUNC) {
+ hf |= O_TRUNC;
+ }
+
+ if (flags & NEWLIB_O_EXCL) {
+ hf |= O_EXCL;
+ }
+
+ return hf;
+}
+
+struct newlib_stat {
+ int16_t newlib_st_dev; /* device */
+ uint16_t newlib_st_ino; /* inode */
+ uint16_t newlib_st_mode; /* protection */
+ uint16_t newlib_st_nlink; /* number of hard links */
+ uint16_t newlib_st_uid; /* user ID of owner */
+ uint16_t newlib_st_gid; /* group ID of owner */
+ int16_t newlib_st_rdev; /* device type (if inode device) */
+ int32_t newlib_st_size; /* total size, in bytes */
+ int32_t newlib_st_atime; /* time of last access */
+ uint32_t newlib_st_spare1;
+ int32_t newlib_st_mtime; /* time of last modification */
+ uint32_t newlib_st_spare2;
+ int32_t newlib_st_ctime; /* time of last change */
+ uint32_t newlib_st_spare3;
+} QEMU_PACKED;
+
+static int translate_stat(CPULM32State *env, target_ulong addr,
+ struct stat *s)
+{
+ struct newlib_stat *p;
+
+ p = lock_user(VERIFY_WRITE, addr, sizeof(struct newlib_stat), 0);
+ if (!p) {
+ return 0;
+ }
+ p->newlib_st_dev = cpu_to_be16(s->st_dev);
+ p->newlib_st_ino = cpu_to_be16(s->st_ino);
+ p->newlib_st_mode = cpu_to_be16(s->st_mode);
+ p->newlib_st_nlink = cpu_to_be16(s->st_nlink);
+ p->newlib_st_uid = cpu_to_be16(s->st_uid);
+ p->newlib_st_gid = cpu_to_be16(s->st_gid);
+ p->newlib_st_rdev = cpu_to_be16(s->st_rdev);
+ p->newlib_st_size = cpu_to_be32(s->st_size);
+ p->newlib_st_atime = cpu_to_be32(s->st_atime);
+ p->newlib_st_mtime = cpu_to_be32(s->st_mtime);
+ p->newlib_st_ctime = cpu_to_be32(s->st_ctime);
+ unlock_user(p, addr, sizeof(struct newlib_stat));
+
+ return 1;
+}
+
+bool lm32_cpu_do_semihosting(CPUState *cs)
+{
+ LM32CPU *cpu = LM32_CPU(cs);
+ CPULM32State *env = &cpu->env;
+
+ int ret = -1;
+ target_ulong nr, arg0, arg1, arg2;
+ void *p;
+ struct stat s;
+
+ nr = env->regs[R_R8];
+ arg0 = env->regs[R_R1];
+ arg1 = env->regs[R_R2];
+ arg2 = env->regs[R_R3];
+
+ switch (nr) {
+ case TARGET_SYS_exit:
+ /* void _exit(int rc) */
+ exit(arg0);
+
+ case TARGET_SYS_open:
+ /* int open(const char *pathname, int flags) */
+ p = lock_user_string(arg0);
+ if (!p) {
+ ret = -1;
+ } else {
+ ret = open(p, translate_openflags(arg2));
+ unlock_user(p, arg0, 0);
+ }
+ break;
+
+ case TARGET_SYS_read:
+ /* ssize_t read(int fd, void *buf, size_t count) */
+ p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
+ if (!p) {
+ ret = -1;
+ } else {
+ ret = read(arg0, p, arg2);
+ unlock_user(p, arg1, arg2);
+ }
+ break;
+
+ case TARGET_SYS_write:
+ /* ssize_t write(int fd, const void *buf, size_t count) */
+ p = lock_user(VERIFY_READ, arg1, arg2, 1);
+ if (!p) {
+ ret = -1;
+ } else {
+ ret = write(arg0, p, arg2);
+ unlock_user(p, arg1, 0);
+ }
+ break;
+
+ case TARGET_SYS_close:
+ /* int close(int fd) */
+ /* don't close stdin/stdout/stderr */
+ if (arg0 > 2) {
+ ret = close(arg0);
+ } else {
+ ret = 0;
+ }
+ break;
+
+ case TARGET_SYS_lseek:
+ /* off_t lseek(int fd, off_t offset, int whence) */
+ ret = lseek(arg0, arg1, arg2);
+ break;
+
+ case TARGET_SYS_stat:
+ /* int stat(const char *path, struct stat *buf) */
+ p = lock_user_string(arg0);
+ if (!p) {
+ ret = -1;
+ } else {
+ ret = stat(p, &s);
+ unlock_user(p, arg0, 0);
+ if (translate_stat(env, arg1, &s) == 0) {
+ ret = -1;
+ }
+ }
+ break;
+
+ case TARGET_SYS_fstat:
+ /* int fstat(int fd, struct stat *buf) */
+ ret = fstat(arg0, &s);
+ if (ret == 0) {
+ if (translate_stat(env, arg1, &s) == 0) {
+ ret = -1;
+ }
+ }
+ break;
+
+ default:
+ /* unhandled */
+ return false;
+ }
+
+ env->regs[R_R1] = ret;
+ return true;
+}
diff --git a/target/lm32/machine.c b/target/lm32/machine.c
new file mode 100644
index 0000000000..3c258a4bcc
--- /dev/null
+++ b/target/lm32/machine.c
@@ -0,0 +1,36 @@
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "hw/hw.h"
+#include "hw/boards.h"
+#include "migration/cpu.h"
+
+static const VMStateDescription vmstate_env = {
+ .name = "env",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(regs, CPULM32State, 32),
+ VMSTATE_UINT32(pc, CPULM32State),
+ VMSTATE_UINT32(ie, CPULM32State),
+ VMSTATE_UINT32(icc, CPULM32State),
+ VMSTATE_UINT32(dcc, CPULM32State),
+ VMSTATE_UINT32(cc, CPULM32State),
+ VMSTATE_UINT32(eba, CPULM32State),
+ VMSTATE_UINT32(dc, CPULM32State),
+ VMSTATE_UINT32(deba, CPULM32State),
+ VMSTATE_UINT32_ARRAY(bp, CPULM32State, 4),
+ VMSTATE_UINT32_ARRAY(wp, CPULM32State, 4),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+const VMStateDescription vmstate_lm32_cpu = {
+ .name = "cpu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_STRUCT(env, LM32CPU, 1, vmstate_env, CPULM32State),
+ VMSTATE_END_OF_LIST()
+ }
+};
diff --git a/target/lm32/op_helper.c b/target/lm32/op_helper.c
new file mode 100644
index 0000000000..2177c8ad12
--- /dev/null
+++ b/target/lm32/op_helper.c
@@ -0,0 +1,162 @@
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "qemu/host-utils.h"
+
+#include "hw/lm32/lm32_pic.h"
+#include "hw/char/lm32_juart.h"
+
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+
+#ifndef CONFIG_USER_ONLY
+#include "sysemu/sysemu.h"
+#endif
+
+#if !defined(CONFIG_USER_ONLY)
+void raise_exception(CPULM32State *env, int index)
+{
+ CPUState *cs = CPU(lm32_env_get_cpu(env));
+
+ cs->exception_index = index;
+ cpu_loop_exit(cs);
+}
+
+void HELPER(raise_exception)(CPULM32State *env, uint32_t index)
+{
+ raise_exception(env, index);
+}
+
+void HELPER(hlt)(CPULM32State *env)
+{
+ CPUState *cs = CPU(lm32_env_get_cpu(env));
+
+ cs->halted = 1;
+ cs->exception_index = EXCP_HLT;
+ cpu_loop_exit(cs);
+}
+
+void HELPER(ill)(CPULM32State *env)
+{
+#ifndef CONFIG_USER_ONLY
+ CPUState *cs = CPU(lm32_env_get_cpu(env));
+ fprintf(stderr, "VM paused due to illegal instruction. "
+ "Connect a debugger or switch to the monitor console "
+ "to find out more.\n");
+ vm_stop(RUN_STATE_PAUSED);
+ cs->halted = 1;
+ raise_exception(env, EXCP_HALTED);
+#endif
+}
+
+void HELPER(wcsr_bp)(CPULM32State *env, uint32_t bp, uint32_t idx)
+{
+ uint32_t addr = bp & ~1;
+
+ assert(idx < 4);
+
+ env->bp[idx] = bp;
+ lm32_breakpoint_remove(env, idx);
+ if (bp & 1) {
+ lm32_breakpoint_insert(env, idx, addr);
+ }
+}
+
+void HELPER(wcsr_wp)(CPULM32State *env, uint32_t wp, uint32_t idx)
+{
+ lm32_wp_t wp_type;
+
+ assert(idx < 4);
+
+ env->wp[idx] = wp;
+
+ wp_type = lm32_wp_type(env->dc, idx);
+ lm32_watchpoint_remove(env, idx);
+ if (wp_type != LM32_WP_DISABLED) {
+ lm32_watchpoint_insert(env, idx, wp, wp_type);
+ }
+}
+
+void HELPER(wcsr_dc)(CPULM32State *env, uint32_t dc)
+{
+ uint32_t old_dc;
+ int i;
+ lm32_wp_t old_type;
+ lm32_wp_t new_type;
+
+ old_dc = env->dc;
+ env->dc = dc;
+
+ for (i = 0; i < 4; i++) {
+ old_type = lm32_wp_type(old_dc, i);
+ new_type = lm32_wp_type(dc, i);
+
+ if (old_type != new_type) {
+ lm32_watchpoint_remove(env, i);
+ if (new_type != LM32_WP_DISABLED) {
+ lm32_watchpoint_insert(env, i, env->wp[i], new_type);
+ }
+ }
+ }
+}
+
+void HELPER(wcsr_im)(CPULM32State *env, uint32_t im)
+{
+ lm32_pic_set_im(env->pic_state, im);
+}
+
+void HELPER(wcsr_ip)(CPULM32State *env, uint32_t im)
+{
+ lm32_pic_set_ip(env->pic_state, im);
+}
+
+void HELPER(wcsr_jtx)(CPULM32State *env, uint32_t jtx)
+{
+ lm32_juart_set_jtx(env->juart_state, jtx);
+}
+
+void HELPER(wcsr_jrx)(CPULM32State *env, uint32_t jrx)
+{
+ lm32_juart_set_jrx(env->juart_state, jrx);
+}
+
+uint32_t HELPER(rcsr_im)(CPULM32State *env)
+{
+ return lm32_pic_get_im(env->pic_state);
+}
+
+uint32_t HELPER(rcsr_ip)(CPULM32State *env)
+{
+ return lm32_pic_get_ip(env->pic_state);
+}
+
+uint32_t HELPER(rcsr_jtx)(CPULM32State *env)
+{
+ return lm32_juart_get_jtx(env->juart_state);
+}
+
+uint32_t HELPER(rcsr_jrx)(CPULM32State *env)
+{
+ return lm32_juart_get_jrx(env->juart_state);
+}
+
+/* Try to fill the TLB and raise an exception on error. If retaddr is
+ * NULL, it means that the function was called in C code (i.e. not
+ * from generated code or from helper.c)
+ */
+void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ int ret;
+
+ ret = lm32_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
+ if (unlikely(ret)) {
+ if (retaddr) {
+ /* now we have a real cpu fault */
+ cpu_restore_state(cs, retaddr);
+ }
+ cpu_loop_exit(cs);
+ }
+}
+#endif
+
diff --git a/target/lm32/translate.c b/target/lm32/translate.c
new file mode 100644
index 0000000000..692882f447
--- /dev/null
+++ b/target/lm32/translate.c
@@ -0,0 +1,1254 @@
+/*
+ * LatticeMico32 main translation routines.
+ *
+ * Copyright (c) 2010 Michael Walle <michael@walle.cc>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "disas/disas.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "tcg-op.h"
+
+#include "exec/cpu_ldst.h"
+#include "hw/lm32/lm32_pic.h"
+
+#include "exec/helper-gen.h"
+
+#include "trace-tcg.h"
+#include "exec/log.h"
+
+
+#define DISAS_LM32 0
+
+#define LOG_DIS(...) \
+ do { \
+ if (DISAS_LM32) { \
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__); \
+ } \
+ } while (0)
+
+#define EXTRACT_FIELD(src, start, end) \
+ (((src) >> start) & ((1 << (end - start + 1)) - 1))
+
+#define MEM_INDEX 0
+
+static TCGv_env cpu_env;
+static TCGv cpu_R[32];
+static TCGv cpu_pc;
+static TCGv cpu_ie;
+static TCGv cpu_icc;
+static TCGv cpu_dcc;
+static TCGv cpu_cc;
+static TCGv cpu_cfg;
+static TCGv cpu_eba;
+static TCGv cpu_dc;
+static TCGv cpu_deba;
+static TCGv cpu_bp[4];
+static TCGv cpu_wp[4];
+
+#include "exec/gen-icount.h"
+
+enum {
+ OP_FMT_RI,
+ OP_FMT_RR,
+ OP_FMT_CR,
+ OP_FMT_I
+};
+
+/* This is the state at translation time. */
+typedef struct DisasContext {
+ target_ulong pc;
+
+ /* Decoder. */
+ int format;
+ uint32_t ir;
+ uint8_t opcode;
+ uint8_t r0, r1, r2, csr;
+ uint16_t imm5;
+ uint16_t imm16;
+ uint32_t imm26;
+
+ unsigned int delayed_branch;
+ unsigned int tb_flags, synced_flags; /* tb dependent flags. */
+ int is_jmp;
+
+ struct TranslationBlock *tb;
+ int singlestep_enabled;
+
+ uint32_t features;
+ uint8_t num_breakpoints;
+ uint8_t num_watchpoints;
+} DisasContext;
+
+static const char *regnames[] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+ "r24", "r25", "r26/gp", "r27/fp", "r28/sp", "r29/ra",
+ "r30/ea", "r31/ba", "bp0", "bp1", "bp2", "bp3", "wp0",
+ "wp1", "wp2", "wp3"
+};
+
+static inline int zero_extend(unsigned int val, int width)
+{
+ return val & ((1 << width) - 1);
+}
+
+static inline int sign_extend(unsigned int val, int width)
+{
+ int sval;
+
+ /* LSL. */
+ val <<= 32 - width;
+ sval = val;
+ /* ASR. */
+ sval >>= 32 - width;
+
+ return sval;
+}
+
+static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
+{
+ TCGv_i32 tmp = tcg_const_i32(index);
+
+ gen_helper_raise_exception(cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+}
+
+static inline void t_gen_illegal_insn(DisasContext *dc)
+{
+ tcg_gen_movi_tl(cpu_pc, dc->pc);
+ gen_helper_ill(cpu_env);
+}
+
+static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
+{
+ if (unlikely(dc->singlestep_enabled)) {
+ return false;
+ }
+
+#ifndef CONFIG_USER_ONLY
+ return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
+#else
+ return true;
+#endif
+}
+
+static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
+{
+ if (use_goto_tb(dc, dest)) {
+ tcg_gen_goto_tb(n);
+ tcg_gen_movi_tl(cpu_pc, dest);
+ tcg_gen_exit_tb((uintptr_t)dc->tb + n);
+ } else {
+ tcg_gen_movi_tl(cpu_pc, dest);
+ if (dc->singlestep_enabled) {
+ t_gen_raise_exception(dc, EXCP_DEBUG);
+ }
+ tcg_gen_exit_tb(0);
+ }
+}
+
+static void dec_add(DisasContext *dc)
+{
+ if (dc->format == OP_FMT_RI) {
+ if (dc->r0 == R_R0) {
+ if (dc->r1 == R_R0 && dc->imm16 == 0) {
+ LOG_DIS("nop\n");
+ } else {
+ LOG_DIS("mvi r%d, %d\n", dc->r1, sign_extend(dc->imm16, 16));
+ }
+ } else {
+ LOG_DIS("addi r%d, r%d, %d\n", dc->r1, dc->r0,
+ sign_extend(dc->imm16, 16));
+ }
+ } else {
+ LOG_DIS("add r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
+ }
+
+ if (dc->format == OP_FMT_RI) {
+ tcg_gen_addi_tl(cpu_R[dc->r1], cpu_R[dc->r0],
+ sign_extend(dc->imm16, 16));
+ } else {
+ tcg_gen_add_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
+ }
+}
+
+static void dec_and(DisasContext *dc)
+{
+ if (dc->format == OP_FMT_RI) {
+ LOG_DIS("andi r%d, r%d, %d\n", dc->r1, dc->r0,
+ zero_extend(dc->imm16, 16));
+ } else {
+ LOG_DIS("and r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
+ }
+
+ if (dc->format == OP_FMT_RI) {
+ tcg_gen_andi_tl(cpu_R[dc->r1], cpu_R[dc->r0],
+ zero_extend(dc->imm16, 16));
+ } else {
+ if (dc->r0 == 0 && dc->r1 == 0 && dc->r2 == 0) {
+ tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
+ gen_helper_hlt(cpu_env);
+ } else {
+ tcg_gen_and_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
+ }
+ }
+}
+
+static void dec_andhi(DisasContext *dc)
+{
+ LOG_DIS("andhi r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm16);
+
+ tcg_gen_andi_tl(cpu_R[dc->r1], cpu_R[dc->r0], (dc->imm16 << 16));
+}
+
+static void dec_b(DisasContext *dc)
+{
+ if (dc->r0 == R_RA) {
+ LOG_DIS("ret\n");
+ } else if (dc->r0 == R_EA) {
+ LOG_DIS("eret\n");
+ } else if (dc->r0 == R_BA) {
+ LOG_DIS("bret\n");
+ } else {
+ LOG_DIS("b r%d\n", dc->r0);
+ }
+
+ /* restore IE.IE in case of an eret */
+ if (dc->r0 == R_EA) {
+ TCGv t0 = tcg_temp_new();
+ TCGLabel *l1 = gen_new_label();
+ tcg_gen_andi_tl(t0, cpu_ie, IE_EIE);
+ tcg_gen_ori_tl(cpu_ie, cpu_ie, IE_IE);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t0, IE_EIE, l1);
+ tcg_gen_andi_tl(cpu_ie, cpu_ie, ~IE_IE);
+ gen_set_label(l1);
+ tcg_temp_free(t0);
+ } else if (dc->r0 == R_BA) {
+ TCGv t0 = tcg_temp_new();
+ TCGLabel *l1 = gen_new_label();
+ tcg_gen_andi_tl(t0, cpu_ie, IE_BIE);
+ tcg_gen_ori_tl(cpu_ie, cpu_ie, IE_IE);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t0, IE_BIE, l1);
+ tcg_gen_andi_tl(cpu_ie, cpu_ie, ~IE_IE);
+ gen_set_label(l1);
+ tcg_temp_free(t0);
+ }
+ tcg_gen_mov_tl(cpu_pc, cpu_R[dc->r0]);
+
+ dc->is_jmp = DISAS_JUMP;
+}
+
+static void dec_bi(DisasContext *dc)
+{
+ LOG_DIS("bi %d\n", sign_extend(dc->imm26 << 2, 26));
+
+ gen_goto_tb(dc, 0, dc->pc + (sign_extend(dc->imm26 << 2, 26)));
+
+ dc->is_jmp = DISAS_TB_JUMP;
+}
+
+static inline void gen_cond_branch(DisasContext *dc, int cond)
+{
+ TCGLabel *l1 = gen_new_label();
+ tcg_gen_brcond_tl(cond, cpu_R[dc->r0], cpu_R[dc->r1], l1);
+ gen_goto_tb(dc, 0, dc->pc + 4);
+ gen_set_label(l1);
+ gen_goto_tb(dc, 1, dc->pc + (sign_extend(dc->imm16 << 2, 16)));
+ dc->is_jmp = DISAS_TB_JUMP;
+}
+
+static void dec_be(DisasContext *dc)
+{
+ LOG_DIS("be r%d, r%d, %d\n", dc->r1, dc->r0,
+ sign_extend(dc->imm16, 16) * 4);
+
+ gen_cond_branch(dc, TCG_COND_EQ);
+}
+
+static void dec_bg(DisasContext *dc)
+{
+ LOG_DIS("bg r%d, r%d, %d\n", dc->r1, dc->r0,
+ sign_extend(dc->imm16, 16 * 4));
+
+ gen_cond_branch(dc, TCG_COND_GT);
+}
+
+static void dec_bge(DisasContext *dc)
+{
+ LOG_DIS("bge r%d, r%d, %d\n", dc->r1, dc->r0,
+ sign_extend(dc->imm16, 16) * 4);
+
+ gen_cond_branch(dc, TCG_COND_GE);
+}
+
+static void dec_bgeu(DisasContext *dc)
+{
+ LOG_DIS("bgeu r%d, r%d, %d\n", dc->r1, dc->r0,
+ sign_extend(dc->imm16, 16) * 4);
+
+ gen_cond_branch(dc, TCG_COND_GEU);
+}
+
+static void dec_bgu(DisasContext *dc)
+{
+ LOG_DIS("bgu r%d, r%d, %d\n", dc->r1, dc->r0,
+ sign_extend(dc->imm16, 16) * 4);
+
+ gen_cond_branch(dc, TCG_COND_GTU);
+}
+
+static void dec_bne(DisasContext *dc)
+{
+ LOG_DIS("bne r%d, r%d, %d\n", dc->r1, dc->r0,
+ sign_extend(dc->imm16, 16) * 4);
+
+ gen_cond_branch(dc, TCG_COND_NE);
+}
+
+static void dec_call(DisasContext *dc)
+{
+ LOG_DIS("call r%d\n", dc->r0);
+
+ tcg_gen_movi_tl(cpu_R[R_RA], dc->pc + 4);
+ tcg_gen_mov_tl(cpu_pc, cpu_R[dc->r0]);
+
+ dc->is_jmp = DISAS_JUMP;
+}
+
+static void dec_calli(DisasContext *dc)
+{
+ LOG_DIS("calli %d\n", sign_extend(dc->imm26, 26) * 4);
+
+ tcg_gen_movi_tl(cpu_R[R_RA], dc->pc + 4);
+ gen_goto_tb(dc, 0, dc->pc + (sign_extend(dc->imm26 << 2, 26)));
+
+ dc->is_jmp = DISAS_TB_JUMP;
+}
+
+static inline void gen_compare(DisasContext *dc, int cond)
+{
+ int i;
+
+ if (dc->format == OP_FMT_RI) {
+ switch (cond) {
+ case TCG_COND_GEU:
+ case TCG_COND_GTU:
+ i = zero_extend(dc->imm16, 16);
+ break;
+ default:
+ i = sign_extend(dc->imm16, 16);
+ break;
+ }
+
+ tcg_gen_setcondi_tl(cond, cpu_R[dc->r1], cpu_R[dc->r0], i);
+ } else {
+ tcg_gen_setcond_tl(cond, cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
+ }
+}
+
+static void dec_cmpe(DisasContext *dc)
+{
+ if (dc->format == OP_FMT_RI) {
+ LOG_DIS("cmpei r%d, r%d, %d\n", dc->r1, dc->r0,
+ sign_extend(dc->imm16, 16));
+ } else {
+ LOG_DIS("cmpe r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
+ }
+
+ gen_compare(dc, TCG_COND_EQ);
+}
+
+static void dec_cmpg(DisasContext *dc)
+{
+ if (dc->format == OP_FMT_RI) {
+ LOG_DIS("cmpgi r%d, r%d, %d\n", dc->r1, dc->r0,
+ sign_extend(dc->imm16, 16));
+ } else {
+ LOG_DIS("cmpg r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
+ }
+
+ gen_compare(dc, TCG_COND_GT);
+}
+
+static void dec_cmpge(DisasContext *dc)
+{
+ if (dc->format == OP_FMT_RI) {
+ LOG_DIS("cmpgei r%d, r%d, %d\n", dc->r1, dc->r0,
+ sign_extend(dc->imm16, 16));
+ } else {
+ LOG_DIS("cmpge r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
+ }
+
+ gen_compare(dc, TCG_COND_GE);
+}
+
+static void dec_cmpgeu(DisasContext *dc)
+{
+ if (dc->format == OP_FMT_RI) {
+ LOG_DIS("cmpgeui r%d, r%d, %d\n", dc->r1, dc->r0,
+ zero_extend(dc->imm16, 16));
+ } else {
+ LOG_DIS("cmpgeu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
+ }
+
+ gen_compare(dc, TCG_COND_GEU);
+}
+
+static void dec_cmpgu(DisasContext *dc)
+{
+ if (dc->format == OP_FMT_RI) {
+ LOG_DIS("cmpgui r%d, r%d, %d\n", dc->r1, dc->r0,
+ zero_extend(dc->imm16, 16));
+ } else {
+ LOG_DIS("cmpgu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
+ }
+
+ gen_compare(dc, TCG_COND_GTU);
+}
+
+static void dec_cmpne(DisasContext *dc)
+{
+ if (dc->format == OP_FMT_RI) {
+ LOG_DIS("cmpnei r%d, r%d, %d\n", dc->r1, dc->r0,
+ sign_extend(dc->imm16, 16));
+ } else {
+ LOG_DIS("cmpne r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
+ }
+
+ gen_compare(dc, TCG_COND_NE);
+}
+
+static void dec_divu(DisasContext *dc)
+{
+ TCGLabel *l1;
+
+ LOG_DIS("divu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
+
+ if (!(dc->features & LM32_FEATURE_DIVIDE)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "hardware divider is not available\n");
+ t_gen_illegal_insn(dc);
+ return;
+ }
+
+ l1 = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_NE, cpu_R[dc->r1], 0, l1);
+ tcg_gen_movi_tl(cpu_pc, dc->pc);
+ t_gen_raise_exception(dc, EXCP_DIVIDE_BY_ZERO);
+ gen_set_label(l1);
+ tcg_gen_divu_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
+}
+
+static void dec_lb(DisasContext *dc)
+{
+ TCGv t0;
+
+ LOG_DIS("lb r%d, (r%d+%d)\n", dc->r1, dc->r0, dc->imm16);
+
+ t0 = tcg_temp_new();
+ tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
+ tcg_gen_qemu_ld8s(cpu_R[dc->r1], t0, MEM_INDEX);
+ tcg_temp_free(t0);
+}
+
+static void dec_lbu(DisasContext *dc)
+{
+ TCGv t0;
+
+ LOG_DIS("lbu r%d, (r%d+%d)\n", dc->r1, dc->r0, dc->imm16);
+
+ t0 = tcg_temp_new();
+ tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
+ tcg_gen_qemu_ld8u(cpu_R[dc->r1], t0, MEM_INDEX);
+ tcg_temp_free(t0);
+}
+
+static void dec_lh(DisasContext *dc)
+{
+ TCGv t0;
+
+ LOG_DIS("lh r%d, (r%d+%d)\n", dc->r1, dc->r0, dc->imm16);
+
+ t0 = tcg_temp_new();
+ tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
+ tcg_gen_qemu_ld16s(cpu_R[dc->r1], t0, MEM_INDEX);
+ tcg_temp_free(t0);
+}
+
+static void dec_lhu(DisasContext *dc)
+{
+ TCGv t0;
+
+ LOG_DIS("lhu r%d, (r%d+%d)\n", dc->r1, dc->r0, dc->imm16);
+
+ t0 = tcg_temp_new();
+ tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
+ tcg_gen_qemu_ld16u(cpu_R[dc->r1], t0, MEM_INDEX);
+ tcg_temp_free(t0);
+}
+
+static void dec_lw(DisasContext *dc)
+{
+ TCGv t0;
+
+ LOG_DIS("lw r%d, (r%d+%d)\n", dc->r1, dc->r0, sign_extend(dc->imm16, 16));
+
+ t0 = tcg_temp_new();
+ tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
+ tcg_gen_qemu_ld32s(cpu_R[dc->r1], t0, MEM_INDEX);
+ tcg_temp_free(t0);
+}
+
+static void dec_modu(DisasContext *dc)
+{
+ TCGLabel *l1;
+
+ LOG_DIS("modu r%d, r%d, %d\n", dc->r2, dc->r0, dc->r1);
+
+ if (!(dc->features & LM32_FEATURE_DIVIDE)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "hardware divider is not available\n");
+ t_gen_illegal_insn(dc);
+ return;
+ }
+
+ l1 = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_NE, cpu_R[dc->r1], 0, l1);
+ tcg_gen_movi_tl(cpu_pc, dc->pc);
+ t_gen_raise_exception(dc, EXCP_DIVIDE_BY_ZERO);
+ gen_set_label(l1);
+ tcg_gen_remu_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
+}
+
+static void dec_mul(DisasContext *dc)
+{
+ if (dc->format == OP_FMT_RI) {
+ LOG_DIS("muli r%d, r%d, %d\n", dc->r1, dc->r0,
+ sign_extend(dc->imm16, 16));
+ } else {
+ LOG_DIS("mul r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
+ }
+
+ if (!(dc->features & LM32_FEATURE_MULTIPLY)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "hardware multiplier is not available\n");
+ t_gen_illegal_insn(dc);
+ return;
+ }
+
+ if (dc->format == OP_FMT_RI) {
+ tcg_gen_muli_tl(cpu_R[dc->r1], cpu_R[dc->r0],
+ sign_extend(dc->imm16, 16));
+ } else {
+ tcg_gen_mul_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
+ }
+}
+
+static void dec_nor(DisasContext *dc)
+{
+ if (dc->format == OP_FMT_RI) {
+ LOG_DIS("nori r%d, r%d, %d\n", dc->r1, dc->r0,
+ zero_extend(dc->imm16, 16));
+ } else {
+ LOG_DIS("nor r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
+ }
+
+ if (dc->format == OP_FMT_RI) {
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_movi_tl(t0, zero_extend(dc->imm16, 16));
+ tcg_gen_nor_tl(cpu_R[dc->r1], cpu_R[dc->r0], t0);
+ tcg_temp_free(t0);
+ } else {
+ tcg_gen_nor_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
+ }
+}
+
+static void dec_or(DisasContext *dc)
+{
+ if (dc->format == OP_FMT_RI) {
+ LOG_DIS("ori r%d, r%d, %d\n", dc->r1, dc->r0,
+ zero_extend(dc->imm16, 16));
+ } else {
+ if (dc->r1 == R_R0) {
+ LOG_DIS("mv r%d, r%d\n", dc->r2, dc->r0);
+ } else {
+ LOG_DIS("or r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
+ }
+ }
+
+ if (dc->format == OP_FMT_RI) {
+ tcg_gen_ori_tl(cpu_R[dc->r1], cpu_R[dc->r0],
+ zero_extend(dc->imm16, 16));
+ } else {
+ tcg_gen_or_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
+ }
+}
+
+static void dec_orhi(DisasContext *dc)
+{
+ if (dc->r0 == R_R0) {
+ LOG_DIS("mvhi r%d, %d\n", dc->r1, dc->imm16);
+ } else {
+ LOG_DIS("orhi r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm16);
+ }
+
+ tcg_gen_ori_tl(cpu_R[dc->r1], cpu_R[dc->r0], (dc->imm16 << 16));
+}
+
+static void dec_scall(DisasContext *dc)
+{
+ switch (dc->imm5) {
+ case 2:
+ LOG_DIS("break\n");
+ tcg_gen_movi_tl(cpu_pc, dc->pc);
+ t_gen_raise_exception(dc, EXCP_BREAKPOINT);
+ break;
+ case 7:
+ LOG_DIS("scall\n");
+ tcg_gen_movi_tl(cpu_pc, dc->pc);
+ t_gen_raise_exception(dc, EXCP_SYSTEMCALL);
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "invalid opcode @0x%x", dc->pc);
+ t_gen_illegal_insn(dc);
+ break;
+ }
+}
+
+static void dec_rcsr(DisasContext *dc)
+{
+ LOG_DIS("rcsr r%d, %d\n", dc->r2, dc->csr);
+
+ switch (dc->csr) {
+ case CSR_IE:
+ tcg_gen_mov_tl(cpu_R[dc->r2], cpu_ie);
+ break;
+ case CSR_IM:
+ gen_helper_rcsr_im(cpu_R[dc->r2], cpu_env);
+ break;
+ case CSR_IP:
+ gen_helper_rcsr_ip(cpu_R[dc->r2], cpu_env);
+ break;
+ case CSR_CC:
+ tcg_gen_mov_tl(cpu_R[dc->r2], cpu_cc);
+ break;
+ case CSR_CFG:
+ tcg_gen_mov_tl(cpu_R[dc->r2], cpu_cfg);
+ break;
+ case CSR_EBA:
+ tcg_gen_mov_tl(cpu_R[dc->r2], cpu_eba);
+ break;
+ case CSR_DC:
+ tcg_gen_mov_tl(cpu_R[dc->r2], cpu_dc);
+ break;
+ case CSR_DEBA:
+ tcg_gen_mov_tl(cpu_R[dc->r2], cpu_deba);
+ break;
+ case CSR_JTX:
+ gen_helper_rcsr_jtx(cpu_R[dc->r2], cpu_env);
+ break;
+ case CSR_JRX:
+ gen_helper_rcsr_jrx(cpu_R[dc->r2], cpu_env);
+ break;
+ case CSR_ICC:
+ case CSR_DCC:
+ case CSR_BP0:
+ case CSR_BP1:
+ case CSR_BP2:
+ case CSR_BP3:
+ case CSR_WP0:
+ case CSR_WP1:
+ case CSR_WP2:
+ case CSR_WP3:
+ qemu_log_mask(LOG_GUEST_ERROR, "invalid read access csr=%x\n", dc->csr);
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "read_csr: unknown csr=%x\n", dc->csr);
+ break;
+ }
+}
+
+static void dec_sb(DisasContext *dc)
+{
+ TCGv t0;
+
+ LOG_DIS("sb (r%d+%d), r%d\n", dc->r0, dc->imm16, dc->r1);
+
+ t0 = tcg_temp_new();
+ tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
+ tcg_gen_qemu_st8(cpu_R[dc->r1], t0, MEM_INDEX);
+ tcg_temp_free(t0);
+}
+
+static void dec_sextb(DisasContext *dc)
+{
+ LOG_DIS("sextb r%d, r%d\n", dc->r2, dc->r0);
+
+ if (!(dc->features & LM32_FEATURE_SIGN_EXTEND)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "hardware sign extender is not available\n");
+ t_gen_illegal_insn(dc);
+ return;
+ }
+
+ tcg_gen_ext8s_tl(cpu_R[dc->r2], cpu_R[dc->r0]);
+}
+
+static void dec_sexth(DisasContext *dc)
+{
+ LOG_DIS("sexth r%d, r%d\n", dc->r2, dc->r0);
+
+ if (!(dc->features & LM32_FEATURE_SIGN_EXTEND)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "hardware sign extender is not available\n");
+ t_gen_illegal_insn(dc);
+ return;
+ }
+
+ tcg_gen_ext16s_tl(cpu_R[dc->r2], cpu_R[dc->r0]);
+}
+
+static void dec_sh(DisasContext *dc)
+{
+ TCGv t0;
+
+ LOG_DIS("sh (r%d+%d), r%d\n", dc->r0, dc->imm16, dc->r1);
+
+ t0 = tcg_temp_new();
+ tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
+ tcg_gen_qemu_st16(cpu_R[dc->r1], t0, MEM_INDEX);
+ tcg_temp_free(t0);
+}
+
+static void dec_sl(DisasContext *dc)
+{
+ if (dc->format == OP_FMT_RI) {
+ LOG_DIS("sli r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm5);
+ } else {
+ LOG_DIS("sl r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
+ }
+
+ if (!(dc->features & LM32_FEATURE_SHIFT)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "hardware shifter is not available\n");
+ t_gen_illegal_insn(dc);
+ return;
+ }
+
+ if (dc->format == OP_FMT_RI) {
+ tcg_gen_shli_tl(cpu_R[dc->r1], cpu_R[dc->r0], dc->imm5);
+ } else {
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cpu_R[dc->r1], 0x1f);
+ tcg_gen_shl_tl(cpu_R[dc->r2], cpu_R[dc->r0], t0);
+ tcg_temp_free(t0);
+ }
+}
+
+static void dec_sr(DisasContext *dc)
+{
+ if (dc->format == OP_FMT_RI) {
+ LOG_DIS("sri r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm5);
+ } else {
+ LOG_DIS("sr r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
+ }
+
+ /* The real CPU (w/o hardware shifter) only supports right shift by exactly
+ * one bit */
+ if (dc->format == OP_FMT_RI) {
+ if (!(dc->features & LM32_FEATURE_SHIFT) && (dc->imm5 != 1)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "hardware shifter is not available\n");
+ t_gen_illegal_insn(dc);
+ return;
+ }
+ tcg_gen_sari_tl(cpu_R[dc->r1], cpu_R[dc->r0], dc->imm5);
+ } else {
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ TCGv t0 = tcg_temp_local_new();
+ tcg_gen_andi_tl(t0, cpu_R[dc->r1], 0x1f);
+
+ if (!(dc->features & LM32_FEATURE_SHIFT)) {
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 1, l1);
+ t_gen_illegal_insn(dc);
+ tcg_gen_br(l2);
+ }
+
+ gen_set_label(l1);
+ tcg_gen_sar_tl(cpu_R[dc->r2], cpu_R[dc->r0], t0);
+ gen_set_label(l2);
+
+ tcg_temp_free(t0);
+ }
+}
+
+static void dec_sru(DisasContext *dc)
+{
+ if (dc->format == OP_FMT_RI) {
+ LOG_DIS("srui r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm5);
+ } else {
+ LOG_DIS("sru r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
+ }
+
+ if (dc->format == OP_FMT_RI) {
+ if (!(dc->features & LM32_FEATURE_SHIFT) && (dc->imm5 != 1)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "hardware shifter is not available\n");
+ t_gen_illegal_insn(dc);
+ return;
+ }
+ tcg_gen_shri_tl(cpu_R[dc->r1], cpu_R[dc->r0], dc->imm5);
+ } else {
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ TCGv t0 = tcg_temp_local_new();
+ tcg_gen_andi_tl(t0, cpu_R[dc->r1], 0x1f);
+
+ if (!(dc->features & LM32_FEATURE_SHIFT)) {
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 1, l1);
+ t_gen_illegal_insn(dc);
+ tcg_gen_br(l2);
+ }
+
+ gen_set_label(l1);
+ tcg_gen_shr_tl(cpu_R[dc->r2], cpu_R[dc->r0], t0);
+ gen_set_label(l2);
+
+ tcg_temp_free(t0);
+ }
+}
+
+static void dec_sub(DisasContext *dc)
+{
+ LOG_DIS("sub r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
+
+ tcg_gen_sub_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
+}
+
+static void dec_sw(DisasContext *dc)
+{
+ TCGv t0;
+
+ LOG_DIS("sw (r%d+%d), r%d\n", dc->r0, sign_extend(dc->imm16, 16), dc->r1);
+
+ t0 = tcg_temp_new();
+ tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
+ tcg_gen_qemu_st32(cpu_R[dc->r1], t0, MEM_INDEX);
+ tcg_temp_free(t0);
+}
+
+static void dec_user(DisasContext *dc)
+{
+ LOG_DIS("user");
+
+ qemu_log_mask(LOG_GUEST_ERROR, "user instruction undefined\n");
+ t_gen_illegal_insn(dc);
+}
+
+static void dec_wcsr(DisasContext *dc)
+{
+ int no;
+
+ LOG_DIS("wcsr %d, r%d\n", dc->csr, dc->r1);
+
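+ /* Writes to IE, IM and IP may unmask a pending interrupt, so those
+ * cases below end the translation block (DISAS_UPDATE); under icount
+ * the IM/IP helpers are additionally bracketed with gen_io_start()/
+ * gen_io_end() so that interrupts are re-evaluated right after the
+ * write. */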
+ switch (dc->csr) {
+ case CSR_IE:
+ tcg_gen_mov_tl(cpu_ie, cpu_R[dc->r1]);
+ tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
+ dc->is_jmp = DISAS_UPDATE;
+ break;
+ case CSR_IM:
+ /* mark as an I/O operation because it could cause an interrupt */
+ if (dc->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_wcsr_im(cpu_env, cpu_R[dc->r1]);
+ tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
+ if (dc->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_end();
+ }
+ dc->is_jmp = DISAS_UPDATE;
+ break;
+ case CSR_IP:
+ /* mark as an I/O operation because it could cause an interrupt */
+ if (dc->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_wcsr_ip(cpu_env, cpu_R[dc->r1]);
+ tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
+ if (dc->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_end();
+ }
+ dc->is_jmp = DISAS_UPDATE;
+ break;
+ case CSR_ICC:
+ /* TODO */
+ break;
+ case CSR_DCC:
+ /* TODO */
+ break;
+ case CSR_EBA:
+ tcg_gen_mov_tl(cpu_eba, cpu_R[dc->r1]);
+ break;
+ case CSR_DEBA:
+ tcg_gen_mov_tl(cpu_deba, cpu_R[dc->r1]);
+ break;
+ case CSR_JTX:
+ gen_helper_wcsr_jtx(cpu_env, cpu_R[dc->r1]);
+ break;
+ case CSR_JRX:
+ gen_helper_wcsr_jrx(cpu_env, cpu_R[dc->r1]);
+ break;
+ case CSR_DC:
+ gen_helper_wcsr_dc(cpu_env, cpu_R[dc->r1]);
+ break;
+ case CSR_BP0:
+ case CSR_BP1:
+ case CSR_BP2:
+ case CSR_BP3:
+ no = dc->csr - CSR_BP0;
+ if (dc->num_breakpoints <= no) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "breakpoint #%i is not available\n", no);
+ t_gen_illegal_insn(dc);
+ break;
+ }
+ gen_helper_wcsr_bp(cpu_env, cpu_R[dc->r1], tcg_const_i32(no));
+ break;
+ case CSR_WP0:
+ case CSR_WP1:
+ case CSR_WP2:
+ case CSR_WP3:
+ no = dc->csr - CSR_WP0;
+ if (dc->num_watchpoints <= no) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "watchpoint #%i is not available\n", no);
+ t_gen_illegal_insn(dc);
+ break;
+ }
+ gen_helper_wcsr_wp(cpu_env, cpu_R[dc->r1], tcg_const_i32(no));
+ break;
+ case CSR_CC:
+ case CSR_CFG:
+ qemu_log_mask(LOG_GUEST_ERROR, "invalid write access csr=%x\n",
+ dc->csr);
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "write_csr: unknown csr=%x\n",
+ dc->csr);
+ break;
+ }
+}
+
+static void dec_xnor(DisasContext *dc)
+{
+ if (dc->format == OP_FMT_RI) {
+ LOG_DIS("xnori r%d, r%d, %d\n", dc->r1, dc->r0,
+ zero_extend(dc->imm16, 16));
+ } else {
+ if (dc->r1 == R_R0) {
+ LOG_DIS("not r%d, r%d\n", dc->r2, dc->r0);
+ } else {
+ LOG_DIS("xnor r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
+ }
+ }
+
+ if (dc->format == OP_FMT_RI) {
+ tcg_gen_xori_tl(cpu_R[dc->r1], cpu_R[dc->r0],
+ zero_extend(dc->imm16, 16));
+ tcg_gen_not_tl(cpu_R[dc->r1], cpu_R[dc->r1]);
+ } else {
+ tcg_gen_eqv_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
+ }
+}
+
+static void dec_xor(DisasContext *dc)
+{
+ if (dc->format == OP_FMT_RI) {
+ LOG_DIS("xori r%d, r%d, %d\n", dc->r1, dc->r0,
+ zero_extend(dc->imm16, 16));
+ } else {
+ LOG_DIS("xor r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
+ }
+
+ if (dc->format == OP_FMT_RI) {
+ tcg_gen_xori_tl(cpu_R[dc->r1], cpu_R[dc->r0],
+ zero_extend(dc->imm16, 16));
+ } else {
+ tcg_gen_xor_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
+ }
+}
+
+static void dec_ill(DisasContext *dc)
+{
+ qemu_log_mask(LOG_GUEST_ERROR, "invalid opcode 0x%02x\n", dc->opcode);
+ t_gen_illegal_insn(dc);
+}
+
+typedef void (*DecoderInfo)(DisasContext *dc);
+static const DecoderInfo decinfo[] = {
+ dec_sru, dec_nor, dec_mul, dec_sh, dec_lb, dec_sr, dec_xor, dec_lh,
+ dec_and, dec_xnor, dec_lw, dec_lhu, dec_sb, dec_add, dec_or, dec_sl,
+ dec_lbu, dec_be, dec_bg, dec_bge, dec_bgeu, dec_bgu, dec_sw, dec_bne,
+ dec_andhi, dec_cmpe, dec_cmpg, dec_cmpge, dec_cmpgeu, dec_cmpgu, dec_orhi,
+ dec_cmpne,
+ dec_sru, dec_nor, dec_mul, dec_divu, dec_rcsr, dec_sr, dec_xor, dec_ill,
+ dec_and, dec_xnor, dec_ill, dec_scall, dec_sextb, dec_add, dec_or, dec_sl,
+ dec_b, dec_modu, dec_sub, dec_user, dec_wcsr, dec_ill, dec_call, dec_sexth,
+ dec_bi, dec_cmpe, dec_cmpg, dec_cmpge, dec_cmpgeu, dec_cmpgu, dec_calli,
+ dec_cmpne
+};
+
+static inline void decode(DisasContext *dc, uint32_t ir)
+{
+ dc->ir = ir;
+ LOG_DIS("%8.8x\t", dc->ir);
+
+ dc->opcode = EXTRACT_FIELD(ir, 26, 31);
+
+ dc->imm5 = EXTRACT_FIELD(ir, 0, 4);
+ dc->imm16 = EXTRACT_FIELD(ir, 0, 15);
+ dc->imm26 = EXTRACT_FIELD(ir, 0, 25);
+
+ dc->csr = EXTRACT_FIELD(ir, 21, 25);
+ dc->r0 = EXTRACT_FIELD(ir, 21, 25);
+ dc->r1 = EXTRACT_FIELD(ir, 16, 20);
+ dc->r2 = EXTRACT_FIELD(ir, 11, 15);
+
+ /* bit 31 seems to indicate insn type. */
+ if (ir & (1 << 31)) {
+ dc->format = OP_FMT_RR;
+ } else {
+ dc->format = OP_FMT_RI;
+ }
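+ /* In other words: with the opcode in bits 26-31, a set bit 31 selects
+ * the upper half of decinfo[] (opcodes 0x20-0x3f); it is treated as the
+ * register-register form for those opcodes that distinguish RI/RR
+ * encodings. */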
+
+ assert(ARRAY_SIZE(decinfo) == 64);
+ assert(dc->opcode < 64);
+
+ decinfo[dc->opcode](dc);
+}
+
+/* generate intermediate code for basic block 'tb'. */
+void gen_intermediate_code(CPULM32State *env, struct TranslationBlock *tb)
+{
+ LM32CPU *cpu = lm32_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ struct DisasContext ctx, *dc = &ctx;
+ uint32_t pc_start;
+ uint32_t next_page_start;
+ int num_insns;
+ int max_insns;
+
+ pc_start = tb->pc;
+ dc->features = cpu->features;
+ dc->num_breakpoints = cpu->num_breakpoints;
+ dc->num_watchpoints = cpu->num_watchpoints;
+ dc->tb = tb;
+
+ dc->is_jmp = DISAS_NEXT;
+ dc->pc = pc_start;
+ dc->singlestep_enabled = cs->singlestep_enabled;
+
+ if (pc_start & 3) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "unaligned PC=%x. Ignoring lowest bits.\n", pc_start);
+ pc_start &= ~3;
+ }
+
+ next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
+ num_insns = 0;
+ max_insns = tb->cflags & CF_COUNT_MASK;
+ if (max_insns == 0) {
+ max_insns = CF_COUNT_MASK;
+ }
+ if (max_insns > TCG_MAX_INSNS) {
+ max_insns = TCG_MAX_INSNS;
+ }
+
+ gen_tb_start(tb);
+ do {
+ tcg_gen_insn_start(dc->pc);
+ num_insns++;
+
+ if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
+ tcg_gen_movi_tl(cpu_pc, dc->pc);
+ t_gen_raise_exception(dc, EXCP_DEBUG);
+ dc->is_jmp = DISAS_UPDATE;
+ /* The address covered by the breakpoint must be included in
+ [tb->pc, tb->pc + tb->size) in order for it to be properly
+ cleared -- thus we increment the PC here so that the logic
+ setting tb->size below does the right thing. */
+ dc->pc += 4;
+ break;
+ }
+
+ /* Pretty disas. */
+ LOG_DIS("%8.8x:\t", dc->pc);
+
+ if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ gen_io_start();
+ }
+
+ decode(dc, cpu_ldl_code(env, dc->pc));
+ dc->pc += 4;
+ } while (!dc->is_jmp
+ && !tcg_op_buf_full()
+ && !cs->singlestep_enabled
+ && !singlestep
+ && (dc->pc < next_page_start)
+ && num_insns < max_insns);
+
+ if (tb->cflags & CF_LAST_IO) {
+ gen_io_end();
+ }
+
+ if (unlikely(cs->singlestep_enabled)) {
+ if (dc->is_jmp == DISAS_NEXT) {
+ tcg_gen_movi_tl(cpu_pc, dc->pc);
+ }
+ t_gen_raise_exception(dc, EXCP_DEBUG);
+ } else {
+ switch (dc->is_jmp) {
+ case DISAS_NEXT:
+ gen_goto_tb(dc, 1, dc->pc);
+ break;
+ default:
+ case DISAS_JUMP:
+ case DISAS_UPDATE:
+ /* indicate that the hash table must be used
+ to find the next TB */
+ tcg_gen_exit_tb(0);
+ break;
+ case DISAS_TB_JUMP:
+ /* nothing more to generate */
+ break;
+ }
+ }
+
+ gen_tb_end(tb, num_insns);
+
+ tb->size = dc->pc - pc_start;
+ tb->icount = num_insns;
+
+#ifdef DEBUG_DISAS
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
+ && qemu_log_in_addr_range(pc_start)) {
+ qemu_log_lock();
+ qemu_log("\n");
+ log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
+ qemu_log("\nisize=%d osize=%d\n",
+ dc->pc - pc_start, tcg_op_buf_count());
+ qemu_log_unlock();
+ }
+#endif
+}
+
+void lm32_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
+ int flags)
+{
+ LM32CPU *cpu = LM32_CPU(cs);
+ CPULM32State *env = &cpu->env;
+ int i;
+
+ if (!env || !f) {
+ return;
+ }
+
+ cpu_fprintf(f, "IN: PC=%x %s\n",
+ env->pc, lookup_symbol(env->pc));
+
+ cpu_fprintf(f, "ie=%8.8x (IE=%x EIE=%x BIE=%x) im=%8.8x ip=%8.8x\n",
+ env->ie,
+ (env->ie & IE_IE) ? 1 : 0,
+ (env->ie & IE_EIE) ? 1 : 0,
+ (env->ie & IE_BIE) ? 1 : 0,
+ lm32_pic_get_im(env->pic_state),
+ lm32_pic_get_ip(env->pic_state));
+ cpu_fprintf(f, "eba=%8.8x deba=%8.8x\n",
+ env->eba,
+ env->deba);
+
+ for (i = 0; i < 32; i++) {
+ cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
+ if ((i + 1) % 4 == 0) {
+ cpu_fprintf(f, "\n");
+ }
+ }
+ cpu_fprintf(f, "\n\n");
+}
+
+void restore_state_to_opc(CPULM32State *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ env->pc = data[0];
+}
+
+void lm32_translate_init(void)
+{
+ int i;
+
+ cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+ tcg_ctx.tcg_env = cpu_env;
+
+ for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
+ cpu_R[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPULM32State, regs[i]),
+ regnames[i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cpu_bp); i++) {
+ cpu_bp[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPULM32State, bp[i]),
+ regnames[32+i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cpu_wp); i++) {
+ cpu_wp[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPULM32State, wp[i]),
+ regnames[36+i]);
+ }
+
+ cpu_pc = tcg_global_mem_new(cpu_env,
+ offsetof(CPULM32State, pc),
+ "pc");
+ cpu_ie = tcg_global_mem_new(cpu_env,
+ offsetof(CPULM32State, ie),
+ "ie");
+ cpu_icc = tcg_global_mem_new(cpu_env,
+ offsetof(CPULM32State, icc),
+ "icc");
+ cpu_dcc = tcg_global_mem_new(cpu_env,
+ offsetof(CPULM32State, dcc),
+ "dcc");
+ cpu_cc = tcg_global_mem_new(cpu_env,
+ offsetof(CPULM32State, cc),
+ "cc");
+ cpu_cfg = tcg_global_mem_new(cpu_env,
+ offsetof(CPULM32State, cfg),
+ "cfg");
+ cpu_eba = tcg_global_mem_new(cpu_env,
+ offsetof(CPULM32State, eba),
+ "eba");
+ cpu_dc = tcg_global_mem_new(cpu_env,
+ offsetof(CPULM32State, dc),
+ "dc");
+ cpu_deba = tcg_global_mem_new(cpu_env,
+ offsetof(CPULM32State, deba),
+ "deba");
+}
+
diff --git a/target/m68k/Makefile.objs b/target/m68k/Makefile.objs
new file mode 100644
index 0000000000..02cf616a78
--- /dev/null
+++ b/target/m68k/Makefile.objs
@@ -0,0 +1,3 @@
+obj-y += m68k-semi.o
+obj-y += translate.o op_helper.o helper.o cpu.o
+obj-y += gdbstub.o
diff --git a/target/m68k/cpu-qom.h b/target/m68k/cpu-qom.h
new file mode 100644
index 0000000000..9885bba317
--- /dev/null
+++ b/target/m68k/cpu-qom.h
@@ -0,0 +1,52 @@
+/*
+ * QEMU Motorola 68k CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+#ifndef QEMU_M68K_CPU_QOM_H
+#define QEMU_M68K_CPU_QOM_H
+
+#include "qom/cpu.h"
+
+#define TYPE_M68K_CPU "m68k-cpu"
+
+#define M68K_CPU_CLASS(klass) \
+ OBJECT_CLASS_CHECK(M68kCPUClass, (klass), TYPE_M68K_CPU)
+#define M68K_CPU(obj) \
+ OBJECT_CHECK(M68kCPU, (obj), TYPE_M68K_CPU)
+#define M68K_CPU_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(M68kCPUClass, (obj), TYPE_M68K_CPU)
+
+/**
+ * M68kCPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ *
+ * A Motorola 68k CPU model.
+ */
+typedef struct M68kCPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+ void (*parent_reset)(CPUState *cpu);
+} M68kCPUClass;
+
+typedef struct M68kCPU M68kCPU;
+
+#endif
diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c
new file mode 100644
index 0000000000..ba17480098
--- /dev/null
+++ b/target/m68k/cpu.c
@@ -0,0 +1,324 @@
+/*
+ * QEMU Motorola 68k CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "qemu-common.h"
+#include "migration/vmstate.h"
+#include "exec/exec-all.h"
+
+
+static void m68k_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ M68kCPU *cpu = M68K_CPU(cs);
+
+ cpu->env.pc = value;
+}
+
+static bool m68k_cpu_has_work(CPUState *cs)
+{
+ return cs->interrupt_request & CPU_INTERRUPT_HARD;
+}
+
+static void m68k_set_feature(CPUM68KState *env, int feature)
+{
+ env->features |= (1u << feature);
+}
+
+/* CPUClass::reset() */
+static void m68k_cpu_reset(CPUState *s)
+{
+ M68kCPU *cpu = M68K_CPU(s);
+ M68kCPUClass *mcc = M68K_CPU_GET_CLASS(cpu);
+ CPUM68KState *env = &cpu->env;
+
+ mcc->parent_reset(s);
+
+ memset(env, 0, offsetof(CPUM68KState, features));
+#if !defined(CONFIG_USER_ONLY)
+ env->sr = 0x2700;
+#endif
+ m68k_switch_sp(env);
+ /* ??? FP regs should be initialized to NaN. */
+ cpu_m68k_set_ccr(env, 0);
+ /* TODO: We should set PC from the interrupt vector. */
+ env->pc = 0;
+ tlb_flush(s, 1);
+}
+
+static void m68k_cpu_disas_set_info(CPUState *s, disassemble_info *info)
+{
+ M68kCPU *cpu = M68K_CPU(s);
+ CPUM68KState *env = &cpu->env;
+ info->print_insn = print_insn_m68k;
+ if (m68k_feature(env, M68K_FEATURE_M68000)) {
+ info->mach = bfd_mach_m68040;
+ }
+}
+
+/* CPU models */
+
+static ObjectClass *m68k_cpu_class_by_name(const char *cpu_model)
+{
+ ObjectClass *oc;
+ char *typename;
+
+ if (cpu_model == NULL) {
+ return NULL;
+ }
+
+ typename = g_strdup_printf("%s-" TYPE_M68K_CPU, cpu_model);
+ oc = object_class_by_name(typename);
+ g_free(typename);
+ if (oc != NULL && (object_class_dynamic_cast(oc, TYPE_M68K_CPU) == NULL ||
+ object_class_is_abstract(oc))) {
+ return NULL;
+ }
+ return oc;
+}
+
+static void m5206_cpu_initfn(Object *obj)
+{
+ M68kCPU *cpu = M68K_CPU(obj);
+ CPUM68KState *env = &cpu->env;
+
+ m68k_set_feature(env, M68K_FEATURE_CF_ISA_A);
+}
+
+static void m68000_cpu_initfn(Object *obj)
+{
+ M68kCPU *cpu = M68K_CPU(obj);
+ CPUM68KState *env = &cpu->env;
+
+ m68k_set_feature(env, M68K_FEATURE_M68000);
+ m68k_set_feature(env, M68K_FEATURE_USP);
+ m68k_set_feature(env, M68K_FEATURE_WORD_INDEX);
+}
+
+static void m68020_cpu_initfn(Object *obj)
+{
+ M68kCPU *cpu = M68K_CPU(obj);
+ CPUM68KState *env = &cpu->env;
+
+ m68k_set_feature(env, M68K_FEATURE_M68000);
+ m68k_set_feature(env, M68K_FEATURE_USP);
+ m68k_set_feature(env, M68K_FEATURE_WORD_INDEX);
+ m68k_set_feature(env, M68K_FEATURE_QUAD_MULDIV);
+ m68k_set_feature(env, M68K_FEATURE_BRAL);
+ m68k_set_feature(env, M68K_FEATURE_BCCL);
+ m68k_set_feature(env, M68K_FEATURE_BITFIELD);
+ m68k_set_feature(env, M68K_FEATURE_EXT_FULL);
+ m68k_set_feature(env, M68K_FEATURE_SCALED_INDEX);
+ m68k_set_feature(env, M68K_FEATURE_LONG_MULDIV);
+ m68k_set_feature(env, M68K_FEATURE_FPU);
+ m68k_set_feature(env, M68K_FEATURE_CAS);
+ m68k_set_feature(env, M68K_FEATURE_BKPT);
+}
+#define m68030_cpu_initfn m68020_cpu_initfn
+#define m68040_cpu_initfn m68020_cpu_initfn
+
+static void m68060_cpu_initfn(Object *obj)
+{
+ M68kCPU *cpu = M68K_CPU(obj);
+ CPUM68KState *env = &cpu->env;
+
+ m68k_set_feature(env, M68K_FEATURE_M68000);
+ m68k_set_feature(env, M68K_FEATURE_USP);
+ m68k_set_feature(env, M68K_FEATURE_WORD_INDEX);
+ m68k_set_feature(env, M68K_FEATURE_BRAL);
+ m68k_set_feature(env, M68K_FEATURE_BCCL);
+ m68k_set_feature(env, M68K_FEATURE_BITFIELD);
+ m68k_set_feature(env, M68K_FEATURE_EXT_FULL);
+ m68k_set_feature(env, M68K_FEATURE_SCALED_INDEX);
+ m68k_set_feature(env, M68K_FEATURE_LONG_MULDIV);
+ m68k_set_feature(env, M68K_FEATURE_FPU);
+ m68k_set_feature(env, M68K_FEATURE_CAS);
+ m68k_set_feature(env, M68K_FEATURE_BKPT);
+}
+
+static void m5208_cpu_initfn(Object *obj)
+{
+ M68kCPU *cpu = M68K_CPU(obj);
+ CPUM68KState *env = &cpu->env;
+
+ m68k_set_feature(env, M68K_FEATURE_CF_ISA_A);
+ m68k_set_feature(env, M68K_FEATURE_CF_ISA_APLUSC);
+ m68k_set_feature(env, M68K_FEATURE_BRAL);
+ m68k_set_feature(env, M68K_FEATURE_CF_EMAC);
+ m68k_set_feature(env, M68K_FEATURE_USP);
+}
+
+static void cfv4e_cpu_initfn(Object *obj)
+{
+ M68kCPU *cpu = M68K_CPU(obj);
+ CPUM68KState *env = &cpu->env;
+
+ m68k_set_feature(env, M68K_FEATURE_CF_ISA_A);
+ m68k_set_feature(env, M68K_FEATURE_CF_ISA_B);
+ m68k_set_feature(env, M68K_FEATURE_BRAL);
+ m68k_set_feature(env, M68K_FEATURE_CF_FPU);
+ m68k_set_feature(env, M68K_FEATURE_CF_EMAC);
+ m68k_set_feature(env, M68K_FEATURE_USP);
+}
+
+static void any_cpu_initfn(Object *obj)
+{
+ M68kCPU *cpu = M68K_CPU(obj);
+ CPUM68KState *env = &cpu->env;
+
+ m68k_set_feature(env, M68K_FEATURE_CF_ISA_A);
+ m68k_set_feature(env, M68K_FEATURE_CF_ISA_B);
+ m68k_set_feature(env, M68K_FEATURE_CF_ISA_APLUSC);
+ m68k_set_feature(env, M68K_FEATURE_BRAL);
+ m68k_set_feature(env, M68K_FEATURE_CF_FPU);
+ /* MAC and EMAC are mutually exclusive, so pick EMAC.
+ It's mostly backwards compatible. */
+ m68k_set_feature(env, M68K_FEATURE_CF_EMAC);
+ m68k_set_feature(env, M68K_FEATURE_CF_EMAC_B);
+ m68k_set_feature(env, M68K_FEATURE_USP);
+ m68k_set_feature(env, M68K_FEATURE_EXT_FULL);
+ m68k_set_feature(env, M68K_FEATURE_WORD_INDEX);
+}
+
+typedef struct M68kCPUInfo {
+ const char *name;
+ void (*instance_init)(Object *obj);
+} M68kCPUInfo;
+
+static const M68kCPUInfo m68k_cpus[] = {
+ { .name = "m68000", .instance_init = m68000_cpu_initfn },
+ { .name = "m68020", .instance_init = m68020_cpu_initfn },
+ { .name = "m68030", .instance_init = m68030_cpu_initfn },
+ { .name = "m68040", .instance_init = m68040_cpu_initfn },
+ { .name = "m68060", .instance_init = m68060_cpu_initfn },
+ { .name = "m5206", .instance_init = m5206_cpu_initfn },
+ { .name = "m5208", .instance_init = m5208_cpu_initfn },
+ { .name = "cfv4e", .instance_init = cfv4e_cpu_initfn },
+ { .name = "any", .instance_init = any_cpu_initfn },
+};
+
+static void m68k_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ M68kCPU *cpu = M68K_CPU(dev);
+ M68kCPUClass *mcc = M68K_CPU_GET_CLASS(dev);
+ Error *local_err = NULL;
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ m68k_cpu_init_gdb(cpu);
+
+ cpu_reset(cs);
+ qemu_init_vcpu(cs);
+
+ mcc->parent_realize(dev, errp);
+}
+
+static void m68k_cpu_initfn(Object *obj)
+{
+ CPUState *cs = CPU(obj);
+ M68kCPU *cpu = M68K_CPU(obj);
+ CPUM68KState *env = &cpu->env;
+ static bool inited;
+
+ cs->env_ptr = env;
+
+ if (tcg_enabled() && !inited) {
+ inited = true;
+ m68k_tcg_init();
+ }
+}
+
+static const VMStateDescription vmstate_m68k_cpu = {
+ .name = "cpu",
+ .unmigratable = 1,
+};
+
+static void m68k_cpu_class_init(ObjectClass *c, void *data)
+{
+ M68kCPUClass *mcc = M68K_CPU_CLASS(c);
+ CPUClass *cc = CPU_CLASS(c);
+ DeviceClass *dc = DEVICE_CLASS(c);
+
+ mcc->parent_realize = dc->realize;
+ dc->realize = m68k_cpu_realizefn;
+
+ mcc->parent_reset = cc->reset;
+ cc->reset = m68k_cpu_reset;
+
+ cc->class_by_name = m68k_cpu_class_by_name;
+ cc->has_work = m68k_cpu_has_work;
+ cc->do_interrupt = m68k_cpu_do_interrupt;
+ cc->cpu_exec_interrupt = m68k_cpu_exec_interrupt;
+ cc->dump_state = m68k_cpu_dump_state;
+ cc->set_pc = m68k_cpu_set_pc;
+ cc->gdb_read_register = m68k_cpu_gdb_read_register;
+ cc->gdb_write_register = m68k_cpu_gdb_write_register;
+#ifdef CONFIG_USER_ONLY
+ cc->handle_mmu_fault = m68k_cpu_handle_mmu_fault;
+#else
+ cc->get_phys_page_debug = m68k_cpu_get_phys_page_debug;
+#endif
+ cc->disas_set_info = m68k_cpu_disas_set_info;
+
+ cc->gdb_num_core_regs = 18;
+ cc->gdb_core_xml_file = "cf-core.xml";
+
+ dc->vmsd = &vmstate_m68k_cpu;
+}
+
+static void register_cpu_type(const M68kCPUInfo *info)
+{
+ TypeInfo type_info = {
+ .parent = TYPE_M68K_CPU,
+ .instance_init = info->instance_init,
+ };
+
+ type_info.name = g_strdup_printf("%s-" TYPE_M68K_CPU, info->name);
+ type_register(&type_info);
+ g_free((void *)type_info.name);
+}
+
+static const TypeInfo m68k_cpu_type_info = {
+ .name = TYPE_M68K_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(M68kCPU),
+ .instance_init = m68k_cpu_initfn,
+ .abstract = true,
+ .class_size = sizeof(M68kCPUClass),
+ .class_init = m68k_cpu_class_init,
+};
+
+static void m68k_cpu_register_types(void)
+{
+ int i;
+
+ type_register_static(&m68k_cpu_type_info);
+ for (i = 0; i < ARRAY_SIZE(m68k_cpus); i++) {
+ register_cpu_type(&m68k_cpus[i]);
+ }
+}
+
+type_init(m68k_cpu_register_types)
diff --git a/target/m68k/cpu.h b/target/m68k/cpu.h
new file mode 100644
index 0000000000..6dfb54eb70
--- /dev/null
+++ b/target/m68k/cpu.h
@@ -0,0 +1,308 @@
+/*
+ * m68k virtual CPU header
+ *
+ * Copyright (c) 2005-2007 CodeSourcery
+ * Written by Paul Brook
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef M68K_CPU_H
+#define M68K_CPU_H
+
+#define TARGET_LONG_BITS 32
+
+#define CPUArchState struct CPUM68KState
+
+#include "qemu-common.h"
+#include "exec/cpu-defs.h"
+#include "cpu-qom.h"
+#include "fpu/softfloat.h"
+
+#define OS_BYTE 0
+#define OS_WORD 1
+#define OS_LONG 2
+#define OS_SINGLE 3
+#define OS_DOUBLE 4
+#define OS_EXTENDED 5
+#define OS_PACKED 6
+
+#define MAX_QREGS 32
+
+#define EXCP_ACCESS 2 /* Access (MMU) error. */
+#define EXCP_ADDRESS 3 /* Address error. */
+#define EXCP_ILLEGAL 4 /* Illegal instruction. */
+#define EXCP_DIV0 5 /* Divide by zero */
+#define EXCP_PRIVILEGE 8 /* Privilege violation. */
+#define EXCP_TRACE 9
+#define EXCP_LINEA 10 /* Unimplemented line-A (MAC) opcode. */
+#define EXCP_LINEF 11 /* Unimplemented line-F (FPU) opcode. */
+#define EXCP_DEBUGNBP 12 /* Non-breakpoint debug interrupt. */
+#define EXCP_DEBEGBP 13 /* Breakpoint debug interrupt. */
+#define EXCP_FORMAT 14 /* RTE format error. */
+#define EXCP_UNINITIALIZED 15
+#define EXCP_TRAP0 32 /* User trap #0. */
+#define EXCP_TRAP15 47 /* User trap #15. */
+#define EXCP_UNSUPPORTED 61
+#define EXCP_ICE 13
+
+#define EXCP_RTE 0x100
+#define EXCP_HALT_INSN 0x101
+
+#define NB_MMU_MODES 2
+#define TARGET_INSN_START_EXTRA_WORDS 1
+
+typedef struct CPUM68KState {
+ uint32_t dregs[8];
+ uint32_t aregs[8];
+ uint32_t pc;
+ uint32_t sr;
+
+ /* SSP and USP. The current_sp is stored in aregs[7], the other here. */
+ int current_sp;
+ uint32_t sp[2];
+
+ /* Condition flags. */
+ uint32_t cc_op;
+ uint32_t cc_x; /* always 0/1 */
+ uint32_t cc_n; /* in bit 31 (i.e. negative) */
+ uint32_t cc_v; /* in bit 31, unused, or computed from cc_n and cc_v */
+ uint32_t cc_c; /* either 0/1, unused, or computed from cc_n and cc_v */
+ uint32_t cc_z; /* == 0 or unused */
+
+ float64 fregs[8];
+ float64 fp_result;
+ uint32_t fpcr;
+ uint32_t fpsr;
+ float_status fp_status;
+
+ uint64_t mactmp;
+ /* The EMAC hardware deals with 48-bit values composed of one 32-bit and
+ two 8-bit parts. We store a single 64-bit value and
+ rearrange/extend this when changing modes. */
+ uint64_t macc[4];
+ uint32_t macsr;
+ uint32_t mac_mask;
+
+ /* Temporary storage for DIV helpers. */
+ uint32_t div1;
+ uint32_t div2;
+
+ /* MMU status. */
+ struct {
+ uint32_t ar;
+ } mmu;
+
+ /* Control registers. */
+ uint32_t vbr;
+ uint32_t mbar;
+ uint32_t rambar0;
+ uint32_t cacr;
+
+ int pending_vector;
+ int pending_level;
+
+ uint32_t qregs[MAX_QREGS];
+
+ CPU_COMMON
+
+ /* Fields from here on are preserved across CPU reset. */
+ uint32_t features;
+} CPUM68KState;
+
+/**
+ * M68kCPU:
+ * @env: #CPUM68KState
+ *
+ * A Motorola 68k CPU.
+ */
+struct M68kCPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPUM68KState env;
+};
+
+static inline M68kCPU *m68k_env_get_cpu(CPUM68KState *env)
+{
+ return container_of(env, M68kCPU, env);
+}
+
+#define ENV_GET_CPU(e) CPU(m68k_env_get_cpu(e))
+
+#define ENV_OFFSET offsetof(M68kCPU, env)
+
+void m68k_cpu_do_interrupt(CPUState *cpu);
+bool m68k_cpu_exec_interrupt(CPUState *cpu, int int_req);
+void m68k_cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
+ int flags);
+hwaddr m68k_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+int m68k_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
+int m68k_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+
+void m68k_tcg_init(void);
+void m68k_cpu_init_gdb(M68kCPU *cpu);
+M68kCPU *cpu_m68k_init(const char *cpu_model);
+/* You can call this signal handler from your SIGBUS and SIGSEGV
+ signal handlers to inform the virtual CPU of exceptions. Non-zero
+ is returned if the signal was handled by the virtual CPU. */
+int cpu_m68k_signal_handler(int host_signum, void *pinfo,
+ void *puc);
+uint32_t cpu_m68k_get_ccr(CPUM68KState *env);
+void cpu_m68k_set_ccr(CPUM68KState *env, uint32_t);
+
+
+/* Instead of computing the condition codes after each m68k instruction,
+ * QEMU keeps the inputs/result of the last flag-setting operation in the
+ * cc_* fields (cc_n, cc_v, cc_x, ...) and records the kind of operation
+ * in cc_op. The individual flags are only reconstructed from this
+ * information when they are actually needed, and they are not
+ * materialised at all when a value is only used for a conditional branch.
+ */
+typedef enum {
+ /* Translator only -- use env->cc_op. */
+ CC_OP_DYNAMIC = -1,
+
+ /* Each flag bit computed into cc_[xcnvz]. */
+ CC_OP_FLAGS,
+
+ /* X in cc_x, C = X, N in cc_n, Z in cc_n, V via cc_n/cc_v. */
+ CC_OP_ADDB, CC_OP_ADDW, CC_OP_ADDL,
+ CC_OP_SUBB, CC_OP_SUBW, CC_OP_SUBL,
+
+ /* X in cc_x, {N,Z,C,V} via cc_n/cc_v. */
+ CC_OP_CMPB, CC_OP_CMPW, CC_OP_CMPL,
+
+ /* X in cc_x, C = 0, V = 0, N in cc_n, Z in cc_n. */
+ CC_OP_LOGIC,
+
+ CC_OP_NB
+} CCOp;
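+
+/* Illustrative example of the scheme: after a 32-bit logical operation
+ * whose result is 0x80000000, the translator only has to set
+ * cc_op = CC_OP_LOGIC and cc_n = 0x80000000; cpu_m68k_get_ccr() can then
+ * recover N = 1 (bit 31 of cc_n), Z = 0 (cc_n != 0) and C = V = 0 on
+ * demand, with no flag computation at translation time. */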
+
+#define CCF_C 0x01
+#define CCF_V 0x02
+#define CCF_Z 0x04
+#define CCF_N 0x08
+#define CCF_X 0x10
+
+#define SR_I_SHIFT 8
+#define SR_I 0x0700
+#define SR_M 0x1000
+#define SR_S 0x2000
+#define SR_T 0x8000
+
+#define M68K_SSP 0
+#define M68K_USP 1
+
+/* CACR fields are implementation defined, but some bits are common. */
+#define M68K_CACR_EUSP 0x10
+
+#define MACSR_PAV0 0x100
+#define MACSR_OMC 0x080
+#define MACSR_SU 0x040
+#define MACSR_FI 0x020
+#define MACSR_RT 0x010
+#define MACSR_N 0x008
+#define MACSR_Z 0x004
+#define MACSR_V 0x002
+#define MACSR_EV 0x001
+
+void m68k_set_irq_level(M68kCPU *cpu, int level, uint8_t vector);
+void m68k_switch_sp(CPUM68KState *env);
+
+#define M68K_FPCR_PREC (1 << 6)
+
+void do_m68k_semihosting(CPUM68KState *env, int nr);
+
+/* There are 4 ColdFire core ISA revisions: A, A+, B and C.
+ Each feature covers the subset of instructions common to the
+ ISA revisions mentioned. */
+
+enum m68k_features {
+ M68K_FEATURE_M68000,
+ M68K_FEATURE_CF_ISA_A,
+ M68K_FEATURE_CF_ISA_B, /* (ISA B or C). */
+ M68K_FEATURE_CF_ISA_APLUSC, /* BIT/BITREV, FF1, STRLDSR (ISA A+ or C). */
+ M68K_FEATURE_BRAL, /* Long unconditional branch. (ISA A+ or B). */
+ M68K_FEATURE_CF_FPU,
+ M68K_FEATURE_CF_MAC,
+ M68K_FEATURE_CF_EMAC,
+ M68K_FEATURE_CF_EMAC_B, /* Revision B EMAC (dual accumulate). */
+ M68K_FEATURE_USP, /* User Stack Pointer. (ISA A+, B or C). */
+ M68K_FEATURE_EXT_FULL, /* 68020+ full extension word. */
+ M68K_FEATURE_WORD_INDEX, /* word sized address index registers. */
+ M68K_FEATURE_SCALED_INDEX, /* scaled address index registers. */
+ M68K_FEATURE_LONG_MULDIV, /* 32 bit multiply/divide. */
+ M68K_FEATURE_QUAD_MULDIV, /* 64 bit multiply/divide. */
+ M68K_FEATURE_BCCL, /* Long conditional branches. */
+ M68K_FEATURE_BITFIELD, /* Bit field insns. */
+ M68K_FEATURE_FPU,
+ M68K_FEATURE_CAS,
+ M68K_FEATURE_BKPT,
+};
+
+static inline int m68k_feature(CPUM68KState *env, int feature)
+{
+ return (env->features & (1u << feature)) != 0;
+}
+
+void m68k_cpu_list(FILE *f, fprintf_function cpu_fprintf);
+
+void register_m68k_insns (CPUM68KState *env);
+
+#ifdef CONFIG_USER_ONLY
+/* ColdFire Linux uses 8k pages and m68k Linux uses 4k pages;
+ * use the smaller one.
+ */
+#define TARGET_PAGE_BITS 12
+#else
+/* Smallest TLB entry size is 1k. */
+#define TARGET_PAGE_BITS 10
+#endif
+
+#define TARGET_PHYS_ADDR_SPACE_BITS 32
+#define TARGET_VIRT_ADDR_SPACE_BITS 32
+
+#define cpu_init(cpu_model) CPU(cpu_m68k_init(cpu_model))
+
+#define cpu_signal_handler cpu_m68k_signal_handler
+#define cpu_list m68k_cpu_list
+
+/* MMU modes definitions */
+#define MMU_MODE0_SUFFIX _kernel
+#define MMU_MODE1_SUFFIX _user
+#define MMU_USER_IDX 1
+static inline int cpu_mmu_index (CPUM68KState *env, bool ifetch)
+{
+ return (env->sr & SR_S) == 0 ? 1 : 0;
+}
+
+int m68k_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
+ int mmu_idx);
+
+#include "exec/cpu-all.h"
+
+static inline void cpu_get_tb_cpu_state(CPUM68KState *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *flags)
+{
+ *pc = env->pc;
+ *cs_base = 0;
+ *flags = (env->fpcr & M68K_FPCR_PREC) /* Bit 6 */
+ | (env->sr & SR_S) /* Bit 13 */
+ | ((env->macsr >> 4) & 0xf); /* Bits 0-3 */
+}
+
+#endif
diff --git a/target/m68k/gdbstub.c b/target/m68k/gdbstub.c
new file mode 100644
index 0000000000..c7f44c9bb3
--- /dev/null
+++ b/target/m68k/gdbstub.c
@@ -0,0 +1,76 @@
+/*
+ * m68k gdb server stub
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+
+int m68k_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ M68kCPU *cpu = M68K_CPU(cs);
+ CPUM68KState *env = &cpu->env;
+
+ if (n < 8) {
+ /* D0-D7 */
+ return gdb_get_reg32(mem_buf, env->dregs[n]);
+ } else if (n < 16) {
+ /* A0-A7 */
+ return gdb_get_reg32(mem_buf, env->aregs[n - 8]);
+ } else {
+ switch (n) {
+ case 16:
+ return gdb_get_reg32(mem_buf, env->sr);
+ case 17:
+ return gdb_get_reg32(mem_buf, env->pc);
+ }
+ }
+ /* FP registers not included here because they vary between
+ ColdFire and m68k. Use XML bits for these. */
+ return 0;
+}
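+
+/* Both accessors use the same numbering: 0-7 are D0-D7, 8-15 are A0-A7,
+ * 16 is SR and 17 is PC, which is what the gdb_num_core_regs = 18 setting
+ * in cpu.c describes. */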
+
+int m68k_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ M68kCPU *cpu = M68K_CPU(cs);
+ CPUM68KState *env = &cpu->env;
+ uint32_t tmp;
+
+ tmp = ldl_p(mem_buf);
+
+ if (n < 8) {
+ /* D0-D7 */
+ env->dregs[n] = tmp;
+ } else if (n < 16) {
+ /* A0-A7 */
+ env->aregs[n - 8] = tmp;
+ } else {
+ switch (n) {
+ case 16:
+ env->sr = tmp;
+ break;
+ case 17:
+ env->pc = tmp;
+ break;
+ default:
+ return 0;
+ }
+ }
+ return 4;
+}
diff --git a/target/m68k/helper.c b/target/m68k/helper.c
new file mode 100644
index 0000000000..7aed9ffd2f
--- /dev/null
+++ b/target/m68k/helper.c
@@ -0,0 +1,810 @@
+/*
+ * m68k op helpers
+ *
+ * Copyright (c) 2006-2007 CodeSourcery
+ * Written by Paul Brook
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/gdbstub.h"
+
+#include "exec/helper-proto.h"
+
+#define SIGNBIT (1u << 31)
+
+/* Sort alphabetically, except for "any". */
+static gint m68k_cpu_list_compare(gconstpointer a, gconstpointer b)
+{
+ ObjectClass *class_a = (ObjectClass *)a;
+ ObjectClass *class_b = (ObjectClass *)b;
+ const char *name_a, *name_b;
+
+ name_a = object_class_get_name(class_a);
+ name_b = object_class_get_name(class_b);
+ if (strcmp(name_a, "any-" TYPE_M68K_CPU) == 0) {
+ return 1;
+ } else if (strcmp(name_b, "any-" TYPE_M68K_CPU) == 0) {
+ return -1;
+ } else {
+ return strcasecmp(name_a, name_b);
+ }
+}
+
+static void m68k_cpu_list_entry(gpointer data, gpointer user_data)
+{
+ ObjectClass *c = data;
+ CPUListState *s = user_data;
+ const char *typename;
+ char *name;
+
+ typename = object_class_get_name(c);
+ name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_M68K_CPU));
+ (*s->cpu_fprintf)(s->file, "%s\n",
+ name);
+ g_free(name);
+}
+
+void m68k_cpu_list(FILE *f, fprintf_function cpu_fprintf)
+{
+ CPUListState s = {
+ .file = f,
+ .cpu_fprintf = cpu_fprintf,
+ };
+ GSList *list;
+
+ list = object_class_get_list(TYPE_M68K_CPU, false);
+ list = g_slist_sort(list, m68k_cpu_list_compare);
+ g_slist_foreach(list, m68k_cpu_list_entry, &s);
+ g_slist_free(list);
+}
+
+static int fpu_gdb_get_reg(CPUM68KState *env, uint8_t *mem_buf, int n)
+{
+ if (n < 8) {
+ stfq_p(mem_buf, env->fregs[n]);
+ return 8;
+ }
+ if (n < 11) {
+ /* FP control registers (not implemented) */
+ memset(mem_buf, 0, 4);
+ return 4;
+ }
+ return 0;
+}
+
+static int fpu_gdb_set_reg(CPUM68KState *env, uint8_t *mem_buf, int n)
+{
+ if (n < 8) {
+ env->fregs[n] = ldfq_p(mem_buf);
+ return 8;
+ }
+ if (n < 11) {
+ /* FP control registers (not implemented) */
+ return 4;
+ }
+ return 0;
+}
+
+M68kCPU *cpu_m68k_init(const char *cpu_model)
+{
+ M68kCPU *cpu;
+ CPUM68KState *env;
+ ObjectClass *oc;
+
+ oc = cpu_class_by_name(TYPE_M68K_CPU, cpu_model);
+ if (oc == NULL) {
+ return NULL;
+ }
+ cpu = M68K_CPU(object_new(object_class_get_name(oc)));
+ env = &cpu->env;
+
+ register_m68k_insns(env);
+
+ object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
+
+ return cpu;
+}
+
+void m68k_cpu_init_gdb(M68kCPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ CPUM68KState *env = &cpu->env;
+
+ if (m68k_feature(env, M68K_FEATURE_CF_FPU)) {
+ gdb_register_coprocessor(cs, fpu_gdb_get_reg, fpu_gdb_set_reg,
+ 11, "cf-fp.xml", 18);
+ }
+ /* TODO: Add [E]MAC registers. */
+}
+
+void HELPER(movec)(CPUM68KState *env, uint32_t reg, uint32_t val)
+{
+ M68kCPU *cpu = m68k_env_get_cpu(env);
+
+ switch (reg) {
+ case 0x02: /* CACR */
+ env->cacr = val;
+ m68k_switch_sp(env);
+ break;
+ case 0x04: case 0x05: case 0x06: case 0x07: /* ACR[0-3] */
+ /* TODO: Implement Access Control Registers. */
+ break;
+ case 0x801: /* VBR */
+ env->vbr = val;
+ break;
+ /* TODO: Implement control registers. */
+ default:
+ cpu_abort(CPU(cpu), "Unimplemented control register write 0x%x = 0x%x\n",
+ reg, val);
+ }
+}
+
+void HELPER(set_macsr)(CPUM68KState *env, uint32_t val)
+{
+ uint32_t acc;
+ int8_t exthigh;
+ uint8_t extlow;
+ uint64_t regval;
+ int i;
+ if ((env->macsr ^ val) & (MACSR_FI | MACSR_SU)) {
+ for (i = 0; i < 4; i++) {
+ regval = env->macc[i];
+ exthigh = regval >> 40;
+ if (env->macsr & MACSR_FI) {
+ acc = regval >> 8;
+ extlow = regval;
+ } else {
+ acc = regval;
+ extlow = regval >> 32;
+ }
+ if (env->macsr & MACSR_FI) {
+ regval = (((uint64_t)acc) << 8) | extlow;
+ regval |= ((int64_t)exthigh) << 40;
+ } else if (env->macsr & MACSR_SU) {
+ regval = acc | (((int64_t)extlow) << 32);
+ regval |= ((int64_t)exthigh) << 40;
+ } else {
+ regval = acc | (((uint64_t)extlow) << 32);
+ regval |= ((uint64_t)(uint8_t)exthigh) << 40;
+ }
+ env->macc[i] = regval;
+ }
+ }
+ env->macsr = val;
+}
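+
+/* Layout implied by the conversion above (sketch): in the integer modes a
+ * 48-bit accumulator is stored with acc in bits 0-31, the low extension
+ * byte in bits 32-39 and the high extension byte in bits 40-47; in
+ * fractional mode (MACSR_FI) the low byte is in bits 0-7, acc in bits 8-39
+ * and the high byte in bits 40-47. set_macsr() repacks macc[] whenever the
+ * FI/SU bits change so the rest of the MAC code can assume the layout of
+ * the current mode. */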
+
+void m68k_switch_sp(CPUM68KState *env)
+{
+ int new_sp;
+
+ env->sp[env->current_sp] = env->aregs[7];
+ new_sp = (env->sr & SR_S && env->cacr & M68K_CACR_EUSP)
+ ? M68K_SSP : M68K_USP;
+ env->aregs[7] = env->sp[new_sp];
+ env->current_sp = new_sp;
+}
+
+#if defined(CONFIG_USER_ONLY)
+
+int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
+ int mmu_idx)
+{
+ M68kCPU *cpu = M68K_CPU(cs);
+
+ cs->exception_index = EXCP_ACCESS;
+ cpu->env.mmu.ar = address;
+ return 1;
+}
+
+#else
+
+/* MMU */
+
+/* TODO: This will need fixing once the MMU is implemented. */
+hwaddr m68k_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+ return addr;
+}
+
+int m68k_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
+ int mmu_idx)
+{
+ int prot;
+
+ address &= TARGET_PAGE_MASK;
+ prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ tlb_set_page(cs, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
+ return 0;
+}
+
+/* Notify CPU of a pending interrupt. Prioritization and vectoring should
+ be handled by the interrupt controller. Real hardware only requests
+ the vector when the interrupt is acknowledged by the CPU. For
+ simplicity we calculate it when the interrupt is signalled. */
+void m68k_set_irq_level(M68kCPU *cpu, int level, uint8_t vector)
+{
+ CPUState *cs = CPU(cpu);
+ CPUM68KState *env = &cpu->env;
+
+ env->pending_level = level;
+ env->pending_vector = vector;
+ if (level) {
+ cpu_interrupt(cs, CPU_INTERRUPT_HARD);
+ } else {
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
+ }
+}
+
+#endif
+
+uint32_t HELPER(bitrev)(uint32_t x)
+{
+ x = ((x >> 1) & 0x55555555u) | ((x << 1) & 0xaaaaaaaau);
+ x = ((x >> 2) & 0x33333333u) | ((x << 2) & 0xccccccccu);
+ x = ((x >> 4) & 0x0f0f0f0fu) | ((x << 4) & 0xf0f0f0f0u);
+ return bswap32(x);
+}
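+
+/* Illustrative values: bitrev() mirrors a word bit-for-bit, e.g.
+ * bitrev(0x00000001) == 0x80000000 and bitrev(0x12345678) == 0x1e6a2c48.
+ * The pair/nibble swaps reverse the bits within each byte and the final
+ * bswap32() reverses the byte order. */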
+
+uint32_t HELPER(ff1)(uint32_t x)
+{
+ int n;
+ for (n = 32; x; n--)
+ x >>= 1;
+ return n;
+}
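+
+/* Illustrative values: the loop returns the distance of the most
+ * significant set bit from bit 31, matching ColdFire FF1 semantics:
+ * ff1(0x80000000) == 0, ff1(0x00000001) == 31 and ff1(0) == 32. */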
+
+uint32_t HELPER(sats)(uint32_t val, uint32_t v)
+{
+ /* The result has the opposite sign to the original value. */
+ if ((int32_t)v < 0) {
+ val = (((int32_t)val) >> 31) ^ SIGNBIT;
+ }
+ return val;
+}
+
+void HELPER(set_sr)(CPUM68KState *env, uint32_t val)
+{
+ env->sr = val & 0xffe0;
+ cpu_m68k_set_ccr(env, val);
+ m68k_switch_sp(env);
+}
+
+uint32_t HELPER(shl_cc)(CPUM68KState *env, uint32_t val, uint32_t shift)
+{
+ uint64_t result;
+
+ shift &= 63;
+ result = (uint64_t)val << shift;
+
+ env->cc_c = (result >> 32) & 1;
+ env->cc_n = result;
+ env->cc_z = result;
+ env->cc_v = 0;
+ env->cc_x = shift ? env->cc_c : env->cc_x;
+
+ return result;
+}
+
+uint32_t HELPER(shr_cc)(CPUM68KState *env, uint32_t val, uint32_t shift)
+{
+ uint64_t temp;
+ uint32_t result;
+
+ shift &= 63;
+ temp = (uint64_t)val << 32 >> shift;
+ result = temp >> 32;
+
+ env->cc_c = (temp >> 31) & 1;
+ env->cc_n = result;
+ env->cc_z = result;
+ env->cc_v = 0;
+ env->cc_x = shift ? env->cc_c : env->cc_x;
+
+ return result;
+}
+
+uint32_t HELPER(sar_cc)(CPUM68KState *env, uint32_t val, uint32_t shift)
+{
+ uint64_t temp;
+ uint32_t result;
+
+ shift &= 63;
+ temp = (int64_t)val << 32 >> shift;
+ result = temp >> 32;
+
+ env->cc_c = (temp >> 31) & 1;
+ env->cc_n = result;
+ env->cc_z = result;
+ env->cc_v = result ^ val;
+ env->cc_x = shift ? env->cc_c : env->cc_x;
+
+ return result;
+}
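+
+/* Illustrative values: the *_cc shift helpers widen to 64 bits so the last
+ * bit shifted out ends up in a fixed position. For example,
+ * shl_cc(env, 0x80000000, 1) returns 0 with cc_c == 1, and
+ * shr_cc(env, 0x00000003, 1) returns 1 with cc_c == 1. X is only updated
+ * for a non-zero shift count, matching the documented zero-count
+ * behaviour. */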
+
+/* FPU helpers. */
+uint32_t HELPER(f64_to_i32)(CPUM68KState *env, float64 val)
+{
+ return float64_to_int32(val, &env->fp_status);
+}
+
+float32 HELPER(f64_to_f32)(CPUM68KState *env, float64 val)
+{
+ return float64_to_float32(val, &env->fp_status);
+}
+
+float64 HELPER(i32_to_f64)(CPUM68KState *env, uint32_t val)
+{
+ return int32_to_float64(val, &env->fp_status);
+}
+
+float64 HELPER(f32_to_f64)(CPUM68KState *env, float32 val)
+{
+ return float32_to_float64(val, &env->fp_status);
+}
+
+float64 HELPER(iround_f64)(CPUM68KState *env, float64 val)
+{
+ return float64_round_to_int(val, &env->fp_status);
+}
+
+float64 HELPER(itrunc_f64)(CPUM68KState *env, float64 val)
+{
+ return float64_trunc_to_int(val, &env->fp_status);
+}
+
+float64 HELPER(sqrt_f64)(CPUM68KState *env, float64 val)
+{
+ return float64_sqrt(val, &env->fp_status);
+}
+
+float64 HELPER(abs_f64)(float64 val)
+{
+ return float64_abs(val);
+}
+
+float64 HELPER(chs_f64)(float64 val)
+{
+ return float64_chs(val);
+}
+
+float64 HELPER(add_f64)(CPUM68KState *env, float64 a, float64 b)
+{
+ return float64_add(a, b, &env->fp_status);
+}
+
+float64 HELPER(sub_f64)(CPUM68KState *env, float64 a, float64 b)
+{
+ return float64_sub(a, b, &env->fp_status);
+}
+
+float64 HELPER(mul_f64)(CPUM68KState *env, float64 a, float64 b)
+{
+ return float64_mul(a, b, &env->fp_status);
+}
+
+float64 HELPER(div_f64)(CPUM68KState *env, float64 a, float64 b)
+{
+ return float64_div(a, b, &env->fp_status);
+}
+
+float64 HELPER(sub_cmp_f64)(CPUM68KState *env, float64 a, float64 b)
+{
+ /* ??? This may incorrectly raise exceptions. */
+ /* ??? Should flush denormals to zero. */
+ float64 res;
+ res = float64_sub(a, b, &env->fp_status);
+ if (float64_is_quiet_nan(res, &env->fp_status)) {
+ /* +/-inf compares equal against itself, but sub returns nan. */
+ if (!float64_is_quiet_nan(a, &env->fp_status)
+ && !float64_is_quiet_nan(b, &env->fp_status)) {
+ res = float64_zero;
+ if (float64_lt_quiet(a, res, &env->fp_status))
+ res = float64_chs(res);
+ }
+ }
+ return res;
+}
+
+uint32_t HELPER(compare_f64)(CPUM68KState *env, float64 val)
+{
+ return float64_compare_quiet(val, float64_zero, &env->fp_status);
+}
+
+/* MAC unit. */
+/* FIXME: The MAC unit implementation is a bit of a mess. Some helpers
+ take values, others take register numbers and manipulate the contents
+ in-place. */
+void HELPER(mac_move)(CPUM68KState *env, uint32_t dest, uint32_t src)
+{
+ uint32_t mask;
+ env->macc[dest] = env->macc[src];
+ mask = MACSR_PAV0 << dest;
+ if (env->macsr & (MACSR_PAV0 << src))
+ env->macsr |= mask;
+ else
+ env->macsr &= ~mask;
+}
+
+uint64_t HELPER(macmuls)(CPUM68KState *env, uint32_t op1, uint32_t op2)
+{
+ int64_t product;
+ int64_t res;
+
+ product = (uint64_t)op1 * op2;
+ res = (product << 24) >> 24;
+ if (res != product) {
+ env->macsr |= MACSR_V;
+ if (env->macsr & MACSR_OMC) {
+ /* Make sure the accumulate operation overflows. */
+ if (product < 0)
+ res = ~(1ll << 50);
+ else
+ res = 1ll << 50;
+ }
+ }
+ return res;
+}
+
+uint64_t HELPER(macmulu)(CPUM68KState *env, uint32_t op1, uint32_t op2)
+{
+ uint64_t product;
+
+ product = (uint64_t)op1 * op2;
+ if (product & (0xffffffull << 40)) {
+ env->macsr |= MACSR_V;
+ if (env->macsr & MACSR_OMC) {
+ /* Make sure the accumulate operation overflows. */
+ product = 1ll << 50;
+ } else {
+ product &= ((1ull << 40) - 1);
+ }
+ }
+ return product;
+}
+
+uint64_t HELPER(macmulf)(CPUM68KState *env, uint32_t op1, uint32_t op2)
+{
+ uint64_t product;
+ uint32_t remainder;
+
+ product = (uint64_t)op1 * op2;
+ if (env->macsr & MACSR_RT) {
+ remainder = product & 0xffffff;
+ product >>= 24;
+ if (remainder > 0x800000)
+ product++;
+ else if (remainder == 0x800000)
+ product += (product & 1);
+ } else {
+ product >>= 24;
+ }
+ return product;
+}
+
+void HELPER(macsats)(CPUM68KState *env, uint32_t acc)
+{
+ int64_t tmp;
+ int64_t result;
+ tmp = env->macc[acc];
+ result = ((tmp << 16) >> 16);
+ if (result != tmp) {
+ env->macsr |= MACSR_V;
+ }
+ if (env->macsr & MACSR_V) {
+ env->macsr |= MACSR_PAV0 << acc;
+ if (env->macsr & MACSR_OMC) {
+ /* The result is saturated to 32 bits, despite overflow occurring
+ at 48 bits. Seems weird, but that's what the hardware docs
+ say. */
+ result = (result >> 63) ^ 0x7fffffff;
+ }
+ }
+ env->macc[acc] = result;
+}
+
+void HELPER(macsatu)(CPUM68KState *env, uint32_t acc)
+{
+ uint64_t val;
+
+ val = env->macc[acc];
+ if (val & (0xffffull << 48)) {
+ env->macsr |= MACSR_V;
+ }
+ if (env->macsr & MACSR_V) {
+ env->macsr |= MACSR_PAV0 << acc;
+ if (env->macsr & MACSR_OMC) {
+ if (val > (1ull << 53))
+ val = 0;
+ else
+ val = (1ull << 48) - 1;
+ } else {
+ val &= ((1ull << 48) - 1);
+ }
+ }
+ env->macc[acc] = val;
+}
+
+void HELPER(macsatf)(CPUM68KState *env, uint32_t acc)
+{
+ int64_t sum;
+ int64_t result;
+
+ sum = env->macc[acc];
+ result = (sum << 16) >> 16;
+ if (result != sum) {
+ env->macsr |= MACSR_V;
+ }
+ if (env->macsr & MACSR_V) {
+ env->macsr |= MACSR_PAV0 << acc;
+ if (env->macsr & MACSR_OMC) {
+ result = (result >> 63) ^ 0x7fffffffffffll;
+ }
+ }
+ env->macc[acc] = result;
+}
+
+void HELPER(mac_set_flags)(CPUM68KState *env, uint32_t acc)
+{
+ uint64_t val;
+ val = env->macc[acc];
+ if (val == 0) {
+ env->macsr |= MACSR_Z;
+ } else if (val & (1ull << 47)) {
+ env->macsr |= MACSR_N;
+ }
+ if (env->macsr & (MACSR_PAV0 << acc)) {
+ env->macsr |= MACSR_V;
+ }
+ if (env->macsr & MACSR_FI) {
+ val = ((int64_t)val) >> 40;
+ if (val != 0 && val != -1)
+ env->macsr |= MACSR_EV;
+ } else if (env->macsr & MACSR_SU) {
+ val = ((int64_t)val) >> 32;
+ if (val != 0 && val != -1)
+ env->macsr |= MACSR_EV;
+ } else {
+ if ((val >> 32) != 0)
+ env->macsr |= MACSR_EV;
+ }
+}
+
+#define EXTSIGN(val, index) ( \
+ (index == 0) ? (int8_t)(val) : ((index == 1) ? (int16_t)(val) : (val)) \
+)
+
+#define COMPUTE_CCR(op, x, n, z, v, c) do { \
+ switch (op) { \
+ case CC_OP_FLAGS: \
+ /* Everything in place. */ \
+ break; \
+ case CC_OP_ADDB: \
+ case CC_OP_ADDW: \
+ case CC_OP_ADDL: \
+ res = n; \
+ src2 = v; \
+ src1 = EXTSIGN(res - src2, op - CC_OP_ADDB); \
+ c = x; \
+ z = n; \
+ v = (res ^ src1) & ~(src1 ^ src2); \
+ break; \
+ case CC_OP_SUBB: \
+ case CC_OP_SUBW: \
+ case CC_OP_SUBL: \
+ res = n; \
+ src2 = v; \
+ src1 = EXTSIGN(res + src2, op - CC_OP_SUBB); \
+ c = x; \
+ z = n; \
+ v = (res ^ src1) & (src1 ^ src2); \
+ break; \
+ case CC_OP_CMPB: \
+ case CC_OP_CMPW: \
+ case CC_OP_CMPL: \
+ src1 = n; \
+ src2 = v; \
+ res = EXTSIGN(src1 - src2, op - CC_OP_CMPB); \
+ n = res; \
+ z = res; \
+ c = src1 < src2; \
+ v = (res ^ src1) & (src1 ^ src2); \
+ break; \
+ case CC_OP_LOGIC: \
+ c = v = 0; \
+ z = n; \
+ break; \
+ default: \
+ cpu_abort(CPU(m68k_env_get_cpu(env)), "Bad CC_OP %d", op); \
+ } \
+} while (0)
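+
+/* Worked example (with the result in n and the second operand in v, as the
+ * CC_OP_ADD* convention requires): for 0x7fffffff + 1 the translator leaves
+ * n = 0x80000000, v = 1 and x = 0; the macro reconstructs
+ * src1 = 0x7fffffff, and cpu_m68k_get_ccr() below then reports N = 1,
+ * Z = 0, C = 0 and V = 1 -- the signed overflow is recovered without ever
+ * having been computed at translation time. */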
+
+uint32_t cpu_m68k_get_ccr(CPUM68KState *env)
+{
+ uint32_t x, c, n, z, v;
+ uint32_t res, src1, src2;
+
+ x = env->cc_x;
+ n = env->cc_n;
+ z = env->cc_z;
+ v = env->cc_v;
+ c = env->cc_c;
+
+ COMPUTE_CCR(env->cc_op, x, n, z, v, c);
+
+ n = n >> 31;
+ z = (z == 0);
+ v = v >> 31;
+
+ return x * CCF_X + n * CCF_N + z * CCF_Z + v * CCF_V + c * CCF_C;
+}
+
+uint32_t HELPER(get_ccr)(CPUM68KState *env)
+{
+ return cpu_m68k_get_ccr(env);
+}
+
+void cpu_m68k_set_ccr(CPUM68KState *env, uint32_t ccr)
+{
+ env->cc_x = (ccr & CCF_X ? 1 : 0);
+ env->cc_n = (ccr & CCF_N ? -1 : 0);
+ env->cc_z = (ccr & CCF_Z ? 0 : 1);
+ env->cc_v = (ccr & CCF_V ? -1 : 0);
+ env->cc_c = (ccr & CCF_C ? 1 : 0);
+ env->cc_op = CC_OP_FLAGS;
+}
+
+void HELPER(set_ccr)(CPUM68KState *env, uint32_t ccr)
+{
+ cpu_m68k_set_ccr(env, ccr);
+}
+
+void HELPER(flush_flags)(CPUM68KState *env, uint32_t cc_op)
+{
+ uint32_t res, src1, src2;
+
+ COMPUTE_CCR(cc_op, env->cc_x, env->cc_n, env->cc_z, env->cc_v, env->cc_c);
+ env->cc_op = CC_OP_FLAGS;
+}
+
+uint32_t HELPER(get_macf)(CPUM68KState *env, uint64_t val)
+{
+ int rem;
+ uint32_t result;
+
+ if (env->macsr & MACSR_SU) {
+ /* 16-bit rounding. */
+ rem = val & 0xffffff;
+ val = (val >> 24) & 0xffffu;
+ if (rem > 0x800000)
+ val++;
+ else if (rem == 0x800000)
+ val += (val & 1);
+ } else if (env->macsr & MACSR_RT) {
+ /* 32-bit rounding. */
+ rem = val & 0xff;
+ val >>= 8;
+ if (rem > 0x80)
+ val++;
+ else if (rem == 0x80)
+ val += (val & 1);
+ } else {
+ /* No rounding. */
+ val >>= 8;
+ }
+ if (env->macsr & MACSR_OMC) {
+ /* Saturate. */
+ if (env->macsr & MACSR_SU) {
+ if (val != (uint16_t) val) {
+ result = ((val >> 63) ^ 0x7fff) & 0xffff;
+ } else {
+ result = val & 0xffff;
+ }
+ } else {
+ if (val != (uint32_t)val) {
+ result = ((uint32_t)(val >> 63) & 0x7fffffff);
+ } else {
+ result = (uint32_t)val;
+ }
+ }
+ } else {
+ /* No saturation. */
+ if (env->macsr & MACSR_SU) {
+ result = val & 0xffff;
+ } else {
+ result = (uint32_t)val;
+ }
+ }
+ return result;
+}
+
+uint32_t HELPER(get_macs)(uint64_t val)
+{
+ if (val == (int32_t)val) {
+ return (int32_t)val;
+ } else {
+ return (val >> 61) ^ ~SIGNBIT;
+ }
+}
+
+uint32_t HELPER(get_macu)(uint64_t val)
+{
+ if ((val >> 32) == 0) {
+ return (uint32_t)val;
+ } else {
+ return 0xffffffffu;
+ }
+}
+
+uint32_t HELPER(get_mac_extf)(CPUM68KState *env, uint32_t acc)
+{
+ uint32_t val;
+ val = env->macc[acc] & 0x00ff;
+ val |= (env->macc[acc] >> 32) & 0xff00;
+ val |= (env->macc[acc + 1] << 16) & 0x00ff0000;
+ val |= (env->macc[acc + 1] >> 16) & 0xff000000;
+ return val;
+}
+
+uint32_t HELPER(get_mac_exti)(CPUM68KState *env, uint32_t acc)
+{
+ uint32_t val;
+ val = (env->macc[acc] >> 32) & 0xffff;
+ val |= (env->macc[acc + 1] >> 16) & 0xffff0000;
+ return val;
+}
+
+void HELPER(set_mac_extf)(CPUM68KState *env, uint32_t val, uint32_t acc)
+{
+ int64_t res;
+ int32_t tmp;
+ res = env->macc[acc] & 0xffffffff00ull;
+ tmp = (int16_t)(val & 0xff00);
+ res |= ((int64_t)tmp) << 32;
+ res |= val & 0xff;
+ env->macc[acc] = res;
+ res = env->macc[acc + 1] & 0xffffffff00ull;
+ tmp = (val & 0xff000000);
+ res |= ((int64_t)tmp) << 16;
+ res |= (val >> 16) & 0xff;
+ env->macc[acc + 1] = res;
+}
+
+void HELPER(set_mac_exts)(CPUM68KState *env, uint32_t val, uint32_t acc)
+{
+ int64_t res;
+ int32_t tmp;
+ res = (uint32_t)env->macc[acc];
+ tmp = (int16_t)val;
+ res |= ((int64_t)tmp) << 32;
+ env->macc[acc] = res;
+ res = (uint32_t)env->macc[acc + 1];
+ tmp = val & 0xffff0000;
+ res |= (int64_t)tmp << 16;
+ env->macc[acc + 1] = res;
+}
+
+void HELPER(set_mac_extu)(CPUM68KState *env, uint32_t val, uint32_t acc)
+{
+ uint64_t res;
+ res = (uint32_t)env->macc[acc];
+ res |= ((uint64_t)(val & 0xffff)) << 32;
+ env->macc[acc] = res;
+ res = (uint32_t)env->macc[acc + 1];
+ res |= (uint64_t)(val & 0xffff0000) << 16;
+ env->macc[acc + 1] = res;
+}
diff --git a/target/m68k/helper.h b/target/m68k/helper.h
new file mode 100644
index 0000000000..2697e32d0b
--- /dev/null
+++ b/target/m68k/helper.h
@@ -0,0 +1,49 @@
+DEF_HELPER_1(bitrev, i32, i32)
+DEF_HELPER_1(ff1, i32, i32)
+DEF_HELPER_FLAGS_2(sats, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_2(divu, void, env, i32)
+DEF_HELPER_2(divs, void, env, i32)
+DEF_HELPER_3(shl_cc, i32, env, i32, i32)
+DEF_HELPER_3(shr_cc, i32, env, i32, i32)
+DEF_HELPER_3(sar_cc, i32, env, i32, i32)
+DEF_HELPER_2(set_sr, void, env, i32)
+DEF_HELPER_3(movec, void, env, i32, i32)
+
+DEF_HELPER_2(f64_to_i32, f32, env, f64)
+DEF_HELPER_2(f64_to_f32, f32, env, f64)
+DEF_HELPER_2(i32_to_f64, f64, env, i32)
+DEF_HELPER_2(f32_to_f64, f64, env, f32)
+DEF_HELPER_2(iround_f64, f64, env, f64)
+DEF_HELPER_2(itrunc_f64, f64, env, f64)
+DEF_HELPER_2(sqrt_f64, f64, env, f64)
+DEF_HELPER_1(abs_f64, f64, f64)
+DEF_HELPER_1(chs_f64, f64, f64)
+DEF_HELPER_3(add_f64, f64, env, f64, f64)
+DEF_HELPER_3(sub_f64, f64, env, f64, f64)
+DEF_HELPER_3(mul_f64, f64, env, f64, f64)
+DEF_HELPER_3(div_f64, f64, env, f64, f64)
+DEF_HELPER_3(sub_cmp_f64, f64, env, f64, f64)
+DEF_HELPER_2(compare_f64, i32, env, f64)
+
+DEF_HELPER_3(mac_move, void, env, i32, i32)
+DEF_HELPER_3(macmulf, i64, env, i32, i32)
+DEF_HELPER_3(macmuls, i64, env, i32, i32)
+DEF_HELPER_3(macmulu, i64, env, i32, i32)
+DEF_HELPER_2(macsats, void, env, i32)
+DEF_HELPER_2(macsatu, void, env, i32)
+DEF_HELPER_2(macsatf, void, env, i32)
+DEF_HELPER_2(mac_set_flags, void, env, i32)
+DEF_HELPER_2(set_macsr, void, env, i32)
+DEF_HELPER_2(get_macf, i32, env, i64)
+DEF_HELPER_1(get_macs, i32, i64)
+DEF_HELPER_1(get_macu, i32, i64)
+DEF_HELPER_2(get_mac_extf, i32, env, i32)
+DEF_HELPER_2(get_mac_exti, i32, env, i32)
+DEF_HELPER_3(set_mac_extf, void, env, i32, i32)
+DEF_HELPER_3(set_mac_exts, void, env, i32, i32)
+DEF_HELPER_3(set_mac_extu, void, env, i32, i32)
+
+DEF_HELPER_2(flush_flags, void, env, i32)
+DEF_HELPER_2(set_ccr, void, env, i32)
+DEF_HELPER_FLAGS_1(get_ccr, TCG_CALL_NO_WG_SE, i32, env)
+DEF_HELPER_2(raise_exception, void, env, i32)
diff --git a/target/m68k/m68k-semi.c b/target/m68k/m68k-semi.c
new file mode 100644
index 0000000000..1402145c8f
--- /dev/null
+++ b/target/m68k/m68k-semi.c
@@ -0,0 +1,462 @@
+/*
+ * m68k/ColdFire Semihosting syscall interface
+ *
+ * Copyright (c) 2005-2007 CodeSourcery.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#if defined(CONFIG_USER_ONLY)
+#include "qemu.h"
+#define SEMIHOSTING_HEAP_SIZE (128 * 1024 * 1024)
+#else
+#include "qemu-common.h"
+#include "exec/gdbstub.h"
+#include "exec/softmmu-semi.h"
+#endif
+#include "qemu/log.h"
+#include "sysemu/sysemu.h"
+
+#define HOSTED_EXIT 0
+#define HOSTED_INIT_SIM 1
+#define HOSTED_OPEN 2
+#define HOSTED_CLOSE 3
+#define HOSTED_READ 4
+#define HOSTED_WRITE 5
+#define HOSTED_LSEEK 6
+#define HOSTED_RENAME 7
+#define HOSTED_UNLINK 8
+#define HOSTED_STAT 9
+#define HOSTED_FSTAT 10
+#define HOSTED_GETTIMEOFDAY 11
+#define HOSTED_ISATTY 12
+#define HOSTED_SYSTEM 13
+
+typedef uint32_t gdb_mode_t;
+typedef uint32_t gdb_time_t;
+
+struct m68k_gdb_stat {
+ uint32_t gdb_st_dev; /* device */
+ uint32_t gdb_st_ino; /* inode */
+ gdb_mode_t gdb_st_mode; /* protection */
+ uint32_t gdb_st_nlink; /* number of hard links */
+ uint32_t gdb_st_uid; /* user ID of owner */
+ uint32_t gdb_st_gid; /* group ID of owner */
+ uint32_t gdb_st_rdev; /* device type (if inode device) */
+ uint64_t gdb_st_size; /* total size, in bytes */
+ uint64_t gdb_st_blksize; /* blocksize for filesystem I/O */
+ uint64_t gdb_st_blocks; /* number of blocks allocated */
+ gdb_time_t gdb_st_atime; /* time of last access */
+ gdb_time_t gdb_st_mtime; /* time of last modification */
+ gdb_time_t gdb_st_ctime; /* time of last change */
+} QEMU_PACKED;
+
+struct gdb_timeval {
+ gdb_time_t tv_sec; /* second */
+ uint64_t tv_usec; /* microsecond */
+} QEMU_PACKED;
+
+#define GDB_O_RDONLY 0x0
+#define GDB_O_WRONLY 0x1
+#define GDB_O_RDWR 0x2
+#define GDB_O_APPEND 0x8
+#define GDB_O_CREAT 0x200
+#define GDB_O_TRUNC 0x400
+#define GDB_O_EXCL 0x800
+
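+/* Map GDB remote-protocol open flags onto the host's open(2) flags. */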
+static int translate_openflags(int flags)
+{
+ int hf;
+
+ if (flags & GDB_O_WRONLY)
+ hf = O_WRONLY;
+ else if (flags & GDB_O_RDWR)
+ hf = O_RDWR;
+ else
+ hf = O_RDONLY;
+
+ if (flags & GDB_O_APPEND) hf |= O_APPEND;
+ if (flags & GDB_O_CREAT) hf |= O_CREAT;
+ if (flags & GDB_O_TRUNC) hf |= O_TRUNC;
+ if (flags & GDB_O_EXCL) hf |= O_EXCL;
+
+ return hf;
+}
+
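+/* Copy a host struct stat into the guest's packed, big-endian
+ gdb stat structure at ADDR. */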
+static void translate_stat(CPUM68KState *env, target_ulong addr, struct stat *s)
+{
+ struct m68k_gdb_stat *p;
+
+ if (!(p = lock_user(VERIFY_WRITE, addr, sizeof(struct m68k_gdb_stat), 0)))
+ /* FIXME - should this return an error code? */
+ return;
+ p->gdb_st_dev = cpu_to_be32(s->st_dev);
+ p->gdb_st_ino = cpu_to_be32(s->st_ino);
+ p->gdb_st_mode = cpu_to_be32(s->st_mode);
+ p->gdb_st_nlink = cpu_to_be32(s->st_nlink);
+ p->gdb_st_uid = cpu_to_be32(s->st_uid);
+ p->gdb_st_gid = cpu_to_be32(s->st_gid);
+ p->gdb_st_rdev = cpu_to_be32(s->st_rdev);
+ p->gdb_st_size = cpu_to_be64(s->st_size);
+#ifdef _WIN32
+ /* Windows stat is missing some fields. */
+ p->gdb_st_blksize = 0;
+ p->gdb_st_blocks = 0;
+#else
+ p->gdb_st_blksize = cpu_to_be64(s->st_blksize);
+ p->gdb_st_blocks = cpu_to_be64(s->st_blocks);
+#endif
+ p->gdb_st_atime = cpu_to_be32(s->st_atime);
+ p->gdb_st_mtime = cpu_to_be32(s->st_mtime);
+ p->gdb_st_ctime = cpu_to_be32(s->st_ctime);
+ unlock_user(p, addr, sizeof(struct m68k_gdb_stat));
+}
+
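+/* Write a 32-bit result and errno into the guest argument block
+ addressed by D1. */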
+static void m68k_semi_return_u32(CPUM68KState *env, uint32_t ret, uint32_t err)
+{
+ target_ulong args = env->dregs[1];
+ if (put_user_u32(ret, args) ||
+ put_user_u32(err, args + 4)) {
+ /* The m68k semihosting ABI does not provide any way to report this
+ * error to the guest, so the best we can do is log it in qemu.
+ * It is always a guest error not to pass us a valid argument block.
+ */
+ qemu_log_mask(LOG_GUEST_ERROR, "m68k-semihosting: return value "
+ "discarded because argument block not writable\n");
+ }
+}
+
+static void m68k_semi_return_u64(CPUM68KState *env, uint64_t ret, uint32_t err)
+{
+ target_ulong args = env->dregs[1];
+ if (put_user_u32(ret >> 32, args) ||
+ put_user_u32(ret, args + 4) ||
+ put_user_u32(err, args + 8)) {
+ /* No way to report this via m68k semihosting ABI; just log it */
+ qemu_log_mask(LOG_GUEST_ERROR, "m68k-semihosting: return value "
+ "discarded because argument block not writable\n");
+ }
+}
+
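+/* Set while a gdb "fseek" syscall is outstanding so that the completion
+ callback knows to return a 64-bit value. */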
+static int m68k_semi_is_fseek;
+
+static void m68k_semi_cb(CPUState *cs, target_ulong ret, target_ulong err)
+{
+ M68kCPU *cpu = M68K_CPU(cs);
+ CPUM68KState *env = &cpu->env;
+
+ if (m68k_semi_is_fseek) {
+ /* FIXME: We've already lost the high bits of the fseek
+ return value. */
+ m68k_semi_return_u64(env, ret, err);
+ m68k_semi_is_fseek = 0;
+ } else {
+ m68k_semi_return_u32(env, ret, err);
+ }
+}
+
+/* Read the input value from the argument block; fail the semihosting
+ * call if the memory read fails.
+ */
+#define GET_ARG(n) do { \
+ if (get_user_ual(arg ## n, args + (n) * 4)) { \
+ result = -1; \
+ errno = EFAULT; \
+ goto failed; \
+ } \
+} while (0)
+
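+/* Dispatch one semihosting request: NR selects the operation and D1
+ points at the argument block; results are returned through
+ m68k_semi_return_u32/u64. */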
+void do_m68k_semihosting(CPUM68KState *env, int nr)
+{
+ uint32_t args;
+ target_ulong arg0, arg1, arg2, arg3;
+ void *p;
+ void *q;
+ uint32_t len;
+ uint32_t result;
+
+ args = env->dregs[1];
+ switch (nr) {
+ case HOSTED_EXIT:
+ gdb_exit(env, env->dregs[0]);
+ exit(env->dregs[0]);
+ case HOSTED_OPEN:
+ GET_ARG(0);
+ GET_ARG(1);
+ GET_ARG(2);
+ GET_ARG(3);
+ if (use_gdb_syscalls()) {
+ gdb_do_syscall(m68k_semi_cb, "open,%s,%x,%x", arg0, (int)arg1,
+ arg2, arg3);
+ return;
+ } else {
+ p = lock_user_string(arg0);
+ if (!p) {
+ /* FIXME - check error code? */
+ result = -1;
+ } else {
+ result = open(p, translate_openflags(arg2), arg3);
+ unlock_user(p, arg0, 0);
+ }
+ }
+ break;
+ case HOSTED_CLOSE:
+ {
+ /* Ignore attempts to close stdin/out/err. */
+ GET_ARG(0);
+ int fd = arg0;
+ if (fd > 2) {
+ if (use_gdb_syscalls()) {
+ gdb_do_syscall(m68k_semi_cb, "close,%x", arg0);
+ return;
+ } else {
+ result = close(fd);
+ }
+ } else {
+ result = 0;
+ }
+ break;
+ }
+ case HOSTED_READ:
+ GET_ARG(0);
+ GET_ARG(1);
+ GET_ARG(2);
+ len = arg2;
+ if (use_gdb_syscalls()) {
+ gdb_do_syscall(m68k_semi_cb, "read,%x,%x,%x",
+ arg0, arg1, len);
+ return;
+ } else {
+ p = lock_user(VERIFY_WRITE, arg1, len, 0);
+ if (!p) {
+ /* FIXME - check error code? */
+ result = -1;
+ } else {
+ result = read(arg0, p, len);
+ unlock_user(p, arg1, len);
+ }
+ }
+ break;
+ case HOSTED_WRITE:
+ GET_ARG(0);
+ GET_ARG(1);
+ GET_ARG(2);
+ len = arg2;
+ if (use_gdb_syscalls()) {
+ gdb_do_syscall(m68k_semi_cb, "write,%x,%x,%x",
+ arg0, arg1, len);
+ return;
+ } else {
+ p = lock_user(VERIFY_READ, arg1, len, 1);
+ if (!p) {
+ /* FIXME - check error code? */
+ result = -1;
+ } else {
+ result = write(arg0, p, len);
+ unlock_user(p, arg1, 0);
+ }
+ }
+ break;
+ case HOSTED_LSEEK:
+ {
+ uint64_t off;
+ GET_ARG(0);
+ GET_ARG(1);
+ GET_ARG(2);
+ GET_ARG(3);
+ off = (uint32_t)arg2 | ((uint64_t)arg1 << 32);
+ if (use_gdb_syscalls()) {
+ m68k_semi_is_fseek = 1;
+ gdb_do_syscall(m68k_semi_cb, "fseek,%x,%lx,%x",
+ arg0, off, arg3);
+ } else {
+ off = lseek(arg0, off, arg3);
+ m68k_semi_return_u64(env, off, errno);
+ }
+ return;
+ }
+ case HOSTED_RENAME:
+ GET_ARG(0);
+ GET_ARG(1);
+ GET_ARG(2);
+ GET_ARG(3);
+ if (use_gdb_syscalls()) {
+ gdb_do_syscall(m68k_semi_cb, "rename,%s,%s",
+ arg0, (int)arg1, arg2, (int)arg3);
+ return;
+ } else {
+ p = lock_user_string(arg0);
+ q = lock_user_string(arg2);
+ if (!p || !q) {
+ /* FIXME - check error code? */
+ result = -1;
+ } else {
+ result = rename(p, q);
+ }
+ unlock_user(p, arg0, 0);
+ unlock_user(q, arg2, 0);
+ }
+ break;
+ case HOSTED_UNLINK:
+ GET_ARG(0);
+ GET_ARG(1);
+ if (use_gdb_syscalls()) {
+ gdb_do_syscall(m68k_semi_cb, "unlink,%s",
+ arg0, (int)arg1);
+ return;
+ } else {
+ p = lock_user_string(arg0);
+ if (!p) {
+ /* FIXME - check error code? */
+ result = -1;
+ } else {
+ result = unlink(p);
+ unlock_user(p, arg0, 0);
+ }
+ }
+ break;
+ case HOSTED_STAT:
+ GET_ARG(0);
+ GET_ARG(1);
+ GET_ARG(2);
+ if (use_gdb_syscalls()) {
+ gdb_do_syscall(m68k_semi_cb, "stat,%s,%x",
+ arg0, (int)arg1, arg2);
+ return;
+ } else {
+ struct stat s;
+ p = lock_user_string(arg0);
+ if (!p) {
+ /* FIXME - check error code? */
+ result = -1;
+ } else {
+ result = stat(p, &s);
+ unlock_user(p, arg0, 0);
+ }
+ if (result == 0) {
+ translate_stat(env, arg2, &s);
+ }
+ }
+ break;
+ case HOSTED_FSTAT:
+ GET_ARG(0);
+ GET_ARG(1);
+ if (use_gdb_syscalls()) {
+ gdb_do_syscall(m68k_semi_cb, "fstat,%x,%x",
+ arg0, arg1);
+ return;
+ } else {
+ struct stat s;
+ result = fstat(arg0, &s);
+ if (result == 0) {
+ translate_stat(env, arg1, &s);
+ }
+ }
+ break;
+ case HOSTED_GETTIMEOFDAY:
+ GET_ARG(0);
+ GET_ARG(1);
+ if (use_gdb_syscalls()) {
+ gdb_do_syscall(m68k_semi_cb, "gettimeofday,%x,%x",
+ arg0, arg1);
+ return;
+ } else {
+ qemu_timeval tv;
+ struct gdb_timeval *p;
+ result = qemu_gettimeofday(&tv);
+ if (result == 0) {
+ if (!(p = lock_user(VERIFY_WRITE,
+ arg0, sizeof(struct gdb_timeval), 0))) {
+ /* FIXME - check error code? */
+ result = -1;
+ } else {
+ p->tv_sec = cpu_to_be32(tv.tv_sec);
+ p->tv_usec = cpu_to_be64(tv.tv_usec);
+ unlock_user(p, arg0, sizeof(struct gdb_timeval));
+ }
+ }
+ }
+ break;
+ case HOSTED_ISATTY:
+ GET_ARG(0);
+ if (use_gdb_syscalls()) {
+ gdb_do_syscall(m68k_semi_cb, "isatty,%x", arg0);
+ return;
+ } else {
+ result = isatty(arg0);
+ }
+ break;
+ case HOSTED_SYSTEM:
+ GET_ARG(0);
+ GET_ARG(1);
+ if (use_gdb_syscalls()) {
+ gdb_do_syscall(m68k_semi_cb, "system,%s",
+ arg0, (int)arg1);
+ return;
+ } else {
+ p = lock_user_string(arg0);
+ if (!p) {
+ /* FIXME - check error code? */
+ result = -1;
+ } else {
+ result = system(p);
+ unlock_user(p, arg0, 0);
+ }
+ }
+ break;
+ case HOSTED_INIT_SIM:
+#if defined(CONFIG_USER_ONLY)
+ {
+ CPUState *cs = CPU(m68k_env_get_cpu(env));
+ TaskState *ts = cs->opaque;
+ /* Allocate the heap using sbrk. */
+ if (!ts->heap_limit) {
+ abi_ulong ret;
+ uint32_t size;
+ uint32_t base;
+
+ base = do_brk(0);
+ size = SEMIHOSTING_HEAP_SIZE;
+ /* Try a big heap, and reduce the size if that fails. */
+ for (;;) {
+ ret = do_brk(base + size);
+ if (ret >= (base + size)) {
+ break;
+ }
+ size >>= 1;
+ }
+ ts->heap_limit = base + size;
+ }
+ /* This call may happen before we have writable memory, so return
+ values directly in registers. */
+ env->dregs[1] = ts->heap_limit;
+ env->aregs[7] = ts->stack_base;
+ }
+#else
+ /* FIXME: This is wrong for boards where RAM does not start at
+ address zero. */
+ env->dregs[1] = ram_size;
+ env->aregs[7] = ram_size;
+#endif
+ return;
+ default:
+ cpu_abort(CPU(m68k_env_get_cpu(env)), "Unsupported semihosting syscall %d\n", nr);
+ result = 0;
+ }
+failed:
+ m68k_semi_return_u32(env, result, errno);
+}
diff --git a/target/m68k/op_helper.c b/target/m68k/op_helper.c
new file mode 100644
index 0000000000..48e02e4062
--- /dev/null
+++ b/target/m68k/op_helper.c
@@ -0,0 +1,229 @@
+/*
+ * M68K helper routines
+ *
+ * Copyright (c) 2007 CodeSourcery
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "exec/semihost.h"
+
+#if defined(CONFIG_USER_ONLY)
+
+void m68k_cpu_do_interrupt(CPUState *cs)
+{
+ cs->exception_index = -1;
+}
+
+static inline void do_interrupt_m68k_hardirq(CPUM68KState *env)
+{
+}
+
+#else
+
+/* Try to fill the TLB and raise an exception on error. If retaddr is
+ NULL, the function was called from C code (i.e. not from generated
+ code or from helper.c). */
+void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ int ret;
+
+ ret = m68k_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
+ if (unlikely(ret)) {
+ if (retaddr) {
+ /* now we have a real cpu fault */
+ cpu_restore_state(cs, retaddr);
+ }
+ cpu_loop_exit(cs);
+ }
+}
+
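+/* Return from exception: pop the format word and return PC pushed at
+ exception entry, restore the low SP bits saved in the frame, and
+ reload SR from the format word. */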
+static void do_rte(CPUM68KState *env)
+{
+ uint32_t sp;
+ uint32_t fmt;
+
+ sp = env->aregs[7];
+ fmt = cpu_ldl_kernel(env, sp);
+ env->pc = cpu_ldl_kernel(env, sp + 4);
+ sp |= (fmt >> 28) & 3;
+ env->aregs[7] = sp + 8;
+
+ helper_set_sr(env, fmt);
+}
+
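+/* Common exception/interrupt entry: build a ColdFire-style stack frame
+ (format word plus return PC) on the supervisor stack and fetch the new
+ PC from the vector table. */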
+static void do_interrupt_all(CPUM68KState *env, int is_hw)
+{
+ CPUState *cs = CPU(m68k_env_get_cpu(env));
+ uint32_t sp;
+ uint32_t fmt;
+ uint32_t retaddr;
+ uint32_t vector;
+
+ fmt = 0;
+ retaddr = env->pc;
+
+ if (!is_hw) {
+ switch (cs->exception_index) {
+ case EXCP_RTE:
+ /* Return from an exception. */
+ do_rte(env);
+ return;
+ case EXCP_HALT_INSN:
+ if (semihosting_enabled()
+ && (env->sr & SR_S) != 0
+ && (env->pc & 3) == 0
+ && cpu_lduw_code(env, env->pc - 4) == 0x4e71
+ && cpu_ldl_code(env, env->pc) == 0x4e7bf000) {
+ env->pc += 4;
+ do_m68k_semihosting(env, env->dregs[0]);
+ return;
+ }
+ cs->halted = 1;
+ cs->exception_index = EXCP_HLT;
+ cpu_loop_exit(cs);
+ return;
+ }
+ if (cs->exception_index >= EXCP_TRAP0
+ && cs->exception_index <= EXCP_TRAP15) {
+ /* Move the PC after the trap instruction. */
+ retaddr += 2;
+ }
+ }
+
+ vector = cs->exception_index << 2;
+
+ fmt |= 0x40000000;
+ fmt |= vector << 16;
+ fmt |= env->sr;
+ fmt |= cpu_m68k_get_ccr(env);
+
+ env->sr |= SR_S;
+ if (is_hw) {
+ env->sr = (env->sr & ~SR_I) | (env->pending_level << SR_I_SHIFT);
+ env->sr &= ~SR_M;
+ }
+ m68k_switch_sp(env);
+ sp = env->aregs[7];
+ fmt |= (sp & 3) << 28;
+
+ /* ??? This could cause MMU faults. */
+ sp &= ~3;
+ sp -= 4;
+ cpu_stl_kernel(env, sp, retaddr);
+ sp -= 4;
+ cpu_stl_kernel(env, sp, fmt);
+ env->aregs[7] = sp;
+ /* Jump to vector. */
+ env->pc = cpu_ldl_kernel(env, env->vbr + vector);
+}
+
+void m68k_cpu_do_interrupt(CPUState *cs)
+{
+ M68kCPU *cpu = M68K_CPU(cs);
+ CPUM68KState *env = &cpu->env;
+
+ do_interrupt_all(env, 0);
+}
+
+static inline void do_interrupt_m68k_hardirq(CPUM68KState *env)
+{
+ do_interrupt_all(env, 1);
+}
+#endif
+
+bool m68k_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ M68kCPU *cpu = M68K_CPU(cs);
+ CPUM68KState *env = &cpu->env;
+
+ if (interrupt_request & CPU_INTERRUPT_HARD
+ && ((env->sr & SR_I) >> SR_I_SHIFT) < env->pending_level) {
+ /* Real hardware gets the interrupt vector via an IACK cycle
+ at this point. Current emulated hardware doesn't rely on
+ this, so we provide/save the vector when the interrupt is
+ first signalled. */
+ cs->exception_index = env->pending_vector;
+ do_interrupt_m68k_hardirq(env);
+ return true;
+ }
+ return false;
+}
+
+static void raise_exception(CPUM68KState *env, int tt)
+{
+ CPUState *cs = CPU(m68k_env_get_cpu(env));
+
+ cs->exception_index = tt;
+ cpu_loop_exit(cs);
+}
+
+void HELPER(raise_exception)(CPUM68KState *env, uint32_t tt)
+{
+ raise_exception(env, tt);
+}
+
+void HELPER(divu)(CPUM68KState *env, uint32_t word)
+{
+ uint32_t num;
+ uint32_t den;
+ uint32_t quot;
+ uint32_t rem;
+
+ num = env->div1;
+ den = env->div2;
+ /* ??? This needs to make sure the throwing location is accurate. */
+ if (den == 0) {
+ raise_exception(env, EXCP_DIV0);
+ }
+ quot = num / den;
+ rem = num % den;
+
+ env->cc_v = (word && quot > 0xffff ? -1 : 0);
+ env->cc_z = quot;
+ env->cc_n = quot;
+ env->cc_c = 0;
+
+ env->div1 = quot;
+ env->div2 = rem;
+}
+
+void HELPER(divs)(CPUM68KState *env, uint32_t word)
+{
+ int32_t num;
+ int32_t den;
+ int32_t quot;
+ int32_t rem;
+
+ num = env->div1;
+ den = env->div2;
+ if (den == 0) {
+ raise_exception(env, EXCP_DIV0);
+ }
+ quot = num / den;
+ rem = num % den;
+
+ env->cc_v = (word && quot != (int16_t)quot ? -1 : 0);
+ env->cc_z = quot;
+ env->cc_n = quot;
+ env->cc_c = 0;
+
+ env->div1 = quot;
+ env->div2 = rem;
+}
diff --git a/target/m68k/qregs.def b/target/m68k/qregs.def
new file mode 100644
index 0000000000..156c0f558f
--- /dev/null
+++ b/target/m68k/qregs.def
@@ -0,0 +1,13 @@
+DEFF64(FP_RESULT, fp_result)
+DEFO32(PC, pc)
+DEFO32(SR, sr)
+DEFO32(CC_OP, cc_op)
+DEFO32(CC_X, cc_x)
+DEFO32(CC_C, cc_c)
+DEFO32(CC_N, cc_n)
+DEFO32(CC_V, cc_v)
+DEFO32(CC_Z, cc_z)
+DEFO32(DIV1, div1)
+DEFO32(DIV2, div2)
+DEFO32(MACSR, macsr)
+DEFO32(MAC_MASK, mac_mask)
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
new file mode 100644
index 0000000000..d6ed883882
--- /dev/null
+++ b/target/m68k/translate.c
@@ -0,0 +1,3595 @@
+/*
+ * m68k translation
+ *
+ * Copyright (c) 2005-2007 CodeSourcery
+ * Written by Paul Brook
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "disas/disas.h"
+#include "exec/exec-all.h"
+#include "tcg-op.h"
+#include "qemu/log.h"
+#include "exec/cpu_ldst.h"
+
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+
+#include "trace-tcg.h"
+#include "exec/log.h"
+
+
+//#define DEBUG_DISPATCH 1
+
+/* Fake floating point. */
+#define tcg_gen_mov_f64 tcg_gen_mov_i64
+#define tcg_gen_qemu_ldf64 tcg_gen_qemu_ld64
+#define tcg_gen_qemu_stf64 tcg_gen_qemu_st64
+
+#define DEFO32(name, offset) static TCGv QREG_##name;
+#define DEFO64(name, offset) static TCGv_i64 QREG_##name;
+#define DEFF64(name, offset) static TCGv_i64 QREG_##name;
+#include "qregs.def"
+#undef DEFO32
+#undef DEFO64
+#undef DEFF64
+
+static TCGv_i32 cpu_halted;
+static TCGv_i32 cpu_exception_index;
+
+static TCGv_env cpu_env;
+
+static char cpu_reg_names[3*8*3 + 5*4];
+static TCGv cpu_dregs[8];
+static TCGv cpu_aregs[8];
+static TCGv_i64 cpu_fregs[8];
+static TCGv_i64 cpu_macc[4];
+
+#define REG(insn, pos) (((insn) >> (pos)) & 7)
+#define DREG(insn, pos) cpu_dregs[REG(insn, pos)]
+#define AREG(insn, pos) cpu_aregs[REG(insn, pos)]
+#define FREG(insn, pos) cpu_fregs[REG(insn, pos)]
+#define MACREG(acc) cpu_macc[acc]
+#define QREG_SP cpu_aregs[7]
+
+static TCGv NULL_QREG;
+#define IS_NULL_QREG(t) (TCGV_EQUAL(t, NULL_QREG))
+/* Used to distinguish stores from bad addressing modes. */
+static TCGv store_dummy;
+
+#include "exec/gen-icount.h"
+
+void m68k_tcg_init(void)
+{
+ char *p;
+ int i;
+
+ cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+ tcg_ctx.tcg_env = cpu_env;
+
+#define DEFO32(name, offset) \
+ QREG_##name = tcg_global_mem_new_i32(cpu_env, \
+ offsetof(CPUM68KState, offset), #name);
+#define DEFO64(name, offset) \
+ QREG_##name = tcg_global_mem_new_i64(cpu_env, \
+ offsetof(CPUM68KState, offset), #name);
+#define DEFF64(name, offset) DEFO64(name, offset)
+#include "qregs.def"
+#undef DEFO32
+#undef DEFO64
+#undef DEFF64
+
+ cpu_halted = tcg_global_mem_new_i32(cpu_env,
+ -offsetof(M68kCPU, env) +
+ offsetof(CPUState, halted), "HALTED");
+ cpu_exception_index = tcg_global_mem_new_i32(cpu_env,
+ -offsetof(M68kCPU, env) +
+ offsetof(CPUState, exception_index),
+ "EXCEPTION");
+
+ p = cpu_reg_names;
+ for (i = 0; i < 8; i++) {
+ sprintf(p, "D%d", i);
+ cpu_dregs[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUM68KState, dregs[i]), p);
+ p += 3;
+ sprintf(p, "A%d", i);
+ cpu_aregs[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUM68KState, aregs[i]), p);
+ p += 3;
+ sprintf(p, "F%d", i);
+ cpu_fregs[i] = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUM68KState, fregs[i]), p);
+ p += 3;
+ }
+ for (i = 0; i < 4; i++) {
+ sprintf(p, "ACC%d", i);
+ cpu_macc[i] = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUM68KState, macc[i]), p);
+ p += 5;
+ }
+
+ NULL_QREG = tcg_global_mem_new(cpu_env, -4, "NULL");
+ store_dummy = tcg_global_mem_new(cpu_env, -8, "NULL");
+}
+
+/* internal defines */
+typedef struct DisasContext {
+ CPUM68KState *env;
+ target_ulong insn_pc; /* Start of the current instruction. */
+ target_ulong pc;
+ int is_jmp;
+ CCOp cc_op; /* Current CC operation */
+ int cc_op_synced;
+ int user;
+ uint32_t fpcr;
+ struct TranslationBlock *tb;
+ int singlestep_enabled;
+ TCGv_i64 mactmp;
+ int done_mac;
+} DisasContext;
+
+#define DISAS_JUMP_NEXT 4
+
+#if defined(CONFIG_USER_ONLY)
+#define IS_USER(s) 1
+#else
+#define IS_USER(s) s->user
+#endif
+
+/* XXX: move that elsewhere */
+/* ??? Fix exceptions. */
+static void *gen_throws_exception;
+#define gen_last_qop NULL
+
+typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);
+
+#ifdef DEBUG_DISPATCH
+#define DISAS_INSN(name) \
+ static void real_disas_##name(CPUM68KState *env, DisasContext *s, \
+ uint16_t insn); \
+ static void disas_##name(CPUM68KState *env, DisasContext *s, \
+ uint16_t insn) \
+ { \
+ qemu_log("Dispatch " #name "\n"); \
+ real_disas_##name(env, s, insn); \
+ } \
+ static void real_disas_##name(CPUM68KState *env, DisasContext *s, \
+ uint16_t insn)
+#else
+#define DISAS_INSN(name) \
+ static void disas_##name(CPUM68KState *env, DisasContext *s, \
+ uint16_t insn)
+#endif
+
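+/* Flags that remain live for each CC_OP variant; set_cc_op() uses this
+ to discard flag computations that the new CC_OP no longer needs. */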
+static const uint8_t cc_op_live[CC_OP_NB] = {
+ [CC_OP_FLAGS] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
+ [CC_OP_ADDB ... CC_OP_ADDL] = CCF_X | CCF_N | CCF_V,
+ [CC_OP_SUBB ... CC_OP_SUBL] = CCF_X | CCF_N | CCF_V,
+ [CC_OP_CMPB ... CC_OP_CMPL] = CCF_X | CCF_N | CCF_V,
+ [CC_OP_LOGIC] = CCF_X | CCF_N
+};
+
+static void set_cc_op(DisasContext *s, CCOp op)
+{
+ CCOp old_op = s->cc_op;
+ int dead;
+
+ if (old_op == op) {
+ return;
+ }
+ s->cc_op = op;
+ s->cc_op_synced = 0;
+
+ /* Discard CC computation that will no longer be used.
+ Note that X and N are never dead. */
+ dead = cc_op_live[old_op] & ~cc_op_live[op];
+ if (dead & CCF_C) {
+ tcg_gen_discard_i32(QREG_CC_C);
+ }
+ if (dead & CCF_Z) {
+ tcg_gen_discard_i32(QREG_CC_Z);
+ }
+ if (dead & CCF_V) {
+ tcg_gen_discard_i32(QREG_CC_V);
+ }
+}
+
+/* Update the CPU env CC_OP state. */
+static void update_cc_op(DisasContext *s)
+{
+ if (!s->cc_op_synced) {
+ s->cc_op_synced = 1;
+ tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
+ }
+}
+
+/* Generate a load from the specified address. Narrow values are
+ sign- or zero-extended to full register width according to SIGN. */
+static inline TCGv gen_load(DisasContext * s, int opsize, TCGv addr, int sign)
+{
+ TCGv tmp;
+ int index = IS_USER(s);
+ tmp = tcg_temp_new_i32();
+ switch(opsize) {
+ case OS_BYTE:
+ if (sign)
+ tcg_gen_qemu_ld8s(tmp, addr, index);
+ else
+ tcg_gen_qemu_ld8u(tmp, addr, index);
+ break;
+ case OS_WORD:
+ if (sign)
+ tcg_gen_qemu_ld16s(tmp, addr, index);
+ else
+ tcg_gen_qemu_ld16u(tmp, addr, index);
+ break;
+ case OS_LONG:
+ case OS_SINGLE:
+ tcg_gen_qemu_ld32u(tmp, addr, index);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ gen_throws_exception = gen_last_qop;
+ return tmp;
+}
+
+static inline TCGv_i64 gen_load64(DisasContext * s, TCGv addr)
+{
+ TCGv_i64 tmp;
+ int index = IS_USER(s);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_qemu_ldf64(tmp, addr, index);
+ gen_throws_exception = gen_last_qop;
+ return tmp;
+}
+
+/* Generate a store. */
+static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val)
+{
+ int index = IS_USER(s);
+ switch(opsize) {
+ case OS_BYTE:
+ tcg_gen_qemu_st8(val, addr, index);
+ break;
+ case OS_WORD:
+ tcg_gen_qemu_st16(val, addr, index);
+ break;
+ case OS_LONG:
+ case OS_SINGLE:
+ tcg_gen_qemu_st32(val, addr, index);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ gen_throws_exception = gen_last_qop;
+}
+
+static inline void gen_store64(DisasContext *s, TCGv addr, TCGv_i64 val)
+{
+ int index = IS_USER(s);
+ tcg_gen_qemu_stf64(val, addr, index);
+ gen_throws_exception = gen_last_qop;
+}
+
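+/* Selector for gen_ldst(): store the value, load it unsigned, or load
+ it sign-extended. */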
+typedef enum {
+ EA_STORE,
+ EA_LOADU,
+ EA_LOADS
+} ea_what;
+
+/* Generate an unsigned load if WHAT is EA_LOADU, a signed load if it
+ is EA_LOADS, otherwise generate a store. */
+static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
+ ea_what what)
+{
+ if (what == EA_STORE) {
+ gen_store(s, opsize, addr, val);
+ return store_dummy;
+ } else {
+ return gen_load(s, opsize, addr, what == EA_LOADS);
+ }
+}
+
+/* Read a 16-bit immediate constant */
+static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s)
+{
+ uint16_t im;
+ im = cpu_lduw_code(env, s->pc);
+ s->pc += 2;
+ return im;
+}
+
+/* Read an 8-bit immediate constant */
+static inline uint8_t read_im8(CPUM68KState *env, DisasContext *s)
+{
+ return read_im16(env, s);
+}
+
+/* Read a 32-bit immediate constant. */
+static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
+{
+ uint32_t im;
+ im = read_im16(env, s) << 16;
+ im |= 0xffff & read_im16(env, s);
+ return im;
+}
+
+/* Calculate an address index. */
+static TCGv gen_addr_index(uint16_t ext, TCGv tmp)
+{
+ TCGv add;
+ int scale;
+
+ add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
+ if ((ext & 0x800) == 0) {
+ tcg_gen_ext16s_i32(tmp, add);
+ add = tmp;
+ }
+ scale = (ext >> 9) & 3;
+ if (scale != 0) {
+ tcg_gen_shli_i32(tmp, add, scale);
+ add = tmp;
+ }
+ return add;
+}
+
+/* Handle a base + index + displacement effective address.
+ A NULL_QREG base means pc-relative. */
+static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
+{
+ uint32_t offset;
+ uint16_t ext;
+ TCGv add;
+ TCGv tmp;
+ uint32_t bd, od;
+
+ offset = s->pc;
+ ext = read_im16(env, s);
+
+ if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
+ return NULL_QREG;
+
+ if (m68k_feature(s->env, M68K_FEATURE_M68000) &&
+ !m68k_feature(s->env, M68K_FEATURE_SCALED_INDEX)) {
+ ext &= ~(3 << 9);
+ }
+
+ if (ext & 0x100) {
+ /* full extension word format */
+ if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
+ return NULL_QREG;
+
+ if ((ext & 0x30) > 0x10) {
+ /* base displacement */
+ if ((ext & 0x30) == 0x20) {
+ bd = (int16_t)read_im16(env, s);
+ } else {
+ bd = read_im32(env, s);
+ }
+ } else {
+ bd = 0;
+ }
+ tmp = tcg_temp_new();
+ if ((ext & 0x44) == 0) {
+ /* pre-index */
+ add = gen_addr_index(ext, tmp);
+ } else {
+ add = NULL_QREG;
+ }
+ if ((ext & 0x80) == 0) {
+ /* base not suppressed */
+ if (IS_NULL_QREG(base)) {
+ base = tcg_const_i32(offset + bd);
+ bd = 0;
+ }
+ if (!IS_NULL_QREG(add)) {
+ tcg_gen_add_i32(tmp, add, base);
+ add = tmp;
+ } else {
+ add = base;
+ }
+ }
+ if (!IS_NULL_QREG(add)) {
+ if (bd != 0) {
+ tcg_gen_addi_i32(tmp, add, bd);
+ add = tmp;
+ }
+ } else {
+ add = tcg_const_i32(bd);
+ }
+ if ((ext & 3) != 0) {
+ /* memory indirect */
+ base = gen_load(s, OS_LONG, add, 0);
+ if ((ext & 0x44) == 4) {
+ add = gen_addr_index(ext, tmp);
+ tcg_gen_add_i32(tmp, add, base);
+ add = tmp;
+ } else {
+ add = base;
+ }
+ if ((ext & 3) > 1) {
+ /* outer displacement */
+ if ((ext & 3) == 2) {
+ od = (int16_t)read_im16(env, s);
+ } else {
+ od = read_im32(env, s);
+ }
+ } else {
+ od = 0;
+ }
+ if (od != 0) {
+ tcg_gen_addi_i32(tmp, add, od);
+ add = tmp;
+ }
+ }
+ } else {
+ /* brief extension word format */
+ tmp = tcg_temp_new();
+ add = gen_addr_index(ext, tmp);
+ if (!IS_NULL_QREG(base)) {
+ tcg_gen_add_i32(tmp, add, base);
+ if ((int8_t)ext)
+ tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
+ } else {
+ tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
+ }
+ add = tmp;
+ }
+ return add;
+}
+
+/* Sign or zero extend a value. */
+
+static inline void gen_ext(TCGv res, TCGv val, int opsize, int sign)
+{
+ switch (opsize) {
+ case OS_BYTE:
+ if (sign) {
+ tcg_gen_ext8s_i32(res, val);
+ } else {
+ tcg_gen_ext8u_i32(res, val);
+ }
+ break;
+ case OS_WORD:
+ if (sign) {
+ tcg_gen_ext16s_i32(res, val);
+ } else {
+ tcg_gen_ext16u_i32(res, val);
+ }
+ break;
+ case OS_LONG:
+ tcg_gen_mov_i32(res, val);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+/* Evaluate all the CC flags. */
+
+static void gen_flush_flags(DisasContext *s)
+{
+ TCGv t0, t1;
+
+ switch (s->cc_op) {
+ case CC_OP_FLAGS:
+ return;
+
+ case CC_OP_ADDB:
+ case CC_OP_ADDW:
+ case CC_OP_ADDL:
+ tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
+ tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
+ /* Compute signed overflow for addition. */
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ tcg_gen_sub_i32(t0, QREG_CC_N, QREG_CC_V);
+ gen_ext(t0, t0, s->cc_op - CC_OP_ADDB, 1);
+ tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V);
+ tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
+ tcg_temp_free(t0);
+ tcg_gen_andc_i32(QREG_CC_V, t1, QREG_CC_V);
+ tcg_temp_free(t1);
+ break;
+
+ case CC_OP_SUBB:
+ case CC_OP_SUBW:
+ case CC_OP_SUBL:
+ tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
+ tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
+ /* Compute signed overflow for subtraction. */
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ tcg_gen_add_i32(t0, QREG_CC_N, QREG_CC_V);
+ gen_ext(t0, t0, s->cc_op - CC_OP_SUBB, 1);
+ tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V);
+ tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
+ tcg_temp_free(t0);
+ tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t1);
+ tcg_temp_free(t1);
+ break;
+
+ case CC_OP_CMPB:
+ case CC_OP_CMPW:
+ case CC_OP_CMPL:
+ tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_C, QREG_CC_N, QREG_CC_V);
+ tcg_gen_sub_i32(QREG_CC_Z, QREG_CC_N, QREG_CC_V);
+ gen_ext(QREG_CC_Z, QREG_CC_Z, s->cc_op - CC_OP_CMPB, 1);
+ /* Compute signed overflow for subtraction. */
+ t0 = tcg_temp_new();
+ tcg_gen_xor_i32(t0, QREG_CC_Z, QREG_CC_N);
+ tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, QREG_CC_N);
+ tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t0);
+ tcg_temp_free(t0);
+ tcg_gen_mov_i32(QREG_CC_N, QREG_CC_Z);
+ break;
+
+ case CC_OP_LOGIC:
+ tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
+ tcg_gen_movi_i32(QREG_CC_C, 0);
+ tcg_gen_movi_i32(QREG_CC_V, 0);
+ break;
+
+ case CC_OP_DYNAMIC:
+ gen_helper_flush_flags(cpu_env, QREG_CC_OP);
+ break;
+
+ default:
+ t0 = tcg_const_i32(s->cc_op);
+ gen_helper_flush_flags(cpu_env, t0);
+ tcg_temp_free(t0);
+ break;
+ }
+
+ /* Note that the flush_flags helper also updates env->cc_op. */
+ s->cc_op = CC_OP_FLAGS;
+ s->cc_op_synced = 1;
+}
+
+static inline TCGv gen_extend(TCGv val, int opsize, int sign)
+{
+ TCGv tmp;
+
+ if (opsize == OS_LONG) {
+ tmp = val;
+ } else {
+ tmp = tcg_temp_new();
+ gen_ext(tmp, val, opsize, sign);
+ }
+
+ return tmp;
+}
+
+static void gen_logic_cc(DisasContext *s, TCGv val, int opsize)
+{
+ gen_ext(QREG_CC_N, val, opsize, 1);
+ set_cc_op(s, CC_OP_LOGIC);
+}
+
+static void gen_update_cc_cmp(DisasContext *s, TCGv dest, TCGv src, int opsize)
+{
+ tcg_gen_mov_i32(QREG_CC_N, dest);
+ tcg_gen_mov_i32(QREG_CC_V, src);
+ set_cc_op(s, CC_OP_CMPB + opsize);
+}
+
+static void gen_update_cc_add(TCGv dest, TCGv src, int opsize)
+{
+ gen_ext(QREG_CC_N, dest, opsize, 1);
+ tcg_gen_mov_i32(QREG_CC_V, src);
+}
+
+static inline int opsize_bytes(int opsize)
+{
+ switch (opsize) {
+ case OS_BYTE: return 1;
+ case OS_WORD: return 2;
+ case OS_LONG: return 4;
+ case OS_SINGLE: return 4;
+ case OS_DOUBLE: return 8;
+ case OS_EXTENDED: return 12;
+ case OS_PACKED: return 12;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+static inline int insn_opsize(int insn)
+{
+ switch ((insn >> 6) & 3) {
+ case 0: return OS_BYTE;
+ case 1: return OS_WORD;
+ case 2: return OS_LONG;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+/* Assign value to a register. If the width is less than the register width
+ only the low part of the register is set. */
+static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
+{
+ TCGv tmp;
+ switch (opsize) {
+ case OS_BYTE:
+ tcg_gen_andi_i32(reg, reg, 0xffffff00);
+ tmp = tcg_temp_new();
+ tcg_gen_ext8u_i32(tmp, val);
+ tcg_gen_or_i32(reg, reg, tmp);
+ break;
+ case OS_WORD:
+ tcg_gen_andi_i32(reg, reg, 0xffff0000);
+ tmp = tcg_temp_new();
+ tcg_gen_ext16u_i32(tmp, val);
+ tcg_gen_or_i32(reg, reg, tmp);
+ break;
+ case OS_LONG:
+ case OS_SINGLE:
+ tcg_gen_mov_i32(reg, val);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+}
+
+/* Generate code for an "effective address". Does not adjust the base
+ register for autoincrement addressing modes. */
+static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
+ int opsize)
+{
+ TCGv reg;
+ TCGv tmp;
+ uint16_t ext;
+ uint32_t offset;
+
+ switch ((insn >> 3) & 7) {
+ case 0: /* Data register direct. */
+ case 1: /* Address register direct. */
+ return NULL_QREG;
+ case 2: /* Indirect register */
+ case 3: /* Indirect postincrement. */
+ return AREG(insn, 0);
+ case 4: /* Indirect predecrement. */
+ reg = AREG(insn, 0);
+ tmp = tcg_temp_new();
+ tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
+ return tmp;
+ case 5: /* Indirect displacement. */
+ reg = AREG(insn, 0);
+ tmp = tcg_temp_new();
+ ext = read_im16(env, s);
+ tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
+ return tmp;
+ case 6: /* Indirect index + displacement. */
+ reg = AREG(insn, 0);
+ return gen_lea_indexed(env, s, reg);
+ case 7: /* Other */
+ switch (insn & 7) {
+ case 0: /* Absolute short. */
+ offset = (int16_t)read_im16(env, s);
+ return tcg_const_i32(offset);
+ case 1: /* Absolute long. */
+ offset = read_im32(env, s);
+ return tcg_const_i32(offset);
+ case 2: /* pc displacement */
+ offset = s->pc;
+ offset += (int16_t)read_im16(env, s);
+ return tcg_const_i32(offset);
+ case 3: /* pc index+displacement. */
+ return gen_lea_indexed(env, s, NULL_QREG);
+ case 4: /* Immediate. */
+ default:
+ return NULL_QREG;
+ }
+ }
+ /* Should never happen. */
+ return NULL_QREG;
+}
+
+/* Helper function for gen_ea. Reuse the computed address for the
+ read/write operands. */
+static inline TCGv gen_ea_once(CPUM68KState *env, DisasContext *s,
+ uint16_t insn, int opsize, TCGv val,
+ TCGv *addrp, ea_what what)
+{
+ TCGv tmp;
+
+ if (addrp && what == EA_STORE) {
+ tmp = *addrp;
+ } else {
+ tmp = gen_lea(env, s, insn, opsize);
+ if (IS_NULL_QREG(tmp))
+ return tmp;
+ if (addrp)
+ *addrp = tmp;
+ }
+ return gen_ldst(s, opsize, tmp, val, what);
+}
+
+/* Generate code to load/store a value from/into an EA. WHAT selects a
+ store (EA_STORE), an unsigned load (EA_LOADU) or a signed load (EA_LOADS).
+ ADDRP is non-null for read-modify-write operands. */
+static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
+ int opsize, TCGv val, TCGv *addrp, ea_what what)
+{
+ TCGv reg;
+ TCGv result;
+ uint32_t offset;
+
+ switch ((insn >> 3) & 7) {
+ case 0: /* Data register direct. */
+ reg = DREG(insn, 0);
+ if (what == EA_STORE) {
+ gen_partset_reg(opsize, reg, val);
+ return store_dummy;
+ } else {
+ return gen_extend(reg, opsize, what == EA_LOADS);
+ }
+ case 1: /* Address register direct. */
+ reg = AREG(insn, 0);
+ if (what == EA_STORE) {
+ tcg_gen_mov_i32(reg, val);
+ return store_dummy;
+ } else {
+ return gen_extend(reg, opsize, what == EA_LOADS);
+ }
+ case 2: /* Indirect register */
+ reg = AREG(insn, 0);
+ return gen_ldst(s, opsize, reg, val, what);
+ case 3: /* Indirect postincrement. */
+ reg = AREG(insn, 0);
+ result = gen_ldst(s, opsize, reg, val, what);
+ /* ??? This is not exception safe. The instruction may still
+ fault after this point. */
+ if (what == EA_STORE || !addrp)
+ tcg_gen_addi_i32(reg, reg, opsize_bytes(opsize));
+ return result;
+ case 4: /* Indirect predecrement. */
+ {
+ TCGv tmp;
+ if (addrp && what == EA_STORE) {
+ tmp = *addrp;
+ } else {
+ tmp = gen_lea(env, s, insn, opsize);
+ if (IS_NULL_QREG(tmp))
+ return tmp;
+ if (addrp)
+ *addrp = tmp;
+ }
+ result = gen_ldst(s, opsize, tmp, val, what);
+ /* ??? This is not exception safe. The instruction may still
+ fault after this point. */
+ if (what == EA_STORE || !addrp) {
+ reg = AREG(insn, 0);
+ tcg_gen_mov_i32(reg, tmp);
+ }
+ }
+ return result;
+ case 5: /* Indirect displacement. */
+ case 6: /* Indirect index + displacement. */
+ return gen_ea_once(env, s, insn, opsize, val, addrp, what);
+ case 7: /* Other */
+ switch (insn & 7) {
+ case 0: /* Absolute short. */
+ case 1: /* Absolute long. */
+ case 2: /* pc displacement */
+ case 3: /* pc index+displacement. */
+ return gen_ea_once(env, s, insn, opsize, val, addrp, what);
+ case 4: /* Immediate. */
+ /* Sign extend values for consistency. */
+ switch (opsize) {
+ case OS_BYTE:
+ if (what == EA_LOADS) {
+ offset = (int8_t)read_im8(env, s);
+ } else {
+ offset = read_im8(env, s);
+ }
+ break;
+ case OS_WORD:
+ if (what == EA_LOADS) {
+ offset = (int16_t)read_im16(env, s);
+ } else {
+ offset = read_im16(env, s);
+ }
+ break;
+ case OS_LONG:
+ offset = read_im32(env, s);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ return tcg_const_i32(offset);
+ default:
+ return NULL_QREG;
+ }
+ }
+ /* Should never happen. */
+ return NULL_QREG;
+}
+
+typedef struct {
+ TCGCond tcond;
+ bool g1;
+ bool g2;
+ TCGv v1;
+ TCGv v2;
+} DisasCompare;
+
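+/* Build a TCG comparison (tcond, v1, v2) equivalent to m68k condition
+ code COND, using the lazily tracked flag state where possible to avoid
+ a full flag flush. */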
+static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond)
+{
+ TCGv tmp, tmp2;
+ TCGCond tcond;
+ CCOp op = s->cc_op;
+
+ /* The CC_OP_CMP form can handle most normal comparisons directly. */
+ if (op == CC_OP_CMPB || op == CC_OP_CMPW || op == CC_OP_CMPL) {
+ c->g1 = c->g2 = 1;
+ c->v1 = QREG_CC_N;
+ c->v2 = QREG_CC_V;
+ switch (cond) {
+ case 2: /* HI */
+ case 3: /* LS */
+ tcond = TCG_COND_LEU;
+ goto done;
+ case 4: /* CC */
+ case 5: /* CS */
+ tcond = TCG_COND_LTU;
+ goto done;
+ case 6: /* NE */
+ case 7: /* EQ */
+ tcond = TCG_COND_EQ;
+ goto done;
+ case 10: /* PL */
+ case 11: /* MI */
+ c->g1 = c->g2 = 0;
+ c->v2 = tcg_const_i32(0);
+ c->v1 = tmp = tcg_temp_new();
+ tcg_gen_sub_i32(tmp, QREG_CC_N, QREG_CC_V);
+ gen_ext(tmp, tmp, op - CC_OP_CMPB, 1);
+ /* fallthru */
+ case 12: /* GE */
+ case 13: /* LT */
+ tcond = TCG_COND_LT;
+ goto done;
+ case 14: /* GT */
+ case 15: /* LE */
+ tcond = TCG_COND_LE;
+ goto done;
+ }
+ }
+
+ c->g1 = 1;
+ c->g2 = 0;
+ c->v2 = tcg_const_i32(0);
+
+ switch (cond) {
+ case 0: /* T */
+ case 1: /* F */
+ c->v1 = c->v2;
+ tcond = TCG_COND_NEVER;
+ goto done;
+ case 14: /* GT (!(Z || (N ^ V))) */
+ case 15: /* LE (Z || (N ^ V)) */
+ /* Logic operations clear V, which simplifies LE to (Z || N),
+ and since Z and N are co-located, this becomes a normal
+ comparison vs N. */
+ if (op == CC_OP_LOGIC) {
+ c->v1 = QREG_CC_N;
+ tcond = TCG_COND_LE;
+ goto done;
+ }
+ break;
+ case 12: /* GE (!(N ^ V)) */
+ case 13: /* LT (N ^ V) */
+ /* Logic operations clear V, which simplifies this to N. */
+ if (op != CC_OP_LOGIC) {
+ break;
+ }
+ /* fallthru */
+ case 10: /* PL (!N) */
+ case 11: /* MI (N) */
+ /* Several cases represent N normally. */
+ if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
+ op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
+ op == CC_OP_LOGIC) {
+ c->v1 = QREG_CC_N;
+ tcond = TCG_COND_LT;
+ goto done;
+ }
+ break;
+ case 6: /* NE (!Z) */
+ case 7: /* EQ (Z) */
+ /* Some cases fold Z into N. */
+ if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
+ op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
+ op == CC_OP_LOGIC) {
+ tcond = TCG_COND_EQ;
+ c->v1 = QREG_CC_N;
+ goto done;
+ }
+ break;
+ case 4: /* CC (!C) */
+ case 5: /* CS (C) */
+ /* Some cases fold C into X. */
+ if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
+ op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL) {
+ tcond = TCG_COND_NE;
+ c->v1 = QREG_CC_X;
+ goto done;
+ }
+ /* fallthru */
+ case 8: /* VC (!V) */
+ case 9: /* VS (V) */
+ /* Logic operations clear V and C. */
+ if (op == CC_OP_LOGIC) {
+ tcond = TCG_COND_NEVER;
+ c->v1 = c->v2;
+ goto done;
+ }
+ break;
+ }
+
+ /* Otherwise, flush flag state to CC_OP_FLAGS. */
+ gen_flush_flags(s);
+
+ switch (cond) {
+ case 0: /* T */
+ case 1: /* F */
+ default:
+ /* Invalid, or handled above. */
+ abort();
+ case 2: /* HI (!C && !Z) -> !(C || Z)*/
+ case 3: /* LS (C || Z) */
+ c->v1 = tmp = tcg_temp_new();
+ c->g1 = 0;
+ tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
+ tcg_gen_or_i32(tmp, tmp, QREG_CC_C);
+ tcond = TCG_COND_NE;
+ break;
+ case 4: /* CC (!C) */
+ case 5: /* CS (C) */
+ c->v1 = QREG_CC_C;
+ tcond = TCG_COND_NE;
+ break;
+ case 6: /* NE (!Z) */
+ case 7: /* EQ (Z) */
+ c->v1 = QREG_CC_Z;
+ tcond = TCG_COND_EQ;
+ break;
+ case 8: /* VC (!V) */
+ case 9: /* VS (V) */
+ c->v1 = QREG_CC_V;
+ tcond = TCG_COND_LT;
+ break;
+ case 10: /* PL (!N) */
+ case 11: /* MI (N) */
+ c->v1 = QREG_CC_N;
+ tcond = TCG_COND_LT;
+ break;
+ case 12: /* GE (!(N ^ V)) */
+ case 13: /* LT (N ^ V) */
+ c->v1 = tmp = tcg_temp_new();
+ c->g1 = 0;
+ tcg_gen_xor_i32(tmp, QREG_CC_N, QREG_CC_V);
+ tcond = TCG_COND_LT;
+ break;
+ case 14: /* GT (!(Z || (N ^ V))) */
+ case 15: /* LE (Z || (N ^ V)) */
+ c->v1 = tmp = tcg_temp_new();
+ c->g1 = 0;
+ tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
+ tcg_gen_neg_i32(tmp, tmp);
+ tmp2 = tcg_temp_new();
+ tcg_gen_xor_i32(tmp2, QREG_CC_N, QREG_CC_V);
+ tcg_gen_or_i32(tmp, tmp, tmp2);
+ tcg_temp_free(tmp2);
+ tcond = TCG_COND_LT;
+ break;
+ }
+
+ done:
+ if ((cond & 1) == 0) {
+ tcond = tcg_invert_cond(tcond);
+ }
+ c->tcond = tcond;
+}
+
+static void free_cond(DisasCompare *c)
+{
+ if (!c->g1) {
+ tcg_temp_free(c->v1);
+ }
+ if (!c->g2) {
+ tcg_temp_free(c->v2);
+ }
+}
+
+static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1)
+{
+ DisasCompare c;
+
+ gen_cc_cond(&c, s, cond);
+ update_cc_op(s);
+ tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
+ free_cond(&c);
+}
+
+/* Force a TB lookup after an instruction that changes the CPU state. */
+static void gen_lookup_tb(DisasContext *s)
+{
+ update_cc_op(s);
+ tcg_gen_movi_i32(QREG_PC, s->pc);
+ s->is_jmp = DISAS_UPDATE;
+}
+
+/* Generate a jump to an immediate address. */
+static void gen_jmp_im(DisasContext *s, uint32_t dest)
+{
+ update_cc_op(s);
+ tcg_gen_movi_i32(QREG_PC, dest);
+ s->is_jmp = DISAS_JUMP;
+}
+
+/* Generate a jump to the address in qreg DEST. */
+static void gen_jmp(DisasContext *s, TCGv dest)
+{
+ update_cc_op(s);
+ tcg_gen_mov_i32(QREG_PC, dest);
+ s->is_jmp = DISAS_JUMP;
+}
+
+static void gen_exception(DisasContext *s, uint32_t where, int nr)
+{
+ update_cc_op(s);
+ gen_jmp_im(s, where);
+ gen_helper_raise_exception(cpu_env, tcg_const_i32(nr));
+}
+
+static inline void gen_addr_fault(DisasContext *s)
+{
+ gen_exception(s, s->insn_pc, EXCP_ADDRESS);
+}
+
+#define SRC_EA(env, result, opsize, op_sign, addrp) do { \
+ result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp, \
+ op_sign ? EA_LOADS : EA_LOADU); \
+ if (IS_NULL_QREG(result)) { \
+ gen_addr_fault(s); \
+ return; \
+ } \
+ } while (0)
+
+#define DEST_EA(env, insn, opsize, val, addrp) do { \
+ TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp, EA_STORE); \
+ if (IS_NULL_QREG(ea_result)) { \
+ gen_addr_fault(s); \
+ return; \
+ } \
+ } while (0)
+
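+/* Direct block chaining is only safe when the destination lies on the
+ same guest page as the start of the TB or the current instruction. */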
+static inline bool use_goto_tb(DisasContext *s, uint32_t dest)
+{
+#ifndef CONFIG_USER_ONLY
+ return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
+ (s->insn_pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
+#else
+ return true;
+#endif
+}
+
+/* Generate a jump to an immediate address, chaining to the next TB
+ when possible. */
+static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
+{
+ if (unlikely(s->singlestep_enabled)) {
+ gen_exception(s, dest, EXCP_DEBUG);
+ } else if (use_goto_tb(s, dest)) {
+ tcg_gen_goto_tb(n);
+ tcg_gen_movi_i32(QREG_PC, dest);
+ tcg_gen_exit_tb((uintptr_t)s->tb + n);
+ } else {
+ gen_jmp_im(s, dest);
+ tcg_gen_exit_tb(0);
+ }
+ s->is_jmp = DISAS_TB_JUMP;
+}
+
+DISAS_INSN(scc)
+{
+ DisasCompare c;
+ int cond;
+ TCGv tmp;
+
+ cond = (insn >> 8) & 0xf;
+ gen_cc_cond(&c, s, cond);
+
+ tmp = tcg_temp_new();
+ tcg_gen_setcond_i32(c.tcond, tmp, c.v1, c.v2);
+ free_cond(&c);
+
+ tcg_gen_neg_i32(tmp, tmp);
+ DEST_EA(env, insn, OS_BYTE, tmp, NULL);
+ tcg_temp_free(tmp);
+}
+
+DISAS_INSN(dbcc)
+{
+ TCGLabel *l1;
+ TCGv reg;
+ TCGv tmp;
+ int16_t offset;
+ uint32_t base;
+
+ reg = DREG(insn, 0);
+ base = s->pc;
+ offset = (int16_t)read_im16(env, s);
+ l1 = gen_new_label();
+ gen_jmpcc(s, (insn >> 8) & 0xf, l1);
+
+ tmp = tcg_temp_new();
+ tcg_gen_ext16s_i32(tmp, reg);
+ tcg_gen_addi_i32(tmp, tmp, -1);
+ gen_partset_reg(OS_WORD, reg, tmp);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, -1, l1);
+ gen_jmp_tb(s, 1, base + offset);
+ gen_set_label(l1);
+ gen_jmp_tb(s, 0, s->pc);
+}
+
+DISAS_INSN(undef_mac)
+{
+ gen_exception(s, s->pc - 2, EXCP_LINEA);
+}
+
+DISAS_INSN(undef_fpu)
+{
+ gen_exception(s, s->pc - 2, EXCP_LINEF);
+}
+
+DISAS_INSN(undef)
+{
+ M68kCPU *cpu = m68k_env_get_cpu(env);
+
+ gen_exception(s, s->pc - 2, EXCP_UNSUPPORTED);
+ cpu_abort(CPU(cpu), "Illegal instruction: %04x @ %08x", insn, s->pc - 2);
+}
+
+DISAS_INSN(mulw)
+{
+ TCGv reg;
+ TCGv tmp;
+ TCGv src;
+ int sign;
+
+ sign = (insn & 0x100) != 0;
+ reg = DREG(insn, 9);
+ tmp = tcg_temp_new();
+ if (sign)
+ tcg_gen_ext16s_i32(tmp, reg);
+ else
+ tcg_gen_ext16u_i32(tmp, reg);
+ SRC_EA(env, src, OS_WORD, sign, NULL);
+ tcg_gen_mul_i32(tmp, tmp, src);
+ tcg_gen_mov_i32(reg, tmp);
+ gen_logic_cc(s, tmp, OS_LONG);
+}
+
+DISAS_INSN(divw)
+{
+ TCGv reg;
+ TCGv tmp;
+ TCGv src;
+ int sign;
+
+ sign = (insn & 0x100) != 0;
+ reg = DREG(insn, 9);
+ if (sign) {
+ tcg_gen_ext16s_i32(QREG_DIV1, reg);
+ } else {
+ tcg_gen_ext16u_i32(QREG_DIV1, reg);
+ }
+ SRC_EA(env, src, OS_WORD, sign, NULL);
+ tcg_gen_mov_i32(QREG_DIV2, src);
+ if (sign) {
+ gen_helper_divs(cpu_env, tcg_const_i32(1));
+ } else {
+ gen_helper_divu(cpu_env, tcg_const_i32(1));
+ }
+
+ tmp = tcg_temp_new();
+ src = tcg_temp_new();
+ tcg_gen_ext16u_i32(tmp, QREG_DIV1);
+ tcg_gen_shli_i32(src, QREG_DIV2, 16);
+ tcg_gen_or_i32(reg, tmp, src);
+
+ set_cc_op(s, CC_OP_FLAGS);
+}
+
+DISAS_INSN(divl)
+{
+ TCGv num;
+ TCGv den;
+ TCGv reg;
+ uint16_t ext;
+
+ ext = read_im16(env, s);
+ if (ext & 0x87f8) {
+ gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
+ return;
+ }
+ num = DREG(ext, 12);
+ reg = DREG(ext, 0);
+ tcg_gen_mov_i32(QREG_DIV1, num);
+ SRC_EA(env, den, OS_LONG, 0, NULL);
+ tcg_gen_mov_i32(QREG_DIV2, den);
+ if (ext & 0x0800) {
+ gen_helper_divs(cpu_env, tcg_const_i32(0));
+ } else {
+ gen_helper_divu(cpu_env, tcg_const_i32(0));
+ }
+ if ((ext & 7) == ((ext >> 12) & 7)) {
+ /* div */
+ tcg_gen_mov_i32 (reg, QREG_DIV1);
+ } else {
+ /* rem */
+ tcg_gen_mov_i32 (reg, QREG_DIV2);
+ }
+ set_cc_op(s, CC_OP_FLAGS);
+}
+
+DISAS_INSN(addsub)
+{
+ TCGv reg;
+ TCGv dest;
+ TCGv src;
+ TCGv tmp;
+ TCGv addr;
+ int add;
+ int opsize;
+
+ add = (insn & 0x4000) != 0;
+ opsize = insn_opsize(insn);
+ reg = gen_extend(DREG(insn, 9), opsize, 1);
+ dest = tcg_temp_new();
+ if (insn & 0x100) {
+ SRC_EA(env, tmp, opsize, 1, &addr);
+ src = reg;
+ } else {
+ tmp = reg;
+ SRC_EA(env, src, opsize, 1, NULL);
+ }
+ if (add) {
+ tcg_gen_add_i32(dest, tmp, src);
+ tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, src);
+ set_cc_op(s, CC_OP_ADDB + opsize);
+ } else {
+ tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, tmp, src);
+ tcg_gen_sub_i32(dest, tmp, src);
+ set_cc_op(s, CC_OP_SUBB + opsize);
+ }
+ gen_update_cc_add(dest, src, opsize);
+ if (insn & 0x100) {
+ DEST_EA(env, insn, opsize, dest, &addr);
+ } else {
+ gen_partset_reg(opsize, DREG(insn, 9), dest);
+ }
+ tcg_temp_free(dest);
+}
+
+/* Reverse the order of the bits in REG. */
+DISAS_INSN(bitrev)
+{
+ TCGv reg;
+ reg = DREG(insn, 0);
+ gen_helper_bitrev(reg, reg);
+}
+
+DISAS_INSN(bitop_reg)
+{
+ int opsize;
+ int op;
+ TCGv src1;
+ TCGv src2;
+ TCGv tmp;
+ TCGv addr;
+ TCGv dest;
+
+ if ((insn & 0x38) != 0)
+ opsize = OS_BYTE;
+ else
+ opsize = OS_LONG;
+ op = (insn >> 6) & 3;
+ SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
+
+ gen_flush_flags(s);
+ src2 = tcg_temp_new();
+ if (opsize == OS_BYTE)
+ tcg_gen_andi_i32(src2, DREG(insn, 9), 7);
+ else
+ tcg_gen_andi_i32(src2, DREG(insn, 9), 31);
+
+ tmp = tcg_const_i32(1);
+ tcg_gen_shl_i32(tmp, tmp, src2);
+ tcg_temp_free(src2);
+
+ tcg_gen_and_i32(QREG_CC_Z, src1, tmp);
+
+ dest = tcg_temp_new();
+ switch (op) {
+ case 1: /* bchg */
+ tcg_gen_xor_i32(dest, src1, tmp);
+ break;
+ case 2: /* bclr */
+ tcg_gen_andc_i32(dest, src1, tmp);
+ break;
+ case 3: /* bset */
+ tcg_gen_or_i32(dest, src1, tmp);
+ break;
+ default: /* btst */
+ break;
+ }
+ tcg_temp_free(tmp);
+ if (op) {
+ DEST_EA(env, insn, opsize, dest, &addr);
+ }
+ tcg_temp_free(dest);
+}
+
+DISAS_INSN(sats)
+{
+ TCGv reg;
+ reg = DREG(insn, 0);
+ gen_flush_flags(s);
+ gen_helper_sats(reg, reg, QREG_CC_V);
+ gen_logic_cc(s, reg, OS_LONG);
+}
+
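+/* Push VAL onto the guest stack: pre-decrement A7 by 4 and store a
+ longword. */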
+static void gen_push(DisasContext *s, TCGv val)
+{
+ TCGv tmp;
+
+ tmp = tcg_temp_new();
+ tcg_gen_subi_i32(tmp, QREG_SP, 4);
+ gen_store(s, OS_LONG, tmp, val);
+ tcg_gen_mov_i32(QREG_SP, tmp);
+}
+
+DISAS_INSN(movem)
+{
+ TCGv addr;
+ int i;
+ uint16_t mask;
+ TCGv reg;
+ TCGv tmp;
+ int is_load;
+
+ mask = read_im16(env, s);
+ tmp = gen_lea(env, s, insn, OS_LONG);
+ if (IS_NULL_QREG(tmp)) {
+ gen_addr_fault(s);
+ return;
+ }
+ addr = tcg_temp_new();
+ tcg_gen_mov_i32(addr, tmp);
+ is_load = ((insn & 0x0400) != 0);
+ for (i = 0; i < 16; i++, mask >>= 1) {
+ if (mask & 1) {
+ if (i < 8)
+ reg = DREG(i, 0);
+ else
+ reg = AREG(i, 0);
+ if (is_load) {
+ tmp = gen_load(s, OS_LONG, addr, 0);
+ tcg_gen_mov_i32(reg, tmp);
+ } else {
+ gen_store(s, OS_LONG, addr, reg);
+ }
+ if (mask != 1)
+ tcg_gen_addi_i32(addr, addr, 4);
+ }
+ }
+}
+
+DISAS_INSN(bitop_im)
+{
+ int opsize;
+ int op;
+ TCGv src1;
+ uint32_t mask;
+ int bitnum;
+ TCGv tmp;
+ TCGv addr;
+
+ if ((insn & 0x38) != 0)
+ opsize = OS_BYTE;
+ else
+ opsize = OS_LONG;
+ op = (insn >> 6) & 3;
+
+ bitnum = read_im16(env, s);
+ if (bitnum & 0xff00) {
+ disas_undef(env, s, insn);
+ return;
+ }
+
+ SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
+
+ gen_flush_flags(s);
+ if (opsize == OS_BYTE)
+ bitnum &= 7;
+ else
+ bitnum &= 31;
+ mask = 1 << bitnum;
+
+ tcg_gen_andi_i32(QREG_CC_Z, src1, mask);
+
+ if (op) {
+ tmp = tcg_temp_new();
+ switch (op) {
+ case 1: /* bchg */
+ tcg_gen_xori_i32(tmp, src1, mask);
+ break;
+ case 2: /* bclr */
+ tcg_gen_andi_i32(tmp, src1, ~mask);
+ break;
+ case 3: /* bset */
+ tcg_gen_ori_i32(tmp, src1, mask);
+ break;
+ default: /* btst */
+ break;
+ }
+ DEST_EA(env, insn, opsize, tmp, &addr);
+ tcg_temp_free(tmp);
+ }
+}
+
+DISAS_INSN(arith_im)
+{
+ int op;
+ TCGv im;
+ TCGv src1;
+ TCGv dest;
+ TCGv addr;
+ int opsize;
+
+ op = (insn >> 9) & 7;
+ opsize = insn_opsize(insn);
+ switch (opsize) {
+ case OS_BYTE:
+ im = tcg_const_i32((int8_t)read_im8(env, s));
+ break;
+ case OS_WORD:
+ im = tcg_const_i32((int16_t)read_im16(env, s));
+ break;
+ case OS_LONG:
+ im = tcg_const_i32(read_im32(env, s));
+ break;
+ default:
+ abort();
+ }
+ SRC_EA(env, src1, opsize, 1, (op == 6) ? NULL : &addr);
+ dest = tcg_temp_new();
+ switch (op) {
+ case 0: /* ori */
+ tcg_gen_or_i32(dest, src1, im);
+ gen_logic_cc(s, dest, opsize);
+ break;
+ case 1: /* andi */
+ tcg_gen_and_i32(dest, src1, im);
+ gen_logic_cc(s, dest, opsize);
+ break;
+ case 2: /* subi */
+ tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, src1, im);
+ tcg_gen_sub_i32(dest, src1, im);
+ gen_update_cc_add(dest, im, opsize);
+ set_cc_op(s, CC_OP_SUBB + opsize);
+ break;
+ case 3: /* addi */
+ tcg_gen_add_i32(dest, src1, im);
+ gen_update_cc_add(dest, im, opsize);
+ tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, im);
+ set_cc_op(s, CC_OP_ADDB + opsize);
+ break;
+ case 5: /* eori */
+ tcg_gen_xor_i32(dest, src1, im);
+ gen_logic_cc(s, dest, opsize);
+ break;
+ case 6: /* cmpi */
+ gen_update_cc_cmp(s, src1, im, opsize);
+ break;
+ default:
+ abort();
+ }
+ tcg_temp_free(im);
+ if (op != 6) {
+ DEST_EA(env, insn, opsize, dest, &addr);
+ }
+ tcg_temp_free(dest);
+}
+
+DISAS_INSN(byterev)
+{
+ TCGv reg;
+
+ reg = DREG(insn, 0);
+ tcg_gen_bswap32_i32(reg, reg);
+}
+
+DISAS_INSN(move)
+{
+ TCGv src;
+ TCGv dest;
+ int op;
+ int opsize;
+
+ switch (insn >> 12) {
+ case 1: /* move.b */
+ opsize = OS_BYTE;
+ break;
+ case 2: /* move.l */
+ opsize = OS_LONG;
+ break;
+ case 3: /* move.w */
+ opsize = OS_WORD;
+ break;
+ default:
+ abort();
+ }
+ SRC_EA(env, src, opsize, 1, NULL);
+ op = (insn >> 6) & 7;
+ if (op == 1) {
+ /* movea */
+ /* The value will already have been sign extended. */
+ dest = AREG(insn, 9);
+ tcg_gen_mov_i32(dest, src);
+ } else {
+ /* normal move */
+ uint16_t dest_ea;
+ dest_ea = ((insn >> 9) & 7) | (op << 3);
+ DEST_EA(env, dest_ea, opsize, src, NULL);
+ /* This will be correct because loads sign extend. */
+ gen_logic_cc(s, src, opsize);
+ }
+}
+
+DISAS_INSN(negx)
+{
+ TCGv z;
+ TCGv src;
+ TCGv addr;
+ int opsize;
+
+ opsize = insn_opsize(insn);
+ SRC_EA(env, src, opsize, 1, &addr);
+
+ gen_flush_flags(s); /* compute old Z */
+
+ /* Perform subtraction with borrow.
+ * (X, N) = -(src + X);
+ */
+
+ z = tcg_const_i32(0);
+ tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, z, QREG_CC_X, z);
+ tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, z, z, QREG_CC_N, QREG_CC_X);
+ tcg_temp_free(z);
+ gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
+
+ tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);
+
+ /* Compute signed-overflow for negation. The normal formula for
+ * subtraction is (res ^ src) & (src ^ dest), but with dest==0
+ * this simplifies to res & src.
+ */
+
+ tcg_gen_and_i32(QREG_CC_V, QREG_CC_N, src);
+
+ /* Copy the rest of the results into place. */
+ tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
+ tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
+
+ set_cc_op(s, CC_OP_FLAGS);
+
+ /* result is in QREG_CC_N */
+
+ DEST_EA(env, insn, opsize, QREG_CC_N, &addr);
+}
+
+DISAS_INSN(lea)
+{
+ TCGv reg;
+ TCGv tmp;
+
+ reg = AREG(insn, 9);
+ tmp = gen_lea(env, s, insn, OS_LONG);
+ if (IS_NULL_QREG(tmp)) {
+ gen_addr_fault(s);
+ return;
+ }
+ tcg_gen_mov_i32(reg, tmp);
+}
+
+DISAS_INSN(clr)
+{
+ int opsize;
+
+ opsize = insn_opsize(insn);
+ DEST_EA(env, insn, opsize, tcg_const_i32(0), NULL);
+ gen_logic_cc(s, tcg_const_i32(0), opsize);
+}
+
+static TCGv gen_get_ccr(DisasContext *s)
+{
+ TCGv dest;
+
+ gen_flush_flags(s);
+ update_cc_op(s);
+ dest = tcg_temp_new();
+ gen_helper_get_ccr(dest, cpu_env);
+ return dest;
+}
+
+DISAS_INSN(move_from_ccr)
+{
+ TCGv ccr;
+
+ ccr = gen_get_ccr(s);
+ DEST_EA(env, insn, OS_WORD, ccr, NULL);
+}
+
+DISAS_INSN(neg)
+{
+ TCGv src1;
+ TCGv dest;
+ TCGv addr;
+ int opsize;
+
+ opsize = insn_opsize(insn);
+ SRC_EA(env, src1, opsize, 1, &addr);
+ dest = tcg_temp_new();
+ tcg_gen_neg_i32(dest, src1);
+ set_cc_op(s, CC_OP_SUBB + opsize);
+ gen_update_cc_add(dest, src1, opsize);
+ tcg_gen_setcondi_i32(TCG_COND_NE, QREG_CC_X, dest, 0);
+ DEST_EA(env, insn, opsize, dest, &addr);
+ tcg_temp_free(dest);
+}
+
+static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
+{
+ if (ccr_only) {
+ tcg_gen_movi_i32(QREG_CC_C, val & CCF_C ? 1 : 0);
+ tcg_gen_movi_i32(QREG_CC_V, val & CCF_V ? -1 : 0);
+ tcg_gen_movi_i32(QREG_CC_Z, val & CCF_Z ? 0 : 1);
+ tcg_gen_movi_i32(QREG_CC_N, val & CCF_N ? -1 : 0);
+ tcg_gen_movi_i32(QREG_CC_X, val & CCF_X ? 1 : 0);
+ } else {
+ gen_helper_set_sr(cpu_env, tcg_const_i32(val));
+ }
+ set_cc_op(s, CC_OP_FLAGS);
+}
+
+static void gen_set_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
+ int ccr_only)
+{
+ if ((insn & 0x38) == 0) {
+ if (ccr_only) {
+ gen_helper_set_ccr(cpu_env, DREG(insn, 0));
+ } else {
+ gen_helper_set_sr(cpu_env, DREG(insn, 0));
+ }
+ set_cc_op(s, CC_OP_FLAGS);
+ } else if ((insn & 0x3f) == 0x3c) {
+ uint16_t val;
+ val = read_im16(env, s);
+ gen_set_sr_im(s, val, ccr_only);
+ } else {
+ disas_undef(env, s, insn);
+ }
+}
+
+
+DISAS_INSN(move_to_ccr)
+{
+ gen_set_sr(env, s, insn, 1);
+}
+
+DISAS_INSN(not)
+{
+ TCGv src1;
+ TCGv dest;
+ TCGv addr;
+ int opsize;
+
+ opsize = insn_opsize(insn);
+ SRC_EA(env, src1, opsize, 1, &addr);
+ dest = tcg_temp_new();
+ tcg_gen_not_i32(dest, src1);
+ DEST_EA(env, insn, opsize, dest, &addr);
+ gen_logic_cc(s, dest, opsize);
+}
+
+DISAS_INSN(swap)
+{
+ TCGv src1;
+ TCGv src2;
+ TCGv reg;
+
+ src1 = tcg_temp_new();
+ src2 = tcg_temp_new();
+ reg = DREG(insn, 0);
+ tcg_gen_shli_i32(src1, reg, 16);
+ tcg_gen_shri_i32(src2, reg, 16);
+ tcg_gen_or_i32(reg, src1, src2);
+ gen_logic_cc(s, reg, OS_LONG);
+}
+
+DISAS_INSN(bkpt)
+{
+ gen_exception(s, s->pc - 2, EXCP_DEBUG);
+}
+
+DISAS_INSN(pea)
+{
+ TCGv tmp;
+
+ tmp = gen_lea(env, s, insn, OS_LONG);
+ if (IS_NULL_QREG(tmp)) {
+ gen_addr_fault(s);
+ return;
+ }
+ gen_push(s, tmp);
+}
+
+DISAS_INSN(ext)
+{
+ int op;
+ TCGv reg;
+ TCGv tmp;
+
+ reg = DREG(insn, 0);
+ op = (insn >> 6) & 7;
+ tmp = tcg_temp_new();
+ if (op == 3)
+ tcg_gen_ext16s_i32(tmp, reg);
+ else
+ tcg_gen_ext8s_i32(tmp, reg);
+ if (op == 2)
+ gen_partset_reg(OS_WORD, reg, tmp);
+ else
+ tcg_gen_mov_i32(reg, tmp);
+ gen_logic_cc(s, tmp, OS_LONG);
+}
+
+DISAS_INSN(tst)
+{
+ int opsize;
+ TCGv tmp;
+
+ opsize = insn_opsize(insn);
+ SRC_EA(env, tmp, opsize, 1, NULL);
+ gen_logic_cc(s, tmp, opsize);
+}
+
+DISAS_INSN(pulse)
+{
+ /* Implemented as a NOP. */
+}
+
+DISAS_INSN(illegal)
+{
+ gen_exception(s, s->pc - 2, EXCP_ILLEGAL);
+}
+
+/* ??? This should be atomic. */
+DISAS_INSN(tas)
+{
+ TCGv dest;
+ TCGv src1;
+ TCGv addr;
+
+ dest = tcg_temp_new();
+ SRC_EA(env, src1, OS_BYTE, 1, &addr);
+ gen_logic_cc(s, src1, OS_BYTE);
+ tcg_gen_ori_i32(dest, src1, 0x80);
+ DEST_EA(env, insn, OS_BYTE, dest, &addr);
+}
+
+DISAS_INSN(mull)
+{
+ uint16_t ext;
+ TCGv reg;
+ TCGv src1;
+ TCGv dest;
+
+ /* The upper 32 bits of the product are discarded, so
+ muls.l and mulu.l are functionally equivalent. */
+ ext = read_im16(env, s);
+ if (ext & 0x87ff) {
+ gen_exception(s, s->pc - 4, EXCP_UNSUPPORTED);
+ return;
+ }
+ reg = DREG(ext, 12);
+ SRC_EA(env, src1, OS_LONG, 0, NULL);
+ dest = tcg_temp_new();
+ tcg_gen_mul_i32(dest, src1, reg);
+ tcg_gen_mov_i32(reg, dest);
+ /* Unlike m68k, coldfire always clears the overflow bit. */
+ gen_logic_cc(s, dest, OS_LONG);
+}
+
+static void gen_link(DisasContext *s, uint16_t insn, int32_t offset)
+{
+ TCGv reg;
+ TCGv tmp;
+
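+ /* link An,#disp: push An, load An with the new frame pointer (SP - 4),
+ * then add the displacement to SP to reserve the frame. */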
+ reg = AREG(insn, 0);
+ tmp = tcg_temp_new();
+ tcg_gen_subi_i32(tmp, QREG_SP, 4);
+ gen_store(s, OS_LONG, tmp, reg);
+ if ((insn & 7) != 7) {
+ tcg_gen_mov_i32(reg, tmp);
+ }
+ tcg_gen_addi_i32(QREG_SP, tmp, offset);
+ tcg_temp_free(tmp);
+}
+
+DISAS_INSN(link)
+{
+ int16_t offset;
+
+ offset = read_im16(env, s);
+ gen_link(s, insn, offset);
+}
+
+DISAS_INSN(linkl)
+{
+ int32_t offset;
+
+ offset = read_im32(env, s);
+ gen_link(s, insn, offset);
+}
+
+DISAS_INSN(unlk)
+{
+ TCGv src;
+ TCGv reg;
+ TCGv tmp;
+
+ src = tcg_temp_new();
+ reg = AREG(insn, 0);
+ tcg_gen_mov_i32(src, reg);
+ tmp = gen_load(s, OS_LONG, src, 0);
+ tcg_gen_mov_i32(reg, tmp);
+ tcg_gen_addi_i32(QREG_SP, src, 4);
+}
+
+DISAS_INSN(nop)
+{
+}
+
+DISAS_INSN(rts)
+{
+ TCGv tmp;
+
+ tmp = gen_load(s, OS_LONG, QREG_SP, 0);
+ tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
+ gen_jmp(s, tmp);
+}
+
+DISAS_INSN(jump)
+{
+ TCGv tmp;
+
+ /* Load the target address first to ensure correct exception
+ behavior. */
+ tmp = gen_lea(env, s, insn, OS_LONG);
+ if (IS_NULL_QREG(tmp)) {
+ gen_addr_fault(s);
+ return;
+ }
+ if ((insn & 0x40) == 0) {
+ /* jsr */
+ gen_push(s, tcg_const_i32(s->pc));
+ }
+ gen_jmp(s, tmp);
+}
+
+DISAS_INSN(addsubq)
+{
+ TCGv src;
+ TCGv dest;
+ TCGv val;
+ int imm;
+ TCGv addr;
+ int opsize;
+
+ if ((insn & 070) == 010) {
+ /* Operation on address register is always long. */
+ opsize = OS_LONG;
+ } else {
+ opsize = insn_opsize(insn);
+ }
+ SRC_EA(env, src, opsize, 1, &addr);
+ imm = (insn >> 9) & 7;
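+ /* An addq/subq immediate field of 0 encodes the value 8. */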
+ if (imm == 0) {
+ imm = 8;
+ }
+ val = tcg_const_i32(imm);
+ dest = tcg_temp_new();
+ tcg_gen_mov_i32(dest, src);
+ if ((insn & 0x38) == 0x08) {
+ /* Don't update condition codes if the destination is an
+ address register. */
+ if (insn & 0x0100) {
+ tcg_gen_sub_i32(dest, dest, val);
+ } else {
+ tcg_gen_add_i32(dest, dest, val);
+ }
+ } else {
+ if (insn & 0x0100) {
+ tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
+ tcg_gen_sub_i32(dest, dest, val);
+ set_cc_op(s, CC_OP_SUBB + opsize);
+ } else {
+ tcg_gen_add_i32(dest, dest, val);
+ tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
+ set_cc_op(s, CC_OP_ADDB + opsize);
+ }
+ gen_update_cc_add(dest, val, opsize);
+ }
+ DEST_EA(env, insn, opsize, dest, &addr);
+}
+
+DISAS_INSN(tpf)
+{
+ switch (insn & 7) {
+ case 2: /* One extension word. */
+ s->pc += 2;
+ break;
+ case 3: /* Two extension words. */
+ s->pc += 4;
+ break;
+ case 4: /* No extension words. */
+ break;
+ default:
+ disas_undef(env, s, insn);
+ }
+}
+
+DISAS_INSN(branch)
+{
+ int32_t offset;
+ uint32_t base;
+ int op;
+ TCGLabel *l1;
+
+ base = s->pc;
+ op = (insn >> 8) & 0xf;
+ offset = (int8_t)insn;
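+ /* An 8-bit displacement of 0x00 means a 16-bit displacement word follows;
+ * 0xff (-1) means a 32-bit displacement follows. */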
+ if (offset == 0) {
+ offset = (int16_t)read_im16(env, s);
+ } else if (offset == -1) {
+ offset = read_im32(env, s);
+ }
+ if (op == 1) {
+ /* bsr */
+ gen_push(s, tcg_const_i32(s->pc));
+ }
+ if (op > 1) {
+ /* Bcc */
+ l1 = gen_new_label();
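+ /* Branch to l1 (the not-taken path) when the inverted condition holds. */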
+ gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
+ gen_jmp_tb(s, 1, base + offset);
+ gen_set_label(l1);
+ gen_jmp_tb(s, 0, s->pc);
+ } else {
+ /* Unconditional branch. */
+ gen_jmp_tb(s, 0, base + offset);
+ }
+}
+
+DISAS_INSN(moveq)
+{
+ uint32_t val;
+
+ val = (int8_t)insn;
+ tcg_gen_movi_i32(DREG(insn, 9), val);
+ gen_logic_cc(s, tcg_const_i32(val), OS_LONG);
+}
+
+DISAS_INSN(mvzs)
+{
+ int opsize;
+ TCGv src;
+ TCGv reg;
+
+ if (insn & 0x40)
+ opsize = OS_WORD;
+ else
+ opsize = OS_BYTE;
+ SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
+ reg = DREG(insn, 9);
+ tcg_gen_mov_i32(reg, src);
+ gen_logic_cc(s, src, opsize);
+}
+
+DISAS_INSN(or)
+{
+ TCGv reg;
+ TCGv dest;
+ TCGv src;
+ TCGv addr;
+ int opsize;
+
+ opsize = insn_opsize(insn);
+ reg = gen_extend(DREG(insn, 9), opsize, 0);
+ dest = tcg_temp_new();
+ if (insn & 0x100) {
+ SRC_EA(env, src, opsize, 0, &addr);
+ tcg_gen_or_i32(dest, src, reg);
+ DEST_EA(env, insn, opsize, dest, &addr);
+ } else {
+ SRC_EA(env, src, opsize, 0, NULL);
+ tcg_gen_or_i32(dest, src, reg);
+ gen_partset_reg(opsize, DREG(insn, 9), dest);
+ }
+ gen_logic_cc(s, dest, opsize);
+}
+
+DISAS_INSN(suba)
+{
+ TCGv src;
+ TCGv reg;
+
+ SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
+ reg = AREG(insn, 9);
+ tcg_gen_sub_i32(reg, reg, src);
+}
+
+static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize)
+{
+ TCGv tmp;
+
+ gen_flush_flags(s); /* compute old Z */
+
+ /* Perform subtraction with borrow.
+ * (X, N) = dest - (src + X);
+ */
+
+ tmp = tcg_const_i32(0);
+ tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, tmp, QREG_CC_X, tmp);
+ tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, dest, tmp, QREG_CC_N, QREG_CC_X);
+ gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
+ tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);
+
+ /* Compute signed-overflow for subtraction. */
+
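+ /* V = (res ^ dest) & (dest ^ src): overflow iff the operands differ in
+ * sign and the result's sign differs from dest. */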
+ tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, dest);
+ tcg_gen_xor_i32(tmp, dest, src);
+ tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, tmp);
+ tcg_temp_free(tmp);
+
+ /* Copy the rest of the results into place. */
+ tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
+ tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
+
+ set_cc_op(s, CC_OP_FLAGS);
+
+ /* result is in QREG_CC_N */
+}
+
+DISAS_INSN(subx_reg)
+{
+ TCGv dest;
+ TCGv src;
+ int opsize;
+
+ opsize = insn_opsize(insn);
+
+ src = gen_extend(DREG(insn, 0), opsize, 1);
+ dest = gen_extend(DREG(insn, 9), opsize, 1);
+
+ gen_subx(s, src, dest, opsize);
+
+ gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
+}
+
+DISAS_INSN(subx_mem)
+{
+ TCGv src;
+ TCGv addr_src;
+ TCGv dest;
+ TCGv addr_dest;
+ int opsize;
+
+ opsize = insn_opsize(insn);
+
+ addr_src = AREG(insn, 0);
+ tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
+ src = gen_load(s, opsize, addr_src, 1);
+
+ addr_dest = AREG(insn, 9);
+ tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
+ dest = gen_load(s, opsize, addr_dest, 1);
+
+ gen_subx(s, src, dest, opsize);
+
+ gen_store(s, opsize, addr_dest, QREG_CC_N);
+}
+
+DISAS_INSN(mov3q)
+{
+ TCGv src;
+ int val;
+
+ val = (insn >> 9) & 7;
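+ /* mov3q encodes the value -1 as 0 in its 3-bit immediate field. */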
+ if (val == 0)
+ val = -1;
+ src = tcg_const_i32(val);
+ gen_logic_cc(s, src, OS_LONG);
+ DEST_EA(env, insn, OS_LONG, src, NULL);
+}
+
+DISAS_INSN(cmp)
+{
+ TCGv src;
+ TCGv reg;
+ int opsize;
+
+ opsize = insn_opsize(insn);
+ SRC_EA(env, src, opsize, 1, NULL);
+ reg = gen_extend(DREG(insn, 9), opsize, 1);
+ gen_update_cc_cmp(s, reg, src, opsize);
+}
+
+DISAS_INSN(cmpa)
+{
+ int opsize;
+ TCGv src;
+ TCGv reg;
+
+ if (insn & 0x100) {
+ opsize = OS_LONG;
+ } else {
+ opsize = OS_WORD;
+ }
+ SRC_EA(env, src, opsize, 1, NULL);
+ reg = AREG(insn, 9);
+ gen_update_cc_cmp(s, reg, src, OS_LONG);
+}
+
+DISAS_INSN(eor)
+{
+ TCGv src;
+ TCGv dest;
+ TCGv addr;
+ int opsize;
+
+ opsize = insn_opsize(insn);
+
+ SRC_EA(env, src, opsize, 0, &addr);
+ dest = tcg_temp_new();
+ tcg_gen_xor_i32(dest, src, DREG(insn, 9));
+ gen_logic_cc(s, dest, opsize);
+ DEST_EA(env, insn, opsize, dest, &addr);
+}
+
+static void do_exg(TCGv reg1, TCGv reg2)
+{
+ TCGv temp = tcg_temp_new();
+ tcg_gen_mov_i32(temp, reg1);
+ tcg_gen_mov_i32(reg1, reg2);
+ tcg_gen_mov_i32(reg2, temp);
+ tcg_temp_free(temp);
+}
+
+DISAS_INSN(exg_dd)
+{
+ /* exchange Dx and Dy */
+ do_exg(DREG(insn, 9), DREG(insn, 0));
+}
+
+DISAS_INSN(exg_aa)
+{
+ /* exchange Ax and Ay */
+ do_exg(AREG(insn, 9), AREG(insn, 0));
+}
+
+DISAS_INSN(exg_da)
+{
+ /* exchange Dx and Ay */
+ do_exg(DREG(insn, 9), AREG(insn, 0));
+}
+
+DISAS_INSN(and)
+{
+ TCGv src;
+ TCGv reg;
+ TCGv dest;
+ TCGv addr;
+ int opsize;
+
+ dest = tcg_temp_new();
+
+ opsize = insn_opsize(insn);
+ reg = DREG(insn, 9);
+ if (insn & 0x100) {
+ SRC_EA(env, src, opsize, 0, &addr);
+ tcg_gen_and_i32(dest, src, reg);
+ DEST_EA(env, insn, opsize, dest, &addr);
+ } else {
+ SRC_EA(env, src, opsize, 0, NULL);
+ tcg_gen_and_i32(dest, src, reg);
+ gen_partset_reg(opsize, reg, dest);
+ }
+ gen_logic_cc(s, dest, opsize);
+ tcg_temp_free(dest);
+}
+
+DISAS_INSN(adda)
+{
+ TCGv src;
+ TCGv reg;
+
+ SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
+ reg = AREG(insn, 9);
+ tcg_gen_add_i32(reg, reg, src);
+}
+
+static inline void gen_addx(DisasContext *s, TCGv src, TCGv dest, int opsize)
+{
+ TCGv tmp;
+
+ gen_flush_flags(s); /* compute old Z */
+
+ /* Perform addition with carry.
+ * (X, N) = src + dest + X;
+ */
+
+ tmp = tcg_const_i32(0);
+ tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_X, tmp, dest, tmp);
+ tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, tmp);
+ gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
+
+ /* Compute signed-overflow for addition. */
+
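+ /* V = (res ^ src) & ~(dest ^ src): overflow iff both operands share a
+ * sign that the result does not. */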
+ tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
+ tcg_gen_xor_i32(tmp, dest, src);
+ tcg_gen_andc_i32(QREG_CC_V, QREG_CC_V, tmp);
+ tcg_temp_free(tmp);
+
+ /* Copy the rest of the results into place. */
+ tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
+ tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
+
+ set_cc_op(s, CC_OP_FLAGS);
+
+ /* result is in QREG_CC_N */
+}
+
+DISAS_INSN(addx_reg)
+{
+ TCGv dest;
+ TCGv src;
+ int opsize;
+
+ opsize = insn_opsize(insn);
+
+ dest = gen_extend(DREG(insn, 9), opsize, 1);
+ src = gen_extend(DREG(insn, 0), opsize, 1);
+
+ gen_addx(s, src, dest, opsize);
+
+ gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
+}
+
+DISAS_INSN(addx_mem)
+{
+ TCGv src;
+ TCGv addr_src;
+ TCGv dest;
+ TCGv addr_dest;
+ int opsize;
+
+ opsize = insn_opsize(insn);
+
+ addr_src = AREG(insn, 0);
+ tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
+ src = gen_load(s, opsize, addr_src, 1);
+
+ addr_dest = AREG(insn, 9);
+ tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
+ dest = gen_load(s, opsize, addr_dest, 1);
+
+ gen_addx(s, src, dest, opsize);
+
+ gen_store(s, opsize, addr_dest, QREG_CC_N);
+}
+
+/* TODO: This could be implemented without helper functions. */
+DISAS_INSN(shift_im)
+{
+ TCGv reg;
+ int tmp;
+ TCGv shift;
+
+ set_cc_op(s, CC_OP_FLAGS);
+
+ reg = DREG(insn, 0);
+ tmp = (insn >> 9) & 7;
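+ /* A shift-count field of 0 encodes a count of 8. */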
+ if (tmp == 0)
+ tmp = 8;
+ shift = tcg_const_i32(tmp);
+ /* No need to flush flags because we know we will set the C flag. */
+ if (insn & 0x100) {
+ gen_helper_shl_cc(reg, cpu_env, reg, shift);
+ } else {
+ if (insn & 8) {
+ gen_helper_shr_cc(reg, cpu_env, reg, shift);
+ } else {
+ gen_helper_sar_cc(reg, cpu_env, reg, shift);
+ }
+ }
+}
+
+DISAS_INSN(shift_reg)
+{
+ TCGv reg;
+ TCGv shift;
+
+ reg = DREG(insn, 0);
+ shift = DREG(insn, 9);
+ if (insn & 0x100) {
+ gen_helper_shl_cc(reg, cpu_env, reg, shift);
+ } else {
+ if (insn & 8) {
+ gen_helper_shr_cc(reg, cpu_env, reg, shift);
+ } else {
+ gen_helper_sar_cc(reg, cpu_env, reg, shift);
+ }
+ }
+ set_cc_op(s, CC_OP_FLAGS);
+}
+
+DISAS_INSN(ff1)
+{
+ TCGv reg;
+ reg = DREG(insn, 0);
+ gen_logic_cc(s, reg, OS_LONG);
+ gen_helper_ff1(reg, reg);
+}
+
+static TCGv gen_get_sr(DisasContext *s)
+{
+ TCGv ccr;
+ TCGv sr;
+
+ ccr = gen_get_ccr(s);
+ sr = tcg_temp_new();
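+ /* Keep the upper bits of SR and merge in the freshly computed CCR. */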
+ tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
+ tcg_gen_or_i32(sr, sr, ccr);
+ return sr;
+}
+
+DISAS_INSN(strldsr)
+{
+ uint16_t ext;
+ uint32_t addr;
+
+ addr = s->pc - 2;
+ ext = read_im16(env, s);
+ if (ext != 0x46FC) {
+ gen_exception(s, addr, EXCP_UNSUPPORTED);
+ return;
+ }
+ ext = read_im16(env, s);
+ if (IS_USER(s) || (ext & SR_S) == 0) {
+ gen_exception(s, addr, EXCP_PRIVILEGE);
+ return;
+ }
+ gen_push(s, gen_get_sr(s));
+ gen_set_sr_im(s, ext, 0);
+}
+
+DISAS_INSN(move_from_sr)
+{
+ TCGv sr;
+
+ if (IS_USER(s) && !m68k_feature(env, M68K_FEATURE_M68000)) {
+ gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
+ return;
+ }
+ sr = gen_get_sr(s);
+ DEST_EA(env, insn, OS_WORD, sr, NULL);
+}
+
+DISAS_INSN(move_to_sr)
+{
+ if (IS_USER(s)) {
+ gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
+ return;
+ }
+ gen_set_sr(env, s, insn, 0);
+ gen_lookup_tb(s);
+}
+
+DISAS_INSN(move_from_usp)
+{
+ if (IS_USER(s)) {
+ gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
+ return;
+ }
+ tcg_gen_ld_i32(AREG(insn, 0), cpu_env,
+ offsetof(CPUM68KState, sp[M68K_USP]));
+}
+
+DISAS_INSN(move_to_usp)
+{
+ if (IS_USER(s)) {
+ gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
+ return;
+ }
+ tcg_gen_st_i32(AREG(insn, 0), cpu_env,
+ offsetof(CPUM68KState, sp[M68K_USP]));
+}
+
+DISAS_INSN(halt)
+{
+ gen_exception(s, s->pc, EXCP_HALT_INSN);
+}
+
+DISAS_INSN(stop)
+{
+ uint16_t ext;
+
+ if (IS_USER(s)) {
+ gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
+ return;
+ }
+
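+ /* STOP loads SR from the immediate and halts the CPU until an interrupt. */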
+ ext = read_im16(env, s);
+
+ gen_set_sr_im(s, ext, 0);
+ tcg_gen_movi_i32(cpu_halted, 1);
+ gen_exception(s, s->pc, EXCP_HLT);
+}
+
+DISAS_INSN(rte)
+{
+ if (IS_USER(s)) {
+ gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
+ return;
+ }
+ gen_exception(s, s->pc - 2, EXCP_RTE);
+}
+
+DISAS_INSN(movec)
+{
+ uint16_t ext;
+ TCGv reg;
+
+ if (IS_USER(s)) {
+ gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
+ return;
+ }
+
+ ext = read_im16(env, s);
+
+ if (ext & 0x8000) {
+ reg = AREG(ext, 12);
+ } else {
+ reg = DREG(ext, 12);
+ }
+ gen_helper_movec(cpu_env, tcg_const_i32(ext & 0xfff), reg);
+ gen_lookup_tb(s);
+}
+
+DISAS_INSN(intouch)
+{
+ if (IS_USER(s)) {
+ gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
+ return;
+ }
+ /* ICache fetch. Implement as no-op. */
+}
+
+DISAS_INSN(cpushl)
+{
+ if (IS_USER(s)) {
+ gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
+ return;
+ }
+ /* Cache push/invalidate. Implement as no-op. */
+}
+
+DISAS_INSN(wddata)
+{
+ gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
+}
+
+DISAS_INSN(wdebug)
+{
+ M68kCPU *cpu = m68k_env_get_cpu(env);
+
+ if (IS_USER(s)) {
+ gen_exception(s, s->pc - 2, EXCP_PRIVILEGE);
+ return;
+ }
+ /* TODO: Implement wdebug. */
+ cpu_abort(CPU(cpu), "WDEBUG not implemented");
+}
+
+DISAS_INSN(trap)
+{
+ gen_exception(s, s->pc - 2, EXCP_TRAP0 + (insn & 0xf));
+}
+
+/* ??? FP exceptions are not implemented. Most exceptions are deferred until
+ immediately before the next FP instruction is executed. */
+DISAS_INSN(fpu)
+{
+ uint16_t ext;
+ int32_t offset;
+ int opmode;
+ TCGv_i64 src;
+ TCGv_i64 dest;
+ TCGv_i64 res;
+ TCGv tmp32;
+ int round;
+ int set_dest;
+ int opsize;
+
+ ext = read_im16(env, s);
+ opmode = ext & 0x7f;
+ switch ((ext >> 13) & 7) {
+ case 0: case 2:
+ break;
+ case 1:
+ goto undef;
+ case 3: /* fmove out */
+ src = FREG(ext, 7);
+ tmp32 = tcg_temp_new_i32();
+ /* fmove */
+ /* ??? TODO: Proper behavior on overflow. */
+ switch ((ext >> 10) & 7) {
+ case 0:
+ opsize = OS_LONG;
+ gen_helper_f64_to_i32(tmp32, cpu_env, src);
+ break;
+ case 1:
+ opsize = OS_SINGLE;
+ gen_helper_f64_to_f32(tmp32, cpu_env, src);
+ break;
+ case 4:
+ opsize = OS_WORD;
+ gen_helper_f64_to_i32(tmp32, cpu_env, src);
+ break;
+ case 5: /* OS_DOUBLE */
+ tcg_gen_mov_i32(tmp32, AREG(insn, 0));
+ switch ((insn >> 3) & 7) {
+ case 2:
+ case 3:
+ break;
+ case 4:
+ tcg_gen_addi_i32(tmp32, tmp32, -8);
+ break;
+ case 5:
+ offset = cpu_ldsw_code(env, s->pc);
+ s->pc += 2;
+ tcg_gen_addi_i32(tmp32, tmp32, offset);
+ break;
+ default:
+ goto undef;
+ }
+ gen_store64(s, tmp32, src);
+ switch ((insn >> 3) & 7) {
+ case 3:
+ tcg_gen_addi_i32(tmp32, tmp32, 8);
+ tcg_gen_mov_i32(AREG(insn, 0), tmp32);
+ break;
+ case 4:
+ tcg_gen_mov_i32(AREG(insn, 0), tmp32);
+ break;
+ }
+ tcg_temp_free_i32(tmp32);
+ return;
+ case 6:
+ opsize = OS_BYTE;
+ gen_helper_f64_to_i32(tmp32, cpu_env, src);
+ break;
+ default:
+ goto undef;
+ }
+ DEST_EA(env, insn, opsize, tmp32, NULL);
+ tcg_temp_free_i32(tmp32);
+ return;
+ case 4: /* fmove to control register. */
+ switch ((ext >> 10) & 7) {
+ case 4: /* FPCR */
+ /* Not implemented. Ignore writes. */
+ break;
+ case 1: /* FPIAR */
+ case 2: /* FPSR */
+ default:
+ cpu_abort(NULL, "Unimplemented: fmove to control %d",
+ (ext >> 10) & 7);
+ }
+ break;
+ case 5: /* fmove from control register. */
+ switch ((ext >> 10) & 7) {
+ case 4: /* FPCR */
+ /* Not implemented. Always return zero. */
+ tmp32 = tcg_const_i32(0);
+ break;
+ case 1: /* FPIAR */
+ case 2: /* FPSR */
+ default:
+ cpu_abort(NULL, "Unimplemented: fmove from control %d",
+ (ext >> 10) & 7);
+ goto undef;
+ }
+ DEST_EA(env, insn, OS_LONG, tmp32, NULL);
+ break;
+ case 6: /* fmovem */
+ case 7:
+ {
+ TCGv addr;
+ uint16_t mask;
+ int i;
+ if ((ext & 0x1f00) != 0x1000 || (ext & 0xff) == 0)
+ goto undef;
+ tmp32 = gen_lea(env, s, insn, OS_LONG);
+ if (IS_NULL_QREG(tmp32)) {
+ gen_addr_fault(s);
+ return;
+ }
+ addr = tcg_temp_new_i32();
+ tcg_gen_mov_i32(addr, tmp32);
+ mask = 0x80;
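+ /* Bit 7 of the register list selects FP0, bit 0 selects FP7. */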
+ for (i = 0; i < 8; i++) {
+ if (ext & mask) {
+ dest = FREG(i, 0);
+ if (ext & (1 << 13)) {
+ /* store */
+ tcg_gen_qemu_stf64(dest, addr, IS_USER(s));
+ } else {
+ /* load */
+ tcg_gen_qemu_ldf64(dest, addr, IS_USER(s));
+ }
+ if (ext & (mask - 1))
+ tcg_gen_addi_i32(addr, addr, 8);
+ }
+ mask >>= 1;
+ }
+ tcg_temp_free_i32(addr);
+ }
+ return;
+ }
+ if (ext & (1 << 14)) {
+ /* Source effective address. */
+ switch ((ext >> 10) & 7) {
+ case 0: opsize = OS_LONG; break;
+ case 1: opsize = OS_SINGLE; break;
+ case 4: opsize = OS_WORD; break;
+ case 5: opsize = OS_DOUBLE; break;
+ case 6: opsize = OS_BYTE; break;
+ default:
+ goto undef;
+ }
+ if (opsize == OS_DOUBLE) {
+ tmp32 = tcg_temp_new_i32();
+ tcg_gen_mov_i32(tmp32, AREG(insn, 0));
+ switch ((insn >> 3) & 7) {
+ case 2:
+ case 3:
+ break;
+ case 4:
+ tcg_gen_addi_i32(tmp32, tmp32, -8);
+ break;
+ case 5:
+ offset = cpu_ldsw_code(env, s->pc);
+ s->pc += 2;
+ tcg_gen_addi_i32(tmp32, tmp32, offset);
+ break;
+ case 7:
+ offset = cpu_ldsw_code(env, s->pc);
+ offset += s->pc - 2;
+ s->pc += 2;
+ tcg_gen_addi_i32(tmp32, tmp32, offset);
+ break;
+ default:
+ goto undef;
+ }
+ src = gen_load64(s, tmp32);
+ switch ((insn >> 3) & 7) {
+ case 3:
+ tcg_gen_addi_i32(tmp32, tmp32, 8);
+ tcg_gen_mov_i32(AREG(insn, 0), tmp32);
+ break;
+ case 4:
+ tcg_gen_mov_i32(AREG(insn, 0), tmp32);
+ break;
+ }
+ tcg_temp_free_i32(tmp32);
+ } else {
+ SRC_EA(env, tmp32, opsize, 1, NULL);
+ src = tcg_temp_new_i64();
+ switch (opsize) {
+ case OS_LONG:
+ case OS_WORD:
+ case OS_BYTE:
+ gen_helper_i32_to_f64(src, cpu_env, tmp32);
+ break;
+ case OS_SINGLE:
+ gen_helper_f32_to_f64(src, cpu_env, tmp32);
+ break;
+ }
+ }
+ } else {
+ /* Source register. */
+ src = FREG(ext, 10);
+ }
+ dest = FREG(ext, 7);
+ res = tcg_temp_new_i64();
+ if (opmode != 0x3a)
+ tcg_gen_mov_f64(res, dest);
+ round = 1;
+ set_dest = 1;
+ switch (opmode) {
+ case 0: case 0x40: case 0x44: /* fmove */
+ tcg_gen_mov_f64(res, src);
+ break;
+ case 1: /* fint */
+ gen_helper_iround_f64(res, cpu_env, src);
+ round = 0;
+ break;
+ case 3: /* fintrz */
+ gen_helper_itrunc_f64(res, cpu_env, src);
+ round = 0;
+ break;
+ case 4: case 0x41: case 0x45: /* fsqrt */
+ gen_helper_sqrt_f64(res, cpu_env, src);
+ break;
+ case 0x18: case 0x58: case 0x5c: /* fabs */
+ gen_helper_abs_f64(res, src);
+ break;
+ case 0x1a: case 0x5a: case 0x5e: /* fneg */
+ gen_helper_chs_f64(res, src);
+ break;
+ case 0x20: case 0x60: case 0x64: /* fdiv */
+ gen_helper_div_f64(res, cpu_env, res, src);
+ break;
+ case 0x22: case 0x62: case 0x66: /* fadd */
+ gen_helper_add_f64(res, cpu_env, res, src);
+ break;
+ case 0x23: case 0x63: case 0x67: /* fmul */
+ gen_helper_mul_f64(res, cpu_env, res, src);
+ break;
+ case 0x28: case 0x68: case 0x6c: /* fsub */
+ gen_helper_sub_f64(res, cpu_env, res, src);
+ break;
+ case 0x38: /* fcmp */
+ gen_helper_sub_cmp_f64(res, cpu_env, res, src);
+ set_dest = 0;
+ round = 0;
+ break;
+ case 0x3a: /* ftst */
+ tcg_gen_mov_f64(res, src);
+ set_dest = 0;
+ round = 0;
+ break;
+ default:
+ goto undef;
+ }
+ if (ext & (1 << 14)) {
+ tcg_temp_free_i64(src);
+ }
+ if (round) {
+ if (opmode & 0x40) {
+ if ((opmode & 0x4) != 0)
+ round = 0;
+ } else if ((s->fpcr & M68K_FPCR_PREC) == 0) {
+ round = 0;
+ }
+ }
+ if (round) {
+ TCGv tmp = tcg_temp_new_i32();
+ gen_helper_f64_to_f32(tmp, cpu_env, res);
+ gen_helper_f32_to_f64(res, cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+ }
+ tcg_gen_mov_f64(QREG_FP_RESULT, res);
+ if (set_dest) {
+ tcg_gen_mov_f64(dest, res);
+ }
+ tcg_temp_free_i64(res);
+ return;
+undef:
+ /* FIXME: Is this right for offset addressing modes? */
+ s->pc -= 2;
+ disas_undef_fpu(env, s, insn);
+}
+
+DISAS_INSN(fbcc)
+{
+ uint32_t offset;
+ uint32_t addr;
+ TCGv flag;
+ TCGLabel *l1;
+
+ addr = s->pc;
+ offset = cpu_ldsw_code(env, s->pc);
+ s->pc += 2;
+ if (insn & (1 << 6)) {
+ offset = (offset << 16) | read_im16(env, s);
+ }
+
+ l1 = gen_new_label();
+ /* TODO: Raise BSUN exception. */
+ flag = tcg_temp_new();
+ gen_helper_compare_f64(flag, cpu_env, QREG_FP_RESULT);
+ /* Jump to l1 if condition is true. */
+ switch (insn & 0xf) {
+ case 0: /* f */
+ break;
+ case 1: /* eq (=0) */
+ tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1);
+ break;
+ case 2: /* ogt (=1) */
+ tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(1), l1);
+ break;
+ case 3: /* oge (=0 or =1) */
+ tcg_gen_brcond_i32(TCG_COND_LEU, flag, tcg_const_i32(1), l1);
+ break;
+ case 4: /* olt (=-1) */
+ tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(0), l1);
+ break;
+ case 5: /* ole (=-1 or =0) */
+ tcg_gen_brcond_i32(TCG_COND_LE, flag, tcg_const_i32(0), l1);
+ break;
+ case 6: /* ogl (=-1 or =1) */
+ tcg_gen_andi_i32(flag, flag, 1);
+ tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1);
+ break;
+ case 7: /* or (=2) */
+ tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(2), l1);
+ break;
+ case 8: /* un (<2) */
+ tcg_gen_brcond_i32(TCG_COND_LT, flag, tcg_const_i32(2), l1);
+ break;
+ case 9: /* ueq (=0 or =2) */
+ tcg_gen_andi_i32(flag, flag, 1);
+ tcg_gen_brcond_i32(TCG_COND_EQ, flag, tcg_const_i32(0), l1);
+ break;
+ case 10: /* ugt (>0) */
+ tcg_gen_brcond_i32(TCG_COND_GT, flag, tcg_const_i32(0), l1);
+ break;
+ case 11: /* uge (>=0) */
+ tcg_gen_brcond_i32(TCG_COND_GE, flag, tcg_const_i32(0), l1);
+ break;
+ case 12: /* ult (=-1 or =2) */
+ tcg_gen_brcond_i32(TCG_COND_GEU, flag, tcg_const_i32(2), l1);
+ break;
+ case 13: /* ule (!=1) */
+ tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(1), l1);
+ break;
+ case 14: /* ne (!=0) */
+ tcg_gen_brcond_i32(TCG_COND_NE, flag, tcg_const_i32(0), l1);
+ break;
+ case 15: /* t */
+ tcg_gen_br(l1);
+ break;
+ }
+ gen_jmp_tb(s, 0, s->pc);
+ gen_set_label(l1);
+ gen_jmp_tb(s, 1, addr + offset);
+}
+
+DISAS_INSN(frestore)
+{
+ M68kCPU *cpu = m68k_env_get_cpu(env);
+
+ /* TODO: Implement frestore. */
+ cpu_abort(CPU(cpu), "FRESTORE not implemented");
+}
+
+DISAS_INSN(fsave)
+{
+ M68kCPU *cpu = m68k_env_get_cpu(env);
+
+ /* TODO: Implement fsave. */
+ cpu_abort(CPU(cpu), "FSAVE not implemented");
+}
+
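+ /* Extract one 16-bit MAC operand from a 32-bit register: fractional mode
+ * (MACSR_FI) left-aligns it, signed mode (MACSR_SU) sign-extends it,
+ * otherwise it is zero-extended. */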
+static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
+{
+ TCGv tmp = tcg_temp_new();
+ if (s->env->macsr & MACSR_FI) {
+ if (upper)
+ tcg_gen_andi_i32(tmp, val, 0xffff0000);
+ else
+ tcg_gen_shli_i32(tmp, val, 16);
+ } else if (s->env->macsr & MACSR_SU) {
+ if (upper)
+ tcg_gen_sari_i32(tmp, val, 16);
+ else
+ tcg_gen_ext16s_i32(tmp, val);
+ } else {
+ if (upper)
+ tcg_gen_shri_i32(tmp, val, 16);
+ else
+ tcg_gen_ext16u_i32(tmp, val);
+ }
+ return tmp;
+}
+
+static void gen_mac_clear_flags(void)
+{
+ tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
+ ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
+}
+
+DISAS_INSN(mac)
+{
+ TCGv rx;
+ TCGv ry;
+ uint16_t ext;
+ int acc;
+ TCGv tmp;
+ TCGv addr;
+ TCGv loadval;
+ int dual;
+ TCGv saved_flags;
+
+ if (!s->done_mac) {
+ s->mactmp = tcg_temp_new_i64();
+ s->done_mac = 1;
+ }
+
+ ext = read_im16(env, s);
+
+ acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
+ dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
+ if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
+ disas_undef(env, s, insn);
+ return;
+ }
+ if (insn & 0x30) {
+ /* MAC with load. */
+ tmp = gen_lea(env, s, insn, OS_LONG);
+ addr = tcg_temp_new();
+ tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
+ /* Load the value now to ensure correct exception behavior.
+ Perform writeback after reading the MAC inputs. */
+ loadval = gen_load(s, OS_LONG, addr, 0);
+
+ acc ^= 1;
+ rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
+ ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
+ } else {
+ loadval = addr = NULL_QREG;
+ rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
+ ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
+ }
+
+ gen_mac_clear_flags();
+#if 0
+ l1 = -1;
+ /* Disabled because conditional branches clobber temporary vars. */
+ if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
+ /* Skip the multiply if we know we will ignore it. */
+ l1 = gen_new_label();
+ tmp = tcg_temp_new();
+ tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
+ gen_op_jmp_nz32(tmp, l1);
+ }
+#endif
+
+ if ((ext & 0x0800) == 0) {
+ /* Word. */
+ rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
+ ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
+ }
+ if (s->env->macsr & MACSR_FI) {
+ gen_helper_macmulf(s->mactmp, cpu_env, rx, ry);
+ } else {
+ if (s->env->macsr & MACSR_SU)
+ gen_helper_macmuls(s->mactmp, cpu_env, rx, ry);
+ else
+ gen_helper_macmulu(s->mactmp, cpu_env, rx, ry);
+ switch ((ext >> 9) & 3) {
+ case 1:
+ tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
+ break;
+ case 3:
+ tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
+ break;
+ }
+ }
+
+ if (dual) {
+ /* Save the overflow flag from the multiply. */
+ saved_flags = tcg_temp_new();
+ tcg_gen_mov_i32(saved_flags, QREG_MACSR);
+ } else {
+ saved_flags = NULL_QREG;
+ }
+
+#if 0
+ /* Disabled because conditional branches clobber temporary vars. */
+ if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
+ /* Skip the accumulate if the value is already saturated. */
+ l1 = gen_new_label();
+ tmp = tcg_temp_new();
+ gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
+ gen_op_jmp_nz32(tmp, l1);
+ }
+#endif
+
+ if (insn & 0x100)
+ tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
+ else
+ tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
+
+ if (s->env->macsr & MACSR_FI)
+ gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
+ else if (s->env->macsr & MACSR_SU)
+ gen_helper_macsats(cpu_env, tcg_const_i32(acc));
+ else
+ gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
+
+#if 0
+ /* Disabled because conditional branches clobber temporary vars. */
+ if (l1 != -1)
+ gen_set_label(l1);
+#endif
+
+ if (dual) {
+ /* Dual accumulate variant. */
+ acc = (ext >> 2) & 3;
+ /* Restore the overflow flag from the multiplier. */
+ tcg_gen_mov_i32(QREG_MACSR, saved_flags);
+#if 0
+ /* Disabled because conditional branches clobber temporary vars. */
+ if ((s->env->macsr & MACSR_OMC) != 0) {
+ /* Skip the accumulate if the value is already saturated. */
+ l1 = gen_new_label();
+ tmp = tcg_temp_new();
+ gen_op_and32(tmp, QREG_MACSR, tcg_const_i32(MACSR_PAV0 << acc));
+ gen_op_jmp_nz32(tmp, l1);
+ }
+#endif
+ if (ext & 2)
+ tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
+ else
+ tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
+ if (s->env->macsr & MACSR_FI)
+ gen_helper_macsatf(cpu_env, tcg_const_i32(acc));
+ else if (s->env->macsr & MACSR_SU)
+ gen_helper_macsats(cpu_env, tcg_const_i32(acc));
+ else
+ gen_helper_macsatu(cpu_env, tcg_const_i32(acc));
+#if 0
+ /* Disabled because conditional branches clobber temporary vars. */
+ if (l1 != -1)
+ gen_set_label(l1);
+#endif
+ }
+ gen_helper_mac_set_flags(cpu_env, tcg_const_i32(acc));
+
+ if (insn & 0x30) {
+ TCGv rw;
+ rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
+ tcg_gen_mov_i32(rw, loadval);
+ /* FIXME: Should address writeback happen with the masked or
+ unmasked value? */
+ switch ((insn >> 3) & 7) {
+ case 3: /* Post-increment. */
+ tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
+ break;
+ case 4: /* Pre-decrement. */
+ tcg_gen_mov_i32(AREG(insn, 0), addr);
+ }
+ }
+}
+
+DISAS_INSN(from_mac)
+{
+ TCGv rx;
+ TCGv_i64 acc;
+ int accnum;
+
+ rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
+ accnum = (insn >> 9) & 3;
+ acc = MACREG(accnum);
+ if (s->env->macsr & MACSR_FI) {
+ gen_helper_get_macf(rx, cpu_env, acc);
+ } else if ((s->env->macsr & MACSR_OMC) == 0) {
+ tcg_gen_extrl_i64_i32(rx, acc);
+ } else if (s->env->macsr & MACSR_SU) {
+ gen_helper_get_macs(rx, acc);
+ } else {
+ gen_helper_get_macu(rx, acc);
+ }
+ if (insn & 0x40) {
+ tcg_gen_movi_i64(acc, 0);
+ tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
+ }
+}
+
+DISAS_INSN(move_mac)
+{
+ /* FIXME: This can be done without a helper. */
+ int src;
+ TCGv dest;
+ src = insn & 3;
+ dest = tcg_const_i32((insn >> 9) & 3);
+ gen_helper_mac_move(cpu_env, dest, tcg_const_i32(src));
+ gen_mac_clear_flags();
+ gen_helper_mac_set_flags(cpu_env, dest);
+}
+
+DISAS_INSN(from_macsr)
+{
+ TCGv reg;
+
+ reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
+ tcg_gen_mov_i32(reg, QREG_MACSR);
+}
+
+DISAS_INSN(from_mask)
+{
+ TCGv reg;
+ reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
+ tcg_gen_mov_i32(reg, QREG_MAC_MASK);
+}
+
+DISAS_INSN(from_mext)
+{
+ TCGv reg;
+ TCGv acc;
+ reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
+ acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
+ if (s->env->macsr & MACSR_FI)
+ gen_helper_get_mac_extf(reg, cpu_env, acc);
+ else
+ gen_helper_get_mac_exti(reg, cpu_env, acc);
+}
+
+DISAS_INSN(macsr_to_ccr)
+{
+ TCGv tmp = tcg_temp_new();
+ tcg_gen_andi_i32(tmp, QREG_MACSR, 0xf);
+ gen_helper_set_sr(cpu_env, tmp);
+ tcg_temp_free(tmp);
+ set_cc_op(s, CC_OP_FLAGS);
+}
+
+DISAS_INSN(to_mac)
+{
+ TCGv_i64 acc;
+ TCGv val;
+ int accnum;
+ accnum = (insn >> 9) & 3;
+ acc = MACREG(accnum);
+ SRC_EA(env, val, OS_LONG, 0, NULL);
+ if (s->env->macsr & MACSR_FI) {
+ tcg_gen_ext_i32_i64(acc, val);
+ tcg_gen_shli_i64(acc, acc, 8);
+ } else if (s->env->macsr & MACSR_SU) {
+ tcg_gen_ext_i32_i64(acc, val);
+ } else {
+ tcg_gen_extu_i32_i64(acc, val);
+ }
+ tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
+ gen_mac_clear_flags();
+ gen_helper_mac_set_flags(cpu_env, tcg_const_i32(accnum));
+}
+
+DISAS_INSN(to_macsr)
+{
+ TCGv val;
+ SRC_EA(env, val, OS_LONG, 0, NULL);
+ gen_helper_set_macsr(cpu_env, val);
+ gen_lookup_tb(s);
+}
+
+DISAS_INSN(to_mask)
+{
+ TCGv val;
+ SRC_EA(env, val, OS_LONG, 0, NULL);
+ tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
+}
+
+DISAS_INSN(to_mext)
+{
+ TCGv val;
+ TCGv acc;
+ SRC_EA(env, val, OS_LONG, 0, NULL);
+ acc = tcg_const_i32((insn & 0x400) ? 2 : 0);
+ if (s->env->macsr & MACSR_FI)
+ gen_helper_set_mac_extf(cpu_env, val, acc);
+ else if (s->env->macsr & MACSR_SU)
+ gen_helper_set_mac_exts(cpu_env, val, acc);
+ else
+ gen_helper_set_mac_extu(cpu_env, val, acc);
+}
+
+static disas_proc opcode_table[65536];
+
+static void
+register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask)
+{
+ int i;
+ int from;
+ int to;
+
+ /* Sanity check. All set bits must be included in the mask. */
+ if (opcode & ~mask) {
+ fprintf(stderr,
+ "qemu internal error: bogus opcode definition %04x/%04x\n",
+ opcode, mask);
+ abort();
+ }
+ /* This could probably be cleverer. For now just optimize the case where
+ the top bits are known. */
+ /* Find the first zero bit in the mask. */
+ i = 0x8000;
+ while ((i & mask) != 0)
+ i >>= 1;
+ /* Iterate over all combinations of this and lower bits. */
+ if (i == 0)
+ i = 1;
+ else
+ i <<= 1;
+ from = opcode & ~(i - 1);
+ to = from + i;
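+ /* Example: opcode 0x4840 with mask 0xfff8 gives i = 8, from = 0x4840 and
+ * to = 0x4848, so entries 0x4840..0x4847 all map to the same handler. */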
+ for (i = from; i < to; i++) {
+ if ((i & mask) == opcode)
+ opcode_table[i] = proc;
+ }
+}
+
+/* Register m68k opcode handlers. Order is important.
+ Later insns override earlier ones. */
+void register_m68k_insns (CPUM68KState *env)
+{
+ /* Build the opcode table only once to avoid
+ multithreading issues. */
+ if (opcode_table[0] != NULL) {
+ return;
+ }
+
+ /* Use BASE() for instructions available
+ * for both CF_ISA_A and M68000.
+ */
+#define BASE(name, opcode, mask) \
+ register_opcode(disas_##name, 0x##opcode, 0x##mask)
+#define INSN(name, opcode, mask, feature) do { \
+ if (m68k_feature(env, M68K_FEATURE_##feature)) \
+ BASE(name, opcode, mask); \
+ } while(0)
+ BASE(undef, 0000, 0000);
+ INSN(arith_im, 0080, fff8, CF_ISA_A);
+ INSN(arith_im, 0000, ff00, M68000);
+ INSN(undef, 00c0, ffc0, M68000);
+ INSN(bitrev, 00c0, fff8, CF_ISA_APLUSC);
+ BASE(bitop_reg, 0100, f1c0);
+ BASE(bitop_reg, 0140, f1c0);
+ BASE(bitop_reg, 0180, f1c0);
+ BASE(bitop_reg, 01c0, f1c0);
+ INSN(arith_im, 0280, fff8, CF_ISA_A);
+ INSN(arith_im, 0200, ff00, M68000);
+ INSN(undef, 02c0, ffc0, M68000);
+ INSN(byterev, 02c0, fff8, CF_ISA_APLUSC);
+ INSN(arith_im, 0480, fff8, CF_ISA_A);
+ INSN(arith_im, 0400, ff00, M68000);
+ INSN(undef, 04c0, ffc0, M68000);
+ INSN(arith_im, 0600, ff00, M68000);
+ INSN(undef, 06c0, ffc0, M68000);
+ INSN(ff1, 04c0, fff8, CF_ISA_APLUSC);
+ INSN(arith_im, 0680, fff8, CF_ISA_A);
+ INSN(arith_im, 0c00, ff38, CF_ISA_A);
+ INSN(arith_im, 0c00, ff00, M68000);
+ BASE(bitop_im, 0800, ffc0);
+ BASE(bitop_im, 0840, ffc0);
+ BASE(bitop_im, 0880, ffc0);
+ BASE(bitop_im, 08c0, ffc0);
+ INSN(arith_im, 0a80, fff8, CF_ISA_A);
+ INSN(arith_im, 0a00, ff00, M68000);
+ BASE(move, 1000, f000);
+ BASE(move, 2000, f000);
+ BASE(move, 3000, f000);
+ INSN(strldsr, 40e7, ffff, CF_ISA_APLUSC);
+ INSN(negx, 4080, fff8, CF_ISA_A);
+ INSN(negx, 4000, ff00, M68000);
+ INSN(undef, 40c0, ffc0, M68000);
+ INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
+ INSN(move_from_sr, 40c0, ffc0, M68000);
+ BASE(lea, 41c0, f1c0);
+ BASE(clr, 4200, ff00);
+ BASE(undef, 42c0, ffc0);
+ INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
+ INSN(move_from_ccr, 42c0, ffc0, M68000);
+ INSN(neg, 4480, fff8, CF_ISA_A);
+ INSN(neg, 4400, ff00, M68000);
+ INSN(undef, 44c0, ffc0, M68000);
+ BASE(move_to_ccr, 44c0, ffc0);
+ INSN(not, 4680, fff8, CF_ISA_A);
+ INSN(not, 4600, ff00, M68000);
+ INSN(undef, 46c0, ffc0, M68000);
+ INSN(move_to_sr, 46c0, ffc0, CF_ISA_A);
+ INSN(linkl, 4808, fff8, M68000);
+ BASE(pea, 4840, ffc0);
+ BASE(swap, 4840, fff8);
+ INSN(bkpt, 4848, fff8, BKPT);
+ BASE(movem, 48c0, fbc0);
+ BASE(ext, 4880, fff8);
+ BASE(ext, 48c0, fff8);
+ BASE(ext, 49c0, fff8);
+ BASE(tst, 4a00, ff00);
+ INSN(tas, 4ac0, ffc0, CF_ISA_B);
+ INSN(tas, 4ac0, ffc0, M68000);
+ INSN(halt, 4ac8, ffff, CF_ISA_A);
+ INSN(pulse, 4acc, ffff, CF_ISA_A);
+ BASE(illegal, 4afc, ffff);
+ INSN(mull, 4c00, ffc0, CF_ISA_A);
+ INSN(mull, 4c00, ffc0, LONG_MULDIV);
+ INSN(divl, 4c40, ffc0, CF_ISA_A);
+ INSN(divl, 4c40, ffc0, LONG_MULDIV);
+ INSN(sats, 4c80, fff8, CF_ISA_B);
+ BASE(trap, 4e40, fff0);
+ BASE(link, 4e50, fff8);
+ BASE(unlk, 4e58, fff8);
+ INSN(move_to_usp, 4e60, fff8, USP);
+ INSN(move_from_usp, 4e68, fff8, USP);
+ BASE(nop, 4e71, ffff);
+ BASE(stop, 4e72, ffff);
+ BASE(rte, 4e73, ffff);
+ BASE(rts, 4e75, ffff);
+ INSN(movec, 4e7b, ffff, CF_ISA_A);
+ BASE(jump, 4e80, ffc0);
+ BASE(jump, 4ec0, ffc0);
+ INSN(addsubq, 5000, f080, M68000);
+ BASE(addsubq, 5080, f0c0);
+ INSN(scc, 50c0, f0f8, CF_ISA_A); /* Scc.B Dx */
+ INSN(scc, 50c0, f0c0, M68000); /* Scc.B <EA> */
+ INSN(dbcc, 50c8, f0f8, M68000);
+ INSN(tpf, 51f8, fff8, CF_ISA_A);
+
+ /* Branch instructions. */
+ BASE(branch, 6000, f000);
+ /* Disable long branch instructions, then add back the ones we want. */
+ BASE(undef, 60ff, f0ff); /* All long branches. */
+ INSN(branch, 60ff, f0ff, CF_ISA_B);
+ INSN(undef, 60ff, ffff, CF_ISA_B); /* bra.l */
+ INSN(branch, 60ff, ffff, BRAL);
+ INSN(branch, 60ff, f0ff, BCCL);
+
+ BASE(moveq, 7000, f100);
+ INSN(mvzs, 7100, f100, CF_ISA_B);
+ BASE(or, 8000, f000);
+ BASE(divw, 80c0, f0c0);
+ BASE(addsub, 9000, f000);
+ INSN(undef, 90c0, f0c0, CF_ISA_A);
+ INSN(subx_reg, 9180, f1f8, CF_ISA_A);
+ INSN(subx_reg, 9100, f138, M68000);
+ INSN(subx_mem, 9108, f138, M68000);
+ INSN(suba, 91c0, f1c0, CF_ISA_A);
+ INSN(suba, 90c0, f0c0, M68000);
+
+ BASE(undef_mac, a000, f000);
+ INSN(mac, a000, f100, CF_EMAC);
+ INSN(from_mac, a180, f9b0, CF_EMAC);
+ INSN(move_mac, a110, f9fc, CF_EMAC);
+ INSN(from_macsr,a980, f9f0, CF_EMAC);
+ INSN(from_mask, ad80, fff0, CF_EMAC);
+ INSN(from_mext, ab80, fbf0, CF_EMAC);
+ INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
+ INSN(to_mac, a100, f9c0, CF_EMAC);
+ INSN(to_macsr, a900, ffc0, CF_EMAC);
+ INSN(to_mext, ab00, fbc0, CF_EMAC);
+ INSN(to_mask, ad00, ffc0, CF_EMAC);
+
+ INSN(mov3q, a140, f1c0, CF_ISA_B);
+ INSN(cmp, b000, f1c0, CF_ISA_B); /* cmp.b */
+ INSN(cmp, b040, f1c0, CF_ISA_B); /* cmp.w */
+ INSN(cmpa, b0c0, f1c0, CF_ISA_B); /* cmpa.w */
+ INSN(cmp, b080, f1c0, CF_ISA_A);
+ INSN(cmpa, b1c0, f1c0, CF_ISA_A);
+ INSN(cmp, b000, f100, M68000);
+ INSN(eor, b100, f100, M68000);
+ INSN(cmpa, b0c0, f0c0, M68000);
+ INSN(eor, b180, f1c0, CF_ISA_A);
+ BASE(and, c000, f000);
+ INSN(exg_dd, c140, f1f8, M68000);
+ INSN(exg_aa, c148, f1f8, M68000);
+ INSN(exg_da, c188, f1f8, M68000);
+ BASE(mulw, c0c0, f0c0);
+ BASE(addsub, d000, f000);
+ INSN(undef, d0c0, f0c0, CF_ISA_A);
+ INSN(addx_reg, d180, f1f8, CF_ISA_A);
+ INSN(addx_reg, d100, f138, M68000);
+ INSN(addx_mem, d108, f138, M68000);
+ INSN(adda, d1c0, f1c0, CF_ISA_A);
+ INSN(adda, d0c0, f0c0, M68000);
+ INSN(shift_im, e080, f0f0, CF_ISA_A);
+ INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
+ INSN(undef_fpu, f000, f000, CF_ISA_A);
+ INSN(fpu, f200, ffc0, CF_FPU);
+ INSN(fbcc, f280, ffc0, CF_FPU);
+ INSN(frestore, f340, ffc0, CF_FPU);
+ INSN(fsave, f340, ffc0, CF_FPU);
+ INSN(intouch, f340, ffc0, CF_ISA_A);
+ INSN(cpushl, f428, ff38, CF_ISA_A);
+ INSN(wddata, fb00, ff00, CF_ISA_A);
+ INSN(wdebug, fbc0, ffc0, CF_ISA_A);
+#undef INSN
+}
+
+/* ??? Some of this implementation is not exception safe. We should always
+ write back the result to memory before setting the condition codes. */
+static void disas_m68k_insn(CPUM68KState * env, DisasContext *s)
+{
+ uint16_t insn;
+
+ insn = read_im16(env, s);
+
+ opcode_table[insn](env, s, insn);
+}
+
+/* generate intermediate code for basic block 'tb'. */
+void gen_intermediate_code(CPUM68KState *env, TranslationBlock *tb)
+{
+ M68kCPU *cpu = m68k_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ DisasContext dc1, *dc = &dc1;
+ target_ulong pc_start;
+ int pc_offset;
+ int num_insns;
+ int max_insns;
+
+ /* generate intermediate code */
+ pc_start = tb->pc;
+
+ dc->tb = tb;
+
+ dc->env = env;
+ dc->is_jmp = DISAS_NEXT;
+ dc->pc = pc_start;
+ dc->cc_op = CC_OP_DYNAMIC;
+ dc->cc_op_synced = 1;
+ dc->singlestep_enabled = cs->singlestep_enabled;
+ dc->fpcr = env->fpcr;
+ dc->user = (env->sr & SR_S) == 0;
+ dc->done_mac = 0;
+ num_insns = 0;
+ max_insns = tb->cflags & CF_COUNT_MASK;
+ if (max_insns == 0) {
+ max_insns = CF_COUNT_MASK;
+ }
+ if (max_insns > TCG_MAX_INSNS) {
+ max_insns = TCG_MAX_INSNS;
+ }
+
+ gen_tb_start(tb);
+ do {
+ pc_offset = dc->pc - pc_start;
+ gen_throws_exception = NULL;
+ tcg_gen_insn_start(dc->pc, dc->cc_op);
+ num_insns++;
+
+ if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
+ gen_exception(dc, dc->pc, EXCP_DEBUG);
+ dc->is_jmp = DISAS_JUMP;
+ /* The address covered by the breakpoint must be included in
+ [tb->pc, tb->pc + tb->size) in order for it to be
+ properly cleared -- thus we increment the PC here so that
+ the logic setting tb->size below does the right thing. */
+ dc->pc += 2;
+ break;
+ }
+
+ if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ gen_io_start();
+ }
+
+ dc->insn_pc = dc->pc;
+ disas_m68k_insn(env, dc);
+ } while (!dc->is_jmp && !tcg_op_buf_full() &&
+ !cs->singlestep_enabled &&
+ !singlestep &&
+ (pc_offset) < (TARGET_PAGE_SIZE - 32) &&
+ num_insns < max_insns);
+
+ if (tb->cflags & CF_LAST_IO)
+ gen_io_end();
+ if (unlikely(cs->singlestep_enabled)) {
+ /* Make sure the pc is updated, and raise a debug exception. */
+ if (!dc->is_jmp) {
+ update_cc_op(dc);
+ tcg_gen_movi_i32(QREG_PC, dc->pc);
+ }
+ gen_helper_raise_exception(cpu_env, tcg_const_i32(EXCP_DEBUG));
+ } else {
+ switch(dc->is_jmp) {
+ case DISAS_NEXT:
+ update_cc_op(dc);
+ gen_jmp_tb(dc, 0, dc->pc);
+ break;
+ default:
+ case DISAS_JUMP:
+ case DISAS_UPDATE:
+ update_cc_op(dc);
+ /* indicate that the hash table must be used to find the next TB */
+ tcg_gen_exit_tb(0);
+ break;
+ case DISAS_TB_JUMP:
+ /* nothing more to generate */
+ break;
+ }
+ }
+ gen_tb_end(tb, num_insns);
+
+#ifdef DEBUG_DISAS
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
+ && qemu_log_in_addr_range(pc_start)) {
+ qemu_log_lock();
+ qemu_log("----------------\n");
+ qemu_log("IN: %s\n", lookup_symbol(pc_start));
+ log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
+ qemu_log("\n");
+ qemu_log_unlock();
+ }
+#endif
+ tb->size = dc->pc - pc_start;
+ tb->icount = num_insns;
+}
+
+void m68k_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
+ int flags)
+{
+ M68kCPU *cpu = M68K_CPU(cs);
+ CPUM68KState *env = &cpu->env;
+ int i;
+ uint16_t sr;
+ CPU_DoubleU u;
+ for (i = 0; i < 8; i++)
+ {
+ u.d = env->fregs[i];
+ cpu_fprintf(f, "D%d = %08x A%d = %08x F%d = %08x%08x (%12g)\n",
+ i, env->dregs[i], i, env->aregs[i],
+ i, u.l.upper, u.l.lower, *(double *)&u.d);
+ }
+ cpu_fprintf (f, "PC = %08x ", env->pc);
+ sr = env->sr | cpu_m68k_get_ccr(env);
+ cpu_fprintf(f, "SR = %04x %c%c%c%c%c ", sr, (sr & CCF_X) ? 'X' : '-',
+ (sr & CCF_N) ? 'N' : '-', (sr & CCF_Z) ? 'Z' : '-',
+ (sr & CCF_V) ? 'V' : '-', (sr & CCF_C) ? 'C' : '-');
+ cpu_fprintf (f, "FPRESULT = %12g\n", *(double *)&env->fp_result);
+}
+
+void restore_state_to_opc(CPUM68KState *env, TranslationBlock *tb,
+ target_ulong *data)
+{
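+ /* data[] holds the values recorded by tcg_gen_insn_start(): pc and cc_op. */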
+ int cc_op = data[1];
+ env->pc = data[0];
+ if (cc_op != CC_OP_DYNAMIC) {
+ env->cc_op = cc_op;
+ }
+}
diff --git a/target/microblaze/Makefile.objs b/target/microblaze/Makefile.objs
new file mode 100644
index 0000000000..f3d7b44c89
--- /dev/null
+++ b/target/microblaze/Makefile.objs
@@ -0,0 +1,3 @@
+obj-y += translate.o op_helper.o helper.o cpu.o
+obj-y += gdbstub.o
+obj-$(CONFIG_SOFTMMU) += mmu.o
diff --git a/target/microblaze/cpu-qom.h b/target/microblaze/cpu-qom.h
new file mode 100644
index 0000000000..1a61db77d0
--- /dev/null
+++ b/target/microblaze/cpu-qom.h
@@ -0,0 +1,52 @@
+/*
+ * QEMU MicroBlaze CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+#ifndef QEMU_MICROBLAZE_CPU_QOM_H
+#define QEMU_MICROBLAZE_CPU_QOM_H
+
+#include "qom/cpu.h"
+
+#define TYPE_MICROBLAZE_CPU "microblaze-cpu"
+
+#define MICROBLAZE_CPU_CLASS(klass) \
+ OBJECT_CLASS_CHECK(MicroBlazeCPUClass, (klass), TYPE_MICROBLAZE_CPU)
+#define MICROBLAZE_CPU(obj) \
+ OBJECT_CHECK(MicroBlazeCPU, (obj), TYPE_MICROBLAZE_CPU)
+#define MICROBLAZE_CPU_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(MicroBlazeCPUClass, (obj), TYPE_MICROBLAZE_CPU)
+
+/**
+ * MicroBlazeCPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ *
+ * A MicroBlaze CPU model.
+ */
+typedef struct MicroBlazeCPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+ void (*parent_reset)(CPUState *cpu);
+} MicroBlazeCPUClass;
+
+typedef struct MicroBlazeCPU MicroBlazeCPU;
+
+#endif
diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
new file mode 100644
index 0000000000..389c7b691e
--- /dev/null
+++ b/target/microblaze/cpu.c
@@ -0,0 +1,292 @@
+/*
+ * QEMU MicroBlaze CPU
+ *
+ * Copyright (c) 2009 Edgar E. Iglesias
+ * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ * Copyright (c) 2009 Edgar E. Iglesias, Axis Communications AB.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "qemu-common.h"
+#include "hw/qdev-properties.h"
+#include "migration/vmstate.h"
+#include "exec/exec-all.h"
+
+static const struct {
+ const char *name;
+ uint8_t version_id;
+} mb_cpu_lookup[] = {
+ /* These key values are as per the MBV field in PVR0 */
+ {"5.00.a", 0x01},
+ {"5.00.b", 0x02},
+ {"5.00.c", 0x03},
+ {"6.00.a", 0x04},
+ {"6.00.b", 0x06},
+ {"7.00.a", 0x05},
+ {"7.00.b", 0x07},
+ {"7.10.a", 0x08},
+ {"7.10.b", 0x09},
+ {"7.10.c", 0x0a},
+ {"7.10.d", 0x0b},
+ {"7.20.a", 0x0c},
+ {"7.20.b", 0x0d},
+ {"7.20.c", 0x0e},
+ {"7.20.d", 0x0f},
+ {"7.30.a", 0x10},
+ {"7.30.b", 0x11},
+ {"8.00.a", 0x12},
+ {"8.00.b", 0x13},
+ {"8.10.a", 0x14},
+ {"8.20.a", 0x15},
+ {"8.20.b", 0x16},
+ {"8.30.a", 0x17},
+ {"8.40.a", 0x18},
+ {"8.40.b", 0x19},
+ {"8.50.a", 0x1A},
+ {"9.0", 0x1B},
+ {"9.1", 0x1D},
+ {"9.2", 0x1F},
+ {"9.3", 0x20},
+ {NULL, 0},
+};
+
+static void mb_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
+
+ cpu->env.sregs[SR_PC] = value;
+}
+
+static bool mb_cpu_has_work(CPUState *cs)
+{
+ return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI);
+}
+
+#ifndef CONFIG_USER_ONLY
+static void microblaze_cpu_set_irq(void *opaque, int irq, int level)
+{
+ MicroBlazeCPU *cpu = opaque;
+ CPUState *cs = CPU(cpu);
+ int type = irq ? CPU_INTERRUPT_NMI : CPU_INTERRUPT_HARD;
+
+ if (level) {
+ cpu_interrupt(cs, type);
+ } else {
+ cpu_reset_interrupt(cs, type);
+ }
+}
+#endif
+
+/* CPUClass::reset() */
+static void mb_cpu_reset(CPUState *s)
+{
+ MicroBlazeCPU *cpu = MICROBLAZE_CPU(s);
+ MicroBlazeCPUClass *mcc = MICROBLAZE_CPU_GET_CLASS(cpu);
+ CPUMBState *env = &cpu->env;
+
+ mcc->parent_reset(s);
+
+ memset(env, 0, offsetof(CPUMBState, pvr));
+ env->res_addr = RES_ADDR_NONE;
+ tlb_flush(s, 1);
+
+ /* Disable stack protector. */
+ env->shr = ~0;
+
+ env->sregs[SR_PC] = cpu->cfg.base_vectors;
+
+#if defined(CONFIG_USER_ONLY)
+ /* start in user mode with interrupts enabled. */
+ env->sregs[SR_MSR] = MSR_EE | MSR_IE | MSR_VM | MSR_UM;
+#else
+ env->sregs[SR_MSR] = 0;
+ mmu_init(&env->mmu);
+ env->mmu.c_mmu = 3;
+ env->mmu.c_mmu_tlb_access = 3;
+ env->mmu.c_mmu_zones = 16;
+#endif
+}
+
+static void mb_disas_set_info(CPUState *cpu, disassemble_info *info)
+{
+ info->mach = bfd_arch_microblaze;
+ info->print_insn = print_insn_microblaze;
+}
+
+static void mb_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ MicroBlazeCPUClass *mcc = MICROBLAZE_CPU_GET_CLASS(dev);
+ MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
+ CPUMBState *env = &cpu->env;
+ uint8_t version_code = 0;
+ int i = 0;
+ Error *local_err = NULL;
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ qemu_init_vcpu(cs);
+
+ env->pvr.regs[0] = PVR0_USE_BARREL_MASK \
+ | PVR0_USE_DIV_MASK \
+ | PVR0_USE_HW_MUL_MASK \
+ | PVR0_USE_EXC_MASK \
+ | PVR0_USE_ICACHE_MASK \
+ | PVR0_USE_DCACHE_MASK \
+ | (0xb << 8);
+ env->pvr.regs[2] = PVR2_D_OPB_MASK \
+ | PVR2_D_LMB_MASK \
+ | PVR2_I_OPB_MASK \
+ | PVR2_I_LMB_MASK \
+ | PVR2_USE_MSR_INSTR \
+ | PVR2_USE_PCMP_INSTR \
+ | PVR2_USE_BARREL_MASK \
+ | PVR2_USE_DIV_MASK \
+ | PVR2_USE_HW_MUL_MASK \
+ | PVR2_USE_MUL64_MASK \
+ | PVR2_FPU_EXC_MASK \
+ | 0;
+
+ for (i = 0; mb_cpu_lookup[i].name && cpu->cfg.version; i++) {
+ if (strcmp(mb_cpu_lookup[i].name, cpu->cfg.version) == 0) {
+ version_code = mb_cpu_lookup[i].version_id;
+ break;
+ }
+ }
+
+ if (!version_code) {
+ qemu_log("Invalid MicroBlaze version number: %s\n", cpu->cfg.version);
+ }
+
+ env->pvr.regs[0] |= (cpu->cfg.stackprot ? PVR0_SPROT_MASK : 0) |
+ (cpu->cfg.use_fpu ? PVR0_USE_FPU_MASK : 0) |
+ (cpu->cfg.use_mmu ? PVR0_USE_MMU_MASK : 0) |
+ (cpu->cfg.endi ? PVR0_ENDI_MASK : 0) |
+ (version_code << 16) |
+ (cpu->cfg.pvr == C_PVR_FULL ? PVR0_PVR_FULL_MASK : 0);
+
+ env->pvr.regs[2] |= (cpu->cfg.use_fpu ? PVR2_USE_FPU_MASK : 0) |
+ (cpu->cfg.use_fpu > 1 ? PVR2_USE_FPU2_MASK : 0);
+
+ env->pvr.regs[5] |= cpu->cfg.dcache_writeback ?
+ PVR5_DCACHE_WRITEBACK_MASK : 0;
+
+ env->pvr.regs[10] = 0x0c000000; /* Default to spartan 3a dsp family. */
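+ /* Advertise an MMU with 16 protection zones (matches c_mmu_zones at reset). */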
+ env->pvr.regs[11] = PVR11_USE_MMU | (16 << 17);
+
+ mcc->parent_realize(dev, errp);
+}
+
+static void mb_cpu_initfn(Object *obj)
+{
+ CPUState *cs = CPU(obj);
+ MicroBlazeCPU *cpu = MICROBLAZE_CPU(obj);
+ CPUMBState *env = &cpu->env;
+ static bool tcg_initialized;
+
+ cs->env_ptr = env;
+
+ set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
+
+#ifndef CONFIG_USER_ONLY
+ /* Inbound IRQ and FIR lines */
+ qdev_init_gpio_in(DEVICE(cpu), microblaze_cpu_set_irq, 2);
+#endif
+
+ if (tcg_enabled() && !tcg_initialized) {
+ tcg_initialized = true;
+ mb_tcg_init();
+ }
+}
+
+static const VMStateDescription vmstate_mb_cpu = {
+ .name = "cpu",
+ .unmigratable = 1,
+};
+
+static Property mb_properties[] = {
+ DEFINE_PROP_UINT32("base-vectors", MicroBlazeCPU, cfg.base_vectors, 0),
+ DEFINE_PROP_BOOL("use-stack-protection", MicroBlazeCPU, cfg.stackprot,
+ false),
+ /* If use-fpu > 0 - FPU is enabled
+ * If use-fpu = 2 - Floating point conversion and square root instructions
+ * are enabled
+ */
+ DEFINE_PROP_UINT8("use-fpu", MicroBlazeCPU, cfg.use_fpu, 2),
+ DEFINE_PROP_BOOL("use-mmu", MicroBlazeCPU, cfg.use_mmu, true),
+ DEFINE_PROP_BOOL("dcache-writeback", MicroBlazeCPU, cfg.dcache_writeback,
+ false),
+ DEFINE_PROP_BOOL("endianness", MicroBlazeCPU, cfg.endi, false),
+ DEFINE_PROP_STRING("version", MicroBlazeCPU, cfg.version),
+ DEFINE_PROP_UINT8("pvr", MicroBlazeCPU, cfg.pvr, C_PVR_FULL),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void mb_cpu_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(oc);
+ MicroBlazeCPUClass *mcc = MICROBLAZE_CPU_CLASS(oc);
+
+ mcc->parent_realize = dc->realize;
+ dc->realize = mb_cpu_realizefn;
+
+ mcc->parent_reset = cc->reset;
+ cc->reset = mb_cpu_reset;
+
+ cc->has_work = mb_cpu_has_work;
+ cc->do_interrupt = mb_cpu_do_interrupt;
+ cc->cpu_exec_interrupt = mb_cpu_exec_interrupt;
+ cc->dump_state = mb_cpu_dump_state;
+ cc->set_pc = mb_cpu_set_pc;
+ cc->gdb_read_register = mb_cpu_gdb_read_register;
+ cc->gdb_write_register = mb_cpu_gdb_write_register;
+#ifdef CONFIG_USER_ONLY
+ cc->handle_mmu_fault = mb_cpu_handle_mmu_fault;
+#else
+ cc->do_unassigned_access = mb_cpu_unassigned_access;
+ cc->get_phys_page_debug = mb_cpu_get_phys_page_debug;
+#endif
+ dc->vmsd = &vmstate_mb_cpu;
+ dc->props = mb_properties;
+ cc->gdb_num_core_regs = 32 + 5;
+
+ cc->disas_set_info = mb_disas_set_info;
+}
+
+static const TypeInfo mb_cpu_type_info = {
+ .name = TYPE_MICROBLAZE_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(MicroBlazeCPU),
+ .instance_init = mb_cpu_initfn,
+ .class_size = sizeof(MicroBlazeCPUClass),
+ .class_init = mb_cpu_class_init,
+};
+
+static void mb_cpu_register_types(void)
+{
+ type_register_static(&mb_cpu_type_info);
+}
+
+type_init(mb_cpu_register_types)
diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
new file mode 100644
index 0000000000..beb75ffd26
--- /dev/null
+++ b/target/microblaze/cpu.h
@@ -0,0 +1,381 @@
+/*
+ * MicroBlaze virtual CPU header
+ *
+ * Copyright (c) 2009 Edgar E. Iglesias
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef MICROBLAZE_CPU_H
+#define MICROBLAZE_CPU_H
+
+#include "qemu-common.h"
+#include "cpu-qom.h"
+
+#define TARGET_LONG_BITS 32
+
+#define CPUArchState struct CPUMBState
+
+#include "exec/cpu-defs.h"
+#include "fpu/softfloat.h"
+struct CPUMBState;
+typedef struct CPUMBState CPUMBState;
+#if !defined(CONFIG_USER_ONLY)
+#include "mmu.h"
+#endif
+
+#define EXCP_MMU 1
+#define EXCP_IRQ 2
+#define EXCP_BREAK 3
+#define EXCP_HW_BREAK 4
+#define EXCP_HW_EXCP 5
+
+/* MicroBlaze-specific interrupt pending bits. */
+#define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_3
+
+/* Meanings of the MBCPU object's two inbound GPIO lines */
+#define MB_CPU_IRQ 0
+#define MB_CPU_FIR 1
+
+/* Register aliases. R0 - R15 */
+#define R_SP 1
+#define SR_PC 0
+#define SR_MSR 1
+#define SR_EAR 3
+#define SR_ESR 5
+#define SR_FSR 7
+#define SR_BTR 0xb
+#define SR_EDR 0xd
+
+/* MSR flags. */
+#define MSR_BE (1<<0) /* 0x001 */
+#define MSR_IE (1<<1) /* 0x002 */
+#define MSR_C (1<<2) /* 0x004 */
+#define MSR_BIP (1<<3) /* 0x008 */
+#define MSR_FSL (1<<4) /* 0x010 */
+#define MSR_ICE (1<<5) /* 0x020 */
+#define MSR_DZ (1<<6) /* 0x040 */
+#define MSR_DCE (1<<7) /* 0x080 */
+#define MSR_EE (1<<8) /* 0x100 */
+#define MSR_EIP (1<<9) /* 0x200 */
+#define MSR_PVR (1<<10) /* 0x400 */
+#define MSR_CC (1<<31)
+
+/* Machine State Register (MSR) Fields */
+#define MSR_UM (1<<11) /* User Mode */
+#define MSR_UMS (1<<12) /* User Mode Save */
+#define MSR_VM (1<<13) /* Virtual Mode */
+#define MSR_VMS (1<<14) /* Virtual Mode Save */
+
+#define MSR_KERNEL MSR_EE|MSR_VM
+//#define MSR_USER MSR_KERNEL|MSR_UM|MSR_IE
+#define MSR_KERNEL_VMS MSR_EE|MSR_VMS
+//#define MSR_USER_VMS MSR_KERNEL_VMS|MSR_UMS|MSR_IE
+
+/* Exception State Register (ESR) Fields */
+#define ESR_DIZ (1<<11) /* Zone Protection */
+#define ESR_S (1<<10) /* Store instruction */
+
+#define ESR_ESS_FSL_OFFSET 5
+
+#define ESR_EC_FSL 0
+#define ESR_EC_UNALIGNED_DATA 1
+#define ESR_EC_ILLEGAL_OP 2
+#define ESR_EC_INSN_BUS 3
+#define ESR_EC_DATA_BUS 4
+#define ESR_EC_DIVZERO 5
+#define ESR_EC_FPU 6
+#define ESR_EC_PRIVINSN 7
+#define ESR_EC_STACKPROT 7 /* Same as PRIVINSN. */
+#define ESR_EC_DATA_STORAGE 8
+#define ESR_EC_INSN_STORAGE 9
+#define ESR_EC_DATA_TLB 10
+#define ESR_EC_INSN_TLB 11
+#define ESR_EC_MASK 31
+
+/* Floating Point Status Register (FSR) Bits */
+#define FSR_IO (1<<4) /* Invalid operation */
+#define FSR_DZ (1<<3) /* Divide-by-zero */
+#define FSR_OF (1<<2) /* Overflow */
+#define FSR_UF (1<<1) /* Underflow */
+#define FSR_DO (1<<0) /* Denormalized operand error */
+
+/* Version reg. */
+/* Basic PVR mask */
+#define PVR0_PVR_FULL_MASK 0x80000000
+#define PVR0_USE_BARREL_MASK 0x40000000
+#define PVR0_USE_DIV_MASK 0x20000000
+#define PVR0_USE_HW_MUL_MASK 0x10000000
+#define PVR0_USE_FPU_MASK 0x08000000
+#define PVR0_USE_EXC_MASK 0x04000000
+#define PVR0_USE_ICACHE_MASK 0x02000000
+#define PVR0_USE_DCACHE_MASK 0x01000000
+#define PVR0_USE_MMU_MASK 0x00800000
+#define PVR0_USE_BTC 0x00400000
+#define PVR0_ENDI_MASK 0x00200000
+#define PVR0_FAULT 0x00100000
+#define PVR0_VERSION_MASK 0x0000FF00
+#define PVR0_USER1_MASK 0x000000FF
+#define PVR0_SPROT_MASK 0x00000001
+
+/* User 2 PVR mask */
+#define PVR1_USER2_MASK 0xFFFFFFFF
+
+/* Configuration PVR masks */
+#define PVR2_D_OPB_MASK 0x80000000
+#define PVR2_D_LMB_MASK 0x40000000
+#define PVR2_I_OPB_MASK 0x20000000
+#define PVR2_I_LMB_MASK 0x10000000
+#define PVR2_INTERRUPT_IS_EDGE_MASK 0x08000000
+#define PVR2_EDGE_IS_POSITIVE_MASK 0x04000000
+#define PVR2_D_PLB_MASK 0x02000000 /* new */
+#define PVR2_I_PLB_MASK 0x01000000 /* new */
+#define PVR2_INTERCONNECT 0x00800000 /* new */
+#define PVR2_USE_EXTEND_FSL 0x00080000 /* new */
+#define PVR2_USE_FSL_EXC 0x00040000 /* new */
+#define PVR2_USE_MSR_INSTR 0x00020000
+#define PVR2_USE_PCMP_INSTR 0x00010000
+#define PVR2_AREA_OPTIMISED 0x00008000
+#define PVR2_USE_BARREL_MASK 0x00004000
+#define PVR2_USE_DIV_MASK 0x00002000
+#define PVR2_USE_HW_MUL_MASK 0x00001000
+#define PVR2_USE_FPU_MASK 0x00000800
+#define PVR2_USE_MUL64_MASK 0x00000400
+#define PVR2_USE_FPU2_MASK 0x00000200 /* new */
+#define PVR2_USE_IPLBEXC 0x00000100
+#define PVR2_USE_DPLBEXC 0x00000080
+#define PVR2_OPCODE_0x0_ILL_MASK 0x00000040
+#define PVR2_UNALIGNED_EXC_MASK 0x00000020
+#define PVR2_ILL_OPCODE_EXC_MASK 0x00000010
+#define PVR2_IOPB_BUS_EXC_MASK 0x00000008
+#define PVR2_DOPB_BUS_EXC_MASK 0x00000004
+#define PVR2_DIV_ZERO_EXC_MASK 0x00000002
+#define PVR2_FPU_EXC_MASK 0x00000001
+
+/* Debug and exception PVR masks */
+#define PVR3_DEBUG_ENABLED_MASK 0x80000000
+#define PVR3_NUMBER_OF_PC_BRK_MASK 0x1E000000
+#define PVR3_NUMBER_OF_RD_ADDR_BRK_MASK 0x00380000
+#define PVR3_NUMBER_OF_WR_ADDR_BRK_MASK 0x0000E000
+#define PVR3_FSL_LINKS_MASK 0x00000380
+
+/* ICache config PVR masks */
+#define PVR4_USE_ICACHE_MASK 0x80000000
+#define PVR4_ICACHE_ADDR_TAG_BITS_MASK 0x7C000000
+#define PVR4_ICACHE_USE_FSL_MASK 0x02000000
+#define PVR4_ICACHE_ALLOW_WR_MASK 0x01000000
+#define PVR4_ICACHE_LINE_LEN_MASK 0x00E00000
+#define PVR4_ICACHE_BYTE_SIZE_MASK 0x001F0000
+
+/* DCache config PVR masks */
+#define PVR5_USE_DCACHE_MASK 0x80000000
+#define PVR5_DCACHE_ADDR_TAG_BITS_MASK 0x7C000000
+#define PVR5_DCACHE_USE_FSL_MASK 0x02000000
+#define PVR5_DCACHE_ALLOW_WR_MASK 0x01000000
+#define PVR5_DCACHE_LINE_LEN_MASK 0x00E00000
+#define PVR5_DCACHE_BYTE_SIZE_MASK 0x001F0000
+#define PVR5_DCACHE_WRITEBACK_MASK 0x00004000
+
+/* ICache base address PVR mask */
+#define PVR6_ICACHE_BASEADDR_MASK 0xFFFFFFFF
+
+/* ICache high address PVR mask */
+#define PVR7_ICACHE_HIGHADDR_MASK 0xFFFFFFFF
+
+/* DCache base address PVR mask */
+#define PVR8_DCACHE_BASEADDR_MASK 0xFFFFFFFF
+
+/* DCache high address PVR mask */
+#define PVR9_DCACHE_HIGHADDR_MASK 0xFFFFFFFF
+
+/* Target family PVR mask */
+#define PVR10_TARGET_FAMILY_MASK 0xFF000000
+
+/* MMU description */
+#define PVR11_USE_MMU 0xC0000000
+#define PVR11_MMU_ITLB_SIZE 0x38000000
+#define PVR11_MMU_DTLB_SIZE 0x07000000
+#define PVR11_MMU_TLB_ACCESS 0x00C00000
+#define PVR11_MMU_ZONES 0x003E0000
+/* MSR Reset value PVR mask */
+#define PVR11_MSR_RESET_VALUE_MASK 0x000007FF
+
+#define C_PVR_NONE 0
+#define C_PVR_BASIC 1
+#define C_PVR_FULL 2
+
+/* CPU flags. */
+
+/* Condition codes. */
+#define CC_GE 5
+#define CC_GT 4
+#define CC_LE 3
+#define CC_LT 2
+#define CC_NE 1
+#define CC_EQ 0
+
+#define NB_MMU_MODES 3
+
+#define STREAM_EXCEPTION (1 << 0)
+#define STREAM_ATOMIC (1 << 1)
+#define STREAM_TEST (1 << 2)
+#define STREAM_CONTROL (1 << 3)
+#define STREAM_NONBLOCK (1 << 4)
+
+struct CPUMBState {
+ uint32_t debug;
+ uint32_t btaken;
+ uint32_t btarget;
+ uint32_t bimm;
+
+ uint32_t imm;
+ uint32_t regs[33];
+ uint32_t sregs[24];
+ float_status fp_status;
+ /* Stack protectors. Yes, it's a hw feature. */
+ uint32_t slr, shr;
+
+ /* lwx/swx reserved address */
+#define RES_ADDR_NONE 0xffffffff /* Use 0xffffffff to indicate no reservation */
+ uint32_t res_addr;
+ uint32_t res_val;
+
+ /* Internal flags. */
+#define IMM_FLAG 4
+#define MSR_EE_FLAG (1 << 8)
+#define DRTI_FLAG (1 << 16)
+#define DRTE_FLAG (1 << 17)
+#define DRTB_FLAG (1 << 18)
+#define D_FLAG (1 << 19) /* Bit in ESR. */
+/* TB dependent CPUMBState. */
+#define IFLAGS_TB_MASK (D_FLAG | IMM_FLAG | DRTI_FLAG | DRTE_FLAG | DRTB_FLAG)
+ uint32_t iflags;
+
+#if !defined(CONFIG_USER_ONLY)
+ /* Unified MMU. */
+ struct microblaze_mmu mmu;
+#endif
+
+ CPU_COMMON
+
+ /* These fields are preserved on reset. */
+
+ struct {
+ uint32_t regs[16];
+ } pvr;
+};
+
+/**
+ * MicroBlazeCPU:
+ * @env: #CPUMBState
+ *
+ * A MicroBlaze CPU.
+ */
+struct MicroBlazeCPU {
+ /*< private >*/
+ CPUState parent_obj;
+
+ /*< public >*/
+
+ /* Microblaze Configuration Settings */
+ struct {
+ bool stackprot;
+ uint32_t base_vectors;
+ uint8_t use_fpu;
+ bool use_mmu;
+ bool dcache_writeback;
+ bool endi;
+ char *version;
+ uint8_t pvr;
+ } cfg;
+
+ CPUMBState env;
+};
+
+static inline MicroBlazeCPU *mb_env_get_cpu(CPUMBState *env)
+{
+ return container_of(env, MicroBlazeCPU, env);
+}
+
+#define ENV_GET_CPU(e) CPU(mb_env_get_cpu(e))
+
+#define ENV_OFFSET offsetof(MicroBlazeCPU, env)
+
+void mb_cpu_do_interrupt(CPUState *cs);
+bool mb_cpu_exec_interrupt(CPUState *cs, int int_req);
+void mb_cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
+ int flags);
+hwaddr mb_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+int mb_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
+int mb_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+
+void mb_tcg_init(void);
+MicroBlazeCPU *cpu_mb_init(const char *cpu_model);
+/* You can call this signal handler from your SIGBUS and SIGSEGV
+   signal handlers to inform the virtual CPU of exceptions. Non-zero
+   is returned if the signal was handled by the virtual CPU. */
+int cpu_mb_signal_handler(int host_signum, void *pinfo,
+ void *puc);
+
+/* FIXME: MB uses variable page sizes down to 1K, but Linux only uses 4K. */
+#define TARGET_PAGE_BITS 12
+
+#define TARGET_PHYS_ADDR_SPACE_BITS 32
+#define TARGET_VIRT_ADDR_SPACE_BITS 32
+
+#define cpu_init(cpu_model) CPU(cpu_mb_init(cpu_model))
+
+#define cpu_signal_handler cpu_mb_signal_handler
+
+/* MMU modes definitions */
+#define MMU_MODE0_SUFFIX _nommu
+#define MMU_MODE1_SUFFIX _kernel
+#define MMU_MODE2_SUFFIX _user
+#define MMU_NOMMU_IDX 0
+#define MMU_KERNEL_IDX 1
+#define MMU_USER_IDX 2
+/* See NB_MMU_MODES further up the file. */
+
+static inline int cpu_mmu_index (CPUMBState *env, bool ifetch)
+{
+    /* Are we in nommu mode? */
+ if (!(env->sregs[SR_MSR] & MSR_VM))
+ return MMU_NOMMU_IDX;
+
+ if (env->sregs[SR_MSR] & MSR_UM)
+ return MMU_USER_IDX;
+ return MMU_KERNEL_IDX;
+}
+
+int mb_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
+ int mmu_idx);
+
+#include "exec/cpu-all.h"
+
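+/* Note: the TB flags returned below combine the translator-visible iflags
+ * with the MSR mode bits (UM/VM/EE), so translated blocks are effectively
+ * specialised per CPU mode.
+ */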
+static inline void cpu_get_tb_cpu_state(CPUMBState *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *flags)
+{
+ *pc = env->sregs[SR_PC];
+ *cs_base = 0;
+ *flags = (env->iflags & IFLAGS_TB_MASK) |
+ (env->sregs[SR_MSR] & (MSR_UM | MSR_VM | MSR_EE));
+}
+
+#if !defined(CONFIG_USER_ONLY)
+void mb_cpu_unassigned_access(CPUState *cpu, hwaddr addr,
+ bool is_write, bool is_exec, int is_asi,
+ unsigned size);
+#endif
+
+#endif
diff --git a/target/microblaze/gdbstub.c b/target/microblaze/gdbstub.c
new file mode 100644
index 0000000000..7fb076c2e9
--- /dev/null
+++ b/target/microblaze/gdbstub.c
@@ -0,0 +1,57 @@
+/*
+ * MicroBlaze gdb server stub
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+
+int mb_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
+ CPUMBState *env = &cpu->env;
+
+ if (n < 32) {
+ return gdb_get_reg32(mem_buf, env->regs[n]);
+ } else {
+ return gdb_get_reg32(mem_buf, env->sregs[n - 32]);
+ }
+ return 0;
+}
+
+int mb_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
+ CPUClass *cc = CPU_GET_CLASS(cs);
+ CPUMBState *env = &cpu->env;
+ uint32_t tmp;
+
+ if (n > cc->gdb_num_core_regs) {
+ return 0;
+ }
+
+ tmp = ldl_p(mem_buf);
+
+ if (n < 32) {
+ env->regs[n] = tmp;
+ } else {
+ env->sregs[n - 32] = tmp;
+ }
+ return 4;
+}
diff --git a/target/microblaze/helper.c b/target/microblaze/helper.c
new file mode 100644
index 0000000000..da394d1dfc
--- /dev/null
+++ b/target/microblaze/helper.c
@@ -0,0 +1,307 @@
+/*
+ * MicroBlaze helper routines.
+ *
+ * Copyright (c) 2009 Edgar E. Iglesias <edgar.iglesias@gmail.com>
+ * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "qemu/host-utils.h"
+#include "exec/log.h"
+
+#define D(x)
+
+#if defined(CONFIG_USER_ONLY)
+
+void mb_cpu_do_interrupt(CPUState *cs)
+{
+ MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
+ CPUMBState *env = &cpu->env;
+
+ cs->exception_index = -1;
+ env->res_addr = RES_ADDR_NONE;
+ env->regs[14] = env->sregs[SR_PC];
+}
+
+int mb_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
+ int mmu_idx)
+{
+ cs->exception_index = 0xaa;
+ cpu_dump_state(cs, stderr, fprintf, 0);
+ return 1;
+}
+
+#else /* !CONFIG_USER_ONLY */
+
+int mb_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
+ int mmu_idx)
+{
+ MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
+ CPUMBState *env = &cpu->env;
+ unsigned int hit;
+ unsigned int mmu_available;
+ int r = 1;
+ int prot;
+
+ mmu_available = 0;
+ if (cpu->cfg.use_mmu) {
+ mmu_available = 1;
+ if ((cpu->cfg.pvr == C_PVR_FULL) &&
+ (env->pvr.regs[11] & PVR11_USE_MMU) != PVR11_USE_MMU) {
+ mmu_available = 0;
+ }
+ }
+
+ /* Translate if the MMU is available and enabled. */
+ if (mmu_available && (env->sregs[SR_MSR] & MSR_VM)) {
+ target_ulong vaddr, paddr;
+ struct microblaze_mmu_lookup lu;
+
+ hit = mmu_translate(&env->mmu, &lu, address, rw, mmu_idx);
+ if (hit) {
+ vaddr = address & TARGET_PAGE_MASK;
+ paddr = lu.paddr + vaddr - lu.vaddr;
+
+ qemu_log_mask(CPU_LOG_MMU, "MMU map mmu=%d v=%x p=%x prot=%x\n",
+ mmu_idx, vaddr, paddr, lu.prot);
+ tlb_set_page(cs, vaddr, paddr, lu.prot, mmu_idx, TARGET_PAGE_SIZE);
+ r = 0;
+ } else {
+ env->sregs[SR_EAR] = address;
+ qemu_log_mask(CPU_LOG_MMU, "mmu=%d miss v=%" VADDR_PRIx "\n",
+ mmu_idx, address);
+
+ switch (lu.err) {
+ case ERR_PROT:
+ env->sregs[SR_ESR] = rw == 2 ? 17 : 16;
+ env->sregs[SR_ESR] |= (rw == 1) << 10;
+ break;
+ case ERR_MISS:
+ env->sregs[SR_ESR] = rw == 2 ? 19 : 18;
+ env->sregs[SR_ESR] |= (rw == 1) << 10;
+ break;
+ default:
+ abort();
+ break;
+ }
+
+ if (cs->exception_index == EXCP_MMU) {
+ cpu_abort(cs, "recursive faults\n");
+ }
+
+ /* TLB miss. */
+ cs->exception_index = EXCP_MMU;
+ }
+ } else {
+ /* MMU disabled or not available. */
+ address &= TARGET_PAGE_MASK;
+ prot = PAGE_BITS;
+ tlb_set_page(cs, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
+ r = 0;
+ }
+ return r;
+}
+
+void mb_cpu_do_interrupt(CPUState *cs)
+{
+ MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
+ CPUMBState *env = &cpu->env;
+ uint32_t t;
+
+ /* IMM flag cannot propagate across a branch and into the dslot. */
+ assert(!((env->iflags & D_FLAG) && (env->iflags & IMM_FLAG)));
+ assert(!(env->iflags & (DRTI_FLAG | DRTE_FLAG | DRTB_FLAG)));
+/* assert(env->sregs[SR_MSR] & (MSR_EE)); Only for HW exceptions. */
+ env->res_addr = RES_ADDR_NONE;
+ switch (cs->exception_index) {
+ case EXCP_HW_EXCP:
+ if (!(env->pvr.regs[0] & PVR0_USE_EXC_MASK)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Exception raised on system without exceptions!\n");
+ return;
+ }
+
+ env->regs[17] = env->sregs[SR_PC] + 4;
+ env->sregs[SR_ESR] &= ~(1 << 12);
+
+ /* Exception breaks branch + dslot sequence? */
+ if (env->iflags & D_FLAG) {
+            env->sregs[SR_ESR] |= 1 << 12;
+ env->sregs[SR_BTR] = env->btarget;
+ }
+
+ /* Disable the MMU. */
+ t = (env->sregs[SR_MSR] & (MSR_VM | MSR_UM)) << 1;
+ env->sregs[SR_MSR] &= ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM);
+ env->sregs[SR_MSR] |= t;
+ /* Exception in progress. */
+ env->sregs[SR_MSR] |= MSR_EIP;
+
+ qemu_log_mask(CPU_LOG_INT,
+ "hw exception at pc=%x ear=%x esr=%x iflags=%x\n",
+ env->sregs[SR_PC], env->sregs[SR_EAR],
+ env->sregs[SR_ESR], env->iflags);
+ log_cpu_state_mask(CPU_LOG_INT, cs, 0);
+ env->iflags &= ~(IMM_FLAG | D_FLAG);
+ env->sregs[SR_PC] = cpu->cfg.base_vectors + 0x20;
+ break;
+
+ case EXCP_MMU:
+ env->regs[17] = env->sregs[SR_PC];
+
+ env->sregs[SR_ESR] &= ~(1 << 12);
+ /* Exception breaks branch + dslot sequence? */
+ if (env->iflags & D_FLAG) {
+ D(qemu_log("D_FLAG set at exception bimm=%d\n", env->bimm));
+            env->sregs[SR_ESR] |= 1 << 12;
+ env->sregs[SR_BTR] = env->btarget;
+
+ /* Reexecute the branch. */
+ env->regs[17] -= 4;
+            /* Was the branch imm prefixed? */
+ if (env->bimm) {
+ qemu_log_mask(CPU_LOG_INT,
+ "bimm exception at pc=%x iflags=%x\n",
+ env->sregs[SR_PC], env->iflags);
+ env->regs[17] -= 4;
+ log_cpu_state_mask(CPU_LOG_INT, cs, 0);
+ }
+ } else if (env->iflags & IMM_FLAG) {
+ D(qemu_log("IMM_FLAG set at exception\n"));
+ env->regs[17] -= 4;
+ }
+
+ /* Disable the MMU. */
+ t = (env->sregs[SR_MSR] & (MSR_VM | MSR_UM)) << 1;
+ env->sregs[SR_MSR] &= ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM);
+ env->sregs[SR_MSR] |= t;
+ /* Exception in progress. */
+ env->sregs[SR_MSR] |= MSR_EIP;
+
+ qemu_log_mask(CPU_LOG_INT,
+ "exception at pc=%x ear=%x iflags=%x\n",
+ env->sregs[SR_PC], env->sregs[SR_EAR], env->iflags);
+ log_cpu_state_mask(CPU_LOG_INT, cs, 0);
+ env->iflags &= ~(IMM_FLAG | D_FLAG);
+ env->sregs[SR_PC] = cpu->cfg.base_vectors + 0x20;
+ break;
+
+ case EXCP_IRQ:
+ assert(!(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP)));
+ assert(env->sregs[SR_MSR] & MSR_IE);
+ assert(!(env->iflags & D_FLAG));
+
+ t = (env->sregs[SR_MSR] & (MSR_VM | MSR_UM)) << 1;
+
+#if 0
+#include "disas/disas.h"
+
+/* Useful instrumentation when debugging interrupt issues in either
+ the models or in sw. */
+ {
+ const char *sym;
+
+ sym = lookup_symbol(env->sregs[SR_PC]);
+ if (sym
+ && (!strcmp("netif_rx", sym)
+ || !strcmp("process_backlog", sym))) {
+
+ qemu_log(
+ "interrupt at pc=%x msr=%x %x iflags=%x sym=%s\n",
+ env->sregs[SR_PC], env->sregs[SR_MSR], t, env->iflags,
+ sym);
+
+ log_cpu_state(cs, 0);
+ }
+ }
+#endif
+ qemu_log_mask(CPU_LOG_INT,
+ "interrupt at pc=%x msr=%x %x iflags=%x\n",
+ env->sregs[SR_PC], env->sregs[SR_MSR], t, env->iflags);
+
+ env->sregs[SR_MSR] &= ~(MSR_VMS | MSR_UMS | MSR_VM \
+ | MSR_UM | MSR_IE);
+ env->sregs[SR_MSR] |= t;
+
+ env->regs[14] = env->sregs[SR_PC];
+ env->sregs[SR_PC] = cpu->cfg.base_vectors + 0x10;
+ //log_cpu_state_mask(CPU_LOG_INT, cs, 0);
+ break;
+
+ case EXCP_BREAK:
+ case EXCP_HW_BREAK:
+ assert(!(env->iflags & IMM_FLAG));
+ assert(!(env->iflags & D_FLAG));
+ t = (env->sregs[SR_MSR] & (MSR_VM | MSR_UM)) << 1;
+ qemu_log_mask(CPU_LOG_INT,
+ "break at pc=%x msr=%x %x iflags=%x\n",
+ env->sregs[SR_PC], env->sregs[SR_MSR], t, env->iflags);
+ log_cpu_state_mask(CPU_LOG_INT, cs, 0);
+ env->sregs[SR_MSR] &= ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM);
+ env->sregs[SR_MSR] |= t;
+ env->sregs[SR_MSR] |= MSR_BIP;
+ if (cs->exception_index == EXCP_HW_BREAK) {
+ env->regs[16] = env->sregs[SR_PC];
+ env->sregs[SR_MSR] |= MSR_BIP;
+ env->sregs[SR_PC] = cpu->cfg.base_vectors + 0x18;
+ } else
+ env->sregs[SR_PC] = env->btarget;
+ break;
+ default:
+ cpu_abort(cs, "unhandled exception type=%d\n",
+ cs->exception_index);
+ break;
+ }
+}
+
+hwaddr mb_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+ MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
+ CPUMBState *env = &cpu->env;
+ target_ulong vaddr, paddr = 0;
+ struct microblaze_mmu_lookup lu;
+ unsigned int hit;
+
+ if (env->sregs[SR_MSR] & MSR_VM) {
+ hit = mmu_translate(&env->mmu, &lu, addr, 0, 0);
+ if (hit) {
+ vaddr = addr & TARGET_PAGE_MASK;
+ paddr = lu.paddr + vaddr - lu.vaddr;
+ } else
+ paddr = 0; /* ???. */
+ } else
+ paddr = addr & TARGET_PAGE_MASK;
+
+ return paddr;
+}
+#endif
+
+bool mb_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
+ CPUMBState *env = &cpu->env;
+
+ if ((interrupt_request & CPU_INTERRUPT_HARD)
+ && (env->sregs[SR_MSR] & MSR_IE)
+ && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
+ && !(env->iflags & (D_FLAG | IMM_FLAG))) {
+ cs->exception_index = EXCP_IRQ;
+ mb_cpu_do_interrupt(cs);
+ return true;
+ }
+ return false;
+}
diff --git a/target/microblaze/helper.h b/target/microblaze/helper.h
new file mode 100644
index 0000000000..bd13826de0
--- /dev/null
+++ b/target/microblaze/helper.h
@@ -0,0 +1,37 @@
+DEF_HELPER_2(raise_exception, void, env, i32)
+DEF_HELPER_1(debug, void, env)
+DEF_HELPER_FLAGS_3(carry, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32)
+DEF_HELPER_2(cmp, i32, i32, i32)
+DEF_HELPER_2(cmpu, i32, i32, i32)
+DEF_HELPER_FLAGS_1(clz, TCG_CALL_NO_RWG_SE, i32, i32)
+
+DEF_HELPER_3(divs, i32, env, i32, i32)
+DEF_HELPER_3(divu, i32, env, i32, i32)
+
+DEF_HELPER_3(fadd, i32, env, i32, i32)
+DEF_HELPER_3(frsub, i32, env, i32, i32)
+DEF_HELPER_3(fmul, i32, env, i32, i32)
+DEF_HELPER_3(fdiv, i32, env, i32, i32)
+DEF_HELPER_2(flt, i32, env, i32)
+DEF_HELPER_2(fint, i32, env, i32)
+DEF_HELPER_2(fsqrt, i32, env, i32)
+
+DEF_HELPER_3(fcmp_un, i32, env, i32, i32)
+DEF_HELPER_3(fcmp_lt, i32, env, i32, i32)
+DEF_HELPER_3(fcmp_eq, i32, env, i32, i32)
+DEF_HELPER_3(fcmp_le, i32, env, i32, i32)
+DEF_HELPER_3(fcmp_gt, i32, env, i32, i32)
+DEF_HELPER_3(fcmp_ne, i32, env, i32, i32)
+DEF_HELPER_3(fcmp_ge, i32, env, i32, i32)
+
+DEF_HELPER_FLAGS_2(pcmpbf, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+#if !defined(CONFIG_USER_ONLY)
+DEF_HELPER_2(mmu_read, i32, env, i32)
+DEF_HELPER_3(mmu_write, void, env, i32, i32)
+#endif
+
+DEF_HELPER_5(memalign, void, env, i32, i32, i32, i32)
+DEF_HELPER_2(stackprot, void, env, i32)
+
+DEF_HELPER_2(get, i32, i32, i32)
+DEF_HELPER_3(put, void, i32, i32, i32)
diff --git a/target/microblaze/microblaze-decode.h b/target/microblaze/microblaze-decode.h
new file mode 100644
index 0000000000..401319ed46
--- /dev/null
+++ b/target/microblaze/microblaze-decode.h
@@ -0,0 +1,55 @@
+/*
+ * MicroBlaze insn decoding macros.
+ *
+ * Copyright (c) 2009 Edgar E. Iglesias <edgar.iglesias@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* Convenient binary macros. */
+#define HEX__(n) 0x##n##LU
+#define B8__(x) ((x&0x0000000FLU)?1:0) \
+ + ((x&0x000000F0LU)?2:0) \
+ + ((x&0x00000F00LU)?4:0) \
+ + ((x&0x0000F000LU)?8:0) \
+ + ((x&0x000F0000LU)?16:0) \
+ + ((x&0x00F00000LU)?32:0) \
+ + ((x&0x0F000000LU)?64:0) \
+ + ((x&0xF0000000LU)?128:0)
+#define B8(d) ((unsigned char)B8__(HEX__(d)))
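+/* For illustration: B8(00110001) evaluates to 0x31, i.e. each "binary
+   digit" of the argument becomes one bit of the result.  The DEC_* pairs
+   below are {value, mask} and are presumably matched against the major
+   opcode field as (opcode & mask) == value. */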
+
+/* Decode logic, value and mask. */
+#define DEC_ADD {B8(00000000), B8(00110001)}
+#define DEC_SUB {B8(00000001), B8(00110001)}
+#define DEC_AND {B8(00100001), B8(00110101)}
+#define DEC_XOR {B8(00100010), B8(00110111)}
+#define DEC_OR {B8(00100000), B8(00110111)}
+#define DEC_BIT {B8(00100100), B8(00111111)}
+#define DEC_MSR {B8(00100101), B8(00111111)}
+
+#define DEC_BARREL {B8(00010001), B8(00110111)}
+#define DEC_MUL {B8(00010000), B8(00110111)}
+#define DEC_DIV {B8(00010010), B8(00110111)}
+#define DEC_FPU {B8(00010110), B8(00111111)}
+
+#define DEC_LD {B8(00110000), B8(00110100)}
+#define DEC_ST {B8(00110100), B8(00110100)}
+#define DEC_IMM {B8(00101100), B8(00111111)}
+
+#define DEC_BR {B8(00100110), B8(00110111)}
+#define DEC_BCC {B8(00100111), B8(00110111)}
+#define DEC_RTS {B8(00101101), B8(00111111)}
+
+#define DEC_STREAM {B8(00010011), B8(00110111)}
+
diff --git a/target/microblaze/mmu.c b/target/microblaze/mmu.c
new file mode 100644
index 0000000000..a22a496ebb
--- /dev/null
+++ b/target/microblaze/mmu.c
@@ -0,0 +1,303 @@
+/*
+ * Microblaze MMU emulation for qemu.
+ *
+ * Copyright (c) 2009 Edgar E. Iglesias
+ * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+
+#define D(x)
+
+static unsigned int tlb_decode_size(unsigned int f)
+{
+ static const unsigned int sizes[] = {
+ 1 * 1024, 4 * 1024, 16 * 1024, 64 * 1024, 256 * 1024,
+ 1 * 1024 * 1024, 4 * 1024 * 1024, 16 * 1024 * 1024
+ };
+ assert(f < ARRAY_SIZE(sizes));
+ return sizes[f];
+}
+
+static void mmu_flush_idx(CPUMBState *env, unsigned int idx)
+{
+ CPUState *cs = CPU(mb_env_get_cpu(env));
+ struct microblaze_mmu *mmu = &env->mmu;
+ unsigned int tlb_size;
+ uint32_t tlb_tag, end, t;
+
+ t = mmu->rams[RAM_TAG][idx];
+ if (!(t & TLB_VALID))
+ return;
+
+ tlb_tag = t & TLB_EPN_MASK;
+ tlb_size = tlb_decode_size((t & TLB_PAGESZ_MASK) >> 7);
+ end = tlb_tag + tlb_size;
+
+ while (tlb_tag < end) {
+ tlb_flush_page(cs, tlb_tag);
+ tlb_tag += TARGET_PAGE_SIZE;
+ }
+}
+
+static void mmu_change_pid(CPUMBState *env, unsigned int newpid)
+{
+ struct microblaze_mmu *mmu = &env->mmu;
+ unsigned int i;
+ uint32_t t;
+
+ if (newpid & ~0xff)
+ qemu_log_mask(LOG_GUEST_ERROR, "Illegal rpid=%x\n", newpid);
+
+ for (i = 0; i < ARRAY_SIZE(mmu->rams[RAM_TAG]); i++) {
+ /* Lookup and decode. */
+ t = mmu->rams[RAM_TAG][i];
+ if (t & TLB_VALID) {
+ if (mmu->tids[i] && ((mmu->regs[MMU_R_PID] & 0xff) == mmu->tids[i]))
+ mmu_flush_idx(env, i);
+ }
+ }
+}
+
+/* rw - 0 = read, 1 = write, 2 = fetch. */
+unsigned int mmu_translate(struct microblaze_mmu *mmu,
+ struct microblaze_mmu_lookup *lu,
+ target_ulong vaddr, int rw, int mmu_idx)
+{
+ unsigned int i, hit = 0;
+ unsigned int tlb_ex = 0, tlb_wr = 0, tlb_zsel;
+ unsigned int tlb_size;
+ uint32_t tlb_tag, tlb_rpn, mask, t0;
+
+ lu->err = ERR_MISS;
+ for (i = 0; i < ARRAY_SIZE(mmu->rams[RAM_TAG]); i++) {
+ uint32_t t, d;
+
+ /* Lookup and decode. */
+ t = mmu->rams[RAM_TAG][i];
+ D(qemu_log("TLB %d valid=%d\n", i, t & TLB_VALID));
+ if (t & TLB_VALID) {
+ tlb_size = tlb_decode_size((t & TLB_PAGESZ_MASK) >> 7);
+ if (tlb_size < TARGET_PAGE_SIZE) {
+ qemu_log("%d pages not supported\n", tlb_size);
+ abort();
+ }
+
+ mask = ~(tlb_size - 1);
+ tlb_tag = t & TLB_EPN_MASK;
+ if ((vaddr & mask) != (tlb_tag & mask)) {
+ D(qemu_log("TLB %d vaddr=%x != tag=%x\n",
+ i, vaddr & mask, tlb_tag & mask));
+ continue;
+ }
+ if (mmu->tids[i]
+ && ((mmu->regs[MMU_R_PID] & 0xff) != mmu->tids[i])) {
+ D(qemu_log("TLB %d pid=%x != tid=%x\n",
+ i, mmu->regs[MMU_R_PID], mmu->tids[i]));
+ continue;
+ }
+
+ /* Bring in the data part. */
+ d = mmu->rams[RAM_DATA][i];
+ tlb_ex = d & TLB_EX;
+ tlb_wr = d & TLB_WR;
+
+ /* Now let's see if there is a zone that overrides the protbits. */
+ tlb_zsel = (d >> 4) & 0xf;
+ t0 = mmu->regs[MMU_R_ZPR] >> (30 - (tlb_zsel * 2));
+ t0 &= 0x3;
+
+ if (tlb_zsel > mmu->c_mmu_zones) {
+ qemu_log_mask(LOG_GUEST_ERROR, "tlb zone select out of range! %d\n", tlb_zsel);
+ t0 = 1; /* Ignore. */
+ }
+
+ if (mmu->c_mmu == 1) {
+ t0 = 1; /* Zones are disabled. */
+ }
+
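+            /* Zone semantics as implemented below (each zone select maps to
+             * a 2-bit field in ZPR): 0 appears to deny user-mode access
+             * entirely, 1 uses the TLB EX/WR bits as-is, 2 grants full
+             * access in privileged mode only, and 3 grants full access in
+             * both modes.
+             */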
+ switch (t0) {
+ case 0:
+ if (mmu_idx == MMU_USER_IDX)
+ continue;
+ break;
+ case 2:
+ if (mmu_idx != MMU_USER_IDX) {
+ tlb_ex = 1;
+ tlb_wr = 1;
+ }
+ break;
+ case 3:
+ tlb_ex = 1;
+ tlb_wr = 1;
+ break;
+ default: break;
+ }
+
+ lu->err = ERR_PROT;
+ lu->prot = PAGE_READ;
+ if (tlb_wr)
+ lu->prot |= PAGE_WRITE;
+ else if (rw == 1)
+ goto done;
+ if (tlb_ex)
+                lu->prot |= PAGE_EXEC;
+ else if (rw == 2) {
+ goto done;
+ }
+
+ tlb_rpn = d & TLB_RPN_MASK;
+
+ lu->vaddr = tlb_tag;
+ lu->paddr = tlb_rpn;
+ lu->size = tlb_size;
+ lu->err = ERR_HIT;
+ lu->idx = i;
+ hit = 1;
+ goto done;
+ }
+ }
+done:
+ D(qemu_log("MMU vaddr=%x rw=%d tlb_wr=%d tlb_ex=%d hit=%d\n",
+ vaddr, rw, tlb_wr, tlb_ex, hit));
+ return hit;
+}
+
+/* Writes/reads to the MMU's special regs end up here. */
+uint32_t mmu_read(CPUMBState *env, uint32_t rn)
+{
+ unsigned int i;
+ uint32_t r;
+
+ if (env->mmu.c_mmu < 2 || !env->mmu.c_mmu_tlb_access) {
+ qemu_log_mask(LOG_GUEST_ERROR, "MMU access on MMU-less system\n");
+ return 0;
+ }
+
+ switch (rn) {
+    /* Reads to HI/LO trigger reads from the mmu rams. */
+ case MMU_R_TLBLO:
+ case MMU_R_TLBHI:
+ if (!(env->mmu.c_mmu_tlb_access & 1)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid access to MMU reg %d\n", rn);
+ return 0;
+ }
+
+ i = env->mmu.regs[MMU_R_TLBX] & 0xff;
+ r = env->mmu.rams[rn & 1][i];
+ if (rn == MMU_R_TLBHI)
+ env->mmu.regs[MMU_R_PID] = env->mmu.tids[i];
+ break;
+ case MMU_R_PID:
+ case MMU_R_ZPR:
+ if (!(env->mmu.c_mmu_tlb_access & 1)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid access to MMU reg %d\n", rn);
+ return 0;
+ }
+ r = env->mmu.regs[rn];
+ break;
+ default:
+ r = env->mmu.regs[rn];
+ break;
+ }
+ D(qemu_log("%s rn=%d=%x\n", __func__, rn, r));
+ return r;
+}
+
+void mmu_write(CPUMBState *env, uint32_t rn, uint32_t v)
+{
+ MicroBlazeCPU *cpu = mb_env_get_cpu(env);
+ unsigned int i;
+ D(qemu_log("%s rn=%d=%x old=%x\n", __func__, rn, v, env->mmu.regs[rn]));
+
+ if (env->mmu.c_mmu < 2 || !env->mmu.c_mmu_tlb_access) {
+ qemu_log_mask(LOG_GUEST_ERROR, "MMU access on MMU-less system\n");
+ return;
+ }
+
+ switch (rn) {
+    /* Writes to HI/LO trigger writes to the mmu rams. */
+ case MMU_R_TLBLO:
+ case MMU_R_TLBHI:
+ i = env->mmu.regs[MMU_R_TLBX] & 0xff;
+ if (rn == MMU_R_TLBHI) {
+ if (i < 3 && !(v & TLB_VALID) && qemu_loglevel_mask(~0))
+ qemu_log_mask(LOG_GUEST_ERROR, "invalidating index %x at pc=%x\n",
+ i, env->sregs[SR_PC]);
+ env->mmu.tids[i] = env->mmu.regs[MMU_R_PID] & 0xff;
+ mmu_flush_idx(env, i);
+ }
+ env->mmu.rams[rn & 1][i] = v;
+
+ D(qemu_log("%s ram[%d][%d]=%x\n", __func__, rn & 1, i, v));
+ break;
+ case MMU_R_ZPR:
+ if (env->mmu.c_mmu_tlb_access <= 1) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid access to MMU reg %d\n", rn);
+ return;
+ }
+
+ /* Changes to the zone protection reg flush the QEMU TLB.
+ Fortunately, these are very uncommon. */
+ if (v != env->mmu.regs[rn]) {
+ tlb_flush(CPU(cpu), 1);
+ }
+ env->mmu.regs[rn] = v;
+ break;
+ case MMU_R_PID:
+ if (env->mmu.c_mmu_tlb_access <= 1) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid access to MMU reg %d\n", rn);
+ return;
+ }
+
+ if (v != env->mmu.regs[rn]) {
+ mmu_change_pid(env, v);
+ env->mmu.regs[rn] = v;
+ }
+ break;
+ case MMU_R_TLBSX:
+ {
+ struct microblaze_mmu_lookup lu;
+ int hit;
+
+ if (env->mmu.c_mmu_tlb_access <= 1) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid access to MMU reg %d\n", rn);
+ return;
+ }
+
+ hit = mmu_translate(&env->mmu, &lu,
+ v & TLB_EPN_MASK, 0, cpu_mmu_index(env, false));
+ if (hit) {
+ env->mmu.regs[MMU_R_TLBX] = lu.idx;
+ } else
+ env->mmu.regs[MMU_R_TLBX] |= 0x80000000;
+ break;
+ }
+ default:
+ env->mmu.regs[rn] = v;
+ break;
+ }
+}
+
+void mmu_init(struct microblaze_mmu *mmu)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(mmu->regs); i++) {
+ mmu->regs[i] = 0;
+ }
+}
diff --git a/target/microblaze/mmu.h b/target/microblaze/mmu.h
new file mode 100644
index 0000000000..3b7a9983d5
--- /dev/null
+++ b/target/microblaze/mmu.h
@@ -0,0 +1,90 @@
+/*
+ * Microblaze MMU emulation for qemu.
+ *
+ * Copyright (c) 2009 Edgar E. Iglesias
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define MMU_R_PID 0
+#define MMU_R_ZPR 1
+#define MMU_R_TLBX 2
+#define MMU_R_TLBLO 3
+#define MMU_R_TLBHI 4
+#define MMU_R_TLBSX 5
+
+#define RAM_DATA 1
+#define RAM_TAG 0
+
+/* Tag portion */
+#define TLB_EPN_MASK 0xFFFFFC00 /* Effective Page Number */
+#define TLB_PAGESZ_MASK 0x00000380
+#define TLB_PAGESZ(x) (((x) & 0x7) << 7)
+#define PAGESZ_1K 0
+#define PAGESZ_4K 1
+#define PAGESZ_16K 2
+#define PAGESZ_64K 3
+#define PAGESZ_256K 4
+#define PAGESZ_1M 5
+#define PAGESZ_4M 6
+#define PAGESZ_16M 7
+#define TLB_VALID 0x00000040 /* Entry is valid */
+
+/* Data portion */
+#define TLB_RPN_MASK 0xFFFFFC00 /* Real Page Number */
+#define TLB_PERM_MASK 0x00000300
+#define TLB_EX 0x00000200 /* Instruction execution allowed */
+#define TLB_WR 0x00000100 /* Writes permitted */
+#define TLB_ZSEL_MASK 0x000000F0
+#define TLB_ZSEL(x) (((x) & 0xF) << 4)
+#define TLB_ATTR_MASK 0x0000000F
+#define TLB_W 0x00000008 /* Caching is write-through */
+#define TLB_I 0x00000004 /* Caching is inhibited */
+#define TLB_M 0x00000002 /* Memory is coherent */
+#define TLB_G 0x00000001 /* Memory is guarded from prefetch */
+
+#define TLB_ENTRIES 64
+
+struct microblaze_mmu
+{
+ /* Data and tag brams. */
+ uint32_t rams[2][TLB_ENTRIES];
+ /* We keep a separate ram for the tids to avoid the 48 bit tag width. */
+ uint8_t tids[TLB_ENTRIES];
+ /* Control flops. */
+ uint32_t regs[8];
+
+ int c_mmu;
+ int c_mmu_tlb_access;
+ int c_mmu_zones;
+};
+
+struct microblaze_mmu_lookup
+{
+ uint32_t paddr;
+ uint32_t vaddr;
+ unsigned int size;
+ unsigned int idx;
+ int prot;
+ enum {
+ ERR_PROT, ERR_MISS, ERR_HIT
+ } err;
+};
+
+unsigned int mmu_translate(struct microblaze_mmu *mmu,
+ struct microblaze_mmu_lookup *lu,
+ target_ulong vaddr, int rw, int mmu_idx);
+uint32_t mmu_read(CPUMBState *env, uint32_t rn);
+void mmu_write(CPUMBState *env, uint32_t rn, uint32_t v);
+void mmu_init(struct microblaze_mmu *mmu);
diff --git a/target/microblaze/op_helper.c b/target/microblaze/op_helper.c
new file mode 100644
index 0000000000..4a856e6204
--- /dev/null
+++ b/target/microblaze/op_helper.c
@@ -0,0 +1,523 @@
+/*
+ * Microblaze helper routines.
+ *
+ * Copyright (c) 2009 Edgar E. Iglesias <edgar.iglesias@gmail.com>.
+ * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "qemu/host-utils.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+
+#define D(x)
+
+#if !defined(CONFIG_USER_ONLY)
+
+/* Try to fill the TLB and raise an exception on error. If retaddr is
+ * NULL, the function was called from C code (i.e. not from generated
+ * code or from helper.c).
+ */
+void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ int ret;
+
+ ret = mb_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
+ if (unlikely(ret)) {
+ if (retaddr) {
+ /* now we have a real cpu fault */
+ cpu_restore_state(cs, retaddr);
+ }
+ cpu_loop_exit(cs);
+ }
+}
+#endif
+
+void helper_put(uint32_t id, uint32_t ctrl, uint32_t data)
+{
+ int test = ctrl & STREAM_TEST;
+ int atomic = ctrl & STREAM_ATOMIC;
+ int control = ctrl & STREAM_CONTROL;
+ int nonblock = ctrl & STREAM_NONBLOCK;
+ int exception = ctrl & STREAM_EXCEPTION;
+
+ qemu_log_mask(LOG_UNIMP, "Unhandled stream put to stream-id=%d data=%x %s%s%s%s%s\n",
+ id, data,
+ test ? "t" : "",
+ nonblock ? "n" : "",
+ exception ? "e" : "",
+ control ? "c" : "",
+ atomic ? "a" : "");
+}
+
+uint32_t helper_get(uint32_t id, uint32_t ctrl)
+{
+ int test = ctrl & STREAM_TEST;
+ int atomic = ctrl & STREAM_ATOMIC;
+ int control = ctrl & STREAM_CONTROL;
+ int nonblock = ctrl & STREAM_NONBLOCK;
+ int exception = ctrl & STREAM_EXCEPTION;
+
+ qemu_log_mask(LOG_UNIMP, "Unhandled stream get from stream-id=%d %s%s%s%s%s\n",
+ id,
+ test ? "t" : "",
+ nonblock ? "n" : "",
+ exception ? "e" : "",
+ control ? "c" : "",
+ atomic ? "a" : "");
+ return 0xdead0000 | id;
+}
+
+void helper_raise_exception(CPUMBState *env, uint32_t index)
+{
+ CPUState *cs = CPU(mb_env_get_cpu(env));
+
+ cs->exception_index = index;
+ cpu_loop_exit(cs);
+}
+
+void helper_debug(CPUMBState *env)
+{
+ int i;
+
+ qemu_log("PC=%8.8x\n", env->sregs[SR_PC]);
+ qemu_log("rmsr=%x resr=%x rear=%x debug[%x] imm=%x iflags=%x\n",
+ env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
+ env->debug, env->imm, env->iflags);
+ qemu_log("btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
+ env->btaken, env->btarget,
+ (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
+ (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
+ (env->sregs[SR_MSR] & MSR_EIP),
+ (env->sregs[SR_MSR] & MSR_IE));
+ for (i = 0; i < 32; i++) {
+ qemu_log("r%2.2d=%8.8x ", i, env->regs[i]);
+ if ((i + 1) % 4 == 0)
+ qemu_log("\n");
+ }
+ qemu_log("\n\n");
+}
+
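+/* Examples: compute_carry(0xffffffff, 0, 1) == 1 and
+ * compute_carry(0x80000000, 0x80000000, 0) == 1, i.e. the helper returns
+ * the carry-out of a + b + cin.
+ */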
+static inline uint32_t compute_carry(uint32_t a, uint32_t b, uint32_t cin)
+{
+ uint32_t cout = 0;
+
+ if ((b == ~0) && cin)
+ cout = 1;
+ else if ((~0 - a) < (b + cin))
+ cout = 1;
+ return cout;
+}
+
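+/* Illustrative note: helper_cmp/helper_cmpu compute b - a and then force
+ * the MSB so that it reflects the signed resp. unsigned ordering when the
+ * sign bits of a and b differ.  E.g. helper_cmp(1, 0xfffffffe) returns
+ * 0xfffffffd (MSB set: b < a signed), while helper_cmpu(1, 0xfffffffe)
+ * returns 0x7ffffffd (MSB clear: b >= a unsigned).
+ */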
+uint32_t helper_cmp(uint32_t a, uint32_t b)
+{
+ uint32_t t;
+
+ t = b + ~a + 1;
+ if ((b & 0x80000000) ^ (a & 0x80000000))
+ t = (t & 0x7fffffff) | (b & 0x80000000);
+ return t;
+}
+
+uint32_t helper_cmpu(uint32_t a, uint32_t b)
+{
+ uint32_t t;
+
+ t = b + ~a + 1;
+ if ((b & 0x80000000) ^ (a & 0x80000000))
+ t = (t & 0x7fffffff) | (a & 0x80000000);
+ return t;
+}
+
+uint32_t helper_clz(uint32_t t0)
+{
+ return clz32(t0);
+}
+
+uint32_t helper_carry(uint32_t a, uint32_t b, uint32_t cf)
+{
+ return compute_carry(a, b, cf);
+}
+
+static inline int div_prepare(CPUMBState *env, uint32_t a, uint32_t b)
+{
+ if (b == 0) {
+ env->sregs[SR_MSR] |= MSR_DZ;
+
+ if ((env->sregs[SR_MSR] & MSR_EE)
+ && !(env->pvr.regs[2] & PVR2_DIV_ZERO_EXC_MASK)) {
+ env->sregs[SR_ESR] = ESR_EC_DIVZERO;
+ helper_raise_exception(env, EXCP_HW_EXCP);
+ }
+ return 0;
+ }
+ env->sregs[SR_MSR] &= ~MSR_DZ;
+ return 1;
+}
+
+uint32_t helper_divs(CPUMBState *env, uint32_t a, uint32_t b)
+{
+ if (!div_prepare(env, a, b)) {
+ return 0;
+ }
+ return (int32_t)a / (int32_t)b;
+}
+
+uint32_t helper_divu(CPUMBState *env, uint32_t a, uint32_t b)
+{
+ if (!div_prepare(env, a, b)) {
+ return 0;
+ }
+ return a / b;
+}
+
+/* raise FPU exception. */
+static void raise_fpu_exception(CPUMBState *env)
+{
+ env->sregs[SR_ESR] = ESR_EC_FPU;
+ helper_raise_exception(env, EXCP_HW_EXCP);
+}
+
+static void update_fpu_flags(CPUMBState *env, int flags)
+{
+ int raise = 0;
+
+ if (flags & float_flag_invalid) {
+ env->sregs[SR_FSR] |= FSR_IO;
+ raise = 1;
+ }
+ if (flags & float_flag_divbyzero) {
+ env->sregs[SR_FSR] |= FSR_DZ;
+ raise = 1;
+ }
+ if (flags & float_flag_overflow) {
+ env->sregs[SR_FSR] |= FSR_OF;
+ raise = 1;
+ }
+ if (flags & float_flag_underflow) {
+ env->sregs[SR_FSR] |= FSR_UF;
+ raise = 1;
+ }
+ if (raise
+ && (env->pvr.regs[2] & PVR2_FPU_EXC_MASK)
+ && (env->sregs[SR_MSR] & MSR_EE)) {
+ raise_fpu_exception(env);
+ }
+}
+
+uint32_t helper_fadd(CPUMBState *env, uint32_t a, uint32_t b)
+{
+ CPU_FloatU fd, fa, fb;
+ int flags;
+
+ set_float_exception_flags(0, &env->fp_status);
+ fa.l = a;
+ fb.l = b;
+ fd.f = float32_add(fa.f, fb.f, &env->fp_status);
+
+ flags = get_float_exception_flags(&env->fp_status);
+ update_fpu_flags(env, flags);
+ return fd.l;
+}
+
+uint32_t helper_frsub(CPUMBState *env, uint32_t a, uint32_t b)
+{
+ CPU_FloatU fd, fa, fb;
+ int flags;
+
+ set_float_exception_flags(0, &env->fp_status);
+ fa.l = a;
+ fb.l = b;
+ fd.f = float32_sub(fb.f, fa.f, &env->fp_status);
+ flags = get_float_exception_flags(&env->fp_status);
+ update_fpu_flags(env, flags);
+ return fd.l;
+}
+
+uint32_t helper_fmul(CPUMBState *env, uint32_t a, uint32_t b)
+{
+ CPU_FloatU fd, fa, fb;
+ int flags;
+
+ set_float_exception_flags(0, &env->fp_status);
+ fa.l = a;
+ fb.l = b;
+ fd.f = float32_mul(fa.f, fb.f, &env->fp_status);
+ flags = get_float_exception_flags(&env->fp_status);
+ update_fpu_flags(env, flags);
+
+ return fd.l;
+}
+
+uint32_t helper_fdiv(CPUMBState *env, uint32_t a, uint32_t b)
+{
+ CPU_FloatU fd, fa, fb;
+ int flags;
+
+ set_float_exception_flags(0, &env->fp_status);
+ fa.l = a;
+ fb.l = b;
+ fd.f = float32_div(fb.f, fa.f, &env->fp_status);
+ flags = get_float_exception_flags(&env->fp_status);
+ update_fpu_flags(env, flags);
+
+ return fd.l;
+}
+
+uint32_t helper_fcmp_un(CPUMBState *env, uint32_t a, uint32_t b)
+{
+ CPU_FloatU fa, fb;
+ uint32_t r = 0;
+
+ fa.l = a;
+ fb.l = b;
+
+ if (float32_is_signaling_nan(fa.f, &env->fp_status) ||
+ float32_is_signaling_nan(fb.f, &env->fp_status)) {
+ update_fpu_flags(env, float_flag_invalid);
+ r = 1;
+ }
+
+ if (float32_is_quiet_nan(fa.f, &env->fp_status) ||
+ float32_is_quiet_nan(fb.f, &env->fp_status)) {
+ r = 1;
+ }
+
+ return r;
+}
+
+uint32_t helper_fcmp_lt(CPUMBState *env, uint32_t a, uint32_t b)
+{
+ CPU_FloatU fa, fb;
+ int r;
+ int flags;
+
+ set_float_exception_flags(0, &env->fp_status);
+ fa.l = a;
+ fb.l = b;
+ r = float32_lt(fb.f, fa.f, &env->fp_status);
+ flags = get_float_exception_flags(&env->fp_status);
+ update_fpu_flags(env, flags & float_flag_invalid);
+
+ return r;
+}
+
+uint32_t helper_fcmp_eq(CPUMBState *env, uint32_t a, uint32_t b)
+{
+ CPU_FloatU fa, fb;
+ int flags;
+ int r;
+
+ set_float_exception_flags(0, &env->fp_status);
+ fa.l = a;
+ fb.l = b;
+ r = float32_eq_quiet(fa.f, fb.f, &env->fp_status);
+ flags = get_float_exception_flags(&env->fp_status);
+ update_fpu_flags(env, flags & float_flag_invalid);
+
+ return r;
+}
+
+uint32_t helper_fcmp_le(CPUMBState *env, uint32_t a, uint32_t b)
+{
+ CPU_FloatU fa, fb;
+ int flags;
+ int r;
+
+ fa.l = a;
+ fb.l = b;
+ set_float_exception_flags(0, &env->fp_status);
+ r = float32_le(fa.f, fb.f, &env->fp_status);
+ flags = get_float_exception_flags(&env->fp_status);
+ update_fpu_flags(env, flags & float_flag_invalid);
+
+
+ return r;
+}
+
+uint32_t helper_fcmp_gt(CPUMBState *env, uint32_t a, uint32_t b)
+{
+ CPU_FloatU fa, fb;
+ int flags, r;
+
+ fa.l = a;
+ fb.l = b;
+ set_float_exception_flags(0, &env->fp_status);
+ r = float32_lt(fa.f, fb.f, &env->fp_status);
+ flags = get_float_exception_flags(&env->fp_status);
+ update_fpu_flags(env, flags & float_flag_invalid);
+ return r;
+}
+
+uint32_t helper_fcmp_ne(CPUMBState *env, uint32_t a, uint32_t b)
+{
+ CPU_FloatU fa, fb;
+ int flags, r;
+
+ fa.l = a;
+ fb.l = b;
+ set_float_exception_flags(0, &env->fp_status);
+ r = !float32_eq_quiet(fa.f, fb.f, &env->fp_status);
+ flags = get_float_exception_flags(&env->fp_status);
+ update_fpu_flags(env, flags & float_flag_invalid);
+
+ return r;
+}
+
+uint32_t helper_fcmp_ge(CPUMBState *env, uint32_t a, uint32_t b)
+{
+ CPU_FloatU fa, fb;
+ int flags, r;
+
+ fa.l = a;
+ fb.l = b;
+ set_float_exception_flags(0, &env->fp_status);
+ r = !float32_lt(fa.f, fb.f, &env->fp_status);
+ flags = get_float_exception_flags(&env->fp_status);
+ update_fpu_flags(env, flags & float_flag_invalid);
+
+ return r;
+}
+
+uint32_t helper_flt(CPUMBState *env, uint32_t a)
+{
+ CPU_FloatU fd, fa;
+
+ fa.l = a;
+ fd.f = int32_to_float32(fa.l, &env->fp_status);
+ return fd.l;
+}
+
+uint32_t helper_fint(CPUMBState *env, uint32_t a)
+{
+ CPU_FloatU fa;
+ uint32_t r;
+ int flags;
+
+ set_float_exception_flags(0, &env->fp_status);
+ fa.l = a;
+ r = float32_to_int32(fa.f, &env->fp_status);
+ flags = get_float_exception_flags(&env->fp_status);
+ update_fpu_flags(env, flags);
+
+ return r;
+}
+
+uint32_t helper_fsqrt(CPUMBState *env, uint32_t a)
+{
+ CPU_FloatU fd, fa;
+ int flags;
+
+ set_float_exception_flags(0, &env->fp_status);
+ fa.l = a;
+ fd.l = float32_sqrt(fa.f, &env->fp_status);
+ flags = get_float_exception_flags(&env->fp_status);
+ update_fpu_flags(env, flags);
+
+ return fd.l;
+}
+
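+/* Illustrative note: pcmpbf returns the 1-based index (counting from the
+ * most significant byte) of the first byte position where a and b match,
+ * or 0 if no byte matches; e.g. helper_pcmpbf(0x11223344, 0xaa2233bb) == 2.
+ */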
+uint32_t helper_pcmpbf(uint32_t a, uint32_t b)
+{
+ unsigned int i;
+ uint32_t mask = 0xff000000;
+
+ for (i = 0; i < 4; i++) {
+ if ((a & mask) == (b & mask))
+ return i + 1;
+ mask >>= 8;
+ }
+ return 0;
+}
+
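+/* On a misaligned access the code below encodes the cause in ESR:
+ * ESR_EC_UNALIGNED_DATA plus the store flag (bit 10), the destination
+ * register number (bits 5..9) and, when the access was word sized
+ * (mask == 3), what is presumably the W bit (bit 11); the exception is
+ * only raised if MSR_EE is set.
+ */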
+void helper_memalign(CPUMBState *env, uint32_t addr, uint32_t dr, uint32_t wr,
+ uint32_t mask)
+{
+ if (addr & mask) {
+ qemu_log_mask(CPU_LOG_INT,
+ "unaligned access addr=%x mask=%x, wr=%d dr=r%d\n",
+ addr, mask, wr, dr);
+ env->sregs[SR_EAR] = addr;
+ env->sregs[SR_ESR] = ESR_EC_UNALIGNED_DATA | (wr << 10) \
+ | (dr & 31) << 5;
+ if (mask == 3) {
+ env->sregs[SR_ESR] |= 1 << 11;
+ }
+ if (!(env->sregs[SR_MSR] & MSR_EE)) {
+ return;
+ }
+ helper_raise_exception(env, EXCP_HW_EXCP);
+ }
+}
+
+void helper_stackprot(CPUMBState *env, uint32_t addr)
+{
+ if (addr < env->slr || addr > env->shr) {
+ qemu_log_mask(CPU_LOG_INT, "Stack protector violation at %x %x %x\n",
+ addr, env->slr, env->shr);
+ env->sregs[SR_EAR] = addr;
+ env->sregs[SR_ESR] = ESR_EC_STACKPROT;
+ helper_raise_exception(env, EXCP_HW_EXCP);
+ }
+}
+
+#if !defined(CONFIG_USER_ONLY)
+/* Writes/reads to the MMU's special regs end up here. */
+uint32_t helper_mmu_read(CPUMBState *env, uint32_t rn)
+{
+ return mmu_read(env, rn);
+}
+
+void helper_mmu_write(CPUMBState *env, uint32_t rn, uint32_t v)
+{
+ mmu_write(env, rn, v);
+}
+
+void mb_cpu_unassigned_access(CPUState *cs, hwaddr addr,
+ bool is_write, bool is_exec, int is_asi,
+ unsigned size)
+{
+ MicroBlazeCPU *cpu;
+ CPUMBState *env;
+
+ qemu_log_mask(CPU_LOG_INT, "Unassigned " TARGET_FMT_plx " wr=%d exe=%d\n",
+ addr, is_write ? 1 : 0, is_exec ? 1 : 0);
+ if (cs == NULL) {
+ return;
+ }
+ cpu = MICROBLAZE_CPU(cs);
+ env = &cpu->env;
+ if (!(env->sregs[SR_MSR] & MSR_EE)) {
+ return;
+ }
+
+ env->sregs[SR_EAR] = addr;
+ if (is_exec) {
+ if ((env->pvr.regs[2] & PVR2_IOPB_BUS_EXC_MASK)) {
+ env->sregs[SR_ESR] = ESR_EC_INSN_BUS;
+ helper_raise_exception(env, EXCP_HW_EXCP);
+ }
+ } else {
+ if ((env->pvr.regs[2] & PVR2_DOPB_BUS_EXC_MASK)) {
+ env->sregs[SR_ESR] = ESR_EC_DATA_BUS;
+ helper_raise_exception(env, EXCP_HW_EXCP);
+ }
+ }
+}
+#endif
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
new file mode 100644
index 0000000000..de2090ac71
--- /dev/null
+++ b/target/microblaze/translate.c
@@ -0,0 +1,1872 @@
+/*
+ * Xilinx MicroBlaze emulation for qemu: main translation routines.
+ *
+ * Copyright (c) 2009 Edgar E. Iglesias.
+ * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "disas/disas.h"
+#include "exec/exec-all.h"
+#include "tcg-op.h"
+#include "exec/helper-proto.h"
+#include "microblaze-decode.h"
+#include "exec/cpu_ldst.h"
+#include "exec/helper-gen.h"
+
+#include "trace-tcg.h"
+#include "exec/log.h"
+
+
+#define SIM_COMPAT 0
+#define DISAS_GNU 1
+#define DISAS_MB 1
+#if DISAS_MB && !SIM_COMPAT
+# define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
+#else
+# define LOG_DIS(...) do { } while (0)
+#endif
+
+#define D(x)
+
+#define EXTRACT_FIELD(src, start, end) \
+ (((src) >> start) & ((1 << (end - start + 1)) - 1))
+
+static TCGv env_debug;
+static TCGv_env cpu_env;
+static TCGv cpu_R[32];
+static TCGv cpu_SR[18];
+static TCGv env_imm;
+static TCGv env_btaken;
+static TCGv env_btarget;
+static TCGv env_iflags;
+static TCGv env_res_addr;
+static TCGv env_res_val;
+
+#include "exec/gen-icount.h"
+
+/* This is the state at translation time. */
+typedef struct DisasContext {
+ MicroBlazeCPU *cpu;
+ target_ulong pc;
+
+ /* Decoder. */
+ int type_b;
+ uint32_t ir;
+ uint8_t opcode;
+ uint8_t rd, ra, rb;
+ uint16_t imm;
+
+ unsigned int cpustate_changed;
+ unsigned int delayed_branch;
+ unsigned int tb_flags, synced_flags; /* tb dependent flags. */
+ unsigned int clear_imm;
+ int is_jmp;
+
+#define JMP_NOJMP 0
+#define JMP_DIRECT 1
+#define JMP_DIRECT_CC 2
+#define JMP_INDIRECT 3
+ unsigned int jmp;
+ uint32_t jmp_pc;
+
+ int abort_at_next_insn;
+ int nr_nops;
+ struct TranslationBlock *tb;
+ int singlestep_enabled;
+} DisasContext;
+
+static const char *regnames[] =
+{
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+ "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
+};
+
+static const char *special_regnames[] =
+{
+ "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
+ "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
+ "sr16", "sr17", "sr18"
+};
+
+static inline void t_sync_flags(DisasContext *dc)
+{
+ /* Synch the tb dependent flags between translator and runtime. */
+ if (dc->tb_flags != dc->synced_flags) {
+ tcg_gen_movi_tl(env_iflags, dc->tb_flags);
+ dc->synced_flags = dc->tb_flags;
+ }
+}
+
+static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
+{
+ TCGv_i32 tmp = tcg_const_i32(index);
+
+ t_sync_flags(dc);
+ tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
+ gen_helper_raise_exception(cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+ dc->is_jmp = DISAS_UPDATE;
+}
+
+static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
+{
+#ifndef CONFIG_USER_ONLY
+ return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
+#else
+ return true;
+#endif
+}
+
+static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
+{
+ if (use_goto_tb(dc, dest)) {
+ tcg_gen_goto_tb(n);
+ tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
+ tcg_gen_exit_tb((uintptr_t)dc->tb + n);
+ } else {
+ tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
+ tcg_gen_exit_tb(0);
+ }
+}
+
+static void read_carry(DisasContext *dc, TCGv d)
+{
+ tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
+}
+
+/*
+ * write_carry sets the carry bits in MSR based on bit 0 of v.
+ * v[31:1] are ignored.
+ */
+static void write_carry(DisasContext *dc, TCGv v)
+{
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_shli_tl(t0, v, 31);
+ tcg_gen_sari_tl(t0, t0, 31);
+ tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
+ tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
+ ~(MSR_C | MSR_CC));
+ tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
+ tcg_temp_free(t0);
+}
+
+static void write_carryi(DisasContext *dc, bool carry)
+{
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_movi_tl(t0, carry);
+ write_carry(dc, t0);
+ tcg_temp_free(t0);
+}
+
+/* True if ALU operand b is a small immediate that may deserve
+ faster treatment. */
+static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
+{
+    /* Immediate insn without the imm prefix? */
+ return dc->type_b && !(dc->tb_flags & IMM_FLAG);
+}
+
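+/* Returns the second ALU operand: either rb, or the composed immediate.
+ * For type-B insns the 16-bit immediate is sign-extended, unless an imm
+ * prefix is pending (IMM_FLAG), in which case the prefix is assumed to
+ * have loaded the upper half into env_imm and the low half is OR'ed in
+ * here.
+ */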
+static inline TCGv *dec_alu_op_b(DisasContext *dc)
+{
+ if (dc->type_b) {
+ if (dc->tb_flags & IMM_FLAG)
+ tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
+ else
+ tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
+ return &env_imm;
+ } else
+ return &cpu_R[dc->rb];
+}
+
+static void dec_add(DisasContext *dc)
+{
+ unsigned int k, c;
+ TCGv cf;
+
+ k = dc->opcode & 4;
+ c = dc->opcode & 2;
+
+ LOG_DIS("add%s%s%s r%d r%d r%d\n",
+ dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
+ dc->rd, dc->ra, dc->rb);
+
+ /* Take care of the easy cases first. */
+ if (k) {
+ /* k - keep carry, no need to update MSR. */
+ /* If rd == r0, it's a nop. */
+ if (dc->rd) {
+ tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
+
+ if (c) {
+ /* c - Add carry into the result. */
+ cf = tcg_temp_new();
+
+ read_carry(dc, cf);
+ tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
+ tcg_temp_free(cf);
+ }
+ }
+ return;
+ }
+
+ /* From now on, we can assume k is zero. So we need to update MSR. */
+ /* Extract carry. */
+ cf = tcg_temp_new();
+ if (c) {
+ read_carry(dc, cf);
+ } else {
+ tcg_gen_movi_tl(cf, 0);
+ }
+
+ if (dc->rd) {
+ TCGv ncf = tcg_temp_new();
+ gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
+ tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
+ tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
+ write_carry(dc, ncf);
+ tcg_temp_free(ncf);
+ } else {
+ gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
+ write_carry(dc, cf);
+ }
+ tcg_temp_free(cf);
+}
+
+static void dec_sub(DisasContext *dc)
+{
+ unsigned int u, cmp, k, c;
+ TCGv cf, na;
+
+ u = dc->imm & 2;
+ k = dc->opcode & 4;
+ c = dc->opcode & 2;
+ cmp = (dc->imm & 1) && (!dc->type_b) && k;
+
+ if (cmp) {
+ LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
+ if (dc->rd) {
+ if (u)
+ gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
+ else
+ gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
+ }
+ return;
+ }
+
+ LOG_DIS("sub%s%s r%d, r%d r%d\n",
+ k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);
+
+ /* Take care of the easy cases first. */
+ if (k) {
+ /* k - keep carry, no need to update MSR. */
+ /* If rd == r0, it's a nop. */
+ if (dc->rd) {
+ tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
+
+ if (c) {
+ /* c - Add carry into the result. */
+ cf = tcg_temp_new();
+
+ read_carry(dc, cf);
+ tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
+ tcg_temp_free(cf);
+ }
+ }
+ return;
+ }
+
+ /* From now on, we can assume k is zero. So we need to update MSR. */
+ /* Extract carry. And complement a into na. */
+ cf = tcg_temp_new();
+ na = tcg_temp_new();
+ if (c) {
+ read_carry(dc, cf);
+ } else {
+ tcg_gen_movi_tl(cf, 1);
+ }
+
+ /* d = b + ~a + c. carry defaults to 1. */
+ tcg_gen_not_tl(na, cpu_R[dc->ra]);
+
+ if (dc->rd) {
+ TCGv ncf = tcg_temp_new();
+ gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
+ tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
+ tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
+ write_carry(dc, ncf);
+ tcg_temp_free(ncf);
+ } else {
+ gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
+ write_carry(dc, cf);
+ }
+ tcg_temp_free(cf);
+ tcg_temp_free(na);
+}
+
+static void dec_pattern(DisasContext *dc)
+{
+ unsigned int mode;
+
+ if ((dc->tb_flags & MSR_EE_FLAG)
+ && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
+ && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
+ tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
+ t_gen_raise_exception(dc, EXCP_HW_EXCP);
+ }
+
+ mode = dc->opcode & 3;
+ switch (mode) {
+ case 0:
+ /* pcmpbf. */
+ LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
+ if (dc->rd)
+ gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
+ break;
+ case 2:
+ LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
+ if (dc->rd) {
+ tcg_gen_setcond_tl(TCG_COND_EQ, cpu_R[dc->rd],
+ cpu_R[dc->ra], cpu_R[dc->rb]);
+ }
+ break;
+ case 3:
+ LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
+ if (dc->rd) {
+ tcg_gen_setcond_tl(TCG_COND_NE, cpu_R[dc->rd],
+ cpu_R[dc->ra], cpu_R[dc->rb]);
+ }
+ break;
+ default:
+ cpu_abort(CPU(dc->cpu),
+ "unsupported pattern insn opcode=%x\n", dc->opcode);
+ break;
+ }
+}
+
+static void dec_and(DisasContext *dc)
+{
+ unsigned int not;
+
+ if (!dc->type_b && (dc->imm & (1 << 10))) {
+ dec_pattern(dc);
+ return;
+ }
+
+ not = dc->opcode & (1 << 1);
+ LOG_DIS("and%s\n", not ? "n" : "");
+
+ if (!dc->rd)
+ return;
+
+ if (not) {
+ tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
+ } else
+ tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
+}
+
+static void dec_or(DisasContext *dc)
+{
+ if (!dc->type_b && (dc->imm & (1 << 10))) {
+ dec_pattern(dc);
+ return;
+ }
+
+ LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
+ if (dc->rd)
+ tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
+}
+
+static void dec_xor(DisasContext *dc)
+{
+ if (!dc->type_b && (dc->imm & (1 << 10))) {
+ dec_pattern(dc);
+ return;
+ }
+
+ LOG_DIS("xor r%d\n", dc->rd);
+ if (dc->rd)
+ tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
+}
+
+static inline void msr_read(DisasContext *dc, TCGv d)
+{
+ tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
+}
+
+static inline void msr_write(DisasContext *dc, TCGv v)
+{
+ TCGv t;
+
+ t = tcg_temp_new();
+ dc->cpustate_changed = 1;
+    /* The PVR bit is not writable; mask it out of the value being written. */
+    tcg_gen_andi_tl(t, v, ~MSR_PVR);
+    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
+    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
+ tcg_temp_free(t);
+}
+
+static void dec_msr(DisasContext *dc)
+{
+ CPUState *cs = CPU(dc->cpu);
+ TCGv t0, t1;
+ unsigned int sr, to, rn;
+ int mem_index = cpu_mmu_index(&dc->cpu->env, false);
+
+ sr = dc->imm & ((1 << 14) - 1);
+ to = dc->imm & (1 << 14);
+ dc->type_b = 1;
+ if (to)
+ dc->cpustate_changed = 1;
+
+ /* msrclr and msrset. */
+ if (!(dc->imm & (1 << 15))) {
+ unsigned int clr = dc->ir & (1 << 16);
+
+ LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
+ dc->rd, dc->imm);
+
+ if (!(dc->cpu->env.pvr.regs[2] & PVR2_USE_MSR_INSTR)) {
+ /* nop??? */
+ return;
+ }
+
+ if ((dc->tb_flags & MSR_EE_FLAG)
+ && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
+ tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
+ t_gen_raise_exception(dc, EXCP_HW_EXCP);
+ return;
+ }
+
+ if (dc->rd)
+ msr_read(dc, cpu_R[dc->rd]);
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ msr_read(dc, t0);
+ tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));
+
+ if (clr) {
+ tcg_gen_not_tl(t1, t1);
+ tcg_gen_and_tl(t0, t0, t1);
+ } else
+ tcg_gen_or_tl(t0, t0, t1);
+ msr_write(dc, t0);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
+ dc->is_jmp = DISAS_UPDATE;
+ return;
+ }
+
+ if (to) {
+ if ((dc->tb_flags & MSR_EE_FLAG)
+ && mem_index == MMU_USER_IDX) {
+ tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
+ t_gen_raise_exception(dc, EXCP_HW_EXCP);
+ return;
+ }
+ }
+
+#if !defined(CONFIG_USER_ONLY)
+ /* Catch read/writes to the mmu block. */
+ if ((sr & ~0xff) == 0x1000) {
+ sr &= 7;
+ LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
+ if (to)
+ gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
+ else
+ gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
+ return;
+ }
+#endif
+
+ if (to) {
+ LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
+ switch (sr) {
+ case 0:
+ break;
+ case 1:
+ msr_write(dc, cpu_R[dc->ra]);
+ break;
+ case 0x3:
+ tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
+ break;
+ case 0x5:
+ tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
+ break;
+ case 0x7:
+ tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
+ break;
+ case 0x800:
+ tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
+ break;
+ case 0x802:
+ tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
+ break;
+ default:
+ cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
+ break;
+ }
+ } else {
+ LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);
+
+ switch (sr) {
+ case 0:
+ tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
+ break;
+ case 1:
+ msr_read(dc, cpu_R[dc->rd]);
+ break;
+ case 0x3:
+ tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
+ break;
+ case 0x5:
+ tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
+ break;
+ case 0x7:
+ tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
+ break;
+ case 0xb:
+ tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
+ break;
+ case 0x800:
+ tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
+ break;
+ case 0x802:
+ tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
+ break;
+ case 0x2000:
+ case 0x2001:
+ case 0x2002:
+ case 0x2003:
+ case 0x2004:
+ case 0x2005:
+ case 0x2006:
+ case 0x2007:
+ case 0x2008:
+ case 0x2009:
+ case 0x200a:
+ case 0x200b:
+ case 0x200c:
+ rn = sr & 0xf;
+ tcg_gen_ld_tl(cpu_R[dc->rd],
+ cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
+ break;
+ default:
+ cpu_abort(cs, "unknown mfs reg %x\n", sr);
+ break;
+ }
+ }
+
+ if (dc->rd == 0) {
+ tcg_gen_movi_tl(cpu_R[0], 0);
+ }
+}
+
+/* Multiplier unit. */
+static void dec_mul(DisasContext *dc)
+{
+ TCGv tmp;
+ unsigned int subcode;
+
+ if ((dc->tb_flags & MSR_EE_FLAG)
+ && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
+ && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
+ tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
+ t_gen_raise_exception(dc, EXCP_HW_EXCP);
+ return;
+ }
+
+ subcode = dc->imm & 3;
+
+ if (dc->type_b) {
+ LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
+ tcg_gen_mul_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
+ return;
+ }
+
+ /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2. */
+ if (subcode >= 1 && subcode <= 3
+ && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_MUL64_MASK))) {
+ /* nop??? */
+ }
+
+ tmp = tcg_temp_new();
+ switch (subcode) {
+ case 0:
+ LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
+ tcg_gen_mul_tl(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
+ break;
+ case 1:
+ LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
+ tcg_gen_muls2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
+ break;
+ case 2:
+ LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
+ tcg_gen_mulsu2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
+ break;
+ case 3:
+ LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
+ tcg_gen_mulu2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
+ break;
+ default:
+ cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
+ break;
+ }
+ tcg_temp_free(tmp);
+}
+
+/* Div unit. */
+static void dec_div(DisasContext *dc)
+{
+ unsigned int u;
+
+ u = dc->imm & 2;
+ LOG_DIS("div\n");
+
+ if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
+ && !((dc->cpu->env.pvr.regs[0] & PVR0_USE_DIV_MASK))) {
+ tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
+ t_gen_raise_exception(dc, EXCP_HW_EXCP);
+ }
+
+ if (u)
+ gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
+ cpu_R[dc->ra]);
+ else
+ gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
+ cpu_R[dc->ra]);
+ if (!dc->rd)
+ tcg_gen_movi_tl(cpu_R[dc->rd], 0);
+}
+
+static void dec_barrel(DisasContext *dc)
+{
+ TCGv t0;
+ unsigned int s, t;
+
+ if ((dc->tb_flags & MSR_EE_FLAG)
+ && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
+ && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
+ tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
+ t_gen_raise_exception(dc, EXCP_HW_EXCP);
+ return;
+ }
+
+ s = dc->imm & (1 << 10);
+ t = dc->imm & (1 << 9);
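+    /* S selects the direction (1: bsll, shift left); for right shifts,
+       T selects arithmetic vs logical (1: bsra, 0: bsrl). */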
+
+ LOG_DIS("bs%s%s r%d r%d r%d\n",
+ s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);
+
+ t0 = tcg_temp_new();
+
+ tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
+ tcg_gen_andi_tl(t0, t0, 31);
+
+ if (s)
+ tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
+ else {
+ if (t)
+ tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
+ else
+ tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
+ }
+}
+
+static void dec_bit(DisasContext *dc)
+{
+ CPUState *cs = CPU(dc->cpu);
+ TCGv t0;
+ unsigned int op;
+ int mem_index = cpu_mmu_index(&dc->cpu->env, false);
+
+ op = dc->ir & ((1 << 9) - 1);
+ switch (op) {
+ case 0x21:
+ /* src. */
+ t0 = tcg_temp_new();
+
+ LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
+ tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
+ write_carry(dc, cpu_R[dc->ra]);
+ if (dc->rd) {
+ tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
+ tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
+ }
+ tcg_temp_free(t0);
+ break;
+
+ case 0x1:
+ case 0x41:
+        /* sra (op 0x1) / srl (op 0x41). */
+        LOG_DIS("s%s r%d r%d\n", op == 0x41 ? "rl" : "ra", dc->rd, dc->ra);
+
+ /* Update carry. Note that write carry only looks at the LSB. */
+ write_carry(dc, cpu_R[dc->ra]);
+ if (dc->rd) {
+ if (op == 0x41)
+ tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
+ else
+ tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
+ }
+ break;
+ case 0x60:
+ LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
+ tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
+ break;
+ case 0x61:
+ LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
+ tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
+ break;
+ case 0x64:
+ case 0x66:
+ case 0x74:
+ case 0x76:
+ /* wdc. */
+ LOG_DIS("wdc r%d\n", dc->ra);
+ if ((dc->tb_flags & MSR_EE_FLAG)
+ && mem_index == MMU_USER_IDX) {
+ tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
+ t_gen_raise_exception(dc, EXCP_HW_EXCP);
+ return;
+ }
+ break;
+ case 0x68:
+ /* wic. */
+ LOG_DIS("wic r%d\n", dc->ra);
+ if ((dc->tb_flags & MSR_EE_FLAG)
+ && mem_index == MMU_USER_IDX) {
+ tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
+ t_gen_raise_exception(dc, EXCP_HW_EXCP);
+ return;
+ }
+ break;
+ case 0xe0:
+ if ((dc->tb_flags & MSR_EE_FLAG)
+ && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
+ && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
+ tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
+ t_gen_raise_exception(dc, EXCP_HW_EXCP);
+ }
+ if (dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
+ gen_helper_clz(cpu_R[dc->rd], cpu_R[dc->ra]);
+ }
+ break;
+ case 0x1e0:
+ /* swapb */
+ LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
+ tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
+ break;
+ case 0x1e2:
+        /* swaph */
+ LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
+ tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
+ break;
+ default:
+ cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
+ dc->pc, op, dc->rd, dc->ra, dc->rb);
+ break;
+ }
+}
+
+static inline void sync_jmpstate(DisasContext *dc)
+{
+ if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
+ if (dc->jmp == JMP_DIRECT) {
+ tcg_gen_movi_tl(env_btaken, 1);
+ }
+ dc->jmp = JMP_INDIRECT;
+ tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
+ }
+}
+
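+/* imm is a prefix insn: it latches the upper 16 bits of a 32-bit immediate
+   into env_imm and sets IMM_FLAG so that the 16-bit immediate of the
+   following type-b insn is combined with it; clear_imm is cleared so the
+   flag survives until that insn has been decoded. */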
+static void dec_imm(DisasContext *dc)
+{
+ LOG_DIS("imm %x\n", dc->imm << 16);
+ tcg_gen_movi_tl(env_imm, (dc->imm << 16));
+ dc->tb_flags |= IMM_FLAG;
+ dc->clear_imm = 0;
+}
+
+static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
+{
+ unsigned int extimm = dc->tb_flags & IMM_FLAG;
+    /* Should be set to one if r1 is used by load/stores. */
+ int stackprot = 0;
+
+ /* All load/stores use ra. */
+ if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
+ stackprot = 1;
+ }
+
+ /* Treat the common cases first. */
+ if (!dc->type_b) {
+ /* If any of the regs is r0, return a ptr to the other. */
+ if (dc->ra == 0) {
+ return &cpu_R[dc->rb];
+ } else if (dc->rb == 0) {
+ return &cpu_R[dc->ra];
+ }
+
+ if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
+ stackprot = 1;
+ }
+
+ *t = tcg_temp_new();
+ tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);
+
+ if (stackprot) {
+ gen_helper_stackprot(cpu_env, *t);
+ }
+ return t;
+ }
+ /* Immediate. */
+ if (!extimm) {
+ if (dc->imm == 0) {
+ return &cpu_R[dc->ra];
+ }
+ *t = tcg_temp_new();
+ tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
+ tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
+ } else {
+ *t = tcg_temp_new();
+ tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
+ }
+
+ if (stackprot) {
+ gen_helper_stackprot(cpu_env, *t);
+ }
+ return t;
+}
+
+static void dec_load(DisasContext *dc)
+{
+ TCGv t, v, *addr;
+ unsigned int size, rev = 0, ex = 0;
+ TCGMemOp mop;
+
+ mop = dc->opcode & 3;
+ size = 1 << mop;
+ if (!dc->type_b) {
+ rev = (dc->ir >> 9) & 1;
+ ex = (dc->ir >> 10) & 1;
+ }
+ mop |= MO_TE;
+ if (rev) {
+ mop ^= MO_BSWAP;
+ }
+
+ if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
+ && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
+ tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
+ t_gen_raise_exception(dc, EXCP_HW_EXCP);
+ return;
+ }
+
+ LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
+ ex ? "x" : "");
+
+ t_sync_flags(dc);
+ addr = compute_ldst_addr(dc, &t);
+
+ /*
+ * When doing reverse accesses we need to do two things.
+ *
+ * 1. Reverse the address wrt endianness.
+ * 2. Byteswap the data lanes on the way back into the CPU core.
+ */
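+    /*
+     * Concretely (illustrative): for byte accesses the low two address bits
+     * are replaced by their complement within the word,
+     * addr = (addr & ~3) | (3 - (addr & 3)); for halfword accesses bit 1 is
+     * simply flipped, addr ^= 2.  That is what the two cases below implement.
+     */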
+ if (rev && size != 4) {
+ /* Endian reverse the address. t is addr. */
+ switch (size) {
+ case 1:
+ {
+ /* 00 -> 11
+ 01 -> 10
+               10 -> 01
+ 11 -> 00 */
+ TCGv low = tcg_temp_new();
+
+ /* Force addr into the temp. */
+ if (addr != &t) {
+ t = tcg_temp_new();
+ tcg_gen_mov_tl(t, *addr);
+ addr = &t;
+ }
+
+ tcg_gen_andi_tl(low, t, 3);
+ tcg_gen_sub_tl(low, tcg_const_tl(3), low);
+ tcg_gen_andi_tl(t, t, ~3);
+ tcg_gen_or_tl(t, t, low);
+ tcg_gen_mov_tl(env_imm, t);
+ tcg_temp_free(low);
+ break;
+ }
+
+ case 2:
+ /* 00 -> 10
+ 10 -> 00. */
+ /* Force addr into the temp. */
+ if (addr != &t) {
+ t = tcg_temp_new();
+ tcg_gen_xori_tl(t, *addr, 2);
+ addr = &t;
+ } else {
+ tcg_gen_xori_tl(t, t, 2);
+ }
+ break;
+ default:
+ cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
+ break;
+ }
+ }
+
+ /* lwx does not throw unaligned access errors, so force alignment */
+ if (ex) {
+ /* Force addr into the temp. */
+ if (addr != &t) {
+ t = tcg_temp_new();
+ tcg_gen_mov_tl(t, *addr);
+ addr = &t;
+ }
+ tcg_gen_andi_tl(t, t, ~3);
+ }
+
+ /* If we get a fault on a dslot, the jmpstate better be in sync. */
+ sync_jmpstate(dc);
+
+ /* Verify alignment if needed. */
+ /*
+ * Microblaze gives MMU faults priority over faults due to
+ * unaligned addresses. That's why we speculatively do the load
+ * into v. If the load succeeds, we verify alignment of the
+ * address and if that succeeds we write into the destination reg.
+ */
+ v = tcg_temp_new();
+ tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env, false), mop);
+
+ if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
+ tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
+ gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
+ tcg_const_tl(0), tcg_const_tl(size - 1));
+ }
+
+ if (ex) {
+ tcg_gen_mov_tl(env_res_addr, *addr);
+ tcg_gen_mov_tl(env_res_val, v);
+ }
+ if (dc->rd) {
+ tcg_gen_mov_tl(cpu_R[dc->rd], v);
+ }
+ tcg_temp_free(v);
+
+ if (ex) { /* lwx */
+ /* no support for AXI exclusive so always clear C */
+ write_carryi(dc, 0);
+ }
+
+ if (addr == &t)
+ tcg_temp_free(t);
+}
+
+static void dec_store(DisasContext *dc)
+{
+ TCGv t, *addr, swx_addr;
+ TCGLabel *swx_skip = NULL;
+ unsigned int size, rev = 0, ex = 0;
+ TCGMemOp mop;
+
+ mop = dc->opcode & 3;
+ size = 1 << mop;
+ if (!dc->type_b) {
+ rev = (dc->ir >> 9) & 1;
+ ex = (dc->ir >> 10) & 1;
+ }
+ mop |= MO_TE;
+ if (rev) {
+ mop ^= MO_BSWAP;
+ }
+
+ if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
+ && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
+ tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
+ t_gen_raise_exception(dc, EXCP_HW_EXCP);
+ return;
+ }
+
+ LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
+ ex ? "x" : "");
+ t_sync_flags(dc);
+ /* If we get a fault on a dslot, the jmpstate better be in sync. */
+ sync_jmpstate(dc);
+ addr = compute_ldst_addr(dc, &t);
+
+ swx_addr = tcg_temp_local_new();
+ if (ex) { /* swx */
+ TCGv tval;
+
+ /* Force addr into the swx_addr. */
+ tcg_gen_mov_tl(swx_addr, *addr);
+ addr = &swx_addr;
+ /* swx does not throw unaligned access errors, so force alignment */
+ tcg_gen_andi_tl(swx_addr, swx_addr, ~3);
+
+ write_carryi(dc, 1);
+ swx_skip = gen_new_label();
+ tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);
+
+ /* Compare the value loaded at lwx with current contents of
+ the reserved location.
+ FIXME: This only works for system emulation where we can expect
+ this compare and the following write to be atomic. For user
+ emulation we need to add atomicity between threads. */
+ tval = tcg_temp_new();
+ tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env, false),
+ MO_TEUL);
+ tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
+ write_carryi(dc, 0);
+ tcg_temp_free(tval);
+ }
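+    /* For reference, a guest retry loop built on lwx/swx looks roughly like
+     * this (illustrative sketch only, not taken from any particular guest):
+     *     1:  lwx   r5, r6, r0     ; load word, set reservation
+     *         ...update r5...
+     *         swx   r5, r6, r0     ; store iff reservation held; MSR[C]=0 on success
+     *         addic r7, r0, 0      ; r7 = carry
+     *         bnei  r7, 1b         ; retry on failure
+     */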
+
+ if (rev && size != 4) {
+ /* Endian reverse the address. t is addr. */
+ switch (size) {
+ case 1:
+ {
+ /* 00 -> 11
+ 01 -> 10
+               10 -> 01
+ 11 -> 00 */
+ TCGv low = tcg_temp_new();
+
+ /* Force addr into the temp. */
+ if (addr != &t) {
+ t = tcg_temp_new();
+ tcg_gen_mov_tl(t, *addr);
+ addr = &t;
+ }
+
+ tcg_gen_andi_tl(low, t, 3);
+ tcg_gen_sub_tl(low, tcg_const_tl(3), low);
+ tcg_gen_andi_tl(t, t, ~3);
+ tcg_gen_or_tl(t, t, low);
+ tcg_gen_mov_tl(env_imm, t);
+ tcg_temp_free(low);
+ break;
+ }
+
+ case 2:
+ /* 00 -> 10
+ 10 -> 00. */
+ /* Force addr into the temp. */
+ if (addr != &t) {
+ t = tcg_temp_new();
+ tcg_gen_xori_tl(t, *addr, 2);
+ addr = &t;
+ } else {
+ tcg_gen_xori_tl(t, t, 2);
+ }
+ break;
+ default:
+ cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
+ break;
+ }
+ }
+ tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env, false), mop);
+
+ /* Verify alignment if needed. */
+ if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
+ tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
+ /* FIXME: if the alignment is wrong, we should restore the value
+ * in memory. One possible way to achieve this is to probe
+         * the MMU prior to the memaccess, that way we could put
+ * the alignment checks in between the probe and the mem
+ * access.
+ */
+ gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
+ tcg_const_tl(1), tcg_const_tl(size - 1));
+ }
+
+ if (ex) {
+ gen_set_label(swx_skip);
+ }
+ tcg_temp_free(swx_addr);
+
+ if (addr == &t)
+ tcg_temp_free(t);
+}
+
+static inline void eval_cc(DisasContext *dc, unsigned int cc,
+ TCGv d, TCGv a, TCGv b)
+{
+ switch (cc) {
+ case CC_EQ:
+ tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
+ break;
+ case CC_NE:
+ tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
+ break;
+ case CC_LT:
+ tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
+ break;
+ case CC_LE:
+ tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
+ break;
+ case CC_GE:
+ tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
+ break;
+ case CC_GT:
+ tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
+ break;
+ default:
+ cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
+ break;
+ }
+}
+
+static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
+{
+ TCGLabel *l1 = gen_new_label();
+ /* Conditional jmp. */
+ tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
+ tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
+ gen_set_label(l1);
+}
+
+static void dec_bcc(DisasContext *dc)
+{
+ unsigned int cc;
+ unsigned int dslot;
+
+ cc = EXTRACT_FIELD(dc->ir, 21, 23);
+ dslot = dc->ir & (1 << 25);
+ LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);
+
+ dc->delayed_branch = 1;
+ if (dslot) {
+ dc->delayed_branch = 2;
+ dc->tb_flags |= D_FLAG;
+ tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
+ cpu_env, offsetof(CPUMBState, bimm));
+ }
+
+ if (dec_alu_op_b_is_small_imm(dc)) {
+ int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend. */
+
+ tcg_gen_movi_tl(env_btarget, dc->pc + offset);
+ dc->jmp = JMP_DIRECT_CC;
+ dc->jmp_pc = dc->pc + offset;
+ } else {
+ dc->jmp = JMP_INDIRECT;
+ tcg_gen_movi_tl(env_btarget, dc->pc);
+ tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
+ }
+ eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
+}
+
+static void dec_br(DisasContext *dc)
+{
+ unsigned int dslot, link, abs, mbar;
+ int mem_index = cpu_mmu_index(&dc->cpu->env, false);
+
+ dslot = dc->ir & (1 << 20);
+ abs = dc->ir & (1 << 19);
+ link = dc->ir & (1 << 18);
+
+ /* Memory barrier. */
+ mbar = (dc->ir >> 16) & 31;
+ if (mbar == 2 && dc->imm == 4) {
+        /* mbar IMM & 16 decodes to sleep (the IMM operand is encoded in
+           the rd field). */
+ if (dc->rd & 16) {
+ TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
+ TCGv_i32 tmp_1 = tcg_const_i32(1);
+
+ LOG_DIS("sleep\n");
+
+ t_sync_flags(dc);
+ tcg_gen_st_i32(tmp_1, cpu_env,
+ -offsetof(MicroBlazeCPU, env)
+ +offsetof(CPUState, halted));
+ tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
+ gen_helper_raise_exception(cpu_env, tmp_hlt);
+ tcg_temp_free_i32(tmp_hlt);
+ tcg_temp_free_i32(tmp_1);
+ return;
+ }
+ LOG_DIS("mbar %d\n", dc->rd);
+ /* Break the TB. */
+ dc->cpustate_changed = 1;
+ return;
+ }
+
+ LOG_DIS("br%s%s%s%s imm=%x\n",
+ abs ? "a" : "", link ? "l" : "",
+ dc->type_b ? "i" : "", dslot ? "d" : "",
+ dc->imm);
+
+ dc->delayed_branch = 1;
+ if (dslot) {
+ dc->delayed_branch = 2;
+ dc->tb_flags |= D_FLAG;
+ tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
+ cpu_env, offsetof(CPUMBState, bimm));
+ }
+ if (link && dc->rd)
+ tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
+
+ dc->jmp = JMP_INDIRECT;
+ if (abs) {
+ tcg_gen_movi_tl(env_btaken, 1);
+ tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
+ if (link && !dslot) {
+ if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
+ t_gen_raise_exception(dc, EXCP_BREAK);
+ if (dc->imm == 0) {
+ if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
+ tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
+ t_gen_raise_exception(dc, EXCP_HW_EXCP);
+ return;
+ }
+
+ t_gen_raise_exception(dc, EXCP_DEBUG);
+ }
+ }
+ } else {
+ if (dec_alu_op_b_is_small_imm(dc)) {
+ dc->jmp = JMP_DIRECT;
+ dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
+ } else {
+ tcg_gen_movi_tl(env_btaken, 1);
+ tcg_gen_movi_tl(env_btarget, dc->pc);
+ tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
+ }
+ }
+}
+
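+/* The do_rt{i,b,e} helpers emit the MSR update for the corresponding return
+   insn: MSR[UM,VM] are restored from the saved MSR[UMS,VMS] copies one bit
+   above them (hence the shift right by one), and IE/BIP/EE/EIP are adjusted
+   for the flavour of return. */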
+static inline void do_rti(DisasContext *dc)
+{
+ TCGv t0, t1;
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
+ tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
+ tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
+
+ tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
+ tcg_gen_or_tl(t1, t1, t0);
+ msr_write(dc, t1);
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+ dc->tb_flags &= ~DRTI_FLAG;
+}
+
+static inline void do_rtb(DisasContext *dc)
+{
+ TCGv t0, t1;
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
+ tcg_gen_shri_tl(t0, t1, 1);
+ tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
+
+ tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
+ tcg_gen_or_tl(t1, t1, t0);
+ msr_write(dc, t1);
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+ dc->tb_flags &= ~DRTB_FLAG;
+}
+
+static inline void do_rte(DisasContext *dc)
+{
+ TCGv t0, t1;
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+
+ tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
+ tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
+ tcg_gen_shri_tl(t0, t1, 1);
+ tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
+
+ tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
+ tcg_gen_or_tl(t1, t1, t0);
+ msr_write(dc, t1);
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+ dc->tb_flags &= ~DRTE_FLAG;
+}
+
+static void dec_rts(DisasContext *dc)
+{
+ unsigned int b_bit, i_bit, e_bit;
+ int mem_index = cpu_mmu_index(&dc->cpu->env, false);
+
+ i_bit = dc->ir & (1 << 21);
+ b_bit = dc->ir & (1 << 22);
+ e_bit = dc->ir & (1 << 23);
+
+ dc->delayed_branch = 2;
+ dc->tb_flags |= D_FLAG;
+ tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
+ cpu_env, offsetof(CPUMBState, bimm));
+
+ if (i_bit) {
+ LOG_DIS("rtid ir=%x\n", dc->ir);
+ if ((dc->tb_flags & MSR_EE_FLAG)
+ && mem_index == MMU_USER_IDX) {
+ tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
+ t_gen_raise_exception(dc, EXCP_HW_EXCP);
+ }
+ dc->tb_flags |= DRTI_FLAG;
+ } else if (b_bit) {
+ LOG_DIS("rtbd ir=%x\n", dc->ir);
+ if ((dc->tb_flags & MSR_EE_FLAG)
+ && mem_index == MMU_USER_IDX) {
+ tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
+ t_gen_raise_exception(dc, EXCP_HW_EXCP);
+ }
+ dc->tb_flags |= DRTB_FLAG;
+ } else if (e_bit) {
+ LOG_DIS("rted ir=%x\n", dc->ir);
+ if ((dc->tb_flags & MSR_EE_FLAG)
+ && mem_index == MMU_USER_IDX) {
+ tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
+ t_gen_raise_exception(dc, EXCP_HW_EXCP);
+ }
+ dc->tb_flags |= DRTE_FLAG;
+ } else
+ LOG_DIS("rts ir=%x\n", dc->ir);
+
+ dc->jmp = JMP_INDIRECT;
+ tcg_gen_movi_tl(env_btaken, 1);
+ tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
+}
+
+static int dec_check_fpuv2(DisasContext *dc)
+{
+ if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
+ tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
+ t_gen_raise_exception(dc, EXCP_HW_EXCP);
+ }
+    /* Return non-zero when FPU v2 insns may be emitted. */
+    return (dc->cpu->cfg.use_fpu == 2) ? PVR2_USE_FPU2_MASK : 0;
+}
+
+static void dec_fpu(DisasContext *dc)
+{
+ unsigned int fpu_insn;
+
+ if ((dc->tb_flags & MSR_EE_FLAG)
+ && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
+ && (dc->cpu->cfg.use_fpu != 1)) {
+ tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
+ t_gen_raise_exception(dc, EXCP_HW_EXCP);
+ return;
+ }
+
+ fpu_insn = (dc->ir >> 7) & 7;
+
+ switch (fpu_insn) {
+ case 0:
+ gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
+ cpu_R[dc->rb]);
+ break;
+
+ case 1:
+ gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
+ cpu_R[dc->rb]);
+ break;
+
+ case 2:
+ gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
+ cpu_R[dc->rb]);
+ break;
+
+ case 3:
+ gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
+ cpu_R[dc->rb]);
+ break;
+
+ case 4:
+ switch ((dc->ir >> 4) & 7) {
+ case 0:
+ gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
+ cpu_R[dc->ra], cpu_R[dc->rb]);
+ break;
+ case 1:
+ gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
+ cpu_R[dc->ra], cpu_R[dc->rb]);
+ break;
+ case 2:
+ gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
+ cpu_R[dc->ra], cpu_R[dc->rb]);
+ break;
+ case 3:
+ gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
+ cpu_R[dc->ra], cpu_R[dc->rb]);
+ break;
+ case 4:
+ gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
+ cpu_R[dc->ra], cpu_R[dc->rb]);
+ break;
+ case 5:
+ gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
+ cpu_R[dc->ra], cpu_R[dc->rb]);
+ break;
+ case 6:
+ gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
+ cpu_R[dc->ra], cpu_R[dc->rb]);
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP,
+ "unimplemented fcmp fpu_insn=%x pc=%x"
+ " opc=%x\n",
+ fpu_insn, dc->pc, dc->opcode);
+ dc->abort_at_next_insn = 1;
+ break;
+ }
+ break;
+
+ case 5:
+ if (!dec_check_fpuv2(dc)) {
+ return;
+ }
+ gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
+ break;
+
+ case 6:
+ if (!dec_check_fpuv2(dc)) {
+ return;
+ }
+ gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
+ break;
+
+ case 7:
+ if (!dec_check_fpuv2(dc)) {
+ return;
+ }
+ gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
+ break;
+
+ default:
+ qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
+ " opc=%x\n",
+ fpu_insn, dc->pc, dc->opcode);
+ dc->abort_at_next_insn = 1;
+ break;
+ }
+}
+
+static void dec_null(DisasContext *dc)
+{
+ if ((dc->tb_flags & MSR_EE_FLAG)
+ && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
+ tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
+ t_gen_raise_exception(dc, EXCP_HW_EXCP);
+ return;
+ }
+ qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
+ dc->abort_at_next_insn = 1;
+}
+
+/* Insns connected to FSL or AXI stream attached devices. */
+static void dec_stream(DisasContext *dc)
+{
+ int mem_index = cpu_mmu_index(&dc->cpu->env, false);
+ TCGv_i32 t_id, t_ctrl;
+ int ctrl;
+
+ LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
+ dc->type_b ? "" : "d", dc->imm);
+
+ if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
+ tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
+ t_gen_raise_exception(dc, EXCP_HW_EXCP);
+ return;
+ }
+
+ t_id = tcg_temp_new();
+ if (dc->type_b) {
+ tcg_gen_movi_tl(t_id, dc->imm & 0xf);
+ ctrl = dc->imm >> 10;
+ } else {
+ tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
+ ctrl = dc->imm >> 5;
+ }
+
+ t_ctrl = tcg_const_tl(ctrl);
+
+ if (dc->rd == 0) {
+ gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
+ } else {
+ gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
+ }
+ tcg_temp_free(t_id);
+ tcg_temp_free(t_ctrl);
+}
+
+static struct decoder_info {
+ struct {
+ uint32_t bits;
+ uint32_t mask;
+ };
+ void (*dec)(DisasContext *dc);
+} decinfo[] = {
+ {DEC_ADD, dec_add},
+ {DEC_SUB, dec_sub},
+ {DEC_AND, dec_and},
+ {DEC_XOR, dec_xor},
+ {DEC_OR, dec_or},
+ {DEC_BIT, dec_bit},
+ {DEC_BARREL, dec_barrel},
+ {DEC_LD, dec_load},
+ {DEC_ST, dec_store},
+ {DEC_IMM, dec_imm},
+ {DEC_BR, dec_br},
+ {DEC_BCC, dec_bcc},
+ {DEC_RTS, dec_rts},
+ {DEC_FPU, dec_fpu},
+ {DEC_MUL, dec_mul},
+ {DEC_DIV, dec_div},
+ {DEC_MSR, dec_msr},
+ {DEC_STREAM, dec_stream},
+ {{0, 0}, dec_null}
+};
+
+static inline void decode(DisasContext *dc, uint32_t ir)
+{
+ int i;
+
+ dc->ir = ir;
+ LOG_DIS("%8.8x\t", dc->ir);
+
+ if (dc->ir)
+ dc->nr_nops = 0;
+ else {
+ if ((dc->tb_flags & MSR_EE_FLAG)
+ && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
+ && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
+ tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
+ t_gen_raise_exception(dc, EXCP_HW_EXCP);
+ return;
+ }
+
+ LOG_DIS("nr_nops=%d\t", dc->nr_nops);
+ dc->nr_nops++;
+ if (dc->nr_nops > 4) {
+ cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
+ }
+ }
+    /* Bit 29 (bit 2 in MicroBlaze's MSB-first numbering) selects a type-b
+       insn, i.e. one taking an immediate operand. */
+ dc->type_b = ir & (1 << 29);
+
+ dc->opcode = EXTRACT_FIELD(ir, 26, 31);
+ dc->rd = EXTRACT_FIELD(ir, 21, 25);
+ dc->ra = EXTRACT_FIELD(ir, 16, 20);
+ dc->rb = EXTRACT_FIELD(ir, 11, 15);
+ dc->imm = EXTRACT_FIELD(ir, 0, 15);
+
+ /* Large switch for all insns. */
+ for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
+ if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
+ decinfo[i].dec(dc);
+ break;
+ }
+ }
+}
+
+/* generate intermediate code for basic block 'tb'. */
+void gen_intermediate_code(CPUMBState *env, struct TranslationBlock *tb)
+{
+ MicroBlazeCPU *cpu = mb_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ uint32_t pc_start;
+ struct DisasContext ctx;
+ struct DisasContext *dc = &ctx;
+ uint32_t next_page_start, org_flags;
+ target_ulong npc;
+ int num_insns;
+ int max_insns;
+
+ pc_start = tb->pc;
+ dc->cpu = cpu;
+ dc->tb = tb;
+ org_flags = dc->synced_flags = dc->tb_flags = tb->flags;
+
+ dc->is_jmp = DISAS_NEXT;
+ dc->jmp = 0;
+ dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
+ if (dc->delayed_branch) {
+ dc->jmp = JMP_INDIRECT;
+ }
+ dc->pc = pc_start;
+ dc->singlestep_enabled = cs->singlestep_enabled;
+ dc->cpustate_changed = 0;
+ dc->abort_at_next_insn = 0;
+ dc->nr_nops = 0;
+
+ if (pc_start & 3) {
+ cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
+ }
+
+ next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
+ num_insns = 0;
+ max_insns = tb->cflags & CF_COUNT_MASK;
+ if (max_insns == 0) {
+ max_insns = CF_COUNT_MASK;
+ }
+ if (max_insns > TCG_MAX_INSNS) {
+ max_insns = TCG_MAX_INSNS;
+ }
+
+ gen_tb_start(tb);
+ do
+ {
+ tcg_gen_insn_start(dc->pc);
+ num_insns++;
+
+#if SIM_COMPAT
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
+ tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
+ gen_helper_debug();
+ }
+#endif
+
+ if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
+ t_gen_raise_exception(dc, EXCP_DEBUG);
+ dc->is_jmp = DISAS_UPDATE;
+ /* The address covered by the breakpoint must be included in
+               [tb->pc, tb->pc + tb->size) in order for it to be
+ properly cleared -- thus we increment the PC here so that
+ the logic setting tb->size below does the right thing. */
+ dc->pc += 4;
+ break;
+ }
+
+ /* Pretty disas. */
+ LOG_DIS("%8.8x:\t", dc->pc);
+
+ if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ gen_io_start();
+ }
+
+ dc->clear_imm = 1;
+ decode(dc, cpu_ldl_code(env, dc->pc));
+ if (dc->clear_imm)
+ dc->tb_flags &= ~IMM_FLAG;
+ dc->pc += 4;
+
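+        /* A branch/return insn sets delayed_branch to 2 when it has a delay
+           slot (1 otherwise); it is decremented once per translated insn so
+           the branch below resolves right after its delay slot. */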
+ if (dc->delayed_branch) {
+ dc->delayed_branch--;
+ if (!dc->delayed_branch) {
+ if (dc->tb_flags & DRTI_FLAG)
+ do_rti(dc);
+ if (dc->tb_flags & DRTB_FLAG)
+ do_rtb(dc);
+ if (dc->tb_flags & DRTE_FLAG)
+ do_rte(dc);
+ /* Clear the delay slot flag. */
+ dc->tb_flags &= ~D_FLAG;
+ /* If it is a direct jump, try direct chaining. */
+ if (dc->jmp == JMP_INDIRECT) {
+ eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
+ dc->is_jmp = DISAS_JUMP;
+ } else if (dc->jmp == JMP_DIRECT) {
+ t_sync_flags(dc);
+ gen_goto_tb(dc, 0, dc->jmp_pc);
+ dc->is_jmp = DISAS_TB_JUMP;
+ } else if (dc->jmp == JMP_DIRECT_CC) {
+ TCGLabel *l1 = gen_new_label();
+ t_sync_flags(dc);
+ /* Conditional jmp. */
+ tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
+ gen_goto_tb(dc, 1, dc->pc);
+ gen_set_label(l1);
+ gen_goto_tb(dc, 0, dc->jmp_pc);
+
+ dc->is_jmp = DISAS_TB_JUMP;
+ }
+ break;
+ }
+ }
+ if (cs->singlestep_enabled) {
+ break;
+ }
+ } while (!dc->is_jmp && !dc->cpustate_changed
+ && !tcg_op_buf_full()
+ && !singlestep
+ && (dc->pc < next_page_start)
+ && num_insns < max_insns);
+
+ npc = dc->pc;
+ if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
+ if (dc->tb_flags & D_FLAG) {
+ dc->is_jmp = DISAS_UPDATE;
+ tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
+ sync_jmpstate(dc);
+ } else
+ npc = dc->jmp_pc;
+ }
+
+ if (tb->cflags & CF_LAST_IO)
+ gen_io_end();
+ /* Force an update if the per-tb cpu state has changed. */
+ if (dc->is_jmp == DISAS_NEXT
+ && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
+ dc->is_jmp = DISAS_UPDATE;
+ tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
+ }
+ t_sync_flags(dc);
+
+ if (unlikely(cs->singlestep_enabled)) {
+ TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);
+
+ if (dc->is_jmp != DISAS_JUMP) {
+ tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
+ }
+ gen_helper_raise_exception(cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+ } else {
+        switch (dc->is_jmp) {
+ case DISAS_NEXT:
+ gen_goto_tb(dc, 1, npc);
+ break;
+ default:
+ case DISAS_JUMP:
+ case DISAS_UPDATE:
+ /* indicate that the hash table must be used
+ to find the next TB */
+ tcg_gen_exit_tb(0);
+ break;
+ case DISAS_TB_JUMP:
+ /* nothing more to generate */
+ break;
+ }
+ }
+ gen_tb_end(tb, num_insns);
+
+ tb->size = dc->pc - pc_start;
+ tb->icount = num_insns;
+
+#ifdef DEBUG_DISAS
+#if !SIM_COMPAT
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
+ && qemu_log_in_addr_range(pc_start)) {
+ qemu_log_lock();
+ qemu_log("--------------\n");
+#if DISAS_GNU
+ log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
+#endif
+ qemu_log("\nisize=%d osize=%d\n",
+ dc->pc - pc_start, tcg_op_buf_count());
+ qemu_log_unlock();
+ }
+#endif
+#endif
+ assert(!dc->abort_at_next_insn);
+}
+
+void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
+ int flags)
+{
+ MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
+ CPUMBState *env = &cpu->env;
+ int i;
+
+ if (!env || !f)
+ return;
+
+ cpu_fprintf(f, "IN: PC=%x %s\n",
+ env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
+ cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
+ env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
+ env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
+ cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
+ env->btaken, env->btarget,
+ (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
+ (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
+ (env->sregs[SR_MSR] & MSR_EIP),
+ (env->sregs[SR_MSR] & MSR_IE));
+
+ for (i = 0; i < 32; i++) {
+ cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
+ if ((i + 1) % 4 == 0)
+ cpu_fprintf(f, "\n");
+ }
+ cpu_fprintf(f, "\n\n");
+}
+
+MicroBlazeCPU *cpu_mb_init(const char *cpu_model)
+{
+ MicroBlazeCPU *cpu;
+
+ cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU));
+
+ object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
+
+ return cpu;
+}
+
+void mb_tcg_init(void)
+{
+ int i;
+
+ cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+ tcg_ctx.tcg_env = cpu_env;
+
+ env_debug = tcg_global_mem_new(cpu_env,
+ offsetof(CPUMBState, debug),
+ "debug0");
+ env_iflags = tcg_global_mem_new(cpu_env,
+ offsetof(CPUMBState, iflags),
+ "iflags");
+ env_imm = tcg_global_mem_new(cpu_env,
+ offsetof(CPUMBState, imm),
+ "imm");
+ env_btarget = tcg_global_mem_new(cpu_env,
+ offsetof(CPUMBState, btarget),
+ "btarget");
+ env_btaken = tcg_global_mem_new(cpu_env,
+ offsetof(CPUMBState, btaken),
+ "btaken");
+ env_res_addr = tcg_global_mem_new(cpu_env,
+ offsetof(CPUMBState, res_addr),
+ "res_addr");
+ env_res_val = tcg_global_mem_new(cpu_env,
+ offsetof(CPUMBState, res_val),
+ "res_val");
+ for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
+ cpu_R[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUMBState, regs[i]),
+ regnames[i]);
+ }
+ for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
+ cpu_SR[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUMBState, sregs[i]),
+ special_regnames[i]);
+ }
+}
+
+void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ env->sregs[SR_PC] = data[0];
+}
diff --git a/target/mips/Makefile.objs b/target/mips/Makefile.objs
new file mode 100644
index 0000000000..bc5ed8511f
--- /dev/null
+++ b/target/mips/Makefile.objs
@@ -0,0 +1,4 @@
+obj-y += translate.o dsp_helper.o op_helper.o lmi_helper.o helper.o cpu.o
+obj-y += gdbstub.o msa_helper.o mips-semi.o
+obj-$(CONFIG_SOFTMMU) += machine.o
+obj-$(CONFIG_KVM) += kvm.o
diff --git a/target/mips/TODO b/target/mips/TODO
new file mode 100644
index 0000000000..1d782d8027
--- /dev/null
+++ b/target/mips/TODO
@@ -0,0 +1,51 @@
+Unsolved issues/bugs in the mips/mipsel backend
+-----------------------------------------------
+
+General
+-------
+- Unimplemented ASEs:
+ - MDMX
+ - SmartMIPS
+ - microMIPS DSP r1 & r2 encodings
+- MT ASE only partially implemented and not functional
+- Shadow register support only partially implemented,
+ lacks set switching on interrupt/exception.
+- 34K ITC not implemented.
+- A general lack of documentation, especially for technical internals.
+ Existing documentation is x86-centric.
+- Reverse endianness bit not implemented
+- The TLB emulation is very inefficient:
+ QEMU's softmmu implements a x86-style MMU, with separate entries
+ for read/write/execute, a TLB index which is just a modulo of the
+ virtual address, and a set of TLBs for each user/kernel/supervisor
+ MMU mode.
+ MIPS has a single entry for read/write/execute and only one MMU mode.
+ But it is fully associative with randomized entry indices, and uses
+ up to 256 ASID tags as additional matching criterion (which roughly
+ equates to 256 MMU modes). It also has a global flag which causes
+ entries to match regardless of ASID.
+ To cope with these differences, QEMU currently flushes the TLB at
+ each ASID change. Using the MMU modes to implement ASIDs hinges on
+ implementing the global bit efficiently.
+- save/restore of the CPU state is not implemented (see machine.c).
+
+MIPS64
+------
+- Userland emulation (both n32 and n64) not functional.
+
+"Generic" 4Kc system emulation
+------------------------------
+- Doesn't correspond to any real hardware. Should be removed some day;
+ U-Boot is the last remaining user.
+
+PICA 61 system emulation
+------------------------
+- No framebuffer support yet.
+
+MALTA system emulation
+----------------------
+- We fake firmware support instead of doing the real thing
+- Real firmware (YAMON) falls over when trying to init RAM, presumably
+ due to lacking system controller emulation.
+- Bonito system controller not implemented
+- MSC1 system controller not implemented
diff --git a/target/mips/cpu-qom.h b/target/mips/cpu-qom.h
new file mode 100644
index 0000000000..3f5bf23823
--- /dev/null
+++ b/target/mips/cpu-qom.h
@@ -0,0 +1,56 @@
+/*
+ * QEMU MIPS CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+#ifndef QEMU_MIPS_CPU_QOM_H
+#define QEMU_MIPS_CPU_QOM_H
+
+#include "qom/cpu.h"
+
+#ifdef TARGET_MIPS64
+#define TYPE_MIPS_CPU "mips64-cpu"
+#else
+#define TYPE_MIPS_CPU "mips-cpu"
+#endif
+
+#define MIPS_CPU_CLASS(klass) \
+ OBJECT_CLASS_CHECK(MIPSCPUClass, (klass), TYPE_MIPS_CPU)
+#define MIPS_CPU(obj) \
+ OBJECT_CHECK(MIPSCPU, (obj), TYPE_MIPS_CPU)
+#define MIPS_CPU_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(MIPSCPUClass, (obj), TYPE_MIPS_CPU)
+
+/**
+ * MIPSCPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ *
+ * A MIPS CPU model.
+ */
+typedef struct MIPSCPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+ void (*parent_reset)(CPUState *cpu);
+} MIPSCPUClass;
+
+typedef struct MIPSCPU MIPSCPU;
+
+#endif
diff --git a/target/mips/cpu.c b/target/mips/cpu.c
new file mode 100644
index 0000000000..65ca607f88
--- /dev/null
+++ b/target/mips/cpu.c
@@ -0,0 +1,203 @@
+/*
+ * QEMU MIPS CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "kvm_mips.h"
+#include "qemu-common.h"
+#include "sysemu/kvm.h"
+#include "exec/exec-all.h"
+
+
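+/* The low bit of a MIPS PC is the ISA mode bit: an odd target address
+   selects the compressed (MIPS16/microMIPS) instruction set, tracked here
+   via MIPS_HFLAG_M16. */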
+static void mips_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+
+ env->active_tc.PC = value & ~(target_ulong)1;
+ if (value & 1) {
+ env->hflags |= MIPS_HFLAG_M16;
+ } else {
+ env->hflags &= ~(MIPS_HFLAG_M16);
+ }
+}
+
+static void mips_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+
+ env->active_tc.PC = tb->pc;
+ env->hflags &= ~MIPS_HFLAG_BMASK;
+ env->hflags |= tb->flags & MIPS_HFLAG_BMASK;
+}
+
+static bool mips_cpu_has_work(CPUState *cs)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+ bool has_work = false;
+
+    /* Prior to MIPS Release 6 it is implementation dependent whether
+       non-enabled interrupts wake up the CPU; however, most implementations
+       only check for interrupts that can be taken. */
+ if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+ cpu_mips_hw_interrupts_pending(env)) {
+ if (cpu_mips_hw_interrupts_enabled(env) ||
+ (env->insn_flags & ISA_MIPS32R6)) {
+ has_work = true;
+ }
+ }
+
+ /* MIPS-MT has the ability to halt the CPU. */
+ if (env->CP0_Config3 & (1 << CP0C3_MT)) {
+        /* The QEMU model will issue a _WAKE request whenever the CPUs
+ should be woken up. */
+ if (cs->interrupt_request & CPU_INTERRUPT_WAKE) {
+ has_work = true;
+ }
+
+ if (!mips_vpe_active(env)) {
+ has_work = false;
+ }
+ }
+ /* MIPS Release 6 has the ability to halt the CPU. */
+ if (env->CP0_Config5 & (1 << CP0C5_VP)) {
+ if (cs->interrupt_request & CPU_INTERRUPT_WAKE) {
+ has_work = true;
+ }
+ if (!mips_vp_active(env)) {
+ has_work = false;
+ }
+ }
+ return has_work;
+}
+
+/* CPUClass::reset() */
+static void mips_cpu_reset(CPUState *s)
+{
+ MIPSCPU *cpu = MIPS_CPU(s);
+ MIPSCPUClass *mcc = MIPS_CPU_GET_CLASS(cpu);
+ CPUMIPSState *env = &cpu->env;
+
+ mcc->parent_reset(s);
+
+ memset(env, 0, offsetof(CPUMIPSState, mvp));
+ tlb_flush(s, 1);
+
+ cpu_state_reset(env);
+
+#ifndef CONFIG_USER_ONLY
+ if (kvm_enabled()) {
+ kvm_mips_reset_vcpu(cpu);
+ }
+#endif
+}
+
+static void mips_cpu_disas_set_info(CPUState *s, disassemble_info *info)
+{
+#ifdef TARGET_WORDS_BIGENDIAN
+ info->print_insn = print_insn_big_mips;
+#else
+ info->print_insn = print_insn_little_mips;
+#endif
+}
+
+static void mips_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ MIPSCPUClass *mcc = MIPS_CPU_GET_CLASS(dev);
+ Error *local_err = NULL;
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ cpu_reset(cs);
+ qemu_init_vcpu(cs);
+
+ mcc->parent_realize(dev, errp);
+}
+
+static void mips_cpu_initfn(Object *obj)
+{
+ CPUState *cs = CPU(obj);
+ MIPSCPU *cpu = MIPS_CPU(obj);
+ CPUMIPSState *env = &cpu->env;
+
+ cs->env_ptr = env;
+
+ if (tcg_enabled()) {
+ mips_tcg_init();
+ }
+}
+
+static void mips_cpu_class_init(ObjectClass *c, void *data)
+{
+ MIPSCPUClass *mcc = MIPS_CPU_CLASS(c);
+ CPUClass *cc = CPU_CLASS(c);
+ DeviceClass *dc = DEVICE_CLASS(c);
+
+ mcc->parent_realize = dc->realize;
+ dc->realize = mips_cpu_realizefn;
+
+ mcc->parent_reset = cc->reset;
+ cc->reset = mips_cpu_reset;
+
+ cc->has_work = mips_cpu_has_work;
+ cc->do_interrupt = mips_cpu_do_interrupt;
+ cc->cpu_exec_interrupt = mips_cpu_exec_interrupt;
+ cc->dump_state = mips_cpu_dump_state;
+ cc->set_pc = mips_cpu_set_pc;
+ cc->synchronize_from_tb = mips_cpu_synchronize_from_tb;
+ cc->gdb_read_register = mips_cpu_gdb_read_register;
+ cc->gdb_write_register = mips_cpu_gdb_write_register;
+#ifdef CONFIG_USER_ONLY
+ cc->handle_mmu_fault = mips_cpu_handle_mmu_fault;
+#else
+ cc->do_unassigned_access = mips_cpu_unassigned_access;
+ cc->do_unaligned_access = mips_cpu_do_unaligned_access;
+ cc->get_phys_page_debug = mips_cpu_get_phys_page_debug;
+ cc->vmsd = &vmstate_mips_cpu;
+#endif
+ cc->disas_set_info = mips_cpu_disas_set_info;
+
+ cc->gdb_num_core_regs = 73;
+ cc->gdb_stop_before_watchpoint = true;
+}
+
+static const TypeInfo mips_cpu_type_info = {
+ .name = TYPE_MIPS_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(MIPSCPU),
+ .instance_init = mips_cpu_initfn,
+ .abstract = false,
+ .class_size = sizeof(MIPSCPUClass),
+ .class_init = mips_cpu_class_init,
+};
+
+static void mips_cpu_register_types(void)
+{
+ type_register_static(&mips_cpu_type_info);
+}
+
+type_init(mips_cpu_register_types)
diff --git a/target/mips/cpu.h b/target/mips/cpu.h
new file mode 100644
index 0000000000..5182dc74ff
--- /dev/null
+++ b/target/mips/cpu.h
@@ -0,0 +1,1069 @@
+#ifndef MIPS_CPU_H
+#define MIPS_CPU_H
+
+//#define DEBUG_OP
+
+#define ALIGNED_ONLY
+
+#define CPUArchState struct CPUMIPSState
+
+#include "qemu-common.h"
+#include "cpu-qom.h"
+#include "mips-defs.h"
+#include "exec/cpu-defs.h"
+#include "fpu/softfloat.h"
+
+struct CPUMIPSState;
+
+typedef struct r4k_tlb_t r4k_tlb_t;
+struct r4k_tlb_t {
+ target_ulong VPN;
+ uint32_t PageMask;
+ uint16_t ASID;
+ unsigned int G:1;
+ unsigned int C0:3;
+ unsigned int C1:3;
+ unsigned int V0:1;
+ unsigned int V1:1;
+ unsigned int D0:1;
+ unsigned int D1:1;
+ unsigned int XI0:1;
+ unsigned int XI1:1;
+ unsigned int RI0:1;
+ unsigned int RI1:1;
+ unsigned int EHINV:1;
+ uint64_t PFN[2];
+};
+
+#if !defined(CONFIG_USER_ONLY)
+typedef struct CPUMIPSTLBContext CPUMIPSTLBContext;
+struct CPUMIPSTLBContext {
+ uint32_t nb_tlb;
+ uint32_t tlb_in_use;
+ int (*map_address) (struct CPUMIPSState *env, hwaddr *physical, int *prot, target_ulong address, int rw, int access_type);
+ void (*helper_tlbwi)(struct CPUMIPSState *env);
+ void (*helper_tlbwr)(struct CPUMIPSState *env);
+ void (*helper_tlbp)(struct CPUMIPSState *env);
+ void (*helper_tlbr)(struct CPUMIPSState *env);
+ void (*helper_tlbinv)(struct CPUMIPSState *env);
+ void (*helper_tlbinvf)(struct CPUMIPSState *env);
+ union {
+ struct {
+ r4k_tlb_t tlb[MIPS_TLB_MAX];
+ } r4k;
+ } mmu;
+};
+#endif
+
+/* MSA Context */
+#define MSA_WRLEN (128)
+
+enum CPUMIPSMSADataFormat {
+ DF_BYTE = 0,
+ DF_HALF,
+ DF_WORD,
+ DF_DOUBLE
+};
+
+typedef union wr_t wr_t;
+union wr_t {
+ int8_t b[MSA_WRLEN/8];
+ int16_t h[MSA_WRLEN/16];
+ int32_t w[MSA_WRLEN/32];
+ int64_t d[MSA_WRLEN/64];
+};
+
+typedef union fpr_t fpr_t;
+union fpr_t {
+ float64 fd; /* ieee double precision */
+ float32 fs[2];/* ieee single precision */
+ uint64_t d; /* binary double fixed-point */
+ uint32_t w[2]; /* binary single fixed-point */
+/* FPU/MSA register mapping is not tested on big-endian hosts. */
+ wr_t wr; /* vector data */
+};
+/* define FP_ENDIAN_IDX to access the same location
+ * in the fpr_t union regardless of the host endianness
+ */
+#if defined(HOST_WORDS_BIGENDIAN)
+# define FP_ENDIAN_IDX 1
+#else
+# define FP_ENDIAN_IDX 0
+#endif
+
+typedef struct CPUMIPSFPUContext CPUMIPSFPUContext;
+struct CPUMIPSFPUContext {
+ /* Floating point registers */
+ fpr_t fpr[32];
+ float_status fp_status;
+ /* fpu implementation/revision register (fir) */
+ uint32_t fcr0;
+#define FCR0_FREP 29
+#define FCR0_UFRP 28
+#define FCR0_HAS2008 23
+#define FCR0_F64 22
+#define FCR0_L 21
+#define FCR0_W 20
+#define FCR0_3D 19
+#define FCR0_PS 18
+#define FCR0_D 17
+#define FCR0_S 16
+#define FCR0_PRID 8
+#define FCR0_REV 0
+ /* fcsr */
+ uint32_t fcr31_rw_bitmask;
+ uint32_t fcr31;
+#define FCR31_FS 24
+#define FCR31_ABS2008 19
+#define FCR31_NAN2008 18
+#define SET_FP_COND(num,env) do { ((env).fcr31) |= ((num) ? (1 << ((num) + 24)) : (1 << 23)); } while(0)
+#define CLEAR_FP_COND(num,env) do { ((env).fcr31) &= ~((num) ? (1 << ((num) + 24)) : (1 << 23)); } while(0)
+#define GET_FP_COND(env) ((((env).fcr31 >> 24) & 0xfe) | (((env).fcr31 >> 23) & 0x1))
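+/* Condition code 0 lives in fcr31 bit 23 while codes 1..7 occupy bits
+   25..31, which is why the macros above special-case num == 0. */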
+#define GET_FP_CAUSE(reg) (((reg) >> 12) & 0x3f)
+#define GET_FP_ENABLE(reg) (((reg) >> 7) & 0x1f)
+#define GET_FP_FLAGS(reg) (((reg) >> 2) & 0x1f)
+#define SET_FP_CAUSE(reg,v) do { (reg) = ((reg) & ~(0x3f << 12)) | ((v & 0x3f) << 12); } while(0)
+#define SET_FP_ENABLE(reg,v) do { (reg) = ((reg) & ~(0x1f << 7)) | ((v & 0x1f) << 7); } while(0)
+#define SET_FP_FLAGS(reg,v) do { (reg) = ((reg) & ~(0x1f << 2)) | ((v & 0x1f) << 2); } while(0)
+#define UPDATE_FP_FLAGS(reg,v) do { (reg) |= ((v & 0x1f) << 2); } while(0)
+#define FP_INEXACT 1
+#define FP_UNDERFLOW 2
+#define FP_OVERFLOW 4
+#define FP_DIV0 8
+#define FP_INVALID 16
+#define FP_UNIMPLEMENTED 32
+};
+
+#define NB_MMU_MODES 3
+#define TARGET_INSN_START_EXTRA_WORDS 2
+
+typedef struct CPUMIPSMVPContext CPUMIPSMVPContext;
+struct CPUMIPSMVPContext {
+ int32_t CP0_MVPControl;
+#define CP0MVPCo_CPA 3
+#define CP0MVPCo_STLB 2
+#define CP0MVPCo_VPC 1
+#define CP0MVPCo_EVP 0
+ int32_t CP0_MVPConf0;
+#define CP0MVPC0_M 31
+#define CP0MVPC0_TLBS 29
+#define CP0MVPC0_GS 28
+#define CP0MVPC0_PCP 27
+#define CP0MVPC0_PTLBE 16
+#define CP0MVPC0_TCA 15
+#define CP0MVPC0_PVPE 10
+#define CP0MVPC0_PTC 0
+ int32_t CP0_MVPConf1;
+#define CP0MVPC1_CIM 31
+#define CP0MVPC1_CIF 30
+#define CP0MVPC1_PCX 20
+#define CP0MVPC1_PCP2 10
+#define CP0MVPC1_PCP1 0
+};
+
+typedef struct mips_def_t mips_def_t;
+
+#define MIPS_SHADOW_SET_MAX 16
+#define MIPS_TC_MAX 5
+#define MIPS_FPU_MAX 1
+#define MIPS_DSP_ACC 4
+#define MIPS_KSCRATCH_NUM 6
+#define MIPS_MAAR_MAX 16 /* Must be an even number. */
+
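+/* Per-TC (MT ASE thread context) register state; the tcs[] array below
+   also reuses this layout for shadow register sets. */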
+typedef struct TCState TCState;
+struct TCState {
+ target_ulong gpr[32];
+ target_ulong PC;
+ target_ulong HI[MIPS_DSP_ACC];
+ target_ulong LO[MIPS_DSP_ACC];
+ target_ulong ACX[MIPS_DSP_ACC];
+ target_ulong DSPControl;
+ int32_t CP0_TCStatus;
+#define CP0TCSt_TCU3 31
+#define CP0TCSt_TCU2 30
+#define CP0TCSt_TCU1 29
+#define CP0TCSt_TCU0 28
+#define CP0TCSt_TMX 27
+#define CP0TCSt_RNST 23
+#define CP0TCSt_TDS 21
+#define CP0TCSt_DT 20
+#define CP0TCSt_DA 15
+#define CP0TCSt_A 13
+#define CP0TCSt_TKSU 11
+#define CP0TCSt_IXMT 10
+#define CP0TCSt_TASID 0
+ int32_t CP0_TCBind;
+#define CP0TCBd_CurTC 21
+#define CP0TCBd_TBE 17
+#define CP0TCBd_CurVPE 0
+ target_ulong CP0_TCHalt;
+ target_ulong CP0_TCContext;
+ target_ulong CP0_TCSchedule;
+ target_ulong CP0_TCScheFBack;
+ int32_t CP0_Debug_tcstatus;
+ target_ulong CP0_UserLocal;
+
+ int32_t msacsr;
+
+#define MSACSR_FS 24
+#define MSACSR_FS_MASK (1 << MSACSR_FS)
+#define MSACSR_NX 18
+#define MSACSR_NX_MASK (1 << MSACSR_NX)
+#define MSACSR_CEF 2
+#define MSACSR_CEF_MASK (0xffff << MSACSR_CEF)
+#define MSACSR_RM 0
+#define MSACSR_RM_MASK (0x3 << MSACSR_RM)
+#define MSACSR_MASK (MSACSR_RM_MASK | MSACSR_CEF_MASK | MSACSR_NX_MASK | \
+ MSACSR_FS_MASK)
+
+ float_status msa_fp_status;
+};
+
+typedef struct CPUMIPSState CPUMIPSState;
+struct CPUMIPSState {
+ TCState active_tc;
+ CPUMIPSFPUContext active_fpu;
+
+ uint32_t current_tc;
+ uint32_t current_fpu;
+
+ uint32_t SEGBITS;
+ uint32_t PABITS;
+#if defined(TARGET_MIPS64)
+# define PABITS_BASE 36
+#else
+# define PABITS_BASE 32
+#endif
+ target_ulong SEGMask;
+ uint64_t PAMask;
+#define PAMASK_BASE ((1ULL << PABITS_BASE) - 1)
+
+ int32_t msair;
+#define MSAIR_ProcID 8
+#define MSAIR_Rev 0
+
+ int32_t CP0_Index;
+ /* CP0_MVP* are per MVP registers. */
+ int32_t CP0_VPControl;
+#define CP0VPCtl_DIS 0
+ int32_t CP0_Random;
+ int32_t CP0_VPEControl;
+#define CP0VPECo_YSI 21
+#define CP0VPECo_GSI 20
+#define CP0VPECo_EXCPT 16
+#define CP0VPECo_TE 15
+#define CP0VPECo_TargTC 0
+ int32_t CP0_VPEConf0;
+#define CP0VPEC0_M 31
+#define CP0VPEC0_XTC 21
+#define CP0VPEC0_TCS 19
+#define CP0VPEC0_SCS 18
+#define CP0VPEC0_DSC 17
+#define CP0VPEC0_ICS 16
+#define CP0VPEC0_MVP 1
+#define CP0VPEC0_VPA 0
+ int32_t CP0_VPEConf1;
+#define CP0VPEC1_NCX 20
+#define CP0VPEC1_NCP2 10
+#define CP0VPEC1_NCP1 0
+ target_ulong CP0_YQMask;
+ target_ulong CP0_VPESchedule;
+ target_ulong CP0_VPEScheFBack;
+ int32_t CP0_VPEOpt;
+#define CP0VPEOpt_IWX7 15
+#define CP0VPEOpt_IWX6 14
+#define CP0VPEOpt_IWX5 13
+#define CP0VPEOpt_IWX4 12
+#define CP0VPEOpt_IWX3 11
+#define CP0VPEOpt_IWX2 10
+#define CP0VPEOpt_IWX1 9
+#define CP0VPEOpt_IWX0 8
+#define CP0VPEOpt_DWX7 7
+#define CP0VPEOpt_DWX6 6
+#define CP0VPEOpt_DWX5 5
+#define CP0VPEOpt_DWX4 4
+#define CP0VPEOpt_DWX3 3
+#define CP0VPEOpt_DWX2 2
+#define CP0VPEOpt_DWX1 1
+#define CP0VPEOpt_DWX0 0
+ uint64_t CP0_EntryLo0;
+ uint64_t CP0_EntryLo1;
+#if defined(TARGET_MIPS64)
+# define CP0EnLo_RI 63
+# define CP0EnLo_XI 62
+#else
+# define CP0EnLo_RI 31
+# define CP0EnLo_XI 30
+#endif
+ int32_t CP0_GlobalNumber;
+#define CP0GN_VPId 0
+ target_ulong CP0_Context;
+ target_ulong CP0_KScratch[MIPS_KSCRATCH_NUM];
+ int32_t CP0_PageMask;
+ int32_t CP0_PageGrain_rw_bitmask;
+ int32_t CP0_PageGrain;
+#define CP0PG_RIE 31
+#define CP0PG_XIE 30
+#define CP0PG_ELPA 29
+#define CP0PG_IEC 27
+ int32_t CP0_Wired;
+ int32_t CP0_SRSConf0_rw_bitmask;
+ int32_t CP0_SRSConf0;
+#define CP0SRSC0_M 31
+#define CP0SRSC0_SRS3 20
+#define CP0SRSC0_SRS2 10
+#define CP0SRSC0_SRS1 0
+ int32_t CP0_SRSConf1_rw_bitmask;
+ int32_t CP0_SRSConf1;
+#define CP0SRSC1_M 31
+#define CP0SRSC1_SRS6 20
+#define CP0SRSC1_SRS5 10
+#define CP0SRSC1_SRS4 0
+ int32_t CP0_SRSConf2_rw_bitmask;
+ int32_t CP0_SRSConf2;
+#define CP0SRSC2_M 31
+#define CP0SRSC2_SRS9 20
+#define CP0SRSC2_SRS8 10
+#define CP0SRSC2_SRS7 0
+ int32_t CP0_SRSConf3_rw_bitmask;
+ int32_t CP0_SRSConf3;
+#define CP0SRSC3_M 31
+#define CP0SRSC3_SRS12 20
+#define CP0SRSC3_SRS11 10
+#define CP0SRSC3_SRS10 0
+ int32_t CP0_SRSConf4_rw_bitmask;
+ int32_t CP0_SRSConf4;
+#define CP0SRSC4_SRS15 20
+#define CP0SRSC4_SRS14 10
+#define CP0SRSC4_SRS13 0
+ int32_t CP0_HWREna;
+ target_ulong CP0_BadVAddr;
+ uint32_t CP0_BadInstr;
+ uint32_t CP0_BadInstrP;
+ int32_t CP0_Count;
+ target_ulong CP0_EntryHi;
+#define CP0EnHi_EHINV 10
+ target_ulong CP0_EntryHi_ASID_mask;
+ int32_t CP0_Compare;
+ int32_t CP0_Status;
+#define CP0St_CU3 31
+#define CP0St_CU2 30
+#define CP0St_CU1 29
+#define CP0St_CU0 28
+#define CP0St_RP 27
+#define CP0St_FR 26
+#define CP0St_RE 25
+#define CP0St_MX 24
+#define CP0St_PX 23
+#define CP0St_BEV 22
+#define CP0St_TS 21
+#define CP0St_SR 20
+#define CP0St_NMI 19
+#define CP0St_IM 8
+#define CP0St_KX 7
+#define CP0St_SX 6
+#define CP0St_UX 5
+#define CP0St_KSU 3
+#define CP0St_ERL 2
+#define CP0St_EXL 1
+#define CP0St_IE 0
+ int32_t CP0_IntCtl;
+#define CP0IntCtl_IPTI 29
+#define CP0IntCtl_IPPCI 26
+#define CP0IntCtl_VS 5
+ int32_t CP0_SRSCtl;
+#define CP0SRSCtl_HSS 26
+#define CP0SRSCtl_EICSS 18
+#define CP0SRSCtl_ESS 12
+#define CP0SRSCtl_PSS 6
+#define CP0SRSCtl_CSS 0
+ int32_t CP0_SRSMap;
+#define CP0SRSMap_SSV7 28
+#define CP0SRSMap_SSV6 24
+#define CP0SRSMap_SSV5 20
+#define CP0SRSMap_SSV4 16
+#define CP0SRSMap_SSV3 12
+#define CP0SRSMap_SSV2 8
+#define CP0SRSMap_SSV1 4
+#define CP0SRSMap_SSV0 0
+ int32_t CP0_Cause;
+#define CP0Ca_BD 31
+#define CP0Ca_TI 30
+#define CP0Ca_CE 28
+#define CP0Ca_DC 27
+#define CP0Ca_PCI 26
+#define CP0Ca_IV 23
+#define CP0Ca_WP 22
+#define CP0Ca_IP 8
+#define CP0Ca_IP_mask 0x0000FF00
+#define CP0Ca_EC 2
+ target_ulong CP0_EPC;
+ int32_t CP0_PRid;
+ int32_t CP0_EBase;
+ target_ulong CP0_CMGCRBase;
+ int32_t CP0_Config0;
+#define CP0C0_M 31
+#define CP0C0_K23 28
+#define CP0C0_KU 25
+#define CP0C0_MDU 20
+#define CP0C0_MM 18
+#define CP0C0_BM 16
+#define CP0C0_BE 15
+#define CP0C0_AT 13
+#define CP0C0_AR 10
+#define CP0C0_MT 7
+#define CP0C0_VI 3
+#define CP0C0_K0 0
+ int32_t CP0_Config1;
+#define CP0C1_M 31
+#define CP0C1_MMU 25
+#define CP0C1_IS 22
+#define CP0C1_IL 19
+#define CP0C1_IA 16
+#define CP0C1_DS 13
+#define CP0C1_DL 10
+#define CP0C1_DA 7
+#define CP0C1_C2 6
+#define CP0C1_MD 5
+#define CP0C1_PC 4
+#define CP0C1_WR 3
+#define CP0C1_CA 2
+#define CP0C1_EP 1
+#define CP0C1_FP 0
+ int32_t CP0_Config2;
+#define CP0C2_M 31
+#define CP0C2_TU 28
+#define CP0C2_TS 24
+#define CP0C2_TL 20
+#define CP0C2_TA 16
+#define CP0C2_SU 12
+#define CP0C2_SS 8
+#define CP0C2_SL 4
+#define CP0C2_SA 0
+ int32_t CP0_Config3;
+#define CP0C3_M 31
+#define CP0C3_BPG 30
+#define CP0C3_CMGCR 29
+#define CP0C3_MSAP 28
+#define CP0C3_BP 27
+#define CP0C3_BI 26
+#define CP0C3_IPLW 21
+#define CP0C3_MMAR 18
+#define CP0C3_MCU 17
+#define CP0C3_ISA_ON_EXC 16
+#define CP0C3_ISA 14
+#define CP0C3_ULRI 13
+#define CP0C3_RXI 12
+#define CP0C3_DSP2P 11
+#define CP0C3_DSPP 10
+#define CP0C3_LPA 7
+#define CP0C3_VEIC 6
+#define CP0C3_VInt 5
+#define CP0C3_SP 4
+#define CP0C3_CDMM 3
+#define CP0C3_MT 2
+#define CP0C3_SM 1
+#define CP0C3_TL 0
+ int32_t CP0_Config4;
+ int32_t CP0_Config4_rw_bitmask;
+#define CP0C4_M 31
+#define CP0C4_IE 29
+#define CP0C4_AE 28
+#define CP0C4_KScrExist 16
+#define CP0C4_MMUExtDef 14
+#define CP0C4_FTLBPageSize 8
+#define CP0C4_FTLBWays 4
+#define CP0C4_FTLBSets 0
+#define CP0C4_MMUSizeExt 0
+ int32_t CP0_Config5;
+ int32_t CP0_Config5_rw_bitmask;
+#define CP0C5_M 31
+#define CP0C5_K 30
+#define CP0C5_CV 29
+#define CP0C5_EVA 28
+#define CP0C5_MSAEn 27
+#define CP0C5_XNP 13
+#define CP0C5_UFE 9
+#define CP0C5_FRE 8
+#define CP0C5_VP 7
+#define CP0C5_SBRI 6
+#define CP0C5_MVH 5
+#define CP0C5_LLB 4
+#define CP0C5_MRP 3
+#define CP0C5_UFR 2
+#define CP0C5_NFExists 0
+ int32_t CP0_Config6;
+ int32_t CP0_Config7;
+ uint64_t CP0_MAAR[MIPS_MAAR_MAX];
+ int32_t CP0_MAARI;
+ /* XXX: Maybe make LLAddr per-TC? */
+ uint64_t lladdr;
+ target_ulong llval;
+ target_ulong llnewval;
+ target_ulong llreg;
+ uint64_t CP0_LLAddr_rw_bitmask;
+ int CP0_LLAddr_shift;
+ target_ulong CP0_WatchLo[8];
+ int32_t CP0_WatchHi[8];
+#define CP0WH_ASID 16
+ target_ulong CP0_XContext;
+ int32_t CP0_Framemask;
+ int32_t CP0_Debug;
+#define CP0DB_DBD 31
+#define CP0DB_DM 30
+#define CP0DB_LSNM 28
+#define CP0DB_Doze 27
+#define CP0DB_Halt 26
+#define CP0DB_CNT 25
+#define CP0DB_IBEP 24
+#define CP0DB_DBEP 21
+#define CP0DB_IEXI 20
+#define CP0DB_VER 15
+#define CP0DB_DEC 10
+#define CP0DB_SSt 8
+#define CP0DB_DINT 5
+#define CP0DB_DIB 4
+#define CP0DB_DDBS 3
+#define CP0DB_DDBL 2
+#define CP0DB_DBp 1
+#define CP0DB_DSS 0
+ target_ulong CP0_DEPC;
+ int32_t CP0_Performance0;
+ int32_t CP0_ErrCtl;
+#define CP0EC_WST 29
+#define CP0EC_SPR 28
+#define CP0EC_ITC 26
+ uint64_t CP0_TagLo;
+ int32_t CP0_DataLo;
+ int32_t CP0_TagHi;
+ int32_t CP0_DataHi;
+ target_ulong CP0_ErrorEPC;
+ int32_t CP0_DESAVE;
+ /* We waste some space so we can handle shadow registers like TCs. */
+ TCState tcs[MIPS_SHADOW_SET_MAX];
+ CPUMIPSFPUContext fpus[MIPS_FPU_MAX];
+ /* QEMU */
+ int error_code;
+#define EXCP_TLB_NOMATCH 0x1
+#define EXCP_INST_NOTAVAIL 0x2 /* No valid instruction word for BadInstr */
+ uint32_t hflags; /* CPU State */
+ /* TMASK defines different execution modes */
+#define MIPS_HFLAG_TMASK 0xF5807FF
+#define MIPS_HFLAG_MODE 0x00007 /* execution modes */
+    /* The KSU flags must be the lowest bits in hflags. The flag order
+       must be the same as defined for CP0 Status. This allows the bits
+       to be used directly as the value of mmu_idx. */
+#define MIPS_HFLAG_KSU 0x00003 /* kernel/supervisor/user mode mask */
+#define MIPS_HFLAG_UM 0x00002 /* user mode flag */
+#define MIPS_HFLAG_SM 0x00001 /* supervisor mode flag */
+#define MIPS_HFLAG_KM 0x00000 /* kernel mode flag */
+#define MIPS_HFLAG_DM 0x00004 /* Debug mode */
+#define MIPS_HFLAG_64 0x00008 /* 64-bit instructions enabled */
+#define MIPS_HFLAG_CP0 0x00010 /* CP0 enabled */
+#define MIPS_HFLAG_FPU 0x00020 /* FPU enabled */
+#define MIPS_HFLAG_F64 0x00040 /* 64-bit FPU enabled */
+ /* True if the MIPS IV COP1X instructions can be used. This also
+ controls the non-COP1X instructions RECIP.S, RECIP.D, RSQRT.S
+ and RSQRT.D. */
+#define MIPS_HFLAG_COP1X 0x00080 /* COP1X instructions enabled */
+#define MIPS_HFLAG_RE 0x00100 /* Reversed endianness */
+#define MIPS_HFLAG_AWRAP 0x00200 /* 32-bit compatibility address wrapping */
+#define MIPS_HFLAG_M16 0x00400 /* MIPS16 mode flag */
+#define MIPS_HFLAG_M16_SHIFT 10
+ /* If translation is interrupted between the branch instruction and
+ * the delay slot, record what type of branch it is so that we can
+ * resume translation properly. It might be possible to reduce
+ * this from three bits to two. */
+#define MIPS_HFLAG_BMASK_BASE 0x803800
+#define MIPS_HFLAG_B 0x00800 /* Unconditional branch */
+#define MIPS_HFLAG_BC 0x01000 /* Conditional branch */
+#define MIPS_HFLAG_BL 0x01800 /* Likely branch */
+#define MIPS_HFLAG_BR 0x02000 /* branch to register (can't link TB) */
+ /* Extra flags about the current pending branch. */
+#define MIPS_HFLAG_BMASK_EXT 0x7C000
+#define MIPS_HFLAG_B16 0x04000 /* branch instruction was 16 bits */
+#define MIPS_HFLAG_BDS16 0x08000 /* branch requires 16-bit delay slot */
+#define MIPS_HFLAG_BDS32 0x10000 /* branch requires 32-bit delay slot */
+#define MIPS_HFLAG_BDS_STRICT 0x20000 /* Strict delay slot size */
+#define MIPS_HFLAG_BX 0x40000 /* branch exchanges execution mode */
+#define MIPS_HFLAG_BMASK (MIPS_HFLAG_BMASK_BASE | MIPS_HFLAG_BMASK_EXT)
+ /* MIPS DSP resources access. */
+#define MIPS_HFLAG_DSP 0x080000 /* Enable access to MIPS DSP resources. */
+#define MIPS_HFLAG_DSPR2 0x100000 /* Enable access to MIPS DSPR2 resources. */
+ /* Extra flag about HWREna register. */
+#define MIPS_HFLAG_HWRENA_ULR 0x200000 /* ULR bit from HWREna is set. */
+#define MIPS_HFLAG_SBRI 0x400000 /* R6 SDBBP causes RI excpt. in user mode */
+#define MIPS_HFLAG_FBNSLOT 0x800000 /* Forbidden slot */
+#define MIPS_HFLAG_MSA 0x1000000
+#define MIPS_HFLAG_FRE 0x2000000 /* FRE enabled */
+#define MIPS_HFLAG_ELPA 0x4000000
+#define MIPS_HFLAG_ITC_CACHE 0x8000000 /* CACHE instr. operates on ITC tag */
+ target_ulong btarget; /* Jump / branch target */
+ target_ulong bcond; /* Branch condition (if needed) */
+
+ int SYNCI_Step; /* Address step size for SYNCI */
+ int CCRes; /* Cycle count resolution/divisor */
+ uint32_t CP0_Status_rw_bitmask; /* Read/write bits in CP0_Status */
+ uint32_t CP0_TCStatus_rw_bitmask; /* Read/write bits in CP0_TCStatus */
+ int insn_flags; /* Supported instruction set */
+
+ CPU_COMMON
+
+ /* Fields from here on are preserved across CPU reset. */
+ CPUMIPSMVPContext *mvp;
+#if !defined(CONFIG_USER_ONLY)
+ CPUMIPSTLBContext *tlb;
+#endif
+
+ const mips_def_t *cpu_model;
+ void *irq[8];
+ QEMUTimer *timer; /* Internal timer */
+ MemoryRegion *itc_tag; /* ITC Configuration Tags */
+ target_ulong exception_base; /* ExceptionBase input to the core */
+};
+
+/**
+ * MIPSCPU:
+ * @env: #CPUMIPSState
+ *
+ * A MIPS CPU.
+ */
+struct MIPSCPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPUMIPSState env;
+};
+
+static inline MIPSCPU *mips_env_get_cpu(CPUMIPSState *env)
+{
+ return container_of(env, MIPSCPU, env);
+}
+
+#define ENV_GET_CPU(e) CPU(mips_env_get_cpu(e))
+
+#define ENV_OFFSET offsetof(MIPSCPU, env)
+
+#ifndef CONFIG_USER_ONLY
+extern const struct VMStateDescription vmstate_mips_cpu;
+#endif
+
+void mips_cpu_do_interrupt(CPUState *cpu);
+bool mips_cpu_exec_interrupt(CPUState *cpu, int int_req);
+void mips_cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
+ int flags);
+hwaddr mips_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+int mips_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
+int mips_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr);
+
+#if !defined(CONFIG_USER_ONLY)
+int no_mmu_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
+ target_ulong address, int rw, int access_type);
+int fixed_mmu_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
+ target_ulong address, int rw, int access_type);
+int r4k_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
+ target_ulong address, int rw, int access_type);
+void r4k_helper_tlbwi(CPUMIPSState *env);
+void r4k_helper_tlbwr(CPUMIPSState *env);
+void r4k_helper_tlbp(CPUMIPSState *env);
+void r4k_helper_tlbr(CPUMIPSState *env);
+void r4k_helper_tlbinv(CPUMIPSState *env);
+void r4k_helper_tlbinvf(CPUMIPSState *env);
+
+void mips_cpu_unassigned_access(CPUState *cpu, hwaddr addr,
+ bool is_write, bool is_exec, int unused,
+ unsigned size);
+#endif
+
+void mips_cpu_list (FILE *f, fprintf_function cpu_fprintf);
+
+#define cpu_signal_handler cpu_mips_signal_handler
+#define cpu_list mips_cpu_list
+
+extern void cpu_wrdsp(uint32_t rs, uint32_t mask_num, CPUMIPSState *env);
+extern uint32_t cpu_rddsp(uint32_t mask_num, CPUMIPSState *env);
+
+/* MMU mode definitions. We carefully match the indices with our
+   hflags layout. */
+#define MMU_MODE0_SUFFIX _kernel
+#define MMU_MODE1_SUFFIX _super
+#define MMU_MODE2_SUFFIX _user
+#define MMU_USER_IDX 2
+static inline int cpu_mmu_index (CPUMIPSState *env, bool ifetch)
+{
+ return env->hflags & MIPS_HFLAG_KSU;
+}
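+/* With the layout above, (hflags & MIPS_HFLAG_KSU) is 0 in kernel mode,
+ * 1 in supervisor mode and 2 in user mode, so the value doubles as the
+ * MMU index selecting the _kernel, _super and _user access suffixes. */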
+
+static inline bool cpu_mips_hw_interrupts_enabled(CPUMIPSState *env)
+{
+ return (env->CP0_Status & (1 << CP0St_IE)) &&
+ !(env->CP0_Status & (1 << CP0St_EXL)) &&
+ !(env->CP0_Status & (1 << CP0St_ERL)) &&
+ !(env->hflags & MIPS_HFLAG_DM) &&
+ /* Note that the TCStatus IXMT field is initialized to zero,
+ and only MT capable cores can set it to one. So we don't
+ need to check for MT capabilities here. */
+ !(env->active_tc.CP0_TCStatus & (1 << CP0TCSt_IXMT));
+}
+
+/* Check whether there is a pending interrupt that is not masked out */
+static inline bool cpu_mips_hw_interrupts_pending(CPUMIPSState *env)
+{
+ int32_t pending;
+ int32_t status;
+ bool r;
+
+ pending = env->CP0_Cause & CP0Ca_IP_mask;
+ status = env->CP0_Status & CP0Ca_IP_mask;
+
+ if (env->CP0_Config3 & (1 << CP0C3_VEIC)) {
+        /* A MIPS configured with a vectorizing external interrupt controller
+           will feed a vector into the Cause pending lines. The core treats
+           the status lines as a vector level, not as individual masks. */
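+        /* E.g. with pending = 0x14 and status = 0x08 this reports a
+           pending interrupt (0x14 > 0x08), while the compatibility/VInt
+           case below would not, since 0x14 & 0x08 == 0. */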
+ r = pending > status;
+ } else {
+        /* A MIPS configured with compatibility or VInt (Vectored Interrupts)
+           treats the pending lines as individual interrupt lines; the status
+           lines are individual masks. */
+ r = (pending & status) != 0;
+ }
+ return r;
+}
+
+#include "exec/cpu-all.h"
+
+/* Memory access type:
+ * may be needed for precise access rights control and precise exceptions.
+ */
+enum {
+ /* 1 bit to define user level / supervisor access */
+ ACCESS_USER = 0x00,
+ ACCESS_SUPER = 0x01,
+ /* 1 bit to indicate direction */
+ ACCESS_STORE = 0x02,
+ /* Type of instruction that generated the access */
+ ACCESS_CODE = 0x10, /* Code fetch access */
+ ACCESS_INT = 0x20, /* Integer load/store access */
+ ACCESS_FLOAT = 0x30, /* floating point load/store access */
+};
+
+/* Exceptions */
+enum {
+ EXCP_NONE = -1,
+ EXCP_RESET = 0,
+ EXCP_SRESET,
+ EXCP_DSS,
+ EXCP_DINT,
+ EXCP_DDBL,
+ EXCP_DDBS,
+ EXCP_NMI,
+ EXCP_MCHECK,
+ EXCP_EXT_INTERRUPT, /* 8 */
+ EXCP_DFWATCH,
+ EXCP_DIB,
+ EXCP_IWATCH,
+ EXCP_AdEL,
+ EXCP_AdES,
+ EXCP_TLBF,
+ EXCP_IBE,
+ EXCP_DBp, /* 16 */
+ EXCP_SYSCALL,
+ EXCP_BREAK,
+ EXCP_CpU,
+ EXCP_RI,
+ EXCP_OVERFLOW,
+ EXCP_TRAP,
+ EXCP_FPE,
+ EXCP_DWATCH, /* 24 */
+ EXCP_LTLBL,
+ EXCP_TLBL,
+ EXCP_TLBS,
+ EXCP_DBE,
+ EXCP_THREAD,
+ EXCP_MDMX,
+ EXCP_C2E,
+ EXCP_CACHE, /* 32 */
+ EXCP_DSPDIS,
+ EXCP_MSADIS,
+ EXCP_MSAFPE,
+ EXCP_TLBXI,
+ EXCP_TLBRI,
+
+ EXCP_LAST = EXCP_TLBRI,
+};
+/* Dummy exception for conditional stores. */
+#define EXCP_SC 0x100
+
+/*
+ * This is an internally generated WAKE request line.
+ * It is driven by the CPU itself. Raised when the MT
+ * block wants to wake a VPE from an inactive state and
+ * cleared when the VPE goes from active to inactive.
+ */
+#define CPU_INTERRUPT_WAKE CPU_INTERRUPT_TGT_INT_0
+
+void mips_tcg_init(void);
+MIPSCPU *cpu_mips_init(const char *cpu_model);
+int cpu_mips_signal_handler(int host_signum, void *pinfo, void *puc);
+
+#define cpu_init(cpu_model) CPU(cpu_mips_init(cpu_model))
+bool cpu_supports_cps_smp(const char *cpu_model);
+void cpu_set_exception_base(int vp_index, target_ulong address);
+
+/* TODO QOM'ify CPU reset and remove */
+void cpu_state_reset(CPUMIPSState *s);
+
+/* mips_timer.c */
+uint32_t cpu_mips_get_random (CPUMIPSState *env);
+uint32_t cpu_mips_get_count (CPUMIPSState *env);
+void cpu_mips_store_count (CPUMIPSState *env, uint32_t value);
+void cpu_mips_store_compare (CPUMIPSState *env, uint32_t value);
+void cpu_mips_start_count(CPUMIPSState *env);
+void cpu_mips_stop_count(CPUMIPSState *env);
+
+/* mips_int.c */
+void cpu_mips_soft_irq(CPUMIPSState *env, int irq, int level);
+
+/* helper.c */
+int mips_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
+ int mmu_idx);
+
+/* op_helper.c */
+uint32_t float_class_s(uint32_t arg, float_status *fst);
+uint64_t float_class_d(uint64_t arg, float_status *fst);
+
+#if !defined(CONFIG_USER_ONLY)
+void r4k_invalidate_tlb (CPUMIPSState *env, int idx, int use_extra);
+hwaddr cpu_mips_translate_address (CPUMIPSState *env, target_ulong address,
+ int rw);
+#endif
+target_ulong exception_resume_pc (CPUMIPSState *env);
+
+/* op_helper.c */
+extern unsigned int ieee_rm[];
+int ieee_ex_to_mips(int xcpt);
+
+static inline void restore_rounding_mode(CPUMIPSState *env)
+{
+ set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3],
+ &env->active_fpu.fp_status);
+}
+
+static inline void restore_flush_mode(CPUMIPSState *env)
+{
+ set_flush_to_zero((env->active_fpu.fcr31 & (1 << FCR31_FS)) != 0,
+ &env->active_fpu.fp_status);
+}
+
+static inline void restore_snan_bit_mode(CPUMIPSState *env)
+{
+ set_snan_bit_is_one((env->active_fpu.fcr31 & (1 << FCR31_NAN2008)) == 0,
+ &env->active_fpu.fp_status);
+}
+
+static inline void restore_fp_status(CPUMIPSState *env)
+{
+ restore_rounding_mode(env);
+ restore_flush_mode(env);
+ restore_snan_bit_mode(env);
+}
+
+static inline void restore_msa_fp_status(CPUMIPSState *env)
+{
+ float_status *status = &env->active_tc.msa_fp_status;
+ int rounding_mode = (env->active_tc.msacsr & MSACSR_RM_MASK) >> MSACSR_RM;
+ bool flush_to_zero = (env->active_tc.msacsr & MSACSR_FS_MASK) != 0;
+
+ set_float_rounding_mode(ieee_rm[rounding_mode], status);
+ set_flush_to_zero(flush_to_zero, status);
+ set_flush_inputs_to_zero(flush_to_zero, status);
+}
+
+static inline void restore_pamask(CPUMIPSState *env)
+{
+ if (env->hflags & MIPS_HFLAG_ELPA) {
+ env->PAMask = (1ULL << env->PABITS) - 1;
+ } else {
+ env->PAMask = PAMASK_BASE;
+ }
+}
+
+static inline void cpu_get_tb_cpu_state(CPUMIPSState *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *flags)
+{
+ *pc = env->active_tc.PC;
+ *cs_base = 0;
+ *flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK |
+ MIPS_HFLAG_HWRENA_ULR);
+}
+
+static inline int mips_vpe_active(CPUMIPSState *env)
+{
+ int active = 1;
+
+ /* Check that the VPE is enabled. */
+ if (!(env->mvp->CP0_MVPControl & (1 << CP0MVPCo_EVP))) {
+ active = 0;
+ }
+ /* Check that the VPE is activated. */
+ if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))) {
+ active = 0;
+ }
+
+    /* Now verify that there are active thread contexts in the VPE.
+
+       This assumes the CPU model will internally reschedule threads
+       if the active one goes to sleep. If there are no threads available,
+       the active one will be in a sleeping state, and we can turn off
+       the entire VPE. */
+ if (!(env->active_tc.CP0_TCStatus & (1 << CP0TCSt_A))) {
+ /* TC is not activated. */
+ active = 0;
+ }
+ if (env->active_tc.CP0_TCHalt & 1) {
+ /* TC is in halt state. */
+ active = 0;
+ }
+
+ return active;
+}
+
+static inline int mips_vp_active(CPUMIPSState *env)
+{
+ CPUState *other_cs = first_cpu;
+
+ /* Check if the VP disabled other VPs (which means the VP is enabled) */
+ if ((env->CP0_VPControl >> CP0VPCtl_DIS) & 1) {
+ return 1;
+ }
+
+ /* Check if the virtual processor is disabled due to a DVP */
+ CPU_FOREACH(other_cs) {
+ MIPSCPU *other_cpu = MIPS_CPU(other_cs);
+ if ((&other_cpu->env != env) &&
+ ((other_cpu->env.CP0_VPControl >> CP0VPCtl_DIS) & 1)) {
+ return 0;
+ }
+ }
+ return 1;
+}
+
+static inline void compute_hflags(CPUMIPSState *env)
+{
+ env->hflags &= ~(MIPS_HFLAG_COP1X | MIPS_HFLAG_64 | MIPS_HFLAG_CP0 |
+ MIPS_HFLAG_F64 | MIPS_HFLAG_FPU | MIPS_HFLAG_KSU |
+ MIPS_HFLAG_AWRAP | MIPS_HFLAG_DSP | MIPS_HFLAG_DSPR2 |
+ MIPS_HFLAG_SBRI | MIPS_HFLAG_MSA | MIPS_HFLAG_FRE |
+ MIPS_HFLAG_ELPA);
+ if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
+ !(env->CP0_Status & (1 << CP0St_ERL)) &&
+ !(env->hflags & MIPS_HFLAG_DM)) {
+ env->hflags |= (env->CP0_Status >> CP0St_KSU) & MIPS_HFLAG_KSU;
+ }
+#if defined(TARGET_MIPS64)
+ if ((env->insn_flags & ISA_MIPS3) &&
+ (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_UM) ||
+ (env->CP0_Status & (1 << CP0St_PX)) ||
+ (env->CP0_Status & (1 << CP0St_UX)))) {
+ env->hflags |= MIPS_HFLAG_64;
+ }
+
+ if (!(env->insn_flags & ISA_MIPS3)) {
+ env->hflags |= MIPS_HFLAG_AWRAP;
+ } else if (((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_UM) &&
+ !(env->CP0_Status & (1 << CP0St_UX))) {
+ env->hflags |= MIPS_HFLAG_AWRAP;
+ } else if (env->insn_flags & ISA_MIPS64R6) {
+ /* Address wrapping for Supervisor and Kernel is specified in R6 */
+ if ((((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_SM) &&
+ !(env->CP0_Status & (1 << CP0St_SX))) ||
+ (((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_KM) &&
+ !(env->CP0_Status & (1 << CP0St_KX)))) {
+ env->hflags |= MIPS_HFLAG_AWRAP;
+ }
+ }
+#endif
+ if (((env->CP0_Status & (1 << CP0St_CU0)) &&
+ !(env->insn_flags & ISA_MIPS32R6)) ||
+ !(env->hflags & MIPS_HFLAG_KSU)) {
+ env->hflags |= MIPS_HFLAG_CP0;
+ }
+ if (env->CP0_Status & (1 << CP0St_CU1)) {
+ env->hflags |= MIPS_HFLAG_FPU;
+ }
+ if (env->CP0_Status & (1 << CP0St_FR)) {
+ env->hflags |= MIPS_HFLAG_F64;
+ }
+ if (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_KM) &&
+ (env->CP0_Config5 & (1 << CP0C5_SBRI))) {
+ env->hflags |= MIPS_HFLAG_SBRI;
+ }
+ if (env->insn_flags & ASE_DSPR2) {
+        /* The CPU implements the DSP R2 ASE; when Status.MX is set,
+           enable access to both the DSP and DSPR2 resources. */
+ if (env->CP0_Status & (1 << CP0St_MX)) {
+ env->hflags |= MIPS_HFLAG_DSP | MIPS_HFLAG_DSPR2;
+ }
+
+ } else if (env->insn_flags & ASE_DSP) {
+        /* The CPU implements the DSP ASE; when Status.MX is set,
+           enable access to the DSP resources. */
+ if (env->CP0_Status & (1 << CP0St_MX)) {
+ env->hflags |= MIPS_HFLAG_DSP;
+ }
+
+ }
+ if (env->insn_flags & ISA_MIPS32R2) {
+ if (env->active_fpu.fcr0 & (1 << FCR0_F64)) {
+ env->hflags |= MIPS_HFLAG_COP1X;
+ }
+ } else if (env->insn_flags & ISA_MIPS32) {
+ if (env->hflags & MIPS_HFLAG_64) {
+ env->hflags |= MIPS_HFLAG_COP1X;
+ }
+ } else if (env->insn_flags & ISA_MIPS4) {
+        /* All supported MIPS IV CPUs use the XX (CU3) bit to enable
+           and disable the MIPS IV extensions to the MIPS III ISA.
+           Some other MIPS IV CPUs ignore the bit, so the check here
+           would be too restrictive for them. */
+ if (env->CP0_Status & (1U << CP0St_CU3)) {
+ env->hflags |= MIPS_HFLAG_COP1X;
+ }
+ }
+ if (env->insn_flags & ASE_MSA) {
+ if (env->CP0_Config5 & (1 << CP0C5_MSAEn)) {
+ env->hflags |= MIPS_HFLAG_MSA;
+ }
+ }
+ if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
+ if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
+ env->hflags |= MIPS_HFLAG_FRE;
+ }
+ }
+ if (env->CP0_Config3 & (1 << CP0C3_LPA)) {
+ if (env->CP0_PageGrain & (1 << CP0PG_ELPA)) {
+ env->hflags |= MIPS_HFLAG_ELPA;
+ }
+ }
+}
+
+void cpu_mips_tlb_flush(CPUMIPSState *env, int flush_global);
+void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc);
+void cpu_mips_store_status(CPUMIPSState *env, target_ulong val);
+void cpu_mips_store_cause(CPUMIPSState *env, target_ulong val);
+
+void QEMU_NORETURN do_raise_exception_err(CPUMIPSState *env, uint32_t exception,
+ int error_code, uintptr_t pc);
+
+static inline void QEMU_NORETURN do_raise_exception(CPUMIPSState *env,
+ uint32_t exception,
+ uintptr_t pc)
+{
+ do_raise_exception_err(env, exception, 0, pc);
+}
+
+#endif /* MIPS_CPU_H */
diff --git a/target/mips/dsp_helper.c b/target/mips/dsp_helper.c
new file mode 100644
index 0000000000..dc707934ea
--- /dev/null
+++ b/target/mips/dsp_helper.c
@@ -0,0 +1,3762 @@
+/*
+ * MIPS ASE DSP Instruction emulation helpers for QEMU.
+ *
+ * Copyright (c) 2012 Jia Liu <proljc@gmail.com>
+ * Dongxue Zhang <elta.era@gmail.com>
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "qemu/bitops.h"
+
+/* As the byte ordering doesn't matter, i.e. all columns are treated
+ identically, these unions can be used directly. */
+typedef union {
+ uint8_t ub[4];
+ int8_t sb[4];
+ uint16_t uh[2];
+ int16_t sh[2];
+ uint32_t uw[1];
+ int32_t sw[1];
+} DSP32Value;
+
+typedef union {
+ uint8_t ub[8];
+ int8_t sb[8];
+ uint16_t uh[4];
+ int16_t sh[4];
+ uint32_t uw[2];
+ int32_t sw[2];
+ uint64_t ul[1];
+ int64_t sl[1];
+} DSP64Value;
+
+/*** MIPS DSP internal functions begin ***/
+#define MIPSDSP_ABS(x) (((x) >= 0) ? x : -x)
+#define MIPSDSP_OVERFLOW_ADD(a, b, c, d) (~(a ^ b) & (a ^ c) & d)
+#define MIPSDSP_OVERFLOW_SUB(a, b, c, d) ((a ^ b) & (a ^ c) & d)
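+/* The two OVERFLOW macros test the sign bits of the operands (a, b) and
+ * of the raw result (c); d selects the sign bit for the operand width.
+ * E.g. for a 16-bit add with a = 0x7FFF and b = 0x0001 the raw result is
+ * c = 0x8000, so ~(a ^ b) & (a ^ c) & 0x8000 is non-zero and the caller
+ * sets the DSPControl overflow flag. */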
+
+static inline void set_DSPControl_overflow_flag(uint32_t flag, int position,
+ CPUMIPSState *env)
+{
+ env->active_tc.DSPControl |= (target_ulong)flag << position;
+}
+
+static inline void set_DSPControl_carryflag(bool flag, CPUMIPSState *env)
+{
+ env->active_tc.DSPControl &= ~(1 << 13);
+ env->active_tc.DSPControl |= flag << 13;
+}
+
+static inline uint32_t get_DSPControl_carryflag(CPUMIPSState *env)
+{
+ return (env->active_tc.DSPControl >> 13) & 0x01;
+}
+
+static inline void set_DSPControl_24(uint32_t flag, int len, CPUMIPSState *env)
+{
+ uint32_t filter;
+
+ filter = ((0x01 << len) - 1) << 24;
+ filter = ~filter;
+
+ env->active_tc.DSPControl &= filter;
+ env->active_tc.DSPControl |= (target_ulong)flag << 24;
+}
+
+static inline void set_DSPControl_pos(uint32_t pos, CPUMIPSState *env)
+{
+ target_ulong dspc;
+
+ dspc = env->active_tc.DSPControl;
+#ifndef TARGET_MIPS64
+ dspc = dspc & 0xFFFFFFC0;
+ dspc |= (pos & 0x3F);
+#else
+ dspc = dspc & 0xFFFFFF80;
+ dspc |= (pos & 0x7F);
+#endif
+ env->active_tc.DSPControl = dspc;
+}
+
+static inline uint32_t get_DSPControl_pos(CPUMIPSState *env)
+{
+ target_ulong dspc;
+ uint32_t pos;
+
+ dspc = env->active_tc.DSPControl;
+
+#ifndef TARGET_MIPS64
+ pos = dspc & 0x3F;
+#else
+ pos = dspc & 0x7F;
+#endif
+
+ return pos;
+}
+
+static inline void set_DSPControl_efi(uint32_t flag, CPUMIPSState *env)
+{
+ env->active_tc.DSPControl &= 0xFFFFBFFF;
+ env->active_tc.DSPControl |= (target_ulong)flag << 14;
+}
+
+#define DO_MIPS_SAT_ABS(size) \
+static inline int##size##_t mipsdsp_sat_abs##size(int##size##_t a, \
+ CPUMIPSState *env) \
+{ \
+ if (a == INT##size##_MIN) { \
+ set_DSPControl_overflow_flag(1, 20, env); \
+ return INT##size##_MAX; \
+ } else { \
+ return MIPSDSP_ABS(a); \
+ } \
+}
+DO_MIPS_SAT_ABS(8)
+DO_MIPS_SAT_ABS(16)
+DO_MIPS_SAT_ABS(32)
+#undef DO_MIPS_SAT_ABS
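+/* The INT##size##_MIN special case above is needed because the negation
+ * of the most negative value is not representable: e.g. for 8 bits,
+ * -(-128) does not fit in int8_t, so the result saturates to 127 and
+ * bit 20 of DSPControl is set. */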
+
+/* get sum value */
+static inline int16_t mipsdsp_add_i16(int16_t a, int16_t b, CPUMIPSState *env)
+{
+ int16_t tempI;
+
+ tempI = a + b;
+
+ if (MIPSDSP_OVERFLOW_ADD(a, b, tempI, 0x8000)) {
+ set_DSPControl_overflow_flag(1, 20, env);
+ }
+
+ return tempI;
+}
+
+static inline int16_t mipsdsp_sat_add_i16(int16_t a, int16_t b,
+ CPUMIPSState *env)
+{
+ int16_t tempS;
+
+ tempS = a + b;
+
+ if (MIPSDSP_OVERFLOW_ADD(a, b, tempS, 0x8000)) {
+ if (a > 0) {
+ tempS = 0x7FFF;
+ } else {
+ tempS = 0x8000;
+ }
+ set_DSPControl_overflow_flag(1, 20, env);
+ }
+
+ return tempS;
+}
+
+static inline int32_t mipsdsp_sat_add_i32(int32_t a, int32_t b,
+ CPUMIPSState *env)
+{
+ int32_t tempI;
+
+ tempI = a + b;
+
+ if (MIPSDSP_OVERFLOW_ADD(a, b, tempI, 0x80000000)) {
+ if (a > 0) {
+ tempI = 0x7FFFFFFF;
+ } else {
+ tempI = 0x80000000;
+ }
+ set_DSPControl_overflow_flag(1, 20, env);
+ }
+
+ return tempI;
+}
+
+static inline uint8_t mipsdsp_add_u8(uint8_t a, uint8_t b, CPUMIPSState *env)
+{
+ uint16_t temp;
+
+ temp = (uint16_t)a + (uint16_t)b;
+
+ if (temp & 0x0100) {
+ set_DSPControl_overflow_flag(1, 20, env);
+ }
+
+ return temp & 0xFF;
+}
+
+static inline uint16_t mipsdsp_add_u16(uint16_t a, uint16_t b,
+ CPUMIPSState *env)
+{
+ uint32_t temp;
+
+ temp = (uint32_t)a + (uint32_t)b;
+
+ if (temp & 0x00010000) {
+ set_DSPControl_overflow_flag(1, 20, env);
+ }
+
+ return temp & 0xFFFF;
+}
+
+static inline uint8_t mipsdsp_sat_add_u8(uint8_t a, uint8_t b,
+ CPUMIPSState *env)
+{
+ uint8_t result;
+ uint16_t temp;
+
+ temp = (uint16_t)a + (uint16_t)b;
+ result = temp & 0xFF;
+
+ if (0x0100 & temp) {
+ result = 0xFF;
+ set_DSPControl_overflow_flag(1, 20, env);
+ }
+
+ return result;
+}
+
+static inline uint16_t mipsdsp_sat_add_u16(uint16_t a, uint16_t b,
+ CPUMIPSState *env)
+{
+ uint16_t result;
+ uint32_t temp;
+
+ temp = (uint32_t)a + (uint32_t)b;
+ result = temp & 0xFFFF;
+
+ if (0x00010000 & temp) {
+ result = 0xFFFF;
+ set_DSPControl_overflow_flag(1, 20, env);
+ }
+
+ return result;
+}
+
+static inline int32_t mipsdsp_sat32_acc_q31(int32_t acc, int32_t a,
+ CPUMIPSState *env)
+{
+ int64_t temp;
+ int32_t temp32, temp31, result;
+ int64_t temp_sum;
+
+#ifndef TARGET_MIPS64
+ temp = ((uint64_t)env->active_tc.HI[acc] << 32) |
+ (uint64_t)env->active_tc.LO[acc];
+#else
+ temp = (uint64_t)env->active_tc.LO[acc];
+#endif
+
+ temp_sum = (int64_t)a + temp;
+
+ temp32 = (temp_sum >> 32) & 0x01;
+ temp31 = (temp_sum >> 31) & 0x01;
+ result = temp_sum & 0xFFFFFFFF;
+
+ if (temp32 != temp31) {
+ if (temp32 == 0) {
+ result = 0x7FFFFFFF;
+ } else {
+ result = 0x80000000;
+ }
+ set_DSPControl_overflow_flag(1, 16 + acc, env);
+ }
+
+ return result;
+}
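+/* The temp31/temp32 comparison above detects 32-bit signed overflow of the
+ * accumulated sum: if bit 32 of the 64-bit sum differs from bit 31, the
+ * result does not fit in 32 bits and is saturated. */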
+
+#ifdef TARGET_MIPS64
+/* a[0] is LO, a[1] is HI. */
+static inline void mipsdsp_sat64_acc_add_q63(int64_t *ret,
+ int32_t ac,
+ int64_t *a,
+ CPUMIPSState *env)
+{
+ bool temp64;
+
+ ret[0] = env->active_tc.LO[ac] + a[0];
+ ret[1] = env->active_tc.HI[ac] + a[1];
+
+ if (((uint64_t)ret[0] < (uint64_t)env->active_tc.LO[ac]) &&
+ ((uint64_t)ret[0] < (uint64_t)a[0])) {
+ ret[1] += 1;
+ }
+ temp64 = ret[1] & 1;
+ if (temp64 != ((ret[0] >> 63) & 0x01)) {
+ if (temp64) {
+ ret[0] = (0x01ull << 63);
+ ret[1] = ~0ull;
+ } else {
+ ret[0] = (0x01ull << 63) - 1;
+ ret[1] = 0x00;
+ }
+ set_DSPControl_overflow_flag(1, 16 + ac, env);
+ }
+}
+
+static inline void mipsdsp_sat64_acc_sub_q63(int64_t *ret,
+ int32_t ac,
+ int64_t *a,
+ CPUMIPSState *env)
+{
+ bool temp64;
+
+ ret[0] = env->active_tc.LO[ac] - a[0];
+ ret[1] = env->active_tc.HI[ac] - a[1];
+
+ if ((uint64_t)ret[0] > (uint64_t)env->active_tc.LO[ac]) {
+ ret[1] -= 1;
+ }
+ temp64 = ret[1] & 1;
+ if (temp64 != ((ret[0] >> 63) & 0x01)) {
+ if (temp64) {
+ ret[0] = (0x01ull << 63);
+ ret[1] = ~0ull;
+ } else {
+ ret[0] = (0x01ull << 63) - 1;
+ ret[1] = 0x00;
+ }
+ set_DSPControl_overflow_flag(1, 16 + ac, env);
+ }
+}
+#endif
+
+static inline int32_t mipsdsp_mul_i16_i16(int16_t a, int16_t b,
+ CPUMIPSState *env)
+{
+ int32_t temp;
+
+ temp = (int32_t)a * (int32_t)b;
+
+ if ((temp > (int)0x7FFF) || (temp < (int)0xFFFF8000)) {
+ set_DSPControl_overflow_flag(1, 21, env);
+ }
+ temp &= 0x0000FFFF;
+
+ return temp;
+}
+
+static inline int32_t mipsdsp_mul_u16_u16(int32_t a, int32_t b)
+{
+ return a * b;
+}
+
+#ifdef TARGET_MIPS64
+static inline int32_t mipsdsp_mul_i32_i32(int32_t a, int32_t b)
+{
+ return a * b;
+}
+#endif
+
+static inline int32_t mipsdsp_sat16_mul_i16_i16(int16_t a, int16_t b,
+ CPUMIPSState *env)
+{
+ int32_t temp;
+
+ temp = (int32_t)a * (int32_t)b;
+
+ if (temp > (int)0x7FFF) {
+ temp = 0x00007FFF;
+ set_DSPControl_overflow_flag(1, 21, env);
+ } else if (temp < (int)0xffff8000) {
+ temp = 0xFFFF8000;
+ set_DSPControl_overflow_flag(1, 21, env);
+ }
+ temp &= 0x0000FFFF;
+
+ return temp;
+}
+
+static inline int32_t mipsdsp_mul_q15_q15_overflowflag21(uint16_t a, uint16_t b,
+ CPUMIPSState *env)
+{
+ int32_t temp;
+
+ if ((a == 0x8000) && (b == 0x8000)) {
+ temp = 0x7FFFFFFF;
+ set_DSPControl_overflow_flag(1, 21, env);
+ } else {
+ temp = ((int16_t)a * (int16_t)b) << 1;
+ }
+
+ return temp;
+}
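+/* In Q15 the value 0x8000 represents -1.0, and (-1.0) * (-1.0) = +1.0 is
+ * not representable in Q31, which is why the product of two 0x8000
+ * operands saturates to 0x7FFFFFFF above. */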
+
+/* right shift */
+static inline uint8_t mipsdsp_rshift_u8(uint8_t a, target_ulong mov)
+{
+ return a >> mov;
+}
+
+static inline uint16_t mipsdsp_rshift_u16(uint16_t a, target_ulong mov)
+{
+ return a >> mov;
+}
+
+static inline int8_t mipsdsp_rashift8(int8_t a, target_ulong mov)
+{
+ return a >> mov;
+}
+
+static inline int16_t mipsdsp_rashift16(int16_t a, target_ulong mov)
+{
+ return a >> mov;
+}
+
+#ifdef TARGET_MIPS64
+static inline int32_t mipsdsp_rashift32(int32_t a, target_ulong mov)
+{
+ return a >> mov;
+}
+#endif
+
+static inline int16_t mipsdsp_rshift1_add_q16(int16_t a, int16_t b)
+{
+ int32_t temp;
+
+ temp = (int32_t)a + (int32_t)b;
+
+ return (temp >> 1) & 0xFFFF;
+}
+
+/* round right shift */
+static inline int16_t mipsdsp_rrshift1_add_q16(int16_t a, int16_t b)
+{
+ int32_t temp;
+
+ temp = (int32_t)a + (int32_t)b;
+ temp += 1;
+
+ return (temp >> 1) & 0xFFFF;
+}
+
+static inline int32_t mipsdsp_rshift1_add_q32(int32_t a, int32_t b)
+{
+ int64_t temp;
+
+ temp = (int64_t)a + (int64_t)b;
+
+ return (temp >> 1) & 0xFFFFFFFF;
+}
+
+static inline int32_t mipsdsp_rrshift1_add_q32(int32_t a, int32_t b)
+{
+ int64_t temp;
+
+ temp = (int64_t)a + (int64_t)b;
+ temp += 1;
+
+ return (temp >> 1) & 0xFFFFFFFF;
+}
+
+static inline uint8_t mipsdsp_rshift1_add_u8(uint8_t a, uint8_t b)
+{
+ uint16_t temp;
+
+ temp = (uint16_t)a + (uint16_t)b;
+
+ return (temp >> 1) & 0x00FF;
+}
+
+static inline uint8_t mipsdsp_rrshift1_add_u8(uint8_t a, uint8_t b)
+{
+ uint16_t temp;
+
+ temp = (uint16_t)a + (uint16_t)b + 1;
+
+ return (temp >> 1) & 0x00FF;
+}
+
+#ifdef TARGET_MIPS64
+static inline uint8_t mipsdsp_rshift1_sub_u8(uint8_t a, uint8_t b)
+{
+ uint16_t temp;
+
+ temp = (uint16_t)a - (uint16_t)b;
+
+ return (temp >> 1) & 0x00FF;
+}
+
+static inline uint8_t mipsdsp_rrshift1_sub_u8(uint8_t a, uint8_t b)
+{
+ uint16_t temp;
+
+ temp = (uint16_t)a - (uint16_t)b + 1;
+
+ return (temp >> 1) & 0x00FF;
+}
+#endif
+
+/* 128 bits long. p[0] is LO, p[1] is HI. */
+static inline void mipsdsp_rndrashift_short_acc(int64_t *p,
+ int32_t ac,
+ int32_t shift,
+ CPUMIPSState *env)
+{
+ int64_t acc;
+
+ acc = ((int64_t)env->active_tc.HI[ac] << 32) |
+ ((int64_t)env->active_tc.LO[ac] & 0xFFFFFFFF);
+ p[0] = (shift == 0) ? (acc << 1) : (acc >> (shift - 1));
+ p[1] = (acc >> 63) & 0x01;
+}
+
+#ifdef TARGET_MIPS64
+/* 128 bits long. p[0] is LO, p[1] is HI */
+static inline void mipsdsp_rashift_acc(uint64_t *p,
+ uint32_t ac,
+ uint32_t shift,
+ CPUMIPSState *env)
+{
+ uint64_t tempB, tempA;
+
+ tempB = env->active_tc.HI[ac];
+ tempA = env->active_tc.LO[ac];
+ shift = shift & 0x1F;
+
+ if (shift == 0) {
+ p[1] = tempB;
+ p[0] = tempA;
+ } else {
+ p[0] = (tempB << (64 - shift)) | (tempA >> shift);
+ p[1] = (int64_t)tempB >> shift;
+ }
+}
+
+/* 128 bits long. p[0] is LO, p[1] is HI, p[2] is the sign of HI. */
+static inline void mipsdsp_rndrashift_acc(uint64_t *p,
+ uint32_t ac,
+ uint32_t shift,
+ CPUMIPSState *env)
+{
+ int64_t tempB, tempA;
+
+ tempB = env->active_tc.HI[ac];
+ tempA = env->active_tc.LO[ac];
+ shift = shift & 0x3F;
+
+ if (shift == 0) {
+ p[2] = tempB >> 63;
+ p[1] = (tempB << 1) | (tempA >> 63);
+ p[0] = tempA << 1;
+ } else {
+ p[0] = (tempB << (65 - shift)) | (tempA >> (shift - 1));
+ p[1] = (int64_t)tempB >> (shift - 1);
+ if (tempB >= 0) {
+ p[2] = 0x0;
+ } else {
+ p[2] = ~0ull;
+ }
+ }
+}
+#endif
+
+static inline int32_t mipsdsp_mul_q15_q15(int32_t ac, uint16_t a, uint16_t b,
+ CPUMIPSState *env)
+{
+ int32_t temp;
+
+ if ((a == 0x8000) && (b == 0x8000)) {
+ temp = 0x7FFFFFFF;
+ set_DSPControl_overflow_flag(1, 16 + ac, env);
+ } else {
+ temp = ((int16_t)a * (int16_t)b) << 1;
+ }
+
+ return temp;
+}
+
+static inline int64_t mipsdsp_mul_q31_q31(int32_t ac, uint32_t a, uint32_t b,
+ CPUMIPSState *env)
+{
+ uint64_t temp;
+
+ if ((a == 0x80000000) && (b == 0x80000000)) {
+ temp = (0x01ull << 63) - 1;
+ set_DSPControl_overflow_flag(1, 16 + ac, env);
+ } else {
+ temp = ((int64_t)(int32_t)a * (int32_t)b) << 1;
+ }
+
+ return temp;
+}
+
+static inline uint16_t mipsdsp_mul_u8_u8(uint8_t a, uint8_t b)
+{
+ return (uint16_t)a * (uint16_t)b;
+}
+
+static inline uint16_t mipsdsp_mul_u8_u16(uint8_t a, uint16_t b,
+ CPUMIPSState *env)
+{
+ uint32_t tempI;
+
+ tempI = (uint32_t)a * (uint32_t)b;
+ if (tempI > 0x0000FFFF) {
+ tempI = 0x0000FFFF;
+ set_DSPControl_overflow_flag(1, 21, env);
+ }
+
+ return tempI & 0x0000FFFF;
+}
+
+#ifdef TARGET_MIPS64
+static inline uint64_t mipsdsp_mul_u32_u32(uint32_t a, uint32_t b)
+{
+ return (uint64_t)a * (uint64_t)b;
+}
+#endif
+
+static inline int16_t mipsdsp_rndq15_mul_q15_q15(uint16_t a, uint16_t b,
+ CPUMIPSState *env)
+{
+ uint32_t temp;
+
+ if ((a == 0x8000) && (b == 0x8000)) {
+ temp = 0x7FFF0000;
+ set_DSPControl_overflow_flag(1, 21, env);
+ } else {
+ temp = ((int16_t)a * (int16_t)b) << 1;
+ temp = temp + 0x00008000;
+ }
+
+ return (temp & 0xFFFF0000) >> 16;
+}
+
+static inline int32_t mipsdsp_sat16_mul_q15_q15(uint16_t a, uint16_t b,
+ CPUMIPSState *env)
+{
+ int32_t temp;
+
+ if ((a == 0x8000) && (b == 0x8000)) {
+ temp = 0x7FFF0000;
+ set_DSPControl_overflow_flag(1, 21, env);
+ } else {
+ temp = (int16_t)a * (int16_t)b;
+ temp = temp << 1;
+ }
+
+ return (temp >> 16) & 0x0000FFFF;
+}
+
+static inline uint16_t mipsdsp_trunc16_sat16_round(int32_t a,
+ CPUMIPSState *env)
+{
+ uint16_t temp;
+
+ /*
+ * The value 0x00008000 will be added to the input Q31 value, and the code
+ * needs to check if the addition causes an overflow. Since a positive value
+ * is added, overflow can happen in one direction only.
+ */
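+    /* E.g. a = 0x7FFF9000 would wrap past INT32_MAX once 0x8000 is added,
+     * so any a > 0x7FFF7FFF saturates to 0x7FFF instead. */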
+ if (a > 0x7FFF7FFF) {
+ temp = 0x7FFF;
+ set_DSPControl_overflow_flag(1, 22, env);
+ } else {
+ temp = ((a + 0x8000) >> 16) & 0xFFFF;
+ }
+
+ return temp;
+}
+
+static inline uint8_t mipsdsp_sat8_reduce_precision(uint16_t a,
+ CPUMIPSState *env)
+{
+ uint16_t mag;
+ uint32_t sign;
+
+ sign = (a >> 15) & 0x01;
+ mag = a & 0x7FFF;
+
+ if (sign == 0) {
+ if (mag > 0x7F80) {
+ set_DSPControl_overflow_flag(1, 22, env);
+ return 0xFF;
+ } else {
+ return (mag >> 7) & 0xFFFF;
+ }
+ } else {
+ set_DSPControl_overflow_flag(1, 22, env);
+ return 0x00;
+ }
+}
+
+static inline uint8_t mipsdsp_lshift8(uint8_t a, uint8_t s, CPUMIPSState *env)
+{
+ uint8_t discard;
+
+ if (s != 0) {
+ discard = a >> (8 - s);
+
+ if (discard != 0x00) {
+ set_DSPControl_overflow_flag(1, 22, env);
+ }
+ }
+ return a << s;
+}
+
+static inline uint16_t mipsdsp_lshift16(uint16_t a, uint8_t s,
+ CPUMIPSState *env)
+{
+ uint16_t discard;
+
+ if (s != 0) {
+ discard = (int16_t)a >> (15 - s);
+
+ if ((discard != 0x0000) && (discard != 0xFFFF)) {
+ set_DSPControl_overflow_flag(1, 22, env);
+ }
+ }
+ return a << s;
+}
+
+#ifdef TARGET_MIPS64
+static inline uint32_t mipsdsp_lshift32(uint32_t a, uint8_t s,
+ CPUMIPSState *env)
+{
+ uint32_t discard;
+
+ if (s == 0) {
+ return a;
+ } else {
+ discard = (int32_t)a >> (31 - (s - 1));
+
+ if ((discard != 0x00000000) && (discard != 0xFFFFFFFF)) {
+ set_DSPControl_overflow_flag(1, 22, env);
+ }
+ return a << s;
+ }
+}
+#endif
+
+static inline uint16_t mipsdsp_sat16_lshift(uint16_t a, uint8_t s,
+ CPUMIPSState *env)
+{
+ uint8_t sign;
+ uint16_t discard;
+
+ if (s == 0) {
+ return a;
+ } else {
+ sign = (a >> 15) & 0x01;
+ if (sign != 0) {
+ discard = (((0x01 << (16 - s)) - 1) << s) |
+ ((a >> (14 - (s - 1))) & ((0x01 << s) - 1));
+ } else {
+ discard = a >> (14 - (s - 1));
+ }
+
+ if ((discard != 0x0000) && (discard != 0xFFFF)) {
+ set_DSPControl_overflow_flag(1, 22, env);
+ return (sign == 0) ? 0x7FFF : 0x8000;
+ } else {
+ return a << s;
+ }
+ }
+}
+
+static inline uint32_t mipsdsp_sat32_lshift(uint32_t a, uint8_t s,
+ CPUMIPSState *env)
+{
+ uint8_t sign;
+ uint32_t discard;
+
+ if (s == 0) {
+ return a;
+ } else {
+ sign = (a >> 31) & 0x01;
+ if (sign != 0) {
+ discard = (((0x01 << (32 - s)) - 1) << s) |
+ ((a >> (30 - (s - 1))) & ((0x01 << s) - 1));
+ } else {
+ discard = a >> (30 - (s - 1));
+ }
+
+ if ((discard != 0x00000000) && (discard != 0xFFFFFFFF)) {
+ set_DSPControl_overflow_flag(1, 22, env);
+ return (sign == 0) ? 0x7FFFFFFF : 0x80000000;
+ } else {
+ return a << s;
+ }
+ }
+}
+
+static inline uint8_t mipsdsp_rnd8_rashift(uint8_t a, uint8_t s)
+{
+ uint32_t temp;
+
+ if (s == 0) {
+ temp = (uint32_t)a << 1;
+ } else {
+ temp = (int32_t)(int8_t)a >> (s - 1);
+ }
+
+ return (temp + 1) >> 1;
+}
+
+static inline uint16_t mipsdsp_rnd16_rashift(uint16_t a, uint8_t s)
+{
+ uint32_t temp;
+
+ if (s == 0) {
+ temp = (uint32_t)a << 1;
+ } else {
+ temp = (int32_t)(int16_t)a >> (s - 1);
+ }
+
+ return (temp + 1) >> 1;
+}
+
+static inline uint32_t mipsdsp_rnd32_rashift(uint32_t a, uint8_t s)
+{
+ int64_t temp;
+
+ if (s == 0) {
+ temp = (uint64_t)a << 1;
+ } else {
+ temp = (int64_t)(int32_t)a >> (s - 1);
+ }
+ temp += 1;
+
+ return (temp >> 1) & 0xFFFFFFFFull;
+}
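+/* The rnd*_rashift helpers above implement a rounding arithmetic right
+ * shift: shift right by (s - 1), add 1, then shift right once more.
+ * E.g. mipsdsp_rnd16_rashift(7, 2) computes (7 >> 1) = 3, 3 + 1 = 4,
+ * 4 >> 1 = 2, i.e. 7 / 4 rounded to nearest. */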
+
+static inline uint16_t mipsdsp_sub_i16(int16_t a, int16_t b, CPUMIPSState *env)
+{
+ int16_t temp;
+
+ temp = a - b;
+ if (MIPSDSP_OVERFLOW_SUB(a, b, temp, 0x8000)) {
+ set_DSPControl_overflow_flag(1, 20, env);
+ }
+
+ return temp;
+}
+
+static inline uint16_t mipsdsp_sat16_sub(int16_t a, int16_t b,
+ CPUMIPSState *env)
+{
+ int16_t temp;
+
+ temp = a - b;
+ if (MIPSDSP_OVERFLOW_SUB(a, b, temp, 0x8000)) {
+ if (a >= 0) {
+ temp = 0x7FFF;
+ } else {
+ temp = 0x8000;
+ }
+ set_DSPControl_overflow_flag(1, 20, env);
+ }
+
+ return temp;
+}
+
+static inline uint32_t mipsdsp_sat32_sub(int32_t a, int32_t b,
+ CPUMIPSState *env)
+{
+ int32_t temp;
+
+ temp = a - b;
+ if (MIPSDSP_OVERFLOW_SUB(a, b, temp, 0x80000000)) {
+ if (a >= 0) {
+ temp = 0x7FFFFFFF;
+ } else {
+ temp = 0x80000000;
+ }
+ set_DSPControl_overflow_flag(1, 20, env);
+ }
+
+ return temp & 0xFFFFFFFFull;
+}
+
+static inline uint16_t mipsdsp_rshift1_sub_q16(int16_t a, int16_t b)
+{
+ int32_t temp;
+
+ temp = (int32_t)a - (int32_t)b;
+
+ return (temp >> 1) & 0x0000FFFF;
+}
+
+static inline uint16_t mipsdsp_rrshift1_sub_q16(int16_t a, int16_t b)
+{
+ int32_t temp;
+
+ temp = (int32_t)a - (int32_t)b;
+ temp += 1;
+
+ return (temp >> 1) & 0x0000FFFF;
+}
+
+static inline uint32_t mipsdsp_rshift1_sub_q32(int32_t a, int32_t b)
+{
+ int64_t temp;
+
+ temp = (int64_t)a - (int64_t)b;
+
+ return (temp >> 1) & 0xFFFFFFFFull;
+}
+
+static inline uint32_t mipsdsp_rrshift1_sub_q32(int32_t a, int32_t b)
+{
+ int64_t temp;
+
+ temp = (int64_t)a - (int64_t)b;
+ temp += 1;
+
+ return (temp >> 1) & 0xFFFFFFFFull;
+}
+
+static inline uint16_t mipsdsp_sub_u16_u16(uint16_t a, uint16_t b,
+ CPUMIPSState *env)
+{
+ uint8_t temp16;
+ uint32_t temp;
+
+ temp = (uint32_t)a - (uint32_t)b;
+ temp16 = (temp >> 16) & 0x01;
+ if (temp16 == 1) {
+ set_DSPControl_overflow_flag(1, 20, env);
+ }
+ return temp & 0x0000FFFF;
+}
+
+static inline uint16_t mipsdsp_satu16_sub_u16_u16(uint16_t a, uint16_t b,
+ CPUMIPSState *env)
+{
+ uint8_t temp16;
+ uint32_t temp;
+
+ temp = (uint32_t)a - (uint32_t)b;
+ temp16 = (temp >> 16) & 0x01;
+
+ if (temp16 == 1) {
+ temp = 0x0000;
+ set_DSPControl_overflow_flag(1, 20, env);
+ }
+
+ return temp & 0x0000FFFF;
+}
+
+static inline uint8_t mipsdsp_sub_u8(uint8_t a, uint8_t b, CPUMIPSState *env)
+{
+ uint8_t temp8;
+ uint16_t temp;
+
+ temp = (uint16_t)a - (uint16_t)b;
+ temp8 = (temp >> 8) & 0x01;
+ if (temp8 == 1) {
+ set_DSPControl_overflow_flag(1, 20, env);
+ }
+
+ return temp & 0x00FF;
+}
+
+static inline uint8_t mipsdsp_satu8_sub(uint8_t a, uint8_t b, CPUMIPSState *env)
+{
+ uint8_t temp8;
+ uint16_t temp;
+
+ temp = (uint16_t)a - (uint16_t)b;
+ temp8 = (temp >> 8) & 0x01;
+ if (temp8 == 1) {
+ temp = 0x00;
+ set_DSPControl_overflow_flag(1, 20, env);
+ }
+
+ return temp & 0x00FF;
+}
+
+#ifdef TARGET_MIPS64
+static inline uint32_t mipsdsp_sub32(int32_t a, int32_t b, CPUMIPSState *env)
+{
+ int32_t temp;
+
+ temp = a - b;
+ if (MIPSDSP_OVERFLOW_SUB(a, b, temp, 0x80000000)) {
+ set_DSPControl_overflow_flag(1, 20, env);
+ }
+
+ return temp;
+}
+
+static inline int32_t mipsdsp_add_i32(int32_t a, int32_t b, CPUMIPSState *env)
+{
+ int32_t temp;
+
+ temp = a + b;
+
+ if (MIPSDSP_OVERFLOW_ADD(a, b, temp, 0x80000000)) {
+ set_DSPControl_overflow_flag(1, 20, env);
+ }
+
+ return temp;
+}
+#endif
+
+static inline int32_t mipsdsp_cmp_eq(int32_t a, int32_t b)
+{
+ return a == b;
+}
+
+static inline int32_t mipsdsp_cmp_le(int32_t a, int32_t b)
+{
+ return a <= b;
+}
+
+static inline int32_t mipsdsp_cmp_lt(int32_t a, int32_t b)
+{
+ return a < b;
+}
+
+static inline int32_t mipsdsp_cmpu_eq(uint32_t a, uint32_t b)
+{
+ return a == b;
+}
+
+static inline int32_t mipsdsp_cmpu_le(uint32_t a, uint32_t b)
+{
+ return a <= b;
+}
+
+static inline int32_t mipsdsp_cmpu_lt(uint32_t a, uint32_t b)
+{
+ return a < b;
+}
+/*** MIPS DSP internal functions end ***/
+
+#define MIPSDSP_LHI 0xFFFFFFFF00000000ull
+#define MIPSDSP_LLO 0x00000000FFFFFFFFull
+#define MIPSDSP_HI 0xFFFF0000
+#define MIPSDSP_LO 0x0000FFFF
+#define MIPSDSP_Q3 0xFF000000
+#define MIPSDSP_Q2 0x00FF0000
+#define MIPSDSP_Q1 0x0000FF00
+#define MIPSDSP_Q0 0x000000FF
+
+#define MIPSDSP_SPLIT32_8(num, a, b, c, d) \
+ do { \
+ a = (num >> 24) & MIPSDSP_Q0; \
+ b = (num >> 16) & MIPSDSP_Q0; \
+ c = (num >> 8) & MIPSDSP_Q0; \
+ d = num & MIPSDSP_Q0; \
+ } while (0)
+
+#define MIPSDSP_SPLIT32_16(num, a, b) \
+ do { \
+ a = (num >> 16) & MIPSDSP_LO; \
+ b = num & MIPSDSP_LO; \
+ } while (0)
+
+#define MIPSDSP_RETURN32_8(a, b, c, d) ((target_long)(int32_t) \
+ (((uint32_t)a << 24) | \
+ (((uint32_t)b << 16) | \
+ (((uint32_t)c << 8) | \
+ ((uint32_t)d & 0xFF)))))
+#define MIPSDSP_RETURN32_16(a, b) ((target_long)(int32_t) \
+ (((uint32_t)a << 16) | \
+ ((uint32_t)b & 0xFFFF)))
+
+#ifdef TARGET_MIPS64
+#define MIPSDSP_SPLIT64_16(num, a, b, c, d) \
+ do { \
+ a = (num >> 48) & MIPSDSP_LO; \
+ b = (num >> 32) & MIPSDSP_LO; \
+ c = (num >> 16) & MIPSDSP_LO; \
+ d = num & MIPSDSP_LO; \
+ } while (0)
+
+#define MIPSDSP_SPLIT64_32(num, a, b) \
+ do { \
+ a = (num >> 32) & MIPSDSP_LLO; \
+ b = num & MIPSDSP_LLO; \
+ } while (0)
+
+#define MIPSDSP_RETURN64_16(a, b, c, d) (((uint64_t)a << 48) | \
+ ((uint64_t)b << 32) | \
+ ((uint64_t)c << 16) | \
+ (uint64_t)d)
+#define MIPSDSP_RETURN64_32(a, b) (((uint64_t)a << 32) | (uint64_t)b)
+#endif
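+/* Usage of the SPLIT/RETURN macros, e.g. MIPSDSP_SPLIT32_8(0xAABBCCDD,
+ * a, b, c, d) yields a = 0xAA, b = 0xBB, c = 0xCC, d = 0xDD, and
+ * MIPSDSP_RETURN32_8(a, b, c, d) reassembles the (sign-extended) 32-bit
+ * value 0xAABBCCDD. */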
+
+/** DSP Arithmetic Sub-class insns **/
+#define MIPSDSP32_UNOP_ENV(name, func, element) \
+target_ulong helper_##name(target_ulong rt, CPUMIPSState *env) \
+{ \
+ DSP32Value dt; \
+ unsigned int i; \
+ \
+ dt.sw[0] = rt; \
+ \
+ for (i = 0; i < ARRAY_SIZE(dt.element); i++) { \
+ dt.element[i] = mipsdsp_##func(dt.element[i], env); \
+ } \
+ \
+ return (target_long)dt.sw[0]; \
+}
+MIPSDSP32_UNOP_ENV(absq_s_ph, sat_abs16, sh)
+MIPSDSP32_UNOP_ENV(absq_s_qb, sat_abs8, sb)
+MIPSDSP32_UNOP_ENV(absq_s_w, sat_abs32, sw)
+#undef MIPSDSP32_UNOP_ENV
+
+#if defined(TARGET_MIPS64)
+#define MIPSDSP64_UNOP_ENV(name, func, element) \
+target_ulong helper_##name(target_ulong rt, CPUMIPSState *env) \
+{ \
+ DSP64Value dt; \
+ unsigned int i; \
+ \
+ dt.sl[0] = rt; \
+ \
+ for (i = 0; i < ARRAY_SIZE(dt.element); i++) { \
+ dt.element[i] = mipsdsp_##func(dt.element[i], env); \
+ } \
+ \
+ return dt.sl[0]; \
+}
+MIPSDSP64_UNOP_ENV(absq_s_ob, sat_abs8, sb)
+MIPSDSP64_UNOP_ENV(absq_s_qh, sat_abs16, sh)
+MIPSDSP64_UNOP_ENV(absq_s_pw, sat_abs32, sw)
+#undef MIPSDSP64_UNOP_ENV
+#endif
+
+#define MIPSDSP32_BINOP(name, func, element) \
+target_ulong helper_##name(target_ulong rs, target_ulong rt) \
+{ \
+ DSP32Value ds, dt; \
+ unsigned int i; \
+ \
+ ds.sw[0] = rs; \
+ dt.sw[0] = rt; \
+ \
+ for (i = 0; i < ARRAY_SIZE(ds.element); i++) { \
+ ds.element[i] = mipsdsp_##func(ds.element[i], dt.element[i]); \
+ } \
+ \
+ return (target_long)ds.sw[0]; \
+}
+MIPSDSP32_BINOP(addqh_ph, rshift1_add_q16, sh);
+MIPSDSP32_BINOP(addqh_r_ph, rrshift1_add_q16, sh);
+MIPSDSP32_BINOP(addqh_r_w, rrshift1_add_q32, sw);
+MIPSDSP32_BINOP(addqh_w, rshift1_add_q32, sw);
+MIPSDSP32_BINOP(adduh_qb, rshift1_add_u8, ub);
+MIPSDSP32_BINOP(adduh_r_qb, rrshift1_add_u8, ub);
+MIPSDSP32_BINOP(subqh_ph, rshift1_sub_q16, sh);
+MIPSDSP32_BINOP(subqh_r_ph, rrshift1_sub_q16, sh);
+MIPSDSP32_BINOP(subqh_r_w, rrshift1_sub_q32, sw);
+MIPSDSP32_BINOP(subqh_w, rshift1_sub_q32, sw);
+#undef MIPSDSP32_BINOP
+
+#define MIPSDSP32_BINOP_ENV(name, func, element) \
+target_ulong helper_##name(target_ulong rs, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ DSP32Value ds, dt; \
+ unsigned int i; \
+ \
+ ds.sw[0] = rs; \
+ dt.sw[0] = rt; \
+ \
+ for (i = 0 ; i < ARRAY_SIZE(ds.element); i++) { \
+ ds.element[i] = mipsdsp_##func(ds.element[i], dt.element[i], env); \
+ } \
+ \
+ return (target_long)ds.sw[0]; \
+}
+MIPSDSP32_BINOP_ENV(addq_ph, add_i16, sh)
+MIPSDSP32_BINOP_ENV(addq_s_ph, sat_add_i16, sh)
+MIPSDSP32_BINOP_ENV(addq_s_w, sat_add_i32, sw);
+MIPSDSP32_BINOP_ENV(addu_ph, add_u16, sh)
+MIPSDSP32_BINOP_ENV(addu_qb, add_u8, ub);
+MIPSDSP32_BINOP_ENV(addu_s_ph, sat_add_u16, sh)
+MIPSDSP32_BINOP_ENV(addu_s_qb, sat_add_u8, ub);
+MIPSDSP32_BINOP_ENV(subq_ph, sub_i16, sh);
+MIPSDSP32_BINOP_ENV(subq_s_ph, sat16_sub, sh);
+MIPSDSP32_BINOP_ENV(subq_s_w, sat32_sub, sw);
+MIPSDSP32_BINOP_ENV(subu_ph, sub_u16_u16, sh);
+MIPSDSP32_BINOP_ENV(subu_qb, sub_u8, ub);
+MIPSDSP32_BINOP_ENV(subu_s_ph, satu16_sub_u16_u16, sh);
+MIPSDSP32_BINOP_ENV(subu_s_qb, satu8_sub, ub);
+#undef MIPSDSP32_BINOP_ENV
+
+#ifdef TARGET_MIPS64
+#define MIPSDSP64_BINOP(name, func, element) \
+target_ulong helper_##name(target_ulong rs, target_ulong rt) \
+{ \
+ DSP64Value ds, dt; \
+ unsigned int i; \
+ \
+ ds.sl[0] = rs; \
+ dt.sl[0] = rt; \
+ \
+ for (i = 0 ; i < ARRAY_SIZE(ds.element); i++) { \
+ ds.element[i] = mipsdsp_##func(ds.element[i], dt.element[i]); \
+ } \
+ \
+ return ds.sl[0]; \
+}
+MIPSDSP64_BINOP(adduh_ob, rshift1_add_u8, ub);
+MIPSDSP64_BINOP(adduh_r_ob, rrshift1_add_u8, ub);
+MIPSDSP64_BINOP(subuh_ob, rshift1_sub_u8, ub);
+MIPSDSP64_BINOP(subuh_r_ob, rrshift1_sub_u8, ub);
+#undef MIPSDSP64_BINOP
+
+#define MIPSDSP64_BINOP_ENV(name, func, element) \
+target_ulong helper_##name(target_ulong rs, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ DSP64Value ds, dt; \
+ unsigned int i; \
+ \
+ ds.sl[0] = rs; \
+ dt.sl[0] = rt; \
+ \
+ for (i = 0 ; i < ARRAY_SIZE(ds.element); i++) { \
+ ds.element[i] = mipsdsp_##func(ds.element[i], dt.element[i], env); \
+ } \
+ \
+ return ds.sl[0]; \
+}
+MIPSDSP64_BINOP_ENV(addq_pw, add_i32, sw);
+MIPSDSP64_BINOP_ENV(addq_qh, add_i16, sh);
+MIPSDSP64_BINOP_ENV(addq_s_pw, sat_add_i32, sw);
+MIPSDSP64_BINOP_ENV(addq_s_qh, sat_add_i16, sh);
+MIPSDSP64_BINOP_ENV(addu_ob, add_u8, uh);
+MIPSDSP64_BINOP_ENV(addu_qh, add_u16, uh);
+MIPSDSP64_BINOP_ENV(addu_s_ob, sat_add_u8, uh);
+MIPSDSP64_BINOP_ENV(addu_s_qh, sat_add_u16, uh);
+MIPSDSP64_BINOP_ENV(subq_pw, sub32, sw);
+MIPSDSP64_BINOP_ENV(subq_qh, sub_i16, sh);
+MIPSDSP64_BINOP_ENV(subq_s_pw, sat32_sub, sw);
+MIPSDSP64_BINOP_ENV(subq_s_qh, sat16_sub, sh);
+MIPSDSP64_BINOP_ENV(subu_ob, sub_u8, uh);
+MIPSDSP64_BINOP_ENV(subu_qh, sub_u16_u16, uh);
+MIPSDSP64_BINOP_ENV(subu_s_ob, satu8_sub, uh);
+MIPSDSP64_BINOP_ENV(subu_s_qh, satu16_sub_u16_u16, uh);
+#undef MIPSDSP64_BINOP_ENV
+
+#endif
+
+#define SUBUH_QB(name, var) \
+target_ulong helper_##name##_qb(target_ulong rs, target_ulong rt) \
+{ \
+ uint8_t rs3, rs2, rs1, rs0; \
+ uint8_t rt3, rt2, rt1, rt0; \
+ uint8_t tempD, tempC, tempB, tempA; \
+ \
+ MIPSDSP_SPLIT32_8(rs, rs3, rs2, rs1, rs0); \
+ MIPSDSP_SPLIT32_8(rt, rt3, rt2, rt1, rt0); \
+ \
+ tempD = ((uint16_t)rs3 - (uint16_t)rt3 + var) >> 1; \
+ tempC = ((uint16_t)rs2 - (uint16_t)rt2 + var) >> 1; \
+ tempB = ((uint16_t)rs1 - (uint16_t)rt1 + var) >> 1; \
+ tempA = ((uint16_t)rs0 - (uint16_t)rt0 + var) >> 1; \
+ \
+ return ((uint32_t)tempD << 24) | ((uint32_t)tempC << 16) | \
+ ((uint32_t)tempB << 8) | ((uint32_t)tempA); \
+}
+
+SUBUH_QB(subuh, 0);
+SUBUH_QB(subuh_r, 1);
+
+#undef SUBUH_QB
+
+target_ulong helper_addsc(target_ulong rs, target_ulong rt, CPUMIPSState *env)
+{
+ uint64_t temp, tempRs, tempRt;
+ bool flag;
+
+ tempRs = (uint64_t)rs & MIPSDSP_LLO;
+ tempRt = (uint64_t)rt & MIPSDSP_LLO;
+
+ temp = tempRs + tempRt;
+ flag = (temp & 0x0100000000ull) >> 32;
+ set_DSPControl_carryflag(flag, env);
+
+ return (target_long)(int32_t)(temp & MIPSDSP_LLO);
+}
+
+target_ulong helper_addwc(target_ulong rs, target_ulong rt, CPUMIPSState *env)
+{
+ uint32_t rd;
+ int32_t temp32, temp31;
+ int64_t tempL;
+
+ tempL = (int64_t)(int32_t)rs + (int64_t)(int32_t)rt +
+ get_DSPControl_carryflag(env);
+ temp31 = (tempL >> 31) & 0x01;
+ temp32 = (tempL >> 32) & 0x01;
+
+ if (temp31 != temp32) {
+ set_DSPControl_overflow_flag(1, 20, env);
+ }
+
+ rd = tempL & MIPSDSP_LLO;
+
+ return (target_long)(int32_t)rd;
+}
+
+target_ulong helper_modsub(target_ulong rs, target_ulong rt)
+{
+ int32_t decr;
+ uint16_t lastindex;
+ target_ulong rd;
+
+ decr = rt & MIPSDSP_Q0;
+ lastindex = (rt >> 8) & MIPSDSP_LO;
+
+ if ((rs & MIPSDSP_LLO) == 0x00000000) {
+ rd = (target_ulong)lastindex;
+ } else {
+ rd = rs - decr;
+ }
+
+ return rd;
+}
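+/* MODSUB implements a circular-buffer index decrement: rt packs the
+ * decrement in bits 7..0 and the wrap-around index in bits 23..8, so once
+ * the 32-bit index in rs has reached zero the result restarts at
+ * lastindex; otherwise it is rs - decr. */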
+
+target_ulong helper_raddu_w_qb(target_ulong rs)
+{
+ target_ulong ret = 0;
+ DSP32Value ds;
+ unsigned int i;
+
+ ds.uw[0] = rs;
+ for (i = 0; i < 4; i++) {
+ ret += ds.ub[i];
+ }
+ return ret;
+}
+
+#if defined(TARGET_MIPS64)
+target_ulong helper_raddu_l_ob(target_ulong rs)
+{
+ target_ulong ret = 0;
+ DSP64Value ds;
+ unsigned int i;
+
+ ds.ul[0] = rs;
+ for (i = 0; i < 8; i++) {
+ ret += ds.ub[i];
+ }
+ return ret;
+}
+#endif
+
+#define PRECR_QB_PH(name, a, b)\
+target_ulong helper_##name##_qb_ph(target_ulong rs, target_ulong rt) \
+{ \
+ uint8_t tempD, tempC, tempB, tempA; \
+ \
+ tempD = (rs >> a) & MIPSDSP_Q0; \
+ tempC = (rs >> b) & MIPSDSP_Q0; \
+ tempB = (rt >> a) & MIPSDSP_Q0; \
+ tempA = (rt >> b) & MIPSDSP_Q0; \
+ \
+ return MIPSDSP_RETURN32_8(tempD, tempC, tempB, tempA); \
+}
+
+PRECR_QB_PH(precr, 16, 0);
+PRECR_QB_PH(precrq, 24, 8);
+
+#undef PRECR_QB_PH
+
+target_ulong helper_precr_sra_ph_w(uint32_t sa, target_ulong rs,
+ target_ulong rt)
+{
+ uint16_t tempB, tempA;
+
+ tempB = ((int32_t)rt >> sa) & MIPSDSP_LO;
+ tempA = ((int32_t)rs >> sa) & MIPSDSP_LO;
+
+ return MIPSDSP_RETURN32_16(tempB, tempA);
+}
+
+target_ulong helper_precr_sra_r_ph_w(uint32_t sa,
+ target_ulong rs, target_ulong rt)
+{
+ uint64_t tempB, tempA;
+
+    /* If sa = 0, then (sa - 1) = -1 would be an invalid shift amount,
+       so that case is handled separately below. */
+ if (sa == 0) {
+ tempB = (rt & MIPSDSP_LO) << 1;
+ tempA = (rs & MIPSDSP_LO) << 1;
+ } else {
+ tempB = ((int32_t)rt >> (sa - 1)) + 1;
+ tempA = ((int32_t)rs >> (sa - 1)) + 1;
+ }
+ rt = (((tempB >> 1) & MIPSDSP_LO) << 16) | ((tempA >> 1) & MIPSDSP_LO);
+
+ return (target_long)(int32_t)rt;
+}
+
+target_ulong helper_precrq_ph_w(target_ulong rs, target_ulong rt)
+{
+ uint16_t tempB, tempA;
+
+ tempB = (rs & MIPSDSP_HI) >> 16;
+ tempA = (rt & MIPSDSP_HI) >> 16;
+
+ return MIPSDSP_RETURN32_16(tempB, tempA);
+}
+
+target_ulong helper_precrq_rs_ph_w(target_ulong rs, target_ulong rt,
+ CPUMIPSState *env)
+{
+ uint16_t tempB, tempA;
+
+ tempB = mipsdsp_trunc16_sat16_round(rs, env);
+ tempA = mipsdsp_trunc16_sat16_round(rt, env);
+
+ return MIPSDSP_RETURN32_16(tempB, tempA);
+}
+
+#if defined(TARGET_MIPS64)
+target_ulong helper_precr_ob_qh(target_ulong rs, target_ulong rt)
+{
+ uint8_t rs6, rs4, rs2, rs0;
+ uint8_t rt6, rt4, rt2, rt0;
+ uint64_t temp;
+
+ rs6 = (rs >> 48) & MIPSDSP_Q0;
+ rs4 = (rs >> 32) & MIPSDSP_Q0;
+ rs2 = (rs >> 16) & MIPSDSP_Q0;
+ rs0 = rs & MIPSDSP_Q0;
+ rt6 = (rt >> 48) & MIPSDSP_Q0;
+ rt4 = (rt >> 32) & MIPSDSP_Q0;
+ rt2 = (rt >> 16) & MIPSDSP_Q0;
+ rt0 = rt & MIPSDSP_Q0;
+
+ temp = ((uint64_t)rs6 << 56) | ((uint64_t)rs4 << 48) |
+ ((uint64_t)rs2 << 40) | ((uint64_t)rs0 << 32) |
+ ((uint64_t)rt6 << 24) | ((uint64_t)rt4 << 16) |
+ ((uint64_t)rt2 << 8) | (uint64_t)rt0;
+
+ return temp;
+}
+
+#define PRECR_QH_PW(name, var) \
+target_ulong helper_precr_##name##_qh_pw(target_ulong rs, target_ulong rt, \
+ uint32_t sa) \
+{ \
+ uint16_t rs3, rs2, rs1, rs0; \
+ uint16_t rt3, rt2, rt1, rt0; \
+ uint16_t tempD, tempC, tempB, tempA; \
+ \
+ MIPSDSP_SPLIT64_16(rs, rs3, rs2, rs1, rs0); \
+ MIPSDSP_SPLIT64_16(rt, rt3, rt2, rt1, rt0); \
+ \
+ /* When sa = 0, we use rt2, rt0, rs2, rs0; \
+ * when sa != 0, we use rt3, rt1, rs3, rs1. */ \
+ if (sa == 0) { \
+ tempD = rt2 << var; \
+ tempC = rt0 << var; \
+ tempB = rs2 << var; \
+ tempA = rs0 << var; \
+ } else { \
+ tempD = (((int16_t)rt3 >> sa) + var) >> var; \
+ tempC = (((int16_t)rt1 >> sa) + var) >> var; \
+ tempB = (((int16_t)rs3 >> sa) + var) >> var; \
+ tempA = (((int16_t)rs1 >> sa) + var) >> var; \
+ } \
+ \
+ return MIPSDSP_RETURN64_16(tempD, tempC, tempB, tempA); \
+}
+
+PRECR_QH_PW(sra, 0);
+PRECR_QH_PW(sra_r, 1);
+
+#undef PRECR_QH_PW
+
+target_ulong helper_precrq_ob_qh(target_ulong rs, target_ulong rt)
+{
+ uint8_t rs6, rs4, rs2, rs0;
+ uint8_t rt6, rt4, rt2, rt0;
+ uint64_t temp;
+
+ rs6 = (rs >> 56) & MIPSDSP_Q0;
+ rs4 = (rs >> 40) & MIPSDSP_Q0;
+ rs2 = (rs >> 24) & MIPSDSP_Q0;
+ rs0 = (rs >> 8) & MIPSDSP_Q0;
+ rt6 = (rt >> 56) & MIPSDSP_Q0;
+ rt4 = (rt >> 40) & MIPSDSP_Q0;
+ rt2 = (rt >> 24) & MIPSDSP_Q0;
+ rt0 = (rt >> 8) & MIPSDSP_Q0;
+
+ temp = ((uint64_t)rs6 << 56) | ((uint64_t)rs4 << 48) |
+ ((uint64_t)rs2 << 40) | ((uint64_t)rs0 << 32) |
+ ((uint64_t)rt6 << 24) | ((uint64_t)rt4 << 16) |
+ ((uint64_t)rt2 << 8) | (uint64_t)rt0;
+
+ return temp;
+}
+
+target_ulong helper_precrq_qh_pw(target_ulong rs, target_ulong rt)
+{
+ uint16_t tempD, tempC, tempB, tempA;
+
+ tempD = (rs >> 48) & MIPSDSP_LO;
+ tempC = (rs >> 16) & MIPSDSP_LO;
+ tempB = (rt >> 48) & MIPSDSP_LO;
+ tempA = (rt >> 16) & MIPSDSP_LO;
+
+ return MIPSDSP_RETURN64_16(tempD, tempC, tempB, tempA);
+}
+
+target_ulong helper_precrq_rs_qh_pw(target_ulong rs, target_ulong rt,
+ CPUMIPSState *env)
+{
+ uint32_t rs2, rs0;
+ uint32_t rt2, rt0;
+ uint16_t tempD, tempC, tempB, tempA;
+
+ rs2 = (rs >> 32) & MIPSDSP_LLO;
+ rs0 = rs & MIPSDSP_LLO;
+ rt2 = (rt >> 32) & MIPSDSP_LLO;
+ rt0 = rt & MIPSDSP_LLO;
+
+ tempD = mipsdsp_trunc16_sat16_round(rs2, env);
+ tempC = mipsdsp_trunc16_sat16_round(rs0, env);
+ tempB = mipsdsp_trunc16_sat16_round(rt2, env);
+ tempA = mipsdsp_trunc16_sat16_round(rt0, env);
+
+ return MIPSDSP_RETURN64_16(tempD, tempC, tempB, tempA);
+}
+
+target_ulong helper_precrq_pw_l(target_ulong rs, target_ulong rt)
+{
+ uint32_t tempB, tempA;
+
+ tempB = (rs >> 32) & MIPSDSP_LLO;
+ tempA = (rt >> 32) & MIPSDSP_LLO;
+
+ return MIPSDSP_RETURN64_32(tempB, tempA);
+}
+#endif
+
+target_ulong helper_precrqu_s_qb_ph(target_ulong rs, target_ulong rt,
+ CPUMIPSState *env)
+{
+ uint8_t tempD, tempC, tempB, tempA;
+ uint16_t rsh, rsl, rth, rtl;
+
+ rsh = (rs & MIPSDSP_HI) >> 16;
+ rsl = rs & MIPSDSP_LO;
+ rth = (rt & MIPSDSP_HI) >> 16;
+ rtl = rt & MIPSDSP_LO;
+
+ tempD = mipsdsp_sat8_reduce_precision(rsh, env);
+ tempC = mipsdsp_sat8_reduce_precision(rsl, env);
+ tempB = mipsdsp_sat8_reduce_precision(rth, env);
+ tempA = mipsdsp_sat8_reduce_precision(rtl, env);
+
+ return MIPSDSP_RETURN32_8(tempD, tempC, tempB, tempA);
+}
+
+#if defined(TARGET_MIPS64)
+target_ulong helper_precrqu_s_ob_qh(target_ulong rs, target_ulong rt,
+ CPUMIPSState *env)
+{
+ int i;
+ uint16_t rs3, rs2, rs1, rs0;
+ uint16_t rt3, rt2, rt1, rt0;
+ uint8_t temp[8];
+ uint64_t result;
+
+ result = 0;
+
+ MIPSDSP_SPLIT64_16(rs, rs3, rs2, rs1, rs0);
+ MIPSDSP_SPLIT64_16(rt, rt3, rt2, rt1, rt0);
+
+ temp[7] = mipsdsp_sat8_reduce_precision(rs3, env);
+ temp[6] = mipsdsp_sat8_reduce_precision(rs2, env);
+ temp[5] = mipsdsp_sat8_reduce_precision(rs1, env);
+ temp[4] = mipsdsp_sat8_reduce_precision(rs0, env);
+ temp[3] = mipsdsp_sat8_reduce_precision(rt3, env);
+ temp[2] = mipsdsp_sat8_reduce_precision(rt2, env);
+ temp[1] = mipsdsp_sat8_reduce_precision(rt1, env);
+ temp[0] = mipsdsp_sat8_reduce_precision(rt0, env);
+
+ for (i = 0; i < 8; i++) {
+ result |= (uint64_t)temp[i] << (8 * i);
+ }
+
+ return result;
+}
+
+#define PRECEQ_PW(name, a, b) \
+target_ulong helper_preceq_pw_##name(target_ulong rt) \
+{ \
+ uint16_t tempB, tempA; \
+ uint32_t tempBI, tempAI; \
+ \
+ tempB = (rt >> a) & MIPSDSP_LO; \
+ tempA = (rt >> b) & MIPSDSP_LO; \
+ \
+ tempBI = (uint32_t)tempB << 16; \
+ tempAI = (uint32_t)tempA << 16; \
+ \
+ return MIPSDSP_RETURN64_32(tempBI, tempAI); \
+}
+
+PRECEQ_PW(qhl, 48, 32);
+PRECEQ_PW(qhr, 16, 0);
+PRECEQ_PW(qhla, 48, 16);
+PRECEQ_PW(qhra, 32, 0);
+
+#undef PRECEQ_PW
+
+#endif
+
+#define PRECEQU_PH(name, a, b) \
+target_ulong helper_precequ_ph_##name(target_ulong rt) \
+{ \
+ uint16_t tempB, tempA; \
+ \
+ tempB = (rt >> a) & MIPSDSP_Q0; \
+ tempA = (rt >> b) & MIPSDSP_Q0; \
+ \
+ tempB = tempB << 7; \
+ tempA = tempA << 7; \
+ \
+ return MIPSDSP_RETURN32_16(tempB, tempA); \
+}
+
+PRECEQU_PH(qbl, 24, 16);
+PRECEQU_PH(qbr, 8, 0);
+PRECEQU_PH(qbla, 24, 8);
+PRECEQU_PH(qbra, 16, 0);
+
+#undef PRECEQU_PH
+
+#if defined(TARGET_MIPS64)
+#define PRECEQU_QH(name, a, b, c, d) \
+target_ulong helper_precequ_qh_##name(target_ulong rt) \
+{ \
+ uint16_t tempD, tempC, tempB, tempA; \
+ \
+ tempD = (rt >> a) & MIPSDSP_Q0; \
+ tempC = (rt >> b) & MIPSDSP_Q0; \
+ tempB = (rt >> c) & MIPSDSP_Q0; \
+ tempA = (rt >> d) & MIPSDSP_Q0; \
+ \
+ tempD = tempD << 7; \
+ tempC = tempC << 7; \
+ tempB = tempB << 7; \
+ tempA = tempA << 7; \
+ \
+ return MIPSDSP_RETURN64_16(tempD, tempC, tempB, tempA); \
+}
+
+PRECEQU_QH(obl, 56, 48, 40, 32);
+PRECEQU_QH(obr, 24, 16, 8, 0);
+PRECEQU_QH(obla, 56, 40, 24, 8);
+PRECEQU_QH(obra, 48, 32, 16, 0);
+
+#undef PRECEQU_QH
+
+#endif
+
+#define PRECEU_PH(name, a, b) \
+target_ulong helper_preceu_ph_##name(target_ulong rt) \
+{ \
+ uint16_t tempB, tempA; \
+ \
+ tempB = (rt >> a) & MIPSDSP_Q0; \
+ tempA = (rt >> b) & MIPSDSP_Q0; \
+ \
+ return MIPSDSP_RETURN32_16(tempB, tempA); \
+}
+
+PRECEU_PH(qbl, 24, 16);
+PRECEU_PH(qbr, 8, 0);
+PRECEU_PH(qbla, 24, 8);
+PRECEU_PH(qbra, 16, 0);
+
+#undef PRECEU_PH
+
+#if defined(TARGET_MIPS64)
+#define PRECEU_QH(name, a, b, c, d) \
+target_ulong helper_preceu_qh_##name(target_ulong rt) \
+{ \
+ uint16_t tempD, tempC, tempB, tempA; \
+ \
+ tempD = (rt >> a) & MIPSDSP_Q0; \
+ tempC = (rt >> b) & MIPSDSP_Q0; \
+ tempB = (rt >> c) & MIPSDSP_Q0; \
+ tempA = (rt >> d) & MIPSDSP_Q0; \
+ \
+ return MIPSDSP_RETURN64_16(tempD, tempC, tempB, tempA); \
+}
+
+PRECEU_QH(obl, 56, 48, 40, 32);
+PRECEU_QH(obr, 24, 16, 8, 0);
+PRECEU_QH(obla, 56, 40, 24, 8);
+PRECEU_QH(obra, 48, 32, 16, 0);
+
+#undef PRECEU_QH
+
+#endif
+
+/** DSP GPR-Based Shift Sub-class insns **/
+#define SHIFT_QB(name, func) \
+target_ulong helper_##name##_qb(target_ulong sa, target_ulong rt) \
+{ \
+ uint8_t rt3, rt2, rt1, rt0; \
+ \
+ sa = sa & 0x07; \
+ \
+ MIPSDSP_SPLIT32_8(rt, rt3, rt2, rt1, rt0); \
+ \
+ rt3 = mipsdsp_##func(rt3, sa); \
+ rt2 = mipsdsp_##func(rt2, sa); \
+ rt1 = mipsdsp_##func(rt1, sa); \
+ rt0 = mipsdsp_##func(rt0, sa); \
+ \
+ return MIPSDSP_RETURN32_8(rt3, rt2, rt1, rt0); \
+}
+
+#define SHIFT_QB_ENV(name, func) \
+target_ulong helper_##name##_qb(target_ulong sa, target_ulong rt,\
+ CPUMIPSState *env) \
+{ \
+ uint8_t rt3, rt2, rt1, rt0; \
+ \
+ sa = sa & 0x07; \
+ \
+ MIPSDSP_SPLIT32_8(rt, rt3, rt2, rt1, rt0); \
+ \
+ rt3 = mipsdsp_##func(rt3, sa, env); \
+ rt2 = mipsdsp_##func(rt2, sa, env); \
+ rt1 = mipsdsp_##func(rt1, sa, env); \
+ rt0 = mipsdsp_##func(rt0, sa, env); \
+ \
+ return MIPSDSP_RETURN32_8(rt3, rt2, rt1, rt0); \
+}
+
+SHIFT_QB_ENV(shll, lshift8);
+SHIFT_QB(shrl, rshift_u8);
+
+SHIFT_QB(shra, rashift8);
+SHIFT_QB(shra_r, rnd8_rashift);
+
+#undef SHIFT_QB
+#undef SHIFT_QB_ENV
+
+#if defined(TARGET_MIPS64)
+#define SHIFT_OB(name, func) \
+target_ulong helper_##name##_ob(target_ulong rt, target_ulong sa) \
+{ \
+ int i; \
+ uint8_t rt_t[8]; \
+ uint64_t temp; \
+ \
+ sa = sa & 0x07; \
+ temp = 0; \
+ \
+ for (i = 0; i < 8; i++) { \
+ rt_t[i] = (rt >> (8 * i)) & MIPSDSP_Q0; \
+ rt_t[i] = mipsdsp_##func(rt_t[i], sa); \
+ temp |= (uint64_t)rt_t[i] << (8 * i); \
+ } \
+ \
+ return temp; \
+}
+
+#define SHIFT_OB_ENV(name, func) \
+target_ulong helper_##name##_ob(target_ulong rt, target_ulong sa, \
+ CPUMIPSState *env) \
+{ \
+ int i; \
+ uint8_t rt_t[8]; \
+ uint64_t temp; \
+ \
+ sa = sa & 0x07; \
+ temp = 0; \
+ \
+ for (i = 0; i < 8; i++) { \
+ rt_t[i] = (rt >> (8 * i)) & MIPSDSP_Q0; \
+ rt_t[i] = mipsdsp_##func(rt_t[i], sa, env); \
+ temp |= (uint64_t)rt_t[i] << (8 * i); \
+ } \
+ \
+ return temp; \
+}
+
+SHIFT_OB_ENV(shll, lshift8);
+SHIFT_OB(shrl, rshift_u8);
+
+SHIFT_OB(shra, rashift8);
+SHIFT_OB(shra_r, rnd8_rashift);
+
+#undef SHIFT_OB
+#undef SHIFT_OB_ENV
+
+#endif
+
+#define SHIFT_PH(name, func) \
+target_ulong helper_##name##_ph(target_ulong sa, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ uint16_t rth, rtl; \
+ \
+ sa = sa & 0x0F; \
+ \
+ MIPSDSP_SPLIT32_16(rt, rth, rtl); \
+ \
+ rth = mipsdsp_##func(rth, sa, env); \
+ rtl = mipsdsp_##func(rtl, sa, env); \
+ \
+ return MIPSDSP_RETURN32_16(rth, rtl); \
+}
+
+SHIFT_PH(shll, lshift16);
+SHIFT_PH(shll_s, sat16_lshift);
+
+#undef SHIFT_PH
+
+#if defined(TARGET_MIPS64)
+#define SHIFT_QH(name, func) \
+target_ulong helper_##name##_qh(target_ulong rt, target_ulong sa) \
+{ \
+ uint16_t rt3, rt2, rt1, rt0; \
+ \
+ sa = sa & 0x0F; \
+ \
+ MIPSDSP_SPLIT64_16(rt, rt3, rt2, rt1, rt0); \
+ \
+ rt3 = mipsdsp_##func(rt3, sa); \
+ rt2 = mipsdsp_##func(rt2, sa); \
+ rt1 = mipsdsp_##func(rt1, sa); \
+ rt0 = mipsdsp_##func(rt0, sa); \
+ \
+ return MIPSDSP_RETURN64_16(rt3, rt2, rt1, rt0); \
+}
+
+#define SHIFT_QH_ENV(name, func) \
+target_ulong helper_##name##_qh(target_ulong rt, target_ulong sa, \
+ CPUMIPSState *env) \
+{ \
+ uint16_t rt3, rt2, rt1, rt0; \
+ \
+ sa = sa & 0x0F; \
+ \
+ MIPSDSP_SPLIT64_16(rt, rt3, rt2, rt1, rt0); \
+ \
+ rt3 = mipsdsp_##func(rt3, sa, env); \
+ rt2 = mipsdsp_##func(rt2, sa, env); \
+ rt1 = mipsdsp_##func(rt1, sa, env); \
+ rt0 = mipsdsp_##func(rt0, sa, env); \
+ \
+ return MIPSDSP_RETURN64_16(rt3, rt2, rt1, rt0); \
+}
+
+SHIFT_QH_ENV(shll, lshift16);
+SHIFT_QH_ENV(shll_s, sat16_lshift);
+
+SHIFT_QH(shrl, rshift_u16);
+SHIFT_QH(shra, rashift16);
+SHIFT_QH(shra_r, rnd16_rashift);
+
+#undef SHIFT_QH
+#undef SHIFT_QH_ENV
+
+#endif
+
+#define SHIFT_W(name, func) \
+target_ulong helper_##name##_w(target_ulong sa, target_ulong rt) \
+{ \
+ uint32_t temp; \
+ \
+ sa = sa & 0x1F; \
+ temp = mipsdsp_##func(rt, sa); \
+ \
+ return (target_long)(int32_t)temp; \
+}
+
+#define SHIFT_W_ENV(name, func) \
+target_ulong helper_##name##_w(target_ulong sa, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ uint32_t temp; \
+ \
+ sa = sa & 0x1F; \
+ temp = mipsdsp_##func(rt, sa, env); \
+ \
+ return (target_long)(int32_t)temp; \
+}
+
+SHIFT_W_ENV(shll_s, sat32_lshift);
+SHIFT_W(shra_r, rnd32_rashift);
+
+#undef SHIFT_W
+#undef SHIFT_W_ENV
+
+#if defined(TARGET_MIPS64)
+#define SHIFT_PW(name, func) \
+target_ulong helper_##name##_pw(target_ulong rt, target_ulong sa) \
+{ \
+ uint32_t rt1, rt0; \
+ \
+ sa = sa & 0x1F; \
+ MIPSDSP_SPLIT64_32(rt, rt1, rt0); \
+ \
+ rt1 = mipsdsp_##func(rt1, sa); \
+ rt0 = mipsdsp_##func(rt0, sa); \
+ \
+ return MIPSDSP_RETURN64_32(rt1, rt0); \
+}
+
+#define SHIFT_PW_ENV(name, func) \
+target_ulong helper_##name##_pw(target_ulong rt, target_ulong sa, \
+ CPUMIPSState *env) \
+{ \
+ uint32_t rt1, rt0; \
+ \
+ sa = sa & 0x1F; \
+ MIPSDSP_SPLIT64_32(rt, rt1, rt0); \
+ \
+ rt1 = mipsdsp_##func(rt1, sa, env); \
+ rt0 = mipsdsp_##func(rt0, sa, env); \
+ \
+ return MIPSDSP_RETURN64_32(rt1, rt0); \
+}
+
+SHIFT_PW_ENV(shll, lshift32);
+SHIFT_PW_ENV(shll_s, sat32_lshift);
+
+SHIFT_PW(shra, rashift32);
+SHIFT_PW(shra_r, rnd32_rashift);
+
+#undef SHIFT_PW
+#undef SHIFT_PW_ENV
+
+#endif
+
+#define SHIFT_PH(name, func) \
+target_ulong helper_##name##_ph(target_ulong sa, target_ulong rt) \
+{ \
+ uint16_t rth, rtl; \
+ \
+ sa = sa & 0x0F; \
+ \
+ MIPSDSP_SPLIT32_16(rt, rth, rtl); \
+ \
+ rth = mipsdsp_##func(rth, sa); \
+ rtl = mipsdsp_##func(rtl, sa); \
+ \
+ return MIPSDSP_RETURN32_16(rth, rtl); \
+}
+
+SHIFT_PH(shrl, rshift_u16);
+SHIFT_PH(shra, rashift16);
+SHIFT_PH(shra_r, rnd16_rashift);
+
+#undef SHIFT_PH
+
+/** DSP Multiply Sub-class insns **/
+/* Return value made up of two 16-bit values.
+ * FIXME: give the macro a better name.
+ */
+#define MUL_RETURN32_16_PH(name, func, \
+ rsmov1, rsmov2, rsfilter, \
+ rtmov1, rtmov2, rtfilter) \
+target_ulong helper_##name(target_ulong rs, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ uint16_t rsB, rsA, rtB, rtA; \
+ \
+ rsB = (rs >> rsmov1) & rsfilter; \
+ rsA = (rs >> rsmov2) & rsfilter; \
+ rtB = (rt >> rtmov1) & rtfilter; \
+ rtA = (rt >> rtmov2) & rtfilter; \
+ \
+ rsB = mipsdsp_##func(rsB, rtB, env); \
+ rsA = mipsdsp_##func(rsA, rtA, env); \
+ \
+ return MIPSDSP_RETURN32_16(rsB, rsA); \
+}
+
+MUL_RETURN32_16_PH(muleu_s_ph_qbl, mul_u8_u16, \
+ 24, 16, MIPSDSP_Q0, \
+ 16, 0, MIPSDSP_LO);
+MUL_RETURN32_16_PH(muleu_s_ph_qbr, mul_u8_u16, \
+ 8, 0, MIPSDSP_Q0, \
+ 16, 0, MIPSDSP_LO);
+MUL_RETURN32_16_PH(mulq_rs_ph, rndq15_mul_q15_q15, \
+ 16, 0, MIPSDSP_LO, \
+ 16, 0, MIPSDSP_LO);
+MUL_RETURN32_16_PH(mul_ph, mul_i16_i16, \
+ 16, 0, MIPSDSP_LO, \
+ 16, 0, MIPSDSP_LO);
+MUL_RETURN32_16_PH(mul_s_ph, sat16_mul_i16_i16, \
+ 16, 0, MIPSDSP_LO, \
+ 16, 0, MIPSDSP_LO);
+MUL_RETURN32_16_PH(mulq_s_ph, sat16_mul_q15_q15, \
+ 16, 0, MIPSDSP_LO, \
+ 16, 0, MIPSDSP_LO);
+
+#undef MUL_RETURN32_16_PH
+
+#define MUL_RETURN32_32_ph(name, func, movbits) \
+target_ulong helper_##name(target_ulong rs, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ int16_t rsh, rth; \
+ int32_t temp; \
+ \
+ rsh = (rs >> movbits) & MIPSDSP_LO; \
+ rth = (rt >> movbits) & MIPSDSP_LO; \
+ temp = mipsdsp_##func(rsh, rth, env); \
+ \
+ return (target_long)(int32_t)temp; \
+}
+
+MUL_RETURN32_32_ph(muleq_s_w_phl, mul_q15_q15_overflowflag21, 16);
+MUL_RETURN32_32_ph(muleq_s_w_phr, mul_q15_q15_overflowflag21, 0);
+
+#undef MUL_RETURN32_32_ph
+
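+/* Multiply the high and the low halfword pairs of rs and rt, subtract the
+ * low product from the high product and add the difference to the 64-bit
+ * value held in HI/LO of accumulator ac.
+ */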
+#define MUL_VOID_PH(name, use_ac_env) \
+void helper_##name(uint32_t ac, target_ulong rs, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ int16_t rsh, rsl, rth, rtl; \
+ int32_t tempB, tempA; \
+ int64_t acc, dotp; \
+ \
+ MIPSDSP_SPLIT32_16(rs, rsh, rsl); \
+ MIPSDSP_SPLIT32_16(rt, rth, rtl); \
+ \
+ if (use_ac_env == 1) { \
+ tempB = mipsdsp_mul_q15_q15(ac, rsh, rth, env); \
+ tempA = mipsdsp_mul_q15_q15(ac, rsl, rtl, env); \
+ } else { \
+ tempB = mipsdsp_mul_u16_u16(rsh, rth); \
+ tempA = mipsdsp_mul_u16_u16(rsl, rtl); \
+ } \
+ \
+ dotp = (int64_t)tempB - (int64_t)tempA; \
+ acc = ((uint64_t)env->active_tc.HI[ac] << 32) | \
+ ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO); \
+ dotp = dotp + acc; \
+ env->active_tc.HI[ac] = (target_long)(int32_t) \
+ ((dotp & MIPSDSP_LHI) >> 32); \
+ env->active_tc.LO[ac] = (target_long)(int32_t)(dotp & MIPSDSP_LLO); \
+}
+
+MUL_VOID_PH(mulsaq_s_w_ph, 1);
+MUL_VOID_PH(mulsa_w_ph, 0);
+
+#undef MUL_VOID_PH
+
+#if defined(TARGET_MIPS64)
+#define MUL_RETURN64_16_QH(name, func, \
+ rsmov1, rsmov2, rsmov3, rsmov4, rsfilter, \
+ rtmov1, rtmov2, rtmov3, rtmov4, rtfilter) \
+target_ulong helper_##name(target_ulong rs, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ uint16_t rs3, rs2, rs1, rs0; \
+ uint16_t rt3, rt2, rt1, rt0; \
+ uint16_t tempD, tempC, tempB, tempA; \
+ \
+ rs3 = (rs >> rsmov1) & rsfilter; \
+ rs2 = (rs >> rsmov2) & rsfilter; \
+ rs1 = (rs >> rsmov3) & rsfilter; \
+ rs0 = (rs >> rsmov4) & rsfilter; \
+ rt3 = (rt >> rtmov1) & rtfilter; \
+ rt2 = (rt >> rtmov2) & rtfilter; \
+ rt1 = (rt >> rtmov3) & rtfilter; \
+ rt0 = (rt >> rtmov4) & rtfilter; \
+ \
+ tempD = mipsdsp_##func(rs3, rt3, env); \
+ tempC = mipsdsp_##func(rs2, rt2, env); \
+ tempB = mipsdsp_##func(rs1, rt1, env); \
+ tempA = mipsdsp_##func(rs0, rt0, env); \
+ \
+ return MIPSDSP_RETURN64_16(tempD, tempC, tempB, tempA); \
+}
+
+MUL_RETURN64_16_QH(muleu_s_qh_obl, mul_u8_u16, \
+ 56, 48, 40, 32, MIPSDSP_Q0, \
+ 48, 32, 16, 0, MIPSDSP_LO);
+MUL_RETURN64_16_QH(muleu_s_qh_obr, mul_u8_u16, \
+ 24, 16, 8, 0, MIPSDSP_Q0, \
+ 48, 32, 16, 0, MIPSDSP_LO);
+MUL_RETURN64_16_QH(mulq_rs_qh, rndq15_mul_q15_q15, \
+ 48, 32, 16, 0, MIPSDSP_LO, \
+ 48, 32, 16, 0, MIPSDSP_LO);
+
+#undef MUL_RETURN64_16_QH
+
+#define MUL_RETURN64_32_QH(name, \
+ rsmov1, rsmov2, \
+ rtmov1, rtmov2) \
+target_ulong helper_##name(target_ulong rs, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ uint16_t rsB, rsA; \
+ uint16_t rtB, rtA; \
+ uint32_t tempB, tempA; \
+ \
+ rsB = (rs >> rsmov1) & MIPSDSP_LO; \
+ rsA = (rs >> rsmov2) & MIPSDSP_LO; \
+ rtB = (rt >> rtmov1) & MIPSDSP_LO; \
+ rtA = (rt >> rtmov2) & MIPSDSP_LO; \
+ \
+ tempB = mipsdsp_mul_q15_q15(5, rsB, rtB, env); \
+ tempA = mipsdsp_mul_q15_q15(5, rsA, rtA, env); \
+ \
+ return ((uint64_t)tempB << 32) | (uint64_t)tempA; \
+}
+
+MUL_RETURN64_32_QH(muleq_s_pw_qhl, 48, 32, 48, 32);
+MUL_RETURN64_32_QH(muleq_s_pw_qhr, 16, 0, 16, 0);
+
+#undef MUL_RETURN64_32_QH
+
+void helper_mulsaq_s_w_qh(target_ulong rs, target_ulong rt, uint32_t ac,
+ CPUMIPSState *env)
+{
+ int16_t rs3, rs2, rs1, rs0;
+ int16_t rt3, rt2, rt1, rt0;
+ int32_t tempD, tempC, tempB, tempA;
+ int64_t acc[2];
+ int64_t temp[2];
+ int64_t temp_sum;
+
+ MIPSDSP_SPLIT64_16(rs, rs3, rs2, rs1, rs0);
+ MIPSDSP_SPLIT64_16(rt, rt3, rt2, rt1, rt0);
+
+ tempD = mipsdsp_mul_q15_q15(ac, rs3, rt3, env);
+ tempC = mipsdsp_mul_q15_q15(ac, rs2, rt2, env);
+ tempB = mipsdsp_mul_q15_q15(ac, rs1, rt1, env);
+ tempA = mipsdsp_mul_q15_q15(ac, rs0, rt0, env);
+
+ temp[0] = ((int32_t)tempD - (int32_t)tempC) +
+ ((int32_t)tempB - (int32_t)tempA);
+ temp[0] = (int64_t)(temp[0] << 30) >> 30;
+ if (((temp[0] >> 33) & 0x01) == 0) {
+ temp[1] = 0x00;
+ } else {
+ temp[1] = ~0ull;
+ }
+
+ acc[0] = env->active_tc.LO[ac];
+ acc[1] = env->active_tc.HI[ac];
+
+ temp_sum = acc[0] + temp[0];
+ if (((uint64_t)temp_sum < (uint64_t)acc[0]) &&
+ ((uint64_t)temp_sum < (uint64_t)temp[0])) {
+ acc[1] += 1;
+ }
+ acc[0] = temp_sum;
+ acc[1] += temp[1];
+
+ env->active_tc.HI[ac] = acc[1];
+ env->active_tc.LO[ac] = acc[0];
+}
+#endif
+
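+/* Dot product of two unsigned byte pairs; the sum of products is added to
+ * (or subtracted from) the value held in HI/LO of accumulator ac.
+ */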
+#define DP_QB(name, func, is_add, rsmov1, rsmov2, rtmov1, rtmov2) \
+void helper_##name(uint32_t ac, target_ulong rs, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ uint8_t rs3, rs2; \
+ uint8_t rt3, rt2; \
+ uint16_t tempB, tempA; \
+ uint64_t tempC, dotp; \
+ \
+ rs3 = (rs >> rsmov1) & MIPSDSP_Q0; \
+ rs2 = (rs >> rsmov2) & MIPSDSP_Q0; \
+ rt3 = (rt >> rtmov1) & MIPSDSP_Q0; \
+ rt2 = (rt >> rtmov2) & MIPSDSP_Q0; \
+ tempB = mipsdsp_##func(rs3, rt3); \
+ tempA = mipsdsp_##func(rs2, rt2); \
+ dotp = (int64_t)tempB + (int64_t)tempA; \
+ if (is_add) { \
+ tempC = (((uint64_t)env->active_tc.HI[ac] << 32) | \
+ ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO)) \
+ + dotp; \
+ } else { \
+ tempC = (((uint64_t)env->active_tc.HI[ac] << 32) | \
+ ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO)) \
+ - dotp; \
+ } \
+ \
+ env->active_tc.HI[ac] = (target_long)(int32_t) \
+ ((tempC & MIPSDSP_LHI) >> 32); \
+ env->active_tc.LO[ac] = (target_long)(int32_t)(tempC & MIPSDSP_LLO); \
+}
+
+DP_QB(dpau_h_qbl, mul_u8_u8, 1, 24, 16, 24, 16);
+DP_QB(dpau_h_qbr, mul_u8_u8, 1, 8, 0, 8, 0);
+DP_QB(dpsu_h_qbl, mul_u8_u8, 0, 24, 16, 24, 16);
+DP_QB(dpsu_h_qbr, mul_u8_u8, 0, 8, 0, 8, 0);
+
+#undef DP_QB
+
+#if defined(TARGET_MIPS64)
+#define DP_OB(name, add_sub, \
+ rsmov1, rsmov2, rsmov3, rsmov4, \
+ rtmov1, rtmov2, rtmov3, rtmov4) \
+void helper_##name(target_ulong rs, target_ulong rt, uint32_t ac, \
+ CPUMIPSState *env) \
+{ \
+ uint8_t rsD, rsC, rsB, rsA; \
+ uint8_t rtD, rtC, rtB, rtA; \
+ uint16_t tempD, tempC, tempB, tempA; \
+ uint64_t temp[2]; \
+ uint64_t acc[2]; \
+ uint64_t temp_sum; \
+ \
+ temp[0] = 0; \
+ temp[1] = 0; \
+ \
+ rsD = (rs >> rsmov1) & MIPSDSP_Q0; \
+ rsC = (rs >> rsmov2) & MIPSDSP_Q0; \
+ rsB = (rs >> rsmov3) & MIPSDSP_Q0; \
+ rsA = (rs >> rsmov4) & MIPSDSP_Q0; \
+ rtD = (rt >> rtmov1) & MIPSDSP_Q0; \
+ rtC = (rt >> rtmov2) & MIPSDSP_Q0; \
+ rtB = (rt >> rtmov3) & MIPSDSP_Q0; \
+ rtA = (rt >> rtmov4) & MIPSDSP_Q0; \
+ \
+ tempD = mipsdsp_mul_u8_u8(rsD, rtD); \
+ tempC = mipsdsp_mul_u8_u8(rsC, rtC); \
+ tempB = mipsdsp_mul_u8_u8(rsB, rtB); \
+ tempA = mipsdsp_mul_u8_u8(rsA, rtA); \
+ \
+ temp[0] = (uint64_t)tempD + (uint64_t)tempC + \
+ (uint64_t)tempB + (uint64_t)tempA; \
+ \
+ acc[0] = env->active_tc.LO[ac]; \
+ acc[1] = env->active_tc.HI[ac]; \
+ \
+ if (add_sub) { \
+ temp_sum = acc[0] + temp[0]; \
+ if (((uint64_t)temp_sum < (uint64_t)acc[0]) && \
+ ((uint64_t)temp_sum < (uint64_t)temp[0])) { \
+ acc[1] += 1; \
+ } \
+ temp[0] = temp_sum; \
+ temp[1] = acc[1] + temp[1]; \
+ } else { \
+ temp_sum = acc[0] - temp[0]; \
+ if ((uint64_t)temp_sum > (uint64_t)acc[0]) { \
+ acc[1] -= 1; \
+ } \
+ temp[0] = temp_sum; \
+ temp[1] = acc[1] - temp[1]; \
+ } \
+ \
+ env->active_tc.HI[ac] = temp[1]; \
+ env->active_tc.LO[ac] = temp[0]; \
+}
+
+DP_OB(dpau_h_obl, 1, 56, 48, 40, 32, 56, 48, 40, 32);
+DP_OB(dpau_h_obr, 1, 24, 16, 8, 0, 24, 16, 8, 0);
+DP_OB(dpsu_h_obl, 0, 56, 48, 40, 32, 56, 48, 40, 32);
+DP_OB(dpsu_h_obr, 0, 24, 16, 8, 0, 24, 16, 8, 0);
+
+#undef DP_OB
+#endif
+
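+/* Dot product of the two halfword pairs using plain signed multiplies (no
+ * saturation), accumulated into HI/LO of accumulator ac.
+ */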
+#define DP_NOFUNC_PH(name, is_add, rsmov1, rsmov2, rtmov1, rtmov2) \
+void helper_##name(uint32_t ac, target_ulong rs, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ int16_t rsB, rsA, rtB, rtA; \
+ int32_t tempA, tempB; \
+ int64_t acc; \
+ \
+ rsB = (rs >> rsmov1) & MIPSDSP_LO; \
+ rsA = (rs >> rsmov2) & MIPSDSP_LO; \
+ rtB = (rt >> rtmov1) & MIPSDSP_LO; \
+ rtA = (rt >> rtmov2) & MIPSDSP_LO; \
+ \
+ tempB = (int32_t)rsB * (int32_t)rtB; \
+ tempA = (int32_t)rsA * (int32_t)rtA; \
+ \
+ acc = ((uint64_t)env->active_tc.HI[ac] << 32) | \
+ ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO); \
+ \
+ if (is_add) { \
+ acc = acc + ((int64_t)tempB + (int64_t)tempA); \
+ } else { \
+ acc = acc - ((int64_t)tempB + (int64_t)tempA); \
+ } \
+ \
+ env->active_tc.HI[ac] = (target_long)(int32_t)((acc & MIPSDSP_LHI) >> 32); \
+ env->active_tc.LO[ac] = (target_long)(int32_t)(acc & MIPSDSP_LLO); \
+}
+
+DP_NOFUNC_PH(dpa_w_ph, 1, 16, 0, 16, 0);
+DP_NOFUNC_PH(dpax_w_ph, 1, 16, 0, 0, 16);
+DP_NOFUNC_PH(dps_w_ph, 0, 16, 0, 16, 0);
+DP_NOFUNC_PH(dpsx_w_ph, 0, 16, 0, 0, 16);
+#undef DP_NOFUNC_PH
+
+#define DP_HASFUNC_PH(name, is_add, rsmov1, rsmov2, rtmov1, rtmov2) \
+void helper_##name(uint32_t ac, target_ulong rs, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ int16_t rsB, rsA, rtB, rtA; \
+ int32_t tempB, tempA; \
+ int64_t acc, dotp; \
+ \
+ rsB = (rs >> rsmov1) & MIPSDSP_LO; \
+ rsA = (rs >> rsmov2) & MIPSDSP_LO; \
+ rtB = (rt >> rtmov1) & MIPSDSP_LO; \
+ rtA = (rt >> rtmov2) & MIPSDSP_LO; \
+ \
+ tempB = mipsdsp_mul_q15_q15(ac, rsB, rtB, env); \
+ tempA = mipsdsp_mul_q15_q15(ac, rsA, rtA, env); \
+ \
+ dotp = (int64_t)tempB + (int64_t)tempA; \
+ acc = ((uint64_t)env->active_tc.HI[ac] << 32) | \
+ ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO); \
+ \
+ if (is_add) { \
+ acc = acc + dotp; \
+ } else { \
+ acc = acc - dotp; \
+ } \
+ \
+ env->active_tc.HI[ac] = (target_long)(int32_t) \
+ ((acc & MIPSDSP_LHI) >> 32); \
+ env->active_tc.LO[ac] = (target_long)(int32_t) \
+ (acc & MIPSDSP_LLO); \
+}
+
+DP_HASFUNC_PH(dpaq_s_w_ph, 1, 16, 0, 16, 0);
+DP_HASFUNC_PH(dpaqx_s_w_ph, 1, 16, 0, 0, 16);
+DP_HASFUNC_PH(dpsq_s_w_ph, 0, 16, 0, 16, 0);
+DP_HASFUNC_PH(dpsqx_s_w_ph, 0, 16, 0, 0, 16);
+
+#undef DP_HASFUNC_PH
+
+#define DP_128OPERATION_PH(name, is_add) \
+void helper_##name(uint32_t ac, target_ulong rs, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ int16_t rsh, rsl, rth, rtl; \
+ int32_t tempB, tempA, tempC62_31, tempC63; \
+ int64_t acc, dotp, tempC; \
+ \
+ MIPSDSP_SPLIT32_16(rs, rsh, rsl); \
+ MIPSDSP_SPLIT32_16(rt, rth, rtl); \
+ \
+ tempB = mipsdsp_mul_q15_q15(ac, rsh, rtl, env); \
+ tempA = mipsdsp_mul_q15_q15(ac, rsl, rth, env); \
+ \
+ dotp = (int64_t)tempB + (int64_t)tempA; \
+ acc = ((uint64_t)env->active_tc.HI[ac] << 32) | \
+ ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO); \
+ if (is_add) { \
+ tempC = acc + dotp; \
+ } else { \
+ tempC = acc - dotp; \
+ } \
+ tempC63 = (tempC >> 63) & 0x01; \
+ tempC62_31 = (tempC >> 31) & 0xFFFFFFFF; \
+ \
+ if ((tempC63 == 0) && (tempC62_31 != 0x00000000)) { \
+ tempC = 0x7FFFFFFF; \
+ set_DSPControl_overflow_flag(1, 16 + ac, env); \
+ } \
+ \
+ if ((tempC63 == 1) && (tempC62_31 != 0xFFFFFFFF)) { \
+ tempC = (int64_t)(int32_t)0x80000000; \
+ set_DSPControl_overflow_flag(1, 16 + ac, env); \
+ } \
+ \
+ env->active_tc.HI[ac] = (target_long)(int32_t) \
+ ((tempC & MIPSDSP_LHI) >> 32); \
+ env->active_tc.LO[ac] = (target_long)(int32_t) \
+ (tempC & MIPSDSP_LLO); \
+}
+
+DP_128OPERATION_PH(dpaqx_sa_w_ph, 1);
+DP_128OPERATION_PH(dpsqx_sa_w_ph, 0);
+
+#undef DP_128OPERATION_PH
+
+#if defined(TARGET_MIPS64)
+#define DP_QH(name, is_add, use_ac_env) \
+void helper_##name(target_ulong rs, target_ulong rt, uint32_t ac, \
+ CPUMIPSState *env) \
+{ \
+ int32_t rs3, rs2, rs1, rs0; \
+ int32_t rt3, rt2, rt1, rt0; \
+ int32_t tempD, tempC, tempB, tempA; \
+ int64_t acc[2]; \
+ int64_t temp[2]; \
+ int64_t temp_sum; \
+ \
+ MIPSDSP_SPLIT64_16(rs, rs3, rs2, rs1, rs0); \
+ MIPSDSP_SPLIT64_16(rt, rt3, rt2, rt1, rt0); \
+ \
+ if (use_ac_env) { \
+ tempD = mipsdsp_mul_q15_q15(ac, rs3, rt3, env); \
+ tempC = mipsdsp_mul_q15_q15(ac, rs2, rt2, env); \
+ tempB = mipsdsp_mul_q15_q15(ac, rs1, rt1, env); \
+ tempA = mipsdsp_mul_q15_q15(ac, rs0, rt0, env); \
+ } else { \
+ tempD = mipsdsp_mul_u16_u16(rs3, rt3); \
+ tempC = mipsdsp_mul_u16_u16(rs2, rt2); \
+ tempB = mipsdsp_mul_u16_u16(rs1, rt1); \
+ tempA = mipsdsp_mul_u16_u16(rs0, rt0); \
+ } \
+ \
+ temp[0] = (int64_t)tempD + (int64_t)tempC + \
+ (int64_t)tempB + (int64_t)tempA; \
+ \
+ if (temp[0] >= 0) { \
+ temp[1] = 0; \
+ } else { \
+ temp[1] = ~0ull; \
+ } \
+ \
+ acc[1] = env->active_tc.HI[ac]; \
+ acc[0] = env->active_tc.LO[ac]; \
+ \
+ if (is_add) { \
+ temp_sum = acc[0] + temp[0]; \
+ if (((uint64_t)temp_sum < (uint64_t)acc[0]) && \
+ ((uint64_t)temp_sum < (uint64_t)temp[0])) { \
+ acc[1] = acc[1] + 1; \
+ } \
+ temp[0] = temp_sum; \
+ temp[1] = acc[1] + temp[1]; \
+ } else { \
+ temp_sum = acc[0] - temp[0]; \
+ if ((uint64_t)temp_sum > (uint64_t)acc[0]) { \
+ acc[1] = acc[1] - 1; \
+ } \
+ temp[0] = temp_sum; \
+ temp[1] = acc[1] - temp[1]; \
+ } \
+ \
+ env->active_tc.HI[ac] = temp[1]; \
+ env->active_tc.LO[ac] = temp[0]; \
+}
+
+DP_QH(dpa_w_qh, 1, 0);
+DP_QH(dpaq_s_w_qh, 1, 1);
+DP_QH(dps_w_qh, 0, 0);
+DP_QH(dpsq_s_w_qh, 0, 1);
+
+#undef DP_QH
+
+#endif
+
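+/* Multiply rs and rt as Q31 values and add the product to (or subtract it
+ * from) the 64-bit accumulator ac, saturating the result to 64 bits and
+ * setting the DSPControl overflow flag on overflow.
+ */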
+#define DP_L_W(name, is_add) \
+void helper_##name(uint32_t ac, target_ulong rs, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ int32_t temp63; \
+ int64_t dotp, acc; \
+ uint64_t temp; \
+ bool overflow; \
+ \
+ dotp = mipsdsp_mul_q31_q31(ac, rs, rt, env); \
+ acc = ((uint64_t)env->active_tc.HI[ac] << 32) | \
+ ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO); \
+ if (is_add) { \
+ temp = acc + dotp; \
+ overflow = MIPSDSP_OVERFLOW_ADD((uint64_t)acc, (uint64_t)dotp, \
+ temp, (0x01ull << 63)); \
+ } else { \
+ temp = acc - dotp; \
+ overflow = MIPSDSP_OVERFLOW_SUB((uint64_t)acc, (uint64_t)dotp, \
+ temp, (0x01ull << 63)); \
+ } \
+ \
+ if (overflow) { \
+ temp63 = (temp >> 63) & 0x01; \
+ if (temp63 == 1) { \
+ temp = (0x01ull << 63) - 1; \
+ } else { \
+ temp = 0x01ull << 63; \
+ } \
+ \
+ set_DSPControl_overflow_flag(1, 16 + ac, env); \
+ } \
+ \
+ env->active_tc.HI[ac] = (target_long)(int32_t) \
+ ((temp & MIPSDSP_LHI) >> 32); \
+ env->active_tc.LO[ac] = (target_long)(int32_t) \
+ (temp & MIPSDSP_LLO); \
+}
+
+DP_L_W(dpaq_sa_l_w, 1);
+DP_L_W(dpsq_sa_l_w, 0);
+
+#undef DP_L_W
+
+#if defined(TARGET_MIPS64)
+#define DP_L_PW(name, func) \
+void helper_##name(target_ulong rs, target_ulong rt, uint32_t ac, \
+ CPUMIPSState *env) \
+{ \
+ int32_t rs1, rs0; \
+ int32_t rt1, rt0; \
+ int64_t tempB[2], tempA[2]; \
+ int64_t temp[2]; \
+ int64_t acc[2]; \
+ int64_t temp_sum; \
+ \
+ temp[0] = 0; \
+ temp[1] = 0; \
+ \
+ MIPSDSP_SPLIT64_32(rs, rs1, rs0); \
+ MIPSDSP_SPLIT64_32(rt, rt1, rt0); \
+ \
+ tempB[0] = mipsdsp_mul_q31_q31(ac, rs1, rt1, env); \
+ tempA[0] = mipsdsp_mul_q31_q31(ac, rs0, rt0, env); \
+ \
+ if (tempB[0] >= 0) { \
+ tempB[1] = 0x00; \
+ } else { \
+ tempB[1] = ~0ull; \
+ } \
+ \
+ if (tempA[0] >= 0) { \
+ tempA[1] = 0x00; \
+ } else { \
+ tempA[1] = ~0ull; \
+ } \
+ \
+ temp_sum = tempB[0] + tempA[0]; \
+ if (((uint64_t)temp_sum < (uint64_t)tempB[0]) && \
+ ((uint64_t)temp_sum < (uint64_t)tempA[0])) { \
+ temp[1] += 1; \
+ } \
+ temp[0] = temp_sum; \
+ temp[1] += tempB[1] + tempA[1]; \
+ \
+ mipsdsp_##func(acc, ac, temp, env); \
+ \
+ env->active_tc.HI[ac] = acc[1]; \
+ env->active_tc.LO[ac] = acc[0]; \
+}
+
+DP_L_PW(dpaq_sa_l_pw, sat64_acc_add_q63);
+DP_L_PW(dpsq_sa_l_pw, sat64_acc_sub_q63);
+
+#undef DP_L_PW
+
+void helper_mulsaq_s_l_pw(target_ulong rs, target_ulong rt, uint32_t ac,
+ CPUMIPSState *env)
+{
+ int32_t rs1, rs0;
+ int32_t rt1, rt0;
+ int64_t tempB[2], tempA[2];
+ int64_t temp[2];
+ int64_t acc[2];
+ int64_t temp_sum;
+
+ rs1 = (rs >> 32) & MIPSDSP_LLO;
+ rs0 = rs & MIPSDSP_LLO;
+ rt1 = (rt >> 32) & MIPSDSP_LLO;
+ rt0 = rt & MIPSDSP_LLO;
+
+ tempB[0] = mipsdsp_mul_q31_q31(ac, rs1, rt1, env);
+ tempA[0] = mipsdsp_mul_q31_q31(ac, rs0, rt0, env);
+
+ if (tempB[0] >= 0) {
+ tempB[1] = 0x00;
+ } else {
+ tempB[1] = ~0ull;
+ }
+
+ if (tempA[0] >= 0) {
+ tempA[1] = 0x00;
+ } else {
+ tempA[1] = ~0ull;
+ }
+
+ acc[0] = env->active_tc.LO[ac];
+ acc[1] = env->active_tc.HI[ac];
+
+ temp_sum = tempB[0] - tempA[0];
+ if ((uint64_t)temp_sum > (uint64_t)tempB[0]) {
+ tempB[1] -= 1;
+ }
+ temp[0] = temp_sum;
+ temp[1] = tempB[1] - tempA[1];
+
+ if ((temp[1] & 0x01) == 0) {
+ temp[1] = 0x00;
+ } else {
+ temp[1] = ~0ull;
+ }
+
+ temp_sum = acc[0] + temp[0];
+ if (((uint64_t)temp_sum < (uint64_t)acc[0]) &&
+ ((uint64_t)temp_sum < (uint64_t)temp[0])) {
+ acc[1] += 1;
+ }
+ acc[0] = temp_sum;
+ acc[1] += temp[1];
+
+ env->active_tc.HI[ac] = acc[1];
+ env->active_tc.LO[ac] = acc[0];
+}
+#endif
+
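+/* Multiply one halfword of rs and rt as Q15 values and add the product to
+ * the value held in HI/LO of accumulator ac.
+ */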
+#define MAQ_S_W(name, mov) \
+void helper_##name(uint32_t ac, target_ulong rs, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ int16_t rsh, rth; \
+ int32_t tempA; \
+ int64_t tempL, acc; \
+ \
+ rsh = (rs >> mov) & MIPSDSP_LO; \
+ rth = (rt >> mov) & MIPSDSP_LO; \
+ tempA = mipsdsp_mul_q15_q15(ac, rsh, rth, env); \
+ acc = ((uint64_t)env->active_tc.HI[ac] << 32) | \
+ ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO); \
+ tempL = (int64_t)tempA + acc; \
+ env->active_tc.HI[ac] = (target_long)(int32_t) \
+ ((tempL & MIPSDSP_LHI) >> 32); \
+ env->active_tc.LO[ac] = (target_long)(int32_t) \
+ (tempL & MIPSDSP_LLO); \
+}
+
+MAQ_S_W(maq_s_w_phl, 16);
+MAQ_S_W(maq_s_w_phr, 0);
+
+#undef MAQ_S_W
+
+#define MAQ_SA_W(name, mov) \
+void helper_##name(uint32_t ac, target_ulong rs, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ int16_t rsh, rth; \
+ int32_t tempA; \
+ \
+ rsh = (rs >> mov) & MIPSDSP_LO; \
+ rth = (rt >> mov) & MIPSDSP_LO; \
+ tempA = mipsdsp_mul_q15_q15(ac, rsh, rth, env); \
+ tempA = mipsdsp_sat32_acc_q31(ac, tempA, env); \
+ \
+ env->active_tc.HI[ac] = (target_long)(int32_t)(((int64_t)tempA & \
+ MIPSDSP_LHI) >> 32); \
+ env->active_tc.LO[ac] = (target_long)(int32_t)((int64_t)tempA & \
+ MIPSDSP_LLO); \
+}
+
+MAQ_SA_W(maq_sa_w_phl, 16);
+MAQ_SA_W(maq_sa_w_phr, 0);
+
+#undef MAQ_SA_W
+
+#define MULQ_W(name, addvar) \
+target_ulong helper_##name(target_ulong rs, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ int32_t rs_t, rt_t; \
+ int32_t tempI; \
+ int64_t tempL; \
+ \
+ rs_t = rs & MIPSDSP_LLO; \
+ rt_t = rt & MIPSDSP_LLO; \
+ \
+ if ((rs_t == 0x80000000) && (rt_t == 0x80000000)) { \
+ tempL = 0x7FFFFFFF00000000ull; \
+ set_DSPControl_overflow_flag(1, 21, env); \
+ } else { \
+ tempL = ((int64_t)rs_t * (int64_t)rt_t) << 1; \
+ tempL += addvar; \
+ } \
+ tempI = (tempL & MIPSDSP_LHI) >> 32; \
+ \
+ return (target_long)(int32_t)tempI; \
+}
+
+MULQ_W(mulq_s_w, 0);
+MULQ_W(mulq_rs_w, 0x80000000ull);
+
+#undef MULQ_W
+
+#if defined(TARGET_MIPS64)
+
+#define MAQ_S_W_QH(name, mov) \
+void helper_##name(target_ulong rs, target_ulong rt, uint32_t ac, \
+ CPUMIPSState *env) \
+{ \
+ int16_t rs_t, rt_t; \
+ int32_t temp_mul; \
+ int64_t temp[2]; \
+ int64_t acc[2]; \
+ int64_t temp_sum; \
+ \
+ temp[0] = 0; \
+ temp[1] = 0; \
+ \
+ rs_t = (rs >> mov) & MIPSDSP_LO; \
+ rt_t = (rt >> mov) & MIPSDSP_LO; \
+ temp_mul = mipsdsp_mul_q15_q15(ac, rs_t, rt_t, env); \
+ \
+ temp[0] = (int64_t)temp_mul; \
+ if (temp[0] >= 0) { \
+ temp[1] = 0x00; \
+ } else { \
+ temp[1] = ~0ull; \
+ } \
+ \
+ acc[0] = env->active_tc.LO[ac]; \
+ acc[1] = env->active_tc.HI[ac]; \
+ \
+ temp_sum = acc[0] + temp[0]; \
+ if (((uint64_t)temp_sum < (uint64_t)acc[0]) && \
+ ((uint64_t)temp_sum < (uint64_t)temp[0])) { \
+ acc[1] += 1; \
+ } \
+ acc[0] = temp_sum; \
+ acc[1] += temp[1]; \
+ \
+ env->active_tc.HI[ac] = acc[1]; \
+ env->active_tc.LO[ac] = acc[0]; \
+}
+
+MAQ_S_W_QH(maq_s_w_qhll, 48);
+MAQ_S_W_QH(maq_s_w_qhlr, 32);
+MAQ_S_W_QH(maq_s_w_qhrl, 16);
+MAQ_S_W_QH(maq_s_w_qhrr, 0);
+
+#undef MAQ_S_W_QH
+
+#define MAQ_SA_W(name, mov) \
+void helper_##name(target_ulong rs, target_ulong rt, uint32_t ac, \
+ CPUMIPSState *env) \
+{ \
+ int16_t rs_t, rt_t; \
+ int32_t temp; \
+ int64_t acc[2]; \
+ \
+ rs_t = (rs >> mov) & MIPSDSP_LO; \
+ rt_t = (rt >> mov) & MIPSDSP_LO; \
+ temp = mipsdsp_mul_q15_q15(ac, rs_t, rt_t, env); \
+ temp = mipsdsp_sat32_acc_q31(ac, temp, env); \
+ \
+ acc[0] = (int64_t)(int32_t)temp; \
+ if (acc[0] >= 0) { \
+ acc[1] = 0x00; \
+ } else { \
+ acc[1] = ~0ull; \
+ } \
+ \
+ env->active_tc.HI[ac] = acc[1]; \
+ env->active_tc.LO[ac] = acc[0]; \
+}
+
+MAQ_SA_W(maq_sa_w_qhll, 48);
+MAQ_SA_W(maq_sa_w_qhlr, 32);
+MAQ_SA_W(maq_sa_w_qhrl, 16);
+MAQ_SA_W(maq_sa_w_qhrr, 0);
+
+#undef MAQ_SA_W
+
+#define MAQ_S_L_PW(name, mov) \
+void helper_##name(target_ulong rs, target_ulong rt, uint32_t ac, \
+ CPUMIPSState *env) \
+{ \
+ int32_t rs_t, rt_t; \
+ int64_t temp[2]; \
+ int64_t acc[2]; \
+ int64_t temp_sum; \
+ \
+ temp[0] = 0; \
+ temp[1] = 0; \
+ \
+ rs_t = (rs >> mov) & MIPSDSP_LLO; \
+ rt_t = (rt >> mov) & MIPSDSP_LLO; \
+ \
+ temp[0] = mipsdsp_mul_q31_q31(ac, rs_t, rt_t, env); \
+ if (temp[0] >= 0) { \
+ temp[1] = 0x00; \
+ } else { \
+ temp[1] = ~0ull; \
+ } \
+ \
+ acc[0] = env->active_tc.LO[ac]; \
+ acc[1] = env->active_tc.HI[ac]; \
+ \
+ temp_sum = acc[0] + temp[0]; \
+ if (((uint64_t)temp_sum < (uint64_t)acc[0]) && \
+ ((uint64_t)temp_sum < (uint64_t)temp[0])) { \
+ acc[1] += 1; \
+ } \
+ acc[0] = temp_sum; \
+ acc[1] += temp[1]; \
+ \
+ env->active_tc.HI[ac] = acc[1]; \
+ env->active_tc.LO[ac] = acc[0]; \
+}
+
+MAQ_S_L_PW(maq_s_l_pwl, 32);
+MAQ_S_L_PW(maq_s_l_pwr, 0);
+
+#undef MAQ_S_L_PW
+
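+/* DMADD/DMADDU/DMSUB/DMSUBU: multiply the two 32-bit halves of rs and rt,
+ * then add the sum of products to (or subtract it from) the 128-bit value
+ * held in the HI/LO pair of accumulator ac.
+ */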
+#define DM_OPERATE(name, func, is_add, sigext) \
+void helper_##name(target_ulong rs, target_ulong rt, uint32_t ac, \
+ CPUMIPSState *env) \
+{ \
+ int32_t rs1, rs0; \
+ int32_t rt1, rt0; \
+ int64_t tempBL[2], tempAL[2]; \
+ int64_t acc[2]; \
+ int64_t temp[2]; \
+ int64_t temp_sum; \
+ \
+ temp[0] = 0x00; \
+ temp[1] = 0x00; \
+ \
+ MIPSDSP_SPLIT64_32(rs, rs1, rs0); \
+ MIPSDSP_SPLIT64_32(rt, rt1, rt0); \
+ \
+ if (sigext) { \
+ tempBL[0] = (int64_t)mipsdsp_##func(rs1, rt1); \
+ tempAL[0] = (int64_t)mipsdsp_##func(rs0, rt0); \
+ \
+ if (tempBL[0] >= 0) { \
+ tempBL[1] = 0x0; \
+ } else { \
+ tempBL[1] = ~0ull; \
+ } \
+ \
+ if (tempAL[0] >= 0) { \
+ tempAL[1] = 0x0; \
+ } else { \
+ tempAL[1] = ~0ull; \
+ } \
+ } else { \
+ tempBL[0] = mipsdsp_##func(rs1, rt1); \
+ tempAL[0] = mipsdsp_##func(rs0, rt0); \
+ tempBL[1] = 0; \
+ tempAL[1] = 0; \
+ } \
+ \
+ acc[1] = env->active_tc.HI[ac]; \
+ acc[0] = env->active_tc.LO[ac]; \
+ \
+ temp_sum = tempBL[0] + tempAL[0]; \
+ if (((uint64_t)temp_sum < (uint64_t)tempBL[0]) && \
+ ((uint64_t)temp_sum < (uint64_t)tempAL[0])) { \
+ temp[1] += 1; \
+ } \
+ temp[0] = temp_sum; \
+ temp[1] += tempBL[1] + tempAL[1]; \
+ \
+ if (is_add) { \
+ temp_sum = acc[0] + temp[0]; \
+ if (((uint64_t)temp_sum < (uint64_t)acc[0]) && \
+ ((uint64_t)temp_sum < (uint64_t)temp[0])) { \
+ acc[1] += 1; \
+ } \
+ temp[0] = temp_sum; \
+ temp[1] = acc[1] + temp[1]; \
+ } else { \
+ temp_sum = acc[0] - temp[0]; \
+ if ((uint64_t)temp_sum > (uint64_t)acc[0]) { \
+ acc[1] -= 1; \
+ } \
+ temp[0] = temp_sum; \
+ temp[1] = acc[1] - temp[1]; \
+ } \
+ \
+ env->active_tc.HI[ac] = temp[1]; \
+ env->active_tc.LO[ac] = temp[0]; \
+}
+
+DM_OPERATE(dmadd, mul_i32_i32, 1, 1);
+DM_OPERATE(dmaddu, mul_u32_u32, 1, 0);
+DM_OPERATE(dmsub, mul_i32_i32, 0, 1);
+DM_OPERATE(dmsubu, mul_u32_u32, 0, 0);
+#undef DM_OPERATE
+#endif
+
+/** DSP Bit/Manipulation Sub-class insns **/
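+/* BITREV: reverse the order of the 16 least-significant bits of rt. */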
+target_ulong helper_bitrev(target_ulong rt)
+{
+ int32_t temp;
+ uint32_t rd;
+ int i;
+
+ temp = rt & MIPSDSP_LO;
+ rd = 0;
+ for (i = 0; i < 16; i++) {
+ rd = (rd << 1) | (temp & 1);
+ temp = temp >> 1;
+ }
+
+ return (target_ulong)rd;
+}
+
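+/* INSV/DINSV: insert the low 'size' bits of rs into rt at bit position
+ * 'pos'; both fields are taken from DSPControl.
+ */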
+#define BIT_INSV(name, posfilter, ret_type) \
+target_ulong helper_##name(CPUMIPSState *env, target_ulong rs, \
+ target_ulong rt) \
+{ \
+ uint32_t pos, size, msb, lsb; \
+ uint32_t const sizefilter = 0x3F; \
+ target_ulong temp; \
+ target_ulong dspc; \
+ \
+ dspc = env->active_tc.DSPControl; \
+ \
+ pos = dspc & posfilter; \
+ size = (dspc >> 7) & sizefilter; \
+ \
+ msb = pos + size - 1; \
+ lsb = pos; \
+ \
+ if (lsb > msb || (msb > TARGET_LONG_BITS)) { \
+ return rt; \
+ } \
+ \
+ temp = deposit64(rt, pos, size, rs); \
+ \
+ return (target_long)(ret_type)temp; \
+}
+
+BIT_INSV(insv, 0x1F, int32_t);
+#ifdef TARGET_MIPS64
+BIT_INSV(dinsv, 0x7F, target_long);
+#endif
+
+#undef BIT_INSV
+
+
+/** DSP Compare-Pick Sub-class insns **/
+#define CMP_HAS_RET(name, func, split_num, filter, bit_size) \
+target_ulong helper_##name(target_ulong rs, target_ulong rt) \
+{ \
+ uint32_t rs_t, rt_t; \
+ uint8_t cc; \
+ uint32_t temp = 0; \
+ int i; \
+ \
+ for (i = 0; i < split_num; i++) { \
+ rs_t = (rs >> (bit_size * i)) & filter; \
+ rt_t = (rt >> (bit_size * i)) & filter; \
+ cc = mipsdsp_##func(rs_t, rt_t); \
+ temp |= cc << i; \
+ } \
+ \
+ return (target_ulong)temp; \
+}
+
+CMP_HAS_RET(cmpgu_eq_qb, cmpu_eq, 4, MIPSDSP_Q0, 8);
+CMP_HAS_RET(cmpgu_lt_qb, cmpu_lt, 4, MIPSDSP_Q0, 8);
+CMP_HAS_RET(cmpgu_le_qb, cmpu_le, 4, MIPSDSP_Q0, 8);
+
+#ifdef TARGET_MIPS64
+CMP_HAS_RET(cmpgu_eq_ob, cmpu_eq, 8, MIPSDSP_Q0, 8);
+CMP_HAS_RET(cmpgu_lt_ob, cmpu_lt, 8, MIPSDSP_Q0, 8);
+CMP_HAS_RET(cmpgu_le_ob, cmpu_le, 8, MIPSDSP_Q0, 8);
+#endif
+
+#undef CMP_HAS_RET
+
+
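+/* As above, but the per-element comparison results are written to the
+ * condition code bits of DSPControl (bit 24 upwards) instead of being
+ * returned.
+ */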
+#define CMP_NO_RET(name, func, split_num, filter, bit_size) \
+void helper_##name(target_ulong rs, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ int##bit_size##_t rs_t, rt_t; \
+ int##bit_size##_t flag = 0; \
+ int##bit_size##_t cc; \
+ int i; \
+ \
+ for (i = 0; i < split_num; i++) { \
+ rs_t = (rs >> (bit_size * i)) & filter; \
+ rt_t = (rt >> (bit_size * i)) & filter; \
+ \
+ cc = mipsdsp_##func((int32_t)rs_t, (int32_t)rt_t); \
+ flag |= cc << i; \
+ } \
+ \
+ set_DSPControl_24(flag, split_num, env); \
+}
+
+CMP_NO_RET(cmpu_eq_qb, cmpu_eq, 4, MIPSDSP_Q0, 8);
+CMP_NO_RET(cmpu_lt_qb, cmpu_lt, 4, MIPSDSP_Q0, 8);
+CMP_NO_RET(cmpu_le_qb, cmpu_le, 4, MIPSDSP_Q0, 8);
+
+CMP_NO_RET(cmp_eq_ph, cmp_eq, 2, MIPSDSP_LO, 16);
+CMP_NO_RET(cmp_lt_ph, cmp_lt, 2, MIPSDSP_LO, 16);
+CMP_NO_RET(cmp_le_ph, cmp_le, 2, MIPSDSP_LO, 16);
+
+#ifdef TARGET_MIPS64
+CMP_NO_RET(cmpu_eq_ob, cmpu_eq, 8, MIPSDSP_Q0, 8);
+CMP_NO_RET(cmpu_lt_ob, cmpu_lt, 8, MIPSDSP_Q0, 8);
+CMP_NO_RET(cmpu_le_ob, cmpu_le, 8, MIPSDSP_Q0, 8);
+
+CMP_NO_RET(cmp_eq_qh, cmp_eq, 4, MIPSDSP_LO, 16);
+CMP_NO_RET(cmp_lt_qh, cmp_lt, 4, MIPSDSP_LO, 16);
+CMP_NO_RET(cmp_le_qh, cmp_le, 4, MIPSDSP_LO, 16);
+
+CMP_NO_RET(cmp_eq_pw, cmp_eq, 2, MIPSDSP_LLO, 32);
+CMP_NO_RET(cmp_lt_pw, cmp_lt, 2, MIPSDSP_LLO, 32);
+CMP_NO_RET(cmp_le_pw, cmp_le, 2, MIPSDSP_LLO, 32);
+#endif
+#undef CMP_NO_RET
+
+#if defined(TARGET_MIPS64)
+
+#define CMPGDU_OB(name) \
+target_ulong helper_cmpgdu_##name##_ob(target_ulong rs, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ int i; \
+ uint8_t rs_t, rt_t; \
+ uint32_t cond; \
+ \
+ cond = 0; \
+ \
+ for (i = 0; i < 8; i++) { \
+ rs_t = (rs >> (8 * i)) & MIPSDSP_Q0; \
+ rt_t = (rt >> (8 * i)) & MIPSDSP_Q0; \
+ \
+ if (mipsdsp_cmpu_##name(rs_t, rt_t)) { \
+ cond |= 0x01 << i; \
+ } \
+ } \
+ \
+ set_DSPControl_24(cond, 8, env); \
+ \
+ return (uint64_t)cond; \
+}
+
+CMPGDU_OB(eq)
+CMPGDU_OB(lt)
+CMPGDU_OB(le)
+#undef CMPGDU_OB
+#endif
+
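+/* Pick each element from rs or rt according to the corresponding condition
+ * code bit in DSPControl (bit 24 upwards).
+ */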
+#define PICK_INSN(name, split_num, filter, bit_size, ret32bit) \
+target_ulong helper_##name(target_ulong rs, target_ulong rt, \
+ CPUMIPSState *env) \
+{ \
+ uint32_t rs_t, rt_t; \
+ uint32_t cc; \
+ target_ulong dsp; \
+ int i; \
+ target_ulong result = 0; \
+ \
+ dsp = env->active_tc.DSPControl; \
+ for (i = 0; i < split_num; i++) { \
+ rs_t = (rs >> (bit_size * i)) & filter; \
+ rt_t = (rt >> (bit_size * i)) & filter; \
+ cc = (dsp >> (24 + i)) & 0x01; \
+ cc = cc == 1 ? rs_t : rt_t; \
+ \
+ result |= (target_ulong)cc << (bit_size * i); \
+ } \
+ \
+ if (ret32bit) { \
+ result = (target_long)(int32_t)(result & MIPSDSP_LLO); \
+ } \
+ \
+ return result; \
+}
+
+PICK_INSN(pick_qb, 4, MIPSDSP_Q0, 8, 1);
+PICK_INSN(pick_ph, 2, MIPSDSP_LO, 16, 1);
+
+#ifdef TARGET_MIPS64
+PICK_INSN(pick_ob, 8, MIPSDSP_Q0, 8, 0);
+PICK_INSN(pick_qh, 4, MIPSDSP_LO, 16, 0);
+PICK_INSN(pick_pw, 2, MIPSDSP_LLO, 32, 0);
+#endif
+#undef PICK_INSN
+
+target_ulong helper_packrl_ph(target_ulong rs, target_ulong rt)
+{
+ uint32_t rsl, rth;
+
+ rsl = rs & MIPSDSP_LO;
+ rth = (rt & MIPSDSP_HI) >> 16;
+
+ return (target_long)(int32_t)((rsl << 16) | rth);
+}
+
+#if defined(TARGET_MIPS64)
+target_ulong helper_packrl_pw(target_ulong rs, target_ulong rt)
+{
+ uint32_t rs0, rt1;
+
+ rs0 = rs & MIPSDSP_LLO;
+ rt1 = (rt >> 32) & MIPSDSP_LLO;
+
+ return ((uint64_t)rs0 << 32) | (uint64_t)rt1;
+}
+#endif
+
+/** DSP Accumulator and DSPControl Access Sub-class insns **/
+target_ulong helper_extr_w(target_ulong ac, target_ulong shift,
+ CPUMIPSState *env)
+{
+ int32_t tempI;
+ int64_t tempDL[2];
+
+ shift = shift & 0x1F;
+
+ mipsdsp_rndrashift_short_acc(tempDL, ac, shift, env);
+ if ((tempDL[1] != 0 || (tempDL[0] & MIPSDSP_LHI) != 0) &&
+ (tempDL[1] != 1 || (tempDL[0] & MIPSDSP_LHI) != MIPSDSP_LHI)) {
+ set_DSPControl_overflow_flag(1, 23, env);
+ }
+
+ tempI = (tempDL[0] >> 1) & MIPSDSP_LLO;
+
+ tempDL[0] += 1;
+ if (tempDL[0] == 0) {
+ tempDL[1] += 1;
+ }
+
+ if (((tempDL[1] & 0x01) != 0 || (tempDL[0] & MIPSDSP_LHI) != 0) &&
+ ((tempDL[1] & 0x01) != 1 || (tempDL[0] & MIPSDSP_LHI) != MIPSDSP_LHI)) {
+ set_DSPControl_overflow_flag(1, 23, env);
+ }
+
+ return (target_long)tempI;
+}
+
+target_ulong helper_extr_r_w(target_ulong ac, target_ulong shift,
+ CPUMIPSState *env)
+{
+ int64_t tempDL[2];
+
+ shift = shift & 0x1F;
+
+ mipsdsp_rndrashift_short_acc(tempDL, ac, shift, env);
+ if ((tempDL[1] != 0 || (tempDL[0] & MIPSDSP_LHI) != 0) &&
+ (tempDL[1] != 1 || (tempDL[0] & MIPSDSP_LHI) != MIPSDSP_LHI)) {
+ set_DSPControl_overflow_flag(1, 23, env);
+ }
+
+ tempDL[0] += 1;
+ if (tempDL[0] == 0) {
+ tempDL[1] += 1;
+ }
+
+ if (((tempDL[1] & 0x01) != 0 || (tempDL[0] & MIPSDSP_LHI) != 0) &&
+ ((tempDL[1] & 0x01) != 1 || (tempDL[0] & MIPSDSP_LHI) != MIPSDSP_LHI)) {
+ set_DSPControl_overflow_flag(1, 23, env);
+ }
+
+ return (target_long)(int32_t)(tempDL[0] >> 1);
+}
+
+target_ulong helper_extr_rs_w(target_ulong ac, target_ulong shift,
+ CPUMIPSState *env)
+{
+ int32_t tempI, temp64;
+ int64_t tempDL[2];
+
+ shift = shift & 0x1F;
+
+ mipsdsp_rndrashift_short_acc(tempDL, ac, shift, env);
+ if ((tempDL[1] != 0 || (tempDL[0] & MIPSDSP_LHI) != 0) &&
+ (tempDL[1] != 1 || (tempDL[0] & MIPSDSP_LHI) != MIPSDSP_LHI)) {
+ set_DSPControl_overflow_flag(1, 23, env);
+ }
+ tempDL[0] += 1;
+ if (tempDL[0] == 0) {
+ tempDL[1] += 1;
+ }
+ tempI = tempDL[0] >> 1;
+
+ if (((tempDL[1] & 0x01) != 0 || (tempDL[0] & MIPSDSP_LHI) != 0) &&
+ ((tempDL[1] & 0x01) != 1 || (tempDL[0] & MIPSDSP_LHI) != MIPSDSP_LHI)) {
+ temp64 = tempDL[1] & 0x01;
+ if (temp64 == 0) {
+ tempI = 0x7FFFFFFF;
+ } else {
+ tempI = 0x80000000;
+ }
+ set_DSPControl_overflow_flag(1, 23, env);
+ }
+
+ return (target_long)tempI;
+}
+
+#if defined(TARGET_MIPS64)
+target_ulong helper_dextr_w(target_ulong ac, target_ulong shift,
+ CPUMIPSState *env)
+{
+ uint64_t temp[3];
+
+ shift = shift & 0x3F;
+
+ mipsdsp_rndrashift_acc(temp, ac, shift, env);
+
+ return (int64_t)(int32_t)(temp[0] >> 1);
+}
+
+target_ulong helper_dextr_r_w(target_ulong ac, target_ulong shift,
+ CPUMIPSState *env)
+{
+ uint64_t temp[3];
+ uint32_t temp128;
+
+ shift = shift & 0x3F;
+ mipsdsp_rndrashift_acc(temp, ac, shift, env);
+
+ temp[0] += 1;
+ if (temp[0] == 0) {
+ temp[1] += 1;
+ if (temp[1] == 0) {
+ temp[2] += 1;
+ }
+ }
+
+ temp128 = temp[2] & 0x01;
+
+ if ((temp128 != 0 || temp[1] != 0) &&
+ (temp128 != 1 || temp[1] != ~0ull)) {
+ set_DSPControl_overflow_flag(1, 23, env);
+ }
+
+ return (int64_t)(int32_t)(temp[0] >> 1);
+}
+
+target_ulong helper_dextr_rs_w(target_ulong ac, target_ulong shift,
+ CPUMIPSState *env)
+{
+ uint64_t temp[3];
+ uint32_t temp128;
+
+ shift = shift & 0x3F;
+ mipsdsp_rndrashift_acc(temp, ac, shift, env);
+
+ temp[0] += 1;
+ if (temp[0] == 0) {
+ temp[1] += 1;
+ if (temp[1] == 0) {
+ temp[2] += 1;
+ }
+ }
+
+ temp128 = temp[2] & 0x01;
+
+ if ((temp128 != 0 || temp[1] != 0) &&
+ (temp128 != 1 || temp[1] != ~0ull)) {
+ if (temp128 == 0) {
+ temp[0] = 0x0FFFFFFFF;
+ } else {
+ temp[0] = 0x0100000000ULL;
+ }
+ set_DSPControl_overflow_flag(1, 23, env);
+ }
+
+ return (int64_t)(int32_t)(temp[0] >> 1);
+}
+
+target_ulong helper_dextr_l(target_ulong ac, target_ulong shift,
+ CPUMIPSState *env)
+{
+ uint64_t temp[3];
+ target_ulong result;
+
+ shift = shift & 0x3F;
+
+ mipsdsp_rndrashift_acc(temp, ac, shift, env);
+ result = (temp[1] << 63) | (temp[0] >> 1);
+
+ return result;
+}
+
+target_ulong helper_dextr_r_l(target_ulong ac, target_ulong shift,
+ CPUMIPSState *env)
+{
+ uint64_t temp[3];
+ uint32_t temp128;
+ target_ulong result;
+
+ shift = shift & 0x3F;
+ mipsdsp_rndrashift_acc(temp, ac, shift, env);
+
+ temp[0] += 1;
+ if (temp[0] == 0) {
+ temp[1] += 1;
+ if (temp[1] == 0) {
+ temp[2] += 1;
+ }
+ }
+
+ temp128 = temp[2] & 0x01;
+
+ if ((temp128 != 0 || temp[1] != 0) &&
+ (temp128 != 1 || temp[1] != ~0ull)) {
+ set_DSPControl_overflow_flag(1, 23, env);
+ }
+
+ result = (temp[1] << 63) | (temp[0] >> 1);
+
+ return result;
+}
+
+target_ulong helper_dextr_rs_l(target_ulong ac, target_ulong shift,
+ CPUMIPSState *env)
+{
+ uint64_t temp[3];
+ uint32_t temp128;
+ target_ulong result;
+
+ shift = shift & 0x3F;
+ mipsdsp_rndrashift_acc(temp, ac, shift, env);
+
+ temp[0] += 1;
+ if (temp[0] == 0) {
+ temp[1] += 1;
+ if (temp[1] == 0) {
+ temp[2] += 1;
+ }
+ }
+
+ temp128 = temp[2] & 0x01;
+
+ if ((temp128 != 0 || temp[1] != 0) &&
+ (temp128 != 1 || temp[1] != ~0ull)) {
+ if (temp128 == 0) {
+ temp[1] &= ~0x00ull - 1;
+ temp[0] |= ~0x00ull - 1;
+ } else {
+ temp[1] |= 0x01;
+ temp[0] &= 0x01;
+ }
+ set_DSPControl_overflow_flag(1, 23, env);
+ }
+ result = (temp[1] << 63) | (temp[0] >> 1);
+
+ return result;
+}
+#endif
+
+target_ulong helper_extr_s_h(target_ulong ac, target_ulong shift,
+ CPUMIPSState *env)
+{
+ int64_t temp, acc;
+
+ shift = shift & 0x1F;
+
+ acc = ((int64_t)env->active_tc.HI[ac] << 32) |
+ ((int64_t)env->active_tc.LO[ac] & 0xFFFFFFFF);
+
+ temp = acc >> shift;
+
+ if (temp > (int64_t)0x7FFF) {
+ temp = 0x00007FFF;
+ set_DSPControl_overflow_flag(1, 23, env);
+ } else if (temp < (int64_t)0xFFFFFFFFFFFF8000ULL) {
+ temp = 0xFFFF8000;
+ set_DSPControl_overflow_flag(1, 23, env);
+ }
+
+ return (target_long)(int32_t)(temp & 0xFFFFFFFF);
+}
+
+
+#if defined(TARGET_MIPS64)
+target_ulong helper_dextr_s_h(target_ulong ac, target_ulong shift,
+ CPUMIPSState *env)
+{
+ int64_t temp[2];
+ uint32_t temp127;
+
+ shift = shift & 0x1F;
+
+ mipsdsp_rashift_acc((uint64_t *)temp, ac, shift, env);
+
+ temp127 = (temp[1] >> 63) & 0x01;
+
+ if ((temp127 == 0) && (temp[1] > 0 || temp[0] > 32767)) {
+ temp[0] &= 0xFFFF0000;
+ temp[0] |= 0x00007FFF;
+ set_DSPControl_overflow_flag(1, 23, env);
+ } else if ((temp127 == 1) &&
+ (temp[1] < 0xFFFFFFFFFFFFFFFFll
+ || temp[0] < 0xFFFFFFFFFFFF8000ll)) {
+ temp[0] &= 0xFFFF0000;
+ temp[0] |= 0x00008000;
+ set_DSPControl_overflow_flag(1, 23, env);
+ }
+
+ return (int64_t)(int16_t)(temp[0] & MIPSDSP_LO);
+}
+
+#endif
+
+target_ulong helper_extp(target_ulong ac, target_ulong size, CPUMIPSState *env)
+{
+ int32_t start_pos;
+ int sub;
+ uint32_t temp;
+ uint64_t acc;
+
+ size = size & 0x1F;
+
+ temp = 0;
+ start_pos = get_DSPControl_pos(env);
+ sub = start_pos - (size + 1);
+ if (sub >= -1) {
+ acc = ((uint64_t)env->active_tc.HI[ac] << 32) |
+ ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO);
+ temp = (acc >> (start_pos - size)) & (~0U >> (31 - size));
+ set_DSPControl_efi(0, env);
+ } else {
+ set_DSPControl_efi(1, env);
+ }
+
+ return (target_ulong)temp;
+}
+
+target_ulong helper_extpdp(target_ulong ac, target_ulong size,
+ CPUMIPSState *env)
+{
+ int32_t start_pos;
+ int sub;
+ uint32_t temp;
+ uint64_t acc;
+
+ size = size & 0x1F;
+ temp = 0;
+ start_pos = get_DSPControl_pos(env);
+ sub = start_pos - (size + 1);
+ if (sub >= -1) {
+ acc = ((uint64_t)env->active_tc.HI[ac] << 32) |
+ ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO);
+ temp = extract64(acc, start_pos - size, size + 1);
+
+ set_DSPControl_pos(sub, env);
+ set_DSPControl_efi(0, env);
+ } else {
+ set_DSPControl_efi(1, env);
+ }
+
+ return (target_ulong)temp;
+}
+
+
+#if defined(TARGET_MIPS64)
+target_ulong helper_dextp(target_ulong ac, target_ulong size, CPUMIPSState *env)
+{
+ int start_pos;
+ int len;
+ int sub;
+ uint64_t tempB, tempA;
+ uint64_t temp;
+
+ temp = 0;
+
+ size = size & 0x3F;
+ start_pos = get_DSPControl_pos(env);
+ len = start_pos - size;
+ tempB = env->active_tc.HI[ac];
+ tempA = env->active_tc.LO[ac];
+
+ sub = start_pos - (size + 1);
+
+ if (sub >= -1) {
+ temp = (tempB << (64 - len)) | (tempA >> len);
+ temp = temp & ((1ULL << (size + 1)) - 1);
+ set_DSPControl_efi(0, env);
+ } else {
+ set_DSPControl_efi(1, env);
+ }
+
+ return temp;
+}
+
+target_ulong helper_dextpdp(target_ulong ac, target_ulong size,
+ CPUMIPSState *env)
+{
+ int start_pos;
+ int len;
+ int sub;
+ uint64_t tempB, tempA;
+ uint64_t temp;
+
+ temp = 0;
+ size = size & 0x3F;
+ start_pos = get_DSPControl_pos(env);
+ len = start_pos - size;
+ tempB = env->active_tc.HI[ac];
+ tempA = env->active_tc.LO[ac];
+
+ sub = start_pos - (size + 1);
+
+ if (sub >= -1) {
+ temp = (tempB << (64 - len)) | (tempA >> len);
+ temp = temp & ((1ULL << (size + 1)) - 1);
+ set_DSPControl_pos(sub, env);
+ set_DSPControl_efi(0, env);
+ } else {
+ set_DSPControl_efi(1, env);
+ }
+
+ return temp;
+}
+
+#endif
+
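+/* SHILO: shift the 64-bit value in HI/LO of accumulator ac right by the
+ * 6-bit signed amount in rs (a negative amount shifts left).
+ */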
+void helper_shilo(target_ulong ac, target_ulong rs, CPUMIPSState *env)
+{
+ int8_t rs5_0;
+ uint64_t temp, acc;
+
+ rs5_0 = rs & 0x3F;
+ rs5_0 = (int8_t)(rs5_0 << 2) >> 2;
+
+ if (unlikely(rs5_0 == 0)) {
+ return;
+ }
+
+ acc = (((uint64_t)env->active_tc.HI[ac] << 32) & MIPSDSP_LHI) |
+ ((uint64_t)env->active_tc.LO[ac] & MIPSDSP_LLO);
+
+ if (rs5_0 > 0) {
+ temp = acc >> rs5_0;
+ } else {
+ temp = acc << -rs5_0;
+ }
+
+ env->active_tc.HI[ac] = (target_ulong)(int32_t)((temp & MIPSDSP_LHI) >> 32);
+ env->active_tc.LO[ac] = (target_ulong)(int32_t)(temp & MIPSDSP_LLO);
+}
+
+#if defined(TARGET_MIPS64)
+void helper_dshilo(target_ulong shift, target_ulong ac, CPUMIPSState *env)
+{
+ int8_t shift_t;
+ uint64_t tempB, tempA;
+
+ shift_t = (int8_t)(shift << 1) >> 1;
+
+ tempB = env->active_tc.HI[ac];
+ tempA = env->active_tc.LO[ac];
+
+ if (shift_t != 0) {
+ if (shift_t >= 0) {
+ tempA = (tempB << (64 - shift_t)) | (tempA >> shift_t);
+ tempB = tempB >> shift_t;
+ } else {
+ shift_t = -shift_t;
+ tempB = (tempB << shift_t) | (tempA >> (64 - shift_t));
+ tempA = tempA << shift_t;
+ }
+ }
+
+ env->active_tc.HI[ac] = tempB;
+ env->active_tc.LO[ac] = tempA;
+}
+
+#endif
+
+void helper_mthlip(target_ulong ac, target_ulong rs, CPUMIPSState *env)
+{
+ int32_t tempA, tempB, pos;
+
+ tempA = rs;
+ tempB = env->active_tc.LO[ac];
+ env->active_tc.HI[ac] = (target_long)tempB;
+ env->active_tc.LO[ac] = (target_long)tempA;
+ pos = get_DSPControl_pos(env);
+
+ if (pos > 32) {
+ return;
+ } else {
+ set_DSPControl_pos(pos + 32, env);
+ }
+}
+
+#if defined(TARGET_MIPS64)
+void helper_dmthlip(target_ulong rs, target_ulong ac, CPUMIPSState *env)
+{
+ uint8_t ac_t;
+ uint8_t pos;
+ uint64_t tempB, tempA;
+
+ ac_t = ac & 0x3;
+
+ tempA = rs;
+ tempB = env->active_tc.LO[ac_t];
+
+ env->active_tc.HI[ac_t] = tempB;
+ env->active_tc.LO[ac_t] = tempA;
+
+ pos = get_DSPControl_pos(env);
+
+ if (pos <= 64) {
+ pos = pos + 64;
+ set_DSPControl_pos(pos, env);
+ }
+}
+#endif
+
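+/* Write the DSPControl fields selected by mask_num with the corresponding
+ * bits of rs; unselected fields are preserved.
+ */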
+void cpu_wrdsp(uint32_t rs, uint32_t mask_num, CPUMIPSState *env)
+{
+ uint8_t mask[6];
+ uint8_t i;
+ uint32_t newbits, overwrite;
+ target_ulong dsp;
+
+ newbits = 0x00;
+ overwrite = 0xFFFFFFFF;
+ dsp = env->active_tc.DSPControl;
+
+ for (i = 0; i < 6; i++) {
+ mask[i] = (mask_num >> i) & 0x01;
+ }
+
+ if (mask[0] == 1) {
+#if defined(TARGET_MIPS64)
+ overwrite &= 0xFFFFFF80;
+ newbits &= 0xFFFFFF80;
+ newbits |= 0x0000007F & rs;
+#else
+ overwrite &= 0xFFFFFFC0;
+ newbits &= 0xFFFFFFC0;
+ newbits |= 0x0000003F & rs;
+#endif
+ }
+
+ if (mask[1] == 1) {
+ overwrite &= 0xFFFFE07F;
+ newbits &= 0xFFFFE07F;
+ newbits |= 0x00001F80 & rs;
+ }
+
+ if (mask[2] == 1) {
+ overwrite &= 0xFFFFDFFF;
+ newbits &= 0xFFFFDFFF;
+ newbits |= 0x00002000 & rs;
+ }
+
+ if (mask[3] == 1) {
+ overwrite &= 0xFF00FFFF;
+ newbits &= 0xFF00FFFF;
+ newbits |= 0x00FF0000 & rs;
+ }
+
+ if (mask[4] == 1) {
+ overwrite &= 0x00FFFFFF;
+ newbits &= 0x00FFFFFF;
+#if defined(TARGET_MIPS64)
+ newbits |= 0xFF000000 & rs;
+#else
+ newbits |= 0x0F000000 & rs;
+#endif
+ }
+
+ if (mask[5] == 1) {
+ overwrite &= 0xFFFFBFFF;
+ newbits &= 0xFFFFBFFF;
+ newbits |= 0x00004000 & rs;
+ }
+
+ dsp = dsp & overwrite;
+ dsp = dsp | newbits;
+ env->active_tc.DSPControl = dsp;
+}
+
+void helper_wrdsp(target_ulong rs, target_ulong mask_num, CPUMIPSState *env)
+{
+ cpu_wrdsp(rs, mask_num, env);
+}
+
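+/* Read the DSPControl fields selected by mask_num; unselected fields read
+ * as zero.
+ */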
+uint32_t cpu_rddsp(uint32_t mask_num, CPUMIPSState *env)
+{
+ uint8_t mask[6];
+ uint32_t ruler, i;
+ target_ulong temp;
+ target_ulong dsp;
+
+ ruler = 0x01;
+ for (i = 0; i < 6; i++) {
+ mask[i] = (mask_num & ruler) >> i;
+ ruler = ruler << 1;
+ }
+
+ temp = 0x00;
+ dsp = env->active_tc.DSPControl;
+
+ if (mask[0] == 1) {
+#if defined(TARGET_MIPS64)
+ temp |= dsp & 0x7F;
+#else
+ temp |= dsp & 0x3F;
+#endif
+ }
+
+ if (mask[1] == 1) {
+ temp |= dsp & 0x1F80;
+ }
+
+ if (mask[2] == 1) {
+ temp |= dsp & 0x2000;
+ }
+
+ if (mask[3] == 1) {
+ temp |= dsp & 0x00FF0000;
+ }
+
+ if (mask[4] == 1) {
+#if defined(TARGET_MIPS64)
+ temp |= dsp & 0xFF000000;
+#else
+ temp |= dsp & 0x0F000000;
+#endif
+ }
+
+ if (mask[5] == 1) {
+ temp |= dsp & 0x4000;
+ }
+
+ return temp;
+}
+
+target_ulong helper_rddsp(target_ulong mask_num, CPUMIPSState *env)
+{
+ return cpu_rddsp(mask_num, env);
+}
+
+
+#undef MIPSDSP_LHI
+#undef MIPSDSP_LLO
+#undef MIPSDSP_HI
+#undef MIPSDSP_LO
+#undef MIPSDSP_Q3
+#undef MIPSDSP_Q2
+#undef MIPSDSP_Q1
+#undef MIPSDSP_Q0
+
+#undef MIPSDSP_SPLIT32_8
+#undef MIPSDSP_SPLIT32_16
+
+#undef MIPSDSP_RETURN32_8
+#undef MIPSDSP_RETURN32_16
+
+#ifdef TARGET_MIPS64
+#undef MIPSDSP_SPLIT64_16
+#undef MIPSDSP_SPLIT64_32
+#undef MIPSDSP_RETURN64_16
+#undef MIPSDSP_RETURN64_32
+#endif
diff --git a/target/mips/gdbstub.c b/target/mips/gdbstub.c
new file mode 100644
index 0000000000..7c682289c2
--- /dev/null
+++ b/target/mips/gdbstub.c
@@ -0,0 +1,149 @@
+/*
+ * MIPS gdb server stub
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+
+int mips_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+
+ if (n < 32) {
+ return gdb_get_regl(mem_buf, env->active_tc.gpr[n]);
+ }
+ if (env->CP0_Config1 & (1 << CP0C1_FP) && n >= 38 && n < 72) {
+ switch (n) {
+ case 70:
+ return gdb_get_regl(mem_buf, (int32_t)env->active_fpu.fcr31);
+ case 71:
+ return gdb_get_regl(mem_buf, (int32_t)env->active_fpu.fcr0);
+ default:
+ if (env->CP0_Status & (1 << CP0St_FR)) {
+ return gdb_get_regl(mem_buf,
+ env->active_fpu.fpr[n - 38].d);
+ } else {
+ return gdb_get_regl(mem_buf,
+ env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX]);
+ }
+ }
+ }
+ switch (n) {
+ case 32:
+ return gdb_get_regl(mem_buf, (int32_t)env->CP0_Status);
+ case 33:
+ return gdb_get_regl(mem_buf, env->active_tc.LO[0]);
+ case 34:
+ return gdb_get_regl(mem_buf, env->active_tc.HI[0]);
+ case 35:
+ return gdb_get_regl(mem_buf, env->CP0_BadVAddr);
+ case 36:
+ return gdb_get_regl(mem_buf, (int32_t)env->CP0_Cause);
+ case 37:
+ return gdb_get_regl(mem_buf, env->active_tc.PC |
+ !!(env->hflags & MIPS_HFLAG_M16));
+ case 72:
+ return gdb_get_regl(mem_buf, 0); /* fp */
+ case 89:
+ return gdb_get_regl(mem_buf, (int32_t)env->CP0_PRid);
+ default:
+ if (n > 89) {
+ return 0;
+ }
+ /* 16 embedded regs. */
+ return gdb_get_regl(mem_buf, 0);
+ }
+
+ return 0;
+}
+
+int mips_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+ target_ulong tmp;
+
+ tmp = ldtul_p(mem_buf);
+
+ if (n < 32) {
+ env->active_tc.gpr[n] = tmp;
+ return sizeof(target_ulong);
+ }
+ if (env->CP0_Config1 & (1 << CP0C1_FP) && n >= 38 && n < 72) {
+ switch (n) {
+ case 70:
+ env->active_fpu.fcr31 = (tmp & env->active_fpu.fcr31_rw_bitmask) |
+ (env->active_fpu.fcr31 & ~(env->active_fpu.fcr31_rw_bitmask));
+ restore_fp_status(env);
+ break;
+ case 71:
+ /* FIR is read-only. Ignore writes. */
+ break;
+ default:
+ if (env->CP0_Status & (1 << CP0St_FR)) {
+ env->active_fpu.fpr[n - 38].d = tmp;
+ } else {
+ env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX] = tmp;
+ }
+ break;
+ }
+ return sizeof(target_ulong);
+ }
+ switch (n) {
+ case 32:
+#ifndef CONFIG_USER_ONLY
+ cpu_mips_store_status(env, tmp);
+#endif
+ break;
+ case 33:
+ env->active_tc.LO[0] = tmp;
+ break;
+ case 34:
+ env->active_tc.HI[0] = tmp;
+ break;
+ case 35:
+ env->CP0_BadVAddr = tmp;
+ break;
+ case 36:
+#ifndef CONFIG_USER_ONLY
+ cpu_mips_store_cause(env, tmp);
+#endif
+ break;
+ case 37:
+ env->active_tc.PC = tmp & ~(target_ulong)1;
+ if (tmp & 1) {
+ env->hflags |= MIPS_HFLAG_M16;
+ } else {
+ env->hflags &= ~(MIPS_HFLAG_M16);
+ }
+ break;
+ case 72: /* fp, ignored */
+ break;
+ default:
+ if (n > 89) {
+ return 0;
+ }
+ /* Other registers are read-only. Ignore writes. */
+ break;
+ }
+
+ return sizeof(target_ulong);
+}
diff --git a/target/mips/helper.c b/target/mips/helper.c
new file mode 100644
index 0000000000..c864b15b97
--- /dev/null
+++ b/target/mips/helper.c
@@ -0,0 +1,969 @@
+/*
+ * MIPS emulation helpers for qemu.
+ *
+ * Copyright (c) 2004-2005 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "sysemu/kvm.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "exec/log.h"
+
+enum {
+ TLBRET_XI = -6,
+ TLBRET_RI = -5,
+ TLBRET_DIRTY = -4,
+ TLBRET_INVALID = -3,
+ TLBRET_NOMATCH = -2,
+ TLBRET_BADADDR = -1,
+ TLBRET_MATCH = 0
+};
+
+#if !defined(CONFIG_USER_ONLY)
+
+/* no MMU emulation */
+int no_mmu_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
+ target_ulong address, int rw, int access_type)
+{
+ *physical = address;
+ *prot = PAGE_READ | PAGE_WRITE;
+ return TLBRET_MATCH;
+}
+
+/* fixed mapping MMU emulation */
+int fixed_mmu_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
+ target_ulong address, int rw, int access_type)
+{
+ if (address <= (int32_t)0x7FFFFFFFUL) {
+ if (!(env->CP0_Status & (1 << CP0St_ERL)))
+ *physical = address + 0x40000000UL;
+ else
+ *physical = address;
+ } else if (address <= (int32_t)0xBFFFFFFFUL)
+ *physical = address & 0x1FFFFFFF;
+ else
+ *physical = address;
+
+ *prot = PAGE_READ | PAGE_WRITE;
+ return TLBRET_MATCH;
+}
+
+/* MIPS32/MIPS64 R4000-style MMU emulation */
+int r4k_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
+ target_ulong address, int rw, int access_type)
+{
+ uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
+ int i;
+
+ for (i = 0; i < env->tlb->tlb_in_use; i++) {
+ r4k_tlb_t *tlb = &env->tlb->mmu.r4k.tlb[i];
+ /* 1k pages are not supported. */
+ target_ulong mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
+ target_ulong tag = address & ~mask;
+ target_ulong VPN = tlb->VPN & ~mask;
+#if defined(TARGET_MIPS64)
+ tag &= env->SEGMask;
+#endif
+
+ /* Check ASID, virtual page number & size */
+ if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag && !tlb->EHINV) {
+ /* TLB match */
+ int n = !!(address & mask & ~(mask >> 1));
+ /* Check access rights */
+ if (!(n ? tlb->V1 : tlb->V0)) {
+ return TLBRET_INVALID;
+ }
+ if (rw == MMU_INST_FETCH && (n ? tlb->XI1 : tlb->XI0)) {
+ return TLBRET_XI;
+ }
+ if (rw == MMU_DATA_LOAD && (n ? tlb->RI1 : tlb->RI0)) {
+ return TLBRET_RI;
+ }
+ if (rw != MMU_DATA_STORE || (n ? tlb->D1 : tlb->D0)) {
+ *physical = tlb->PFN[n] | (address & (mask >> 1));
+ *prot = PAGE_READ;
+ if (n ? tlb->D1 : tlb->D0)
+ *prot |= PAGE_WRITE;
+ return TLBRET_MATCH;
+ }
+ return TLBRET_DIRTY;
+ }
+ }
+ return TLBRET_NOMATCH;
+}
+
+static int get_physical_address (CPUMIPSState *env, hwaddr *physical,
+ int *prot, target_ulong real_address,
+ int rw, int access_type)
+{
+ /* User mode can only access useg/xuseg */
+ int user_mode = (env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM;
+ int supervisor_mode = (env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_SM;
+ int kernel_mode = !user_mode && !supervisor_mode;
+#if defined(TARGET_MIPS64)
+ int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
+ int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0;
+ int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;
+#endif
+ int ret = TLBRET_MATCH;
+ /* effective address (modified for KVM T&E kernel segments) */
+ target_ulong address = real_address;
+
+#define USEG_LIMIT 0x7FFFFFFFUL
+#define KSEG0_BASE 0x80000000UL
+#define KSEG1_BASE 0xA0000000UL
+#define KSEG2_BASE 0xC0000000UL
+#define KSEG3_BASE 0xE0000000UL
+
+#define KVM_KSEG0_BASE 0x40000000UL
+#define KVM_KSEG2_BASE 0x60000000UL
+
+ if (kvm_enabled()) {
+ /* KVM T&E adds guest kernel segments in useg */
+ if (real_address >= KVM_KSEG0_BASE) {
+ if (real_address < KVM_KSEG2_BASE) {
+ /* kseg0 */
+ address += KSEG0_BASE - KVM_KSEG0_BASE;
+ } else if (real_address <= USEG_LIMIT) {
+ /* kseg2/3 */
+ address += KSEG2_BASE - KVM_KSEG2_BASE;
+ }
+ }
+ }
+
+ if (address <= USEG_LIMIT) {
+ /* useg */
+ if (env->CP0_Status & (1 << CP0St_ERL)) {
+ *physical = address & 0xFFFFFFFF;
+ *prot = PAGE_READ | PAGE_WRITE;
+ } else {
+ ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type);
+ }
+#if defined(TARGET_MIPS64)
+ } else if (address < 0x4000000000000000ULL) {
+ /* xuseg */
+ if (UX && address <= (0x3FFFFFFFFFFFFFFFULL & env->SEGMask)) {
+ ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type);
+ } else {
+ ret = TLBRET_BADADDR;
+ }
+ } else if (address < 0x8000000000000000ULL) {
+ /* xsseg */
+ if ((supervisor_mode || kernel_mode) &&
+ SX && address <= (0x7FFFFFFFFFFFFFFFULL & env->SEGMask)) {
+ ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type);
+ } else {
+ ret = TLBRET_BADADDR;
+ }
+ } else if (address < 0xC000000000000000ULL) {
+ /* xkphys */
+ if (kernel_mode && KX &&
+ (address & 0x07FFFFFFFFFFFFFFULL) <= env->PAMask) {
+ *physical = address & env->PAMask;
+ *prot = PAGE_READ | PAGE_WRITE;
+ } else {
+ ret = TLBRET_BADADDR;
+ }
+ } else if (address < 0xFFFFFFFF80000000ULL) {
+ /* xkseg */
+ if (kernel_mode && KX &&
+ address <= (0xFFFFFFFF7FFFFFFFULL & env->SEGMask)) {
+ ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type);
+ } else {
+ ret = TLBRET_BADADDR;
+ }
+#endif
+ } else if (address < (int32_t)KSEG1_BASE) {
+ /* kseg0 */
+ if (kernel_mode) {
+ *physical = address - (int32_t)KSEG0_BASE;
+ *prot = PAGE_READ | PAGE_WRITE;
+ } else {
+ ret = TLBRET_BADADDR;
+ }
+ } else if (address < (int32_t)KSEG2_BASE) {
+ /* kseg1 */
+ if (kernel_mode) {
+ *physical = address - (int32_t)KSEG1_BASE;
+ *prot = PAGE_READ | PAGE_WRITE;
+ } else {
+ ret = TLBRET_BADADDR;
+ }
+ } else if (address < (int32_t)KSEG3_BASE) {
+ /* sseg (kseg2) */
+ if (supervisor_mode || kernel_mode) {
+ ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type);
+ } else {
+ ret = TLBRET_BADADDR;
+ }
+ } else {
+ /* kseg3 */
+ /* XXX: debug segment is not emulated */
+ if (kernel_mode) {
+ ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type);
+ } else {
+ ret = TLBRET_BADADDR;
+ }
+ }
+ return ret;
+}
+
+void cpu_mips_tlb_flush(CPUMIPSState *env, int flush_global)
+{
+ MIPSCPU *cpu = mips_env_get_cpu(env);
+
+ /* Flush qemu's TLB and discard all shadowed entries. */
+ tlb_flush(CPU(cpu), flush_global);
+ env->tlb->tlb_in_use = env->tlb->nb_tlb;
+}
+
+/* Called for updates to CP0_Status. */
+void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc)
+{
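+ /* Mirror the CU3..CU0, MX and KSU fields of the new Status value,
+ plus the current ASID, into the selected TC's TCStatus register. */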
+ int32_t tcstatus, *tcst;
+ uint32_t v = cpu->CP0_Status;
+ uint32_t cu, mx, asid, ksu;
+ uint32_t mask = ((1 << CP0TCSt_TCU3)
+ | (1 << CP0TCSt_TCU2)
+ | (1 << CP0TCSt_TCU1)
+ | (1 << CP0TCSt_TCU0)
+ | (1 << CP0TCSt_TMX)
+ | (3 << CP0TCSt_TKSU)
+ | (0xff << CP0TCSt_TASID));
+
+ cu = (v >> CP0St_CU0) & 0xf;
+ mx = (v >> CP0St_MX) & 0x1;
+ ksu = (v >> CP0St_KSU) & 0x3;
+ asid = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
+
+ tcstatus = cu << CP0TCSt_TCU0;
+ tcstatus |= mx << CP0TCSt_TMX;
+ tcstatus |= ksu << CP0TCSt_TKSU;
+ tcstatus |= asid;
+
+ if (tc == cpu->current_tc) {
+ tcst = &cpu->active_tc.CP0_TCStatus;
+ } else {
+ tcst = &cpu->tcs[tc].CP0_TCStatus;
+ }
+
+ *tcst &= ~mask;
+ *tcst |= tcstatus;
+ compute_hflags(cpu);
+}
+
+void cpu_mips_store_status(CPUMIPSState *env, target_ulong val)
+{
+ uint32_t mask = env->CP0_Status_rw_bitmask;
+ target_ulong old = env->CP0_Status;
+
+ if (env->insn_flags & ISA_MIPS32R6) {
+ bool has_supervisor = extract32(mask, CP0St_KSU, 2) == 0x3;
+#if defined(TARGET_MIPS64)
+ uint32_t ksux = (1 << CP0St_KX) & val;
+ ksux |= (ksux >> 1) & val; /* KX = 0 forces SX to be 0 */
+ ksux |= (ksux >> 1) & val; /* SX = 0 forces UX to be 0 */
+ val = (val & ~(7 << CP0St_UX)) | ksux;
+#endif
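+ /* KSU = 0b11 is a reserved encoding; when supervisor mode is
+ implemented, a write of that value leaves the old KSU intact. */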
+ if (has_supervisor && extract32(val, CP0St_KSU, 2) == 0x3) {
+ mask &= ~(3 << CP0St_KSU);
+ }
+ mask &= ~(((1 << CP0St_SR) | (1 << CP0St_NMI)) & val);
+ }
+
+ env->CP0_Status = (old & ~mask) | (val & mask);
+#if defined(TARGET_MIPS64)
+ if ((env->CP0_Status ^ old) & (old & (7 << CP0St_UX))) {
+ /* Access to at least one of the 64-bit segments has been disabled */
+ cpu_mips_tlb_flush(env, 1);
+ }
+#endif
+ if (env->CP0_Config3 & (1 << CP0C3_MT)) {
+ sync_c0_status(env, env, env->current_tc);
+ } else {
+ compute_hflags(env);
+ }
+}
+
+void cpu_mips_store_cause(CPUMIPSState *env, target_ulong val)
+{
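+ /* Base writable bits: IV, WP and the two software interrupt
+ request bits IP1..IP0. */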
+ uint32_t mask = 0x00C00300;
+ uint32_t old = env->CP0_Cause;
+ int i;
+
+ if (env->insn_flags & ISA_MIPS32R2) {
+ mask |= 1 << CP0Ca_DC;
+ }
+ if (env->insn_flags & ISA_MIPS32R6) {
+ mask &= ~((1 << CP0Ca_WP) & val);
+ }
+
+ env->CP0_Cause = (env->CP0_Cause & ~mask) | (val & mask);
+
+ if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) {
+ if (env->CP0_Cause & (1 << CP0Ca_DC)) {
+ cpu_mips_stop_count(env);
+ } else {
+ cpu_mips_start_count(env);
+ }
+ }
+
+ /* Set/reset software interrupts */
+ for (i = 0 ; i < 2 ; i++) {
+ if ((old ^ env->CP0_Cause) & (1 << (CP0Ca_IP + i))) {
+ cpu_mips_soft_irq(env, i, env->CP0_Cause & (1 << (CP0Ca_IP + i)));
+ }
+ }
+}
+#endif
+
+static void raise_mmu_exception(CPUMIPSState *env, target_ulong address,
+ int rw, int tlb_error)
+{
+ CPUState *cs = CPU(mips_env_get_cpu(env));
+ int exception = 0, error_code = 0;
+
+ if (rw == MMU_INST_FETCH) {
+ error_code |= EXCP_INST_NOTAVAIL;
+ }
+
+ switch (tlb_error) {
+ default:
+ case TLBRET_BADADDR:
+ /* Reference to kernel address from user mode or supervisor mode */
+ /* Reference to supervisor address from user mode */
+ if (rw == MMU_DATA_STORE) {
+ exception = EXCP_AdES;
+ } else {
+ exception = EXCP_AdEL;
+ }
+ break;
+ case TLBRET_NOMATCH:
+ /* No TLB match for a mapped address */
+ if (rw == MMU_DATA_STORE) {
+ exception = EXCP_TLBS;
+ } else {
+ exception = EXCP_TLBL;
+ }
+ error_code |= EXCP_TLB_NOMATCH;
+ break;
+ case TLBRET_INVALID:
+ /* TLB match with no valid bit */
+ if (rw == MMU_DATA_STORE) {
+ exception = EXCP_TLBS;
+ } else {
+ exception = EXCP_TLBL;
+ }
+ break;
+ case TLBRET_DIRTY:
+ /* TLB match but 'D' bit is cleared */
+ exception = EXCP_LTLBL;
+ break;
+ case TLBRET_XI:
+ /* Execute-Inhibit Exception */
+ if (env->CP0_PageGrain & (1 << CP0PG_IEC)) {
+ exception = EXCP_TLBXI;
+ } else {
+ exception = EXCP_TLBL;
+ }
+ break;
+ case TLBRET_RI:
+ /* Read-Inhibit Exception */
+ if (env->CP0_PageGrain & (1 << CP0PG_IEC)) {
+ exception = EXCP_TLBRI;
+ } else {
+ exception = EXCP_TLBL;
+ }
+ break;
+ }
+ /* Raise exception */
+ env->CP0_BadVAddr = address;
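+ /* Load BadVPN2 (VA bits 31..13) into Context bits 22..4, preserving
+ PTEBase, and the faulting VPN2 into EntryHi. */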
+ env->CP0_Context = (env->CP0_Context & ~0x007fffff) |
+ ((address >> 9) & 0x007ffff0);
+ env->CP0_EntryHi = (env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask) |
+ (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) |
+ (address & (TARGET_PAGE_MASK << 1));
+#if defined(TARGET_MIPS64)
+ env->CP0_EntryHi &= env->SEGMask;
+ env->CP0_XContext =
+ /* PTEBase */ (env->CP0_XContext & ((~0ULL) << (env->SEGBITS - 7))) |
+ /* R */ (extract64(address, 62, 2) << (env->SEGBITS - 9)) |
+ /* BadVPN2 */ (extract64(address, 13, env->SEGBITS - 13) << 4);
+#endif
+ cs->exception_index = exception;
+ env->error_code = error_code;
+}
+
+#if !defined(CONFIG_USER_ONLY)
+hwaddr mips_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ hwaddr phys_addr;
+ int prot;
+
+ if (get_physical_address(&cpu->env, &phys_addr, &prot, addr, 0,
+ ACCESS_INT) != 0) {
+ return -1;
+ }
+ return phys_addr;
+}
+#endif
+
+int mips_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
+ int mmu_idx)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+#if !defined(CONFIG_USER_ONLY)
+ hwaddr physical;
+ int prot;
+ int access_type;
+#endif
+ int ret = 0;
+
+#if 0
+ log_cpu_state(cs, 0);
+#endif
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s pc " TARGET_FMT_lx " ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
+ __func__, env->active_tc.PC, address, rw, mmu_idx);
+
+ /* data access */
+#if !defined(CONFIG_USER_ONLY)
+ /* XXX: put correct access by using cpu_restore_state()
+ correctly */
+ access_type = ACCESS_INT;
+ ret = get_physical_address(env, &physical, &prot,
+ address, rw, access_type);
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s address=%" VADDR_PRIx " ret %d physical " TARGET_FMT_plx
+ " prot %d\n",
+ __func__, address, ret, physical, prot);
+ if (ret == TLBRET_MATCH) {
+ tlb_set_page(cs, address & TARGET_PAGE_MASK,
+ physical & TARGET_PAGE_MASK, prot | PAGE_EXEC,
+ mmu_idx, TARGET_PAGE_SIZE);
+ ret = 0;
+ } else if (ret < 0)
+#endif
+ {
+ raise_mmu_exception(env, address, rw, ret);
+ ret = 1;
+ }
+
+ return ret;
+}
+
+#if !defined(CONFIG_USER_ONLY)
+hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address, int rw)
+{
+ hwaddr physical;
+ int prot;
+ int access_type;
+ int ret = 0;
+
+ /* data access */
+ access_type = ACCESS_INT;
+ ret = get_physical_address(env, &physical, &prot,
+ address, rw, access_type);
+ if (ret != TLBRET_MATCH) {
+ raise_mmu_exception(env, address, rw, ret);
+ return -1LL;
+ } else {
+ return physical;
+ }
+}
+
+static const char * const excp_names[EXCP_LAST + 1] = {
+ [EXCP_RESET] = "reset",
+ [EXCP_SRESET] = "soft reset",
+ [EXCP_DSS] = "debug single step",
+ [EXCP_DINT] = "debug interrupt",
+ [EXCP_NMI] = "non-maskable interrupt",
+ [EXCP_MCHECK] = "machine check",
+ [EXCP_EXT_INTERRUPT] = "interrupt",
+ [EXCP_DFWATCH] = "deferred watchpoint",
+ [EXCP_DIB] = "debug instruction breakpoint",
+ [EXCP_IWATCH] = "instruction fetch watchpoint",
+ [EXCP_AdEL] = "address error load",
+ [EXCP_AdES] = "address error store",
+ [EXCP_TLBF] = "TLB refill",
+ [EXCP_IBE] = "instruction bus error",
+ [EXCP_DBp] = "debug breakpoint",
+ [EXCP_SYSCALL] = "syscall",
+ [EXCP_BREAK] = "break",
+ [EXCP_CpU] = "coprocessor unusable",
+ [EXCP_RI] = "reserved instruction",
+ [EXCP_OVERFLOW] = "arithmetic overflow",
+ [EXCP_TRAP] = "trap",
+ [EXCP_FPE] = "floating point",
+ [EXCP_DDBS] = "debug data break store",
+ [EXCP_DWATCH] = "data watchpoint",
+ [EXCP_LTLBL] = "TLB modify",
+ [EXCP_TLBL] = "TLB load",
+ [EXCP_TLBS] = "TLB store",
+ [EXCP_DBE] = "data bus error",
+ [EXCP_DDBL] = "debug data break load",
+ [EXCP_THREAD] = "thread",
+ [EXCP_MDMX] = "MDMX",
+ [EXCP_C2E] = "precise coprocessor 2",
+ [EXCP_CACHE] = "cache error",
+ [EXCP_TLBXI] = "TLB execute-inhibit",
+ [EXCP_TLBRI] = "TLB read-inhibit",
+ [EXCP_MSADIS] = "MSA disabled",
+ [EXCP_MSAFPE] = "MSA floating point",
+};
+#endif
+
+target_ulong exception_resume_pc(CPUMIPSState *env)
+{
+ target_ulong bad_pc;
+ target_ulong isa_mode;
+
+ isa_mode = !!(env->hflags & MIPS_HFLAG_M16);
+ bad_pc = env->active_tc.PC | isa_mode;
+ if (env->hflags & MIPS_HFLAG_BMASK) {
+ /* If the exception was raised from a delay slot, come back to
+ the jump. */
+ bad_pc -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4);
+ }
+
+ return bad_pc;
+}
+
+#if !defined(CONFIG_USER_ONLY)
+static void set_hflags_for_handler(CPUMIPSState *env)
+{
+ /* Exception handlers are entered in 32-bit mode. */
+ env->hflags &= ~(MIPS_HFLAG_M16);
+ /* ...except that microMIPS lets you choose. */
+ if (env->insn_flags & ASE_MICROMIPS) {
+ env->hflags |= (!!(env->CP0_Config3
+ & (1 << CP0C3_ISA_ON_EXC))
+ << MIPS_HFLAG_M16_SHIFT);
+ }
+}
+
+static inline void set_badinstr_registers(CPUMIPSState *env)
+{
+ if (env->hflags & MIPS_HFLAG_M16) {
+ /* TODO: add BadInstr support for microMIPS */
+ return;
+ }
+ if (env->CP0_Config3 & (1 << CP0C3_BI)) {
+ env->CP0_BadInstr = cpu_ldl_code(env, env->active_tc.PC);
+ }
+ if ((env->CP0_Config3 & (1 << CP0C3_BP)) &&
+ (env->hflags & MIPS_HFLAG_BMASK)) {
+ env->CP0_BadInstrP = cpu_ldl_code(env, env->active_tc.PC - 4);
+ }
+}
+#endif
+
+void mips_cpu_do_interrupt(CPUState *cs)
+{
+#if !defined(CONFIG_USER_ONLY)
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+ bool update_badinstr = false;
+ target_ulong offset;
+ int cause = -1;
+ const char *name;
+
+ if (qemu_loglevel_mask(CPU_LOG_INT)
+ && cs->exception_index != EXCP_EXT_INTERRUPT) {
+ if (cs->exception_index < 0 || cs->exception_index > EXCP_LAST) {
+ name = "unknown";
+ } else {
+ name = excp_names[cs->exception_index];
+ }
+
+ qemu_log("%s enter: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx
+ " %s exception\n",
+ __func__, env->active_tc.PC, env->CP0_EPC, name);
+ }
+ if (cs->exception_index == EXCP_EXT_INTERRUPT &&
+ (env->hflags & MIPS_HFLAG_DM)) {
+ cs->exception_index = EXCP_DINT;
+ }
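+ /* Default to the general exception vector (base + 0x180); the cases
+ below override this for refill, cache error and vectored interrupts. */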
+ offset = 0x180;
+ switch (cs->exception_index) {
+ case EXCP_DSS:
+ env->CP0_Debug |= 1 << CP0DB_DSS;
+ /* Debug single step cannot be raised inside a delay slot and
+ resume will always occur on the next instruction
+ (but we assume the pc has always been updated during
+ code translation). */
+ env->CP0_DEPC = env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16);
+ goto enter_debug_mode;
+ case EXCP_DINT:
+ env->CP0_Debug |= 1 << CP0DB_DINT;
+ goto set_DEPC;
+ case EXCP_DIB:
+ env->CP0_Debug |= 1 << CP0DB_DIB;
+ goto set_DEPC;
+ case EXCP_DBp:
+ env->CP0_Debug |= 1 << CP0DB_DBp;
+ goto set_DEPC;
+ case EXCP_DDBS:
+ env->CP0_Debug |= 1 << CP0DB_DDBS;
+ goto set_DEPC;
+ case EXCP_DDBL:
+ env->CP0_Debug |= 1 << CP0DB_DDBL;
+ set_DEPC:
+ env->CP0_DEPC = exception_resume_pc(env);
+ env->hflags &= ~MIPS_HFLAG_BMASK;
+ enter_debug_mode:
+ if (env->insn_flags & ISA_MIPS3) {
+ env->hflags |= MIPS_HFLAG_64;
+ if (!(env->insn_flags & ISA_MIPS64R6) ||
+ env->CP0_Status & (1 << CP0St_KX)) {
+ env->hflags &= ~MIPS_HFLAG_AWRAP;
+ }
+ }
+ env->hflags |= MIPS_HFLAG_DM | MIPS_HFLAG_CP0;
+ env->hflags &= ~(MIPS_HFLAG_KSU);
+ /* EJTAG probe trap enable is not implemented... */
+ if (!(env->CP0_Status & (1 << CP0St_EXL))) {
+ env->CP0_Cause &= ~(1U << CP0Ca_BD);
+ }
+ env->active_tc.PC = env->exception_base + 0x480;
+ set_hflags_for_handler(env);
+ break;
+ case EXCP_RESET:
+ cpu_reset(CPU(cpu));
+ break;
+ case EXCP_SRESET:
+ env->CP0_Status |= (1 << CP0St_SR);
+ memset(env->CP0_WatchLo, 0, sizeof(env->CP0_WatchLo));
+ goto set_error_EPC;
+ case EXCP_NMI:
+ env->CP0_Status |= (1 << CP0St_NMI);
+ set_error_EPC:
+ env->CP0_ErrorEPC = exception_resume_pc(env);
+ env->hflags &= ~MIPS_HFLAG_BMASK;
+ env->CP0_Status |= (1 << CP0St_ERL) | (1 << CP0St_BEV);
+ if (env->insn_flags & ISA_MIPS3) {
+ env->hflags |= MIPS_HFLAG_64;
+ if (!(env->insn_flags & ISA_MIPS64R6) ||
+ env->CP0_Status & (1 << CP0St_KX)) {
+ env->hflags &= ~MIPS_HFLAG_AWRAP;
+ }
+ }
+ env->hflags |= MIPS_HFLAG_CP0;
+ env->hflags &= ~(MIPS_HFLAG_KSU);
+ if (!(env->CP0_Status & (1 << CP0St_EXL))) {
+ env->CP0_Cause &= ~(1U << CP0Ca_BD);
+ }
+ env->active_tc.PC = env->exception_base;
+ set_hflags_for_handler(env);
+ break;
+ case EXCP_EXT_INTERRUPT:
+ cause = 0;
+ if (env->CP0_Cause & (1 << CP0Ca_IV)) {
+ uint32_t spacing = (env->CP0_IntCtl >> CP0IntCtl_VS) & 0x1f;
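+ /* IntCtl.VS encodes the vector spacing in 32-byte units, hence the
+ "spacing << 5" below; 0 means all interrupts share the 0x200 vector. */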
+
+ if ((env->CP0_Status & (1 << CP0St_BEV)) || spacing == 0) {
+ offset = 0x200;
+ } else {
+ uint32_t vector = 0;
+ uint32_t pending = (env->CP0_Cause & CP0Ca_IP_mask) >> CP0Ca_IP;
+
+ if (env->CP0_Config3 & (1 << CP0C3_VEIC)) {
+ /* For VEIC mode, the external interrupt controller feeds
+ * the vector through the CP0Cause IP lines. */
+ vector = pending;
+ } else {
+ /* Vectored Interrupts
+ * Mask with Status.IM7-IM0 to get enabled interrupts. */
+ pending &= (env->CP0_Status >> CP0St_IM) & 0xff;
+ /* Find the highest-priority interrupt. */
+ while (pending >>= 1) {
+ vector++;
+ }
+ }
+ offset = 0x200 + (vector * (spacing << 5));
+ }
+ }
+ goto set_EPC;
+ case EXCP_LTLBL:
+ cause = 1;
+ update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
+ goto set_EPC;
+ case EXCP_TLBL:
+ cause = 2;
+ update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
+ if ((env->error_code & EXCP_TLB_NOMATCH) &&
+ !(env->CP0_Status & (1 << CP0St_EXL))) {
+#if defined(TARGET_MIPS64)
+ int R = env->CP0_BadVAddr >> 62;
+ int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
+ int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0;
+ int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;
+
+ if (((R == 0 && UX) || (R == 1 && SX) || (R == 3 && KX)) &&
+ (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F))))
+ offset = 0x080;
+ else
+#endif
+ offset = 0x000;
+ }
+ goto set_EPC;
+ case EXCP_TLBS:
+ cause = 3;
+ update_badinstr = 1;
+ if ((env->error_code & EXCP_TLB_NOMATCH) &&
+ !(env->CP0_Status & (1 << CP0St_EXL))) {
+#if defined(TARGET_MIPS64)
+ int R = env->CP0_BadVAddr >> 62;
+ int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
+ int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0;
+ int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;
+
+ if (((R == 0 && UX) || (R == 1 && SX) || (R == 3 && KX)) &&
+ (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F))))
+ offset = 0x080;
+ else
+#endif
+ offset = 0x000;
+ }
+ goto set_EPC;
+ case EXCP_AdEL:
+ cause = 4;
+ update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
+ goto set_EPC;
+ case EXCP_AdES:
+ cause = 5;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_IBE:
+ cause = 6;
+ goto set_EPC;
+ case EXCP_DBE:
+ cause = 7;
+ goto set_EPC;
+ case EXCP_SYSCALL:
+ cause = 8;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_BREAK:
+ cause = 9;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_RI:
+ cause = 10;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_CpU:
+ cause = 11;
+ update_badinstr = 1;
+ env->CP0_Cause = (env->CP0_Cause & ~(0x3 << CP0Ca_CE)) |
+ (env->error_code << CP0Ca_CE);
+ goto set_EPC;
+ case EXCP_OVERFLOW:
+ cause = 12;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_TRAP:
+ cause = 13;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_MSAFPE:
+ cause = 14;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_FPE:
+ cause = 15;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_C2E:
+ cause = 18;
+ goto set_EPC;
+ case EXCP_TLBRI:
+ cause = 19;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_TLBXI:
+ cause = 20;
+ goto set_EPC;
+ case EXCP_MSADIS:
+ cause = 21;
+ update_badinstr = 1;
+ goto set_EPC;
+ case EXCP_MDMX:
+ cause = 22;
+ goto set_EPC;
+ case EXCP_DWATCH:
+ cause = 23;
+ /* XXX: TODO: manage deferred watch exceptions */
+ goto set_EPC;
+ case EXCP_MCHECK:
+ cause = 24;
+ goto set_EPC;
+ case EXCP_THREAD:
+ cause = 25;
+ goto set_EPC;
+ case EXCP_DSPDIS:
+ cause = 26;
+ goto set_EPC;
+ case EXCP_CACHE:
+ cause = 30;
+ if (env->CP0_Status & (1 << CP0St_BEV)) {
+ offset = 0x100;
+ } else {
+ offset = 0x20000100;
+ }
+ set_EPC:
+ if (!(env->CP0_Status & (1 << CP0St_EXL))) {
+ env->CP0_EPC = exception_resume_pc(env);
+ if (update_badinstr) {
+ set_badinstr_registers(env);
+ }
+ if (env->hflags & MIPS_HFLAG_BMASK) {
+ env->CP0_Cause |= (1U << CP0Ca_BD);
+ } else {
+ env->CP0_Cause &= ~(1U << CP0Ca_BD);
+ }
+ env->CP0_Status |= (1 << CP0St_EXL);
+ if (env->insn_flags & ISA_MIPS3) {
+ env->hflags |= MIPS_HFLAG_64;
+ if (!(env->insn_flags & ISA_MIPS64R6) ||
+ env->CP0_Status & (1 << CP0St_KX)) {
+ env->hflags &= ~MIPS_HFLAG_AWRAP;
+ }
+ }
+ env->hflags |= MIPS_HFLAG_CP0;
+ env->hflags &= ~(MIPS_HFLAG_KSU);
+ }
+ env->hflags &= ~MIPS_HFLAG_BMASK;
+ if (env->CP0_Status & (1 << CP0St_BEV)) {
+ env->active_tc.PC = env->exception_base + 0x200;
+ } else {
+ env->active_tc.PC = (int32_t)(env->CP0_EBase & ~0x3ff);
+ }
+ env->active_tc.PC += offset;
+ set_hflags_for_handler(env);
+ env->CP0_Cause = (env->CP0_Cause & ~(0x1f << CP0Ca_EC)) | (cause << CP0Ca_EC);
+ break;
+ default:
+ abort();
+ }
+ if (qemu_loglevel_mask(CPU_LOG_INT)
+ && cs->exception_index != EXCP_EXT_INTERRUPT) {
+ qemu_log("%s: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " cause %d\n"
+ " S %08x C %08x A " TARGET_FMT_lx " D " TARGET_FMT_lx "\n",
+ __func__, env->active_tc.PC, env->CP0_EPC, cause,
+ env->CP0_Status, env->CP0_Cause, env->CP0_BadVAddr,
+ env->CP0_DEPC);
+ }
+#endif
+ cs->exception_index = EXCP_NONE;
+}
+
+bool mips_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ if (interrupt_request & CPU_INTERRUPT_HARD) {
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+
+ if (cpu_mips_hw_interrupts_enabled(env) &&
+ cpu_mips_hw_interrupts_pending(env)) {
+ /* Raise it */
+ cs->exception_index = EXCP_EXT_INTERRUPT;
+ env->error_code = 0;
+ mips_cpu_do_interrupt(cs);
+ return true;
+ }
+ }
+ return false;
+}
+
+#if !defined(CONFIG_USER_ONLY)
+void r4k_invalidate_tlb(CPUMIPSState *env, int idx, int use_extra)
+{
+ MIPSCPU *cpu = mips_env_get_cpu(env);
+ CPUState *cs;
+ r4k_tlb_t *tlb;
+ target_ulong addr;
+ target_ulong end;
+ uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
+ target_ulong mask;
+
+ tlb = &env->tlb->mmu.r4k.tlb[idx];
+ /* The qemu TLB is flushed when the ASID changes, so no need to
+ flush these entries again. */
+ if (tlb->G == 0 && tlb->ASID != ASID) {
+ return;
+ }
+
+ if (use_extra && env->tlb->tlb_in_use < MIPS_TLB_MAX) {
+ /* For tlbwr, we can shadow the discarded entry into
+ a new (fake) TLB entry, as long as the guest cannot
+ tell that it's there. */
+ env->tlb->mmu.r4k.tlb[env->tlb->tlb_in_use] = *tlb;
+ env->tlb->tlb_in_use++;
+ return;
+ }
+
+ /* 1k pages are not supported. */
+ mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
+ if (tlb->V0) {
+ cs = CPU(cpu);
+ addr = tlb->VPN & ~mask;
+#if defined(TARGET_MIPS64)
+ if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
+ addr |= 0x3FFFFF0000000000ULL;
+ }
+#endif
+ end = addr | (mask >> 1);
+ while (addr < end) {
+ tlb_flush_page(cs, addr);
+ addr += TARGET_PAGE_SIZE;
+ }
+ }
+ if (tlb->V1) {
+ cs = CPU(cpu);
+ addr = (tlb->VPN & ~mask) | ((mask >> 1) + 1);
+#if defined(TARGET_MIPS64)
+ if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
+ addr |= 0x3FFFFF0000000000ULL;
+ }
+#endif
+ end = addr | mask;
+ while (addr - 1 < end) {
+ tlb_flush_page(cs, addr);
+ addr += TARGET_PAGE_SIZE;
+ }
+ }
+}
+#endif
+
+void QEMU_NORETURN do_raise_exception_err(CPUMIPSState *env,
+ uint32_t exception,
+ int error_code,
+ uintptr_t pc)
+{
+ CPUState *cs = CPU(mips_env_get_cpu(env));
+
+ if (exception < EXCP_SC) {
+ qemu_log_mask(CPU_LOG_INT, "%s: %d %d\n",
+ __func__, exception, error_code);
+ }
+ cs->exception_index = exception;
+ env->error_code = error_code;
+
+ cpu_loop_exit_restore(cs, pc);
+}
diff --git a/target/mips/helper.h b/target/mips/helper.h
new file mode 100644
index 0000000000..666936c81b
--- /dev/null
+++ b/target/mips/helper.h
@@ -0,0 +1,962 @@
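+/*
+ * DEF_HELPER_N(name, ret, t1..tN) declares a TCG helper "helper_<name>"
+ * with N arguments.  The type codes used here are tl (target_ulong),
+ * i32/i64, int, env (CPUMIPSState *), void and noreturn.
+ * DEF_HELPER_FLAGS_N additionally takes TCG call flags such as
+ * TCG_CALL_NO_RWG_SE (the helper neither reads nor writes TCG globals
+ * and has no side effects), which lets the code generator optimise
+ * around the call.
+ */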
+DEF_HELPER_3(raise_exception_err, noreturn, env, i32, int)
+DEF_HELPER_2(raise_exception, noreturn, env, i32)
+DEF_HELPER_1(raise_exception_debug, noreturn, env)
+
+DEF_HELPER_1(do_semihosting, void, env)
+
+#ifdef TARGET_MIPS64
+DEF_HELPER_4(sdl, void, env, tl, tl, int)
+DEF_HELPER_4(sdr, void, env, tl, tl, int)
+#endif
+DEF_HELPER_4(swl, void, env, tl, tl, int)
+DEF_HELPER_4(swr, void, env, tl, tl, int)
+
+#ifndef CONFIG_USER_ONLY
+DEF_HELPER_3(ll, tl, env, tl, int)
+DEF_HELPER_4(sc, tl, env, tl, tl, int)
+#ifdef TARGET_MIPS64
+DEF_HELPER_3(lld, tl, env, tl, int)
+DEF_HELPER_4(scd, tl, env, tl, tl, int)
+#endif
+#endif
+
+DEF_HELPER_FLAGS_1(clo, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(clz, TCG_CALL_NO_RWG_SE, tl, tl)
+#ifdef TARGET_MIPS64
+DEF_HELPER_FLAGS_1(dclo, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(dclz, TCG_CALL_NO_RWG_SE, tl, tl)
+#endif
+
+DEF_HELPER_3(muls, tl, env, tl, tl)
+DEF_HELPER_3(mulsu, tl, env, tl, tl)
+DEF_HELPER_3(macc, tl, env, tl, tl)
+DEF_HELPER_3(maccu, tl, env, tl, tl)
+DEF_HELPER_3(msac, tl, env, tl, tl)
+DEF_HELPER_3(msacu, tl, env, tl, tl)
+DEF_HELPER_3(mulhi, tl, env, tl, tl)
+DEF_HELPER_3(mulhiu, tl, env, tl, tl)
+DEF_HELPER_3(mulshi, tl, env, tl, tl)
+DEF_HELPER_3(mulshiu, tl, env, tl, tl)
+DEF_HELPER_3(macchi, tl, env, tl, tl)
+DEF_HELPER_3(macchiu, tl, env, tl, tl)
+DEF_HELPER_3(msachi, tl, env, tl, tl)
+DEF_HELPER_3(msachiu, tl, env, tl, tl)
+
+DEF_HELPER_FLAGS_1(bitswap, TCG_CALL_NO_RWG_SE, tl, tl)
+#ifdef TARGET_MIPS64
+DEF_HELPER_FLAGS_1(dbitswap, TCG_CALL_NO_RWG_SE, tl, tl)
+#endif
+
+#ifndef CONFIG_USER_ONLY
+/* CP0 helpers */
+DEF_HELPER_1(mfc0_mvpcontrol, tl, env)
+DEF_HELPER_1(mfc0_mvpconf0, tl, env)
+DEF_HELPER_1(mfc0_mvpconf1, tl, env)
+DEF_HELPER_1(mftc0_vpecontrol, tl, env)
+DEF_HELPER_1(mftc0_vpeconf0, tl, env)
+DEF_HELPER_1(mfc0_random, tl, env)
+DEF_HELPER_1(mfc0_tcstatus, tl, env)
+DEF_HELPER_1(mftc0_tcstatus, tl, env)
+DEF_HELPER_1(mfc0_tcbind, tl, env)
+DEF_HELPER_1(mftc0_tcbind, tl, env)
+DEF_HELPER_1(mfc0_tcrestart, tl, env)
+DEF_HELPER_1(mftc0_tcrestart, tl, env)
+DEF_HELPER_1(mfc0_tchalt, tl, env)
+DEF_HELPER_1(mftc0_tchalt, tl, env)
+DEF_HELPER_1(mfc0_tccontext, tl, env)
+DEF_HELPER_1(mftc0_tccontext, tl, env)
+DEF_HELPER_1(mfc0_tcschedule, tl, env)
+DEF_HELPER_1(mftc0_tcschedule, tl, env)
+DEF_HELPER_1(mfc0_tcschefback, tl, env)
+DEF_HELPER_1(mftc0_tcschefback, tl, env)
+DEF_HELPER_1(mfc0_count, tl, env)
+DEF_HELPER_1(mftc0_entryhi, tl, env)
+DEF_HELPER_1(mftc0_status, tl, env)
+DEF_HELPER_1(mftc0_cause, tl, env)
+DEF_HELPER_1(mftc0_epc, tl, env)
+DEF_HELPER_1(mftc0_ebase, tl, env)
+DEF_HELPER_2(mftc0_configx, tl, env, tl)
+DEF_HELPER_1(mfc0_lladdr, tl, env)
+DEF_HELPER_1(mfc0_maar, tl, env)
+DEF_HELPER_1(mfhc0_maar, tl, env)
+DEF_HELPER_2(mfc0_watchlo, tl, env, i32)
+DEF_HELPER_2(mfc0_watchhi, tl, env, i32)
+DEF_HELPER_1(mfc0_debug, tl, env)
+DEF_HELPER_1(mftc0_debug, tl, env)
+#ifdef TARGET_MIPS64
+DEF_HELPER_1(dmfc0_tcrestart, tl, env)
+DEF_HELPER_1(dmfc0_tchalt, tl, env)
+DEF_HELPER_1(dmfc0_tccontext, tl, env)
+DEF_HELPER_1(dmfc0_tcschedule, tl, env)
+DEF_HELPER_1(dmfc0_tcschefback, tl, env)
+DEF_HELPER_1(dmfc0_lladdr, tl, env)
+DEF_HELPER_1(dmfc0_maar, tl, env)
+DEF_HELPER_2(dmfc0_watchlo, tl, env, i32)
+#endif /* TARGET_MIPS64 */
+
+DEF_HELPER_2(mtc0_index, void, env, tl)
+DEF_HELPER_2(mtc0_mvpcontrol, void, env, tl)
+DEF_HELPER_2(mtc0_vpecontrol, void, env, tl)
+DEF_HELPER_2(mttc0_vpecontrol, void, env, tl)
+DEF_HELPER_2(mtc0_vpeconf0, void, env, tl)
+DEF_HELPER_2(mttc0_vpeconf0, void, env, tl)
+DEF_HELPER_2(mtc0_vpeconf1, void, env, tl)
+DEF_HELPER_2(mtc0_yqmask, void, env, tl)
+DEF_HELPER_2(mtc0_vpeopt, void, env, tl)
+DEF_HELPER_2(mtc0_entrylo0, void, env, tl)
+DEF_HELPER_2(mtc0_tcstatus, void, env, tl)
+DEF_HELPER_2(mttc0_tcstatus, void, env, tl)
+DEF_HELPER_2(mtc0_tcbind, void, env, tl)
+DEF_HELPER_2(mttc0_tcbind, void, env, tl)
+DEF_HELPER_2(mtc0_tcrestart, void, env, tl)
+DEF_HELPER_2(mttc0_tcrestart, void, env, tl)
+DEF_HELPER_2(mtc0_tchalt, void, env, tl)
+DEF_HELPER_2(mttc0_tchalt, void, env, tl)
+DEF_HELPER_2(mtc0_tccontext, void, env, tl)
+DEF_HELPER_2(mttc0_tccontext, void, env, tl)
+DEF_HELPER_2(mtc0_tcschedule, void, env, tl)
+DEF_HELPER_2(mttc0_tcschedule, void, env, tl)
+DEF_HELPER_2(mtc0_tcschefback, void, env, tl)
+DEF_HELPER_2(mttc0_tcschefback, void, env, tl)
+DEF_HELPER_2(mtc0_entrylo1, void, env, tl)
+DEF_HELPER_2(mtc0_context, void, env, tl)
+DEF_HELPER_2(mtc0_pagemask, void, env, tl)
+DEF_HELPER_2(mtc0_pagegrain, void, env, tl)
+DEF_HELPER_2(mtc0_wired, void, env, tl)
+DEF_HELPER_2(mtc0_srsconf0, void, env, tl)
+DEF_HELPER_2(mtc0_srsconf1, void, env, tl)
+DEF_HELPER_2(mtc0_srsconf2, void, env, tl)
+DEF_HELPER_2(mtc0_srsconf3, void, env, tl)
+DEF_HELPER_2(mtc0_srsconf4, void, env, tl)
+DEF_HELPER_2(mtc0_hwrena, void, env, tl)
+DEF_HELPER_2(mtc0_count, void, env, tl)
+DEF_HELPER_2(mtc0_entryhi, void, env, tl)
+DEF_HELPER_2(mttc0_entryhi, void, env, tl)
+DEF_HELPER_2(mtc0_compare, void, env, tl)
+DEF_HELPER_2(mtc0_status, void, env, tl)
+DEF_HELPER_2(mttc0_status, void, env, tl)
+DEF_HELPER_2(mtc0_intctl, void, env, tl)
+DEF_HELPER_2(mtc0_srsctl, void, env, tl)
+DEF_HELPER_2(mtc0_cause, void, env, tl)
+DEF_HELPER_2(mttc0_cause, void, env, tl)
+DEF_HELPER_2(mtc0_ebase, void, env, tl)
+DEF_HELPER_2(mttc0_ebase, void, env, tl)
+DEF_HELPER_2(mtc0_config0, void, env, tl)
+DEF_HELPER_2(mtc0_config2, void, env, tl)
+DEF_HELPER_2(mtc0_config3, void, env, tl)
+DEF_HELPER_2(mtc0_config4, void, env, tl)
+DEF_HELPER_2(mtc0_config5, void, env, tl)
+DEF_HELPER_2(mtc0_lladdr, void, env, tl)
+DEF_HELPER_2(mtc0_maar, void, env, tl)
+DEF_HELPER_2(mthc0_maar, void, env, tl)
+DEF_HELPER_2(mtc0_maari, void, env, tl)
+DEF_HELPER_3(mtc0_watchlo, void, env, tl, i32)
+DEF_HELPER_3(mtc0_watchhi, void, env, tl, i32)
+DEF_HELPER_2(mtc0_xcontext, void, env, tl)
+DEF_HELPER_2(mtc0_framemask, void, env, tl)
+DEF_HELPER_2(mtc0_debug, void, env, tl)
+DEF_HELPER_2(mttc0_debug, void, env, tl)
+DEF_HELPER_2(mtc0_performance0, void, env, tl)
+DEF_HELPER_2(mtc0_errctl, void, env, tl)
+DEF_HELPER_2(mtc0_taglo, void, env, tl)
+DEF_HELPER_2(mtc0_datalo, void, env, tl)
+DEF_HELPER_2(mtc0_taghi, void, env, tl)
+DEF_HELPER_2(mtc0_datahi, void, env, tl)
+
+#if defined(TARGET_MIPS64)
+DEF_HELPER_2(dmtc0_entrylo0, void, env, i64)
+DEF_HELPER_2(dmtc0_entrylo1, void, env, i64)
+#endif
+
+/* MIPS MT functions */
+DEF_HELPER_2(mftgpr, tl, env, i32)
+DEF_HELPER_2(mftlo, tl, env, i32)
+DEF_HELPER_2(mfthi, tl, env, i32)
+DEF_HELPER_2(mftacx, tl, env, i32)
+DEF_HELPER_1(mftdsp, tl, env)
+DEF_HELPER_3(mttgpr, void, env, tl, i32)
+DEF_HELPER_3(mttlo, void, env, tl, i32)
+DEF_HELPER_3(mtthi, void, env, tl, i32)
+DEF_HELPER_3(mttacx, void, env, tl, i32)
+DEF_HELPER_2(mttdsp, void, env, tl)
+DEF_HELPER_0(dmt, tl)
+DEF_HELPER_0(emt, tl)
+DEF_HELPER_1(dvpe, tl, env)
+DEF_HELPER_1(evpe, tl, env)
+
+/* R6 Multi-threading */
+DEF_HELPER_1(dvp, tl, env)
+DEF_HELPER_1(evp, tl, env)
+#endif /* !CONFIG_USER_ONLY */
+
+/* microMIPS functions */
+DEF_HELPER_4(lwm, void, env, tl, tl, i32)
+DEF_HELPER_4(swm, void, env, tl, tl, i32)
+#ifdef TARGET_MIPS64
+DEF_HELPER_4(ldm, void, env, tl, tl, i32)
+DEF_HELPER_4(sdm, void, env, tl, tl, i32)
+#endif
+
+DEF_HELPER_2(fork, void, tl, tl)
+DEF_HELPER_2(yield, tl, env, tl)
+
+/* CP1 functions */
+DEF_HELPER_2(cfc1, tl, env, i32)
+DEF_HELPER_4(ctc1, void, env, tl, i32, i32)
+
+DEF_HELPER_2(float_cvtd_s, i64, env, i32)
+DEF_HELPER_2(float_cvtd_w, i64, env, i32)
+DEF_HELPER_2(float_cvtd_l, i64, env, i64)
+DEF_HELPER_2(float_cvtps_pw, i64, env, i64)
+DEF_HELPER_2(float_cvtpw_ps, i64, env, i64)
+DEF_HELPER_2(float_cvts_d, i32, env, i64)
+DEF_HELPER_2(float_cvts_w, i32, env, i32)
+DEF_HELPER_2(float_cvts_l, i32, env, i64)
+DEF_HELPER_2(float_cvts_pl, i32, env, i32)
+DEF_HELPER_2(float_cvts_pu, i32, env, i32)
+
+DEF_HELPER_3(float_addr_ps, i64, env, i64, i64)
+DEF_HELPER_3(float_mulr_ps, i64, env, i64, i64)
+
+DEF_HELPER_FLAGS_2(float_class_s, TCG_CALL_NO_RWG_SE, i32, env, i32)
+DEF_HELPER_FLAGS_2(float_class_d, TCG_CALL_NO_RWG_SE, i64, env, i64)
+
+#define FOP_PROTO(op) \
+DEF_HELPER_4(float_ ## op ## _s, i32, env, i32, i32, i32) \
+DEF_HELPER_4(float_ ## op ## _d, i64, env, i64, i64, i64)
+FOP_PROTO(maddf)
+FOP_PROTO(msubf)
+#undef FOP_PROTO
+
+#define FOP_PROTO(op) \
+DEF_HELPER_3(float_ ## op ## _s, i32, env, i32, i32) \
+DEF_HELPER_3(float_ ## op ## _d, i64, env, i64, i64)
+FOP_PROTO(max)
+FOP_PROTO(maxa)
+FOP_PROTO(min)
+FOP_PROTO(mina)
+#undef FOP_PROTO
+
+#define FOP_PROTO(op) \
+DEF_HELPER_2(float_ ## op ## _l_s, i64, env, i32) \
+DEF_HELPER_2(float_ ## op ## _l_d, i64, env, i64) \
+DEF_HELPER_2(float_ ## op ## _w_s, i32, env, i32) \
+DEF_HELPER_2(float_ ## op ## _w_d, i32, env, i64)
+FOP_PROTO(cvt)
+FOP_PROTO(round)
+FOP_PROTO(trunc)
+FOP_PROTO(ceil)
+FOP_PROTO(floor)
+FOP_PROTO(cvt_2008)
+FOP_PROTO(round_2008)
+FOP_PROTO(trunc_2008)
+FOP_PROTO(ceil_2008)
+FOP_PROTO(floor_2008)
+#undef FOP_PROTO
+
+#define FOP_PROTO(op) \
+DEF_HELPER_2(float_ ## op ## _s, i32, env, i32) \
+DEF_HELPER_2(float_ ## op ## _d, i64, env, i64)
+FOP_PROTO(sqrt)
+FOP_PROTO(rsqrt)
+FOP_PROTO(recip)
+FOP_PROTO(rint)
+#undef FOP_PROTO
+
+#define FOP_PROTO(op) \
+DEF_HELPER_1(float_ ## op ## _s, i32, i32) \
+DEF_HELPER_1(float_ ## op ## _d, i64, i64) \
+DEF_HELPER_1(float_ ## op ## _ps, i64, i64)
+FOP_PROTO(abs)
+FOP_PROTO(chs)
+#undef FOP_PROTO
+
+#define FOP_PROTO(op) \
+DEF_HELPER_2(float_ ## op ## _s, i32, env, i32) \
+DEF_HELPER_2(float_ ## op ## _d, i64, env, i64) \
+DEF_HELPER_2(float_ ## op ## _ps, i64, env, i64)
+FOP_PROTO(recip1)
+FOP_PROTO(rsqrt1)
+#undef FOP_PROTO
+
+#define FOP_PROTO(op) \
+DEF_HELPER_3(float_ ## op ## _s, i32, env, i32, i32) \
+DEF_HELPER_3(float_ ## op ## _d, i64, env, i64, i64) \
+DEF_HELPER_3(float_ ## op ## _ps, i64, env, i64, i64)
+FOP_PROTO(add)
+FOP_PROTO(sub)
+FOP_PROTO(mul)
+FOP_PROTO(div)
+FOP_PROTO(recip2)
+FOP_PROTO(rsqrt2)
+#undef FOP_PROTO
+
+#define FOP_PROTO(op) \
+DEF_HELPER_4(float_ ## op ## _s, i32, env, i32, i32, i32) \
+DEF_HELPER_4(float_ ## op ## _d, i64, env, i64, i64, i64) \
+DEF_HELPER_4(float_ ## op ## _ps, i64, env, i64, i64, i64)
+FOP_PROTO(madd)
+FOP_PROTO(msub)
+FOP_PROTO(nmadd)
+FOP_PROTO(nmsub)
+#undef FOP_PROTO
+
+#define FOP_PROTO(op) \
+DEF_HELPER_4(cmp_d_ ## op, void, env, i64, i64, int) \
+DEF_HELPER_4(cmpabs_d_ ## op, void, env, i64, i64, int) \
+DEF_HELPER_4(cmp_s_ ## op, void, env, i32, i32, int) \
+DEF_HELPER_4(cmpabs_s_ ## op, void, env, i32, i32, int) \
+DEF_HELPER_4(cmp_ps_ ## op, void, env, i64, i64, int) \
+DEF_HELPER_4(cmpabs_ps_ ## op, void, env, i64, i64, int)
+FOP_PROTO(f)
+FOP_PROTO(un)
+FOP_PROTO(eq)
+FOP_PROTO(ueq)
+FOP_PROTO(olt)
+FOP_PROTO(ult)
+FOP_PROTO(ole)
+FOP_PROTO(ule)
+FOP_PROTO(sf)
+FOP_PROTO(ngle)
+FOP_PROTO(seq)
+FOP_PROTO(ngl)
+FOP_PROTO(lt)
+FOP_PROTO(nge)
+FOP_PROTO(le)
+FOP_PROTO(ngt)
+#undef FOP_PROTO
+
+#define FOP_PROTO(op) \
+DEF_HELPER_3(r6_cmp_d_ ## op, i64, env, i64, i64) \
+DEF_HELPER_3(r6_cmp_s_ ## op, i32, env, i32, i32)
+FOP_PROTO(af)
+FOP_PROTO(un)
+FOP_PROTO(eq)
+FOP_PROTO(ueq)
+FOP_PROTO(lt)
+FOP_PROTO(ult)
+FOP_PROTO(le)
+FOP_PROTO(ule)
+FOP_PROTO(saf)
+FOP_PROTO(sun)
+FOP_PROTO(seq)
+FOP_PROTO(sueq)
+FOP_PROTO(slt)
+FOP_PROTO(sult)
+FOP_PROTO(sle)
+FOP_PROTO(sule)
+FOP_PROTO(or)
+FOP_PROTO(une)
+FOP_PROTO(ne)
+FOP_PROTO(sor)
+FOP_PROTO(sune)
+FOP_PROTO(sne)
+#undef FOP_PROTO
+
+/* Special functions */
+#ifndef CONFIG_USER_ONLY
+DEF_HELPER_1(tlbwi, void, env)
+DEF_HELPER_1(tlbwr, void, env)
+DEF_HELPER_1(tlbp, void, env)
+DEF_HELPER_1(tlbr, void, env)
+DEF_HELPER_1(tlbinv, void, env)
+DEF_HELPER_1(tlbinvf, void, env)
+DEF_HELPER_1(di, tl, env)
+DEF_HELPER_1(ei, tl, env)
+DEF_HELPER_1(eret, void, env)
+DEF_HELPER_1(eretnc, void, env)
+DEF_HELPER_1(deret, void, env)
+#endif /* !CONFIG_USER_ONLY */
+DEF_HELPER_1(rdhwr_cpunum, tl, env)
+DEF_HELPER_1(rdhwr_synci_step, tl, env)
+DEF_HELPER_1(rdhwr_cc, tl, env)
+DEF_HELPER_1(rdhwr_ccres, tl, env)
+DEF_HELPER_1(rdhwr_performance, tl, env)
+DEF_HELPER_1(rdhwr_xnp, tl, env)
+DEF_HELPER_2(pmon, void, env, int)
+DEF_HELPER_1(wait, void, env)
+
+/* Loongson multimedia functions. */
+DEF_HELPER_FLAGS_2(paddsh, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(paddush, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(paddh, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(paddw, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(paddsb, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(paddusb, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(paddb, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+
+DEF_HELPER_FLAGS_2(psubsh, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(psubush, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(psubh, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(psubw, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(psubsb, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(psubusb, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(psubb, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+
+DEF_HELPER_FLAGS_2(pshufh, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(packsswh, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(packsshb, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(packushb, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+
+DEF_HELPER_FLAGS_2(punpcklhw, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(punpckhhw, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(punpcklbh, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(punpckhbh, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(punpcklwd, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(punpckhwd, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+
+DEF_HELPER_FLAGS_2(pavgh, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(pavgb, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(pmaxsh, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(pminsh, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(pmaxub, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(pminub, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+
+DEF_HELPER_FLAGS_2(pcmpeqw, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(pcmpgtw, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(pcmpeqh, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(pcmpgth, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(pcmpeqb, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(pcmpgtb, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+
+DEF_HELPER_FLAGS_2(psllw, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(psllh, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(psrlw, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(psrlh, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(psraw, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(psrah, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+
+DEF_HELPER_FLAGS_2(pmullh, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(pmulhh, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(pmulhuh, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(pmaddhw, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+
+DEF_HELPER_FLAGS_2(pasubub, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_1(biadd, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(pmovmskb, TCG_CALL_NO_RWG_SE, i64, i64)
+
+/*** MIPS DSP ***/
+/* DSP Arithmetic Sub-class insns */
+DEF_HELPER_FLAGS_3(addq_ph, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(addq_s_ph, 0, tl, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(addq_qh, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(addq_s_qh, 0, tl, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_3(addq_s_w, 0, tl, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(addq_pw, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(addq_s_pw, 0, tl, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_3(addu_qb, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(addu_s_qb, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_2(adduh_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(adduh_r_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_3(addu_ph, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(addu_s_ph, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_2(addqh_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(addqh_r_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(addqh_w, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(addqh_r_w, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(addu_ob, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(addu_s_ob, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_2(adduh_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(adduh_r_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_3(addu_qh, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(addu_s_qh, 0, tl, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_3(subq_ph, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(subq_s_ph, 0, tl, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(subq_qh, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(subq_s_qh, 0, tl, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_3(subq_s_w, 0, tl, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(subq_pw, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(subq_s_pw, 0, tl, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_3(subu_qb, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(subu_s_qb, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_2(subuh_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(subuh_r_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_3(subu_ph, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(subu_s_ph, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_2(subqh_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(subqh_r_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(subqh_w, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(subqh_r_w, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(subu_ob, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(subu_s_ob, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_2(subuh_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(subuh_r_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_3(subu_qh, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(subu_s_qh, 0, tl, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_3(addsc, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(addwc, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_2(modsub, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_1(raddu_w_qb, TCG_CALL_NO_RWG_SE, tl, tl)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_1(raddu_l_ob, TCG_CALL_NO_RWG_SE, tl, tl)
+#endif
+DEF_HELPER_FLAGS_2(absq_s_qb, 0, tl, tl, env)
+DEF_HELPER_FLAGS_2(absq_s_ph, 0, tl, tl, env)
+DEF_HELPER_FLAGS_2(absq_s_w, 0, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_2(absq_s_ob, 0, tl, tl, env)
+DEF_HELPER_FLAGS_2(absq_s_qh, 0, tl, tl, env)
+DEF_HELPER_FLAGS_2(absq_s_pw, 0, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_2(precr_qb_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(precrq_qb_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_3(precr_sra_ph_w, TCG_CALL_NO_RWG_SE,
+ tl, i32, tl, tl)
+DEF_HELPER_FLAGS_3(precr_sra_r_ph_w, TCG_CALL_NO_RWG_SE,
+ tl, i32, tl, tl)
+DEF_HELPER_FLAGS_2(precrq_ph_w, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_3(precrq_rs_ph_w, 0, tl, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_2(precr_ob_qh, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_3(precr_sra_qh_pw,
+ TCG_CALL_NO_RWG_SE, tl, tl, tl, i32)
+DEF_HELPER_FLAGS_3(precr_sra_r_qh_pw,
+ TCG_CALL_NO_RWG_SE, tl, tl, tl, i32)
+DEF_HELPER_FLAGS_2(precrq_ob_qh, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(precrq_qh_pw, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_3(precrq_rs_qh_pw,
+ TCG_CALL_NO_RWG_SE, tl, tl, tl, env)
+DEF_HELPER_FLAGS_2(precrq_pw_l, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+#endif
+DEF_HELPER_FLAGS_3(precrqu_s_qb_ph, 0, tl, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(precrqu_s_ob_qh,
+ TCG_CALL_NO_RWG_SE, tl, tl, tl, env)
+
+DEF_HELPER_FLAGS_1(preceq_pw_qhl, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(preceq_pw_qhr, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(preceq_pw_qhla, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(preceq_pw_qhra, TCG_CALL_NO_RWG_SE, tl, tl)
+#endif
+DEF_HELPER_FLAGS_1(precequ_ph_qbl, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(precequ_ph_qbr, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(precequ_ph_qbla, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(precequ_ph_qbra, TCG_CALL_NO_RWG_SE, tl, tl)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_1(precequ_qh_obl, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(precequ_qh_obr, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(precequ_qh_obla, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(precequ_qh_obra, TCG_CALL_NO_RWG_SE, tl, tl)
+#endif
+DEF_HELPER_FLAGS_1(preceu_ph_qbl, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(preceu_ph_qbr, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(preceu_ph_qbla, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(preceu_ph_qbra, TCG_CALL_NO_RWG_SE, tl, tl)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_1(preceu_qh_obl, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(preceu_qh_obr, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(preceu_qh_obla, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(preceu_qh_obra, TCG_CALL_NO_RWG_SE, tl, tl)
+#endif
+
+/* DSP GPR-Based Shift Sub-class insns */
+DEF_HELPER_FLAGS_3(shll_qb, 0, tl, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(shll_ob, 0, tl, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_3(shll_ph, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(shll_s_ph, 0, tl, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(shll_qh, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(shll_s_qh, 0, tl, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_3(shll_s_w, 0, tl, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(shll_pw, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(shll_s_pw, 0, tl, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_2(shrl_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(shrl_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_2(shrl_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(shrl_qh, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+#endif
+DEF_HELPER_FLAGS_2(shra_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(shra_r_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_2(shra_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(shra_r_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+#endif
+DEF_HELPER_FLAGS_2(shra_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(shra_r_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(shra_r_w, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_2(shra_qh, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(shra_r_qh, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(shra_pw, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(shra_r_pw, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+#endif
+
+/* DSP Multiply Sub-class insns */
+DEF_HELPER_FLAGS_3(muleu_s_ph_qbl, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(muleu_s_ph_qbr, 0, tl, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(muleu_s_qh_obl, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(muleu_s_qh_obr, 0, tl, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_3(mulq_rs_ph, 0, tl, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(mulq_rs_qh, 0, tl, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_3(muleq_s_w_phl, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(muleq_s_w_phr, 0, tl, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(muleq_s_pw_qhl, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(muleq_s_pw_qhr, 0, tl, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_4(dpau_h_qbl, 0, void, i32, tl, tl, env)
+DEF_HELPER_FLAGS_4(dpau_h_qbr, 0, void, i32, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_4(dpau_h_obl, 0, void, tl, tl, i32, env)
+DEF_HELPER_FLAGS_4(dpau_h_obr, 0, void, tl, tl, i32, env)
+#endif
+DEF_HELPER_FLAGS_4(dpsu_h_qbl, 0, void, i32, tl, tl, env)
+DEF_HELPER_FLAGS_4(dpsu_h_qbr, 0, void, i32, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_4(dpsu_h_obl, 0, void, tl, tl, i32, env)
+DEF_HELPER_FLAGS_4(dpsu_h_obr, 0, void, tl, tl, i32, env)
+#endif
+DEF_HELPER_FLAGS_4(dpa_w_ph, 0, void, i32, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_4(dpa_w_qh, 0, void, tl, tl, i32, env)
+#endif
+DEF_HELPER_FLAGS_4(dpax_w_ph, 0, void, i32, tl, tl, env)
+DEF_HELPER_FLAGS_4(dpaq_s_w_ph, 0, void, i32, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_4(dpaq_s_w_qh, 0, void, tl, tl, i32, env)
+#endif
+DEF_HELPER_FLAGS_4(dpaqx_s_w_ph, 0, void, i32, tl, tl, env)
+DEF_HELPER_FLAGS_4(dpaqx_sa_w_ph, 0, void, i32, tl, tl, env)
+DEF_HELPER_FLAGS_4(dps_w_ph, 0, void, i32, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_4(dps_w_qh, 0, void, tl, tl, i32, env)
+#endif
+DEF_HELPER_FLAGS_4(dpsx_w_ph, 0, void, i32, tl, tl, env)
+DEF_HELPER_FLAGS_4(dpsq_s_w_ph, 0, void, i32, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_4(dpsq_s_w_qh, 0, void, tl, tl, i32, env)
+#endif
+DEF_HELPER_FLAGS_4(dpsqx_s_w_ph, 0, void, i32, tl, tl, env)
+DEF_HELPER_FLAGS_4(dpsqx_sa_w_ph, 0, void, i32, tl, tl, env)
+DEF_HELPER_FLAGS_4(mulsaq_s_w_ph, 0, void, i32, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_4(mulsaq_s_w_qh, 0, void, tl, tl, i32, env)
+#endif
+DEF_HELPER_FLAGS_4(dpaq_sa_l_w, 0, void, i32, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_4(dpaq_sa_l_pw, 0, void, tl, tl, i32, env)
+#endif
+DEF_HELPER_FLAGS_4(dpsq_sa_l_w, 0, void, i32, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_4(dpsq_sa_l_pw, 0, void, tl, tl, i32, env)
+DEF_HELPER_FLAGS_4(mulsaq_s_l_pw, 0, void, tl, tl, i32, env)
+#endif
+DEF_HELPER_FLAGS_4(maq_s_w_phl, 0, void, i32, tl, tl, env)
+DEF_HELPER_FLAGS_4(maq_s_w_phr, 0, void, i32, tl, tl, env)
+DEF_HELPER_FLAGS_4(maq_sa_w_phl, 0, void, i32, tl, tl, env)
+DEF_HELPER_FLAGS_4(maq_sa_w_phr, 0, void, i32, tl, tl, env)
+DEF_HELPER_FLAGS_3(mul_ph, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(mul_s_ph, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(mulq_s_ph, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(mulq_s_w, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(mulq_rs_w, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_4(mulsa_w_ph, 0, void, i32, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_4(maq_s_w_qhll, 0, void, tl, tl, i32, env)
+DEF_HELPER_FLAGS_4(maq_s_w_qhlr, 0, void, tl, tl, i32, env)
+DEF_HELPER_FLAGS_4(maq_s_w_qhrl, 0, void, tl, tl, i32, env)
+DEF_HELPER_FLAGS_4(maq_s_w_qhrr, 0, void, tl, tl, i32, env)
+DEF_HELPER_FLAGS_4(maq_sa_w_qhll, 0, void, tl, tl, i32, env)
+DEF_HELPER_FLAGS_4(maq_sa_w_qhlr, 0, void, tl, tl, i32, env)
+DEF_HELPER_FLAGS_4(maq_sa_w_qhrl, 0, void, tl, tl, i32, env)
+DEF_HELPER_FLAGS_4(maq_sa_w_qhrr, 0, void, tl, tl, i32, env)
+DEF_HELPER_FLAGS_4(maq_s_l_pwl, 0, void, tl, tl, i32, env)
+DEF_HELPER_FLAGS_4(maq_s_l_pwr, 0, void, tl, tl, i32, env)
+DEF_HELPER_FLAGS_4(dmadd, 0, void, tl, tl, i32, env)
+DEF_HELPER_FLAGS_4(dmaddu, 0, void, tl, tl, i32, env)
+DEF_HELPER_FLAGS_4(dmsub, 0, void, tl, tl, i32, env)
+DEF_HELPER_FLAGS_4(dmsubu, 0, void, tl, tl, i32, env)
+#endif
+
+/* DSP Bit/Manipulation Sub-class insns */
+DEF_HELPER_FLAGS_1(bitrev, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_3(insv, 0, tl, env, tl, tl)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(dinsv, 0, tl, env, tl, tl)
+#endif
+
+/* DSP Compare-Pick Sub-class insns */
+DEF_HELPER_FLAGS_3(cmpu_eq_qb, 0, void, tl, tl, env)
+DEF_HELPER_FLAGS_3(cmpu_lt_qb, 0, void, tl, tl, env)
+DEF_HELPER_FLAGS_3(cmpu_le_qb, 0, void, tl, tl, env)
+DEF_HELPER_FLAGS_2(cmpgu_eq_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(cmpgu_lt_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(cmpgu_le_qb, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_3(cmp_eq_ph, 0, void, tl, tl, env)
+DEF_HELPER_FLAGS_3(cmp_lt_ph, 0, void, tl, tl, env)
+DEF_HELPER_FLAGS_3(cmp_le_ph, 0, void, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(cmpu_eq_ob, 0, void, tl, tl, env)
+DEF_HELPER_FLAGS_3(cmpu_lt_ob, 0, void, tl, tl, env)
+DEF_HELPER_FLAGS_3(cmpu_le_ob, 0, void, tl, tl, env)
+DEF_HELPER_FLAGS_3(cmpgdu_eq_ob, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(cmpgdu_lt_ob, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(cmpgdu_le_ob, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_2(cmpgu_eq_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(cmpgu_lt_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(cmpgu_le_ob, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_3(cmp_eq_qh, 0, void, tl, tl, env)
+DEF_HELPER_FLAGS_3(cmp_lt_qh, 0, void, tl, tl, env)
+DEF_HELPER_FLAGS_3(cmp_le_qh, 0, void, tl, tl, env)
+DEF_HELPER_FLAGS_3(cmp_eq_pw, 0, void, tl, tl, env)
+DEF_HELPER_FLAGS_3(cmp_lt_pw, 0, void, tl, tl, env)
+DEF_HELPER_FLAGS_3(cmp_le_pw, 0, void, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_3(pick_qb, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(pick_ph, 0, tl, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(pick_ob, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(pick_qh, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(pick_pw, 0, tl, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_2(packrl_ph, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_2(packrl_pw, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+#endif
+
+/* DSP Accumulator and DSPControl Access Sub-class insns */
+DEF_HELPER_FLAGS_3(extr_w, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(extr_r_w, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(extr_rs_w, 0, tl, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(dextr_w, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(dextr_r_w, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(dextr_rs_w, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(dextr_l, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(dextr_r_l, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(dextr_rs_l, 0, tl, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_3(extr_s_h, 0, tl, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(dextr_s_h, 0, tl, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_3(extp, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(extpdp, 0, tl, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(dextp, 0, tl, tl, tl, env)
+DEF_HELPER_FLAGS_3(dextpdp, 0, tl, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_3(shilo, 0, void, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(dshilo, 0, void, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_3(mthlip, 0, void, tl, tl, env)
+#if defined(TARGET_MIPS64)
+DEF_HELPER_FLAGS_3(dmthlip, 0, void, tl, tl, env)
+#endif
+DEF_HELPER_FLAGS_3(wrdsp, 0, void, tl, tl, env)
+DEF_HELPER_FLAGS_2(rddsp, 0, tl, tl, env)
+
+/* MIPS SIMD Architecture */
+DEF_HELPER_4(msa_andi_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ori_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_nori_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_xori_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_bmnzi_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_bmzi_b, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_bseli_b, void, env, i32, i32, i32)
+DEF_HELPER_5(msa_shf_df, void, env, i32, i32, i32, i32)
+
+DEF_HELPER_5(msa_addvi_df, void, env, i32, i32, i32, s32)
+DEF_HELPER_5(msa_subvi_df, void, env, i32, i32, i32, s32)
+DEF_HELPER_5(msa_maxi_s_df, void, env, i32, i32, i32, s32)
+DEF_HELPER_5(msa_maxi_u_df, void, env, i32, i32, i32, s32)
+DEF_HELPER_5(msa_mini_s_df, void, env, i32, i32, i32, s32)
+DEF_HELPER_5(msa_mini_u_df, void, env, i32, i32, i32, s32)
+DEF_HELPER_5(msa_ceqi_df, void, env, i32, i32, i32, s32)
+DEF_HELPER_5(msa_clti_s_df, void, env, i32, i32, i32, s32)
+DEF_HELPER_5(msa_clti_u_df, void, env, i32, i32, i32, s32)
+DEF_HELPER_5(msa_clei_s_df, void, env, i32, i32, i32, s32)
+DEF_HELPER_5(msa_clei_u_df, void, env, i32, i32, i32, s32)
+DEF_HELPER_4(msa_ldi_df, void, env, i32, i32, s32)
+
+DEF_HELPER_5(msa_slli_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_srai_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_srli_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_bclri_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_bseti_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_bnegi_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_binsli_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_binsri_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_sat_s_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_sat_u_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_srari_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_srlri_df, void, env, i32, i32, i32, i32)
+
+DEF_HELPER_5(msa_sll_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_sra_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_srl_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_bclr_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_bset_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_bneg_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_binsl_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_binsr_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_addv_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_subv_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_max_s_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_max_u_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_min_s_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_min_u_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_max_a_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_min_a_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_ceq_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_clt_s_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_clt_u_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_cle_s_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_cle_u_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_add_a_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_adds_a_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_adds_s_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_adds_u_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_ave_s_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_ave_u_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_aver_s_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_aver_u_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_subs_s_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_subs_u_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_subsus_u_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_subsuu_s_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_asub_s_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_asub_u_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_mulv_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_maddv_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_msubv_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_div_s_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_div_u_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_mod_s_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_mod_u_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_dotp_s_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_dotp_u_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_dpadd_s_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_dpadd_u_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_dpsub_s_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_dpsub_u_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_sld_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_splat_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_pckev_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_pckod_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_ilvl_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_ilvr_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_ilvev_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_ilvod_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_vshf_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_srar_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_srlr_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_hadd_s_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_hadd_u_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_hsub_s_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_hsub_u_df, void, env, i32, i32, i32, i32)
+
+DEF_HELPER_5(msa_sldi_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_splati_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_copy_s_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_copy_u_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_insert_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_insve_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_3(msa_ctcmsa, void, env, tl, i32)
+DEF_HELPER_2(msa_cfcmsa, tl, env, i32)
+DEF_HELPER_3(msa_move_v, void, env, i32, i32)
+
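+/* MSA vector floating-point and fixed-point (Q-format) operations */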
+DEF_HELPER_5(msa_fcaf_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fcun_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fceq_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fcueq_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fclt_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fcult_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fcle_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fcule_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fsaf_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fsun_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fseq_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fsueq_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fslt_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fsult_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fsle_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fsule_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fadd_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fsub_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fmul_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fdiv_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fmadd_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fmsub_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fexp2_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fexdo_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_ftq_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fmin_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fmin_a_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fmax_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fmax_a_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fcor_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fcune_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fcne_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_mul_q_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_madd_q_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_msub_q_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fsor_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fsune_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_fsne_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_mulr_q_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_maddr_q_df, void, env, i32, i32, i32, i32)
+DEF_HELPER_5(msa_msubr_q_df, void, env, i32, i32, i32, i32)
+
+DEF_HELPER_4(msa_and_v, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_or_v, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_nor_v, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_xor_v, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_bmnz_v, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_bmz_v, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_bsel_v, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_fill_df, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_pcnt_df, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_nloc_df, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_nlzc_df, void, env, i32, i32, i32)
+
+DEF_HELPER_4(msa_fclass_df, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ftrunc_s_df, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ftrunc_u_df, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_fsqrt_df, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_frsqrt_df, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_frcp_df, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_frint_df, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_flog2_df, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_fexupl_df, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_fexupr_df, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ffql_df, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ffqr_df, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ftint_s_df, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ftint_u_df, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ffint_s_df, void, env, i32, i32, i32)
+DEF_HELPER_4(msa_ffint_u_df, void, env, i32, i32, i32)
+
+#define MSALDST_PROTO(type) \
+DEF_HELPER_3(msa_ld_ ## type, void, env, i32, tl) \
+DEF_HELPER_3(msa_st_ ## type, void, env, i32, tl)
+MSALDST_PROTO(b)
+MSALDST_PROTO(h)
+MSALDST_PROTO(w)
+MSALDST_PROTO(d)
+#undef MSALDST_PROTO
+
+DEF_HELPER_3(cache, void, env, tl, i32)
diff --git a/target/mips/kvm.c b/target/mips/kvm.c
new file mode 100644
index 0000000000..dcf5fbba0c
--- /dev/null
+++ b/target/mips/kvm.c
@@ -0,0 +1,1060 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: MIPS specific KVM APIs
+ *
+ * Copyright (C) 2012-2014 Imagination Technologies Ltd.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#include "qemu/osdep.h"
+#include <sys/ioctl.h>
+
+#include <linux/kvm.h>
+
+#include "qemu-common.h"
+#include "cpu.h"
+#include "qemu/error-report.h"
+#include "qemu/timer.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/kvm.h"
+#include "sysemu/cpus.h"
+#include "kvm_mips.h"
+#include "exec/memattrs.h"
+
+#define DEBUG_KVM 0
+
+#define DPRINTF(fmt, ...) \
+ do { if (DEBUG_KVM) { fprintf(stderr, fmt, ## __VA_ARGS__); } } while (0)
+
+static int kvm_mips_fpu_cap;
+static int kvm_mips_msa_cap;
+
+const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
+ KVM_CAP_LAST_INFO
+};
+
+static void kvm_mips_update_state(void *opaque, int running, RunState state);
+
+unsigned long kvm_arch_vcpu_id(CPUState *cs)
+{
+ return cs->cpu_index;
+}
+
+int kvm_arch_init(MachineState *ms, KVMState *s)
+{
+ /* MIPS has 128 signals */
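+    /* ... so the sigmask passed to the kernel is 128 / 8 == 16 bytes long */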
+ kvm_set_sigmask_len(s, 16);
+
+ kvm_mips_fpu_cap = kvm_check_extension(s, KVM_CAP_MIPS_FPU);
+ kvm_mips_msa_cap = kvm_check_extension(s, KVM_CAP_MIPS_MSA);
+
+ DPRINTF("%s\n", __func__);
+ return 0;
+}
+
+int kvm_arch_init_vcpu(CPUState *cs)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+ int ret = 0;
+
+ qemu_add_vm_change_state_handler(kvm_mips_update_state, cs);
+
+ if (kvm_mips_fpu_cap && env->CP0_Config1 & (1 << CP0C1_FP)) {
+ ret = kvm_vcpu_enable_cap(cs, KVM_CAP_MIPS_FPU, 0, 0);
+ if (ret < 0) {
+ /* mark unsupported so it gets disabled on reset */
+ kvm_mips_fpu_cap = 0;
+ ret = 0;
+ }
+ }
+
+ if (kvm_mips_msa_cap && env->CP0_Config3 & (1 << CP0C3_MSAP)) {
+ ret = kvm_vcpu_enable_cap(cs, KVM_CAP_MIPS_MSA, 0, 0);
+ if (ret < 0) {
+ /* mark unsupported so it gets disabled on reset */
+ kvm_mips_msa_cap = 0;
+ ret = 0;
+ }
+ }
+
+ DPRINTF("%s\n", __func__);
+ return ret;
+}
+
+void kvm_mips_reset_vcpu(MIPSCPU *cpu)
+{
+ CPUMIPSState *env = &cpu->env;
+
+ if (!kvm_mips_fpu_cap && env->CP0_Config1 & (1 << CP0C1_FP)) {
+ fprintf(stderr, "Warning: KVM does not support FPU, disabling\n");
+ env->CP0_Config1 &= ~(1 << CP0C1_FP);
+ }
+ if (!kvm_mips_msa_cap && env->CP0_Config3 & (1 << CP0C3_MSAP)) {
+ fprintf(stderr, "Warning: KVM does not support MSA, disabling\n");
+ env->CP0_Config3 &= ~(1 << CP0C3_MSAP);
+ }
+
+ DPRINTF("%s\n", __func__);
+}
+
+int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+ DPRINTF("%s\n", __func__);
+ return 0;
+}
+
+int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+ DPRINTF("%s\n", __func__);
+ return 0;
+}
+
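+/* True when Cause.IP2 (the first hardware interrupt line) is pending. */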
+static inline int cpu_mips_io_interrupts_pending(MIPSCPU *cpu)
+{
+ CPUMIPSState *env = &cpu->env;
+
+ return env->CP0_Cause & (0x1 << (2 + CP0Ca_IP));
+}
+
+
+void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ int r;
+ struct kvm_mips_interrupt intr;
+
+ qemu_mutex_lock_iothread();
+
+ if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+ cpu_mips_io_interrupts_pending(cpu)) {
+ intr.cpu = -1;
+ intr.irq = 2;
+ r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
+ if (r < 0) {
+ error_report("%s: cpu %d: failed to inject IRQ %x",
+ __func__, cs->cpu_index, intr.irq);
+ }
+ }
+
+ qemu_mutex_unlock_iothread();
+}
+
+MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
+{
+ return MEMTXATTRS_UNSPECIFIED;
+}
+
+int kvm_arch_process_async_events(CPUState *cs)
+{
+ return cs->halted;
+}
+
+int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
+{
+ int ret;
+
+ DPRINTF("%s\n", __func__);
+ switch (run->exit_reason) {
+ default:
+ error_report("%s: unknown exit reason %d",
+ __func__, run->exit_reason);
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
+bool kvm_arch_stop_on_emulation_error(CPUState *cs)
+{
+ DPRINTF("%s\n", __func__);
+ return true;
+}
+
+int kvm_arch_on_sigbus_vcpu(CPUState *cs, int code, void *addr)
+{
+ DPRINTF("%s\n", __func__);
+ return 1;
+}
+
+int kvm_arch_on_sigbus(int code, void *addr)
+{
+ DPRINTF("%s\n", __func__);
+ return 1;
+}
+
+void kvm_arch_init_irq_routing(KVMState *s)
+{
+}
+
+int kvm_mips_set_interrupt(MIPSCPU *cpu, int irq, int level)
+{
+ CPUState *cs = CPU(cpu);
+ struct kvm_mips_interrupt intr;
+
+ if (!kvm_enabled()) {
+ return 0;
+ }
+
+ intr.cpu = -1;
+
+ if (level) {
+ intr.irq = irq;
+ } else {
+ intr.irq = -irq;
+ }
+
+ kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
+
+ return 0;
+}
+
+int kvm_mips_set_ipi_interrupt(MIPSCPU *cpu, int irq, int level)
+{
+ CPUState *cs = current_cpu;
+ CPUState *dest_cs = CPU(cpu);
+ struct kvm_mips_interrupt intr;
+
+ if (!kvm_enabled()) {
+ return 0;
+ }
+
+ intr.cpu = dest_cs->cpu_index;
+
+ if (level) {
+ intr.irq = irq;
+ } else {
+ intr.irq = -irq;
+ }
+
+ DPRINTF("%s: CPU %d, IRQ: %d\n", __func__, intr.cpu, intr.irq);
+
+ kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
+
+ return 0;
+}
+
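+/*
+ * KVM "one reg" IDs for CP0 registers encode the register number and select
+ * field in the low bits as (8 * reg + sel), e.g. Config3 is (16, 3) -> 131.
+ */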
+#define MIPS_CP0_32(_R, _S) \
+ (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))
+
+#define MIPS_CP0_64(_R, _S) \
+ (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))
+
+#define KVM_REG_MIPS_CP0_INDEX MIPS_CP0_32(0, 0)
+#define KVM_REG_MIPS_CP0_CONTEXT MIPS_CP0_64(4, 0)
+#define KVM_REG_MIPS_CP0_USERLOCAL MIPS_CP0_64(4, 2)
+#define KVM_REG_MIPS_CP0_PAGEMASK MIPS_CP0_32(5, 0)
+#define KVM_REG_MIPS_CP0_WIRED MIPS_CP0_32(6, 0)
+#define KVM_REG_MIPS_CP0_HWRENA MIPS_CP0_32(7, 0)
+#define KVM_REG_MIPS_CP0_BADVADDR MIPS_CP0_64(8, 0)
+#define KVM_REG_MIPS_CP0_COUNT MIPS_CP0_32(9, 0)
+#define KVM_REG_MIPS_CP0_ENTRYHI MIPS_CP0_64(10, 0)
+#define KVM_REG_MIPS_CP0_COMPARE MIPS_CP0_32(11, 0)
+#define KVM_REG_MIPS_CP0_STATUS MIPS_CP0_32(12, 0)
+#define KVM_REG_MIPS_CP0_CAUSE MIPS_CP0_32(13, 0)
+#define KVM_REG_MIPS_CP0_EPC MIPS_CP0_64(14, 0)
+#define KVM_REG_MIPS_CP0_PRID MIPS_CP0_32(15, 0)
+#define KVM_REG_MIPS_CP0_CONFIG MIPS_CP0_32(16, 0)
+#define KVM_REG_MIPS_CP0_CONFIG1 MIPS_CP0_32(16, 1)
+#define KVM_REG_MIPS_CP0_CONFIG2 MIPS_CP0_32(16, 2)
+#define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3)
+#define KVM_REG_MIPS_CP0_CONFIG4 MIPS_CP0_32(16, 4)
+#define KVM_REG_MIPS_CP0_CONFIG5 MIPS_CP0_32(16, 5)
+#define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0)
+
+static inline int kvm_mips_put_one_reg(CPUState *cs, uint64_t reg_id,
+ int32_t *addr)
+{
+ struct kvm_one_reg cp0reg = {
+ .id = reg_id,
+ .addr = (uintptr_t)addr
+ };
+
+ return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
+}
+
+static inline int kvm_mips_put_one_ureg(CPUState *cs, uint64_t reg_id,
+ uint32_t *addr)
+{
+ struct kvm_one_reg cp0reg = {
+ .id = reg_id,
+ .addr = (uintptr_t)addr
+ };
+
+ return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
+}
+
+static inline int kvm_mips_put_one_ulreg(CPUState *cs, uint64_t reg_id,
+ target_ulong *addr)
+{
+ uint64_t val64 = *addr;
+ struct kvm_one_reg cp0reg = {
+ .id = reg_id,
+ .addr = (uintptr_t)&val64
+ };
+
+ return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
+}
+
+static inline int kvm_mips_put_one_reg64(CPUState *cs, uint64_t reg_id,
+ int64_t *addr)
+{
+ struct kvm_one_reg cp0reg = {
+ .id = reg_id,
+ .addr = (uintptr_t)addr
+ };
+
+ return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
+}
+
+static inline int kvm_mips_put_one_ureg64(CPUState *cs, uint64_t reg_id,
+ uint64_t *addr)
+{
+ struct kvm_one_reg cp0reg = {
+ .id = reg_id,
+ .addr = (uintptr_t)addr
+ };
+
+ return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
+}
+
+static inline int kvm_mips_get_one_reg(CPUState *cs, uint64_t reg_id,
+ int32_t *addr)
+{
+ struct kvm_one_reg cp0reg = {
+ .id = reg_id,
+ .addr = (uintptr_t)addr
+ };
+
+ return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
+}
+
+static inline int kvm_mips_get_one_ureg(CPUState *cs, uint64_t reg_id,
+ uint32_t *addr)
+{
+ struct kvm_one_reg cp0reg = {
+ .id = reg_id,
+ .addr = (uintptr_t)addr
+ };
+
+ return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
+}
+
+static inline int kvm_mips_get_one_ulreg(CPUState *cs, uint64_t reg_id,
+ target_ulong *addr)
+{
+ int ret;
+ uint64_t val64 = 0;
+ struct kvm_one_reg cp0reg = {
+ .id = reg_id,
+ .addr = (uintptr_t)&val64
+ };
+
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
+ if (ret >= 0) {
+ *addr = val64;
+ }
+ return ret;
+}
+
+static inline int kvm_mips_get_one_reg64(CPUState *cs, uint64_t reg_id,
+ int64_t *addr)
+{
+ struct kvm_one_reg cp0reg = {
+ .id = reg_id,
+ .addr = (uintptr_t)addr
+ };
+
+ return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
+}
+
+static inline int kvm_mips_get_one_ureg64(CPUState *cs, uint64_t reg_id,
+ uint64_t *addr)
+{
+ struct kvm_one_reg cp0reg = {
+ .id = reg_id,
+ .addr = (uintptr_t)addr
+ };
+
+ return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
+}
+
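+/*
+ * Bits of the Config registers that QEMU is allowed to change; everything
+ * outside these masks is left exactly as KVM reports it (see
+ * kvm_mips_change_one_reg() below).
+ */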
+#define KVM_REG_MIPS_CP0_CONFIG_MASK (1U << CP0C0_M)
+#define KVM_REG_MIPS_CP0_CONFIG1_MASK ((1U << CP0C1_M) | \
+ (1U << CP0C1_FP))
+#define KVM_REG_MIPS_CP0_CONFIG2_MASK (1U << CP0C2_M)
+#define KVM_REG_MIPS_CP0_CONFIG3_MASK ((1U << CP0C3_M) | \
+ (1U << CP0C3_MSAP))
+#define KVM_REG_MIPS_CP0_CONFIG4_MASK (1U << CP0C4_M)
+#define KVM_REG_MIPS_CP0_CONFIG5_MASK ((1U << CP0C5_MSAEn) | \
+ (1U << CP0C5_UFE) | \
+ (1U << CP0C5_FRE) | \
+ (1U << CP0C5_UFR))
+
+static inline int kvm_mips_change_one_reg(CPUState *cs, uint64_t reg_id,
+ int32_t *addr, int32_t mask)
+{
+ int err;
+ int32_t tmp, change;
+
+ err = kvm_mips_get_one_reg(cs, reg_id, &tmp);
+ if (err < 0) {
+ return err;
+ }
+
+ /* only change bits in mask */
+ change = (*addr ^ tmp) & mask;
+ if (!change) {
+ return 0;
+ }
+
+ tmp = tmp ^ change;
+ return kvm_mips_put_one_reg(cs, reg_id, &tmp);
+}
+
+/*
+ * We freeze the KVM timer when either the VM clock is stopped or the state is
+ * saved (the state is dirty).
+ */
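+/*
+ * Freezing is done by setting the DC ("disable count") bit in the KVM
+ * COUNT_CTL register; the timer is resumed by updating COUNT_RESUME and
+ * clearing DC again once the VM clock is running (see kvm_mips_update_state).
+ */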
+
+/*
+ * Save the state of the KVM timer when VM clock is stopped or state is synced
+ * to QEMU.
+ */
+static int kvm_mips_save_count(CPUState *cs)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+ uint64_t count_ctl;
+ int err, ret = 0;
+
+ /* freeze KVM timer */
+ err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err);
+ ret = err;
+ } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) {
+ count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC;
+ err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
+ if (err < 0) {
+ DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err);
+ ret = err;
+ }
+ }
+
+ /* read CP0_Cause */
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CAUSE, &env->CP0_Cause);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_CAUSE (%d)\n", __func__, err);
+ ret = err;
+ }
+
+ /* read CP0_Count */
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_COUNT, &env->CP0_Count);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_COUNT (%d)\n", __func__, err);
+ ret = err;
+ }
+
+ return ret;
+}
+
+/*
+ * Restore the state of the KVM timer when VM clock is restarted or state is
+ * synced to KVM.
+ */
+static int kvm_mips_restore_count(CPUState *cs)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+ uint64_t count_ctl;
+ int err_dc, err, ret = 0;
+
+ /* check the timer is frozen */
+ err_dc = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
+ if (err_dc < 0) {
+ DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err_dc);
+ ret = err_dc;
+ } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) {
+ /* freeze timer (sets COUNT_RESUME for us) */
+ count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC;
+ err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
+ if (err < 0) {
+ DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err);
+ ret = err;
+ }
+ }
+
+ /* load CP0_Cause */
+ err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_CAUSE, &env->CP0_Cause);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_CAUSE (%d)\n", __func__, err);
+ ret = err;
+ }
+
+ /* load CP0_Count */
+ err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_COUNT, &env->CP0_Count);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_COUNT (%d)\n", __func__, err);
+ ret = err;
+ }
+
+ /* resume KVM timer */
+ if (err_dc >= 0) {
+ count_ctl &= ~KVM_REG_MIPS_COUNT_CTL_DC;
+ err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
+ if (err < 0) {
+ DPRINTF("%s: Failed to set COUNT_CTL.DC=0 (%d)\n", __func__, err);
+ ret = err;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Handle the VM clock being started or stopped
+ */
+static void kvm_mips_update_state(void *opaque, int running, RunState state)
+{
+ CPUState *cs = opaque;
+ int ret;
+ uint64_t count_resume;
+
+ /*
+ * If state is already dirty (synced to QEMU) then the KVM timer state is
+ * already saved and can be restored when it is synced back to KVM.
+ */
+ if (!running) {
+ if (!cs->kvm_vcpu_dirty) {
+ ret = kvm_mips_save_count(cs);
+ if (ret < 0) {
+ fprintf(stderr, "Failed saving count\n");
+ }
+ }
+ } else {
+ /* Set clock restore time to now */
+ count_resume = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
+ ret = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_RESUME,
+ &count_resume);
+ if (ret < 0) {
+ fprintf(stderr, "Failed setting COUNT_RESUME\n");
+ return;
+ }
+
+ if (!cs->kvm_vcpu_dirty) {
+ ret = kvm_mips_restore_count(cs);
+ if (ret < 0) {
+ fprintf(stderr, "Failed restoring count\n");
+ }
+ }
+ }
+}
+
+static int kvm_mips_put_fpu_registers(CPUState *cs, int level)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+ int err, ret = 0;
+ unsigned int i;
+
+ /* Only put FPU state if we're emulating a CPU with an FPU */
+ if (env->CP0_Config1 & (1 << CP0C1_FP)) {
+ /* FPU Control Registers */
+ if (level == KVM_PUT_FULL_STATE) {
+ err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_IR,
+ &env->active_fpu.fcr0);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put FCR_IR (%d)\n", __func__, err);
+ ret = err;
+ }
+ }
+ err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_CSR,
+ &env->active_fpu.fcr31);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put FCR_CSR (%d)\n", __func__, err);
+ ret = err;
+ }
+
+ /*
+ * FPU register state is a subset of MSA vector state, so don't put FPU
+ * registers if we're emulating a CPU with MSA.
+ */
+ if (!(env->CP0_Config3 & (1 << CP0C3_MSAP))) {
+ /* Floating point registers */
+ for (i = 0; i < 32; ++i) {
+ if (env->CP0_Status & (1 << CP0St_FR)) {
+ err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i),
+ &env->active_fpu.fpr[i].d);
+ } else {
+                err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FPR_32(i),
+ &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]);
+ }
+ if (err < 0) {
+ DPRINTF("%s: Failed to put FPR%u (%d)\n", __func__, i, err);
+ ret = err;
+ }
+ }
+ }
+ }
+
+ /* Only put MSA state if we're emulating a CPU with MSA */
+ if (env->CP0_Config3 & (1 << CP0C3_MSAP)) {
+ /* MSA Control Registers */
+ if (level == KVM_PUT_FULL_STATE) {
+ err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_IR,
+ &env->msair);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put MSA_IR (%d)\n", __func__, err);
+ ret = err;
+ }
+ }
+ err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_CSR,
+ &env->active_tc.msacsr);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put MSA_CSR (%d)\n", __func__, err);
+ ret = err;
+ }
+
+ /* Vector registers (includes FP registers) */
+ for (i = 0; i < 32; ++i) {
+ /* Big endian MSA not supported by QEMU yet anyway */
+ err = kvm_mips_put_one_reg64(cs, KVM_REG_MIPS_VEC_128(i),
+ env->active_fpu.fpr[i].wr.d);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put VEC%u (%d)\n", __func__, i, err);
+ ret = err;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int kvm_mips_get_fpu_registers(CPUState *cs)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+ int err, ret = 0;
+ unsigned int i;
+
+ /* Only get FPU state if we're emulating a CPU with an FPU */
+ if (env->CP0_Config1 & (1 << CP0C1_FP)) {
+ /* FPU Control Registers */
+ err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_IR,
+ &env->active_fpu.fcr0);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get FCR_IR (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_CSR,
+ &env->active_fpu.fcr31);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get FCR_CSR (%d)\n", __func__, err);
+ ret = err;
+ } else {
+ restore_fp_status(env);
+ }
+
+ /*
+         * FPU register state is a subset of MSA vector state, so don't fetch
+         * the FPU registers separately if we're emulating a CPU with MSA.
+ */
+ if (!(env->CP0_Config3 & (1 << CP0C3_MSAP))) {
+ /* Floating point registers */
+ for (i = 0; i < 32; ++i) {
+ if (env->CP0_Status & (1 << CP0St_FR)) {
+ err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i),
+ &env->active_fpu.fpr[i].d);
+ } else {
+ err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FPR_32(i),
+ &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]);
+ }
+ if (err < 0) {
+ DPRINTF("%s: Failed to get FPR%u (%d)\n", __func__, i, err);
+ ret = err;
+ }
+ }
+ }
+ }
+
+ /* Only get MSA state if we're emulating a CPU with MSA */
+ if (env->CP0_Config3 & (1 << CP0C3_MSAP)) {
+ /* MSA Control Registers */
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_IR,
+ &env->msair);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get MSA_IR (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_CSR,
+ &env->active_tc.msacsr);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get MSA_CSR (%d)\n", __func__, err);
+ ret = err;
+ } else {
+ restore_msa_fp_status(env);
+ }
+
+ /* Vector registers (includes FP registers) */
+ for (i = 0; i < 32; ++i) {
+ /* Big endian MSA not supported by QEMU yet anyway */
+ err = kvm_mips_get_one_reg64(cs, KVM_REG_MIPS_VEC_128(i),
+ env->active_fpu.fpr[i].wr.d);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get VEC%u (%d)\n", __func__, i, err);
+ ret = err;
+ }
+ }
+ }
+
+ return ret;
+}
+
+
+static int kvm_mips_put_cp0_registers(CPUState *cs, int level)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+ int err, ret = 0;
+
+ (void)level;
+
+ err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_INDEX, &env->CP0_Index);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_INDEX (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_CONTEXT,
+ &env->CP0_Context);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_CONTEXT (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_USERLOCAL,
+ &env->active_tc.CP0_UserLocal);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_USERLOCAL (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PAGEMASK,
+ &env->CP0_PageMask);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_PAGEMASK (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_WIRED, &env->CP0_Wired);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_WIRED (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_HWRENA, &env->CP0_HWREna);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_HWRENA (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_BADVADDR,
+ &env->CP0_BadVAddr);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_BADVADDR (%d)\n", __func__, err);
+ ret = err;
+ }
+
+ /* If VM clock stopped then state will be restored when it is restarted */
+ if (runstate_is_running()) {
+ err = kvm_mips_restore_count(cs);
+ if (err < 0) {
+ ret = err;
+ }
+ }
+
+ err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ENTRYHI,
+ &env->CP0_EntryHi);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_ENTRYHI (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_COMPARE,
+ &env->CP0_Compare);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_COMPARE (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_STATUS, &env->CP0_Status);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_STATUS (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_EPC, &env->CP0_EPC);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_EPC (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_PRID (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG,
+ &env->CP0_Config0,
+ KVM_REG_MIPS_CP0_CONFIG_MASK);
+ if (err < 0) {
+ DPRINTF("%s: Failed to change CP0_CONFIG (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1,
+ &env->CP0_Config1,
+ KVM_REG_MIPS_CP0_CONFIG1_MASK);
+ if (err < 0) {
+ DPRINTF("%s: Failed to change CP0_CONFIG1 (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2,
+ &env->CP0_Config2,
+ KVM_REG_MIPS_CP0_CONFIG2_MASK);
+ if (err < 0) {
+ DPRINTF("%s: Failed to change CP0_CONFIG2 (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3,
+ &env->CP0_Config3,
+ KVM_REG_MIPS_CP0_CONFIG3_MASK);
+ if (err < 0) {
+ DPRINTF("%s: Failed to change CP0_CONFIG3 (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4,
+ &env->CP0_Config4,
+ KVM_REG_MIPS_CP0_CONFIG4_MASK);
+ if (err < 0) {
+ DPRINTF("%s: Failed to change CP0_CONFIG4 (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5,
+ &env->CP0_Config5,
+ KVM_REG_MIPS_CP0_CONFIG5_MASK);
+ if (err < 0) {
+ DPRINTF("%s: Failed to change CP0_CONFIG5 (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC,
+ &env->CP0_ErrorEPC);
+ if (err < 0) {
+ DPRINTF("%s: Failed to put CP0_ERROREPC (%d)\n", __func__, err);
+ ret = err;
+ }
+
+ return ret;
+}
+
+static int kvm_mips_get_cp0_registers(CPUState *cs)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+ int err, ret = 0;
+
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_INDEX, &env->CP0_Index);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_INDEX (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_CONTEXT,
+ &env->CP0_Context);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_CONTEXT (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_USERLOCAL,
+ &env->active_tc.CP0_UserLocal);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_USERLOCAL (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PAGEMASK,
+ &env->CP0_PageMask);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_PAGEMASK (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_WIRED, &env->CP0_Wired);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_WIRED (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_HWRENA, &env->CP0_HWREna);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_HWRENA (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_BADVADDR,
+ &env->CP0_BadVAddr);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_BADVADDR (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ENTRYHI,
+ &env->CP0_EntryHi);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_ENTRYHI (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_COMPARE,
+ &env->CP0_Compare);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_COMPARE (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_STATUS, &env->CP0_Status);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_STATUS (%d)\n", __func__, err);
+ ret = err;
+ }
+
+ /* If VM clock stopped then state was already saved when it was stopped */
+ if (runstate_is_running()) {
+ err = kvm_mips_save_count(cs);
+ if (err < 0) {
+ ret = err;
+ }
+ }
+
+ err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_EPC, &env->CP0_EPC);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_EPC (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_PRID (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG, &env->CP0_Config0);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_CONFIG (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1, &env->CP0_Config1);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_CONFIG1 (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2, &env->CP0_Config2);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_CONFIG2 (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3, &env->CP0_Config3);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_CONFIG3 (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4, &env->CP0_Config4);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_CONFIG4 (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5, &env->CP0_Config5);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_CONFIG5 (%d)\n", __func__, err);
+ ret = err;
+ }
+ err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC,
+ &env->CP0_ErrorEPC);
+ if (err < 0) {
+ DPRINTF("%s: Failed to get CP0_ERROREPC (%d)\n", __func__, err);
+ ret = err;
+ }
+
+ return ret;
+}
+
+int kvm_arch_put_registers(CPUState *cs, int level)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+ struct kvm_regs regs;
+ int ret;
+ int i;
+
+ /* Set the registers based on QEMU's view of things */
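+    /* struct kvm_regs always holds 64-bit values, so sign-extend on 32-bit */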
+ for (i = 0; i < 32; i++) {
+ regs.gpr[i] = (int64_t)(target_long)env->active_tc.gpr[i];
+ }
+
+ regs.hi = (int64_t)(target_long)env->active_tc.HI[0];
+ regs.lo = (int64_t)(target_long)env->active_tc.LO[0];
+ regs.pc = (int64_t)(target_long)env->active_tc.PC;
+
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
+
+ if (ret < 0) {
+ return ret;
+ }
+
+ ret = kvm_mips_put_cp0_registers(cs, level);
+ if (ret < 0) {
+ return ret;
+ }
+
+ ret = kvm_mips_put_fpu_registers(cs, level);
+ if (ret < 0) {
+ return ret;
+ }
+
+ return ret;
+}
+
+int kvm_arch_get_registers(CPUState *cs)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+ int ret = 0;
+ struct kvm_regs regs;
+ int i;
+
+    /* Get the current register set as KVM sees it */
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
+
+ if (ret < 0) {
+ return ret;
+ }
+
+ for (i = 0; i < 32; i++) {
+ env->active_tc.gpr[i] = regs.gpr[i];
+ }
+
+ env->active_tc.HI[0] = regs.hi;
+ env->active_tc.LO[0] = regs.lo;
+ env->active_tc.PC = regs.pc;
+
+ kvm_mips_get_cp0_registers(cs);
+ kvm_mips_get_fpu_registers(cs);
+
+ return ret;
+}
+
+int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
+ uint64_t address, uint32_t data, PCIDevice *dev)
+{
+ return 0;
+}
+
+int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
+ int vector, PCIDevice *dev)
+{
+ return 0;
+}
+
+int kvm_arch_release_virq_post(int virq)
+{
+ return 0;
+}
+
+int kvm_arch_msi_data_to_gsi(uint32_t data)
+{
+ abort();
+}
diff --git a/target/mips/kvm_mips.h b/target/mips/kvm_mips.h
new file mode 100644
index 0000000000..ae957f37f0
--- /dev/null
+++ b/target/mips/kvm_mips.h
@@ -0,0 +1,26 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: MIPS specific KVM APIs
+ *
+ * Copyright (C) 2012-2014 Imagination Technologies Ltd.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+*/
+
+#ifndef KVM_MIPS_H
+#define KVM_MIPS_H
+
+/**
+ * kvm_mips_reset_vcpu:
+ * @cpu: MIPSCPU
+ *
+ * Called at reset time to set kernel registers to their initial values.
+ */
+void kvm_mips_reset_vcpu(MIPSCPU *cpu);
+
+int kvm_mips_set_interrupt(MIPSCPU *cpu, int irq, int level);
+int kvm_mips_set_ipi_interrupt(MIPSCPU *cpu, int irq, int level);
+
+#endif /* KVM_MIPS_H */
diff --git a/target/mips/lmi_helper.c b/target/mips/lmi_helper.c
new file mode 100644
index 0000000000..fb1245b39d
--- /dev/null
+++ b/target/mips/lmi_helper.c
@@ -0,0 +1,745 @@
+/*
+ * Loongson Multimedia Instruction emulation helpers for QEMU.
+ *
+ * Copyright (c) 2011 Richard Henderson <rth@twiddle.net>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+
+/* If the byte ordering doesn't matter, i.e. all columns are treated
+ identically, then this union can be used directly. If byte ordering
+ does matter, we generally ignore dumping to memory. */
+typedef union {
+ uint8_t ub[8];
+ int8_t sb[8];
+ uint16_t uh[4];
+ int16_t sh[4];
+ uint32_t uw[2];
+ int32_t sw[2];
+ uint64_t d;
+} LMIValue;
+
+/* Some byte ordering issues can be mitigated by XORing in the following. */
+#ifdef HOST_WORDS_BIGENDIAN
+# define BYTE_ORDER_XOR(N) N
+#else
+# define BYTE_ORDER_XOR(N) 0
+#endif
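+/*
+ * On big-endian hosts the union's array elements are numbered from the most
+ * significant end, so indices are XORed with 3 (halfwords) or 7 (bytes) to
+ * keep index 0 meaning the least significant lane.
+ */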
+
+#define SATSB(x) (x < -0x80 ? -0x80 : x > 0x7f ? 0x7f : x)
+#define SATUB(x) (x > 0xff ? 0xff : x)
+
+#define SATSH(x) (x < -0x8000 ? -0x8000 : x > 0x7fff ? 0x7fff : x)
+#define SATUH(x) (x > 0xffff ? 0xffff : x)
+
+#define SATSW(x) \
+ (x < -0x80000000ll ? -0x80000000ll : x > 0x7fffffff ? 0x7fffffff : x)
+#define SATUW(x) (x > 0xffffffffull ? 0xffffffffull : x)
+
+uint64_t helper_paddsb(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 8; ++i) {
+ int r = vs.sb[i] + vt.sb[i];
+ vs.sb[i] = SATSB(r);
+ }
+ return vs.d;
+}
+
+uint64_t helper_paddusb(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 8; ++i) {
+ int r = vs.ub[i] + vt.ub[i];
+ vs.ub[i] = SATUB(r);
+ }
+ return vs.d;
+}
+
+uint64_t helper_paddsh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; ++i) {
+ int r = vs.sh[i] + vt.sh[i];
+ vs.sh[i] = SATSH(r);
+ }
+ return vs.d;
+}
+
+uint64_t helper_paddush(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; ++i) {
+ int r = vs.uh[i] + vt.uh[i];
+ vs.uh[i] = SATUH(r);
+ }
+ return vs.d;
+}
+
+uint64_t helper_paddb(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 8; ++i) {
+ vs.ub[i] += vt.ub[i];
+ }
+ return vs.d;
+}
+
+uint64_t helper_paddh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; ++i) {
+ vs.uh[i] += vt.uh[i];
+ }
+ return vs.d;
+}
+
+uint64_t helper_paddw(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 2; ++i) {
+ vs.uw[i] += vt.uw[i];
+ }
+ return vs.d;
+}
+
+uint64_t helper_psubsb(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 8; ++i) {
+ int r = vs.sb[i] - vt.sb[i];
+ vs.sb[i] = SATSB(r);
+ }
+ return vs.d;
+}
+
+uint64_t helper_psubusb(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 8; ++i) {
+ int r = vs.ub[i] - vt.ub[i];
+ vs.ub[i] = SATUB(r);
+ }
+ return vs.d;
+}
+
+uint64_t helper_psubsh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; ++i) {
+ int r = vs.sh[i] - vt.sh[i];
+ vs.sh[i] = SATSH(r);
+ }
+ return vs.d;
+}
+
+uint64_t helper_psubush(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; ++i) {
+ int r = vs.uh[i] - vt.uh[i];
+ vs.uh[i] = SATUH(r);
+ }
+ return vs.d;
+}
+
+uint64_t helper_psubb(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 8; ++i) {
+ vs.ub[i] -= vt.ub[i];
+ }
+ return vs.d;
+}
+
+uint64_t helper_psubh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; ++i) {
+ vs.uh[i] -= vt.uh[i];
+ }
+ return vs.d;
+}
+
+uint64_t helper_psubw(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 2; ++i) {
+ vs.uw[i] -= vt.uw[i];
+ }
+ return vs.d;
+}
+
+uint64_t helper_pshufh(uint64_t fs, uint64_t ft)
+{
+ unsigned host = BYTE_ORDER_XOR(3);
+ LMIValue vd, vs;
+ unsigned i;
+
+ vs.d = fs;
+ vd.d = 0;
+ for (i = 0; i < 4; i++, ft >>= 2) {
+ vd.uh[i ^ host] = vs.uh[(ft & 3) ^ host];
+ }
+ return vd.d;
+}
+
+uint64_t helper_packsswh(uint64_t fs, uint64_t ft)
+{
+ uint64_t fd = 0;
+ int64_t tmp;
+
+ tmp = (int32_t)(fs >> 0);
+ tmp = SATSH(tmp);
+ fd |= (tmp & 0xffff) << 0;
+
+ tmp = (int32_t)(fs >> 32);
+ tmp = SATSH(tmp);
+ fd |= (tmp & 0xffff) << 16;
+
+ tmp = (int32_t)(ft >> 0);
+ tmp = SATSH(tmp);
+ fd |= (tmp & 0xffff) << 32;
+
+ tmp = (int32_t)(ft >> 32);
+ tmp = SATSH(tmp);
+ fd |= (tmp & 0xffff) << 48;
+
+ return fd;
+}
+
+uint64_t helper_packsshb(uint64_t fs, uint64_t ft)
+{
+ uint64_t fd = 0;
+ unsigned int i;
+
+ for (i = 0; i < 4; ++i) {
+ int16_t tmp = fs >> (i * 16);
+ tmp = SATSB(tmp);
+ fd |= (uint64_t)(tmp & 0xff) << (i * 8);
+ }
+ for (i = 0; i < 4; ++i) {
+ int16_t tmp = ft >> (i * 16);
+ tmp = SATSB(tmp);
+ fd |= (uint64_t)(tmp & 0xff) << (i * 8 + 32);
+ }
+
+ return fd;
+}
+
+uint64_t helper_packushb(uint64_t fs, uint64_t ft)
+{
+ uint64_t fd = 0;
+ unsigned int i;
+
+ for (i = 0; i < 4; ++i) {
+ int16_t tmp = fs >> (i * 16);
+ tmp = SATUB(tmp);
+ fd |= (uint64_t)(tmp & 0xff) << (i * 8);
+ }
+ for (i = 0; i < 4; ++i) {
+ int16_t tmp = ft >> (i * 16);
+ tmp = SATUB(tmp);
+ fd |= (uint64_t)(tmp & 0xff) << (i * 8 + 32);
+ }
+
+ return fd;
+}
+
+uint64_t helper_punpcklwd(uint64_t fs, uint64_t ft)
+{
+ return (fs & 0xffffffff) | (ft << 32);
+}
+
+uint64_t helper_punpckhwd(uint64_t fs, uint64_t ft)
+{
+ return (fs >> 32) | (ft & ~0xffffffffull);
+}
+
+uint64_t helper_punpcklhw(uint64_t fs, uint64_t ft)
+{
+ unsigned host = BYTE_ORDER_XOR(3);
+ LMIValue vd, vs, vt;
+
+ vs.d = fs;
+ vt.d = ft;
+ vd.uh[0 ^ host] = vs.uh[0 ^ host];
+ vd.uh[1 ^ host] = vt.uh[0 ^ host];
+ vd.uh[2 ^ host] = vs.uh[1 ^ host];
+ vd.uh[3 ^ host] = vt.uh[1 ^ host];
+
+ return vd.d;
+}
+
+uint64_t helper_punpckhhw(uint64_t fs, uint64_t ft)
+{
+ unsigned host = BYTE_ORDER_XOR(3);
+ LMIValue vd, vs, vt;
+
+ vs.d = fs;
+ vt.d = ft;
+ vd.uh[0 ^ host] = vs.uh[2 ^ host];
+ vd.uh[1 ^ host] = vt.uh[2 ^ host];
+ vd.uh[2 ^ host] = vs.uh[3 ^ host];
+ vd.uh[3 ^ host] = vt.uh[3 ^ host];
+
+ return vd.d;
+}
+
+uint64_t helper_punpcklbh(uint64_t fs, uint64_t ft)
+{
+ unsigned host = BYTE_ORDER_XOR(7);
+ LMIValue vd, vs, vt;
+
+ vs.d = fs;
+ vt.d = ft;
+ vd.ub[0 ^ host] = vs.ub[0 ^ host];
+ vd.ub[1 ^ host] = vt.ub[0 ^ host];
+ vd.ub[2 ^ host] = vs.ub[1 ^ host];
+ vd.ub[3 ^ host] = vt.ub[1 ^ host];
+ vd.ub[4 ^ host] = vs.ub[2 ^ host];
+ vd.ub[5 ^ host] = vt.ub[2 ^ host];
+ vd.ub[6 ^ host] = vs.ub[3 ^ host];
+ vd.ub[7 ^ host] = vt.ub[3 ^ host];
+
+ return vd.d;
+}
+
+uint64_t helper_punpckhbh(uint64_t fs, uint64_t ft)
+{
+ unsigned host = BYTE_ORDER_XOR(7);
+ LMIValue vd, vs, vt;
+
+ vs.d = fs;
+ vt.d = ft;
+ vd.ub[0 ^ host] = vs.ub[4 ^ host];
+ vd.ub[1 ^ host] = vt.ub[4 ^ host];
+ vd.ub[2 ^ host] = vs.ub[5 ^ host];
+ vd.ub[3 ^ host] = vt.ub[5 ^ host];
+ vd.ub[4 ^ host] = vs.ub[6 ^ host];
+ vd.ub[5 ^ host] = vt.ub[6 ^ host];
+ vd.ub[6 ^ host] = vs.ub[7 ^ host];
+ vd.ub[7 ^ host] = vt.ub[7 ^ host];
+
+ return vd.d;
+}
+
+uint64_t helper_pavgh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; i++) {
+ vs.uh[i] = (vs.uh[i] + vt.uh[i] + 1) >> 1;
+ }
+ return vs.d;
+}
+
+uint64_t helper_pavgb(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 8; i++) {
+ vs.ub[i] = (vs.ub[i] + vt.ub[i] + 1) >> 1;
+ }
+ return vs.d;
+}
+
+uint64_t helper_pmaxsh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; i++) {
+ vs.sh[i] = (vs.sh[i] >= vt.sh[i] ? vs.sh[i] : vt.sh[i]);
+ }
+ return vs.d;
+}
+
+uint64_t helper_pminsh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; i++) {
+ vs.sh[i] = (vs.sh[i] <= vt.sh[i] ? vs.sh[i] : vt.sh[i]);
+ }
+ return vs.d;
+}
+
+uint64_t helper_pmaxub(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+    for (i = 0; i < 8; i++) {
+ vs.ub[i] = (vs.ub[i] >= vt.ub[i] ? vs.ub[i] : vt.ub[i]);
+ }
+ return vs.d;
+}
+
+uint64_t helper_pminub(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+    for (i = 0; i < 8; i++) {
+ vs.ub[i] = (vs.ub[i] <= vt.ub[i] ? vs.ub[i] : vt.ub[i]);
+ }
+ return vs.d;
+}
+
+uint64_t helper_pcmpeqw(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 2; i++) {
+ vs.uw[i] = -(vs.uw[i] == vt.uw[i]);
+ }
+ return vs.d;
+}
+
+uint64_t helper_pcmpgtw(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 2; i++) {
+ vs.uw[i] = -(vs.uw[i] > vt.uw[i]);
+ }
+ return vs.d;
+}
+
+uint64_t helper_pcmpeqh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; i++) {
+ vs.uh[i] = -(vs.uh[i] == vt.uh[i]);
+ }
+ return vs.d;
+}
+
+uint64_t helper_pcmpgth(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; i++) {
+ vs.uh[i] = -(vs.uh[i] > vt.uh[i]);
+ }
+ return vs.d;
+}
+
+uint64_t helper_pcmpeqb(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 8; i++) {
+ vs.ub[i] = -(vs.ub[i] == vt.ub[i]);
+ }
+ return vs.d;
+}
+
+uint64_t helper_pcmpgtb(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 8; i++) {
+ vs.ub[i] = -(vs.ub[i] > vt.ub[i]);
+ }
+ return vs.d;
+}
+
+uint64_t helper_psllw(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs;
+ unsigned i;
+
+ ft &= 0x7f;
+ if (ft > 31) {
+ return 0;
+ }
+ vs.d = fs;
+ for (i = 0; i < 2; ++i) {
+ vs.uw[i] <<= ft;
+ }
+ return vs.d;
+}
+
+uint64_t helper_psrlw(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs;
+ unsigned i;
+
+ ft &= 0x7f;
+ if (ft > 31) {
+ return 0;
+ }
+ vs.d = fs;
+ for (i = 0; i < 2; ++i) {
+ vs.uw[i] >>= ft;
+ }
+ return vs.d;
+}
+
+uint64_t helper_psraw(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs;
+ unsigned i;
+
+ ft &= 0x7f;
+ if (ft > 31) {
+ ft = 31;
+ }
+ vs.d = fs;
+ for (i = 0; i < 2; ++i) {
+ vs.sw[i] >>= ft;
+ }
+ return vs.d;
+}
+
+uint64_t helper_psllh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs;
+ unsigned i;
+
+ ft &= 0x7f;
+ if (ft > 15) {
+ return 0;
+ }
+ vs.d = fs;
+ for (i = 0; i < 4; ++i) {
+ vs.uh[i] <<= ft;
+ }
+ return vs.d;
+}
+
+uint64_t helper_psrlh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs;
+ unsigned i;
+
+ ft &= 0x7f;
+ if (ft > 15) {
+ return 0;
+ }
+ vs.d = fs;
+ for (i = 0; i < 4; ++i) {
+ vs.uh[i] >>= ft;
+ }
+ return vs.d;
+}
+
+uint64_t helper_psrah(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs;
+ unsigned i;
+
+ ft &= 0x7f;
+ if (ft > 15) {
+ ft = 15;
+ }
+ vs.d = fs;
+ for (i = 0; i < 4; ++i) {
+ vs.sh[i] >>= ft;
+ }
+ return vs.d;
+}
+
+uint64_t helper_pmullh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; ++i) {
+ vs.sh[i] *= vt.sh[i];
+ }
+ return vs.d;
+}
+
+uint64_t helper_pmulhh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; ++i) {
+ int32_t r = vs.sh[i] * vt.sh[i];
+ vs.sh[i] = r >> 16;
+ }
+ return vs.d;
+}
+
+uint64_t helper_pmulhuh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; ++i) {
+ uint32_t r = vs.uh[i] * vt.uh[i];
+ vs.uh[i] = r >> 16;
+ }
+ return vs.d;
+}
+
+uint64_t helper_pmaddhw(uint64_t fs, uint64_t ft)
+{
+ unsigned host = BYTE_ORDER_XOR(3);
+ LMIValue vs, vt;
+ uint32_t p0, p1;
+
+ vs.d = fs;
+ vt.d = ft;
+ p0 = vs.sh[0 ^ host] * vt.sh[0 ^ host];
+ p0 += vs.sh[1 ^ host] * vt.sh[1 ^ host];
+ p1 = vs.sh[2 ^ host] * vt.sh[2 ^ host];
+ p1 += vs.sh[3 ^ host] * vt.sh[3 ^ host];
+
+ return ((uint64_t)p1 << 32) | p0;
+}
+
+uint64_t helper_pasubub(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 8; ++i) {
+ int r = vs.ub[i] - vt.ub[i];
+ vs.ub[i] = (r < 0 ? -r : r);
+ }
+ return vs.d;
+}
+
+uint64_t helper_biadd(uint64_t fs)
+{
+ unsigned i, fd;
+
+ for (i = fd = 0; i < 8; ++i) {
+ fd += (fs >> (i * 8)) & 0xff;
+ }
+ return fd & 0xffff;
+}
+
+uint64_t helper_pmovmskb(uint64_t fs)
+{
+ unsigned fd = 0;
+
+ fd |= ((fs >> 7) & 1) << 0;
+ fd |= ((fs >> 15) & 1) << 1;
+ fd |= ((fs >> 23) & 1) << 2;
+ fd |= ((fs >> 31) & 1) << 3;
+ fd |= ((fs >> 39) & 1) << 4;
+ fd |= ((fs >> 47) & 1) << 5;
+ fd |= ((fs >> 55) & 1) << 6;
+ fd |= ((fs >> 63) & 1) << 7;
+
+ return fd & 0xff;
+}
diff --git a/target/mips/machine.c b/target/mips/machine.c
new file mode 100644
index 0000000000..d20d948457
--- /dev/null
+++ b/target/mips/machine.c
@@ -0,0 +1,302 @@
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "hw/hw.h"
+#include "migration/cpu.h"
+
+static int cpu_post_load(void *opaque, int version_id)
+{
+ MIPSCPU *cpu = opaque;
+ CPUMIPSState *env = &cpu->env;
+
+ restore_fp_status(env);
+ restore_msa_fp_status(env);
+ compute_hflags(env);
+ restore_pamask(env);
+
+ return 0;
+}
+
+/* FPU state */
+
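+/*
+ * Each fpr_t is migrated as a full MSA vector register (MSA_WRLEN bits); the
+ * scalar FPU view of the register is its least significant doubleword.
+ */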
+static int get_fpr(QEMUFile *f, void *pv, size_t size)
+{
+ int i;
+ fpr_t *v = pv;
+ /* Restore entire MSA vector register */
+ for (i = 0; i < MSA_WRLEN/64; i++) {
+ qemu_get_sbe64s(f, &v->wr.d[i]);
+ }
+ return 0;
+}
+
+static void put_fpr(QEMUFile *f, void *pv, size_t size)
+{
+ int i;
+ fpr_t *v = pv;
+ /* Save entire MSA vector register */
+ for (i = 0; i < MSA_WRLEN/64; i++) {
+ qemu_put_sbe64s(f, &v->wr.d[i]);
+ }
+}
+
+const VMStateInfo vmstate_info_fpr = {
+ .name = "fpr",
+ .get = get_fpr,
+ .put = put_fpr,
+};
+
+#define VMSTATE_FPR_ARRAY_V(_f, _s, _n, _v) \
+ VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_fpr, fpr_t)
+
+#define VMSTATE_FPR_ARRAY(_f, _s, _n) \
+ VMSTATE_FPR_ARRAY_V(_f, _s, _n, 0)
+
+static VMStateField vmstate_fpu_fields[] = {
+ VMSTATE_FPR_ARRAY(fpr, CPUMIPSFPUContext, 32),
+ VMSTATE_UINT32(fcr0, CPUMIPSFPUContext),
+ VMSTATE_UINT32(fcr31, CPUMIPSFPUContext),
+ VMSTATE_END_OF_LIST()
+};
+
+const VMStateDescription vmstate_fpu = {
+ .name = "cpu/fpu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = vmstate_fpu_fields
+};
+
+const VMStateDescription vmstate_inactive_fpu = {
+ .name = "cpu/inactive_fpu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = vmstate_fpu_fields
+};
+
+/* TC state */
+
+static VMStateField vmstate_tc_fields[] = {
+ VMSTATE_UINTTL_ARRAY(gpr, TCState, 32),
+ VMSTATE_UINTTL(PC, TCState),
+ VMSTATE_UINTTL_ARRAY(HI, TCState, MIPS_DSP_ACC),
+ VMSTATE_UINTTL_ARRAY(LO, TCState, MIPS_DSP_ACC),
+ VMSTATE_UINTTL_ARRAY(ACX, TCState, MIPS_DSP_ACC),
+ VMSTATE_UINTTL(DSPControl, TCState),
+ VMSTATE_INT32(CP0_TCStatus, TCState),
+ VMSTATE_INT32(CP0_TCBind, TCState),
+ VMSTATE_UINTTL(CP0_TCHalt, TCState),
+ VMSTATE_UINTTL(CP0_TCContext, TCState),
+ VMSTATE_UINTTL(CP0_TCSchedule, TCState),
+ VMSTATE_UINTTL(CP0_TCScheFBack, TCState),
+ VMSTATE_INT32(CP0_Debug_tcstatus, TCState),
+ VMSTATE_UINTTL(CP0_UserLocal, TCState),
+ VMSTATE_INT32(msacsr, TCState),
+ VMSTATE_END_OF_LIST()
+};
+
+const VMStateDescription vmstate_tc = {
+ .name = "cpu/tc",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = vmstate_tc_fields
+};
+
+const VMStateDescription vmstate_inactive_tc = {
+ .name = "cpu/inactive_tc",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = vmstate_tc_fields
+};
+
+/* MVP state */
+
+const VMStateDescription vmstate_mvp = {
+ .name = "cpu/mvp",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_INT32(CP0_MVPControl, CPUMIPSMVPContext),
+ VMSTATE_INT32(CP0_MVPConf0, CPUMIPSMVPContext),
+ VMSTATE_INT32(CP0_MVPConf1, CPUMIPSMVPContext),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+/* TLB state */
+
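+/*
+ * The narrow TLB entry fields (EHINV, RI, XI, G, C, V, D) are packed into a
+ * single 16-bit flags word for migration; put_tlb() defines the bit layout.
+ */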
+static int get_tlb(QEMUFile *f, void *pv, size_t size)
+{
+ r4k_tlb_t *v = pv;
+ uint16_t flags;
+
+ qemu_get_betls(f, &v->VPN);
+ qemu_get_be32s(f, &v->PageMask);
+ qemu_get_be16s(f, &v->ASID);
+ qemu_get_be16s(f, &flags);
+ v->G = (flags >> 10) & 1;
+ v->C0 = (flags >> 7) & 3;
+ v->C1 = (flags >> 4) & 3;
+ v->V0 = (flags >> 3) & 1;
+ v->V1 = (flags >> 2) & 1;
+ v->D0 = (flags >> 1) & 1;
+ v->D1 = (flags >> 0) & 1;
+ v->EHINV = (flags >> 15) & 1;
+ v->RI1 = (flags >> 14) & 1;
+ v->RI0 = (flags >> 13) & 1;
+ v->XI1 = (flags >> 12) & 1;
+ v->XI0 = (flags >> 11) & 1;
+ qemu_get_be64s(f, &v->PFN[0]);
+ qemu_get_be64s(f, &v->PFN[1]);
+
+ return 0;
+}
+
+static void put_tlb(QEMUFile *f, void *pv, size_t size)
+{
+ r4k_tlb_t *v = pv;
+
+ uint16_t asid = v->ASID;
+ uint16_t flags = ((v->EHINV << 15) |
+ (v->RI1 << 14) |
+ (v->RI0 << 13) |
+ (v->XI1 << 12) |
+ (v->XI0 << 11) |
+ (v->G << 10) |
+ (v->C0 << 7) |
+ (v->C1 << 4) |
+ (v->V0 << 3) |
+ (v->V1 << 2) |
+ (v->D0 << 1) |
+ (v->D1 << 0));
+
+ qemu_put_betls(f, &v->VPN);
+ qemu_put_be32s(f, &v->PageMask);
+ qemu_put_be16s(f, &asid);
+ qemu_put_be16s(f, &flags);
+ qemu_put_be64s(f, &v->PFN[0]);
+ qemu_put_be64s(f, &v->PFN[1]);
+}
+
+const VMStateInfo vmstate_info_tlb = {
+ .name = "tlb_entry",
+ .get = get_tlb,
+ .put = put_tlb,
+};
+
+#define VMSTATE_TLB_ARRAY_V(_f, _s, _n, _v) \
+ VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_tlb, r4k_tlb_t)
+
+#define VMSTATE_TLB_ARRAY(_f, _s, _n) \
+ VMSTATE_TLB_ARRAY_V(_f, _s, _n, 0)
+
+const VMStateDescription vmstate_tlb = {
+ .name = "cpu/tlb",
+ .version_id = 2,
+ .minimum_version_id = 2,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(nb_tlb, CPUMIPSTLBContext),
+ VMSTATE_UINT32(tlb_in_use, CPUMIPSTLBContext),
+ VMSTATE_TLB_ARRAY(mmu.r4k.tlb, CPUMIPSTLBContext, MIPS_TLB_MAX),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+/* MIPS CPU state */
+
+const VMStateDescription vmstate_mips_cpu = {
+ .name = "cpu",
+ .version_id = 8,
+ .minimum_version_id = 8,
+ .post_load = cpu_post_load,
+ .fields = (VMStateField[]) {
+ /* Active TC */
+ VMSTATE_STRUCT(env.active_tc, MIPSCPU, 1, vmstate_tc, TCState),
+
+ /* Active FPU */
+ VMSTATE_STRUCT(env.active_fpu, MIPSCPU, 1, vmstate_fpu,
+ CPUMIPSFPUContext),
+
+ /* MVP */
+ VMSTATE_STRUCT_POINTER(env.mvp, MIPSCPU, vmstate_mvp,
+ CPUMIPSMVPContext),
+
+ /* TLB */
+ VMSTATE_STRUCT_POINTER(env.tlb, MIPSCPU, vmstate_tlb,
+ CPUMIPSTLBContext),
+
+ /* CPU metastate */
+ VMSTATE_UINT32(env.current_tc, MIPSCPU),
+ VMSTATE_UINT32(env.current_fpu, MIPSCPU),
+ VMSTATE_INT32(env.error_code, MIPSCPU),
+ VMSTATE_UINTTL(env.btarget, MIPSCPU),
+ VMSTATE_UINTTL(env.bcond, MIPSCPU),
+
+ /* Remaining CP0 registers */
+ VMSTATE_INT32(env.CP0_Index, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Random, MIPSCPU),
+ VMSTATE_INT32(env.CP0_VPEControl, MIPSCPU),
+ VMSTATE_INT32(env.CP0_VPEConf0, MIPSCPU),
+ VMSTATE_INT32(env.CP0_VPEConf1, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_YQMask, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_VPESchedule, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_VPEScheFBack, MIPSCPU),
+ VMSTATE_INT32(env.CP0_VPEOpt, MIPSCPU),
+ VMSTATE_UINT64(env.CP0_EntryLo0, MIPSCPU),
+ VMSTATE_UINT64(env.CP0_EntryLo1, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_Context, MIPSCPU),
+ VMSTATE_INT32(env.CP0_PageMask, MIPSCPU),
+ VMSTATE_INT32(env.CP0_PageGrain, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Wired, MIPSCPU),
+ VMSTATE_INT32(env.CP0_SRSConf0, MIPSCPU),
+ VMSTATE_INT32(env.CP0_SRSConf1, MIPSCPU),
+ VMSTATE_INT32(env.CP0_SRSConf2, MIPSCPU),
+ VMSTATE_INT32(env.CP0_SRSConf3, MIPSCPU),
+ VMSTATE_INT32(env.CP0_SRSConf4, MIPSCPU),
+ VMSTATE_INT32(env.CP0_HWREna, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_BadVAddr, MIPSCPU),
+ VMSTATE_UINT32(env.CP0_BadInstr, MIPSCPU),
+ VMSTATE_UINT32(env.CP0_BadInstrP, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Count, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_EntryHi, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Compare, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Status, MIPSCPU),
+ VMSTATE_INT32(env.CP0_IntCtl, MIPSCPU),
+ VMSTATE_INT32(env.CP0_SRSCtl, MIPSCPU),
+ VMSTATE_INT32(env.CP0_SRSMap, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Cause, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_EPC, MIPSCPU),
+ VMSTATE_INT32(env.CP0_PRid, MIPSCPU),
+ VMSTATE_INT32(env.CP0_EBase, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Config0, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Config1, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Config2, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Config3, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Config6, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Config7, MIPSCPU),
+ VMSTATE_UINT64_ARRAY(env.CP0_MAAR, MIPSCPU, MIPS_MAAR_MAX),
+ VMSTATE_INT32(env.CP0_MAARI, MIPSCPU),
+ VMSTATE_UINT64(env.lladdr, MIPSCPU),
+ VMSTATE_UINTTL_ARRAY(env.CP0_WatchLo, MIPSCPU, 8),
+ VMSTATE_INT32_ARRAY(env.CP0_WatchHi, MIPSCPU, 8),
+ VMSTATE_UINTTL(env.CP0_XContext, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Framemask, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Debug, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_DEPC, MIPSCPU),
+ VMSTATE_INT32(env.CP0_Performance0, MIPSCPU),
+ VMSTATE_UINT64(env.CP0_TagLo, MIPSCPU),
+ VMSTATE_INT32(env.CP0_DataLo, MIPSCPU),
+ VMSTATE_INT32(env.CP0_TagHi, MIPSCPU),
+ VMSTATE_INT32(env.CP0_DataHi, MIPSCPU),
+ VMSTATE_UINTTL(env.CP0_ErrorEPC, MIPSCPU),
+ VMSTATE_INT32(env.CP0_DESAVE, MIPSCPU),
+ VMSTATE_UINTTL_ARRAY(env.CP0_KScratch, MIPSCPU, MIPS_KSCRATCH_NUM),
+
+ /* Inactive TC */
+ VMSTATE_STRUCT_ARRAY(env.tcs, MIPSCPU, MIPS_SHADOW_SET_MAX, 1,
+ vmstate_inactive_tc, TCState),
+ VMSTATE_STRUCT_ARRAY(env.fpus, MIPSCPU, MIPS_FPU_MAX, 1,
+ vmstate_inactive_fpu, CPUMIPSFPUContext),
+
+ VMSTATE_END_OF_LIST()
+ },
+};
diff --git a/target/mips/mips-defs.h b/target/mips/mips-defs.h
new file mode 100644
index 0000000000..047554ee45
--- /dev/null
+++ b/target/mips/mips-defs.h
@@ -0,0 +1,91 @@
+#ifndef QEMU_MIPS_DEFS_H
+#define QEMU_MIPS_DEFS_H
+
+/* If we want to use host float regs... */
+//#define USE_HOST_FLOAT_REGS
+
+/* Real pages are variable size... */
+#define TARGET_PAGE_BITS 12
+#define MIPS_TLB_MAX 128
+
+#if defined(TARGET_MIPS64)
+#define TARGET_LONG_BITS 64
+#define TARGET_PHYS_ADDR_SPACE_BITS 48
+#define TARGET_VIRT_ADDR_SPACE_BITS 48
+#else
+#define TARGET_LONG_BITS 32
+#define TARGET_PHYS_ADDR_SPACE_BITS 40
+#define TARGET_VIRT_ADDR_SPACE_BITS 32
+#endif
+
+/* Masks used to mark instructions to indicate which ISA level they
+ were introduced in. */
+#define ISA_MIPS1 0x00000001
+#define ISA_MIPS2 0x00000002
+#define ISA_MIPS3 0x00000004
+#define ISA_MIPS4 0x00000008
+#define ISA_MIPS5 0x00000010
+#define ISA_MIPS32 0x00000020
+#define ISA_MIPS32R2 0x00000040
+#define ISA_MIPS64 0x00000080
+#define ISA_MIPS64R2 0x00000100
+#define ISA_MIPS32R3 0x00000200
+#define ISA_MIPS64R3 0x00000400
+#define ISA_MIPS32R5 0x00000800
+#define ISA_MIPS64R5 0x00001000
+#define ISA_MIPS32R6 0x00002000
+#define ISA_MIPS64R6 0x00004000
+
+/* MIPS ASEs. */
+#define ASE_MIPS16 0x00010000
+#define ASE_MIPS3D 0x00020000
+#define ASE_MDMX 0x00040000
+#define ASE_DSP 0x00080000
+#define ASE_DSPR2 0x00100000
+#define ASE_MT 0x00200000
+#define ASE_SMARTMIPS 0x00400000
+#define ASE_MICROMIPS 0x00800000
+#define ASE_MSA 0x01000000
+
+/* Chip specific instructions. */
+#define INSN_LOONGSON2E 0x20000000
+#define INSN_LOONGSON2F 0x40000000
+#define INSN_VR54XX 0x80000000
+
+/* MIPS CPU defines. */
+#define CPU_MIPS1 (ISA_MIPS1)
+#define CPU_MIPS2 (CPU_MIPS1 | ISA_MIPS2)
+#define CPU_MIPS3 (CPU_MIPS2 | ISA_MIPS3)
+#define CPU_MIPS4 (CPU_MIPS3 | ISA_MIPS4)
+#define CPU_VR54XX (CPU_MIPS4 | INSN_VR54XX)
+#define CPU_LOONGSON2E (CPU_MIPS3 | INSN_LOONGSON2E)
+#define CPU_LOONGSON2F (CPU_MIPS3 | INSN_LOONGSON2F)
+
+#define CPU_MIPS5 (CPU_MIPS4 | ISA_MIPS5)
+
+/* MIPS Technologies "Release 1" */
+#define CPU_MIPS32 (CPU_MIPS2 | ISA_MIPS32)
+#define CPU_MIPS64 (CPU_MIPS5 | CPU_MIPS32 | ISA_MIPS64)
+
+/* MIPS Technologies "Release 2" */
+#define CPU_MIPS32R2 (CPU_MIPS32 | ISA_MIPS32R2)
+#define CPU_MIPS64R2 (CPU_MIPS64 | CPU_MIPS32R2 | ISA_MIPS64R2)
+
+/* MIPS Technologies "Release 3" */
+#define CPU_MIPS32R3 (CPU_MIPS32R2 | ISA_MIPS32R3)
+#define CPU_MIPS64R3 (CPU_MIPS64R2 | CPU_MIPS32R3 | ISA_MIPS64R3)
+
+/* MIPS Technologies "Release 5" */
+#define CPU_MIPS32R5 (CPU_MIPS32R3 | ISA_MIPS32R5)
+#define CPU_MIPS64R5 (CPU_MIPS64R3 | CPU_MIPS32R5 | ISA_MIPS64R5)
+
+/* MIPS Technologies "Release 6" */
+#define CPU_MIPS32R6 (CPU_MIPS32R5 | ISA_MIPS32R6)
+#define CPU_MIPS64R6 (CPU_MIPS64R5 | CPU_MIPS32R6 | ISA_MIPS64R6)
+
+/* Strictly follow the architecture standard:
+ - Disallow "special" instruction handling for PMON/SPIM.
+ Note that we still maintain Count/Compare to match the host clock. */
+//#define MIPS_STRICT_STANDARD 1
+
+#endif /* QEMU_MIPS_DEFS_H */
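
For reference, the ISA_*, ASE_* and INSN_* values above are independent bit flags, and each cumulative CPU_* definition is simply the OR of every ISA level it includes plus any chip-specific instruction sets, so "does this CPU implement instruction set X" reduces to a single mask test. A minimal standalone sketch of that test (illustrative only, not part of the patch; insn_available() is a made-up name):

/* Illustrative only: how the cumulative CPU_* masks compose and how an
 * instruction's ISA requirement can be checked against a CPU definition. */
#include <stdio.h>

#define ISA_MIPS1    0x00000001
#define ISA_MIPS2    0x00000002
#define ISA_MIPS32   0x00000020
#define ISA_MIPS32R2 0x00000040

#define CPU_MIPS1    (ISA_MIPS1)
#define CPU_MIPS2    (CPU_MIPS1 | ISA_MIPS2)
#define CPU_MIPS32   (CPU_MIPS2 | ISA_MIPS32)
#define CPU_MIPS32R2 (CPU_MIPS32 | ISA_MIPS32R2)

static int insn_available(unsigned int cpu_flags, unsigned int required_isa)
{
    return (cpu_flags & required_isa) != 0;
}

int main(void)
{
    printf("%d\n", insn_available(CPU_MIPS32R2, ISA_MIPS32R2)); /* 1 */
    printf("%d\n", insn_available(CPU_MIPS32, ISA_MIPS32R2));   /* 0 */
    return 0;
}
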
diff --git a/target/mips/mips-semi.c b/target/mips/mips-semi.c
new file mode 100644
index 0000000000..a7aefbaefc
--- /dev/null
+++ b/target/mips/mips-semi.c
@@ -0,0 +1,374 @@
+/*
+ * Unified Hosting Interface syscalls.
+ *
+ * Copyright (c) 2015 Imagination Technologies
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "qemu/log.h"
+#include "exec/helper-proto.h"
+#include "exec/softmmu-semi.h"
+#include "exec/semihost.h"
+
+typedef enum UHIOp {
+ UHI_exit = 1,
+ UHI_open = 2,
+ UHI_close = 3,
+ UHI_read = 4,
+ UHI_write = 5,
+ UHI_lseek = 6,
+ UHI_unlink = 7,
+ UHI_fstat = 8,
+ UHI_argc = 9,
+ UHI_argnlen = 10,
+ UHI_argn = 11,
+ UHI_plog = 13,
+ UHI_assert = 14,
+ UHI_pread = 19,
+ UHI_pwrite = 20,
+ UHI_link = 22
+} UHIOp;
+
+typedef struct UHIStat {
+ int16_t uhi_st_dev;
+ uint16_t uhi_st_ino;
+ uint32_t uhi_st_mode;
+ uint16_t uhi_st_nlink;
+ uint16_t uhi_st_uid;
+ uint16_t uhi_st_gid;
+ int16_t uhi_st_rdev;
+ uint64_t uhi_st_size;
+ uint64_t uhi_st_atime;
+ uint64_t uhi_st_spare1;
+ uint64_t uhi_st_mtime;
+ uint64_t uhi_st_spare2;
+ uint64_t uhi_st_ctime;
+ uint64_t uhi_st_spare3;
+ uint64_t uhi_st_blksize;
+ uint64_t uhi_st_blocks;
+ uint64_t uhi_st_spare4[2];
+} UHIStat;
+
+enum UHIOpenFlags {
+ UHIOpen_RDONLY = 0x0,
+ UHIOpen_WRONLY = 0x1,
+ UHIOpen_RDWR = 0x2,
+ UHIOpen_APPEND = 0x8,
+ UHIOpen_CREAT = 0x200,
+ UHIOpen_TRUNC = 0x400,
+ UHIOpen_EXCL = 0x800
+};
+
+/* Errno values taken from asm-mips/errno.h */
+static uint16_t host_to_mips_errno[] = {
+ [ENAMETOOLONG] = 78,
+#ifdef EOVERFLOW
+ [EOVERFLOW] = 79,
+#endif
+#ifdef ELOOP
+ [ELOOP] = 90,
+#endif
+};
+
+static int errno_mips(int err)
+{
+ if (err < 0 || err >= ARRAY_SIZE(host_to_mips_errno)) {
+ return EINVAL;
+ } else if (host_to_mips_errno[err]) {
+ return host_to_mips_errno[err];
+ } else {
+ return err;
+ }
+}
+
+static int copy_stat_to_target(CPUMIPSState *env, const struct stat *src,
+ target_ulong vaddr)
+{
+ hwaddr len = sizeof(struct UHIStat);
+ UHIStat *dst = lock_user(VERIFY_WRITE, vaddr, len, 0);
+ if (!dst) {
+ errno = EFAULT;
+ return -1;
+ }
+
+ dst->uhi_st_dev = tswap16(src->st_dev);
+ dst->uhi_st_ino = tswap16(src->st_ino);
+ dst->uhi_st_mode = tswap32(src->st_mode);
+ dst->uhi_st_nlink = tswap16(src->st_nlink);
+ dst->uhi_st_uid = tswap16(src->st_uid);
+ dst->uhi_st_gid = tswap16(src->st_gid);
+ dst->uhi_st_rdev = tswap16(src->st_rdev);
+ dst->uhi_st_size = tswap64(src->st_size);
+ dst->uhi_st_atime = tswap64(src->st_atime);
+ dst->uhi_st_mtime = tswap64(src->st_mtime);
+ dst->uhi_st_ctime = tswap64(src->st_ctime);
+#ifdef _WIN32
+ dst->uhi_st_blksize = 0;
+ dst->uhi_st_blocks = 0;
+#else
+ dst->uhi_st_blksize = tswap64(src->st_blksize);
+ dst->uhi_st_blocks = tswap64(src->st_blocks);
+#endif
+ unlock_user(dst, vaddr, len);
+ return 0;
+}
+
+static int get_open_flags(target_ulong target_flags)
+{
+ int open_flags = 0;
+
+ if (target_flags & UHIOpen_RDWR) {
+ open_flags |= O_RDWR;
+ } else if (target_flags & UHIOpen_WRONLY) {
+ open_flags |= O_WRONLY;
+ } else {
+ open_flags |= O_RDONLY;
+ }
+
+ open_flags |= (target_flags & UHIOpen_APPEND) ? O_APPEND : 0;
+ open_flags |= (target_flags & UHIOpen_CREAT) ? O_CREAT : 0;
+ open_flags |= (target_flags & UHIOpen_TRUNC) ? O_TRUNC : 0;
+ open_flags |= (target_flags & UHIOpen_EXCL) ? O_EXCL : 0;
+
+ return open_flags;
+}
+
+static int write_to_file(CPUMIPSState *env, target_ulong fd, target_ulong vaddr,
+ target_ulong len, target_ulong offset)
+{
+ int num_of_bytes;
+ void *dst = lock_user(VERIFY_READ, vaddr, len, 1);
+ if (!dst) {
+ errno = EFAULT;
+ return -1;
+ }
+
+ if (offset) {
+#ifdef _WIN32
+ num_of_bytes = 0;
+#else
+ num_of_bytes = pwrite(fd, dst, len, offset);
+#endif
+ } else {
+ num_of_bytes = write(fd, dst, len);
+ }
+
+ unlock_user(dst, vaddr, 0);
+ return num_of_bytes;
+}
+
+static int read_from_file(CPUMIPSState *env, target_ulong fd,
+ target_ulong vaddr, target_ulong len,
+ target_ulong offset)
+{
+ int num_of_bytes;
+ void *dst = lock_user(VERIFY_WRITE, vaddr, len, 0);
+ if (!dst) {
+ errno = EFAULT;
+ return -1;
+ }
+
+ if (offset) {
+#ifdef _WIN32
+ num_of_bytes = 0;
+#else
+ num_of_bytes = pread(fd, dst, len, offset);
+#endif
+ } else {
+ num_of_bytes = read(fd, dst, len);
+ }
+
+ unlock_user(dst, vaddr, len);
+ return num_of_bytes;
+}
+
+static int copy_argn_to_target(CPUMIPSState *env, int arg_num,
+ target_ulong vaddr)
+{
+ int strsize = strlen(semihosting_get_arg(arg_num)) + 1;
+ char *dst = lock_user(VERIFY_WRITE, vaddr, strsize, 0);
+ if (!dst) {
+ return -1;
+ }
+
+ strcpy(dst, semihosting_get_arg(arg_num));
+
+ unlock_user(dst, vaddr, strsize);
+ return 0;
+}
+
+#define GET_TARGET_STRING(p, addr) \
+ do { \
+ p = lock_user_string(addr); \
+ if (!p) { \
+ gpr[2] = -1; \
+ gpr[3] = EFAULT; \
+ goto uhi_done; \
+ } \
+ } while (0)
+
+#define GET_TARGET_STRINGS_2(p, addr, p2, addr2) \
+ do { \
+ p = lock_user_string(addr); \
+ if (!p) { \
+ gpr[2] = -1; \
+ gpr[3] = EFAULT; \
+ goto uhi_done; \
+ } \
+ p2 = lock_user_string(addr2); \
+ if (!p2) { \
+ unlock_user(p, addr, 0); \
+ gpr[2] = -1; \
+ gpr[3] = EFAULT; \
+ goto uhi_done; \
+ } \
+ } while (0)
+
+#define FREE_TARGET_STRING(p, gpr) \
+ do { \
+ unlock_user(p, gpr, 0); \
+ } while (0)
+
+void helper_do_semihosting(CPUMIPSState *env)
+{
+ target_ulong *gpr = env->active_tc.gpr;
+ const UHIOp op = gpr[25];
+ char *p, *p2;
+
+ switch (op) {
+ case UHI_exit:
+ qemu_log("UHI(%d): exit(%d)\n", op, (int)gpr[4]);
+ exit(gpr[4]);
+ case UHI_open:
+ GET_TARGET_STRING(p, gpr[4]);
+ if (!strcmp("/dev/stdin", p)) {
+ gpr[2] = 0;
+ } else if (!strcmp("/dev/stdout", p)) {
+ gpr[2] = 1;
+ } else if (!strcmp("/dev/stderr", p)) {
+ gpr[2] = 2;
+ } else {
+ gpr[2] = open(p, get_open_flags(gpr[5]), gpr[6]);
+ gpr[3] = errno_mips(errno);
+ }
+ FREE_TARGET_STRING(p, gpr[4]);
+ break;
+ case UHI_close:
+ if (gpr[4] < 3) {
+ /* ignore closing stdin/stdout/stderr */
+ gpr[2] = 0;
+ goto uhi_done;
+ }
+ gpr[2] = close(gpr[4]);
+ gpr[3] = errno_mips(errno);
+ break;
+ case UHI_read:
+ gpr[2] = read_from_file(env, gpr[4], gpr[5], gpr[6], 0);
+ gpr[3] = errno_mips(errno);
+ break;
+ case UHI_write:
+ gpr[2] = write_to_file(env, gpr[4], gpr[5], gpr[6], 0);
+ gpr[3] = errno_mips(errno);
+ break;
+ case UHI_lseek:
+ gpr[2] = lseek(gpr[4], gpr[5], gpr[6]);
+ gpr[3] = errno_mips(errno);
+ break;
+ case UHI_unlink:
+ GET_TARGET_STRING(p, gpr[4]);
+ gpr[2] = remove(p);
+ gpr[3] = errno_mips(errno);
+ FREE_TARGET_STRING(p, gpr[4]);
+ break;
+ case UHI_fstat:
+ {
+ struct stat sbuf;
+ memset(&sbuf, 0, sizeof(sbuf));
+ gpr[2] = fstat(gpr[4], &sbuf);
+ gpr[3] = errno_mips(errno);
+ if (gpr[2]) {
+ goto uhi_done;
+ }
+ gpr[2] = copy_stat_to_target(env, &sbuf, gpr[5]);
+ gpr[3] = errno_mips(errno);
+ }
+ break;
+ case UHI_argc:
+ gpr[2] = semihosting_get_argc();
+ break;
+ case UHI_argnlen:
+ if (gpr[4] >= semihosting_get_argc()) {
+ gpr[2] = -1;
+ goto uhi_done;
+ }
+ gpr[2] = strlen(semihosting_get_arg(gpr[4]));
+ break;
+ case UHI_argn:
+ if (gpr[4] >= semihosting_get_argc()) {
+ gpr[2] = -1;
+ goto uhi_done;
+ }
+ gpr[2] = copy_argn_to_target(env, gpr[4], gpr[5]);
+ break;
+ case UHI_plog:
+ GET_TARGET_STRING(p, gpr[4]);
+ p2 = strstr(p, "%d");
+ if (p2) {
+ int char_num = p2 - p;
+ char *buf = g_malloc(char_num + 1);
+ strncpy(buf, p, char_num);
+ buf[char_num] = '\0';
+ gpr[2] = printf("%s%d%s", buf, (int)gpr[5], p2 + 2);
+ g_free(buf);
+ } else {
+ gpr[2] = printf("%s", p);
+ }
+ FREE_TARGET_STRING(p, gpr[4]);
+ break;
+ case UHI_assert:
+ GET_TARGET_STRINGS_2(p, gpr[4], p2, gpr[5]);
+ printf("assertion '");
+ printf("\"%s\"", p);
+ printf("': file \"%s\", line %d\n", p2, (int)gpr[6]);
+ FREE_TARGET_STRING(p2, gpr[5]);
+ FREE_TARGET_STRING(p, gpr[4]);
+ abort();
+ break;
+ case UHI_pread:
+ gpr[2] = read_from_file(env, gpr[4], gpr[5], gpr[6], gpr[7]);
+ gpr[3] = errno_mips(errno);
+ break;
+ case UHI_pwrite:
+ gpr[2] = write_to_file(env, gpr[4], gpr[5], gpr[6], gpr[7]);
+ gpr[3] = errno_mips(errno);
+ break;
+#ifndef _WIN32
+ case UHI_link:
+ GET_TARGET_STRINGS_2(p, gpr[4], p2, gpr[5]);
+ gpr[2] = link(p, p2);
+ gpr[3] = errno_mips(errno);
+ FREE_TARGET_STRING(p2, gpr[5]);
+ FREE_TARGET_STRING(p, gpr[4]);
+ break;
+#endif
+ default:
+ fprintf(stderr, "Unknown UHI operation %d\n", op);
+ abort();
+ }
+uhi_done:
+ return;
+}
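
The dispatcher above fixes the UHI calling convention as seen from the helper: the operation number arrives in GPR 25 (t9), up to four arguments in GPRs 4-7 (a0-a3), the result is written back to GPR 2 (v0) and a remapped errno to GPR 3 (v1). A standalone mock of that convention (purely illustrative, not QEMU code; mock_uhi() is a made-up stand-in for helper_do_semihosting()):

/* Mock of the UHI register convention: op in gpr[25], args in gpr[4..7],
 * result in gpr[2], errno in gpr[3].  Illustrative only. */
#include <stdio.h>
#include <string.h>

enum { MOCK_UHI_argc = 9 };

static void mock_uhi(long *gpr)
{
    switch (gpr[25]) {
    case MOCK_UHI_argc:
        gpr[2] = 0;          /* no semihosting arguments in this mock */
        break;
    default:
        gpr[2] = -1;         /* unknown operation */
        gpr[3] = 88;         /* some target errno value */
        break;
    }
}

int main(void)
{
    long gpr[32];
    memset(gpr, 0, sizeof(gpr));
    gpr[25] = MOCK_UHI_argc;           /* $25 (t9): operation code */
    mock_uhi(gpr);
    printf("result=%ld errno=%ld\n", gpr[2], gpr[3]);
    return 0;
}
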
diff --git a/target/mips/msa_helper.c b/target/mips/msa_helper.c
new file mode 100644
index 0000000000..1fdb0d9792
--- /dev/null
+++ b/target/mips/msa_helper.c
@@ -0,0 +1,3453 @@
+/*
+ * MIPS SIMD Architecture Module Instruction emulation helpers for QEMU.
+ *
+ * Copyright (c) 2014 Imagination Technologies
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+
+/* Data format min and max values */
+#define DF_BITS(df) (1 << ((df) + 3))
+
+#define DF_MAX_INT(df) (int64_t)((1LL << (DF_BITS(df) - 1)) - 1)
+#define M_MAX_INT(m) (int64_t)((1LL << ((m) - 1)) - 1)
+
+#define DF_MIN_INT(df) (int64_t)(-(1LL << (DF_BITS(df) - 1)))
+#define M_MIN_INT(m) (int64_t)(-(1LL << ((m) - 1)))
+
+#define DF_MAX_UINT(df) (uint64_t)(-1ULL >> (64 - DF_BITS(df)))
+#define M_MAX_UINT(m) (uint64_t)(-1ULL >> (64 - (m)))
+
+#define UNSIGNED(x, df) ((x) & DF_MAX_UINT(df))
+#define SIGNED(x, df) \
+ ((((int64_t)x) << (64 - DF_BITS(df))) >> (64 - DF_BITS(df)))
+
+/* Element-by-element access macros */
+#define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df))
+
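For concrete numbers: MSA vector registers are 128 bits wide (MSA_WRLEN, defined elsewhere in the MIPS headers, is 128) and the data formats DF_BYTE..DF_DOUBLE are encoded as 0..3, so DF_BITS evaluates to 8/16/32/64 and DF_ELEMENTS to 16/8/4/2 elements per register. A standalone sanity check under those assumptions (not part of the patch):

/* Standalone check of the DF_* arithmetic, assuming MSA_WRLEN == 128 and
 * DF_BYTE..DF_DOUBLE encoded as 0..3 (as in the QEMU MIPS headers). */
#define MSA_WRLEN       128
#define DF_BITS(df)     (1 << ((df) + 3))
#define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df))

enum { DF_BYTE, DF_HALF, DF_WORD, DF_DOUBLE };

_Static_assert(DF_BITS(DF_BYTE) == 8  && DF_ELEMENTS(DF_BYTE) == 16, "byte");
_Static_assert(DF_BITS(DF_HALF) == 16 && DF_ELEMENTS(DF_HALF) == 8,  "half");
_Static_assert(DF_BITS(DF_WORD) == 32 && DF_ELEMENTS(DF_WORD) == 4,  "word");
_Static_assert(DF_BITS(DF_DOUBLE) == 64 && DF_ELEMENTS(DF_DOUBLE) == 2, "double");
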
+static inline void msa_move_v(wr_t *pwd, wr_t *pws)
+{
+ uint32_t i;
+
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ pwd->d[i] = pws->d[i];
+ }
+}
+
+#define MSA_FN_IMM8(FUNC, DEST, OPERATION) \
+void helper_msa_ ## FUNC(CPUMIPSState *env, uint32_t wd, uint32_t ws, \
+ uint32_t i8) \
+{ \
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
+ uint32_t i; \
+ for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
+ DEST = OPERATION; \
+ } \
+}
+
+MSA_FN_IMM8(andi_b, pwd->b[i], pws->b[i] & i8)
+MSA_FN_IMM8(ori_b, pwd->b[i], pws->b[i] | i8)
+MSA_FN_IMM8(nori_b, pwd->b[i], ~(pws->b[i] | i8))
+MSA_FN_IMM8(xori_b, pwd->b[i], pws->b[i] ^ i8)
+
+#define BIT_MOVE_IF_NOT_ZERO(dest, arg1, arg2, df) \
+ UNSIGNED(((dest & (~arg2)) | (arg1 & arg2)), df)
+MSA_FN_IMM8(bmnzi_b, pwd->b[i],
+ BIT_MOVE_IF_NOT_ZERO(pwd->b[i], pws->b[i], i8, DF_BYTE))
+
+#define BIT_MOVE_IF_ZERO(dest, arg1, arg2, df) \
+ UNSIGNED((dest & arg2) | (arg1 & (~arg2)), df)
+MSA_FN_IMM8(bmzi_b, pwd->b[i],
+ BIT_MOVE_IF_ZERO(pwd->b[i], pws->b[i], i8, DF_BYTE))
+
+#define BIT_SELECT(dest, arg1, arg2, df) \
+ UNSIGNED((arg1 & (~dest)) | (arg2 & dest), df)
+MSA_FN_IMM8(bseli_b, pwd->b[i],
+ BIT_SELECT(pwd->b[i], pws->b[i], i8, DF_BYTE))
+
+#undef MSA_FN_IMM8
+
+#define SHF_POS(i, imm) (((i) & 0xfc) + (((imm) >> (2 * ((i) & 0x03))) & 0x03))
+
+void helper_msa_shf_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t imm)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t wx, *pwx = &wx;
+ uint32_t i;
+
+ switch (df) {
+ case DF_BYTE:
+ for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) {
+ pwx->b[i] = pws->b[SHF_POS(i, imm)];
+ }
+ break;
+ case DF_HALF:
+ for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) {
+ pwx->h[i] = pws->h[SHF_POS(i, imm)];
+ }
+ break;
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ pwx->w[i] = pws->w[SHF_POS(i, imm)];
+ }
+ break;
+ default:
+ assert(0);
+ }
+ msa_move_v(pwd, pwx);
+}
+
+#define MSA_FN_VECTOR(FUNC, DEST, OPERATION) \
+void helper_msa_ ## FUNC(CPUMIPSState *env, uint32_t wd, uint32_t ws, \
+ uint32_t wt) \
+{ \
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \
+ uint32_t i; \
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
+ DEST = OPERATION; \
+ } \
+}
+
+MSA_FN_VECTOR(and_v, pwd->d[i], pws->d[i] & pwt->d[i])
+MSA_FN_VECTOR(or_v, pwd->d[i], pws->d[i] | pwt->d[i])
+MSA_FN_VECTOR(nor_v, pwd->d[i], ~(pws->d[i] | pwt->d[i]))
+MSA_FN_VECTOR(xor_v, pwd->d[i], pws->d[i] ^ pwt->d[i])
+MSA_FN_VECTOR(bmnz_v, pwd->d[i],
+ BIT_MOVE_IF_NOT_ZERO(pwd->d[i], pws->d[i], pwt->d[i], DF_DOUBLE))
+MSA_FN_VECTOR(bmz_v, pwd->d[i],
+ BIT_MOVE_IF_ZERO(pwd->d[i], pws->d[i], pwt->d[i], DF_DOUBLE))
+MSA_FN_VECTOR(bsel_v, pwd->d[i],
+ BIT_SELECT(pwd->d[i], pws->d[i], pwt->d[i], DF_DOUBLE))
+#undef BIT_MOVE_IF_NOT_ZERO
+#undef BIT_MOVE_IF_ZERO
+#undef BIT_SELECT
+#undef MSA_FN_VECTOR
+
+static inline int64_t msa_addv_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ return arg1 + arg2;
+}
+
+static inline int64_t msa_subv_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ return arg1 - arg2;
+}
+
+static inline int64_t msa_ceq_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ return arg1 == arg2 ? -1 : 0;
+}
+
+static inline int64_t msa_cle_s_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ return arg1 <= arg2 ? -1 : 0;
+}
+
+static inline int64_t msa_cle_u_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ uint64_t u_arg1 = UNSIGNED(arg1, df);
+ uint64_t u_arg2 = UNSIGNED(arg2, df);
+ return u_arg1 <= u_arg2 ? -1 : 0;
+}
+
+static inline int64_t msa_clt_s_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ return arg1 < arg2 ? -1 : 0;
+}
+
+static inline int64_t msa_clt_u_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ uint64_t u_arg1 = UNSIGNED(arg1, df);
+ uint64_t u_arg2 = UNSIGNED(arg2, df);
+ return u_arg1 < u_arg2 ? -1 : 0;
+}
+
+static inline int64_t msa_max_s_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ return arg1 > arg2 ? arg1 : arg2;
+}
+
+static inline int64_t msa_max_u_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ uint64_t u_arg1 = UNSIGNED(arg1, df);
+ uint64_t u_arg2 = UNSIGNED(arg2, df);
+ return u_arg1 > u_arg2 ? arg1 : arg2;
+}
+
+static inline int64_t msa_min_s_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ return arg1 < arg2 ? arg1 : arg2;
+}
+
+static inline int64_t msa_min_u_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ uint64_t u_arg1 = UNSIGNED(arg1, df);
+ uint64_t u_arg2 = UNSIGNED(arg2, df);
+ return u_arg1 < u_arg2 ? arg1 : arg2;
+}
+
+#define MSA_BINOP_IMM_DF(helper, func) \
+void helper_msa_ ## helper ## _df(CPUMIPSState *env, uint32_t df, \
+ uint32_t wd, uint32_t ws, int32_t u5) \
+{ \
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
+ uint32_t i; \
+ \
+ switch (df) { \
+ case DF_BYTE: \
+ for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
+ pwd->b[i] = msa_ ## func ## _df(df, pws->b[i], u5); \
+ } \
+ break; \
+ case DF_HALF: \
+ for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
+ pwd->h[i] = msa_ ## func ## _df(df, pws->h[i], u5); \
+ } \
+ break; \
+ case DF_WORD: \
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
+ pwd->w[i] = msa_ ## func ## _df(df, pws->w[i], u5); \
+ } \
+ break; \
+ case DF_DOUBLE: \
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
+ pwd->d[i] = msa_ ## func ## _df(df, pws->d[i], u5); \
+ } \
+ break; \
+ default: \
+ assert(0); \
+ } \
+}
+
+MSA_BINOP_IMM_DF(addvi, addv)
+MSA_BINOP_IMM_DF(subvi, subv)
+MSA_BINOP_IMM_DF(ceqi, ceq)
+MSA_BINOP_IMM_DF(clei_s, cle_s)
+MSA_BINOP_IMM_DF(clei_u, cle_u)
+MSA_BINOP_IMM_DF(clti_s, clt_s)
+MSA_BINOP_IMM_DF(clti_u, clt_u)
+MSA_BINOP_IMM_DF(maxi_s, max_s)
+MSA_BINOP_IMM_DF(maxi_u, max_u)
+MSA_BINOP_IMM_DF(mini_s, min_s)
+MSA_BINOP_IMM_DF(mini_u, min_u)
+#undef MSA_BINOP_IMM_DF
+
+void helper_msa_ldi_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ int32_t s10)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ uint32_t i;
+
+ switch (df) {
+ case DF_BYTE:
+ for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) {
+ pwd->b[i] = (int8_t)s10;
+ }
+ break;
+ case DF_HALF:
+ for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) {
+ pwd->h[i] = (int16_t)s10;
+ }
+ break;
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ pwd->w[i] = (int32_t)s10;
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ pwd->d[i] = (int64_t)s10;
+ }
+ break;
+ default:
+ assert(0);
+ }
+}
+
+/* Data format bit position and unsigned values */
+#define BIT_POSITION(x, df) ((uint64_t)(x) % DF_BITS(df))
+
+static inline int64_t msa_sll_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ int32_t b_arg2 = BIT_POSITION(arg2, df);
+ return arg1 << b_arg2;
+}
+
+static inline int64_t msa_sra_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ int32_t b_arg2 = BIT_POSITION(arg2, df);
+ return arg1 >> b_arg2;
+}
+
+static inline int64_t msa_srl_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ uint64_t u_arg1 = UNSIGNED(arg1, df);
+ int32_t b_arg2 = BIT_POSITION(arg2, df);
+ return u_arg1 >> b_arg2;
+}
+
+static inline int64_t msa_bclr_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ int32_t b_arg2 = BIT_POSITION(arg2, df);
+ return UNSIGNED(arg1 & (~(1LL << b_arg2)), df);
+}
+
+static inline int64_t msa_bset_df(uint32_t df, int64_t arg1,
+ int64_t arg2)
+{
+ int32_t b_arg2 = BIT_POSITION(arg2, df);
+ return UNSIGNED(arg1 | (1LL << b_arg2), df);
+}
+
+static inline int64_t msa_bneg_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ int32_t b_arg2 = BIT_POSITION(arg2, df);
+ return UNSIGNED(arg1 ^ (1LL << b_arg2), df);
+}
+
+static inline int64_t msa_binsl_df(uint32_t df, int64_t dest, int64_t arg1,
+ int64_t arg2)
+{
+ uint64_t u_arg1 = UNSIGNED(arg1, df);
+ uint64_t u_dest = UNSIGNED(dest, df);
+ int32_t sh_d = BIT_POSITION(arg2, df) + 1;
+ int32_t sh_a = DF_BITS(df) - sh_d;
+ if (sh_d == DF_BITS(df)) {
+ return u_arg1;
+ } else {
+ return UNSIGNED(UNSIGNED(u_dest << sh_d, df) >> sh_d, df) |
+ UNSIGNED(UNSIGNED(u_arg1 >> sh_a, df) << sh_a, df);
+ }
+}
+
+static inline int64_t msa_binsr_df(uint32_t df, int64_t dest, int64_t arg1,
+ int64_t arg2)
+{
+ uint64_t u_arg1 = UNSIGNED(arg1, df);
+ uint64_t u_dest = UNSIGNED(dest, df);
+ int32_t sh_d = BIT_POSITION(arg2, df) + 1;
+ int32_t sh_a = DF_BITS(df) - sh_d;
+ if (sh_d == DF_BITS(df)) {
+ return u_arg1;
+ } else {
+ return UNSIGNED(UNSIGNED(u_dest >> sh_d, df) << sh_d, df) |
+ UNSIGNED(UNSIGNED(u_arg1 << sh_a, df) >> sh_a, df);
+ }
+}
+
+static inline int64_t msa_sat_s_df(uint32_t df, int64_t arg, uint32_t m)
+{
+ return arg < M_MIN_INT(m+1) ? M_MIN_INT(m+1) :
+ arg > M_MAX_INT(m+1) ? M_MAX_INT(m+1) :
+ arg;
+}
+
+static inline int64_t msa_sat_u_df(uint32_t df, int64_t arg, uint32_t m)
+{
+ uint64_t u_arg = UNSIGNED(arg, df);
+ return u_arg < M_MAX_UINT(m+1) ? u_arg :
+ M_MAX_UINT(m+1);
+}
+
+static inline int64_t msa_srar_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ int32_t b_arg2 = BIT_POSITION(arg2, df);
+ if (b_arg2 == 0) {
+ return arg1;
+ } else {
+ int64_t r_bit = (arg1 >> (b_arg2 - 1)) & 1;
+ return (arg1 >> b_arg2) + r_bit;
+ }
+}
+
+static inline int64_t msa_srlr_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ uint64_t u_arg1 = UNSIGNED(arg1, df);
+ int32_t b_arg2 = BIT_POSITION(arg2, df);
+ if (b_arg2 == 0) {
+ return u_arg1;
+ } else {
+ uint64_t r_bit = (u_arg1 >> (b_arg2 - 1)) & 1;
+ return (u_arg1 >> b_arg2) + r_bit;
+ }
+}
+
+#define MSA_BINOP_IMMU_DF(helper, func) \
+void helper_msa_ ## helper ## _df(CPUMIPSState *env, uint32_t df, uint32_t wd, \
+ uint32_t ws, uint32_t u5) \
+{ \
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
+ uint32_t i; \
+ \
+ switch (df) { \
+ case DF_BYTE: \
+ for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
+ pwd->b[i] = msa_ ## func ## _df(df, pws->b[i], u5); \
+ } \
+ break; \
+ case DF_HALF: \
+ for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
+ pwd->h[i] = msa_ ## func ## _df(df, pws->h[i], u5); \
+ } \
+ break; \
+ case DF_WORD: \
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
+ pwd->w[i] = msa_ ## func ## _df(df, pws->w[i], u5); \
+ } \
+ break; \
+ case DF_DOUBLE: \
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
+ pwd->d[i] = msa_ ## func ## _df(df, pws->d[i], u5); \
+ } \
+ break; \
+ default: \
+ assert(0); \
+ } \
+}
+
+MSA_BINOP_IMMU_DF(slli, sll)
+MSA_BINOP_IMMU_DF(srai, sra)
+MSA_BINOP_IMMU_DF(srli, srl)
+MSA_BINOP_IMMU_DF(bclri, bclr)
+MSA_BINOP_IMMU_DF(bseti, bset)
+MSA_BINOP_IMMU_DF(bnegi, bneg)
+MSA_BINOP_IMMU_DF(sat_s, sat_s)
+MSA_BINOP_IMMU_DF(sat_u, sat_u)
+MSA_BINOP_IMMU_DF(srari, srar)
+MSA_BINOP_IMMU_DF(srlri, srlr)
+#undef MSA_BINOP_IMMU_DF
+
+#define MSA_TEROP_IMMU_DF(helper, func) \
+void helper_msa_ ## helper ## _df(CPUMIPSState *env, uint32_t df, \
+ uint32_t wd, uint32_t ws, uint32_t u5) \
+{ \
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
+ uint32_t i; \
+ \
+ switch (df) { \
+ case DF_BYTE: \
+ for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
+ pwd->b[i] = msa_ ## func ## _df(df, pwd->b[i], pws->b[i], \
+ u5); \
+ } \
+ break; \
+ case DF_HALF: \
+ for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
+ pwd->h[i] = msa_ ## func ## _df(df, pwd->h[i], pws->h[i], \
+ u5); \
+ } \
+ break; \
+ case DF_WORD: \
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
+ pwd->w[i] = msa_ ## func ## _df(df, pwd->w[i], pws->w[i], \
+ u5); \
+ } \
+ break; \
+ case DF_DOUBLE: \
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
+ pwd->d[i] = msa_ ## func ## _df(df, pwd->d[i], pws->d[i], \
+ u5); \
+ } \
+ break; \
+ default: \
+ assert(0); \
+ } \
+}
+
+MSA_TEROP_IMMU_DF(binsli, binsl)
+MSA_TEROP_IMMU_DF(binsri, binsr)
+#undef MSA_TEROP_IMMU_DF
+
+static inline int64_t msa_max_a_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1;
+ uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2;
+ return abs_arg1 > abs_arg2 ? arg1 : arg2;
+}
+
+static inline int64_t msa_min_a_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1;
+ uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2;
+ return abs_arg1 < abs_arg2 ? arg1 : arg2;
+}
+
+static inline int64_t msa_add_a_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1;
+ uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2;
+ return abs_arg1 + abs_arg2;
+}
+
+static inline int64_t msa_adds_a_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ uint64_t max_int = (uint64_t)DF_MAX_INT(df);
+ uint64_t abs_arg1 = arg1 >= 0 ? arg1 : -arg1;
+ uint64_t abs_arg2 = arg2 >= 0 ? arg2 : -arg2;
+ if (abs_arg1 > max_int || abs_arg2 > max_int) {
+ return (int64_t)max_int;
+ } else {
+ return (abs_arg1 < max_int - abs_arg2) ? abs_arg1 + abs_arg2 : max_int;
+ }
+}
+
+static inline int64_t msa_adds_s_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ int64_t max_int = DF_MAX_INT(df);
+ int64_t min_int = DF_MIN_INT(df);
+ if (arg1 < 0) {
+ return (min_int - arg1 < arg2) ? arg1 + arg2 : min_int;
+ } else {
+ return (arg2 < max_int - arg1) ? arg1 + arg2 : max_int;
+ }
+}
+
+static inline uint64_t msa_adds_u_df(uint32_t df, uint64_t arg1, uint64_t arg2)
+{
+ uint64_t max_uint = DF_MAX_UINT(df);
+ uint64_t u_arg1 = UNSIGNED(arg1, df);
+ uint64_t u_arg2 = UNSIGNED(arg2, df);
+ return (u_arg1 < max_uint - u_arg2) ? u_arg1 + u_arg2 : max_uint;
+}
+
+static inline int64_t msa_ave_s_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ /* signed shift */
+ return (arg1 >> 1) + (arg2 >> 1) + (arg1 & arg2 & 1);
+}
+
+static inline uint64_t msa_ave_u_df(uint32_t df, uint64_t arg1, uint64_t arg2)
+{
+ uint64_t u_arg1 = UNSIGNED(arg1, df);
+ uint64_t u_arg2 = UNSIGNED(arg2, df);
+ /* unsigned shift */
+ return (u_arg1 >> 1) + (u_arg2 >> 1) + (u_arg1 & u_arg2 & 1);
+}
+
+static inline int64_t msa_aver_s_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ /* signed shift */
+ return (arg1 >> 1) + (arg2 >> 1) + ((arg1 | arg2) & 1);
+}
+
+static inline uint64_t msa_aver_u_df(uint32_t df, uint64_t arg1, uint64_t arg2)
+{
+ uint64_t u_arg1 = UNSIGNED(arg1, df);
+ uint64_t u_arg2 = UNSIGNED(arg2, df);
+ /* unsigned shift */
+ return (u_arg1 >> 1) + (u_arg2 >> 1) + ((u_arg1 | u_arg2) & 1);
+}
+
+static inline int64_t msa_subs_s_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ int64_t max_int = DF_MAX_INT(df);
+ int64_t min_int = DF_MIN_INT(df);
+ if (arg2 > 0) {
+ return (min_int + arg2 < arg1) ? arg1 - arg2 : min_int;
+ } else {
+ return (arg1 < max_int + arg2) ? arg1 - arg2 : max_int;
+ }
+}
+
+static inline int64_t msa_subs_u_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ uint64_t u_arg1 = UNSIGNED(arg1, df);
+ uint64_t u_arg2 = UNSIGNED(arg2, df);
+ return (u_arg1 > u_arg2) ? u_arg1 - u_arg2 : 0;
+}
+
+static inline int64_t msa_subsus_u_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ uint64_t u_arg1 = UNSIGNED(arg1, df);
+ uint64_t max_uint = DF_MAX_UINT(df);
+ if (arg2 >= 0) {
+ uint64_t u_arg2 = (uint64_t)arg2;
+ return (u_arg1 > u_arg2) ?
+ (int64_t)(u_arg1 - u_arg2) :
+ 0;
+ } else {
+ uint64_t u_arg2 = (uint64_t)(-arg2);
+ return (u_arg1 < max_uint - u_arg2) ?
+ (int64_t)(u_arg1 + u_arg2) :
+ (int64_t)max_uint;
+ }
+}
+
+static inline int64_t msa_subsuu_s_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ uint64_t u_arg1 = UNSIGNED(arg1, df);
+ uint64_t u_arg2 = UNSIGNED(arg2, df);
+ int64_t max_int = DF_MAX_INT(df);
+ int64_t min_int = DF_MIN_INT(df);
+ if (u_arg1 > u_arg2) {
+ return u_arg1 - u_arg2 < (uint64_t)max_int ?
+ (int64_t)(u_arg1 - u_arg2) :
+ max_int;
+ } else {
+ return u_arg2 - u_arg1 < (uint64_t)(-min_int) ?
+ (int64_t)(u_arg1 - u_arg2) :
+ min_int;
+ }
+}
+
+static inline int64_t msa_asub_s_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ /* signed compare */
+ return (arg1 < arg2) ?
+ (uint64_t)(arg2 - arg1) : (uint64_t)(arg1 - arg2);
+}
+
+static inline uint64_t msa_asub_u_df(uint32_t df, uint64_t arg1, uint64_t arg2)
+{
+ uint64_t u_arg1 = UNSIGNED(arg1, df);
+ uint64_t u_arg2 = UNSIGNED(arg2, df);
+ /* unsigned compare */
+ return (u_arg1 < u_arg2) ?
+ (uint64_t)(u_arg2 - u_arg1) : (uint64_t)(u_arg1 - u_arg2);
+}
+
+static inline int64_t msa_mulv_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ return arg1 * arg2;
+}
+
+static inline int64_t msa_div_s_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ if (arg1 == DF_MIN_INT(df) && arg2 == -1) {
+ return DF_MIN_INT(df);
+ }
+ return arg2 ? arg1 / arg2 : 0;
+}
+
+static inline int64_t msa_div_u_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ uint64_t u_arg1 = UNSIGNED(arg1, df);
+ uint64_t u_arg2 = UNSIGNED(arg2, df);
+ return u_arg2 ? u_arg1 / u_arg2 : 0;
+}
+
+static inline int64_t msa_mod_s_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ if (arg1 == DF_MIN_INT(df) && arg2 == -1) {
+ return 0;
+ }
+ return arg2 ? arg1 % arg2 : 0;
+}
+
+static inline int64_t msa_mod_u_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ uint64_t u_arg1 = UNSIGNED(arg1, df);
+ uint64_t u_arg2 = UNSIGNED(arg2, df);
+ return u_arg2 ? u_arg1 % u_arg2 : 0;
+}
+
+#define SIGNED_EVEN(a, df) \
+ ((((int64_t)(a)) << (64 - DF_BITS(df)/2)) >> (64 - DF_BITS(df)/2))
+
+#define UNSIGNED_EVEN(a, df) \
+ ((((uint64_t)(a)) << (64 - DF_BITS(df)/2)) >> (64 - DF_BITS(df)/2))
+
+#define SIGNED_ODD(a, df) \
+ ((((int64_t)(a)) << (64 - DF_BITS(df))) >> (64 - DF_BITS(df)/2))
+
+#define UNSIGNED_ODD(a, df) \
+ ((((uint64_t)(a)) << (64 - DF_BITS(df))) >> (64 - DF_BITS(df)/2))
+
+#define SIGNED_EXTRACT(e, o, a, df) \
+ do { \
+ e = SIGNED_EVEN(a, df); \
+ o = SIGNED_ODD(a, df); \
+ } while (0);
+
+#define UNSIGNED_EXTRACT(e, o, a, df) \
+ do { \
+ e = UNSIGNED_EVEN(a, df); \
+ o = UNSIGNED_ODD(a, df); \
+ } while (0);
+
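The EVEN/ODD macros above split each element into its low and high half-width sub-elements (sign- or zero-extended), which is what the dot-product and horizontal add/sub helpers below consume. Worked DF_HALF example: for the 16-bit element 0x0102, SIGNED_EVEN gives 2 (low byte) and SIGNED_ODD gives 1 (high byte), so msa_dotp_s_df(DF_HALF, 0x0102, 0x0304) is 2*4 + 1*3 = 11. A standalone sketch of the same arithmetic (illustrative only, DF_HALF case hard-coded):

/* Even/odd byte extraction from a 16-bit element, mirroring what
 * SIGNED_EVEN/SIGNED_ODD do for DF_HALF.  Illustrative only. */
#include <assert.h>
#include <stdint.h>

static int64_t signed_even16(int64_t a)
{
    return (int64_t)((uint64_t)a << 56) >> 56;   /* sign-extended low byte */
}

static int64_t signed_odd16(int64_t a)
{
    return (int64_t)((uint64_t)a << 48) >> 56;   /* sign-extended high byte */
}

int main(void)
{
    int64_t a = 0x0102, b = 0x0304;
    int64_t dotp = signed_even16(a) * signed_even16(b)
                 + signed_odd16(a) * signed_odd16(b);
    assert(dotp == 2 * 4 + 1 * 3);               /* 11 */
    return 0;
}
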
+static inline int64_t msa_dotp_s_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ int64_t even_arg1;
+ int64_t even_arg2;
+ int64_t odd_arg1;
+ int64_t odd_arg2;
+ SIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
+ SIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
+ return (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2);
+}
+
+static inline int64_t msa_dotp_u_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ int64_t even_arg1;
+ int64_t even_arg2;
+ int64_t odd_arg1;
+ int64_t odd_arg2;
+ UNSIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
+ UNSIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
+ return (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2);
+}
+
+#define CONCATENATE_AND_SLIDE(s, k) \
+ do { \
+ for (i = 0; i < s; i++) { \
+ v[i] = pws->b[s * k + i]; \
+ v[i + s] = pwd->b[s * k + i]; \
+ } \
+ for (i = 0; i < s; i++) { \
+ pwd->b[s * k + i] = v[i + n]; \
+ } \
+ } while (0)
+
+static inline void msa_sld_df(uint32_t df, wr_t *pwd,
+ wr_t *pws, target_ulong rt)
+{
+ uint32_t n = rt % DF_ELEMENTS(df);
+ uint8_t v[64];
+ uint32_t i, k;
+
+ switch (df) {
+ case DF_BYTE:
+ CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_BYTE), 0);
+ break;
+ case DF_HALF:
+ for (k = 0; k < 2; k++) {
+ CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_HALF), k);
+ }
+ break;
+ case DF_WORD:
+ for (k = 0; k < 4; k++) {
+ CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_WORD), k);
+ }
+ break;
+ case DF_DOUBLE:
+ for (k = 0; k < 8; k++) {
+ CONCATENATE_AND_SLIDE(DF_ELEMENTS(DF_DOUBLE), k);
+ }
+ break;
+ default:
+ assert(0);
+ }
+}
+
+static inline int64_t msa_hadd_s_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ return SIGNED_ODD(arg1, df) + SIGNED_EVEN(arg2, df);
+}
+
+static inline int64_t msa_hadd_u_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ return UNSIGNED_ODD(arg1, df) + UNSIGNED_EVEN(arg2, df);
+}
+
+static inline int64_t msa_hsub_s_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ return SIGNED_ODD(arg1, df) - SIGNED_EVEN(arg2, df);
+}
+
+static inline int64_t msa_hsub_u_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ return UNSIGNED_ODD(arg1, df) - UNSIGNED_EVEN(arg2, df);
+}
+
+static inline int64_t msa_mul_q_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ int64_t q_min = DF_MIN_INT(df);
+ int64_t q_max = DF_MAX_INT(df);
+
+ if (arg1 == q_min && arg2 == q_min) {
+ return q_max;
+ }
+ return (arg1 * arg2) >> (DF_BITS(df) - 1);
+}
+
+static inline int64_t msa_mulr_q_df(uint32_t df, int64_t arg1, int64_t arg2)
+{
+ int64_t q_min = DF_MIN_INT(df);
+ int64_t q_max = DF_MAX_INT(df);
+ int64_t r_bit = 1 << (DF_BITS(df) - 2);
+
+ if (arg1 == q_min && arg2 == q_min) {
+ return q_max;
+ }
+ return (arg1 * arg2 + r_bit) >> (DF_BITS(df) - 1);
+}
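
mul_q and mulr_q are signed fixed-point (Q-format) multiplications: for DF_HALF the operands are Q15 values, the product is shifted right by DF_BITS - 1 = 15, mulr_q adds a rounding bit first, and the q_min * q_min case saturates to q_max because -1.0 * -1.0 is not representable. A worked Q15 example as a standalone sketch (not part of the patch):

/* Q15 illustration of the DF_HALF case of mul_q above. */
#include <assert.h>
#include <stdint.h>

static int16_t mul_q15(int16_t a, int16_t b)
{
    if (a == INT16_MIN && b == INT16_MIN) {
        return INT16_MAX;                        /* saturate -1.0 * -1.0 */
    }
    return ((int32_t)a * b) >> 15;
}

int main(void)
{
    assert(mul_q15(16384, 16384) == 8192);       /* 0.5 * 0.5 == 0.25 */
    assert(mul_q15(INT16_MIN, INT16_MIN) == INT16_MAX);
    return 0;
}
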
+
+#define MSA_BINOP_DF(func) \
+void helper_msa_ ## func ## _df(CPUMIPSState *env, uint32_t df, \
+ uint32_t wd, uint32_t ws, uint32_t wt) \
+{ \
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \
+ uint32_t i; \
+ \
+ switch (df) { \
+ case DF_BYTE: \
+ for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
+ pwd->b[i] = msa_ ## func ## _df(df, pws->b[i], pwt->b[i]); \
+ } \
+ break; \
+ case DF_HALF: \
+ for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
+ pwd->h[i] = msa_ ## func ## _df(df, pws->h[i], pwt->h[i]); \
+ } \
+ break; \
+ case DF_WORD: \
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
+ pwd->w[i] = msa_ ## func ## _df(df, pws->w[i], pwt->w[i]); \
+ } \
+ break; \
+ case DF_DOUBLE: \
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
+ pwd->d[i] = msa_ ## func ## _df(df, pws->d[i], pwt->d[i]); \
+ } \
+ break; \
+ default: \
+ assert(0); \
+ } \
+}
+
+MSA_BINOP_DF(sll)
+MSA_BINOP_DF(sra)
+MSA_BINOP_DF(srl)
+MSA_BINOP_DF(bclr)
+MSA_BINOP_DF(bset)
+MSA_BINOP_DF(bneg)
+MSA_BINOP_DF(addv)
+MSA_BINOP_DF(subv)
+MSA_BINOP_DF(max_s)
+MSA_BINOP_DF(max_u)
+MSA_BINOP_DF(min_s)
+MSA_BINOP_DF(min_u)
+MSA_BINOP_DF(max_a)
+MSA_BINOP_DF(min_a)
+MSA_BINOP_DF(ceq)
+MSA_BINOP_DF(clt_s)
+MSA_BINOP_DF(clt_u)
+MSA_BINOP_DF(cle_s)
+MSA_BINOP_DF(cle_u)
+MSA_BINOP_DF(add_a)
+MSA_BINOP_DF(adds_a)
+MSA_BINOP_DF(adds_s)
+MSA_BINOP_DF(adds_u)
+MSA_BINOP_DF(ave_s)
+MSA_BINOP_DF(ave_u)
+MSA_BINOP_DF(aver_s)
+MSA_BINOP_DF(aver_u)
+MSA_BINOP_DF(subs_s)
+MSA_BINOP_DF(subs_u)
+MSA_BINOP_DF(subsus_u)
+MSA_BINOP_DF(subsuu_s)
+MSA_BINOP_DF(asub_s)
+MSA_BINOP_DF(asub_u)
+MSA_BINOP_DF(mulv)
+MSA_BINOP_DF(div_s)
+MSA_BINOP_DF(div_u)
+MSA_BINOP_DF(mod_s)
+MSA_BINOP_DF(mod_u)
+MSA_BINOP_DF(dotp_s)
+MSA_BINOP_DF(dotp_u)
+MSA_BINOP_DF(srar)
+MSA_BINOP_DF(srlr)
+MSA_BINOP_DF(hadd_s)
+MSA_BINOP_DF(hadd_u)
+MSA_BINOP_DF(hsub_s)
+MSA_BINOP_DF(hsub_u)
+
+MSA_BINOP_DF(mul_q)
+MSA_BINOP_DF(mulr_q)
+#undef MSA_BINOP_DF
+
+void helper_msa_sld_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t rt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+
+ msa_sld_df(df, pwd, pws, env->active_tc.gpr[rt]);
+}
+
+static inline int64_t msa_maddv_df(uint32_t df, int64_t dest, int64_t arg1,
+ int64_t arg2)
+{
+ return dest + arg1 * arg2;
+}
+
+static inline int64_t msa_msubv_df(uint32_t df, int64_t dest, int64_t arg1,
+ int64_t arg2)
+{
+ return dest - arg1 * arg2;
+}
+
+static inline int64_t msa_dpadd_s_df(uint32_t df, int64_t dest, int64_t arg1,
+ int64_t arg2)
+{
+ int64_t even_arg1;
+ int64_t even_arg2;
+ int64_t odd_arg1;
+ int64_t odd_arg2;
+ SIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
+ SIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
+ return dest + (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2);
+}
+
+static inline int64_t msa_dpadd_u_df(uint32_t df, int64_t dest, int64_t arg1,
+ int64_t arg2)
+{
+ int64_t even_arg1;
+ int64_t even_arg2;
+ int64_t odd_arg1;
+ int64_t odd_arg2;
+ UNSIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
+ UNSIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
+ return dest + (even_arg1 * even_arg2) + (odd_arg1 * odd_arg2);
+}
+
+static inline int64_t msa_dpsub_s_df(uint32_t df, int64_t dest, int64_t arg1,
+ int64_t arg2)
+{
+ int64_t even_arg1;
+ int64_t even_arg2;
+ int64_t odd_arg1;
+ int64_t odd_arg2;
+ SIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
+ SIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
+ return dest - ((even_arg1 * even_arg2) + (odd_arg1 * odd_arg2));
+}
+
+static inline int64_t msa_dpsub_u_df(uint32_t df, int64_t dest, int64_t arg1,
+ int64_t arg2)
+{
+ int64_t even_arg1;
+ int64_t even_arg2;
+ int64_t odd_arg1;
+ int64_t odd_arg2;
+ UNSIGNED_EXTRACT(even_arg1, odd_arg1, arg1, df);
+ UNSIGNED_EXTRACT(even_arg2, odd_arg2, arg2, df);
+ return dest - ((even_arg1 * even_arg2) + (odd_arg1 * odd_arg2));
+}
+
+static inline int64_t msa_madd_q_df(uint32_t df, int64_t dest, int64_t arg1,
+ int64_t arg2)
+{
+ int64_t q_prod, q_ret;
+
+ int64_t q_max = DF_MAX_INT(df);
+ int64_t q_min = DF_MIN_INT(df);
+
+ q_prod = arg1 * arg2;
+ q_ret = ((dest << (DF_BITS(df) - 1)) + q_prod) >> (DF_BITS(df) - 1);
+
+ return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret;
+}
+
+static inline int64_t msa_msub_q_df(uint32_t df, int64_t dest, int64_t arg1,
+ int64_t arg2)
+{
+ int64_t q_prod, q_ret;
+
+ int64_t q_max = DF_MAX_INT(df);
+ int64_t q_min = DF_MIN_INT(df);
+
+ q_prod = arg1 * arg2;
+ q_ret = ((dest << (DF_BITS(df) - 1)) - q_prod) >> (DF_BITS(df) - 1);
+
+ return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret;
+}
+
+static inline int64_t msa_maddr_q_df(uint32_t df, int64_t dest, int64_t arg1,
+ int64_t arg2)
+{
+ int64_t q_prod, q_ret;
+
+ int64_t q_max = DF_MAX_INT(df);
+ int64_t q_min = DF_MIN_INT(df);
+ int64_t r_bit = 1 << (DF_BITS(df) - 2);
+
+ q_prod = arg1 * arg2;
+ q_ret = ((dest << (DF_BITS(df) - 1)) + q_prod + r_bit) >> (DF_BITS(df) - 1);
+
+ return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret;
+}
+
+static inline int64_t msa_msubr_q_df(uint32_t df, int64_t dest, int64_t arg1,
+ int64_t arg2)
+{
+ int64_t q_prod, q_ret;
+
+ int64_t q_max = DF_MAX_INT(df);
+ int64_t q_min = DF_MIN_INT(df);
+ int64_t r_bit = 1 << (DF_BITS(df) - 2);
+
+ q_prod = arg1 * arg2;
+ q_ret = ((dest << (DF_BITS(df) - 1)) - q_prod + r_bit) >> (DF_BITS(df) - 1);
+
+ return (q_ret < q_min) ? q_min : (q_max < q_ret) ? q_max : q_ret;
+}
+
+#define MSA_TEROP_DF(func) \
+void helper_msa_ ## func ## _df(CPUMIPSState *env, uint32_t df, uint32_t wd, \
+ uint32_t ws, uint32_t wt) \
+{ \
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \
+ uint32_t i; \
+ \
+ switch (df) { \
+ case DF_BYTE: \
+ for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
+ pwd->b[i] = msa_ ## func ## _df(df, pwd->b[i], pws->b[i], \
+ pwt->b[i]); \
+ } \
+ break; \
+ case DF_HALF: \
+ for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
+ pwd->h[i] = msa_ ## func ## _df(df, pwd->h[i], pws->h[i], \
+ pwt->h[i]); \
+ } \
+ break; \
+ case DF_WORD: \
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
+ pwd->w[i] = msa_ ## func ## _df(df, pwd->w[i], pws->w[i], \
+ pwt->w[i]); \
+ } \
+ break; \
+ case DF_DOUBLE: \
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
+ pwd->d[i] = msa_ ## func ## _df(df, pwd->d[i], pws->d[i], \
+ pwt->d[i]); \
+ } \
+ break; \
+ default: \
+ assert(0); \
+ } \
+}
+
+MSA_TEROP_DF(maddv)
+MSA_TEROP_DF(msubv)
+MSA_TEROP_DF(dpadd_s)
+MSA_TEROP_DF(dpadd_u)
+MSA_TEROP_DF(dpsub_s)
+MSA_TEROP_DF(dpsub_u)
+MSA_TEROP_DF(binsl)
+MSA_TEROP_DF(binsr)
+MSA_TEROP_DF(madd_q)
+MSA_TEROP_DF(msub_q)
+MSA_TEROP_DF(maddr_q)
+MSA_TEROP_DF(msubr_q)
+#undef MSA_TEROP_DF
+
+static inline void msa_splat_df(uint32_t df, wr_t *pwd,
+ wr_t *pws, target_ulong rt)
+{
+ uint32_t n = rt % DF_ELEMENTS(df);
+ uint32_t i;
+
+ switch (df) {
+ case DF_BYTE:
+ for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) {
+ pwd->b[i] = pws->b[n];
+ }
+ break;
+ case DF_HALF:
+ for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) {
+ pwd->h[i] = pws->h[n];
+ }
+ break;
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ pwd->w[i] = pws->w[n];
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ pwd->d[i] = pws->d[n];
+ }
+ break;
+ default:
+ assert(0);
+ }
+}
+
+void helper_msa_splat_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t rt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+
+ msa_splat_df(df, pwd, pws, env->active_tc.gpr[rt]);
+}
+
+#define MSA_DO_B MSA_DO(b)
+#define MSA_DO_H MSA_DO(h)
+#define MSA_DO_W MSA_DO(w)
+#define MSA_DO_D MSA_DO(d)
+
+#define MSA_LOOP_B MSA_LOOP(B)
+#define MSA_LOOP_H MSA_LOOP(H)
+#define MSA_LOOP_W MSA_LOOP(W)
+#define MSA_LOOP_D MSA_LOOP(D)
+
+#define MSA_LOOP_COND_B MSA_LOOP_COND(DF_BYTE)
+#define MSA_LOOP_COND_H MSA_LOOP_COND(DF_HALF)
+#define MSA_LOOP_COND_W MSA_LOOP_COND(DF_WORD)
+#define MSA_LOOP_COND_D MSA_LOOP_COND(DF_DOUBLE)
+
+#define MSA_LOOP(DF) \
+ for (i = 0; i < (MSA_LOOP_COND_ ## DF) ; i++) { \
+ MSA_DO_ ## DF \
+ }
+
+#define MSA_FN_DF(FUNC) \
+void helper_msa_##FUNC(CPUMIPSState *env, uint32_t df, uint32_t wd, \
+ uint32_t ws, uint32_t wt) \
+{ \
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr); \
+ wr_t wx, *pwx = &wx; \
+ uint32_t i; \
+ switch (df) { \
+ case DF_BYTE: \
+ MSA_LOOP_B \
+ break; \
+ case DF_HALF: \
+ MSA_LOOP_H \
+ break; \
+ case DF_WORD: \
+ MSA_LOOP_W \
+ break; \
+ case DF_DOUBLE: \
+ MSA_LOOP_D \
+ break; \
+ default: \
+ assert(0); \
+ } \
+ msa_move_v(pwd, pwx); \
+}
+
+#define MSA_LOOP_COND(DF) \
+ (DF_ELEMENTS(DF) / 2)
+
+#define Rb(pwr, i) (pwr->b[i])
+#define Lb(pwr, i) (pwr->b[i + DF_ELEMENTS(DF_BYTE)/2])
+#define Rh(pwr, i) (pwr->h[i])
+#define Lh(pwr, i) (pwr->h[i + DF_ELEMENTS(DF_HALF)/2])
+#define Rw(pwr, i) (pwr->w[i])
+#define Lw(pwr, i) (pwr->w[i + DF_ELEMENTS(DF_WORD)/2])
+#define Rd(pwr, i) (pwr->d[i])
+#define Ld(pwr, i) (pwr->d[i + DF_ELEMENTS(DF_DOUBLE)/2])
+
+#define MSA_DO(DF) \
+ do { \
+ R##DF(pwx, i) = pwt->DF[2*i]; \
+ L##DF(pwx, i) = pws->DF[2*i]; \
+ } while (0);
+MSA_FN_DF(pckev_df)
+#undef MSA_DO
+
+#define MSA_DO(DF) \
+ do { \
+ R##DF(pwx, i) = pwt->DF[2*i+1]; \
+ L##DF(pwx, i) = pws->DF[2*i+1]; \
+ } while (0);
+MSA_FN_DF(pckod_df)
+#undef MSA_DO
+
+#define MSA_DO(DF) \
+ do { \
+ pwx->DF[2*i] = L##DF(pwt, i); \
+ pwx->DF[2*i+1] = L##DF(pws, i); \
+ } while (0);
+MSA_FN_DF(ilvl_df)
+#undef MSA_DO
+
+#define MSA_DO(DF) \
+ do { \
+ pwx->DF[2*i] = R##DF(pwt, i); \
+ pwx->DF[2*i+1] = R##DF(pws, i); \
+ } while (0);
+MSA_FN_DF(ilvr_df)
+#undef MSA_DO
+
+#define MSA_DO(DF) \
+ do { \
+ pwx->DF[2*i] = pwt->DF[2*i]; \
+ pwx->DF[2*i+1] = pws->DF[2*i]; \
+ } while (0);
+MSA_FN_DF(ilvev_df)
+#undef MSA_DO
+
+#define MSA_DO(DF) \
+ do { \
+ pwx->DF[2*i] = pwt->DF[2*i+1]; \
+ pwx->DF[2*i+1] = pws->DF[2*i+1]; \
+ } while (0);
+MSA_FN_DF(ilvod_df)
+#undef MSA_DO
+#undef MSA_LOOP_COND
+
+#define MSA_LOOP_COND(DF) \
+ (DF_ELEMENTS(DF))
+
+#define MSA_DO(DF) \
+ do { \
+ uint32_t n = DF_ELEMENTS(df); \
+ uint32_t k = (pwd->DF[i] & 0x3f) % (2 * n); \
+ pwx->DF[i] = \
+ (pwd->DF[i] & 0xc0) ? 0 : k < n ? pwt->DF[k] : pws->DF[k - n]; \
+ } while (0);
+MSA_FN_DF(vshf_df)
+#undef MSA_DO
+#undef MSA_LOOP_COND
+#undef MSA_FN_DF
+
+void helper_msa_sldi_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t n)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+
+ msa_sld_df(df, pwd, pws, n);
+}
+
+void helper_msa_splati_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t n)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+
+ msa_splat_df(df, pwd, pws, n);
+}
+
+void helper_msa_copy_s_df(CPUMIPSState *env, uint32_t df, uint32_t rd,
+ uint32_t ws, uint32_t n)
+{
+ n %= DF_ELEMENTS(df);
+
+ switch (df) {
+ case DF_BYTE:
+ env->active_tc.gpr[rd] = (int8_t)env->active_fpu.fpr[ws].wr.b[n];
+ break;
+ case DF_HALF:
+ env->active_tc.gpr[rd] = (int16_t)env->active_fpu.fpr[ws].wr.h[n];
+ break;
+ case DF_WORD:
+ env->active_tc.gpr[rd] = (int32_t)env->active_fpu.fpr[ws].wr.w[n];
+ break;
+#ifdef TARGET_MIPS64
+ case DF_DOUBLE:
+ env->active_tc.gpr[rd] = (int64_t)env->active_fpu.fpr[ws].wr.d[n];
+ break;
+#endif
+ default:
+ assert(0);
+ }
+}
+
+void helper_msa_copy_u_df(CPUMIPSState *env, uint32_t df, uint32_t rd,
+ uint32_t ws, uint32_t n)
+{
+ n %= DF_ELEMENTS(df);
+
+ switch (df) {
+ case DF_BYTE:
+ env->active_tc.gpr[rd] = (uint8_t)env->active_fpu.fpr[ws].wr.b[n];
+ break;
+ case DF_HALF:
+ env->active_tc.gpr[rd] = (uint16_t)env->active_fpu.fpr[ws].wr.h[n];
+ break;
+ case DF_WORD:
+ env->active_tc.gpr[rd] = (uint32_t)env->active_fpu.fpr[ws].wr.w[n];
+ break;
+#ifdef TARGET_MIPS64
+ case DF_DOUBLE:
+ env->active_tc.gpr[rd] = (uint64_t)env->active_fpu.fpr[ws].wr.d[n];
+ break;
+#endif
+ default:
+ assert(0);
+ }
+}
+
+void helper_msa_insert_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t rs_num, uint32_t n)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ target_ulong rs = env->active_tc.gpr[rs_num];
+
+ switch (df) {
+ case DF_BYTE:
+ pwd->b[n] = (int8_t)rs;
+ break;
+ case DF_HALF:
+ pwd->h[n] = (int16_t)rs;
+ break;
+ case DF_WORD:
+ pwd->w[n] = (int32_t)rs;
+ break;
+ case DF_DOUBLE:
+ pwd->d[n] = (int64_t)rs;
+ break;
+ default:
+ assert(0);
+ }
+}
+
+void helper_msa_insve_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t n)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+
+ switch (df) {
+ case DF_BYTE:
+ pwd->b[n] = (int8_t)pws->b[0];
+ break;
+ case DF_HALF:
+ pwd->h[n] = (int16_t)pws->h[0];
+ break;
+ case DF_WORD:
+ pwd->w[n] = (int32_t)pws->w[0];
+ break;
+ case DF_DOUBLE:
+ pwd->d[n] = (int64_t)pws->d[0];
+ break;
+ default:
+ assert(0);
+ }
+}
+
+void helper_msa_ctcmsa(CPUMIPSState *env, target_ulong elm, uint32_t cd)
+{
+ switch (cd) {
+ case 0:
+ break;
+ case 1:
+ env->active_tc.msacsr = (int32_t)elm & MSACSR_MASK;
+ restore_msa_fp_status(env);
+ /* check exception */
+ if ((GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED)
+ & GET_FP_CAUSE(env->active_tc.msacsr)) {
+ do_raise_exception(env, EXCP_MSAFPE, GETPC());
+ }
+ break;
+ }
+}
+
+target_ulong helper_msa_cfcmsa(CPUMIPSState *env, uint32_t cs)
+{
+ switch (cs) {
+ case 0:
+ return env->msair;
+ case 1:
+ return env->active_tc.msacsr & MSACSR_MASK;
+ }
+ return 0;
+}
+
+void helper_msa_move_v(CPUMIPSState *env, uint32_t wd, uint32_t ws)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+
+ msa_move_v(pwd, pws);
+}
+
+static inline int64_t msa_pcnt_df(uint32_t df, int64_t arg)
+{
+ uint64_t x;
+
+ x = UNSIGNED(arg, df);
+
+ x = (x & 0x5555555555555555ULL) + ((x >> 1) & 0x5555555555555555ULL);
+ x = (x & 0x3333333333333333ULL) + ((x >> 2) & 0x3333333333333333ULL);
+ x = (x & 0x0F0F0F0F0F0F0F0FULL) + ((x >> 4) & 0x0F0F0F0F0F0F0F0FULL);
+ x = (x & 0x00FF00FF00FF00FFULL) + ((x >> 8) & 0x00FF00FF00FF00FFULL);
+ x = (x & 0x0000FFFF0000FFFFULL) + ((x >> 16) & 0x0000FFFF0000FFFFULL);
+ x = (x & 0x00000000FFFFFFFFULL) + ((x >> 32));
+
+ return x;
+}
+
+static inline int64_t msa_nlzc_df(uint32_t df, int64_t arg)
+{
+ uint64_t x, y;
+ int n, c;
+
+ x = UNSIGNED(arg, df);
+ n = DF_BITS(df);
+ c = DF_BITS(df) / 2;
+
+ do {
+ y = x >> c;
+ if (y != 0) {
+ n = n - c;
+ x = y;
+ }
+ c = c >> 1;
+ } while (c != 0);
+
+ return n - x;
+}
+
+static inline int64_t msa_nloc_df(uint32_t df, int64_t arg)
+{
+ return msa_nlzc_df(df, UNSIGNED((~arg), df));
+}
+
+void helper_msa_fill_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t rs)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ uint32_t i;
+
+ switch (df) {
+ case DF_BYTE:
+ for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) {
+ pwd->b[i] = (int8_t)env->active_tc.gpr[rs];
+ }
+ break;
+ case DF_HALF:
+ for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) {
+ pwd->h[i] = (int16_t)env->active_tc.gpr[rs];
+ }
+ break;
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ pwd->w[i] = (int32_t)env->active_tc.gpr[rs];
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ pwd->d[i] = (int64_t)env->active_tc.gpr[rs];
+ }
+ break;
+ default:
+ assert(0);
+ }
+}
+
+#define MSA_UNOP_DF(func) \
+void helper_msa_ ## func ## _df(CPUMIPSState *env, uint32_t df, \
+ uint32_t wd, uint32_t ws) \
+{ \
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr); \
+ uint32_t i; \
+ \
+ switch (df) { \
+ case DF_BYTE: \
+ for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) { \
+ pwd->b[i] = msa_ ## func ## _df(df, pws->b[i]); \
+ } \
+ break; \
+ case DF_HALF: \
+ for (i = 0; i < DF_ELEMENTS(DF_HALF); i++) { \
+ pwd->h[i] = msa_ ## func ## _df(df, pws->h[i]); \
+ } \
+ break; \
+ case DF_WORD: \
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) { \
+ pwd->w[i] = msa_ ## func ## _df(df, pws->w[i]); \
+ } \
+ break; \
+ case DF_DOUBLE: \
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) { \
+ pwd->d[i] = msa_ ## func ## _df(df, pws->d[i]); \
+ } \
+ break; \
+ default: \
+ assert(0); \
+ } \
+}
+
+MSA_UNOP_DF(nlzc)
+MSA_UNOP_DF(nloc)
+MSA_UNOP_DF(pcnt)
+#undef MSA_UNOP_DF
+
+#define FLOAT_ONE32 make_float32(0x3f8 << 20)
+#define FLOAT_ONE64 make_float64(0x3ffULL << 52)
+
+#define FLOAT_SNAN16(s) (float16_default_nan(s) ^ 0x0220)
+ /* 0x7c20 */
+#define FLOAT_SNAN32(s) (float32_default_nan(s) ^ 0x00400020)
+ /* 0x7f800020 */
+#define FLOAT_SNAN64(s) (float64_default_nan(s) ^ 0x0008000000000020ULL)
+ /* 0x7ff0000000000020 */
+
+static inline void clear_msacsr_cause(CPUMIPSState *env)
+{
+ SET_FP_CAUSE(env->active_tc.msacsr, 0);
+}
+
+static inline void check_msacsr_cause(CPUMIPSState *env, uintptr_t retaddr)
+{
+ if ((GET_FP_CAUSE(env->active_tc.msacsr) &
+ (GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED)) == 0) {
+ UPDATE_FP_FLAGS(env->active_tc.msacsr,
+ GET_FP_CAUSE(env->active_tc.msacsr));
+ } else {
+ do_raise_exception(env, EXCP_MSAFPE, retaddr);
+ }
+}
+
+/* Flush-to-zero use cases for update_msacsr() */
+#define CLEAR_FS_UNDERFLOW 1
+#define CLEAR_IS_INEXACT 2
+#define RECIPROCAL_INEXACT 4
+
+static inline int update_msacsr(CPUMIPSState *env, int action, int denormal)
+{
+ int ieee_ex;
+
+ int c;
+ int cause;
+ int enable;
+
+ ieee_ex = get_float_exception_flags(&env->active_tc.msa_fp_status);
+
+ /* QEMU softfloat does not signal all underflow cases */
+ if (denormal) {
+ ieee_ex |= float_flag_underflow;
+ }
+
+ c = ieee_ex_to_mips(ieee_ex);
+ enable = GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED;
+
+ /* Set Inexact (I) when flushing inputs to zero */
+ if ((ieee_ex & float_flag_input_denormal) &&
+ (env->active_tc.msacsr & MSACSR_FS_MASK) != 0) {
+ if (action & CLEAR_IS_INEXACT) {
+ c &= ~FP_INEXACT;
+ } else {
+ c |= FP_INEXACT;
+ }
+ }
+
+ /* Set Inexact (I) and Underflow (U) when flushing outputs to zero */
+ if ((ieee_ex & float_flag_output_denormal) &&
+ (env->active_tc.msacsr & MSACSR_FS_MASK) != 0) {
+ c |= FP_INEXACT;
+ if (action & CLEAR_FS_UNDERFLOW) {
+ c &= ~FP_UNDERFLOW;
+ } else {
+ c |= FP_UNDERFLOW;
+ }
+ }
+
+ /* Set Inexact (I) when Overflow (O) is not enabled */
+ if ((c & FP_OVERFLOW) != 0 && (enable & FP_OVERFLOW) == 0) {
+ c |= FP_INEXACT;
+ }
+
+ /* Clear Exact Underflow when Underflow (U) is not enabled */
+ if ((c & FP_UNDERFLOW) != 0 && (enable & FP_UNDERFLOW) == 0 &&
+ (c & FP_INEXACT) == 0) {
+ c &= ~FP_UNDERFLOW;
+ }
+
+ /* Reciprocal operations set only Inexact when valid and not
+ divide by zero */
+ if ((action & RECIPROCAL_INEXACT) &&
+ (c & (FP_INVALID | FP_DIV0)) == 0) {
+ c = FP_INEXACT;
+ }
+
+ cause = c & enable; /* all current enabled exceptions */
+
+ if (cause == 0) {
+ /* No enabled exception, update the MSACSR Cause
+ with all current exceptions */
+ SET_FP_CAUSE(env->active_tc.msacsr,
+ (GET_FP_CAUSE(env->active_tc.msacsr) | c));
+ } else {
+ /* Current exceptions are enabled */
+ if ((env->active_tc.msacsr & MSACSR_NX_MASK) == 0) {
+ /* Exception(s) will trap, update MSACSR Cause
+ with all enabled exceptions */
+ SET_FP_CAUSE(env->active_tc.msacsr,
+ (GET_FP_CAUSE(env->active_tc.msacsr) | c));
+ }
+ }
+
+ return c;
+}
+
+static inline int get_enabled_exceptions(const CPUMIPSState *env, int c)
+{
+ int enable = GET_FP_ENABLE(env->active_tc.msacsr) | FP_UNIMPLEMENTED;
+ return c & enable;
+}
+
+static inline float16 float16_from_float32(int32_t a, flag ieee,
+ float_status *status)
+{
+ float16 f_val;
+
+ f_val = float32_to_float16((float32)a, ieee, status);
+ f_val = float16_maybe_silence_nan(f_val, status);
+
+ return a < 0 ? (f_val | (1 << 15)) : f_val;
+}
+
+static inline float32 float32_from_float64(int64_t a, float_status *status)
+{
+ float32 f_val;
+
+ f_val = float64_to_float32((float64)a, status);
+ f_val = float32_maybe_silence_nan(f_val, status);
+
+ return a < 0 ? (f_val | (1 << 31)) : f_val;
+}
+
+static inline float32 float32_from_float16(int16_t a, flag ieee,
+ float_status *status)
+{
+ float32 f_val;
+
+ f_val = float16_to_float32((float16)a, ieee, status);
+ f_val = float32_maybe_silence_nan(f_val, status);
+
+ return a < 0 ? (f_val | (1 << 31)) : f_val;
+}
+
+static inline float64 float64_from_float32(int32_t a, float_status *status)
+{
+ float64 f_val;
+
+ f_val = float32_to_float64((float64)a, status);
+ f_val = float64_maybe_silence_nan(f_val, status);
+
+ return a < 0 ? (f_val | (1ULL << 63)) : f_val;
+}
+
+static inline float32 float32_from_q16(int16_t a, float_status *status)
+{
+ float32 f_val;
+
+ /* conversion as integer and scaling */
+ f_val = int32_to_float32(a, status);
+ f_val = float32_scalbn(f_val, -15, status);
+
+ return f_val;
+}
+
+static inline float64 float64_from_q32(int32_t a, float_status *status)
+{
+ float64 f_val;
+
+ /* conversion as integer and scaling */
+ f_val = int32_to_float64(a, status);
+ f_val = float64_scalbn(f_val, -31, status);
+
+ return f_val;
+}
+
+static inline int16_t float32_to_q16(float32 a, float_status *status)
+{
+ int32_t q_val;
+ int32_t q_min = 0xffff8000;
+ int32_t q_max = 0x00007fff;
+
+ int ieee_ex;
+
+ if (float32_is_any_nan(a)) {
+ float_raise(float_flag_invalid, status);
+ return 0;
+ }
+
+ /* scaling */
+ a = float32_scalbn(a, 15, status);
+
+ ieee_ex = get_float_exception_flags(status);
+ set_float_exception_flags(ieee_ex & (~float_flag_underflow)
+ , status);
+
+ if (ieee_ex & float_flag_overflow) {
+ float_raise(float_flag_inexact, status);
+ return (int32_t)a < 0 ? q_min : q_max;
+ }
+
+ /* conversion to int */
+ q_val = float32_to_int32(a, status);
+
+ ieee_ex = get_float_exception_flags(status);
+ set_float_exception_flags(ieee_ex & (~float_flag_underflow)
+ , status);
+
+ if (ieee_ex & float_flag_invalid) {
+ set_float_exception_flags(ieee_ex & (~float_flag_invalid)
+ , status);
+ float_raise(float_flag_overflow | float_flag_inexact, status);
+ return (int32_t)a < 0 ? q_min : q_max;
+ }
+
+ if (q_val < q_min) {
+ float_raise(float_flag_overflow | float_flag_inexact, status);
+ return (int16_t)q_min;
+ }
+
+ if (q_max < q_val) {
+ float_raise(float_flag_overflow | float_flag_inexact, status);
+ return (int16_t)q_max;
+ }
+
+ return (int16_t)q_val;
+}
+
+static inline int32_t float64_to_q32(float64 a, float_status *status)
+{
+ int64_t q_val;
+ int64_t q_min = 0xffffffff80000000LL;
+ int64_t q_max = 0x000000007fffffffLL;
+
+ int ieee_ex;
+
+ if (float64_is_any_nan(a)) {
+ float_raise(float_flag_invalid, status);
+ return 0;
+ }
+
+ /* scaling */
+ a = float64_scalbn(a, 31, status);
+
+ ieee_ex = get_float_exception_flags(status);
+ set_float_exception_flags(ieee_ex & (~float_flag_underflow)
+ , status);
+
+ if (ieee_ex & float_flag_overflow) {
+ float_raise(float_flag_inexact, status);
+ return (int64_t)a < 0 ? q_min : q_max;
+ }
+
+ /* conversion to integer */
+ q_val = float64_to_int64(a, status);
+
+ ieee_ex = get_float_exception_flags(status);
+ set_float_exception_flags(ieee_ex & (~float_flag_underflow)
+ , status);
+
+ if (ieee_ex & float_flag_invalid) {
+ set_float_exception_flags(ieee_ex & (~float_flag_invalid)
+ , status);
+ float_raise(float_flag_overflow | float_flag_inexact, status);
+ return (int64_t)a < 0 ? q_min : q_max;
+ }
+
+ if (q_val < q_min) {
+ float_raise(float_flag_overflow | float_flag_inexact, status);
+ return (int32_t)q_min;
+ }
+
+ if (q_max < q_val) {
+ float_raise(float_flag_overflow | float_flag_inexact, status);
+ return (int32_t)q_max;
+ }
+
+ return (int32_t)q_val;
+}
+
+#define MSA_FLOAT_COND(DEST, OP, ARG1, ARG2, BITS, QUIET) \
+ do { \
+ float_status *status = &env->active_tc.msa_fp_status; \
+ int c; \
+ int64_t cond; \
+ set_float_exception_flags(0, status); \
+ if (!QUIET) { \
+ cond = float ## BITS ## _ ## OP(ARG1, ARG2, status); \
+ } else { \
+ cond = float ## BITS ## _ ## OP ## _quiet(ARG1, ARG2, status); \
+ } \
+ DEST = cond ? M_MAX_UINT(BITS) : 0; \
+ c = update_msacsr(env, CLEAR_IS_INEXACT, 0); \
+ \
+ if (get_enabled_exceptions(env, c)) { \
+ DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \
+ } \
+ } while (0)
+
+#define MSA_FLOAT_AF(DEST, ARG1, ARG2, BITS, QUIET) \
+ do { \
+ MSA_FLOAT_COND(DEST, eq, ARG1, ARG2, BITS, QUIET); \
+ if ((DEST & M_MAX_UINT(BITS)) == M_MAX_UINT(BITS)) { \
+ DEST = 0; \
+ } \
+ } while (0)
+
+#define MSA_FLOAT_UEQ(DEST, ARG1, ARG2, BITS, QUIET) \
+ do { \
+ MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \
+ if (DEST == 0) { \
+ MSA_FLOAT_COND(DEST, eq, ARG1, ARG2, BITS, QUIET); \
+ } \
+ } while (0)
+
+#define MSA_FLOAT_NE(DEST, ARG1, ARG2, BITS, QUIET) \
+ do { \
+ MSA_FLOAT_COND(DEST, lt, ARG1, ARG2, BITS, QUIET); \
+ if (DEST == 0) { \
+ MSA_FLOAT_COND(DEST, lt, ARG2, ARG1, BITS, QUIET); \
+ } \
+ } while (0)
+
+#define MSA_FLOAT_UNE(DEST, ARG1, ARG2, BITS, QUIET) \
+ do { \
+ MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \
+ if (DEST == 0) { \
+ MSA_FLOAT_COND(DEST, lt, ARG1, ARG2, BITS, QUIET); \
+ if (DEST == 0) { \
+ MSA_FLOAT_COND(DEST, lt, ARG2, ARG1, BITS, QUIET); \
+ } \
+ } \
+ } while (0)
+
+#define MSA_FLOAT_ULE(DEST, ARG1, ARG2, BITS, QUIET) \
+ do { \
+ MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \
+ if (DEST == 0) { \
+ MSA_FLOAT_COND(DEST, le, ARG1, ARG2, BITS, QUIET); \
+ } \
+ } while (0)
+
+#define MSA_FLOAT_ULT(DEST, ARG1, ARG2, BITS, QUIET) \
+ do { \
+ MSA_FLOAT_COND(DEST, unordered, ARG1, ARG2, BITS, QUIET); \
+ if (DEST == 0) { \
+ MSA_FLOAT_COND(DEST, lt, ARG1, ARG2, BITS, QUIET); \
+ } \
+ } while (0)
+
+#define MSA_FLOAT_OR(DEST, ARG1, ARG2, BITS, QUIET) \
+ do { \
+ MSA_FLOAT_COND(DEST, le, ARG1, ARG2, BITS, QUIET); \
+ if (DEST == 0) { \
+ MSA_FLOAT_COND(DEST, le, ARG2, ARG1, BITS, QUIET); \
+ } \
+ } while (0)
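+
+/*
+ * The composite predicates above are built from the softfloat primitives:
+ * UEQ is unordered-or-equal, ULT/ULE are unordered-or-less(-or-equal),
+ * NE is the ordered not-equal (lt in either direction), UNE additionally
+ * admits unordered operands, and OR is true whenever the operands are
+ * ordered (le in either direction).  AF runs the eq comparison only for
+ * its exception side effects and then forces the boolean result to false.
+ */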
+
+static inline void compare_af(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
+ wr_t *pwt, uint32_t df, int quiet,
+ uintptr_t retaddr)
+{
+ wr_t wx, *pwx = &wx;
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_AF(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_AF(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, retaddr);
+
+ msa_move_v(pwd, pwx);
+}
+
+static inline void compare_un(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
+ wr_t *pwt, uint32_t df, int quiet,
+ uintptr_t retaddr)
+{
+ wr_t wx, *pwx = &wx;
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_COND(pwx->w[i], unordered, pws->w[i], pwt->w[i], 32,
+ quiet);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_COND(pwx->d[i], unordered, pws->d[i], pwt->d[i], 64,
+ quiet);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, retaddr);
+
+ msa_move_v(pwd, pwx);
+}
+
+static inline void compare_eq(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
+ wr_t *pwt, uint32_t df, int quiet,
+ uintptr_t retaddr)
+{
+ wr_t wx, *pwx = &wx;
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_COND(pwx->w[i], eq, pws->w[i], pwt->w[i], 32, quiet);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_COND(pwx->d[i], eq, pws->d[i], pwt->d[i], 64, quiet);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, retaddr);
+
+ msa_move_v(pwd, pwx);
+}
+
+static inline void compare_ueq(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
+ wr_t *pwt, uint32_t df, int quiet,
+ uintptr_t retaddr)
+{
+ wr_t wx, *pwx = &wx;
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_UEQ(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_UEQ(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, retaddr);
+
+ msa_move_v(pwd, pwx);
+}
+
+static inline void compare_lt(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
+ wr_t *pwt, uint32_t df, int quiet,
+ uintptr_t retaddr)
+{
+ wr_t wx, *pwx = &wx;
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_COND(pwx->w[i], lt, pws->w[i], pwt->w[i], 32, quiet);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_COND(pwx->d[i], lt, pws->d[i], pwt->d[i], 64, quiet);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, retaddr);
+
+ msa_move_v(pwd, pwx);
+}
+
+static inline void compare_ult(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
+ wr_t *pwt, uint32_t df, int quiet,
+ uintptr_t retaddr)
+{
+ wr_t wx, *pwx = &wx;
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_ULT(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_ULT(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, retaddr);
+
+ msa_move_v(pwd, pwx);
+}
+
+static inline void compare_le(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
+ wr_t *pwt, uint32_t df, int quiet,
+ uintptr_t retaddr)
+{
+ wr_t wx, *pwx = &wx;
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_COND(pwx->w[i], le, pws->w[i], pwt->w[i], 32, quiet);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_COND(pwx->d[i], le, pws->d[i], pwt->d[i], 64, quiet);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, retaddr);
+
+ msa_move_v(pwd, pwx);
+}
+
+static inline void compare_ule(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
+ wr_t *pwt, uint32_t df, int quiet,
+ uintptr_t retaddr)
+{
+ wr_t wx, *pwx = &wx;
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_ULE(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_ULE(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, retaddr);
+
+ msa_move_v(pwd, pwx);
+}
+
+static inline void compare_or(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
+ wr_t *pwt, uint32_t df, int quiet,
+ uintptr_t retaddr)
+{
+ wr_t wx, *pwx = &wx;
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_OR(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_OR(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, retaddr);
+
+ msa_move_v(pwd, pwx);
+}
+
+static inline void compare_une(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
+ wr_t *pwt, uint32_t df, int quiet,
+ uintptr_t retaddr)
+{
+ wr_t wx, *pwx = &wx;
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_UNE(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_UNE(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, retaddr);
+
+ msa_move_v(pwd, pwx);
+}
+
+static inline void compare_ne(CPUMIPSState *env, wr_t *pwd, wr_t *pws,
+ wr_t *pwt, uint32_t df, int quiet,
+ uintptr_t retaddr)
+{
+ wr_t wx, *pwx = &wx;
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_NE(pwx->w[i], pws->w[i], pwt->w[i], 32, quiet);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_NE(pwx->d[i], pws->d[i], pwt->d[i], 64, quiet);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, retaddr);
+
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_fcaf_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_af(env, pwd, pws, pwt, df, 1, GETPC());
+}
+
+void helper_msa_fcun_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_un(env, pwd, pws, pwt, df, 1, GETPC());
+}
+
+void helper_msa_fceq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_eq(env, pwd, pws, pwt, df, 1, GETPC());
+}
+
+void helper_msa_fcueq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_ueq(env, pwd, pws, pwt, df, 1, GETPC());
+}
+
+void helper_msa_fclt_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_lt(env, pwd, pws, pwt, df, 1, GETPC());
+}
+
+void helper_msa_fcult_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_ult(env, pwd, pws, pwt, df, 1, GETPC());
+}
+
+void helper_msa_fcle_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_le(env, pwd, pws, pwt, df, 1, GETPC());
+}
+
+void helper_msa_fcule_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_ule(env, pwd, pws, pwt, df, 1, GETPC());
+}
+
+void helper_msa_fsaf_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_af(env, pwd, pws, pwt, df, 0, GETPC());
+}
+
+void helper_msa_fsun_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_un(env, pwd, pws, pwt, df, 0, GETPC());
+}
+
+void helper_msa_fseq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_eq(env, pwd, pws, pwt, df, 0, GETPC());
+}
+
+void helper_msa_fsueq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_ueq(env, pwd, pws, pwt, df, 0, GETPC());
+}
+
+void helper_msa_fslt_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_lt(env, pwd, pws, pwt, df, 0, GETPC());
+}
+
+void helper_msa_fsult_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_ult(env, pwd, pws, pwt, df, 0, GETPC());
+}
+
+void helper_msa_fsle_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_le(env, pwd, pws, pwt, df, 0, GETPC());
+}
+
+void helper_msa_fsule_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_ule(env, pwd, pws, pwt, df, 0, GETPC());
+}
+
+void helper_msa_fcor_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_or(env, pwd, pws, pwt, df, 1, GETPC());
+}
+
+void helper_msa_fcune_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_une(env, pwd, pws, pwt, df, 1, GETPC());
+}
+
+void helper_msa_fcne_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_ne(env, pwd, pws, pwt, df, 1, GETPC());
+}
+
+void helper_msa_fsor_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_or(env, pwd, pws, pwt, df, 0, GETPC());
+}
+
+void helper_msa_fsune_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_une(env, pwd, pws, pwt, df, 0, GETPC());
+}
+
+void helper_msa_fsne_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ compare_ne(env, pwd, pws, pwt, df, 0, GETPC());
+}
+
+#define float16_is_zero(ARG) 0
+#define float16_is_zero_or_denormal(ARG) 0
+
+#define IS_DENORMAL(ARG, BITS) \
+ (!float ## BITS ## _is_zero(ARG) \
+ && float ## BITS ## _is_zero_or_denormal(ARG))
+
+#define MSA_FLOAT_BINOP(DEST, OP, ARG1, ARG2, BITS) \
+ do { \
+ float_status *status = &env->active_tc.msa_fp_status; \
+ int c; \
+ \
+ set_float_exception_flags(0, status); \
+ DEST = float ## BITS ## _ ## OP(ARG1, ARG2, status); \
+ c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \
+ \
+ if (get_enabled_exceptions(env, c)) { \
+ DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \
+ } \
+ } while (0)
+
+void helper_msa_fadd_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_BINOP(pwx->w[i], add, pws->w[i], pwt->w[i], 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_BINOP(pwx->d[i], add, pws->d[i], pwt->d[i], 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_fsub_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_BINOP(pwx->w[i], sub, pws->w[i], pwt->w[i], 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_BINOP(pwx->d[i], sub, pws->d[i], pwt->d[i], 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_fmul_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_BINOP(pwx->w[i], mul, pws->w[i], pwt->w[i], 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_BINOP(pwx->d[i], mul, pws->d[i], pwt->d[i], 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_fdiv_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_BINOP(pwx->w[i], div, pws->w[i], pwt->w[i], 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_BINOP(pwx->d[i], div, pws->d[i], pwt->d[i], 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
+#define MSA_FLOAT_MULADD(DEST, ARG1, ARG2, ARG3, NEGATE, BITS) \
+ do { \
+ float_status *status = &env->active_tc.msa_fp_status; \
+ int c; \
+ \
+ set_float_exception_flags(0, status); \
+ DEST = float ## BITS ## _muladd(ARG2, ARG3, ARG1, NEGATE, status); \
+ c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \
+ \
+ if (get_enabled_exceptions(env, c)) { \
+ DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \
+ } \
+ } while (0)
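+
+/*
+ * Note that ARG1 is the addend: the FMADD helper below passes the old
+ * destination element as ARG1, so it computes wd + ws * wt with a single
+ * (fused) rounding step, and FMSUB uses float_muladd_negate_product to
+ * compute wd - ws * wt.
+ */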
+
+void helper_msa_fmadd_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_MULADD(pwx->w[i], pwd->w[i],
+ pws->w[i], pwt->w[i], 0, 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_MULADD(pwx->d[i], pwd->d[i],
+ pws->d[i], pwt->d[i], 0, 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_fmsub_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_MULADD(pwx->w[i], pwd->w[i],
+ pws->w[i], pwt->w[i],
+ float_muladd_negate_product, 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_MULADD(pwx->d[i], pwd->d[i],
+ pws->d[i], pwt->d[i],
+ float_muladd_negate_product, 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_fexp2_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_BINOP(pwx->w[i], scalbn, pws->w[i],
+ pwt->w[i] > 0x200 ? 0x200 :
+ pwt->w[i] < -0x200 ? -0x200 : pwt->w[i],
+ 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_BINOP(pwx->d[i], scalbn, pws->d[i],
+ pwt->d[i] > 0x1000 ? 0x1000 :
+ pwt->d[i] < -0x1000 ? -0x1000 : pwt->d[i],
+ 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
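+
+/*
+ * FEXP2 above computes ws * 2^wt via scalbn.  The shift count taken from
+ * wt is clamped to +/-0x200 (word) or +/-0x1000 (doubleword); since
+ * float32 exponents only span roughly +/-150 and float64 roughly +/-1075,
+ * the clamp never changes the result, it just keeps the count safely
+ * within the int argument range expected by scalbn.
+ */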
+
+#define MSA_FLOAT_UNOP(DEST, OP, ARG, BITS) \
+ do { \
+ float_status *status = &env->active_tc.msa_fp_status; \
+ int c; \
+ \
+ set_float_exception_flags(0, status); \
+ DEST = float ## BITS ## _ ## OP(ARG, status); \
+ c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \
+ \
+ if (get_enabled_exceptions(env, c)) { \
+ DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \
+ } \
+ } while (0)
+
+void helper_msa_fexdo_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ /* Half precision floats come in two formats: standard
+ IEEE and "ARM" format. The latter gains extra exponent
+ range by omitting the NaN/Inf encodings. */
+ flag ieee = 1;
+
+ MSA_FLOAT_BINOP(Lh(pwx, i), from_float32, pws->w[i], ieee, 16);
+ MSA_FLOAT_BINOP(Rh(pwx, i), from_float32, pwt->w[i], ieee, 16);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_UNOP(Lw(pwx, i), from_float64, pws->d[i], 32);
+ MSA_FLOAT_UNOP(Rw(pwx, i), from_float64, pwt->d[i], 32);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+ msa_move_v(pwd, pwx);
+}
+
+#define MSA_FLOAT_UNOP_XD(DEST, OP, ARG, BITS, XBITS) \
+ do { \
+ float_status *status = &env->active_tc.msa_fp_status; \
+ int c; \
+ \
+ set_float_exception_flags(0, status); \
+ DEST = float ## BITS ## _ ## OP(ARG, status); \
+ c = update_msacsr(env, CLEAR_FS_UNDERFLOW, 0); \
+ \
+ if (get_enabled_exceptions(env, c)) { \
+ DEST = ((FLOAT_SNAN ## XBITS(status) >> 6) << 6) | c; \
+ } \
+ } while (0)
+
+void helper_msa_ftq_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_UNOP_XD(Lh(pwx, i), to_q16, pws->w[i], 32, 16);
+ MSA_FLOAT_UNOP_XD(Rh(pwx, i), to_q16, pwt->w[i], 32, 16);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_UNOP_XD(Lw(pwx, i), to_q32, pws->d[i], 64, 32);
+ MSA_FLOAT_UNOP_XD(Rw(pwx, i), to_q32, pwt->d[i], 64, 32);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
+#define NUMBER_QNAN_PAIR(ARG1, ARG2, BITS, STATUS) \
+ !float ## BITS ## _is_any_nan(ARG1) \
+ && float ## BITS ## _is_quiet_nan(ARG2, STATUS)
+
+#define MSA_FLOAT_MAXOP(DEST, OP, ARG1, ARG2, BITS) \
+ do { \
+ float_status *status = &env->active_tc.msa_fp_status; \
+ int c; \
+ \
+ set_float_exception_flags(0, status); \
+ DEST = float ## BITS ## _ ## OP(ARG1, ARG2, status); \
+ c = update_msacsr(env, 0, 0); \
+ \
+ if (get_enabled_exceptions(env, c)) { \
+ DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \
+ } \
+ } while (0)
+
+#define FMAXMIN_A(F, G, X, _S, _T, BITS, STATUS) \
+ do { \
+ uint## BITS ##_t S = _S, T = _T; \
+ uint## BITS ##_t as, at, xs, xt, xd; \
+ if (NUMBER_QNAN_PAIR(S, T, BITS, STATUS)) { \
+ T = S; \
+ } \
+ else if (NUMBER_QNAN_PAIR(T, S, BITS, STATUS)) { \
+ S = T; \
+ } \
+ as = float## BITS ##_abs(S); \
+ at = float## BITS ##_abs(T); \
+ MSA_FLOAT_MAXOP(xs, F, S, T, BITS); \
+ MSA_FLOAT_MAXOP(xt, G, S, T, BITS); \
+ MSA_FLOAT_MAXOP(xd, F, as, at, BITS); \
+ X = (as == at || xd == float## BITS ##_abs(xs)) ? xs : xt; \
+ } while (0)
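+
+/*
+ * FMAXMIN_A picks the operand with the larger (for F == max) or smaller
+ * (for F == min) magnitude, e.g. fmax_a(-3.0, 2.0) yields -3.0 because
+ * |-3.0| > |2.0|.  Equal magnitudes fall back to the plain max/min, and a
+ * quiet NaN paired with a number is replaced by that number before the
+ * comparison, so a single quiet-NaN operand does not poison the result.
+ */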
+
+void helper_msa_fmin_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ float_status *status = &env->active_tc.msa_fp_status;
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ if (NUMBER_QNAN_PAIR(pws->w[i], pwt->w[i], 32, status)) {
+ MSA_FLOAT_MAXOP(pwx->w[i], min, pws->w[i], pws->w[i], 32);
+ } else if (NUMBER_QNAN_PAIR(pwt->w[i], pws->w[i], 32, status)) {
+ MSA_FLOAT_MAXOP(pwx->w[i], min, pwt->w[i], pwt->w[i], 32);
+ } else {
+ MSA_FLOAT_MAXOP(pwx->w[i], min, pws->w[i], pwt->w[i], 32);
+ }
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ if (NUMBER_QNAN_PAIR(pws->d[i], pwt->d[i], 64, status)) {
+ MSA_FLOAT_MAXOP(pwx->d[i], min, pws->d[i], pws->d[i], 64);
+ } else if (NUMBER_QNAN_PAIR(pwt->d[i], pws->d[i], 64, status)) {
+ MSA_FLOAT_MAXOP(pwx->d[i], min, pwt->d[i], pwt->d[i], 64);
+ } else {
+ MSA_FLOAT_MAXOP(pwx->d[i], min, pws->d[i], pwt->d[i], 64);
+ }
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_fmin_a_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ float_status *status = &env->active_tc.msa_fp_status;
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ FMAXMIN_A(min, max, pwx->w[i], pws->w[i], pwt->w[i], 32, status);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ FMAXMIN_A(min, max, pwx->d[i], pws->d[i], pwt->d[i], 64, status);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_fmax_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ float_status *status = &env->active_tc.msa_fp_status;
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ if (NUMBER_QNAN_PAIR(pws->w[i], pwt->w[i], 32, status)) {
+ MSA_FLOAT_MAXOP(pwx->w[i], max, pws->w[i], pws->w[i], 32);
+ } else if (NUMBER_QNAN_PAIR(pwt->w[i], pws->w[i], 32, status)) {
+ MSA_FLOAT_MAXOP(pwx->w[i], max, pwt->w[i], pwt->w[i], 32);
+ } else {
+ MSA_FLOAT_MAXOP(pwx->w[i], max, pws->w[i], pwt->w[i], 32);
+ }
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ if (NUMBER_QNAN_PAIR(pws->d[i], pwt->d[i], 64, status)) {
+ MSA_FLOAT_MAXOP(pwx->d[i], max, pws->d[i], pws->d[i], 64);
+ } else if (NUMBER_QNAN_PAIR(pwt->d[i], pws->d[i], 64, status)) {
+ MSA_FLOAT_MAXOP(pwx->d[i], max, pwt->d[i], pwt->d[i], 64);
+ } else {
+ MSA_FLOAT_MAXOP(pwx->d[i], max, pws->d[i], pwt->d[i], 64);
+ }
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_fmax_a_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws, uint32_t wt)
+{
+ float_status *status = &env->active_tc.msa_fp_status;
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ wr_t *pwt = &(env->active_fpu.fpr[wt].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ FMAXMIN_A(max, min, pwx->w[i], pws->w[i], pwt->w[i], 32, status);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ FMAXMIN_A(max, min, pwx->d[i], pws->d[i], pwt->d[i], 64, status);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_fclass_df(CPUMIPSState *env, uint32_t df,
+ uint32_t wd, uint32_t ws)
+{
+ float_status *status = &env->active_tc.msa_fp_status;
+
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ if (df == DF_WORD) {
+ pwd->w[0] = float_class_s(pws->w[0], status);
+ pwd->w[1] = float_class_s(pws->w[1], status);
+ pwd->w[2] = float_class_s(pws->w[2], status);
+ pwd->w[3] = float_class_s(pws->w[3], status);
+ } else {
+ pwd->d[0] = float_class_d(pws->d[0], status);
+ pwd->d[1] = float_class_d(pws->d[1], status);
+ }
+}
+
+#define MSA_FLOAT_UNOP0(DEST, OP, ARG, BITS) \
+ do { \
+ float_status *status = &env->active_tc.msa_fp_status; \
+ int c; \
+ \
+ set_float_exception_flags(0, status); \
+ DEST = float ## BITS ## _ ## OP(ARG, status); \
+ c = update_msacsr(env, CLEAR_FS_UNDERFLOW, 0); \
+ \
+ if (get_enabled_exceptions(env, c)) { \
+ DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \
+ } else if (float ## BITS ## _is_any_nan(ARG)) { \
+ DEST = 0; \
+ } \
+ } while (0)
+
+void helper_msa_ftrunc_s_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_UNOP0(pwx->w[i], to_int32_round_to_zero, pws->w[i], 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_UNOP0(pwx->d[i], to_int64_round_to_zero, pws->d[i], 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_ftrunc_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_UNOP0(pwx->w[i], to_uint32_round_to_zero, pws->w[i], 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_UNOP0(pwx->d[i], to_uint64_round_to_zero, pws->d[i], 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_fsqrt_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_UNOP(pwx->w[i], sqrt, pws->w[i], 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_UNOP(pwx->d[i], sqrt, pws->d[i], 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
+#define MSA_FLOAT_RECIPROCAL(DEST, ARG, BITS) \
+ do { \
+ float_status *status = &env->active_tc.msa_fp_status; \
+ int c; \
+ \
+ set_float_exception_flags(0, status); \
+ DEST = float ## BITS ## _ ## div(FLOAT_ONE ## BITS, ARG, status); \
+ c = update_msacsr(env, float ## BITS ## _is_infinity(ARG) || \
+ float ## BITS ## _is_quiet_nan(DEST, status) ? \
+ 0 : RECIPROCAL_INEXACT, \
+ IS_DENORMAL(DEST, BITS)); \
+ \
+ if (get_enabled_exceptions(env, c)) { \
+ DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \
+ } \
+ } while (0)
+
+void helper_msa_frsqrt_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_RECIPROCAL(pwx->w[i], float32_sqrt(pws->w[i],
+ &env->active_tc.msa_fp_status), 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_RECIPROCAL(pwx->d[i], float64_sqrt(pws->d[i],
+ &env->active_tc.msa_fp_status), 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_frcp_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_RECIPROCAL(pwx->w[i], pws->w[i], 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_RECIPROCAL(pwx->d[i], pws->d[i], 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_frint_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_UNOP(pwx->w[i], round_to_int, pws->w[i], 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_UNOP(pwx->d[i], round_to_int, pws->d[i], 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
+#define MSA_FLOAT_LOGB(DEST, ARG, BITS) \
+ do { \
+ float_status *status = &env->active_tc.msa_fp_status; \
+ int c; \
+ \
+ set_float_exception_flags(0, status); \
+ set_float_rounding_mode(float_round_down, status); \
+ DEST = float ## BITS ## _ ## log2(ARG, status); \
+ DEST = float ## BITS ## _ ## round_to_int(DEST, status); \
+ set_float_rounding_mode(ieee_rm[(env->active_tc.msacsr & \
+ MSACSR_RM_MASK) >> MSACSR_RM], \
+ status); \
+ \
+ set_float_exception_flags(get_float_exception_flags(status) & \
+ (~float_flag_inexact), \
+ status); \
+ \
+ c = update_msacsr(env, 0, IS_DENORMAL(DEST, BITS)); \
+ \
+ if (get_enabled_exceptions(env, c)) { \
+ DEST = ((FLOAT_SNAN ## BITS(status) >> 6) << 6) | c; \
+ } \
+ } while (0)
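+
+/*
+ * MSA_FLOAT_LOGB computes floor(log2(x)) and returns it as a float: the
+ * log2 is evaluated and rounded to an integer value with the rounding
+ * mode temporarily forced to round-down, the inexact flag raised along
+ * the way is discarded, and the MSACSR rounding mode is then restored.
+ * E.g. FLOG2 of 8.0 yields 3.0, and of 10.0 also yields 3.0.
+ */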
+
+void helper_msa_flog2_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_LOGB(pwx->w[i], pws->w[i], 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_LOGB(pwx->d[i], pws->d[i], 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_fexupl_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ /* Half precision floats come in two formats: standard
+ IEEE and "ARM" format. The latter gains extra exponent
+ range by omitting the NaN/Inf encodings. */
+ flag ieee = 1;
+
+ MSA_FLOAT_BINOP(pwx->w[i], from_float16, Lh(pws, i), ieee, 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_UNOP(pwx->d[i], from_float32, Lw(pws, i), 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_fexupr_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ /* Half precision floats come in two formats: standard
+ IEEE and "ARM" format. The latter gains extra exponent
+ range by omitting the NaN/Inf encodings. */
+ flag ieee = 1;
+
+ MSA_FLOAT_BINOP(pwx->w[i], from_float16, Rh(pws, i), ieee, 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_UNOP(pwx->d[i], from_float32, Rw(pws, i), 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_ffql_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ uint32_t i;
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_UNOP(pwx->w[i], from_q16, Lh(pws, i), 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_UNOP(pwx->d[i], from_q32, Lw(pws, i), 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_ffqr_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ uint32_t i;
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_UNOP(pwx->w[i], from_q16, Rh(pws, i), 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_UNOP(pwx->d[i], from_q32, Rw(pws, i), 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_ftint_s_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_UNOP0(pwx->w[i], to_int32, pws->w[i], 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_UNOP0(pwx->d[i], to_int64, pws->d[i], 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_ftint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_UNOP0(pwx->w[i], to_uint32, pws->w[i], 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_UNOP0(pwx->d[i], to_uint64, pws->d[i], 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
+#define float32_from_int32 int32_to_float32
+#define float32_from_uint32 uint32_to_float32
+
+#define float64_from_int64 int64_to_float64
+#define float64_from_uint64 uint64_to_float64
+
+void helper_msa_ffint_s_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_UNOP(pwx->w[i], from_int32, pws->w[i], 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_UNOP(pwx->d[i], from_int64, pws->d[i], 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
+
+void helper_msa_ffint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
+ uint32_t ws)
+{
+ wr_t wx, *pwx = &wx;
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
+ wr_t *pws = &(env->active_fpu.fpr[ws].wr);
+ uint32_t i;
+
+ clear_msacsr_cause(env);
+
+ switch (df) {
+ case DF_WORD:
+ for (i = 0; i < DF_ELEMENTS(DF_WORD); i++) {
+ MSA_FLOAT_UNOP(pwx->w[i], from_uint32, pws->w[i], 32);
+ }
+ break;
+ case DF_DOUBLE:
+ for (i = 0; i < DF_ELEMENTS(DF_DOUBLE); i++) {
+ MSA_FLOAT_UNOP(pwx->d[i], from_uint64, pws->d[i], 64);
+ }
+ break;
+ default:
+ assert(0);
+ }
+
+ check_msacsr_cause(env, GETPC());
+
+ msa_move_v(pwd, pwx);
+}
diff --git a/target/mips/op_helper.c b/target/mips/op_helper.c
new file mode 100644
index 0000000000..7af4c2f084
--- /dev/null
+++ b/target/mips/op_helper.c
@@ -0,0 +1,4196 @@
+/*
+ * MIPS emulation helpers for qemu.
+ *
+ * Copyright (c) 2004-2005 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "qemu/host-utils.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "sysemu/kvm.h"
+
+/*****************************************************************************/
+/* Exception processing helpers */
+
+void helper_raise_exception_err(CPUMIPSState *env, uint32_t exception,
+ int error_code)
+{
+ do_raise_exception_err(env, exception, error_code, 0);
+}
+
+void helper_raise_exception(CPUMIPSState *env, uint32_t exception)
+{
+ do_raise_exception(env, exception, GETPC());
+}
+
+void helper_raise_exception_debug(CPUMIPSState *env)
+{
+ do_raise_exception(env, EXCP_DEBUG, 0);
+}
+
+static void raise_exception(CPUMIPSState *env, uint32_t exception)
+{
+ do_raise_exception(env, exception, 0);
+}
+
+#if defined(CONFIG_USER_ONLY)
+#define HELPER_LD(name, insn, type) \
+static inline type do_##name(CPUMIPSState *env, target_ulong addr, \
+ int mem_idx, uintptr_t retaddr) \
+{ \
+ return (type) cpu_##insn##_data_ra(env, addr, retaddr); \
+}
+#else
+#define HELPER_LD(name, insn, type) \
+static inline type do_##name(CPUMIPSState *env, target_ulong addr, \
+ int mem_idx, uintptr_t retaddr) \
+{ \
+ switch (mem_idx) \
+ { \
+ case 0: return (type) cpu_##insn##_kernel_ra(env, addr, retaddr); \
+ case 1: return (type) cpu_##insn##_super_ra(env, addr, retaddr); \
+ default: \
+ case 2: return (type) cpu_##insn##_user_ra(env, addr, retaddr); \
+ } \
+}
+#endif
+HELPER_LD(lw, ldl, int32_t)
+#if defined(TARGET_MIPS64)
+HELPER_LD(ld, ldq, int64_t)
+#endif
+#undef HELPER_LD
+
+#if defined(CONFIG_USER_ONLY)
+#define HELPER_ST(name, insn, type) \
+static inline void do_##name(CPUMIPSState *env, target_ulong addr, \
+ type val, int mem_idx, uintptr_t retaddr) \
+{ \
+ cpu_##insn##_data_ra(env, addr, val, retaddr); \
+}
+#else
+#define HELPER_ST(name, insn, type) \
+static inline void do_##name(CPUMIPSState *env, target_ulong addr, \
+ type val, int mem_idx, uintptr_t retaddr) \
+{ \
+ switch (mem_idx) \
+ { \
+ case 0: cpu_##insn##_kernel_ra(env, addr, val, retaddr); break; \
+ case 1: cpu_##insn##_super_ra(env, addr, val, retaddr); break; \
+ default: \
+ case 2: cpu_##insn##_user_ra(env, addr, val, retaddr); break; \
+ } \
+}
+#endif
+HELPER_ST(sb, stb, uint8_t)
+HELPER_ST(sw, stl, uint32_t)
+#if defined(TARGET_MIPS64)
+HELPER_ST(sd, stq, uint64_t)
+#endif
+#undef HELPER_ST
+
+target_ulong helper_clo (target_ulong arg1)
+{
+ return clo32(arg1);
+}
+
+target_ulong helper_clz (target_ulong arg1)
+{
+ return clz32(arg1);
+}
+
+#if defined(TARGET_MIPS64)
+target_ulong helper_dclo (target_ulong arg1)
+{
+ return clo64(arg1);
+}
+
+target_ulong helper_dclz (target_ulong arg1)
+{
+ return clz64(arg1);
+}
+#endif /* TARGET_MIPS64 */
+
+/* 64-bit arithmetic for 32-bit hosts */
+static inline uint64_t get_HILO(CPUMIPSState *env)
+{
+ return ((uint64_t)(env->active_tc.HI[0]) << 32) | (uint32_t)env->active_tc.LO[0];
+}
+
+static inline target_ulong set_HIT0_LO(CPUMIPSState *env, uint64_t HILO)
+{
+ env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
+ return env->active_tc.HI[0] = (int32_t)(HILO >> 32);
+}
+
+static inline target_ulong set_HI_LOT0(CPUMIPSState *env, uint64_t HILO)
+{
+ target_ulong tmp = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
+ env->active_tc.HI[0] = (int32_t)(HILO >> 32);
+ return tmp;
+}
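+
+/*
+ * set_HI_LOT0() returns the new LO half while set_HIT0_LO() returns the
+ * new HI half; the vr54xx multiply variants below use whichever helper
+ * matches the half they also return to the caller (the *hi forms return
+ * HI, the others return LO).
+ */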
+
+/* Multiplication variants of the vr54xx. */
+target_ulong helper_muls(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
+{
+ return set_HI_LOT0(env, 0 - ((int64_t)(int32_t)arg1 *
+ (int64_t)(int32_t)arg2));
+}
+
+target_ulong helper_mulsu(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
+{
+ return set_HI_LOT0(env, 0 - (uint64_t)(uint32_t)arg1 *
+ (uint64_t)(uint32_t)arg2);
+}
+
+target_ulong helper_macc(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
+{
+ return set_HI_LOT0(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 *
+ (int64_t)(int32_t)arg2);
+}
+
+target_ulong helper_macchi(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
+{
+ return set_HIT0_LO(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 *
+ (int64_t)(int32_t)arg2);
+}
+
+target_ulong helper_maccu(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
+{
+ return set_HI_LOT0(env, (uint64_t)get_HILO(env) +
+ (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
+}
+
+target_ulong helper_macchiu(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
+{
+ return set_HIT0_LO(env, (uint64_t)get_HILO(env) +
+ (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
+}
+
+target_ulong helper_msac(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
+{
+ return set_HI_LOT0(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 *
+ (int64_t)(int32_t)arg2);
+}
+
+target_ulong helper_msachi(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
+{
+ return set_HIT0_LO(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 *
+ (int64_t)(int32_t)arg2);
+}
+
+target_ulong helper_msacu(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
+{
+ return set_HI_LOT0(env, (uint64_t)get_HILO(env) -
+ (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
+}
+
+target_ulong helper_msachiu(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
+{
+ return set_HIT0_LO(env, (uint64_t)get_HILO(env) -
+ (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
+}
+
+target_ulong helper_mulhi(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
+{
+ return set_HIT0_LO(env, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);
+}
+
+target_ulong helper_mulhiu(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
+{
+ return set_HIT0_LO(env, (uint64_t)(uint32_t)arg1 *
+ (uint64_t)(uint32_t)arg2);
+}
+
+target_ulong helper_mulshi(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
+{
+ return set_HIT0_LO(env, 0 - (int64_t)(int32_t)arg1 *
+ (int64_t)(int32_t)arg2);
+}
+
+target_ulong helper_mulshiu(CPUMIPSState *env, target_ulong arg1,
+ target_ulong arg2)
+{
+ return set_HIT0_LO(env, 0 - (uint64_t)(uint32_t)arg1 *
+ (uint64_t)(uint32_t)arg2);
+}
+
+static inline target_ulong bitswap(target_ulong v)
+{
+ v = ((v >> 1) & (target_ulong)0x5555555555555555ULL) |
+ ((v & (target_ulong)0x5555555555555555ULL) << 1);
+ v = ((v >> 2) & (target_ulong)0x3333333333333333ULL) |
+ ((v & (target_ulong)0x3333333333333333ULL) << 2);
+ v = ((v >> 4) & (target_ulong)0x0F0F0F0F0F0F0F0FULL) |
+ ((v & (target_ulong)0x0F0F0F0F0F0F0F0FULL) << 4);
+ return v;
+}
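+
+/*
+ * bitswap() reverses the bit order within each byte independently, e.g.
+ * bitswap(0x12) == 0x48 and bitswap(0x01) == 0x80.  helper_bitswap()
+ * truncates and sign-extends the 32-bit result; helper_dbitswap() applies
+ * the swap to all eight bytes.
+ */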
+
+#ifdef TARGET_MIPS64
+target_ulong helper_dbitswap(target_ulong rt)
+{
+ return bitswap(rt);
+}
+#endif
+
+target_ulong helper_bitswap(target_ulong rt)
+{
+ return (int32_t)bitswap(rt);
+}
+
+#ifndef CONFIG_USER_ONLY
+
+static inline hwaddr do_translate_address(CPUMIPSState *env,
+ target_ulong address,
+ int rw, uintptr_t retaddr)
+{
+ hwaddr lladdr;
+ CPUState *cs = CPU(mips_env_get_cpu(env));
+
+ lladdr = cpu_mips_translate_address(env, address, rw);
+
+ if (lladdr == -1LL) {
+ cpu_loop_exit_restore(cs, retaddr);
+ } else {
+ return lladdr;
+ }
+}
+
+#define HELPER_LD_ATOMIC(name, insn, almask) \
+target_ulong helper_##name(CPUMIPSState *env, target_ulong arg, int mem_idx) \
+{ \
+ if (arg & almask) { \
+ env->CP0_BadVAddr = arg; \
+ do_raise_exception(env, EXCP_AdEL, GETPC()); \
+ } \
+ env->lladdr = do_translate_address(env, arg, 0, GETPC()); \
+ env->llval = do_##insn(env, arg, mem_idx, GETPC()); \
+ return env->llval; \
+}
+HELPER_LD_ATOMIC(ll, lw, 0x3)
+#ifdef TARGET_MIPS64
+HELPER_LD_ATOMIC(lld, ld, 0x7)
+#endif
+#undef HELPER_LD_ATOMIC
+
+#define HELPER_ST_ATOMIC(name, ld_insn, st_insn, almask) \
+target_ulong helper_##name(CPUMIPSState *env, target_ulong arg1, \
+ target_ulong arg2, int mem_idx) \
+{ \
+ target_long tmp; \
+ \
+ if (arg2 & almask) { \
+ env->CP0_BadVAddr = arg2; \
+ do_raise_exception(env, EXCP_AdES, GETPC()); \
+ } \
+ if (do_translate_address(env, arg2, 1, GETPC()) == env->lladdr) { \
+ tmp = do_##ld_insn(env, arg2, mem_idx, GETPC()); \
+ if (tmp == env->llval) { \
+ do_##st_insn(env, arg2, arg1, mem_idx, GETPC()); \
+ return 1; \
+ } \
+ } \
+ return 0; \
+}
+HELPER_ST_ATOMIC(sc, lw, sw, 0x3)
+#ifdef TARGET_MIPS64
+HELPER_ST_ATOMIC(scd, ld, sd, 0x7)
+#endif
+#undef HELPER_ST_ATOMIC
+#endif
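+
+/*
+ * The LL/SC helpers above emulate linked-load / store-conditional:
+ * helper_ll records the translated address in env->lladdr and the loaded
+ * value in env->llval, and helper_sc performs the store and returns 1
+ * only if the store address still translates to lladdr and the memory
+ * word still holds llval; otherwise it returns 0 without storing.
+ * Misaligned addresses raise an address error exception (AdEL / AdES).
+ */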
+
+#ifdef TARGET_WORDS_BIGENDIAN
+#define GET_LMASK(v) ((v) & 3)
+#define GET_OFFSET(addr, offset) (addr + (offset))
+#else
+#define GET_LMASK(v) (((v) & 3) ^ 3)
+#define GET_OFFSET(addr, offset) (addr - (offset))
+#endif
+
+void helper_swl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
+ int mem_idx)
+{
+ do_sb(env, arg2, (uint8_t)(arg1 >> 24), mem_idx, GETPC());
+
+ if (GET_LMASK(arg2) <= 2) {
+ do_sb(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), mem_idx,
+ GETPC());
+ }
+
+ if (GET_LMASK(arg2) <= 1) {
+ do_sb(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), mem_idx,
+ GETPC());
+ }
+
+ if (GET_LMASK(arg2) == 0) {
+ do_sb(env, GET_OFFSET(arg2, 3), (uint8_t)arg1, mem_idx,
+ GETPC());
+ }
+}
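+
+/*
+ * Worked example: on a big-endian target with (arg2 & 3) == 2, SWL stores
+ * the two most-significant bytes of arg1 at arg2 and arg2 + 1, stopping at
+ * the word boundary; the companion SWR below fills in the low-order bytes
+ * from the other end of the unaligned word.
+ */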
+
+void helper_swr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
+ int mem_idx)
+{
+ do_sb(env, arg2, (uint8_t)arg1, mem_idx, GETPC());
+
+ if (GET_LMASK(arg2) >= 1) {
+ do_sb(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx,
+ GETPC());
+ }
+
+ if (GET_LMASK(arg2) >= 2) {
+ do_sb(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx,
+ GETPC());
+ }
+
+ if (GET_LMASK(arg2) == 3) {
+ do_sb(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx,
+ GETPC());
+ }
+}
+
+#if defined(TARGET_MIPS64)
+/* "half" load and stores. We must do the memory access inline,
+ or fault handling won't work. */
+
+#ifdef TARGET_WORDS_BIGENDIAN
+#define GET_LMASK64(v) ((v) & 7)
+#else
+#define GET_LMASK64(v) (((v) & 7) ^ 7)
+#endif
+
+void helper_sdl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
+ int mem_idx)
+{
+ do_sb(env, arg2, (uint8_t)(arg1 >> 56), mem_idx, GETPC());
+
+ if (GET_LMASK64(arg2) <= 6) {
+ do_sb(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), mem_idx,
+ GETPC());
+ }
+
+ if (GET_LMASK64(arg2) <= 5) {
+ do_sb(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), mem_idx,
+ GETPC());
+ }
+
+ if (GET_LMASK64(arg2) <= 4) {
+ do_sb(env, GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), mem_idx,
+ GETPC());
+ }
+
+ if (GET_LMASK64(arg2) <= 3) {
+ do_sb(env, GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), mem_idx,
+ GETPC());
+ }
+
+ if (GET_LMASK64(arg2) <= 2) {
+ do_sb(env, GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), mem_idx,
+ GETPC());
+ }
+
+ if (GET_LMASK64(arg2) <= 1) {
+ do_sb(env, GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), mem_idx,
+ GETPC());
+ }
+
+ if (GET_LMASK64(arg2) <= 0) {
+ do_sb(env, GET_OFFSET(arg2, 7), (uint8_t)arg1, mem_idx,
+ GETPC());
+ }
+}
+
+void helper_sdr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
+ int mem_idx)
+{
+ do_sb(env, arg2, (uint8_t)arg1, mem_idx, GETPC());
+
+ if (GET_LMASK64(arg2) >= 1) {
+ do_sb(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx,
+ GETPC());
+ }
+
+ if (GET_LMASK64(arg2) >= 2) {
+ do_sb(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx,
+ GETPC());
+ }
+
+ if (GET_LMASK64(arg2) >= 3) {
+ do_sb(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx,
+ GETPC());
+ }
+
+ if (GET_LMASK64(arg2) >= 4) {
+ do_sb(env, GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), mem_idx,
+ GETPC());
+ }
+
+ if (GET_LMASK64(arg2) >= 5) {
+ do_sb(env, GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), mem_idx,
+ GETPC());
+ }
+
+ if (GET_LMASK64(arg2) >= 6) {
+ do_sb(env, GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), mem_idx,
+ GETPC());
+ }
+
+ if (GET_LMASK64(arg2) == 7) {
+ do_sb(env, GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), mem_idx,
+ GETPC());
+ }
+}
+#endif /* TARGET_MIPS64 */
+
+static const int multiple_regs[] = { 16, 17, 18, 19, 20, 21, 22, 23, 30 };
+
+void helper_lwm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
+ uint32_t mem_idx)
+{
+ target_ulong base_reglist = reglist & 0xf;
+ target_ulong do_r31 = reglist & 0x10;
+
+ if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
+ target_ulong i;
+
+ for (i = 0; i < base_reglist; i++) {
+ env->active_tc.gpr[multiple_regs[i]] =
+ (target_long)do_lw(env, addr, mem_idx, GETPC());
+ addr += 4;
+ }
+ }
+
+ if (do_r31) {
+ env->active_tc.gpr[31] = (target_long)do_lw(env, addr, mem_idx,
+ GETPC());
+ }
+}
+
+void helper_swm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
+ uint32_t mem_idx)
+{
+ target_ulong base_reglist = reglist & 0xf;
+ target_ulong do_r31 = reglist & 0x10;
+
+ if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
+ target_ulong i;
+
+ for (i = 0; i < base_reglist; i++) {
+ do_sw(env, addr, env->active_tc.gpr[multiple_regs[i]], mem_idx,
+ GETPC());
+ addr += 4;
+ }
+ }
+
+ if (do_r31) {
+ do_sw(env, addr, env->active_tc.gpr[31], mem_idx, GETPC());
+ }
+}
+
+#if defined(TARGET_MIPS64)
+void helper_ldm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
+ uint32_t mem_idx)
+{
+ target_ulong base_reglist = reglist & 0xf;
+ target_ulong do_r31 = reglist & 0x10;
+
+ if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
+ target_ulong i;
+
+ for (i = 0; i < base_reglist; i++) {
+ env->active_tc.gpr[multiple_regs[i]] = do_ld(env, addr, mem_idx,
+ GETPC());
+ addr += 8;
+ }
+ }
+
+ if (do_r31) {
+ env->active_tc.gpr[31] = do_ld(env, addr, mem_idx, GETPC());
+ }
+}
+
+void helper_sdm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
+ uint32_t mem_idx)
+{
+ target_ulong base_reglist = reglist & 0xf;
+ target_ulong do_r31 = reglist & 0x10;
+
+ if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
+ target_ulong i;
+
+ for (i = 0; i < base_reglist; i++) {
+ do_sd(env, addr, env->active_tc.gpr[multiple_regs[i]], mem_idx,
+ GETPC());
+ addr += 8;
+ }
+ }
+
+ if (do_r31) {
+ do_sd(env, addr, env->active_tc.gpr[31], mem_idx, GETPC());
+ }
+}
+#endif
+
+#ifndef CONFIG_USER_ONLY
+/* SMP helpers. */
+static bool mips_vpe_is_wfi(MIPSCPU *c)
+{
+ CPUState *cpu = CPU(c);
+ CPUMIPSState *env = &c->env;
+
+ /* If the VPE is halted but otherwise active, it means it's waiting for
+ an interrupt. */
+ return cpu->halted && mips_vpe_active(env);
+}
+
+static bool mips_vp_is_wfi(MIPSCPU *c)
+{
+ CPUState *cpu = CPU(c);
+ CPUMIPSState *env = &c->env;
+
+ return cpu->halted && mips_vp_active(env);
+}
+
+static inline void mips_vpe_wake(MIPSCPU *c)
+{
+ /* Don't set ->halted = 0 directly; let it be done via cpu_has_work,
+ because there might be other conditions that require c to stay
+ asleep. */
+ cpu_interrupt(CPU(c), CPU_INTERRUPT_WAKE);
+}
+
+static inline void mips_vpe_sleep(MIPSCPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+
+ /* The VPE was shut off; really go to bed.
+ Reset any old _WAKE requests. */
+ cs->halted = 1;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE);
+}
+
+static inline void mips_tc_wake(MIPSCPU *cpu, int tc)
+{
+ CPUMIPSState *c = &cpu->env;
+
+ /* FIXME: TC reschedule. */
+ if (mips_vpe_active(c) && !mips_vpe_is_wfi(cpu)) {
+ mips_vpe_wake(cpu);
+ }
+}
+
+static inline void mips_tc_sleep(MIPSCPU *cpu, int tc)
+{
+ CPUMIPSState *c = &cpu->env;
+
+ /* FIXME: TC reschedule. */
+ if (!mips_vpe_active(c)) {
+ mips_vpe_sleep(cpu);
+ }
+}
+
+/**
+ * mips_cpu_map_tc:
+ * @env: CPU from which mapping is performed.
+ * @tc: Should point to an int with the value of the global TC index.
+ *
+ * This function will transform @tc into a local index within the
+ * returned #CPUMIPSState.
+ */
+/* FIXME: This code assumes that all VPEs have the same number of TCs,
+ which depends on runtime setup. Can probably be fixed by
+ walking the list of CPUMIPSStates. */
+static CPUMIPSState *mips_cpu_map_tc(CPUMIPSState *env, int *tc)
+{
+ MIPSCPU *cpu;
+ CPUState *cs;
+ CPUState *other_cs;
+ int vpe_idx;
+ int tc_idx = *tc;
+
+ if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))) {
+ /* Not allowed to address other CPUs. */
+ *tc = env->current_tc;
+ return env;
+ }
+
+ cs = CPU(mips_env_get_cpu(env));
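+ /* Split the global TC index into a VPE (CPU) index and a TC index
+ local to that VPE, assuming each VPE exposes nr_threads TCs. */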
+ vpe_idx = tc_idx / cs->nr_threads;
+ *tc = tc_idx % cs->nr_threads;
+ other_cs = qemu_get_cpu(vpe_idx);
+ if (other_cs == NULL) {
+ return env;
+ }
+ cpu = MIPS_CPU(other_cs);
+ return &cpu->env;
+}
+
+/* The per-VPE CP0_Status register shares some fields with the per-TC
+ CP0_TCStatus registers. These fields are wired to the same storage,
+ so a change to either must be reflected in both.
+
+ Also, EntryHi shares its bottom 8 ASID bits with TCStatus.
+
+ These helpers synchronize the registers for a given CPU. */
+
+/* Called for updates to CP0_Status. Defined in "cpu.h" for gdbstub.c. */
+/* static inline void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu,
+ int tc); */
+
+/* Called for updates to CP0_TCStatus. */
+static void sync_c0_tcstatus(CPUMIPSState *cpu, int tc,
+ target_ulong v)
+{
+ uint32_t status;
+ uint32_t tcu, tmx, tasid, tksu;
+ uint32_t mask = ((1U << CP0St_CU3)
+ | (1 << CP0St_CU2)
+ | (1 << CP0St_CU1)
+ | (1 << CP0St_CU0)
+ | (1 << CP0St_MX)
+ | (3 << CP0St_KSU));
+
+ tcu = (v >> CP0TCSt_TCU0) & 0xf;
+ tmx = (v >> CP0TCSt_TMX) & 0x1;
+ tasid = v & cpu->CP0_EntryHi_ASID_mask;
+ tksu = (v >> CP0TCSt_TKSU) & 0x3;
+
+ status = tcu << CP0St_CU0;
+ status |= tmx << CP0St_MX;
+ status |= tksu << CP0St_KSU;
+
+ cpu->CP0_Status &= ~mask;
+ cpu->CP0_Status |= status;
+
+ /* Sync the TASID with EntryHi. */
+ cpu->CP0_EntryHi &= ~cpu->CP0_EntryHi_ASID_mask;
+ cpu->CP0_EntryHi |= tasid;
+
+ compute_hflags(cpu);
+}
+
+/* Called for updates to CP0_EntryHi. */
+static void sync_c0_entryhi(CPUMIPSState *cpu, int tc)
+{
+ int32_t *tcst;
+ uint32_t asid, v = cpu->CP0_EntryHi;
+
+ asid = v & cpu->CP0_EntryHi_ASID_mask;
+
+ if (tc == cpu->current_tc) {
+ tcst = &cpu->active_tc.CP0_TCStatus;
+ } else {
+ tcst = &cpu->tcs[tc].CP0_TCStatus;
+ }
+
+ *tcst &= ~cpu->CP0_EntryHi_ASID_mask;
+ *tcst |= asid;
+}
+
+/* CP0 helpers */
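+/* The mfc0_* helpers operate on the current VPE/TC; the mftc0_*
+ variants target the thread context selected by VPEControl.TargTC,
+ resolved through mips_cpu_map_tc(). */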
+target_ulong helper_mfc0_mvpcontrol(CPUMIPSState *env)
+{
+ return env->mvp->CP0_MVPControl;
+}
+
+target_ulong helper_mfc0_mvpconf0(CPUMIPSState *env)
+{
+ return env->mvp->CP0_MVPConf0;
+}
+
+target_ulong helper_mfc0_mvpconf1(CPUMIPSState *env)
+{
+ return env->mvp->CP0_MVPConf1;
+}
+
+target_ulong helper_mfc0_random(CPUMIPSState *env)
+{
+ return (int32_t)cpu_mips_get_random(env);
+}
+
+target_ulong helper_mfc0_tcstatus(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCStatus;
+}
+
+target_ulong helper_mftc0_tcstatus(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc)
+ return other->active_tc.CP0_TCStatus;
+ else
+ return other->tcs[other_tc].CP0_TCStatus;
+}
+
+target_ulong helper_mfc0_tcbind(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCBind;
+}
+
+target_ulong helper_mftc0_tcbind(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc)
+ return other->active_tc.CP0_TCBind;
+ else
+ return other->tcs[other_tc].CP0_TCBind;
+}
+
+target_ulong helper_mfc0_tcrestart(CPUMIPSState *env)
+{
+ return env->active_tc.PC;
+}
+
+target_ulong helper_mftc0_tcrestart(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc)
+ return other->active_tc.PC;
+ else
+ return other->tcs[other_tc].PC;
+}
+
+target_ulong helper_mfc0_tchalt(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCHalt;
+}
+
+target_ulong helper_mftc0_tchalt(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc)
+ return other->active_tc.CP0_TCHalt;
+ else
+ return other->tcs[other_tc].CP0_TCHalt;
+}
+
+target_ulong helper_mfc0_tccontext(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCContext;
+}
+
+target_ulong helper_mftc0_tccontext(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc)
+ return other->active_tc.CP0_TCContext;
+ else
+ return other->tcs[other_tc].CP0_TCContext;
+}
+
+target_ulong helper_mfc0_tcschedule(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCSchedule;
+}
+
+target_ulong helper_mftc0_tcschedule(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc)
+ return other->active_tc.CP0_TCSchedule;
+ else
+ return other->tcs[other_tc].CP0_TCSchedule;
+}
+
+target_ulong helper_mfc0_tcschefback(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCScheFBack;
+}
+
+target_ulong helper_mftc0_tcschefback(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc)
+ return other->active_tc.CP0_TCScheFBack;
+ else
+ return other->tcs[other_tc].CP0_TCScheFBack;
+}
+
+target_ulong helper_mfc0_count(CPUMIPSState *env)
+{
+ return (int32_t)cpu_mips_get_count(env);
+}
+
+target_ulong helper_mftc0_entryhi(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ return other->CP0_EntryHi;
+}
+
+target_ulong helper_mftc0_cause(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ int32_t tccause;
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ /* CP0_Cause is per-VPE rather than per-TC, so the value is the same
+ whichever TC is current on the target VPE. */
+ tccause = other->CP0_Cause;
+
+ return tccause;
+}
+
+target_ulong helper_mftc0_status(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ return other->CP0_Status;
+}
+
+target_ulong helper_mfc0_lladdr(CPUMIPSState *env)
+{
+ return (int32_t)(env->lladdr >> env->CP0_LLAddr_shift);
+}
+
+target_ulong helper_mfc0_maar(CPUMIPSState *env)
+{
+ return (int32_t) env->CP0_MAAR[env->CP0_MAARI];
+}
+
+target_ulong helper_mfhc0_maar(CPUMIPSState *env)
+{
+ return env->CP0_MAAR[env->CP0_MAARI] >> 32;
+}
+
+target_ulong helper_mfc0_watchlo(CPUMIPSState *env, uint32_t sel)
+{
+ return (int32_t)env->CP0_WatchLo[sel];
+}
+
+target_ulong helper_mfc0_watchhi(CPUMIPSState *env, uint32_t sel)
+{
+ return env->CP0_WatchHi[sel];
+}
+
+target_ulong helper_mfc0_debug(CPUMIPSState *env)
+{
+ target_ulong t0 = env->CP0_Debug;
+ if (env->hflags & MIPS_HFLAG_DM)
+ t0 |= 1 << CP0DB_DM;
+
+ return t0;
+}
+
+target_ulong helper_mftc0_debug(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ int32_t tcstatus;
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc)
+ tcstatus = other->active_tc.CP0_Debug_tcstatus;
+ else
+ tcstatus = other->tcs[other_tc].CP0_Debug_tcstatus;
+
+ /* XXX: Might be wrong, check with EJTAG spec. */
+ return (other->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
+ (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
+}
+
+#if defined(TARGET_MIPS64)
+target_ulong helper_dmfc0_tcrestart(CPUMIPSState *env)
+{
+ return env->active_tc.PC;
+}
+
+target_ulong helper_dmfc0_tchalt(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCHalt;
+}
+
+target_ulong helper_dmfc0_tccontext(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCContext;
+}
+
+target_ulong helper_dmfc0_tcschedule(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCSchedule;
+}
+
+target_ulong helper_dmfc0_tcschefback(CPUMIPSState *env)
+{
+ return env->active_tc.CP0_TCScheFBack;
+}
+
+target_ulong helper_dmfc0_lladdr(CPUMIPSState *env)
+{
+ return env->lladdr >> env->CP0_LLAddr_shift;
+}
+
+target_ulong helper_dmfc0_maar(CPUMIPSState *env)
+{
+ return env->CP0_MAAR[env->CP0_MAARI];
+}
+
+target_ulong helper_dmfc0_watchlo(CPUMIPSState *env, uint32_t sel)
+{
+ return env->CP0_WatchLo[sel];
+}
+#endif /* TARGET_MIPS64 */
+
+void helper_mtc0_index(CPUMIPSState *env, target_ulong arg1)
+{
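+ /* Bit 31 is the probe-failure (P) bit: the current value is
+ preserved, and under R6 the written value may also set it. Writes
+ that select a non-existent TLB entry are ignored. */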
+ uint32_t index_p = env->CP0_Index & 0x80000000;
+ uint32_t tlb_index = arg1 & 0x7fffffff;
+ if (tlb_index < env->tlb->nb_tlb) {
+ if (env->insn_flags & ISA_MIPS32R6) {
+ index_p |= arg1 & 0x80000000;
+ }
+ env->CP0_Index = index_p | tlb_index;
+ }
+}
+
+void helper_mtc0_mvpcontrol(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t mask = 0;
+ uint32_t newval;
+
+ if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))
+ mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
+ (1 << CP0MVPCo_EVP);
+ if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
+ mask |= (1 << CP0MVPCo_STLB);
+ newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask);
+
+ // TODO: Enable/disable shared TLB, enable/disable VPEs.
+
+ env->mvp->CP0_MVPControl = newval;
+}
+
+void helper_mtc0_vpecontrol(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t mask;
+ uint32_t newval;
+
+ mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
+ (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
+ newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask);
+
+ /* Yield scheduler intercept not implemented. */
+ /* Gating storage scheduler intercept not implemented. */
+
+ // TODO: Enable/disable TCs.
+
+ env->CP0_VPEControl = newval;
+}
+
+void helper_mttc0_vpecontrol(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+ uint32_t mask;
+ uint32_t newval;
+
+ mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
+ (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
+ newval = (other->CP0_VPEControl & ~mask) | (arg1 & mask);
+
+ /* TODO: Enable/disable TCs. */
+
+ other->CP0_VPEControl = newval;
+}
+
+target_ulong helper_mftc0_vpecontrol(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+ /* FIXME: Mask away the bits that must read as zero. */
+ return other->CP0_VPEControl;
+}
+
+target_ulong helper_mftc0_vpeconf0(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ return other->CP0_VPEConf0;
+}
+
+void helper_mtc0_vpeconf0(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t mask = 0;
+ uint32_t newval;
+
+ if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
+ if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))
+ mask |= (0xff << CP0VPEC0_XTC);
+ mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
+ }
+ newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask);
+
+ // TODO: TC exclusive handling due to ERL/EXL.
+
+ env->CP0_VPEConf0 = newval;
+}
+
+void helper_mttc0_vpeconf0(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+ uint32_t mask = 0;
+ uint32_t newval;
+
+ mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
+ newval = (other->CP0_VPEConf0 & ~mask) | (arg1 & mask);
+
+ /* TODO: TC exclusive handling due to ERL/EXL. */
+ other->CP0_VPEConf0 = newval;
+}
+
+void helper_mtc0_vpeconf1(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t mask = 0;
+ uint32_t newval;
+
+ if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
+ mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
+ (0xff << CP0VPEC1_NCP1);
+ newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask);
+
+ /* UDI not implemented. */
+ /* CP2 not implemented. */
+
+ // TODO: Handle FPU (CP1) binding.
+
+ env->CP0_VPEConf1 = newval;
+}
+
+void helper_mtc0_yqmask(CPUMIPSState *env, target_ulong arg1)
+{
+ /* Yield qualifier inputs not implemented. */
+ env->CP0_YQMask = 0x00000000;
+}
+
+void helper_mtc0_vpeopt(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_VPEOpt = arg1 & 0x0000ffff;
+}
+
+#define MTC0_ENTRYLO_MASK(env) ((env->PAMask >> 6) & 0x3FFFFFFF)
+
+void helper_mtc0_entrylo0(CPUMIPSState *env, target_ulong arg1)
+{
+ /* 1k pages not implemented */
+ target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE));
+ env->CP0_EntryLo0 = (arg1 & MTC0_ENTRYLO_MASK(env))
+ | (rxi << (CP0EnLo_XI - 30));
+}
+
+#if defined(TARGET_MIPS64)
+#define DMTC0_ENTRYLO_MASK(env) (env->PAMask >> 6)
+
+void helper_dmtc0_entrylo0(CPUMIPSState *env, uint64_t arg1)
+{
+ uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32);
+ env->CP0_EntryLo0 = (arg1 & DMTC0_ENTRYLO_MASK(env)) | rxi;
+}
+#endif
+
+void helper_mtc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t mask = env->CP0_TCStatus_rw_bitmask;
+ uint32_t newval;
+
+ newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);
+
+ env->active_tc.CP0_TCStatus = newval;
+ sync_c0_tcstatus(env, env->current_tc, newval);
+}
+
+void helper_mttc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc)
+ other->active_tc.CP0_TCStatus = arg1;
+ else
+ other->tcs[other_tc].CP0_TCStatus = arg1;
+ sync_c0_tcstatus(other, other_tc, arg1);
+}
+
+void helper_mtc0_tcbind(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t mask = (1 << CP0TCBd_TBE);
+ uint32_t newval;
+
+ if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
+ mask |= (1 << CP0TCBd_CurVPE);
+ newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
+ env->active_tc.CP0_TCBind = newval;
+}
+
+void helper_mttc0_tcbind(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ uint32_t mask = (1 << CP0TCBd_TBE);
+ uint32_t newval;
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
+ mask |= (1 << CP0TCBd_CurVPE);
+ if (other_tc == other->current_tc) {
+ newval = (other->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
+ other->active_tc.CP0_TCBind = newval;
+ } else {
+ newval = (other->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
+ other->tcs[other_tc].CP0_TCBind = newval;
+ }
+}
+
+void helper_mtc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
+{
+ env->active_tc.PC = arg1;
+ env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
+ env->lladdr = 0ULL;
+ /* MIPS16 not implemented. */
+}
+
+void helper_mttc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc) {
+ other->active_tc.PC = arg1;
+ other->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
+ other->lladdr = 0ULL;
+ /* MIPS16 not implemented. */
+ } else {
+ other->tcs[other_tc].PC = arg1;
+ other->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
+ other->lladdr = 0ULL;
+ /* MIPS16 not implemented. */
+ }
+}
+
+void helper_mtc0_tchalt(CPUMIPSState *env, target_ulong arg1)
+{
+ MIPSCPU *cpu = mips_env_get_cpu(env);
+
+ env->active_tc.CP0_TCHalt = arg1 & 0x1;
+
+ // TODO: Halt TC / Restart (if allocated+active) TC.
+ if (env->active_tc.CP0_TCHalt & 1) {
+ mips_tc_sleep(cpu, env->current_tc);
+ } else {
+ mips_tc_wake(cpu, env->current_tc);
+ }
+}
+
+void helper_mttc0_tchalt(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+ MIPSCPU *other_cpu = mips_env_get_cpu(other);
+
+ // TODO: Halt TC / Restart (if allocated+active) TC.
+
+ if (other_tc == other->current_tc)
+ other->active_tc.CP0_TCHalt = arg1;
+ else
+ other->tcs[other_tc].CP0_TCHalt = arg1;
+
+ if (arg1 & 1) {
+ mips_tc_sleep(other_cpu, other_tc);
+ } else {
+ mips_tc_wake(other_cpu, other_tc);
+ }
+}
+
+void helper_mtc0_tccontext(CPUMIPSState *env, target_ulong arg1)
+{
+ env->active_tc.CP0_TCContext = arg1;
+}
+
+void helper_mttc0_tccontext(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc)
+ other->active_tc.CP0_TCContext = arg1;
+ else
+ other->tcs[other_tc].CP0_TCContext = arg1;
+}
+
+void helper_mtc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
+{
+ env->active_tc.CP0_TCSchedule = arg1;
+}
+
+void helper_mttc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc)
+ other->active_tc.CP0_TCSchedule = arg1;
+ else
+ other->tcs[other_tc].CP0_TCSchedule = arg1;
+}
+
+void helper_mtc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
+{
+ env->active_tc.CP0_TCScheFBack = arg1;
+}
+
+void helper_mttc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc)
+ other->active_tc.CP0_TCScheFBack = arg1;
+ else
+ other->tcs[other_tc].CP0_TCScheFBack = arg1;
+}
+
+void helper_mtc0_entrylo1(CPUMIPSState *env, target_ulong arg1)
+{
+ /* 1k pages not implemented */
+ target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE));
+ env->CP0_EntryLo1 = (arg1 & MTC0_ENTRYLO_MASK(env))
+ | (rxi << (CP0EnLo_XI - 30));
+}
+
+#if defined(TARGET_MIPS64)
+void helper_dmtc0_entrylo1(CPUMIPSState *env, uint64_t arg1)
+{
+ uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32);
+ env->CP0_EntryLo1 = (arg1 & DMTC0_ENTRYLO_MASK(env)) | rxi;
+}
+#endif
+
+void helper_mtc0_context(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
+}
+
+void helper_mtc0_pagemask(CPUMIPSState *env, target_ulong arg1)
+{
+ uint64_t mask = arg1 >> (TARGET_PAGE_BITS + 1);
+ if (!(env->insn_flags & ISA_MIPS32R6) || (arg1 == ~0) ||
+ (mask == 0x0000 || mask == 0x0003 || mask == 0x000F ||
+ mask == 0x003F || mask == 0x00FF || mask == 0x03FF ||
+ mask == 0x0FFF || mask == 0x3FFF || mask == 0xFFFF)) {
+ env->CP0_PageMask = arg1 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
+ }
+}
+
+void helper_mtc0_pagegrain(CPUMIPSState *env, target_ulong arg1)
+{
+ /* SmartMIPS not implemented */
+ /* 1k pages not implemented */
+ env->CP0_PageGrain = (arg1 & env->CP0_PageGrain_rw_bitmask) |
+ (env->CP0_PageGrain & ~env->CP0_PageGrain_rw_bitmask);
+ compute_hflags(env);
+ restore_pamask(env);
+}
+
+void helper_mtc0_wired(CPUMIPSState *env, target_ulong arg1)
+{
+ if (env->insn_flags & ISA_MIPS32R6) {
+ if (arg1 < env->tlb->nb_tlb) {
+ env->CP0_Wired = arg1;
+ }
+ } else {
+ env->CP0_Wired = arg1 % env->tlb->nb_tlb;
+ }
+}
+
+void helper_mtc0_srsconf0(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
+}
+
+void helper_mtc0_srsconf1(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
+}
+
+void helper_mtc0_srsconf2(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
+}
+
+void helper_mtc0_srsconf3(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
+}
+
+void helper_mtc0_srsconf4(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
+}
+
+void helper_mtc0_hwrena(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t mask = 0x0000000F;
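+ /* RDHWR registers 0-3 (CPUNum, SYNCI_Step, CC, CCRes) are always
+ selectable; bit 4 (performance counter, if Config1.PC) and bit 5
+ (XNP) exist on R6, and bit 29 enables the UserLocal register
+ (Config3.ULRI). */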
+
+ if ((env->CP0_Config1 & (1 << CP0C1_PC)) &&
+ (env->insn_flags & ISA_MIPS32R6)) {
+ mask |= (1 << 4);
+ }
+ if (env->insn_flags & ISA_MIPS32R6) {
+ mask |= (1 << 5);
+ }
+ if (env->CP0_Config3 & (1 << CP0C3_ULRI)) {
+ mask |= (1 << 29);
+
+ if (arg1 & (1 << 29)) {
+ env->hflags |= MIPS_HFLAG_HWRENA_ULR;
+ } else {
+ env->hflags &= ~MIPS_HFLAG_HWRENA_ULR;
+ }
+ }
+
+ env->CP0_HWREna = arg1 & mask;
+}
+
+void helper_mtc0_count(CPUMIPSState *env, target_ulong arg1)
+{
+ cpu_mips_store_count(env, arg1);
+}
+
+void helper_mtc0_entryhi(CPUMIPSState *env, target_ulong arg1)
+{
+ target_ulong old, val, mask;
+ mask = (TARGET_PAGE_MASK << 1) | env->CP0_EntryHi_ASID_mask;
+ if (((env->CP0_Config4 >> CP0C4_IE) & 0x3) >= 2) {
+ mask |= 1 << CP0EnHi_EHINV;
+ }
+
+ /* 1k pages not implemented */
+#if defined(TARGET_MIPS64)
+ if (env->insn_flags & ISA_MIPS32R6) {
+ int entryhi_r = extract64(arg1, 62, 2);
+ int config0_at = extract32(env->CP0_Config0, 13, 2);
+ bool no_supervisor = (env->CP0_Status_rw_bitmask & 0x8) == 0;
+ if ((entryhi_r == 2) ||
+ (entryhi_r == 1 && (no_supervisor || config0_at == 1))) {
+ /* skip EntryHi.R field if new value is reserved */
+ mask &= ~(0x3ull << 62);
+ }
+ }
+ mask &= env->SEGMask;
+#endif
+ old = env->CP0_EntryHi;
+ val = (arg1 & mask) | (old & ~mask);
+ env->CP0_EntryHi = val;
+ if (env->CP0_Config3 & (1 << CP0C3_MT)) {
+ sync_c0_entryhi(env, env->current_tc);
+ }
+ /* If the ASID changes, flush qemu's TLB. */
+ if ((old & env->CP0_EntryHi_ASID_mask) !=
+ (val & env->CP0_EntryHi_ASID_mask)) {
+ cpu_mips_tlb_flush(env, 1);
+ }
+}
+
+void helper_mttc0_entryhi(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ other->CP0_EntryHi = arg1;
+ sync_c0_entryhi(other, other_tc);
+}
+
+void helper_mtc0_compare(CPUMIPSState *env, target_ulong arg1)
+{
+ cpu_mips_store_compare(env, arg1);
+}
+
+void helper_mtc0_status(CPUMIPSState *env, target_ulong arg1)
+{
+ MIPSCPU *cpu = mips_env_get_cpu(env);
+ uint32_t val, old;
+
+ old = env->CP0_Status;
+ cpu_mips_store_status(env, arg1);
+ val = env->CP0_Status;
+
+ if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
+ qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
+ old, old & env->CP0_Cause & CP0Ca_IP_mask,
+ val, val & env->CP0_Cause & CP0Ca_IP_mask,
+ env->CP0_Cause);
+ switch (env->hflags & MIPS_HFLAG_KSU) {
+ case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
+ case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
+ case MIPS_HFLAG_KM: qemu_log("\n"); break;
+ default:
+ cpu_abort(CPU(cpu), "Invalid MMU mode!\n");
+ break;
+ }
+ }
+}
+
+void helper_mttc0_status(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ uint32_t mask = env->CP0_Status_rw_bitmask & ~0xf1000018;
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ other->CP0_Status = (other->CP0_Status & ~mask) | (arg1 & mask);
+ sync_c0_status(env, other, other_tc);
+}
+
+void helper_mtc0_intctl(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000003e0) | (arg1 & 0x000003e0);
+}
+
+void helper_mtc0_srsctl(CPUMIPSState *env, target_ulong arg1)
+{
+ uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
+ env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
+}
+
+void helper_mtc0_cause(CPUMIPSState *env, target_ulong arg1)
+{
+ cpu_mips_store_cause(env, arg1);
+}
+
+void helper_mttc0_cause(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ cpu_mips_store_cause(other, arg1);
+}
+
+target_ulong helper_mftc0_epc(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ return other->CP0_EPC;
+}
+
+target_ulong helper_mftc0_ebase(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ return other->CP0_EBase;
+}
+
+void helper_mtc0_ebase(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_EBase = (env->CP0_EBase & ~0x3FFFF000) | (arg1 & 0x3FFFF000);
+}
+
+void helper_mttc0_ebase(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+ other->CP0_EBase = (other->CP0_EBase & ~0x3FFFF000) | (arg1 & 0x3FFFF000);
+}
+
+target_ulong helper_mftc0_configx(CPUMIPSState *env, target_ulong idx)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ switch (idx) {
+ case 0: return other->CP0_Config0;
+ case 1: return other->CP0_Config1;
+ case 2: return other->CP0_Config2;
+ case 3: return other->CP0_Config3;
+ /* 4 and 5 are reserved. */
+ case 6: return other->CP0_Config6;
+ case 7: return other->CP0_Config7;
+ default:
+ break;
+ }
+ return 0;
+}
+
+void helper_mtc0_config0(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
+}
+
+void helper_mtc0_config2(CPUMIPSState *env, target_ulong arg1)
+{
+ /* tertiary/secondary caches not implemented */
+ env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
+}
+
+void helper_mtc0_config3(CPUMIPSState *env, target_ulong arg1)
+{
+ if (env->insn_flags & ASE_MICROMIPS) {
+ env->CP0_Config3 = (env->CP0_Config3 & ~(1 << CP0C3_ISA_ON_EXC)) |
+ (arg1 & (1 << CP0C3_ISA_ON_EXC));
+ }
+}
+
+void helper_mtc0_config4(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_Config4 = (env->CP0_Config4 & (~env->CP0_Config4_rw_bitmask)) |
+ (arg1 & env->CP0_Config4_rw_bitmask);
+}
+
+void helper_mtc0_config5(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_Config5 = (env->CP0_Config5 & (~env->CP0_Config5_rw_bitmask)) |
+ (arg1 & env->CP0_Config5_rw_bitmask);
+ compute_hflags(env);
+}
+
+void helper_mtc0_lladdr(CPUMIPSState *env, target_ulong arg1)
+{
+ target_long mask = env->CP0_LLAddr_rw_bitmask;
+ arg1 = arg1 << env->CP0_LLAddr_shift;
+ env->lladdr = (env->lladdr & ~mask) | (arg1 & mask);
+}
+
+#define MTC0_MAAR_MASK(env) \
+ ((0x1ULL << 63) | ((env->PAMask >> 4) & ~0xFFFull) | 0x3)
+
+void helper_mtc0_maar(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_MAAR[env->CP0_MAARI] = arg1 & MTC0_MAAR_MASK(env);
+}
+
+void helper_mthc0_maar(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_MAAR[env->CP0_MAARI] =
+ (((uint64_t) arg1 << 32) & MTC0_MAAR_MASK(env)) |
+ (env->CP0_MAAR[env->CP0_MAARI] & 0x00000000ffffffffULL);
+}
+
+void helper_mtc0_maari(CPUMIPSState *env, target_ulong arg1)
+{
+ int index = arg1 & 0x3f;
+ if (index == 0x3f) {
+ /* Software may write all ones to INDEX to determine the
+ maximum value supported. */
+ env->CP0_MAARI = MIPS_MAAR_MAX - 1;
+ } else if (index < MIPS_MAAR_MAX) {
+ env->CP0_MAARI = index;
+ }
+ /* Apart from the all-ones case above, an unsupported value leaves
+ INDEX unchanged. */
+}
+
+void helper_mtc0_watchlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
+{
+ /* Watch exceptions for instructions, data loads, data stores
+ not implemented. */
+ env->CP0_WatchLo[sel] = (arg1 & ~0x7);
+}
+
+void helper_mtc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
+{
+ int mask = 0x40000FF8 | (env->CP0_EntryHi_ASID_mask << CP0WH_ASID);
+ env->CP0_WatchHi[sel] = arg1 & mask;
+ env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
+}
+
+void helper_mtc0_xcontext(CPUMIPSState *env, target_ulong arg1)
+{
+ target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
+ env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
+}
+
+void helper_mtc0_framemask(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_Framemask = arg1; /* XXX */
+}
+
+void helper_mtc0_debug(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
+ if (arg1 & (1 << CP0DB_DM))
+ env->hflags |= MIPS_HFLAG_DM;
+ else
+ env->hflags &= ~MIPS_HFLAG_DM;
+}
+
+void helper_mttc0_debug(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ /* XXX: Might be wrong, check with EJTAG spec. */
+ if (other_tc == other->current_tc)
+ other->active_tc.CP0_Debug_tcstatus = val;
+ else
+ other->tcs[other_tc].CP0_Debug_tcstatus = val;
+ other->CP0_Debug = (other->CP0_Debug &
+ ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
+ (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
+}
+
+void helper_mtc0_performance0(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_Performance0 = arg1 & 0x000007ff;
+}
+
+void helper_mtc0_errctl(CPUMIPSState *env, target_ulong arg1)
+{
+ int32_t wst = arg1 & (1 << CP0EC_WST);
+ int32_t spr = arg1 & (1 << CP0EC_SPR);
+ int32_t itc = env->itc_tag ? (arg1 & (1 << CP0EC_ITC)) : 0;
+
+ env->CP0_ErrCtl = wst | spr | itc;
+
+ if (itc && !wst && !spr) {
+ env->hflags |= MIPS_HFLAG_ITC_CACHE;
+ } else {
+ env->hflags &= ~MIPS_HFLAG_ITC_CACHE;
+ }
+}
+
+void helper_mtc0_taglo(CPUMIPSState *env, target_ulong arg1)
+{
+ if (env->hflags & MIPS_HFLAG_ITC_CACHE) {
+ /* If CACHE instruction is configured for ITC tags then make all
+ CP0.TagLo bits writable. The actual write to ITC Configuration
+ Tag will take care of the read-only bits. */
+ env->CP0_TagLo = arg1;
+ } else {
+ env->CP0_TagLo = arg1 & 0xFFFFFCF6;
+ }
+}
+
+void helper_mtc0_datalo(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_DataLo = arg1; /* XXX */
+}
+
+void helper_mtc0_taghi(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_TagHi = arg1; /* XXX */
+}
+
+void helper_mtc0_datahi(CPUMIPSState *env, target_ulong arg1)
+{
+ env->CP0_DataHi = arg1; /* XXX */
+}
+
+/* MIPS MT functions */
+target_ulong helper_mftgpr(CPUMIPSState *env, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc)
+ return other->active_tc.gpr[sel];
+ else
+ return other->tcs[other_tc].gpr[sel];
+}
+
+target_ulong helper_mftlo(CPUMIPSState *env, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc)
+ return other->active_tc.LO[sel];
+ else
+ return other->tcs[other_tc].LO[sel];
+}
+
+target_ulong helper_mfthi(CPUMIPSState *env, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc)
+ return other->active_tc.HI[sel];
+ else
+ return other->tcs[other_tc].HI[sel];
+}
+
+target_ulong helper_mftacx(CPUMIPSState *env, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc)
+ return other->active_tc.ACX[sel];
+ else
+ return other->tcs[other_tc].ACX[sel];
+}
+
+target_ulong helper_mftdsp(CPUMIPSState *env)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc)
+ return other->active_tc.DSPControl;
+ else
+ return other->tcs[other_tc].DSPControl;
+}
+
+void helper_mttgpr(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc)
+ other->active_tc.gpr[sel] = arg1;
+ else
+ other->tcs[other_tc].gpr[sel] = arg1;
+}
+
+void helper_mttlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc)
+ other->active_tc.LO[sel] = arg1;
+ else
+ other->tcs[other_tc].LO[sel] = arg1;
+}
+
+void helper_mtthi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc)
+ other->active_tc.HI[sel] = arg1;
+ else
+ other->tcs[other_tc].HI[sel] = arg1;
+}
+
+void helper_mttacx(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc)
+ other->active_tc.ACX[sel] = arg1;
+ else
+ other->tcs[other_tc].ACX[sel] = arg1;
+}
+
+void helper_mttdsp(CPUMIPSState *env, target_ulong arg1)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
+
+ if (other_tc == other->current_tc)
+ other->active_tc.DSPControl = arg1;
+ else
+ other->tcs[other_tc].DSPControl = arg1;
+}
+
+/* MIPS MT functions */
+target_ulong helper_dmt(void)
+{
+ // TODO
+ return 0;
+}
+
+target_ulong helper_emt(void)
+{
+ // TODO
+ return 0;
+}
+
+target_ulong helper_dvpe(CPUMIPSState *env)
+{
+ CPUState *other_cs = first_cpu;
+ target_ulong prev = env->mvp->CP0_MVPControl;
+
+ CPU_FOREACH(other_cs) {
+ MIPSCPU *other_cpu = MIPS_CPU(other_cs);
+ /* Turn off all VPEs except the one executing the dvpe. */
+ if (&other_cpu->env != env) {
+ other_cpu->env.mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP);
+ mips_vpe_sleep(other_cpu);
+ }
+ }
+ return prev;
+}
+
+target_ulong helper_evpe(CPUMIPSState *env)
+{
+ CPUState *other_cs = first_cpu;
+ target_ulong prev = env->mvp->CP0_MVPControl;
+
+ CPU_FOREACH(other_cs) {
+ MIPSCPU *other_cpu = MIPS_CPU(other_cs);
+
+ if (&other_cpu->env != env
+ /* If the VPE is WFI, don't disturb its sleep. */
+ && !mips_vpe_is_wfi(other_cpu)) {
+ /* Enable the VPE. */
+ other_cpu->env.mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP);
+ mips_vpe_wake(other_cpu); /* And wake it up. */
+ }
+ }
+ return prev;
+}
+#endif /* !CONFIG_USER_ONLY */
+
+void helper_fork(target_ulong arg1, target_ulong arg2)
+{
+ // arg1 = rt, arg2 = rs
+ // TODO: store to TC register
+}
+
+target_ulong helper_yield(CPUMIPSState *env, target_ulong arg)
+{
+ target_long arg1 = arg;
+
+ if (arg1 < 0) {
+ /* No scheduling policy implemented. */
+ if (arg1 != -2) {
+ if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
+ env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
+ env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
+ env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
+ do_raise_exception(env, EXCP_THREAD, GETPC());
+ }
+ }
+ } else if (arg1 == 0) {
+ if (0 /* TODO: TC underflow */) {
+ env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
+ do_raise_exception(env, EXCP_THREAD, GETPC());
+ } else {
+ // TODO: Deallocate TC
+ }
+ } else if (arg1 > 0) {
+ /* Yield qualifier inputs not implemented. */
+ env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
+ env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
+ do_raise_exception(env, EXCP_THREAD, GETPC());
+ }
+ return env->CP0_YQMask;
+}
+
+/* R6 Multi-threading */
+#ifndef CONFIG_USER_ONLY
+target_ulong helper_dvp(CPUMIPSState *env)
+{
+ CPUState *other_cs = first_cpu;
+ target_ulong prev = env->CP0_VPControl;
+
+ if (!((env->CP0_VPControl >> CP0VPCtl_DIS) & 1)) {
+ CPU_FOREACH(other_cs) {
+ MIPSCPU *other_cpu = MIPS_CPU(other_cs);
+ /* Turn off all VPs except the one executing the dvp. */
+ if (&other_cpu->env != env) {
+ mips_vpe_sleep(other_cpu);
+ }
+ }
+ env->CP0_VPControl |= (1 << CP0VPCtl_DIS);
+ }
+ return prev;
+}
+
+target_ulong helper_evp(CPUMIPSState *env)
+{
+ CPUState *other_cs = first_cpu;
+ target_ulong prev = env->CP0_VPControl;
+
+ if ((env->CP0_VPControl >> CP0VPCtl_DIS) & 1) {
+ CPU_FOREACH(other_cs) {
+ MIPSCPU *other_cpu = MIPS_CPU(other_cs);
+ if ((&other_cpu->env != env) && !mips_vp_is_wfi(other_cpu)) {
+ /* If the VP is WFI, don't disturb its sleep.
+ * Otherwise, wake it up. */
+ mips_vpe_wake(other_cpu);
+ }
+ }
+ env->CP0_VPControl &= ~(1 << CP0VPCtl_DIS);
+ }
+ return prev;
+}
+#endif /* !CONFIG_USER_ONLY */
+
+#ifndef CONFIG_USER_ONLY
+/* TLB management */
+static void r4k_mips_tlb_flush_extra (CPUMIPSState *env, int first)
+{
+ /* Discard entries from env->tlb[first] onwards. */
+ while (env->tlb->tlb_in_use > first) {
+ r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
+ }
+}
+
+static inline uint64_t get_tlb_pfn_from_entrylo(uint64_t entrylo)
+{
+#if defined(TARGET_MIPS64)
+ return extract64(entrylo, 6, 54);
+#else
+ return extract64(entrylo, 6, 24) | /* PFN */
+ (extract64(entrylo, 32, 32) << 24); /* PFNX */
+#endif
+}
+
+static void r4k_fill_tlb(CPUMIPSState *env, int idx)
+{
+ r4k_tlb_t *tlb;
+
+ /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
+ tlb = &env->tlb->mmu.r4k.tlb[idx];
+ if (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) {
+ tlb->EHINV = 1;
+ return;
+ }
+ tlb->EHINV = 0;
+ tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
+#if defined(TARGET_MIPS64)
+ tlb->VPN &= env->SEGMask;
+#endif
+ tlb->ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
+ tlb->PageMask = env->CP0_PageMask;
+ tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
+ tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
+ tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
+ tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
+ tlb->XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1;
+ tlb->RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1;
+ tlb->PFN[0] = get_tlb_pfn_from_entrylo(env->CP0_EntryLo0) << 12;
+ tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
+ tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
+ tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
+ tlb->XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1;
+ tlb->RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1;
+ tlb->PFN[1] = get_tlb_pfn_from_entrylo(env->CP0_EntryLo1) << 12;
+}
+
+void r4k_helper_tlbinv(CPUMIPSState *env)
+{
+ int idx;
+ r4k_tlb_t *tlb;
+ uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
+
+ for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
+ tlb = &env->tlb->mmu.r4k.tlb[idx];
+ if (!tlb->G && tlb->ASID == ASID) {
+ tlb->EHINV = 1;
+ }
+ }
+ cpu_mips_tlb_flush(env, 1);
+}
+
+void r4k_helper_tlbinvf(CPUMIPSState *env)
+{
+ int idx;
+
+ for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
+ env->tlb->mmu.r4k.tlb[idx].EHINV = 1;
+ }
+ cpu_mips_tlb_flush(env, 1);
+}
+
+void r4k_helper_tlbwi(CPUMIPSState *env)
+{
+ r4k_tlb_t *tlb;
+ int idx;
+ target_ulong VPN;
+ uint16_t ASID;
+ bool G, V0, D0, V1, D1;
+
+ idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
+ tlb = &env->tlb->mmu.r4k.tlb[idx];
+ VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
+#if defined(TARGET_MIPS64)
+ VPN &= env->SEGMask;
+#endif
+ ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
+ G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
+ V0 = (env->CP0_EntryLo0 & 2) != 0;
+ D0 = (env->CP0_EntryLo0 & 4) != 0;
+ V1 = (env->CP0_EntryLo1 & 2) != 0;
+ D1 = (env->CP0_EntryLo1 & 4) != 0;
+
+ /* Discard cached TLB entries, unless tlbwi is just upgrading access
+ permissions on the current entry. */
+ if (tlb->VPN != VPN || tlb->ASID != ASID || tlb->G != G ||
+ (tlb->V0 && !V0) || (tlb->D0 && !D0) ||
+ (tlb->V1 && !V1) || (tlb->D1 && !D1)) {
+ r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
+ }
+
+ r4k_invalidate_tlb(env, idx, 0);
+ r4k_fill_tlb(env, idx);
+}
+
+void r4k_helper_tlbwr(CPUMIPSState *env)
+{
+ int r = cpu_mips_get_random(env);
+
+ r4k_invalidate_tlb(env, r, 1);
+ r4k_fill_tlb(env, r);
+}
+
+void r4k_helper_tlbp(CPUMIPSState *env)
+{
+ r4k_tlb_t *tlb;
+ target_ulong mask;
+ target_ulong tag;
+ target_ulong VPN;
+ uint16_t ASID;
+ int i;
+
+ ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
+ for (i = 0; i < env->tlb->nb_tlb; i++) {
+ tlb = &env->tlb->mmu.r4k.tlb[i];
+ /* 1k pages are not supported. */
+ mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
+ tag = env->CP0_EntryHi & ~mask;
+ VPN = tlb->VPN & ~mask;
+#if defined(TARGET_MIPS64)
+ tag &= env->SEGMask;
+#endif
+ /* Check ASID, virtual page number & size */
+ if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag && !tlb->EHINV) {
+ /* TLB match */
+ env->CP0_Index = i;
+ break;
+ }
+ }
+ if (i == env->tlb->nb_tlb) {
+ /* No match in the architecturally visible TLB. Discard any
+ shadow entries that do match. */
+ for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
+ tlb = &env->tlb->mmu.r4k.tlb[i];
+ /* 1k pages are not supported. */
+ mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
+ tag = env->CP0_EntryHi & ~mask;
+ VPN = tlb->VPN & ~mask;
+#if defined(TARGET_MIPS64)
+ tag &= env->SEGMask;
+#endif
+ /* Check ASID, virtual page number & size */
+ if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
+ r4k_mips_tlb_flush_extra (env, i);
+ break;
+ }
+ }
+
+ env->CP0_Index |= 0x80000000;
+ }
+}
+
+static inline uint64_t get_entrylo_pfn_from_tlb(uint64_t tlb_pfn)
+{
+#if defined(TARGET_MIPS64)
+ return tlb_pfn << 6;
+#else
+ return (extract64(tlb_pfn, 0, 24) << 6) | /* PFN */
+ (extract64(tlb_pfn, 24, 32) << 32); /* PFNX */
+#endif
+}
+
+void r4k_helper_tlbr(CPUMIPSState *env)
+{
+ r4k_tlb_t *tlb;
+ uint16_t ASID;
+ int idx;
+
+ ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
+ idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
+ tlb = &env->tlb->mmu.r4k.tlb[idx];
+
+ /* If this will change the current ASID, flush qemu's TLB. */
+ if (ASID != tlb->ASID) {
+ cpu_mips_tlb_flush(env, 1);
+ }
+
+ r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
+
+ if (tlb->EHINV) {
+ env->CP0_EntryHi = 1 << CP0EnHi_EHINV;
+ env->CP0_PageMask = 0;
+ env->CP0_EntryLo0 = 0;
+ env->CP0_EntryLo1 = 0;
+ } else {
+ env->CP0_EntryHi = tlb->VPN | tlb->ASID;
+ env->CP0_PageMask = tlb->PageMask;
+ env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
+ ((uint64_t)tlb->RI0 << CP0EnLo_RI) |
+ ((uint64_t)tlb->XI0 << CP0EnLo_XI) | (tlb->C0 << 3) |
+ get_entrylo_pfn_from_tlb(tlb->PFN[0] >> 12);
+ env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
+ ((uint64_t)tlb->RI1 << CP0EnLo_RI) |
+ ((uint64_t)tlb->XI1 << CP0EnLo_XI) | (tlb->C1 << 3) |
+ get_entrylo_pfn_from_tlb(tlb->PFN[1] >> 12);
+ }
+}
+
+void helper_tlbwi(CPUMIPSState *env)
+{
+ env->tlb->helper_tlbwi(env);
+}
+
+void helper_tlbwr(CPUMIPSState *env)
+{
+ env->tlb->helper_tlbwr(env);
+}
+
+void helper_tlbp(CPUMIPSState *env)
+{
+ env->tlb->helper_tlbp(env);
+}
+
+void helper_tlbr(CPUMIPSState *env)
+{
+ env->tlb->helper_tlbr(env);
+}
+
+void helper_tlbinv(CPUMIPSState *env)
+{
+ env->tlb->helper_tlbinv(env);
+}
+
+void helper_tlbinvf(CPUMIPSState *env)
+{
+ env->tlb->helper_tlbinvf(env);
+}
+
+/* Specials */
+target_ulong helper_di(CPUMIPSState *env)
+{
+ target_ulong t0 = env->CP0_Status;
+
+ env->CP0_Status = t0 & ~(1 << CP0St_IE);
+ return t0;
+}
+
+target_ulong helper_ei(CPUMIPSState *env)
+{
+ target_ulong t0 = env->CP0_Status;
+
+ env->CP0_Status = t0 | (1 << CP0St_IE);
+ return t0;
+}
+
+static void debug_pre_eret(CPUMIPSState *env)
+{
+ if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
+ qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
+ env->active_tc.PC, env->CP0_EPC);
+ if (env->CP0_Status & (1 << CP0St_ERL))
+ qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
+ if (env->hflags & MIPS_HFLAG_DM)
+ qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
+ qemu_log("\n");
+ }
+}
+
+static void debug_post_eret(CPUMIPSState *env)
+{
+ MIPSCPU *cpu = mips_env_get_cpu(env);
+
+ if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
+ qemu_log(" => PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
+ env->active_tc.PC, env->CP0_EPC);
+ if (env->CP0_Status & (1 << CP0St_ERL))
+ qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
+ if (env->hflags & MIPS_HFLAG_DM)
+ qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
+ switch (env->hflags & MIPS_HFLAG_KSU) {
+ case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
+ case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
+ case MIPS_HFLAG_KM: qemu_log("\n"); break;
+ default:
+ cpu_abort(CPU(cpu), "Invalid MMU mode!\n");
+ break;
+ }
+ }
+}
+
+static void set_pc(CPUMIPSState *env, target_ulong error_pc)
+{
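+ /* Bit 0 of the target PC selects the compressed (MIPS16/microMIPS)
+ instruction set mode. */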
+ env->active_tc.PC = error_pc & ~(target_ulong)1;
+ if (error_pc & 1) {
+ env->hflags |= MIPS_HFLAG_M16;
+ } else {
+ env->hflags &= ~(MIPS_HFLAG_M16);
+ }
+}
+
+static inline void exception_return(CPUMIPSState *env)
+{
+ debug_pre_eret(env);
+ if (env->CP0_Status & (1 << CP0St_ERL)) {
+ set_pc(env, env->CP0_ErrorEPC);
+ env->CP0_Status &= ~(1 << CP0St_ERL);
+ } else {
+ set_pc(env, env->CP0_EPC);
+ env->CP0_Status &= ~(1 << CP0St_EXL);
+ }
+ compute_hflags(env);
+ debug_post_eret(env);
+}
+
+void helper_eret(CPUMIPSState *env)
+{
+ exception_return(env);
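+ /* ERET clears LLbit: poison lladdr so that any pending LL/SC
+ sequence fails. ERETNC (below) deliberately keeps it. */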
+ env->lladdr = 1;
+}
+
+void helper_eretnc(CPUMIPSState *env)
+{
+ exception_return(env);
+}
+
+void helper_deret(CPUMIPSState *env)
+{
+ debug_pre_eret(env);
+ set_pc(env, env->CP0_DEPC);
+
+ env->hflags &= ~MIPS_HFLAG_DM;
+ compute_hflags(env);
+ debug_post_eret(env);
+}
+#endif /* !CONFIG_USER_ONLY */
+
+static inline void check_hwrena(CPUMIPSState *env, int reg, uintptr_t pc)
+{
+ if ((env->hflags & MIPS_HFLAG_CP0) || (env->CP0_HWREna & (1 << reg))) {
+ return;
+ }
+ do_raise_exception(env, EXCP_RI, pc);
+}
+
+target_ulong helper_rdhwr_cpunum(CPUMIPSState *env)
+{
+ check_hwrena(env, 0, GETPC());
+ return env->CP0_EBase & 0x3ff;
+}
+
+target_ulong helper_rdhwr_synci_step(CPUMIPSState *env)
+{
+ check_hwrena(env, 1, GETPC());
+ return env->SYNCI_Step;
+}
+
+target_ulong helper_rdhwr_cc(CPUMIPSState *env)
+{
+ check_hwrena(env, 2, GETPC());
+#ifdef CONFIG_USER_ONLY
+ return env->CP0_Count;
+#else
+ return (int32_t)cpu_mips_get_count(env);
+#endif
+}
+
+target_ulong helper_rdhwr_ccres(CPUMIPSState *env)
+{
+ check_hwrena(env, 3, GETPC());
+ return env->CCRes;
+}
+
+target_ulong helper_rdhwr_performance(CPUMIPSState *env)
+{
+ check_hwrena(env, 4, GETPC());
+ return env->CP0_Performance0;
+}
+
+target_ulong helper_rdhwr_xnp(CPUMIPSState *env)
+{
+ check_hwrena(env, 5, GETPC());
+ return (env->CP0_Config5 >> CP0C5_XNP) & 1;
+}
+
+void helper_pmon(CPUMIPSState *env, int function)
+{
+ function /= 2;
+ switch (function) {
+ case 2: /* TODO: char inbyte(int waitflag); */
+ if (env->active_tc.gpr[4] == 0)
+ env->active_tc.gpr[2] = -1;
+ /* Fall through */
+ case 11: /* TODO: char inbyte (void); */
+ env->active_tc.gpr[2] = -1;
+ break;
+ case 3:
+ case 12:
+ printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
+ break;
+ case 17:
+ break;
+ case 158:
+ {
+ unsigned char *fmt = (void *)(uintptr_t)env->active_tc.gpr[4];
+ printf("%s", fmt);
+ }
+ break;
+ }
+}
+
+void helper_wait(CPUMIPSState *env)
+{
+ CPUState *cs = CPU(mips_env_get_cpu(env));
+
+ cs->halted = 1;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE);
+ /* This is the last instruction in the block and PC was already
+ updated, so there is no need to recover PC or icount. */
+ raise_exception(env, EXCP_HLT);
+}
+
+#if !defined(CONFIG_USER_ONLY)
+
+void mips_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+ int error_code = 0;
+ int excp;
+
+ env->CP0_BadVAddr = addr;
+
+ if (access_type == MMU_DATA_STORE) {
+ excp = EXCP_AdES;
+ } else {
+ excp = EXCP_AdEL;
+ if (access_type == MMU_INST_FETCH) {
+ error_code |= EXCP_INST_NOTAVAIL;
+ }
+ }
+
+ do_raise_exception_err(env, excp, error_code, retaddr);
+}
+
+void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ int ret;
+
+ ret = mips_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
+ if (ret) {
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+
+ do_raise_exception_err(env, cs->exception_index,
+ env->error_code, retaddr);
+ }
+}
+
+void mips_cpu_unassigned_access(CPUState *cs, hwaddr addr,
+ bool is_write, bool is_exec, int unused,
+ unsigned size)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+
+ /*
+ * Raising an exception with KVM enabled will crash because it won't be
+ * from the main execution loop, so the longjmp won't have a matching
+ * setjmp. Until we can trigger a bus error exception through KVM, let's
+ * just ignore the access.
+ */
+ if (kvm_enabled()) {
+ return;
+ }
+
+ if (is_exec) {
+ raise_exception(env, EXCP_IBE);
+ } else {
+ raise_exception(env, EXCP_DBE);
+ }
+}
+#endif /* !CONFIG_USER_ONLY */
+
+/* Complex FPU operations which may need stack space. */
+
+#define FLOAT_TWO32 make_float32(1 << 30)
+#define FLOAT_TWO64 make_float64(1ULL << 62)
+
+#define FP_TO_INT32_OVERFLOW 0x7fffffff
+#define FP_TO_INT64_OVERFLOW 0x7fffffffffffffffULL
+
+/* Convert the MIPS rounding mode in FCR31.RM (0 = nearest-even,
+ 1 = toward zero, 2 = up, 3 = down) to the IEEE library's mode. */
+unsigned int ieee_rm[] = {
+ float_round_nearest_even,
+ float_round_to_zero,
+ float_round_up,
+ float_round_down
+};
+
+target_ulong helper_cfc1(CPUMIPSState *env, uint32_t reg)
+{
+ target_ulong arg1 = 0;
+
+ switch (reg) {
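+ /* reg selects a CP1 control register: 0 = FIR, 25 = FCCR,
+ 26 = FEXR, 28 = FENR; 1 and 5 are the UFR and FRE aliases,
+ anything else reads FCSR (FCR31). */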
+ case 0:
+ arg1 = (int32_t)env->active_fpu.fcr0;
+ break;
+ case 1:
+ /* UFR Support - Read Status FR */
+ if (env->active_fpu.fcr0 & (1 << FCR0_UFRP)) {
+ if (env->CP0_Config5 & (1 << CP0C5_UFR)) {
+ arg1 = (int32_t)
+ ((env->CP0_Status & (1 << CP0St_FR)) >> CP0St_FR);
+ } else {
+ do_raise_exception(env, EXCP_RI, GETPC());
+ }
+ }
+ break;
+ case 5:
+ /* FRE Support - read Config5.FRE bit */
+ if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
+ if (env->CP0_Config5 & (1 << CP0C5_UFE)) {
+ arg1 = (env->CP0_Config5 >> CP0C5_FRE) & 1;
+ } else {
+ helper_raise_exception(env, EXCP_RI);
+ }
+ }
+ break;
+ case 25:
+ arg1 = ((env->active_fpu.fcr31 >> 24) & 0xfe) | ((env->active_fpu.fcr31 >> 23) & 0x1);
+ break;
+ case 26:
+ arg1 = env->active_fpu.fcr31 & 0x0003f07c;
+ break;
+ case 28:
+ arg1 = (env->active_fpu.fcr31 & 0x00000f83) | ((env->active_fpu.fcr31 >> 22) & 0x4);
+ break;
+ default:
+ arg1 = (int32_t)env->active_fpu.fcr31;
+ break;
+ }
+
+ return arg1;
+}
+
+void helper_ctc1(CPUMIPSState *env, target_ulong arg1, uint32_t fs, uint32_t rt)
+{
+ switch (fs) {
+ case 1:
+ /* UFR Alias - Reset Status FR */
+ if (!((env->active_fpu.fcr0 & (1 << FCR0_UFRP)) && (rt == 0))) {
+ return;
+ }
+ if (env->CP0_Config5 & (1 << CP0C5_UFR)) {
+ env->CP0_Status &= ~(1 << CP0St_FR);
+ compute_hflags(env);
+ } else {
+ do_raise_exception(env, EXCP_RI, GETPC());
+ }
+ break;
+ case 4:
+ /* UNFR Alias - Set Status FR */
+ if (!((env->active_fpu.fcr0 & (1 << FCR0_UFRP)) && (rt == 0))) {
+ return;
+ }
+ if (env->CP0_Config5 & (1 << CP0C5_UFR)) {
+ env->CP0_Status |= (1 << CP0St_FR);
+ compute_hflags(env);
+ } else {
+ do_raise_exception(env, EXCP_RI, GETPC());
+ }
+ break;
+ case 5:
+ /* FRE Support - clear Config5.FRE bit */
+ if (!((env->active_fpu.fcr0 & (1 << FCR0_FREP)) && (rt == 0))) {
+ return;
+ }
+ if (env->CP0_Config5 & (1 << CP0C5_UFE)) {
+ env->CP0_Config5 &= ~(1 << CP0C5_FRE);
+ compute_hflags(env);
+ } else {
+ helper_raise_exception(env, EXCP_RI);
+ }
+ break;
+ case 6:
+ /* FRE Support - set Config5.FRE bit */
+ if (!((env->active_fpu.fcr0 & (1 << FCR0_FREP)) && (rt == 0))) {
+ return;
+ }
+ if (env->CP0_Config5 & (1 << CP0C5_UFE)) {
+ env->CP0_Config5 |= (1 << CP0C5_FRE);
+ compute_hflags(env);
+ } else {
+ helper_raise_exception(env, EXCP_RI);
+ }
+ break;
+ case 25:
+ if ((env->insn_flags & ISA_MIPS32R6) || (arg1 & 0xffffff00)) {
+ return;
+ }
+ env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0x017fffff) | ((arg1 & 0xfe) << 24) |
+ ((arg1 & 0x1) << 23);
+ break;
+ case 26:
+ if (arg1 & 0x007c0000)
+ return;
+ env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfffc0f83) | (arg1 & 0x0003f07c);
+ break;
+ case 28:
+ if (arg1 & 0x007c0000)
+ return;
+ env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfefff07c) | (arg1 & 0x00000f83) |
+ ((arg1 & 0x4) << 22);
+ break;
+ case 31:
+ env->active_fpu.fcr31 = (arg1 & env->active_fpu.fcr31_rw_bitmask) |
+ (env->active_fpu.fcr31 & ~(env->active_fpu.fcr31_rw_bitmask));
+ break;
+ default:
+ return;
+ }
+ restore_fp_status(env);
+ set_float_exception_flags(0, &env->active_fpu.fp_status);
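+ /* The unimplemented-operation (E) cause bit has no enable bit in
+ FCSR, so treat it as always enabled (the 0x20 below). */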
+ if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) & GET_FP_CAUSE(env->active_fpu.fcr31))
+ do_raise_exception(env, EXCP_FPE, GETPC());
+}
+
+int ieee_ex_to_mips(int xcpt)
+{
+ int ret = 0;
+ if (xcpt) {
+ if (xcpt & float_flag_invalid) {
+ ret |= FP_INVALID;
+ }
+ if (xcpt & float_flag_overflow) {
+ ret |= FP_OVERFLOW;
+ }
+ if (xcpt & float_flag_underflow) {
+ ret |= FP_UNDERFLOW;
+ }
+ if (xcpt & float_flag_divbyzero) {
+ ret |= FP_DIV0;
+ }
+ if (xcpt & float_flag_inexact) {
+ ret |= FP_INEXACT;
+ }
+ }
+ return ret;
+}
+
+static inline void update_fcr31(CPUMIPSState *env, uintptr_t pc)
+{
+ int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->active_fpu.fp_status));
+
+ SET_FP_CAUSE(env->active_fpu.fcr31, tmp);
+
+ if (tmp) {
+ set_float_exception_flags(0, &env->active_fpu.fp_status);
+
+ if (GET_FP_ENABLE(env->active_fpu.fcr31) & tmp) {
+ do_raise_exception(env, EXCP_FPE, pc);
+ } else {
+ UPDATE_FP_FLAGS(env->active_fpu.fcr31, tmp);
+ }
+ }
+}
+
+/* Float support.
+ Single precision routines have an "s" suffix, double precision a
+ "d" suffix, 32-bit integer "w", 64-bit integer "l", paired single "ps",
+ paired single lower "pl", paired single upper "pu". */
+
+/* unary operations, modifying fp status */
+uint64_t helper_float_sqrt_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ fdt0 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return fdt0;
+}
+
+uint32_t helper_float_sqrt_s(CPUMIPSState *env, uint32_t fst0)
+{
+ fst0 = float32_sqrt(fst0, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return fst0;
+}
+
+uint64_t helper_float_cvtd_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint64_t fdt2;
+
+ fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status);
+ fdt2 = float64_maybe_silence_nan(fdt2, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return fdt2;
+}
+
+uint64_t helper_float_cvtd_w(CPUMIPSState *env, uint32_t wt0)
+{
+ uint64_t fdt2;
+
+ fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return fdt2;
+}
+
+uint64_t helper_float_cvtd_l(CPUMIPSState *env, uint64_t dt0)
+{
+ uint64_t fdt2;
+
+ fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return fdt2;
+}
+
+uint64_t helper_float_cvt_l_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint64_t dt2;
+
+ dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ dt2 = FP_TO_INT64_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint64_t helper_float_cvt_l_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint64_t dt2;
+
+ dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ dt2 = FP_TO_INT64_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint64_t helper_float_cvtps_pw(CPUMIPSState *env, uint64_t dt0)
+{
+ uint32_t fst2;
+ uint32_t fsth2;
+
+ fst2 = int32_to_float32(dt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
+ fsth2 = int32_to_float32(dt0 >> 32, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return ((uint64_t)fsth2 << 32) | fst2;
+}
+
+uint64_t helper_float_cvtpw_ps(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint32_t wt2;
+ uint32_t wth2;
+ int excp, excph;
+
+ wt2 = float32_to_int32(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
+ excp = get_float_exception_flags(&env->active_fpu.fp_status);
+ if (excp & (float_flag_overflow | float_flag_invalid)) {
+ wt2 = FP_TO_INT32_OVERFLOW;
+ }
+
+ set_float_exception_flags(0, &env->active_fpu.fp_status);
+ wth2 = float32_to_int32(fdt0 >> 32, &env->active_fpu.fp_status);
+ excph = get_float_exception_flags(&env->active_fpu.fp_status);
+ if (excph & (float_flag_overflow | float_flag_invalid)) {
+ wth2 = FP_TO_INT32_OVERFLOW;
+ }
+
+ set_float_exception_flags(excp | excph, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+
+ return ((uint64_t)wth2 << 32) | wt2;
+}
+
+uint32_t helper_float_cvts_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint32_t fst2;
+
+ fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status);
+ fst2 = float32_maybe_silence_nan(fst2, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return fst2;
+}
+
+uint32_t helper_float_cvts_w(CPUMIPSState *env, uint32_t wt0)
+{
+ uint32_t fst2;
+
+ fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return fst2;
+}
+
+uint32_t helper_float_cvts_l(CPUMIPSState *env, uint64_t dt0)
+{
+ uint32_t fst2;
+
+ fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return fst2;
+}
+
+uint32_t helper_float_cvts_pl(CPUMIPSState *env, uint32_t wt0)
+{
+ uint32_t wt2;
+
+ wt2 = wt0;
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint32_t helper_float_cvts_pu(CPUMIPSState *env, uint32_t wth0)
+{
+ uint32_t wt2;
+
+ wt2 = wth0;
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint32_t helper_float_cvt_w_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint32_t wt2;
+
+ wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ wt2 = FP_TO_INT32_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint32_t helper_float_cvt_w_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint32_t wt2;
+
+ wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ wt2 = FP_TO_INT32_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
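+/* Explicitly rounded conversions: ROUND/CEIL/FLOOR temporarily force the
+ * softfloat rounding mode and restore it afterwards, while TRUNC uses the
+ * round-to-zero conversion directly. On invalid or overflowing inputs the
+ * legacy helpers saturate to FP_TO_INT{32,64}_OVERFLOW. */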
+uint64_t helper_float_round_l_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint64_t dt2;
+
+ set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
+ dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ dt2 = FP_TO_INT64_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint64_t helper_float_round_l_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint64_t dt2;
+
+ set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
+ dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ dt2 = FP_TO_INT64_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint32_t helper_float_round_w_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint32_t wt2;
+
+ set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
+ wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ wt2 = FP_TO_INT32_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint32_t helper_float_round_w_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint32_t wt2;
+
+ set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
+ wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ wt2 = FP_TO_INT32_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint64_t helper_float_trunc_l_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint64_t dt2;
+
+ dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ dt2 = FP_TO_INT64_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint64_t helper_float_trunc_l_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint64_t dt2;
+
+ dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ dt2 = FP_TO_INT64_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint32_t helper_float_trunc_w_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint32_t wt2;
+
+ wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ wt2 = FP_TO_INT32_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint32_t helper_float_trunc_w_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint32_t wt2;
+
+ wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ wt2 = FP_TO_INT32_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint64_t helper_float_ceil_l_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint64_t dt2;
+
+ set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
+ dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ dt2 = FP_TO_INT64_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint64_t helper_float_ceil_l_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint64_t dt2;
+
+ set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
+ dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ dt2 = FP_TO_INT64_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint32_t helper_float_ceil_w_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint32_t wt2;
+
+ set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
+ wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ wt2 = FP_TO_INT32_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint32_t helper_float_ceil_w_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint32_t wt2;
+
+ set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
+ wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ wt2 = FP_TO_INT32_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint64_t helper_float_floor_l_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint64_t dt2;
+
+ set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
+ dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ dt2 = FP_TO_INT64_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint64_t helper_float_floor_l_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint64_t dt2;
+
+ set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
+ dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ dt2 = FP_TO_INT64_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint32_t helper_float_floor_w_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint32_t wt2;
+
+ set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
+ wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ wt2 = FP_TO_INT32_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint32_t helper_float_floor_w_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint32_t wt2;
+
+ set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
+ wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & (float_flag_invalid | float_flag_overflow)) {
+ wt2 = FP_TO_INT32_OVERFLOW;
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
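+/* The *_2008 variants implement the IEEE 754-2008 style conversions: a NaN
+ * input converts to 0 instead of the saturated FP_TO_INT{32,64}_OVERFLOW
+ * value returned by the legacy helpers above. */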
+uint64_t helper_float_cvt_2008_l_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint64_t dt2;
+
+ dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float64_is_any_nan(fdt0)) {
+ dt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint64_t helper_float_cvt_2008_l_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint64_t dt2;
+
+ dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float32_is_any_nan(fst0)) {
+ dt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint32_t helper_float_cvt_2008_w_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint32_t wt2;
+
+ wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float64_is_any_nan(fdt0)) {
+ wt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint32_t helper_float_cvt_2008_w_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint32_t wt2;
+
+ wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float32_is_any_nan(fst0)) {
+ wt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint64_t helper_float_round_2008_l_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint64_t dt2;
+
+ set_float_rounding_mode(float_round_nearest_even,
+ &env->active_fpu.fp_status);
+ dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float64_is_any_nan(fdt0)) {
+ dt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint64_t helper_float_round_2008_l_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint64_t dt2;
+
+ set_float_rounding_mode(float_round_nearest_even,
+ &env->active_fpu.fp_status);
+ dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float32_is_any_nan(fst0)) {
+ dt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint32_t helper_float_round_2008_w_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint32_t wt2;
+
+ set_float_rounding_mode(float_round_nearest_even,
+ &env->active_fpu.fp_status);
+ wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float64_is_any_nan(fdt0)) {
+ wt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint32_t helper_float_round_2008_w_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint32_t wt2;
+
+ set_float_rounding_mode(float_round_nearest_even,
+ &env->active_fpu.fp_status);
+ wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float32_is_any_nan(fst0)) {
+ wt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint64_t helper_float_trunc_2008_l_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint64_t dt2;
+
+ dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float64_is_any_nan(fdt0)) {
+ dt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint64_t helper_float_trunc_2008_l_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint64_t dt2;
+
+ dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float32_is_any_nan(fst0)) {
+ dt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint32_t helper_float_trunc_2008_w_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint32_t wt2;
+
+ wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float64_is_any_nan(fdt0)) {
+ wt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint32_t helper_float_trunc_2008_w_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint32_t wt2;
+
+ wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float32_is_any_nan(fst0)) {
+ wt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint64_t helper_float_ceil_2008_l_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint64_t dt2;
+
+ set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
+ dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float64_is_any_nan(fdt0)) {
+ dt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint64_t helper_float_ceil_2008_l_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint64_t dt2;
+
+ set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
+ dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float32_is_any_nan(fst0)) {
+ dt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint32_t helper_float_ceil_2008_w_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint32_t wt2;
+
+ set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
+ wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float64_is_any_nan(fdt0)) {
+ wt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint32_t helper_float_ceil_2008_w_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint32_t wt2;
+
+ set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
+ wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float32_is_any_nan(fst0)) {
+ wt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint64_t helper_float_floor_2008_l_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint64_t dt2;
+
+ set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
+ dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float64_is_any_nan(fdt0)) {
+ dt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint64_t helper_float_floor_2008_l_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint64_t dt2;
+
+ set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
+ dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float32_is_any_nan(fst0)) {
+ dt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return dt2;
+}
+
+uint32_t helper_float_floor_2008_w_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint32_t wt2;
+
+ set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
+ wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float64_is_any_nan(fdt0)) {
+ wt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+uint32_t helper_float_floor_2008_w_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint32_t wt2;
+
+ set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
+ wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
+ restore_rounding_mode(env);
+ if (get_float_exception_flags(&env->active_fpu.fp_status)
+ & float_flag_invalid) {
+ if (float32_is_any_nan(fst0)) {
+ wt2 = 0;
+ }
+ }
+ update_fcr31(env, GETPC());
+ return wt2;
+}
+
+/* unary operations, not modifying fp status */
+#define FLOAT_UNOP(name) \
+uint64_t helper_float_ ## name ## _d(uint64_t fdt0) \
+{ \
+ return float64_ ## name(fdt0); \
+} \
+uint32_t helper_float_ ## name ## _s(uint32_t fst0) \
+{ \
+ return float32_ ## name(fst0); \
+} \
+uint64_t helper_float_ ## name ## _ps(uint64_t fdt0) \
+{ \
+ uint32_t wt0; \
+ uint32_t wth0; \
+ \
+ wt0 = float32_ ## name(fdt0 & 0XFFFFFFFF); \
+ wth0 = float32_ ## name(fdt0 >> 32); \
+ return ((uint64_t)wth0 << 32) | wt0; \
+}
+FLOAT_UNOP(abs)
+FLOAT_UNOP(chs)
+#undef FLOAT_UNOP
+
+/* MIPS specific unary operations */
+uint64_t helper_float_recip_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint64_t fdt2;
+
+ fdt2 = float64_div(float64_one, fdt0, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return fdt2;
+}
+
+uint32_t helper_float_recip_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint32_t fst2;
+
+ fst2 = float32_div(float32_one, fst0, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return fst2;
+}
+
+uint64_t helper_float_rsqrt_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint64_t fdt2;
+
+ fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
+ fdt2 = float64_div(float64_one, fdt2, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return fdt2;
+}
+
+uint32_t helper_float_rsqrt_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint32_t fst2;
+
+ fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
+ fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return fst2;
+}
+
+uint64_t helper_float_recip1_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint64_t fdt2;
+
+ fdt2 = float64_div(float64_one, fdt0, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return fdt2;
+}
+
+uint32_t helper_float_recip1_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint32_t fst2;
+
+ fst2 = float32_div(float32_one, fst0, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return fst2;
+}
+
+uint64_t helper_float_recip1_ps(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint32_t fst2;
+ uint32_t fsth2;
+
+ fst2 = float32_div(float32_one, fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
+ fsth2 = float32_div(float32_one, fdt0 >> 32, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return ((uint64_t)fsth2 << 32) | fst2;
+}
+
+uint64_t helper_float_rsqrt1_d(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint64_t fdt2;
+
+ fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
+ fdt2 = float64_div(float64_one, fdt2, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return fdt2;
+}
+
+uint32_t helper_float_rsqrt1_s(CPUMIPSState *env, uint32_t fst0)
+{
+ uint32_t fst2;
+
+ fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
+ fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return fst2;
+}
+
+uint64_t helper_float_rsqrt1_ps(CPUMIPSState *env, uint64_t fdt0)
+{
+ uint32_t fst2;
+ uint32_t fsth2;
+
+ fst2 = float32_sqrt(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
+ fsth2 = float32_sqrt(fdt0 >> 32, &env->active_fpu.fp_status);
+ fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status);
+ fsth2 = float32_div(float32_one, fsth2, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return ((uint64_t)fsth2 << 32) | fst2;
+}
+
+#define FLOAT_RINT(name, bits) \
+uint ## bits ## _t helper_float_ ## name (CPUMIPSState *env, \
+ uint ## bits ## _t fs) \
+{ \
+ uint ## bits ## _t fdret; \
+ \
+ fdret = float ## bits ## _round_to_int(fs, &env->active_fpu.fp_status); \
+ update_fcr31(env, GETPC()); \
+ return fdret; \
+}
+
+FLOAT_RINT(rint_s, 32)
+FLOAT_RINT(rint_d, 64)
+#undef FLOAT_RINT
+
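+/* Classification result bits returned by the CLASS.fmt helpers below. */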
+#define FLOAT_CLASS_SIGNALING_NAN 0x001
+#define FLOAT_CLASS_QUIET_NAN 0x002
+#define FLOAT_CLASS_NEGATIVE_INFINITY 0x004
+#define FLOAT_CLASS_NEGATIVE_NORMAL 0x008
+#define FLOAT_CLASS_NEGATIVE_SUBNORMAL 0x010
+#define FLOAT_CLASS_NEGATIVE_ZERO 0x020
+#define FLOAT_CLASS_POSITIVE_INFINITY 0x040
+#define FLOAT_CLASS_POSITIVE_NORMAL 0x080
+#define FLOAT_CLASS_POSITIVE_SUBNORMAL 0x100
+#define FLOAT_CLASS_POSITIVE_ZERO 0x200
+
+#define FLOAT_CLASS(name, bits) \
+uint ## bits ## _t float_ ## name (uint ## bits ## _t arg, \
+ float_status *status) \
+{ \
+ if (float ## bits ## _is_signaling_nan(arg, status)) { \
+ return FLOAT_CLASS_SIGNALING_NAN; \
+ } else if (float ## bits ## _is_quiet_nan(arg, status)) { \
+ return FLOAT_CLASS_QUIET_NAN; \
+ } else if (float ## bits ## _is_neg(arg)) { \
+ if (float ## bits ## _is_infinity(arg)) { \
+ return FLOAT_CLASS_NEGATIVE_INFINITY; \
+ } else if (float ## bits ## _is_zero(arg)) { \
+ return FLOAT_CLASS_NEGATIVE_ZERO; \
+ } else if (float ## bits ## _is_zero_or_denormal(arg)) { \
+ return FLOAT_CLASS_NEGATIVE_SUBNORMAL; \
+ } else { \
+ return FLOAT_CLASS_NEGATIVE_NORMAL; \
+ } \
+ } else { \
+ if (float ## bits ## _is_infinity(arg)) { \
+ return FLOAT_CLASS_POSITIVE_INFINITY; \
+ } else if (float ## bits ## _is_zero(arg)) { \
+ return FLOAT_CLASS_POSITIVE_ZERO; \
+ } else if (float ## bits ## _is_zero_or_denormal(arg)) { \
+ return FLOAT_CLASS_POSITIVE_SUBNORMAL; \
+ } else { \
+ return FLOAT_CLASS_POSITIVE_NORMAL; \
+ } \
+ } \
+} \
+ \
+uint ## bits ## _t helper_float_ ## name (CPUMIPSState *env, \
+ uint ## bits ## _t arg) \
+{ \
+ return float_ ## name(arg, &env->active_fpu.fp_status); \
+}
+
+FLOAT_CLASS(class_s, 32)
+FLOAT_CLASS(class_d, 64)
+#undef FLOAT_CLASS
+
+/* binary operations */
+#define FLOAT_BINOP(name) \
+uint64_t helper_float_ ## name ## _d(CPUMIPSState *env, \
+ uint64_t fdt0, uint64_t fdt1) \
+{ \
+ uint64_t dt2; \
+ \
+ dt2 = float64_ ## name (fdt0, fdt1, &env->active_fpu.fp_status); \
+ update_fcr31(env, GETPC()); \
+ return dt2; \
+} \
+ \
+uint32_t helper_float_ ## name ## _s(CPUMIPSState *env, \
+ uint32_t fst0, uint32_t fst1) \
+{ \
+ uint32_t wt2; \
+ \
+ wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status); \
+ update_fcr31(env, GETPC()); \
+ return wt2; \
+} \
+ \
+uint64_t helper_float_ ## name ## _ps(CPUMIPSState *env, \
+ uint64_t fdt0, \
+ uint64_t fdt1) \
+{ \
+ uint32_t fst0 = fdt0 & 0XFFFFFFFF; \
+ uint32_t fsth0 = fdt0 >> 32; \
+ uint32_t fst1 = fdt1 & 0XFFFFFFFF; \
+ uint32_t fsth1 = fdt1 >> 32; \
+ uint32_t wt2; \
+ uint32_t wth2; \
+ \
+ wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status); \
+ wth2 = float32_ ## name (fsth0, fsth1, &env->active_fpu.fp_status); \
+ update_fcr31(env, GETPC()); \
+ return ((uint64_t)wth2 << 32) | wt2; \
+}
+
+FLOAT_BINOP(add)
+FLOAT_BINOP(sub)
+FLOAT_BINOP(mul)
+FLOAT_BINOP(div)
+#undef FLOAT_BINOP
+
+/* MIPS specific binary operations */
+uint64_t helper_float_recip2_d(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
+{
+ fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
+ fdt2 = float64_chs(float64_sub(fdt2, float64_one, &env->active_fpu.fp_status));
+ update_fcr31(env, GETPC());
+ return fdt2;
+}
+
+uint32_t helper_float_recip2_s(CPUMIPSState *env, uint32_t fst0, uint32_t fst2)
+{
+ fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
+ fst2 = float32_chs(float32_sub(fst2, float32_one, &env->active_fpu.fp_status));
+ update_fcr31(env, GETPC());
+ return fst2;
+}
+
+uint64_t helper_float_recip2_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
+{
+ uint32_t fst0 = fdt0 & 0XFFFFFFFF;
+ uint32_t fsth0 = fdt0 >> 32;
+ uint32_t fst2 = fdt2 & 0XFFFFFFFF;
+ uint32_t fsth2 = fdt2 >> 32;
+
+ fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
+ fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
+ fst2 = float32_chs(float32_sub(fst2, float32_one, &env->active_fpu.fp_status));
+ fsth2 = float32_chs(float32_sub(fsth2, float32_one, &env->active_fpu.fp_status));
+ update_fcr31(env, GETPC());
+ return ((uint64_t)fsth2 << 32) | fst2;
+}
+
+uint64_t helper_float_rsqrt2_d(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
+{
+ fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
+ fdt2 = float64_sub(fdt2, float64_one, &env->active_fpu.fp_status);
+ fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64, &env->active_fpu.fp_status));
+ update_fcr31(env, GETPC());
+ return fdt2;
+}
+
+uint32_t helper_float_rsqrt2_s(CPUMIPSState *env, uint32_t fst0, uint32_t fst2)
+{
+ fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
+ fst2 = float32_sub(fst2, float32_one, &env->active_fpu.fp_status);
+ fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
+ update_fcr31(env, GETPC());
+ return fst2;
+}
+
+uint64_t helper_float_rsqrt2_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
+{
+ uint32_t fst0 = fdt0 & 0XFFFFFFFF;
+ uint32_t fsth0 = fdt0 >> 32;
+ uint32_t fst2 = fdt2 & 0XFFFFFFFF;
+ uint32_t fsth2 = fdt2 >> 32;
+
+ fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
+ fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
+ fst2 = float32_sub(fst2, float32_one, &env->active_fpu.fp_status);
+ fsth2 = float32_sub(fsth2, float32_one, &env->active_fpu.fp_status);
+ fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
+ fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32, &env->active_fpu.fp_status));
+ update_fcr31(env, GETPC());
+ return ((uint64_t)fsth2 << 32) | fst2;
+}
+
+uint64_t helper_float_addr_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt1)
+{
+ uint32_t fst0 = fdt0 & 0XFFFFFFFF;
+ uint32_t fsth0 = fdt0 >> 32;
+ uint32_t fst1 = fdt1 & 0XFFFFFFFF;
+ uint32_t fsth1 = fdt1 >> 32;
+ uint32_t fst2;
+ uint32_t fsth2;
+
+ fst2 = float32_add (fst0, fsth0, &env->active_fpu.fp_status);
+ fsth2 = float32_add (fst1, fsth1, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return ((uint64_t)fsth2 << 32) | fst2;
+}
+
+uint64_t helper_float_mulr_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt1)
+{
+ uint32_t fst0 = fdt0 & 0XFFFFFFFF;
+ uint32_t fsth0 = fdt0 >> 32;
+ uint32_t fst1 = fdt1 & 0XFFFFFFFF;
+ uint32_t fsth1 = fdt1 >> 32;
+ uint32_t fst2;
+ uint32_t fsth2;
+
+ fst2 = float32_mul (fst0, fsth0, &env->active_fpu.fp_status);
+ fsth2 = float32_mul (fst1, fsth1, &env->active_fpu.fp_status);
+ update_fcr31(env, GETPC());
+ return ((uint64_t)fsth2 << 32) | fst2;
+}
+
+#define FLOAT_MINMAX(name, bits, minmaxfunc) \
+uint ## bits ## _t helper_float_ ## name (CPUMIPSState *env, \
+ uint ## bits ## _t fs, \
+ uint ## bits ## _t ft) \
+{ \
+ uint ## bits ## _t fdret; \
+ \
+ fdret = float ## bits ## _ ## minmaxfunc(fs, ft, \
+ &env->active_fpu.fp_status); \
+ update_fcr31(env, GETPC()); \
+ return fdret; \
+}
+
+FLOAT_MINMAX(max_s, 32, maxnum)
+FLOAT_MINMAX(max_d, 64, maxnum)
+FLOAT_MINMAX(maxa_s, 32, maxnummag)
+FLOAT_MINMAX(maxa_d, 64, maxnummag)
+
+FLOAT_MINMAX(min_s, 32, minnum)
+FLOAT_MINMAX(min_d, 64, minnum)
+FLOAT_MINMAX(mina_s, 32, minnummag)
+FLOAT_MINMAX(mina_d, 64, minnummag)
+#undef FLOAT_MINMAX
+
+/* ternary operations */
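+/* UNFUSED_FMA performs an unfused multiply-add: the product is rounded
+ * before the addition, unlike float*_muladd(). The float_muladd_negate_c
+ * and float_muladd_negate_result flags are reused to negate the addend
+ * or the final result. */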
+#define UNFUSED_FMA(prefix, a, b, c, flags) \
+{ \
+ a = prefix##_mul(a, b, &env->active_fpu.fp_status); \
+ if ((flags) & float_muladd_negate_c) { \
+ a = prefix##_sub(a, c, &env->active_fpu.fp_status); \
+ } else { \
+ a = prefix##_add(a, c, &env->active_fpu.fp_status); \
+ } \
+ if ((flags) & float_muladd_negate_result) { \
+ a = prefix##_chs(a); \
+ } \
+}
+
+/* FMA based operations */
+#define FLOAT_FMA(name, type) \
+uint64_t helper_float_ ## name ## _d(CPUMIPSState *env, \
+ uint64_t fdt0, uint64_t fdt1, \
+ uint64_t fdt2) \
+{ \
+ UNFUSED_FMA(float64, fdt0, fdt1, fdt2, type); \
+ update_fcr31(env, GETPC()); \
+ return fdt0; \
+} \
+ \
+uint32_t helper_float_ ## name ## _s(CPUMIPSState *env, \
+ uint32_t fst0, uint32_t fst1, \
+ uint32_t fst2) \
+{ \
+ UNFUSED_FMA(float32, fst0, fst1, fst2, type); \
+ update_fcr31(env, GETPC()); \
+ return fst0; \
+} \
+ \
+uint64_t helper_float_ ## name ## _ps(CPUMIPSState *env, \
+ uint64_t fdt0, uint64_t fdt1, \
+ uint64_t fdt2) \
+{ \
+ uint32_t fst0 = fdt0 & 0XFFFFFFFF; \
+ uint32_t fsth0 = fdt0 >> 32; \
+ uint32_t fst1 = fdt1 & 0XFFFFFFFF; \
+ uint32_t fsth1 = fdt1 >> 32; \
+ uint32_t fst2 = fdt2 & 0XFFFFFFFF; \
+ uint32_t fsth2 = fdt2 >> 32; \
+ \
+ UNFUSED_FMA(float32, fst0, fst1, fst2, type); \
+ UNFUSED_FMA(float32, fsth0, fsth1, fsth2, type); \
+ update_fcr31(env, GETPC()); \
+ return ((uint64_t)fsth0 << 32) | fst0; \
+}
+FLOAT_FMA(madd, 0)
+FLOAT_FMA(msub, float_muladd_negate_c)
+FLOAT_FMA(nmadd, float_muladd_negate_result)
+FLOAT_FMA(nmsub, float_muladd_negate_result | float_muladd_negate_c)
+#undef FLOAT_FMA
+
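+/* R6 fused multiply-add/subtract (MADDF.fmt / MSUBF.fmt): a single
+ * rounding step via float*_muladd(). */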
+#define FLOAT_FMADDSUB(name, bits, muladd_arg) \
+uint ## bits ## _t helper_float_ ## name (CPUMIPSState *env, \
+ uint ## bits ## _t fs, \
+ uint ## bits ## _t ft, \
+ uint ## bits ## _t fd) \
+{ \
+ uint ## bits ## _t fdret; \
+ \
+ fdret = float ## bits ## _muladd(fs, ft, fd, muladd_arg, \
+ &env->active_fpu.fp_status); \
+ update_fcr31(env, GETPC()); \
+ return fdret; \
+}
+
+FLOAT_FMADDSUB(maddf_s, 32, 0)
+FLOAT_FMADDSUB(maddf_d, 64, 0)
+FLOAT_FMADDSUB(msubf_s, 32, float_muladd_negate_product)
+FLOAT_FMADDSUB(msubf_d, 64, float_muladd_negate_product)
+#undef FLOAT_FMADDSUB
+
+/* compare operations */
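+/* Pre-R6 C.cond.fmt: each helper evaluates the condition and sets or
+ * clears condition code bit "cc" in the FPU state. The first eight
+ * predicates use the quiet comparisons, the remaining ones the signaling
+ * variants. */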
+#define FOP_COND_D(op, cond) \
+void helper_cmp_d_ ## op(CPUMIPSState *env, uint64_t fdt0, \
+ uint64_t fdt1, int cc) \
+{ \
+ int c; \
+ c = cond; \
+ update_fcr31(env, GETPC()); \
+ if (c) \
+ SET_FP_COND(cc, env->active_fpu); \
+ else \
+ CLEAR_FP_COND(cc, env->active_fpu); \
+} \
+void helper_cmpabs_d_ ## op(CPUMIPSState *env, uint64_t fdt0, \
+ uint64_t fdt1, int cc) \
+{ \
+ int c; \
+ fdt0 = float64_abs(fdt0); \
+ fdt1 = float64_abs(fdt1); \
+ c = cond; \
+ update_fcr31(env, GETPC()); \
+ if (c) \
+ SET_FP_COND(cc, env->active_fpu); \
+ else \
+ CLEAR_FP_COND(cc, env->active_fpu); \
+}
+
+/* NOTE: the comma operator makes "cond" evaluate to false,
+ * but float64_unordered_quiet() is still called. */
+FOP_COND_D(f, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status), 0))
+FOP_COND_D(un, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status))
+FOP_COND_D(eq, float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
+FOP_COND_D(ueq, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
+FOP_COND_D(olt, float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
+FOP_COND_D(ult, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
+FOP_COND_D(ole, float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
+FOP_COND_D(ule, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
+/* NOTE: the comma operator makes "cond" evaluate to false,
+ * but float64_unordered() is still called. */
+FOP_COND_D(sf, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status), 0))
+FOP_COND_D(ngle, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status))
+FOP_COND_D(seq, float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
+FOP_COND_D(ngl, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
+FOP_COND_D(lt, float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
+FOP_COND_D(nge, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
+FOP_COND_D(le, float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
+FOP_COND_D(ngt, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
+
+#define FOP_COND_S(op, cond) \
+void helper_cmp_s_ ## op(CPUMIPSState *env, uint32_t fst0, \
+ uint32_t fst1, int cc) \
+{ \
+ int c; \
+ c = cond; \
+ update_fcr31(env, GETPC()); \
+ if (c) \
+ SET_FP_COND(cc, env->active_fpu); \
+ else \
+ CLEAR_FP_COND(cc, env->active_fpu); \
+} \
+void helper_cmpabs_s_ ## op(CPUMIPSState *env, uint32_t fst0, \
+ uint32_t fst1, int cc) \
+{ \
+ int c; \
+ fst0 = float32_abs(fst0); \
+ fst1 = float32_abs(fst1); \
+ c = cond; \
+ update_fcr31(env, GETPC()); \
+ if (c) \
+ SET_FP_COND(cc, env->active_fpu); \
+ else \
+ CLEAR_FP_COND(cc, env->active_fpu); \
+}
+
+/* NOTE: the comma operator makes "cond" evaluate to false,
+ * but float32_unordered_quiet() is still called. */
+FOP_COND_S(f, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0))
+FOP_COND_S(un, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status))
+FOP_COND_S(eq, float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))
+FOP_COND_S(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))
+FOP_COND_S(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))
+FOP_COND_S(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))
+FOP_COND_S(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))
+FOP_COND_S(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))
+/* NOTE: the comma operator makes "cond" evaluate to false,
+ * but float32_unordered() is still called. */
+FOP_COND_S(sf, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0))
+FOP_COND_S(ngle, float32_unordered(fst1, fst0, &env->active_fpu.fp_status))
+FOP_COND_S(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status))
+FOP_COND_S(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
+FOP_COND_S(lt, float32_lt(fst0, fst1, &env->active_fpu.fp_status))
+FOP_COND_S(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
+FOP_COND_S(le, float32_le(fst0, fst1, &env->active_fpu.fp_status))
+FOP_COND_S(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_le(fst0, fst1, &env->active_fpu.fp_status))
+
+#define FOP_COND_PS(op, condl, condh) \
+void helper_cmp_ps_ ## op(CPUMIPSState *env, uint64_t fdt0, \
+ uint64_t fdt1, int cc) \
+{ \
+ uint32_t fst0, fsth0, fst1, fsth1; \
+ int ch, cl; \
+ fst0 = fdt0 & 0XFFFFFFFF; \
+ fsth0 = fdt0 >> 32; \
+ fst1 = fdt1 & 0XFFFFFFFF; \
+ fsth1 = fdt1 >> 32; \
+ cl = condl; \
+ ch = condh; \
+ update_fcr31(env, GETPC()); \
+ if (cl) \
+ SET_FP_COND(cc, env->active_fpu); \
+ else \
+ CLEAR_FP_COND(cc, env->active_fpu); \
+ if (ch) \
+ SET_FP_COND(cc + 1, env->active_fpu); \
+ else \
+ CLEAR_FP_COND(cc + 1, env->active_fpu); \
+} \
+void helper_cmpabs_ps_ ## op(CPUMIPSState *env, uint64_t fdt0, \
+ uint64_t fdt1, int cc) \
+{ \
+ uint32_t fst0, fsth0, fst1, fsth1; \
+ int ch, cl; \
+ fst0 = float32_abs(fdt0 & 0XFFFFFFFF); \
+ fsth0 = float32_abs(fdt0 >> 32); \
+ fst1 = float32_abs(fdt1 & 0XFFFFFFFF); \
+ fsth1 = float32_abs(fdt1 >> 32); \
+ cl = condl; \
+ ch = condh; \
+ update_fcr31(env, GETPC()); \
+ if (cl) \
+ SET_FP_COND(cc, env->active_fpu); \
+ else \
+ CLEAR_FP_COND(cc, env->active_fpu); \
+ if (ch) \
+ SET_FP_COND(cc + 1, env->active_fpu); \
+ else \
+ CLEAR_FP_COND(cc + 1, env->active_fpu); \
+}
+
+/* NOTE: the comma operator makes "cond" evaluate to false,
+ * but float32_unordered_quiet() is still called. */
+FOP_COND_PS(f, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0),
+ (float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status), 0))
+FOP_COND_PS(un, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status),
+ float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status))
+FOP_COND_PS(eq, float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status),
+ float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
+FOP_COND_PS(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status),
+ float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
+FOP_COND_PS(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status),
+ float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
+FOP_COND_PS(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status),
+ float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
+FOP_COND_PS(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status),
+ float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
+FOP_COND_PS(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status),
+ float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
+/* NOTE: the comma operator makes "cond" evaluate to false,
+ * but float32_unordered() is still called. */
+FOP_COND_PS(sf, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0),
+ (float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status), 0))
+FOP_COND_PS(ngle, float32_unordered(fst1, fst0, &env->active_fpu.fp_status),
+ float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status))
+FOP_COND_PS(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status),
+ float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
+FOP_COND_PS(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
+ float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
+FOP_COND_PS(lt, float32_lt(fst0, fst1, &env->active_fpu.fp_status),
+ float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
+FOP_COND_PS(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
+ float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
+FOP_COND_PS(le, float32_le(fst0, fst1, &env->active_fpu.fp_status),
+ float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
+FOP_COND_PS(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_le(fst0, fst1, &env->active_fpu.fp_status),
+ float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
+
+/* R6 compare operations */
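+/* R6 CMP.cond.fmt: instead of setting a condition code, the helper
+ * returns an all-ones mask when the condition holds and 0 otherwise. */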
+#define FOP_CONDN_D(op, cond) \
+uint64_t helper_r6_cmp_d_ ## op(CPUMIPSState * env, uint64_t fdt0, \
+ uint64_t fdt1) \
+{ \
+ uint64_t c; \
+ c = cond; \
+ update_fcr31(env, GETPC()); \
+ if (c) { \
+ return -1; \
+ } else { \
+ return 0; \
+ } \
+}
+
+/* NOTE: the comma operator makes "cond" evaluate to false,
+ * but float64_unordered_quiet() is still called. */
+FOP_CONDN_D(af, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status), 0))
+FOP_CONDN_D(un, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status)))
+FOP_CONDN_D(eq, (float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
+FOP_CONDN_D(ueq, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status)
+ || float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
+FOP_CONDN_D(lt, (float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
+FOP_CONDN_D(ult, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status)
+ || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
+FOP_CONDN_D(le, (float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
+FOP_CONDN_D(ule, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status)
+ || float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
+/* NOTE: the comma operator makes "cond" evaluate to false,
+ * but float64_unordered() is still called. */
+FOP_CONDN_D(saf, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status), 0))
+FOP_CONDN_D(sun, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status)))
+FOP_CONDN_D(seq, (float64_eq(fdt0, fdt1, &env->active_fpu.fp_status)))
+FOP_CONDN_D(sueq, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status)
+ || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status)))
+FOP_CONDN_D(slt, (float64_lt(fdt0, fdt1, &env->active_fpu.fp_status)))
+FOP_CONDN_D(sult, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status)
+ || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status)))
+FOP_CONDN_D(sle, (float64_le(fdt0, fdt1, &env->active_fpu.fp_status)))
+FOP_CONDN_D(sule, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status)
+ || float64_le(fdt0, fdt1, &env->active_fpu.fp_status)))
+FOP_CONDN_D(or, (float64_le_quiet(fdt1, fdt0, &env->active_fpu.fp_status)
+ || float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
+FOP_CONDN_D(une, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status)
+ || float64_lt_quiet(fdt1, fdt0, &env->active_fpu.fp_status)
+ || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
+FOP_CONDN_D(ne, (float64_lt_quiet(fdt1, fdt0, &env->active_fpu.fp_status)
+ || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
+FOP_CONDN_D(sor, (float64_le(fdt1, fdt0, &env->active_fpu.fp_status)
+ || float64_le(fdt0, fdt1, &env->active_fpu.fp_status)))
+FOP_CONDN_D(sune, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status)
+ || float64_lt(fdt1, fdt0, &env->active_fpu.fp_status)
+ || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status)))
+FOP_CONDN_D(sne, (float64_lt(fdt1, fdt0, &env->active_fpu.fp_status)
+ || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status)))
+
+#define FOP_CONDN_S(op, cond) \
+uint32_t helper_r6_cmp_s_ ## op(CPUMIPSState * env, uint32_t fst0, \
+ uint32_t fst1) \
+{ \
+ uint64_t c; \
+ c = cond; \
+ update_fcr31(env, GETPC()); \
+ if (c) { \
+ return -1; \
+ } else { \
+ return 0; \
+ } \
+}
+
+/* NOTE: the comma operator makes "cond" evaluate to false,
+ * but float32_unordered_quiet() is still called. */
+FOP_CONDN_S(af, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0))
+FOP_CONDN_S(un, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)))
+FOP_CONDN_S(eq, (float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status)))
+FOP_CONDN_S(ueq, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)
+ || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status)))
+FOP_CONDN_S(lt, (float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status)))
+FOP_CONDN_S(ult, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)
+ || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status)))
+FOP_CONDN_S(le, (float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status)))
+FOP_CONDN_S(ule, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)
+ || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status)))
+/* NOTE: the comma operator makes "cond" evaluate to false,
+ * but float32_unordered() is still called. */
+FOP_CONDN_S(saf, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0))
+FOP_CONDN_S(sun, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status)))
+FOP_CONDN_S(seq, (float32_eq(fst0, fst1, &env->active_fpu.fp_status)))
+FOP_CONDN_S(sueq, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status)
+ || float32_eq(fst0, fst1, &env->active_fpu.fp_status)))
+FOP_CONDN_S(slt, (float32_lt(fst0, fst1, &env->active_fpu.fp_status)))
+FOP_CONDN_S(sult, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status)
+ || float32_lt(fst0, fst1, &env->active_fpu.fp_status)))
+FOP_CONDN_S(sle, (float32_le(fst0, fst1, &env->active_fpu.fp_status)))
+FOP_CONDN_S(sule, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status)
+ || float32_le(fst0, fst1, &env->active_fpu.fp_status)))
+FOP_CONDN_S(or, (float32_le_quiet(fst1, fst0, &env->active_fpu.fp_status)
+ || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status)))
+FOP_CONDN_S(une, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)
+ || float32_lt_quiet(fst1, fst0, &env->active_fpu.fp_status)
+ || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status)))
+FOP_CONDN_S(ne, (float32_lt_quiet(fst1, fst0, &env->active_fpu.fp_status)
+ || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status)))
+FOP_CONDN_S(sor, (float32_le(fst1, fst0, &env->active_fpu.fp_status)
+ || float32_le(fst0, fst1, &env->active_fpu.fp_status)))
+FOP_CONDN_S(sune, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status)
+ || float32_lt(fst1, fst0, &env->active_fpu.fp_status)
+ || float32_lt(fst0, fst1, &env->active_fpu.fp_status)))
+FOP_CONDN_S(sne, (float32_lt(fst1, fst0, &env->active_fpu.fp_status)
+ || float32_lt(fst0, fst1, &env->active_fpu.fp_status)))
+
+/* MSA */
+/* Data format element sizes */
+#define DF_BITS(df) (1 << ((df) + 3))
+
+/* Element-by-element access macros */
+#define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df))
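+/* df encodes the element width: 0 = byte, 1 = halfword, 2 = word,
+ * 3 = doubleword. DF_BITS gives that width in bits and DF_ELEMENTS the
+ * number of elements in an MSA_WRLEN-bit (128-bit) vector register,
+ * e.g. word elements: DF_BITS = 32, DF_ELEMENTS = 4. */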
+
+#if !defined(CONFIG_USER_ONLY)
+#define MEMOP_IDX(DF) \
+ TCGMemOpIdx oi = make_memop_idx(MO_TE | DF | MO_UNALN, \
+ cpu_mmu_index(env, false));
+#else
+#define MEMOP_IDX(DF)
+#endif
+
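+/* Element loads go through a temporary vector which is copied into the
+ * destination register only after every element has been read, so a
+ * faulting load leaves wd unmodified. */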
+#define MSA_LD_DF(DF, TYPE, LD_INSN, ...) \
+void helper_msa_ld_ ## TYPE(CPUMIPSState *env, uint32_t wd, \
+ target_ulong addr) \
+{ \
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
+ wr_t wx; \
+ int i; \
+ MEMOP_IDX(DF) \
+ for (i = 0; i < DF_ELEMENTS(DF); i++) { \
+ wx.TYPE[i] = LD_INSN(env, addr + (i << DF), ##__VA_ARGS__); \
+ } \
+ memcpy(pwd, &wx, sizeof(wr_t)); \
+}
+
+#if !defined(CONFIG_USER_ONLY)
+MSA_LD_DF(DF_BYTE, b, helper_ret_ldub_mmu, oi, GETPC())
+MSA_LD_DF(DF_HALF, h, helper_ret_lduw_mmu, oi, GETPC())
+MSA_LD_DF(DF_WORD, w, helper_ret_ldul_mmu, oi, GETPC())
+MSA_LD_DF(DF_DOUBLE, d, helper_ret_ldq_mmu, oi, GETPC())
+#else
+MSA_LD_DF(DF_BYTE, b, cpu_ldub_data)
+MSA_LD_DF(DF_HALF, h, cpu_lduw_data)
+MSA_LD_DF(DF_WORD, w, cpu_ldl_data)
+MSA_LD_DF(DF_DOUBLE, d, cpu_ldq_data)
+#endif
+
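+/* A vector store may span two pages; probe both for writability first so
+ * that a fault is raised before any element has been written. */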
+#define MSA_PAGESPAN(x) \
+ ((((x) & ~TARGET_PAGE_MASK) + MSA_WRLEN/8 - 1) >= TARGET_PAGE_SIZE)
+
+static inline void ensure_writable_pages(CPUMIPSState *env,
+ target_ulong addr,
+ int mmu_idx,
+ uintptr_t retaddr)
+{
+#if !defined(CONFIG_USER_ONLY)
+ target_ulong page_addr;
+ if (unlikely(MSA_PAGESPAN(addr))) {
+ /* first page */
+ probe_write(env, addr, mmu_idx, retaddr);
+ /* second page */
+ page_addr = (addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
+ probe_write(env, page_addr, mmu_idx, retaddr);
+ }
+#endif
+}
+
+#define MSA_ST_DF(DF, TYPE, ST_INSN, ...) \
+void helper_msa_st_ ## TYPE(CPUMIPSState *env, uint32_t wd, \
+ target_ulong addr) \
+{ \
+ wr_t *pwd = &(env->active_fpu.fpr[wd].wr); \
+ int mmu_idx = cpu_mmu_index(env, false); \
+ int i; \
+ MEMOP_IDX(DF) \
+ ensure_writable_pages(env, addr, mmu_idx, GETPC()); \
+ for (i = 0; i < DF_ELEMENTS(DF); i++) { \
+ ST_INSN(env, addr + (i << DF), pwd->TYPE[i], ##__VA_ARGS__); \
+ } \
+}
+
+#if !defined(CONFIG_USER_ONLY)
+MSA_ST_DF(DF_BYTE, b, helper_ret_stb_mmu, oi, GETPC())
+MSA_ST_DF(DF_HALF, h, helper_ret_stw_mmu, oi, GETPC())
+MSA_ST_DF(DF_WORD, w, helper_ret_stl_mmu, oi, GETPC())
+MSA_ST_DF(DF_DOUBLE, d, helper_ret_stq_mmu, oi, GETPC())
+#else
+MSA_ST_DF(DF_BYTE, b, cpu_stb_data)
+MSA_ST_DF(DF_HALF, h, cpu_stw_data)
+MSA_ST_DF(DF_WORD, w, cpu_stl_data)
+MSA_ST_DF(DF_DOUBLE, d, cpu_stq_data)
+#endif
+
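+/* Only the Index Store Tag (op 9) and Index Load Tag (op 5) operations
+ * are modelled here, as ITC tag accesses; all other cache ops are
+ * treated as no-ops. */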
+void helper_cache(CPUMIPSState *env, target_ulong addr, uint32_t op)
+{
+#ifndef CONFIG_USER_ONLY
+ target_ulong index = addr & 0x1fffffff;
+ if (op == 9) {
+ /* Index Store Tag */
+ memory_region_dispatch_write(env->itc_tag, index, env->CP0_TagLo,
+ 8, MEMTXATTRS_UNSPECIFIED);
+ } else if (op == 5) {
+ /* Index Load Tag */
+ memory_region_dispatch_read(env->itc_tag, index, &env->CP0_TagLo,
+ 8, MEMTXATTRS_UNSPECIFIED);
+ }
+#endif
+}
diff --git a/target/mips/translate.c b/target/mips/translate.c
new file mode 100644
index 0000000000..57b824ff2d
--- /dev/null
+++ b/target/mips/translate.c
@@ -0,0 +1,20423 @@
+/*
+ * MIPS32 emulation for qemu: main translation routines.
+ *
+ * Copyright (c) 2004-2005 Jocelyn Mayer
+ * Copyright (c) 2006 Marius Groeger (FPU operations)
+ * Copyright (c) 2006 Thiemo Seufer (MIPS32R2 support)
+ * Copyright (c) 2009 CodeSourcery (MIPS16 and microMIPS support)
+ * Copyright (c) 2012 Jia Liu & Dongxue Zhang (MIPS ASE DSP support)
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "disas/disas.h"
+#include "exec/exec-all.h"
+#include "tcg-op.h"
+#include "exec/cpu_ldst.h"
+
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+#include "sysemu/kvm.h"
+#include "exec/semihost.h"
+
+#include "trace-tcg.h"
+#include "exec/log.h"
+
+#define MIPS_DEBUG_DISAS 0
+
+/* MIPS major opcodes */
+#define MASK_OP_MAJOR(op) (op & (0x3F << 26))
+
+enum {
+ /* indirect opcode tables */
+ OPC_SPECIAL = (0x00 << 26),
+ OPC_REGIMM = (0x01 << 26),
+ OPC_CP0 = (0x10 << 26),
+ OPC_CP1 = (0x11 << 26),
+ OPC_CP2 = (0x12 << 26),
+ OPC_CP3 = (0x13 << 26),
+ OPC_SPECIAL2 = (0x1C << 26),
+ OPC_SPECIAL3 = (0x1F << 26),
+ /* arithmetic with immediate */
+ OPC_ADDI = (0x08 << 26),
+ OPC_ADDIU = (0x09 << 26),
+ OPC_SLTI = (0x0A << 26),
+ OPC_SLTIU = (0x0B << 26),
+ /* logic with immediate */
+ OPC_ANDI = (0x0C << 26),
+ OPC_ORI = (0x0D << 26),
+ OPC_XORI = (0x0E << 26),
+ OPC_LUI = (0x0F << 26),
+ /* arithmetic with immediate */
+ OPC_DADDI = (0x18 << 26),
+ OPC_DADDIU = (0x19 << 26),
+ /* Jump and branches */
+ OPC_J = (0x02 << 26),
+ OPC_JAL = (0x03 << 26),
+ OPC_BEQ = (0x04 << 26), /* Unconditional if rs = rt = 0 (B) */
+ OPC_BEQL = (0x14 << 26),
+ OPC_BNE = (0x05 << 26),
+ OPC_BNEL = (0x15 << 26),
+ OPC_BLEZ = (0x06 << 26),
+ OPC_BLEZL = (0x16 << 26),
+ OPC_BGTZ = (0x07 << 26),
+ OPC_BGTZL = (0x17 << 26),
+ OPC_JALX = (0x1D << 26),
+ OPC_DAUI = (0x1D << 26),
+ /* Load and stores */
+ OPC_LDL = (0x1A << 26),
+ OPC_LDR = (0x1B << 26),
+ OPC_LB = (0x20 << 26),
+ OPC_LH = (0x21 << 26),
+ OPC_LWL = (0x22 << 26),
+ OPC_LW = (0x23 << 26),
+ OPC_LWPC = OPC_LW | 0x5,
+ OPC_LBU = (0x24 << 26),
+ OPC_LHU = (0x25 << 26),
+ OPC_LWR = (0x26 << 26),
+ OPC_LWU = (0x27 << 26),
+ OPC_SB = (0x28 << 26),
+ OPC_SH = (0x29 << 26),
+ OPC_SWL = (0x2A << 26),
+ OPC_SW = (0x2B << 26),
+ OPC_SDL = (0x2C << 26),
+ OPC_SDR = (0x2D << 26),
+ OPC_SWR = (0x2E << 26),
+ OPC_LL = (0x30 << 26),
+ OPC_LLD = (0x34 << 26),
+ OPC_LD = (0x37 << 26),
+ OPC_LDPC = OPC_LD | 0x5,
+ OPC_SC = (0x38 << 26),
+ OPC_SCD = (0x3C << 26),
+ OPC_SD = (0x3F << 26),
+ /* Floating point load/store */
+ OPC_LWC1 = (0x31 << 26),
+ OPC_LWC2 = (0x32 << 26),
+ OPC_LDC1 = (0x35 << 26),
+ OPC_LDC2 = (0x36 << 26),
+ OPC_SWC1 = (0x39 << 26),
+ OPC_SWC2 = (0x3A << 26),
+ OPC_SDC1 = (0x3D << 26),
+ OPC_SDC2 = (0x3E << 26),
+ /* Compact Branches */
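+ /* R6 compact branches reuse pre-R6 major opcodes and are distinguished by the rs/rt fields */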
+ OPC_BLEZALC = (0x06 << 26),
+ OPC_BGEZALC = (0x06 << 26),
+ OPC_BGEUC = (0x06 << 26),
+ OPC_BGTZALC = (0x07 << 26),
+ OPC_BLTZALC = (0x07 << 26),
+ OPC_BLTUC = (0x07 << 26),
+ OPC_BOVC = (0x08 << 26),
+ OPC_BEQZALC = (0x08 << 26),
+ OPC_BEQC = (0x08 << 26),
+ OPC_BLEZC = (0x16 << 26),
+ OPC_BGEZC = (0x16 << 26),
+ OPC_BGEC = (0x16 << 26),
+ OPC_BGTZC = (0x17 << 26),
+ OPC_BLTZC = (0x17 << 26),
+ OPC_BLTC = (0x17 << 26),
+ OPC_BNVC = (0x18 << 26),
+ OPC_BNEZALC = (0x18 << 26),
+ OPC_BNEC = (0x18 << 26),
+ OPC_BC = (0x32 << 26),
+ OPC_BEQZC = (0x36 << 26),
+ OPC_JIC = (0x36 << 26),
+ OPC_BALC = (0x3A << 26),
+ OPC_BNEZC = (0x3E << 26),
+ OPC_JIALC = (0x3E << 26),
+ /* MDMX ASE specific */
+ OPC_MDMX = (0x1E << 26),
+ /* MSA ASE, same as MDMX */
+ OPC_MSA = OPC_MDMX,
+ /* Cache and prefetch */
+ OPC_CACHE = (0x2F << 26),
+ OPC_PREF = (0x33 << 26),
+ /* PC-relative address computation / loads */
+ OPC_PCREL = (0x3B << 26),
+};
+
+/* PC-relative address computation / loads */
+#define MASK_OPC_PCREL_TOP2BITS(op) (MASK_OP_MAJOR(op) | (op & (3 << 19)))
+#define MASK_OPC_PCREL_TOP5BITS(op) (MASK_OP_MAJOR(op) | (op & (0x1f << 16)))
+enum {
+ /* Instructions determined by bits 19 and 20 */
+ OPC_ADDIUPC = OPC_PCREL | (0 << 19),
+ R6_OPC_LWPC = OPC_PCREL | (1 << 19),
+ OPC_LWUPC = OPC_PCREL | (2 << 19),
+
+ /* Instructions determined by bits 16 ... 20 */
+ OPC_AUIPC = OPC_PCREL | (0x1e << 16),
+ OPC_ALUIPC = OPC_PCREL | (0x1f << 16),
+
+ /* Other */
+ R6_OPC_LDPC = OPC_PCREL | (6 << 18),
+};
+
+/* MIPS special opcodes */
+#define MASK_SPECIAL(op) MASK_OP_MAJOR(op) | (op & 0x3F)
+
+enum {
+ /* Shifts */
+ OPC_SLL = 0x00 | OPC_SPECIAL,
+ /* NOP is SLL r0, r0, 0 */
+ /* SSNOP is SLL r0, r0, 1 */
+ /* EHB is SLL r0, r0, 3 */
+ OPC_SRL = 0x02 | OPC_SPECIAL, /* also ROTR */
+ OPC_ROTR = OPC_SRL | (1 << 21),
+ OPC_SRA = 0x03 | OPC_SPECIAL,
+ OPC_SLLV = 0x04 | OPC_SPECIAL,
+ OPC_SRLV = 0x06 | OPC_SPECIAL, /* also ROTRV */
+ OPC_ROTRV = OPC_SRLV | (1 << 6),
+ OPC_SRAV = 0x07 | OPC_SPECIAL,
+ OPC_DSLLV = 0x14 | OPC_SPECIAL,
+ OPC_DSRLV = 0x16 | OPC_SPECIAL, /* also DROTRV */
+ OPC_DROTRV = OPC_DSRLV | (1 << 6),
+ OPC_DSRAV = 0x17 | OPC_SPECIAL,
+ OPC_DSLL = 0x38 | OPC_SPECIAL,
+ OPC_DSRL = 0x3A | OPC_SPECIAL, /* also DROTR */
+ OPC_DROTR = OPC_DSRL | (1 << 21),
+ OPC_DSRA = 0x3B | OPC_SPECIAL,
+ OPC_DSLL32 = 0x3C | OPC_SPECIAL,
+ OPC_DSRL32 = 0x3E | OPC_SPECIAL, /* also DROTR32 */
+ OPC_DROTR32 = OPC_DSRL32 | (1 << 21),
+ OPC_DSRA32 = 0x3F | OPC_SPECIAL,
+ /* Multiplication / division */
+ OPC_MULT = 0x18 | OPC_SPECIAL,
+ OPC_MULTU = 0x19 | OPC_SPECIAL,
+ OPC_DIV = 0x1A | OPC_SPECIAL,
+ OPC_DIVU = 0x1B | OPC_SPECIAL,
+ OPC_DMULT = 0x1C | OPC_SPECIAL,
+ OPC_DMULTU = 0x1D | OPC_SPECIAL,
+ OPC_DDIV = 0x1E | OPC_SPECIAL,
+ OPC_DDIVU = 0x1F | OPC_SPECIAL,
+
+ /* 2 registers arithmetic / logic */
+ OPC_ADD = 0x20 | OPC_SPECIAL,
+ OPC_ADDU = 0x21 | OPC_SPECIAL,
+ OPC_SUB = 0x22 | OPC_SPECIAL,
+ OPC_SUBU = 0x23 | OPC_SPECIAL,
+ OPC_AND = 0x24 | OPC_SPECIAL,
+ OPC_OR = 0x25 | OPC_SPECIAL,
+ OPC_XOR = 0x26 | OPC_SPECIAL,
+ OPC_NOR = 0x27 | OPC_SPECIAL,
+ OPC_SLT = 0x2A | OPC_SPECIAL,
+ OPC_SLTU = 0x2B | OPC_SPECIAL,
+ OPC_DADD = 0x2C | OPC_SPECIAL,
+ OPC_DADDU = 0x2D | OPC_SPECIAL,
+ OPC_DSUB = 0x2E | OPC_SPECIAL,
+ OPC_DSUBU = 0x2F | OPC_SPECIAL,
+ /* Jumps */
+ OPC_JR = 0x08 | OPC_SPECIAL, /* Also JR.HB */
+ OPC_JALR = 0x09 | OPC_SPECIAL, /* Also JALR.HB */
+ /* Traps */
+ OPC_TGE = 0x30 | OPC_SPECIAL,
+ OPC_TGEU = 0x31 | OPC_SPECIAL,
+ OPC_TLT = 0x32 | OPC_SPECIAL,
+ OPC_TLTU = 0x33 | OPC_SPECIAL,
+ OPC_TEQ = 0x34 | OPC_SPECIAL,
+ OPC_TNE = 0x36 | OPC_SPECIAL,
+ /* HI / LO registers load & stores */
+ OPC_MFHI = 0x10 | OPC_SPECIAL,
+ OPC_MTHI = 0x11 | OPC_SPECIAL,
+ OPC_MFLO = 0x12 | OPC_SPECIAL,
+ OPC_MTLO = 0x13 | OPC_SPECIAL,
+ /* Conditional moves */
+ OPC_MOVZ = 0x0A | OPC_SPECIAL,
+ OPC_MOVN = 0x0B | OPC_SPECIAL,
+
+ OPC_SELEQZ = 0x35 | OPC_SPECIAL,
+ OPC_SELNEZ = 0x37 | OPC_SPECIAL,
+
+ OPC_MOVCI = 0x01 | OPC_SPECIAL,
+
+ /* Special */
+ OPC_PMON = 0x05 | OPC_SPECIAL, /* unofficial */
+ OPC_SYSCALL = 0x0C | OPC_SPECIAL,
+ OPC_BREAK = 0x0D | OPC_SPECIAL,
+ OPC_SPIM = 0x0E | OPC_SPECIAL, /* unofficial */
+ OPC_SYNC = 0x0F | OPC_SPECIAL,
+
+ OPC_SPECIAL28_RESERVED = 0x28 | OPC_SPECIAL,
+ OPC_SPECIAL29_RESERVED = 0x29 | OPC_SPECIAL,
+ OPC_SPECIAL39_RESERVED = 0x39 | OPC_SPECIAL,
+ OPC_SPECIAL3D_RESERVED = 0x3D | OPC_SPECIAL,
+};
+
+/* R6 Multiply and Divide instructions have the same Opcode
+ and function field as legacy OPC_MULT[U]/OPC_DIV[U] */
+#define MASK_R6_MULDIV(op) (MASK_SPECIAL(op) | (op & (0x7ff)))
+
+enum {
+ R6_OPC_MUL = OPC_MULT | (2 << 6),
+ R6_OPC_MUH = OPC_MULT | (3 << 6),
+ R6_OPC_MULU = OPC_MULTU | (2 << 6),
+ R6_OPC_MUHU = OPC_MULTU | (3 << 6),
+ R6_OPC_DIV = OPC_DIV | (2 << 6),
+ R6_OPC_MOD = OPC_DIV | (3 << 6),
+ R6_OPC_DIVU = OPC_DIVU | (2 << 6),
+ R6_OPC_MODU = OPC_DIVU | (3 << 6),
+
+ R6_OPC_DMUL = OPC_DMULT | (2 << 6),
+ R6_OPC_DMUH = OPC_DMULT | (3 << 6),
+ R6_OPC_DMULU = OPC_DMULTU | (2 << 6),
+ R6_OPC_DMUHU = OPC_DMULTU | (3 << 6),
+ R6_OPC_DDIV = OPC_DDIV | (2 << 6),
+ R6_OPC_DMOD = OPC_DDIV | (3 << 6),
+ R6_OPC_DDIVU = OPC_DDIVU | (2 << 6),
+ R6_OPC_DMODU = OPC_DDIVU | (3 << 6),
+
+ R6_OPC_CLZ = 0x10 | OPC_SPECIAL,
+ R6_OPC_CLO = 0x11 | OPC_SPECIAL,
+ R6_OPC_DCLZ = 0x12 | OPC_SPECIAL,
+ R6_OPC_DCLO = 0x13 | OPC_SPECIAL,
+ R6_OPC_SDBBP = 0x0e | OPC_SPECIAL,
+
+ OPC_LSA = 0x05 | OPC_SPECIAL,
+ OPC_DLSA = 0x15 | OPC_SPECIAL,
+};
+
+/* Multiplication variants of the vr54xx. */
+#define MASK_MUL_VR54XX(op) MASK_SPECIAL(op) | (op & (0x1F << 6))
+
+enum {
+ OPC_VR54XX_MULS = (0x03 << 6) | OPC_MULT,
+ OPC_VR54XX_MULSU = (0x03 << 6) | OPC_MULTU,
+ OPC_VR54XX_MACC = (0x05 << 6) | OPC_MULT,
+ OPC_VR54XX_MACCU = (0x05 << 6) | OPC_MULTU,
+ OPC_VR54XX_MSAC = (0x07 << 6) | OPC_MULT,
+ OPC_VR54XX_MSACU = (0x07 << 6) | OPC_MULTU,
+ OPC_VR54XX_MULHI = (0x09 << 6) | OPC_MULT,
+ OPC_VR54XX_MULHIU = (0x09 << 6) | OPC_MULTU,
+ OPC_VR54XX_MULSHI = (0x0B << 6) | OPC_MULT,
+ OPC_VR54XX_MULSHIU = (0x0B << 6) | OPC_MULTU,
+ OPC_VR54XX_MACCHI = (0x0D << 6) | OPC_MULT,
+ OPC_VR54XX_MACCHIU = (0x0D << 6) | OPC_MULTU,
+ OPC_VR54XX_MSACHI = (0x0F << 6) | OPC_MULT,
+ OPC_VR54XX_MSACHIU = (0x0F << 6) | OPC_MULTU,
+};
+
+/* REGIMM (rt field) opcodes */
+#define MASK_REGIMM(op) MASK_OP_MAJOR(op) | (op & (0x1F << 16))
+
+enum {
+ OPC_BLTZ = (0x00 << 16) | OPC_REGIMM,
+ OPC_BLTZL = (0x02 << 16) | OPC_REGIMM,
+ OPC_BGEZ = (0x01 << 16) | OPC_REGIMM,
+ OPC_BGEZL = (0x03 << 16) | OPC_REGIMM,
+ OPC_BLTZAL = (0x10 << 16) | OPC_REGIMM,
+ OPC_BLTZALL = (0x12 << 16) | OPC_REGIMM,
+ OPC_BGEZAL = (0x11 << 16) | OPC_REGIMM,
+ OPC_BGEZALL = (0x13 << 16) | OPC_REGIMM,
+ OPC_TGEI = (0x08 << 16) | OPC_REGIMM,
+ OPC_TGEIU = (0x09 << 16) | OPC_REGIMM,
+ OPC_TLTI = (0x0A << 16) | OPC_REGIMM,
+ OPC_TLTIU = (0x0B << 16) | OPC_REGIMM,
+ OPC_TEQI = (0x0C << 16) | OPC_REGIMM,
+ OPC_TNEI = (0x0E << 16) | OPC_REGIMM,
+ OPC_SIGRIE = (0x17 << 16) | OPC_REGIMM,
+ OPC_SYNCI = (0x1F << 16) | OPC_REGIMM,
+
+ OPC_DAHI = (0x06 << 16) | OPC_REGIMM,
+ OPC_DATI = (0x1e << 16) | OPC_REGIMM,
+};
+
+/* Special2 opcodes */
+#define MASK_SPECIAL2(op) (MASK_OP_MAJOR(op) | (op & 0x3F))
+
+enum {
+ /* Multiply & xxx operations */
+ OPC_MADD = 0x00 | OPC_SPECIAL2,
+ OPC_MADDU = 0x01 | OPC_SPECIAL2,
+ OPC_MUL = 0x02 | OPC_SPECIAL2,
+ OPC_MSUB = 0x04 | OPC_SPECIAL2,
+ OPC_MSUBU = 0x05 | OPC_SPECIAL2,
+ /* Loongson 2F */
+ OPC_MULT_G_2F = 0x10 | OPC_SPECIAL2,
+ OPC_DMULT_G_2F = 0x11 | OPC_SPECIAL2,
+ OPC_MULTU_G_2F = 0x12 | OPC_SPECIAL2,
+ OPC_DMULTU_G_2F = 0x13 | OPC_SPECIAL2,
+ OPC_DIV_G_2F = 0x14 | OPC_SPECIAL2,
+ OPC_DDIV_G_2F = 0x15 | OPC_SPECIAL2,
+ OPC_DIVU_G_2F = 0x16 | OPC_SPECIAL2,
+ OPC_DDIVU_G_2F = 0x17 | OPC_SPECIAL2,
+ OPC_MOD_G_2F = 0x1c | OPC_SPECIAL2,
+ OPC_DMOD_G_2F = 0x1d | OPC_SPECIAL2,
+ OPC_MODU_G_2F = 0x1e | OPC_SPECIAL2,
+ OPC_DMODU_G_2F = 0x1f | OPC_SPECIAL2,
+ /* Misc */
+ OPC_CLZ = 0x20 | OPC_SPECIAL2,
+ OPC_CLO = 0x21 | OPC_SPECIAL2,
+ OPC_DCLZ = 0x24 | OPC_SPECIAL2,
+ OPC_DCLO = 0x25 | OPC_SPECIAL2,
+ /* Special */
+ OPC_SDBBP = 0x3F | OPC_SPECIAL2,
+};
+
+/* Special3 opcodes */
+#define MASK_SPECIAL3(op) (MASK_OP_MAJOR(op) | (op & 0x3F))
+
+enum {
+ OPC_EXT = 0x00 | OPC_SPECIAL3,
+ OPC_DEXTM = 0x01 | OPC_SPECIAL3,
+ OPC_DEXTU = 0x02 | OPC_SPECIAL3,
+ OPC_DEXT = 0x03 | OPC_SPECIAL3,
+ OPC_INS = 0x04 | OPC_SPECIAL3,
+ OPC_DINSM = 0x05 | OPC_SPECIAL3,
+ OPC_DINSU = 0x06 | OPC_SPECIAL3,
+ OPC_DINS = 0x07 | OPC_SPECIAL3,
+ OPC_FORK = 0x08 | OPC_SPECIAL3,
+ OPC_YIELD = 0x09 | OPC_SPECIAL3,
+ OPC_BSHFL = 0x20 | OPC_SPECIAL3,
+ OPC_DBSHFL = 0x24 | OPC_SPECIAL3,
+ OPC_RDHWR = 0x3B | OPC_SPECIAL3,
+
+ /* Loongson 2E */
+ OPC_MULT_G_2E = 0x18 | OPC_SPECIAL3,
+ OPC_MULTU_G_2E = 0x19 | OPC_SPECIAL3,
+ OPC_DIV_G_2E = 0x1A | OPC_SPECIAL3,
+ OPC_DIVU_G_2E = 0x1B | OPC_SPECIAL3,
+ OPC_DMULT_G_2E = 0x1C | OPC_SPECIAL3,
+ OPC_DMULTU_G_2E = 0x1D | OPC_SPECIAL3,
+ OPC_DDIV_G_2E = 0x1E | OPC_SPECIAL3,
+ OPC_DDIVU_G_2E = 0x1F | OPC_SPECIAL3,
+ OPC_MOD_G_2E = 0x22 | OPC_SPECIAL3,
+ OPC_MODU_G_2E = 0x23 | OPC_SPECIAL3,
+ OPC_DMOD_G_2E = 0x26 | OPC_SPECIAL3,
+ OPC_DMODU_G_2E = 0x27 | OPC_SPECIAL3,
+
+ /* MIPS DSP Load */
+ OPC_LX_DSP = 0x0A | OPC_SPECIAL3,
+ /* MIPS DSP Arithmetic */
+ OPC_ADDU_QB_DSP = 0x10 | OPC_SPECIAL3,
+ OPC_ADDU_OB_DSP = 0x14 | OPC_SPECIAL3,
+ OPC_ABSQ_S_PH_DSP = 0x12 | OPC_SPECIAL3,
+ OPC_ABSQ_S_QH_DSP = 0x16 | OPC_SPECIAL3,
+ /* OPC_ADDUH_QB_DSP is same as OPC_MULT_G_2E. */
+ /* OPC_ADDUH_QB_DSP = 0x18 | OPC_SPECIAL3, */
+ OPC_CMPU_EQ_QB_DSP = 0x11 | OPC_SPECIAL3,
+ OPC_CMPU_EQ_OB_DSP = 0x15 | OPC_SPECIAL3,
+ /* MIPS DSP GPR-Based Shift Sub-class */
+ OPC_SHLL_QB_DSP = 0x13 | OPC_SPECIAL3,
+ OPC_SHLL_OB_DSP = 0x17 | OPC_SPECIAL3,
+ /* MIPS DSP Multiply Sub-class insns */
+ /* OPC_MUL_PH_DSP is same as OPC_ADDUH_QB_DSP. */
+ /* OPC_MUL_PH_DSP = 0x18 | OPC_SPECIAL3, */
+ OPC_DPA_W_PH_DSP = 0x30 | OPC_SPECIAL3,
+ OPC_DPAQ_W_QH_DSP = 0x34 | OPC_SPECIAL3,
+ /* DSP Bit/Manipulation Sub-class */
+ OPC_INSV_DSP = 0x0C | OPC_SPECIAL3,
+ OPC_DINSV_DSP = 0x0D | OPC_SPECIAL3,
+ /* MIPS DSP Append Sub-class */
+ OPC_APPEND_DSP = 0x31 | OPC_SPECIAL3,
+ OPC_DAPPEND_DSP = 0x35 | OPC_SPECIAL3,
+ /* MIPS DSP Accumulator and DSPControl Access Sub-class */
+ OPC_EXTR_W_DSP = 0x38 | OPC_SPECIAL3,
+ OPC_DEXTR_W_DSP = 0x3C | OPC_SPECIAL3,
+
+ /* R6 */
+ R6_OPC_PREF = 0x35 | OPC_SPECIAL3,
+ R6_OPC_CACHE = 0x25 | OPC_SPECIAL3,
+ R6_OPC_LL = 0x36 | OPC_SPECIAL3,
+ R6_OPC_SC = 0x26 | OPC_SPECIAL3,
+ R6_OPC_LLD = 0x37 | OPC_SPECIAL3,
+ R6_OPC_SCD = 0x27 | OPC_SPECIAL3,
+};
+
+/* BSHFL opcodes */
+#define MASK_BSHFL(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+
+enum {
+ OPC_WSBH = (0x02 << 6) | OPC_BSHFL,
+ OPC_SEB = (0x10 << 6) | OPC_BSHFL,
+ OPC_SEH = (0x18 << 6) | OPC_BSHFL,
+ OPC_ALIGN = (0x08 << 6) | OPC_BSHFL, /* 010.bp */
+ OPC_ALIGN_END = (0x0B << 6) | OPC_BSHFL, /* 010.00 to 010.11 */
+ OPC_BITSWAP = (0x00 << 6) | OPC_BSHFL /* 00000 */
+};
+
+/* DBSHFL opcodes */
+#define MASK_DBSHFL(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+
+enum {
+ OPC_DSBH = (0x02 << 6) | OPC_DBSHFL,
+ OPC_DSHD = (0x05 << 6) | OPC_DBSHFL,
+ OPC_DALIGN = (0x08 << 6) | OPC_DBSHFL, /* 01.bp */
+ OPC_DALIGN_END = (0x0F << 6) | OPC_DBSHFL, /* 01.000 to 01.111 */
+ OPC_DBITSWAP = (0x00 << 6) | OPC_DBSHFL, /* 00000 */
+};
+
+/* MIPS DSP REGIMM opcodes */
+enum {
+ OPC_BPOSGE32 = (0x1C << 16) | OPC_REGIMM,
+ OPC_BPOSGE64 = (0x1D << 16) | OPC_REGIMM,
+};
+
+#define MASK_LX(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+/* MIPS DSP Load */
+enum {
+ OPC_LBUX = (0x06 << 6) | OPC_LX_DSP,
+ OPC_LHX = (0x04 << 6) | OPC_LX_DSP,
+ OPC_LWX = (0x00 << 6) | OPC_LX_DSP,
+ OPC_LDX = (0x08 << 6) | OPC_LX_DSP,
+};
+
+#define MASK_ADDU_QB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+enum {
+ /* MIPS DSP Arithmetic Sub-class */
+ OPC_ADDQ_PH = (0x0A << 6) | OPC_ADDU_QB_DSP,
+ OPC_ADDQ_S_PH = (0x0E << 6) | OPC_ADDU_QB_DSP,
+ OPC_ADDQ_S_W = (0x16 << 6) | OPC_ADDU_QB_DSP,
+ OPC_ADDU_QB = (0x00 << 6) | OPC_ADDU_QB_DSP,
+ OPC_ADDU_S_QB = (0x04 << 6) | OPC_ADDU_QB_DSP,
+ OPC_ADDU_PH = (0x08 << 6) | OPC_ADDU_QB_DSP,
+ OPC_ADDU_S_PH = (0x0C << 6) | OPC_ADDU_QB_DSP,
+ OPC_SUBQ_PH = (0x0B << 6) | OPC_ADDU_QB_DSP,
+ OPC_SUBQ_S_PH = (0x0F << 6) | OPC_ADDU_QB_DSP,
+ OPC_SUBQ_S_W = (0x17 << 6) | OPC_ADDU_QB_DSP,
+ OPC_SUBU_QB = (0x01 << 6) | OPC_ADDU_QB_DSP,
+ OPC_SUBU_S_QB = (0x05 << 6) | OPC_ADDU_QB_DSP,
+ OPC_SUBU_PH = (0x09 << 6) | OPC_ADDU_QB_DSP,
+ OPC_SUBU_S_PH = (0x0D << 6) | OPC_ADDU_QB_DSP,
+ OPC_ADDSC = (0x10 << 6) | OPC_ADDU_QB_DSP,
+ OPC_ADDWC = (0x11 << 6) | OPC_ADDU_QB_DSP,
+ OPC_MODSUB = (0x12 << 6) | OPC_ADDU_QB_DSP,
+ OPC_RADDU_W_QB = (0x14 << 6) | OPC_ADDU_QB_DSP,
+ /* MIPS DSP Multiply Sub-class insns */
+ OPC_MULEU_S_PH_QBL = (0x06 << 6) | OPC_ADDU_QB_DSP,
+ OPC_MULEU_S_PH_QBR = (0x07 << 6) | OPC_ADDU_QB_DSP,
+ OPC_MULQ_RS_PH = (0x1F << 6) | OPC_ADDU_QB_DSP,
+ OPC_MULEQ_S_W_PHL = (0x1C << 6) | OPC_ADDU_QB_DSP,
+ OPC_MULEQ_S_W_PHR = (0x1D << 6) | OPC_ADDU_QB_DSP,
+ OPC_MULQ_S_PH = (0x1E << 6) | OPC_ADDU_QB_DSP,
+};
+
+#define OPC_ADDUH_QB_DSP OPC_MULT_G_2E
+#define MASK_ADDUH_QB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+enum {
+ /* MIPS DSP Arithmetic Sub-class */
+ OPC_ADDUH_QB = (0x00 << 6) | OPC_ADDUH_QB_DSP,
+ OPC_ADDUH_R_QB = (0x02 << 6) | OPC_ADDUH_QB_DSP,
+ OPC_ADDQH_PH = (0x08 << 6) | OPC_ADDUH_QB_DSP,
+ OPC_ADDQH_R_PH = (0x0A << 6) | OPC_ADDUH_QB_DSP,
+ OPC_ADDQH_W = (0x10 << 6) | OPC_ADDUH_QB_DSP,
+ OPC_ADDQH_R_W = (0x12 << 6) | OPC_ADDUH_QB_DSP,
+ OPC_SUBUH_QB = (0x01 << 6) | OPC_ADDUH_QB_DSP,
+ OPC_SUBUH_R_QB = (0x03 << 6) | OPC_ADDUH_QB_DSP,
+ OPC_SUBQH_PH = (0x09 << 6) | OPC_ADDUH_QB_DSP,
+ OPC_SUBQH_R_PH = (0x0B << 6) | OPC_ADDUH_QB_DSP,
+ OPC_SUBQH_W = (0x11 << 6) | OPC_ADDUH_QB_DSP,
+ OPC_SUBQH_R_W = (0x13 << 6) | OPC_ADDUH_QB_DSP,
+ /* MIPS DSP Multiply Sub-class insns */
+ OPC_MUL_PH = (0x0C << 6) | OPC_ADDUH_QB_DSP,
+ OPC_MUL_S_PH = (0x0E << 6) | OPC_ADDUH_QB_DSP,
+ OPC_MULQ_S_W = (0x16 << 6) | OPC_ADDUH_QB_DSP,
+ OPC_MULQ_RS_W = (0x17 << 6) | OPC_ADDUH_QB_DSP,
+};
+
+#define MASK_ABSQ_S_PH(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+enum {
+ /* MIPS DSP Arithmetic Sub-class */
+ OPC_ABSQ_S_QB = (0x01 << 6) | OPC_ABSQ_S_PH_DSP,
+ OPC_ABSQ_S_PH = (0x09 << 6) | OPC_ABSQ_S_PH_DSP,
+ OPC_ABSQ_S_W = (0x11 << 6) | OPC_ABSQ_S_PH_DSP,
+ OPC_PRECEQ_W_PHL = (0x0C << 6) | OPC_ABSQ_S_PH_DSP,
+ OPC_PRECEQ_W_PHR = (0x0D << 6) | OPC_ABSQ_S_PH_DSP,
+ OPC_PRECEQU_PH_QBL = (0x04 << 6) | OPC_ABSQ_S_PH_DSP,
+ OPC_PRECEQU_PH_QBR = (0x05 << 6) | OPC_ABSQ_S_PH_DSP,
+ OPC_PRECEQU_PH_QBLA = (0x06 << 6) | OPC_ABSQ_S_PH_DSP,
+ OPC_PRECEQU_PH_QBRA = (0x07 << 6) | OPC_ABSQ_S_PH_DSP,
+ OPC_PRECEU_PH_QBL = (0x1C << 6) | OPC_ABSQ_S_PH_DSP,
+ OPC_PRECEU_PH_QBR = (0x1D << 6) | OPC_ABSQ_S_PH_DSP,
+ OPC_PRECEU_PH_QBLA = (0x1E << 6) | OPC_ABSQ_S_PH_DSP,
+ OPC_PRECEU_PH_QBRA = (0x1F << 6) | OPC_ABSQ_S_PH_DSP,
+ /* DSP Bit/Manipulation Sub-class */
+ OPC_BITREV = (0x1B << 6) | OPC_ABSQ_S_PH_DSP,
+ OPC_REPL_QB = (0x02 << 6) | OPC_ABSQ_S_PH_DSP,
+ OPC_REPLV_QB = (0x03 << 6) | OPC_ABSQ_S_PH_DSP,
+ OPC_REPL_PH = (0x0A << 6) | OPC_ABSQ_S_PH_DSP,
+ OPC_REPLV_PH = (0x0B << 6) | OPC_ABSQ_S_PH_DSP,
+};
+
+#define MASK_CMPU_EQ_QB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+enum {
+ /* MIPS DSP Arithmetic Sub-class */
+ OPC_PRECR_QB_PH = (0x0D << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_PRECRQ_QB_PH = (0x0C << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_PRECR_SRA_PH_W = (0x1E << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_PRECR_SRA_R_PH_W = (0x1F << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_PRECRQ_PH_W = (0x14 << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_PRECRQ_RS_PH_W = (0x15 << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_PRECRQU_S_QB_PH = (0x0F << 6) | OPC_CMPU_EQ_QB_DSP,
+ /* DSP Compare-Pick Sub-class */
+ OPC_CMPU_EQ_QB = (0x00 << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_CMPU_LT_QB = (0x01 << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_CMPU_LE_QB = (0x02 << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_CMPGU_EQ_QB = (0x04 << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_CMPGU_LT_QB = (0x05 << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_CMPGU_LE_QB = (0x06 << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_CMPGDU_EQ_QB = (0x18 << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_CMPGDU_LT_QB = (0x19 << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_CMPGDU_LE_QB = (0x1A << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_CMP_EQ_PH = (0x08 << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_CMP_LT_PH = (0x09 << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_CMP_LE_PH = (0x0A << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_PICK_QB = (0x03 << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_PICK_PH = (0x0B << 6) | OPC_CMPU_EQ_QB_DSP,
+ OPC_PACKRL_PH = (0x0E << 6) | OPC_CMPU_EQ_QB_DSP,
+};
+
+#define MASK_SHLL_QB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+enum {
+ /* MIPS DSP GPR-Based Shift Sub-class */
+ OPC_SHLL_QB = (0x00 << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHLLV_QB = (0x02 << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHLL_PH = (0x08 << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHLLV_PH = (0x0A << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHLL_S_PH = (0x0C << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHLLV_S_PH = (0x0E << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHLL_S_W = (0x14 << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHLLV_S_W = (0x16 << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHRL_QB = (0x01 << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHRLV_QB = (0x03 << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHRL_PH = (0x19 << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHRLV_PH = (0x1B << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHRA_QB = (0x04 << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHRA_R_QB = (0x05 << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHRAV_QB = (0x06 << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHRAV_R_QB = (0x07 << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHRA_PH = (0x09 << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHRAV_PH = (0x0B << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHRA_R_PH = (0x0D << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHRAV_R_PH = (0x0F << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHRA_R_W = (0x15 << 6) | OPC_SHLL_QB_DSP,
+ OPC_SHRAV_R_W = (0x17 << 6) | OPC_SHLL_QB_DSP,
+};
+
+#define MASK_DPA_W_PH(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+enum {
+ /* MIPS DSP Multiply Sub-class insns */
+ OPC_DPAU_H_QBL = (0x03 << 6) | OPC_DPA_W_PH_DSP,
+ OPC_DPAU_H_QBR = (0x07 << 6) | OPC_DPA_W_PH_DSP,
+ OPC_DPSU_H_QBL = (0x0B << 6) | OPC_DPA_W_PH_DSP,
+ OPC_DPSU_H_QBR = (0x0F << 6) | OPC_DPA_W_PH_DSP,
+ OPC_DPA_W_PH = (0x00 << 6) | OPC_DPA_W_PH_DSP,
+ OPC_DPAX_W_PH = (0x08 << 6) | OPC_DPA_W_PH_DSP,
+ OPC_DPAQ_S_W_PH = (0x04 << 6) | OPC_DPA_W_PH_DSP,
+ OPC_DPAQX_S_W_PH = (0x18 << 6) | OPC_DPA_W_PH_DSP,
+ OPC_DPAQX_SA_W_PH = (0x1A << 6) | OPC_DPA_W_PH_DSP,
+ OPC_DPS_W_PH = (0x01 << 6) | OPC_DPA_W_PH_DSP,
+ OPC_DPSX_W_PH = (0x09 << 6) | OPC_DPA_W_PH_DSP,
+ OPC_DPSQ_S_W_PH = (0x05 << 6) | OPC_DPA_W_PH_DSP,
+ OPC_DPSQX_S_W_PH = (0x19 << 6) | OPC_DPA_W_PH_DSP,
+ OPC_DPSQX_SA_W_PH = (0x1B << 6) | OPC_DPA_W_PH_DSP,
+ OPC_MULSAQ_S_W_PH = (0x06 << 6) | OPC_DPA_W_PH_DSP,
+ OPC_DPAQ_SA_L_W = (0x0C << 6) | OPC_DPA_W_PH_DSP,
+ OPC_DPSQ_SA_L_W = (0x0D << 6) | OPC_DPA_W_PH_DSP,
+ OPC_MAQ_S_W_PHL = (0x14 << 6) | OPC_DPA_W_PH_DSP,
+ OPC_MAQ_S_W_PHR = (0x16 << 6) | OPC_DPA_W_PH_DSP,
+ OPC_MAQ_SA_W_PHL = (0x10 << 6) | OPC_DPA_W_PH_DSP,
+ OPC_MAQ_SA_W_PHR = (0x12 << 6) | OPC_DPA_W_PH_DSP,
+ OPC_MULSA_W_PH = (0x02 << 6) | OPC_DPA_W_PH_DSP,
+};
+
+#define MASK_INSV(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+enum {
+ /* DSP Bit/Manipulation Sub-class */
+ OPC_INSV = (0x00 << 6) | OPC_INSV_DSP,
+};
+
+#define MASK_APPEND(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+enum {
+ /* MIPS DSP Append Sub-class */
+ OPC_APPEND = (0x00 << 6) | OPC_APPEND_DSP,
+ OPC_PREPEND = (0x01 << 6) | OPC_APPEND_DSP,
+ OPC_BALIGN = (0x10 << 6) | OPC_APPEND_DSP,
+};
+
+#define MASK_EXTR_W(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+enum {
+ /* MIPS DSP Accumulator and DSPControl Access Sub-class */
+ OPC_EXTR_W = (0x00 << 6) | OPC_EXTR_W_DSP,
+ OPC_EXTR_R_W = (0x04 << 6) | OPC_EXTR_W_DSP,
+ OPC_EXTR_RS_W = (0x06 << 6) | OPC_EXTR_W_DSP,
+ OPC_EXTR_S_H = (0x0E << 6) | OPC_EXTR_W_DSP,
+ OPC_EXTRV_S_H = (0x0F << 6) | OPC_EXTR_W_DSP,
+ OPC_EXTRV_W = (0x01 << 6) | OPC_EXTR_W_DSP,
+ OPC_EXTRV_R_W = (0x05 << 6) | OPC_EXTR_W_DSP,
+ OPC_EXTRV_RS_W = (0x07 << 6) | OPC_EXTR_W_DSP,
+ OPC_EXTP = (0x02 << 6) | OPC_EXTR_W_DSP,
+ OPC_EXTPV = (0x03 << 6) | OPC_EXTR_W_DSP,
+ OPC_EXTPDP = (0x0A << 6) | OPC_EXTR_W_DSP,
+ OPC_EXTPDPV = (0x0B << 6) | OPC_EXTR_W_DSP,
+ OPC_SHILO = (0x1A << 6) | OPC_EXTR_W_DSP,
+ OPC_SHILOV = (0x1B << 6) | OPC_EXTR_W_DSP,
+ OPC_MTHLIP = (0x1F << 6) | OPC_EXTR_W_DSP,
+ OPC_WRDSP = (0x13 << 6) | OPC_EXTR_W_DSP,
+ OPC_RDDSP = (0x12 << 6) | OPC_EXTR_W_DSP,
+};
+
+#define MASK_ABSQ_S_QH(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+enum {
+ /* MIPS DSP Arithmetic Sub-class */
+ OPC_PRECEQ_L_PWL = (0x14 << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_PRECEQ_L_PWR = (0x15 << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_PRECEQ_PW_QHL = (0x0C << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_PRECEQ_PW_QHR = (0x0D << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_PRECEQ_PW_QHLA = (0x0E << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_PRECEQ_PW_QHRA = (0x0F << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_PRECEQU_QH_OBL = (0x04 << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_PRECEQU_QH_OBR = (0x05 << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_PRECEQU_QH_OBLA = (0x06 << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_PRECEQU_QH_OBRA = (0x07 << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_PRECEU_QH_OBL = (0x1C << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_PRECEU_QH_OBR = (0x1D << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_PRECEU_QH_OBLA = (0x1E << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_PRECEU_QH_OBRA = (0x1F << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_ABSQ_S_OB = (0x01 << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_ABSQ_S_PW = (0x11 << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_ABSQ_S_QH = (0x09 << 6) | OPC_ABSQ_S_QH_DSP,
+ /* DSP Bit/Manipulation Sub-class */
+ OPC_REPL_OB = (0x02 << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_REPL_PW = (0x12 << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_REPL_QH = (0x0A << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_REPLV_OB = (0x03 << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_REPLV_PW = (0x13 << 6) | OPC_ABSQ_S_QH_DSP,
+ OPC_REPLV_QH = (0x0B << 6) | OPC_ABSQ_S_QH_DSP,
+};
+
+#define MASK_ADDU_OB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+enum {
+ /* MIPS DSP Multiply Sub-class insns */
+ OPC_MULEQ_S_PW_QHL = (0x1C << 6) | OPC_ADDU_OB_DSP,
+ OPC_MULEQ_S_PW_QHR = (0x1D << 6) | OPC_ADDU_OB_DSP,
+ OPC_MULEU_S_QH_OBL = (0x06 << 6) | OPC_ADDU_OB_DSP,
+ OPC_MULEU_S_QH_OBR = (0x07 << 6) | OPC_ADDU_OB_DSP,
+ OPC_MULQ_RS_QH = (0x1F << 6) | OPC_ADDU_OB_DSP,
+ /* MIPS DSP Arithmetic Sub-class */
+ OPC_RADDU_L_OB = (0x14 << 6) | OPC_ADDU_OB_DSP,
+ OPC_SUBQ_PW = (0x13 << 6) | OPC_ADDU_OB_DSP,
+ OPC_SUBQ_S_PW = (0x17 << 6) | OPC_ADDU_OB_DSP,
+ OPC_SUBQ_QH = (0x0B << 6) | OPC_ADDU_OB_DSP,
+ OPC_SUBQ_S_QH = (0x0F << 6) | OPC_ADDU_OB_DSP,
+ OPC_SUBU_OB = (0x01 << 6) | OPC_ADDU_OB_DSP,
+ OPC_SUBU_S_OB = (0x05 << 6) | OPC_ADDU_OB_DSP,
+ OPC_SUBU_QH = (0x09 << 6) | OPC_ADDU_OB_DSP,
+ OPC_SUBU_S_QH = (0x0D << 6) | OPC_ADDU_OB_DSP,
+ OPC_SUBUH_OB = (0x19 << 6) | OPC_ADDU_OB_DSP,
+ OPC_SUBUH_R_OB = (0x1B << 6) | OPC_ADDU_OB_DSP,
+ OPC_ADDQ_PW = (0x12 << 6) | OPC_ADDU_OB_DSP,
+ OPC_ADDQ_S_PW = (0x16 << 6) | OPC_ADDU_OB_DSP,
+ OPC_ADDQ_QH = (0x0A << 6) | OPC_ADDU_OB_DSP,
+ OPC_ADDQ_S_QH = (0x0E << 6) | OPC_ADDU_OB_DSP,
+ OPC_ADDU_OB = (0x00 << 6) | OPC_ADDU_OB_DSP,
+ OPC_ADDU_S_OB = (0x04 << 6) | OPC_ADDU_OB_DSP,
+ OPC_ADDU_QH = (0x08 << 6) | OPC_ADDU_OB_DSP,
+ OPC_ADDU_S_QH = (0x0C << 6) | OPC_ADDU_OB_DSP,
+ OPC_ADDUH_OB = (0x18 << 6) | OPC_ADDU_OB_DSP,
+ OPC_ADDUH_R_OB = (0x1A << 6) | OPC_ADDU_OB_DSP,
+};
+
+#define MASK_CMPU_EQ_OB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+enum {
+ /* DSP Compare-Pick Sub-class */
+ OPC_CMP_EQ_PW = (0x10 << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_CMP_LT_PW = (0x11 << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_CMP_LE_PW = (0x12 << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_CMP_EQ_QH = (0x08 << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_CMP_LT_QH = (0x09 << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_CMP_LE_QH = (0x0A << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_CMPGDU_EQ_OB = (0x18 << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_CMPGDU_LT_OB = (0x19 << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_CMPGDU_LE_OB = (0x1A << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_CMPGU_EQ_OB = (0x04 << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_CMPGU_LT_OB = (0x05 << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_CMPGU_LE_OB = (0x06 << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_CMPU_EQ_OB = (0x00 << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_CMPU_LT_OB = (0x01 << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_CMPU_LE_OB = (0x02 << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_PACKRL_PW = (0x0E << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_PICK_OB = (0x03 << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_PICK_PW = (0x13 << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_PICK_QH = (0x0B << 6) | OPC_CMPU_EQ_OB_DSP,
+ /* MIPS DSP Arithmetic Sub-class */
+ OPC_PRECR_OB_QH = (0x0D << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_PRECR_SRA_QH_PW = (0x1E << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_PRECR_SRA_R_QH_PW = (0x1F << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_PRECRQ_OB_QH = (0x0C << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_PRECRQ_PW_L = (0x1C << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_PRECRQ_QH_PW = (0x14 << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_PRECRQ_RS_QH_PW = (0x15 << 6) | OPC_CMPU_EQ_OB_DSP,
+ OPC_PRECRQU_S_OB_QH = (0x0F << 6) | OPC_CMPU_EQ_OB_DSP,
+};
+
+#define MASK_DAPPEND(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+enum {
+ /* DSP Append Sub-class */
+ OPC_DAPPEND = (0x00 << 6) | OPC_DAPPEND_DSP,
+ OPC_PREPENDD = (0x03 << 6) | OPC_DAPPEND_DSP,
+ OPC_PREPENDW = (0x01 << 6) | OPC_DAPPEND_DSP,
+ OPC_DBALIGN = (0x10 << 6) | OPC_DAPPEND_DSP,
+};
+
+#define MASK_DEXTR_W(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+enum {
+ /* MIPS DSP Accumulator and DSPControl Access Sub-class */
+ OPC_DMTHLIP = (0x1F << 6) | OPC_DEXTR_W_DSP,
+ OPC_DSHILO = (0x1A << 6) | OPC_DEXTR_W_DSP,
+ OPC_DEXTP = (0x02 << 6) | OPC_DEXTR_W_DSP,
+ OPC_DEXTPDP = (0x0A << 6) | OPC_DEXTR_W_DSP,
+ OPC_DEXTPDPV = (0x0B << 6) | OPC_DEXTR_W_DSP,
+ OPC_DEXTPV = (0x03 << 6) | OPC_DEXTR_W_DSP,
+ OPC_DEXTR_L = (0x10 << 6) | OPC_DEXTR_W_DSP,
+ OPC_DEXTR_R_L = (0x14 << 6) | OPC_DEXTR_W_DSP,
+ OPC_DEXTR_RS_L = (0x16 << 6) | OPC_DEXTR_W_DSP,
+ OPC_DEXTR_W = (0x00 << 6) | OPC_DEXTR_W_DSP,
+ OPC_DEXTR_R_W = (0x04 << 6) | OPC_DEXTR_W_DSP,
+ OPC_DEXTR_RS_W = (0x06 << 6) | OPC_DEXTR_W_DSP,
+ OPC_DEXTR_S_H = (0x0E << 6) | OPC_DEXTR_W_DSP,
+ OPC_DEXTRV_L = (0x11 << 6) | OPC_DEXTR_W_DSP,
+ OPC_DEXTRV_R_L = (0x15 << 6) | OPC_DEXTR_W_DSP,
+ OPC_DEXTRV_RS_L = (0x17 << 6) | OPC_DEXTR_W_DSP,
+ OPC_DEXTRV_S_H = (0x0F << 6) | OPC_DEXTR_W_DSP,
+ OPC_DEXTRV_W = (0x01 << 6) | OPC_DEXTR_W_DSP,
+ OPC_DEXTRV_R_W = (0x05 << 6) | OPC_DEXTR_W_DSP,
+ OPC_DEXTRV_RS_W = (0x07 << 6) | OPC_DEXTR_W_DSP,
+ OPC_DSHILOV = (0x1B << 6) | OPC_DEXTR_W_DSP,
+};
+
+#define MASK_DINSV(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+enum {
+ /* DSP Bit/Manipulation Sub-class */
+ OPC_DINSV = (0x00 << 6) | OPC_DINSV_DSP,
+};
+
+#define MASK_DPAQ_W_QH(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+enum {
+ /* MIPS DSP Multiply Sub-class insns */
+ OPC_DMADD = (0x19 << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_DMADDU = (0x1D << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_DMSUB = (0x1B << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_DMSUBU = (0x1F << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_DPA_W_QH = (0x00 << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_DPAQ_S_W_QH = (0x04 << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_DPAQ_SA_L_PW = (0x0C << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_DPAU_H_OBL = (0x03 << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_DPAU_H_OBR = (0x07 << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_DPS_W_QH = (0x01 << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_DPSQ_S_W_QH = (0x05 << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_DPSQ_SA_L_PW = (0x0D << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_DPSU_H_OBL = (0x0B << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_DPSU_H_OBR = (0x0F << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_MAQ_S_L_PWL = (0x1C << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_MAQ_S_L_PWR = (0x1E << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_MAQ_S_W_QHLL = (0x14 << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_MAQ_SA_W_QHLL = (0x10 << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_MAQ_S_W_QHLR = (0x15 << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_MAQ_SA_W_QHLR = (0x11 << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_MAQ_S_W_QHRL = (0x16 << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_MAQ_SA_W_QHRL = (0x12 << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_MAQ_S_W_QHRR = (0x17 << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_MAQ_SA_W_QHRR = (0x13 << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_MULSAQ_S_L_PW = (0x0E << 6) | OPC_DPAQ_W_QH_DSP,
+ OPC_MULSAQ_S_W_QH = (0x06 << 6) | OPC_DPAQ_W_QH_DSP,
+};
+
+#define MASK_SHLL_OB(op) (MASK_SPECIAL3(op) | (op & (0x1F << 6)))
+enum {
+ /* MIPS DSP GPR-Based Shift Sub-class */
+ OPC_SHLL_PW = (0x10 << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHLL_S_PW = (0x14 << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHLLV_OB = (0x02 << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHLLV_PW = (0x12 << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHLLV_S_PW = (0x16 << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHLLV_QH = (0x0A << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHLLV_S_QH = (0x0E << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHRA_PW = (0x11 << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHRA_R_PW = (0x15 << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHRAV_OB = (0x06 << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHRAV_R_OB = (0x07 << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHRAV_PW = (0x13 << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHRAV_R_PW = (0x17 << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHRAV_QH = (0x0B << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHRAV_R_QH = (0x0F << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHRLV_OB = (0x03 << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHRLV_QH = (0x1B << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHLL_OB = (0x00 << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHLL_QH = (0x08 << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHLL_S_QH = (0x0C << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHRA_OB = (0x04 << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHRA_R_OB = (0x05 << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHRA_QH = (0x09 << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHRA_R_QH = (0x0D << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHRL_OB = (0x01 << 6) | OPC_SHLL_OB_DSP,
+ OPC_SHRL_QH = (0x19 << 6) | OPC_SHLL_OB_DSP,
+};
+
+/* Coprocessor 0 (rs field) */
+#define MASK_CP0(op) (MASK_OP_MAJOR(op) | (op & (0x1F << 21)))
+
+enum {
+ OPC_MFC0 = (0x00 << 21) | OPC_CP0,
+ OPC_DMFC0 = (0x01 << 21) | OPC_CP0,
+ OPC_MFHC0 = (0x02 << 21) | OPC_CP0,
+ OPC_MTC0 = (0x04 << 21) | OPC_CP0,
+ OPC_DMTC0 = (0x05 << 21) | OPC_CP0,
+ OPC_MTHC0 = (0x06 << 21) | OPC_CP0,
+ OPC_MFTR = (0x08 << 21) | OPC_CP0,
+ OPC_RDPGPR = (0x0A << 21) | OPC_CP0,
+ OPC_MFMC0 = (0x0B << 21) | OPC_CP0,
+ OPC_MTTR = (0x0C << 21) | OPC_CP0,
+ OPC_WRPGPR = (0x0E << 21) | OPC_CP0,
+ OPC_C0 = (0x10 << 21) | OPC_CP0,
+ OPC_C0_FIRST = (0x10 << 21) | OPC_CP0,
+ OPC_C0_LAST = (0x1F << 21) | OPC_CP0,
+};
+
+/* MFMC0 opcodes */
+#define MASK_MFMC0(op) (MASK_CP0(op) | (op & 0xFFFF))
+
+enum {
+ OPC_DMT = 0x01 | (0 << 5) | (0x0F << 6) | (0x01 << 11) | OPC_MFMC0,
+ OPC_EMT = 0x01 | (1 << 5) | (0x0F << 6) | (0x01 << 11) | OPC_MFMC0,
+ OPC_DVPE = 0x01 | (0 << 5) | OPC_MFMC0,
+ OPC_EVPE = 0x01 | (1 << 5) | OPC_MFMC0,
+ OPC_DI = (0 << 5) | (0x0C << 11) | OPC_MFMC0,
+ OPC_EI = (1 << 5) | (0x0C << 11) | OPC_MFMC0,
+ OPC_DVP = 0x04 | (0 << 3) | (1 << 5) | (0 << 11) | OPC_MFMC0,
+ OPC_EVP = 0x04 | (0 << 3) | (0 << 5) | (0 << 11) | OPC_MFMC0,
+};
+
+/* Coprocessor 0 (with rs == C0) */
+#define MASK_C0(op) (MASK_CP0(op) | (op & 0x3F))
+
+enum {
+ OPC_TLBR = 0x01 | OPC_C0,
+ OPC_TLBWI = 0x02 | OPC_C0,
+ OPC_TLBINV = 0x03 | OPC_C0,
+ OPC_TLBINVF = 0x04 | OPC_C0,
+ OPC_TLBWR = 0x06 | OPC_C0,
+ OPC_TLBP = 0x08 | OPC_C0,
+ OPC_RFE = 0x10 | OPC_C0,
+ OPC_ERET = 0x18 | OPC_C0,
+ OPC_DERET = 0x1F | OPC_C0,
+ OPC_WAIT = 0x20 | OPC_C0,
+};
+
+/* Coprocessor 1 (rs field) */
+#define MASK_CP1(op) (MASK_OP_MAJOR(op) | (op & (0x1F << 21)))
+
+/* Values for the fmt field in FP instructions */
+enum {
+ /* 0 - 15 are reserved */
+ FMT_S = 16, /* single fp */
+ FMT_D = 17, /* double fp */
+ FMT_E = 18, /* extended fp */
+ FMT_Q = 19, /* quad fp */
+ FMT_W = 20, /* 32-bit fixed */
+ FMT_L = 21, /* 64-bit fixed */
+ FMT_PS = 22, /* paired single fp */
+ /* 23 - 31 are reserved */
+};
+
+enum {
+ OPC_MFC1 = (0x00 << 21) | OPC_CP1,
+ OPC_DMFC1 = (0x01 << 21) | OPC_CP1,
+ OPC_CFC1 = (0x02 << 21) | OPC_CP1,
+ OPC_MFHC1 = (0x03 << 21) | OPC_CP1,
+ OPC_MTC1 = (0x04 << 21) | OPC_CP1,
+ OPC_DMTC1 = (0x05 << 21) | OPC_CP1,
+ OPC_CTC1 = (0x06 << 21) | OPC_CP1,
+ OPC_MTHC1 = (0x07 << 21) | OPC_CP1,
+ OPC_BC1 = (0x08 << 21) | OPC_CP1, /* bc */
+ OPC_BC1ANY2 = (0x09 << 21) | OPC_CP1,
+ OPC_BC1ANY4 = (0x0A << 21) | OPC_CP1,
+ OPC_BZ_V = (0x0B << 21) | OPC_CP1,
+ OPC_BNZ_V = (0x0F << 21) | OPC_CP1,
+ OPC_S_FMT = (FMT_S << 21) | OPC_CP1,
+ OPC_D_FMT = (FMT_D << 21) | OPC_CP1,
+ OPC_E_FMT = (FMT_E << 21) | OPC_CP1,
+ OPC_Q_FMT = (FMT_Q << 21) | OPC_CP1,
+ OPC_W_FMT = (FMT_W << 21) | OPC_CP1,
+ OPC_L_FMT = (FMT_L << 21) | OPC_CP1,
+ OPC_PS_FMT = (FMT_PS << 21) | OPC_CP1,
+ OPC_BC1EQZ = (0x09 << 21) | OPC_CP1,
+ OPC_BC1NEZ = (0x0D << 21) | OPC_CP1,
+ OPC_BZ_B = (0x18 << 21) | OPC_CP1,
+ OPC_BZ_H = (0x19 << 21) | OPC_CP1,
+ OPC_BZ_W = (0x1A << 21) | OPC_CP1,
+ OPC_BZ_D = (0x1B << 21) | OPC_CP1,
+ OPC_BNZ_B = (0x1C << 21) | OPC_CP1,
+ OPC_BNZ_H = (0x1D << 21) | OPC_CP1,
+ OPC_BNZ_W = (0x1E << 21) | OPC_CP1,
+ OPC_BNZ_D = (0x1F << 21) | OPC_CP1,
+};
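+
+/*
+ * Editorial note: OPC_BC1ANY2 (MIPS-3D) and the R6 OPC_BC1EQZ above encode
+ * to the same value (rs == 0x09); presumably the decoder tells them apart
+ * using the active ISA/ASE flags rather than the opcode value alone.
+ */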
+
+#define MASK_CP1_FUNC(op) (MASK_CP1(op) | (op & 0x3F))
+#define MASK_BC1(op) (MASK_CP1(op) | (op & (0x3 << 16)))
+
+enum {
+ OPC_BC1F = (0x00 << 16) | OPC_BC1,
+ OPC_BC1T = (0x01 << 16) | OPC_BC1,
+ OPC_BC1FL = (0x02 << 16) | OPC_BC1,
+ OPC_BC1TL = (0x03 << 16) | OPC_BC1,
+};
+
+enum {
+ OPC_BC1FANY2 = (0x00 << 16) | OPC_BC1ANY2,
+ OPC_BC1TANY2 = (0x01 << 16) | OPC_BC1ANY2,
+};
+
+enum {
+ OPC_BC1FANY4 = (0x00 << 16) | OPC_BC1ANY4,
+ OPC_BC1TANY4 = (0x01 << 16) | OPC_BC1ANY4,
+};
+
+#define MASK_CP2(op) (MASK_OP_MAJOR(op) | (op & (0x1F << 21)))
+
+enum {
+ OPC_MFC2 = (0x00 << 21) | OPC_CP2,
+ OPC_DMFC2 = (0x01 << 21) | OPC_CP2,
+ OPC_CFC2 = (0x02 << 21) | OPC_CP2,
+ OPC_MFHC2 = (0x03 << 21) | OPC_CP2,
+ OPC_MTC2 = (0x04 << 21) | OPC_CP2,
+ OPC_DMTC2 = (0x05 << 21) | OPC_CP2,
+ OPC_CTC2 = (0x06 << 21) | OPC_CP2,
+ OPC_MTHC2 = (0x07 << 21) | OPC_CP2,
+ OPC_BC2 = (0x08 << 21) | OPC_CP2,
+ OPC_BC2EQZ = (0x09 << 21) | OPC_CP2,
+ OPC_BC2NEZ = (0x0D << 21) | OPC_CP2,
+};
+
+#define MASK_LMI(op) (MASK_OP_MAJOR(op) | (op & (0x1F << 21)) | (op & 0x1F))
+
+enum {
+ OPC_PADDSH = (24 << 21) | (0x00) | OPC_CP2,
+ OPC_PADDUSH = (25 << 21) | (0x00) | OPC_CP2,
+ OPC_PADDH = (26 << 21) | (0x00) | OPC_CP2,
+ OPC_PADDW = (27 << 21) | (0x00) | OPC_CP2,
+ OPC_PADDSB = (28 << 21) | (0x00) | OPC_CP2,
+ OPC_PADDUSB = (29 << 21) | (0x00) | OPC_CP2,
+ OPC_PADDB = (30 << 21) | (0x00) | OPC_CP2,
+ OPC_PADDD = (31 << 21) | (0x00) | OPC_CP2,
+
+ OPC_PSUBSH = (24 << 21) | (0x01) | OPC_CP2,
+ OPC_PSUBUSH = (25 << 21) | (0x01) | OPC_CP2,
+ OPC_PSUBH = (26 << 21) | (0x01) | OPC_CP2,
+ OPC_PSUBW = (27 << 21) | (0x01) | OPC_CP2,
+ OPC_PSUBSB = (28 << 21) | (0x01) | OPC_CP2,
+ OPC_PSUBUSB = (29 << 21) | (0x01) | OPC_CP2,
+ OPC_PSUBB = (30 << 21) | (0x01) | OPC_CP2,
+ OPC_PSUBD = (31 << 21) | (0x01) | OPC_CP2,
+
+ OPC_PSHUFH = (24 << 21) | (0x02) | OPC_CP2,
+ OPC_PACKSSWH = (25 << 21) | (0x02) | OPC_CP2,
+ OPC_PACKSSHB = (26 << 21) | (0x02) | OPC_CP2,
+ OPC_PACKUSHB = (27 << 21) | (0x02) | OPC_CP2,
+ OPC_XOR_CP2 = (28 << 21) | (0x02) | OPC_CP2,
+ OPC_NOR_CP2 = (29 << 21) | (0x02) | OPC_CP2,
+ OPC_AND_CP2 = (30 << 21) | (0x02) | OPC_CP2,
+ OPC_PANDN = (31 << 21) | (0x02) | OPC_CP2,
+
+ OPC_PUNPCKLHW = (24 << 21) | (0x03) | OPC_CP2,
+ OPC_PUNPCKHHW = (25 << 21) | (0x03) | OPC_CP2,
+ OPC_PUNPCKLBH = (26 << 21) | (0x03) | OPC_CP2,
+ OPC_PUNPCKHBH = (27 << 21) | (0x03) | OPC_CP2,
+ OPC_PINSRH_0 = (28 << 21) | (0x03) | OPC_CP2,
+ OPC_PINSRH_1 = (29 << 21) | (0x03) | OPC_CP2,
+ OPC_PINSRH_2 = (30 << 21) | (0x03) | OPC_CP2,
+ OPC_PINSRH_3 = (31 << 21) | (0x03) | OPC_CP2,
+
+ OPC_PAVGH = (24 << 21) | (0x08) | OPC_CP2,
+ OPC_PAVGB = (25 << 21) | (0x08) | OPC_CP2,
+ OPC_PMAXSH = (26 << 21) | (0x08) | OPC_CP2,
+ OPC_PMINSH = (27 << 21) | (0x08) | OPC_CP2,
+ OPC_PMAXUB = (28 << 21) | (0x08) | OPC_CP2,
+ OPC_PMINUB = (29 << 21) | (0x08) | OPC_CP2,
+
+ OPC_PCMPEQW = (24 << 21) | (0x09) | OPC_CP2,
+ OPC_PCMPGTW = (25 << 21) | (0x09) | OPC_CP2,
+ OPC_PCMPEQH = (26 << 21) | (0x09) | OPC_CP2,
+ OPC_PCMPGTH = (27 << 21) | (0x09) | OPC_CP2,
+ OPC_PCMPEQB = (28 << 21) | (0x09) | OPC_CP2,
+ OPC_PCMPGTB = (29 << 21) | (0x09) | OPC_CP2,
+
+ OPC_PSLLW = (24 << 21) | (0x0A) | OPC_CP2,
+ OPC_PSLLH = (25 << 21) | (0x0A) | OPC_CP2,
+ OPC_PMULLH = (26 << 21) | (0x0A) | OPC_CP2,
+ OPC_PMULHH = (27 << 21) | (0x0A) | OPC_CP2,
+ OPC_PMULUW = (28 << 21) | (0x0A) | OPC_CP2,
+ OPC_PMULHUH = (29 << 21) | (0x0A) | OPC_CP2,
+
+ OPC_PSRLW = (24 << 21) | (0x0B) | OPC_CP2,
+ OPC_PSRLH = (25 << 21) | (0x0B) | OPC_CP2,
+ OPC_PSRAW = (26 << 21) | (0x0B) | OPC_CP2,
+ OPC_PSRAH = (27 << 21) | (0x0B) | OPC_CP2,
+ OPC_PUNPCKLWD = (28 << 21) | (0x0B) | OPC_CP2,
+ OPC_PUNPCKHWD = (29 << 21) | (0x0B) | OPC_CP2,
+
+ OPC_ADDU_CP2 = (24 << 21) | (0x0C) | OPC_CP2,
+ OPC_OR_CP2 = (25 << 21) | (0x0C) | OPC_CP2,
+ OPC_ADD_CP2 = (26 << 21) | (0x0C) | OPC_CP2,
+ OPC_DADD_CP2 = (27 << 21) | (0x0C) | OPC_CP2,
+ OPC_SEQU_CP2 = (28 << 21) | (0x0C) | OPC_CP2,
+ OPC_SEQ_CP2 = (29 << 21) | (0x0C) | OPC_CP2,
+
+ OPC_SUBU_CP2 = (24 << 21) | (0x0D) | OPC_CP2,
+ OPC_PASUBUB = (25 << 21) | (0x0D) | OPC_CP2,
+ OPC_SUB_CP2 = (26 << 21) | (0x0D) | OPC_CP2,
+ OPC_DSUB_CP2 = (27 << 21) | (0x0D) | OPC_CP2,
+ OPC_SLTU_CP2 = (28 << 21) | (0x0D) | OPC_CP2,
+ OPC_SLT_CP2 = (29 << 21) | (0x0D) | OPC_CP2,
+
+ OPC_SLL_CP2 = (24 << 21) | (0x0E) | OPC_CP2,
+ OPC_DSLL_CP2 = (25 << 21) | (0x0E) | OPC_CP2,
+ OPC_PEXTRH = (26 << 21) | (0x0E) | OPC_CP2,
+ OPC_PMADDHW = (27 << 21) | (0x0E) | OPC_CP2,
+ OPC_SLEU_CP2 = (28 << 21) | (0x0E) | OPC_CP2,
+ OPC_SLE_CP2 = (29 << 21) | (0x0E) | OPC_CP2,
+
+ OPC_SRL_CP2 = (24 << 21) | (0x0F) | OPC_CP2,
+ OPC_DSRL_CP2 = (25 << 21) | (0x0F) | OPC_CP2,
+ OPC_SRA_CP2 = (26 << 21) | (0x0F) | OPC_CP2,
+ OPC_DSRA_CP2 = (27 << 21) | (0x0F) | OPC_CP2,
+ OPC_BIADD = (28 << 21) | (0x0F) | OPC_CP2,
+ OPC_PMOVMSKB = (29 << 21) | (0x0F) | OPC_CP2,
+};
+
+
+#define MASK_CP3(op) (MASK_OP_MAJOR(op) | (op & 0x3F))
+
+enum {
+ OPC_LWXC1 = 0x00 | OPC_CP3,
+ OPC_LDXC1 = 0x01 | OPC_CP3,
+ OPC_LUXC1 = 0x05 | OPC_CP3,
+ OPC_SWXC1 = 0x08 | OPC_CP3,
+ OPC_SDXC1 = 0x09 | OPC_CP3,
+ OPC_SUXC1 = 0x0D | OPC_CP3,
+ OPC_PREFX = 0x0F | OPC_CP3,
+ OPC_ALNV_PS = 0x1E | OPC_CP3,
+ OPC_MADD_S = 0x20 | OPC_CP3,
+ OPC_MADD_D = 0x21 | OPC_CP3,
+ OPC_MADD_PS = 0x26 | OPC_CP3,
+ OPC_MSUB_S = 0x28 | OPC_CP3,
+ OPC_MSUB_D = 0x29 | OPC_CP3,
+ OPC_MSUB_PS = 0x2E | OPC_CP3,
+ OPC_NMADD_S = 0x30 | OPC_CP3,
+ OPC_NMADD_D = 0x31 | OPC_CP3,
+ OPC_NMADD_PS = 0x36 | OPC_CP3,
+ OPC_NMSUB_S = 0x38 | OPC_CP3,
+ OPC_NMSUB_D = 0x39 | OPC_CP3,
+ OPC_NMSUB_PS = 0x3E | OPC_CP3,
+};
+
+/* MSA Opcodes */
+#define MASK_MSA_MINOR(op) (MASK_OP_MAJOR(op) | (op & 0x3F))
+enum {
+ OPC_MSA_I8_00 = 0x00 | OPC_MSA,
+ OPC_MSA_I8_01 = 0x01 | OPC_MSA,
+ OPC_MSA_I8_02 = 0x02 | OPC_MSA,
+ OPC_MSA_I5_06 = 0x06 | OPC_MSA,
+ OPC_MSA_I5_07 = 0x07 | OPC_MSA,
+ OPC_MSA_BIT_09 = 0x09 | OPC_MSA,
+ OPC_MSA_BIT_0A = 0x0A | OPC_MSA,
+ OPC_MSA_3R_0D = 0x0D | OPC_MSA,
+ OPC_MSA_3R_0E = 0x0E | OPC_MSA,
+ OPC_MSA_3R_0F = 0x0F | OPC_MSA,
+ OPC_MSA_3R_10 = 0x10 | OPC_MSA,
+ OPC_MSA_3R_11 = 0x11 | OPC_MSA,
+ OPC_MSA_3R_12 = 0x12 | OPC_MSA,
+ OPC_MSA_3R_13 = 0x13 | OPC_MSA,
+ OPC_MSA_3R_14 = 0x14 | OPC_MSA,
+ OPC_MSA_3R_15 = 0x15 | OPC_MSA,
+ OPC_MSA_ELM = 0x19 | OPC_MSA,
+ OPC_MSA_3RF_1A = 0x1A | OPC_MSA,
+ OPC_MSA_3RF_1B = 0x1B | OPC_MSA,
+ OPC_MSA_3RF_1C = 0x1C | OPC_MSA,
+ OPC_MSA_VEC = 0x1E | OPC_MSA,
+
+ /* MI10 instruction */
+ OPC_LD_B = (0x20) | OPC_MSA,
+ OPC_LD_H = (0x21) | OPC_MSA,
+ OPC_LD_W = (0x22) | OPC_MSA,
+ OPC_LD_D = (0x23) | OPC_MSA,
+ OPC_ST_B = (0x24) | OPC_MSA,
+ OPC_ST_H = (0x25) | OPC_MSA,
+ OPC_ST_W = (0x26) | OPC_MSA,
+ OPC_ST_D = (0x27) | OPC_MSA,
+};
+
+enum {
+ /* I5 instruction df(bits 22..21) = _b, _h, _w, _d */
+ OPC_ADDVI_df = (0x0 << 23) | OPC_MSA_I5_06,
+ OPC_CEQI_df = (0x0 << 23) | OPC_MSA_I5_07,
+ OPC_SUBVI_df = (0x1 << 23) | OPC_MSA_I5_06,
+ OPC_MAXI_S_df = (0x2 << 23) | OPC_MSA_I5_06,
+ OPC_CLTI_S_df = (0x2 << 23) | OPC_MSA_I5_07,
+ OPC_MAXI_U_df = (0x3 << 23) | OPC_MSA_I5_06,
+ OPC_CLTI_U_df = (0x3 << 23) | OPC_MSA_I5_07,
+ OPC_MINI_S_df = (0x4 << 23) | OPC_MSA_I5_06,
+ OPC_CLEI_S_df = (0x4 << 23) | OPC_MSA_I5_07,
+ OPC_MINI_U_df = (0x5 << 23) | OPC_MSA_I5_06,
+ OPC_CLEI_U_df = (0x5 << 23) | OPC_MSA_I5_07,
+ OPC_LDI_df = (0x6 << 23) | OPC_MSA_I5_07,
+
+ /* I8 instruction */
+ OPC_ANDI_B = (0x0 << 24) | OPC_MSA_I8_00,
+ OPC_BMNZI_B = (0x0 << 24) | OPC_MSA_I8_01,
+ OPC_SHF_B = (0x0 << 24) | OPC_MSA_I8_02,
+ OPC_ORI_B = (0x1 << 24) | OPC_MSA_I8_00,
+ OPC_BMZI_B = (0x1 << 24) | OPC_MSA_I8_01,
+ OPC_SHF_H = (0x1 << 24) | OPC_MSA_I8_02,
+ OPC_NORI_B = (0x2 << 24) | OPC_MSA_I8_00,
+ OPC_BSELI_B = (0x2 << 24) | OPC_MSA_I8_01,
+ OPC_SHF_W = (0x2 << 24) | OPC_MSA_I8_02,
+ OPC_XORI_B = (0x3 << 24) | OPC_MSA_I8_00,
+
+ /* VEC/2R/2RF instruction */
+ OPC_AND_V = (0x00 << 21) | OPC_MSA_VEC,
+ OPC_OR_V = (0x01 << 21) | OPC_MSA_VEC,
+ OPC_NOR_V = (0x02 << 21) | OPC_MSA_VEC,
+ OPC_XOR_V = (0x03 << 21) | OPC_MSA_VEC,
+ OPC_BMNZ_V = (0x04 << 21) | OPC_MSA_VEC,
+ OPC_BMZ_V = (0x05 << 21) | OPC_MSA_VEC,
+ OPC_BSEL_V = (0x06 << 21) | OPC_MSA_VEC,
+
+ OPC_MSA_2R = (0x18 << 21) | OPC_MSA_VEC,
+ OPC_MSA_2RF = (0x19 << 21) | OPC_MSA_VEC,
+
+ /* 2R instruction df(bits 17..16) = _b, _h, _w, _d */
+ OPC_FILL_df = (0x00 << 18) | OPC_MSA_2R,
+ OPC_PCNT_df = (0x01 << 18) | OPC_MSA_2R,
+ OPC_NLOC_df = (0x02 << 18) | OPC_MSA_2R,
+ OPC_NLZC_df = (0x03 << 18) | OPC_MSA_2R,
+
+ /* 2RF instruction df(bit 16) = _w, _d */
+ OPC_FCLASS_df = (0x00 << 17) | OPC_MSA_2RF,
+ OPC_FTRUNC_S_df = (0x01 << 17) | OPC_MSA_2RF,
+ OPC_FTRUNC_U_df = (0x02 << 17) | OPC_MSA_2RF,
+ OPC_FSQRT_df = (0x03 << 17) | OPC_MSA_2RF,
+ OPC_FRSQRT_df = (0x04 << 17) | OPC_MSA_2RF,
+ OPC_FRCP_df = (0x05 << 17) | OPC_MSA_2RF,
+ OPC_FRINT_df = (0x06 << 17) | OPC_MSA_2RF,
+ OPC_FLOG2_df = (0x07 << 17) | OPC_MSA_2RF,
+ OPC_FEXUPL_df = (0x08 << 17) | OPC_MSA_2RF,
+ OPC_FEXUPR_df = (0x09 << 17) | OPC_MSA_2RF,
+ OPC_FFQL_df = (0x0A << 17) | OPC_MSA_2RF,
+ OPC_FFQR_df = (0x0B << 17) | OPC_MSA_2RF,
+ OPC_FTINT_S_df = (0x0C << 17) | OPC_MSA_2RF,
+ OPC_FTINT_U_df = (0x0D << 17) | OPC_MSA_2RF,
+ OPC_FFINT_S_df = (0x0E << 17) | OPC_MSA_2RF,
+ OPC_FFINT_U_df = (0x0F << 17) | OPC_MSA_2RF,
+
+ /* 3R instruction df(bits 22..21) = _b, _h, _w, d */
+ OPC_SLL_df = (0x0 << 23) | OPC_MSA_3R_0D,
+ OPC_ADDV_df = (0x0 << 23) | OPC_MSA_3R_0E,
+ OPC_CEQ_df = (0x0 << 23) | OPC_MSA_3R_0F,
+ OPC_ADD_A_df = (0x0 << 23) | OPC_MSA_3R_10,
+ OPC_SUBS_S_df = (0x0 << 23) | OPC_MSA_3R_11,
+ OPC_MULV_df = (0x0 << 23) | OPC_MSA_3R_12,
+ OPC_DOTP_S_df = (0x0 << 23) | OPC_MSA_3R_13,
+ OPC_SLD_df = (0x0 << 23) | OPC_MSA_3R_14,
+ OPC_VSHF_df = (0x0 << 23) | OPC_MSA_3R_15,
+ OPC_SRA_df = (0x1 << 23) | OPC_MSA_3R_0D,
+ OPC_SUBV_df = (0x1 << 23) | OPC_MSA_3R_0E,
+ OPC_ADDS_A_df = (0x1 << 23) | OPC_MSA_3R_10,
+ OPC_SUBS_U_df = (0x1 << 23) | OPC_MSA_3R_11,
+ OPC_MADDV_df = (0x1 << 23) | OPC_MSA_3R_12,
+ OPC_DOTP_U_df = (0x1 << 23) | OPC_MSA_3R_13,
+ OPC_SPLAT_df = (0x1 << 23) | OPC_MSA_3R_14,
+ OPC_SRAR_df = (0x1 << 23) | OPC_MSA_3R_15,
+ OPC_SRL_df = (0x2 << 23) | OPC_MSA_3R_0D,
+ OPC_MAX_S_df = (0x2 << 23) | OPC_MSA_3R_0E,
+ OPC_CLT_S_df = (0x2 << 23) | OPC_MSA_3R_0F,
+ OPC_ADDS_S_df = (0x2 << 23) | OPC_MSA_3R_10,
+ OPC_SUBSUS_U_df = (0x2 << 23) | OPC_MSA_3R_11,
+ OPC_MSUBV_df = (0x2 << 23) | OPC_MSA_3R_12,
+ OPC_DPADD_S_df = (0x2 << 23) | OPC_MSA_3R_13,
+ OPC_PCKEV_df = (0x2 << 23) | OPC_MSA_3R_14,
+ OPC_SRLR_df = (0x2 << 23) | OPC_MSA_3R_15,
+ OPC_BCLR_df = (0x3 << 23) | OPC_MSA_3R_0D,
+ OPC_MAX_U_df = (0x3 << 23) | OPC_MSA_3R_0E,
+ OPC_CLT_U_df = (0x3 << 23) | OPC_MSA_3R_0F,
+ OPC_ADDS_U_df = (0x3 << 23) | OPC_MSA_3R_10,
+ OPC_SUBSUU_S_df = (0x3 << 23) | OPC_MSA_3R_11,
+ OPC_DPADD_U_df = (0x3 << 23) | OPC_MSA_3R_13,
+ OPC_PCKOD_df = (0x3 << 23) | OPC_MSA_3R_14,
+ OPC_BSET_df = (0x4 << 23) | OPC_MSA_3R_0D,
+ OPC_MIN_S_df = (0x4 << 23) | OPC_MSA_3R_0E,
+ OPC_CLE_S_df = (0x4 << 23) | OPC_MSA_3R_0F,
+ OPC_AVE_S_df = (0x4 << 23) | OPC_MSA_3R_10,
+ OPC_ASUB_S_df = (0x4 << 23) | OPC_MSA_3R_11,
+ OPC_DIV_S_df = (0x4 << 23) | OPC_MSA_3R_12,
+ OPC_DPSUB_S_df = (0x4 << 23) | OPC_MSA_3R_13,
+ OPC_ILVL_df = (0x4 << 23) | OPC_MSA_3R_14,
+ OPC_HADD_S_df = (0x4 << 23) | OPC_MSA_3R_15,
+ OPC_BNEG_df = (0x5 << 23) | OPC_MSA_3R_0D,
+ OPC_MIN_U_df = (0x5 << 23) | OPC_MSA_3R_0E,
+ OPC_CLE_U_df = (0x5 << 23) | OPC_MSA_3R_0F,
+ OPC_AVE_U_df = (0x5 << 23) | OPC_MSA_3R_10,
+ OPC_ASUB_U_df = (0x5 << 23) | OPC_MSA_3R_11,
+ OPC_DIV_U_df = (0x5 << 23) | OPC_MSA_3R_12,
+ OPC_DPSUB_U_df = (0x5 << 23) | OPC_MSA_3R_13,
+ OPC_ILVR_df = (0x5 << 23) | OPC_MSA_3R_14,
+ OPC_HADD_U_df = (0x5 << 23) | OPC_MSA_3R_15,
+ OPC_BINSL_df = (0x6 << 23) | OPC_MSA_3R_0D,
+ OPC_MAX_A_df = (0x6 << 23) | OPC_MSA_3R_0E,
+ OPC_AVER_S_df = (0x6 << 23) | OPC_MSA_3R_10,
+ OPC_MOD_S_df = (0x6 << 23) | OPC_MSA_3R_12,
+ OPC_ILVEV_df = (0x6 << 23) | OPC_MSA_3R_14,
+ OPC_HSUB_S_df = (0x6 << 23) | OPC_MSA_3R_15,
+ OPC_BINSR_df = (0x7 << 23) | OPC_MSA_3R_0D,
+ OPC_MIN_A_df = (0x7 << 23) | OPC_MSA_3R_0E,
+ OPC_AVER_U_df = (0x7 << 23) | OPC_MSA_3R_10,
+ OPC_MOD_U_df = (0x7 << 23) | OPC_MSA_3R_12,
+ OPC_ILVOD_df = (0x7 << 23) | OPC_MSA_3R_14,
+ OPC_HSUB_U_df = (0x7 << 23) | OPC_MSA_3R_15,
+
+ /* ELM instructions df(bits 21..16) = _b, _h, _w, _d */
+ OPC_SLDI_df = (0x0 << 22) | (0x00 << 16) | OPC_MSA_ELM,
+ OPC_CTCMSA = (0x0 << 22) | (0x3E << 16) | OPC_MSA_ELM,
+ OPC_SPLATI_df = (0x1 << 22) | (0x00 << 16) | OPC_MSA_ELM,
+ OPC_CFCMSA = (0x1 << 22) | (0x3E << 16) | OPC_MSA_ELM,
+ OPC_COPY_S_df = (0x2 << 22) | (0x00 << 16) | OPC_MSA_ELM,
+ OPC_MOVE_V = (0x2 << 22) | (0x3E << 16) | OPC_MSA_ELM,
+ OPC_COPY_U_df = (0x3 << 22) | (0x00 << 16) | OPC_MSA_ELM,
+ OPC_INSERT_df = (0x4 << 22) | (0x00 << 16) | OPC_MSA_ELM,
+ OPC_INSVE_df = (0x5 << 22) | (0x00 << 16) | OPC_MSA_ELM,
+
+ /* 3RF instruction _df(bit 21) = _w, _d */
+ OPC_FCAF_df = (0x0 << 22) | OPC_MSA_3RF_1A,
+ OPC_FADD_df = (0x0 << 22) | OPC_MSA_3RF_1B,
+ OPC_FCUN_df = (0x1 << 22) | OPC_MSA_3RF_1A,
+ OPC_FSUB_df = (0x1 << 22) | OPC_MSA_3RF_1B,
+ OPC_FCOR_df = (0x1 << 22) | OPC_MSA_3RF_1C,
+ OPC_FCEQ_df = (0x2 << 22) | OPC_MSA_3RF_1A,
+ OPC_FMUL_df = (0x2 << 22) | OPC_MSA_3RF_1B,
+ OPC_FCUNE_df = (0x2 << 22) | OPC_MSA_3RF_1C,
+ OPC_FCUEQ_df = (0x3 << 22) | OPC_MSA_3RF_1A,
+ OPC_FDIV_df = (0x3 << 22) | OPC_MSA_3RF_1B,
+ OPC_FCNE_df = (0x3 << 22) | OPC_MSA_3RF_1C,
+ OPC_FCLT_df = (0x4 << 22) | OPC_MSA_3RF_1A,
+ OPC_FMADD_df = (0x4 << 22) | OPC_MSA_3RF_1B,
+ OPC_MUL_Q_df = (0x4 << 22) | OPC_MSA_3RF_1C,
+ OPC_FCULT_df = (0x5 << 22) | OPC_MSA_3RF_1A,
+ OPC_FMSUB_df = (0x5 << 22) | OPC_MSA_3RF_1B,
+ OPC_MADD_Q_df = (0x5 << 22) | OPC_MSA_3RF_1C,
+ OPC_FCLE_df = (0x6 << 22) | OPC_MSA_3RF_1A,
+ OPC_MSUB_Q_df = (0x6 << 22) | OPC_MSA_3RF_1C,
+ OPC_FCULE_df = (0x7 << 22) | OPC_MSA_3RF_1A,
+ OPC_FEXP2_df = (0x7 << 22) | OPC_MSA_3RF_1B,
+ OPC_FSAF_df = (0x8 << 22) | OPC_MSA_3RF_1A,
+ OPC_FEXDO_df = (0x8 << 22) | OPC_MSA_3RF_1B,
+ OPC_FSUN_df = (0x9 << 22) | OPC_MSA_3RF_1A,
+ OPC_FSOR_df = (0x9 << 22) | OPC_MSA_3RF_1C,
+ OPC_FSEQ_df = (0xA << 22) | OPC_MSA_3RF_1A,
+ OPC_FTQ_df = (0xA << 22) | OPC_MSA_3RF_1B,
+ OPC_FSUNE_df = (0xA << 22) | OPC_MSA_3RF_1C,
+ OPC_FSUEQ_df = (0xB << 22) | OPC_MSA_3RF_1A,
+ OPC_FSNE_df = (0xB << 22) | OPC_MSA_3RF_1C,
+ OPC_FSLT_df = (0xC << 22) | OPC_MSA_3RF_1A,
+ OPC_FMIN_df = (0xC << 22) | OPC_MSA_3RF_1B,
+ OPC_MULR_Q_df = (0xC << 22) | OPC_MSA_3RF_1C,
+ OPC_FSULT_df = (0xD << 22) | OPC_MSA_3RF_1A,
+ OPC_FMIN_A_df = (0xD << 22) | OPC_MSA_3RF_1B,
+ OPC_MADDR_Q_df = (0xD << 22) | OPC_MSA_3RF_1C,
+ OPC_FSLE_df = (0xE << 22) | OPC_MSA_3RF_1A,
+ OPC_FMAX_df = (0xE << 22) | OPC_MSA_3RF_1B,
+ OPC_MSUBR_Q_df = (0xE << 22) | OPC_MSA_3RF_1C,
+ OPC_FSULE_df = (0xF << 22) | OPC_MSA_3RF_1A,
+ OPC_FMAX_A_df = (0xF << 22) | OPC_MSA_3RF_1B,
+
+ /* BIT instruction df(bits 22..16) = _B _H _W _D */
+ OPC_SLLI_df = (0x0 << 23) | OPC_MSA_BIT_09,
+ OPC_SAT_S_df = (0x0 << 23) | OPC_MSA_BIT_0A,
+ OPC_SRAI_df = (0x1 << 23) | OPC_MSA_BIT_09,
+ OPC_SAT_U_df = (0x1 << 23) | OPC_MSA_BIT_0A,
+ OPC_SRLI_df = (0x2 << 23) | OPC_MSA_BIT_09,
+ OPC_SRARI_df = (0x2 << 23) | OPC_MSA_BIT_0A,
+ OPC_BCLRI_df = (0x3 << 23) | OPC_MSA_BIT_09,
+ OPC_SRLRI_df = (0x3 << 23) | OPC_MSA_BIT_0A,
+ OPC_BSETI_df = (0x4 << 23) | OPC_MSA_BIT_09,
+ OPC_BNEGI_df = (0x5 << 23) | OPC_MSA_BIT_09,
+ OPC_BINSLI_df = (0x6 << 23) | OPC_MSA_BIT_09,
+ OPC_BINSRI_df = (0x7 << 23) | OPC_MSA_BIT_09,
+};
+
+/* global register indices */
+static TCGv_env cpu_env;
+static TCGv cpu_gpr[32], cpu_PC;
+static TCGv cpu_HI[MIPS_DSP_ACC], cpu_LO[MIPS_DSP_ACC];
+static TCGv cpu_dspctrl, btarget, bcond;
+static TCGv_i32 hflags;
+static TCGv_i32 fpu_fcr0, fpu_fcr31;
+static TCGv_i64 fpu_f64[32];
+static TCGv_i64 msa_wr_d[64];
+
+#include "exec/gen-icount.h"
+
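+/*
+ * Naming sketch for the wrappers below (editorial note): gen_helper_<R>e<N>i
+ * calls gen_helper_<name>() with cpu_env, N TCGv arguments and one trailing
+ * constant that is boxed into a temporary TCGv_i32; R is 1 when the helper
+ * also produces a result.  For example, with a hypothetical helper "foo",
+ *   gen_helper_0e1i(foo, t0, 4);
+ * expands roughly to
+ *   TCGv_i32 tmp = tcg_const_i32(4);
+ *   gen_helper_foo(cpu_env, t0, tmp);
+ *   tcg_temp_free_i32(tmp);
+ */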
+#define gen_helper_0e0i(name, arg) do { \
+ TCGv_i32 helper_tmp = tcg_const_i32(arg); \
+ gen_helper_##name(cpu_env, helper_tmp); \
+ tcg_temp_free_i32(helper_tmp); \
+ } while(0)
+
+#define gen_helper_0e1i(name, arg1, arg2) do { \
+ TCGv_i32 helper_tmp = tcg_const_i32(arg2); \
+ gen_helper_##name(cpu_env, arg1, helper_tmp); \
+ tcg_temp_free_i32(helper_tmp); \
+ } while(0)
+
+#define gen_helper_1e0i(name, ret, arg1) do { \
+ TCGv_i32 helper_tmp = tcg_const_i32(arg1); \
+ gen_helper_##name(ret, cpu_env, helper_tmp); \
+ tcg_temp_free_i32(helper_tmp); \
+ } while(0)
+
+#define gen_helper_1e1i(name, ret, arg1, arg2) do { \
+ TCGv_i32 helper_tmp = tcg_const_i32(arg2); \
+ gen_helper_##name(ret, cpu_env, arg1, helper_tmp); \
+ tcg_temp_free_i32(helper_tmp); \
+ } while(0)
+
+#define gen_helper_0e2i(name, arg1, arg2, arg3) do { \
+ TCGv_i32 helper_tmp = tcg_const_i32(arg3); \
+ gen_helper_##name(cpu_env, arg1, arg2, helper_tmp); \
+ tcg_temp_free_i32(helper_tmp); \
+ } while(0)
+
+#define gen_helper_1e2i(name, ret, arg1, arg2, arg3) do { \
+ TCGv_i32 helper_tmp = tcg_const_i32(arg3); \
+ gen_helper_##name(ret, cpu_env, arg1, arg2, helper_tmp); \
+ tcg_temp_free_i32(helper_tmp); \
+ } while(0)
+
+#define gen_helper_0e3i(name, arg1, arg2, arg3, arg4) do { \
+ TCGv_i32 helper_tmp = tcg_const_i32(arg4); \
+ gen_helper_##name(cpu_env, arg1, arg2, arg3, helper_tmp); \
+ tcg_temp_free_i32(helper_tmp); \
+ } while(0)
+
+typedef struct DisasContext {
+ struct TranslationBlock *tb;
+ target_ulong pc, saved_pc;
+ uint32_t opcode;
+ int singlestep_enabled;
+ int insn_flags;
+ int32_t CP0_Config1;
+ /* Routine used to access memory */
+ int mem_idx;
+ TCGMemOp default_tcg_memop_mask;
+ uint32_t hflags, saved_hflags;
+ int bstate;
+ target_ulong btarget;
+ bool ulri;
+ int kscrexist;
+ bool rxi;
+ int ie;
+ bool bi;
+ bool bp;
+ uint64_t PAMask;
+ bool mvh;
+ int CP0_LLAddr_shift;
+ bool ps;
+ bool vp;
+ bool cmgcr;
+ bool mrp;
+ bool nan2008;
+ bool abs2008;
+} DisasContext;
+
+enum {
+ BS_NONE = 0, /* We go out of the TB without reaching a branch or an
+ * exception condition */
+ BS_STOP = 1, /* We want to stop translation for any reason */
+ BS_BRANCH = 2, /* We reached a branch condition */
+ BS_EXCP = 3, /* We reached an exception condition */
+};
+
+static const char * const regnames[] = {
+ "r0", "at", "v0", "v1", "a0", "a1", "a2", "a3",
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7",
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+ "t8", "t9", "k0", "k1", "gp", "sp", "s8", "ra",
+};
+
+static const char * const regnames_HI[] = {
+ "HI0", "HI1", "HI2", "HI3",
+};
+
+static const char * const regnames_LO[] = {
+ "LO0", "LO1", "LO2", "LO3",
+};
+
+static const char * const fregnames[] = {
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
+ "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
+ "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
+ "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",
+};
+
+static const char * const msaregnames[] = {
+ "w0.d0", "w0.d1", "w1.d0", "w1.d1",
+ "w2.d0", "w2.d1", "w3.d0", "w3.d1",
+ "w4.d0", "w4.d1", "w5.d0", "w5.d1",
+ "w6.d0", "w6.d1", "w7.d0", "w7.d1",
+ "w8.d0", "w8.d1", "w9.d0", "w9.d1",
+ "w10.d0", "w10.d1", "w11.d0", "w11.d1",
+ "w12.d0", "w12.d1", "w13.d0", "w13.d1",
+ "w14.d0", "w14.d1", "w15.d0", "w15.d1",
+ "w16.d0", "w16.d1", "w17.d0", "w17.d1",
+ "w18.d0", "w18.d1", "w19.d0", "w19.d1",
+ "w20.d0", "w20.d1", "w21.d0", "w21.d1",
+ "w22.d0", "w22.d1", "w23.d0", "w23.d1",
+ "w24.d0", "w24.d1", "w25.d0", "w25.d1",
+ "w26.d0", "w26.d1", "w27.d0", "w27.d1",
+ "w28.d0", "w28.d1", "w29.d0", "w29.d1",
+ "w30.d0", "w30.d1", "w31.d0", "w31.d1",
+};
+
+#define LOG_DISAS(...) \
+ do { \
+ if (MIPS_DEBUG_DISAS) { \
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__); \
+ } \
+ } while (0)
+
+#define MIPS_INVAL(op) \
+ do { \
+ if (MIPS_DEBUG_DISAS) { \
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, \
+ TARGET_FMT_lx ": %08x Invalid %s %03x %03x %03x\n", \
+ ctx->pc, ctx->opcode, op, ctx->opcode >> 26, \
+ ctx->opcode & 0x3F, ((ctx->opcode >> 16) & 0x1F)); \
+ } \
+ } while (0)
+
+/* General purpose registers moves. */
+static inline void gen_load_gpr (TCGv t, int reg)
+{
+ if (reg == 0)
+ tcg_gen_movi_tl(t, 0);
+ else
+ tcg_gen_mov_tl(t, cpu_gpr[reg]);
+}
+
+static inline void gen_store_gpr (TCGv t, int reg)
+{
+ if (reg != 0)
+ tcg_gen_mov_tl(cpu_gpr[reg], t);
+}
+
+/* Moves to/from shadow registers. */
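+/*
+ * Descriptive note on the generated address arithmetic: both helpers below
+ * compute the shadow set base as an offset of
+ * SRSCtl.PSS * 32 * sizeof(target_ulong) from cpu_env, then access the
+ * individual register at regno * sizeof(target_ulong) within that bank of
+ * 32 GPRs.
+ */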
+static inline void gen_load_srsgpr (int from, int to)
+{
+ TCGv t0 = tcg_temp_new();
+
+ if (from == 0)
+ tcg_gen_movi_tl(t0, 0);
+ else {
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_ptr addr = tcg_temp_new_ptr();
+
+ tcg_gen_ld_i32(t2, cpu_env, offsetof(CPUMIPSState, CP0_SRSCtl));
+ tcg_gen_shri_i32(t2, t2, CP0SRSCtl_PSS);
+ tcg_gen_andi_i32(t2, t2, 0xf);
+ tcg_gen_muli_i32(t2, t2, sizeof(target_ulong) * 32);
+ tcg_gen_ext_i32_ptr(addr, t2);
+ tcg_gen_add_ptr(addr, cpu_env, addr);
+
+ tcg_gen_ld_tl(t0, addr, sizeof(target_ulong) * from);
+ tcg_temp_free_ptr(addr);
+ tcg_temp_free_i32(t2);
+ }
+ gen_store_gpr(t0, to);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_store_srsgpr (int from, int to)
+{
+ if (to != 0) {
+ TCGv t0 = tcg_temp_new();
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_ptr addr = tcg_temp_new_ptr();
+
+ gen_load_gpr(t0, from);
+ tcg_gen_ld_i32(t2, cpu_env, offsetof(CPUMIPSState, CP0_SRSCtl));
+ tcg_gen_shri_i32(t2, t2, CP0SRSCtl_PSS);
+ tcg_gen_andi_i32(t2, t2, 0xf);
+ tcg_gen_muli_i32(t2, t2, sizeof(target_ulong) * 32);
+ tcg_gen_ext_i32_ptr(addr, t2);
+ tcg_gen_add_ptr(addr, cpu_env, addr);
+
+ tcg_gen_st_tl(t0, addr, sizeof(target_ulong) * to);
+ tcg_temp_free_ptr(addr);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free(t0);
+ }
+}
+
+/* Tests */
+static inline void gen_save_pc(target_ulong pc)
+{
+ tcg_gen_movi_tl(cpu_PC, pc);
+}
+
+static inline void save_cpu_state(DisasContext *ctx, int do_save_pc)
+{
+ LOG_DISAS("hflags %08x saved %08x\n", ctx->hflags, ctx->saved_hflags);
+ if (do_save_pc && ctx->pc != ctx->saved_pc) {
+ gen_save_pc(ctx->pc);
+ ctx->saved_pc = ctx->pc;
+ }
+ if (ctx->hflags != ctx->saved_hflags) {
+ tcg_gen_movi_i32(hflags, ctx->hflags);
+ ctx->saved_hflags = ctx->hflags;
+ switch (ctx->hflags & MIPS_HFLAG_BMASK_BASE) {
+ case MIPS_HFLAG_BR:
+ break;
+ case MIPS_HFLAG_BC:
+ case MIPS_HFLAG_BL:
+ case MIPS_HFLAG_B:
+ tcg_gen_movi_tl(btarget, ctx->btarget);
+ break;
+ }
+ }
+}
+
+static inline void restore_cpu_state(CPUMIPSState *env, DisasContext *ctx)
+{
+ ctx->saved_hflags = ctx->hflags;
+ switch (ctx->hflags & MIPS_HFLAG_BMASK_BASE) {
+ case MIPS_HFLAG_BR:
+ break;
+ case MIPS_HFLAG_BC:
+ case MIPS_HFLAG_BL:
+ case MIPS_HFLAG_B:
+ ctx->btarget = env->btarget;
+ break;
+ }
+}
+
+static inline void generate_exception_err(DisasContext *ctx, int excp, int err)
+{
+ TCGv_i32 texcp = tcg_const_i32(excp);
+ TCGv_i32 terr = tcg_const_i32(err);
+ save_cpu_state(ctx, 1);
+ gen_helper_raise_exception_err(cpu_env, texcp, terr);
+ tcg_temp_free_i32(terr);
+ tcg_temp_free_i32(texcp);
+ ctx->bstate = BS_EXCP;
+}
+
+static inline void generate_exception(DisasContext *ctx, int excp)
+{
+ gen_helper_0e0i(raise_exception, excp);
+}
+
+static inline void generate_exception_end(DisasContext *ctx, int excp)
+{
+ generate_exception_err(ctx, excp, 0);
+}
+
+/* Floating point register moves. */
+static void gen_load_fpr32(DisasContext *ctx, TCGv_i32 t, int reg)
+{
+ if (ctx->hflags & MIPS_HFLAG_FRE) {
+ generate_exception(ctx, EXCP_RI);
+ }
+ tcg_gen_extrl_i64_i32(t, fpu_f64[reg]);
+}
+
+static void gen_store_fpr32(DisasContext *ctx, TCGv_i32 t, int reg)
+{
+ TCGv_i64 t64;
+ if (ctx->hflags & MIPS_HFLAG_FRE) {
+ generate_exception(ctx, EXCP_RI);
+ }
+ t64 = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(t64, t);
+ tcg_gen_deposit_i64(fpu_f64[reg], fpu_f64[reg], t64, 0, 32);
+ tcg_temp_free_i64(t64);
+}
+
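+/*
+ * With FR=0 (MIPS_HFLAG_F64 clear) a 64-bit FP value occupies an even/odd
+ * register pair, so the "high" 32 bits of register N live in register N|1;
+ * with FR=1 every register is a full 64 bits wide and the high half is
+ * simply bits 63..32 of the same register.
+ */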
+static void gen_load_fpr32h(DisasContext *ctx, TCGv_i32 t, int reg)
+{
+ if (ctx->hflags & MIPS_HFLAG_F64) {
+ tcg_gen_extrh_i64_i32(t, fpu_f64[reg]);
+ } else {
+ gen_load_fpr32(ctx, t, reg | 1);
+ }
+}
+
+static void gen_store_fpr32h(DisasContext *ctx, TCGv_i32 t, int reg)
+{
+ if (ctx->hflags & MIPS_HFLAG_F64) {
+ TCGv_i64 t64 = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(t64, t);
+ tcg_gen_deposit_i64(fpu_f64[reg], fpu_f64[reg], t64, 32, 32);
+ tcg_temp_free_i64(t64);
+ } else {
+ gen_store_fpr32(ctx, t, reg | 1);
+ }
+}
+
+static void gen_load_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
+{
+ if (ctx->hflags & MIPS_HFLAG_F64) {
+ tcg_gen_mov_i64(t, fpu_f64[reg]);
+ } else {
+ tcg_gen_concat32_i64(t, fpu_f64[reg & ~1], fpu_f64[reg | 1]);
+ }
+}
+
+static void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
+{
+ if (ctx->hflags & MIPS_HFLAG_F64) {
+ tcg_gen_mov_i64(fpu_f64[reg], t);
+ } else {
+ TCGv_i64 t0;
+ tcg_gen_deposit_i64(fpu_f64[reg & ~1], fpu_f64[reg & ~1], t, 0, 32);
+ t0 = tcg_temp_new_i64();
+ tcg_gen_shri_i64(t0, t, 32);
+ tcg_gen_deposit_i64(fpu_f64[reg | 1], fpu_f64[reg | 1], t0, 0, 32);
+ tcg_temp_free_i64(t0);
+ }
+}
+
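+/*
+ * FP condition codes in the FCSR: cc0 is bit 23, while cc1..cc7 occupy
+ * bits 25..31 (bit 24 is the FS bit), hence the asymmetry below.
+ */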
+static inline int get_fp_bit (int cc)
+{
+ if (cc)
+ return 24 + cc;
+ else
+ return 23;
+}
+
+/* Address computation */
+static inline void gen_op_addr_add (DisasContext *ctx, TCGv ret, TCGv arg0, TCGv arg1)
+{
+ tcg_gen_add_tl(ret, arg0, arg1);
+
+#if defined(TARGET_MIPS64)
+ if (ctx->hflags & MIPS_HFLAG_AWRAP) {
+ tcg_gen_ext32s_i64(ret, ret);
+ }
+#endif
+}
+
+/* Address computation (translation time) */
+static target_long addr_add(DisasContext *ctx, target_long base,
+ target_long offset)
+{
+ target_long sum = base + offset;
+
+#if defined(TARGET_MIPS64)
+ if (ctx->hflags & MIPS_HFLAG_AWRAP) {
+ sum = (int32_t)sum;
+ }
+#endif
+ return sum;
+}
+
+/* Sign-extract the low 32-bits to a target_long. */
+static inline void gen_move_low32(TCGv ret, TCGv_i64 arg)
+{
+#if defined(TARGET_MIPS64)
+ tcg_gen_ext32s_i64(ret, arg);
+#else
+ tcg_gen_extrl_i64_i32(ret, arg);
+#endif
+}
+
+/* Sign-extract the high 32-bits to a target_long. */
+static inline void gen_move_high32(TCGv ret, TCGv_i64 arg)
+{
+#if defined(TARGET_MIPS64)
+ tcg_gen_sari_i64(ret, arg, 32);
+#else
+ tcg_gen_extrh_i64_i32(ret, arg);
+#endif
+}
+
+static inline void check_cp0_enabled(DisasContext *ctx)
+{
+ if (unlikely(!(ctx->hflags & MIPS_HFLAG_CP0)))
+ generate_exception_err(ctx, EXCP_CpU, 0);
+}
+
+static inline void check_cp1_enabled(DisasContext *ctx)
+{
+ if (unlikely(!(ctx->hflags & MIPS_HFLAG_FPU)))
+ generate_exception_err(ctx, EXCP_CpU, 1);
+}
+
+/* Verify that the processor is running with COP1X instructions enabled.
+ This is associated with the nabla symbol in the MIPS32 and MIPS64
+ opcode tables. */
+
+static inline void check_cop1x(DisasContext *ctx)
+{
+ if (unlikely(!(ctx->hflags & MIPS_HFLAG_COP1X)))
+ generate_exception_end(ctx, EXCP_RI);
+}
+
+/* Verify that the processor is running with 64-bit floating-point
+ operations enabled. */
+
+static inline void check_cp1_64bitmode(DisasContext *ctx)
+{
+ if (unlikely(~ctx->hflags & (MIPS_HFLAG_F64 | MIPS_HFLAG_COP1X)))
+ generate_exception_end(ctx, EXCP_RI);
+}
+
+/*
+ * Verify that the floating point register numbers are valid; an operation
+ * is not defined if bit 0 of any register specification is set and the FR
+ * bit in the Status register equals zero, since the register numbers then
+ * specify an even-odd pair of adjacent coprocessor general registers. When
+ * the FR bit in the Status register equals one, both even and odd register
+ * numbers are valid. This limitation exists only for 64-bit wide (d, l, ps)
+ * registers.
+ *
+ * Multiple 64-bit wide registers can be checked in one call:
+ *   check_cp1_registers(ctx, freg1 | freg2 | ... | fregN);
+ */
+static inline void check_cp1_registers(DisasContext *ctx, int regs)
+{
+ if (unlikely(!(ctx->hflags & MIPS_HFLAG_F64) && (regs & 1)))
+ generate_exception_end(ctx, EXCP_RI);
+}
+
+/* Verify that the processor is running with DSP instructions enabled.
+   The DSP ASE is enabled by the MX bit (bit 24) of the CP0 Status register.
+ */
+
+static inline void check_dsp(DisasContext *ctx)
+{
+ if (unlikely(!(ctx->hflags & MIPS_HFLAG_DSP))) {
+ if (ctx->insn_flags & ASE_DSP) {
+ generate_exception_end(ctx, EXCP_DSPDIS);
+ } else {
+ generate_exception_end(ctx, EXCP_RI);
+ }
+ }
+}
+
+static inline void check_dspr2(DisasContext *ctx)
+{
+ if (unlikely(!(ctx->hflags & MIPS_HFLAG_DSPR2))) {
+ if (ctx->insn_flags & ASE_DSP) {
+ generate_exception_end(ctx, EXCP_DSPDIS);
+ } else {
+ generate_exception_end(ctx, EXCP_RI);
+ }
+ }
+}
+
+/* This code generates a "reserved instruction" exception if the
+ CPU does not support the instruction set corresponding to flags. */
+static inline void check_insn(DisasContext *ctx, int flags)
+{
+ if (unlikely(!(ctx->insn_flags & flags))) {
+ generate_exception_end(ctx, EXCP_RI);
+ }
+}
+
+/* This code generates a "reserved instruction" exception if the
+   CPU has the corresponding flag set, which indicates that the
+   instruction has been removed. */
+static inline void check_insn_opc_removed(DisasContext *ctx, int flags)
+{
+ if (unlikely(ctx->insn_flags & flags)) {
+ generate_exception_end(ctx, EXCP_RI);
+ }
+}
+
+/* This code generates a "reserved instruction" exception if the CPU does
+   not support the 64-bit paired-single (PS) floating point data type. */
+static inline void check_ps(DisasContext *ctx)
+{
+ if (unlikely(!ctx->ps)) {
+ generate_exception(ctx, EXCP_RI);
+ }
+ check_cp1_64bitmode(ctx);
+}
+
+#ifdef TARGET_MIPS64
+/* This code generates a "reserved instruction" exception if 64-bit
+ instructions are not enabled. */
+static inline void check_mips_64(DisasContext *ctx)
+{
+ if (unlikely(!(ctx->hflags & MIPS_HFLAG_64)))
+ generate_exception_end(ctx, EXCP_RI);
+}
+#endif
+
+#ifndef CONFIG_USER_ONLY
+static inline void check_mvh(DisasContext *ctx)
+{
+ if (unlikely(!ctx->mvh)) {
+ generate_exception(ctx, EXCP_RI);
+ }
+}
+#endif
+
+/* Define small wrappers for gen_load_fpr* so that we have a uniform
+ calling interface for 32 and 64-bit FPRs. No sense in changing
+ all callers for gen_load_fpr32 when we need the CTX parameter for
+ this one use. */
+#define gen_ldcmp_fpr32(ctx, x, y) gen_load_fpr32(ctx, x, y)
+#define gen_ldcmp_fpr64(ctx, x, y) gen_load_fpr64(ctx, x, y)
+#define FOP_CONDS(type, abs, fmt, ifmt, bits) \
+static inline void gen_cmp ## type ## _ ## fmt(DisasContext *ctx, int n, \
+ int ft, int fs, int cc) \
+{ \
+ TCGv_i##bits fp0 = tcg_temp_new_i##bits (); \
+ TCGv_i##bits fp1 = tcg_temp_new_i##bits (); \
+ switch (ifmt) { \
+ case FMT_PS: \
+ check_ps(ctx); \
+ break; \
+ case FMT_D: \
+ if (abs) { \
+ check_cop1x(ctx); \
+ } \
+ check_cp1_registers(ctx, fs | ft); \
+ break; \
+ case FMT_S: \
+ if (abs) { \
+ check_cop1x(ctx); \
+ } \
+ break; \
+ } \
+ gen_ldcmp_fpr##bits (ctx, fp0, fs); \
+ gen_ldcmp_fpr##bits (ctx, fp1, ft); \
+ switch (n) { \
+ case 0: gen_helper_0e2i(cmp ## type ## _ ## fmt ## _f, fp0, fp1, cc); break;\
+ case 1: gen_helper_0e2i(cmp ## type ## _ ## fmt ## _un, fp0, fp1, cc); break;\
+ case 2: gen_helper_0e2i(cmp ## type ## _ ## fmt ## _eq, fp0, fp1, cc); break;\
+ case 3: gen_helper_0e2i(cmp ## type ## _ ## fmt ## _ueq, fp0, fp1, cc); break;\
+ case 4: gen_helper_0e2i(cmp ## type ## _ ## fmt ## _olt, fp0, fp1, cc); break;\
+ case 5: gen_helper_0e2i(cmp ## type ## _ ## fmt ## _ult, fp0, fp1, cc); break;\
+ case 6: gen_helper_0e2i(cmp ## type ## _ ## fmt ## _ole, fp0, fp1, cc); break;\
+ case 7: gen_helper_0e2i(cmp ## type ## _ ## fmt ## _ule, fp0, fp1, cc); break;\
+ case 8: gen_helper_0e2i(cmp ## type ## _ ## fmt ## _sf, fp0, fp1, cc); break;\
+ case 9: gen_helper_0e2i(cmp ## type ## _ ## fmt ## _ngle, fp0, fp1, cc); break;\
+ case 10: gen_helper_0e2i(cmp ## type ## _ ## fmt ## _seq, fp0, fp1, cc); break;\
+ case 11: gen_helper_0e2i(cmp ## type ## _ ## fmt ## _ngl, fp0, fp1, cc); break;\
+ case 12: gen_helper_0e2i(cmp ## type ## _ ## fmt ## _lt, fp0, fp1, cc); break;\
+ case 13: gen_helper_0e2i(cmp ## type ## _ ## fmt ## _nge, fp0, fp1, cc); break;\
+ case 14: gen_helper_0e2i(cmp ## type ## _ ## fmt ## _le, fp0, fp1, cc); break;\
+ case 15: gen_helper_0e2i(cmp ## type ## _ ## fmt ## _ngt, fp0, fp1, cc); break;\
+ default: abort(); \
+ } \
+ tcg_temp_free_i##bits (fp0); \
+ tcg_temp_free_i##bits (fp1); \
+}
+
+FOP_CONDS(, 0, d, FMT_D, 64)
+FOP_CONDS(abs, 1, d, FMT_D, 64)
+FOP_CONDS(, 0, s, FMT_S, 32)
+FOP_CONDS(abs, 1, s, FMT_S, 32)
+FOP_CONDS(, 0, ps, FMT_PS, 64)
+FOP_CONDS(abs, 1, ps, FMT_PS, 64)
+#undef FOP_CONDS
+
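+/* FOP_CONDNS expands to the gen_r6_cmp_<fmt>() generators for the R6
+ CMP.cond.fmt instructions, which write an all-ones/all-zeroes mask to
+ FPR fd instead of setting a condition code. */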
+#define FOP_CONDNS(fmt, ifmt, bits, STORE) \
+static inline void gen_r6_cmp_ ## fmt(DisasContext * ctx, int n, \
+ int ft, int fs, int fd) \
+{ \
+ TCGv_i ## bits fp0 = tcg_temp_new_i ## bits(); \
+ TCGv_i ## bits fp1 = tcg_temp_new_i ## bits(); \
+ if (ifmt == FMT_D) { \
+ check_cp1_registers(ctx, fs | ft | fd); \
+ } \
+ gen_ldcmp_fpr ## bits(ctx, fp0, fs); \
+ gen_ldcmp_fpr ## bits(ctx, fp1, ft); \
+ switch (n) { \
+ case 0: \
+ gen_helper_r6_cmp_ ## fmt ## _af(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 1: \
+ gen_helper_r6_cmp_ ## fmt ## _un(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 2: \
+ gen_helper_r6_cmp_ ## fmt ## _eq(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 3: \
+ gen_helper_r6_cmp_ ## fmt ## _ueq(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 4: \
+ gen_helper_r6_cmp_ ## fmt ## _lt(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 5: \
+ gen_helper_r6_cmp_ ## fmt ## _ult(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 6: \
+ gen_helper_r6_cmp_ ## fmt ## _le(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 7: \
+ gen_helper_r6_cmp_ ## fmt ## _ule(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 8: \
+ gen_helper_r6_cmp_ ## fmt ## _saf(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 9: \
+ gen_helper_r6_cmp_ ## fmt ## _sun(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 10: \
+ gen_helper_r6_cmp_ ## fmt ## _seq(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 11: \
+ gen_helper_r6_cmp_ ## fmt ## _sueq(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 12: \
+ gen_helper_r6_cmp_ ## fmt ## _slt(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 13: \
+ gen_helper_r6_cmp_ ## fmt ## _sult(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 14: \
+ gen_helper_r6_cmp_ ## fmt ## _sle(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 15: \
+ gen_helper_r6_cmp_ ## fmt ## _sule(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 17: \
+ gen_helper_r6_cmp_ ## fmt ## _or(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 18: \
+ gen_helper_r6_cmp_ ## fmt ## _une(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 19: \
+ gen_helper_r6_cmp_ ## fmt ## _ne(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 25: \
+ gen_helper_r6_cmp_ ## fmt ## _sor(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 26: \
+ gen_helper_r6_cmp_ ## fmt ## _sune(fp0, cpu_env, fp0, fp1); \
+ break; \
+ case 27: \
+ gen_helper_r6_cmp_ ## fmt ## _sne(fp0, cpu_env, fp0, fp1); \
+ break; \
+ default: \
+ abort(); \
+ } \
+ STORE; \
+ tcg_temp_free_i ## bits (fp0); \
+ tcg_temp_free_i ## bits (fp1); \
+}
+
+FOP_CONDNS(d, FMT_D, 64, gen_store_fpr64(ctx, fp0, fd))
+FOP_CONDNS(s, FMT_S, 32, gen_store_fpr32(ctx, fp0, fd))
+#undef FOP_CONDNS
+#undef gen_ldcmp_fpr32
+#undef gen_ldcmp_fpr64
+
+/* load/store instructions. */
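+
+/* In user mode LL is emulated inline: the effective address and the loaded
+ value are recorded in CPUMIPSState.lladdr/llval for the matching SC to
+ check. In system mode the ll/lld helpers do the equivalent work. */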
+#ifdef CONFIG_USER_ONLY
+#define OP_LD_ATOMIC(insn,fname) \
+static inline void op_ld_##insn(TCGv ret, TCGv arg1, DisasContext *ctx) \
+{ \
+ TCGv t0 = tcg_temp_new(); \
+ tcg_gen_mov_tl(t0, arg1); \
+ tcg_gen_qemu_##fname(ret, arg1, ctx->mem_idx); \
+ tcg_gen_st_tl(t0, cpu_env, offsetof(CPUMIPSState, lladdr)); \
+ tcg_gen_st_tl(ret, cpu_env, offsetof(CPUMIPSState, llval)); \
+ tcg_temp_free(t0); \
+}
+#else
+#define OP_LD_ATOMIC(insn,fname) \
+static inline void op_ld_##insn(TCGv ret, TCGv arg1, DisasContext *ctx) \
+{ \
+ gen_helper_1e1i(insn, ret, arg1, ctx->mem_idx); \
+}
+#endif
+OP_LD_ATOMIC(ll,ld32s);
+#if defined(TARGET_MIPS64)
+OP_LD_ATOMIC(lld,ld64);
+#endif
+#undef OP_LD_ATOMIC
+
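+/* The user-mode SC stub below raises AdES for a misaligned address, makes
+ SC fail (rt = 0) when the address no longer matches lladdr, and otherwise
+ raises EXCP_SC so the store can be completed and the result reported
+ outside of translated code. */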
+#ifdef CONFIG_USER_ONLY
+#define OP_ST_ATOMIC(insn,fname,ldname,almask) \
+static inline void op_st_##insn(TCGv arg1, TCGv arg2, int rt, DisasContext *ctx) \
+{ \
+ TCGv t0 = tcg_temp_new(); \
+ TCGLabel *l1 = gen_new_label(); \
+ TCGLabel *l2 = gen_new_label(); \
+ \
+ tcg_gen_andi_tl(t0, arg2, almask); \
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1); \
+ tcg_gen_st_tl(arg2, cpu_env, offsetof(CPUMIPSState, CP0_BadVAddr)); \
+ generate_exception(ctx, EXCP_AdES); \
+ gen_set_label(l1); \
+ tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUMIPSState, lladdr)); \
+ tcg_gen_brcond_tl(TCG_COND_NE, arg2, t0, l2); \
+ tcg_gen_movi_tl(t0, rt | ((almask << 3) & 0x20)); \
+ tcg_gen_st_tl(t0, cpu_env, offsetof(CPUMIPSState, llreg)); \
+ tcg_gen_st_tl(arg1, cpu_env, offsetof(CPUMIPSState, llnewval)); \
+ generate_exception_end(ctx, EXCP_SC); \
+ gen_set_label(l2); \
+ tcg_gen_movi_tl(t0, 0); \
+ gen_store_gpr(t0, rt); \
+ tcg_temp_free(t0); \
+}
+#else
+#define OP_ST_ATOMIC(insn,fname,ldname,almask) \
+static inline void op_st_##insn(TCGv arg1, TCGv arg2, int rt, DisasContext *ctx) \
+{ \
+ TCGv t0 = tcg_temp_new(); \
+ gen_helper_1e2i(insn, t0, arg1, arg2, ctx->mem_idx); \
+ gen_store_gpr(t0, rt); \
+ tcg_temp_free(t0); \
+}
+#endif
+OP_ST_ATOMIC(sc,st32,ld32s,0x3);
+#if defined(TARGET_MIPS64)
+OP_ST_ATOMIC(scd,st64,ld64,0x7);
+#endif
+#undef OP_ST_ATOMIC
+
+static void gen_base_offset_addr (DisasContext *ctx, TCGv addr,
+ int base, int16_t offset)
+{
+ if (base == 0) {
+ tcg_gen_movi_tl(addr, offset);
+ } else if (offset == 0) {
+ gen_load_gpr(addr, base);
+ } else {
+ tcg_gen_movi_tl(addr, offset);
+ gen_op_addr_add(ctx, addr, cpu_gpr[base], addr);
+ }
+}
+
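+/* Compute the base address for PC-relative loads: when executing in a
+ branch delay slot the PC is backed up to the branch instruction, and the
+ result is word-aligned. */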
+static target_ulong pc_relative_pc (DisasContext *ctx)
+{
+ target_ulong pc = ctx->pc;
+
+ if (ctx->hflags & MIPS_HFLAG_BMASK) {
+ int branch_bytes = ctx->hflags & MIPS_HFLAG_BDS16 ? 2 : 4;
+
+ pc -= branch_bytes;
+ }
+
+ pc &= ~(target_ulong)3;
+ return pc;
+}
+
+/* Load */
+static void gen_ld(DisasContext *ctx, uint32_t opc,
+ int rt, int base, int16_t offset)
+{
+ TCGv t0, t1, t2;
+
+ if (rt == 0 && ctx->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)) {
+ /* The Loongson CPUs use a load to the zero register for prefetch.
+ We emulate it as a NOP. On other CPUs we must perform the
+ actual memory access. */
+ return;
+ }
+
+ t0 = tcg_temp_new();
+ gen_base_offset_addr(ctx, t0, base, offset);
+
+ switch (opc) {
+#if defined(TARGET_MIPS64)
+ case OPC_LWU:
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUL |
+ ctx->default_tcg_memop_mask);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_LD:
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ |
+ ctx->default_tcg_memop_mask);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_LLD:
+ case R6_OPC_LLD:
+ op_ld_lld(t0, t0, ctx);
+ gen_store_gpr(t0, rt);
+ break;
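+ /* LDL/LDR load the unaligned left/right part of a doubleword: the
+ aligned doubleword is loaded, shifted according to the low address
+ bits, and merged under a mask with the old contents of rt. */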
+ case OPC_LDL:
+ t1 = tcg_temp_new();
+ /* Do a byte access to possibly trigger a page
+ fault with the unaligned address. */
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB);
+ tcg_gen_andi_tl(t1, t0, 7);
+#ifndef TARGET_WORDS_BIGENDIAN
+ tcg_gen_xori_tl(t1, t1, 7);
+#endif
+ tcg_gen_shli_tl(t1, t1, 3);
+ tcg_gen_andi_tl(t0, t0, ~7);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ);
+ tcg_gen_shl_tl(t0, t0, t1);
+ t2 = tcg_const_tl(-1);
+ tcg_gen_shl_tl(t2, t2, t1);
+ gen_load_gpr(t1, rt);
+ tcg_gen_andc_tl(t1, t1, t2);
+ tcg_temp_free(t2);
+ tcg_gen_or_tl(t0, t0, t1);
+ tcg_temp_free(t1);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_LDR:
+ t1 = tcg_temp_new();
+ /* Do a byte access to possibly trigger a page
+ fault with the unaligned address. */
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB);
+ tcg_gen_andi_tl(t1, t0, 7);
+#ifdef TARGET_WORDS_BIGENDIAN
+ tcg_gen_xori_tl(t1, t1, 7);
+#endif
+ tcg_gen_shli_tl(t1, t1, 3);
+ tcg_gen_andi_tl(t0, t0, ~7);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ);
+ tcg_gen_shr_tl(t0, t0, t1);
+ tcg_gen_xori_tl(t1, t1, 63);
+ t2 = tcg_const_tl(0xfffffffffffffffeull);
+ tcg_gen_shl_tl(t2, t2, t1);
+ gen_load_gpr(t1, rt);
+ tcg_gen_and_tl(t1, t1, t2);
+ tcg_temp_free(t2);
+ tcg_gen_or_tl(t0, t0, t1);
+ tcg_temp_free(t1);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_LDPC:
+ t1 = tcg_const_tl(pc_relative_pc(ctx));
+ gen_op_addr_add(ctx, t0, t0, t1);
+ tcg_temp_free(t1);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ);
+ gen_store_gpr(t0, rt);
+ break;
+#endif
+ case OPC_LWPC:
+ t1 = tcg_const_tl(pc_relative_pc(ctx));
+ gen_op_addr_add(ctx, t0, t0, t1);
+ tcg_temp_free(t1);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESL);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_LW:
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESL |
+ ctx->default_tcg_memop_mask);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_LH:
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESW |
+ ctx->default_tcg_memop_mask);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_LHU:
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUW |
+ ctx->default_tcg_memop_mask);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_LB:
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_SB);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_LBU:
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_UB);
+ gen_store_gpr(t0, rt);
+ break;
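+ /* LWL/LWR use the same aligned-load, shift and merge technique as
+ LDL/LDR, operating on 32-bit words and sign-extending the result. */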
+ case OPC_LWL:
+ t1 = tcg_temp_new();
+ /* Do a byte access to possibly trigger a page
+ fault with the unaligned address. */
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB);
+ tcg_gen_andi_tl(t1, t0, 3);
+#ifndef TARGET_WORDS_BIGENDIAN
+ tcg_gen_xori_tl(t1, t1, 3);
+#endif
+ tcg_gen_shli_tl(t1, t1, 3);
+ tcg_gen_andi_tl(t0, t0, ~3);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUL);
+ tcg_gen_shl_tl(t0, t0, t1);
+ t2 = tcg_const_tl(-1);
+ tcg_gen_shl_tl(t2, t2, t1);
+ gen_load_gpr(t1, rt);
+ tcg_gen_andc_tl(t1, t1, t2);
+ tcg_temp_free(t2);
+ tcg_gen_or_tl(t0, t0, t1);
+ tcg_temp_free(t1);
+ tcg_gen_ext32s_tl(t0, t0);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_LWR:
+ t1 = tcg_temp_new();
+ /* Do a byte access to possibly trigger a page
+ fault with the unaligned address. */
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_UB);
+ tcg_gen_andi_tl(t1, t0, 3);
+#ifdef TARGET_WORDS_BIGENDIAN
+ tcg_gen_xori_tl(t1, t1, 3);
+#endif
+ tcg_gen_shli_tl(t1, t1, 3);
+ tcg_gen_andi_tl(t0, t0, ~3);
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEUL);
+ tcg_gen_shr_tl(t0, t0, t1);
+ tcg_gen_xori_tl(t1, t1, 31);
+ t2 = tcg_const_tl(0xfffffffeull);
+ tcg_gen_shl_tl(t2, t2, t1);
+ gen_load_gpr(t1, rt);
+ tcg_gen_and_tl(t1, t1, t2);
+ tcg_temp_free(t2);
+ tcg_gen_or_tl(t0, t0, t1);
+ tcg_temp_free(t1);
+ tcg_gen_ext32s_tl(t0, t0);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_LL:
+ case R6_OPC_LL:
+ op_ld_ll(t0, t0, ctx);
+ gen_store_gpr(t0, rt);
+ break;
+ }
+ tcg_temp_free(t0);
+}
+
+/* Store */
+static void gen_st (DisasContext *ctx, uint32_t opc, int rt,
+ int base, int16_t offset)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+
+ gen_base_offset_addr(ctx, t0, base, offset);
+ gen_load_gpr(t1, rt);
+ switch (opc) {
+#if defined(TARGET_MIPS64)
+ case OPC_SD:
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEQ |
+ ctx->default_tcg_memop_mask);
+ break;
+ case OPC_SDL:
+ gen_helper_0e2i(sdl, t1, t0, ctx->mem_idx);
+ break;
+ case OPC_SDR:
+ gen_helper_0e2i(sdr, t1, t0, ctx->mem_idx);
+ break;
+#endif
+ case OPC_SW:
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL |
+ ctx->default_tcg_memop_mask);
+ break;
+ case OPC_SH:
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUW |
+ ctx->default_tcg_memop_mask);
+ break;
+ case OPC_SB:
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_8);
+ break;
+ case OPC_SWL:
+ gen_helper_0e2i(swl, t1, t0, ctx->mem_idx);
+ break;
+ case OPC_SWR:
+ gen_helper_0e2i(swr, t1, t0, ctx->mem_idx);
+ break;
+ }
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
+
+/* Store conditional */
+static void gen_st_cond (DisasContext *ctx, uint32_t opc, int rt,
+ int base, int16_t offset)
+{
+ TCGv t0, t1;
+
+#ifdef CONFIG_USER_ONLY
+ t0 = tcg_temp_local_new();
+ t1 = tcg_temp_local_new();
+#else
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+#endif
+ gen_base_offset_addr(ctx, t0, base, offset);
+ gen_load_gpr(t1, rt);
+ switch (opc) {
+#if defined(TARGET_MIPS64)
+ case OPC_SCD:
+ case R6_OPC_SCD:
+ op_st_scd(t1, t0, rt, ctx);
+ break;
+#endif
+ case OPC_SC:
+ case R6_OPC_SC:
+ op_st_sc(t1, t0, rt, ctx);
+ break;
+ }
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+}
+
+/* Load and store */
+static void gen_flt_ldst (DisasContext *ctx, uint32_t opc, int ft,
+ int base, int16_t offset)
+{
+ TCGv t0 = tcg_temp_new();
+
+ gen_base_offset_addr(ctx, t0, base, offset);
+ /* Don't treat this as a NOP when the destination is zero: we must
+ still perform the actual memory access. */
+ switch (opc) {
+ case OPC_LWC1:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ tcg_gen_qemu_ld_i32(fp0, t0, ctx->mem_idx, MO_TESL |
+ ctx->default_tcg_memop_mask);
+ gen_store_fpr32(ctx, fp0, ft);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_SWC1:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ gen_load_fpr32(ctx, fp0, ft);
+ tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, MO_TEUL |
+ ctx->default_tcg_memop_mask);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_LDC1:
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEQ |
+ ctx->default_tcg_memop_mask);
+ gen_store_fpr64(ctx, fp0, ft);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_SDC1:
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, ft);
+ tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEQ |
+ ctx->default_tcg_memop_mask);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ default:
+ MIPS_INVAL("flt_ldst");
+ generate_exception_end(ctx, EXCP_RI);
+ goto out;
+ }
+ out:
+ tcg_temp_free(t0);
+}
+
+static void gen_cop1_ldst(DisasContext *ctx, uint32_t op, int rt,
+ int rs, int16_t imm)
+{
+ if (ctx->CP0_Config1 & (1 << CP0C1_FP)) {
+ check_cp1_enabled(ctx);
+ switch (op) {
+ case OPC_LDC1:
+ case OPC_SDC1:
+ check_insn(ctx, ISA_MIPS2);
+ /* Fallthrough */
+ default:
+ gen_flt_ldst(ctx, op, rt, rs, imm);
+ }
+ } else {
+ generate_exception_err(ctx, EXCP_CpU, 1);
+ }
+}
+
+/* Arithmetic with immediate operand */
+static void gen_arith_imm(DisasContext *ctx, uint32_t opc,
+ int rt, int rs, int16_t imm)
+{
+ target_ulong uimm = (target_long)imm; /* Sign extend to 32/64 bits */
+
+ if (rt == 0 && opc != OPC_ADDI && opc != OPC_DADDI) {
+ /* If no destination, treat it as a NOP.
+ For addi, we must generate the overflow exception when needed. */
+ return;
+ }
+ switch (opc) {
+ case OPC_ADDI:
+ {
+ TCGv t0 = tcg_temp_local_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGLabel *l1 = gen_new_label();
+
+ gen_load_gpr(t1, rs);
+ tcg_gen_addi_tl(t0, t1, uimm);
+ tcg_gen_ext32s_tl(t0, t0);
+
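+ /* Signed overflow occurred iff rs and imm have the same sign while
+ the result's sign differs, i.e. the sign bit of
+ (rs ^ ~imm) & (result ^ imm) is set. */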
+ tcg_gen_xori_tl(t1, t1, ~uimm);
+ tcg_gen_xori_tl(t2, t0, uimm);
+ tcg_gen_and_tl(t1, t1, t2);
+ tcg_temp_free(t2);
+ tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
+ tcg_temp_free(t1);
+ /* operands of same sign, result different sign */
+ generate_exception(ctx, EXCP_OVERFLOW);
+ gen_set_label(l1);
+ tcg_gen_ext32s_tl(t0, t0);
+ gen_store_gpr(t0, rt);
+ tcg_temp_free(t0);
+ }
+ break;
+ case OPC_ADDIU:
+ if (rs != 0) {
+ tcg_gen_addi_tl(cpu_gpr[rt], cpu_gpr[rs], uimm);
+ tcg_gen_ext32s_tl(cpu_gpr[rt], cpu_gpr[rt]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rt], uimm);
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DADDI:
+ {
+ TCGv t0 = tcg_temp_local_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGLabel *l1 = gen_new_label();
+
+ gen_load_gpr(t1, rs);
+ tcg_gen_addi_tl(t0, t1, uimm);
+
+ tcg_gen_xori_tl(t1, t1, ~uimm);
+ tcg_gen_xori_tl(t2, t0, uimm);
+ tcg_gen_and_tl(t1, t1, t2);
+ tcg_temp_free(t2);
+ tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
+ tcg_temp_free(t1);
+ /* operands of same sign, result different sign */
+ generate_exception(ctx, EXCP_OVERFLOW);
+ gen_set_label(l1);
+ gen_store_gpr(t0, rt);
+ tcg_temp_free(t0);
+ }
+ break;
+ case OPC_DADDIU:
+ if (rs != 0) {
+ tcg_gen_addi_tl(cpu_gpr[rt], cpu_gpr[rs], uimm);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rt], uimm);
+ }
+ break;
+#endif
+ }
+}
+
+/* Logic with immediate operand */
+static void gen_logic_imm(DisasContext *ctx, uint32_t opc,
+ int rt, int rs, int16_t imm)
+{
+ target_ulong uimm;
+
+ if (rt == 0) {
+ /* If no destination, treat it as a NOP. */
+ return;
+ }
+ uimm = (uint16_t)imm;
+ switch (opc) {
+ case OPC_ANDI:
+ if (likely(rs != 0)) {
+ tcg_gen_andi_tl(cpu_gpr[rt], cpu_gpr[rs], uimm);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rt], 0);
+ }
+ break;
+ case OPC_ORI:
+ if (rs != 0) {
+ tcg_gen_ori_tl(cpu_gpr[rt], cpu_gpr[rs], uimm);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rt], uimm);
+ }
+ break;
+ case OPC_XORI:
+ if (likely(rs != 0)) {
+ tcg_gen_xori_tl(cpu_gpr[rt], cpu_gpr[rs], uimm);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rt], uimm);
+ }
+ break;
+ case OPC_LUI:
+ if (rs != 0 && (ctx->insn_flags & ISA_MIPS32R6)) {
+ /* OPC_AUI */
+ tcg_gen_addi_tl(cpu_gpr[rt], cpu_gpr[rs], imm << 16);
+ tcg_gen_ext32s_tl(cpu_gpr[rt], cpu_gpr[rt]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rt], imm << 16);
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+/* Set on less than with immediate operand */
+static void gen_slt_imm(DisasContext *ctx, uint32_t opc,
+ int rt, int rs, int16_t imm)
+{
+ target_ulong uimm = (target_long)imm; /* Sign extend to 32/64 bits */
+ TCGv t0;
+
+ if (rt == 0) {
+ /* If no destination, treat it as a NOP. */
+ return;
+ }
+ t0 = tcg_temp_new();
+ gen_load_gpr(t0, rs);
+ switch (opc) {
+ case OPC_SLTI:
+ tcg_gen_setcondi_tl(TCG_COND_LT, cpu_gpr[rt], t0, uimm);
+ break;
+ case OPC_SLTIU:
+ tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_gpr[rt], t0, uimm);
+ break;
+ }
+ tcg_temp_free(t0);
+}
+
+/* Shifts with immediate operand */
+static void gen_shift_imm(DisasContext *ctx, uint32_t opc,
+ int rt, int rs, int16_t imm)
+{
+ target_ulong uimm = ((uint16_t)imm) & 0x1f;
+ TCGv t0;
+
+ if (rt == 0) {
+ /* If no destination, treat it as a NOP. */
+ return;
+ }
+
+ t0 = tcg_temp_new();
+ gen_load_gpr(t0, rs);
+ switch (opc) {
+ case OPC_SLL:
+ tcg_gen_shli_tl(t0, t0, uimm);
+ tcg_gen_ext32s_tl(cpu_gpr[rt], t0);
+ break;
+ case OPC_SRA:
+ tcg_gen_sari_tl(cpu_gpr[rt], t0, uimm);
+ break;
+ case OPC_SRL:
+ if (uimm != 0) {
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_shri_tl(cpu_gpr[rt], t0, uimm);
+ } else {
+ tcg_gen_ext32s_tl(cpu_gpr[rt], t0);
+ }
+ break;
+ case OPC_ROTR:
+ if (uimm != 0) {
+ TCGv_i32 t1 = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(t1, t0);
+ tcg_gen_rotri_i32(t1, t1, uimm);
+ tcg_gen_ext_i32_tl(cpu_gpr[rt], t1);
+ tcg_temp_free_i32(t1);
+ } else {
+ tcg_gen_ext32s_tl(cpu_gpr[rt], t0);
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DSLL:
+ tcg_gen_shli_tl(cpu_gpr[rt], t0, uimm);
+ break;
+ case OPC_DSRA:
+ tcg_gen_sari_tl(cpu_gpr[rt], t0, uimm);
+ break;
+ case OPC_DSRL:
+ tcg_gen_shri_tl(cpu_gpr[rt], t0, uimm);
+ break;
+ case OPC_DROTR:
+ if (uimm != 0) {
+ tcg_gen_rotri_tl(cpu_gpr[rt], t0, uimm);
+ } else {
+ tcg_gen_mov_tl(cpu_gpr[rt], t0);
+ }
+ break;
+ case OPC_DSLL32:
+ tcg_gen_shli_tl(cpu_gpr[rt], t0, uimm + 32);
+ break;
+ case OPC_DSRA32:
+ tcg_gen_sari_tl(cpu_gpr[rt], t0, uimm + 32);
+ break;
+ case OPC_DSRL32:
+ tcg_gen_shri_tl(cpu_gpr[rt], t0, uimm + 32);
+ break;
+ case OPC_DROTR32:
+ tcg_gen_rotri_tl(cpu_gpr[rt], t0, uimm + 32);
+ break;
+#endif
+ }
+ tcg_temp_free(t0);
+}
+
+/* Arithmetic */
+static void gen_arith(DisasContext *ctx, uint32_t opc,
+ int rd, int rs, int rt)
+{
+ if (rd == 0 && opc != OPC_ADD && opc != OPC_SUB
+ && opc != OPC_DADD && opc != OPC_DSUB) {
+ /* If no destination, treat it as a NOP.
+ For add & sub, we must generate the overflow exception when needed. */
+ return;
+ }
+
+ switch (opc) {
+ case OPC_ADD:
+ {
+ TCGv t0 = tcg_temp_local_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGLabel *l1 = gen_new_label();
+
+ gen_load_gpr(t1, rs);
+ gen_load_gpr(t2, rt);
+ tcg_gen_add_tl(t0, t1, t2);
+ tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_xor_tl(t1, t1, t2);
+ tcg_gen_xor_tl(t2, t0, t2);
+ tcg_gen_andc_tl(t1, t2, t1);
+ tcg_temp_free(t2);
+ tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
+ tcg_temp_free(t1);
+ /* operands of same sign, result different sign */
+ generate_exception(ctx, EXCP_OVERFLOW);
+ gen_set_label(l1);
+ gen_store_gpr(t0, rd);
+ tcg_temp_free(t0);
+ }
+ break;
+ case OPC_ADDU:
+ if (rs != 0 && rt != 0) {
+ tcg_gen_add_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ } else if (rs == 0 && rt != 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rt]);
+ } else if (rs != 0 && rt == 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ }
+ break;
+ case OPC_SUB:
+ {
+ TCGv t0 = tcg_temp_local_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGLabel *l1 = gen_new_label();
+
+ gen_load_gpr(t1, rs);
+ gen_load_gpr(t2, rt);
+ tcg_gen_sub_tl(t0, t1, t2);
+ tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_xor_tl(t2, t1, t2);
+ tcg_gen_xor_tl(t1, t0, t1);
+ tcg_gen_and_tl(t1, t1, t2);
+ tcg_temp_free(t2);
+ tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
+ tcg_temp_free(t1);
+ /* operands of different sign, first operand and result different sign */
+ generate_exception(ctx, EXCP_OVERFLOW);
+ gen_set_label(l1);
+ gen_store_gpr(t0, rd);
+ tcg_temp_free(t0);
+ }
+ break;
+ case OPC_SUBU:
+ if (rs != 0 && rt != 0) {
+ tcg_gen_sub_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ } else if (rs == 0 && rt != 0) {
+ tcg_gen_neg_tl(cpu_gpr[rd], cpu_gpr[rt]);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ } else if (rs != 0 && rt == 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DADD:
+ {
+ TCGv t0 = tcg_temp_local_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGLabel *l1 = gen_new_label();
+
+ gen_load_gpr(t1, rs);
+ gen_load_gpr(t2, rt);
+ tcg_gen_add_tl(t0, t1, t2);
+ tcg_gen_xor_tl(t1, t1, t2);
+ tcg_gen_xor_tl(t2, t0, t2);
+ tcg_gen_andc_tl(t1, t2, t1);
+ tcg_temp_free(t2);
+ tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
+ tcg_temp_free(t1);
+ /* operands of same sign, result different sign */
+ generate_exception(ctx, EXCP_OVERFLOW);
+ gen_set_label(l1);
+ gen_store_gpr(t0, rd);
+ tcg_temp_free(t0);
+ }
+ break;
+ case OPC_DADDU:
+ if (rs != 0 && rt != 0) {
+ tcg_gen_add_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
+ } else if (rs == 0 && rt != 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rt]);
+ } else if (rs != 0 && rt == 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ }
+ break;
+ case OPC_DSUB:
+ {
+ TCGv t0 = tcg_temp_local_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGLabel *l1 = gen_new_label();
+
+ gen_load_gpr(t1, rs);
+ gen_load_gpr(t2, rt);
+ tcg_gen_sub_tl(t0, t1, t2);
+ tcg_gen_xor_tl(t2, t1, t2);
+ tcg_gen_xor_tl(t1, t0, t1);
+ tcg_gen_and_tl(t1, t1, t2);
+ tcg_temp_free(t2);
+ tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
+ tcg_temp_free(t1);
+ /* operands of different sign, first operand and result different sign */
+ generate_exception(ctx, EXCP_OVERFLOW);
+ gen_set_label(l1);
+ gen_store_gpr(t0, rd);
+ tcg_temp_free(t0);
+ }
+ break;
+ case OPC_DSUBU:
+ if (rs != 0 && rt != 0) {
+ tcg_gen_sub_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
+ } else if (rs == 0 && rt != 0) {
+ tcg_gen_neg_tl(cpu_gpr[rd], cpu_gpr[rt]);
+ } else if (rs != 0 && rt == 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ }
+ break;
+#endif
+ case OPC_MUL:
+ if (likely(rs != 0 && rt != 0)) {
+ tcg_gen_mul_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ }
+ break;
+ }
+}
+
+/* Conditional move */
+static void gen_cond_move(DisasContext *ctx, uint32_t opc,
+ int rd, int rs, int rt)
+{
+ TCGv t0, t1, t2;
+
+ if (rd == 0) {
+ /* If no destination, treat it as a NOP. */
+ return;
+ }
+
+ t0 = tcg_temp_new();
+ gen_load_gpr(t0, rt);
+ t1 = tcg_const_tl(0);
+ t2 = tcg_temp_new();
+ gen_load_gpr(t2, rs);
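+ /* MOVN/MOVZ keep the old value of rd when the condition on rt fails,
+ whereas the R6 SELNEZ/SELEQZ write zero instead. */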
+ switch (opc) {
+ case OPC_MOVN:
+ tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr[rd], t0, t1, t2, cpu_gpr[rd]);
+ break;
+ case OPC_MOVZ:
+ tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr[rd], t0, t1, t2, cpu_gpr[rd]);
+ break;
+ case OPC_SELNEZ:
+ tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr[rd], t0, t1, t2, t1);
+ break;
+ case OPC_SELEQZ:
+ tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr[rd], t0, t1, t2, t1);
+ break;
+ }
+ tcg_temp_free(t2);
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+}
+
+/* Logic */
+static void gen_logic(DisasContext *ctx, uint32_t opc,
+ int rd, int rs, int rt)
+{
+ if (rd == 0) {
+ /* If no destination, treat it as a NOP. */
+ return;
+ }
+
+ switch (opc) {
+ case OPC_AND:
+ if (likely(rs != 0 && rt != 0)) {
+ tcg_gen_and_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ }
+ break;
+ case OPC_NOR:
+ if (rs != 0 && rt != 0) {
+ tcg_gen_nor_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
+ } else if (rs == 0 && rt != 0) {
+ tcg_gen_not_tl(cpu_gpr[rd], cpu_gpr[rt]);
+ } else if (rs != 0 && rt == 0) {
+ tcg_gen_not_tl(cpu_gpr[rd], cpu_gpr[rs]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rd], ~((target_ulong)0));
+ }
+ break;
+ case OPC_OR:
+ if (likely(rs != 0 && rt != 0)) {
+ tcg_gen_or_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
+ } else if (rs == 0 && rt != 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rt]);
+ } else if (rs != 0 && rt == 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ }
+ break;
+ case OPC_XOR:
+ if (likely(rs != 0 && rt != 0)) {
+ tcg_gen_xor_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
+ } else if (rs == 0 && rt != 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rt]);
+ } else if (rs != 0 && rt == 0) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ }
+ break;
+ }
+}
+
+/* Set on lower than */
+static void gen_slt(DisasContext *ctx, uint32_t opc,
+ int rd, int rs, int rt)
+{
+ TCGv t0, t1;
+
+ if (rd == 0) {
+ /* If no destination, treat it as a NOP. */
+ return;
+ }
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+ switch (opc) {
+ case OPC_SLT:
+ tcg_gen_setcond_tl(TCG_COND_LT, cpu_gpr[rd], t0, t1);
+ break;
+ case OPC_SLTU:
+ tcg_gen_setcond_tl(TCG_COND_LTU, cpu_gpr[rd], t0, t1);
+ break;
+ }
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
+/* Shifts */
+static void gen_shift(DisasContext *ctx, uint32_t opc,
+ int rd, int rs, int rt)
+{
+ TCGv t0, t1;
+
+ if (rd == 0) {
+ /* If no destination, treat it as a NOP. */
+ return;
+ }
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+ switch (opc) {
+ case OPC_SLLV:
+ tcg_gen_andi_tl(t0, t0, 0x1f);
+ tcg_gen_shl_tl(t0, t1, t0);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], t0);
+ break;
+ case OPC_SRAV:
+ tcg_gen_andi_tl(t0, t0, 0x1f);
+ tcg_gen_sar_tl(cpu_gpr[rd], t1, t0);
+ break;
+ case OPC_SRLV:
+ tcg_gen_ext32u_tl(t1, t1);
+ tcg_gen_andi_tl(t0, t0, 0x1f);
+ tcg_gen_shr_tl(t0, t1, t0);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], t0);
+ break;
+ case OPC_ROTRV:
+ {
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 t3 = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(t2, t0);
+ tcg_gen_trunc_tl_i32(t3, t1);
+ tcg_gen_andi_i32(t2, t2, 0x1f);
+ tcg_gen_rotr_i32(t2, t3, t2);
+ tcg_gen_ext_i32_tl(cpu_gpr[rd], t2);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DSLLV:
+ tcg_gen_andi_tl(t0, t0, 0x3f);
+ tcg_gen_shl_tl(cpu_gpr[rd], t1, t0);
+ break;
+ case OPC_DSRAV:
+ tcg_gen_andi_tl(t0, t0, 0x3f);
+ tcg_gen_sar_tl(cpu_gpr[rd], t1, t0);
+ break;
+ case OPC_DSRLV:
+ tcg_gen_andi_tl(t0, t0, 0x3f);
+ tcg_gen_shr_tl(cpu_gpr[rd], t1, t0);
+ break;
+ case OPC_DROTRV:
+ tcg_gen_andi_tl(t0, t0, 0x3f);
+ tcg_gen_rotr_tl(cpu_gpr[rd], t1, t0);
+ break;
+#endif
+ }
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
+/* Arithmetic on HI/LO registers */
+static void gen_HILO(DisasContext *ctx, uint32_t opc, int acc, int reg)
+{
+ if (reg == 0 && (opc == OPC_MFHI || opc == OPC_MFLO)) {
+ /* Treat as NOP. */
+ return;
+ }
+
+ if (acc != 0) {
+ check_dsp(ctx);
+ }
+
+ switch (opc) {
+ case OPC_MFHI:
+#if defined(TARGET_MIPS64)
+ if (acc != 0) {
+ tcg_gen_ext32s_tl(cpu_gpr[reg], cpu_HI[acc]);
+ } else
+#endif
+ {
+ tcg_gen_mov_tl(cpu_gpr[reg], cpu_HI[acc]);
+ }
+ break;
+ case OPC_MFLO:
+#if defined(TARGET_MIPS64)
+ if (acc != 0) {
+ tcg_gen_ext32s_tl(cpu_gpr[reg], cpu_LO[acc]);
+ } else
+#endif
+ {
+ tcg_gen_mov_tl(cpu_gpr[reg], cpu_LO[acc]);
+ }
+ break;
+ case OPC_MTHI:
+ if (reg != 0) {
+#if defined(TARGET_MIPS64)
+ if (acc != 0) {
+ tcg_gen_ext32s_tl(cpu_HI[acc], cpu_gpr[reg]);
+ } else
+#endif
+ {
+ tcg_gen_mov_tl(cpu_HI[acc], cpu_gpr[reg]);
+ }
+ } else {
+ tcg_gen_movi_tl(cpu_HI[acc], 0);
+ }
+ break;
+ case OPC_MTLO:
+ if (reg != 0) {
+#if defined(TARGET_MIPS64)
+ if (acc != 0) {
+ tcg_gen_ext32s_tl(cpu_LO[acc], cpu_gpr[reg]);
+ } else
+#endif
+ {
+ tcg_gen_mov_tl(cpu_LO[acc], cpu_gpr[reg]);
+ }
+ } else {
+ tcg_gen_movi_tl(cpu_LO[acc], 0);
+ }
+ break;
+ }
+}
+
+static inline void gen_r6_ld(target_long addr, int reg, int memidx,
+ TCGMemOp memop)
+{
+ TCGv t0 = tcg_const_tl(addr);
+ tcg_gen_qemu_ld_tl(t0, t0, memidx, memop);
+ gen_store_gpr(t0, reg);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_pcrel(DisasContext *ctx, int opc, target_ulong pc,
+ int rs)
+{
+ target_long offset;
+ target_long addr;
+
+ switch (MASK_OPC_PCREL_TOP2BITS(opc)) {
+ case OPC_ADDIUPC:
+ if (rs != 0) {
+ offset = sextract32(ctx->opcode << 2, 0, 21);
+ addr = addr_add(ctx, pc, offset);
+ tcg_gen_movi_tl(cpu_gpr[rs], addr);
+ }
+ break;
+ case R6_OPC_LWPC:
+ offset = sextract32(ctx->opcode << 2, 0, 21);
+ addr = addr_add(ctx, pc, offset);
+ gen_r6_ld(addr, rs, ctx->mem_idx, MO_TESL);
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_LWUPC:
+ check_mips_64(ctx);
+ offset = sextract32(ctx->opcode << 2, 0, 21);
+ addr = addr_add(ctx, pc, offset);
+ gen_r6_ld(addr, rs, ctx->mem_idx, MO_TEUL);
+ break;
+#endif
+ default:
+ switch (MASK_OPC_PCREL_TOP5BITS(opc)) {
+ case OPC_AUIPC:
+ if (rs != 0) {
+ offset = sextract32(ctx->opcode, 0, 16) << 16;
+ addr = addr_add(ctx, pc, offset);
+ tcg_gen_movi_tl(cpu_gpr[rs], addr);
+ }
+ break;
+ case OPC_ALUIPC:
+ if (rs != 0) {
+ offset = sextract32(ctx->opcode, 0, 16) << 16;
+ addr = ~0xFFFF & addr_add(ctx, pc, offset);
+ tcg_gen_movi_tl(cpu_gpr[rs], addr);
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case R6_OPC_LDPC: /* bits 16 and 17 are part of immediate */
+ case R6_OPC_LDPC + (1 << 16):
+ case R6_OPC_LDPC + (2 << 16):
+ case R6_OPC_LDPC + (3 << 16):
+ check_mips_64(ctx);
+ offset = sextract32(ctx->opcode << 3, 0, 21);
+ addr = addr_add(ctx, (pc & ~0x7), offset);
+ gen_r6_ld(addr, rs, ctx->mem_idx, MO_TEQ);
+ break;
+#endif
+ default:
+ MIPS_INVAL("OPC_PCREL");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ }
+}
+
+static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt)
+{
+ TCGv t0, t1;
+
+ if (rd == 0) {
+ /* Treat as NOP. */
+ return;
+ }
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+
+ switch (opc) {
+ case R6_OPC_DIV:
+ {
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_ext32s_tl(t1, t1);
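+ /* Divide-by-zero and INT_MIN / -1 could fault in the host division,
+ so force the divisor to 1 in those cases; R6 leaves the result of
+ both cases UNPREDICTABLE. */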
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t2, t0, INT_MIN);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, -1);
+ tcg_gen_and_tl(t2, t2, t3);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
+ tcg_gen_or_tl(t2, t2, t3);
+ tcg_gen_movi_tl(t3, 0);
+ tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_div_tl(cpu_gpr[rd], t0, t1);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ }
+ break;
+ case R6_OPC_MOD:
+ {
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_ext32s_tl(t1, t1);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t2, t0, INT_MIN);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, -1);
+ tcg_gen_and_tl(t2, t2, t3);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
+ tcg_gen_or_tl(t2, t2, t3);
+ tcg_gen_movi_tl(t3, 0);
+ tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_rem_tl(cpu_gpr[rd], t0, t1);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ }
+ break;
+ case R6_OPC_DIVU:
+ {
+ TCGv t2 = tcg_const_tl(0);
+ TCGv t3 = tcg_const_tl(1);
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_ext32u_tl(t1, t1);
+ tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
+ tcg_gen_divu_tl(cpu_gpr[rd], t0, t1);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ }
+ break;
+ case R6_OPC_MODU:
+ {
+ TCGv t2 = tcg_const_tl(0);
+ TCGv t3 = tcg_const_tl(1);
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_ext32u_tl(t1, t1);
+ tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
+ tcg_gen_remu_tl(cpu_gpr[rd], t0, t1);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ }
+ break;
+ case R6_OPC_MUL:
+ {
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 t3 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t2, t0);
+ tcg_gen_trunc_tl_i32(t3, t1);
+ tcg_gen_mul_i32(t2, t2, t3);
+ tcg_gen_ext_i32_tl(cpu_gpr[rd], t2);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ }
+ break;
+ case R6_OPC_MUH:
+ {
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 t3 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t2, t0);
+ tcg_gen_trunc_tl_i32(t3, t1);
+ tcg_gen_muls2_i32(t2, t3, t2, t3);
+ tcg_gen_ext_i32_tl(cpu_gpr[rd], t3);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ }
+ break;
+ case R6_OPC_MULU:
+ {
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 t3 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t2, t0);
+ tcg_gen_trunc_tl_i32(t3, t1);
+ tcg_gen_mul_i32(t2, t2, t3);
+ tcg_gen_ext_i32_tl(cpu_gpr[rd], t2);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ }
+ break;
+ case R6_OPC_MUHU:
+ {
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 t3 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t2, t0);
+ tcg_gen_trunc_tl_i32(t3, t1);
+ tcg_gen_mulu2_i32(t2, t3, t2, t3);
+ tcg_gen_ext_i32_tl(cpu_gpr[rd], t3);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case R6_OPC_DDIV:
+ {
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t2, t0, -1LL << 63);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, -1LL);
+ tcg_gen_and_tl(t2, t2, t3);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
+ tcg_gen_or_tl(t2, t2, t3);
+ tcg_gen_movi_tl(t3, 0);
+ tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_div_tl(cpu_gpr[rd], t0, t1);
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ }
+ break;
+ case R6_OPC_DMOD:
+ {
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t2, t0, -1LL << 63);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, -1LL);
+ tcg_gen_and_tl(t2, t2, t3);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
+ tcg_gen_or_tl(t2, t2, t3);
+ tcg_gen_movi_tl(t3, 0);
+ tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_rem_tl(cpu_gpr[rd], t0, t1);
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ }
+ break;
+ case R6_OPC_DDIVU:
+ {
+ TCGv t2 = tcg_const_tl(0);
+ TCGv t3 = tcg_const_tl(1);
+ tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
+ tcg_gen_divu_i64(cpu_gpr[rd], t0, t1);
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ }
+ break;
+ case R6_OPC_DMODU:
+ {
+ TCGv t2 = tcg_const_tl(0);
+ TCGv t3 = tcg_const_tl(1);
+ tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
+ tcg_gen_remu_i64(cpu_gpr[rd], t0, t1);
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ }
+ break;
+ case R6_OPC_DMUL:
+ tcg_gen_mul_i64(cpu_gpr[rd], t0, t1);
+ break;
+ case R6_OPC_DMUH:
+ {
+ TCGv t2 = tcg_temp_new();
+ tcg_gen_muls2_i64(t2, cpu_gpr[rd], t0, t1);
+ tcg_temp_free(t2);
+ }
+ break;
+ case R6_OPC_DMULU:
+ tcg_gen_mul_i64(cpu_gpr[rd], t0, t1);
+ break;
+ case R6_OPC_DMUHU:
+ {
+ TCGv t2 = tcg_temp_new();
+ tcg_gen_mulu2_i64(t2, cpu_gpr[rd], t0, t1);
+ tcg_temp_free(t2);
+ }
+ break;
+#endif
+ default:
+ MIPS_INVAL("r6 mul/div");
+ generate_exception_end(ctx, EXCP_RI);
+ goto out;
+ }
+ out:
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
+static void gen_muldiv(DisasContext *ctx, uint32_t opc,
+ int acc, int rs, int rt)
+{
+ TCGv t0, t1;
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+
+ if (acc != 0) {
+ check_dsp(ctx);
+ }
+
+ switch (opc) {
+ case OPC_DIV:
+ {
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_ext32s_tl(t1, t1);
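+ /* As in the R6 variant above, force the divisor to 1 for
+ divide-by-zero and INT_MIN / -1 so the host division cannot fault. */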
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t2, t0, INT_MIN);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, -1);
+ tcg_gen_and_tl(t2, t2, t3);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
+ tcg_gen_or_tl(t2, t2, t3);
+ tcg_gen_movi_tl(t3, 0);
+ tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_div_tl(cpu_LO[acc], t0, t1);
+ tcg_gen_rem_tl(cpu_HI[acc], t0, t1);
+ tcg_gen_ext32s_tl(cpu_LO[acc], cpu_LO[acc]);
+ tcg_gen_ext32s_tl(cpu_HI[acc], cpu_HI[acc]);
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ }
+ break;
+ case OPC_DIVU:
+ {
+ TCGv t2 = tcg_const_tl(0);
+ TCGv t3 = tcg_const_tl(1);
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_ext32u_tl(t1, t1);
+ tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
+ tcg_gen_divu_tl(cpu_LO[acc], t0, t1);
+ tcg_gen_remu_tl(cpu_HI[acc], t0, t1);
+ tcg_gen_ext32s_tl(cpu_LO[acc], cpu_LO[acc]);
+ tcg_gen_ext32s_tl(cpu_HI[acc], cpu_HI[acc]);
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ }
+ break;
+ case OPC_MULT:
+ {
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 t3 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t2, t0);
+ tcg_gen_trunc_tl_i32(t3, t1);
+ tcg_gen_muls2_i32(t2, t3, t2, t3);
+ tcg_gen_ext_i32_tl(cpu_LO[acc], t2);
+ tcg_gen_ext_i32_tl(cpu_HI[acc], t3);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ }
+ break;
+ case OPC_MULTU:
+ {
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 t3 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t2, t0);
+ tcg_gen_trunc_tl_i32(t3, t1);
+ tcg_gen_mulu2_i32(t2, t3, t2, t3);
+ tcg_gen_ext_i32_tl(cpu_LO[acc], t2);
+ tcg_gen_ext_i32_tl(cpu_HI[acc], t3);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DDIV:
+ {
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t2, t0, -1LL << 63);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, -1LL);
+ tcg_gen_and_tl(t2, t2, t3);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
+ tcg_gen_or_tl(t2, t2, t3);
+ tcg_gen_movi_tl(t3, 0);
+ tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_div_tl(cpu_LO[acc], t0, t1);
+ tcg_gen_rem_tl(cpu_HI[acc], t0, t1);
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ }
+ break;
+ case OPC_DDIVU:
+ {
+ TCGv t2 = tcg_const_tl(0);
+ TCGv t3 = tcg_const_tl(1);
+ tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
+ tcg_gen_divu_i64(cpu_LO[acc], t0, t1);
+ tcg_gen_remu_i64(cpu_HI[acc], t0, t1);
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ }
+ break;
+ case OPC_DMULT:
+ tcg_gen_muls2_i64(cpu_LO[acc], cpu_HI[acc], t0, t1);
+ break;
+ case OPC_DMULTU:
+ tcg_gen_mulu2_i64(cpu_LO[acc], cpu_HI[acc], t0, t1);
+ break;
+#endif
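+ /* MADD/MADDU and MSUB/MSUBU widen the 32-bit operands, add the product
+ to or subtract it from the 64-bit {HI,LO} accumulator, and write the
+ two halves back. */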
+ case OPC_MADD:
+ {
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+
+ tcg_gen_ext_tl_i64(t2, t0);
+ tcg_gen_ext_tl_i64(t3, t1);
+ tcg_gen_mul_i64(t2, t2, t3);
+ tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
+ tcg_gen_add_i64(t2, t2, t3);
+ tcg_temp_free_i64(t3);
+ gen_move_low32(cpu_LO[acc], t2);
+ gen_move_high32(cpu_HI[acc], t2);
+ tcg_temp_free_i64(t2);
+ }
+ break;
+ case OPC_MADDU:
+ {
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_ext32u_tl(t1, t1);
+ tcg_gen_extu_tl_i64(t2, t0);
+ tcg_gen_extu_tl_i64(t3, t1);
+ tcg_gen_mul_i64(t2, t2, t3);
+ tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
+ tcg_gen_add_i64(t2, t2, t3);
+ tcg_temp_free_i64(t3);
+ gen_move_low32(cpu_LO[acc], t2);
+ gen_move_high32(cpu_HI[acc], t2);
+ tcg_temp_free_i64(t2);
+ }
+ break;
+ case OPC_MSUB:
+ {
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+
+ tcg_gen_ext_tl_i64(t2, t0);
+ tcg_gen_ext_tl_i64(t3, t1);
+ tcg_gen_mul_i64(t2, t2, t3);
+ tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
+ tcg_gen_sub_i64(t2, t3, t2);
+ tcg_temp_free_i64(t3);
+ gen_move_low32(cpu_LO[acc], t2);
+ gen_move_high32(cpu_HI[acc], t2);
+ tcg_temp_free_i64(t2);
+ }
+ break;
+ case OPC_MSUBU:
+ {
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_ext32u_tl(t1, t1);
+ tcg_gen_extu_tl_i64(t2, t0);
+ tcg_gen_extu_tl_i64(t3, t1);
+ tcg_gen_mul_i64(t2, t2, t3);
+ tcg_gen_concat_tl_i64(t3, cpu_LO[acc], cpu_HI[acc]);
+ tcg_gen_sub_i64(t2, t3, t2);
+ tcg_temp_free_i64(t3);
+ gen_move_low32(cpu_LO[acc], t2);
+ gen_move_high32(cpu_HI[acc], t2);
+ tcg_temp_free_i64(t2);
+ }
+ break;
+ default:
+ MIPS_INVAL("mul/div");
+ generate_exception_end(ctx, EXCP_RI);
+ goto out;
+ }
+ out:
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
+static void gen_mul_vr54xx (DisasContext *ctx, uint32_t opc,
+ int rd, int rs, int rt)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+
+ switch (opc) {
+ case OPC_VR54XX_MULS:
+ gen_helper_muls(t0, cpu_env, t0, t1);
+ break;
+ case OPC_VR54XX_MULSU:
+ gen_helper_mulsu(t0, cpu_env, t0, t1);
+ break;
+ case OPC_VR54XX_MACC:
+ gen_helper_macc(t0, cpu_env, t0, t1);
+ break;
+ case OPC_VR54XX_MACCU:
+ gen_helper_maccu(t0, cpu_env, t0, t1);
+ break;
+ case OPC_VR54XX_MSAC:
+ gen_helper_msac(t0, cpu_env, t0, t1);
+ break;
+ case OPC_VR54XX_MSACU:
+ gen_helper_msacu(t0, cpu_env, t0, t1);
+ break;
+ case OPC_VR54XX_MULHI:
+ gen_helper_mulhi(t0, cpu_env, t0, t1);
+ break;
+ case OPC_VR54XX_MULHIU:
+ gen_helper_mulhiu(t0, cpu_env, t0, t1);
+ break;
+ case OPC_VR54XX_MULSHI:
+ gen_helper_mulshi(t0, cpu_env, t0, t1);
+ break;
+ case OPC_VR54XX_MULSHIU:
+ gen_helper_mulshiu(t0, cpu_env, t0, t1);
+ break;
+ case OPC_VR54XX_MACCHI:
+ gen_helper_macchi(t0, cpu_env, t0, t1);
+ break;
+ case OPC_VR54XX_MACCHIU:
+ gen_helper_macchiu(t0, cpu_env, t0, t1);
+ break;
+ case OPC_VR54XX_MSACHI:
+ gen_helper_msachi(t0, cpu_env, t0, t1);
+ break;
+ case OPC_VR54XX_MSACHIU:
+ gen_helper_msachiu(t0, cpu_env, t0, t1);
+ break;
+ default:
+ MIPS_INVAL("mul vr54xx");
+ generate_exception_end(ctx, EXCP_RI);
+ goto out;
+ }
+ gen_store_gpr(t0, rd);
+
+ out:
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
+static void gen_cl (DisasContext *ctx, uint32_t opc,
+ int rd, int rs)
+{
+ TCGv t0;
+
+ if (rd == 0) {
+ /* Treat as NOP. */
+ return;
+ }
+ t0 = tcg_temp_new();
+ gen_load_gpr(t0, rs);
+ switch (opc) {
+ case OPC_CLO:
+ case R6_OPC_CLO:
+ gen_helper_clo(cpu_gpr[rd], t0);
+ break;
+ case OPC_CLZ:
+ case R6_OPC_CLZ:
+ gen_helper_clz(cpu_gpr[rd], t0);
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DCLO:
+ case R6_OPC_DCLO:
+ gen_helper_dclo(cpu_gpr[rd], t0);
+ break;
+ case OPC_DCLZ:
+ case R6_OPC_DCLZ:
+ gen_helper_dclz(cpu_gpr[rd], t0);
+ break;
+#endif
+ }
+ tcg_temp_free(t0);
+}
+
+/* Godson integer instructions */
+static void gen_loongson_integer(DisasContext *ctx, uint32_t opc,
+ int rd, int rs, int rt)
+{
+ TCGv t0, t1;
+
+ if (rd == 0) {
+ /* Treat as NOP. */
+ return;
+ }
+
+ switch (opc) {
+ case OPC_MULT_G_2E:
+ case OPC_MULT_G_2F:
+ case OPC_MULTU_G_2E:
+ case OPC_MULTU_G_2F:
+#if defined(TARGET_MIPS64)
+ case OPC_DMULT_G_2E:
+ case OPC_DMULT_G_2F:
+ case OPC_DMULTU_G_2E:
+ case OPC_DMULTU_G_2F:
+#endif
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ break;
+ default:
+ t0 = tcg_temp_local_new();
+ t1 = tcg_temp_local_new();
+ break;
+ }
+
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+
+ switch (opc) {
+ case OPC_MULT_G_2E:
+ case OPC_MULT_G_2F:
+ tcg_gen_mul_tl(cpu_gpr[rd], t0, t1);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ break;
+ case OPC_MULTU_G_2E:
+ case OPC_MULTU_G_2F:
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_ext32u_tl(t1, t1);
+ tcg_gen_mul_tl(cpu_gpr[rd], t0, t1);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ break;
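+ /* The Godson DIV.G/MOD.G family writes its result directly to rd.
+ The special cases (division by zero, INT_MIN / -1) are resolved
+ inline so the host division or remainder cannot fault. */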
+ case OPC_DIV_G_2E:
+ case OPC_DIV_G_2F:
+ {
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ TCGLabel *l3 = gen_new_label();
+ tcg_gen_ext32s_tl(t0, t0);
+ tcg_gen_ext32s_tl(t1, t1);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1);
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ tcg_gen_br(l3);
+ gen_set_label(l1);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t0, INT_MIN, l2);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t1, -1, l2);
+ tcg_gen_mov_tl(cpu_gpr[rd], t0);
+ tcg_gen_br(l3);
+ gen_set_label(l2);
+ tcg_gen_div_tl(cpu_gpr[rd], t0, t1);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ gen_set_label(l3);
+ }
+ break;
+ case OPC_DIVU_G_2E:
+ case OPC_DIVU_G_2F:
+ {
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_ext32u_tl(t1, t1);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1);
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_divu_tl(cpu_gpr[rd], t0, t1);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ gen_set_label(l2);
+ }
+ break;
+ case OPC_MOD_G_2E:
+ case OPC_MOD_G_2F:
+ {
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ TCGLabel *l3 = gen_new_label();
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_ext32u_tl(t1, t1);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t0, INT_MIN, l2);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t1, -1, l2);
+ gen_set_label(l1);
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ tcg_gen_br(l3);
+ gen_set_label(l2);
+ tcg_gen_rem_tl(cpu_gpr[rd], t0, t1);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ gen_set_label(l3);
+ }
+ break;
+ case OPC_MODU_G_2E:
+ case OPC_MODU_G_2F:
+ {
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_ext32u_tl(t1, t1);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1);
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_remu_tl(cpu_gpr[rd], t0, t1);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ gen_set_label(l2);
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DMULT_G_2E:
+ case OPC_DMULT_G_2F:
+ tcg_gen_mul_tl(cpu_gpr[rd], t0, t1);
+ break;
+ case OPC_DMULTU_G_2E:
+ case OPC_DMULTU_G_2F:
+ tcg_gen_mul_tl(cpu_gpr[rd], t0, t1);
+ break;
+ case OPC_DDIV_G_2E:
+ case OPC_DDIV_G_2F:
+ {
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ TCGLabel *l3 = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1);
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ tcg_gen_br(l3);
+ gen_set_label(l1);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t0, -1LL << 63, l2);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t1, -1LL, l2);
+ tcg_gen_mov_tl(cpu_gpr[rd], t0);
+ tcg_gen_br(l3);
+ gen_set_label(l2);
+ tcg_gen_div_tl(cpu_gpr[rd], t0, t1);
+ gen_set_label(l3);
+ }
+ break;
+ case OPC_DDIVU_G_2E:
+ case OPC_DDIVU_G_2F:
+ {
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1);
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_divu_tl(cpu_gpr[rd], t0, t1);
+ gen_set_label(l2);
+ }
+ break;
+ case OPC_DMOD_G_2E:
+ case OPC_DMOD_G_2F:
+ {
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ TCGLabel *l3 = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t0, -1LL << 63, l2);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t1, -1LL, l2);
+ gen_set_label(l1);
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ tcg_gen_br(l3);
+ gen_set_label(l2);
+ tcg_gen_rem_tl(cpu_gpr[rd], t0, t1);
+ gen_set_label(l3);
+ }
+ break;
+ case OPC_DMODU_G_2E:
+ case OPC_DMODU_G_2F:
+ {
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_NE, t1, 0, l1);
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_remu_tl(cpu_gpr[rd], t0, t1);
+ gen_set_label(l2);
+ }
+ break;
+#endif
+ }
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
+/* Loongson multimedia instructions */
+static void gen_loongson_multimedia(DisasContext *ctx, int rd, int rs, int rt)
+{
+ uint32_t opc, shift_max;
+ TCGv_i64 t0, t1;
+
+ opc = MASK_LMI(ctx->opcode);
+ switch (opc) {
+ case OPC_ADD_CP2:
+ case OPC_SUB_CP2:
+ case OPC_DADD_CP2:
+ case OPC_DSUB_CP2:
+ t0 = tcg_temp_local_new_i64();
+ t1 = tcg_temp_local_new_i64();
+ break;
+ default:
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ break;
+ }
+
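+ /* The Loongson multimedia registers are mapped onto the FPU registers,
+ hence the coprocessor 1 enable check and the use of the FPR
+ load/store helpers. */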
+ check_cp1_enabled(ctx);
+ gen_load_fpr64(ctx, t0, rs);
+ gen_load_fpr64(ctx, t1, rt);
+
+#define LMI_HELPER(UP, LO) \
+ case OPC_##UP: gen_helper_##LO(t0, t0, t1); break
+#define LMI_HELPER_1(UP, LO) \
+ case OPC_##UP: gen_helper_##LO(t0, t0); break
+#define LMI_DIRECT(UP, LO, OP) \
+ case OPC_##UP: tcg_gen_##OP##_i64(t0, t0, t1); break
+
+ switch (opc) {
+ LMI_HELPER(PADDSH, paddsh);
+ LMI_HELPER(PADDUSH, paddush);
+ LMI_HELPER(PADDH, paddh);
+ LMI_HELPER(PADDW, paddw);
+ LMI_HELPER(PADDSB, paddsb);
+ LMI_HELPER(PADDUSB, paddusb);
+ LMI_HELPER(PADDB, paddb);
+
+ LMI_HELPER(PSUBSH, psubsh);
+ LMI_HELPER(PSUBUSH, psubush);
+ LMI_HELPER(PSUBH, psubh);
+ LMI_HELPER(PSUBW, psubw);
+ LMI_HELPER(PSUBSB, psubsb);
+ LMI_HELPER(PSUBUSB, psubusb);
+ LMI_HELPER(PSUBB, psubb);
+
+ LMI_HELPER(PSHUFH, pshufh);
+ LMI_HELPER(PACKSSWH, packsswh);
+ LMI_HELPER(PACKSSHB, packsshb);
+ LMI_HELPER(PACKUSHB, packushb);
+
+ LMI_HELPER(PUNPCKLHW, punpcklhw);
+ LMI_HELPER(PUNPCKHHW, punpckhhw);
+ LMI_HELPER(PUNPCKLBH, punpcklbh);
+ LMI_HELPER(PUNPCKHBH, punpckhbh);
+ LMI_HELPER(PUNPCKLWD, punpcklwd);
+ LMI_HELPER(PUNPCKHWD, punpckhwd);
+
+ LMI_HELPER(PAVGH, pavgh);
+ LMI_HELPER(PAVGB, pavgb);
+ LMI_HELPER(PMAXSH, pmaxsh);
+ LMI_HELPER(PMINSH, pminsh);
+ LMI_HELPER(PMAXUB, pmaxub);
+ LMI_HELPER(PMINUB, pminub);
+
+ LMI_HELPER(PCMPEQW, pcmpeqw);
+ LMI_HELPER(PCMPGTW, pcmpgtw);
+ LMI_HELPER(PCMPEQH, pcmpeqh);
+ LMI_HELPER(PCMPGTH, pcmpgth);
+ LMI_HELPER(PCMPEQB, pcmpeqb);
+ LMI_HELPER(PCMPGTB, pcmpgtb);
+
+ LMI_HELPER(PSLLW, psllw);
+ LMI_HELPER(PSLLH, psllh);
+ LMI_HELPER(PSRLW, psrlw);
+ LMI_HELPER(PSRLH, psrlh);
+ LMI_HELPER(PSRAW, psraw);
+ LMI_HELPER(PSRAH, psrah);
+
+ LMI_HELPER(PMULLH, pmullh);
+ LMI_HELPER(PMULHH, pmulhh);
+ LMI_HELPER(PMULHUH, pmulhuh);
+ LMI_HELPER(PMADDHW, pmaddhw);
+
+ LMI_HELPER(PASUBUB, pasubub);
+ LMI_HELPER_1(BIADD, biadd);
+ LMI_HELPER_1(PMOVMSKB, pmovmskb);
+
+ LMI_DIRECT(PADDD, paddd, add);
+ LMI_DIRECT(PSUBD, psubd, sub);
+ LMI_DIRECT(XOR_CP2, xor, xor);
+ LMI_DIRECT(NOR_CP2, nor, nor);
+ LMI_DIRECT(AND_CP2, and, and);
+ LMI_DIRECT(OR_CP2, or, or);
+
+ case OPC_PANDN:
+ tcg_gen_andc_i64(t0, t1, t0);
+ break;
+
+ case OPC_PINSRH_0:
+ tcg_gen_deposit_i64(t0, t0, t1, 0, 16);
+ break;
+ case OPC_PINSRH_1:
+ tcg_gen_deposit_i64(t0, t0, t1, 16, 16);
+ break;
+ case OPC_PINSRH_2:
+ tcg_gen_deposit_i64(t0, t0, t1, 32, 16);
+ break;
+ case OPC_PINSRH_3:
+ tcg_gen_deposit_i64(t0, t0, t1, 48, 16);
+ break;
+
+ case OPC_PEXTRH:
+ tcg_gen_andi_i64(t1, t1, 3);
+ tcg_gen_shli_i64(t1, t1, 4);
+ tcg_gen_shr_i64(t0, t0, t1);
+ tcg_gen_ext16u_i64(t0, t0);
+ break;
+
+ case OPC_ADDU_CP2:
+ tcg_gen_add_i64(t0, t0, t1);
+ tcg_gen_ext32s_i64(t0, t0);
+ break;
+ case OPC_SUBU_CP2:
+ tcg_gen_sub_i64(t0, t0, t1);
+ tcg_gen_ext32s_i64(t0, t0);
+ break;
+
+ case OPC_SLL_CP2:
+ shift_max = 32;
+ goto do_shift;
+ case OPC_SRL_CP2:
+ shift_max = 32;
+ goto do_shift;
+ case OPC_SRA_CP2:
+ shift_max = 32;
+ goto do_shift;
+ case OPC_DSLL_CP2:
+ shift_max = 64;
+ goto do_shift;
+ case OPC_DSRL_CP2:
+ shift_max = 64;
+ goto do_shift;
+ case OPC_DSRA_CP2:
+ shift_max = 64;
+ goto do_shift;
+ do_shift:
+ /* Mask the shift count so it cannot trigger TCG undefined behaviour. */
+ tcg_gen_andi_i64(t1, t1, shift_max - 1);
+
+ switch (opc) {
+ case OPC_SLL_CP2:
+ case OPC_DSLL_CP2:
+ tcg_gen_shl_i64(t0, t0, t1);
+ break;
+ case OPC_SRA_CP2:
+ case OPC_DSRA_CP2:
+ /* Since SRA is UndefinedResult without sign-extended inputs,
+ we can treat SRA and DSRA the same. */
+ tcg_gen_sar_i64(t0, t0, t1);
+ break;
+ case OPC_SRL_CP2:
+ /* We want to shift in zeros for SRL; zero-extend first. */
+ tcg_gen_ext32u_i64(t0, t0);
+ /* FALLTHRU */
+ case OPC_DSRL_CP2:
+ tcg_gen_shr_i64(t0, t0, t1);
+ break;
+ }
+
+ if (shift_max == 32) {
+ tcg_gen_ext32s_i64(t0, t0);
+ }
+
+ /* Shifts larger than MAX produce zero. */
+ tcg_gen_setcondi_i64(TCG_COND_LTU, t1, t1, shift_max);
+ tcg_gen_neg_i64(t1, t1);
+ tcg_gen_and_i64(t0, t0, t1);
+ break;
+
+ case OPC_ADD_CP2:
+ case OPC_DADD_CP2:
+ {
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGLabel *lab = gen_new_label();
+
+ tcg_gen_mov_i64(t2, t0);
+ tcg_gen_add_i64(t0, t1, t2);
+ if (opc == OPC_ADD_CP2) {
+ tcg_gen_ext32s_i64(t0, t0);
+ }
+ tcg_gen_xor_i64(t1, t1, t2);
+ tcg_gen_xor_i64(t2, t2, t0);
+ tcg_gen_andc_i64(t1, t2, t1);
+ tcg_temp_free_i64(t2);
+ tcg_gen_brcondi_i64(TCG_COND_GE, t1, 0, lab);
+ generate_exception(ctx, EXCP_OVERFLOW);
+ gen_set_label(lab);
+ break;
+ }
+
+ case OPC_SUB_CP2:
+ case OPC_DSUB_CP2:
+ {
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGLabel *lab = gen_new_label();
+
+ tcg_gen_mov_i64(t2, t0);
+ tcg_gen_sub_i64(t0, t1, t2);
+ if (opc == OPC_SUB_CP2) {
+ tcg_gen_ext32s_i64(t0, t0);
+ }
+ tcg_gen_xor_i64(t1, t1, t2);
+ tcg_gen_xor_i64(t2, t2, t0);
+ tcg_gen_and_i64(t1, t1, t2);
+ tcg_temp_free_i64(t2);
+ tcg_gen_brcondi_i64(TCG_COND_GE, t1, 0, lab);
+ generate_exception(ctx, EXCP_OVERFLOW);
+ gen_set_label(lab);
+ break;
+ }
+
+ case OPC_PMULUW:
+ tcg_gen_ext32u_i64(t0, t0);
+ tcg_gen_ext32u_i64(t1, t1);
+ tcg_gen_mul_i64(t0, t0, t1);
+ break;
+
+ case OPC_SEQU_CP2:
+ case OPC_SEQ_CP2:
+ case OPC_SLTU_CP2:
+ case OPC_SLT_CP2:
+ case OPC_SLEU_CP2:
+ case OPC_SLE_CP2:
+ /* ??? Document is unclear: Set FCC[CC]. Does that mean the
+ FD field is the CC field? */
+ default:
+ MIPS_INVAL("loongson_cp2");
+ generate_exception_end(ctx, EXCP_RI);
+ return;
+ }
+
+#undef LMI_HELPER
+#undef LMI_DIRECT
+
+ gen_store_fpr64(ctx, t0, rd);
+
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+}
+
+/* Traps */
+static void gen_trap (DisasContext *ctx, uint32_t opc,
+ int rs, int rt, int16_t imm)
+{
+ int cond;
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+
+ cond = 0;
+ /* Load needed operands */
+ switch (opc) {
+ case OPC_TEQ:
+ case OPC_TGE:
+ case OPC_TGEU:
+ case OPC_TLT:
+ case OPC_TLTU:
+ case OPC_TNE:
+ /* Compare two registers */
+ if (rs != rt) {
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+ cond = 1;
+ }
+ break;
+ case OPC_TEQI:
+ case OPC_TGEI:
+ case OPC_TGEIU:
+ case OPC_TLTI:
+ case OPC_TLTIU:
+ case OPC_TNEI:
+ /* Compare register to immediate */
+ if (rs != 0 || imm != 0) {
+ gen_load_gpr(t0, rs);
+ tcg_gen_movi_tl(t1, (int32_t)imm);
+ cond = 1;
+ }
+ break;
+ }
+ if (cond == 0) {
+ switch (opc) {
+ case OPC_TEQ: /* rs == rs */
+ case OPC_TEQI: /* r0 == 0 */
+ case OPC_TGE: /* rs >= rs */
+ case OPC_TGEI: /* r0 >= 0 */
+ case OPC_TGEU: /* rs >= rs unsigned */
+ case OPC_TGEIU: /* r0 >= 0 unsigned */
+ /* Always trap */
+ generate_exception_end(ctx, EXCP_TRAP);
+ break;
+ case OPC_TLT: /* rs < rs */
+ case OPC_TLTI: /* r0 < 0 */
+ case OPC_TLTU: /* rs < rs unsigned */
+ case OPC_TLTIU: /* r0 < 0 unsigned */
+ case OPC_TNE: /* rs != rs */
+ case OPC_TNEI: /* r0 != 0 */
+ /* Never trap: treat as NOP. */
+ break;
+ }
+ } else {
+ TCGLabel *l1 = gen_new_label();
+
+ switch (opc) {
+ case OPC_TEQ:
+ case OPC_TEQI:
+ tcg_gen_brcond_tl(TCG_COND_NE, t0, t1, l1);
+ break;
+ case OPC_TGE:
+ case OPC_TGEI:
+ tcg_gen_brcond_tl(TCG_COND_LT, t0, t1, l1);
+ break;
+ case OPC_TGEU:
+ case OPC_TGEIU:
+ tcg_gen_brcond_tl(TCG_COND_LTU, t0, t1, l1);
+ break;
+ case OPC_TLT:
+ case OPC_TLTI:
+ tcg_gen_brcond_tl(TCG_COND_GE, t0, t1, l1);
+ break;
+ case OPC_TLTU:
+ case OPC_TLTIU:
+ tcg_gen_brcond_tl(TCG_COND_GEU, t0, t1, l1);
+ break;
+ case OPC_TNE:
+ case OPC_TNEI:
+ tcg_gen_brcond_tl(TCG_COND_EQ, t0, t1, l1);
+ break;
+ }
+ generate_exception(ctx, EXCP_TRAP);
+ gen_set_label(l1);
+ }
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
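+/* Direct block chaining is used only when not single-stepping and, in
+ system mode, only when the branch target lies on the same guest page as
+ the current TB. */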
+static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
+{
+ if (unlikely(ctx->singlestep_enabled)) {
+ return false;
+ }
+
+#ifndef CONFIG_USER_ONLY
+ return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
+#else
+ return true;
+#endif
+}
+
+static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
+{
+ if (use_goto_tb(ctx, dest)) {
+ tcg_gen_goto_tb(n);
+ gen_save_pc(dest);
+ tcg_gen_exit_tb((uintptr_t)ctx->tb + n);
+ } else {
+ gen_save_pc(dest);
+ if (ctx->singlestep_enabled) {
+ save_cpu_state(ctx, 0);
+ gen_helper_raise_exception_debug(cpu_env);
+ }
+ tcg_gen_exit_tb(0);
+ }
+}
+
+/* Branches (before delay slot) */
+static void gen_compute_branch (DisasContext *ctx, uint32_t opc,
+ int insn_bytes,
+ int rs, int rt, int32_t offset,
+ int delayslot_size)
+{
+ target_ulong btgt = -1;
+ int blink = 0;
+ int bcond_compute = 0;
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+
+ if (ctx->hflags & MIPS_HFLAG_BMASK) {
+#ifdef MIPS_DEBUG_DISAS
+ LOG_DISAS("Branch in delay / forbidden slot at PC 0x"
+ TARGET_FMT_lx "\n", ctx->pc);
+#endif
+ generate_exception_end(ctx, EXCP_RI);
+ goto out;
+ }
+
+ /* Load needed operands */
+ switch (opc) {
+ case OPC_BEQ:
+ case OPC_BEQL:
+ case OPC_BNE:
+ case OPC_BNEL:
+ /* Compare two registers */
+ if (rs != rt) {
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+ bcond_compute = 1;
+ }
+ btgt = ctx->pc + insn_bytes + offset;
+ break;
+ case OPC_BGEZ:
+ case OPC_BGEZAL:
+ case OPC_BGEZALL:
+ case OPC_BGEZL:
+ case OPC_BGTZ:
+ case OPC_BGTZL:
+ case OPC_BLEZ:
+ case OPC_BLEZL:
+ case OPC_BLTZ:
+ case OPC_BLTZAL:
+ case OPC_BLTZALL:
+ case OPC_BLTZL:
+ /* Compare to zero */
+ if (rs != 0) {
+ gen_load_gpr(t0, rs);
+ bcond_compute = 1;
+ }
+ btgt = ctx->pc + insn_bytes + offset;
+ break;
+ case OPC_BPOSGE32:
+#if defined(TARGET_MIPS64)
+ case OPC_BPOSGE64:
+ tcg_gen_andi_tl(t0, cpu_dspctrl, 0x7F);
+#else
+ tcg_gen_andi_tl(t0, cpu_dspctrl, 0x3F);
+#endif
+ bcond_compute = 1;
+ btgt = ctx->pc + insn_bytes + offset;
+ break;
+ case OPC_J:
+ case OPC_JAL:
+ case OPC_JALX:
+ /* Jump to immediate */
+ btgt = ((ctx->pc + insn_bytes) & (int32_t)0xF0000000) | (uint32_t)offset;
+ break;
+ case OPC_JR:
+ case OPC_JALR:
+ /* Jump to register */
+ if (offset != 0 && offset != 16) {
+ /* Hint = 0 is JR/JALR, hint 16 is JR.HB/JALR.HB, the
+ others are reserved. */
+ MIPS_INVAL("jump hint");
+ generate_exception_end(ctx, EXCP_RI);
+ goto out;
+ }
+ gen_load_gpr(btarget, rs);
+ break;
+ default:
+ MIPS_INVAL("branch/jump");
+ generate_exception_end(ctx, EXCP_RI);
+ goto out;
+ }
+ if (bcond_compute == 0) {
+ /* No condition to be computed */
+ switch (opc) {
+ case OPC_BEQ: /* rx == rx */
+ case OPC_BEQL: /* rx == rx likely */
+ case OPC_BGEZ: /* 0 >= 0 */
+ case OPC_BGEZL: /* 0 >= 0 likely */
+ case OPC_BLEZ: /* 0 <= 0 */
+ case OPC_BLEZL: /* 0 <= 0 likely */
+ /* Always take */
+ ctx->hflags |= MIPS_HFLAG_B;
+ break;
+ case OPC_BGEZAL: /* 0 >= 0 */
+ case OPC_BGEZALL: /* 0 >= 0 likely */
+ /* Always take and link */
+ blink = 31;
+ ctx->hflags |= MIPS_HFLAG_B;
+ break;
+ case OPC_BNE: /* rx != rx */
+ case OPC_BGTZ: /* 0 > 0 */
+ case OPC_BLTZ: /* 0 < 0 */
+ /* Treat as NOP. */
+ goto out;
+ case OPC_BLTZAL: /* 0 < 0 */
+ /* Handle as an unconditional branch to get correct delay
+ slot checking. */
+ blink = 31;
+ btgt = ctx->pc + insn_bytes + delayslot_size;
+ ctx->hflags |= MIPS_HFLAG_B;
+ break;
+ case OPC_BLTZALL: /* 0 < 0 likely */
+ tcg_gen_movi_tl(cpu_gpr[31], ctx->pc + 8);
+ /* Skip the instruction in the delay slot */
+ ctx->pc += 4;
+ goto out;
+ case OPC_BNEL: /* rx != rx likely */
+ case OPC_BGTZL: /* 0 > 0 likely */
+ case OPC_BLTZL: /* 0 < 0 likely */
+ /* Skip the instruction in the delay slot */
+ ctx->pc += 4;
+ goto out;
+ case OPC_J:
+ ctx->hflags |= MIPS_HFLAG_B;
+ break;
+ case OPC_JALX:
+ ctx->hflags |= MIPS_HFLAG_BX;
+ /* Fallthrough */
+ case OPC_JAL:
+ blink = 31;
+ ctx->hflags |= MIPS_HFLAG_B;
+ break;
+ case OPC_JR:
+ ctx->hflags |= MIPS_HFLAG_BR;
+ break;
+ case OPC_JALR:
+ blink = rt;
+ ctx->hflags |= MIPS_HFLAG_BR;
+ break;
+ default:
+ MIPS_INVAL("branch/jump");
+ generate_exception_end(ctx, EXCP_RI);
+ goto out;
+ }
+ } else {
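+        /* Run-time condition: evaluate it into 'bcond' and mark the branch
+           as ordinary conditional (not_likely) or branch-likely (likely). */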
+ switch (opc) {
+ case OPC_BEQ:
+ tcg_gen_setcond_tl(TCG_COND_EQ, bcond, t0, t1);
+ goto not_likely;
+ case OPC_BEQL:
+ tcg_gen_setcond_tl(TCG_COND_EQ, bcond, t0, t1);
+ goto likely;
+ case OPC_BNE:
+ tcg_gen_setcond_tl(TCG_COND_NE, bcond, t0, t1);
+ goto not_likely;
+ case OPC_BNEL:
+ tcg_gen_setcond_tl(TCG_COND_NE, bcond, t0, t1);
+ goto likely;
+ case OPC_BGEZ:
+ tcg_gen_setcondi_tl(TCG_COND_GE, bcond, t0, 0);
+ goto not_likely;
+ case OPC_BGEZL:
+ tcg_gen_setcondi_tl(TCG_COND_GE, bcond, t0, 0);
+ goto likely;
+ case OPC_BGEZAL:
+ tcg_gen_setcondi_tl(TCG_COND_GE, bcond, t0, 0);
+ blink = 31;
+ goto not_likely;
+ case OPC_BGEZALL:
+ tcg_gen_setcondi_tl(TCG_COND_GE, bcond, t0, 0);
+ blink = 31;
+ goto likely;
+ case OPC_BGTZ:
+ tcg_gen_setcondi_tl(TCG_COND_GT, bcond, t0, 0);
+ goto not_likely;
+ case OPC_BGTZL:
+ tcg_gen_setcondi_tl(TCG_COND_GT, bcond, t0, 0);
+ goto likely;
+ case OPC_BLEZ:
+ tcg_gen_setcondi_tl(TCG_COND_LE, bcond, t0, 0);
+ goto not_likely;
+ case OPC_BLEZL:
+ tcg_gen_setcondi_tl(TCG_COND_LE, bcond, t0, 0);
+ goto likely;
+ case OPC_BLTZ:
+ tcg_gen_setcondi_tl(TCG_COND_LT, bcond, t0, 0);
+ goto not_likely;
+ case OPC_BLTZL:
+ tcg_gen_setcondi_tl(TCG_COND_LT, bcond, t0, 0);
+ goto likely;
+ case OPC_BPOSGE32:
+ tcg_gen_setcondi_tl(TCG_COND_GE, bcond, t0, 32);
+ goto not_likely;
+#if defined(TARGET_MIPS64)
+ case OPC_BPOSGE64:
+ tcg_gen_setcondi_tl(TCG_COND_GE, bcond, t0, 64);
+ goto not_likely;
+#endif
+ case OPC_BLTZAL:
+ tcg_gen_setcondi_tl(TCG_COND_LT, bcond, t0, 0);
+ blink = 31;
+ not_likely:
+ ctx->hflags |= MIPS_HFLAG_BC;
+ break;
+ case OPC_BLTZALL:
+ tcg_gen_setcondi_tl(TCG_COND_LT, bcond, t0, 0);
+ blink = 31;
+ likely:
+ ctx->hflags |= MIPS_HFLAG_BL;
+ break;
+ default:
+ MIPS_INVAL("conditional branch/jump");
+ generate_exception_end(ctx, EXCP_RI);
+ goto out;
+ }
+ }
+
+ ctx->btarget = btgt;
+
+ switch (delayslot_size) {
+ case 2:
+ ctx->hflags |= MIPS_HFLAG_BDS16;
+ break;
+ case 4:
+ ctx->hflags |= MIPS_HFLAG_BDS32;
+ break;
+ }
+
+ if (blink > 0) {
+ int post_delay = insn_bytes + delayslot_size;
+ int lowbit = !!(ctx->hflags & MIPS_HFLAG_M16);
+
+ tcg_gen_movi_tl(cpu_gpr[blink], ctx->pc + post_delay + lowbit);
+ }
+
+ out:
+    if (insn_bytes == 2) {
+        ctx->hflags |= MIPS_HFLAG_B16;
+    }
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
+/* special3 bitfield operations */
+static void gen_bitops (DisasContext *ctx, uint32_t opc, int rt,
+ int rs, int lsb, int msb)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+
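+    /* For the EXT family, msb encodes the field width minus one; for the
+       INS family it is the absolute most-significant bit position, which is
+       why the validity checks differ. */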
+ gen_load_gpr(t1, rs);
+ switch (opc) {
+ case OPC_EXT:
+ if (lsb + msb > 31) {
+ goto fail;
+ }
+ tcg_gen_shri_tl(t0, t1, lsb);
+ if (msb != 31) {
+ tcg_gen_andi_tl(t0, t0, (1U << (msb + 1)) - 1);
+ } else {
+ tcg_gen_ext32s_tl(t0, t0);
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DEXTU:
+ lsb += 32;
+ goto do_dext;
+ case OPC_DEXTM:
+ msb += 32;
+ goto do_dext;
+ case OPC_DEXT:
+ do_dext:
+ if (lsb + msb > 63) {
+ goto fail;
+ }
+ tcg_gen_shri_tl(t0, t1, lsb);
+ if (msb != 63) {
+ tcg_gen_andi_tl(t0, t0, (1ULL << (msb + 1)) - 1);
+ }
+ break;
+#endif
+ case OPC_INS:
+ if (lsb > msb) {
+ goto fail;
+ }
+ gen_load_gpr(t0, rt);
+ tcg_gen_deposit_tl(t0, t0, t1, lsb, msb - lsb + 1);
+ tcg_gen_ext32s_tl(t0, t0);
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DINSU:
+ lsb += 32;
+ /* FALLTHRU */
+ case OPC_DINSM:
+ msb += 32;
+ /* FALLTHRU */
+ case OPC_DINS:
+ if (lsb > msb) {
+ goto fail;
+ }
+ gen_load_gpr(t0, rt);
+ tcg_gen_deposit_tl(t0, t0, t1, lsb, msb - lsb + 1);
+ break;
+#endif
+ default:
+fail:
+ MIPS_INVAL("bitops");
+ generate_exception_end(ctx, EXCP_RI);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ return;
+ }
+ gen_store_gpr(t0, rt);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
+static void gen_bshfl (DisasContext *ctx, uint32_t op2, int rt, int rd)
+{
+ TCGv t0;
+
+ if (rd == 0) {
+ /* If no destination, treat it as a NOP. */
+ return;
+ }
+
+ t0 = tcg_temp_new();
+ gen_load_gpr(t0, rt);
+ switch (op2) {
+ case OPC_WSBH:
+ {
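+            /* WSBH: swap the two bytes within each 16-bit halfword. */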
+ TCGv t1 = tcg_temp_new();
+
+ tcg_gen_shri_tl(t1, t0, 8);
+ tcg_gen_andi_tl(t1, t1, 0x00FF00FF);
+ tcg_gen_shli_tl(t0, t0, 8);
+ tcg_gen_andi_tl(t0, t0, ~0x00FF00FF);
+ tcg_gen_or_tl(t0, t0, t1);
+ tcg_temp_free(t1);
+ tcg_gen_ext32s_tl(cpu_gpr[rd], t0);
+ }
+ break;
+ case OPC_SEB:
+ tcg_gen_ext8s_tl(cpu_gpr[rd], t0);
+ break;
+ case OPC_SEH:
+ tcg_gen_ext16s_tl(cpu_gpr[rd], t0);
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DSBH:
+ {
+ TCGv t1 = tcg_temp_new();
+
+ tcg_gen_shri_tl(t1, t0, 8);
+ tcg_gen_andi_tl(t1, t1, 0x00FF00FF00FF00FFULL);
+ tcg_gen_shli_tl(t0, t0, 8);
+ tcg_gen_andi_tl(t0, t0, ~0x00FF00FF00FF00FFULL);
+ tcg_gen_or_tl(cpu_gpr[rd], t0, t1);
+ tcg_temp_free(t1);
+ }
+ break;
+ case OPC_DSHD:
+ {
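+            /* DSHD: reverse the order of the four halfwords of the doubleword. */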
+ TCGv t1 = tcg_temp_new();
+
+ tcg_gen_shri_tl(t1, t0, 16);
+ tcg_gen_andi_tl(t1, t1, 0x0000FFFF0000FFFFULL);
+ tcg_gen_shli_tl(t0, t0, 16);
+ tcg_gen_andi_tl(t0, t0, ~0x0000FFFF0000FFFFULL);
+ tcg_gen_or_tl(t0, t0, t1);
+ tcg_gen_shri_tl(t1, t0, 32);
+ tcg_gen_shli_tl(t0, t0, 32);
+ tcg_gen_or_tl(cpu_gpr[rd], t0, t1);
+ tcg_temp_free(t1);
+ }
+ break;
+#endif
+ default:
+ MIPS_INVAL("bsfhl");
+ generate_exception_end(ctx, EXCP_RI);
+ tcg_temp_free(t0);
+ return;
+ }
+ tcg_temp_free(t0);
+}
+
+static void gen_lsa(DisasContext *ctx, int opc, int rd, int rs, int rt,
+ int imm2)
+{
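+    /* LSA/DLSA: rd = (rs << (imm2 + 1)) + rt, with the 32-bit form
+       sign-extending the result. */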
+ TCGv t0;
+ TCGv t1;
+ if (rd == 0) {
+ /* Treat as NOP. */
+ return;
+ }
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+ tcg_gen_shli_tl(t0, t0, imm2 + 1);
+ tcg_gen_add_tl(cpu_gpr[rd], t0, t1);
+ if (opc == OPC_LSA) {
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
+ }
+
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+
+ return;
+}
+
+static void gen_align(DisasContext *ctx, int opc, int rd, int rs, int rt,
+ int bp)
+{
+ TCGv t0;
+ if (rd == 0) {
+ /* Treat as NOP. */
+ return;
+ }
+ t0 = tcg_temp_new();
+ gen_load_gpr(t0, rt);
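+    /* bp == 0 is a plain move of rt; otherwise rd = (rt << 8*bp) |
+       (rs >> 8*(width - bp)), where width is 4 bytes for ALIGN (computed via
+       a 64-bit concatenation) and 8 bytes for DALIGN. */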
+ if (bp == 0) {
+ switch (opc) {
+ case OPC_ALIGN:
+ tcg_gen_ext32s_tl(cpu_gpr[rd], t0);
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DALIGN:
+ tcg_gen_mov_tl(cpu_gpr[rd], t0);
+ break;
+#endif
+ }
+ } else {
+ TCGv t1 = tcg_temp_new();
+ gen_load_gpr(t1, rs);
+ switch (opc) {
+ case OPC_ALIGN:
+ {
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ tcg_gen_concat_tl_i64(t2, t1, t0);
+ tcg_gen_shri_i64(t2, t2, 8 * (4 - bp));
+ gen_move_low32(cpu_gpr[rd], t2);
+ tcg_temp_free_i64(t2);
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DALIGN:
+ tcg_gen_shli_tl(t0, t0, 8 * bp);
+ tcg_gen_shri_tl(t1, t1, 8 * (8 - bp));
+ tcg_gen_or_tl(cpu_gpr[rd], t1, t0);
+ break;
+#endif
+ }
+ tcg_temp_free(t1);
+ }
+
+ tcg_temp_free(t0);
+}
+
+static void gen_bitswap(DisasContext *ctx, int opc, int rd, int rt)
+{
+ TCGv t0;
+ if (rd == 0) {
+ /* Treat as NOP. */
+ return;
+ }
+ t0 = tcg_temp_new();
+ gen_load_gpr(t0, rt);
+ switch (opc) {
+ case OPC_BITSWAP:
+ gen_helper_bitswap(cpu_gpr[rd], t0);
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DBITSWAP:
+ gen_helper_dbitswap(cpu_gpr[rd], t0);
+ break;
+#endif
+ }
+ tcg_temp_free(t0);
+}
+
+#ifndef CONFIG_USER_ONLY
+/* CP0 (MMU and control) */
+static inline void gen_mthc0_entrylo(TCGv arg, target_ulong off)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
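+    /* The guest value becomes the upper part of the 64-bit EntryLo image:
+       deposited at bits 61:30 on a 64-bit target (preserving the RI/XI bits
+       above it), or simply the high 32 bits on a 32-bit target. */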
+ tcg_gen_ext_tl_i64(t0, arg);
+ tcg_gen_ld_i64(t1, cpu_env, off);
+#if defined(TARGET_MIPS64)
+ tcg_gen_deposit_i64(t1, t1, t0, 30, 32);
+#else
+ tcg_gen_concat32_i64(t1, t1, t0);
+#endif
+ tcg_gen_st_i64(t1, cpu_env, off);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t0);
+}
+
+static inline void gen_mthc0_store64(TCGv arg, target_ulong off)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
+ tcg_gen_ext_tl_i64(t0, arg);
+ tcg_gen_ld_i64(t1, cpu_env, off);
+ tcg_gen_concat32_i64(t1, t1, t0);
+ tcg_gen_st_i64(t1, cpu_env, off);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t0);
+}
+
+static inline void gen_mfhc0_entrylo(TCGv arg, target_ulong off)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+
+ tcg_gen_ld_i64(t0, cpu_env, off);
+#if defined(TARGET_MIPS64)
+ tcg_gen_shri_i64(t0, t0, 30);
+#else
+ tcg_gen_shri_i64(t0, t0, 32);
+#endif
+ gen_move_low32(arg, t0);
+ tcg_temp_free_i64(t0);
+}
+
+static inline void gen_mfhc0_load64(TCGv arg, target_ulong off, int shift)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+
+ tcg_gen_ld_i64(t0, cpu_env, off);
+ tcg_gen_shri_i64(t0, t0, 32 + shift);
+ gen_move_low32(arg, t0);
+ tcg_temp_free_i64(t0);
+}
+
+static inline void gen_mfc0_load32 (TCGv arg, target_ulong off)
+{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+
+ tcg_gen_ld_i32(t0, cpu_env, off);
+ tcg_gen_ext_i32_tl(arg, t0);
+ tcg_temp_free_i32(t0);
+}
+
+static inline void gen_mfc0_load64 (TCGv arg, target_ulong off)
+{
+ tcg_gen_ld_tl(arg, cpu_env, off);
+ tcg_gen_ext32s_tl(arg, arg);
+}
+
+static inline void gen_mtc0_store32 (TCGv arg, target_ulong off)
+{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(t0, arg);
+ tcg_gen_st_i32(t0, cpu_env, off);
+ tcg_temp_free_i32(t0);
+}
+
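+/* Take the cp0_unimplemented path when the guest CPU lacks the feature or
+   ASE guarding the CP0 register being accessed. */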
+#define CP0_CHECK(c) \
+ do { \
+ if (!(c)) { \
+ goto cp0_unimplemented; \
+ } \
+ } while (0)
+
+static void gen_mfhc0(DisasContext *ctx, TCGv arg, int reg, int sel)
+{
+ const char *rn = "invalid";
+
+ CP0_CHECK(ctx->hflags & MIPS_HFLAG_ELPA);
+
+ switch (reg) {
+ case 2:
+ switch (sel) {
+ case 0:
+ gen_mfhc0_entrylo(arg, offsetof(CPUMIPSState, CP0_EntryLo0));
+ rn = "EntryLo0";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 3:
+ switch (sel) {
+ case 0:
+ gen_mfhc0_entrylo(arg, offsetof(CPUMIPSState, CP0_EntryLo1));
+ rn = "EntryLo1";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 17:
+ switch (sel) {
+ case 0:
+ gen_mfhc0_load64(arg, offsetof(CPUMIPSState, lladdr),
+ ctx->CP0_LLAddr_shift);
+ rn = "LLAddr";
+ break;
+ case 1:
+ CP0_CHECK(ctx->mrp);
+ gen_helper_mfhc0_maar(arg, cpu_env);
+ rn = "MAAR";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 28:
+ switch (sel) {
+ case 0:
+ case 2:
+ case 4:
+ case 6:
+ gen_mfhc0_load64(arg, offsetof(CPUMIPSState, CP0_TagLo), 0);
+ rn = "TagLo";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+
+ (void)rn; /* avoid a compiler warning */
+ LOG_DISAS("mfhc0 %s (reg %d sel %d)\n", rn, reg, sel);
+ return;
+
+cp0_unimplemented:
+ LOG_DISAS("mfhc0 %s (reg %d sel %d)\n", rn, reg, sel);
+ tcg_gen_movi_tl(arg, 0);
+}
+
+static void gen_mthc0(DisasContext *ctx, TCGv arg, int reg, int sel)
+{
+ const char *rn = "invalid";
+ uint64_t mask = ctx->PAMask >> 36;
+
+ CP0_CHECK(ctx->hflags & MIPS_HFLAG_ELPA);
+
+ switch (reg) {
+ case 2:
+ switch (sel) {
+ case 0:
+ tcg_gen_andi_tl(arg, arg, mask);
+ gen_mthc0_entrylo(arg, offsetof(CPUMIPSState, CP0_EntryLo0));
+ rn = "EntryLo0";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 3:
+ switch (sel) {
+ case 0:
+ tcg_gen_andi_tl(arg, arg, mask);
+ gen_mthc0_entrylo(arg, offsetof(CPUMIPSState, CP0_EntryLo1));
+ rn = "EntryLo1";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 17:
+ switch (sel) {
+ case 0:
+            /* LLAddr is read-only (the only exception is bit 0 if LLB is
+               supported); the CP0_LLAddr_rw_bitmask does not seem to be
+               relevant for modern MIPS cores supporting MTHC0, so treat
+               MTHC0 to LLAddr as a NOP. */
+ rn = "LLAddr";
+ break;
+ case 1:
+ CP0_CHECK(ctx->mrp);
+ gen_helper_mthc0_maar(cpu_env, arg);
+ rn = "MAAR";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 28:
+ switch (sel) {
+ case 0:
+ case 2:
+ case 4:
+ case 6:
+ tcg_gen_andi_tl(arg, arg, mask);
+ gen_mthc0_store64(arg, offsetof(CPUMIPSState, CP0_TagLo));
+ rn = "TagLo";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+
+ (void)rn; /* avoid a compiler warning */
+cp0_unimplemented:
+ LOG_DISAS("mthc0 %s (reg %d sel %d)\n", rn, reg, sel);
+}
+
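+/* Reads of unimplemented CP0 registers yield 0 on R6 and all ones on
+   earlier ISA revisions. */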
+static inline void gen_mfc0_unimplemented(DisasContext *ctx, TCGv arg)
+{
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ tcg_gen_movi_tl(arg, 0);
+ } else {
+ tcg_gen_movi_tl(arg, ~0);
+ }
+}
+
+static void gen_mfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
+{
+ const char *rn = "invalid";
+
+    if (sel != 0) {
+        check_insn(ctx, ISA_MIPS32);
+    }
+
+ switch (reg) {
+ case 0:
+ switch (sel) {
+ case 0:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Index));
+ rn = "Index";
+ break;
+ case 1:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mfc0_mvpcontrol(arg, cpu_env);
+ rn = "MVPControl";
+ break;
+ case 2:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mfc0_mvpconf0(arg, cpu_env);
+ rn = "MVPConf0";
+ break;
+ case 3:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mfc0_mvpconf1(arg, cpu_env);
+ rn = "MVPConf1";
+ break;
+ case 4:
+ CP0_CHECK(ctx->vp);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPControl));
+ rn = "VPControl";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 1:
+ switch (sel) {
+ case 0:
+ CP0_CHECK(!(ctx->insn_flags & ISA_MIPS32R6));
+ gen_helper_mfc0_random(arg, cpu_env);
+ rn = "Random";
+ break;
+ case 1:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEControl));
+ rn = "VPEControl";
+ break;
+ case 2:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEConf0));
+ rn = "VPEConf0";
+ break;
+ case 3:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEConf1));
+ rn = "VPEConf1";
+ break;
+ case 4:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_mfc0_load64(arg, offsetof(CPUMIPSState, CP0_YQMask));
+ rn = "YQMask";
+ break;
+ case 5:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_mfc0_load64(arg, offsetof(CPUMIPSState, CP0_VPESchedule));
+ rn = "VPESchedule";
+ break;
+ case 6:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_mfc0_load64(arg, offsetof(CPUMIPSState, CP0_VPEScheFBack));
+ rn = "VPEScheFBack";
+ break;
+ case 7:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEOpt));
+ rn = "VPEOpt";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 2:
+ switch (sel) {
+ case 0:
+ {
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ tcg_gen_ld_i64(tmp, cpu_env,
+ offsetof(CPUMIPSState, CP0_EntryLo0));
+#if defined(TARGET_MIPS64)
+ if (ctx->rxi) {
+ /* Move RI/XI fields to bits 31:30 */
+ tcg_gen_shri_tl(arg, tmp, CP0EnLo_XI);
+ tcg_gen_deposit_tl(tmp, tmp, arg, 30, 2);
+ }
+#endif
+ gen_move_low32(arg, tmp);
+ tcg_temp_free_i64(tmp);
+ }
+ rn = "EntryLo0";
+ break;
+ case 1:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mfc0_tcstatus(arg, cpu_env);
+ rn = "TCStatus";
+ break;
+ case 2:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mfc0_tcbind(arg, cpu_env);
+ rn = "TCBind";
+ break;
+ case 3:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mfc0_tcrestart(arg, cpu_env);
+ rn = "TCRestart";
+ break;
+ case 4:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mfc0_tchalt(arg, cpu_env);
+ rn = "TCHalt";
+ break;
+ case 5:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mfc0_tccontext(arg, cpu_env);
+ rn = "TCContext";
+ break;
+ case 6:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mfc0_tcschedule(arg, cpu_env);
+ rn = "TCSchedule";
+ break;
+ case 7:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mfc0_tcschefback(arg, cpu_env);
+ rn = "TCScheFBack";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 3:
+ switch (sel) {
+ case 0:
+ {
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ tcg_gen_ld_i64(tmp, cpu_env,
+ offsetof(CPUMIPSState, CP0_EntryLo1));
+#if defined(TARGET_MIPS64)
+ if (ctx->rxi) {
+ /* Move RI/XI fields to bits 31:30 */
+ tcg_gen_shri_tl(arg, tmp, CP0EnLo_XI);
+ tcg_gen_deposit_tl(tmp, tmp, arg, 30, 2);
+ }
+#endif
+ gen_move_low32(arg, tmp);
+ tcg_temp_free_i64(tmp);
+ }
+ rn = "EntryLo1";
+ break;
+ case 1:
+ CP0_CHECK(ctx->vp);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_GlobalNumber));
+ rn = "GlobalNumber";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 4:
+ switch (sel) {
+ case 0:
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_Context));
+ tcg_gen_ext32s_tl(arg, arg);
+ rn = "Context";
+ break;
+ case 1:
+// gen_helper_mfc0_contextconfig(arg); /* SmartMIPS ASE */
+ rn = "ContextConfig";
+ goto cp0_unimplemented;
+// break;
+ case 2:
+ CP0_CHECK(ctx->ulri);
+ tcg_gen_ld32s_tl(arg, cpu_env,
+ offsetof(CPUMIPSState, active_tc.CP0_UserLocal));
+ rn = "UserLocal";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 5:
+ switch (sel) {
+ case 0:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_PageMask));
+ rn = "PageMask";
+ break;
+ case 1:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_PageGrain));
+ rn = "PageGrain";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 6:
+ switch (sel) {
+ case 0:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Wired));
+ rn = "Wired";
+ break;
+ case 1:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf0));
+ rn = "SRSConf0";
+ break;
+ case 2:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf1));
+ rn = "SRSConf1";
+ break;
+ case 3:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf2));
+ rn = "SRSConf2";
+ break;
+ case 4:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf3));
+ rn = "SRSConf3";
+ break;
+ case 5:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf4));
+ rn = "SRSConf4";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 7:
+ switch (sel) {
+ case 0:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_HWREna));
+ rn = "HWREna";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 8:
+ switch (sel) {
+ case 0:
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_BadVAddr));
+ tcg_gen_ext32s_tl(arg, arg);
+ rn = "BadVAddr";
+ break;
+ case 1:
+ CP0_CHECK(ctx->bi);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_BadInstr));
+ rn = "BadInstr";
+ break;
+ case 2:
+ CP0_CHECK(ctx->bp);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_BadInstrP));
+ rn = "BadInstrP";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 9:
+ switch (sel) {
+ case 0:
+ /* Mark as an IO operation because we read the time. */
+ if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_mfc0_count(arg, cpu_env);
+ if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_end();
+ }
+ /* Break the TB to be able to take timer interrupts immediately
+ after reading count. */
+ ctx->bstate = BS_STOP;
+ rn = "Count";
+ break;
+ /* 6,7 are implementation dependent */
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 10:
+ switch (sel) {
+ case 0:
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EntryHi));
+ tcg_gen_ext32s_tl(arg, arg);
+ rn = "EntryHi";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 11:
+ switch (sel) {
+ case 0:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Compare));
+ rn = "Compare";
+ break;
+ /* 6,7 are implementation dependent */
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 12:
+ switch (sel) {
+ case 0:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Status));
+ rn = "Status";
+ break;
+ case 1:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_IntCtl));
+ rn = "IntCtl";
+ break;
+ case 2:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSCtl));
+ rn = "SRSCtl";
+ break;
+ case 3:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSMap));
+ rn = "SRSMap";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 13:
+ switch (sel) {
+ case 0:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Cause));
+ rn = "Cause";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 14:
+ switch (sel) {
+ case 0:
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EPC));
+ tcg_gen_ext32s_tl(arg, arg);
+ rn = "EPC";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 15:
+ switch (sel) {
+ case 0:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_PRid));
+ rn = "PRid";
+ break;
+ case 1:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_EBase));
+ rn = "EBase";
+ break;
+ case 3:
+ check_insn(ctx, ISA_MIPS32R2);
+ CP0_CHECK(ctx->cmgcr);
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_CMGCRBase));
+ tcg_gen_ext32s_tl(arg, arg);
+ rn = "CMGCRBase";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 16:
+ switch (sel) {
+ case 0:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config0));
+ rn = "Config";
+ break;
+ case 1:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config1));
+ rn = "Config1";
+ break;
+ case 2:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config2));
+ rn = "Config2";
+ break;
+ case 3:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config3));
+ rn = "Config3";
+ break;
+ case 4:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config4));
+ rn = "Config4";
+ break;
+ case 5:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config5));
+ rn = "Config5";
+ break;
+ /* 6,7 are implementation dependent */
+ case 6:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config6));
+ rn = "Config6";
+ break;
+ case 7:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config7));
+ rn = "Config7";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 17:
+ switch (sel) {
+ case 0:
+ gen_helper_mfc0_lladdr(arg, cpu_env);
+ rn = "LLAddr";
+ break;
+ case 1:
+ CP0_CHECK(ctx->mrp);
+ gen_helper_mfc0_maar(arg, cpu_env);
+ rn = "MAAR";
+ break;
+ case 2:
+ CP0_CHECK(ctx->mrp);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_MAARI));
+ rn = "MAARI";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 18:
+ switch (sel) {
+ case 0 ... 7:
+ gen_helper_1e0i(mfc0_watchlo, arg, sel);
+ rn = "WatchLo";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 19:
+ switch (sel) {
+        case 0 ... 7:
+ gen_helper_1e0i(mfc0_watchhi, arg, sel);
+ rn = "WatchHi";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 20:
+ switch (sel) {
+ case 0:
+#if defined(TARGET_MIPS64)
+ check_insn(ctx, ISA_MIPS3);
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_XContext));
+ tcg_gen_ext32s_tl(arg, arg);
+ rn = "XContext";
+ break;
+#endif
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 21:
+ /* Officially reserved, but sel 0 is used for R1x000 framemask */
+ CP0_CHECK(!(ctx->insn_flags & ISA_MIPS32R6));
+ switch (sel) {
+ case 0:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Framemask));
+ rn = "Framemask";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 22:
+ tcg_gen_movi_tl(arg, 0); /* unimplemented */
+ rn = "'Diagnostic"; /* implementation dependent */
+ break;
+ case 23:
+ switch (sel) {
+ case 0:
+ gen_helper_mfc0_debug(arg, cpu_env); /* EJTAG support */
+ rn = "Debug";
+ break;
+ case 1:
+// gen_helper_mfc0_tracecontrol(arg); /* PDtrace support */
+ rn = "TraceControl";
+// break;
+ case 2:
+// gen_helper_mfc0_tracecontrol2(arg); /* PDtrace support */
+ rn = "TraceControl2";
+// break;
+ case 3:
+// gen_helper_mfc0_usertracedata(arg); /* PDtrace support */
+ rn = "UserTraceData";
+// break;
+ case 4:
+// gen_helper_mfc0_tracebpc(arg); /* PDtrace support */
+ rn = "TraceBPC";
+// break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 24:
+ switch (sel) {
+ case 0:
+ /* EJTAG support */
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_DEPC));
+ tcg_gen_ext32s_tl(arg, arg);
+ rn = "DEPC";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 25:
+ switch (sel) {
+ case 0:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Performance0));
+ rn = "Performance0";
+ break;
+ case 1:
+// gen_helper_mfc0_performance1(arg);
+ rn = "Performance1";
+// break;
+ case 2:
+// gen_helper_mfc0_performance2(arg);
+ rn = "Performance2";
+// break;
+ case 3:
+// gen_helper_mfc0_performance3(arg);
+ rn = "Performance3";
+// break;
+ case 4:
+// gen_helper_mfc0_performance4(arg);
+ rn = "Performance4";
+// break;
+ case 5:
+// gen_helper_mfc0_performance5(arg);
+ rn = "Performance5";
+// break;
+ case 6:
+// gen_helper_mfc0_performance6(arg);
+ rn = "Performance6";
+// break;
+ case 7:
+// gen_helper_mfc0_performance7(arg);
+ rn = "Performance7";
+// break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 26:
+ switch (sel) {
+ case 0:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_ErrCtl));
+ rn = "ErrCtl";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 27:
+ switch (sel) {
+ case 0 ... 3:
+ tcg_gen_movi_tl(arg, 0); /* unimplemented */
+ rn = "CacheErr";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 28:
+ switch (sel) {
+ case 0:
+ case 2:
+ case 4:
+ case 6:
+ {
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUMIPSState, CP0_TagLo));
+ gen_move_low32(arg, tmp);
+ tcg_temp_free_i64(tmp);
+ }
+ rn = "TagLo";
+ break;
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_DataLo));
+ rn = "DataLo";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 29:
+ switch (sel) {
+ case 0:
+ case 2:
+ case 4:
+ case 6:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_TagHi));
+ rn = "TagHi";
+ break;
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_DataHi));
+ rn = "DataHi";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 30:
+ switch (sel) {
+ case 0:
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_ErrorEPC));
+ tcg_gen_ext32s_tl(arg, arg);
+ rn = "ErrorEPC";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 31:
+ switch (sel) {
+ case 0:
+ /* EJTAG support */
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_DESAVE));
+ rn = "DESAVE";
+ break;
+ case 2 ... 7:
+ CP0_CHECK(ctx->kscrexist & (1 << sel));
+ tcg_gen_ld_tl(arg, cpu_env,
+ offsetof(CPUMIPSState, CP0_KScratch[sel-2]));
+ tcg_gen_ext32s_tl(arg, arg);
+ rn = "KScratch";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ (void)rn; /* avoid a compiler warning */
+ LOG_DISAS("mfc0 %s (reg %d sel %d)\n", rn, reg, sel);
+ return;
+
+cp0_unimplemented:
+ LOG_DISAS("mfc0 %s (reg %d sel %d)\n", rn, reg, sel);
+ gen_mfc0_unimplemented(ctx, arg);
+}
+
+static void gen_mtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
+{
+ const char *rn = "invalid";
+
+    if (sel != 0) {
+        check_insn(ctx, ISA_MIPS32);
+    }
+
+ if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+
+ switch (reg) {
+ case 0:
+ switch (sel) {
+ case 0:
+ gen_helper_mtc0_index(cpu_env, arg);
+ rn = "Index";
+ break;
+ case 1:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_mvpcontrol(cpu_env, arg);
+ rn = "MVPControl";
+ break;
+ case 2:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ /* ignored */
+ rn = "MVPConf0";
+ break;
+ case 3:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ /* ignored */
+ rn = "MVPConf1";
+ break;
+ case 4:
+ CP0_CHECK(ctx->vp);
+ /* ignored */
+ rn = "VPControl";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 1:
+ switch (sel) {
+ case 0:
+ /* ignored */
+ rn = "Random";
+ break;
+ case 1:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_vpecontrol(cpu_env, arg);
+ rn = "VPEControl";
+ break;
+ case 2:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_vpeconf0(cpu_env, arg);
+ rn = "VPEConf0";
+ break;
+ case 3:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_vpeconf1(cpu_env, arg);
+ rn = "VPEConf1";
+ break;
+ case 4:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_yqmask(cpu_env, arg);
+ rn = "YQMask";
+ break;
+ case 5:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ tcg_gen_st_tl(arg, cpu_env,
+ offsetof(CPUMIPSState, CP0_VPESchedule));
+ rn = "VPESchedule";
+ break;
+ case 6:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ tcg_gen_st_tl(arg, cpu_env,
+ offsetof(CPUMIPSState, CP0_VPEScheFBack));
+ rn = "VPEScheFBack";
+ break;
+ case 7:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_vpeopt(cpu_env, arg);
+ rn = "VPEOpt";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 2:
+ switch (sel) {
+ case 0:
+ gen_helper_mtc0_entrylo0(cpu_env, arg);
+ rn = "EntryLo0";
+ break;
+ case 1:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_tcstatus(cpu_env, arg);
+ rn = "TCStatus";
+ break;
+ case 2:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_tcbind(cpu_env, arg);
+ rn = "TCBind";
+ break;
+ case 3:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_tcrestart(cpu_env, arg);
+ rn = "TCRestart";
+ break;
+ case 4:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_tchalt(cpu_env, arg);
+ rn = "TCHalt";
+ break;
+ case 5:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_tccontext(cpu_env, arg);
+ rn = "TCContext";
+ break;
+ case 6:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_tcschedule(cpu_env, arg);
+ rn = "TCSchedule";
+ break;
+ case 7:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_tcschefback(cpu_env, arg);
+ rn = "TCScheFBack";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 3:
+ switch (sel) {
+ case 0:
+ gen_helper_mtc0_entrylo1(cpu_env, arg);
+ rn = "EntryLo1";
+ break;
+ case 1:
+ CP0_CHECK(ctx->vp);
+ /* ignored */
+ rn = "GlobalNumber";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 4:
+ switch (sel) {
+ case 0:
+ gen_helper_mtc0_context(cpu_env, arg);
+ rn = "Context";
+ break;
+ case 1:
+// gen_helper_mtc0_contextconfig(cpu_env, arg); /* SmartMIPS ASE */
+ rn = "ContextConfig";
+ goto cp0_unimplemented;
+// break;
+ case 2:
+ CP0_CHECK(ctx->ulri);
+ tcg_gen_st_tl(arg, cpu_env,
+ offsetof(CPUMIPSState, active_tc.CP0_UserLocal));
+ rn = "UserLocal";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 5:
+ switch (sel) {
+ case 0:
+ gen_helper_mtc0_pagemask(cpu_env, arg);
+ rn = "PageMask";
+ break;
+ case 1:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_helper_mtc0_pagegrain(cpu_env, arg);
+ rn = "PageGrain";
+ ctx->bstate = BS_STOP;
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 6:
+ switch (sel) {
+ case 0:
+ gen_helper_mtc0_wired(cpu_env, arg);
+ rn = "Wired";
+ break;
+ case 1:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_helper_mtc0_srsconf0(cpu_env, arg);
+ rn = "SRSConf0";
+ break;
+ case 2:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_helper_mtc0_srsconf1(cpu_env, arg);
+ rn = "SRSConf1";
+ break;
+ case 3:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_helper_mtc0_srsconf2(cpu_env, arg);
+ rn = "SRSConf2";
+ break;
+ case 4:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_helper_mtc0_srsconf3(cpu_env, arg);
+ rn = "SRSConf3";
+ break;
+ case 5:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_helper_mtc0_srsconf4(cpu_env, arg);
+ rn = "SRSConf4";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 7:
+ switch (sel) {
+ case 0:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_helper_mtc0_hwrena(cpu_env, arg);
+ ctx->bstate = BS_STOP;
+ rn = "HWREna";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 8:
+ switch (sel) {
+ case 0:
+ /* ignored */
+ rn = "BadVAddr";
+ break;
+ case 1:
+ /* ignored */
+ rn = "BadInstr";
+ break;
+ case 2:
+ /* ignored */
+ rn = "BadInstrP";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 9:
+ switch (sel) {
+ case 0:
+ gen_helper_mtc0_count(cpu_env, arg);
+ rn = "Count";
+ break;
+ /* 6,7 are implementation dependent */
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 10:
+ switch (sel) {
+ case 0:
+ gen_helper_mtc0_entryhi(cpu_env, arg);
+ rn = "EntryHi";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 11:
+ switch (sel) {
+ case 0:
+ gen_helper_mtc0_compare(cpu_env, arg);
+ rn = "Compare";
+ break;
+ /* 6,7 are implementation dependent */
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 12:
+ switch (sel) {
+ case 0:
+ save_cpu_state(ctx, 1);
+ gen_helper_mtc0_status(cpu_env, arg);
+ /* BS_STOP isn't good enough here, hflags may have changed. */
+ gen_save_pc(ctx->pc + 4);
+ ctx->bstate = BS_EXCP;
+ rn = "Status";
+ break;
+ case 1:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_helper_mtc0_intctl(cpu_env, arg);
+ /* Stop translation as we may have switched the execution mode */
+ ctx->bstate = BS_STOP;
+ rn = "IntCtl";
+ break;
+ case 2:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_helper_mtc0_srsctl(cpu_env, arg);
+ /* Stop translation as we may have switched the execution mode */
+ ctx->bstate = BS_STOP;
+ rn = "SRSCtl";
+ break;
+ case 3:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_mtc0_store32(arg, offsetof(CPUMIPSState, CP0_SRSMap));
+ /* Stop translation as we may have switched the execution mode */
+ ctx->bstate = BS_STOP;
+ rn = "SRSMap";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 13:
+ switch (sel) {
+ case 0:
+ save_cpu_state(ctx, 1);
+ gen_helper_mtc0_cause(cpu_env, arg);
+ rn = "Cause";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 14:
+ switch (sel) {
+ case 0:
+ tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EPC));
+ rn = "EPC";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 15:
+ switch (sel) {
+ case 0:
+ /* ignored */
+ rn = "PRid";
+ break;
+ case 1:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_helper_mtc0_ebase(cpu_env, arg);
+ rn = "EBase";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 16:
+ switch (sel) {
+ case 0:
+ gen_helper_mtc0_config0(cpu_env, arg);
+ rn = "Config";
+ /* Stop translation as we may have switched the execution mode */
+ ctx->bstate = BS_STOP;
+ break;
+ case 1:
+ /* ignored, read only */
+ rn = "Config1";
+ break;
+ case 2:
+ gen_helper_mtc0_config2(cpu_env, arg);
+ rn = "Config2";
+ /* Stop translation as we may have switched the execution mode */
+ ctx->bstate = BS_STOP;
+ break;
+ case 3:
+ gen_helper_mtc0_config3(cpu_env, arg);
+ rn = "Config3";
+ /* Stop translation as we may have switched the execution mode */
+ ctx->bstate = BS_STOP;
+ break;
+ case 4:
+ gen_helper_mtc0_config4(cpu_env, arg);
+ rn = "Config4";
+ ctx->bstate = BS_STOP;
+ break;
+ case 5:
+ gen_helper_mtc0_config5(cpu_env, arg);
+ rn = "Config5";
+ /* Stop translation as we may have switched the execution mode */
+ ctx->bstate = BS_STOP;
+ break;
+ /* 6,7 are implementation dependent */
+ case 6:
+ /* ignored */
+ rn = "Config6";
+ break;
+ case 7:
+ /* ignored */
+ rn = "Config7";
+ break;
+ default:
+ rn = "Invalid config selector";
+ goto cp0_unimplemented;
+ }
+ break;
+ case 17:
+ switch (sel) {
+ case 0:
+ gen_helper_mtc0_lladdr(cpu_env, arg);
+ rn = "LLAddr";
+ break;
+ case 1:
+ CP0_CHECK(ctx->mrp);
+ gen_helper_mtc0_maar(cpu_env, arg);
+ rn = "MAAR";
+ break;
+ case 2:
+ CP0_CHECK(ctx->mrp);
+ gen_helper_mtc0_maari(cpu_env, arg);
+ rn = "MAARI";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 18:
+ switch (sel) {
+ case 0 ... 7:
+ gen_helper_0e1i(mtc0_watchlo, arg, sel);
+ rn = "WatchLo";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 19:
+ switch (sel) {
+ case 0 ... 7:
+ gen_helper_0e1i(mtc0_watchhi, arg, sel);
+ rn = "WatchHi";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 20:
+ switch (sel) {
+ case 0:
+#if defined(TARGET_MIPS64)
+ check_insn(ctx, ISA_MIPS3);
+ gen_helper_mtc0_xcontext(cpu_env, arg);
+ rn = "XContext";
+ break;
+#endif
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 21:
+ /* Officially reserved, but sel 0 is used for R1x000 framemask */
+ CP0_CHECK(!(ctx->insn_flags & ISA_MIPS32R6));
+ switch (sel) {
+ case 0:
+ gen_helper_mtc0_framemask(cpu_env, arg);
+ rn = "Framemask";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 22:
+ /* ignored */
+ rn = "Diagnostic"; /* implementation dependent */
+ break;
+ case 23:
+ switch (sel) {
+ case 0:
+ gen_helper_mtc0_debug(cpu_env, arg); /* EJTAG support */
+ /* BS_STOP isn't good enough here, hflags may have changed. */
+ gen_save_pc(ctx->pc + 4);
+ ctx->bstate = BS_EXCP;
+ rn = "Debug";
+ break;
+ case 1:
+// gen_helper_mtc0_tracecontrol(cpu_env, arg); /* PDtrace support */
+ rn = "TraceControl";
+ /* Stop translation as we may have switched the execution mode */
+ ctx->bstate = BS_STOP;
+// break;
+ case 2:
+// gen_helper_mtc0_tracecontrol2(cpu_env, arg); /* PDtrace support */
+ rn = "TraceControl2";
+ /* Stop translation as we may have switched the execution mode */
+ ctx->bstate = BS_STOP;
+// break;
+        case 3:
+//            gen_helper_mtc0_usertracedata(cpu_env, arg); /* PDtrace support */
+            rn = "UserTraceData";
+            /* Stop translation as we may have switched the execution mode */
+            ctx->bstate = BS_STOP;
+//            break;
+ case 4:
+// gen_helper_mtc0_tracebpc(cpu_env, arg); /* PDtrace support */
+ /* Stop translation as we may have switched the execution mode */
+ ctx->bstate = BS_STOP;
+ rn = "TraceBPC";
+// break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 24:
+ switch (sel) {
+ case 0:
+ /* EJTAG support */
+ tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_DEPC));
+ rn = "DEPC";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 25:
+ switch (sel) {
+ case 0:
+ gen_helper_mtc0_performance0(cpu_env, arg);
+ rn = "Performance0";
+ break;
+ case 1:
+// gen_helper_mtc0_performance1(arg);
+ rn = "Performance1";
+// break;
+ case 2:
+// gen_helper_mtc0_performance2(arg);
+ rn = "Performance2";
+// break;
+ case 3:
+// gen_helper_mtc0_performance3(arg);
+ rn = "Performance3";
+// break;
+ case 4:
+// gen_helper_mtc0_performance4(arg);
+ rn = "Performance4";
+// break;
+ case 5:
+// gen_helper_mtc0_performance5(arg);
+ rn = "Performance5";
+// break;
+ case 6:
+// gen_helper_mtc0_performance6(arg);
+ rn = "Performance6";
+// break;
+ case 7:
+// gen_helper_mtc0_performance7(arg);
+ rn = "Performance7";
+// break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 26:
+ switch (sel) {
+ case 0:
+ gen_helper_mtc0_errctl(cpu_env, arg);
+ ctx->bstate = BS_STOP;
+ rn = "ErrCtl";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 27:
+ switch (sel) {
+ case 0 ... 3:
+ /* ignored */
+ rn = "CacheErr";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 28:
+ switch (sel) {
+ case 0:
+ case 2:
+ case 4:
+ case 6:
+ gen_helper_mtc0_taglo(cpu_env, arg);
+ rn = "TagLo";
+ break;
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ gen_helper_mtc0_datalo(cpu_env, arg);
+ rn = "DataLo";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 29:
+ switch (sel) {
+ case 0:
+ case 2:
+ case 4:
+ case 6:
+ gen_helper_mtc0_taghi(cpu_env, arg);
+ rn = "TagHi";
+ break;
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ gen_helper_mtc0_datahi(cpu_env, arg);
+ rn = "DataHi";
+ break;
+ default:
+ rn = "invalid sel";
+ goto cp0_unimplemented;
+ }
+ break;
+ case 30:
+ switch (sel) {
+ case 0:
+ tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_ErrorEPC));
+ rn = "ErrorEPC";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 31:
+ switch (sel) {
+ case 0:
+ /* EJTAG support */
+ gen_mtc0_store32(arg, offsetof(CPUMIPSState, CP0_DESAVE));
+ rn = "DESAVE";
+ break;
+ case 2 ... 7:
+ CP0_CHECK(ctx->kscrexist & (1 << sel));
+ tcg_gen_st_tl(arg, cpu_env,
+ offsetof(CPUMIPSState, CP0_KScratch[sel-2]));
+ rn = "KScratch";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ /* Stop translation as we may have switched the execution mode */
+ ctx->bstate = BS_STOP;
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ (void)rn; /* avoid a compiler warning */
+ LOG_DISAS("mtc0 %s (reg %d sel %d)\n", rn, reg, sel);
+ /* For simplicity assume that all writes can cause interrupts. */
+ if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_end();
+ ctx->bstate = BS_STOP;
+ }
+ return;
+
+cp0_unimplemented:
+ LOG_DISAS("mtc0 %s (reg %d sel %d)\n", rn, reg, sel);
+}
+
+#if defined(TARGET_MIPS64)
+static void gen_dmfc0(DisasContext *ctx, TCGv arg, int reg, int sel)
+{
+ const char *rn = "invalid";
+
+    if (sel != 0) {
+        check_insn(ctx, ISA_MIPS64);
+    }
+
+ switch (reg) {
+ case 0:
+ switch (sel) {
+ case 0:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Index));
+ rn = "Index";
+ break;
+ case 1:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mfc0_mvpcontrol(arg, cpu_env);
+ rn = "MVPControl";
+ break;
+ case 2:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mfc0_mvpconf0(arg, cpu_env);
+ rn = "MVPConf0";
+ break;
+ case 3:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mfc0_mvpconf1(arg, cpu_env);
+ rn = "MVPConf1";
+ break;
+ case 4:
+ CP0_CHECK(ctx->vp);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPControl));
+ rn = "VPControl";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 1:
+ switch (sel) {
+ case 0:
+ CP0_CHECK(!(ctx->insn_flags & ISA_MIPS32R6));
+ gen_helper_mfc0_random(arg, cpu_env);
+ rn = "Random";
+ break;
+ case 1:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEControl));
+ rn = "VPEControl";
+ break;
+ case 2:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEConf0));
+ rn = "VPEConf0";
+ break;
+ case 3:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEConf1));
+ rn = "VPEConf1";
+ break;
+ case 4:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_YQMask));
+ rn = "YQMask";
+ break;
+ case 5:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_VPESchedule));
+ rn = "VPESchedule";
+ break;
+ case 6:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_VPEScheFBack));
+ rn = "VPEScheFBack";
+ break;
+ case 7:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_VPEOpt));
+ rn = "VPEOpt";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 2:
+ switch (sel) {
+ case 0:
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EntryLo0));
+ rn = "EntryLo0";
+ break;
+ case 1:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mfc0_tcstatus(arg, cpu_env);
+ rn = "TCStatus";
+ break;
+ case 2:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mfc0_tcbind(arg, cpu_env);
+ rn = "TCBind";
+ break;
+ case 3:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_dmfc0_tcrestart(arg, cpu_env);
+ rn = "TCRestart";
+ break;
+ case 4:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_dmfc0_tchalt(arg, cpu_env);
+ rn = "TCHalt";
+ break;
+ case 5:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_dmfc0_tccontext(arg, cpu_env);
+ rn = "TCContext";
+ break;
+ case 6:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_dmfc0_tcschedule(arg, cpu_env);
+ rn = "TCSchedule";
+ break;
+ case 7:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_dmfc0_tcschefback(arg, cpu_env);
+ rn = "TCScheFBack";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 3:
+ switch (sel) {
+ case 0:
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EntryLo1));
+ rn = "EntryLo1";
+ break;
+ case 1:
+ CP0_CHECK(ctx->vp);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_GlobalNumber));
+ rn = "GlobalNumber";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 4:
+ switch (sel) {
+ case 0:
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_Context));
+ rn = "Context";
+ break;
+ case 1:
+// gen_helper_dmfc0_contextconfig(arg); /* SmartMIPS ASE */
+ rn = "ContextConfig";
+ goto cp0_unimplemented;
+// break;
+ case 2:
+ CP0_CHECK(ctx->ulri);
+ tcg_gen_ld_tl(arg, cpu_env,
+ offsetof(CPUMIPSState, active_tc.CP0_UserLocal));
+ rn = "UserLocal";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 5:
+ switch (sel) {
+ case 0:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_PageMask));
+ rn = "PageMask";
+ break;
+ case 1:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_PageGrain));
+ rn = "PageGrain";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 6:
+ switch (sel) {
+ case 0:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Wired));
+ rn = "Wired";
+ break;
+ case 1:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf0));
+ rn = "SRSConf0";
+ break;
+ case 2:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf1));
+ rn = "SRSConf1";
+ break;
+ case 3:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf2));
+ rn = "SRSConf2";
+ break;
+ case 4:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf3));
+ rn = "SRSConf3";
+ break;
+ case 5:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSConf4));
+ rn = "SRSConf4";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 7:
+ switch (sel) {
+ case 0:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_HWREna));
+ rn = "HWREna";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 8:
+ switch (sel) {
+ case 0:
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_BadVAddr));
+ rn = "BadVAddr";
+ break;
+ case 1:
+ CP0_CHECK(ctx->bi);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_BadInstr));
+ rn = "BadInstr";
+ break;
+ case 2:
+ CP0_CHECK(ctx->bp);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_BadInstrP));
+ rn = "BadInstrP";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 9:
+ switch (sel) {
+ case 0:
+ /* Mark as an IO operation because we read the time. */
+ if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_mfc0_count(arg, cpu_env);
+ if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_end();
+ }
+ /* Break the TB to be able to take timer interrupts immediately
+ after reading count. */
+ ctx->bstate = BS_STOP;
+ rn = "Count";
+ break;
+ /* 6,7 are implementation dependent */
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 10:
+ switch (sel) {
+ case 0:
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EntryHi));
+ rn = "EntryHi";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 11:
+ switch (sel) {
+ case 0:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Compare));
+ rn = "Compare";
+ break;
+ /* 6,7 are implementation dependent */
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 12:
+ switch (sel) {
+ case 0:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Status));
+ rn = "Status";
+ break;
+ case 1:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_IntCtl));
+ rn = "IntCtl";
+ break;
+ case 2:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSCtl));
+ rn = "SRSCtl";
+ break;
+ case 3:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_SRSMap));
+ rn = "SRSMap";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 13:
+ switch (sel) {
+ case 0:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Cause));
+ rn = "Cause";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 14:
+ switch (sel) {
+ case 0:
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EPC));
+ rn = "EPC";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 15:
+ switch (sel) {
+ case 0:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_PRid));
+ rn = "PRid";
+ break;
+ case 1:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_EBase));
+ rn = "EBase";
+ break;
+ case 3:
+ check_insn(ctx, ISA_MIPS32R2);
+ CP0_CHECK(ctx->cmgcr);
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_CMGCRBase));
+ rn = "CMGCRBase";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 16:
+ switch (sel) {
+ case 0:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config0));
+ rn = "Config";
+ break;
+ case 1:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config1));
+ rn = "Config1";
+ break;
+ case 2:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config2));
+ rn = "Config2";
+ break;
+ case 3:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config3));
+ rn = "Config3";
+ break;
+ case 4:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config4));
+ rn = "Config4";
+ break;
+ case 5:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config5));
+ rn = "Config5";
+ break;
+ /* 6,7 are implementation dependent */
+ case 6:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config6));
+ rn = "Config6";
+ break;
+ case 7:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Config7));
+ rn = "Config7";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 17:
+ switch (sel) {
+ case 0:
+ gen_helper_dmfc0_lladdr(arg, cpu_env);
+ rn = "LLAddr";
+ break;
+ case 1:
+ CP0_CHECK(ctx->mrp);
+ gen_helper_dmfc0_maar(arg, cpu_env);
+ rn = "MAAR";
+ break;
+ case 2:
+ CP0_CHECK(ctx->mrp);
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_MAARI));
+ rn = "MAARI";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 18:
+ switch (sel) {
+ case 0 ... 7:
+ gen_helper_1e0i(dmfc0_watchlo, arg, sel);
+ rn = "WatchLo";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 19:
+ switch (sel) {
+ case 0 ... 7:
+ gen_helper_1e0i(mfc0_watchhi, arg, sel);
+ rn = "WatchHi";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 20:
+ switch (sel) {
+ case 0:
+ check_insn(ctx, ISA_MIPS3);
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_XContext));
+ rn = "XContext";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 21:
+ /* Officially reserved, but sel 0 is used for R1x000 framemask */
+ CP0_CHECK(!(ctx->insn_flags & ISA_MIPS32R6));
+ switch (sel) {
+ case 0:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Framemask));
+ rn = "Framemask";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 22:
+ tcg_gen_movi_tl(arg, 0); /* unimplemented */
+ rn = "'Diagnostic"; /* implementation dependent */
+ break;
+ case 23:
+ switch (sel) {
+ case 0:
+ gen_helper_mfc0_debug(arg, cpu_env); /* EJTAG support */
+ rn = "Debug";
+ break;
+ case 1:
+// gen_helper_dmfc0_tracecontrol(arg, cpu_env); /* PDtrace support */
+ rn = "TraceControl";
+// break;
+ case 2:
+// gen_helper_dmfc0_tracecontrol2(arg, cpu_env); /* PDtrace support */
+ rn = "TraceControl2";
+// break;
+ case 3:
+// gen_helper_dmfc0_usertracedata(arg, cpu_env); /* PDtrace support */
+ rn = "UserTraceData";
+// break;
+ case 4:
+// gen_helper_dmfc0_tracebpc(arg, cpu_env); /* PDtrace support */
+ rn = "TraceBPC";
+// break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 24:
+ switch (sel) {
+ case 0:
+ /* EJTAG support */
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_DEPC));
+ rn = "DEPC";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 25:
+ switch (sel) {
+ case 0:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_Performance0));
+ rn = "Performance0";
+ break;
+ case 1:
+// gen_helper_dmfc0_performance1(arg);
+ rn = "Performance1";
+// break;
+ case 2:
+// gen_helper_dmfc0_performance2(arg);
+ rn = "Performance2";
+// break;
+ case 3:
+// gen_helper_dmfc0_performance3(arg);
+ rn = "Performance3";
+// break;
+ case 4:
+// gen_helper_dmfc0_performance4(arg);
+ rn = "Performance4";
+// break;
+ case 5:
+// gen_helper_dmfc0_performance5(arg);
+ rn = "Performance5";
+// break;
+ case 6:
+// gen_helper_dmfc0_performance6(arg);
+ rn = "Performance6";
+// break;
+ case 7:
+// gen_helper_dmfc0_performance7(arg);
+ rn = "Performance7";
+// break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 26:
+ switch (sel) {
+ case 0:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_ErrCtl));
+ rn = "ErrCtl";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 27:
+ switch (sel) {
+ /* ignored */
+ case 0 ... 3:
+ tcg_gen_movi_tl(arg, 0); /* unimplemented */
+ rn = "CacheErr";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 28:
+ switch (sel) {
+ case 0:
+ case 2:
+ case 4:
+ case 6:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_TagLo));
+ rn = "TagLo";
+ break;
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_DataLo));
+ rn = "DataLo";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 29:
+ switch (sel) {
+ case 0:
+ case 2:
+ case 4:
+ case 6:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_TagHi));
+ rn = "TagHi";
+ break;
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_DataHi));
+ rn = "DataHi";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 30:
+ switch (sel) {
+ case 0:
+ tcg_gen_ld_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_ErrorEPC));
+ rn = "ErrorEPC";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 31:
+ switch (sel) {
+ case 0:
+ /* EJTAG support */
+ gen_mfc0_load32(arg, offsetof(CPUMIPSState, CP0_DESAVE));
+ rn = "DESAVE";
+ break;
+ case 2 ... 7:
+ CP0_CHECK(ctx->kscrexist & (1 << sel));
+ tcg_gen_ld_tl(arg, cpu_env,
+ offsetof(CPUMIPSState, CP0_KScratch[sel-2]));
+ rn = "KScratch";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ (void)rn; /* avoid a compiler warning */
+ LOG_DISAS("dmfc0 %s (reg %d sel %d)\n", rn, reg, sel);
+ return;
+
+cp0_unimplemented:
+ LOG_DISAS("dmfc0 %s (reg %d sel %d)\n", rn, reg, sel);
+ gen_mfc0_unimplemented(ctx, arg);
+}
+
+static void gen_dmtc0(DisasContext *ctx, TCGv arg, int reg, int sel)
+{
+ const char *rn = "invalid";
+
+    if (sel != 0) {
+        check_insn(ctx, ISA_MIPS64);
+    }
+
+ if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+
+ switch (reg) {
+ case 0:
+ switch (sel) {
+ case 0:
+ gen_helper_mtc0_index(cpu_env, arg);
+ rn = "Index";
+ break;
+ case 1:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_mvpcontrol(cpu_env, arg);
+ rn = "MVPControl";
+ break;
+ case 2:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ /* ignored */
+ rn = "MVPConf0";
+ break;
+ case 3:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ /* ignored */
+ rn = "MVPConf1";
+ break;
+ case 4:
+ CP0_CHECK(ctx->vp);
+ /* ignored */
+ rn = "VPControl";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 1:
+ switch (sel) {
+ case 0:
+ /* ignored */
+ rn = "Random";
+ break;
+ case 1:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_vpecontrol(cpu_env, arg);
+ rn = "VPEControl";
+ break;
+ case 2:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_vpeconf0(cpu_env, arg);
+ rn = "VPEConf0";
+ break;
+ case 3:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_vpeconf1(cpu_env, arg);
+ rn = "VPEConf1";
+ break;
+ case 4:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_yqmask(cpu_env, arg);
+ rn = "YQMask";
+ break;
+ case 5:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_VPESchedule));
+ rn = "VPESchedule";
+ break;
+ case 6:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_VPEScheFBack));
+ rn = "VPEScheFBack";
+ break;
+ case 7:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_vpeopt(cpu_env, arg);
+ rn = "VPEOpt";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 2:
+ switch (sel) {
+ case 0:
+ gen_helper_dmtc0_entrylo0(cpu_env, arg);
+ rn = "EntryLo0";
+ break;
+ case 1:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_tcstatus(cpu_env, arg);
+ rn = "TCStatus";
+ break;
+ case 2:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_tcbind(cpu_env, arg);
+ rn = "TCBind";
+ break;
+ case 3:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_tcrestart(cpu_env, arg);
+ rn = "TCRestart";
+ break;
+ case 4:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_tchalt(cpu_env, arg);
+ rn = "TCHalt";
+ break;
+ case 5:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_tccontext(cpu_env, arg);
+ rn = "TCContext";
+ break;
+ case 6:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_tcschedule(cpu_env, arg);
+ rn = "TCSchedule";
+ break;
+ case 7:
+ CP0_CHECK(ctx->insn_flags & ASE_MT);
+ gen_helper_mtc0_tcschefback(cpu_env, arg);
+ rn = "TCScheFBack";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 3:
+ switch (sel) {
+ case 0:
+ gen_helper_dmtc0_entrylo1(cpu_env, arg);
+ rn = "EntryLo1";
+ break;
+ case 1:
+ CP0_CHECK(ctx->vp);
+ /* ignored */
+ rn = "GlobalNumber";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 4:
+ switch (sel) {
+ case 0:
+ gen_helper_mtc0_context(cpu_env, arg);
+ rn = "Context";
+ break;
+ case 1:
+// gen_helper_mtc0_contextconfig(cpu_env, arg); /* SmartMIPS ASE */
+ rn = "ContextConfig";
+ goto cp0_unimplemented;
+// break;
+ case 2:
+ CP0_CHECK(ctx->ulri);
+ tcg_gen_st_tl(arg, cpu_env,
+ offsetof(CPUMIPSState, active_tc.CP0_UserLocal));
+ rn = "UserLocal";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 5:
+ switch (sel) {
+ case 0:
+ gen_helper_mtc0_pagemask(cpu_env, arg);
+ rn = "PageMask";
+ break;
+ case 1:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_helper_mtc0_pagegrain(cpu_env, arg);
+ rn = "PageGrain";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 6:
+ switch (sel) {
+ case 0:
+ gen_helper_mtc0_wired(cpu_env, arg);
+ rn = "Wired";
+ break;
+ case 1:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_helper_mtc0_srsconf0(cpu_env, arg);
+ rn = "SRSConf0";
+ break;
+ case 2:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_helper_mtc0_srsconf1(cpu_env, arg);
+ rn = "SRSConf1";
+ break;
+ case 3:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_helper_mtc0_srsconf2(cpu_env, arg);
+ rn = "SRSConf2";
+ break;
+ case 4:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_helper_mtc0_srsconf3(cpu_env, arg);
+ rn = "SRSConf3";
+ break;
+ case 5:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_helper_mtc0_srsconf4(cpu_env, arg);
+ rn = "SRSConf4";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 7:
+ switch (sel) {
+ case 0:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_helper_mtc0_hwrena(cpu_env, arg);
+ ctx->bstate = BS_STOP;
+ rn = "HWREna";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 8:
+ switch (sel) {
+ case 0:
+ /* ignored */
+ rn = "BadVAddr";
+ break;
+ case 1:
+ /* ignored */
+ rn = "BadInstr";
+ break;
+ case 2:
+ /* ignored */
+ rn = "BadInstrP";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 9:
+ switch (sel) {
+ case 0:
+ gen_helper_mtc0_count(cpu_env, arg);
+ rn = "Count";
+ break;
+ /* 6,7 are implementation dependent */
+ default:
+ goto cp0_unimplemented;
+ }
+ /* Stop translation as we may have switched the execution mode */
+ ctx->bstate = BS_STOP;
+ break;
+ case 10:
+ switch (sel) {
+ case 0:
+ gen_helper_mtc0_entryhi(cpu_env, arg);
+ rn = "EntryHi";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 11:
+ switch (sel) {
+ case 0:
+ gen_helper_mtc0_compare(cpu_env, arg);
+ rn = "Compare";
+ break;
+ /* 6,7 are implementation dependent */
+ default:
+ goto cp0_unimplemented;
+ }
+ /* Stop translation as we may have switched the execution mode */
+ ctx->bstate = BS_STOP;
+ break;
+ case 12:
+ switch (sel) {
+ case 0:
+ save_cpu_state(ctx, 1);
+ gen_helper_mtc0_status(cpu_env, arg);
+ /* BS_STOP isn't good enough here, hflags may have changed. */
+ gen_save_pc(ctx->pc + 4);
+ ctx->bstate = BS_EXCP;
+ rn = "Status";
+ break;
+ case 1:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_helper_mtc0_intctl(cpu_env, arg);
+ /* Stop translation as we may have switched the execution mode */
+ ctx->bstate = BS_STOP;
+ rn = "IntCtl";
+ break;
+ case 2:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_helper_mtc0_srsctl(cpu_env, arg);
+ /* Stop translation as we may have switched the execution mode */
+ ctx->bstate = BS_STOP;
+ rn = "SRSCtl";
+ break;
+ case 3:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_mtc0_store32(arg, offsetof(CPUMIPSState, CP0_SRSMap));
+ /* Stop translation as we may have switched the execution mode */
+ ctx->bstate = BS_STOP;
+ rn = "SRSMap";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 13:
+ switch (sel) {
+ case 0:
+ save_cpu_state(ctx, 1);
+ /* Mark as an IO operation because we may trigger a software
+ interrupt. */
+ if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_mtc0_cause(cpu_env, arg);
+ if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_end();
+ }
+            /* Stop translation as we may have triggered an interrupt */
+ ctx->bstate = BS_STOP;
+ rn = "Cause";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 14:
+ switch (sel) {
+ case 0:
+ tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_EPC));
+ rn = "EPC";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 15:
+ switch (sel) {
+ case 0:
+ /* ignored */
+ rn = "PRid";
+ break;
+ case 1:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_helper_mtc0_ebase(cpu_env, arg);
+ rn = "EBase";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 16:
+ switch (sel) {
+ case 0:
+ gen_helper_mtc0_config0(cpu_env, arg);
+ rn = "Config";
+ /* Stop translation as we may have switched the execution mode */
+ ctx->bstate = BS_STOP;
+ break;
+ case 1:
+ /* ignored, read only */
+ rn = "Config1";
+ break;
+ case 2:
+ gen_helper_mtc0_config2(cpu_env, arg);
+ rn = "Config2";
+ /* Stop translation as we may have switched the execution mode */
+ ctx->bstate = BS_STOP;
+ break;
+ case 3:
+ gen_helper_mtc0_config3(cpu_env, arg);
+ rn = "Config3";
+ /* Stop translation as we may have switched the execution mode */
+ ctx->bstate = BS_STOP;
+ break;
+ case 4:
+ /* currently ignored */
+ rn = "Config4";
+ break;
+ case 5:
+ gen_helper_mtc0_config5(cpu_env, arg);
+ rn = "Config5";
+ /* Stop translation as we may have switched the execution mode */
+ ctx->bstate = BS_STOP;
+ break;
+ /* 6,7 are implementation dependent */
+ default:
+ rn = "Invalid config selector";
+ goto cp0_unimplemented;
+ }
+ break;
+ case 17:
+ switch (sel) {
+ case 0:
+ gen_helper_mtc0_lladdr(cpu_env, arg);
+ rn = "LLAddr";
+ break;
+ case 1:
+ CP0_CHECK(ctx->mrp);
+ gen_helper_mtc0_maar(cpu_env, arg);
+ rn = "MAAR";
+ break;
+ case 2:
+ CP0_CHECK(ctx->mrp);
+ gen_helper_mtc0_maari(cpu_env, arg);
+ rn = "MAARI";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 18:
+ switch (sel) {
+ case 0 ... 7:
+ gen_helper_0e1i(mtc0_watchlo, arg, sel);
+ rn = "WatchLo";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 19:
+ switch (sel) {
+ case 0 ... 7:
+ gen_helper_0e1i(mtc0_watchhi, arg, sel);
+ rn = "WatchHi";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 20:
+ switch (sel) {
+ case 0:
+ check_insn(ctx, ISA_MIPS3);
+ gen_helper_mtc0_xcontext(cpu_env, arg);
+ rn = "XContext";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 21:
+ /* Officially reserved, but sel 0 is used for R1x000 framemask */
+ CP0_CHECK(!(ctx->insn_flags & ISA_MIPS32R6));
+ switch (sel) {
+ case 0:
+ gen_helper_mtc0_framemask(cpu_env, arg);
+ rn = "Framemask";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 22:
+ /* ignored */
+ rn = "Diagnostic"; /* implementation dependent */
+ break;
+ case 23:
+ switch (sel) {
+ case 0:
+ gen_helper_mtc0_debug(cpu_env, arg); /* EJTAG support */
+ /* BS_STOP isn't good enough here, hflags may have changed. */
+ gen_save_pc(ctx->pc + 4);
+ ctx->bstate = BS_EXCP;
+ rn = "Debug";
+ break;
+ case 1:
+// gen_helper_mtc0_tracecontrol(cpu_env, arg); /* PDtrace support */
+ /* Stop translation as we may have switched the execution mode */
+ ctx->bstate = BS_STOP;
+ rn = "TraceControl";
+// break;
+ case 2:
+// gen_helper_mtc0_tracecontrol2(cpu_env, arg); /* PDtrace support */
+ /* Stop translation as we may have switched the execution mode */
+ ctx->bstate = BS_STOP;
+ rn = "TraceControl2";
+// break;
+ case 3:
+// gen_helper_mtc0_usertracedata(cpu_env, arg); /* PDtrace support */
+ /* Stop translation as we may have switched the execution mode */
+ ctx->bstate = BS_STOP;
+ rn = "UserTraceData";
+// break;
+ case 4:
+// gen_helper_mtc0_tracebpc(cpu_env, arg); /* PDtrace support */
+ /* Stop translation as we may have switched the execution mode */
+ ctx->bstate = BS_STOP;
+ rn = "TraceBPC";
+// break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 24:
+ switch (sel) {
+ case 0:
+ /* EJTAG support */
+ tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_DEPC));
+ rn = "DEPC";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 25:
+ switch (sel) {
+ case 0:
+ gen_helper_mtc0_performance0(cpu_env, arg);
+ rn = "Performance0";
+ break;
+ case 1:
+// gen_helper_mtc0_performance1(cpu_env, arg);
+ rn = "Performance1";
+// break;
+ case 2:
+// gen_helper_mtc0_performance2(cpu_env, arg);
+ rn = "Performance2";
+// break;
+ case 3:
+// gen_helper_mtc0_performance3(cpu_env, arg);
+ rn = "Performance3";
+// break;
+ case 4:
+// gen_helper_mtc0_performance4(cpu_env, arg);
+ rn = "Performance4";
+// break;
+ case 5:
+// gen_helper_mtc0_performance5(cpu_env, arg);
+ rn = "Performance5";
+// break;
+ case 6:
+// gen_helper_mtc0_performance6(cpu_env, arg);
+ rn = "Performance6";
+// break;
+ case 7:
+// gen_helper_mtc0_performance7(cpu_env, arg);
+ rn = "Performance7";
+// break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 26:
+ switch (sel) {
+ case 0:
+ gen_helper_mtc0_errctl(cpu_env, arg);
+ ctx->bstate = BS_STOP;
+ rn = "ErrCtl";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 27:
+ switch (sel) {
+ case 0 ... 3:
+ /* ignored */
+ rn = "CacheErr";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 28:
+ switch (sel) {
+ case 0:
+ case 2:
+ case 4:
+ case 6:
+ gen_helper_mtc0_taglo(cpu_env, arg);
+ rn = "TagLo";
+ break;
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ gen_helper_mtc0_datalo(cpu_env, arg);
+ rn = "DataLo";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 29:
+ switch (sel) {
+ case 0:
+ case 2:
+ case 4:
+ case 6:
+ gen_helper_mtc0_taghi(cpu_env, arg);
+ rn = "TagHi";
+ break;
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ gen_helper_mtc0_datahi(cpu_env, arg);
+ rn = "DataHi";
+ break;
+ default:
+ rn = "invalid sel";
+ goto cp0_unimplemented;
+ }
+ break;
+ case 30:
+ switch (sel) {
+ case 0:
+ tcg_gen_st_tl(arg, cpu_env, offsetof(CPUMIPSState, CP0_ErrorEPC));
+ rn = "ErrorEPC";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ break;
+ case 31:
+ switch (sel) {
+ case 0:
+ /* EJTAG support */
+ gen_mtc0_store32(arg, offsetof(CPUMIPSState, CP0_DESAVE));
+ rn = "DESAVE";
+ break;
+ case 2 ... 7:
+ CP0_CHECK(ctx->kscrexist & (1 << sel));
+ tcg_gen_st_tl(arg, cpu_env,
+ offsetof(CPUMIPSState, CP0_KScratch[sel-2]));
+ rn = "KScratch";
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ /* Stop translation as we may have switched the execution mode */
+ ctx->bstate = BS_STOP;
+ break;
+ default:
+ goto cp0_unimplemented;
+ }
+ (void)rn; /* avoid a compiler warning */
+ LOG_DISAS("dmtc0 %s (reg %d sel %d)\n", rn, reg, sel);
+ /* For simplicity assume that all writes can cause interrupts. */
+ if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_end();
+ ctx->bstate = BS_STOP;
+ }
+ return;
+
+cp0_unimplemented:
+ LOG_DISAS("dmtc0 %s (reg %d sel %d)\n", rn, reg, sel);
+}
+#endif /* TARGET_MIPS64 */
+
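+/*
+ * MFTR (MT ASE): read a register belonging to the target thread context
+ * (TC) selected by CP0 VPEControl.TargTC.  Depending on u/sel/h this is a
+ * CP0 register, a GPR, a DSP accumulator or an FPU register of that TC;
+ * if the target TC is not accessible, -1 is returned instead.
+ */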
+static void gen_mftr(CPUMIPSState *env, DisasContext *ctx, int rt, int rd,
+ int u, int sel, int h)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ TCGv t0 = tcg_temp_local_new();
+
+ if ((env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) == 0 &&
+ ((env->tcs[other_tc].CP0_TCBind & (0xf << CP0TCBd_CurVPE)) !=
+ (env->active_tc.CP0_TCBind & (0xf << CP0TCBd_CurVPE))))
+ tcg_gen_movi_tl(t0, -1);
+ else if ((env->CP0_VPEControl & (0xff << CP0VPECo_TargTC)) >
+ (env->mvp->CP0_MVPConf0 & (0xff << CP0MVPC0_PTC)))
+ tcg_gen_movi_tl(t0, -1);
+ else if (u == 0) {
+ switch (rt) {
+ case 1:
+ switch (sel) {
+ case 1:
+ gen_helper_mftc0_vpecontrol(t0, cpu_env);
+ break;
+ case 2:
+ gen_helper_mftc0_vpeconf0(t0, cpu_env);
+ break;
+ default:
+ goto die;
+ break;
+ }
+ break;
+ case 2:
+ switch (sel) {
+ case 1:
+ gen_helper_mftc0_tcstatus(t0, cpu_env);
+ break;
+ case 2:
+ gen_helper_mftc0_tcbind(t0, cpu_env);
+ break;
+ case 3:
+ gen_helper_mftc0_tcrestart(t0, cpu_env);
+ break;
+ case 4:
+ gen_helper_mftc0_tchalt(t0, cpu_env);
+ break;
+ case 5:
+ gen_helper_mftc0_tccontext(t0, cpu_env);
+ break;
+ case 6:
+ gen_helper_mftc0_tcschedule(t0, cpu_env);
+ break;
+ case 7:
+ gen_helper_mftc0_tcschefback(t0, cpu_env);
+ break;
+ default:
+ gen_mfc0(ctx, t0, rt, sel);
+ break;
+ }
+ break;
+ case 10:
+ switch (sel) {
+ case 0:
+ gen_helper_mftc0_entryhi(t0, cpu_env);
+ break;
+ default:
+ gen_mfc0(ctx, t0, rt, sel);
+ break;
+            }
+            break;
+        case 12:
+ switch (sel) {
+ case 0:
+ gen_helper_mftc0_status(t0, cpu_env);
+ break;
+ default:
+ gen_mfc0(ctx, t0, rt, sel);
+ break;
+            }
+            break;
+        case 13:
+ switch (sel) {
+ case 0:
+ gen_helper_mftc0_cause(t0, cpu_env);
+ break;
+ default:
+ goto die;
+ break;
+ }
+ break;
+ case 14:
+ switch (sel) {
+ case 0:
+ gen_helper_mftc0_epc(t0, cpu_env);
+ break;
+ default:
+ goto die;
+ break;
+ }
+ break;
+ case 15:
+ switch (sel) {
+ case 1:
+ gen_helper_mftc0_ebase(t0, cpu_env);
+ break;
+ default:
+ goto die;
+ break;
+ }
+ break;
+ case 16:
+ switch (sel) {
+ case 0 ... 7:
+ gen_helper_mftc0_configx(t0, cpu_env, tcg_const_tl(sel));
+ break;
+ default:
+ goto die;
+ break;
+ }
+ break;
+ case 23:
+ switch (sel) {
+ case 0:
+ gen_helper_mftc0_debug(t0, cpu_env);
+ break;
+ default:
+ gen_mfc0(ctx, t0, rt, sel);
+ break;
+ }
+ break;
+ default:
+ gen_mfc0(ctx, t0, rt, sel);
+ }
+ } else switch (sel) {
+ /* GPR registers. */
+ case 0:
+ gen_helper_1e0i(mftgpr, t0, rt);
+ break;
+ /* Auxiliary CPU registers */
+ case 1:
+ switch (rt) {
+ case 0:
+ gen_helper_1e0i(mftlo, t0, 0);
+ break;
+ case 1:
+ gen_helper_1e0i(mfthi, t0, 0);
+ break;
+ case 2:
+ gen_helper_1e0i(mftacx, t0, 0);
+ break;
+ case 4:
+ gen_helper_1e0i(mftlo, t0, 1);
+ break;
+ case 5:
+ gen_helper_1e0i(mfthi, t0, 1);
+ break;
+ case 6:
+ gen_helper_1e0i(mftacx, t0, 1);
+ break;
+ case 8:
+ gen_helper_1e0i(mftlo, t0, 2);
+ break;
+ case 9:
+ gen_helper_1e0i(mfthi, t0, 2);
+ break;
+ case 10:
+ gen_helper_1e0i(mftacx, t0, 2);
+ break;
+ case 12:
+ gen_helper_1e0i(mftlo, t0, 3);
+ break;
+ case 13:
+ gen_helper_1e0i(mfthi, t0, 3);
+ break;
+ case 14:
+ gen_helper_1e0i(mftacx, t0, 3);
+ break;
+ case 16:
+ gen_helper_mftdsp(t0, cpu_env);
+ break;
+ default:
+ goto die;
+ }
+ break;
+ /* Floating point (COP1). */
+ case 2:
+ /* XXX: For now we support only a single FPU context. */
+ if (h == 0) {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, rt);
+ tcg_gen_ext_i32_tl(t0, fp0);
+ tcg_temp_free_i32(fp0);
+ } else {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32h(ctx, fp0, rt);
+ tcg_gen_ext_i32_tl(t0, fp0);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case 3:
+ /* XXX: For now we support only a single FPU context. */
+ gen_helper_1e0i(cfc1, t0, rt);
+ break;
+ /* COP2: Not implemented. */
+ case 4:
+ case 5:
+ /* fall through */
+ default:
+ goto die;
+ }
+ LOG_DISAS("mftr (reg %d u %d sel %d h %d)\n", rt, u, sel, h);
+ gen_store_gpr(t0, rd);
+ tcg_temp_free(t0);
+ return;
+
+die:
+ tcg_temp_free(t0);
+ LOG_DISAS("mftr (reg %d u %d sel %d h %d)\n", rt, u, sel, h);
+ generate_exception_end(ctx, EXCP_RI);
+}
+
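+/*
+ * MTTR (MT ASE): write a register of the target thread context selected by
+ * CP0 VPEControl.TargTC.  Writes to an inaccessible target TC are silently
+ * ignored.
+ */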
+static void gen_mttr(CPUMIPSState *env, DisasContext *ctx, int rd, int rt,
+ int u, int sel, int h)
+{
+ int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
+ TCGv t0 = tcg_temp_local_new();
+
+ gen_load_gpr(t0, rt);
+ if ((env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) == 0 &&
+ ((env->tcs[other_tc].CP0_TCBind & (0xf << CP0TCBd_CurVPE)) !=
+ (env->active_tc.CP0_TCBind & (0xf << CP0TCBd_CurVPE))))
+ /* NOP */ ;
+ else if ((env->CP0_VPEControl & (0xff << CP0VPECo_TargTC)) >
+ (env->mvp->CP0_MVPConf0 & (0xff << CP0MVPC0_PTC)))
+ /* NOP */ ;
+ else if (u == 0) {
+ switch (rd) {
+ case 1:
+ switch (sel) {
+ case 1:
+ gen_helper_mttc0_vpecontrol(cpu_env, t0);
+ break;
+ case 2:
+ gen_helper_mttc0_vpeconf0(cpu_env, t0);
+ break;
+ default:
+ goto die;
+ break;
+ }
+ break;
+ case 2:
+ switch (sel) {
+ case 1:
+ gen_helper_mttc0_tcstatus(cpu_env, t0);
+ break;
+ case 2:
+ gen_helper_mttc0_tcbind(cpu_env, t0);
+ break;
+ case 3:
+ gen_helper_mttc0_tcrestart(cpu_env, t0);
+ break;
+ case 4:
+ gen_helper_mttc0_tchalt(cpu_env, t0);
+ break;
+ case 5:
+ gen_helper_mttc0_tccontext(cpu_env, t0);
+ break;
+ case 6:
+ gen_helper_mttc0_tcschedule(cpu_env, t0);
+ break;
+ case 7:
+ gen_helper_mttc0_tcschefback(cpu_env, t0);
+ break;
+ default:
+ gen_mtc0(ctx, t0, rd, sel);
+ break;
+ }
+ break;
+ case 10:
+ switch (sel) {
+ case 0:
+ gen_helper_mttc0_entryhi(cpu_env, t0);
+ break;
+ default:
+ gen_mtc0(ctx, t0, rd, sel);
+ break;
+            }
+            break;
+        case 12:
+ switch (sel) {
+ case 0:
+ gen_helper_mttc0_status(cpu_env, t0);
+ break;
+ default:
+ gen_mtc0(ctx, t0, rd, sel);
+ break;
+            }
+            break;
+        case 13:
+ switch (sel) {
+ case 0:
+ gen_helper_mttc0_cause(cpu_env, t0);
+ break;
+ default:
+ goto die;
+ break;
+ }
+ break;
+ case 15:
+ switch (sel) {
+ case 1:
+ gen_helper_mttc0_ebase(cpu_env, t0);
+ break;
+ default:
+ goto die;
+ break;
+ }
+ break;
+ case 23:
+ switch (sel) {
+ case 0:
+ gen_helper_mttc0_debug(cpu_env, t0);
+ break;
+ default:
+ gen_mtc0(ctx, t0, rd, sel);
+ break;
+ }
+ break;
+ default:
+ gen_mtc0(ctx, t0, rd, sel);
+ }
+ } else switch (sel) {
+ /* GPR registers. */
+ case 0:
+ gen_helper_0e1i(mttgpr, t0, rd);
+ break;
+ /* Auxiliary CPU registers */
+ case 1:
+ switch (rd) {
+ case 0:
+ gen_helper_0e1i(mttlo, t0, 0);
+ break;
+ case 1:
+ gen_helper_0e1i(mtthi, t0, 0);
+ break;
+ case 2:
+ gen_helper_0e1i(mttacx, t0, 0);
+ break;
+ case 4:
+ gen_helper_0e1i(mttlo, t0, 1);
+ break;
+ case 5:
+ gen_helper_0e1i(mtthi, t0, 1);
+ break;
+ case 6:
+ gen_helper_0e1i(mttacx, t0, 1);
+ break;
+ case 8:
+ gen_helper_0e1i(mttlo, t0, 2);
+ break;
+ case 9:
+ gen_helper_0e1i(mtthi, t0, 2);
+ break;
+ case 10:
+ gen_helper_0e1i(mttacx, t0, 2);
+ break;
+ case 12:
+ gen_helper_0e1i(mttlo, t0, 3);
+ break;
+ case 13:
+ gen_helper_0e1i(mtthi, t0, 3);
+ break;
+ case 14:
+ gen_helper_0e1i(mttacx, t0, 3);
+ break;
+ case 16:
+ gen_helper_mttdsp(cpu_env, t0);
+ break;
+ default:
+ goto die;
+ }
+ break;
+ /* Floating point (COP1). */
+ case 2:
+ /* XXX: For now we support only a single FPU context. */
+ if (h == 0) {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(fp0, t0);
+ gen_store_fpr32(ctx, fp0, rd);
+ tcg_temp_free_i32(fp0);
+ } else {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(fp0, t0);
+ gen_store_fpr32h(ctx, fp0, rd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case 3:
+ /* XXX: For now we support only a single FPU context. */
+ {
+ TCGv_i32 fs_tmp = tcg_const_i32(rd);
+
+ gen_helper_0e2i(ctc1, t0, fs_tmp, rt);
+ tcg_temp_free_i32(fs_tmp);
+ }
+ /* Stop translation as we may have changed hflags */
+ ctx->bstate = BS_STOP;
+ break;
+ /* COP2: Not implemented. */
+ case 4:
+ case 5:
+ /* fall through */
+ default:
+ goto die;
+ }
+ LOG_DISAS("mttr (reg %d u %d sel %d h %d)\n", rd, u, sel, h);
+ tcg_temp_free(t0);
+ return;
+
+die:
+ tcg_temp_free(t0);
+ LOG_DISAS("mttr (reg %d u %d sel %d h %d)\n", rd, u, sel, h);
+ generate_exception_end(ctx, EXCP_RI);
+}
+
+static void gen_cp0 (CPUMIPSState *env, DisasContext *ctx, uint32_t opc, int rt, int rd)
+{
+ const char *opn = "ldst";
+
+ check_cp0_enabled(ctx);
+ switch (opc) {
+ case OPC_MFC0:
+ if (rt == 0) {
+ /* Treat as NOP. */
+ return;
+ }
+ gen_mfc0(ctx, cpu_gpr[rt], rd, ctx->opcode & 0x7);
+ opn = "mfc0";
+ break;
+ case OPC_MTC0:
+ {
+ TCGv t0 = tcg_temp_new();
+
+ gen_load_gpr(t0, rt);
+ gen_mtc0(ctx, t0, rd, ctx->opcode & 0x7);
+ tcg_temp_free(t0);
+ }
+ opn = "mtc0";
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DMFC0:
+ check_insn(ctx, ISA_MIPS3);
+ if (rt == 0) {
+ /* Treat as NOP. */
+ return;
+ }
+ gen_dmfc0(ctx, cpu_gpr[rt], rd, ctx->opcode & 0x7);
+ opn = "dmfc0";
+ break;
+ case OPC_DMTC0:
+ check_insn(ctx, ISA_MIPS3);
+ {
+ TCGv t0 = tcg_temp_new();
+
+ gen_load_gpr(t0, rt);
+ gen_dmtc0(ctx, t0, rd, ctx->opcode & 0x7);
+ tcg_temp_free(t0);
+ }
+ opn = "dmtc0";
+ break;
+#endif
+ case OPC_MFHC0:
+ check_mvh(ctx);
+ if (rt == 0) {
+ /* Treat as NOP. */
+ return;
+ }
+ gen_mfhc0(ctx, cpu_gpr[rt], rd, ctx->opcode & 0x7);
+ opn = "mfhc0";
+ break;
+ case OPC_MTHC0:
+ check_mvh(ctx);
+ {
+ TCGv t0 = tcg_temp_new();
+ gen_load_gpr(t0, rt);
+ gen_mthc0(ctx, t0, rd, ctx->opcode & 0x7);
+ tcg_temp_free(t0);
+ }
+ opn = "mthc0";
+ break;
+ case OPC_MFTR:
+ check_insn(ctx, ASE_MT);
+ if (rd == 0) {
+ /* Treat as NOP. */
+ return;
+ }
+ gen_mftr(env, ctx, rt, rd, (ctx->opcode >> 5) & 1,
+ ctx->opcode & 0x7, (ctx->opcode >> 4) & 1);
+ opn = "mftr";
+ break;
+ case OPC_MTTR:
+ check_insn(ctx, ASE_MT);
+ gen_mttr(env, ctx, rd, rt, (ctx->opcode >> 5) & 1,
+ ctx->opcode & 0x7, (ctx->opcode >> 4) & 1);
+ opn = "mttr";
+ break;
+ case OPC_TLBWI:
+ opn = "tlbwi";
+ if (!env->tlb->helper_tlbwi)
+ goto die;
+ gen_helper_tlbwi(cpu_env);
+ break;
+ case OPC_TLBINV:
+ opn = "tlbinv";
+ if (ctx->ie >= 2) {
+ if (!env->tlb->helper_tlbinv) {
+ goto die;
+ }
+ gen_helper_tlbinv(cpu_env);
+ } /* treat as nop if TLBINV not supported */
+ break;
+ case OPC_TLBINVF:
+ opn = "tlbinvf";
+ if (ctx->ie >= 2) {
+ if (!env->tlb->helper_tlbinvf) {
+ goto die;
+ }
+ gen_helper_tlbinvf(cpu_env);
+ } /* treat as nop if TLBINV not supported */
+ break;
+ case OPC_TLBWR:
+ opn = "tlbwr";
+ if (!env->tlb->helper_tlbwr)
+ goto die;
+ gen_helper_tlbwr(cpu_env);
+ break;
+ case OPC_TLBP:
+ opn = "tlbp";
+ if (!env->tlb->helper_tlbp)
+ goto die;
+ gen_helper_tlbp(cpu_env);
+ break;
+ case OPC_TLBR:
+ opn = "tlbr";
+ if (!env->tlb->helper_tlbr)
+ goto die;
+ gen_helper_tlbr(cpu_env);
+ break;
+ case OPC_ERET: /* OPC_ERETNC */
+ if ((ctx->insn_flags & ISA_MIPS32R6) &&
+ (ctx->hflags & MIPS_HFLAG_BMASK)) {
+ goto die;
+ } else {
+ int bit_shift = (ctx->hflags & MIPS_HFLAG_M16) ? 16 : 6;
+ if (ctx->opcode & (1 << bit_shift)) {
+ /* OPC_ERETNC */
+ opn = "eretnc";
+ check_insn(ctx, ISA_MIPS32R5);
+ gen_helper_eretnc(cpu_env);
+ } else {
+ /* OPC_ERET */
+ opn = "eret";
+ check_insn(ctx, ISA_MIPS2);
+ gen_helper_eret(cpu_env);
+ }
+ ctx->bstate = BS_EXCP;
+ }
+ break;
+ case OPC_DERET:
+ opn = "deret";
+ check_insn(ctx, ISA_MIPS32);
+ if ((ctx->insn_flags & ISA_MIPS32R6) &&
+ (ctx->hflags & MIPS_HFLAG_BMASK)) {
+ goto die;
+ }
+ if (!(ctx->hflags & MIPS_HFLAG_DM)) {
+ MIPS_INVAL(opn);
+ generate_exception_end(ctx, EXCP_RI);
+ } else {
+ gen_helper_deret(cpu_env);
+ ctx->bstate = BS_EXCP;
+ }
+ break;
+ case OPC_WAIT:
+ opn = "wait";
+ check_insn(ctx, ISA_MIPS3 | ISA_MIPS32);
+ if ((ctx->insn_flags & ISA_MIPS32R6) &&
+ (ctx->hflags & MIPS_HFLAG_BMASK)) {
+ goto die;
+ }
+ /* If we get an exception, we want to restart at next instruction */
+ ctx->pc += 4;
+ save_cpu_state(ctx, 1);
+ ctx->pc -= 4;
+ gen_helper_wait(cpu_env);
+ ctx->bstate = BS_EXCP;
+ break;
+ default:
+ die:
+ MIPS_INVAL(opn);
+ generate_exception_end(ctx, EXCP_RI);
+ return;
+ }
+ (void)opn; /* avoid a compiler warning */
+}
+#endif /* !CONFIG_USER_ONLY */
+
+/* CP1 Branches (before delay slot) */
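+/*
+ * Evaluate the FCR31 condition code(s) selected by 'cc', store the branch
+ * condition in 'bcond' and record the branch target; the delay slot (and,
+ * for the branch-likely variants, its nullification) is handled later via
+ * the hflags set here.
+ */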
+static void gen_compute_branch1(DisasContext *ctx, uint32_t op,
+ int32_t cc, int32_t offset)
+{
+ target_ulong btarget;
+ TCGv_i32 t0 = tcg_temp_new_i32();
+
+ if ((ctx->insn_flags & ISA_MIPS32R6) && (ctx->hflags & MIPS_HFLAG_BMASK)) {
+ generate_exception_end(ctx, EXCP_RI);
+ goto out;
+ }
+
+ if (cc != 0)
+ check_insn(ctx, ISA_MIPS4 | ISA_MIPS32);
+
+ btarget = ctx->pc + 4 + offset;
+
+ switch (op) {
+ case OPC_BC1F:
+ tcg_gen_shri_i32(t0, fpu_fcr31, get_fp_bit(cc));
+ tcg_gen_not_i32(t0, t0);
+ tcg_gen_andi_i32(t0, t0, 1);
+ tcg_gen_extu_i32_tl(bcond, t0);
+ goto not_likely;
+ case OPC_BC1FL:
+ tcg_gen_shri_i32(t0, fpu_fcr31, get_fp_bit(cc));
+ tcg_gen_not_i32(t0, t0);
+ tcg_gen_andi_i32(t0, t0, 1);
+ tcg_gen_extu_i32_tl(bcond, t0);
+ goto likely;
+ case OPC_BC1T:
+ tcg_gen_shri_i32(t0, fpu_fcr31, get_fp_bit(cc));
+ tcg_gen_andi_i32(t0, t0, 1);
+ tcg_gen_extu_i32_tl(bcond, t0);
+ goto not_likely;
+ case OPC_BC1TL:
+ tcg_gen_shri_i32(t0, fpu_fcr31, get_fp_bit(cc));
+ tcg_gen_andi_i32(t0, t0, 1);
+ tcg_gen_extu_i32_tl(bcond, t0);
+ likely:
+ ctx->hflags |= MIPS_HFLAG_BL;
+ break;
+ case OPC_BC1FANY2:
+ {
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ tcg_gen_shri_i32(t0, fpu_fcr31, get_fp_bit(cc));
+ tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc+1));
+ tcg_gen_nand_i32(t0, t0, t1);
+ tcg_temp_free_i32(t1);
+ tcg_gen_andi_i32(t0, t0, 1);
+ tcg_gen_extu_i32_tl(bcond, t0);
+ }
+ goto not_likely;
+ case OPC_BC1TANY2:
+ {
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ tcg_gen_shri_i32(t0, fpu_fcr31, get_fp_bit(cc));
+ tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc+1));
+ tcg_gen_or_i32(t0, t0, t1);
+ tcg_temp_free_i32(t1);
+ tcg_gen_andi_i32(t0, t0, 1);
+ tcg_gen_extu_i32_tl(bcond, t0);
+ }
+ goto not_likely;
+ case OPC_BC1FANY4:
+ {
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ tcg_gen_shri_i32(t0, fpu_fcr31, get_fp_bit(cc));
+ tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc+1));
+ tcg_gen_and_i32(t0, t0, t1);
+ tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc+2));
+ tcg_gen_and_i32(t0, t0, t1);
+ tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc+3));
+ tcg_gen_nand_i32(t0, t0, t1);
+ tcg_temp_free_i32(t1);
+ tcg_gen_andi_i32(t0, t0, 1);
+ tcg_gen_extu_i32_tl(bcond, t0);
+ }
+ goto not_likely;
+ case OPC_BC1TANY4:
+ {
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ tcg_gen_shri_i32(t0, fpu_fcr31, get_fp_bit(cc));
+ tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc+1));
+ tcg_gen_or_i32(t0, t0, t1);
+ tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc+2));
+ tcg_gen_or_i32(t0, t0, t1);
+ tcg_gen_shri_i32(t1, fpu_fcr31, get_fp_bit(cc+3));
+ tcg_gen_or_i32(t0, t0, t1);
+ tcg_temp_free_i32(t1);
+ tcg_gen_andi_i32(t0, t0, 1);
+ tcg_gen_extu_i32_tl(bcond, t0);
+ }
+ not_likely:
+ ctx->hflags |= MIPS_HFLAG_BC;
+ break;
+ default:
+ MIPS_INVAL("cp1 cond branch");
+ generate_exception_end(ctx, EXCP_RI);
+ goto out;
+ }
+ ctx->btarget = btarget;
+ ctx->hflags |= MIPS_HFLAG_BDS32;
+ out:
+ tcg_temp_free_i32(t0);
+}
+
+/* R6 CP1 Branches */
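+/*
+ * BC1EQZ/BC1NEZ (R6) branch on bit 0 of FPR 'ft' rather than on an FCR31
+ * condition code; there are no branch-likely variants.
+ */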
+static void gen_compute_branch1_r6(DisasContext *ctx, uint32_t op,
+ int32_t ft, int32_t offset,
+ int delayslot_size)
+{
+ target_ulong btarget;
+ TCGv_i64 t0 = tcg_temp_new_i64();
+
+ if (ctx->hflags & MIPS_HFLAG_BMASK) {
+#ifdef MIPS_DEBUG_DISAS
+ LOG_DISAS("Branch in delay / forbidden slot at PC 0x" TARGET_FMT_lx
+ "\n", ctx->pc);
+#endif
+ generate_exception_end(ctx, EXCP_RI);
+ goto out;
+ }
+
+ gen_load_fpr64(ctx, t0, ft);
+ tcg_gen_andi_i64(t0, t0, 1);
+
+ btarget = addr_add(ctx, ctx->pc + 4, offset);
+
+ switch (op) {
+ case OPC_BC1EQZ:
+ tcg_gen_xori_i64(t0, t0, 1);
+ ctx->hflags |= MIPS_HFLAG_BC;
+ break;
+ case OPC_BC1NEZ:
+ /* t0 already set */
+ ctx->hflags |= MIPS_HFLAG_BC;
+ break;
+ default:
+ MIPS_INVAL("cp1 cond branch");
+ generate_exception_end(ctx, EXCP_RI);
+ goto out;
+ }
+
+ tcg_gen_trunc_i64_tl(bcond, t0);
+
+ ctx->btarget = btarget;
+
+ switch (delayslot_size) {
+ case 2:
+ ctx->hflags |= MIPS_HFLAG_BDS16;
+ break;
+ case 4:
+ ctx->hflags |= MIPS_HFLAG_BDS32;
+ break;
+ }
+
+out:
+ tcg_temp_free_i64(t0);
+}
+
+/* Coprocessor 1 (FPU) */
+
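+/* FOP() packs the COP1 'fmt' field (instruction bits 25..21) and the
+   function code (bits 5..0) into a single value used by the enums below. */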
+#define FOP(func, fmt) (((fmt) << 21) | (func))
+
+enum fopcode {
+ OPC_ADD_S = FOP(0, FMT_S),
+ OPC_SUB_S = FOP(1, FMT_S),
+ OPC_MUL_S = FOP(2, FMT_S),
+ OPC_DIV_S = FOP(3, FMT_S),
+ OPC_SQRT_S = FOP(4, FMT_S),
+ OPC_ABS_S = FOP(5, FMT_S),
+ OPC_MOV_S = FOP(6, FMT_S),
+ OPC_NEG_S = FOP(7, FMT_S),
+ OPC_ROUND_L_S = FOP(8, FMT_S),
+ OPC_TRUNC_L_S = FOP(9, FMT_S),
+ OPC_CEIL_L_S = FOP(10, FMT_S),
+ OPC_FLOOR_L_S = FOP(11, FMT_S),
+ OPC_ROUND_W_S = FOP(12, FMT_S),
+ OPC_TRUNC_W_S = FOP(13, FMT_S),
+ OPC_CEIL_W_S = FOP(14, FMT_S),
+ OPC_FLOOR_W_S = FOP(15, FMT_S),
+ OPC_SEL_S = FOP(16, FMT_S),
+ OPC_MOVCF_S = FOP(17, FMT_S),
+ OPC_MOVZ_S = FOP(18, FMT_S),
+ OPC_MOVN_S = FOP(19, FMT_S),
+ OPC_SELEQZ_S = FOP(20, FMT_S),
+ OPC_RECIP_S = FOP(21, FMT_S),
+ OPC_RSQRT_S = FOP(22, FMT_S),
+ OPC_SELNEZ_S = FOP(23, FMT_S),
+ OPC_MADDF_S = FOP(24, FMT_S),
+ OPC_MSUBF_S = FOP(25, FMT_S),
+ OPC_RINT_S = FOP(26, FMT_S),
+ OPC_CLASS_S = FOP(27, FMT_S),
+ OPC_MIN_S = FOP(28, FMT_S),
+ OPC_RECIP2_S = FOP(28, FMT_S),
+ OPC_MINA_S = FOP(29, FMT_S),
+ OPC_RECIP1_S = FOP(29, FMT_S),
+ OPC_MAX_S = FOP(30, FMT_S),
+ OPC_RSQRT1_S = FOP(30, FMT_S),
+ OPC_MAXA_S = FOP(31, FMT_S),
+ OPC_RSQRT2_S = FOP(31, FMT_S),
+ OPC_CVT_D_S = FOP(33, FMT_S),
+ OPC_CVT_W_S = FOP(36, FMT_S),
+ OPC_CVT_L_S = FOP(37, FMT_S),
+ OPC_CVT_PS_S = FOP(38, FMT_S),
+ OPC_CMP_F_S = FOP (48, FMT_S),
+ OPC_CMP_UN_S = FOP (49, FMT_S),
+ OPC_CMP_EQ_S = FOP (50, FMT_S),
+ OPC_CMP_UEQ_S = FOP (51, FMT_S),
+ OPC_CMP_OLT_S = FOP (52, FMT_S),
+ OPC_CMP_ULT_S = FOP (53, FMT_S),
+ OPC_CMP_OLE_S = FOP (54, FMT_S),
+ OPC_CMP_ULE_S = FOP (55, FMT_S),
+ OPC_CMP_SF_S = FOP (56, FMT_S),
+ OPC_CMP_NGLE_S = FOP (57, FMT_S),
+ OPC_CMP_SEQ_S = FOP (58, FMT_S),
+ OPC_CMP_NGL_S = FOP (59, FMT_S),
+ OPC_CMP_LT_S = FOP (60, FMT_S),
+ OPC_CMP_NGE_S = FOP (61, FMT_S),
+ OPC_CMP_LE_S = FOP (62, FMT_S),
+ OPC_CMP_NGT_S = FOP (63, FMT_S),
+
+ OPC_ADD_D = FOP(0, FMT_D),
+ OPC_SUB_D = FOP(1, FMT_D),
+ OPC_MUL_D = FOP(2, FMT_D),
+ OPC_DIV_D = FOP(3, FMT_D),
+ OPC_SQRT_D = FOP(4, FMT_D),
+ OPC_ABS_D = FOP(5, FMT_D),
+ OPC_MOV_D = FOP(6, FMT_D),
+ OPC_NEG_D = FOP(7, FMT_D),
+ OPC_ROUND_L_D = FOP(8, FMT_D),
+ OPC_TRUNC_L_D = FOP(9, FMT_D),
+ OPC_CEIL_L_D = FOP(10, FMT_D),
+ OPC_FLOOR_L_D = FOP(11, FMT_D),
+ OPC_ROUND_W_D = FOP(12, FMT_D),
+ OPC_TRUNC_W_D = FOP(13, FMT_D),
+ OPC_CEIL_W_D = FOP(14, FMT_D),
+ OPC_FLOOR_W_D = FOP(15, FMT_D),
+ OPC_SEL_D = FOP(16, FMT_D),
+ OPC_MOVCF_D = FOP(17, FMT_D),
+ OPC_MOVZ_D = FOP(18, FMT_D),
+ OPC_MOVN_D = FOP(19, FMT_D),
+ OPC_SELEQZ_D = FOP(20, FMT_D),
+ OPC_RECIP_D = FOP(21, FMT_D),
+ OPC_RSQRT_D = FOP(22, FMT_D),
+ OPC_SELNEZ_D = FOP(23, FMT_D),
+ OPC_MADDF_D = FOP(24, FMT_D),
+ OPC_MSUBF_D = FOP(25, FMT_D),
+ OPC_RINT_D = FOP(26, FMT_D),
+ OPC_CLASS_D = FOP(27, FMT_D),
+ OPC_MIN_D = FOP(28, FMT_D),
+ OPC_RECIP2_D = FOP(28, FMT_D),
+ OPC_MINA_D = FOP(29, FMT_D),
+ OPC_RECIP1_D = FOP(29, FMT_D),
+ OPC_MAX_D = FOP(30, FMT_D),
+ OPC_RSQRT1_D = FOP(30, FMT_D),
+ OPC_MAXA_D = FOP(31, FMT_D),
+ OPC_RSQRT2_D = FOP(31, FMT_D),
+ OPC_CVT_S_D = FOP(32, FMT_D),
+ OPC_CVT_W_D = FOP(36, FMT_D),
+ OPC_CVT_L_D = FOP(37, FMT_D),
+ OPC_CMP_F_D = FOP (48, FMT_D),
+ OPC_CMP_UN_D = FOP (49, FMT_D),
+ OPC_CMP_EQ_D = FOP (50, FMT_D),
+ OPC_CMP_UEQ_D = FOP (51, FMT_D),
+ OPC_CMP_OLT_D = FOP (52, FMT_D),
+ OPC_CMP_ULT_D = FOP (53, FMT_D),
+ OPC_CMP_OLE_D = FOP (54, FMT_D),
+ OPC_CMP_ULE_D = FOP (55, FMT_D),
+ OPC_CMP_SF_D = FOP (56, FMT_D),
+ OPC_CMP_NGLE_D = FOP (57, FMT_D),
+ OPC_CMP_SEQ_D = FOP (58, FMT_D),
+ OPC_CMP_NGL_D = FOP (59, FMT_D),
+ OPC_CMP_LT_D = FOP (60, FMT_D),
+ OPC_CMP_NGE_D = FOP (61, FMT_D),
+ OPC_CMP_LE_D = FOP (62, FMT_D),
+ OPC_CMP_NGT_D = FOP (63, FMT_D),
+
+ OPC_CVT_S_W = FOP(32, FMT_W),
+ OPC_CVT_D_W = FOP(33, FMT_W),
+ OPC_CVT_S_L = FOP(32, FMT_L),
+ OPC_CVT_D_L = FOP(33, FMT_L),
+ OPC_CVT_PS_PW = FOP(38, FMT_W),
+
+ OPC_ADD_PS = FOP(0, FMT_PS),
+ OPC_SUB_PS = FOP(1, FMT_PS),
+ OPC_MUL_PS = FOP(2, FMT_PS),
+ OPC_DIV_PS = FOP(3, FMT_PS),
+ OPC_ABS_PS = FOP(5, FMT_PS),
+ OPC_MOV_PS = FOP(6, FMT_PS),
+ OPC_NEG_PS = FOP(7, FMT_PS),
+ OPC_MOVCF_PS = FOP(17, FMT_PS),
+ OPC_MOVZ_PS = FOP(18, FMT_PS),
+ OPC_MOVN_PS = FOP(19, FMT_PS),
+ OPC_ADDR_PS = FOP(24, FMT_PS),
+ OPC_MULR_PS = FOP(26, FMT_PS),
+ OPC_RECIP2_PS = FOP(28, FMT_PS),
+ OPC_RECIP1_PS = FOP(29, FMT_PS),
+ OPC_RSQRT1_PS = FOP(30, FMT_PS),
+ OPC_RSQRT2_PS = FOP(31, FMT_PS),
+
+ OPC_CVT_S_PU = FOP(32, FMT_PS),
+ OPC_CVT_PW_PS = FOP(36, FMT_PS),
+ OPC_CVT_S_PL = FOP(40, FMT_PS),
+ OPC_PLL_PS = FOP(44, FMT_PS),
+ OPC_PLU_PS = FOP(45, FMT_PS),
+ OPC_PUL_PS = FOP(46, FMT_PS),
+ OPC_PUU_PS = FOP(47, FMT_PS),
+ OPC_CMP_F_PS = FOP (48, FMT_PS),
+ OPC_CMP_UN_PS = FOP (49, FMT_PS),
+ OPC_CMP_EQ_PS = FOP (50, FMT_PS),
+ OPC_CMP_UEQ_PS = FOP (51, FMT_PS),
+ OPC_CMP_OLT_PS = FOP (52, FMT_PS),
+ OPC_CMP_ULT_PS = FOP (53, FMT_PS),
+ OPC_CMP_OLE_PS = FOP (54, FMT_PS),
+ OPC_CMP_ULE_PS = FOP (55, FMT_PS),
+ OPC_CMP_SF_PS = FOP (56, FMT_PS),
+ OPC_CMP_NGLE_PS = FOP (57, FMT_PS),
+ OPC_CMP_SEQ_PS = FOP (58, FMT_PS),
+ OPC_CMP_NGL_PS = FOP (59, FMT_PS),
+ OPC_CMP_LT_PS = FOP (60, FMT_PS),
+ OPC_CMP_NGE_PS = FOP (61, FMT_PS),
+ OPC_CMP_LE_PS = FOP (62, FMT_PS),
+ OPC_CMP_NGT_PS = FOP (63, FMT_PS),
+};
+
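+/* R6 CMP.condn.fmt re-uses the W/L format encodings for single/double
+   precision comparisons, hence FMT_W/FMT_L in the *_S/*_D names below. */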
+enum r6_f_cmp_op {
+ R6_OPC_CMP_AF_S = FOP(0, FMT_W),
+ R6_OPC_CMP_UN_S = FOP(1, FMT_W),
+ R6_OPC_CMP_EQ_S = FOP(2, FMT_W),
+ R6_OPC_CMP_UEQ_S = FOP(3, FMT_W),
+ R6_OPC_CMP_LT_S = FOP(4, FMT_W),
+ R6_OPC_CMP_ULT_S = FOP(5, FMT_W),
+ R6_OPC_CMP_LE_S = FOP(6, FMT_W),
+ R6_OPC_CMP_ULE_S = FOP(7, FMT_W),
+ R6_OPC_CMP_SAF_S = FOP(8, FMT_W),
+ R6_OPC_CMP_SUN_S = FOP(9, FMT_W),
+ R6_OPC_CMP_SEQ_S = FOP(10, FMT_W),
+ R6_OPC_CMP_SEUQ_S = FOP(11, FMT_W),
+ R6_OPC_CMP_SLT_S = FOP(12, FMT_W),
+ R6_OPC_CMP_SULT_S = FOP(13, FMT_W),
+ R6_OPC_CMP_SLE_S = FOP(14, FMT_W),
+ R6_OPC_CMP_SULE_S = FOP(15, FMT_W),
+ R6_OPC_CMP_OR_S = FOP(17, FMT_W),
+ R6_OPC_CMP_UNE_S = FOP(18, FMT_W),
+ R6_OPC_CMP_NE_S = FOP(19, FMT_W),
+ R6_OPC_CMP_SOR_S = FOP(25, FMT_W),
+ R6_OPC_CMP_SUNE_S = FOP(26, FMT_W),
+ R6_OPC_CMP_SNE_S = FOP(27, FMT_W),
+
+ R6_OPC_CMP_AF_D = FOP(0, FMT_L),
+ R6_OPC_CMP_UN_D = FOP(1, FMT_L),
+ R6_OPC_CMP_EQ_D = FOP(2, FMT_L),
+ R6_OPC_CMP_UEQ_D = FOP(3, FMT_L),
+ R6_OPC_CMP_LT_D = FOP(4, FMT_L),
+ R6_OPC_CMP_ULT_D = FOP(5, FMT_L),
+ R6_OPC_CMP_LE_D = FOP(6, FMT_L),
+ R6_OPC_CMP_ULE_D = FOP(7, FMT_L),
+ R6_OPC_CMP_SAF_D = FOP(8, FMT_L),
+ R6_OPC_CMP_SUN_D = FOP(9, FMT_L),
+ R6_OPC_CMP_SEQ_D = FOP(10, FMT_L),
+ R6_OPC_CMP_SEUQ_D = FOP(11, FMT_L),
+ R6_OPC_CMP_SLT_D = FOP(12, FMT_L),
+ R6_OPC_CMP_SULT_D = FOP(13, FMT_L),
+ R6_OPC_CMP_SLE_D = FOP(14, FMT_L),
+ R6_OPC_CMP_SULE_D = FOP(15, FMT_L),
+ R6_OPC_CMP_OR_D = FOP(17, FMT_L),
+ R6_OPC_CMP_UNE_D = FOP(18, FMT_L),
+ R6_OPC_CMP_NE_D = FOP(19, FMT_L),
+ R6_OPC_CMP_SOR_D = FOP(25, FMT_L),
+ R6_OPC_CMP_SUNE_D = FOP(26, FMT_L),
+ R6_OPC_CMP_SNE_D = FOP(27, FMT_L),
+};
+
+static void gen_cp1 (DisasContext *ctx, uint32_t opc, int rt, int fs)
+{
+ TCGv t0 = tcg_temp_new();
+
+ switch (opc) {
+ case OPC_MFC1:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ tcg_gen_ext_i32_tl(t0, fp0);
+ tcg_temp_free_i32(fp0);
+ }
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_MTC1:
+ gen_load_gpr(t0, rt);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(fp0, t0);
+ gen_store_fpr32(ctx, fp0, fs);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_CFC1:
+ gen_helper_1e0i(cfc1, t0, fs);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_CTC1:
+ gen_load_gpr(t0, rt);
+ save_cpu_state(ctx, 0);
+ {
+ TCGv_i32 fs_tmp = tcg_const_i32(fs);
+
+ gen_helper_0e2i(ctc1, t0, fs_tmp, rt);
+ tcg_temp_free_i32(fs_tmp);
+ }
+ /* Stop translation as we may have changed hflags */
+ ctx->bstate = BS_STOP;
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DMFC1:
+ gen_load_fpr64(ctx, t0, fs);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_DMTC1:
+ gen_load_gpr(t0, rt);
+ gen_store_fpr64(ctx, t0, fs);
+ break;
+#endif
+ case OPC_MFHC1:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32h(ctx, fp0, fs);
+ tcg_gen_ext_i32_tl(t0, fp0);
+ tcg_temp_free_i32(fp0);
+ }
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_MTHC1:
+ gen_load_gpr(t0, rt);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(fp0, t0);
+ gen_store_fpr32h(ctx, fp0, fs);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ default:
+ MIPS_INVAL("cp1 move");
+ generate_exception_end(ctx, EXCP_RI);
+ goto out;
+ }
+
+ out:
+ tcg_temp_free(t0);
+}
+
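+/* MOVF/MOVT: conditionally copy GPR 'rs' to GPR 'rd' depending on whether
+   FP condition code 'cc' matches 'tf'. */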
+static void gen_movci (DisasContext *ctx, int rd, int rs, int cc, int tf)
+{
+ TCGLabel *l1;
+ TCGCond cond;
+ TCGv_i32 t0;
+
+ if (rd == 0) {
+ /* Treat as NOP. */
+ return;
+ }
+
+ if (tf)
+ cond = TCG_COND_EQ;
+ else
+ cond = TCG_COND_NE;
+
+ l1 = gen_new_label();
+ t0 = tcg_temp_new_i32();
+ tcg_gen_andi_i32(t0, fpu_fcr31, 1 << get_fp_bit(cc));
+ tcg_gen_brcondi_i32(cond, t0, 0, l1);
+ tcg_temp_free_i32(t0);
+ if (rs == 0) {
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ } else {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
+ }
+ gen_set_label(l1);
+}
+
+static inline void gen_movcf_s(DisasContext *ctx, int fs, int fd, int cc,
+ int tf)
+{
+ int cond;
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGLabel *l1 = gen_new_label();
+
+ if (tf)
+ cond = TCG_COND_EQ;
+ else
+ cond = TCG_COND_NE;
+
+ tcg_gen_andi_i32(t0, fpu_fcr31, 1 << get_fp_bit(cc));
+ tcg_gen_brcondi_i32(cond, t0, 0, l1);
+ gen_load_fpr32(ctx, t0, fs);
+ gen_store_fpr32(ctx, t0, fd);
+ gen_set_label(l1);
+ tcg_temp_free_i32(t0);
+}
+
+static inline void gen_movcf_d (DisasContext *ctx, int fs, int fd, int cc, int tf)
+{
+ int cond;
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i64 fp0;
+ TCGLabel *l1 = gen_new_label();
+
+ if (tf)
+ cond = TCG_COND_EQ;
+ else
+ cond = TCG_COND_NE;
+
+ tcg_gen_andi_i32(t0, fpu_fcr31, 1 << get_fp_bit(cc));
+ tcg_gen_brcondi_i32(cond, t0, 0, l1);
+ tcg_temp_free_i32(t0);
+ fp0 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ gen_set_label(l1);
+}
+
+static inline void gen_movcf_ps(DisasContext *ctx, int fs, int fd,
+ int cc, int tf)
+{
+ int cond;
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+
+ if (tf)
+ cond = TCG_COND_EQ;
+ else
+ cond = TCG_COND_NE;
+
+ tcg_gen_andi_i32(t0, fpu_fcr31, 1 << get_fp_bit(cc));
+ tcg_gen_brcondi_i32(cond, t0, 0, l1);
+ gen_load_fpr32(ctx, t0, fs);
+ gen_store_fpr32(ctx, t0, fd);
+ gen_set_label(l1);
+
+ tcg_gen_andi_i32(t0, fpu_fcr31, 1 << get_fp_bit(cc+1));
+ tcg_gen_brcondi_i32(cond, t0, 0, l2);
+ gen_load_fpr32h(ctx, t0, fs);
+ gen_store_fpr32h(ctx, t0, fd);
+ tcg_temp_free_i32(t0);
+ gen_set_label(l2);
+}
+
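+/* R6 FP selects: SEL picks ft or fs based on bit 0 of fd, while SELEQZ and
+   SELNEZ write fs or zero depending on bit 0 of ft (single precision here,
+   double precision in gen_sel_d below). */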
+static void gen_sel_s(DisasContext *ctx, enum fopcode op1, int fd, int ft,
+ int fs)
+{
+ TCGv_i32 t1 = tcg_const_i32(0);
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+ TCGv_i32 fp2 = tcg_temp_new_i32();
+ gen_load_fpr32(ctx, fp0, fd);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_load_fpr32(ctx, fp2, fs);
+
+ switch (op1) {
+ case OPC_SEL_S:
+ tcg_gen_andi_i32(fp0, fp0, 1);
+ tcg_gen_movcond_i32(TCG_COND_NE, fp0, fp0, t1, fp1, fp2);
+ break;
+ case OPC_SELEQZ_S:
+ tcg_gen_andi_i32(fp1, fp1, 1);
+ tcg_gen_movcond_i32(TCG_COND_EQ, fp0, fp1, t1, fp2, t1);
+ break;
+ case OPC_SELNEZ_S:
+ tcg_gen_andi_i32(fp1, fp1, 1);
+ tcg_gen_movcond_i32(TCG_COND_NE, fp0, fp1, t1, fp2, t1);
+ break;
+ default:
+ MIPS_INVAL("gen_sel_s");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp2);
+ tcg_temp_free_i32(fp1);
+ tcg_temp_free_i32(fp0);
+ tcg_temp_free_i32(t1);
+}
+
+static void gen_sel_d(DisasContext *ctx, enum fopcode op1, int fd, int ft,
+ int fs)
+{
+ TCGv_i64 t1 = tcg_const_i64(0);
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+ TCGv_i64 fp2 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fd);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_load_fpr64(ctx, fp2, fs);
+
+ switch (op1) {
+ case OPC_SEL_D:
+ tcg_gen_andi_i64(fp0, fp0, 1);
+ tcg_gen_movcond_i64(TCG_COND_NE, fp0, fp0, t1, fp1, fp2);
+ break;
+ case OPC_SELEQZ_D:
+ tcg_gen_andi_i64(fp1, fp1, 1);
+ tcg_gen_movcond_i64(TCG_COND_EQ, fp0, fp1, t1, fp2, t1);
+ break;
+ case OPC_SELNEZ_D:
+ tcg_gen_andi_i64(fp1, fp1, 1);
+ tcg_gen_movcond_i64(TCG_COND_NE, fp0, fp1, t1, fp2, t1);
+ break;
+ default:
+ MIPS_INVAL("gen_sel_d");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp2);
+ tcg_temp_free_i64(fp1);
+ tcg_temp_free_i64(fp0);
+ tcg_temp_free_i64(t1);
+}
+
+static void gen_farith (DisasContext *ctx, enum fopcode op1,
+ int ft, int fs, int fd, int cc)
+{
+ uint32_t func = ctx->opcode & 0x3f;
+ switch (op1) {
+ case OPC_ADD_S:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_helper_float_add_s(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i32(fp1);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_SUB_S:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_helper_float_sub_s(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i32(fp1);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_MUL_S:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_helper_float_mul_s(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i32(fp1);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_DIV_S:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_helper_float_div_s(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i32(fp1);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_SQRT_S:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_helper_float_sqrt_s(fp0, cpu_env, fp0);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_ABS_S:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ if (ctx->abs2008) {
+ tcg_gen_andi_i32(fp0, fp0, 0x7fffffffUL);
+ } else {
+ gen_helper_float_abs_s(fp0, fp0);
+ }
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_MOV_S:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_NEG_S:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ if (ctx->abs2008) {
+ tcg_gen_xori_i32(fp0, fp0, 1UL << 31);
+ } else {
+ gen_helper_float_chs_s(fp0, fp0);
+ }
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_ROUND_L_S:
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i32 fp32 = tcg_temp_new_i32();
+ TCGv_i64 fp64 = tcg_temp_new_i64();
+
+ gen_load_fpr32(ctx, fp32, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_round_2008_l_s(fp64, cpu_env, fp32);
+ } else {
+ gen_helper_float_round_l_s(fp64, cpu_env, fp32);
+ }
+ tcg_temp_free_i32(fp32);
+ gen_store_fpr64(ctx, fp64, fd);
+ tcg_temp_free_i64(fp64);
+ }
+ break;
+ case OPC_TRUNC_L_S:
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i32 fp32 = tcg_temp_new_i32();
+ TCGv_i64 fp64 = tcg_temp_new_i64();
+
+ gen_load_fpr32(ctx, fp32, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_trunc_2008_l_s(fp64, cpu_env, fp32);
+ } else {
+ gen_helper_float_trunc_l_s(fp64, cpu_env, fp32);
+ }
+ tcg_temp_free_i32(fp32);
+ gen_store_fpr64(ctx, fp64, fd);
+ tcg_temp_free_i64(fp64);
+ }
+ break;
+ case OPC_CEIL_L_S:
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i32 fp32 = tcg_temp_new_i32();
+ TCGv_i64 fp64 = tcg_temp_new_i64();
+
+ gen_load_fpr32(ctx, fp32, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_ceil_2008_l_s(fp64, cpu_env, fp32);
+ } else {
+ gen_helper_float_ceil_l_s(fp64, cpu_env, fp32);
+ }
+ tcg_temp_free_i32(fp32);
+ gen_store_fpr64(ctx, fp64, fd);
+ tcg_temp_free_i64(fp64);
+ }
+ break;
+ case OPC_FLOOR_L_S:
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i32 fp32 = tcg_temp_new_i32();
+ TCGv_i64 fp64 = tcg_temp_new_i64();
+
+ gen_load_fpr32(ctx, fp32, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_floor_2008_l_s(fp64, cpu_env, fp32);
+ } else {
+ gen_helper_float_floor_l_s(fp64, cpu_env, fp32);
+ }
+ tcg_temp_free_i32(fp32);
+ gen_store_fpr64(ctx, fp64, fd);
+ tcg_temp_free_i64(fp64);
+ }
+ break;
+ case OPC_ROUND_W_S:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_round_2008_w_s(fp0, cpu_env, fp0);
+ } else {
+ gen_helper_float_round_w_s(fp0, cpu_env, fp0);
+ }
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_TRUNC_W_S:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_trunc_2008_w_s(fp0, cpu_env, fp0);
+ } else {
+ gen_helper_float_trunc_w_s(fp0, cpu_env, fp0);
+ }
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_CEIL_W_S:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_ceil_2008_w_s(fp0, cpu_env, fp0);
+ } else {
+ gen_helper_float_ceil_w_s(fp0, cpu_env, fp0);
+ }
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_FLOOR_W_S:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_floor_2008_w_s(fp0, cpu_env, fp0);
+ } else {
+ gen_helper_float_floor_w_s(fp0, cpu_env, fp0);
+ }
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_SEL_S:
+ check_insn(ctx, ISA_MIPS32R6);
+ gen_sel_s(ctx, op1, fd, ft, fs);
+ break;
+ case OPC_SELEQZ_S:
+ check_insn(ctx, ISA_MIPS32R6);
+ gen_sel_s(ctx, op1, fd, ft, fs);
+ break;
+ case OPC_SELNEZ_S:
+ check_insn(ctx, ISA_MIPS32R6);
+ gen_sel_s(ctx, op1, fd, ft, fs);
+ break;
+ case OPC_MOVCF_S:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ gen_movcf_s(ctx, fs, fd, (ft >> 2) & 0x7, ft & 0x1);
+ break;
+ case OPC_MOVZ_S:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ {
+ TCGLabel *l1 = gen_new_label();
+ TCGv_i32 fp0;
+
+ if (ft != 0) {
+ tcg_gen_brcondi_tl(TCG_COND_NE, cpu_gpr[ft], 0, l1);
+ }
+ fp0 = tcg_temp_new_i32();
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ gen_set_label(l1);
+ }
+ break;
+ case OPC_MOVN_S:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ {
+ TCGLabel *l1 = gen_new_label();
+ TCGv_i32 fp0;
+
+ if (ft != 0) {
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[ft], 0, l1);
+ fp0 = tcg_temp_new_i32();
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ gen_set_label(l1);
+ }
+ }
+ break;
+ case OPC_RECIP_S:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_helper_float_recip_s(fp0, cpu_env, fp0);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_RSQRT_S:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_helper_float_rsqrt_s(fp0, cpu_env, fp0);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_MADDF_S:
+ check_insn(ctx, ISA_MIPS32R6);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+ TCGv_i32 fp2 = tcg_temp_new_i32();
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_load_fpr32(ctx, fp2, fd);
+ gen_helper_float_maddf_s(fp2, cpu_env, fp0, fp1, fp2);
+ gen_store_fpr32(ctx, fp2, fd);
+ tcg_temp_free_i32(fp2);
+ tcg_temp_free_i32(fp1);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_MSUBF_S:
+ check_insn(ctx, ISA_MIPS32R6);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+ TCGv_i32 fp2 = tcg_temp_new_i32();
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_load_fpr32(ctx, fp2, fd);
+ gen_helper_float_msubf_s(fp2, cpu_env, fp0, fp1, fp2);
+ gen_store_fpr32(ctx, fp2, fd);
+ tcg_temp_free_i32(fp2);
+ tcg_temp_free_i32(fp1);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_RINT_S:
+ check_insn(ctx, ISA_MIPS32R6);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_helper_float_rint_s(fp0, cpu_env, fp0);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_CLASS_S:
+ check_insn(ctx, ISA_MIPS32R6);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_helper_float_class_s(fp0, cpu_env, fp0);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_MIN_S: /* OPC_RECIP2_S */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* OPC_MIN_S */
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+ TCGv_i32 fp2 = tcg_temp_new_i32();
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_helper_float_min_s(fp2, cpu_env, fp0, fp1);
+ gen_store_fpr32(ctx, fp2, fd);
+ tcg_temp_free_i32(fp2);
+ tcg_temp_free_i32(fp1);
+ tcg_temp_free_i32(fp0);
+ } else {
+ /* OPC_RECIP2_S */
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_helper_float_recip2_s(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i32(fp1);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ }
+ break;
+ case OPC_MINA_S: /* OPC_RECIP1_S */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* OPC_MINA_S */
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+ TCGv_i32 fp2 = tcg_temp_new_i32();
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_helper_float_mina_s(fp2, cpu_env, fp0, fp1);
+ gen_store_fpr32(ctx, fp2, fd);
+ tcg_temp_free_i32(fp2);
+ tcg_temp_free_i32(fp1);
+ tcg_temp_free_i32(fp0);
+ } else {
+ /* OPC_RECIP1_S */
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_helper_float_recip1_s(fp0, cpu_env, fp0);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ }
+ break;
+ case OPC_MAX_S: /* OPC_RSQRT1_S */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* OPC_MAX_S */
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_helper_float_max_s(fp1, cpu_env, fp0, fp1);
+ gen_store_fpr32(ctx, fp1, fd);
+ tcg_temp_free_i32(fp1);
+ tcg_temp_free_i32(fp0);
+ } else {
+ /* OPC_RSQRT1_S */
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_helper_float_rsqrt1_s(fp0, cpu_env, fp0);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ }
+ break;
+ case OPC_MAXA_S: /* OPC_RSQRT2_S */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* OPC_MAXA_S */
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_helper_float_maxa_s(fp1, cpu_env, fp0, fp1);
+ gen_store_fpr32(ctx, fp1, fd);
+ tcg_temp_free_i32(fp1);
+ tcg_temp_free_i32(fp0);
+ } else {
+ /* OPC_RSQRT2_S */
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_helper_float_rsqrt2_s(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i32(fp1);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ }
+ break;
+ case OPC_CVT_D_S:
+ check_cp1_registers(ctx, fd);
+ {
+ TCGv_i32 fp32 = tcg_temp_new_i32();
+ TCGv_i64 fp64 = tcg_temp_new_i64();
+
+ gen_load_fpr32(ctx, fp32, fs);
+ gen_helper_float_cvtd_s(fp64, cpu_env, fp32);
+ tcg_temp_free_i32(fp32);
+ gen_store_fpr64(ctx, fp64, fd);
+ tcg_temp_free_i64(fp64);
+ }
+ break;
+ case OPC_CVT_W_S:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_cvt_2008_w_s(fp0, cpu_env, fp0);
+ } else {
+ gen_helper_float_cvt_w_s(fp0, cpu_env, fp0);
+ }
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_CVT_L_S:
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i32 fp32 = tcg_temp_new_i32();
+ TCGv_i64 fp64 = tcg_temp_new_i64();
+
+ gen_load_fpr32(ctx, fp32, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_cvt_2008_l_s(fp64, cpu_env, fp32);
+ } else {
+ gen_helper_float_cvt_l_s(fp64, cpu_env, fp32);
+ }
+ tcg_temp_free_i32(fp32);
+ gen_store_fpr64(ctx, fp64, fd);
+ tcg_temp_free_i64(fp64);
+ }
+ break;
+ case OPC_CVT_PS_S:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp64 = tcg_temp_new_i64();
+ TCGv_i32 fp32_0 = tcg_temp_new_i32();
+ TCGv_i32 fp32_1 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp32_0, fs);
+ gen_load_fpr32(ctx, fp32_1, ft);
+ tcg_gen_concat_i32_i64(fp64, fp32_1, fp32_0);
+ tcg_temp_free_i32(fp32_1);
+ tcg_temp_free_i32(fp32_0);
+ gen_store_fpr64(ctx, fp64, fd);
+ tcg_temp_free_i64(fp64);
+ }
+ break;
+ case OPC_CMP_F_S:
+ case OPC_CMP_UN_S:
+ case OPC_CMP_EQ_S:
+ case OPC_CMP_UEQ_S:
+ case OPC_CMP_OLT_S:
+ case OPC_CMP_ULT_S:
+ case OPC_CMP_OLE_S:
+ case OPC_CMP_ULE_S:
+ case OPC_CMP_SF_S:
+ case OPC_CMP_NGLE_S:
+ case OPC_CMP_SEQ_S:
+ case OPC_CMP_NGL_S:
+ case OPC_CMP_LT_S:
+ case OPC_CMP_NGE_S:
+ case OPC_CMP_LE_S:
+ case OPC_CMP_NGT_S:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ if (ctx->opcode & (1 << 6)) {
+ gen_cmpabs_s(ctx, func-48, ft, fs, cc);
+ } else {
+ gen_cmp_s(ctx, func-48, ft, fs, cc);
+ }
+ break;
+ case OPC_ADD_D:
+ check_cp1_registers(ctx, fs | ft | fd);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_helper_float_add_d(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_SUB_D:
+ check_cp1_registers(ctx, fs | ft | fd);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_helper_float_sub_d(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_MUL_D:
+ check_cp1_registers(ctx, fs | ft | fd);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_helper_float_mul_d(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_DIV_D:
+ check_cp1_registers(ctx, fs | ft | fd);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_helper_float_div_d(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_SQRT_D:
+ check_cp1_registers(ctx, fs | fd);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_helper_float_sqrt_d(fp0, cpu_env, fp0);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_ABS_D:
+ check_cp1_registers(ctx, fs | fd);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ if (ctx->abs2008) {
+ tcg_gen_andi_i64(fp0, fp0, 0x7fffffffffffffffULL);
+ } else {
+ gen_helper_float_abs_d(fp0, fp0);
+ }
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_MOV_D:
+ check_cp1_registers(ctx, fs | fd);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_NEG_D:
+ check_cp1_registers(ctx, fs | fd);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ if (ctx->abs2008) {
+ tcg_gen_xori_i64(fp0, fp0, 1ULL << 63);
+ } else {
+ gen_helper_float_chs_d(fp0, fp0);
+ }
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_ROUND_L_D:
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_round_2008_l_d(fp0, cpu_env, fp0);
+ } else {
+ gen_helper_float_round_l_d(fp0, cpu_env, fp0);
+ }
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_TRUNC_L_D:
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_trunc_2008_l_d(fp0, cpu_env, fp0);
+ } else {
+ gen_helper_float_trunc_l_d(fp0, cpu_env, fp0);
+ }
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_CEIL_L_D:
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_ceil_2008_l_d(fp0, cpu_env, fp0);
+ } else {
+ gen_helper_float_ceil_l_d(fp0, cpu_env, fp0);
+ }
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_FLOOR_L_D:
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_floor_2008_l_d(fp0, cpu_env, fp0);
+ } else {
+ gen_helper_float_floor_l_d(fp0, cpu_env, fp0);
+ }
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_ROUND_W_D:
+ check_cp1_registers(ctx, fs);
+ {
+ TCGv_i32 fp32 = tcg_temp_new_i32();
+ TCGv_i64 fp64 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp64, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_round_2008_w_d(fp32, cpu_env, fp64);
+ } else {
+ gen_helper_float_round_w_d(fp32, cpu_env, fp64);
+ }
+ tcg_temp_free_i64(fp64);
+ gen_store_fpr32(ctx, fp32, fd);
+ tcg_temp_free_i32(fp32);
+ }
+ break;
+ case OPC_TRUNC_W_D:
+ check_cp1_registers(ctx, fs);
+ {
+ TCGv_i32 fp32 = tcg_temp_new_i32();
+ TCGv_i64 fp64 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp64, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_trunc_2008_w_d(fp32, cpu_env, fp64);
+ } else {
+ gen_helper_float_trunc_w_d(fp32, cpu_env, fp64);
+ }
+ tcg_temp_free_i64(fp64);
+ gen_store_fpr32(ctx, fp32, fd);
+ tcg_temp_free_i32(fp32);
+ }
+ break;
+ case OPC_CEIL_W_D:
+ check_cp1_registers(ctx, fs);
+ {
+ TCGv_i32 fp32 = tcg_temp_new_i32();
+ TCGv_i64 fp64 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp64, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_ceil_2008_w_d(fp32, cpu_env, fp64);
+ } else {
+ gen_helper_float_ceil_w_d(fp32, cpu_env, fp64);
+ }
+ tcg_temp_free_i64(fp64);
+ gen_store_fpr32(ctx, fp32, fd);
+ tcg_temp_free_i32(fp32);
+ }
+ break;
+ case OPC_FLOOR_W_D:
+ check_cp1_registers(ctx, fs);
+ {
+ TCGv_i32 fp32 = tcg_temp_new_i32();
+ TCGv_i64 fp64 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp64, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_floor_2008_w_d(fp32, cpu_env, fp64);
+ } else {
+ gen_helper_float_floor_w_d(fp32, cpu_env, fp64);
+ }
+ tcg_temp_free_i64(fp64);
+ gen_store_fpr32(ctx, fp32, fd);
+ tcg_temp_free_i32(fp32);
+ }
+ break;
+ case OPC_SEL_D:
+ check_insn(ctx, ISA_MIPS32R6);
+ gen_sel_d(ctx, op1, fd, ft, fs);
+ break;
+ case OPC_SELEQZ_D:
+ check_insn(ctx, ISA_MIPS32R6);
+ gen_sel_d(ctx, op1, fd, ft, fs);
+ break;
+ case OPC_SELNEZ_D:
+ check_insn(ctx, ISA_MIPS32R6);
+ gen_sel_d(ctx, op1, fd, ft, fs);
+ break;
+ case OPC_MOVCF_D:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ gen_movcf_d(ctx, fs, fd, (ft >> 2) & 0x7, ft & 0x1);
+ break;
+ case OPC_MOVZ_D:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ {
+ TCGLabel *l1 = gen_new_label();
+ TCGv_i64 fp0;
+
+ if (ft != 0) {
+ tcg_gen_brcondi_tl(TCG_COND_NE, cpu_gpr[ft], 0, l1);
+ }
+ fp0 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ gen_set_label(l1);
+ }
+ break;
+ case OPC_MOVN_D:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ {
+ TCGLabel *l1 = gen_new_label();
+ TCGv_i64 fp0;
+
+ if (ft != 0) {
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[ft], 0, l1);
+ fp0 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ gen_set_label(l1);
+ }
+ }
+ break;
+ case OPC_RECIP_D:
+ check_cp1_registers(ctx, fs | fd);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_helper_float_recip_d(fp0, cpu_env, fp0);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_RSQRT_D:
+ check_cp1_registers(ctx, fs | fd);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_helper_float_rsqrt_d(fp0, cpu_env, fp0);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_MADDF_D:
+ check_insn(ctx, ISA_MIPS32R6);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+ TCGv_i64 fp2 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_load_fpr64(ctx, fp2, fd);
+ gen_helper_float_maddf_d(fp2, cpu_env, fp0, fp1, fp2);
+ gen_store_fpr64(ctx, fp2, fd);
+ tcg_temp_free_i64(fp2);
+ tcg_temp_free_i64(fp1);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_MSUBF_D:
+ check_insn(ctx, ISA_MIPS32R6);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+ TCGv_i64 fp2 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_load_fpr64(ctx, fp2, fd);
+ gen_helper_float_msubf_d(fp2, cpu_env, fp0, fp1, fp2);
+ gen_store_fpr64(ctx, fp2, fd);
+ tcg_temp_free_i64(fp2);
+ tcg_temp_free_i64(fp1);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_RINT_D:
+ check_insn(ctx, ISA_MIPS32R6);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_helper_float_rint_d(fp0, cpu_env, fp0);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_CLASS_D:
+ check_insn(ctx, ISA_MIPS32R6);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_helper_float_class_d(fp0, cpu_env, fp0);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_MIN_D: /* OPC_RECIP2_D */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* OPC_MIN_D */
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_helper_float_min_d(fp1, cpu_env, fp0, fp1);
+ gen_store_fpr64(ctx, fp1, fd);
+ tcg_temp_free_i64(fp1);
+ tcg_temp_free_i64(fp0);
+ } else {
+ /* OPC_RECIP2_D */
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_helper_float_recip2_d(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ }
+ break;
+ case OPC_MINA_D: /* OPC_RECIP1_D */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* OPC_MINA_D */
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_helper_float_mina_d(fp1, cpu_env, fp0, fp1);
+ gen_store_fpr64(ctx, fp1, fd);
+ tcg_temp_free_i64(fp1);
+ tcg_temp_free_i64(fp0);
+ } else {
+ /* OPC_RECIP1_D */
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_helper_float_recip1_d(fp0, cpu_env, fp0);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ }
+ break;
+ case OPC_MAX_D: /* OPC_RSQRT1_D */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* OPC_MAX_D */
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_helper_float_max_d(fp1, cpu_env, fp0, fp1);
+ gen_store_fpr64(ctx, fp1, fd);
+ tcg_temp_free_i64(fp1);
+ tcg_temp_free_i64(fp0);
+ } else {
+ /* OPC_RSQRT1_D */
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_helper_float_rsqrt1_d(fp0, cpu_env, fp0);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ }
+ break;
+ case OPC_MAXA_D: /* OPC_RSQRT2_D */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* OPC_MAXA_D */
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_helper_float_maxa_d(fp1, cpu_env, fp0, fp1);
+ gen_store_fpr64(ctx, fp1, fd);
+ tcg_temp_free_i64(fp1);
+ tcg_temp_free_i64(fp0);
+ } else {
+ /* OPC_RSQRT2_D */
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_helper_float_rsqrt2_d(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ }
+ break;
+ case OPC_CMP_F_D:
+ case OPC_CMP_UN_D:
+ case OPC_CMP_EQ_D:
+ case OPC_CMP_UEQ_D:
+ case OPC_CMP_OLT_D:
+ case OPC_CMP_ULT_D:
+ case OPC_CMP_OLE_D:
+ case OPC_CMP_ULE_D:
+ case OPC_CMP_SF_D:
+ case OPC_CMP_NGLE_D:
+ case OPC_CMP_SEQ_D:
+ case OPC_CMP_NGL_D:
+ case OPC_CMP_LT_D:
+ case OPC_CMP_NGE_D:
+ case OPC_CMP_LE_D:
+ case OPC_CMP_NGT_D:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ if (ctx->opcode & (1 << 6)) {
+ gen_cmpabs_d(ctx, func-48, ft, fs, cc);
+ } else {
+ gen_cmp_d(ctx, func-48, ft, fs, cc);
+ }
+ break;
+ case OPC_CVT_S_D:
+ check_cp1_registers(ctx, fs);
+ {
+ TCGv_i32 fp32 = tcg_temp_new_i32();
+ TCGv_i64 fp64 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp64, fs);
+ gen_helper_float_cvts_d(fp32, cpu_env, fp64);
+ tcg_temp_free_i64(fp64);
+ gen_store_fpr32(ctx, fp32, fd);
+ tcg_temp_free_i32(fp32);
+ }
+ break;
+ case OPC_CVT_W_D:
+ check_cp1_registers(ctx, fs);
+ {
+ TCGv_i32 fp32 = tcg_temp_new_i32();
+ TCGv_i64 fp64 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp64, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_cvt_2008_w_d(fp32, cpu_env, fp64);
+ } else {
+ gen_helper_float_cvt_w_d(fp32, cpu_env, fp64);
+ }
+ tcg_temp_free_i64(fp64);
+ gen_store_fpr32(ctx, fp32, fd);
+ tcg_temp_free_i32(fp32);
+ }
+ break;
+ case OPC_CVT_L_D:
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ if (ctx->nan2008) {
+ gen_helper_float_cvt_2008_l_d(fp0, cpu_env, fp0);
+ } else {
+ gen_helper_float_cvt_l_d(fp0, cpu_env, fp0);
+ }
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_CVT_S_W:
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_helper_float_cvts_w(fp0, cpu_env, fp0);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_CVT_D_W:
+ check_cp1_registers(ctx, fd);
+ {
+ TCGv_i32 fp32 = tcg_temp_new_i32();
+ TCGv_i64 fp64 = tcg_temp_new_i64();
+
+ gen_load_fpr32(ctx, fp32, fs);
+ gen_helper_float_cvtd_w(fp64, cpu_env, fp32);
+ tcg_temp_free_i32(fp32);
+ gen_store_fpr64(ctx, fp64, fd);
+ tcg_temp_free_i64(fp64);
+ }
+ break;
+ case OPC_CVT_S_L:
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i32 fp32 = tcg_temp_new_i32();
+ TCGv_i64 fp64 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp64, fs);
+ gen_helper_float_cvts_l(fp32, cpu_env, fp64);
+ tcg_temp_free_i64(fp64);
+ gen_store_fpr32(ctx, fp32, fd);
+ tcg_temp_free_i32(fp32);
+ }
+ break;
+ case OPC_CVT_D_L:
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_helper_float_cvtd_l(fp0, cpu_env, fp0);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_CVT_PS_PW:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_helper_float_cvtps_pw(fp0, cpu_env, fp0);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_ADD_PS:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_helper_float_add_ps(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_SUB_PS:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_helper_float_sub_ps(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_MUL_PS:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_helper_float_mul_ps(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_ABS_PS:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_helper_float_abs_ps(fp0, fp0);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_MOV_PS:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_NEG_PS:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_helper_float_chs_ps(fp0, fp0);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_MOVCF_PS:
+ check_ps(ctx);
+ gen_movcf_ps(ctx, fs, fd, (ft >> 2) & 0x7, ft & 0x1);
+ break;
+ case OPC_MOVZ_PS:
+ check_ps(ctx);
+ {
+ TCGLabel *l1 = gen_new_label();
+ TCGv_i64 fp0;
+
+            if (ft != 0) {
+                tcg_gen_brcondi_tl(TCG_COND_NE, cpu_gpr[ft], 0, l1);
+            }
+ fp0 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ gen_set_label(l1);
+ }
+ break;
+ case OPC_MOVN_PS:
+ check_ps(ctx);
+ {
+ TCGLabel *l1 = gen_new_label();
+ TCGv_i64 fp0;
+
+ if (ft != 0) {
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[ft], 0, l1);
+ fp0 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ gen_set_label(l1);
+ }
+ }
+ break;
+ case OPC_ADDR_PS:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, ft);
+ gen_load_fpr64(ctx, fp1, fs);
+ gen_helper_float_addr_ps(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_MULR_PS:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, ft);
+ gen_load_fpr64(ctx, fp1, fs);
+ gen_helper_float_mulr_ps(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_RECIP2_PS:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_helper_float_recip2_ps(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_RECIP1_PS:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_helper_float_recip1_ps(fp0, cpu_env, fp0);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_RSQRT1_PS:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_helper_float_rsqrt1_ps(fp0, cpu_env, fp0);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_RSQRT2_PS:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_helper_float_rsqrt2_ps(fp0, cpu_env, fp0, fp1);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_CVT_S_PU:
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32h(ctx, fp0, fs);
+ gen_helper_float_cvts_pu(fp0, cpu_env, fp0);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_CVT_PW_PS:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_helper_float_cvtpw_ps(fp0, cpu_env, fp0);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_CVT_S_PL:
+ check_cp1_64bitmode(ctx);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_helper_float_cvts_pl(fp0, cpu_env, fp0);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_PLL_PS:
+ check_ps(ctx);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_store_fpr32h(ctx, fp0, fd);
+ gen_store_fpr32(ctx, fp1, fd);
+ tcg_temp_free_i32(fp0);
+ tcg_temp_free_i32(fp1);
+ }
+ break;
+ case OPC_PLU_PS:
+ check_ps(ctx);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_load_fpr32h(ctx, fp1, ft);
+ gen_store_fpr32(ctx, fp1, fd);
+ gen_store_fpr32h(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ tcg_temp_free_i32(fp1);
+ }
+ break;
+ case OPC_PUL_PS:
+ check_ps(ctx);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+
+ gen_load_fpr32h(ctx, fp0, fs);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_store_fpr32(ctx, fp1, fd);
+ gen_store_fpr32h(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ tcg_temp_free_i32(fp1);
+ }
+ break;
+ case OPC_PUU_PS:
+ check_ps(ctx);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+
+ gen_load_fpr32h(ctx, fp0, fs);
+ gen_load_fpr32h(ctx, fp1, ft);
+ gen_store_fpr32(ctx, fp1, fd);
+ gen_store_fpr32h(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ tcg_temp_free_i32(fp1);
+ }
+ break;
+ case OPC_CMP_F_PS:
+ case OPC_CMP_UN_PS:
+ case OPC_CMP_EQ_PS:
+ case OPC_CMP_UEQ_PS:
+ case OPC_CMP_OLT_PS:
+ case OPC_CMP_ULT_PS:
+ case OPC_CMP_OLE_PS:
+ case OPC_CMP_ULE_PS:
+ case OPC_CMP_SF_PS:
+ case OPC_CMP_NGLE_PS:
+ case OPC_CMP_SEQ_PS:
+ case OPC_CMP_NGL_PS:
+ case OPC_CMP_LT_PS:
+ case OPC_CMP_NGE_PS:
+ case OPC_CMP_LE_PS:
+ case OPC_CMP_NGT_PS:
+ if (ctx->opcode & (1 << 6)) {
+ gen_cmpabs_ps(ctx, func-48, ft, fs, cc);
+ } else {
+ gen_cmp_ps(ctx, func-48, ft, fs, cc);
+ }
+ break;
+ default:
+ MIPS_INVAL("farith");
+ generate_exception_end(ctx, EXCP_RI);
+ return;
+ }
+}
+
+/* Coprocessor 3 (FPU) */
+static void gen_flt3_ldst (DisasContext *ctx, uint32_t opc,
+ int fd, int fs, int base, int index)
+{
+ TCGv t0 = tcg_temp_new();
+
+ if (base == 0) {
+ gen_load_gpr(t0, index);
+ } else if (index == 0) {
+ gen_load_gpr(t0, base);
+ } else {
+ gen_op_addr_add(ctx, t0, cpu_gpr[base], cpu_gpr[index]);
+ }
+ /* Don't do NOP if destination is zero: we must perform the actual
+ memory access. */
+ switch (opc) {
+ case OPC_LWXC1:
+ check_cop1x(ctx);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESL);
+ tcg_gen_trunc_tl_i32(fp0, t0);
+ gen_store_fpr32(ctx, fp0, fd);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_LDXC1:
+ check_cop1x(ctx);
+ check_cp1_registers(ctx, fd);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEQ);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_LUXC1:
+ check_cp1_64bitmode(ctx);
+ tcg_gen_andi_tl(t0, t0, ~0x7);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+
+ tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEQ);
+ gen_store_fpr64(ctx, fp0, fd);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_SWXC1:
+ check_cop1x(ctx);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ gen_load_fpr32(ctx, fp0, fs);
+ tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, MO_TEUL);
+ tcg_temp_free_i32(fp0);
+ }
+ break;
+ case OPC_SDXC1:
+ check_cop1x(ctx);
+ check_cp1_registers(ctx, fs);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEQ);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ case OPC_SUXC1:
+ check_cp1_64bitmode(ctx);
+ tcg_gen_andi_tl(t0, t0, ~0x7);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ gen_load_fpr64(ctx, fp0, fs);
+ tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEQ);
+ tcg_temp_free_i64(fp0);
+ }
+ break;
+ }
+ tcg_temp_free(t0);
+}
+
+static void gen_flt3_arith (DisasContext *ctx, uint32_t opc,
+ int fd, int fr, int fs, int ft)
+{
+ switch (opc) {
+ case OPC_ALNV_PS:
+ check_ps(ctx);
+ {
+ TCGv t0 = tcg_temp_local_new();
+ TCGv_i32 fp = tcg_temp_new_i32();
+ TCGv_i32 fph = tcg_temp_new_i32();
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+
+ gen_load_gpr(t0, fr);
+ tcg_gen_andi_tl(t0, t0, 0x7);
+
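+            /* GPR[fr] & 7 selects the alignment: 0 copies fs to fd, 4 merges
+             * the adjacent halves of fs and ft (endian dependent), and any
+             * other value leaves fd unmodified.
+             */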
+ tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0, l1);
+ gen_load_fpr32(ctx, fp, fs);
+ gen_load_fpr32h(ctx, fph, fs);
+ gen_store_fpr32(ctx, fp, fd);
+ gen_store_fpr32h(ctx, fph, fd);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_brcondi_tl(TCG_COND_NE, t0, 4, l2);
+ tcg_temp_free(t0);
+#ifdef TARGET_WORDS_BIGENDIAN
+ gen_load_fpr32(ctx, fp, fs);
+ gen_load_fpr32h(ctx, fph, ft);
+ gen_store_fpr32h(ctx, fp, fd);
+ gen_store_fpr32(ctx, fph, fd);
+#else
+ gen_load_fpr32h(ctx, fph, fs);
+ gen_load_fpr32(ctx, fp, ft);
+ gen_store_fpr32(ctx, fph, fd);
+ gen_store_fpr32h(ctx, fp, fd);
+#endif
+ gen_set_label(l2);
+ tcg_temp_free_i32(fp);
+ tcg_temp_free_i32(fph);
+ }
+ break;
+ case OPC_MADD_S:
+ check_cop1x(ctx);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+ TCGv_i32 fp2 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_load_fpr32(ctx, fp2, fr);
+ gen_helper_float_madd_s(fp2, cpu_env, fp0, fp1, fp2);
+ tcg_temp_free_i32(fp0);
+ tcg_temp_free_i32(fp1);
+ gen_store_fpr32(ctx, fp2, fd);
+ tcg_temp_free_i32(fp2);
+ }
+ break;
+ case OPC_MADD_D:
+ check_cop1x(ctx);
+ check_cp1_registers(ctx, fd | fs | ft | fr);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+ TCGv_i64 fp2 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_load_fpr64(ctx, fp2, fr);
+ gen_helper_float_madd_d(fp2, cpu_env, fp0, fp1, fp2);
+ tcg_temp_free_i64(fp0);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp2, fd);
+ tcg_temp_free_i64(fp2);
+ }
+ break;
+ case OPC_MADD_PS:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+ TCGv_i64 fp2 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_load_fpr64(ctx, fp2, fr);
+ gen_helper_float_madd_ps(fp2, cpu_env, fp0, fp1, fp2);
+ tcg_temp_free_i64(fp0);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp2, fd);
+ tcg_temp_free_i64(fp2);
+ }
+ break;
+ case OPC_MSUB_S:
+ check_cop1x(ctx);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+ TCGv_i32 fp2 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_load_fpr32(ctx, fp2, fr);
+ gen_helper_float_msub_s(fp2, cpu_env, fp0, fp1, fp2);
+ tcg_temp_free_i32(fp0);
+ tcg_temp_free_i32(fp1);
+ gen_store_fpr32(ctx, fp2, fd);
+ tcg_temp_free_i32(fp2);
+ }
+ break;
+ case OPC_MSUB_D:
+ check_cop1x(ctx);
+ check_cp1_registers(ctx, fd | fs | ft | fr);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+ TCGv_i64 fp2 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_load_fpr64(ctx, fp2, fr);
+ gen_helper_float_msub_d(fp2, cpu_env, fp0, fp1, fp2);
+ tcg_temp_free_i64(fp0);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp2, fd);
+ tcg_temp_free_i64(fp2);
+ }
+ break;
+ case OPC_MSUB_PS:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+ TCGv_i64 fp2 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_load_fpr64(ctx, fp2, fr);
+ gen_helper_float_msub_ps(fp2, cpu_env, fp0, fp1, fp2);
+ tcg_temp_free_i64(fp0);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp2, fd);
+ tcg_temp_free_i64(fp2);
+ }
+ break;
+ case OPC_NMADD_S:
+ check_cop1x(ctx);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+ TCGv_i32 fp2 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_load_fpr32(ctx, fp2, fr);
+ gen_helper_float_nmadd_s(fp2, cpu_env, fp0, fp1, fp2);
+ tcg_temp_free_i32(fp0);
+ tcg_temp_free_i32(fp1);
+ gen_store_fpr32(ctx, fp2, fd);
+ tcg_temp_free_i32(fp2);
+ }
+ break;
+ case OPC_NMADD_D:
+ check_cop1x(ctx);
+ check_cp1_registers(ctx, fd | fs | ft | fr);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+ TCGv_i64 fp2 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_load_fpr64(ctx, fp2, fr);
+ gen_helper_float_nmadd_d(fp2, cpu_env, fp0, fp1, fp2);
+ tcg_temp_free_i64(fp0);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp2, fd);
+ tcg_temp_free_i64(fp2);
+ }
+ break;
+ case OPC_NMADD_PS:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+ TCGv_i64 fp2 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_load_fpr64(ctx, fp2, fr);
+ gen_helper_float_nmadd_ps(fp2, cpu_env, fp0, fp1, fp2);
+ tcg_temp_free_i64(fp0);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp2, fd);
+ tcg_temp_free_i64(fp2);
+ }
+ break;
+ case OPC_NMSUB_S:
+ check_cop1x(ctx);
+ {
+ TCGv_i32 fp0 = tcg_temp_new_i32();
+ TCGv_i32 fp1 = tcg_temp_new_i32();
+ TCGv_i32 fp2 = tcg_temp_new_i32();
+
+ gen_load_fpr32(ctx, fp0, fs);
+ gen_load_fpr32(ctx, fp1, ft);
+ gen_load_fpr32(ctx, fp2, fr);
+ gen_helper_float_nmsub_s(fp2, cpu_env, fp0, fp1, fp2);
+ tcg_temp_free_i32(fp0);
+ tcg_temp_free_i32(fp1);
+ gen_store_fpr32(ctx, fp2, fd);
+ tcg_temp_free_i32(fp2);
+ }
+ break;
+ case OPC_NMSUB_D:
+ check_cop1x(ctx);
+ check_cp1_registers(ctx, fd | fs | ft | fr);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+ TCGv_i64 fp2 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_load_fpr64(ctx, fp2, fr);
+ gen_helper_float_nmsub_d(fp2, cpu_env, fp0, fp1, fp2);
+ tcg_temp_free_i64(fp0);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp2, fd);
+ tcg_temp_free_i64(fp2);
+ }
+ break;
+ case OPC_NMSUB_PS:
+ check_ps(ctx);
+ {
+ TCGv_i64 fp0 = tcg_temp_new_i64();
+ TCGv_i64 fp1 = tcg_temp_new_i64();
+ TCGv_i64 fp2 = tcg_temp_new_i64();
+
+ gen_load_fpr64(ctx, fp0, fs);
+ gen_load_fpr64(ctx, fp1, ft);
+ gen_load_fpr64(ctx, fp2, fr);
+ gen_helper_float_nmsub_ps(fp2, cpu_env, fp0, fp1, fp2);
+ tcg_temp_free_i64(fp0);
+ tcg_temp_free_i64(fp1);
+ gen_store_fpr64(ctx, fp2, fd);
+ tcg_temp_free_i64(fp2);
+ }
+ break;
+ default:
+ MIPS_INVAL("flt3_arith");
+ generate_exception_end(ctx, EXCP_RI);
+ return;
+ }
+}
+
+static void gen_rdhwr(DisasContext *ctx, int rt, int rd, int sel)
+{
+ TCGv t0;
+
+#if !defined(CONFIG_USER_ONLY)
+ /* The Linux kernel will emulate rdhwr if it's not supported natively.
+ Therefore only check the ISA in system mode. */
+ check_insn(ctx, ISA_MIPS32R2);
+#endif
+ t0 = tcg_temp_new();
+
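+    /* rd selects the hardware register: 0 CPUNum, 1 SYNCI_Step, 2 CC,
+     * 3 CCRes, 4 performance counters (R6), 5 XNP (R6), 29 UserLocal.
+     */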
+ switch (rd) {
+ case 0:
+ gen_helper_rdhwr_cpunum(t0, cpu_env);
+ gen_store_gpr(t0, rt);
+ break;
+ case 1:
+ gen_helper_rdhwr_synci_step(t0, cpu_env);
+ gen_store_gpr(t0, rt);
+ break;
+ case 2:
+ gen_helper_rdhwr_cc(t0, cpu_env);
+ gen_store_gpr(t0, rt);
+ break;
+ case 3:
+ gen_helper_rdhwr_ccres(t0, cpu_env);
+ gen_store_gpr(t0, rt);
+ break;
+ case 4:
+ check_insn(ctx, ISA_MIPS32R6);
+ if (sel != 0) {
+            /* Performance counter registers other than control
+             * register 0 are not implemented, so any other select
+             * raises a Reserved Instruction exception.
+             */
+ generate_exception(ctx, EXCP_RI);
+ }
+ gen_helper_rdhwr_performance(t0, cpu_env);
+ gen_store_gpr(t0, rt);
+ break;
+ case 5:
+ check_insn(ctx, ISA_MIPS32R6);
+ gen_helper_rdhwr_xnp(t0, cpu_env);
+ gen_store_gpr(t0, rt);
+ break;
+ case 29:
+#if defined(CONFIG_USER_ONLY)
+ tcg_gen_ld_tl(t0, cpu_env,
+ offsetof(CPUMIPSState, active_tc.CP0_UserLocal));
+ gen_store_gpr(t0, rt);
+ break;
+#else
+ if ((ctx->hflags & MIPS_HFLAG_CP0) ||
+ (ctx->hflags & MIPS_HFLAG_HWRENA_ULR)) {
+ tcg_gen_ld_tl(t0, cpu_env,
+ offsetof(CPUMIPSState, active_tc.CP0_UserLocal));
+ gen_store_gpr(t0, rt);
+ } else {
+ generate_exception_end(ctx, EXCP_RI);
+ }
+ break;
+#endif
+ default: /* Invalid */
+ MIPS_INVAL("rdhwr");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ tcg_temp_free(t0);
+}
+
+static inline void clear_branch_hflags(DisasContext *ctx)
+{
+ ctx->hflags &= ~MIPS_HFLAG_BMASK;
+ if (ctx->bstate == BS_NONE) {
+ save_cpu_state(ctx, 0);
+ } else {
+        /* It is not safe to save ctx->hflags, as hflags may be changed
+           at execution time by the instruction in the delay / forbidden
+           slot. */
+ tcg_gen_andi_i32(hflags, hflags, ~MIPS_HFLAG_BMASK);
+ }
+}
+
+static void gen_branch(DisasContext *ctx, int insn_bytes)
+{
+ if (ctx->hflags & MIPS_HFLAG_BMASK) {
+ int proc_hflags = ctx->hflags & MIPS_HFLAG_BMASK;
+        /* Branch completion */
+ clear_branch_hflags(ctx);
+ ctx->bstate = BS_BRANCH;
+ /* FIXME: Need to clear can_do_io. */
+ switch (proc_hflags & MIPS_HFLAG_BMASK_BASE) {
+ case MIPS_HFLAG_FBNSLOT:
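+            /* Forbidden slot of a not-taken compact branch: just continue
+             * with the next instruction.
+             */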
+ gen_goto_tb(ctx, 0, ctx->pc + insn_bytes);
+ break;
+ case MIPS_HFLAG_B:
+ /* unconditional branch */
+ if (proc_hflags & MIPS_HFLAG_BX) {
+ tcg_gen_xori_i32(hflags, hflags, MIPS_HFLAG_M16);
+ }
+ gen_goto_tb(ctx, 0, ctx->btarget);
+ break;
+ case MIPS_HFLAG_BL:
+            /* branch-likely taken case */
+ gen_goto_tb(ctx, 0, ctx->btarget);
+ break;
+ case MIPS_HFLAG_BC:
+ /* Conditional branch */
+ {
+ TCGLabel *l1 = gen_new_label();
+
+ tcg_gen_brcondi_tl(TCG_COND_NE, bcond, 0, l1);
+ gen_goto_tb(ctx, 1, ctx->pc + insn_bytes);
+ gen_set_label(l1);
+ gen_goto_tb(ctx, 0, ctx->btarget);
+ }
+ break;
+ case MIPS_HFLAG_BR:
+ /* unconditional branch to register */
+ if (ctx->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) {
+ TCGv t0 = tcg_temp_new();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+
+ tcg_gen_andi_tl(t0, btarget, 0x1);
+ tcg_gen_trunc_tl_i32(t1, t0);
+ tcg_temp_free(t0);
+ tcg_gen_andi_i32(hflags, hflags, ~(uint32_t)MIPS_HFLAG_M16);
+ tcg_gen_shli_i32(t1, t1, MIPS_HFLAG_M16_SHIFT);
+ tcg_gen_or_i32(hflags, hflags, t1);
+ tcg_temp_free_i32(t1);
+
+ tcg_gen_andi_tl(cpu_PC, btarget, ~(target_ulong)0x1);
+ } else {
+ tcg_gen_mov_tl(cpu_PC, btarget);
+ }
+ if (ctx->singlestep_enabled) {
+ save_cpu_state(ctx, 0);
+ gen_helper_raise_exception_debug(cpu_env);
+ }
+ tcg_gen_exit_tb(0);
+ break;
+ default:
+ fprintf(stderr, "unknown branch 0x%x\n", proc_hflags);
+ abort();
+ }
+ }
+}
+
+/* Compact Branches */
+static void gen_compute_compact_branch(DisasContext *ctx, uint32_t opc,
+ int rs, int rt, int32_t offset)
+{
+ int bcond_compute = 0;
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
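+    /* Bit 0 of a link address records the current ISA mode (MIPS16 or
+     * microMIPS), so that a later jump-register returns to the same mode.
+     */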
+ int m16_lowbit = (ctx->hflags & MIPS_HFLAG_M16) != 0;
+
+ if (ctx->hflags & MIPS_HFLAG_BMASK) {
+#ifdef MIPS_DEBUG_DISAS
+ LOG_DISAS("Branch in delay / forbidden slot at PC 0x" TARGET_FMT_lx
+ "\n", ctx->pc);
+#endif
+ generate_exception_end(ctx, EXCP_RI);
+ goto out;
+ }
+
+ /* Load needed operands and calculate btarget */
+ switch (opc) {
+ /* compact branch */
+ case OPC_BOVC: /* OPC_BEQZALC, OPC_BEQC */
+ case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC */
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+ bcond_compute = 1;
+ ctx->btarget = addr_add(ctx, ctx->pc + 4, offset);
+ if (rs <= rt && rs == 0) {
+ /* OPC_BEQZALC, OPC_BNEZALC */
+ tcg_gen_movi_tl(cpu_gpr[31], ctx->pc + 4 + m16_lowbit);
+ }
+ break;
+ case OPC_BLEZC: /* OPC_BGEZC, OPC_BGEC */
+ case OPC_BGTZC: /* OPC_BLTZC, OPC_BLTC */
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+ bcond_compute = 1;
+ ctx->btarget = addr_add(ctx, ctx->pc + 4, offset);
+ break;
+ case OPC_BLEZALC: /* OPC_BGEZALC, OPC_BGEUC */
+ case OPC_BGTZALC: /* OPC_BLTZALC, OPC_BLTUC */
+ if (rs == 0 || rs == rt) {
+ /* OPC_BLEZALC, OPC_BGEZALC */
+ /* OPC_BGTZALC, OPC_BLTZALC */
+ tcg_gen_movi_tl(cpu_gpr[31], ctx->pc + 4 + m16_lowbit);
+ }
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+ bcond_compute = 1;
+ ctx->btarget = addr_add(ctx, ctx->pc + 4, offset);
+ break;
+ case OPC_BC:
+ case OPC_BALC:
+ ctx->btarget = addr_add(ctx, ctx->pc + 4, offset);
+ break;
+ case OPC_BEQZC:
+ case OPC_BNEZC:
+ if (rs != 0) {
+ /* OPC_BEQZC, OPC_BNEZC */
+ gen_load_gpr(t0, rs);
+ bcond_compute = 1;
+ ctx->btarget = addr_add(ctx, ctx->pc + 4, offset);
+ } else {
+ /* OPC_JIC, OPC_JIALC */
+ TCGv tbase = tcg_temp_new();
+ TCGv toffset = tcg_temp_new();
+
+ gen_load_gpr(tbase, rt);
+ tcg_gen_movi_tl(toffset, offset);
+ gen_op_addr_add(ctx, btarget, tbase, toffset);
+ tcg_temp_free(tbase);
+ tcg_temp_free(toffset);
+ }
+ break;
+ default:
+ MIPS_INVAL("Compact branch/jump");
+ generate_exception_end(ctx, EXCP_RI);
+ goto out;
+ }
+
+ if (bcond_compute == 0) {
+        /* Unconditional compact branch */
+ switch (opc) {
+ case OPC_JIALC:
+ tcg_gen_movi_tl(cpu_gpr[31], ctx->pc + 4 + m16_lowbit);
+ /* Fallthrough */
+ case OPC_JIC:
+ ctx->hflags |= MIPS_HFLAG_BR;
+ break;
+ case OPC_BALC:
+ tcg_gen_movi_tl(cpu_gpr[31], ctx->pc + 4 + m16_lowbit);
+ /* Fallthrough */
+ case OPC_BC:
+ ctx->hflags |= MIPS_HFLAG_B;
+ break;
+ default:
+ MIPS_INVAL("Compact branch/jump");
+ generate_exception_end(ctx, EXCP_RI);
+ goto out;
+ }
+
+        /* Generate the branch here; compact branches have no delay slot */
+ gen_branch(ctx, 4);
+ } else {
+ /* Conditional compact branch */
+ TCGLabel *fs = gen_new_label();
+ save_cpu_state(ctx, 0);
+
+ switch (opc) {
+ case OPC_BLEZALC: /* OPC_BGEZALC, OPC_BGEUC */
+ if (rs == 0 && rt != 0) {
+ /* OPC_BLEZALC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_LE), t1, 0, fs);
+ } else if (rs != 0 && rt != 0 && rs == rt) {
+ /* OPC_BGEZALC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_GE), t1, 0, fs);
+ } else {
+ /* OPC_BGEUC */
+ tcg_gen_brcond_tl(tcg_invert_cond(TCG_COND_GEU), t0, t1, fs);
+ }
+ break;
+ case OPC_BGTZALC: /* OPC_BLTZALC, OPC_BLTUC */
+ if (rs == 0 && rt != 0) {
+ /* OPC_BGTZALC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_GT), t1, 0, fs);
+ } else if (rs != 0 && rt != 0 && rs == rt) {
+ /* OPC_BLTZALC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_LT), t1, 0, fs);
+ } else {
+ /* OPC_BLTUC */
+ tcg_gen_brcond_tl(tcg_invert_cond(TCG_COND_LTU), t0, t1, fs);
+ }
+ break;
+ case OPC_BLEZC: /* OPC_BGEZC, OPC_BGEC */
+ if (rs == 0 && rt != 0) {
+ /* OPC_BLEZC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_LE), t1, 0, fs);
+ } else if (rs != 0 && rt != 0 && rs == rt) {
+ /* OPC_BGEZC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_GE), t1, 0, fs);
+ } else {
+ /* OPC_BGEC */
+ tcg_gen_brcond_tl(tcg_invert_cond(TCG_COND_GE), t0, t1, fs);
+ }
+ break;
+ case OPC_BGTZC: /* OPC_BLTZC, OPC_BLTC */
+ if (rs == 0 && rt != 0) {
+ /* OPC_BGTZC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_GT), t1, 0, fs);
+ } else if (rs != 0 && rt != 0 && rs == rt) {
+ /* OPC_BLTZC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_LT), t1, 0, fs);
+ } else {
+ /* OPC_BLTC */
+ tcg_gen_brcond_tl(tcg_invert_cond(TCG_COND_LT), t0, t1, fs);
+ }
+ break;
+ case OPC_BOVC: /* OPC_BEQZALC, OPC_BEQC */
+ case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC */
+ if (rs >= rt) {
+ /* OPC_BOVC, OPC_BNVC */
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ TCGv t4 = tcg_temp_new();
+ TCGv input_overflow = tcg_temp_new();
+
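+                /* BOVC/BNVC: detect signed overflow of the 32-bit addition
+                 * rs + rt; input_overflow is also set when either operand is
+                 * not a properly sign-extended 32-bit value.
+                 */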
+ gen_load_gpr(t0, rs);
+ gen_load_gpr(t1, rt);
+ tcg_gen_ext32s_tl(t2, t0);
+ tcg_gen_setcond_tl(TCG_COND_NE, input_overflow, t2, t0);
+ tcg_gen_ext32s_tl(t3, t1);
+ tcg_gen_setcond_tl(TCG_COND_NE, t4, t3, t1);
+ tcg_gen_or_tl(input_overflow, input_overflow, t4);
+
+ tcg_gen_add_tl(t4, t2, t3);
+ tcg_gen_ext32s_tl(t4, t4);
+ tcg_gen_xor_tl(t2, t2, t3);
+ tcg_gen_xor_tl(t3, t4, t3);
+ tcg_gen_andc_tl(t2, t3, t2);
+ tcg_gen_setcondi_tl(TCG_COND_LT, t4, t2, 0);
+ tcg_gen_or_tl(t4, t4, input_overflow);
+ if (opc == OPC_BOVC) {
+ /* OPC_BOVC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_NE), t4, 0, fs);
+ } else {
+ /* OPC_BNVC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_EQ), t4, 0, fs);
+ }
+ tcg_temp_free(input_overflow);
+ tcg_temp_free(t4);
+ tcg_temp_free(t3);
+ tcg_temp_free(t2);
+ } else if (rs < rt && rs == 0) {
+ /* OPC_BEQZALC, OPC_BNEZALC */
+ if (opc == OPC_BEQZALC) {
+ /* OPC_BEQZALC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_EQ), t1, 0, fs);
+ } else {
+ /* OPC_BNEZALC */
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_NE), t1, 0, fs);
+ }
+ } else {
+ /* OPC_BEQC, OPC_BNEC */
+ if (opc == OPC_BEQC) {
+ /* OPC_BEQC */
+ tcg_gen_brcond_tl(tcg_invert_cond(TCG_COND_EQ), t0, t1, fs);
+ } else {
+ /* OPC_BNEC */
+ tcg_gen_brcond_tl(tcg_invert_cond(TCG_COND_NE), t0, t1, fs);
+ }
+ }
+ break;
+ case OPC_BEQZC:
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_EQ), t0, 0, fs);
+ break;
+ case OPC_BNEZC:
+ tcg_gen_brcondi_tl(tcg_invert_cond(TCG_COND_NE), t0, 0, fs);
+ break;
+ default:
+ MIPS_INVAL("Compact conditional branch/jump");
+ generate_exception_end(ctx, EXCP_RI);
+ goto out;
+ }
+
+        /* Generate the branch here; compact branches have no delay slot */
+ gen_goto_tb(ctx, 1, ctx->btarget);
+ gen_set_label(fs);
+
+ ctx->hflags |= MIPS_HFLAG_FBNSLOT;
+ }
+
+out:
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
+/* ISA extensions (ASEs) */
+/* MIPS16 extension to MIPS32 */
+
+/* MIPS16 major opcodes */
+enum {
+ M16_OPC_ADDIUSP = 0x00,
+ M16_OPC_ADDIUPC = 0x01,
+ M16_OPC_B = 0x02,
+ M16_OPC_JAL = 0x03,
+ M16_OPC_BEQZ = 0x04,
+ M16_OPC_BNEQZ = 0x05,
+ M16_OPC_SHIFT = 0x06,
+ M16_OPC_LD = 0x07,
+ M16_OPC_RRIA = 0x08,
+ M16_OPC_ADDIU8 = 0x09,
+ M16_OPC_SLTI = 0x0a,
+ M16_OPC_SLTIU = 0x0b,
+ M16_OPC_I8 = 0x0c,
+ M16_OPC_LI = 0x0d,
+ M16_OPC_CMPI = 0x0e,
+ M16_OPC_SD = 0x0f,
+ M16_OPC_LB = 0x10,
+ M16_OPC_LH = 0x11,
+ M16_OPC_LWSP = 0x12,
+ M16_OPC_LW = 0x13,
+ M16_OPC_LBU = 0x14,
+ M16_OPC_LHU = 0x15,
+ M16_OPC_LWPC = 0x16,
+ M16_OPC_LWU = 0x17,
+ M16_OPC_SB = 0x18,
+ M16_OPC_SH = 0x19,
+ M16_OPC_SWSP = 0x1a,
+ M16_OPC_SW = 0x1b,
+ M16_OPC_RRR = 0x1c,
+ M16_OPC_RR = 0x1d,
+ M16_OPC_EXTEND = 0x1e,
+ M16_OPC_I64 = 0x1f
+};
+
+/* I8 funct field */
+enum {
+ I8_BTEQZ = 0x0,
+ I8_BTNEZ = 0x1,
+ I8_SWRASP = 0x2,
+ I8_ADJSP = 0x3,
+ I8_SVRS = 0x4,
+ I8_MOV32R = 0x5,
+ I8_MOVR32 = 0x7
+};
+
+/* RRR f field */
+enum {
+ RRR_DADDU = 0x0,
+ RRR_ADDU = 0x1,
+ RRR_DSUBU = 0x2,
+ RRR_SUBU = 0x3
+};
+
+/* RR funct field */
+enum {
+ RR_JR = 0x00,
+ RR_SDBBP = 0x01,
+ RR_SLT = 0x02,
+ RR_SLTU = 0x03,
+ RR_SLLV = 0x04,
+ RR_BREAK = 0x05,
+ RR_SRLV = 0x06,
+ RR_SRAV = 0x07,
+ RR_DSRL = 0x08,
+ RR_CMP = 0x0a,
+ RR_NEG = 0x0b,
+ RR_AND = 0x0c,
+ RR_OR = 0x0d,
+ RR_XOR = 0x0e,
+ RR_NOT = 0x0f,
+ RR_MFHI = 0x10,
+ RR_CNVT = 0x11,
+ RR_MFLO = 0x12,
+ RR_DSRA = 0x13,
+ RR_DSLLV = 0x14,
+ RR_DSRLV = 0x16,
+ RR_DSRAV = 0x17,
+ RR_MULT = 0x18,
+ RR_MULTU = 0x19,
+ RR_DIV = 0x1a,
+ RR_DIVU = 0x1b,
+ RR_DMULT = 0x1c,
+ RR_DMULTU = 0x1d,
+ RR_DDIV = 0x1e,
+ RR_DDIVU = 0x1f
+};
+
+/* I64 funct field */
+enum {
+ I64_LDSP = 0x0,
+ I64_SDSP = 0x1,
+ I64_SDRASP = 0x2,
+ I64_DADJSP = 0x3,
+ I64_LDPC = 0x4,
+ I64_DADDIU5 = 0x5,
+ I64_DADDIUPC = 0x6,
+ I64_DADDIUSP = 0x7
+};
+
+/* RR ry field for CNVT */
+enum {
+ RR_RY_CNVT_ZEB = 0x0,
+ RR_RY_CNVT_ZEH = 0x1,
+ RR_RY_CNVT_ZEW = 0x2,
+ RR_RY_CNVT_SEB = 0x4,
+ RR_RY_CNVT_SEH = 0x5,
+ RR_RY_CNVT_SEW = 0x6,
+};
+
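+/* Map a MIPS16 3-bit register field onto the full GPR number:
+ * 0-7 -> 16, 17, 2, 3, 4, 5, 6, 7 (s0, s1, v0, v1, a0-a3).
+ */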
+static int xlat (int r)
+{
+ static int map[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
+
+ return map[r];
+}
+
+static void gen_mips16_save (DisasContext *ctx,
+ int xsregs, int aregs,
+ int do_ra, int do_s0, int do_s1,
+ int framesize)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ int args, astatic;
+
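+    /* The aregs field encodes how many of a0-a3 are incoming arguments,
+     * stored to their home locations at 0..12 above the incoming sp, and
+     * how many are static registers saved inside the new frame (see the
+     * second aregs switch below).
+     */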
+ switch (aregs) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 11:
+ args = 0;
+ break;
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ args = 1;
+ break;
+ case 8:
+ case 9:
+ case 10:
+ args = 2;
+ break;
+ case 12:
+ case 13:
+ args = 3;
+ break;
+ case 14:
+ args = 4;
+ break;
+ default:
+ generate_exception_end(ctx, EXCP_RI);
+ return;
+ }
+
+ switch (args) {
+ case 4:
+ gen_base_offset_addr(ctx, t0, 29, 12);
+ gen_load_gpr(t1, 7);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL);
+ /* Fall through */
+ case 3:
+ gen_base_offset_addr(ctx, t0, 29, 8);
+ gen_load_gpr(t1, 6);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL);
+ /* Fall through */
+ case 2:
+ gen_base_offset_addr(ctx, t0, 29, 4);
+ gen_load_gpr(t1, 5);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL);
+ /* Fall through */
+ case 1:
+ gen_base_offset_addr(ctx, t0, 29, 0);
+ gen_load_gpr(t1, 4);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL);
+ }
+
+ gen_load_gpr(t0, 29);
+
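+/* Pre-decrement t0 by 4, then store the 32-bit value of GPR 'reg' there. */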
+#define DECR_AND_STORE(reg) do { \
+ tcg_gen_movi_tl(t2, -4); \
+ gen_op_addr_add(ctx, t0, t0, t2); \
+ gen_load_gpr(t1, reg); \
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL); \
+ } while (0)
+
+ if (do_ra) {
+ DECR_AND_STORE(31);
+ }
+
+ switch (xsregs) {
+ case 7:
+ DECR_AND_STORE(30);
+ /* Fall through */
+ case 6:
+ DECR_AND_STORE(23);
+ /* Fall through */
+ case 5:
+ DECR_AND_STORE(22);
+ /* Fall through */
+ case 4:
+ DECR_AND_STORE(21);
+ /* Fall through */
+ case 3:
+ DECR_AND_STORE(20);
+ /* Fall through */
+ case 2:
+ DECR_AND_STORE(19);
+ /* Fall through */
+ case 1:
+ DECR_AND_STORE(18);
+ }
+
+ if (do_s1) {
+ DECR_AND_STORE(17);
+ }
+ if (do_s0) {
+ DECR_AND_STORE(16);
+ }
+
+ switch (aregs) {
+ case 0:
+ case 4:
+ case 8:
+ case 12:
+ case 14:
+ astatic = 0;
+ break;
+ case 1:
+ case 5:
+ case 9:
+ case 13:
+ astatic = 1;
+ break;
+ case 2:
+ case 6:
+ case 10:
+ astatic = 2;
+ break;
+ case 3:
+ case 7:
+ astatic = 3;
+ break;
+ case 11:
+ astatic = 4;
+ break;
+ default:
+ generate_exception_end(ctx, EXCP_RI);
+ return;
+ }
+
+ if (astatic > 0) {
+ DECR_AND_STORE(7);
+ if (astatic > 1) {
+ DECR_AND_STORE(6);
+ if (astatic > 2) {
+ DECR_AND_STORE(5);
+ if (astatic > 3) {
+ DECR_AND_STORE(4);
+ }
+ }
+ }
+ }
+#undef DECR_AND_STORE
+
+ tcg_gen_movi_tl(t2, -framesize);
+ gen_op_addr_add(ctx, cpu_gpr[29], cpu_gpr[29], t2);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+}
+
+static void gen_mips16_restore (DisasContext *ctx,
+ int xsregs, int aregs,
+ int do_ra, int do_s0, int do_s1,
+ int framesize)
+{
+ int astatic;
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+
+ tcg_gen_movi_tl(t2, framesize);
+ gen_op_addr_add(ctx, t0, cpu_gpr[29], t2);
+
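+/* Pre-decrement t0 by 4, then reload GPR 'reg' from that address. */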
+#define DECR_AND_LOAD(reg) do { \
+ tcg_gen_movi_tl(t2, -4); \
+ gen_op_addr_add(ctx, t0, t0, t2); \
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL); \
+ gen_store_gpr(t1, reg); \
+ } while (0)
+
+ if (do_ra) {
+ DECR_AND_LOAD(31);
+ }
+
+ switch (xsregs) {
+ case 7:
+ DECR_AND_LOAD(30);
+ /* Fall through */
+ case 6:
+ DECR_AND_LOAD(23);
+ /* Fall through */
+ case 5:
+ DECR_AND_LOAD(22);
+ /* Fall through */
+ case 4:
+ DECR_AND_LOAD(21);
+ /* Fall through */
+ case 3:
+ DECR_AND_LOAD(20);
+ /* Fall through */
+ case 2:
+ DECR_AND_LOAD(19);
+ /* Fall through */
+ case 1:
+ DECR_AND_LOAD(18);
+ }
+
+ if (do_s1) {
+ DECR_AND_LOAD(17);
+ }
+ if (do_s0) {
+ DECR_AND_LOAD(16);
+ }
+
+ switch (aregs) {
+ case 0:
+ case 4:
+ case 8:
+ case 12:
+ case 14:
+ astatic = 0;
+ break;
+ case 1:
+ case 5:
+ case 9:
+ case 13:
+ astatic = 1;
+ break;
+ case 2:
+ case 6:
+ case 10:
+ astatic = 2;
+ break;
+ case 3:
+ case 7:
+ astatic = 3;
+ break;
+ case 11:
+ astatic = 4;
+ break;
+ default:
+ generate_exception_end(ctx, EXCP_RI);
+ return;
+ }
+
+ if (astatic > 0) {
+ DECR_AND_LOAD(7);
+ if (astatic > 1) {
+ DECR_AND_LOAD(6);
+ if (astatic > 2) {
+ DECR_AND_LOAD(5);
+ if (astatic > 3) {
+ DECR_AND_LOAD(4);
+ }
+ }
+ }
+ }
+#undef DECR_AND_LOAD
+
+ tcg_gen_movi_tl(t2, framesize);
+ gen_op_addr_add(ctx, cpu_gpr[29], cpu_gpr[29], t2);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+}
+
+static void gen_addiupc (DisasContext *ctx, int rx, int imm,
+ int is_64_bit, int extended)
+{
+ TCGv t0;
+
+ if (extended && (ctx->hflags & MIPS_HFLAG_BMASK)) {
+ generate_exception_end(ctx, EXCP_RI);
+ return;
+ }
+
+ t0 = tcg_temp_new();
+
+ tcg_gen_movi_tl(t0, pc_relative_pc(ctx));
+ tcg_gen_addi_tl(cpu_gpr[rx], t0, imm);
+ if (!is_64_bit) {
+ tcg_gen_ext32s_tl(cpu_gpr[rx], cpu_gpr[rx]);
+ }
+
+ tcg_temp_free(t0);
+}
+
+static void gen_cache_operation(DisasContext *ctx, uint32_t op, int base,
+ int16_t offset)
+{
+ TCGv_i32 t0 = tcg_const_i32(op);
+ TCGv t1 = tcg_temp_new();
+ gen_base_offset_addr(ctx, t1, base, offset);
+ gen_helper_cache(cpu_env, t1, t0);
+}
+
+#if defined(TARGET_MIPS64)
+static void decode_i64_mips16 (DisasContext *ctx,
+ int ry, int funct, int16_t offset,
+ int extended)
+{
+ switch (funct) {
+ case I64_LDSP:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ offset = extended ? offset : offset << 3;
+ gen_ld(ctx, OPC_LD, ry, 29, offset);
+ break;
+ case I64_SDSP:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ offset = extended ? offset : offset << 3;
+ gen_st(ctx, OPC_SD, ry, 29, offset);
+ break;
+ case I64_SDRASP:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ offset = extended ? offset : (ctx->opcode & 0xff) << 3;
+ gen_st(ctx, OPC_SD, 31, 29, offset);
+ break;
+ case I64_DADJSP:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ offset = extended ? offset : ((int8_t)ctx->opcode) << 3;
+ gen_arith_imm(ctx, OPC_DADDIU, 29, 29, offset);
+ break;
+ case I64_LDPC:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ if (extended && (ctx->hflags & MIPS_HFLAG_BMASK)) {
+ generate_exception_end(ctx, EXCP_RI);
+ } else {
+ offset = extended ? offset : offset << 3;
+ gen_ld(ctx, OPC_LDPC, ry, 0, offset);
+ }
+ break;
+ case I64_DADDIU5:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ offset = extended ? offset : ((int8_t)(offset << 3)) >> 3;
+ gen_arith_imm(ctx, OPC_DADDIU, ry, ry, offset);
+ break;
+ case I64_DADDIUPC:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ offset = extended ? offset : offset << 2;
+ gen_addiupc(ctx, ry, offset, 1, extended);
+ break;
+ case I64_DADDIUSP:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ offset = extended ? offset : offset << 2;
+ gen_arith_imm(ctx, OPC_DADDIU, ry, 29, offset);
+ break;
+ }
+}
+#endif
+
+static int decode_extended_mips16_opc (CPUMIPSState *env, DisasContext *ctx)
+{
+ int extend = cpu_lduw_code(env, ctx->pc + 2);
+ int op, rx, ry, funct, sa;
+ int16_t imm, offset;
+
+ ctx->opcode = (ctx->opcode << 16) | extend;
+ op = (ctx->opcode >> 11) & 0x1f;
+ sa = (ctx->opcode >> 22) & 0x1f;
+ funct = (ctx->opcode >> 8) & 0x7;
+ rx = xlat((ctx->opcode >> 8) & 0x7);
+ ry = xlat((ctx->opcode >> 5) & 0x7);
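+    /* Reassemble the immediate: imm[15:11] and imm[10:5] come from the
+     * EXTEND prefix, imm[4:0] from the base instruction.
+     */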
+ offset = imm = (int16_t) (((ctx->opcode >> 16) & 0x1f) << 11
+ | ((ctx->opcode >> 21) & 0x3f) << 5
+ | (ctx->opcode & 0x1f));
+
+ /* The extended opcodes cleverly reuse the opcodes from their 16-bit
+ counterparts. */
+ switch (op) {
+ case M16_OPC_ADDIUSP:
+ gen_arith_imm(ctx, OPC_ADDIU, rx, 29, imm);
+ break;
+ case M16_OPC_ADDIUPC:
+ gen_addiupc(ctx, rx, imm, 0, 1);
+ break;
+ case M16_OPC_B:
+ gen_compute_branch(ctx, OPC_BEQ, 4, 0, 0, offset << 1, 0);
+ /* No delay slot, so just process as a normal instruction */
+ break;
+ case M16_OPC_BEQZ:
+ gen_compute_branch(ctx, OPC_BEQ, 4, rx, 0, offset << 1, 0);
+ /* No delay slot, so just process as a normal instruction */
+ break;
+ case M16_OPC_BNEQZ:
+ gen_compute_branch(ctx, OPC_BNE, 4, rx, 0, offset << 1, 0);
+ /* No delay slot, so just process as a normal instruction */
+ break;
+ case M16_OPC_SHIFT:
+ switch (ctx->opcode & 0x3) {
+ case 0x0:
+ gen_shift_imm(ctx, OPC_SLL, rx, ry, sa);
+ break;
+ case 0x1:
+#if defined(TARGET_MIPS64)
+ check_mips_64(ctx);
+ gen_shift_imm(ctx, OPC_DSLL, rx, ry, sa);
+#else
+ generate_exception_end(ctx, EXCP_RI);
+#endif
+ break;
+ case 0x2:
+ gen_shift_imm(ctx, OPC_SRL, rx, ry, sa);
+ break;
+ case 0x3:
+ gen_shift_imm(ctx, OPC_SRA, rx, ry, sa);
+ break;
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case M16_OPC_LD:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_ld(ctx, OPC_LD, ry, rx, offset);
+ break;
+#endif
+ case M16_OPC_RRIA:
+ imm = ctx->opcode & 0xf;
+ imm = imm | ((ctx->opcode >> 20) & 0x7f) << 4;
+ imm = imm | ((ctx->opcode >> 16) & 0xf) << 11;
+ imm = (int16_t) (imm << 1) >> 1;
+ if ((ctx->opcode >> 4) & 0x1) {
+#if defined(TARGET_MIPS64)
+ check_mips_64(ctx);
+ gen_arith_imm(ctx, OPC_DADDIU, ry, rx, imm);
+#else
+ generate_exception_end(ctx, EXCP_RI);
+#endif
+ } else {
+ gen_arith_imm(ctx, OPC_ADDIU, ry, rx, imm);
+ }
+ break;
+ case M16_OPC_ADDIU8:
+ gen_arith_imm(ctx, OPC_ADDIU, rx, rx, imm);
+ break;
+ case M16_OPC_SLTI:
+ gen_slt_imm(ctx, OPC_SLTI, 24, rx, imm);
+ break;
+ case M16_OPC_SLTIU:
+ gen_slt_imm(ctx, OPC_SLTIU, 24, rx, imm);
+ break;
+ case M16_OPC_I8:
+ switch (funct) {
+ case I8_BTEQZ:
+ gen_compute_branch(ctx, OPC_BEQ, 4, 24, 0, offset << 1, 0);
+ break;
+ case I8_BTNEZ:
+ gen_compute_branch(ctx, OPC_BNE, 4, 24, 0, offset << 1, 0);
+ break;
+ case I8_SWRASP:
+ gen_st(ctx, OPC_SW, 31, 29, imm);
+ break;
+ case I8_ADJSP:
+ gen_arith_imm(ctx, OPC_ADDIU, 29, 29, imm);
+ break;
+ case I8_SVRS:
+ check_insn(ctx, ISA_MIPS32);
+ {
+ int xsregs = (ctx->opcode >> 24) & 0x7;
+ int aregs = (ctx->opcode >> 16) & 0xf;
+ int do_ra = (ctx->opcode >> 6) & 0x1;
+ int do_s0 = (ctx->opcode >> 5) & 0x1;
+ int do_s1 = (ctx->opcode >> 4) & 0x1;
+ int framesize = (((ctx->opcode >> 20) & 0xf) << 4
+ | (ctx->opcode & 0xf)) << 3;
+
+ if (ctx->opcode & (1 << 7)) {
+ gen_mips16_save(ctx, xsregs, aregs,
+ do_ra, do_s0, do_s1,
+ framesize);
+ } else {
+ gen_mips16_restore(ctx, xsregs, aregs,
+ do_ra, do_s0, do_s1,
+ framesize);
+ }
+ }
+ break;
+ default:
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ case M16_OPC_LI:
+ tcg_gen_movi_tl(cpu_gpr[rx], (uint16_t) imm);
+ break;
+ case M16_OPC_CMPI:
+ tcg_gen_xori_tl(cpu_gpr[24], cpu_gpr[rx], (uint16_t) imm);
+ break;
+#if defined(TARGET_MIPS64)
+ case M16_OPC_SD:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_st(ctx, OPC_SD, ry, rx, offset);
+ break;
+#endif
+ case M16_OPC_LB:
+ gen_ld(ctx, OPC_LB, ry, rx, offset);
+ break;
+ case M16_OPC_LH:
+ gen_ld(ctx, OPC_LH, ry, rx, offset);
+ break;
+ case M16_OPC_LWSP:
+ gen_ld(ctx, OPC_LW, rx, 29, offset);
+ break;
+ case M16_OPC_LW:
+ gen_ld(ctx, OPC_LW, ry, rx, offset);
+ break;
+ case M16_OPC_LBU:
+ gen_ld(ctx, OPC_LBU, ry, rx, offset);
+ break;
+ case M16_OPC_LHU:
+ gen_ld(ctx, OPC_LHU, ry, rx, offset);
+ break;
+ case M16_OPC_LWPC:
+ gen_ld(ctx, OPC_LWPC, rx, 0, offset);
+ break;
+#if defined(TARGET_MIPS64)
+ case M16_OPC_LWU:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_ld(ctx, OPC_LWU, ry, rx, offset);
+ break;
+#endif
+ case M16_OPC_SB:
+ gen_st(ctx, OPC_SB, ry, rx, offset);
+ break;
+ case M16_OPC_SH:
+ gen_st(ctx, OPC_SH, ry, rx, offset);
+ break;
+ case M16_OPC_SWSP:
+ gen_st(ctx, OPC_SW, rx, 29, offset);
+ break;
+ case M16_OPC_SW:
+ gen_st(ctx, OPC_SW, ry, rx, offset);
+ break;
+#if defined(TARGET_MIPS64)
+ case M16_OPC_I64:
+ decode_i64_mips16(ctx, ry, funct, offset, 1);
+ break;
+#endif
+ default:
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+
+ return 4;
+}
+
+static inline bool is_uhi(int sdbbp_code)
+{
+#ifdef CONFIG_USER_ONLY
+ return false;
+#else
+ return semihosting_enabled() && sdbbp_code == 1;
+#endif
+}
+
+static int decode_mips16_opc (CPUMIPSState *env, DisasContext *ctx)
+{
+ int rx, ry;
+ int sa;
+ int op, cnvt_op, op1, offset;
+ int funct;
+ int n_bytes;
+
+ op = (ctx->opcode >> 11) & 0x1f;
+ sa = (ctx->opcode >> 2) & 0x7;
+ sa = sa == 0 ? 8 : sa;
+ rx = xlat((ctx->opcode >> 8) & 0x7);
+ cnvt_op = (ctx->opcode >> 5) & 0x7;
+ ry = xlat((ctx->opcode >> 5) & 0x7);
+ op1 = offset = ctx->opcode & 0x1f;
+
+ n_bytes = 2;
+
+ switch (op) {
+ case M16_OPC_ADDIUSP:
+ {
+ int16_t imm = ((uint8_t) ctx->opcode) << 2;
+
+ gen_arith_imm(ctx, OPC_ADDIU, rx, 29, imm);
+ }
+ break;
+ case M16_OPC_ADDIUPC:
+ gen_addiupc(ctx, rx, ((uint8_t) ctx->opcode) << 2, 0, 0);
+ break;
+ case M16_OPC_B:
+ offset = (ctx->opcode & 0x7ff) << 1;
+ offset = (int16_t)(offset << 4) >> 4;
+ gen_compute_branch(ctx, OPC_BEQ, 2, 0, 0, offset, 0);
+ /* No delay slot, so just process as a normal instruction */
+ break;
+ case M16_OPC_JAL:
+ offset = cpu_lduw_code(env, ctx->pc + 2);
+ offset = (((ctx->opcode & 0x1f) << 21)
+ | ((ctx->opcode >> 5) & 0x1f) << 16
+ | offset) << 2;
+ op = ((ctx->opcode >> 10) & 0x1) ? OPC_JALX : OPC_JAL;
+ gen_compute_branch(ctx, op, 4, rx, ry, offset, 2);
+ n_bytes = 4;
+ break;
+ case M16_OPC_BEQZ:
+ gen_compute_branch(ctx, OPC_BEQ, 2, rx, 0,
+ ((int8_t)ctx->opcode) << 1, 0);
+ /* No delay slot, so just process as a normal instruction */
+ break;
+ case M16_OPC_BNEQZ:
+ gen_compute_branch(ctx, OPC_BNE, 2, rx, 0,
+ ((int8_t)ctx->opcode) << 1, 0);
+ /* No delay slot, so just process as a normal instruction */
+ break;
+ case M16_OPC_SHIFT:
+ switch (ctx->opcode & 0x3) {
+ case 0x0:
+ gen_shift_imm(ctx, OPC_SLL, rx, ry, sa);
+ break;
+ case 0x1:
+#if defined(TARGET_MIPS64)
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_shift_imm(ctx, OPC_DSLL, rx, ry, sa);
+#else
+ generate_exception_end(ctx, EXCP_RI);
+#endif
+ break;
+ case 0x2:
+ gen_shift_imm(ctx, OPC_SRL, rx, ry, sa);
+ break;
+ case 0x3:
+ gen_shift_imm(ctx, OPC_SRA, rx, ry, sa);
+ break;
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case M16_OPC_LD:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_ld(ctx, OPC_LD, ry, rx, offset << 3);
+ break;
+#endif
+ case M16_OPC_RRIA:
+ {
+ int16_t imm = (int8_t)((ctx->opcode & 0xf) << 4) >> 4;
+
+ if ((ctx->opcode >> 4) & 1) {
+#if defined(TARGET_MIPS64)
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_arith_imm(ctx, OPC_DADDIU, ry, rx, imm);
+#else
+ generate_exception_end(ctx, EXCP_RI);
+#endif
+ } else {
+ gen_arith_imm(ctx, OPC_ADDIU, ry, rx, imm);
+ }
+ }
+ break;
+ case M16_OPC_ADDIU8:
+ {
+ int16_t imm = (int8_t) ctx->opcode;
+
+ gen_arith_imm(ctx, OPC_ADDIU, rx, rx, imm);
+ }
+ break;
+ case M16_OPC_SLTI:
+ {
+ int16_t imm = (uint8_t) ctx->opcode;
+ gen_slt_imm(ctx, OPC_SLTI, 24, rx, imm);
+ }
+ break;
+ case M16_OPC_SLTIU:
+ {
+ int16_t imm = (uint8_t) ctx->opcode;
+ gen_slt_imm(ctx, OPC_SLTIU, 24, rx, imm);
+ }
+ break;
+ case M16_OPC_I8:
+ {
+ int reg32;
+
+ funct = (ctx->opcode >> 8) & 0x7;
+ switch (funct) {
+ case I8_BTEQZ:
+ gen_compute_branch(ctx, OPC_BEQ, 2, 24, 0,
+ ((int8_t)ctx->opcode) << 1, 0);
+ break;
+ case I8_BTNEZ:
+ gen_compute_branch(ctx, OPC_BNE, 2, 24, 0,
+ ((int8_t)ctx->opcode) << 1, 0);
+ break;
+ case I8_SWRASP:
+ gen_st(ctx, OPC_SW, 31, 29, (ctx->opcode & 0xff) << 2);
+ break;
+ case I8_ADJSP:
+ gen_arith_imm(ctx, OPC_ADDIU, 29, 29,
+ ((int8_t)ctx->opcode) << 3);
+ break;
+ case I8_SVRS:
+ check_insn(ctx, ISA_MIPS32);
+ {
+ int do_ra = ctx->opcode & (1 << 6);
+ int do_s0 = ctx->opcode & (1 << 5);
+ int do_s1 = ctx->opcode & (1 << 4);
+ int framesize = ctx->opcode & 0xf;
+
+ if (framesize == 0) {
+ framesize = 128;
+ } else {
+ framesize = framesize << 3;
+ }
+
+ if (ctx->opcode & (1 << 7)) {
+ gen_mips16_save(ctx, 0, 0,
+ do_ra, do_s0, do_s1, framesize);
+ } else {
+ gen_mips16_restore(ctx, 0, 0,
+ do_ra, do_s0, do_s1, framesize);
+ }
+ }
+ break;
+ case I8_MOV32R:
+ {
+ int rz = xlat(ctx->opcode & 0x7);
+
+ reg32 = (((ctx->opcode >> 3) & 0x3) << 3) |
+ ((ctx->opcode >> 5) & 0x7);
+ gen_arith(ctx, OPC_ADDU, reg32, rz, 0);
+ }
+ break;
+ case I8_MOVR32:
+ reg32 = ctx->opcode & 0x1f;
+ gen_arith(ctx, OPC_ADDU, ry, reg32, 0);
+ break;
+ default:
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ }
+ break;
+ case M16_OPC_LI:
+ {
+ int16_t imm = (uint8_t) ctx->opcode;
+
+ gen_arith_imm(ctx, OPC_ADDIU, rx, 0, imm);
+ }
+ break;
+ case M16_OPC_CMPI:
+ {
+ int16_t imm = (uint8_t) ctx->opcode;
+ gen_logic_imm(ctx, OPC_XORI, 24, rx, imm);
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case M16_OPC_SD:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_st(ctx, OPC_SD, ry, rx, offset << 3);
+ break;
+#endif
+ case M16_OPC_LB:
+ gen_ld(ctx, OPC_LB, ry, rx, offset);
+ break;
+ case M16_OPC_LH:
+ gen_ld(ctx, OPC_LH, ry, rx, offset << 1);
+ break;
+ case M16_OPC_LWSP:
+ gen_ld(ctx, OPC_LW, rx, 29, ((uint8_t)ctx->opcode) << 2);
+ break;
+ case M16_OPC_LW:
+ gen_ld(ctx, OPC_LW, ry, rx, offset << 2);
+ break;
+ case M16_OPC_LBU:
+ gen_ld(ctx, OPC_LBU, ry, rx, offset);
+ break;
+ case M16_OPC_LHU:
+ gen_ld(ctx, OPC_LHU, ry, rx, offset << 1);
+ break;
+ case M16_OPC_LWPC:
+ gen_ld(ctx, OPC_LWPC, rx, 0, ((uint8_t)ctx->opcode) << 2);
+ break;
+#if defined (TARGET_MIPS64)
+ case M16_OPC_LWU:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_ld(ctx, OPC_LWU, ry, rx, offset << 2);
+ break;
+#endif
+ case M16_OPC_SB:
+ gen_st(ctx, OPC_SB, ry, rx, offset);
+ break;
+ case M16_OPC_SH:
+ gen_st(ctx, OPC_SH, ry, rx, offset << 1);
+ break;
+ case M16_OPC_SWSP:
+ gen_st(ctx, OPC_SW, rx, 29, ((uint8_t)ctx->opcode) << 2);
+ break;
+ case M16_OPC_SW:
+ gen_st(ctx, OPC_SW, ry, rx, offset << 2);
+ break;
+ case M16_OPC_RRR:
+ {
+ int rz = xlat((ctx->opcode >> 2) & 0x7);
+ int mips32_op;
+
+ switch (ctx->opcode & 0x3) {
+ case RRR_ADDU:
+ mips32_op = OPC_ADDU;
+ break;
+ case RRR_SUBU:
+ mips32_op = OPC_SUBU;
+ break;
+#if defined(TARGET_MIPS64)
+ case RRR_DADDU:
+ mips32_op = OPC_DADDU;
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ break;
+ case RRR_DSUBU:
+ mips32_op = OPC_DSUBU;
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ break;
+#endif
+ default:
+ generate_exception_end(ctx, EXCP_RI);
+ goto done;
+ }
+
+ gen_arith(ctx, mips32_op, rz, rx, ry);
+ done:
+ ;
+ }
+ break;
+ case M16_OPC_RR:
+ switch (op1) {
+ case RR_JR:
+ {
+ int nd = (ctx->opcode >> 7) & 0x1;
+ int link = (ctx->opcode >> 6) & 0x1;
+ int ra = (ctx->opcode >> 5) & 0x1;
+
+ if (nd) {
+ check_insn(ctx, ISA_MIPS32);
+ }
+
+ if (link) {
+ op = OPC_JALR;
+ } else {
+ op = OPC_JR;
+ }
+
+ gen_compute_branch(ctx, op, 2, ra ? 31 : rx, 31, 0,
+ (nd ? 0 : 2));
+ }
+ break;
+ case RR_SDBBP:
+ if (is_uhi(extract32(ctx->opcode, 5, 6))) {
+ gen_helper_do_semihosting(cpu_env);
+ } else {
+ /* XXX: not clear which exception should be raised
+ * when in debug mode...
+ */
+ check_insn(ctx, ISA_MIPS32);
+ generate_exception_end(ctx, EXCP_DBp);
+ }
+ break;
+ case RR_SLT:
+ gen_slt(ctx, OPC_SLT, 24, rx, ry);
+ break;
+ case RR_SLTU:
+ gen_slt(ctx, OPC_SLTU, 24, rx, ry);
+ break;
+ case RR_BREAK:
+ generate_exception_end(ctx, EXCP_BREAK);
+ break;
+ case RR_SLLV:
+ gen_shift(ctx, OPC_SLLV, ry, rx, ry);
+ break;
+ case RR_SRLV:
+ gen_shift(ctx, OPC_SRLV, ry, rx, ry);
+ break;
+ case RR_SRAV:
+ gen_shift(ctx, OPC_SRAV, ry, rx, ry);
+ break;
+#if defined (TARGET_MIPS64)
+ case RR_DSRL:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_shift_imm(ctx, OPC_DSRL, ry, ry, sa);
+ break;
+#endif
+ case RR_CMP:
+ gen_logic(ctx, OPC_XOR, 24, rx, ry);
+ break;
+ case RR_NEG:
+ gen_arith(ctx, OPC_SUBU, rx, 0, ry);
+ break;
+ case RR_AND:
+ gen_logic(ctx, OPC_AND, rx, rx, ry);
+ break;
+ case RR_OR:
+ gen_logic(ctx, OPC_OR, rx, rx, ry);
+ break;
+ case RR_XOR:
+ gen_logic(ctx, OPC_XOR, rx, rx, ry);
+ break;
+ case RR_NOT:
+ gen_logic(ctx, OPC_NOR, rx, ry, 0);
+ break;
+ case RR_MFHI:
+ gen_HILO(ctx, OPC_MFHI, 0, rx);
+ break;
+ case RR_CNVT:
+ check_insn(ctx, ISA_MIPS32);
+ switch (cnvt_op) {
+ case RR_RY_CNVT_ZEB:
+ tcg_gen_ext8u_tl(cpu_gpr[rx], cpu_gpr[rx]);
+ break;
+ case RR_RY_CNVT_ZEH:
+ tcg_gen_ext16u_tl(cpu_gpr[rx], cpu_gpr[rx]);
+ break;
+ case RR_RY_CNVT_SEB:
+ tcg_gen_ext8s_tl(cpu_gpr[rx], cpu_gpr[rx]);
+ break;
+ case RR_RY_CNVT_SEH:
+ tcg_gen_ext16s_tl(cpu_gpr[rx], cpu_gpr[rx]);
+ break;
+#if defined (TARGET_MIPS64)
+ case RR_RY_CNVT_ZEW:
+ check_insn(ctx, ISA_MIPS64);
+ check_mips_64(ctx);
+ tcg_gen_ext32u_tl(cpu_gpr[rx], cpu_gpr[rx]);
+ break;
+ case RR_RY_CNVT_SEW:
+ check_insn(ctx, ISA_MIPS64);
+ check_mips_64(ctx);
+ tcg_gen_ext32s_tl(cpu_gpr[rx], cpu_gpr[rx]);
+ break;
+#endif
+ default:
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ case RR_MFLO:
+ gen_HILO(ctx, OPC_MFLO, 0, rx);
+ break;
+#if defined (TARGET_MIPS64)
+ case RR_DSRA:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_shift_imm(ctx, OPC_DSRA, ry, ry, sa);
+ break;
+ case RR_DSLLV:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_shift(ctx, OPC_DSLLV, ry, rx, ry);
+ break;
+ case RR_DSRLV:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_shift(ctx, OPC_DSRLV, ry, rx, ry);
+ break;
+ case RR_DSRAV:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_shift(ctx, OPC_DSRAV, ry, rx, ry);
+ break;
+#endif
+ case RR_MULT:
+ gen_muldiv(ctx, OPC_MULT, 0, rx, ry);
+ break;
+ case RR_MULTU:
+ gen_muldiv(ctx, OPC_MULTU, 0, rx, ry);
+ break;
+ case RR_DIV:
+ gen_muldiv(ctx, OPC_DIV, 0, rx, ry);
+ break;
+ case RR_DIVU:
+ gen_muldiv(ctx, OPC_DIVU, 0, rx, ry);
+ break;
+#if defined (TARGET_MIPS64)
+ case RR_DMULT:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_muldiv(ctx, OPC_DMULT, 0, rx, ry);
+ break;
+ case RR_DMULTU:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_muldiv(ctx, OPC_DMULTU, 0, rx, ry);
+ break;
+ case RR_DDIV:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_muldiv(ctx, OPC_DDIV, 0, rx, ry);
+ break;
+ case RR_DDIVU:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_muldiv(ctx, OPC_DDIVU, 0, rx, ry);
+ break;
+#endif
+ default:
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ case M16_OPC_EXTEND:
+ decode_extended_mips16_opc(env, ctx);
+ n_bytes = 4;
+ break;
+#if defined(TARGET_MIPS64)
+ case M16_OPC_I64:
+ funct = (ctx->opcode >> 8) & 0x7;
+ decode_i64_mips16(ctx, ry, funct, offset, 0);
+ break;
+#endif
+ default:
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+
+ return n_bytes;
+}
+
+/* microMIPS extension to MIPS32/MIPS64 */
+
+/*
+ * microMIPS32/microMIPS64 major opcodes
+ *
+ * 1. MIPS Architecture for Programmers Volume II-B:
+ * The microMIPS32 Instruction Set (Revision 3.05)
+ *
+ * Table 6.2 microMIPS32 Encoding of Major Opcode Field
+ *
+ * 2. MIPS Architecture For Programmers Volume II-A:
+ * The MIPS64 Instruction Set (Revision 3.51)
+ */
+
+enum {
+ POOL32A = 0x00,
+ POOL16A = 0x01,
+ LBU16 = 0x02,
+ MOVE16 = 0x03,
+ ADDI32 = 0x04,
+ R6_LUI = 0x04,
+ AUI = 0x04,
+ LBU32 = 0x05,
+ SB32 = 0x06,
+ LB32 = 0x07,
+
+ POOL32B = 0x08,
+ POOL16B = 0x09,
+ LHU16 = 0x0a,
+ ANDI16 = 0x0b,
+ ADDIU32 = 0x0c,
+ LHU32 = 0x0d,
+ SH32 = 0x0e,
+ LH32 = 0x0f,
+
+ POOL32I = 0x10,
+ POOL16C = 0x11,
+ LWSP16 = 0x12,
+ POOL16D = 0x13,
+ ORI32 = 0x14,
+ POOL32F = 0x15,
+ POOL32S = 0x16, /* MIPS64 */
+ DADDIU32 = 0x17, /* MIPS64 */
+
+ POOL32C = 0x18,
+ LWGP16 = 0x19,
+ LW16 = 0x1a,
+ POOL16E = 0x1b,
+ XORI32 = 0x1c,
+ JALS32 = 0x1d,
+ BOVC = 0x1d,
+ BEQC = 0x1d,
+ BEQZALC = 0x1d,
+ ADDIUPC = 0x1e,
+ PCREL = 0x1e,
+ BNVC = 0x1f,
+ BNEC = 0x1f,
+ BNEZALC = 0x1f,
+
+ R6_BEQZC = 0x20,
+ JIC = 0x20,
+ POOL16F = 0x21,
+ SB16 = 0x22,
+ BEQZ16 = 0x23,
+ BEQZC16 = 0x23,
+ SLTI32 = 0x24,
+ BEQ32 = 0x25,
+ BC = 0x25,
+ SWC132 = 0x26,
+ LWC132 = 0x27,
+
+ R6_BNEZC = 0x28,
+ JIALC = 0x28,
+ /* 0x29 is reserved */
+ RES_29 = 0x29,
+ SH16 = 0x2a,
+ BNEZ16 = 0x2b,
+ BNEZC16 = 0x2b,
+ SLTIU32 = 0x2c,
+ BNE32 = 0x2d,
+ BALC = 0x2d,
+ SDC132 = 0x2e,
+ LDC132 = 0x2f,
+
+ BLEZALC = 0x30,
+ BGEZALC = 0x30,
+ BGEUC = 0x30,
+ /* 0x31 is reserved */
+ RES_31 = 0x31,
+ SWSP16 = 0x32,
+ B16 = 0x33,
+ BC16 = 0x33,
+ ANDI32 = 0x34,
+ J32 = 0x35,
+ BGTZC = 0x35,
+ BLTZC = 0x35,
+ BLTC = 0x35,
+ SD32 = 0x36, /* MIPS64 */
+ LD32 = 0x37, /* MIPS64 */
+
+ BGTZALC = 0x38,
+ BLTZALC = 0x38,
+ BLTUC = 0x38,
+ /* 0x39 is reserved */
+ RES_39 = 0x39,
+ SW16 = 0x3a,
+ LI16 = 0x3b,
+ JALX32 = 0x3c,
+ JAL32 = 0x3d,
+ BLEZC = 0x3d,
+ BGEZC = 0x3d,
+ BGEC = 0x3d,
+ SW32 = 0x3e,
+ LW32 = 0x3f
+};
+
+/* PCREL instructions perform PC-relative address calculation; minor opcode in bits 20..16 */
+enum {
+ ADDIUPC_00 = 0x00,
+ ADDIUPC_07 = 0x07,
+ AUIPC = 0x1e,
+ ALUIPC = 0x1f,
+ LWPC_08 = 0x08,
+ LWPC_0F = 0x0F,
+};
+
+/* POOL32A encoding of minor opcode field */
+
+enum {
+ /* These opcodes are distinguished only by bits 9..6; those bits are
+ * what are recorded below. */
+ SLL32 = 0x0,
+ SRL32 = 0x1,
+ SRA = 0x2,
+ ROTR = 0x3,
+ SELEQZ = 0x5,
+ SELNEZ = 0x6,
+ R6_RDHWR = 0x7,
+
+ SLLV = 0x0,
+ SRLV = 0x1,
+ SRAV = 0x2,
+ ROTRV = 0x3,
+ ADD = 0x4,
+ ADDU32 = 0x5,
+ SUB = 0x6,
+ SUBU32 = 0x7,
+ MUL = 0x8,
+ AND = 0x9,
+ OR32 = 0xa,
+ NOR = 0xb,
+ XOR32 = 0xc,
+ SLT = 0xd,
+ SLTU = 0xe,
+
+ MOVN = 0x0,
+ R6_MUL = 0x0,
+ MOVZ = 0x1,
+ MUH = 0x1,
+ MULU = 0x2,
+ MUHU = 0x3,
+ LWXS = 0x4,
+ R6_DIV = 0x4,
+ MOD = 0x5,
+ R6_DIVU = 0x6,
+ MODU = 0x7,
+
+ /* The following can be distinguished by their lower 6 bits. */
+ BREAK32 = 0x07,
+ INS = 0x0c,
+ LSA = 0x0f,
+ ALIGN = 0x1f,
+ EXT = 0x2c,
+ POOL32AXF = 0x3c,
+ SIGRIE = 0x3f
+};
+
+/* POOL32AXF encoding of minor opcode field extension */
+
+/*
+ * 1. MIPS Architecture for Programmers Volume II-B:
+ * The microMIPS32 Instruction Set (Revision 3.05)
+ *
+ * Table 6.5 POOL32Axf Encoding of Minor Opcode Extension Field
+ *
+ * 2. MIPS Architecture for Programmers Volume IV-e:
+ * The MIPS DSP Application-Specific Extension
+ * to the microMIPS32 Architecture (Revision 2.34)
+ *
+ * Table 5.5 POOL32Axf Encoding of Minor Opcode Extension Field
+ */
+
+enum {
+ /* bits 11..6 */
+ TEQ = 0x00,
+ TGE = 0x08,
+ TGEU = 0x10,
+ TLT = 0x20,
+ TLTU = 0x28,
+ TNE = 0x30,
+
+ MFC0 = 0x03,
+ MTC0 = 0x0b,
+
+ /* start of microMIPS32 DSP */
+
+ /* bits 13..12 for 0x01 */
+ MFHI_ACC = 0x0,
+ MFLO_ACC = 0x1,
+ MTHI_ACC = 0x2,
+ MTLO_ACC = 0x3,
+
+ /* bits 13..12 for 0x2a */
+ MADD_ACC = 0x0,
+ MADDU_ACC = 0x1,
+ MSUB_ACC = 0x2,
+ MSUBU_ACC = 0x3,
+
+ /* bits 13..12 for 0x32 */
+ MULT_ACC = 0x0,
+ MULTU_ACC = 0x1,
+
+ /* end of microMIPS32 DSP */
+
+ /* bits 15..12 for 0x2c */
+ BITSWAP = 0x0,
+ SEB = 0x2,
+ SEH = 0x3,
+ CLO = 0x4,
+ CLZ = 0x5,
+ RDHWR = 0x6,
+ WSBH = 0x7,
+ MULT = 0x8,
+ MULTU = 0x9,
+ DIV = 0xa,
+ DIVU = 0xb,
+ MADD = 0xc,
+ MADDU = 0xd,
+ MSUB = 0xe,
+ MSUBU = 0xf,
+
+ /* bits 15..12 for 0x34 */
+ MFC2 = 0x4,
+ MTC2 = 0x5,
+ MFHC2 = 0x8,
+ MTHC2 = 0x9,
+ CFC2 = 0xc,
+ CTC2 = 0xd,
+
+ /* bits 15..12 for 0x3c */
+ JALR = 0x0,
+ JR = 0x0, /* alias */
+ JALRC = 0x0,
+ JRC = 0x0,
+ JALR_HB = 0x1,
+ JALRC_HB = 0x1,
+ JALRS = 0x4,
+ JALRS_HB = 0x5,
+
+ /* bits 15..12 for 0x05 */
+ RDPGPR = 0xe,
+ WRPGPR = 0xf,
+
+ /* bits 15..12 for 0x0d */
+ TLBP = 0x0,
+ TLBR = 0x1,
+ TLBWI = 0x2,
+ TLBWR = 0x3,
+ TLBINV = 0x4,
+ TLBINVF = 0x5,
+ WAIT = 0x9,
+ IRET = 0xd,
+ DERET = 0xe,
+ ERET = 0xf,
+
+ /* bits 15..12 for 0x15 */
+ DMT = 0x0,
+ DVPE = 0x1,
+ EMT = 0x2,
+ EVPE = 0x3,
+
+ /* bits 15..12 for 0x1d */
+ DI = 0x4,
+ EI = 0x5,
+
+ /* bits 15..12 for 0x2d */
+ SYNC = 0x6,
+ SYSCALL = 0x8,
+ SDBBP = 0xd,
+
+ /* bits 15..12 for 0x35 */
+ MFHI32 = 0x0,
+ MFLO32 = 0x1,
+ MTHI32 = 0x2,
+ MTLO32 = 0x3,
+};
+
+/* POOL32B encoding of minor opcode field (bits 15..12) */
+
+enum {
+ LWC2 = 0x0,
+ LWP = 0x1,
+ LDP = 0x4,
+ LWM32 = 0x5,
+ CACHE = 0x6,
+ LDM = 0x7,
+ SWC2 = 0x8,
+ SWP = 0x9,
+ SDP = 0xc,
+ SWM32 = 0xd,
+ SDM = 0xf
+};
+
+/* POOL32C encoding of minor opcode field (bits 15..12) */
+
+enum {
+ LWL = 0x0,
+ SWL = 0x8,
+ LWR = 0x1,
+ SWR = 0x9,
+ PREF = 0x2,
+ /* 0xa is reserved */
+ LL = 0x3,
+ SC = 0xb,
+ LDL = 0x4,
+ SDL = 0xc,
+ LDR = 0x5,
+ SDR = 0xd,
+ /* 0x6 is reserved */
+ LWU = 0xe,
+ LLD = 0x7,
+ SCD = 0xf
+};
+
+/* POOL32F encoding of minor opcode field (bits 5..0) */
+
+enum {
+ /* These are the values of bits 7..6 */
+ ADD_FMT = 0x0,
+
+ SUB_FMT = 0x1,
+
+ MUL_FMT = 0x2,
+
+ DIV_FMT = 0x3,
+
+ /* These are the values of bits 8..6 */
+ MOVN_FMT = 0x0,
+ RSQRT2_FMT = 0x0,
+ MOVF_FMT = 0x0,
+ RINT_FMT = 0x0,
+ SELNEZ_FMT = 0x0,
+
+ MOVZ_FMT = 0x1,
+ LWXC1 = 0x1,
+ MOVT_FMT = 0x1,
+ CLASS_FMT = 0x1,
+ SELEQZ_FMT = 0x1,
+
+ PLL_PS = 0x2,
+ SWXC1 = 0x2,
+ SEL_FMT = 0x2,
+
+ PLU_PS = 0x3,
+ LDXC1 = 0x3,
+
+ MOVN_FMT_04 = 0x4,
+ PUL_PS = 0x4,
+ SDXC1 = 0x4,
+ RECIP2_FMT = 0x4,
+
+ MOVZ_FMT_05 = 0x05,
+ PUU_PS = 0x5,
+ LUXC1 = 0x5,
+
+ CVT_PS_S = 0x6,
+ SUXC1 = 0x6,
+ ADDR_PS = 0x6,
+ PREFX = 0x6,
+ MADDF_FMT = 0x6,
+
+ MULR_PS = 0x7,
+ MSUBF_FMT = 0x7,
+
+ MADD_S = 0x01,
+ MADD_D = 0x09,
+ MADD_PS = 0x11,
+ ALNV_PS = 0x19,
+ MSUB_S = 0x21,
+ MSUB_D = 0x29,
+ MSUB_PS = 0x31,
+
+ NMADD_S = 0x02,
+ NMADD_D = 0x0a,
+ NMADD_PS = 0x12,
+ NMSUB_S = 0x22,
+ NMSUB_D = 0x2a,
+ NMSUB_PS = 0x32,
+
+ MIN_FMT = 0x3,
+ MAX_FMT = 0xb,
+ MINA_FMT = 0x23,
+ MAXA_FMT = 0x2b,
+ POOL32FXF = 0x3b,
+
+ CABS_COND_FMT = 0x1c, /* MIPS3D */
+ C_COND_FMT = 0x3c,
+
+ CMP_CONDN_S = 0x5,
+ CMP_CONDN_D = 0x15
+};
+
+/* POOL32Fxf encoding of minor opcode extension field */
+
+enum {
+ CVT_L = 0x04,
+ RSQRT_FMT = 0x08,
+ FLOOR_L = 0x0c,
+ CVT_PW_PS = 0x1c,
+ CVT_W = 0x24,
+ SQRT_FMT = 0x28,
+ FLOOR_W = 0x2c,
+ CVT_PS_PW = 0x3c,
+ CFC1 = 0x40,
+ RECIP_FMT = 0x48,
+ CEIL_L = 0x4c,
+ CTC1 = 0x60,
+ CEIL_W = 0x6c,
+ MFC1 = 0x80,
+ CVT_S_PL = 0x84,
+ TRUNC_L = 0x8c,
+ MTC1 = 0xa0,
+ CVT_S_PU = 0xa4,
+ TRUNC_W = 0xac,
+ MFHC1 = 0xc0,
+ ROUND_L = 0xcc,
+ MTHC1 = 0xe0,
+ ROUND_W = 0xec,
+
+ MOV_FMT = 0x01,
+ MOVF = 0x05,
+ ABS_FMT = 0x0d,
+ RSQRT1_FMT = 0x1d,
+ MOVT = 0x25,
+ NEG_FMT = 0x2d,
+ CVT_D = 0x4d,
+ RECIP1_FMT = 0x5d,
+ CVT_S = 0x6d
+};
+
+/* POOL32I encoding of minor opcode field (bits 25..21) */
+
+enum {
+ BLTZ = 0x00,
+ BLTZAL = 0x01,
+ BGEZ = 0x02,
+ BGEZAL = 0x03,
+ BLEZ = 0x04,
+ BNEZC = 0x05,
+ BGTZ = 0x06,
+ BEQZC = 0x07,
+ TLTI = 0x08,
+ BC1EQZC = 0x08,
+ TGEI = 0x09,
+ BC1NEZC = 0x09,
+ TLTIU = 0x0a,
+ BC2EQZC = 0x0a,
+ TGEIU = 0x0b,
+ BC2NEZC = 0x0b,
+ TNEI = 0x0c,
+ R6_SYNCI = 0x0c,
+ LUI = 0x0d,
+ TEQI = 0x0e,
+ SYNCI = 0x10,
+ BLTZALS = 0x11,
+ BGEZALS = 0x13,
+ BC2F = 0x14,
+ BC2T = 0x15,
+ BPOSGE64 = 0x1a,
+ BPOSGE32 = 0x1b,
+ /* These overlap and are distinguished by bit 16 of the instruction */
+ BC1F = 0x1c,
+ BC1T = 0x1d,
+ BC1ANY2F = 0x1c,
+ BC1ANY2T = 0x1d,
+ BC1ANY4F = 0x1e,
+ BC1ANY4T = 0x1f
+};
+
+/* POOL16A encoding of minor opcode field */
+
+enum {
+ ADDU16 = 0x0,
+ SUBU16 = 0x1
+};
+
+/* POOL16B encoding of minor opcode field */
+
+enum {
+ SLL16 = 0x0,
+ SRL16 = 0x1
+};
+
+/* POOL16C encoding of minor opcode field */
+
+enum {
+ NOT16 = 0x00,
+ XOR16 = 0x04,
+ AND16 = 0x08,
+ OR16 = 0x0c,
+ LWM16 = 0x10,
+ SWM16 = 0x14,
+ JR16 = 0x18,
+ JRC16 = 0x1a,
+ JALR16 = 0x1c,
+ JALR16S = 0x1e,
+ MFHI16 = 0x20,
+ MFLO16 = 0x24,
+ BREAK16 = 0x28,
+ SDBBP16 = 0x2c,
+ JRADDIUSP = 0x30
+};
+
+/* R6 POOL16C encoding of minor opcode field (bits 0..5) */
+
+enum {
+ R6_NOT16 = 0x00,
+ R6_AND16 = 0x01,
+ R6_LWM16 = 0x02,
+ R6_JRC16 = 0x03,
+ MOVEP = 0x04,
+ MOVEP_07 = 0x07,
+ R6_XOR16 = 0x08,
+ R6_OR16 = 0x09,
+ R6_SWM16 = 0x0a,
+ JALRC16 = 0x0b,
+ MOVEP_0C = 0x0c,
+ MOVEP_0F = 0x0f,
+ JRCADDIUSP = 0x13,
+ R6_BREAK16 = 0x1b,
+ R6_SDBBP16 = 0x3b
+};
+
+/* POOL16D encoding of minor opcode field */
+
+enum {
+ ADDIUS5 = 0x0,
+ ADDIUSP = 0x1
+};
+
+/* POOL16E encoding of minor opcode field */
+
+enum {
+ ADDIUR2 = 0x0,
+ ADDIUR1SP = 0x1
+};
+
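+/* Map the 3-bit register field used by 16-bit microMIPS formats to the
+ * GPR it names: encodings 0..7 select $16, $17, $2, $3, $4, $5, $6, $7. */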
+static int mmreg (int r)
+{
+ static const int map[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
+
+ return map[r];
+}
+
+/* Used for 16-bit store instructions. */
+static int mmreg2 (int r)
+{
+ static const int map[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
+
+ return map[r];
+}
+
+#define uMIPS_RD(op) ((op >> 7) & 0x7)
+#define uMIPS_RS(op) ((op >> 4) & 0x7)
+#define uMIPS_RS2(op) uMIPS_RS(op)
+#define uMIPS_RS1(op) ((op >> 1) & 0x7)
+#define uMIPS_RD5(op) ((op >> 5) & 0x1f)
+#define uMIPS_RS5(op) (op & 0x1f)
+
+/* Signed immediate */
+#define SIMM(op, start, width) \
+ ((int32_t)(((op >> start) & ((~0U) >> (32-width))) \
+ << (32-width)) \
+ >> (32-width))
+/* Zero-extended immediate */
+#define ZIMM(op, start, width) ((op >> start) & ((~0U) >> (32-width)))
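+/* For example, SIMM(op, 1, 4) yields bits 4..1 of op sign-extended to 32
+ * bits, while ZIMM(op, 1, 4) yields the same field zero-extended. */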
+
+static void gen_addiur1sp(DisasContext *ctx)
+{
+ int rd = mmreg(uMIPS_RD(ctx->opcode));
+
+ gen_arith_imm(ctx, OPC_ADDIU, rd, 29, ((ctx->opcode >> 1) & 0x3f) << 2);
+}
+
+static void gen_addiur2(DisasContext *ctx)
+{
+ static const int decoded_imm[] = { 1, 4, 8, 12, 16, 20, 24, -1 };
+ int rd = mmreg(uMIPS_RD(ctx->opcode));
+ int rs = mmreg(uMIPS_RS(ctx->opcode));
+
+ gen_arith_imm(ctx, OPC_ADDIU, rd, rs, decoded_imm[ZIMM(ctx->opcode, 1, 3)]);
+}
+
+static void gen_addiusp(DisasContext *ctx)
+{
+ int encoded = ZIMM(ctx->opcode, 1, 9);
+ int decoded;
+
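+ /* The 9-bit field is not plain two's complement: 0 and 1 encode 256 and
+ * 257, 2..255 encode themselves, 256..509 encode -256..-3, and 510/511
+ * encode -258/-257. The decoded word count is scaled by 4 below. */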
+ if (encoded <= 1) {
+ decoded = 256 + encoded;
+ } else if (encoded <= 255) {
+ decoded = encoded;
+ } else if (encoded <= 509) {
+ decoded = encoded - 512;
+ } else {
+ decoded = encoded - 768;
+ }
+
+ gen_arith_imm(ctx, OPC_ADDIU, 29, 29, decoded << 2);
+}
+
+static void gen_addius5(DisasContext *ctx)
+{
+ int imm = SIMM(ctx->opcode, 1, 4);
+ int rd = (ctx->opcode >> 5) & 0x1f;
+
+ gen_arith_imm(ctx, OPC_ADDIU, rd, rd, imm);
+}
+
+static void gen_andi16(DisasContext *ctx)
+{
+ static const int decoded_imm[] = { 128, 1, 2, 3, 4, 7, 8, 15, 16,
+ 31, 32, 63, 64, 255, 32768, 65535 };
+ int rd = mmreg(uMIPS_RD(ctx->opcode));
+ int rs = mmreg(uMIPS_RS(ctx->opcode));
+ int encoded = ZIMM(ctx->opcode, 0, 4);
+
+ gen_logic_imm(ctx, OPC_ANDI, rd, rs, decoded_imm[encoded]);
+}
+
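+/* LWM32/SWM32 (and LDM/SDM on MIPS64): transfer the register list encoded
+ * in 'reglist' starting at base + offset via a helper call. The insn is
+ * reserved (RI) when it appears in a branch delay slot. */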
+static void gen_ldst_multiple (DisasContext *ctx, uint32_t opc, int reglist,
+ int base, int16_t offset)
+{
+ TCGv t0, t1;
+ TCGv_i32 t2;
+
+ if (ctx->hflags & MIPS_HFLAG_BMASK) {
+ generate_exception_end(ctx, EXCP_RI);
+ return;
+ }
+
+ t0 = tcg_temp_new();
+
+ gen_base_offset_addr(ctx, t0, base, offset);
+
+ t1 = tcg_const_tl(reglist);
+ t2 = tcg_const_i32(ctx->mem_idx);
+
+ save_cpu_state(ctx, 1);
+ switch (opc) {
+ case LWM32:
+ gen_helper_lwm(cpu_env, t0, t1, t2);
+ break;
+ case SWM32:
+ gen_helper_swm(cpu_env, t0, t1, t2);
+ break;
+#ifdef TARGET_MIPS64
+ case LDM:
+ gen_helper_ldm(cpu_env, t0, t1, t2);
+ break;
+ case SDM:
+ gen_helper_sdm(cpu_env, t0, t1, t2);
+ break;
+#endif
+ }
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free_i32(t2);
+}
+
+
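+/* Decode the pre-R6 POOL16C minor opcode. Several entries span more than
+ * one value of the 6-bit field because their low bits belong to the
+ * operand encoding, hence the "FOO + n" case groups below. */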
+static void gen_pool16c_insn(DisasContext *ctx)
+{
+ int rd = mmreg((ctx->opcode >> 3) & 0x7);
+ int rs = mmreg(ctx->opcode & 0x7);
+
+ switch (((ctx->opcode) >> 4) & 0x3f) {
+ case NOT16 + 0:
+ case NOT16 + 1:
+ case NOT16 + 2:
+ case NOT16 + 3:
+ gen_logic(ctx, OPC_NOR, rd, rs, 0);
+ break;
+ case XOR16 + 0:
+ case XOR16 + 1:
+ case XOR16 + 2:
+ case XOR16 + 3:
+ gen_logic(ctx, OPC_XOR, rd, rd, rs);
+ break;
+ case AND16 + 0:
+ case AND16 + 1:
+ case AND16 + 2:
+ case AND16 + 3:
+ gen_logic(ctx, OPC_AND, rd, rd, rs);
+ break;
+ case OR16 + 0:
+ case OR16 + 1:
+ case OR16 + 2:
+ case OR16 + 3:
+ gen_logic(ctx, OPC_OR, rd, rd, rs);
+ break;
+ case LWM16 + 0:
+ case LWM16 + 1:
+ case LWM16 + 2:
+ case LWM16 + 3:
+ {
+ static const int lwm_convert[] = { 0x11, 0x12, 0x13, 0x14 };
+ int offset = ZIMM(ctx->opcode, 0, 4);
+
+ gen_ldst_multiple(ctx, LWM32, lwm_convert[(ctx->opcode >> 4) & 0x3],
+ 29, offset << 2);
+ }
+ break;
+ case SWM16 + 0:
+ case SWM16 + 1:
+ case SWM16 + 2:
+ case SWM16 + 3:
+ {
+ static const int swm_convert[] = { 0x11, 0x12, 0x13, 0x14 };
+ int offset = ZIMM(ctx->opcode, 0, 4);
+
+ gen_ldst_multiple(ctx, SWM32, swm_convert[(ctx->opcode >> 4) & 0x3],
+ 29, offset << 2);
+ }
+ break;
+ case JR16 + 0:
+ case JR16 + 1:
+ {
+ int reg = ctx->opcode & 0x1f;
+
+ gen_compute_branch(ctx, OPC_JR, 2, reg, 0, 0, 4);
+ }
+ break;
+ case JRC16 + 0:
+ case JRC16 + 1:
+ {
+ int reg = ctx->opcode & 0x1f;
+ gen_compute_branch(ctx, OPC_JR, 2, reg, 0, 0, 0);
+ /* Let normal delay slot handling in our caller take us
+ to the branch target. */
+ }
+ break;
+ case JALR16 + 0:
+ case JALR16 + 1:
+ gen_compute_branch(ctx, OPC_JALR, 2, ctx->opcode & 0x1f, 31, 0, 4);
+ ctx->hflags |= MIPS_HFLAG_BDS_STRICT;
+ break;
+ case JALR16S + 0:
+ case JALR16S + 1:
+ gen_compute_branch(ctx, OPC_JALR, 2, ctx->opcode & 0x1f, 31, 0, 2);
+ ctx->hflags |= MIPS_HFLAG_BDS_STRICT;
+ break;
+ case MFHI16 + 0:
+ case MFHI16 + 1:
+ gen_HILO(ctx, OPC_MFHI, 0, uMIPS_RS5(ctx->opcode));
+ break;
+ case MFLO16 + 0:
+ case MFLO16 + 1:
+ gen_HILO(ctx, OPC_MFLO, 0, uMIPS_RS5(ctx->opcode));
+ break;
+ case BREAK16:
+ generate_exception_end(ctx, EXCP_BREAK);
+ break;
+ case SDBBP16:
+ if (is_uhi(extract32(ctx->opcode, 0, 4))) {
+ gen_helper_do_semihosting(cpu_env);
+ } else {
+ /* XXX: not clear which exception should be raised
+ * when in debug mode...
+ */
+ check_insn(ctx, ISA_MIPS32);
+ generate_exception_end(ctx, EXCP_DBp);
+ }
+ break;
+ case JRADDIUSP + 0:
+ case JRADDIUSP + 1:
+ {
+ int imm = ZIMM(ctx->opcode, 0, 5);
+ gen_compute_branch(ctx, OPC_JR, 2, 31, 0, 0, 0);
+ gen_arith_imm(ctx, OPC_ADDIU, 29, 29, imm << 2);
+ /* Let normal delay slot handling in our caller take us
+ to the branch target. */
+ }
+ break;
+ default:
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+}
+
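+/* MOVEP: copy two source registers (or zero) into a pair of destination
+ * registers with a single 16-bit insn. The enc_* arguments index the
+ * lookup tables below, which hold the GPR numbers the encoding selects. */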
+static inline void gen_movep(DisasContext *ctx, int enc_dest, int enc_rt,
+ int enc_rs)
+{
+ int rd, rs, re, rt;
+ static const int rd_enc[] = { 5, 5, 6, 4, 4, 4, 4, 4 };
+ static const int re_enc[] = { 6, 7, 7, 21, 22, 5, 6, 7 };
+ static const int rs_rt_enc[] = { 0, 17, 2, 3, 16, 18, 19, 20 };
+ rd = rd_enc[enc_dest];
+ re = re_enc[enc_dest];
+ rs = rs_rt_enc[enc_rs];
+ rt = rs_rt_enc[enc_rt];
+ if (rs) {
+ tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[rd], 0);
+ }
+ if (rt) {
+ tcg_gen_mov_tl(cpu_gpr[re], cpu_gpr[rt]);
+ } else {
+ tcg_gen_movi_tl(cpu_gpr[re], 0);
+ }
+}
+
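+/* Decode the R6 variant of POOL16C, which uses a different minor opcode
+ * layout from the pre-R6 encoding handled above. */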
+static void gen_pool16c_r6_insn(DisasContext *ctx)
+{
+ int rt = mmreg((ctx->opcode >> 7) & 0x7);
+ int rs = mmreg((ctx->opcode >> 4) & 0x7);
+
+ switch (ctx->opcode & 0xf) {
+ case R6_NOT16:
+ gen_logic(ctx, OPC_NOR, rt, rs, 0);
+ break;
+ case R6_AND16:
+ gen_logic(ctx, OPC_AND, rt, rt, rs);
+ break;
+ case R6_LWM16:
+ {
+ int lwm_converted = 0x11 + extract32(ctx->opcode, 8, 2);
+ int offset = extract32(ctx->opcode, 4, 4);
+ gen_ldst_multiple(ctx, LWM32, lwm_converted, 29, offset << 2);
+ }
+ break;
+ case R6_JRC16: /* JRCADDIUSP */
+ if ((ctx->opcode >> 4) & 1) {
+ /* JRCADDIUSP */
+ int imm = extract32(ctx->opcode, 5, 5);
+ gen_compute_branch(ctx, OPC_JR, 2, 31, 0, 0, 0);
+ gen_arith_imm(ctx, OPC_ADDIU, 29, 29, imm << 2);
+ } else {
+ /* JRC16 */
+ int rs = extract32(ctx->opcode, 5, 5);
+ gen_compute_branch(ctx, OPC_JR, 2, rs, 0, 0, 0);
+ }
+ break;
+ case MOVEP ... MOVEP_07:
+ case MOVEP_0C ... MOVEP_0F:
+ {
+ int enc_dest = uMIPS_RD(ctx->opcode);
+ int enc_rt = uMIPS_RS2(ctx->opcode);
+ int enc_rs = (ctx->opcode & 3) | ((ctx->opcode >> 1) & 4);
+ gen_movep(ctx, enc_dest, enc_rt, enc_rs);
+ }
+ break;
+ case R6_XOR16:
+ gen_logic(ctx, OPC_XOR, rt, rt, rs);
+ break;
+ case R6_OR16:
+ gen_logic(ctx, OPC_OR, rt, rt, rs);
+ break;
+ case R6_SWM16:
+ {
+ int swm_converted = 0x11 + extract32(ctx->opcode, 8, 2);
+ int offset = extract32(ctx->opcode, 4, 4);
+ gen_ldst_multiple(ctx, SWM32, swm_converted, 29, offset << 2);
+ }
+ break;
+ case JALRC16: /* BREAK16, SDBBP16 */
+ switch (ctx->opcode & 0x3f) {
+ case JALRC16:
+ case JALRC16 + 0x20:
+ /* JALRC16 */
+ gen_compute_branch(ctx, OPC_JALR, 2, (ctx->opcode >> 5) & 0x1f,
+ 31, 0, 0);
+ break;
+ case R6_BREAK16:
+ /* BREAK16 */
+ generate_exception(ctx, EXCP_BREAK);
+ break;
+ case R6_SDBBP16:
+ /* SDBBP16 */
+ if (is_uhi(extract32(ctx->opcode, 6, 4))) {
+ gen_helper_do_semihosting(cpu_env);
+ } else {
+ if (ctx->hflags & MIPS_HFLAG_SBRI) {
+ generate_exception(ctx, EXCP_RI);
+ } else {
+ generate_exception(ctx, EXCP_DBp);
+ }
+ }
+ break;
+ }
+ break;
+ default:
+ generate_exception(ctx, EXCP_RI);
+ break;
+ }
+}
+
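+/* LWXS: indexed, scaled word load, i.e. rd = mem[base + (index << 2)]. */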
+static void gen_ldxs (DisasContext *ctx, int base, int index, int rd)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+
+ gen_load_gpr(t0, base);
+
+ if (index != 0) {
+ gen_load_gpr(t1, index);
+ tcg_gen_shli_tl(t1, t1, 2);
+ gen_op_addr_add(ctx, t0, t1, t0);
+ }
+
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL);
+ gen_store_gpr(t1, rd);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
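+/* LWP/SWP (and LDP/SDP on MIPS64): transfer the register pair rd, rd+1
+ * to or from consecutive words (doublewords) at base + offset. rd == 31,
+ * rd == base for the loads, and use in a delay slot are all reserved. */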
+static void gen_ldst_pair (DisasContext *ctx, uint32_t opc, int rd,
+ int base, int16_t offset)
+{
+ TCGv t0, t1;
+
+ if (ctx->hflags & MIPS_HFLAG_BMASK || rd == 31) {
+ generate_exception_end(ctx, EXCP_RI);
+ return;
+ }
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+
+ gen_base_offset_addr(ctx, t0, base, offset);
+
+ switch (opc) {
+ case LWP:
+ if (rd == base) {
+ generate_exception_end(ctx, EXCP_RI);
+ return;
+ }
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL);
+ gen_store_gpr(t1, rd);
+ tcg_gen_movi_tl(t1, 4);
+ gen_op_addr_add(ctx, t0, t0, t1);
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TESL);
+ gen_store_gpr(t1, rd+1);
+ break;
+ case SWP:
+ gen_load_gpr(t1, rd);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL);
+ tcg_gen_movi_tl(t1, 4);
+ gen_op_addr_add(ctx, t0, t0, t1);
+ gen_load_gpr(t1, rd+1);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEUL);
+ break;
+#ifdef TARGET_MIPS64
+ case LDP:
+ if (rd == base) {
+ generate_exception_end(ctx, EXCP_RI);
+ return;
+ }
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEQ);
+ gen_store_gpr(t1, rd);
+ tcg_gen_movi_tl(t1, 8);
+ gen_op_addr_add(ctx, t0, t0, t1);
+ tcg_gen_qemu_ld_tl(t1, t0, ctx->mem_idx, MO_TEQ);
+ gen_store_gpr(t1, rd+1);
+ break;
+ case SDP:
+ gen_load_gpr(t1, rd);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEQ);
+ tcg_gen_movi_tl(t1, 8);
+ gen_op_addr_add(ctx, t0, t0, t1);
+ gen_load_gpr(t1, rd+1);
+ tcg_gen_qemu_st_tl(t1, t0, ctx->mem_idx, MO_TEQ);
+ break;
+#endif
+ }
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
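+/* Map the SYNC stype field onto a TCG memory barrier. Only the lightweight
+ * variants defined by the architecture are special-cased; any other stype
+ * conservatively emits a full barrier. */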
+static void gen_sync(int stype)
+{
+ TCGBar tcg_mo = TCG_BAR_SC;
+
+ switch (stype) {
+ case 0x4: /* SYNC_WMB */
+ tcg_mo |= TCG_MO_ST_ST;
+ break;
+ case 0x10: /* SYNC_MB */
+ tcg_mo |= TCG_MO_ALL;
+ break;
+ case 0x11: /* SYNC_ACQUIRE */
+ tcg_mo |= TCG_MO_LD_LD | TCG_MO_LD_ST;
+ break;
+ case 0x12: /* SYNC_RELEASE */
+ tcg_mo |= TCG_MO_ST_ST | TCG_MO_LD_ST;
+ break;
+ case 0x13: /* SYNC_RMB */
+ tcg_mo |= TCG_MO_LD_LD;
+ break;
+ default:
+ tcg_mo |= TCG_MO_ALL;
+ break;
+ }
+
+ tcg_gen_mb(tcg_mo);
+}
+
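+/* Decode the POOL32Axf minor opcode extension: traps, CP0 moves, HI/LO and
+ * multiply/divide (accumulator) ops, and miscellaneous control insns. */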
+static void gen_pool32axf (CPUMIPSState *env, DisasContext *ctx, int rt, int rs)
+{
+ int extension = (ctx->opcode >> 6) & 0x3f;
+ int minor = (ctx->opcode >> 12) & 0xf;
+ uint32_t mips32_op;
+
+ switch (extension) {
+ case TEQ:
+ mips32_op = OPC_TEQ;
+ goto do_trap;
+ case TGE:
+ mips32_op = OPC_TGE;
+ goto do_trap;
+ case TGEU:
+ mips32_op = OPC_TGEU;
+ goto do_trap;
+ case TLT:
+ mips32_op = OPC_TLT;
+ goto do_trap;
+ case TLTU:
+ mips32_op = OPC_TLTU;
+ goto do_trap;
+ case TNE:
+ mips32_op = OPC_TNE;
+ do_trap:
+ gen_trap(ctx, mips32_op, rs, rt, -1);
+ break;
+#ifndef CONFIG_USER_ONLY
+ case MFC0:
+ case MFC0 + 32:
+ check_cp0_enabled(ctx);
+ if (rt == 0) {
+ /* Treat as NOP. */
+ break;
+ }
+ gen_mfc0(ctx, cpu_gpr[rt], rs, (ctx->opcode >> 11) & 0x7);
+ break;
+ case MTC0:
+ case MTC0 + 32:
+ check_cp0_enabled(ctx);
+ {
+ TCGv t0 = tcg_temp_new();
+
+ gen_load_gpr(t0, rt);
+ gen_mtc0(ctx, t0, rs, (ctx->opcode >> 11) & 0x7);
+ tcg_temp_free(t0);
+ }
+ break;
+#endif
+ case 0x2a:
+ switch (minor & 3) {
+ case MADD_ACC:
+ gen_muldiv(ctx, OPC_MADD, (ctx->opcode >> 14) & 3, rs, rt);
+ break;
+ case MADDU_ACC:
+ gen_muldiv(ctx, OPC_MADDU, (ctx->opcode >> 14) & 3, rs, rt);
+ break;
+ case MSUB_ACC:
+ gen_muldiv(ctx, OPC_MSUB, (ctx->opcode >> 14) & 3, rs, rt);
+ break;
+ case MSUBU_ACC:
+ gen_muldiv(ctx, OPC_MSUBU, (ctx->opcode >> 14) & 3, rs, rt);
+ break;
+ default:
+ goto pool32axf_invalid;
+ }
+ break;
+ case 0x32:
+ switch (minor & 3) {
+ case MULT_ACC:
+ gen_muldiv(ctx, OPC_MULT, (ctx->opcode >> 14) & 3, rs, rt);
+ break;
+ case MULTU_ACC:
+ gen_muldiv(ctx, OPC_MULTU, (ctx->opcode >> 14) & 3, rs, rt);
+ break;
+ default:
+ goto pool32axf_invalid;
+ }
+ break;
+ case 0x2c:
+ switch (minor) {
+ case BITSWAP:
+ check_insn(ctx, ISA_MIPS32R6);
+ gen_bitswap(ctx, OPC_BITSWAP, rs, rt);
+ break;
+ case SEB:
+ gen_bshfl(ctx, OPC_SEB, rs, rt);
+ break;
+ case SEH:
+ gen_bshfl(ctx, OPC_SEH, rs, rt);
+ break;
+ case CLO:
+ mips32_op = OPC_CLO;
+ goto do_cl;
+ case CLZ:
+ mips32_op = OPC_CLZ;
+ do_cl:
+ check_insn(ctx, ISA_MIPS32);
+ gen_cl(ctx, mips32_op, rt, rs);
+ break;
+ case RDHWR:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ gen_rdhwr(ctx, rt, rs, 0);
+ break;
+ case WSBH:
+ gen_bshfl(ctx, OPC_WSBH, rs, rt);
+ break;
+ case MULT:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_MULT;
+ goto do_mul;
+ case MULTU:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_MULTU;
+ goto do_mul;
+ case DIV:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_DIV;
+ goto do_div;
+ case DIVU:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_DIVU;
+ goto do_div;
+ do_div:
+ check_insn(ctx, ISA_MIPS32);
+ gen_muldiv(ctx, mips32_op, 0, rs, rt);
+ break;
+ case MADD:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_MADD;
+ goto do_mul;
+ case MADDU:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_MADDU;
+ goto do_mul;
+ case MSUB:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_MSUB;
+ goto do_mul;
+ case MSUBU:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_MSUBU;
+ do_mul:
+ check_insn(ctx, ISA_MIPS32);
+ gen_muldiv(ctx, mips32_op, 0, rs, rt);
+ break;
+ default:
+ goto pool32axf_invalid;
+ }
+ break;
+ case 0x34:
+ switch (minor) {
+ case MFC2:
+ case MTC2:
+ case MFHC2:
+ case MTHC2:
+ case CFC2:
+ case CTC2:
+ generate_exception_err(ctx, EXCP_CpU, 2);
+ break;
+ default:
+ goto pool32axf_invalid;
+ }
+ break;
+ case 0x3c:
+ switch (minor) {
+ case JALR: /* JALRC */
+ case JALR_HB: /* JALRC_HB */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* JALRC, JALRC_HB */
+ gen_compute_branch(ctx, OPC_JALR, 4, rs, rt, 0, 0);
+ } else {
+ /* JALR, JALR_HB */
+ gen_compute_branch(ctx, OPC_JALR, 4, rs, rt, 0, 4);
+ ctx->hflags |= MIPS_HFLAG_BDS_STRICT;
+ }
+ break;
+ case JALRS:
+ case JALRS_HB:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ gen_compute_branch(ctx, OPC_JALR, 4, rs, rt, 0, 2);
+ ctx->hflags |= MIPS_HFLAG_BDS_STRICT;
+ break;
+ default:
+ goto pool32axf_invalid;
+ }
+ break;
+ case 0x05:
+ switch (minor) {
+ case RDPGPR:
+ check_cp0_enabled(ctx);
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_load_srsgpr(rs, rt);
+ break;
+ case WRPGPR:
+ check_cp0_enabled(ctx);
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_store_srsgpr(rs, rt);
+ break;
+ default:
+ goto pool32axf_invalid;
+ }
+ break;
+#ifndef CONFIG_USER_ONLY
+ case 0x0d:
+ switch (minor) {
+ case TLBP:
+ mips32_op = OPC_TLBP;
+ goto do_cp0;
+ case TLBR:
+ mips32_op = OPC_TLBR;
+ goto do_cp0;
+ case TLBWI:
+ mips32_op = OPC_TLBWI;
+ goto do_cp0;
+ case TLBWR:
+ mips32_op = OPC_TLBWR;
+ goto do_cp0;
+ case TLBINV:
+ mips32_op = OPC_TLBINV;
+ goto do_cp0;
+ case TLBINVF:
+ mips32_op = OPC_TLBINVF;
+ goto do_cp0;
+ case WAIT:
+ mips32_op = OPC_WAIT;
+ goto do_cp0;
+ case DERET:
+ mips32_op = OPC_DERET;
+ goto do_cp0;
+ case ERET:
+ mips32_op = OPC_ERET;
+ do_cp0:
+ gen_cp0(env, ctx, mips32_op, rt, rs);
+ break;
+ default:
+ goto pool32axf_invalid;
+ }
+ break;
+ case 0x1d:
+ switch (minor) {
+ case DI:
+ check_cp0_enabled(ctx);
+ {
+ TCGv t0 = tcg_temp_new();
+
+ save_cpu_state(ctx, 1);
+ gen_helper_di(t0, cpu_env);
+ gen_store_gpr(t0, rs);
+ /* Stop translation as we may have switched the execution mode */
+ ctx->bstate = BS_STOP;
+ tcg_temp_free(t0);
+ }
+ break;
+ case EI:
+ check_cp0_enabled(ctx);
+ {
+ TCGv t0 = tcg_temp_new();
+
+ save_cpu_state(ctx, 1);
+ gen_helper_ei(t0, cpu_env);
+ gen_store_gpr(t0, rs);
+ /* Stop translation as we may have switched the execution mode */
+ ctx->bstate = BS_STOP;
+ tcg_temp_free(t0);
+ }
+ break;
+ default:
+ goto pool32axf_invalid;
+ }
+ break;
+#endif
+ case 0x2d:
+ switch (minor) {
+ case SYNC:
+ gen_sync(extract32(ctx->opcode, 16, 5));
+ break;
+ case SYSCALL:
+ generate_exception_end(ctx, EXCP_SYSCALL);
+ break;
+ case SDBBP:
+ if (is_uhi(extract32(ctx->opcode, 16, 10))) {
+ gen_helper_do_semihosting(cpu_env);
+ } else {
+ check_insn(ctx, ISA_MIPS32);
+ if (ctx->hflags & MIPS_HFLAG_SBRI) {
+ generate_exception_end(ctx, EXCP_RI);
+ } else {
+ generate_exception_end(ctx, EXCP_DBp);
+ }
+ }
+ break;
+ default:
+ goto pool32axf_invalid;
+ }
+ break;
+ case 0x01:
+ switch (minor & 3) {
+ case MFHI_ACC:
+ gen_HILO(ctx, OPC_MFHI, minor >> 2, rs);
+ break;
+ case MFLO_ACC:
+ gen_HILO(ctx, OPC_MFLO, minor >> 2, rs);
+ break;
+ case MTHI_ACC:
+ gen_HILO(ctx, OPC_MTHI, minor >> 2, rs);
+ break;
+ case MTLO_ACC:
+ gen_HILO(ctx, OPC_MTLO, minor >> 2, rs);
+ break;
+ default:
+ goto pool32axf_invalid;
+ }
+ break;
+ case 0x35:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ switch (minor) {
+ case MFHI32:
+ gen_HILO(ctx, OPC_MFHI, 0, rs);
+ break;
+ case MFLO32:
+ gen_HILO(ctx, OPC_MFLO, 0, rs);
+ break;
+ case MTHI32:
+ gen_HILO(ctx, OPC_MTHI, 0, rs);
+ break;
+ case MTLO32:
+ gen_HILO(ctx, OPC_MTLO, 0, rs);
+ break;
+ default:
+ goto pool32axf_invalid;
+ }
+ break;
+ default:
+ pool32axf_invalid:
+ MIPS_INVAL("pool32axf");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+}
+
+/* Values for microMIPS fmt field. Variable-width, depending on which
+ formats the instruction supports. */
+
+enum {
+ FMT_SD_S = 0,
+ FMT_SD_D = 1,
+
+ FMT_SDPS_S = 0,
+ FMT_SDPS_D = 1,
+ FMT_SDPS_PS = 2,
+
+ FMT_SWL_S = 0,
+ FMT_SWL_W = 1,
+ FMT_SWL_L = 2,
+
+ FMT_DWL_D = 0,
+ FMT_DWL_W = 1,
+ FMT_DWL_L = 2
+};
+
+static void gen_pool32fxf(DisasContext *ctx, int rt, int rs)
+{
+ int extension = (ctx->opcode >> 6) & 0x3ff;
+ uint32_t mips32_op;
+
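+/* Build case labels that combine the minor opcode with its fmt or condition
+ * field. They are only ever used as case labels in the switch below. */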
+#define FLOAT_1BIT_FMT(opc, fmt) (fmt << 8) | opc
+#define FLOAT_2BIT_FMT(opc, fmt) (fmt << 7) | opc
+#define COND_FLOAT_MOV(opc, cond) (cond << 7) | opc
+
+ switch (extension) {
+ case FLOAT_1BIT_FMT(CFC1, 0):
+ mips32_op = OPC_CFC1;
+ goto do_cp1;
+ case FLOAT_1BIT_FMT(CTC1, 0):
+ mips32_op = OPC_CTC1;
+ goto do_cp1;
+ case FLOAT_1BIT_FMT(MFC1, 0):
+ mips32_op = OPC_MFC1;
+ goto do_cp1;
+ case FLOAT_1BIT_FMT(MTC1, 0):
+ mips32_op = OPC_MTC1;
+ goto do_cp1;
+ case FLOAT_1BIT_FMT(MFHC1, 0):
+ mips32_op = OPC_MFHC1;
+ goto do_cp1;
+ case FLOAT_1BIT_FMT(MTHC1, 0):
+ mips32_op = OPC_MTHC1;
+ do_cp1:
+ gen_cp1(ctx, mips32_op, rt, rs);
+ break;
+
+ /* Reciprocal square root */
+ case FLOAT_1BIT_FMT(RSQRT_FMT, FMT_SD_S):
+ mips32_op = OPC_RSQRT_S;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(RSQRT_FMT, FMT_SD_D):
+ mips32_op = OPC_RSQRT_D;
+ goto do_unaryfp;
+
+ /* Square root */
+ case FLOAT_1BIT_FMT(SQRT_FMT, FMT_SD_S):
+ mips32_op = OPC_SQRT_S;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(SQRT_FMT, FMT_SD_D):
+ mips32_op = OPC_SQRT_D;
+ goto do_unaryfp;
+
+ /* Reciprocal */
+ case FLOAT_1BIT_FMT(RECIP_FMT, FMT_SD_S):
+ mips32_op = OPC_RECIP_S;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(RECIP_FMT, FMT_SD_D):
+ mips32_op = OPC_RECIP_D;
+ goto do_unaryfp;
+
+ /* Floor */
+ case FLOAT_1BIT_FMT(FLOOR_L, FMT_SD_S):
+ mips32_op = OPC_FLOOR_L_S;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(FLOOR_L, FMT_SD_D):
+ mips32_op = OPC_FLOOR_L_D;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(FLOOR_W, FMT_SD_S):
+ mips32_op = OPC_FLOOR_W_S;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(FLOOR_W, FMT_SD_D):
+ mips32_op = OPC_FLOOR_W_D;
+ goto do_unaryfp;
+
+ /* Ceiling */
+ case FLOAT_1BIT_FMT(CEIL_L, FMT_SD_S):
+ mips32_op = OPC_CEIL_L_S;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(CEIL_L, FMT_SD_D):
+ mips32_op = OPC_CEIL_L_D;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(CEIL_W, FMT_SD_S):
+ mips32_op = OPC_CEIL_W_S;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(CEIL_W, FMT_SD_D):
+ mips32_op = OPC_CEIL_W_D;
+ goto do_unaryfp;
+
+ /* Truncation */
+ case FLOAT_1BIT_FMT(TRUNC_L, FMT_SD_S):
+ mips32_op = OPC_TRUNC_L_S;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(TRUNC_L, FMT_SD_D):
+ mips32_op = OPC_TRUNC_L_D;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(TRUNC_W, FMT_SD_S):
+ mips32_op = OPC_TRUNC_W_S;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(TRUNC_W, FMT_SD_D):
+ mips32_op = OPC_TRUNC_W_D;
+ goto do_unaryfp;
+
+ /* Round */
+ case FLOAT_1BIT_FMT(ROUND_L, FMT_SD_S):
+ mips32_op = OPC_ROUND_L_S;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(ROUND_L, FMT_SD_D):
+ mips32_op = OPC_ROUND_L_D;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(ROUND_W, FMT_SD_S):
+ mips32_op = OPC_ROUND_W_S;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(ROUND_W, FMT_SD_D):
+ mips32_op = OPC_ROUND_W_D;
+ goto do_unaryfp;
+
+ /* Integer to floating-point conversion */
+ case FLOAT_1BIT_FMT(CVT_L, FMT_SD_S):
+ mips32_op = OPC_CVT_L_S;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(CVT_L, FMT_SD_D):
+ mips32_op = OPC_CVT_L_D;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(CVT_W, FMT_SD_S):
+ mips32_op = OPC_CVT_W_S;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(CVT_W, FMT_SD_D):
+ mips32_op = OPC_CVT_W_D;
+ goto do_unaryfp;
+
+ /* Paired-foo conversions */
+ case FLOAT_1BIT_FMT(CVT_S_PL, 0):
+ mips32_op = OPC_CVT_S_PL;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(CVT_S_PU, 0):
+ mips32_op = OPC_CVT_S_PU;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(CVT_PW_PS, 0):
+ mips32_op = OPC_CVT_PW_PS;
+ goto do_unaryfp;
+ case FLOAT_1BIT_FMT(CVT_PS_PW, 0):
+ mips32_op = OPC_CVT_PS_PW;
+ goto do_unaryfp;
+
+ /* Floating-point moves */
+ case FLOAT_2BIT_FMT(MOV_FMT, FMT_SDPS_S):
+ mips32_op = OPC_MOV_S;
+ goto do_unaryfp;
+ case FLOAT_2BIT_FMT(MOV_FMT, FMT_SDPS_D):
+ mips32_op = OPC_MOV_D;
+ goto do_unaryfp;
+ case FLOAT_2BIT_FMT(MOV_FMT, FMT_SDPS_PS):
+ mips32_op = OPC_MOV_PS;
+ goto do_unaryfp;
+
+ /* Absolute value */
+ case FLOAT_2BIT_FMT(ABS_FMT, FMT_SDPS_S):
+ mips32_op = OPC_ABS_S;
+ goto do_unaryfp;
+ case FLOAT_2BIT_FMT(ABS_FMT, FMT_SDPS_D):
+ mips32_op = OPC_ABS_D;
+ goto do_unaryfp;
+ case FLOAT_2BIT_FMT(ABS_FMT, FMT_SDPS_PS):
+ mips32_op = OPC_ABS_PS;
+ goto do_unaryfp;
+
+ /* Negation */
+ case FLOAT_2BIT_FMT(NEG_FMT, FMT_SDPS_S):
+ mips32_op = OPC_NEG_S;
+ goto do_unaryfp;
+ case FLOAT_2BIT_FMT(NEG_FMT, FMT_SDPS_D):
+ mips32_op = OPC_NEG_D;
+ goto do_unaryfp;
+ case FLOAT_2BIT_FMT(NEG_FMT, FMT_SDPS_PS):
+ mips32_op = OPC_NEG_PS;
+ goto do_unaryfp;
+
+ /* Reciprocal square root step */
+ case FLOAT_2BIT_FMT(RSQRT1_FMT, FMT_SDPS_S):
+ mips32_op = OPC_RSQRT1_S;
+ goto do_unaryfp;
+ case FLOAT_2BIT_FMT(RSQRT1_FMT, FMT_SDPS_D):
+ mips32_op = OPC_RSQRT1_D;
+ goto do_unaryfp;
+ case FLOAT_2BIT_FMT(RSQRT1_FMT, FMT_SDPS_PS):
+ mips32_op = OPC_RSQRT1_PS;
+ goto do_unaryfp;
+
+ /* Reciprocal step */
+ case FLOAT_2BIT_FMT(RECIP1_FMT, FMT_SDPS_S):
+ mips32_op = OPC_RECIP1_S;
+ goto do_unaryfp;
+ case FLOAT_2BIT_FMT(RECIP1_FMT, FMT_SDPS_D):
+ mips32_op = OPC_RECIP1_D;
+ goto do_unaryfp;
+ case FLOAT_2BIT_FMT(RECIP1_FMT, FMT_SDPS_PS):
+ mips32_op = OPC_RECIP1_PS;
+ goto do_unaryfp;
+
+ /* Conversions from double */
+ case FLOAT_2BIT_FMT(CVT_D, FMT_SWL_S):
+ mips32_op = OPC_CVT_D_S;
+ goto do_unaryfp;
+ case FLOAT_2BIT_FMT(CVT_D, FMT_SWL_W):
+ mips32_op = OPC_CVT_D_W;
+ goto do_unaryfp;
+ case FLOAT_2BIT_FMT(CVT_D, FMT_SWL_L):
+ mips32_op = OPC_CVT_D_L;
+ goto do_unaryfp;
+
+ /* Conversions from single */
+ case FLOAT_2BIT_FMT(CVT_S, FMT_DWL_D):
+ mips32_op = OPC_CVT_S_D;
+ goto do_unaryfp;
+ case FLOAT_2BIT_FMT(CVT_S, FMT_DWL_W):
+ mips32_op = OPC_CVT_S_W;
+ goto do_unaryfp;
+ case FLOAT_2BIT_FMT(CVT_S, FMT_DWL_L):
+ mips32_op = OPC_CVT_S_L;
+ do_unaryfp:
+ gen_farith(ctx, mips32_op, -1, rs, rt, 0);
+ break;
+
+ /* Conditional moves on floating-point codes */
+ case COND_FLOAT_MOV(MOVT, 0):
+ case COND_FLOAT_MOV(MOVT, 1):
+ case COND_FLOAT_MOV(MOVT, 2):
+ case COND_FLOAT_MOV(MOVT, 3):
+ case COND_FLOAT_MOV(MOVT, 4):
+ case COND_FLOAT_MOV(MOVT, 5):
+ case COND_FLOAT_MOV(MOVT, 6):
+ case COND_FLOAT_MOV(MOVT, 7):
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ gen_movci(ctx, rt, rs, (ctx->opcode >> 13) & 0x7, 1);
+ break;
+ case COND_FLOAT_MOV(MOVF, 0):
+ case COND_FLOAT_MOV(MOVF, 1):
+ case COND_FLOAT_MOV(MOVF, 2):
+ case COND_FLOAT_MOV(MOVF, 3):
+ case COND_FLOAT_MOV(MOVF, 4):
+ case COND_FLOAT_MOV(MOVF, 5):
+ case COND_FLOAT_MOV(MOVF, 6):
+ case COND_FLOAT_MOV(MOVF, 7):
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ gen_movci(ctx, rt, rs, (ctx->opcode >> 13) & 0x7, 0);
+ break;
+ default:
+ MIPS_INVAL("pool32fxf");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+}
+
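+/* Decode a 32-bit microMIPS instruction. The first halfword is already in
+ * ctx->opcode; fetch the second halfword and dispatch on the major opcode. */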
+static void decode_micromips32_opc(CPUMIPSState *env, DisasContext *ctx)
+{
+ int32_t offset;
+ uint16_t insn;
+ int rt, rs, rd, rr;
+ int16_t imm;
+ uint32_t op, minor, mips32_op;
+ uint32_t cond, fmt, cc;
+
+ insn = cpu_lduw_code(env, ctx->pc + 2);
+ ctx->opcode = (ctx->opcode << 16) | insn;
+
+ rt = (ctx->opcode >> 21) & 0x1f;
+ rs = (ctx->opcode >> 16) & 0x1f;
+ rd = (ctx->opcode >> 11) & 0x1f;
+ rr = (ctx->opcode >> 6) & 0x1f;
+ imm = (int16_t) ctx->opcode;
+
+ op = (ctx->opcode >> 26) & 0x3f;
+ switch (op) {
+ case POOL32A:
+ minor = ctx->opcode & 0x3f;
+ switch (minor) {
+ case 0x00:
+ minor = (ctx->opcode >> 6) & 0xf;
+ switch (minor) {
+ case SLL32:
+ mips32_op = OPC_SLL;
+ goto do_shifti;
+ case SRA:
+ mips32_op = OPC_SRA;
+ goto do_shifti;
+ case SRL32:
+ mips32_op = OPC_SRL;
+ goto do_shifti;
+ case ROTR:
+ mips32_op = OPC_ROTR;
+ do_shifti:
+ gen_shift_imm(ctx, mips32_op, rt, rs, rd);
+ break;
+ case SELEQZ:
+ check_insn(ctx, ISA_MIPS32R6);
+ gen_cond_move(ctx, OPC_SELEQZ, rd, rs, rt);
+ break;
+ case SELNEZ:
+ check_insn(ctx, ISA_MIPS32R6);
+ gen_cond_move(ctx, OPC_SELNEZ, rd, rs, rt);
+ break;
+ case R6_RDHWR:
+ check_insn(ctx, ISA_MIPS32R6);
+ gen_rdhwr(ctx, rt, rs, extract32(ctx->opcode, 11, 3));
+ break;
+ default:
+ goto pool32a_invalid;
+ }
+ break;
+ case 0x10:
+ minor = (ctx->opcode >> 6) & 0xf;
+ switch (minor) {
+ /* Arithmetic */
+ case ADD:
+ mips32_op = OPC_ADD;
+ goto do_arith;
+ case ADDU32:
+ mips32_op = OPC_ADDU;
+ goto do_arith;
+ case SUB:
+ mips32_op = OPC_SUB;
+ goto do_arith;
+ case SUBU32:
+ mips32_op = OPC_SUBU;
+ goto do_arith;
+ case MUL:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_MUL;
+ do_arith:
+ gen_arith(ctx, mips32_op, rd, rs, rt);
+ break;
+ /* Shifts */
+ case SLLV:
+ mips32_op = OPC_SLLV;
+ goto do_shift;
+ case SRLV:
+ mips32_op = OPC_SRLV;
+ goto do_shift;
+ case SRAV:
+ mips32_op = OPC_SRAV;
+ goto do_shift;
+ case ROTRV:
+ mips32_op = OPC_ROTRV;
+ do_shift:
+ gen_shift(ctx, mips32_op, rd, rs, rt);
+ break;
+ /* Logical operations */
+ case AND:
+ mips32_op = OPC_AND;
+ goto do_logic;
+ case OR32:
+ mips32_op = OPC_OR;
+ goto do_logic;
+ case NOR:
+ mips32_op = OPC_NOR;
+ goto do_logic;
+ case XOR32:
+ mips32_op = OPC_XOR;
+ do_logic:
+ gen_logic(ctx, mips32_op, rd, rs, rt);
+ break;
+ /* Set less than */
+ case SLT:
+ mips32_op = OPC_SLT;
+ goto do_slt;
+ case SLTU:
+ mips32_op = OPC_SLTU;
+ do_slt:
+ gen_slt(ctx, mips32_op, rd, rs, rt);
+ break;
+ default:
+ goto pool32a_invalid;
+ }
+ break;
+ case 0x18:
+ minor = (ctx->opcode >> 6) & 0xf;
+ switch (minor) {
+ /* Conditional moves */
+ case MOVN: /* MUL */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* MUL */
+ gen_r6_muldiv(ctx, R6_OPC_MUL, rd, rs, rt);
+ } else {
+ /* MOVN */
+ gen_cond_move(ctx, OPC_MOVN, rd, rs, rt);
+ }
+ break;
+ case MOVZ: /* MUH */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* MUH */
+ gen_r6_muldiv(ctx, R6_OPC_MUH, rd, rs, rt);
+ } else {
+ /* MOVZ */
+ gen_cond_move(ctx, OPC_MOVZ, rd, rs, rt);
+ }
+ break;
+ case MULU:
+ check_insn(ctx, ISA_MIPS32R6);
+ gen_r6_muldiv(ctx, R6_OPC_MULU, rd, rs, rt);
+ break;
+ case MUHU:
+ check_insn(ctx, ISA_MIPS32R6);
+ gen_r6_muldiv(ctx, R6_OPC_MUHU, rd, rs, rt);
+ break;
+ case LWXS: /* DIV */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* DIV */
+ gen_r6_muldiv(ctx, R6_OPC_DIV, rd, rs, rt);
+ } else {
+ /* LWXS */
+ gen_ldxs(ctx, rs, rt, rd);
+ }
+ break;
+ case MOD:
+ check_insn(ctx, ISA_MIPS32R6);
+ gen_r6_muldiv(ctx, R6_OPC_MOD, rd, rs, rt);
+ break;
+ case R6_DIVU:
+ check_insn(ctx, ISA_MIPS32R6);
+ gen_r6_muldiv(ctx, R6_OPC_DIVU, rd, rs, rt);
+ break;
+ case MODU:
+ check_insn(ctx, ISA_MIPS32R6);
+ gen_r6_muldiv(ctx, R6_OPC_MODU, rd, rs, rt);
+ break;
+ default:
+ goto pool32a_invalid;
+ }
+ break;
+ case INS:
+ gen_bitops(ctx, OPC_INS, rt, rs, rr, rd);
+ return;
+ case LSA:
+ check_insn(ctx, ISA_MIPS32R6);
+ gen_lsa(ctx, OPC_LSA, rd, rs, rt,
+ extract32(ctx->opcode, 9, 2));
+ break;
+ case ALIGN:
+ check_insn(ctx, ISA_MIPS32R6);
+ gen_align(ctx, OPC_ALIGN, rd, rs, rt,
+ extract32(ctx->opcode, 9, 2));
+ break;
+ case EXT:
+ gen_bitops(ctx, OPC_EXT, rt, rs, rr, rd);
+ return;
+ case POOL32AXF:
+ gen_pool32axf(env, ctx, rt, rs);
+ break;
+ case BREAK32:
+ generate_exception_end(ctx, EXCP_BREAK);
+ break;
+ case SIGRIE:
+ check_insn(ctx, ISA_MIPS32R6);
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ default:
+ pool32a_invalid:
+ MIPS_INVAL("pool32a");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ case POOL32B:
+ minor = (ctx->opcode >> 12) & 0xf;
+ switch (minor) {
+ case CACHE:
+ check_cp0_enabled(ctx);
+ if (ctx->hflags & MIPS_HFLAG_ITC_CACHE) {
+ gen_cache_operation(ctx, rt, rs, imm);
+ }
+ break;
+ case LWC2:
+ case SWC2:
+ /* COP2: Not implemented. */
+ generate_exception_err(ctx, EXCP_CpU, 2);
+ break;
+#ifdef TARGET_MIPS64
+ case LDP:
+ case SDP:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ /* Fallthrough */
+#endif
+ case LWP:
+ case SWP:
+ gen_ldst_pair(ctx, minor, rt, rs, SIMM(ctx->opcode, 0, 12));
+ break;
+#ifdef TARGET_MIPS64
+ case LDM:
+ case SDM:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ /* Fallthrough */
+#endif
+ case LWM32:
+ case SWM32:
+ gen_ldst_multiple(ctx, minor, rt, rs, SIMM(ctx->opcode, 0, 12));
+ break;
+ default:
+ MIPS_INVAL("pool32b");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ case POOL32F:
+ if (ctx->CP0_Config1 & (1 << CP0C1_FP)) {
+ minor = ctx->opcode & 0x3f;
+ check_cp1_enabled(ctx);
+ switch (minor) {
+ case ALNV_PS:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_ALNV_PS;
+ goto do_madd;
+ case MADD_S:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_MADD_S;
+ goto do_madd;
+ case MADD_D:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_MADD_D;
+ goto do_madd;
+ case MADD_PS:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_MADD_PS;
+ goto do_madd;
+ case MSUB_S:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_MSUB_S;
+ goto do_madd;
+ case MSUB_D:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_MSUB_D;
+ goto do_madd;
+ case MSUB_PS:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_MSUB_PS;
+ goto do_madd;
+ case NMADD_S:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_NMADD_S;
+ goto do_madd;
+ case NMADD_D:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_NMADD_D;
+ goto do_madd;
+ case NMADD_PS:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_NMADD_PS;
+ goto do_madd;
+ case NMSUB_S:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_NMSUB_S;
+ goto do_madd;
+ case NMSUB_D:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_NMSUB_D;
+ goto do_madd;
+ case NMSUB_PS:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_NMSUB_PS;
+ do_madd:
+ gen_flt3_arith(ctx, mips32_op, rd, rr, rs, rt);
+ break;
+ case CABS_COND_FMT:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ cond = (ctx->opcode >> 6) & 0xf;
+ cc = (ctx->opcode >> 13) & 0x7;
+ fmt = (ctx->opcode >> 10) & 0x3;
+ switch (fmt) {
+ case 0x0:
+ gen_cmpabs_s(ctx, cond, rt, rs, cc);
+ break;
+ case 0x1:
+ gen_cmpabs_d(ctx, cond, rt, rs, cc);
+ break;
+ case 0x2:
+ gen_cmpabs_ps(ctx, cond, rt, rs, cc);
+ break;
+ default:
+ goto pool32f_invalid;
+ }
+ break;
+ case C_COND_FMT:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ cond = (ctx->opcode >> 6) & 0xf;
+ cc = (ctx->opcode >> 13) & 0x7;
+ fmt = (ctx->opcode >> 10) & 0x3;
+ switch (fmt) {
+ case 0x0:
+ gen_cmp_s(ctx, cond, rt, rs, cc);
+ break;
+ case 0x1:
+ gen_cmp_d(ctx, cond, rt, rs, cc);
+ break;
+ case 0x2:
+ gen_cmp_ps(ctx, cond, rt, rs, cc);
+ break;
+ default:
+ goto pool32f_invalid;
+ }
+ break;
+ case CMP_CONDN_S:
+ check_insn(ctx, ISA_MIPS32R6);
+ gen_r6_cmp_s(ctx, (ctx->opcode >> 6) & 0x1f, rt, rs, rd);
+ break;
+ case CMP_CONDN_D:
+ check_insn(ctx, ISA_MIPS32R6);
+ gen_r6_cmp_d(ctx, (ctx->opcode >> 6) & 0x1f, rt, rs, rd);
+ break;
+ case POOL32FXF:
+ gen_pool32fxf(ctx, rt, rs);
+ break;
+ case 0x00:
+ /* Paired-single pack (PLL/PLU/PUL/PUU) and CVT.PS.S */
+ switch ((ctx->opcode >> 6) & 0x7) {
+ case PLL_PS:
+ mips32_op = OPC_PLL_PS;
+ goto do_ps;
+ case PLU_PS:
+ mips32_op = OPC_PLU_PS;
+ goto do_ps;
+ case PUL_PS:
+ mips32_op = OPC_PUL_PS;
+ goto do_ps;
+ case PUU_PS:
+ mips32_op = OPC_PUU_PS;
+ goto do_ps;
+ case CVT_PS_S:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_CVT_PS_S;
+ do_ps:
+ gen_farith(ctx, mips32_op, rt, rs, rd, 0);
+ break;
+ default:
+ goto pool32f_invalid;
+ }
+ break;
+ case MIN_FMT:
+ check_insn(ctx, ISA_MIPS32R6);
+ switch ((ctx->opcode >> 9) & 0x3) {
+ case FMT_SDPS_S:
+ gen_farith(ctx, OPC_MIN_S, rt, rs, rd, 0);
+ break;
+ case FMT_SDPS_D:
+ gen_farith(ctx, OPC_MIN_D, rt, rs, rd, 0);
+ break;
+ default:
+ goto pool32f_invalid;
+ }
+ break;
+ case 0x08:
+ /* [LS][WDU]XC1 */
+ switch ((ctx->opcode >> 6) & 0x7) {
+ case LWXC1:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_LWXC1;
+ goto do_ldst_cp1;
+ case SWXC1:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_SWXC1;
+ goto do_ldst_cp1;
+ case LDXC1:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_LDXC1;
+ goto do_ldst_cp1;
+ case SDXC1:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_SDXC1;
+ goto do_ldst_cp1;
+ case LUXC1:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_LUXC1;
+ goto do_ldst_cp1;
+ case SUXC1:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_SUXC1;
+ do_ldst_cp1:
+ gen_flt3_ldst(ctx, mips32_op, rd, rd, rt, rs);
+ break;
+ default:
+ goto pool32f_invalid;
+ }
+ break;
+ case MAX_FMT:
+ check_insn(ctx, ISA_MIPS32R6);
+ switch ((ctx->opcode >> 9) & 0x3) {
+ case FMT_SDPS_S:
+ gen_farith(ctx, OPC_MAX_S, rt, rs, rd, 0);
+ break;
+ case FMT_SDPS_D:
+ gen_farith(ctx, OPC_MAX_D, rt, rs, rd, 0);
+ break;
+ default:
+ goto pool32f_invalid;
+ }
+ break;
+ case 0x18:
+ /* 3D insns */
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ fmt = (ctx->opcode >> 9) & 0x3;
+ switch ((ctx->opcode >> 6) & 0x7) {
+ case RSQRT2_FMT:
+ switch (fmt) {
+ case FMT_SDPS_S:
+ mips32_op = OPC_RSQRT2_S;
+ goto do_3d;
+ case FMT_SDPS_D:
+ mips32_op = OPC_RSQRT2_D;
+ goto do_3d;
+ case FMT_SDPS_PS:
+ mips32_op = OPC_RSQRT2_PS;
+ goto do_3d;
+ default:
+ goto pool32f_invalid;
+ }
+ break;
+ case RECIP2_FMT:
+ switch (fmt) {
+ case FMT_SDPS_S:
+ mips32_op = OPC_RECIP2_S;
+ goto do_3d;
+ case FMT_SDPS_D:
+ mips32_op = OPC_RECIP2_D;
+ goto do_3d;
+ case FMT_SDPS_PS:
+ mips32_op = OPC_RECIP2_PS;
+ goto do_3d;
+ default:
+ goto pool32f_invalid;
+ }
+ break;
+ case ADDR_PS:
+ mips32_op = OPC_ADDR_PS;
+ goto do_3d;
+ case MULR_PS:
+ mips32_op = OPC_MULR_PS;
+ do_3d:
+ gen_farith(ctx, mips32_op, rt, rs, rd, 0);
+ break;
+ default:
+ goto pool32f_invalid;
+ }
+ break;
+ case 0x20:
+ /* MOV[FT].fmt, PREFX, RINT.fmt, CLASS.fmt */
+ cc = (ctx->opcode >> 13) & 0x7;
+ fmt = (ctx->opcode >> 9) & 0x3;
+ switch ((ctx->opcode >> 6) & 0x7) {
+ case MOVF_FMT: /* RINT_FMT */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* RINT_FMT */
+ switch (fmt) {
+ case FMT_SDPS_S:
+ gen_farith(ctx, OPC_RINT_S, 0, rt, rs, 0);
+ break;
+ case FMT_SDPS_D:
+ gen_farith(ctx, OPC_RINT_D, 0, rt, rs, 0);
+ break;
+ default:
+ goto pool32f_invalid;
+ }
+ } else {
+ /* MOVF_FMT */
+ switch (fmt) {
+ case FMT_SDPS_S:
+ gen_movcf_s(ctx, rs, rt, cc, 0);
+ break;
+ case FMT_SDPS_D:
+ gen_movcf_d(ctx, rs, rt, cc, 0);
+ break;
+ case FMT_SDPS_PS:
+ check_ps(ctx);
+ gen_movcf_ps(ctx, rs, rt, cc, 0);
+ break;
+ default:
+ goto pool32f_invalid;
+ }
+ }
+ break;
+ case MOVT_FMT: /* CLASS_FMT */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* CLASS_FMT */
+ switch (fmt) {
+ case FMT_SDPS_S:
+ gen_farith(ctx, OPC_CLASS_S, 0, rt, rs, 0);
+ break;
+ case FMT_SDPS_D:
+ gen_farith(ctx, OPC_CLASS_D, 0, rt, rs, 0);
+ break;
+ default:
+ goto pool32f_invalid;
+ }
+ } else {
+ /* MOVT_FMT */
+ switch (fmt) {
+ case FMT_SDPS_S:
+ gen_movcf_s(ctx, rs, rt, cc, 1);
+ break;
+ case FMT_SDPS_D:
+ gen_movcf_d(ctx, rs, rt, cc, 1);
+ break;
+ case FMT_SDPS_PS:
+ check_ps(ctx);
+ gen_movcf_ps(ctx, rs, rt, cc, 1);
+ break;
+ default:
+ goto pool32f_invalid;
+ }
+ }
+ break;
+ case PREFX:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ break;
+ default:
+ goto pool32f_invalid;
+ }
+ break;
+#define FINSN_3ARG_SDPS(prfx) \
+ switch ((ctx->opcode >> 8) & 0x3) { \
+ case FMT_SDPS_S: \
+ mips32_op = OPC_##prfx##_S; \
+ goto do_fpop; \
+ case FMT_SDPS_D: \
+ mips32_op = OPC_##prfx##_D; \
+ goto do_fpop; \
+ case FMT_SDPS_PS: \
+ check_ps(ctx); \
+ mips32_op = OPC_##prfx##_PS; \
+ goto do_fpop; \
+ default: \
+ goto pool32f_invalid; \
+ }
+ case MINA_FMT:
+ check_insn(ctx, ISA_MIPS32R6);
+ switch ((ctx->opcode >> 9) & 0x3) {
+ case FMT_SDPS_S:
+ gen_farith(ctx, OPC_MINA_S, rt, rs, rd, 0);
+ break;
+ case FMT_SDPS_D:
+ gen_farith(ctx, OPC_MINA_D, rt, rs, rd, 0);
+ break;
+ default:
+ goto pool32f_invalid;
+ }
+ break;
+ case MAXA_FMT:
+ check_insn(ctx, ISA_MIPS32R6);
+ switch ((ctx->opcode >> 9) & 0x3) {
+ case FMT_SDPS_S:
+ gen_farith(ctx, OPC_MAXA_S, rt, rs, rd, 0);
+ break;
+ case FMT_SDPS_D:
+ gen_farith(ctx, OPC_MAXA_D, rt, rs, rd, 0);
+ break;
+ default:
+ goto pool32f_invalid;
+ }
+ break;
+ case 0x30:
+ /* regular FP ops */
+ switch ((ctx->opcode >> 6) & 0x3) {
+ case ADD_FMT:
+ FINSN_3ARG_SDPS(ADD);
+ break;
+ case SUB_FMT:
+ FINSN_3ARG_SDPS(SUB);
+ break;
+ case MUL_FMT:
+ FINSN_3ARG_SDPS(MUL);
+ break;
+ case DIV_FMT:
+ fmt = (ctx->opcode >> 8) & 0x3;
+ if (fmt == 1) {
+ mips32_op = OPC_DIV_D;
+ } else if (fmt == 0) {
+ mips32_op = OPC_DIV_S;
+ } else {
+ goto pool32f_invalid;
+ }
+ goto do_fpop;
+ default:
+ goto pool32f_invalid;
+ }
+ break;
+ case 0x38:
+ /* cmovs */
+ switch ((ctx->opcode >> 6) & 0x7) {
+ case MOVN_FMT: /* SELNEZ_FMT */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* SELNEZ_FMT */
+ switch ((ctx->opcode >> 9) & 0x3) {
+ case FMT_SDPS_S:
+ gen_sel_s(ctx, OPC_SELNEZ_S, rd, rt, rs);
+ break;
+ case FMT_SDPS_D:
+ gen_sel_d(ctx, OPC_SELNEZ_D, rd, rt, rs);
+ break;
+ default:
+ goto pool32f_invalid;
+ }
+ } else {
+ /* MOVN_FMT */
+ FINSN_3ARG_SDPS(MOVN);
+ }
+ break;
+ case MOVN_FMT_04:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ FINSN_3ARG_SDPS(MOVN);
+ break;
+ case MOVZ_FMT: /* SELEQZ_FMT */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* SELEQZ_FMT */
+ switch ((ctx->opcode >> 9) & 0x3) {
+ case FMT_SDPS_S:
+ gen_sel_s(ctx, OPC_SELEQZ_S, rd, rt, rs);
+ break;
+ case FMT_SDPS_D:
+ gen_sel_d(ctx, OPC_SELEQZ_D, rd, rt, rs);
+ break;
+ default:
+ goto pool32f_invalid;
+ }
+ } else {
+ /* MOVZ_FMT */
+ FINSN_3ARG_SDPS(MOVZ);
+ }
+ break;
+ case MOVZ_FMT_05:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ FINSN_3ARG_SDPS(MOVZ);
+ break;
+ case SEL_FMT:
+ check_insn(ctx, ISA_MIPS32R6);
+ switch ((ctx->opcode >> 9) & 0x3) {
+ case FMT_SDPS_S:
+ gen_sel_s(ctx, OPC_SEL_S, rd, rt, rs);
+ break;
+ case FMT_SDPS_D:
+ gen_sel_d(ctx, OPC_SEL_D, rd, rt, rs);
+ break;
+ default:
+ goto pool32f_invalid;
+ }
+ break;
+ case MADDF_FMT:
+ check_insn(ctx, ISA_MIPS32R6);
+ switch ((ctx->opcode >> 9) & 0x3) {
+ case FMT_SDPS_S:
+ mips32_op = OPC_MADDF_S;
+ goto do_fpop;
+ case FMT_SDPS_D:
+ mips32_op = OPC_MADDF_D;
+ goto do_fpop;
+ default:
+ goto pool32f_invalid;
+ }
+ break;
+ case MSUBF_FMT:
+ check_insn(ctx, ISA_MIPS32R6);
+ switch ((ctx->opcode >> 9) & 0x3) {
+ case FMT_SDPS_S:
+ mips32_op = OPC_MSUBF_S;
+ goto do_fpop;
+ case FMT_SDPS_D:
+ mips32_op = OPC_MSUBF_D;
+ goto do_fpop;
+ default:
+ goto pool32f_invalid;
+ }
+ break;
+ default:
+ goto pool32f_invalid;
+ }
+ break;
+ do_fpop:
+ gen_farith(ctx, mips32_op, rt, rs, rd, 0);
+ break;
+ default:
+ pool32f_invalid:
+ MIPS_INVAL("pool32f");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ } else {
+ generate_exception_err(ctx, EXCP_CpU, 1);
+ }
+ break;
+ case POOL32I:
+ minor = (ctx->opcode >> 21) & 0x1f;
+ switch (minor) {
+ case BLTZ:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ gen_compute_branch(ctx, OPC_BLTZ, 4, rs, -1, imm << 1, 4);
+ break;
+ case BLTZAL:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ gen_compute_branch(ctx, OPC_BLTZAL, 4, rs, -1, imm << 1, 4);
+ ctx->hflags |= MIPS_HFLAG_BDS_STRICT;
+ break;
+ case BLTZALS:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ gen_compute_branch(ctx, OPC_BLTZAL, 4, rs, -1, imm << 1, 2);
+ ctx->hflags |= MIPS_HFLAG_BDS_STRICT;
+ break;
+ case BGEZ:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ gen_compute_branch(ctx, OPC_BGEZ, 4, rs, -1, imm << 1, 4);
+ break;
+ case BGEZAL:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ gen_compute_branch(ctx, OPC_BGEZAL, 4, rs, -1, imm << 1, 4);
+ ctx->hflags |= MIPS_HFLAG_BDS_STRICT;
+ break;
+ case BGEZALS:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ gen_compute_branch(ctx, OPC_BGEZAL, 4, rs, -1, imm << 1, 2);
+ ctx->hflags |= MIPS_HFLAG_BDS_STRICT;
+ break;
+ case BLEZ:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ gen_compute_branch(ctx, OPC_BLEZ, 4, rs, -1, imm << 1, 4);
+ break;
+ case BGTZ:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ gen_compute_branch(ctx, OPC_BGTZ, 4, rs, -1, imm << 1, 4);
+ break;
+
+ /* Traps */
+ case TLTI: /* BC1EQZC */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* BC1EQZC */
+ check_cp1_enabled(ctx);
+ gen_compute_branch1_r6(ctx, OPC_BC1EQZ, rs, imm << 1, 0);
+ } else {
+ /* TLTI */
+ mips32_op = OPC_TLTI;
+ goto do_trapi;
+ }
+ break;
+ case TGEI: /* BC1NEZC */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* BC1NEZC */
+ check_cp1_enabled(ctx);
+ gen_compute_branch1_r6(ctx, OPC_BC1NEZ, rs, imm << 1, 0);
+ } else {
+ /* TGEI */
+ mips32_op = OPC_TGEI;
+ goto do_trapi;
+ }
+ break;
+ case TLTIU:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_TLTIU;
+ goto do_trapi;
+ case TGEIU:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_TGEIU;
+ goto do_trapi;
+ case TNEI: /* SYNCI */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* SYNCI */
+ /* Break the TB to be able to sync copied instructions
+ immediately */
+ ctx->bstate = BS_STOP;
+ } else {
+ /* TNEI */
+ mips32_op = OPC_TNEI;
+ goto do_trapi;
+ }
+ break;
+ case TEQI:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_TEQI;
+ do_trapi:
+ gen_trap(ctx, mips32_op, rs, -1, imm);
+ break;
+
+ case BNEZC:
+ case BEQZC:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ gen_compute_branch(ctx, minor == BNEZC ? OPC_BNE : OPC_BEQ,
+ 4, rs, 0, imm << 1, 0);
+ /* Compact branches don't have a delay slot, so just let
+ the normal delay slot handling take us to the branch
+ target. */
+ break;
+ case LUI:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ gen_logic_imm(ctx, OPC_LUI, rs, 0, imm);
+ break;
+ case SYNCI:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ /* Break the TB to be able to sync copied instructions
+ immediately */
+ ctx->bstate = BS_STOP;
+ break;
+ case BC2F:
+ case BC2T:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ /* COP2: Not implemented. */
+ generate_exception_err(ctx, EXCP_CpU, 2);
+ break;
+ case BC1F:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = (ctx->opcode & (1 << 16)) ? OPC_BC1FANY2 : OPC_BC1F;
+ goto do_cp1branch;
+ case BC1T:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = (ctx->opcode & (1 << 16)) ? OPC_BC1TANY2 : OPC_BC1T;
+ goto do_cp1branch;
+ case BC1ANY4F:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_BC1FANY4;
+ goto do_cp1mips3d;
+ case BC1ANY4T:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_BC1TANY4;
+ do_cp1mips3d:
+ check_cop1x(ctx);
+ check_insn(ctx, ASE_MIPS3D);
+ /* Fall through */
+ do_cp1branch:
+ if (env->CP0_Config1 & (1 << CP0C1_FP)) {
+ check_cp1_enabled(ctx);
+ gen_compute_branch1(ctx, mips32_op,
+ (ctx->opcode >> 18) & 0x7, imm << 1);
+ } else {
+ generate_exception_err(ctx, EXCP_CpU, 1);
+ }
+ break;
+ case BPOSGE64:
+ case BPOSGE32:
+ /* MIPS DSP: not implemented */
+ /* Fall through */
+ default:
+ MIPS_INVAL("pool32i");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ case POOL32C:
+ minor = (ctx->opcode >> 12) & 0xf;
+ offset = sextract32(ctx->opcode, 0,
+ (ctx->insn_flags & ISA_MIPS32R6) ? 9 : 12);
+ switch (minor) {
+ case LWL:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_LWL;
+ goto do_ld_lr;
+ case SWL:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_SWL;
+ goto do_st_lr;
+ case LWR:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_LWR;
+ goto do_ld_lr;
+ case SWR:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_SWR;
+ goto do_st_lr;
+#if defined(TARGET_MIPS64)
+ case LDL:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_LDL;
+ goto do_ld_lr;
+ case SDL:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_SDL;
+ goto do_st_lr;
+ case LDR:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_LDR;
+ goto do_ld_lr;
+ case SDR:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ mips32_op = OPC_SDR;
+ goto do_st_lr;
+ case LWU:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ mips32_op = OPC_LWU;
+ goto do_ld_lr;
+ case LLD:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ mips32_op = OPC_LLD;
+ goto do_ld_lr;
+#endif
+ case LL:
+ mips32_op = OPC_LL;
+ goto do_ld_lr;
+ do_ld_lr:
+ gen_ld(ctx, mips32_op, rt, rs, offset);
+ break;
+ do_st_lr:
+ gen_st(ctx, mips32_op, rt, rs, SIMM(ctx->opcode, 0, 12));
+ break;
+ case SC:
+ gen_st_cond(ctx, OPC_SC, rt, rs, offset);
+ break;
+#if defined(TARGET_MIPS64)
+ case SCD:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_st_cond(ctx, OPC_SCD, rt, rs, offset);
+ break;
+#endif
+ case PREF:
+ /* Treat as no-op */
+ if ((ctx->insn_flags & ISA_MIPS32R6) && (rt >= 24)) {
+ /* hint codes 24-31 are reserved and signal RI */
+ generate_exception(ctx, EXCP_RI);
+ }
+ break;
+ default:
+ MIPS_INVAL("pool32c");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ case ADDI32: /* AUI, LUI */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* AUI, LUI */
+ gen_logic_imm(ctx, OPC_LUI, rt, rs, imm);
+ } else {
+ /* ADDI32 */
+ mips32_op = OPC_ADDI;
+ goto do_addi;
+ }
+ break;
+ case ADDIU32:
+ mips32_op = OPC_ADDIU;
+ do_addi:
+ gen_arith_imm(ctx, mips32_op, rt, rs, imm);
+ break;
+
+ /* Logical operations */
+ case ORI32:
+ mips32_op = OPC_ORI;
+ goto do_logici;
+ case XORI32:
+ mips32_op = OPC_XORI;
+ goto do_logici;
+ case ANDI32:
+ mips32_op = OPC_ANDI;
+ do_logici:
+ gen_logic_imm(ctx, mips32_op, rt, rs, imm);
+ break;
+
+ /* Set less than immediate */
+ case SLTI32:
+ mips32_op = OPC_SLTI;
+ goto do_slti;
+ case SLTIU32:
+ mips32_op = OPC_SLTIU;
+ do_slti:
+ gen_slt_imm(ctx, mips32_op, rt, rs, imm);
+ break;
+ case JALX32:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 2;
+ gen_compute_branch(ctx, OPC_JALX, 4, rt, rs, offset, 4);
+ ctx->hflags |= MIPS_HFLAG_BDS_STRICT;
+ break;
+ case JALS32: /* BOVC, BEQC, BEQZALC */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ if (rs >= rt) {
+ /* BOVC */
+ mips32_op = OPC_BOVC;
+ } else if (rs < rt && rs == 0) {
+ /* BEQZALC */
+ mips32_op = OPC_BEQZALC;
+ } else {
+ /* BEQC */
+ mips32_op = OPC_BEQC;
+ }
+ gen_compute_compact_branch(ctx, mips32_op, rs, rt, imm << 1);
+ } else {
+ /* JALS32 */
+ offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 1;
+ gen_compute_branch(ctx, OPC_JAL, 4, rt, rs, offset, 2);
+ ctx->hflags |= MIPS_HFLAG_BDS_STRICT;
+ }
+ break;
+ case BEQ32: /* BC */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* BC */
+ gen_compute_compact_branch(ctx, OPC_BC, 0, 0,
+ sextract32(ctx->opcode << 1, 0, 27));
+ } else {
+ /* BEQ32 */
+ gen_compute_branch(ctx, OPC_BEQ, 4, rt, rs, imm << 1, 4);
+ }
+ break;
+ case BNE32: /* BALC */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* BALC */
+ gen_compute_compact_branch(ctx, OPC_BALC, 0, 0,
+ sextract32(ctx->opcode << 1, 0, 27));
+ } else {
+ /* BNE32 */
+ gen_compute_branch(ctx, OPC_BNE, 4, rt, rs, imm << 1, 4);
+ }
+ break;
+ case J32: /* BGTZC, BLTZC, BLTC */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ if (rs == 0 && rt != 0) {
+ /* BGTZC */
+ mips32_op = OPC_BGTZC;
+ } else if (rs != 0 && rt != 0 && rs == rt) {
+ /* BLTZC */
+ mips32_op = OPC_BLTZC;
+ } else {
+ /* BLTC */
+ mips32_op = OPC_BLTC;
+ }
+ gen_compute_compact_branch(ctx, mips32_op, rs, rt, imm << 1);
+ } else {
+ /* J32 */
+ gen_compute_branch(ctx, OPC_J, 4, rt, rs,
+ (int32_t)(ctx->opcode & 0x3FFFFFF) << 1, 4);
+ }
+ break;
+ case JAL32: /* BLEZC, BGEZC, BGEC */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ if (rs == 0 && rt != 0) {
+ /* BLEZC */
+ mips32_op = OPC_BLEZC;
+ } else if (rs != 0 && rt != 0 && rs == rt) {
+ /* BGEZC */
+ mips32_op = OPC_BGEZC;
+ } else {
+ /* BGEC */
+ mips32_op = OPC_BGEC;
+ }
+ gen_compute_compact_branch(ctx, mips32_op, rs, rt, imm << 1);
+ } else {
+ /* JAL32 */
+ gen_compute_branch(ctx, OPC_JAL, 4, rt, rs,
+ (int32_t)(ctx->opcode & 0x3FFFFFF) << 1, 4);
+ ctx->hflags |= MIPS_HFLAG_BDS_STRICT;
+ }
+ break;
+ /* Floating point (COP1) */
+ case LWC132:
+ mips32_op = OPC_LWC1;
+ goto do_cop1;
+ case LDC132:
+ mips32_op = OPC_LDC1;
+ goto do_cop1;
+ case SWC132:
+ mips32_op = OPC_SWC1;
+ goto do_cop1;
+ case SDC132:
+ mips32_op = OPC_SDC1;
+ do_cop1:
+ gen_cop1_ldst(ctx, mips32_op, rt, rs, imm);
+ break;
+ case ADDIUPC: /* PCREL: ADDIUPC, AUIPC, ALUIPC, LWPC */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* PCREL: ADDIUPC, AUIPC, ALUIPC, LWPC */
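+            /* ADDIUPC and LWPC use the word-aligned PC (ctx->pc & ~0x3) as
+             * the base address below, while AUIPC and ALUIPC use the raw PC.
+             */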
+ switch ((ctx->opcode >> 16) & 0x1f) {
+ case ADDIUPC_00 ... ADDIUPC_07:
+ gen_pcrel(ctx, OPC_ADDIUPC, ctx->pc & ~0x3, rt);
+ break;
+ case AUIPC:
+ gen_pcrel(ctx, OPC_AUIPC, ctx->pc, rt);
+ break;
+ case ALUIPC:
+ gen_pcrel(ctx, OPC_ALUIPC, ctx->pc, rt);
+ break;
+ case LWPC_08 ... LWPC_0F:
+ gen_pcrel(ctx, R6_OPC_LWPC, ctx->pc & ~0x3, rt);
+ break;
+ default:
+ generate_exception(ctx, EXCP_RI);
+ break;
+ }
+ } else {
+ /* ADDIUPC */
+ int reg = mmreg(ZIMM(ctx->opcode, 23, 3));
+ int offset = SIMM(ctx->opcode, 0, 23) << 2;
+
+ gen_addiupc(ctx, reg, offset, 0, 0);
+ }
+ break;
+ case BNVC: /* BNEC, BNEZALC */
+ check_insn(ctx, ISA_MIPS32R6);
+ if (rs >= rt) {
+ /* BNVC */
+ mips32_op = OPC_BNVC;
+ } else if (rs < rt && rs == 0) {
+ /* BNEZALC */
+ mips32_op = OPC_BNEZALC;
+ } else {
+ /* BNEC */
+ mips32_op = OPC_BNEC;
+ }
+ gen_compute_compact_branch(ctx, mips32_op, rs, rt, imm << 1);
+ break;
+ case R6_BNEZC: /* JIALC */
+ check_insn(ctx, ISA_MIPS32R6);
+ if (rt != 0) {
+ /* BNEZC */
+ gen_compute_compact_branch(ctx, OPC_BNEZC, rt, 0,
+ sextract32(ctx->opcode << 1, 0, 22));
+ } else {
+ /* JIALC */
+ gen_compute_compact_branch(ctx, OPC_JIALC, 0, rs, imm);
+ }
+ break;
+ case R6_BEQZC: /* JIC */
+ check_insn(ctx, ISA_MIPS32R6);
+ if (rt != 0) {
+ /* BEQZC */
+ gen_compute_compact_branch(ctx, OPC_BEQZC, rt, 0,
+ sextract32(ctx->opcode << 1, 0, 22));
+ } else {
+ /* JIC */
+ gen_compute_compact_branch(ctx, OPC_JIC, 0, rs, imm);
+ }
+ break;
+ case BLEZALC: /* BGEZALC, BGEUC */
+ check_insn(ctx, ISA_MIPS32R6);
+ if (rs == 0 && rt != 0) {
+ /* BLEZALC */
+ mips32_op = OPC_BLEZALC;
+ } else if (rs != 0 && rt != 0 && rs == rt) {
+ /* BGEZALC */
+ mips32_op = OPC_BGEZALC;
+ } else {
+ /* BGEUC */
+ mips32_op = OPC_BGEUC;
+ }
+ gen_compute_compact_branch(ctx, mips32_op, rs, rt, imm << 1);
+ break;
+ case BGTZALC: /* BLTZALC, BLTUC */
+ check_insn(ctx, ISA_MIPS32R6);
+ if (rs == 0 && rt != 0) {
+ /* BGTZALC */
+ mips32_op = OPC_BGTZALC;
+ } else if (rs != 0 && rt != 0 && rs == rt) {
+ /* BLTZALC */
+ mips32_op = OPC_BLTZALC;
+ } else {
+ /* BLTUC */
+ mips32_op = OPC_BLTUC;
+ }
+ gen_compute_compact_branch(ctx, mips32_op, rs, rt, imm << 1);
+ break;
+ /* Loads and stores */
+ case LB32:
+ mips32_op = OPC_LB;
+ goto do_ld;
+ case LBU32:
+ mips32_op = OPC_LBU;
+ goto do_ld;
+ case LH32:
+ mips32_op = OPC_LH;
+ goto do_ld;
+ case LHU32:
+ mips32_op = OPC_LHU;
+ goto do_ld;
+ case LW32:
+ mips32_op = OPC_LW;
+ goto do_ld;
+#ifdef TARGET_MIPS64
+ case LD32:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ mips32_op = OPC_LD;
+ goto do_ld;
+ case SD32:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ mips32_op = OPC_SD;
+ goto do_st;
+#endif
+ case SB32:
+ mips32_op = OPC_SB;
+ goto do_st;
+ case SH32:
+ mips32_op = OPC_SH;
+ goto do_st;
+ case SW32:
+ mips32_op = OPC_SW;
+ goto do_st;
+ do_ld:
+ gen_ld(ctx, mips32_op, rt, rs, imm);
+ break;
+ do_st:
+ gen_st(ctx, mips32_op, rt, rs, imm);
+ break;
+ default:
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+}
+
+static int decode_micromips_opc(CPUMIPSState *env, DisasContext *ctx)
+{
+ uint32_t op;
+
+ /* make sure instructions are on a halfword boundary */
+ if (ctx->pc & 0x1) {
+ env->CP0_BadVAddr = ctx->pc;
+ generate_exception_end(ctx, EXCP_AdEL);
+ return 2;
+ }
+
+ op = (ctx->opcode >> 10) & 0x3f;
+ /* Enforce properly-sized instructions in a delay slot */
+ if (ctx->hflags & MIPS_HFLAG_BDS_STRICT) {
+ switch (op & 0x7) { /* MSB-3..MSB-5 */
+ case 0:
+ /* POOL32A, POOL32B, POOL32I, POOL32C */
+ case 4:
+ /* ADDI32, ADDIU32, ORI32, XORI32, SLTI32, SLTIU32, ANDI32, JALX32 */
+ case 5:
+ /* LBU32, LHU32, POOL32F, JALS32, BEQ32, BNE32, J32, JAL32 */
+ case 6:
+ /* SB32, SH32, ADDIUPC, SWC132, SDC132, SW32 */
+ case 7:
+ /* LB32, LH32, LWC132, LDC132, LW32 */
+ if (ctx->hflags & MIPS_HFLAG_BDS16) {
+ generate_exception_end(ctx, EXCP_RI);
+ return 2;
+ }
+ break;
+ case 1:
+ /* POOL16A, POOL16B, POOL16C, LWGP16, POOL16F */
+ case 2:
+ /* LBU16, LHU16, LWSP16, LW16, SB16, SH16, SWSP16, SW16 */
+ case 3:
+ /* MOVE16, ANDI16, POOL16D, POOL16E, BEQZ16, BNEZ16, B16, LI16 */
+ if (ctx->hflags & MIPS_HFLAG_BDS32) {
+ generate_exception_end(ctx, EXCP_RI);
+ return 2;
+ }
+ break;
+ }
+ }
+
+ switch (op) {
+ case POOL16A:
+ {
+ int rd = mmreg(uMIPS_RD(ctx->opcode));
+ int rs1 = mmreg(uMIPS_RS1(ctx->opcode));
+ int rs2 = mmreg(uMIPS_RS2(ctx->opcode));
+ uint32_t opc = 0;
+
+ switch (ctx->opcode & 0x1) {
+ case ADDU16:
+ opc = OPC_ADDU;
+ break;
+ case SUBU16:
+ opc = OPC_SUBU;
+ break;
+ }
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+                /* In Release 6 the register number location in
+                 * the instruction encoding has changed.
+                 */
+ gen_arith(ctx, opc, rs1, rd, rs2);
+ } else {
+ gen_arith(ctx, opc, rd, rs1, rs2);
+ }
+ }
+ break;
+ case POOL16B:
+ {
+ int rd = mmreg(uMIPS_RD(ctx->opcode));
+ int rs = mmreg(uMIPS_RS(ctx->opcode));
+ int amount = (ctx->opcode >> 1) & 0x7;
+ uint32_t opc = 0;
+ amount = amount == 0 ? 8 : amount;
+
+ switch (ctx->opcode & 0x1) {
+ case SLL16:
+ opc = OPC_SLL;
+ break;
+ case SRL16:
+ opc = OPC_SRL;
+ break;
+ }
+
+ gen_shift_imm(ctx, opc, rd, rs, amount);
+ }
+ break;
+ case POOL16C:
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ gen_pool16c_r6_insn(ctx);
+ } else {
+ gen_pool16c_insn(ctx);
+ }
+ break;
+ case LWGP16:
+ {
+ int rd = mmreg(uMIPS_RD(ctx->opcode));
+ int rb = 28; /* GP */
+ int16_t offset = SIMM(ctx->opcode, 0, 7) << 2;
+
+ gen_ld(ctx, OPC_LW, rd, rb, offset);
+ }
+ break;
+ case POOL16F:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ if (ctx->opcode & 1) {
+ generate_exception_end(ctx, EXCP_RI);
+ } else {
+ /* MOVEP */
+ int enc_dest = uMIPS_RD(ctx->opcode);
+ int enc_rt = uMIPS_RS2(ctx->opcode);
+ int enc_rs = uMIPS_RS1(ctx->opcode);
+ gen_movep(ctx, enc_dest, enc_rt, enc_rs);
+ }
+ break;
+ case LBU16:
+ {
+ int rd = mmreg(uMIPS_RD(ctx->opcode));
+ int rb = mmreg(uMIPS_RS(ctx->opcode));
+ int16_t offset = ZIMM(ctx->opcode, 0, 4);
+ offset = (offset == 0xf ? -1 : offset);
+
+ gen_ld(ctx, OPC_LBU, rd, rb, offset);
+ }
+ break;
+ case LHU16:
+ {
+ int rd = mmreg(uMIPS_RD(ctx->opcode));
+ int rb = mmreg(uMIPS_RS(ctx->opcode));
+ int16_t offset = ZIMM(ctx->opcode, 0, 4) << 1;
+
+ gen_ld(ctx, OPC_LHU, rd, rb, offset);
+ }
+ break;
+ case LWSP16:
+ {
+ int rd = (ctx->opcode >> 5) & 0x1f;
+ int rb = 29; /* SP */
+ int16_t offset = ZIMM(ctx->opcode, 0, 5) << 2;
+
+ gen_ld(ctx, OPC_LW, rd, rb, offset);
+ }
+ break;
+ case LW16:
+ {
+ int rd = mmreg(uMIPS_RD(ctx->opcode));
+ int rb = mmreg(uMIPS_RS(ctx->opcode));
+ int16_t offset = ZIMM(ctx->opcode, 0, 4) << 2;
+
+ gen_ld(ctx, OPC_LW, rd, rb, offset);
+ }
+ break;
+ case SB16:
+ {
+ int rd = mmreg2(uMIPS_RD(ctx->opcode));
+ int rb = mmreg(uMIPS_RS(ctx->opcode));
+ int16_t offset = ZIMM(ctx->opcode, 0, 4);
+
+ gen_st(ctx, OPC_SB, rd, rb, offset);
+ }
+ break;
+ case SH16:
+ {
+ int rd = mmreg2(uMIPS_RD(ctx->opcode));
+ int rb = mmreg(uMIPS_RS(ctx->opcode));
+ int16_t offset = ZIMM(ctx->opcode, 0, 4) << 1;
+
+ gen_st(ctx, OPC_SH, rd, rb, offset);
+ }
+ break;
+ case SWSP16:
+ {
+ int rd = (ctx->opcode >> 5) & 0x1f;
+ int rb = 29; /* SP */
+ int16_t offset = ZIMM(ctx->opcode, 0, 5) << 2;
+
+ gen_st(ctx, OPC_SW, rd, rb, offset);
+ }
+ break;
+ case SW16:
+ {
+ int rd = mmreg2(uMIPS_RD(ctx->opcode));
+ int rb = mmreg(uMIPS_RS(ctx->opcode));
+ int16_t offset = ZIMM(ctx->opcode, 0, 4) << 2;
+
+ gen_st(ctx, OPC_SW, rd, rb, offset);
+ }
+ break;
+ case MOVE16:
+ {
+ int rd = uMIPS_RD5(ctx->opcode);
+ int rs = uMIPS_RS5(ctx->opcode);
+
+ gen_arith(ctx, OPC_ADDU, rd, rs, 0);
+ }
+ break;
+ case ANDI16:
+ gen_andi16(ctx);
+ break;
+ case POOL16D:
+ switch (ctx->opcode & 0x1) {
+ case ADDIUS5:
+ gen_addius5(ctx);
+ break;
+ case ADDIUSP:
+ gen_addiusp(ctx);
+ break;
+ }
+ break;
+ case POOL16E:
+ switch (ctx->opcode & 0x1) {
+ case ADDIUR2:
+ gen_addiur2(ctx);
+ break;
+ case ADDIUR1SP:
+ gen_addiur1sp(ctx);
+ break;
+ }
+ break;
+ case B16: /* BC16 */
+ gen_compute_branch(ctx, OPC_BEQ, 2, 0, 0,
+ sextract32(ctx->opcode, 0, 10) << 1,
+ (ctx->insn_flags & ISA_MIPS32R6) ? 0 : 4);
+ break;
+ case BNEZ16: /* BNEZC16 */
+ case BEQZ16: /* BEQZC16 */
+ gen_compute_branch(ctx, op == BNEZ16 ? OPC_BNE : OPC_BEQ, 2,
+ mmreg(uMIPS_RD(ctx->opcode)),
+ 0, sextract32(ctx->opcode, 0, 7) << 1,
+ (ctx->insn_flags & ISA_MIPS32R6) ? 0 : 4);
+
+ break;
+ case LI16:
+ {
+ int reg = mmreg(uMIPS_RD(ctx->opcode));
+ int imm = ZIMM(ctx->opcode, 0, 7);
+
+ imm = (imm == 0x7f ? -1 : imm);
+ tcg_gen_movi_tl(cpu_gpr[reg], imm);
+ }
+ break;
+ case RES_29:
+ case RES_31:
+ case RES_39:
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ default:
+ decode_micromips32_opc(env, ctx);
+ return 4;
+ }
+
+ return 2;
+}
+
+/* SmartMIPS extension to MIPS32 */
+
+#if defined(TARGET_MIPS64)
+
+/* MDMX extension to MIPS64 */
+
+#endif
+
+/* MIPSDSP functions. */
+static void gen_mipsdsp_ld(DisasContext *ctx, uint32_t opc,
+ int rd, int base, int offset)
+{
+ TCGv t0;
+
+ check_dsp(ctx);
+ t0 = tcg_temp_new();
+
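+    /*
+     * Indexed addressing: the effective address for LBUX/LHX/LWX/LDX is
+     * GPR[base] + GPR[offset].  Register 0 always reads as zero, so when
+     * either index register is 0 the other one is used directly.
+     */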
+ if (base == 0) {
+ gen_load_gpr(t0, offset);
+ } else if (offset == 0) {
+ gen_load_gpr(t0, base);
+ } else {
+ gen_op_addr_add(ctx, t0, cpu_gpr[base], cpu_gpr[offset]);
+ }
+
+ switch (opc) {
+ case OPC_LBUX:
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_UB);
+ gen_store_gpr(t0, rd);
+ break;
+ case OPC_LHX:
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESW);
+ gen_store_gpr(t0, rd);
+ break;
+ case OPC_LWX:
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESL);
+ gen_store_gpr(t0, rd);
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_LDX:
+ tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TEQ);
+ gen_store_gpr(t0, rd);
+ break;
+#endif
+ }
+ tcg_temp_free(t0);
+}
+
+static void gen_mipsdsp_arith(DisasContext *ctx, uint32_t op1, uint32_t op2,
+ int ret, int v1, int v2)
+{
+ TCGv v1_t;
+ TCGv v2_t;
+
+ if (ret == 0) {
+ /* Treat as NOP. */
+ return;
+ }
+
+ v1_t = tcg_temp_new();
+ v2_t = tcg_temp_new();
+
+ gen_load_gpr(v1_t, v1);
+ gen_load_gpr(v2_t, v2);
+
+ switch (op1) {
+    /* OPC_MULT_G_2E is equal to OPC_ADDUH_QB_DSP */
+ case OPC_MULT_G_2E:
+ check_dspr2(ctx);
+ switch (op2) {
+ case OPC_ADDUH_QB:
+ gen_helper_adduh_qb(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_ADDUH_R_QB:
+ gen_helper_adduh_r_qb(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_ADDQH_PH:
+ gen_helper_addqh_ph(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_ADDQH_R_PH:
+ gen_helper_addqh_r_ph(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_ADDQH_W:
+ gen_helper_addqh_w(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_ADDQH_R_W:
+ gen_helper_addqh_r_w(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_SUBUH_QB:
+ gen_helper_subuh_qb(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_SUBUH_R_QB:
+ gen_helper_subuh_r_qb(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_SUBQH_PH:
+ gen_helper_subqh_ph(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_SUBQH_R_PH:
+ gen_helper_subqh_r_ph(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_SUBQH_W:
+ gen_helper_subqh_w(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_SUBQH_R_W:
+ gen_helper_subqh_r_w(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ }
+ break;
+ case OPC_ABSQ_S_PH_DSP:
+ switch (op2) {
+ case OPC_ABSQ_S_QB:
+ check_dspr2(ctx);
+ gen_helper_absq_s_qb(cpu_gpr[ret], v2_t, cpu_env);
+ break;
+ case OPC_ABSQ_S_PH:
+ check_dsp(ctx);
+ gen_helper_absq_s_ph(cpu_gpr[ret], v2_t, cpu_env);
+ break;
+ case OPC_ABSQ_S_W:
+ check_dsp(ctx);
+ gen_helper_absq_s_w(cpu_gpr[ret], v2_t, cpu_env);
+ break;
+ case OPC_PRECEQ_W_PHL:
+ check_dsp(ctx);
+ tcg_gen_andi_tl(cpu_gpr[ret], v2_t, 0xFFFF0000);
+ tcg_gen_ext32s_tl(cpu_gpr[ret], cpu_gpr[ret]);
+ break;
+ case OPC_PRECEQ_W_PHR:
+ check_dsp(ctx);
+ tcg_gen_andi_tl(cpu_gpr[ret], v2_t, 0x0000FFFF);
+ tcg_gen_shli_tl(cpu_gpr[ret], cpu_gpr[ret], 16);
+ tcg_gen_ext32s_tl(cpu_gpr[ret], cpu_gpr[ret]);
+ break;
+ case OPC_PRECEQU_PH_QBL:
+ check_dsp(ctx);
+ gen_helper_precequ_ph_qbl(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_PRECEQU_PH_QBR:
+ check_dsp(ctx);
+ gen_helper_precequ_ph_qbr(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_PRECEQU_PH_QBLA:
+ check_dsp(ctx);
+ gen_helper_precequ_ph_qbla(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_PRECEQU_PH_QBRA:
+ check_dsp(ctx);
+ gen_helper_precequ_ph_qbra(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_PRECEU_PH_QBL:
+ check_dsp(ctx);
+ gen_helper_preceu_ph_qbl(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_PRECEU_PH_QBR:
+ check_dsp(ctx);
+ gen_helper_preceu_ph_qbr(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_PRECEU_PH_QBLA:
+ check_dsp(ctx);
+ gen_helper_preceu_ph_qbla(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_PRECEU_PH_QBRA:
+ check_dsp(ctx);
+ gen_helper_preceu_ph_qbra(cpu_gpr[ret], v2_t);
+ break;
+ }
+ break;
+ case OPC_ADDU_QB_DSP:
+ switch (op2) {
+ case OPC_ADDQ_PH:
+ check_dsp(ctx);
+ gen_helper_addq_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_ADDQ_S_PH:
+ check_dsp(ctx);
+ gen_helper_addq_s_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_ADDQ_S_W:
+ check_dsp(ctx);
+ gen_helper_addq_s_w(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_ADDU_QB:
+ check_dsp(ctx);
+ gen_helper_addu_qb(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_ADDU_S_QB:
+ check_dsp(ctx);
+ gen_helper_addu_s_qb(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_ADDU_PH:
+ check_dspr2(ctx);
+ gen_helper_addu_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_ADDU_S_PH:
+ check_dspr2(ctx);
+ gen_helper_addu_s_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SUBQ_PH:
+ check_dsp(ctx);
+ gen_helper_subq_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SUBQ_S_PH:
+ check_dsp(ctx);
+ gen_helper_subq_s_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SUBQ_S_W:
+ check_dsp(ctx);
+ gen_helper_subq_s_w(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SUBU_QB:
+ check_dsp(ctx);
+ gen_helper_subu_qb(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SUBU_S_QB:
+ check_dsp(ctx);
+ gen_helper_subu_s_qb(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SUBU_PH:
+ check_dspr2(ctx);
+ gen_helper_subu_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SUBU_S_PH:
+ check_dspr2(ctx);
+ gen_helper_subu_s_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_ADDSC:
+ check_dsp(ctx);
+ gen_helper_addsc(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_ADDWC:
+ check_dsp(ctx);
+ gen_helper_addwc(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MODSUB:
+ check_dsp(ctx);
+ gen_helper_modsub(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_RADDU_W_QB:
+ check_dsp(ctx);
+ gen_helper_raddu_w_qb(cpu_gpr[ret], v1_t);
+ break;
+ }
+ break;
+ case OPC_CMPU_EQ_QB_DSP:
+ switch (op2) {
+ case OPC_PRECR_QB_PH:
+ check_dspr2(ctx);
+ gen_helper_precr_qb_ph(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_PRECRQ_QB_PH:
+ check_dsp(ctx);
+ gen_helper_precrq_qb_ph(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_PRECR_SRA_PH_W:
+ check_dspr2(ctx);
+ {
+ TCGv_i32 sa_t = tcg_const_i32(v2);
+ gen_helper_precr_sra_ph_w(cpu_gpr[ret], sa_t, v1_t,
+ cpu_gpr[ret]);
+ tcg_temp_free_i32(sa_t);
+ break;
+ }
+ case OPC_PRECR_SRA_R_PH_W:
+ check_dspr2(ctx);
+ {
+ TCGv_i32 sa_t = tcg_const_i32(v2);
+ gen_helper_precr_sra_r_ph_w(cpu_gpr[ret], sa_t, v1_t,
+ cpu_gpr[ret]);
+ tcg_temp_free_i32(sa_t);
+ break;
+ }
+ case OPC_PRECRQ_PH_W:
+ check_dsp(ctx);
+ gen_helper_precrq_ph_w(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_PRECRQ_RS_PH_W:
+ check_dsp(ctx);
+ gen_helper_precrq_rs_ph_w(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_PRECRQU_S_QB_PH:
+ check_dsp(ctx);
+ gen_helper_precrqu_s_qb_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ }
+ break;
+#ifdef TARGET_MIPS64
+ case OPC_ABSQ_S_QH_DSP:
+ switch (op2) {
+ case OPC_PRECEQ_L_PWL:
+ check_dsp(ctx);
+ tcg_gen_andi_tl(cpu_gpr[ret], v2_t, 0xFFFFFFFF00000000ull);
+ break;
+ case OPC_PRECEQ_L_PWR:
+ check_dsp(ctx);
+ tcg_gen_shli_tl(cpu_gpr[ret], v2_t, 32);
+ break;
+ case OPC_PRECEQ_PW_QHL:
+ check_dsp(ctx);
+ gen_helper_preceq_pw_qhl(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_PRECEQ_PW_QHR:
+ check_dsp(ctx);
+ gen_helper_preceq_pw_qhr(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_PRECEQ_PW_QHLA:
+ check_dsp(ctx);
+ gen_helper_preceq_pw_qhla(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_PRECEQ_PW_QHRA:
+ check_dsp(ctx);
+ gen_helper_preceq_pw_qhra(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_PRECEQU_QH_OBL:
+ check_dsp(ctx);
+ gen_helper_precequ_qh_obl(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_PRECEQU_QH_OBR:
+ check_dsp(ctx);
+ gen_helper_precequ_qh_obr(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_PRECEQU_QH_OBLA:
+ check_dsp(ctx);
+ gen_helper_precequ_qh_obla(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_PRECEQU_QH_OBRA:
+ check_dsp(ctx);
+ gen_helper_precequ_qh_obra(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_PRECEU_QH_OBL:
+ check_dsp(ctx);
+ gen_helper_preceu_qh_obl(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_PRECEU_QH_OBR:
+ check_dsp(ctx);
+ gen_helper_preceu_qh_obr(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_PRECEU_QH_OBLA:
+ check_dsp(ctx);
+ gen_helper_preceu_qh_obla(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_PRECEU_QH_OBRA:
+ check_dsp(ctx);
+ gen_helper_preceu_qh_obra(cpu_gpr[ret], v2_t);
+ break;
+ case OPC_ABSQ_S_OB:
+ check_dspr2(ctx);
+ gen_helper_absq_s_ob(cpu_gpr[ret], v2_t, cpu_env);
+ break;
+ case OPC_ABSQ_S_PW:
+ check_dsp(ctx);
+ gen_helper_absq_s_pw(cpu_gpr[ret], v2_t, cpu_env);
+ break;
+ case OPC_ABSQ_S_QH:
+ check_dsp(ctx);
+ gen_helper_absq_s_qh(cpu_gpr[ret], v2_t, cpu_env);
+ break;
+ }
+ break;
+ case OPC_ADDU_OB_DSP:
+ switch (op2) {
+ case OPC_RADDU_L_OB:
+ check_dsp(ctx);
+ gen_helper_raddu_l_ob(cpu_gpr[ret], v1_t);
+ break;
+ case OPC_SUBQ_PW:
+ check_dsp(ctx);
+ gen_helper_subq_pw(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SUBQ_S_PW:
+ check_dsp(ctx);
+ gen_helper_subq_s_pw(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SUBQ_QH:
+ check_dsp(ctx);
+ gen_helper_subq_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SUBQ_S_QH:
+ check_dsp(ctx);
+ gen_helper_subq_s_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SUBU_OB:
+ check_dsp(ctx);
+ gen_helper_subu_ob(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SUBU_S_OB:
+ check_dsp(ctx);
+ gen_helper_subu_s_ob(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SUBU_QH:
+ check_dspr2(ctx);
+ gen_helper_subu_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SUBU_S_QH:
+ check_dspr2(ctx);
+ gen_helper_subu_s_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SUBUH_OB:
+ check_dspr2(ctx);
+ gen_helper_subuh_ob(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_SUBUH_R_OB:
+ check_dspr2(ctx);
+ gen_helper_subuh_r_ob(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_ADDQ_PW:
+ check_dsp(ctx);
+ gen_helper_addq_pw(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_ADDQ_S_PW:
+ check_dsp(ctx);
+ gen_helper_addq_s_pw(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_ADDQ_QH:
+ check_dsp(ctx);
+ gen_helper_addq_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_ADDQ_S_QH:
+ check_dsp(ctx);
+ gen_helper_addq_s_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_ADDU_OB:
+ check_dsp(ctx);
+ gen_helper_addu_ob(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_ADDU_S_OB:
+ check_dsp(ctx);
+ gen_helper_addu_s_ob(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_ADDU_QH:
+ check_dspr2(ctx);
+ gen_helper_addu_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_ADDU_S_QH:
+ check_dspr2(ctx);
+ gen_helper_addu_s_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_ADDUH_OB:
+ check_dspr2(ctx);
+ gen_helper_adduh_ob(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_ADDUH_R_OB:
+ check_dspr2(ctx);
+ gen_helper_adduh_r_ob(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ }
+ break;
+ case OPC_CMPU_EQ_OB_DSP:
+ switch (op2) {
+ case OPC_PRECR_OB_QH:
+ check_dspr2(ctx);
+ gen_helper_precr_ob_qh(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_PRECR_SRA_QH_PW:
+ check_dspr2(ctx);
+ {
+ TCGv_i32 ret_t = tcg_const_i32(ret);
+ gen_helper_precr_sra_qh_pw(v2_t, v1_t, v2_t, ret_t);
+ tcg_temp_free_i32(ret_t);
+ break;
+ }
+ case OPC_PRECR_SRA_R_QH_PW:
+ check_dspr2(ctx);
+ {
+ TCGv_i32 sa_v = tcg_const_i32(ret);
+ gen_helper_precr_sra_r_qh_pw(v2_t, v1_t, v2_t, sa_v);
+ tcg_temp_free_i32(sa_v);
+ break;
+ }
+ case OPC_PRECRQ_OB_QH:
+ check_dsp(ctx);
+ gen_helper_precrq_ob_qh(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_PRECRQ_PW_L:
+ check_dsp(ctx);
+ gen_helper_precrq_pw_l(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_PRECRQ_QH_PW:
+ check_dsp(ctx);
+ gen_helper_precrq_qh_pw(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_PRECRQ_RS_QH_PW:
+ check_dsp(ctx);
+ gen_helper_precrq_rs_qh_pw(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_PRECRQU_S_OB_QH:
+ check_dsp(ctx);
+ gen_helper_precrqu_s_ob_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ }
+ break;
+#endif
+ }
+
+ tcg_temp_free(v1_t);
+ tcg_temp_free(v2_t);
+}
+
+static void gen_mipsdsp_shift(DisasContext *ctx, uint32_t opc,
+ int ret, int v1, int v2)
+{
+ uint32_t op2;
+ TCGv t0;
+ TCGv v1_t;
+ TCGv v2_t;
+
+ if (ret == 0) {
+ /* Treat as NOP. */
+ return;
+ }
+
+ t0 = tcg_temp_new();
+ v1_t = tcg_temp_new();
+ v2_t = tcg_temp_new();
+
+ tcg_gen_movi_tl(t0, v1);
+ gen_load_gpr(v1_t, v1);
+ gen_load_gpr(v2_t, v2);
+
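+    /*
+     * t0 holds the immediate shift amount taken from the sa field (v1),
+     * while v1_t holds GPR[v1]; the non-V opcodes below use t0 and the
+     * variable-shift "V" forms use v1_t.
+     */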
+ switch (opc) {
+ case OPC_SHLL_QB_DSP:
+ {
+ op2 = MASK_SHLL_QB(ctx->opcode);
+ switch (op2) {
+ case OPC_SHLL_QB:
+ check_dsp(ctx);
+ gen_helper_shll_qb(cpu_gpr[ret], t0, v2_t, cpu_env);
+ break;
+ case OPC_SHLLV_QB:
+ check_dsp(ctx);
+ gen_helper_shll_qb(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SHLL_PH:
+ check_dsp(ctx);
+ gen_helper_shll_ph(cpu_gpr[ret], t0, v2_t, cpu_env);
+ break;
+ case OPC_SHLLV_PH:
+ check_dsp(ctx);
+ gen_helper_shll_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SHLL_S_PH:
+ check_dsp(ctx);
+ gen_helper_shll_s_ph(cpu_gpr[ret], t0, v2_t, cpu_env);
+ break;
+ case OPC_SHLLV_S_PH:
+ check_dsp(ctx);
+ gen_helper_shll_s_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SHLL_S_W:
+ check_dsp(ctx);
+ gen_helper_shll_s_w(cpu_gpr[ret], t0, v2_t, cpu_env);
+ break;
+ case OPC_SHLLV_S_W:
+ check_dsp(ctx);
+ gen_helper_shll_s_w(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_SHRL_QB:
+ check_dsp(ctx);
+ gen_helper_shrl_qb(cpu_gpr[ret], t0, v2_t);
+ break;
+ case OPC_SHRLV_QB:
+ check_dsp(ctx);
+ gen_helper_shrl_qb(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_SHRL_PH:
+ check_dspr2(ctx);
+ gen_helper_shrl_ph(cpu_gpr[ret], t0, v2_t);
+ break;
+ case OPC_SHRLV_PH:
+ check_dspr2(ctx);
+ gen_helper_shrl_ph(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_SHRA_QB:
+ check_dspr2(ctx);
+ gen_helper_shra_qb(cpu_gpr[ret], t0, v2_t);
+ break;
+ case OPC_SHRA_R_QB:
+ check_dspr2(ctx);
+ gen_helper_shra_r_qb(cpu_gpr[ret], t0, v2_t);
+ break;
+ case OPC_SHRAV_QB:
+ check_dspr2(ctx);
+ gen_helper_shra_qb(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_SHRAV_R_QB:
+ check_dspr2(ctx);
+ gen_helper_shra_r_qb(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_SHRA_PH:
+ check_dsp(ctx);
+ gen_helper_shra_ph(cpu_gpr[ret], t0, v2_t);
+ break;
+ case OPC_SHRA_R_PH:
+ check_dsp(ctx);
+ gen_helper_shra_r_ph(cpu_gpr[ret], t0, v2_t);
+ break;
+ case OPC_SHRAV_PH:
+ check_dsp(ctx);
+ gen_helper_shra_ph(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_SHRAV_R_PH:
+ check_dsp(ctx);
+ gen_helper_shra_r_ph(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_SHRA_R_W:
+ check_dsp(ctx);
+ gen_helper_shra_r_w(cpu_gpr[ret], t0, v2_t);
+ break;
+ case OPC_SHRAV_R_W:
+ check_dsp(ctx);
+ gen_helper_shra_r_w(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ default: /* Invalid */
+ MIPS_INVAL("MASK SHLL.QB");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ }
+#ifdef TARGET_MIPS64
+ case OPC_SHLL_OB_DSP:
+ op2 = MASK_SHLL_OB(ctx->opcode);
+ switch (op2) {
+ case OPC_SHLL_PW:
+ check_dsp(ctx);
+ gen_helper_shll_pw(cpu_gpr[ret], v2_t, t0, cpu_env);
+ break;
+ case OPC_SHLLV_PW:
+ check_dsp(ctx);
+ gen_helper_shll_pw(cpu_gpr[ret], v2_t, v1_t, cpu_env);
+ break;
+ case OPC_SHLL_S_PW:
+ check_dsp(ctx);
+ gen_helper_shll_s_pw(cpu_gpr[ret], v2_t, t0, cpu_env);
+ break;
+ case OPC_SHLLV_S_PW:
+ check_dsp(ctx);
+ gen_helper_shll_s_pw(cpu_gpr[ret], v2_t, v1_t, cpu_env);
+ break;
+ case OPC_SHLL_OB:
+ check_dsp(ctx);
+ gen_helper_shll_ob(cpu_gpr[ret], v2_t, t0, cpu_env);
+ break;
+ case OPC_SHLLV_OB:
+ check_dsp(ctx);
+ gen_helper_shll_ob(cpu_gpr[ret], v2_t, v1_t, cpu_env);
+ break;
+ case OPC_SHLL_QH:
+ check_dsp(ctx);
+ gen_helper_shll_qh(cpu_gpr[ret], v2_t, t0, cpu_env);
+ break;
+ case OPC_SHLLV_QH:
+ check_dsp(ctx);
+ gen_helper_shll_qh(cpu_gpr[ret], v2_t, v1_t, cpu_env);
+ break;
+ case OPC_SHLL_S_QH:
+ check_dsp(ctx);
+ gen_helper_shll_s_qh(cpu_gpr[ret], v2_t, t0, cpu_env);
+ break;
+ case OPC_SHLLV_S_QH:
+ check_dsp(ctx);
+ gen_helper_shll_s_qh(cpu_gpr[ret], v2_t, v1_t, cpu_env);
+ break;
+ case OPC_SHRA_OB:
+ check_dspr2(ctx);
+ gen_helper_shra_ob(cpu_gpr[ret], v2_t, t0);
+ break;
+ case OPC_SHRAV_OB:
+ check_dspr2(ctx);
+ gen_helper_shra_ob(cpu_gpr[ret], v2_t, v1_t);
+ break;
+ case OPC_SHRA_R_OB:
+ check_dspr2(ctx);
+ gen_helper_shra_r_ob(cpu_gpr[ret], v2_t, t0);
+ break;
+ case OPC_SHRAV_R_OB:
+ check_dspr2(ctx);
+ gen_helper_shra_r_ob(cpu_gpr[ret], v2_t, v1_t);
+ break;
+ case OPC_SHRA_PW:
+ check_dsp(ctx);
+ gen_helper_shra_pw(cpu_gpr[ret], v2_t, t0);
+ break;
+ case OPC_SHRAV_PW:
+ check_dsp(ctx);
+ gen_helper_shra_pw(cpu_gpr[ret], v2_t, v1_t);
+ break;
+ case OPC_SHRA_R_PW:
+ check_dsp(ctx);
+ gen_helper_shra_r_pw(cpu_gpr[ret], v2_t, t0);
+ break;
+ case OPC_SHRAV_R_PW:
+ check_dsp(ctx);
+ gen_helper_shra_r_pw(cpu_gpr[ret], v2_t, v1_t);
+ break;
+ case OPC_SHRA_QH:
+ check_dsp(ctx);
+ gen_helper_shra_qh(cpu_gpr[ret], v2_t, t0);
+ break;
+ case OPC_SHRAV_QH:
+ check_dsp(ctx);
+ gen_helper_shra_qh(cpu_gpr[ret], v2_t, v1_t);
+ break;
+ case OPC_SHRA_R_QH:
+ check_dsp(ctx);
+ gen_helper_shra_r_qh(cpu_gpr[ret], v2_t, t0);
+ break;
+ case OPC_SHRAV_R_QH:
+ check_dsp(ctx);
+ gen_helper_shra_r_qh(cpu_gpr[ret], v2_t, v1_t);
+ break;
+ case OPC_SHRL_OB:
+ check_dsp(ctx);
+ gen_helper_shrl_ob(cpu_gpr[ret], v2_t, t0);
+ break;
+ case OPC_SHRLV_OB:
+ check_dsp(ctx);
+ gen_helper_shrl_ob(cpu_gpr[ret], v2_t, v1_t);
+ break;
+ case OPC_SHRL_QH:
+ check_dspr2(ctx);
+ gen_helper_shrl_qh(cpu_gpr[ret], v2_t, t0);
+ break;
+ case OPC_SHRLV_QH:
+ check_dspr2(ctx);
+ gen_helper_shrl_qh(cpu_gpr[ret], v2_t, v1_t);
+ break;
+ default: /* Invalid */
+ MIPS_INVAL("MASK SHLL.OB");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ break;
+#endif
+ }
+
+ tcg_temp_free(t0);
+ tcg_temp_free(v1_t);
+ tcg_temp_free(v2_t);
+}
+
+static void gen_mipsdsp_multiply(DisasContext *ctx, uint32_t op1, uint32_t op2,
+ int ret, int v1, int v2, int check_ret)
+{
+ TCGv_i32 t0;
+ TCGv v1_t;
+ TCGv v2_t;
+
+ if ((ret == 0) && (check_ret == 1)) {
+ /* Treat as NOP. */
+ return;
+ }
+
+ t0 = tcg_temp_new_i32();
+ v1_t = tcg_temp_new();
+ v2_t = tcg_temp_new();
+
+ tcg_gen_movi_i32(t0, ret);
+ gen_load_gpr(v1_t, v1);
+ gen_load_gpr(v2_t, v2);
+
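+    /*
+     * t0 is preloaded with "ret": the DSP accumulate ops below pass it to
+     * their helpers as the accumulator selector, while the plain multiplies
+     * write their result straight to cpu_gpr[ret] and ignore it.
+     */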
+ switch (op1) {
+ /* OPC_MULT_G_2E, OPC_ADDUH_QB_DSP, OPC_MUL_PH_DSP have
+ * the same mask and op1. */
+ case OPC_MULT_G_2E:
+ check_dspr2(ctx);
+ switch (op2) {
+ case OPC_MUL_PH:
+ gen_helper_mul_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MUL_S_PH:
+ gen_helper_mul_s_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MULQ_S_W:
+ gen_helper_mulq_s_w(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MULQ_RS_W:
+ gen_helper_mulq_rs_w(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ }
+ break;
+ case OPC_DPA_W_PH_DSP:
+ switch (op2) {
+ case OPC_DPAU_H_QBL:
+ check_dsp(ctx);
+ gen_helper_dpau_h_qbl(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_DPAU_H_QBR:
+ check_dsp(ctx);
+ gen_helper_dpau_h_qbr(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_DPSU_H_QBL:
+ check_dsp(ctx);
+ gen_helper_dpsu_h_qbl(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_DPSU_H_QBR:
+ check_dsp(ctx);
+ gen_helper_dpsu_h_qbr(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_DPA_W_PH:
+ check_dspr2(ctx);
+ gen_helper_dpa_w_ph(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_DPAX_W_PH:
+ check_dspr2(ctx);
+ gen_helper_dpax_w_ph(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_DPAQ_S_W_PH:
+ check_dsp(ctx);
+ gen_helper_dpaq_s_w_ph(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_DPAQX_S_W_PH:
+ check_dspr2(ctx);
+ gen_helper_dpaqx_s_w_ph(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_DPAQX_SA_W_PH:
+ check_dspr2(ctx);
+ gen_helper_dpaqx_sa_w_ph(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_DPS_W_PH:
+ check_dspr2(ctx);
+ gen_helper_dps_w_ph(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_DPSX_W_PH:
+ check_dspr2(ctx);
+ gen_helper_dpsx_w_ph(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_DPSQ_S_W_PH:
+ check_dsp(ctx);
+ gen_helper_dpsq_s_w_ph(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_DPSQX_S_W_PH:
+ check_dspr2(ctx);
+ gen_helper_dpsqx_s_w_ph(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_DPSQX_SA_W_PH:
+ check_dspr2(ctx);
+ gen_helper_dpsqx_sa_w_ph(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MULSAQ_S_W_PH:
+ check_dsp(ctx);
+ gen_helper_mulsaq_s_w_ph(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_DPAQ_SA_L_W:
+ check_dsp(ctx);
+ gen_helper_dpaq_sa_l_w(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_DPSQ_SA_L_W:
+ check_dsp(ctx);
+ gen_helper_dpsq_sa_l_w(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MAQ_S_W_PHL:
+ check_dsp(ctx);
+ gen_helper_maq_s_w_phl(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MAQ_S_W_PHR:
+ check_dsp(ctx);
+ gen_helper_maq_s_w_phr(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MAQ_SA_W_PHL:
+ check_dsp(ctx);
+ gen_helper_maq_sa_w_phl(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MAQ_SA_W_PHR:
+ check_dsp(ctx);
+ gen_helper_maq_sa_w_phr(t0, v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MULSA_W_PH:
+ check_dspr2(ctx);
+ gen_helper_mulsa_w_ph(t0, v1_t, v2_t, cpu_env);
+ break;
+ }
+ break;
+#ifdef TARGET_MIPS64
+ case OPC_DPAQ_W_QH_DSP:
+ {
+ int ac = ret & 0x03;
+ tcg_gen_movi_i32(t0, ac);
+
+ switch (op2) {
+ case OPC_DMADD:
+ check_dsp(ctx);
+ gen_helper_dmadd(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_DMADDU:
+ check_dsp(ctx);
+ gen_helper_dmaddu(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_DMSUB:
+ check_dsp(ctx);
+ gen_helper_dmsub(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_DMSUBU:
+ check_dsp(ctx);
+ gen_helper_dmsubu(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_DPA_W_QH:
+ check_dspr2(ctx);
+ gen_helper_dpa_w_qh(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_DPAQ_S_W_QH:
+ check_dsp(ctx);
+ gen_helper_dpaq_s_w_qh(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_DPAQ_SA_L_PW:
+ check_dsp(ctx);
+ gen_helper_dpaq_sa_l_pw(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_DPAU_H_OBL:
+ check_dsp(ctx);
+ gen_helper_dpau_h_obl(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_DPAU_H_OBR:
+ check_dsp(ctx);
+ gen_helper_dpau_h_obr(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_DPS_W_QH:
+ check_dspr2(ctx);
+ gen_helper_dps_w_qh(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_DPSQ_S_W_QH:
+ check_dsp(ctx);
+ gen_helper_dpsq_s_w_qh(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_DPSQ_SA_L_PW:
+ check_dsp(ctx);
+ gen_helper_dpsq_sa_l_pw(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_DPSU_H_OBL:
+ check_dsp(ctx);
+ gen_helper_dpsu_h_obl(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_DPSU_H_OBR:
+ check_dsp(ctx);
+ gen_helper_dpsu_h_obr(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_MAQ_S_L_PWL:
+ check_dsp(ctx);
+ gen_helper_maq_s_l_pwl(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_MAQ_S_L_PWR:
+ check_dsp(ctx);
+ gen_helper_maq_s_l_pwr(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_MAQ_S_W_QHLL:
+ check_dsp(ctx);
+ gen_helper_maq_s_w_qhll(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_MAQ_SA_W_QHLL:
+ check_dsp(ctx);
+ gen_helper_maq_sa_w_qhll(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_MAQ_S_W_QHLR:
+ check_dsp(ctx);
+ gen_helper_maq_s_w_qhlr(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_MAQ_SA_W_QHLR:
+ check_dsp(ctx);
+ gen_helper_maq_sa_w_qhlr(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_MAQ_S_W_QHRL:
+ check_dsp(ctx);
+ gen_helper_maq_s_w_qhrl(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_MAQ_SA_W_QHRL:
+ check_dsp(ctx);
+ gen_helper_maq_sa_w_qhrl(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_MAQ_S_W_QHRR:
+ check_dsp(ctx);
+ gen_helper_maq_s_w_qhrr(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_MAQ_SA_W_QHRR:
+ check_dsp(ctx);
+ gen_helper_maq_sa_w_qhrr(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_MULSAQ_S_L_PW:
+ check_dsp(ctx);
+ gen_helper_mulsaq_s_l_pw(v1_t, v2_t, t0, cpu_env);
+ break;
+ case OPC_MULSAQ_S_W_QH:
+ check_dsp(ctx);
+ gen_helper_mulsaq_s_w_qh(v1_t, v2_t, t0, cpu_env);
+ break;
+ }
+ }
+ break;
+#endif
+ case OPC_ADDU_QB_DSP:
+ switch (op2) {
+ case OPC_MULEU_S_PH_QBL:
+ check_dsp(ctx);
+ gen_helper_muleu_s_ph_qbl(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MULEU_S_PH_QBR:
+ check_dsp(ctx);
+ gen_helper_muleu_s_ph_qbr(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MULQ_RS_PH:
+ check_dsp(ctx);
+ gen_helper_mulq_rs_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MULEQ_S_W_PHL:
+ check_dsp(ctx);
+ gen_helper_muleq_s_w_phl(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MULEQ_S_W_PHR:
+ check_dsp(ctx);
+ gen_helper_muleq_s_w_phr(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MULQ_S_PH:
+ check_dspr2(ctx);
+ gen_helper_mulq_s_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ }
+ break;
+#ifdef TARGET_MIPS64
+ case OPC_ADDU_OB_DSP:
+ switch (op2) {
+ case OPC_MULEQ_S_PW_QHL:
+ check_dsp(ctx);
+ gen_helper_muleq_s_pw_qhl(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MULEQ_S_PW_QHR:
+ check_dsp(ctx);
+ gen_helper_muleq_s_pw_qhr(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MULEU_S_QH_OBL:
+ check_dsp(ctx);
+ gen_helper_muleu_s_qh_obl(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MULEU_S_QH_OBR:
+ check_dsp(ctx);
+ gen_helper_muleu_s_qh_obr(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_MULQ_RS_QH:
+ check_dsp(ctx);
+ gen_helper_mulq_rs_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ }
+ break;
+#endif
+ }
+
+ tcg_temp_free_i32(t0);
+ tcg_temp_free(v1_t);
+ tcg_temp_free(v2_t);
+}
+
+static void gen_mipsdsp_bitinsn(DisasContext *ctx, uint32_t op1, uint32_t op2,
+ int ret, int val)
+{
+ int16_t imm;
+ TCGv t0;
+ TCGv val_t;
+
+ if (ret == 0) {
+ /* Treat as NOP. */
+ return;
+ }
+
+ t0 = tcg_temp_new();
+ val_t = tcg_temp_new();
+ gen_load_gpr(val_t, val);
+
+ switch (op1) {
+ case OPC_ABSQ_S_PH_DSP:
+ switch (op2) {
+ case OPC_BITREV:
+ check_dsp(ctx);
+ gen_helper_bitrev(cpu_gpr[ret], val_t);
+ break;
+ case OPC_REPL_QB:
+ check_dsp(ctx);
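+            /* REPL.QB replicates the 8-bit immediate into all four byte
+             * lanes, e.g. imm = 0x12 yields 0x12121212 (then sign-extended
+             * to target_long).
+             */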
+ {
+ target_long result;
+ imm = (ctx->opcode >> 16) & 0xFF;
+ result = (uint32_t)imm << 24 |
+ (uint32_t)imm << 16 |
+ (uint32_t)imm << 8 |
+ (uint32_t)imm;
+ result = (int32_t)result;
+ tcg_gen_movi_tl(cpu_gpr[ret], result);
+ }
+ break;
+ case OPC_REPLV_QB:
+ check_dsp(ctx);
+ tcg_gen_ext8u_tl(cpu_gpr[ret], val_t);
+ tcg_gen_shli_tl(t0, cpu_gpr[ret], 8);
+ tcg_gen_or_tl(cpu_gpr[ret], cpu_gpr[ret], t0);
+ tcg_gen_shli_tl(t0, cpu_gpr[ret], 16);
+ tcg_gen_or_tl(cpu_gpr[ret], cpu_gpr[ret], t0);
+ tcg_gen_ext32s_tl(cpu_gpr[ret], cpu_gpr[ret]);
+ break;
+ case OPC_REPL_PH:
+ check_dsp(ctx);
+ {
+ imm = (ctx->opcode >> 16) & 0x03FF;
+ imm = (int16_t)(imm << 6) >> 6;
+                tcg_gen_movi_tl(cpu_gpr[ret],
+                                (target_long)((int32_t)imm << 16 |
+                                              (uint16_t)imm));
+ }
+ break;
+ case OPC_REPLV_PH:
+ check_dsp(ctx);
+ tcg_gen_ext16u_tl(cpu_gpr[ret], val_t);
+ tcg_gen_shli_tl(t0, cpu_gpr[ret], 16);
+ tcg_gen_or_tl(cpu_gpr[ret], cpu_gpr[ret], t0);
+ tcg_gen_ext32s_tl(cpu_gpr[ret], cpu_gpr[ret]);
+ break;
+ }
+ break;
+#ifdef TARGET_MIPS64
+ case OPC_ABSQ_S_QH_DSP:
+ switch (op2) {
+ case OPC_REPL_OB:
+ check_dsp(ctx);
+ {
+ target_long temp;
+
+ imm = (ctx->opcode >> 16) & 0xFF;
+ temp = ((uint64_t)imm << 8) | (uint64_t)imm;
+ temp = (temp << 16) | temp;
+ temp = (temp << 32) | temp;
+ tcg_gen_movi_tl(cpu_gpr[ret], temp);
+ break;
+ }
+ case OPC_REPL_PW:
+ check_dsp(ctx);
+ {
+ target_long temp;
+
+ imm = (ctx->opcode >> 16) & 0x03FF;
+ imm = (int16_t)(imm << 6) >> 6;
+                temp = ((target_long)imm << 32) |
+                       ((target_long)imm & 0xFFFFFFFF);
+ tcg_gen_movi_tl(cpu_gpr[ret], temp);
+ break;
+ }
+ case OPC_REPL_QH:
+ check_dsp(ctx);
+ {
+ target_long temp;
+
+ imm = (ctx->opcode >> 16) & 0x03FF;
+ imm = (int16_t)(imm << 6) >> 6;
+
+ temp = ((uint64_t)(uint16_t)imm << 48) |
+ ((uint64_t)(uint16_t)imm << 32) |
+ ((uint64_t)(uint16_t)imm << 16) |
+ (uint64_t)(uint16_t)imm;
+ tcg_gen_movi_tl(cpu_gpr[ret], temp);
+ break;
+ }
+ case OPC_REPLV_OB:
+ check_dsp(ctx);
+ tcg_gen_ext8u_tl(cpu_gpr[ret], val_t);
+ tcg_gen_shli_tl(t0, cpu_gpr[ret], 8);
+ tcg_gen_or_tl(cpu_gpr[ret], cpu_gpr[ret], t0);
+ tcg_gen_shli_tl(t0, cpu_gpr[ret], 16);
+ tcg_gen_or_tl(cpu_gpr[ret], cpu_gpr[ret], t0);
+ tcg_gen_shli_tl(t0, cpu_gpr[ret], 32);
+ tcg_gen_or_tl(cpu_gpr[ret], cpu_gpr[ret], t0);
+ break;
+ case OPC_REPLV_PW:
+ check_dsp(ctx);
+ tcg_gen_ext32u_i64(cpu_gpr[ret], val_t);
+ tcg_gen_shli_tl(t0, cpu_gpr[ret], 32);
+ tcg_gen_or_tl(cpu_gpr[ret], cpu_gpr[ret], t0);
+ break;
+ case OPC_REPLV_QH:
+ check_dsp(ctx);
+ tcg_gen_ext16u_tl(cpu_gpr[ret], val_t);
+ tcg_gen_shli_tl(t0, cpu_gpr[ret], 16);
+ tcg_gen_or_tl(cpu_gpr[ret], cpu_gpr[ret], t0);
+ tcg_gen_shli_tl(t0, cpu_gpr[ret], 32);
+ tcg_gen_or_tl(cpu_gpr[ret], cpu_gpr[ret], t0);
+ break;
+ }
+ break;
+#endif
+ }
+ tcg_temp_free(t0);
+ tcg_temp_free(val_t);
+}
+
+static void gen_mipsdsp_add_cmp_pick(DisasContext *ctx,
+ uint32_t op1, uint32_t op2,
+ int ret, int v1, int v2, int check_ret)
+{
+ TCGv t1;
+ TCGv v1_t;
+ TCGv v2_t;
+
+ if ((ret == 0) && (check_ret == 1)) {
+ /* Treat as NOP. */
+ return;
+ }
+
+ t1 = tcg_temp_new();
+ v1_t = tcg_temp_new();
+ v2_t = tcg_temp_new();
+
+ gen_load_gpr(v1_t, v1);
+ gen_load_gpr(v2_t, v2);
+
+ switch (op1) {
+ case OPC_CMPU_EQ_QB_DSP:
+ switch (op2) {
+ case OPC_CMPU_EQ_QB:
+ check_dsp(ctx);
+ gen_helper_cmpu_eq_qb(v1_t, v2_t, cpu_env);
+ break;
+ case OPC_CMPU_LT_QB:
+ check_dsp(ctx);
+ gen_helper_cmpu_lt_qb(v1_t, v2_t, cpu_env);
+ break;
+ case OPC_CMPU_LE_QB:
+ check_dsp(ctx);
+ gen_helper_cmpu_le_qb(v1_t, v2_t, cpu_env);
+ break;
+ case OPC_CMPGU_EQ_QB:
+ check_dsp(ctx);
+ gen_helper_cmpgu_eq_qb(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_CMPGU_LT_QB:
+ check_dsp(ctx);
+ gen_helper_cmpgu_lt_qb(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_CMPGU_LE_QB:
+ check_dsp(ctx);
+ gen_helper_cmpgu_le_qb(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_CMPGDU_EQ_QB:
+ check_dspr2(ctx);
+ gen_helper_cmpgu_eq_qb(t1, v1_t, v2_t);
+ tcg_gen_mov_tl(cpu_gpr[ret], t1);
+ tcg_gen_andi_tl(cpu_dspctrl, cpu_dspctrl, 0xF0FFFFFF);
+ tcg_gen_shli_tl(t1, t1, 24);
+ tcg_gen_or_tl(cpu_dspctrl, cpu_dspctrl, t1);
+ break;
+ case OPC_CMPGDU_LT_QB:
+ check_dspr2(ctx);
+ gen_helper_cmpgu_lt_qb(t1, v1_t, v2_t);
+ tcg_gen_mov_tl(cpu_gpr[ret], t1);
+ tcg_gen_andi_tl(cpu_dspctrl, cpu_dspctrl, 0xF0FFFFFF);
+ tcg_gen_shli_tl(t1, t1, 24);
+ tcg_gen_or_tl(cpu_dspctrl, cpu_dspctrl, t1);
+ break;
+ case OPC_CMPGDU_LE_QB:
+ check_dspr2(ctx);
+ gen_helper_cmpgu_le_qb(t1, v1_t, v2_t);
+ tcg_gen_mov_tl(cpu_gpr[ret], t1);
+ tcg_gen_andi_tl(cpu_dspctrl, cpu_dspctrl, 0xF0FFFFFF);
+ tcg_gen_shli_tl(t1, t1, 24);
+ tcg_gen_or_tl(cpu_dspctrl, cpu_dspctrl, t1);
+ break;
+ case OPC_CMP_EQ_PH:
+ check_dsp(ctx);
+ gen_helper_cmp_eq_ph(v1_t, v2_t, cpu_env);
+ break;
+ case OPC_CMP_LT_PH:
+ check_dsp(ctx);
+ gen_helper_cmp_lt_ph(v1_t, v2_t, cpu_env);
+ break;
+ case OPC_CMP_LE_PH:
+ check_dsp(ctx);
+ gen_helper_cmp_le_ph(v1_t, v2_t, cpu_env);
+ break;
+ case OPC_PICK_QB:
+ check_dsp(ctx);
+ gen_helper_pick_qb(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_PICK_PH:
+ check_dsp(ctx);
+ gen_helper_pick_ph(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_PACKRL_PH:
+ check_dsp(ctx);
+ gen_helper_packrl_ph(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ }
+ break;
+#ifdef TARGET_MIPS64
+ case OPC_CMPU_EQ_OB_DSP:
+ switch (op2) {
+ case OPC_CMP_EQ_PW:
+ check_dsp(ctx);
+ gen_helper_cmp_eq_pw(v1_t, v2_t, cpu_env);
+ break;
+ case OPC_CMP_LT_PW:
+ check_dsp(ctx);
+ gen_helper_cmp_lt_pw(v1_t, v2_t, cpu_env);
+ break;
+ case OPC_CMP_LE_PW:
+ check_dsp(ctx);
+ gen_helper_cmp_le_pw(v1_t, v2_t, cpu_env);
+ break;
+ case OPC_CMP_EQ_QH:
+ check_dsp(ctx);
+ gen_helper_cmp_eq_qh(v1_t, v2_t, cpu_env);
+ break;
+ case OPC_CMP_LT_QH:
+ check_dsp(ctx);
+ gen_helper_cmp_lt_qh(v1_t, v2_t, cpu_env);
+ break;
+ case OPC_CMP_LE_QH:
+ check_dsp(ctx);
+ gen_helper_cmp_le_qh(v1_t, v2_t, cpu_env);
+ break;
+ case OPC_CMPGDU_EQ_OB:
+ check_dspr2(ctx);
+ gen_helper_cmpgdu_eq_ob(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_CMPGDU_LT_OB:
+ check_dspr2(ctx);
+ gen_helper_cmpgdu_lt_ob(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_CMPGDU_LE_OB:
+ check_dspr2(ctx);
+ gen_helper_cmpgdu_le_ob(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_CMPGU_EQ_OB:
+ check_dsp(ctx);
+ gen_helper_cmpgu_eq_ob(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_CMPGU_LT_OB:
+ check_dsp(ctx);
+ gen_helper_cmpgu_lt_ob(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_CMPGU_LE_OB:
+ check_dsp(ctx);
+ gen_helper_cmpgu_le_ob(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_CMPU_EQ_OB:
+ check_dsp(ctx);
+ gen_helper_cmpu_eq_ob(v1_t, v2_t, cpu_env);
+ break;
+ case OPC_CMPU_LT_OB:
+ check_dsp(ctx);
+ gen_helper_cmpu_lt_ob(v1_t, v2_t, cpu_env);
+ break;
+ case OPC_CMPU_LE_OB:
+ check_dsp(ctx);
+ gen_helper_cmpu_le_ob(v1_t, v2_t, cpu_env);
+ break;
+ case OPC_PACKRL_PW:
+ check_dsp(ctx);
+ gen_helper_packrl_pw(cpu_gpr[ret], v1_t, v2_t);
+ break;
+ case OPC_PICK_OB:
+ check_dsp(ctx);
+ gen_helper_pick_ob(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_PICK_PW:
+ check_dsp(ctx);
+ gen_helper_pick_pw(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ case OPC_PICK_QH:
+ check_dsp(ctx);
+ gen_helper_pick_qh(cpu_gpr[ret], v1_t, v2_t, cpu_env);
+ break;
+ }
+ break;
+#endif
+ }
+
+ tcg_temp_free(t1);
+ tcg_temp_free(v1_t);
+ tcg_temp_free(v2_t);
+}
+
+static void gen_mipsdsp_append(CPUMIPSState *env, DisasContext *ctx,
+ uint32_t op1, int rt, int rs, int sa)
+{
+ TCGv t0;
+
+ check_dspr2(ctx);
+
+ if (rt == 0) {
+ /* Treat as NOP. */
+ return;
+ }
+
+ t0 = tcg_temp_new();
+ gen_load_gpr(t0, rs);
+
+ switch (op1) {
+ case OPC_APPEND_DSP:
+ switch (MASK_APPEND(ctx->opcode)) {
+ case OPC_APPEND:
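+            /* APPEND rt, rs, sa: rt = sign_extend32((rt << sa) |
+             * (rs & ((1 << sa) - 1))), e.g. sa = 4, rt = 0x123,
+             * rs = 0xabcdeff7 gives 0x1237.
+             */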
+ if (sa != 0) {
+ tcg_gen_deposit_tl(cpu_gpr[rt], t0, cpu_gpr[rt], sa, 32 - sa);
+ }
+ tcg_gen_ext32s_tl(cpu_gpr[rt], cpu_gpr[rt]);
+ break;
+ case OPC_PREPEND:
+ if (sa != 0) {
+ tcg_gen_ext32u_tl(cpu_gpr[rt], cpu_gpr[rt]);
+ tcg_gen_shri_tl(cpu_gpr[rt], cpu_gpr[rt], sa);
+ tcg_gen_shli_tl(t0, t0, 32 - sa);
+ tcg_gen_or_tl(cpu_gpr[rt], cpu_gpr[rt], t0);
+ }
+ tcg_gen_ext32s_tl(cpu_gpr[rt], cpu_gpr[rt]);
+ break;
+ case OPC_BALIGN:
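+            /* BALIGN rt, rs, bp: for bp = 1 or 3 the low 32 bits become
+             * (rt << (8 * bp)) | (rs >> (8 * (4 - bp))); bp = 0 and bp = 2
+             * only sign-extend rt here.
+             */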
+ sa &= 3;
+ if (sa != 0 && sa != 2) {
+ tcg_gen_shli_tl(cpu_gpr[rt], cpu_gpr[rt], 8 * sa);
+ tcg_gen_ext32u_tl(t0, t0);
+ tcg_gen_shri_tl(t0, t0, 8 * (4 - sa));
+ tcg_gen_or_tl(cpu_gpr[rt], cpu_gpr[rt], t0);
+ }
+ tcg_gen_ext32s_tl(cpu_gpr[rt], cpu_gpr[rt]);
+ break;
+ default: /* Invalid */
+ MIPS_INVAL("MASK APPEND");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ break;
+#ifdef TARGET_MIPS64
+ case OPC_DAPPEND_DSP:
+ switch (MASK_DAPPEND(ctx->opcode)) {
+ case OPC_DAPPEND:
+ if (sa != 0) {
+ tcg_gen_deposit_tl(cpu_gpr[rt], t0, cpu_gpr[rt], sa, 64 - sa);
+ }
+ break;
+ case OPC_PREPENDD:
+ tcg_gen_shri_tl(cpu_gpr[rt], cpu_gpr[rt], 0x20 | sa);
+ tcg_gen_shli_tl(t0, t0, 64 - (0x20 | sa));
+            tcg_gen_or_tl(cpu_gpr[rt], cpu_gpr[rt], t0);
+ break;
+ case OPC_PREPENDW:
+ if (sa != 0) {
+ tcg_gen_shri_tl(cpu_gpr[rt], cpu_gpr[rt], sa);
+ tcg_gen_shli_tl(t0, t0, 64 - sa);
+ tcg_gen_or_tl(cpu_gpr[rt], cpu_gpr[rt], t0);
+ }
+ break;
+ case OPC_DBALIGN:
+ sa &= 7;
+ if (sa != 0 && sa != 2 && sa != 4) {
+ tcg_gen_shli_tl(cpu_gpr[rt], cpu_gpr[rt], 8 * sa);
+ tcg_gen_shri_tl(t0, t0, 8 * (8 - sa));
+ tcg_gen_or_tl(cpu_gpr[rt], cpu_gpr[rt], t0);
+ }
+ break;
+ default: /* Invalid */
+ MIPS_INVAL("MASK DAPPEND");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ break;
+#endif
+ }
+ tcg_temp_free(t0);
+}
+
+static void gen_mipsdsp_accinsn(DisasContext *ctx, uint32_t op1, uint32_t op2,
+ int ret, int v1, int v2, int check_ret)
+{
+ TCGv t0;
+ TCGv t1;
+ TCGv v1_t;
+ TCGv v2_t;
+ int16_t imm;
+
+ if ((ret == 0) && (check_ret == 1)) {
+ /* Treat as NOP. */
+ return;
+ }
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ v1_t = tcg_temp_new();
+ v2_t = tcg_temp_new();
+
+ gen_load_gpr(v1_t, v1);
+ gen_load_gpr(v2_t, v2);
+
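+    /*
+     * The immediate EXTR/EXTP forms below move the raw field values v1/v2
+     * into t1/t0 before calling the helper, whereas the variable "V" forms
+     * pass v1_t = GPR[v1] as the shift operand instead.
+     */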
+ switch (op1) {
+ case OPC_EXTR_W_DSP:
+ check_dsp(ctx);
+ switch (op2) {
+ case OPC_EXTR_W:
+ tcg_gen_movi_tl(t0, v2);
+ tcg_gen_movi_tl(t1, v1);
+ gen_helper_extr_w(cpu_gpr[ret], t0, t1, cpu_env);
+ break;
+ case OPC_EXTR_R_W:
+ tcg_gen_movi_tl(t0, v2);
+ tcg_gen_movi_tl(t1, v1);
+ gen_helper_extr_r_w(cpu_gpr[ret], t0, t1, cpu_env);
+ break;
+ case OPC_EXTR_RS_W:
+ tcg_gen_movi_tl(t0, v2);
+ tcg_gen_movi_tl(t1, v1);
+ gen_helper_extr_rs_w(cpu_gpr[ret], t0, t1, cpu_env);
+ break;
+ case OPC_EXTR_S_H:
+ tcg_gen_movi_tl(t0, v2);
+ tcg_gen_movi_tl(t1, v1);
+ gen_helper_extr_s_h(cpu_gpr[ret], t0, t1, cpu_env);
+ break;
+ case OPC_EXTRV_S_H:
+ tcg_gen_movi_tl(t0, v2);
+ gen_helper_extr_s_h(cpu_gpr[ret], t0, v1_t, cpu_env);
+ break;
+ case OPC_EXTRV_W:
+ tcg_gen_movi_tl(t0, v2);
+ gen_helper_extr_w(cpu_gpr[ret], t0, v1_t, cpu_env);
+ break;
+ case OPC_EXTRV_R_W:
+ tcg_gen_movi_tl(t0, v2);
+ gen_helper_extr_r_w(cpu_gpr[ret], t0, v1_t, cpu_env);
+ break;
+ case OPC_EXTRV_RS_W:
+ tcg_gen_movi_tl(t0, v2);
+ gen_helper_extr_rs_w(cpu_gpr[ret], t0, v1_t, cpu_env);
+ break;
+ case OPC_EXTP:
+ tcg_gen_movi_tl(t0, v2);
+ tcg_gen_movi_tl(t1, v1);
+ gen_helper_extp(cpu_gpr[ret], t0, t1, cpu_env);
+ break;
+ case OPC_EXTPV:
+ tcg_gen_movi_tl(t0, v2);
+ gen_helper_extp(cpu_gpr[ret], t0, v1_t, cpu_env);
+ break;
+ case OPC_EXTPDP:
+ tcg_gen_movi_tl(t0, v2);
+ tcg_gen_movi_tl(t1, v1);
+ gen_helper_extpdp(cpu_gpr[ret], t0, t1, cpu_env);
+ break;
+ case OPC_EXTPDPV:
+ tcg_gen_movi_tl(t0, v2);
+ gen_helper_extpdp(cpu_gpr[ret], t0, v1_t, cpu_env);
+ break;
+ case OPC_SHILO:
+ imm = (ctx->opcode >> 20) & 0x3F;
+ tcg_gen_movi_tl(t0, ret);
+ tcg_gen_movi_tl(t1, imm);
+ gen_helper_shilo(t0, t1, cpu_env);
+ break;
+ case OPC_SHILOV:
+ tcg_gen_movi_tl(t0, ret);
+ gen_helper_shilo(t0, v1_t, cpu_env);
+ break;
+ case OPC_MTHLIP:
+ tcg_gen_movi_tl(t0, ret);
+ gen_helper_mthlip(t0, v1_t, cpu_env);
+ break;
+ case OPC_WRDSP:
+ imm = (ctx->opcode >> 11) & 0x3FF;
+ tcg_gen_movi_tl(t0, imm);
+ gen_helper_wrdsp(v1_t, t0, cpu_env);
+ break;
+ case OPC_RDDSP:
+ imm = (ctx->opcode >> 16) & 0x03FF;
+ tcg_gen_movi_tl(t0, imm);
+ gen_helper_rddsp(cpu_gpr[ret], t0, cpu_env);
+ break;
+ }
+ break;
+#ifdef TARGET_MIPS64
+ case OPC_DEXTR_W_DSP:
+ check_dsp(ctx);
+ switch (op2) {
+ case OPC_DMTHLIP:
+ tcg_gen_movi_tl(t0, ret);
+ gen_helper_dmthlip(v1_t, t0, cpu_env);
+ break;
+ case OPC_DSHILO:
+ {
+ int shift = (ctx->opcode >> 19) & 0x7F;
+ int ac = (ctx->opcode >> 11) & 0x03;
+ tcg_gen_movi_tl(t0, shift);
+ tcg_gen_movi_tl(t1, ac);
+ gen_helper_dshilo(t0, t1, cpu_env);
+ break;
+ }
+ case OPC_DSHILOV:
+ {
+ int ac = (ctx->opcode >> 11) & 0x03;
+ tcg_gen_movi_tl(t0, ac);
+ gen_helper_dshilo(v1_t, t0, cpu_env);
+ break;
+ }
+ case OPC_DEXTP:
+ tcg_gen_movi_tl(t0, v2);
+ tcg_gen_movi_tl(t1, v1);
+
+ gen_helper_dextp(cpu_gpr[ret], t0, t1, cpu_env);
+ break;
+ case OPC_DEXTPV:
+ tcg_gen_movi_tl(t0, v2);
+ gen_helper_dextp(cpu_gpr[ret], t0, v1_t, cpu_env);
+ break;
+ case OPC_DEXTPDP:
+ tcg_gen_movi_tl(t0, v2);
+ tcg_gen_movi_tl(t1, v1);
+ gen_helper_dextpdp(cpu_gpr[ret], t0, t1, cpu_env);
+ break;
+ case OPC_DEXTPDPV:
+ tcg_gen_movi_tl(t0, v2);
+ gen_helper_dextpdp(cpu_gpr[ret], t0, v1_t, cpu_env);
+ break;
+ case OPC_DEXTR_L:
+ tcg_gen_movi_tl(t0, v2);
+ tcg_gen_movi_tl(t1, v1);
+ gen_helper_dextr_l(cpu_gpr[ret], t0, t1, cpu_env);
+ break;
+ case OPC_DEXTR_R_L:
+ tcg_gen_movi_tl(t0, v2);
+ tcg_gen_movi_tl(t1, v1);
+ gen_helper_dextr_r_l(cpu_gpr[ret], t0, t1, cpu_env);
+ break;
+ case OPC_DEXTR_RS_L:
+ tcg_gen_movi_tl(t0, v2);
+ tcg_gen_movi_tl(t1, v1);
+ gen_helper_dextr_rs_l(cpu_gpr[ret], t0, t1, cpu_env);
+ break;
+ case OPC_DEXTR_W:
+ tcg_gen_movi_tl(t0, v2);
+ tcg_gen_movi_tl(t1, v1);
+ gen_helper_dextr_w(cpu_gpr[ret], t0, t1, cpu_env);
+ break;
+ case OPC_DEXTR_R_W:
+ tcg_gen_movi_tl(t0, v2);
+ tcg_gen_movi_tl(t1, v1);
+ gen_helper_dextr_r_w(cpu_gpr[ret], t0, t1, cpu_env);
+ break;
+ case OPC_DEXTR_RS_W:
+ tcg_gen_movi_tl(t0, v2);
+ tcg_gen_movi_tl(t1, v1);
+ gen_helper_dextr_rs_w(cpu_gpr[ret], t0, t1, cpu_env);
+ break;
+ case OPC_DEXTR_S_H:
+ tcg_gen_movi_tl(t0, v2);
+ tcg_gen_movi_tl(t1, v1);
+ gen_helper_dextr_s_h(cpu_gpr[ret], t0, t1, cpu_env);
+ break;
+ case OPC_DEXTRV_S_H:
+ tcg_gen_movi_tl(t0, v2);
+            gen_helper_dextr_s_h(cpu_gpr[ret], t0, v1_t, cpu_env);
+ break;
+ case OPC_DEXTRV_L:
+ tcg_gen_movi_tl(t0, v2);
+ gen_helper_dextr_l(cpu_gpr[ret], t0, v1_t, cpu_env);
+ break;
+ case OPC_DEXTRV_R_L:
+ tcg_gen_movi_tl(t0, v2);
+ gen_helper_dextr_r_l(cpu_gpr[ret], t0, v1_t, cpu_env);
+ break;
+ case OPC_DEXTRV_RS_L:
+ tcg_gen_movi_tl(t0, v2);
+ gen_helper_dextr_rs_l(cpu_gpr[ret], t0, v1_t, cpu_env);
+ break;
+ case OPC_DEXTRV_W:
+ tcg_gen_movi_tl(t0, v2);
+ gen_helper_dextr_w(cpu_gpr[ret], t0, v1_t, cpu_env);
+ break;
+ case OPC_DEXTRV_R_W:
+ tcg_gen_movi_tl(t0, v2);
+ gen_helper_dextr_r_w(cpu_gpr[ret], t0, v1_t, cpu_env);
+ break;
+ case OPC_DEXTRV_RS_W:
+ tcg_gen_movi_tl(t0, v2);
+ gen_helper_dextr_rs_w(cpu_gpr[ret], t0, v1_t, cpu_env);
+ break;
+ }
+ break;
+#endif
+ }
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(v1_t);
+ tcg_temp_free(v2_t);
+}
+
+/* End MIPSDSP functions. */
+
+static void decode_opc_special_r6(CPUMIPSState *env, DisasContext *ctx)
+{
+ int rs, rt, rd, sa;
+ uint32_t op1, op2;
+
+ rs = (ctx->opcode >> 21) & 0x1f;
+ rt = (ctx->opcode >> 16) & 0x1f;
+ rd = (ctx->opcode >> 11) & 0x1f;
+ sa = (ctx->opcode >> 6) & 0x1f;
+
+ op1 = MASK_SPECIAL(ctx->opcode);
+ switch (op1) {
+ case OPC_LSA:
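+        /* LSA rd, rs, rt, sa: rd = sign_extend32((rs << (sa + 1)) + rt);
+         * the 2-bit sa field sits at bits 7:6, and gen_lsa is assumed to
+         * apply the "+ 1" adjustment internally.
+         */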
+ gen_lsa(ctx, op1, rd, rs, rt, extract32(ctx->opcode, 6, 2));
+ break;
+ case OPC_MULT ... OPC_DIVU:
+ op2 = MASK_R6_MULDIV(ctx->opcode);
+ switch (op2) {
+ case R6_OPC_MUL:
+ case R6_OPC_MUH:
+ case R6_OPC_MULU:
+ case R6_OPC_MUHU:
+ case R6_OPC_DIV:
+ case R6_OPC_MOD:
+ case R6_OPC_DIVU:
+ case R6_OPC_MODU:
+ gen_r6_muldiv(ctx, op2, rd, rs, rt);
+ break;
+ default:
+ MIPS_INVAL("special_r6 muldiv");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ case OPC_SELEQZ:
+ case OPC_SELNEZ:
+ gen_cond_move(ctx, op1, rd, rs, rt);
+ break;
+ case R6_OPC_CLO:
+ case R6_OPC_CLZ:
+ if (rt == 0 && sa == 1) {
+            /* The major opcode and function field are shared with the
+               pre-R6 MFHI/MTHI, so the other fields must also be checked
+               to tell them apart. */
+ gen_cl(ctx, op1, rd, rs);
+ } else {
+ generate_exception_end(ctx, EXCP_RI);
+ }
+ break;
+ case R6_OPC_SDBBP:
+ if (is_uhi(extract32(ctx->opcode, 6, 20))) {
+ gen_helper_do_semihosting(cpu_env);
+ } else {
+ if (ctx->hflags & MIPS_HFLAG_SBRI) {
+ generate_exception_end(ctx, EXCP_RI);
+ } else {
+ generate_exception_end(ctx, EXCP_DBp);
+ }
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DLSA:
+ check_mips_64(ctx);
+ gen_lsa(ctx, op1, rd, rs, rt, extract32(ctx->opcode, 6, 2));
+ break;
+ case R6_OPC_DCLO:
+ case R6_OPC_DCLZ:
+ if (rt == 0 && sa == 1) {
+            /* The major opcode and function field are shared with the
+               pre-R6 MFHI/MTHI, so the other fields must also be checked
+               to tell them apart. */
+ check_mips_64(ctx);
+ gen_cl(ctx, op1, rd, rs);
+ } else {
+ generate_exception_end(ctx, EXCP_RI);
+ }
+ break;
+ case OPC_DMULT ... OPC_DDIVU:
+ op2 = MASK_R6_MULDIV(ctx->opcode);
+ switch (op2) {
+ case R6_OPC_DMUL:
+ case R6_OPC_DMUH:
+ case R6_OPC_DMULU:
+ case R6_OPC_DMUHU:
+ case R6_OPC_DDIV:
+ case R6_OPC_DMOD:
+ case R6_OPC_DDIVU:
+ case R6_OPC_DMODU:
+ check_mips_64(ctx);
+ gen_r6_muldiv(ctx, op2, rd, rs, rt);
+ break;
+ default:
+ MIPS_INVAL("special_r6 muldiv");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ break;
+#endif
+ default: /* Invalid */
+ MIPS_INVAL("special_r6");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+}
+
+static void decode_opc_special_legacy(CPUMIPSState *env, DisasContext *ctx)
+{
+ int rs, rt, rd, sa;
+ uint32_t op1;
+
+ rs = (ctx->opcode >> 21) & 0x1f;
+ rt = (ctx->opcode >> 16) & 0x1f;
+ rd = (ctx->opcode >> 11) & 0x1f;
+ sa = (ctx->opcode >> 6) & 0x1f;
+
+ op1 = MASK_SPECIAL(ctx->opcode);
+ switch (op1) {
+ case OPC_MOVN: /* Conditional move */
+ case OPC_MOVZ:
+ check_insn(ctx, ISA_MIPS4 | ISA_MIPS32 |
+ INSN_LOONGSON2E | INSN_LOONGSON2F);
+ gen_cond_move(ctx, op1, rd, rs, rt);
+ break;
+ case OPC_MFHI: /* Move from HI/LO */
+ case OPC_MFLO:
+ gen_HILO(ctx, op1, rs & 3, rd);
+ break;
+ case OPC_MTHI:
+ case OPC_MTLO: /* Move to HI/LO */
+ gen_HILO(ctx, op1, rd & 3, rs);
+ break;
+ case OPC_MOVCI:
+ check_insn(ctx, ISA_MIPS4 | ISA_MIPS32);
+ if (env->CP0_Config1 & (1 << CP0C1_FP)) {
+ check_cp1_enabled(ctx);
+ gen_movci(ctx, rd, rs, (ctx->opcode >> 18) & 0x7,
+ (ctx->opcode >> 16) & 1);
+ } else {
+ generate_exception_err(ctx, EXCP_CpU, 1);
+ }
+ break;
+ case OPC_MULT:
+ case OPC_MULTU:
+ if (sa) {
+ check_insn(ctx, INSN_VR54XX);
+ op1 = MASK_MUL_VR54XX(ctx->opcode);
+ gen_mul_vr54xx(ctx, op1, rd, rs, rt);
+ } else {
+ gen_muldiv(ctx, op1, rd & 3, rs, rt);
+ }
+ break;
+ case OPC_DIV:
+ case OPC_DIVU:
+ gen_muldiv(ctx, op1, 0, rs, rt);
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DMULT ... OPC_DDIVU:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_muldiv(ctx, op1, 0, rs, rt);
+ break;
+#endif
+ case OPC_JR:
+ gen_compute_branch(ctx, op1, 4, rs, rd, sa, 4);
+ break;
+ case OPC_SPIM:
+#ifdef MIPS_STRICT_STANDARD
+ MIPS_INVAL("SPIM");
+ generate_exception_end(ctx, EXCP_RI);
+#else
+ /* Implemented as RI exception for now. */
+ MIPS_INVAL("spim (unofficial)");
+ generate_exception_end(ctx, EXCP_RI);
+#endif
+ break;
+ default: /* Invalid */
+ MIPS_INVAL("special_legacy");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+}
+
+static void decode_opc_special(CPUMIPSState *env, DisasContext *ctx)
+{
+ int rs, rt, rd, sa;
+ uint32_t op1;
+
+ rs = (ctx->opcode >> 21) & 0x1f;
+ rt = (ctx->opcode >> 16) & 0x1f;
+ rd = (ctx->opcode >> 11) & 0x1f;
+ sa = (ctx->opcode >> 6) & 0x1f;
+
+ op1 = MASK_SPECIAL(ctx->opcode);
+ switch (op1) {
+ case OPC_SLL: /* Shift with immediate */
+ if (sa == 5 && rd == 0 &&
+ rs == 0 && rt == 0) { /* PAUSE */
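+            /* On R6, PAUSE in a branch delay slot is a reserved
+               instruction; otherwise it falls through and is generated as
+               SLL with rd == 0, i.e. a no-op. */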
+ if ((ctx->insn_flags & ISA_MIPS32R6) &&
+ (ctx->hflags & MIPS_HFLAG_BMASK)) {
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ }
+ /* Fallthrough */
+ case OPC_SRA:
+ gen_shift_imm(ctx, op1, rd, rt, sa);
+ break;
+ case OPC_SRL:
+ switch ((ctx->opcode >> 21) & 0x1f) {
+ case 1:
+ /* rotr is decoded as srl on non-R2 CPUs */
+ if (ctx->insn_flags & ISA_MIPS32R2) {
+ op1 = OPC_ROTR;
+ }
+ /* Fallthrough */
+ case 0:
+ gen_shift_imm(ctx, op1, rd, rt, sa);
+ break;
+ default:
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ case OPC_ADD ... OPC_SUBU:
+ gen_arith(ctx, op1, rd, rs, rt);
+ break;
+ case OPC_SLLV: /* Shifts */
+ case OPC_SRAV:
+ gen_shift(ctx, op1, rd, rs, rt);
+ break;
+ case OPC_SRLV:
+ switch ((ctx->opcode >> 6) & 0x1f) {
+ case 1:
+ /* rotrv is decoded as srlv on non-R2 CPUs */
+ if (ctx->insn_flags & ISA_MIPS32R2) {
+ op1 = OPC_ROTRV;
+ }
+ /* Fallthrough */
+ case 0:
+ gen_shift(ctx, op1, rd, rs, rt);
+ break;
+ default:
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ case OPC_SLT: /* Set on less than */
+ case OPC_SLTU:
+ gen_slt(ctx, op1, rd, rs, rt);
+ break;
+ case OPC_AND: /* Logic*/
+ case OPC_OR:
+ case OPC_NOR:
+ case OPC_XOR:
+ gen_logic(ctx, op1, rd, rs, rt);
+ break;
+ case OPC_JALR:
+ gen_compute_branch(ctx, op1, 4, rs, rd, sa, 4);
+ break;
+ case OPC_TGE ... OPC_TEQ: /* Traps */
+ case OPC_TNE:
+ check_insn(ctx, ISA_MIPS2);
+ gen_trap(ctx, op1, rs, rt, -1);
+ break;
+ case OPC_LSA: /* OPC_PMON */
+ if ((ctx->insn_flags & ISA_MIPS32R6) ||
+ (env->CP0_Config3 & (1 << CP0C3_MSAP))) {
+ decode_opc_special_r6(env, ctx);
+ } else {
+ /* Pmon entry point, also R4010 selsl */
+#ifdef MIPS_STRICT_STANDARD
+ MIPS_INVAL("PMON / selsl");
+ generate_exception_end(ctx, EXCP_RI);
+#else
+ gen_helper_0e0i(pmon, sa);
+#endif
+ }
+ break;
+ case OPC_SYSCALL:
+ generate_exception_end(ctx, EXCP_SYSCALL);
+ break;
+ case OPC_BREAK:
+ generate_exception_end(ctx, EXCP_BREAK);
+ break;
+ case OPC_SYNC:
+ check_insn(ctx, ISA_MIPS2);
+ gen_sync(extract32(ctx->opcode, 6, 5));
+ break;
+
+#if defined(TARGET_MIPS64)
+ /* MIPS64 specific opcodes */
+ case OPC_DSLL:
+ case OPC_DSRA:
+ case OPC_DSLL32:
+ case OPC_DSRA32:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_shift_imm(ctx, op1, rd, rt, sa);
+ break;
+ case OPC_DSRL:
+ switch ((ctx->opcode >> 21) & 0x1f) {
+ case 1:
+ /* drotr is decoded as dsrl on non-R2 CPUs */
+ if (ctx->insn_flags & ISA_MIPS32R2) {
+ op1 = OPC_DROTR;
+ }
+ /* Fallthrough */
+ case 0:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_shift_imm(ctx, op1, rd, rt, sa);
+ break;
+ default:
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ case OPC_DSRL32:
+ switch ((ctx->opcode >> 21) & 0x1f) {
+ case 1:
+ /* drotr32 is decoded as dsrl32 on non-R2 CPUs */
+ if (ctx->insn_flags & ISA_MIPS32R2) {
+ op1 = OPC_DROTR32;
+ }
+ /* Fallthrough */
+ case 0:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_shift_imm(ctx, op1, rd, rt, sa);
+ break;
+ default:
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ case OPC_DADD ... OPC_DSUBU:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_arith(ctx, op1, rd, rs, rt);
+ break;
+ case OPC_DSLLV:
+ case OPC_DSRAV:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_shift(ctx, op1, rd, rs, rt);
+ break;
+ case OPC_DSRLV:
+ switch ((ctx->opcode >> 6) & 0x1f) {
+ case 1:
+ /* drotrv is decoded as dsrlv on non-R2 CPUs */
+ if (ctx->insn_flags & ISA_MIPS32R2) {
+ op1 = OPC_DROTRV;
+ }
+ /* Fallthrough */
+ case 0:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_shift(ctx, op1, rd, rs, rt);
+ break;
+ default:
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ case OPC_DLSA:
+ if ((ctx->insn_flags & ISA_MIPS32R6) ||
+ (env->CP0_Config3 & (1 << CP0C3_MSAP))) {
+ decode_opc_special_r6(env, ctx);
+ }
+ break;
+#endif
+ default:
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ decode_opc_special_r6(env, ctx);
+ } else {
+ decode_opc_special_legacy(env, ctx);
+ }
+ }
+}
+
+static void decode_opc_special2_legacy(CPUMIPSState *env, DisasContext *ctx)
+{
+ int rs, rt, rd;
+ uint32_t op1;
+
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+
+ rs = (ctx->opcode >> 21) & 0x1f;
+ rt = (ctx->opcode >> 16) & 0x1f;
+ rd = (ctx->opcode >> 11) & 0x1f;
+
+ op1 = MASK_SPECIAL2(ctx->opcode);
+ switch (op1) {
+ case OPC_MADD ... OPC_MADDU: /* Multiply and add/sub */
+ case OPC_MSUB ... OPC_MSUBU:
+ check_insn(ctx, ISA_MIPS32);
+ gen_muldiv(ctx, op1, rd & 3, rs, rt);
+ break;
+ case OPC_MUL:
+ gen_arith(ctx, op1, rd, rs, rt);
+ break;
+ case OPC_DIV_G_2F:
+ case OPC_DIVU_G_2F:
+ case OPC_MULT_G_2F:
+ case OPC_MULTU_G_2F:
+ case OPC_MOD_G_2F:
+ case OPC_MODU_G_2F:
+ check_insn(ctx, INSN_LOONGSON2F);
+ gen_loongson_integer(ctx, op1, rd, rs, rt);
+ break;
+ case OPC_CLO:
+ case OPC_CLZ:
+ check_insn(ctx, ISA_MIPS32);
+ gen_cl(ctx, op1, rd, rs);
+ break;
+ case OPC_SDBBP:
+ if (is_uhi(extract32(ctx->opcode, 6, 20))) {
+ gen_helper_do_semihosting(cpu_env);
+ } else {
+ /* XXX: not clear which exception should be raised
+ * when in debug mode...
+ */
+ check_insn(ctx, ISA_MIPS32);
+ generate_exception_end(ctx, EXCP_DBp);
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DCLO:
+ case OPC_DCLZ:
+ check_insn(ctx, ISA_MIPS64);
+ check_mips_64(ctx);
+ gen_cl(ctx, op1, rd, rs);
+ break;
+ case OPC_DMULT_G_2F:
+ case OPC_DMULTU_G_2F:
+ case OPC_DDIV_G_2F:
+ case OPC_DDIVU_G_2F:
+ case OPC_DMOD_G_2F:
+ case OPC_DMODU_G_2F:
+ check_insn(ctx, INSN_LOONGSON2F);
+ gen_loongson_integer(ctx, op1, rd, rs, rt);
+ break;
+#endif
+ default: /* Invalid */
+ MIPS_INVAL("special2_legacy");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+}
+
+static void decode_opc_special3_r6(CPUMIPSState *env, DisasContext *ctx)
+{
+ int rs, rt, rd, sa;
+ uint32_t op1, op2;
+ int16_t imm;
+
+ rs = (ctx->opcode >> 21) & 0x1f;
+ rt = (ctx->opcode >> 16) & 0x1f;
+ rd = (ctx->opcode >> 11) & 0x1f;
+ sa = (ctx->opcode >> 6) & 0x1f;
+ imm = (int16_t)ctx->opcode >> 7;
+
+ op1 = MASK_SPECIAL3(ctx->opcode);
+ switch (op1) {
+ case R6_OPC_PREF:
+ if (rt >= 24) {
+ /* hint codes 24-31 are reserved and signal RI */
+ generate_exception_end(ctx, EXCP_RI);
+ }
+ /* Treat as NOP. */
+ break;
+ case R6_OPC_CACHE:
+ check_cp0_enabled(ctx);
+ if (ctx->hflags & MIPS_HFLAG_ITC_CACHE) {
+ gen_cache_operation(ctx, rt, rs, imm);
+ }
+ break;
+ case R6_OPC_SC:
+ gen_st_cond(ctx, op1, rt, rs, imm);
+ break;
+ case R6_OPC_LL:
+ gen_ld(ctx, op1, rt, rs, imm);
+ break;
+ case OPC_BSHFL:
+ {
+ if (rd == 0) {
+ /* Treat as NOP. */
+ break;
+ }
+ op2 = MASK_BSHFL(ctx->opcode);
+ switch (op2) {
+ case OPC_ALIGN ... OPC_ALIGN_END:
+ gen_align(ctx, OPC_ALIGN, rd, rs, rt, sa & 3);
+ break;
+ case OPC_BITSWAP:
+ gen_bitswap(ctx, op2, rd, rt);
+ break;
+ }
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case R6_OPC_SCD:
+ gen_st_cond(ctx, op1, rt, rs, imm);
+ break;
+ case R6_OPC_LLD:
+ gen_ld(ctx, op1, rt, rs, imm);
+ break;
+ case OPC_DBSHFL:
+ check_mips_64(ctx);
+ {
+ if (rd == 0) {
+ /* Treat as NOP. */
+ break;
+ }
+ op2 = MASK_DBSHFL(ctx->opcode);
+ switch (op2) {
+ case OPC_DALIGN ... OPC_DALIGN_END:
+ gen_align(ctx, OPC_DALIGN, rd, rs, rt, sa & 7);
+ break;
+ case OPC_DBITSWAP:
+ gen_bitswap(ctx, op2, rd, rt);
+ break;
+ }
+
+ break;
+#endif
+ default: /* Invalid */
+ MIPS_INVAL("special3_r6");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+}
+
+static void decode_opc_special3_legacy(CPUMIPSState *env, DisasContext *ctx)
+{
+ int rs, rt, rd;
+ uint32_t op1, op2;
+
+ rs = (ctx->opcode >> 21) & 0x1f;
+ rt = (ctx->opcode >> 16) & 0x1f;
+ rd = (ctx->opcode >> 11) & 0x1f;
+
+ op1 = MASK_SPECIAL3(ctx->opcode);
+ switch (op1) {
+ case OPC_DIV_G_2E ... OPC_DIVU_G_2E:
+ case OPC_MOD_G_2E ... OPC_MODU_G_2E:
+ case OPC_MULT_G_2E ... OPC_MULTU_G_2E:
+ /* OPC_MULT_G_2E, OPC_ADDUH_QB_DSP, OPC_MUL_PH_DSP have
+ * the same mask and op1. */
+ if ((ctx->insn_flags & ASE_DSPR2) && (op1 == OPC_MULT_G_2E)) {
+ op2 = MASK_ADDUH_QB(ctx->opcode);
+ switch (op2) {
+ case OPC_ADDUH_QB:
+ case OPC_ADDUH_R_QB:
+ case OPC_ADDQH_PH:
+ case OPC_ADDQH_R_PH:
+ case OPC_ADDQH_W:
+ case OPC_ADDQH_R_W:
+ case OPC_SUBUH_QB:
+ case OPC_SUBUH_R_QB:
+ case OPC_SUBQH_PH:
+ case OPC_SUBQH_R_PH:
+ case OPC_SUBQH_W:
+ case OPC_SUBQH_R_W:
+ gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt);
+ break;
+ case OPC_MUL_PH:
+ case OPC_MUL_S_PH:
+ case OPC_MULQ_S_W:
+ case OPC_MULQ_RS_W:
+ gen_mipsdsp_multiply(ctx, op1, op2, rd, rs, rt, 1);
+ break;
+ default:
+ MIPS_INVAL("MASK ADDUH.QB");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ } else if (ctx->insn_flags & INSN_LOONGSON2E) {
+ gen_loongson_integer(ctx, op1, rd, rs, rt);
+ } else {
+ generate_exception_end(ctx, EXCP_RI);
+ }
+ break;
+ case OPC_LX_DSP:
+ op2 = MASK_LX(ctx->opcode);
+ switch (op2) {
+#if defined(TARGET_MIPS64)
+ case OPC_LDX:
+#endif
+ case OPC_LBUX:
+ case OPC_LHX:
+ case OPC_LWX:
+ gen_mipsdsp_ld(ctx, op2, rd, rs, rt);
+ break;
+ default: /* Invalid */
+ MIPS_INVAL("MASK LX");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ case OPC_ABSQ_S_PH_DSP:
+ op2 = MASK_ABSQ_S_PH(ctx->opcode);
+ switch (op2) {
+ case OPC_ABSQ_S_QB:
+ case OPC_ABSQ_S_PH:
+ case OPC_ABSQ_S_W:
+ case OPC_PRECEQ_W_PHL:
+ case OPC_PRECEQ_W_PHR:
+ case OPC_PRECEQU_PH_QBL:
+ case OPC_PRECEQU_PH_QBR:
+ case OPC_PRECEQU_PH_QBLA:
+ case OPC_PRECEQU_PH_QBRA:
+ case OPC_PRECEU_PH_QBL:
+ case OPC_PRECEU_PH_QBR:
+ case OPC_PRECEU_PH_QBLA:
+ case OPC_PRECEU_PH_QBRA:
+ gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt);
+ break;
+ case OPC_BITREV:
+ case OPC_REPL_QB:
+ case OPC_REPLV_QB:
+ case OPC_REPL_PH:
+ case OPC_REPLV_PH:
+ gen_mipsdsp_bitinsn(ctx, op1, op2, rd, rt);
+ break;
+ default:
+ MIPS_INVAL("MASK ABSQ_S.PH");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ case OPC_ADDU_QB_DSP:
+ op2 = MASK_ADDU_QB(ctx->opcode);
+ switch (op2) {
+ case OPC_ADDQ_PH:
+ case OPC_ADDQ_S_PH:
+ case OPC_ADDQ_S_W:
+ case OPC_ADDU_QB:
+ case OPC_ADDU_S_QB:
+ case OPC_ADDU_PH:
+ case OPC_ADDU_S_PH:
+ case OPC_SUBQ_PH:
+ case OPC_SUBQ_S_PH:
+ case OPC_SUBQ_S_W:
+ case OPC_SUBU_QB:
+ case OPC_SUBU_S_QB:
+ case OPC_SUBU_PH:
+ case OPC_SUBU_S_PH:
+ case OPC_ADDSC:
+ case OPC_ADDWC:
+ case OPC_MODSUB:
+ case OPC_RADDU_W_QB:
+ gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt);
+ break;
+ case OPC_MULEU_S_PH_QBL:
+ case OPC_MULEU_S_PH_QBR:
+ case OPC_MULQ_RS_PH:
+ case OPC_MULEQ_S_W_PHL:
+ case OPC_MULEQ_S_W_PHR:
+ case OPC_MULQ_S_PH:
+ gen_mipsdsp_multiply(ctx, op1, op2, rd, rs, rt, 1);
+ break;
+ default: /* Invalid */
+ MIPS_INVAL("MASK ADDU.QB");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ case OPC_CMPU_EQ_QB_DSP:
+ op2 = MASK_CMPU_EQ_QB(ctx->opcode);
+ switch (op2) {
+ case OPC_PRECR_SRA_PH_W:
+ case OPC_PRECR_SRA_R_PH_W:
+ gen_mipsdsp_arith(ctx, op1, op2, rt, rs, rd);
+ break;
+ case OPC_PRECR_QB_PH:
+ case OPC_PRECRQ_QB_PH:
+ case OPC_PRECRQ_PH_W:
+ case OPC_PRECRQ_RS_PH_W:
+ case OPC_PRECRQU_S_QB_PH:
+ gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt);
+ break;
+ case OPC_CMPU_EQ_QB:
+ case OPC_CMPU_LT_QB:
+ case OPC_CMPU_LE_QB:
+ case OPC_CMP_EQ_PH:
+ case OPC_CMP_LT_PH:
+ case OPC_CMP_LE_PH:
+ gen_mipsdsp_add_cmp_pick(ctx, op1, op2, rd, rs, rt, 0);
+ break;
+ case OPC_CMPGU_EQ_QB:
+ case OPC_CMPGU_LT_QB:
+ case OPC_CMPGU_LE_QB:
+ case OPC_CMPGDU_EQ_QB:
+ case OPC_CMPGDU_LT_QB:
+ case OPC_CMPGDU_LE_QB:
+ case OPC_PICK_QB:
+ case OPC_PICK_PH:
+ case OPC_PACKRL_PH:
+ gen_mipsdsp_add_cmp_pick(ctx, op1, op2, rd, rs, rt, 1);
+ break;
+ default: /* Invalid */
+ MIPS_INVAL("MASK CMPU.EQ.QB");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ case OPC_SHLL_QB_DSP:
+ gen_mipsdsp_shift(ctx, op1, rd, rs, rt);
+ break;
+ case OPC_DPA_W_PH_DSP:
+ op2 = MASK_DPA_W_PH(ctx->opcode);
+ switch (op2) {
+ case OPC_DPAU_H_QBL:
+ case OPC_DPAU_H_QBR:
+ case OPC_DPSU_H_QBL:
+ case OPC_DPSU_H_QBR:
+ case OPC_DPA_W_PH:
+ case OPC_DPAX_W_PH:
+ case OPC_DPAQ_S_W_PH:
+ case OPC_DPAQX_S_W_PH:
+ case OPC_DPAQX_SA_W_PH:
+ case OPC_DPS_W_PH:
+ case OPC_DPSX_W_PH:
+ case OPC_DPSQ_S_W_PH:
+ case OPC_DPSQX_S_W_PH:
+ case OPC_DPSQX_SA_W_PH:
+ case OPC_MULSAQ_S_W_PH:
+ case OPC_DPAQ_SA_L_W:
+ case OPC_DPSQ_SA_L_W:
+ case OPC_MAQ_S_W_PHL:
+ case OPC_MAQ_S_W_PHR:
+ case OPC_MAQ_SA_W_PHL:
+ case OPC_MAQ_SA_W_PHR:
+ case OPC_MULSA_W_PH:
+ gen_mipsdsp_multiply(ctx, op1, op2, rd, rs, rt, 0);
+ break;
+ default: /* Invalid */
+ MIPS_INVAL("MASK DPAW.PH");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ case OPC_INSV_DSP:
+ op2 = MASK_INSV(ctx->opcode);
+ switch (op2) {
+ case OPC_INSV:
+ check_dsp(ctx);
+ {
+ TCGv t0, t1;
+
+ if (rt == 0) {
+ break;
+ }
+
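+                /* INSV inserts a bit field from rs into rt; the field
+                   position and size are taken from DSPControl inside the
+                   helper.  rt == 0 above is treated as a NOP. */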
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+
+ gen_load_gpr(t0, rt);
+ gen_load_gpr(t1, rs);
+
+ gen_helper_insv(cpu_gpr[rt], cpu_env, t1, t0);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ break;
+ }
+ default: /* Invalid */
+ MIPS_INVAL("MASK INSV");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ case OPC_APPEND_DSP:
+ gen_mipsdsp_append(env, ctx, op1, rt, rs, rd);
+ break;
+ case OPC_EXTR_W_DSP:
+ op2 = MASK_EXTR_W(ctx->opcode);
+ switch (op2) {
+ case OPC_EXTR_W:
+ case OPC_EXTR_R_W:
+ case OPC_EXTR_RS_W:
+ case OPC_EXTR_S_H:
+ case OPC_EXTRV_S_H:
+ case OPC_EXTRV_W:
+ case OPC_EXTRV_R_W:
+ case OPC_EXTRV_RS_W:
+ case OPC_EXTP:
+ case OPC_EXTPV:
+ case OPC_EXTPDP:
+ case OPC_EXTPDPV:
+ gen_mipsdsp_accinsn(ctx, op1, op2, rt, rs, rd, 1);
+ break;
+ case OPC_RDDSP:
+ gen_mipsdsp_accinsn(ctx, op1, op2, rd, rs, rt, 1);
+ break;
+ case OPC_SHILO:
+ case OPC_SHILOV:
+ case OPC_MTHLIP:
+ case OPC_WRDSP:
+ gen_mipsdsp_accinsn(ctx, op1, op2, rd, rs, rt, 0);
+ break;
+ default: /* Invalid */
+ MIPS_INVAL("MASK EXTR.W");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DDIV_G_2E ... OPC_DDIVU_G_2E:
+ case OPC_DMULT_G_2E ... OPC_DMULTU_G_2E:
+ case OPC_DMOD_G_2E ... OPC_DMODU_G_2E:
+ check_insn(ctx, INSN_LOONGSON2E);
+ gen_loongson_integer(ctx, op1, rd, rs, rt);
+ break;
+ case OPC_ABSQ_S_QH_DSP:
+ op2 = MASK_ABSQ_S_QH(ctx->opcode);
+ switch (op2) {
+ case OPC_PRECEQ_L_PWL:
+ case OPC_PRECEQ_L_PWR:
+ case OPC_PRECEQ_PW_QHL:
+ case OPC_PRECEQ_PW_QHR:
+ case OPC_PRECEQ_PW_QHLA:
+ case OPC_PRECEQ_PW_QHRA:
+ case OPC_PRECEQU_QH_OBL:
+ case OPC_PRECEQU_QH_OBR:
+ case OPC_PRECEQU_QH_OBLA:
+ case OPC_PRECEQU_QH_OBRA:
+ case OPC_PRECEU_QH_OBL:
+ case OPC_PRECEU_QH_OBR:
+ case OPC_PRECEU_QH_OBLA:
+ case OPC_PRECEU_QH_OBRA:
+ case OPC_ABSQ_S_OB:
+ case OPC_ABSQ_S_PW:
+ case OPC_ABSQ_S_QH:
+ gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt);
+ break;
+ case OPC_REPL_OB:
+ case OPC_REPL_PW:
+ case OPC_REPL_QH:
+ case OPC_REPLV_OB:
+ case OPC_REPLV_PW:
+ case OPC_REPLV_QH:
+ gen_mipsdsp_bitinsn(ctx, op1, op2, rd, rt);
+ break;
+ default: /* Invalid */
+ MIPS_INVAL("MASK ABSQ_S.QH");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ case OPC_ADDU_OB_DSP:
+ op2 = MASK_ADDU_OB(ctx->opcode);
+ switch (op2) {
+ case OPC_RADDU_L_OB:
+ case OPC_SUBQ_PW:
+ case OPC_SUBQ_S_PW:
+ case OPC_SUBQ_QH:
+ case OPC_SUBQ_S_QH:
+ case OPC_SUBU_OB:
+ case OPC_SUBU_S_OB:
+ case OPC_SUBU_QH:
+ case OPC_SUBU_S_QH:
+ case OPC_SUBUH_OB:
+ case OPC_SUBUH_R_OB:
+ case OPC_ADDQ_PW:
+ case OPC_ADDQ_S_PW:
+ case OPC_ADDQ_QH:
+ case OPC_ADDQ_S_QH:
+ case OPC_ADDU_OB:
+ case OPC_ADDU_S_OB:
+ case OPC_ADDU_QH:
+ case OPC_ADDU_S_QH:
+ case OPC_ADDUH_OB:
+ case OPC_ADDUH_R_OB:
+ gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt);
+ break;
+ case OPC_MULEQ_S_PW_QHL:
+ case OPC_MULEQ_S_PW_QHR:
+ case OPC_MULEU_S_QH_OBL:
+ case OPC_MULEU_S_QH_OBR:
+ case OPC_MULQ_RS_QH:
+ gen_mipsdsp_multiply(ctx, op1, op2, rd, rs, rt, 1);
+ break;
+ default: /* Invalid */
+ MIPS_INVAL("MASK ADDU.OB");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ case OPC_CMPU_EQ_OB_DSP:
+ op2 = MASK_CMPU_EQ_OB(ctx->opcode);
+ switch (op2) {
+ case OPC_PRECR_SRA_QH_PW:
+ case OPC_PRECR_SRA_R_QH_PW:
+ /* Return value is rt. */
+ gen_mipsdsp_arith(ctx, op1, op2, rt, rs, rd);
+ break;
+ case OPC_PRECR_OB_QH:
+ case OPC_PRECRQ_OB_QH:
+ case OPC_PRECRQ_PW_L:
+ case OPC_PRECRQ_QH_PW:
+ case OPC_PRECRQ_RS_QH_PW:
+ case OPC_PRECRQU_S_OB_QH:
+ gen_mipsdsp_arith(ctx, op1, op2, rd, rs, rt);
+ break;
+ case OPC_CMPU_EQ_OB:
+ case OPC_CMPU_LT_OB:
+ case OPC_CMPU_LE_OB:
+ case OPC_CMP_EQ_QH:
+ case OPC_CMP_LT_QH:
+ case OPC_CMP_LE_QH:
+ case OPC_CMP_EQ_PW:
+ case OPC_CMP_LT_PW:
+ case OPC_CMP_LE_PW:
+ gen_mipsdsp_add_cmp_pick(ctx, op1, op2, rd, rs, rt, 0);
+ break;
+ case OPC_CMPGDU_EQ_OB:
+ case OPC_CMPGDU_LT_OB:
+ case OPC_CMPGDU_LE_OB:
+ case OPC_CMPGU_EQ_OB:
+ case OPC_CMPGU_LT_OB:
+ case OPC_CMPGU_LE_OB:
+ case OPC_PACKRL_PW:
+ case OPC_PICK_OB:
+ case OPC_PICK_PW:
+ case OPC_PICK_QH:
+ gen_mipsdsp_add_cmp_pick(ctx, op1, op2, rd, rs, rt, 1);
+ break;
+ default: /* Invalid */
+ MIPS_INVAL("MASK CMPU_EQ.OB");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ case OPC_DAPPEND_DSP:
+ gen_mipsdsp_append(env, ctx, op1, rt, rs, rd);
+ break;
+ case OPC_DEXTR_W_DSP:
+ op2 = MASK_DEXTR_W(ctx->opcode);
+ switch (op2) {
+ case OPC_DEXTP:
+ case OPC_DEXTPDP:
+ case OPC_DEXTPDPV:
+ case OPC_DEXTPV:
+ case OPC_DEXTR_L:
+ case OPC_DEXTR_R_L:
+ case OPC_DEXTR_RS_L:
+ case OPC_DEXTR_W:
+ case OPC_DEXTR_R_W:
+ case OPC_DEXTR_RS_W:
+ case OPC_DEXTR_S_H:
+ case OPC_DEXTRV_L:
+ case OPC_DEXTRV_R_L:
+ case OPC_DEXTRV_RS_L:
+ case OPC_DEXTRV_S_H:
+ case OPC_DEXTRV_W:
+ case OPC_DEXTRV_R_W:
+ case OPC_DEXTRV_RS_W:
+ gen_mipsdsp_accinsn(ctx, op1, op2, rt, rs, rd, 1);
+ break;
+ case OPC_DMTHLIP:
+ case OPC_DSHILO:
+ case OPC_DSHILOV:
+ gen_mipsdsp_accinsn(ctx, op1, op2, rd, rs, rt, 0);
+ break;
+ default: /* Invalid */
+ MIPS_INVAL("MASK EXTR.W");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ case OPC_DPAQ_W_QH_DSP:
+ op2 = MASK_DPAQ_W_QH(ctx->opcode);
+ switch (op2) {
+ case OPC_DPAU_H_OBL:
+ case OPC_DPAU_H_OBR:
+ case OPC_DPSU_H_OBL:
+ case OPC_DPSU_H_OBR:
+ case OPC_DPA_W_QH:
+ case OPC_DPAQ_S_W_QH:
+ case OPC_DPS_W_QH:
+ case OPC_DPSQ_S_W_QH:
+ case OPC_MULSAQ_S_W_QH:
+ case OPC_DPAQ_SA_L_PW:
+ case OPC_DPSQ_SA_L_PW:
+ case OPC_MULSAQ_S_L_PW:
+ gen_mipsdsp_multiply(ctx, op1, op2, rd, rs, rt, 0);
+ break;
+ case OPC_MAQ_S_W_QHLL:
+ case OPC_MAQ_S_W_QHLR:
+ case OPC_MAQ_S_W_QHRL:
+ case OPC_MAQ_S_W_QHRR:
+ case OPC_MAQ_SA_W_QHLL:
+ case OPC_MAQ_SA_W_QHLR:
+ case OPC_MAQ_SA_W_QHRL:
+ case OPC_MAQ_SA_W_QHRR:
+ case OPC_MAQ_S_L_PWL:
+ case OPC_MAQ_S_L_PWR:
+ case OPC_DMADD:
+ case OPC_DMADDU:
+ case OPC_DMSUB:
+ case OPC_DMSUBU:
+ gen_mipsdsp_multiply(ctx, op1, op2, rd, rs, rt, 0);
+ break;
+ default: /* Invalid */
+ MIPS_INVAL("MASK DPAQ.W.QH");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ case OPC_DINSV_DSP:
+ op2 = MASK_INSV(ctx->opcode);
+ switch (op2) {
+ case OPC_DINSV:
+ {
+ TCGv t0, t1;
+
+ if (rt == 0) {
+ break;
+ }
+ check_dsp(ctx);
+
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+
+ gen_load_gpr(t0, rt);
+ gen_load_gpr(t1, rs);
+
+ gen_helper_dinsv(cpu_gpr[rt], cpu_env, t1, t0);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ break;
+ }
+ default: /* Invalid */
+ MIPS_INVAL("MASK DINSV");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ case OPC_SHLL_OB_DSP:
+ gen_mipsdsp_shift(ctx, op1, rd, rs, rt);
+ break;
+#endif
+ default: /* Invalid */
+ MIPS_INVAL("special3_legacy");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+}
+
+static void decode_opc_special3(CPUMIPSState *env, DisasContext *ctx)
+{
+ int rs, rt, rd, sa;
+ uint32_t op1, op2;
+
+ rs = (ctx->opcode >> 21) & 0x1f;
+ rt = (ctx->opcode >> 16) & 0x1f;
+ rd = (ctx->opcode >> 11) & 0x1f;
+ sa = (ctx->opcode >> 6) & 0x1f;
+
+ op1 = MASK_SPECIAL3(ctx->opcode);
+ switch (op1) {
+ case OPC_EXT:
+ case OPC_INS:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_bitops(ctx, op1, rt, rs, sa, rd);
+ break;
+ case OPC_BSHFL:
+ op2 = MASK_BSHFL(ctx->opcode);
+ switch (op2) {
+ case OPC_ALIGN ... OPC_ALIGN_END:
+ case OPC_BITSWAP:
+ check_insn(ctx, ISA_MIPS32R6);
+ decode_opc_special3_r6(env, ctx);
+ break;
+ default:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_bshfl(ctx, op2, rt, rd);
+ break;
+ }
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DEXTM ... OPC_DEXT:
+ case OPC_DINSM ... OPC_DINS:
+ check_insn(ctx, ISA_MIPS64R2);
+ check_mips_64(ctx);
+ gen_bitops(ctx, op1, rt, rs, sa, rd);
+ break;
+ case OPC_DBSHFL:
+ op2 = MASK_DBSHFL(ctx->opcode);
+ switch (op2) {
+ case OPC_DALIGN ... OPC_DALIGN_END:
+ case OPC_DBITSWAP:
+ check_insn(ctx, ISA_MIPS32R6);
+ decode_opc_special3_r6(env, ctx);
+ break;
+ default:
+ check_insn(ctx, ISA_MIPS64R2);
+ check_mips_64(ctx);
+ gen_bshfl(ctx, op2, rt, rd);
+ break;
+ }
+ break;
+#endif
+ case OPC_RDHWR:
+ gen_rdhwr(ctx, rt, rd, extract32(ctx->opcode, 6, 3));
+ break;
+ case OPC_FORK:
+ check_insn(ctx, ASE_MT);
+ {
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+
+ gen_load_gpr(t0, rt);
+ gen_load_gpr(t1, rs);
+ gen_helper_fork(t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ }
+ break;
+ case OPC_YIELD:
+ check_insn(ctx, ASE_MT);
+ {
+ TCGv t0 = tcg_temp_new();
+
+ gen_load_gpr(t0, rs);
+ gen_helper_yield(t0, cpu_env, t0);
+ gen_store_gpr(t0, rd);
+ tcg_temp_free(t0);
+ }
+ break;
+ default:
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ decode_opc_special3_r6(env, ctx);
+ } else {
+ decode_opc_special3_legacy(env, ctx);
+ }
+ }
+}
+
+/* MIPS SIMD Architecture (MSA) */
+static inline int check_msa_access(DisasContext *ctx)
+{
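+    /* MSA needs a 64-bit FPU register file: raise RI if the FPU is
+       enabled in 32-bit mode.  If MSA itself is disabled, raise MSADIS
+       when the MSA ASE is present and RI when it is not. */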
+ if (unlikely((ctx->hflags & MIPS_HFLAG_FPU) &&
+ !(ctx->hflags & MIPS_HFLAG_F64))) {
+ generate_exception_end(ctx, EXCP_RI);
+ return 0;
+ }
+
+ if (unlikely(!(ctx->hflags & MIPS_HFLAG_MSA))) {
+ if (ctx->insn_flags & ASE_MSA) {
+ generate_exception_end(ctx, EXCP_MSADIS);
+ return 0;
+ } else {
+ generate_exception_end(ctx, EXCP_RI);
+ return 0;
+ }
+ }
+ return 1;
+}
+
+static void gen_check_zero_element(TCGv tresult, uint8_t df, uint8_t wt)
+{
+    /* Generate TCG ops that set tresult when any element of MSA register
+       wt is zero.  Note: this only works with MSA_WRLEN = 128. */
+ uint64_t eval_zero_or_big = 0;
+ uint64_t eval_big = 0;
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ switch (df) {
+ case DF_BYTE:
+ eval_zero_or_big = 0x0101010101010101ULL;
+ eval_big = 0x8080808080808080ULL;
+ break;
+ case DF_HALF:
+ eval_zero_or_big = 0x0001000100010001ULL;
+ eval_big = 0x8000800080008000ULL;
+ break;
+ case DF_WORD:
+ eval_zero_or_big = 0x0000000100000001ULL;
+ eval_big = 0x8000000080000000ULL;
+ break;
+ case DF_DOUBLE:
+ eval_zero_or_big = 0x0000000000000001ULL;
+ eval_big = 0x8000000000000000ULL;
+ break;
+ }
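+    /* Per 64-bit half of the 128-bit register, (x - lsb_pattern) & ~x &
+       msb_pattern is non-zero iff at least one element of x is zero
+       (the classic "has a zero lane" trick, per data format df). */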
+ tcg_gen_subi_i64(t0, msa_wr_d[wt<<1], eval_zero_or_big);
+ tcg_gen_andc_i64(t0, t0, msa_wr_d[wt<<1]);
+ tcg_gen_andi_i64(t0, t0, eval_big);
+ tcg_gen_subi_i64(t1, msa_wr_d[(wt<<1)+1], eval_zero_or_big);
+ tcg_gen_andc_i64(t1, t1, msa_wr_d[(wt<<1)+1]);
+ tcg_gen_andi_i64(t1, t1, eval_big);
+ tcg_gen_or_i64(t0, t0, t1);
+    /* If the result is all zeroes, no element is zero; if any bit is set,
+       at least one element is zero. */
+ tcg_gen_setcondi_i64(TCG_COND_NE, t0, t0, 0);
+ tcg_gen_trunc_i64_tl(tresult, t0);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+}
+
+static void gen_msa_branch(CPUMIPSState *env, DisasContext *ctx, uint32_t op1)
+{
+ uint8_t df = (ctx->opcode >> 21) & 0x3;
+ uint8_t wt = (ctx->opcode >> 16) & 0x1f;
+ int64_t s16 = (int16_t)ctx->opcode;
+
+ check_msa_access(ctx);
+
+ if (ctx->insn_flags & ISA_MIPS32R6 && ctx->hflags & MIPS_HFLAG_BMASK) {
+ generate_exception_end(ctx, EXCP_RI);
+ return;
+ }
+ switch (op1) {
+ case OPC_BZ_V:
+ case OPC_BNZ_V:
+ {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ tcg_gen_or_i64(t0, msa_wr_d[wt<<1], msa_wr_d[(wt<<1)+1]);
+ tcg_gen_setcondi_i64((op1 == OPC_BZ_V) ?
+ TCG_COND_EQ : TCG_COND_NE, t0, t0, 0);
+ tcg_gen_trunc_i64_tl(bcond, t0);
+ tcg_temp_free_i64(t0);
+ }
+ break;
+ case OPC_BZ_B:
+ case OPC_BZ_H:
+ case OPC_BZ_W:
+ case OPC_BZ_D:
+ gen_check_zero_element(bcond, df, wt);
+ break;
+ case OPC_BNZ_B:
+ case OPC_BNZ_H:
+ case OPC_BNZ_W:
+ case OPC_BNZ_D:
+ gen_check_zero_element(bcond, df, wt);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, bcond, bcond, 0);
+ break;
+ }
+
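+    /* MSA branches use a 16-bit word offset relative to the instruction
+       following the branch and always have a 32-bit delay slot. */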
+ ctx->btarget = ctx->pc + (s16 << 2) + 4;
+
+ ctx->hflags |= MIPS_HFLAG_BC;
+ ctx->hflags |= MIPS_HFLAG_BDS32;
+}
+
+static void gen_msa_i8(CPUMIPSState *env, DisasContext *ctx)
+{
+#define MASK_MSA_I8(op) (MASK_MSA_MINOR(op) | (op & (0x03 << 24)))
+ uint8_t i8 = (ctx->opcode >> 16) & 0xff;
+ uint8_t ws = (ctx->opcode >> 11) & 0x1f;
+ uint8_t wd = (ctx->opcode >> 6) & 0x1f;
+
+ TCGv_i32 twd = tcg_const_i32(wd);
+ TCGv_i32 tws = tcg_const_i32(ws);
+ TCGv_i32 ti8 = tcg_const_i32(i8);
+
+ switch (MASK_MSA_I8(ctx->opcode)) {
+ case OPC_ANDI_B:
+ gen_helper_msa_andi_b(cpu_env, twd, tws, ti8);
+ break;
+ case OPC_ORI_B:
+ gen_helper_msa_ori_b(cpu_env, twd, tws, ti8);
+ break;
+ case OPC_NORI_B:
+ gen_helper_msa_nori_b(cpu_env, twd, tws, ti8);
+ break;
+ case OPC_XORI_B:
+ gen_helper_msa_xori_b(cpu_env, twd, tws, ti8);
+ break;
+ case OPC_BMNZI_B:
+ gen_helper_msa_bmnzi_b(cpu_env, twd, tws, ti8);
+ break;
+ case OPC_BMZI_B:
+ gen_helper_msa_bmzi_b(cpu_env, twd, tws, ti8);
+ break;
+ case OPC_BSELI_B:
+ gen_helper_msa_bseli_b(cpu_env, twd, tws, ti8);
+ break;
+ case OPC_SHF_B:
+ case OPC_SHF_H:
+ case OPC_SHF_W:
+ {
+ uint8_t df = (ctx->opcode >> 24) & 0x3;
+ if (df == DF_DOUBLE) {
+ generate_exception_end(ctx, EXCP_RI);
+ } else {
+ TCGv_i32 tdf = tcg_const_i32(df);
+ gen_helper_msa_shf_df(cpu_env, tdf, twd, tws, ti8);
+ tcg_temp_free_i32(tdf);
+ }
+ }
+ break;
+ default:
+ MIPS_INVAL("MSA instruction");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+
+ tcg_temp_free_i32(twd);
+ tcg_temp_free_i32(tws);
+ tcg_temp_free_i32(ti8);
+}
+
+static void gen_msa_i5(CPUMIPSState *env, DisasContext *ctx)
+{
+#define MASK_MSA_I5(op) (MASK_MSA_MINOR(op) | (op & (0x7 << 23)))
+ uint8_t df = (ctx->opcode >> 21) & 0x3;
+ int8_t s5 = (int8_t) sextract32(ctx->opcode, 16, 5);
+ uint8_t u5 = (ctx->opcode >> 16) & 0x1f;
+ uint8_t ws = (ctx->opcode >> 11) & 0x1f;
+ uint8_t wd = (ctx->opcode >> 6) & 0x1f;
+
+ TCGv_i32 tdf = tcg_const_i32(df);
+ TCGv_i32 twd = tcg_const_i32(wd);
+ TCGv_i32 tws = tcg_const_i32(ws);
+ TCGv_i32 timm = tcg_temp_new_i32();
+ tcg_gen_movi_i32(timm, u5);
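+    /* timm defaults to the zero-extended u5 immediate; the signed
+       operations below overwrite it with the sign-extended s5 value. */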
+
+ switch (MASK_MSA_I5(ctx->opcode)) {
+ case OPC_ADDVI_df:
+ gen_helper_msa_addvi_df(cpu_env, tdf, twd, tws, timm);
+ break;
+ case OPC_SUBVI_df:
+ gen_helper_msa_subvi_df(cpu_env, tdf, twd, tws, timm);
+ break;
+ case OPC_MAXI_S_df:
+ tcg_gen_movi_i32(timm, s5);
+ gen_helper_msa_maxi_s_df(cpu_env, tdf, twd, tws, timm);
+ break;
+ case OPC_MAXI_U_df:
+ gen_helper_msa_maxi_u_df(cpu_env, tdf, twd, tws, timm);
+ break;
+ case OPC_MINI_S_df:
+ tcg_gen_movi_i32(timm, s5);
+ gen_helper_msa_mini_s_df(cpu_env, tdf, twd, tws, timm);
+ break;
+ case OPC_MINI_U_df:
+ gen_helper_msa_mini_u_df(cpu_env, tdf, twd, tws, timm);
+ break;
+ case OPC_CEQI_df:
+ tcg_gen_movi_i32(timm, s5);
+ gen_helper_msa_ceqi_df(cpu_env, tdf, twd, tws, timm);
+ break;
+ case OPC_CLTI_S_df:
+ tcg_gen_movi_i32(timm, s5);
+ gen_helper_msa_clti_s_df(cpu_env, tdf, twd, tws, timm);
+ break;
+ case OPC_CLTI_U_df:
+ gen_helper_msa_clti_u_df(cpu_env, tdf, twd, tws, timm);
+ break;
+ case OPC_CLEI_S_df:
+ tcg_gen_movi_i32(timm, s5);
+ gen_helper_msa_clei_s_df(cpu_env, tdf, twd, tws, timm);
+ break;
+ case OPC_CLEI_U_df:
+ gen_helper_msa_clei_u_df(cpu_env, tdf, twd, tws, timm);
+ break;
+ case OPC_LDI_df:
+ {
+ int32_t s10 = sextract32(ctx->opcode, 11, 10);
+ tcg_gen_movi_i32(timm, s10);
+ gen_helper_msa_ldi_df(cpu_env, tdf, twd, timm);
+ }
+ break;
+ default:
+ MIPS_INVAL("MSA instruction");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+
+ tcg_temp_free_i32(tdf);
+ tcg_temp_free_i32(twd);
+ tcg_temp_free_i32(tws);
+ tcg_temp_free_i32(timm);
+}
+
+static void gen_msa_bit(CPUMIPSState *env, DisasContext *ctx)
+{
+#define MASK_MSA_BIT(op) (MASK_MSA_MINOR(op) | (op & (0x7 << 23)))
+ uint8_t dfm = (ctx->opcode >> 16) & 0x7f;
+ uint32_t df = 0, m = 0;
+ uint8_t ws = (ctx->opcode >> 11) & 0x1f;
+ uint8_t wd = (ctx->opcode >> 6) & 0x1f;
+
+ TCGv_i32 tdf;
+ TCGv_i32 tm;
+ TCGv_i32 twd;
+ TCGv_i32 tws;
+
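+    /* The 7-bit dfm field encodes both the data format and the bit index
+       m: 0b0mmmmmm selects DOUBLE, 0b10mmmmm WORD, 0b110mmmm HALF and
+       0b1110mmm BYTE; anything else is a reserved encoding. */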
+ if ((dfm & 0x40) == 0x00) {
+ m = dfm & 0x3f;
+ df = DF_DOUBLE;
+ } else if ((dfm & 0x60) == 0x40) {
+ m = dfm & 0x1f;
+ df = DF_WORD;
+ } else if ((dfm & 0x70) == 0x60) {
+ m = dfm & 0x0f;
+ df = DF_HALF;
+ } else if ((dfm & 0x78) == 0x70) {
+ m = dfm & 0x7;
+ df = DF_BYTE;
+ } else {
+ generate_exception_end(ctx, EXCP_RI);
+ return;
+ }
+
+ tdf = tcg_const_i32(df);
+ tm = tcg_const_i32(m);
+ twd = tcg_const_i32(wd);
+ tws = tcg_const_i32(ws);
+
+ switch (MASK_MSA_BIT(ctx->opcode)) {
+ case OPC_SLLI_df:
+ gen_helper_msa_slli_df(cpu_env, tdf, twd, tws, tm);
+ break;
+ case OPC_SRAI_df:
+ gen_helper_msa_srai_df(cpu_env, tdf, twd, tws, tm);
+ break;
+ case OPC_SRLI_df:
+ gen_helper_msa_srli_df(cpu_env, tdf, twd, tws, tm);
+ break;
+ case OPC_BCLRI_df:
+ gen_helper_msa_bclri_df(cpu_env, tdf, twd, tws, tm);
+ break;
+ case OPC_BSETI_df:
+ gen_helper_msa_bseti_df(cpu_env, tdf, twd, tws, tm);
+ break;
+ case OPC_BNEGI_df:
+ gen_helper_msa_bnegi_df(cpu_env, tdf, twd, tws, tm);
+ break;
+ case OPC_BINSLI_df:
+ gen_helper_msa_binsli_df(cpu_env, tdf, twd, tws, tm);
+ break;
+ case OPC_BINSRI_df:
+ gen_helper_msa_binsri_df(cpu_env, tdf, twd, tws, tm);
+ break;
+ case OPC_SAT_S_df:
+ gen_helper_msa_sat_s_df(cpu_env, tdf, twd, tws, tm);
+ break;
+ case OPC_SAT_U_df:
+ gen_helper_msa_sat_u_df(cpu_env, tdf, twd, tws, tm);
+ break;
+ case OPC_SRARI_df:
+ gen_helper_msa_srari_df(cpu_env, tdf, twd, tws, tm);
+ break;
+ case OPC_SRLRI_df:
+ gen_helper_msa_srlri_df(cpu_env, tdf, twd, tws, tm);
+ break;
+ default:
+ MIPS_INVAL("MSA instruction");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+
+ tcg_temp_free_i32(tdf);
+ tcg_temp_free_i32(tm);
+ tcg_temp_free_i32(twd);
+ tcg_temp_free_i32(tws);
+}
+
+static void gen_msa_3r(CPUMIPSState *env, DisasContext *ctx)
+{
+#define MASK_MSA_3R(op) (MASK_MSA_MINOR(op) | (op & (0x7 << 23)))
+ uint8_t df = (ctx->opcode >> 21) & 0x3;
+ uint8_t wt = (ctx->opcode >> 16) & 0x1f;
+ uint8_t ws = (ctx->opcode >> 11) & 0x1f;
+ uint8_t wd = (ctx->opcode >> 6) & 0x1f;
+
+ TCGv_i32 tdf = tcg_const_i32(df);
+ TCGv_i32 twd = tcg_const_i32(wd);
+ TCGv_i32 tws = tcg_const_i32(ws);
+ TCGv_i32 twt = tcg_const_i32(wt);
+
+ switch (MASK_MSA_3R(ctx->opcode)) {
+ case OPC_SLL_df:
+ gen_helper_msa_sll_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_ADDV_df:
+ gen_helper_msa_addv_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_CEQ_df:
+ gen_helper_msa_ceq_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_ADD_A_df:
+ gen_helper_msa_add_a_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_SUBS_S_df:
+ gen_helper_msa_subs_s_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_MULV_df:
+ gen_helper_msa_mulv_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_SLD_df:
+ gen_helper_msa_sld_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_VSHF_df:
+ gen_helper_msa_vshf_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_SRA_df:
+ gen_helper_msa_sra_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_SUBV_df:
+ gen_helper_msa_subv_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_ADDS_A_df:
+ gen_helper_msa_adds_a_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_SUBS_U_df:
+ gen_helper_msa_subs_u_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_MADDV_df:
+ gen_helper_msa_maddv_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_SPLAT_df:
+ gen_helper_msa_splat_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_SRAR_df:
+ gen_helper_msa_srar_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_SRL_df:
+ gen_helper_msa_srl_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_MAX_S_df:
+ gen_helper_msa_max_s_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_CLT_S_df:
+ gen_helper_msa_clt_s_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_ADDS_S_df:
+ gen_helper_msa_adds_s_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_SUBSUS_U_df:
+ gen_helper_msa_subsus_u_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_MSUBV_df:
+ gen_helper_msa_msubv_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_PCKEV_df:
+ gen_helper_msa_pckev_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_SRLR_df:
+ gen_helper_msa_srlr_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_BCLR_df:
+ gen_helper_msa_bclr_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_MAX_U_df:
+ gen_helper_msa_max_u_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_CLT_U_df:
+ gen_helper_msa_clt_u_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_ADDS_U_df:
+ gen_helper_msa_adds_u_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_SUBSUU_S_df:
+ gen_helper_msa_subsuu_s_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_PCKOD_df:
+ gen_helper_msa_pckod_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_BSET_df:
+ gen_helper_msa_bset_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_MIN_S_df:
+ gen_helper_msa_min_s_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_CLE_S_df:
+ gen_helper_msa_cle_s_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_AVE_S_df:
+ gen_helper_msa_ave_s_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_ASUB_S_df:
+ gen_helper_msa_asub_s_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_DIV_S_df:
+ gen_helper_msa_div_s_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_ILVL_df:
+ gen_helper_msa_ilvl_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_BNEG_df:
+ gen_helper_msa_bneg_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_MIN_U_df:
+ gen_helper_msa_min_u_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_CLE_U_df:
+ gen_helper_msa_cle_u_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_AVE_U_df:
+ gen_helper_msa_ave_u_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_ASUB_U_df:
+ gen_helper_msa_asub_u_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_DIV_U_df:
+ gen_helper_msa_div_u_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_ILVR_df:
+ gen_helper_msa_ilvr_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_BINSL_df:
+ gen_helper_msa_binsl_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_MAX_A_df:
+ gen_helper_msa_max_a_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_AVER_S_df:
+ gen_helper_msa_aver_s_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_MOD_S_df:
+ gen_helper_msa_mod_s_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_ILVEV_df:
+ gen_helper_msa_ilvev_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_BINSR_df:
+ gen_helper_msa_binsr_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_MIN_A_df:
+ gen_helper_msa_min_a_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_AVER_U_df:
+ gen_helper_msa_aver_u_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_MOD_U_df:
+ gen_helper_msa_mod_u_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_ILVOD_df:
+ gen_helper_msa_ilvod_df(cpu_env, tdf, twd, tws, twt);
+ break;
+
+ case OPC_DOTP_S_df:
+ case OPC_DOTP_U_df:
+ case OPC_DPADD_S_df:
+ case OPC_DPADD_U_df:
+ case OPC_DPSUB_S_df:
+ case OPC_HADD_S_df:
+ case OPC_DPSUB_U_df:
+ case OPC_HADD_U_df:
+ case OPC_HSUB_S_df:
+ case OPC_HSUB_U_df:
+ if (df == DF_BYTE) {
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ switch (MASK_MSA_3R(ctx->opcode)) {
+ case OPC_DOTP_S_df:
+ gen_helper_msa_dotp_s_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_DOTP_U_df:
+ gen_helper_msa_dotp_u_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_DPADD_S_df:
+ gen_helper_msa_dpadd_s_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_DPADD_U_df:
+ gen_helper_msa_dpadd_u_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_DPSUB_S_df:
+ gen_helper_msa_dpsub_s_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_HADD_S_df:
+ gen_helper_msa_hadd_s_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_DPSUB_U_df:
+ gen_helper_msa_dpsub_u_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_HADD_U_df:
+ gen_helper_msa_hadd_u_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_HSUB_S_df:
+ gen_helper_msa_hsub_s_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_HSUB_U_df:
+ gen_helper_msa_hsub_u_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ }
+ break;
+ default:
+ MIPS_INVAL("MSA instruction");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ tcg_temp_free_i32(twd);
+ tcg_temp_free_i32(tws);
+ tcg_temp_free_i32(twt);
+ tcg_temp_free_i32(tdf);
+}
+
+static void gen_msa_elm_3e(CPUMIPSState *env, DisasContext *ctx)
+{
+#define MASK_MSA_ELM_DF3E(op) (MASK_MSA_MINOR(op) | (op & (0x3FF << 16)))
+ uint8_t source = (ctx->opcode >> 11) & 0x1f;
+ uint8_t dest = (ctx->opcode >> 6) & 0x1f;
+ TCGv telm = tcg_temp_new();
+ TCGv_i32 tsr = tcg_const_i32(source);
+ TCGv_i32 tdt = tcg_const_i32(dest);
+
+ switch (MASK_MSA_ELM_DF3E(ctx->opcode)) {
+ case OPC_CTCMSA:
+ gen_load_gpr(telm, source);
+ gen_helper_msa_ctcmsa(cpu_env, telm, tdt);
+ break;
+ case OPC_CFCMSA:
+ gen_helper_msa_cfcmsa(telm, cpu_env, tsr);
+ gen_store_gpr(telm, dest);
+ break;
+ case OPC_MOVE_V:
+ gen_helper_msa_move_v(cpu_env, tdt, tsr);
+ break;
+ default:
+ MIPS_INVAL("MSA instruction");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+
+ tcg_temp_free(telm);
+ tcg_temp_free_i32(tdt);
+ tcg_temp_free_i32(tsr);
+}
+
+static void gen_msa_elm_df(CPUMIPSState *env, DisasContext *ctx, uint32_t df,
+ uint32_t n)
+{
+#define MASK_MSA_ELM(op) (MASK_MSA_MINOR(op) | (op & (0xf << 22)))
+ uint8_t ws = (ctx->opcode >> 11) & 0x1f;
+ uint8_t wd = (ctx->opcode >> 6) & 0x1f;
+
+ TCGv_i32 tws = tcg_const_i32(ws);
+ TCGv_i32 twd = tcg_const_i32(wd);
+ TCGv_i32 tn = tcg_const_i32(n);
+ TCGv_i32 tdf = tcg_const_i32(df);
+
+ switch (MASK_MSA_ELM(ctx->opcode)) {
+ case OPC_SLDI_df:
+ gen_helper_msa_sldi_df(cpu_env, tdf, twd, tws, tn);
+ break;
+ case OPC_SPLATI_df:
+ gen_helper_msa_splati_df(cpu_env, tdf, twd, tws, tn);
+ break;
+ case OPC_INSVE_df:
+ gen_helper_msa_insve_df(cpu_env, tdf, twd, tws, tn);
+ break;
+ case OPC_COPY_S_df:
+ case OPC_COPY_U_df:
+ case OPC_INSERT_df:
+#if !defined(TARGET_MIPS64)
+ /* Double format valid only for MIPS64 */
+ if (df == DF_DOUBLE) {
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+#endif
+ switch (MASK_MSA_ELM(ctx->opcode)) {
+ case OPC_COPY_S_df:
+ gen_helper_msa_copy_s_df(cpu_env, tdf, twd, tws, tn);
+ break;
+ case OPC_COPY_U_df:
+ gen_helper_msa_copy_u_df(cpu_env, tdf, twd, tws, tn);
+ break;
+ case OPC_INSERT_df:
+ gen_helper_msa_insert_df(cpu_env, tdf, twd, tws, tn);
+ break;
+ }
+ break;
+ default:
+ MIPS_INVAL("MSA instruction");
+ generate_exception_end(ctx, EXCP_RI);
+ }
+ tcg_temp_free_i32(twd);
+ tcg_temp_free_i32(tws);
+ tcg_temp_free_i32(tn);
+ tcg_temp_free_i32(tdf);
+}
+
+static void gen_msa_elm(CPUMIPSState *env, DisasContext *ctx)
+{
+ uint8_t dfn = (ctx->opcode >> 16) & 0x3f;
+ uint32_t df = 0, n = 0;
+
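+    /* The 6-bit df/n field encodes the data format and the element index
+       n: 0b00nnnn selects BYTE, 0b100nnn HALF, 0b1100nn WORD, 0b11100n
+       DOUBLE, and 0b111110 the CTCMSA/CFCMSA/MOVE.V group. */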
+ if ((dfn & 0x30) == 0x00) {
+ n = dfn & 0x0f;
+ df = DF_BYTE;
+ } else if ((dfn & 0x38) == 0x20) {
+ n = dfn & 0x07;
+ df = DF_HALF;
+ } else if ((dfn & 0x3c) == 0x30) {
+ n = dfn & 0x03;
+ df = DF_WORD;
+ } else if ((dfn & 0x3e) == 0x38) {
+ n = dfn & 0x01;
+ df = DF_DOUBLE;
+ } else if (dfn == 0x3E) {
+ /* CTCMSA, CFCMSA, MOVE.V */
+ gen_msa_elm_3e(env, ctx);
+ return;
+ } else {
+ generate_exception_end(ctx, EXCP_RI);
+ return;
+ }
+
+ gen_msa_elm_df(env, ctx, df, n);
+}
+
+static void gen_msa_3rf(CPUMIPSState *env, DisasContext *ctx)
+{
+#define MASK_MSA_3RF(op) (MASK_MSA_MINOR(op) | (op & (0xf << 22)))
+ uint8_t df = (ctx->opcode >> 21) & 0x1;
+ uint8_t wt = (ctx->opcode >> 16) & 0x1f;
+ uint8_t ws = (ctx->opcode >> 11) & 0x1f;
+ uint8_t wd = (ctx->opcode >> 6) & 0x1f;
+
+ TCGv_i32 twd = tcg_const_i32(wd);
+ TCGv_i32 tws = tcg_const_i32(ws);
+ TCGv_i32 twt = tcg_const_i32(wt);
+ TCGv_i32 tdf = tcg_temp_new_i32();
+
+    /* For floating-point operations the single df bit selects word
+       (df + 2 == DF_WORD) or double precision; the fixed-point Q-format
+       cases below remap it to half/word with df + 1. */
+ tcg_gen_movi_i32(tdf, df + 2);
+
+ switch (MASK_MSA_3RF(ctx->opcode)) {
+ case OPC_FCAF_df:
+ gen_helper_msa_fcaf_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_FADD_df:
+ gen_helper_msa_fadd_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_FCUN_df:
+ gen_helper_msa_fcun_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_FSUB_df:
+ gen_helper_msa_fsub_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_FCOR_df:
+ gen_helper_msa_fcor_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_FCEQ_df:
+ gen_helper_msa_fceq_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_FMUL_df:
+ gen_helper_msa_fmul_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_FCUNE_df:
+ gen_helper_msa_fcune_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_FCUEQ_df:
+ gen_helper_msa_fcueq_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_FDIV_df:
+ gen_helper_msa_fdiv_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_FCNE_df:
+ gen_helper_msa_fcne_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_FCLT_df:
+ gen_helper_msa_fclt_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_FMADD_df:
+ gen_helper_msa_fmadd_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_MUL_Q_df:
+ tcg_gen_movi_i32(tdf, df + 1);
+ gen_helper_msa_mul_q_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_FCULT_df:
+ gen_helper_msa_fcult_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_FMSUB_df:
+ gen_helper_msa_fmsub_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_MADD_Q_df:
+ tcg_gen_movi_i32(tdf, df + 1);
+ gen_helper_msa_madd_q_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_FCLE_df:
+ gen_helper_msa_fcle_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_MSUB_Q_df:
+ tcg_gen_movi_i32(tdf, df + 1);
+ gen_helper_msa_msub_q_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_FCULE_df:
+ gen_helper_msa_fcule_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_FEXP2_df:
+ gen_helper_msa_fexp2_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_FSAF_df:
+ gen_helper_msa_fsaf_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_FEXDO_df:
+ gen_helper_msa_fexdo_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_FSUN_df:
+ gen_helper_msa_fsun_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_FSOR_df:
+ gen_helper_msa_fsor_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_FSEQ_df:
+ gen_helper_msa_fseq_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_FTQ_df:
+ gen_helper_msa_ftq_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_FSUNE_df:
+ gen_helper_msa_fsune_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_FSUEQ_df:
+ gen_helper_msa_fsueq_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_FSNE_df:
+ gen_helper_msa_fsne_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_FSLT_df:
+ gen_helper_msa_fslt_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_FMIN_df:
+ gen_helper_msa_fmin_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_MULR_Q_df:
+ tcg_gen_movi_i32(tdf, df + 1);
+ gen_helper_msa_mulr_q_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_FSULT_df:
+ gen_helper_msa_fsult_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_FMIN_A_df:
+ gen_helper_msa_fmin_a_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_MADDR_Q_df:
+ tcg_gen_movi_i32(tdf, df + 1);
+ gen_helper_msa_maddr_q_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_FSLE_df:
+ gen_helper_msa_fsle_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_FMAX_df:
+ gen_helper_msa_fmax_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_MSUBR_Q_df:
+ tcg_gen_movi_i32(tdf, df + 1);
+ gen_helper_msa_msubr_q_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_FSULE_df:
+ gen_helper_msa_fsule_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ case OPC_FMAX_A_df:
+ gen_helper_msa_fmax_a_df(cpu_env, tdf, twd, tws, twt);
+ break;
+ default:
+ MIPS_INVAL("MSA instruction");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+
+ tcg_temp_free_i32(twd);
+ tcg_temp_free_i32(tws);
+ tcg_temp_free_i32(twt);
+ tcg_temp_free_i32(tdf);
+}
+
+static void gen_msa_2r(CPUMIPSState *env, DisasContext *ctx)
+{
+#define MASK_MSA_2R(op) (MASK_MSA_MINOR(op) | (op & (0x1f << 21)) | \
+ (op & (0x7 << 18)))
+ uint8_t wt = (ctx->opcode >> 16) & 0x1f;
+ uint8_t ws = (ctx->opcode >> 11) & 0x1f;
+ uint8_t wd = (ctx->opcode >> 6) & 0x1f;
+ uint8_t df = (ctx->opcode >> 16) & 0x3;
+ TCGv_i32 twd = tcg_const_i32(wd);
+ TCGv_i32 tws = tcg_const_i32(ws);
+ TCGv_i32 twt = tcg_const_i32(wt);
+ TCGv_i32 tdf = tcg_const_i32(df);
+
+ switch (MASK_MSA_2R(ctx->opcode)) {
+ case OPC_FILL_df:
+#if !defined(TARGET_MIPS64)
+ /* Double format valid only for MIPS64 */
+ if (df == DF_DOUBLE) {
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+#endif
+        /* For FILL.df the ws field holds the source GPR index (rs). */
+        gen_helper_msa_fill_df(cpu_env, tdf, twd, tws);
+ break;
+ case OPC_PCNT_df:
+ gen_helper_msa_pcnt_df(cpu_env, tdf, twd, tws);
+ break;
+ case OPC_NLOC_df:
+ gen_helper_msa_nloc_df(cpu_env, tdf, twd, tws);
+ break;
+ case OPC_NLZC_df:
+ gen_helper_msa_nlzc_df(cpu_env, tdf, twd, tws);
+ break;
+ default:
+ MIPS_INVAL("MSA instruction");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+
+ tcg_temp_free_i32(twd);
+ tcg_temp_free_i32(tws);
+ tcg_temp_free_i32(twt);
+ tcg_temp_free_i32(tdf);
+}
+
+static void gen_msa_2rf(CPUMIPSState *env, DisasContext *ctx)
+{
+#define MASK_MSA_2RF(op) (MASK_MSA_MINOR(op) | (op & (0x1f << 21)) | \
+ (op & (0xf << 17)))
+ uint8_t wt = (ctx->opcode >> 16) & 0x1f;
+ uint8_t ws = (ctx->opcode >> 11) & 0x1f;
+ uint8_t wd = (ctx->opcode >> 6) & 0x1f;
+ uint8_t df = (ctx->opcode >> 16) & 0x1;
+ TCGv_i32 twd = tcg_const_i32(wd);
+ TCGv_i32 tws = tcg_const_i32(ws);
+ TCGv_i32 twt = tcg_const_i32(wt);
+    /* The single df bit selects word (df + 2 == DF_WORD) or double
+       precision for these floating-point operations. */
+ TCGv_i32 tdf = tcg_const_i32(df + 2);
+
+ switch (MASK_MSA_2RF(ctx->opcode)) {
+ case OPC_FCLASS_df:
+ gen_helper_msa_fclass_df(cpu_env, tdf, twd, tws);
+ break;
+ case OPC_FTRUNC_S_df:
+ gen_helper_msa_ftrunc_s_df(cpu_env, tdf, twd, tws);
+ break;
+ case OPC_FTRUNC_U_df:
+ gen_helper_msa_ftrunc_u_df(cpu_env, tdf, twd, tws);
+ break;
+ case OPC_FSQRT_df:
+ gen_helper_msa_fsqrt_df(cpu_env, tdf, twd, tws);
+ break;
+ case OPC_FRSQRT_df:
+ gen_helper_msa_frsqrt_df(cpu_env, tdf, twd, tws);
+ break;
+ case OPC_FRCP_df:
+ gen_helper_msa_frcp_df(cpu_env, tdf, twd, tws);
+ break;
+ case OPC_FRINT_df:
+ gen_helper_msa_frint_df(cpu_env, tdf, twd, tws);
+ break;
+ case OPC_FLOG2_df:
+ gen_helper_msa_flog2_df(cpu_env, tdf, twd, tws);
+ break;
+ case OPC_FEXUPL_df:
+ gen_helper_msa_fexupl_df(cpu_env, tdf, twd, tws);
+ break;
+ case OPC_FEXUPR_df:
+ gen_helper_msa_fexupr_df(cpu_env, tdf, twd, tws);
+ break;
+ case OPC_FFQL_df:
+ gen_helper_msa_ffql_df(cpu_env, tdf, twd, tws);
+ break;
+ case OPC_FFQR_df:
+ gen_helper_msa_ffqr_df(cpu_env, tdf, twd, tws);
+ break;
+ case OPC_FTINT_S_df:
+ gen_helper_msa_ftint_s_df(cpu_env, tdf, twd, tws);
+ break;
+ case OPC_FTINT_U_df:
+ gen_helper_msa_ftint_u_df(cpu_env, tdf, twd, tws);
+ break;
+ case OPC_FFINT_S_df:
+ gen_helper_msa_ffint_s_df(cpu_env, tdf, twd, tws);
+ break;
+ case OPC_FFINT_U_df:
+ gen_helper_msa_ffint_u_df(cpu_env, tdf, twd, tws);
+ break;
+ }
+
+ tcg_temp_free_i32(twd);
+ tcg_temp_free_i32(tws);
+ tcg_temp_free_i32(twt);
+ tcg_temp_free_i32(tdf);
+}
+
+static void gen_msa_vec_v(CPUMIPSState *env, DisasContext *ctx)
+{
+#define MASK_MSA_VEC(op) (MASK_MSA_MINOR(op) | (op & (0x1f << 21)))
+ uint8_t wt = (ctx->opcode >> 16) & 0x1f;
+ uint8_t ws = (ctx->opcode >> 11) & 0x1f;
+ uint8_t wd = (ctx->opcode >> 6) & 0x1f;
+ TCGv_i32 twd = tcg_const_i32(wd);
+ TCGv_i32 tws = tcg_const_i32(ws);
+ TCGv_i32 twt = tcg_const_i32(wt);
+
+ switch (MASK_MSA_VEC(ctx->opcode)) {
+ case OPC_AND_V:
+ gen_helper_msa_and_v(cpu_env, twd, tws, twt);
+ break;
+ case OPC_OR_V:
+ gen_helper_msa_or_v(cpu_env, twd, tws, twt);
+ break;
+ case OPC_NOR_V:
+ gen_helper_msa_nor_v(cpu_env, twd, tws, twt);
+ break;
+ case OPC_XOR_V:
+ gen_helper_msa_xor_v(cpu_env, twd, tws, twt);
+ break;
+ case OPC_BMNZ_V:
+ gen_helper_msa_bmnz_v(cpu_env, twd, tws, twt);
+ break;
+ case OPC_BMZ_V:
+ gen_helper_msa_bmz_v(cpu_env, twd, tws, twt);
+ break;
+ case OPC_BSEL_V:
+ gen_helper_msa_bsel_v(cpu_env, twd, tws, twt);
+ break;
+ default:
+ MIPS_INVAL("MSA instruction");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+
+ tcg_temp_free_i32(twd);
+ tcg_temp_free_i32(tws);
+ tcg_temp_free_i32(twt);
+}
+
+static void gen_msa_vec(CPUMIPSState *env, DisasContext *ctx)
+{
+ switch (MASK_MSA_VEC(ctx->opcode)) {
+ case OPC_AND_V:
+ case OPC_OR_V:
+ case OPC_NOR_V:
+ case OPC_XOR_V:
+ case OPC_BMNZ_V:
+ case OPC_BMZ_V:
+ case OPC_BSEL_V:
+ gen_msa_vec_v(env, ctx);
+ break;
+ case OPC_MSA_2R:
+ gen_msa_2r(env, ctx);
+ break;
+ case OPC_MSA_2RF:
+ gen_msa_2rf(env, ctx);
+ break;
+ default:
+ MIPS_INVAL("MSA instruction");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+}
+
+static void gen_msa(CPUMIPSState *env, DisasContext *ctx)
+{
+ uint32_t opcode = ctx->opcode;
+ check_insn(ctx, ASE_MSA);
+ check_msa_access(ctx);
+
+ switch (MASK_MSA_MINOR(opcode)) {
+ case OPC_MSA_I8_00:
+ case OPC_MSA_I8_01:
+ case OPC_MSA_I8_02:
+ gen_msa_i8(env, ctx);
+ break;
+ case OPC_MSA_I5_06:
+ case OPC_MSA_I5_07:
+ gen_msa_i5(env, ctx);
+ break;
+ case OPC_MSA_BIT_09:
+ case OPC_MSA_BIT_0A:
+ gen_msa_bit(env, ctx);
+ break;
+ case OPC_MSA_3R_0D:
+ case OPC_MSA_3R_0E:
+ case OPC_MSA_3R_0F:
+ case OPC_MSA_3R_10:
+ case OPC_MSA_3R_11:
+ case OPC_MSA_3R_12:
+ case OPC_MSA_3R_13:
+ case OPC_MSA_3R_14:
+ case OPC_MSA_3R_15:
+ gen_msa_3r(env, ctx);
+ break;
+ case OPC_MSA_ELM:
+ gen_msa_elm(env, ctx);
+ break;
+ case OPC_MSA_3RF_1A:
+ case OPC_MSA_3RF_1B:
+ case OPC_MSA_3RF_1C:
+ gen_msa_3rf(env, ctx);
+ break;
+ case OPC_MSA_VEC:
+ gen_msa_vec(env, ctx);
+ break;
+ case OPC_LD_B:
+ case OPC_LD_H:
+ case OPC_LD_W:
+ case OPC_LD_D:
+ case OPC_ST_B:
+ case OPC_ST_H:
+ case OPC_ST_W:
+ case OPC_ST_D:
+ {
+ int32_t s10 = sextract32(ctx->opcode, 16, 10);
+ uint8_t rs = (ctx->opcode >> 11) & 0x1f;
+ uint8_t wd = (ctx->opcode >> 6) & 0x1f;
+ uint8_t df = (ctx->opcode >> 0) & 0x3;
+
+ TCGv_i32 twd = tcg_const_i32(wd);
+ TCGv taddr = tcg_temp_new();
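+            /* The signed 10-bit offset is in units of the element size,
+               so scale it by 1 << df bytes before adding it to GPR rs. */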
+ gen_base_offset_addr(ctx, taddr, rs, s10 << df);
+
+ switch (MASK_MSA_MINOR(opcode)) {
+ case OPC_LD_B:
+ gen_helper_msa_ld_b(cpu_env, twd, taddr);
+ break;
+ case OPC_LD_H:
+ gen_helper_msa_ld_h(cpu_env, twd, taddr);
+ break;
+ case OPC_LD_W:
+ gen_helper_msa_ld_w(cpu_env, twd, taddr);
+ break;
+ case OPC_LD_D:
+ gen_helper_msa_ld_d(cpu_env, twd, taddr);
+ break;
+ case OPC_ST_B:
+ gen_helper_msa_st_b(cpu_env, twd, taddr);
+ break;
+ case OPC_ST_H:
+ gen_helper_msa_st_h(cpu_env, twd, taddr);
+ break;
+ case OPC_ST_W:
+ gen_helper_msa_st_w(cpu_env, twd, taddr);
+ break;
+ case OPC_ST_D:
+ gen_helper_msa_st_d(cpu_env, twd, taddr);
+ break;
+ }
+
+ tcg_temp_free_i32(twd);
+ tcg_temp_free(taddr);
+ }
+ break;
+ default:
+ MIPS_INVAL("MSA instruction");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+}
+
+static void decode_opc(CPUMIPSState *env, DisasContext *ctx)
+{
+ int32_t offset;
+ int rs, rt, rd, sa;
+ uint32_t op, op1;
+ int16_t imm;
+
+ /* make sure instructions are on a word boundary */
+ if (ctx->pc & 0x3) {
+ env->CP0_BadVAddr = ctx->pc;
+ generate_exception_err(ctx, EXCP_AdEL, EXCP_INST_NOTAVAIL);
+ return;
+ }
+
+    /* Handle the not-taken case of a preceding branch-likely: when bcond
+       is zero, skip over the delay slot instruction. */
+ if ((ctx->hflags & MIPS_HFLAG_BMASK_BASE) == MIPS_HFLAG_BL) {
+ TCGLabel *l1 = gen_new_label();
+
+ tcg_gen_brcondi_tl(TCG_COND_NE, bcond, 0, l1);
+ tcg_gen_movi_i32(hflags, ctx->hflags & ~MIPS_HFLAG_BMASK);
+ gen_goto_tb(ctx, 1, ctx->pc + 4);
+ gen_set_label(l1);
+ }
+
+ op = MASK_OP_MAJOR(ctx->opcode);
+ rs = (ctx->opcode >> 21) & 0x1f;
+ rt = (ctx->opcode >> 16) & 0x1f;
+ rd = (ctx->opcode >> 11) & 0x1f;
+ sa = (ctx->opcode >> 6) & 0x1f;
+ imm = (int16_t)ctx->opcode;
+ switch (op) {
+ case OPC_SPECIAL:
+ decode_opc_special(env, ctx);
+ break;
+ case OPC_SPECIAL2:
+ decode_opc_special2_legacy(env, ctx);
+ break;
+ case OPC_SPECIAL3:
+ decode_opc_special3(env, ctx);
+ break;
+ case OPC_REGIMM:
+ op1 = MASK_REGIMM(ctx->opcode);
+ switch (op1) {
+ case OPC_BLTZL: /* REGIMM branches */
+ case OPC_BGEZL:
+ case OPC_BLTZALL:
+ case OPC_BGEZALL:
+ check_insn(ctx, ISA_MIPS2);
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ /* Fallthrough */
+ case OPC_BLTZ:
+ case OPC_BGEZ:
+ gen_compute_branch(ctx, op1, 4, rs, -1, imm << 2, 4);
+ break;
+ case OPC_BLTZAL:
+ case OPC_BGEZAL:
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ if (rs == 0) {
+ /* OPC_NAL, OPC_BAL */
+ gen_compute_branch(ctx, op1, 4, 0, -1, imm << 2, 4);
+ } else {
+ generate_exception_end(ctx, EXCP_RI);
+ }
+ } else {
+ gen_compute_branch(ctx, op1, 4, rs, -1, imm << 2, 4);
+ }
+ break;
+ case OPC_TGEI ... OPC_TEQI: /* REGIMM traps */
+ case OPC_TNEI:
+ check_insn(ctx, ISA_MIPS2);
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ gen_trap(ctx, op1, rs, -1, imm);
+ break;
+ case OPC_SIGRIE:
+ check_insn(ctx, ISA_MIPS32R6);
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ case OPC_SYNCI:
+ check_insn(ctx, ISA_MIPS32R2);
+ /* Break the TB to be able to sync copied instructions
+ immediately */
+ ctx->bstate = BS_STOP;
+ break;
+ case OPC_BPOSGE32: /* MIPS DSP branch */
+#if defined(TARGET_MIPS64)
+ case OPC_BPOSGE64:
+#endif
+ check_dsp(ctx);
+ gen_compute_branch(ctx, op1, 4, -1, -2, (int32_t)imm << 2, 4);
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DAHI:
+ check_insn(ctx, ISA_MIPS32R6);
+ check_mips_64(ctx);
+ if (rs != 0) {
+ tcg_gen_addi_tl(cpu_gpr[rs], cpu_gpr[rs], (int64_t)imm << 32);
+ }
+ break;
+ case OPC_DATI:
+ check_insn(ctx, ISA_MIPS32R6);
+ check_mips_64(ctx);
+ if (rs != 0) {
+ tcg_gen_addi_tl(cpu_gpr[rs], cpu_gpr[rs], (int64_t)imm << 48);
+ }
+ break;
+#endif
+ default: /* Invalid */
+ MIPS_INVAL("regimm");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ case OPC_CP0:
+ check_cp0_enabled(ctx);
+ op1 = MASK_CP0(ctx->opcode);
+ switch (op1) {
+ case OPC_MFC0:
+ case OPC_MTC0:
+ case OPC_MFTR:
+ case OPC_MTTR:
+ case OPC_MFHC0:
+ case OPC_MTHC0:
+#if defined(TARGET_MIPS64)
+ case OPC_DMFC0:
+ case OPC_DMTC0:
+#endif
+#ifndef CONFIG_USER_ONLY
+ gen_cp0(env, ctx, op1, rt, rd);
+#endif /* !CONFIG_USER_ONLY */
+ break;
+ case OPC_C0_FIRST ... OPC_C0_LAST:
+#ifndef CONFIG_USER_ONLY
+ gen_cp0(env, ctx, MASK_C0(ctx->opcode), rt, rd);
+#endif /* !CONFIG_USER_ONLY */
+ break;
+ case OPC_MFMC0:
+#ifndef CONFIG_USER_ONLY
+ {
+ uint32_t op2;
+ TCGv t0 = tcg_temp_new();
+
+ op2 = MASK_MFMC0(ctx->opcode);
+ switch (op2) {
+ case OPC_DMT:
+ check_insn(ctx, ASE_MT);
+ gen_helper_dmt(t0);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_EMT:
+ check_insn(ctx, ASE_MT);
+ gen_helper_emt(t0);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_DVPE:
+ check_insn(ctx, ASE_MT);
+ gen_helper_dvpe(t0, cpu_env);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_EVPE:
+ check_insn(ctx, ASE_MT);
+ gen_helper_evpe(t0, cpu_env);
+ gen_store_gpr(t0, rt);
+ break;
+ case OPC_DVP:
+ check_insn(ctx, ISA_MIPS32R6);
+ if (ctx->vp) {
+ gen_helper_dvp(t0, cpu_env);
+ gen_store_gpr(t0, rt);
+ }
+ break;
+ case OPC_EVP:
+ check_insn(ctx, ISA_MIPS32R6);
+ if (ctx->vp) {
+ gen_helper_evp(t0, cpu_env);
+ gen_store_gpr(t0, rt);
+ }
+ break;
+ case OPC_DI:
+ check_insn(ctx, ISA_MIPS32R2);
+ save_cpu_state(ctx, 1);
+ gen_helper_di(t0, cpu_env);
+ gen_store_gpr(t0, rt);
+ /* Stop translation as we may have switched
+ the execution mode. */
+ ctx->bstate = BS_STOP;
+ break;
+ case OPC_EI:
+ check_insn(ctx, ISA_MIPS32R2);
+ save_cpu_state(ctx, 1);
+ gen_helper_ei(t0, cpu_env);
+ gen_store_gpr(t0, rt);
+ /* Stop translation as we may have switched
+ the execution mode. */
+ ctx->bstate = BS_STOP;
+ break;
+ default: /* Invalid */
+ MIPS_INVAL("mfmc0");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ tcg_temp_free(t0);
+ }
+#endif /* !CONFIG_USER_ONLY */
+ break;
+ case OPC_RDPGPR:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_load_srsgpr(rt, rd);
+ break;
+ case OPC_WRPGPR:
+ check_insn(ctx, ISA_MIPS32R2);
+ gen_store_srsgpr(rt, rd);
+ break;
+ default:
+ MIPS_INVAL("cp0");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ break;
+ case OPC_BOVC: /* OPC_BEQZALC, OPC_BEQC, OPC_ADDI */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* OPC_BOVC, OPC_BEQZALC, OPC_BEQC */
+ gen_compute_compact_branch(ctx, op, rs, rt, imm << 2);
+ } else {
+ /* OPC_ADDI */
+ /* Arithmetic with immediate opcode */
+ gen_arith_imm(ctx, op, rt, rs, imm);
+ }
+ break;
+ case OPC_ADDIU:
+ gen_arith_imm(ctx, op, rt, rs, imm);
+ break;
+ case OPC_SLTI: /* Set on less than with immediate opcode */
+ case OPC_SLTIU:
+ gen_slt_imm(ctx, op, rt, rs, imm);
+ break;
+ case OPC_ANDI: /* Arithmetic with immediate opcode */
+ case OPC_LUI: /* OPC_AUI */
+ case OPC_ORI:
+ case OPC_XORI:
+ gen_logic_imm(ctx, op, rt, rs, imm);
+ break;
+ case OPC_J ... OPC_JAL: /* Jump */
+ offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 2;
+ gen_compute_branch(ctx, op, 4, rs, rt, offset, 4);
+ break;
+ /* Branch */
+ case OPC_BLEZC: /* OPC_BGEZC, OPC_BGEC, OPC_BLEZL */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ if (rt == 0) {
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ /* OPC_BLEZC, OPC_BGEZC, OPC_BGEC */
+ gen_compute_compact_branch(ctx, op, rs, rt, imm << 2);
+ } else {
+ /* OPC_BLEZL */
+ gen_compute_branch(ctx, op, 4, rs, rt, imm << 2, 4);
+ }
+ break;
+ case OPC_BGTZC: /* OPC_BLTZC, OPC_BLTC, OPC_BGTZL */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ if (rt == 0) {
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ /* OPC_BGTZC, OPC_BLTZC, OPC_BLTC */
+ gen_compute_compact_branch(ctx, op, rs, rt, imm << 2);
+ } else {
+ /* OPC_BGTZL */
+ gen_compute_branch(ctx, op, 4, rs, rt, imm << 2, 4);
+ }
+ break;
+ case OPC_BLEZALC: /* OPC_BGEZALC, OPC_BGEUC, OPC_BLEZ */
+ if (rt == 0) {
+ /* OPC_BLEZ */
+ gen_compute_branch(ctx, op, 4, rs, rt, imm << 2, 4);
+ } else {
+ check_insn(ctx, ISA_MIPS32R6);
+ /* OPC_BLEZALC, OPC_BGEZALC, OPC_BGEUC */
+ gen_compute_compact_branch(ctx, op, rs, rt, imm << 2);
+ }
+ break;
+ case OPC_BGTZALC: /* OPC_BLTZALC, OPC_BLTUC, OPC_BGTZ */
+ if (rt == 0) {
+ /* OPC_BGTZ */
+ gen_compute_branch(ctx, op, 4, rs, rt, imm << 2, 4);
+ } else {
+ check_insn(ctx, ISA_MIPS32R6);
+ /* OPC_BGTZALC, OPC_BLTZALC, OPC_BLTUC */
+ gen_compute_compact_branch(ctx, op, rs, rt, imm << 2);
+ }
+ break;
+ case OPC_BEQL:
+ case OPC_BNEL:
+ check_insn(ctx, ISA_MIPS2);
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ /* Fallthrough */
+ case OPC_BEQ:
+ case OPC_BNE:
+ gen_compute_branch(ctx, op, 4, rs, rt, imm << 2, 4);
+ break;
+ case OPC_LL: /* Load and stores */
+ check_insn(ctx, ISA_MIPS2);
+ /* Fallthrough */
+ case OPC_LWL:
+ case OPC_LWR:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ /* Fallthrough */
+ case OPC_LB ... OPC_LH:
+ case OPC_LW ... OPC_LHU:
+ gen_ld(ctx, op, rt, rs, imm);
+ break;
+ case OPC_SWL:
+ case OPC_SWR:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ /* fall through */
+ case OPC_SB ... OPC_SH:
+ case OPC_SW:
+ gen_st(ctx, op, rt, rs, imm);
+ break;
+ case OPC_SC:
+ check_insn(ctx, ISA_MIPS2);
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ gen_st_cond(ctx, op, rt, rs, imm);
+ break;
+ case OPC_CACHE:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ check_cp0_enabled(ctx);
+ check_insn(ctx, ISA_MIPS3 | ISA_MIPS32);
+ if (ctx->hflags & MIPS_HFLAG_ITC_CACHE) {
+ gen_cache_operation(ctx, rt, rs, imm);
+ }
+ /* Treat as NOP. */
+ break;
+ case OPC_PREF:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ check_insn(ctx, ISA_MIPS4 | ISA_MIPS32);
+ /* Treat as NOP. */
+ break;
+
+ /* Floating point (COP1). */
+ case OPC_LWC1:
+ case OPC_LDC1:
+ case OPC_SWC1:
+ case OPC_SDC1:
+ gen_cop1_ldst(ctx, op, rt, rs, imm);
+ break;
+
+ case OPC_CP1:
+ op1 = MASK_CP1(ctx->opcode);
+
+ switch (op1) {
+ case OPC_MFHC1:
+ case OPC_MTHC1:
+ check_cp1_enabled(ctx);
+            check_insn(ctx, ISA_MIPS32R2);
+            /* fall through */
+ case OPC_MFC1:
+ case OPC_CFC1:
+ case OPC_MTC1:
+ case OPC_CTC1:
+ check_cp1_enabled(ctx);
+ gen_cp1(ctx, op1, rt, rd);
+ break;
+#if defined(TARGET_MIPS64)
+ case OPC_DMFC1:
+ case OPC_DMTC1:
+ check_cp1_enabled(ctx);
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_cp1(ctx, op1, rt, rd);
+ break;
+#endif
+ case OPC_BC1EQZ: /* OPC_BC1ANY2 */
+ check_cp1_enabled(ctx);
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* OPC_BC1EQZ */
+ gen_compute_branch1_r6(ctx, MASK_CP1(ctx->opcode),
+ rt, imm << 2, 4);
+ } else {
+ /* OPC_BC1ANY2 */
+ check_cop1x(ctx);
+ check_insn(ctx, ASE_MIPS3D);
+ gen_compute_branch1(ctx, MASK_BC1(ctx->opcode),
+ (rt >> 2) & 0x7, imm << 2);
+ }
+ break;
+ case OPC_BC1NEZ:
+ check_cp1_enabled(ctx);
+ check_insn(ctx, ISA_MIPS32R6);
+ gen_compute_branch1_r6(ctx, MASK_CP1(ctx->opcode),
+ rt, imm << 2, 4);
+ break;
+ case OPC_BC1ANY4:
+ check_cp1_enabled(ctx);
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ check_cop1x(ctx);
+ check_insn(ctx, ASE_MIPS3D);
+ /* fall through */
+ case OPC_BC1:
+ check_cp1_enabled(ctx);
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ gen_compute_branch1(ctx, MASK_BC1(ctx->opcode),
+ (rt >> 2) & 0x7, imm << 2);
+ break;
+ case OPC_PS_FMT:
+ check_ps(ctx);
+ /* fall through */
+ case OPC_S_FMT:
+ case OPC_D_FMT:
+ check_cp1_enabled(ctx);
+ gen_farith(ctx, ctx->opcode & FOP(0x3f, 0x1f), rt, rd, sa,
+ (imm >> 8) & 0x7);
+ break;
+ case OPC_W_FMT:
+ case OPC_L_FMT:
+ {
+ int r6_op = ctx->opcode & FOP(0x3f, 0x1f);
+ check_cp1_enabled(ctx);
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ switch (r6_op) {
+ case R6_OPC_CMP_AF_S:
+ case R6_OPC_CMP_UN_S:
+ case R6_OPC_CMP_EQ_S:
+ case R6_OPC_CMP_UEQ_S:
+ case R6_OPC_CMP_LT_S:
+ case R6_OPC_CMP_ULT_S:
+ case R6_OPC_CMP_LE_S:
+ case R6_OPC_CMP_ULE_S:
+ case R6_OPC_CMP_SAF_S:
+ case R6_OPC_CMP_SUN_S:
+ case R6_OPC_CMP_SEQ_S:
+ case R6_OPC_CMP_SEUQ_S:
+ case R6_OPC_CMP_SLT_S:
+ case R6_OPC_CMP_SULT_S:
+ case R6_OPC_CMP_SLE_S:
+ case R6_OPC_CMP_SULE_S:
+ case R6_OPC_CMP_OR_S:
+ case R6_OPC_CMP_UNE_S:
+ case R6_OPC_CMP_NE_S:
+ case R6_OPC_CMP_SOR_S:
+ case R6_OPC_CMP_SUNE_S:
+ case R6_OPC_CMP_SNE_S:
+ gen_r6_cmp_s(ctx, ctx->opcode & 0x1f, rt, rd, sa);
+ break;
+ case R6_OPC_CMP_AF_D:
+ case R6_OPC_CMP_UN_D:
+ case R6_OPC_CMP_EQ_D:
+ case R6_OPC_CMP_UEQ_D:
+ case R6_OPC_CMP_LT_D:
+ case R6_OPC_CMP_ULT_D:
+ case R6_OPC_CMP_LE_D:
+ case R6_OPC_CMP_ULE_D:
+ case R6_OPC_CMP_SAF_D:
+ case R6_OPC_CMP_SUN_D:
+ case R6_OPC_CMP_SEQ_D:
+ case R6_OPC_CMP_SEUQ_D:
+ case R6_OPC_CMP_SLT_D:
+ case R6_OPC_CMP_SULT_D:
+ case R6_OPC_CMP_SLE_D:
+ case R6_OPC_CMP_SULE_D:
+ case R6_OPC_CMP_OR_D:
+ case R6_OPC_CMP_UNE_D:
+ case R6_OPC_CMP_NE_D:
+ case R6_OPC_CMP_SOR_D:
+ case R6_OPC_CMP_SUNE_D:
+ case R6_OPC_CMP_SNE_D:
+ gen_r6_cmp_d(ctx, ctx->opcode & 0x1f, rt, rd, sa);
+ break;
+ default:
+ gen_farith(ctx, ctx->opcode & FOP(0x3f, 0x1f),
+ rt, rd, sa, (imm >> 8) & 0x7);
+
+ break;
+ }
+ } else {
+ gen_farith(ctx, ctx->opcode & FOP(0x3f, 0x1f), rt, rd, sa,
+ (imm >> 8) & 0x7);
+ }
+ break;
+ }
+ case OPC_BZ_V:
+ case OPC_BNZ_V:
+ case OPC_BZ_B:
+ case OPC_BZ_H:
+ case OPC_BZ_W:
+ case OPC_BZ_D:
+ case OPC_BNZ_B:
+ case OPC_BNZ_H:
+ case OPC_BNZ_W:
+ case OPC_BNZ_D:
+ check_insn(ctx, ASE_MSA);
+ gen_msa_branch(env, ctx, op1);
+ break;
+ default:
+ MIPS_INVAL("cp1");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ break;
+
+ /* Compact branches [R6] and COP2 [non-R6] */
+ case OPC_BC: /* OPC_LWC2 */
+ case OPC_BALC: /* OPC_SWC2 */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* OPC_BC, OPC_BALC */
+ gen_compute_compact_branch(ctx, op, 0, 0,
+ sextract32(ctx->opcode << 2, 0, 28));
+ } else {
+ /* OPC_LWC2, OPC_SWC2 */
+ /* COP2: Not implemented. */
+ generate_exception_err(ctx, EXCP_CpU, 2);
+ }
+ break;
+ case OPC_BEQZC: /* OPC_JIC, OPC_LDC2 */
+ case OPC_BNEZC: /* OPC_JIALC, OPC_SDC2 */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ if (rs != 0) {
+ /* OPC_BEQZC, OPC_BNEZC */
+ gen_compute_compact_branch(ctx, op, rs, 0,
+ sextract32(ctx->opcode << 2, 0, 23));
+ } else {
+ /* OPC_JIC, OPC_JIALC */
+ gen_compute_compact_branch(ctx, op, 0, rt, imm);
+ }
+ } else {
+ /* OPC_LWC2, OPC_SWC2 */
+ /* COP2: Not implemented. */
+ generate_exception_err(ctx, EXCP_CpU, 2);
+ }
+ break;
+ case OPC_CP2:
+ check_insn(ctx, INSN_LOONGSON2F);
+ /* Note that these instructions use different fields. */
+ gen_loongson_multimedia(ctx, sa, rd, rt);
+ break;
+
+ case OPC_CP3:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ if (ctx->CP0_Config1 & (1 << CP0C1_FP)) {
+ check_cp1_enabled(ctx);
+ op1 = MASK_CP3(ctx->opcode);
+ switch (op1) {
+ case OPC_LUXC1:
+ case OPC_SUXC1:
+ check_insn(ctx, ISA_MIPS5 | ISA_MIPS32R2);
+ /* Fallthrough */
+ case OPC_LWXC1:
+ case OPC_LDXC1:
+ case OPC_SWXC1:
+ case OPC_SDXC1:
+ check_insn(ctx, ISA_MIPS4 | ISA_MIPS32R2);
+ gen_flt3_ldst(ctx, op1, sa, rd, rs, rt);
+ break;
+ case OPC_PREFX:
+ check_insn(ctx, ISA_MIPS4 | ISA_MIPS32R2);
+ /* Treat as NOP. */
+ break;
+ case OPC_ALNV_PS:
+ check_insn(ctx, ISA_MIPS5 | ISA_MIPS32R2);
+ /* Fallthrough */
+ case OPC_MADD_S:
+ case OPC_MADD_D:
+ case OPC_MADD_PS:
+ case OPC_MSUB_S:
+ case OPC_MSUB_D:
+ case OPC_MSUB_PS:
+ case OPC_NMADD_S:
+ case OPC_NMADD_D:
+ case OPC_NMADD_PS:
+ case OPC_NMSUB_S:
+ case OPC_NMSUB_D:
+ case OPC_NMSUB_PS:
+ check_insn(ctx, ISA_MIPS4 | ISA_MIPS32R2);
+ gen_flt3_arith(ctx, op1, sa, rs, rd, rt);
+ break;
+ default:
+ MIPS_INVAL("cp3");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+ } else {
+ generate_exception_err(ctx, EXCP_CpU, 1);
+ }
+ break;
+
+#if defined(TARGET_MIPS64)
+ /* MIPS64 opcodes */
+ case OPC_LDL ... OPC_LDR:
+ case OPC_LLD:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ /* fall through */
+ case OPC_LWU:
+ case OPC_LD:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_ld(ctx, op, rt, rs, imm);
+ break;
+ case OPC_SDL ... OPC_SDR:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ /* fall through */
+ case OPC_SD:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_st(ctx, op, rt, rs, imm);
+ break;
+ case OPC_SCD:
+ check_insn_opc_removed(ctx, ISA_MIPS32R6);
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_st_cond(ctx, op, rt, rs, imm);
+ break;
+ case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC, OPC_DADDI */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ /* OPC_BNVC, OPC_BNEZALC, OPC_BNEC */
+ gen_compute_compact_branch(ctx, op, rs, rt, imm << 2);
+ } else {
+ /* OPC_DADDI */
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_arith_imm(ctx, op, rt, rs, imm);
+ }
+ break;
+ case OPC_DADDIU:
+ check_insn(ctx, ISA_MIPS3);
+ check_mips_64(ctx);
+ gen_arith_imm(ctx, op, rt, rs, imm);
+ break;
+#else
+ case OPC_BNVC: /* OPC_BNEZALC, OPC_BNEC */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+ gen_compute_compact_branch(ctx, op, rs, rt, imm << 2);
+ } else {
+ MIPS_INVAL("major opcode");
+ generate_exception_end(ctx, EXCP_RI);
+ }
+ break;
+#endif
+ case OPC_DAUI: /* OPC_JALX */
+ if (ctx->insn_flags & ISA_MIPS32R6) {
+#if defined(TARGET_MIPS64)
+ /* OPC_DAUI */
+ check_mips_64(ctx);
+ if (rs == 0) {
+ generate_exception(ctx, EXCP_RI);
+ } else if (rt != 0) {
+ TCGv t0 = tcg_temp_new();
+ gen_load_gpr(t0, rs);
+ tcg_gen_addi_tl(cpu_gpr[rt], t0, imm << 16);
+ tcg_temp_free(t0);
+ }
+#else
+ generate_exception_end(ctx, EXCP_RI);
+ MIPS_INVAL("major opcode");
+#endif
+ } else {
+ /* OPC_JALX */
+ check_insn(ctx, ASE_MIPS16 | ASE_MICROMIPS);
+ offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 2;
+ gen_compute_branch(ctx, op, 4, rs, rt, offset, 4);
+ }
+ break;
+ case OPC_MSA: /* OPC_MDMX */
+ /* MDMX: Not implemented. */
+ gen_msa(env, ctx);
+ break;
+ case OPC_PCREL:
+ check_insn(ctx, ISA_MIPS32R6);
+ gen_pcrel(ctx, ctx->opcode, ctx->pc, rs);
+ break;
+ default: /* Invalid */
+ MIPS_INVAL("major opcode");
+ generate_exception_end(ctx, EXCP_RI);
+ break;
+ }
+}
+
+void gen_intermediate_code(CPUMIPSState *env, struct TranslationBlock *tb)
+{
+ MIPSCPU *cpu = mips_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ DisasContext ctx;
+ target_ulong pc_start;
+ target_ulong next_page_start;
+ int num_insns;
+ int max_insns;
+ int insn_bytes;
+ int is_slot;
+
+ pc_start = tb->pc;
+ next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
+ ctx.pc = pc_start;
+ ctx.saved_pc = -1;
+ ctx.singlestep_enabled = cs->singlestep_enabled;
+ ctx.insn_flags = env->insn_flags;
+ ctx.CP0_Config1 = env->CP0_Config1;
+ ctx.tb = tb;
+ ctx.bstate = BS_NONE;
+ ctx.btarget = 0;
+ ctx.kscrexist = (env->CP0_Config4 >> CP0C4_KScrExist) & 0xff;
+ ctx.rxi = (env->CP0_Config3 >> CP0C3_RXI) & 1;
+ ctx.ie = (env->CP0_Config4 >> CP0C4_IE) & 3;
+ ctx.bi = (env->CP0_Config3 >> CP0C3_BI) & 1;
+ ctx.bp = (env->CP0_Config3 >> CP0C3_BP) & 1;
+ ctx.PAMask = env->PAMask;
+ ctx.mvh = (env->CP0_Config5 >> CP0C5_MVH) & 1;
+ ctx.CP0_LLAddr_shift = env->CP0_LLAddr_shift;
+ ctx.cmgcr = (env->CP0_Config3 >> CP0C3_CMGCR) & 1;
+ /* Restore delay slot state from the tb context. */
+ ctx.hflags = (uint32_t)tb->flags; /* FIXME: maybe use 64 bits here? */
+ ctx.ulri = (env->CP0_Config3 >> CP0C3_ULRI) & 1;
+ ctx.ps = ((env->active_fpu.fcr0 >> FCR0_PS) & 1) ||
+ (env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F));
+ ctx.vp = (env->CP0_Config5 >> CP0C5_VP) & 1;
+ ctx.mrp = (env->CP0_Config5 >> CP0C5_MRP) & 1;
+ ctx.nan2008 = (env->active_fpu.fcr31 >> FCR31_NAN2008) & 1;
+ ctx.abs2008 = (env->active_fpu.fcr31 >> FCR31_ABS2008) & 1;
+ restore_cpu_state(env, &ctx);
+#ifdef CONFIG_USER_ONLY
+ ctx.mem_idx = MIPS_HFLAG_UM;
+#else
+ ctx.mem_idx = ctx.hflags & MIPS_HFLAG_KSU;
+#endif
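+    /* MIPS R6 architecturally supports misaligned ordinary loads and
+       stores, so only pre-R6 ISAs keep the MO_ALIGN default. */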
+ ctx.default_tcg_memop_mask = (ctx.insn_flags & ISA_MIPS32R6) ?
+ MO_UNALN : MO_ALIGN;
+ num_insns = 0;
+ max_insns = tb->cflags & CF_COUNT_MASK;
+ if (max_insns == 0) {
+ max_insns = CF_COUNT_MASK;
+ }
+ if (max_insns > TCG_MAX_INSNS) {
+ max_insns = TCG_MAX_INSNS;
+ }
+
+ LOG_DISAS("\ntb %p idx %d hflags %04x\n", tb, ctx.mem_idx, ctx.hflags);
+ gen_tb_start(tb);
+ while (ctx.bstate == BS_NONE) {
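+        /* Record the PC, pending-branch hflags and branch target for this
+           instruction; restore_state_to_opc() below consumes these three
+           values when CPU state is restored after an exception. */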
+ tcg_gen_insn_start(ctx.pc, ctx.hflags & MIPS_HFLAG_BMASK, ctx.btarget);
+ num_insns++;
+
+ if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
+ save_cpu_state(&ctx, 1);
+ ctx.bstate = BS_BRANCH;
+ gen_helper_raise_exception_debug(cpu_env);
+ /* The address covered by the breakpoint must be included in
+               [tb->pc, tb->pc + tb->size) in order for it to be
+ properly cleared -- thus we increment the PC here so that
+ the logic setting tb->size below does the right thing. */
+ ctx.pc += 4;
+ goto done_generating;
+ }
+
+ if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ gen_io_start();
+ }
+
+ is_slot = ctx.hflags & MIPS_HFLAG_BMASK;
+ if (!(ctx.hflags & MIPS_HFLAG_M16)) {
+ ctx.opcode = cpu_ldl_code(env, ctx.pc);
+ insn_bytes = 4;
+ decode_opc(env, &ctx);
+ } else if (ctx.insn_flags & ASE_MICROMIPS) {
+ ctx.opcode = cpu_lduw_code(env, ctx.pc);
+ insn_bytes = decode_micromips_opc(env, &ctx);
+ } else if (ctx.insn_flags & ASE_MIPS16) {
+ ctx.opcode = cpu_lduw_code(env, ctx.pc);
+ insn_bytes = decode_mips16_opc(env, &ctx);
+ } else {
+ generate_exception_end(&ctx, EXCP_RI);
+ break;
+ }
+
+ if (ctx.hflags & MIPS_HFLAG_BMASK) {
+ if (!(ctx.hflags & (MIPS_HFLAG_BDS16 | MIPS_HFLAG_BDS32 |
+ MIPS_HFLAG_FBNSLOT))) {
+                /* Force generation of the branch as there is neither a
+                   delay slot nor a forbidden slot. */
+ is_slot = 1;
+ }
+ if ((ctx.hflags & MIPS_HFLAG_M16) &&
+ (ctx.hflags & MIPS_HFLAG_FBNSLOT)) {
+ /* Force to generate branch as microMIPS R6 doesn't restrict
+ branches in the forbidden slot. */
+ is_slot = 1;
+ }
+ }
+ if (is_slot) {
+ gen_branch(&ctx, insn_bytes);
+ }
+ ctx.pc += insn_bytes;
+
+ /* Execute a branch and its delay slot as a single instruction.
+ This is what GDB expects and is consistent with what the
+ hardware does (e.g. if a delay slot instruction faults, the
+ reported PC is the PC of the branch). */
+ if (cs->singlestep_enabled && (ctx.hflags & MIPS_HFLAG_BMASK) == 0) {
+ break;
+ }
+
+ if (ctx.pc >= next_page_start) {
+ break;
+ }
+
+ if (tcg_op_buf_full()) {
+ break;
+ }
+
+        if (num_insns >= max_insns) {
+            break;
+        }
+
+        if (singlestep) {
+            break;
+        }
+ }
+ if (tb->cflags & CF_LAST_IO) {
+ gen_io_end();
+ }
+ if (cs->singlestep_enabled && ctx.bstate != BS_BRANCH) {
+ save_cpu_state(&ctx, ctx.bstate != BS_EXCP);
+ gen_helper_raise_exception_debug(cpu_env);
+ } else {
+ switch (ctx.bstate) {
+ case BS_STOP:
+ gen_goto_tb(&ctx, 0, ctx.pc);
+ break;
+ case BS_NONE:
+ save_cpu_state(&ctx, 0);
+ gen_goto_tb(&ctx, 0, ctx.pc);
+ break;
+ case BS_EXCP:
+ tcg_gen_exit_tb(0);
+ break;
+ case BS_BRANCH:
+ default:
+ break;
+ }
+ }
+done_generating:
+ gen_tb_end(tb, num_insns);
+
+ tb->size = ctx.pc - pc_start;
+ tb->icount = num_insns;
+
+#ifdef DEBUG_DISAS
+ LOG_DISAS("\n");
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
+ && qemu_log_in_addr_range(pc_start)) {
+ qemu_log_lock();
+ qemu_log("IN: %s\n", lookup_symbol(pc_start));
+ log_target_disas(cs, pc_start, ctx.pc - pc_start, 0);
+ qemu_log("\n");
+ qemu_log_unlock();
+ }
+#endif
+}
+
+static void fpu_dump_state(CPUMIPSState *env, FILE *f, fprintf_function fpu_fprintf,
+ int flags)
+{
+ int i;
+ int is_fpu64 = !!(env->hflags & MIPS_HFLAG_F64);
+
+#define printfpr(fp) \
+ do { \
+ if (is_fpu64) \
+ fpu_fprintf(f, "w:%08x d:%016" PRIx64 \
+ " fd:%13g fs:%13g psu: %13g\n", \
+ (fp)->w[FP_ENDIAN_IDX], (fp)->d, \
+ (double)(fp)->fd, \
+ (double)(fp)->fs[FP_ENDIAN_IDX], \
+ (double)(fp)->fs[!FP_ENDIAN_IDX]); \
+ else { \
+ fpr_t tmp; \
+ tmp.w[FP_ENDIAN_IDX] = (fp)->w[FP_ENDIAN_IDX]; \
+ tmp.w[!FP_ENDIAN_IDX] = ((fp) + 1)->w[FP_ENDIAN_IDX]; \
+ fpu_fprintf(f, "w:%08x d:%016" PRIx64 \
+ " fd:%13g fs:%13g psu:%13g\n", \
+ tmp.w[FP_ENDIAN_IDX], tmp.d, \
+ (double)tmp.fd, \
+ (double)tmp.fs[FP_ENDIAN_IDX], \
+ (double)tmp.fs[!FP_ENDIAN_IDX]); \
+ } \
+ } while(0)
+
+
+ fpu_fprintf(f, "CP1 FCR0 0x%08x FCR31 0x%08x SR.FR %d fp_status 0x%02x\n",
+ env->active_fpu.fcr0, env->active_fpu.fcr31, is_fpu64,
+ get_float_exception_flags(&env->active_fpu.fp_status));
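+    /* With a 32-bit FPU (FR=0) registers pair up, so dump them two at a
+       time; printfpr() above combines the even/odd words in that case. */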
+ for (i = 0; i < 32; (is_fpu64) ? i++ : (i += 2)) {
+ fpu_fprintf(f, "%3s: ", fregnames[i]);
+ printfpr(&env->active_fpu.fpr[i]);
+ }
+
+#undef printfpr
+}
+
+void mips_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
+ int flags)
+{
+ MIPSCPU *cpu = MIPS_CPU(cs);
+ CPUMIPSState *env = &cpu->env;
+ int i;
+
+ cpu_fprintf(f, "pc=0x" TARGET_FMT_lx " HI=0x" TARGET_FMT_lx
+ " LO=0x" TARGET_FMT_lx " ds %04x "
+ TARGET_FMT_lx " " TARGET_FMT_ld "\n",
+ env->active_tc.PC, env->active_tc.HI[0], env->active_tc.LO[0],
+ env->hflags, env->btarget, env->bcond);
+    for (i = 0; i < 32; i++) {
+        if ((i & 3) == 0) {
+            cpu_fprintf(f, "GPR%02d:", i);
+        }
+        cpu_fprintf(f, " %s " TARGET_FMT_lx, regnames[i], env->active_tc.gpr[i]);
+        if ((i & 3) == 3) {
+            cpu_fprintf(f, "\n");
+        }
+    }
+
+ cpu_fprintf(f, "CP0 Status 0x%08x Cause 0x%08x EPC 0x" TARGET_FMT_lx "\n",
+ env->CP0_Status, env->CP0_Cause, env->CP0_EPC);
+ cpu_fprintf(f, " Config0 0x%08x Config1 0x%08x LLAddr 0x%016"
+ PRIx64 "\n",
+ env->CP0_Config0, env->CP0_Config1, env->lladdr);
+ cpu_fprintf(f, " Config2 0x%08x Config3 0x%08x\n",
+ env->CP0_Config2, env->CP0_Config3);
+ cpu_fprintf(f, " Config4 0x%08x Config5 0x%08x\n",
+ env->CP0_Config4, env->CP0_Config5);
+    if (env->hflags & MIPS_HFLAG_FPU) {
+        fpu_dump_state(env, f, cpu_fprintf, flags);
+    }
+}
+
+void mips_tcg_init(void)
+{
+ int i;
+ static int inited;
+
+ /* Initialize various static tables. */
+    if (inited) {
+        return;
+    }
+
+ cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+ tcg_ctx.tcg_env = cpu_env;
+
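+    /* $0 is hard-wired to zero, so no TCG global is allocated for it;
+       loads of GPR 0 produce the constant zero and stores are discarded. */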
+ TCGV_UNUSED(cpu_gpr[0]);
+    for (i = 1; i < 32; i++) {
+        cpu_gpr[i] = tcg_global_mem_new(cpu_env,
+                                        offsetof(CPUMIPSState, active_tc.gpr[i]),
+                                        regnames[i]);
+    }
+
+ for (i = 0; i < 32; i++) {
+ int off = offsetof(CPUMIPSState, active_fpu.fpr[i].wr.d[0]);
+ msa_wr_d[i * 2] =
+ tcg_global_mem_new_i64(cpu_env, off, msaregnames[i * 2]);
+        /* The scalar floating-point unit (FPU) registers are mapped onto
+         * the MSA vector registers. */
+ fpu_f64[i] = msa_wr_d[i * 2];
+ off = offsetof(CPUMIPSState, active_fpu.fpr[i].wr.d[1]);
+ msa_wr_d[i * 2 + 1] =
+ tcg_global_mem_new_i64(cpu_env, off, msaregnames[i * 2 + 1]);
+ }
+
+ cpu_PC = tcg_global_mem_new(cpu_env,
+ offsetof(CPUMIPSState, active_tc.PC), "PC");
+ for (i = 0; i < MIPS_DSP_ACC; i++) {
+ cpu_HI[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUMIPSState, active_tc.HI[i]),
+ regnames_HI[i]);
+ cpu_LO[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUMIPSState, active_tc.LO[i]),
+ regnames_LO[i]);
+ }
+ cpu_dspctrl = tcg_global_mem_new(cpu_env,
+ offsetof(CPUMIPSState, active_tc.DSPControl),
+ "DSPControl");
+ bcond = tcg_global_mem_new(cpu_env,
+ offsetof(CPUMIPSState, bcond), "bcond");
+ btarget = tcg_global_mem_new(cpu_env,
+ offsetof(CPUMIPSState, btarget), "btarget");
+ hflags = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUMIPSState, hflags), "hflags");
+
+ fpu_fcr0 = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUMIPSState, active_fpu.fcr0),
+ "fcr0");
+ fpu_fcr31 = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUMIPSState, active_fpu.fcr31),
+ "fcr31");
+
+ inited = 1;
+}
+
+#include "translate_init.c"
+
+MIPSCPU *cpu_mips_init(const char *cpu_model)
+{
+ MIPSCPU *cpu;
+ CPUMIPSState *env;
+ const mips_def_t *def;
+
+ def = cpu_mips_find_by_name(cpu_model);
+    if (!def) {
+        return NULL;
+    }
+ cpu = MIPS_CPU(object_new(TYPE_MIPS_CPU));
+ env = &cpu->env;
+ env->cpu_model = def;
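+    /* The MIPS reset vector is virtual address 0xBFC00000, the KSEG1
+       (uncached) mapping of physical 0x1FC00000. */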
+ env->exception_base = (int32_t)0xBFC00000;
+
+#ifndef CONFIG_USER_ONLY
+ mmu_init(env, def);
+#endif
+ fpu_init(env, def);
+ mvp_init(env, def);
+
+ object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
+
+ return cpu;
+}
+
+bool cpu_supports_cps_smp(const char *cpu_model)
+{
+ const mips_def_t *def = cpu_mips_find_by_name(cpu_model);
+ if (!def) {
+ return false;
+ }
+
+ return (def->CP0_Config3 & (1 << CP0C3_CMGCR)) != 0;
+}
+
+void cpu_set_exception_base(int vp_index, target_ulong address)
+{
+ MIPSCPU *vp = MIPS_CPU(qemu_get_cpu(vp_index));
+ vp->env.exception_base = address;
+}
+
+void cpu_state_reset(CPUMIPSState *env)
+{
+ MIPSCPU *cpu = mips_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
+ /* Reset registers to their default values */
+ env->CP0_PRid = env->cpu_model->CP0_PRid;
+ env->CP0_Config0 = env->cpu_model->CP0_Config0;
+#ifdef TARGET_WORDS_BIGENDIAN
+ env->CP0_Config0 |= (1 << CP0C0_BE);
+#endif
+ env->CP0_Config1 = env->cpu_model->CP0_Config1;
+ env->CP0_Config2 = env->cpu_model->CP0_Config2;
+ env->CP0_Config3 = env->cpu_model->CP0_Config3;
+ env->CP0_Config4 = env->cpu_model->CP0_Config4;
+ env->CP0_Config4_rw_bitmask = env->cpu_model->CP0_Config4_rw_bitmask;
+ env->CP0_Config5 = env->cpu_model->CP0_Config5;
+ env->CP0_Config5_rw_bitmask = env->cpu_model->CP0_Config5_rw_bitmask;
+ env->CP0_Config6 = env->cpu_model->CP0_Config6;
+ env->CP0_Config7 = env->cpu_model->CP0_Config7;
+ env->CP0_LLAddr_rw_bitmask = env->cpu_model->CP0_LLAddr_rw_bitmask
+ << env->cpu_model->CP0_LLAddr_shift;
+ env->CP0_LLAddr_shift = env->cpu_model->CP0_LLAddr_shift;
+ env->SYNCI_Step = env->cpu_model->SYNCI_Step;
+ env->CCRes = env->cpu_model->CCRes;
+ env->CP0_Status_rw_bitmask = env->cpu_model->CP0_Status_rw_bitmask;
+ env->CP0_TCStatus_rw_bitmask = env->cpu_model->CP0_TCStatus_rw_bitmask;
+ env->CP0_SRSCtl = env->cpu_model->CP0_SRSCtl;
+ env->current_tc = 0;
+ env->SEGBITS = env->cpu_model->SEGBITS;
+ env->SEGMask = (target_ulong)((1ULL << env->cpu_model->SEGBITS) - 1);
+#if defined(TARGET_MIPS64)
+ if (env->cpu_model->insn_flags & ISA_MIPS3) {
+ env->SEGMask |= 3ULL << 62;
+ }
+#endif
+ env->PABITS = env->cpu_model->PABITS;
+ env->CP0_SRSConf0_rw_bitmask = env->cpu_model->CP0_SRSConf0_rw_bitmask;
+ env->CP0_SRSConf0 = env->cpu_model->CP0_SRSConf0;
+ env->CP0_SRSConf1_rw_bitmask = env->cpu_model->CP0_SRSConf1_rw_bitmask;
+ env->CP0_SRSConf1 = env->cpu_model->CP0_SRSConf1;
+ env->CP0_SRSConf2_rw_bitmask = env->cpu_model->CP0_SRSConf2_rw_bitmask;
+ env->CP0_SRSConf2 = env->cpu_model->CP0_SRSConf2;
+ env->CP0_SRSConf3_rw_bitmask = env->cpu_model->CP0_SRSConf3_rw_bitmask;
+ env->CP0_SRSConf3 = env->cpu_model->CP0_SRSConf3;
+ env->CP0_SRSConf4_rw_bitmask = env->cpu_model->CP0_SRSConf4_rw_bitmask;
+ env->CP0_SRSConf4 = env->cpu_model->CP0_SRSConf4;
+ env->CP0_PageGrain_rw_bitmask = env->cpu_model->CP0_PageGrain_rw_bitmask;
+ env->CP0_PageGrain = env->cpu_model->CP0_PageGrain;
+ env->active_fpu.fcr0 = env->cpu_model->CP1_fcr0;
+ env->active_fpu.fcr31_rw_bitmask = env->cpu_model->CP1_fcr31_rw_bitmask;
+ env->active_fpu.fcr31 = env->cpu_model->CP1_fcr31;
+ env->msair = env->cpu_model->MSAIR;
+ env->insn_flags = env->cpu_model->insn_flags;
+
+#if defined(CONFIG_USER_ONLY)
+ env->CP0_Status = (MIPS_HFLAG_UM << CP0St_KSU);
+# ifdef TARGET_MIPS64
+ /* Enable 64-bit register mode. */
+ env->CP0_Status |= (1 << CP0St_PX);
+# endif
+# ifdef TARGET_ABI_MIPSN64
+ /* Enable 64-bit address mode. */
+ env->CP0_Status |= (1 << CP0St_UX);
+# endif
+ /* Enable access to the CPUNum, SYNCI_Step, CC, and CCRes RDHWR
+ hardware registers. */
+ env->CP0_HWREna |= 0x0000000F;
+ if (env->CP0_Config1 & (1 << CP0C1_FP)) {
+ env->CP0_Status |= (1 << CP0St_CU1);
+ }
+ if (env->CP0_Config3 & (1 << CP0C3_DSPP)) {
+ env->CP0_Status |= (1 << CP0St_MX);
+ }
+# if defined(TARGET_MIPS64)
+ /* For MIPS64, init FR bit to 1 if FPU unit is there and bit is writable. */
+ if ((env->CP0_Config1 & (1 << CP0C1_FP)) &&
+ (env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
+ env->CP0_Status |= (1 << CP0St_FR);
+ }
+# endif
+#else
+ if (env->hflags & MIPS_HFLAG_BMASK) {
+ /* If the exception was raised from a delay slot,
+ come back to the jump. */
+ env->CP0_ErrorEPC = (env->active_tc.PC
+ - (env->hflags & MIPS_HFLAG_B16 ? 2 : 4));
+ } else {
+ env->CP0_ErrorEPC = env->active_tc.PC;
+ }
+ env->active_tc.PC = env->exception_base;
+ env->CP0_Random = env->tlb->nb_tlb - 1;
+ env->tlb->tlb_in_use = env->tlb->nb_tlb;
+ env->CP0_Wired = 0;
+ env->CP0_GlobalNumber = (cs->cpu_index & 0xFF) << CP0GN_VPId;
+ env->CP0_EBase = (cs->cpu_index & 0x3FF);
+ if (kvm_enabled()) {
+ env->CP0_EBase |= 0x40000000;
+ } else {
+ env->CP0_EBase |= 0x80000000;
+ }
+ if (env->CP0_Config3 & (1 << CP0C3_CMGCR)) {
+ env->CP0_CMGCRBase = 0x1fbf8000 >> 4;
+ }
+ env->CP0_EntryHi_ASID_mask = (env->CP0_Config4 & (1 << CP0C4_AE)) ?
+ 0x3ff : 0xff;
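+    /* Architectural reset state: bootstrap exception vectors (BEV) and
+       error level (ERL) both start set. */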
+ env->CP0_Status = (1 << CP0St_BEV) | (1 << CP0St_ERL);
+ /* vectored interrupts not implemented, timer on int 7,
+ no performance counters. */
+ env->CP0_IntCtl = 0xe0000000;
+ {
+ int i;
+
+ for (i = 0; i < 7; i++) {
+ env->CP0_WatchLo[i] = 0;
+ env->CP0_WatchHi[i] = 0x80000000;
+ }
+ env->CP0_WatchLo[7] = 0;
+ env->CP0_WatchHi[7] = 0;
+ }
+ /* Count register increments in debug mode, EJTAG version 1 */
+ env->CP0_Debug = (1 << CP0DB_CNT) | (0x1 << CP0DB_VER);
+
+ cpu_mips_store_count(env, 1);
+
+ if (env->CP0_Config3 & (1 << CP0C3_MT)) {
+ int i;
+
+ /* Only TC0 on VPE 0 starts as active. */
+ for (i = 0; i < ARRAY_SIZE(env->tcs); i++) {
+ env->tcs[i].CP0_TCBind = cs->cpu_index << CP0TCBd_CurVPE;
+ env->tcs[i].CP0_TCHalt = 1;
+ }
+ env->active_tc.CP0_TCHalt = 1;
+ cs->halted = 1;
+
+ if (cs->cpu_index == 0) {
+ /* VPE0 starts up enabled. */
+ env->mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP);
+ env->CP0_VPEConf0 |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
+
+ /* TC0 starts up unhalted. */
+ cs->halted = 0;
+ env->active_tc.CP0_TCHalt = 0;
+ env->tcs[0].CP0_TCHalt = 0;
+ /* With thread 0 active. */
+ env->active_tc.CP0_TCStatus = (1 << CP0TCSt_A);
+ env->tcs[0].CP0_TCStatus = (1 << CP0TCSt_A);
+ }
+ }
+#endif
+ if ((env->insn_flags & ISA_MIPS32R6) &&
+ (env->active_fpu.fcr0 & (1 << FCR0_F64))) {
+ /* Status.FR = 0 mode in 64-bit FPU not allowed in R6 */
+ env->CP0_Status |= (1 << CP0St_FR);
+ }
+
+ /* MSA */
+ if (env->CP0_Config3 & (1 << CP0C3_MSAP)) {
+ msa_reset(env);
+ }
+
+ compute_hflags(env);
+ restore_fp_status(env);
+ restore_pamask(env);
+ cs->exception_index = EXCP_NONE;
+
+ if (semihosting_get_argc()) {
+ /* UHI interface can be used to obtain argc and argv */
+ env->active_tc.gpr[4] = -1;
+ }
+}
+
+void restore_state_to_opc(CPUMIPSState *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ env->active_tc.PC = data[0];
+ env->hflags &= ~MIPS_HFLAG_BMASK;
+ env->hflags |= data[1];
+ switch (env->hflags & MIPS_HFLAG_BMASK_BASE) {
+ case MIPS_HFLAG_BR:
+ break;
+ case MIPS_HFLAG_BC:
+ case MIPS_HFLAG_BL:
+ case MIPS_HFLAG_B:
+ env->btarget = data[2];
+ break;
+ }
+}
diff --git a/target/mips/translate_init.c b/target/mips/translate_init.c
new file mode 100644
index 0000000000..6ae23e476f
--- /dev/null
+++ b/target/mips/translate_init.c
@@ -0,0 +1,944 @@
+/*
+ * MIPS emulation for qemu: CPU initialisation routines.
+ *
+ * Copyright (c) 2004-2005 Jocelyn Mayer
+ * Copyright (c) 2007 Herve Poussineau
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* CPU / CPU family specific config register values. */
+
+/* Have config1, uncached coherency */
+#define MIPS_CONFIG0 \
+ ((1U << CP0C0_M) | (0x2 << CP0C0_K0))
+
+/* Have config2, no coprocessor2 attached, no MDMX support attached,
+ no performance counters, watch registers present,
+ no code compression, EJTAG present, no FPU */
+#define MIPS_CONFIG1 \
+((1U << CP0C1_M) | \
+ (0 << CP0C1_C2) | (0 << CP0C1_MD) | (0 << CP0C1_PC) | \
+ (1 << CP0C1_WR) | (0 << CP0C1_CA) | (1 << CP0C1_EP) | \
+ (0 << CP0C1_FP))
+
+/* Have config3, no tertiary/secondary caches implemented */
+#define MIPS_CONFIG2 \
+((1U << CP0C2_M))
+
+/* No config4, no DSP ASE, no large physaddr (PABITS),
+ no external interrupt controller, no vectored interrupts,
+ no 1kb pages, no SmartMIPS ASE, no trace logic */
+#define MIPS_CONFIG3 \
+((0 << CP0C3_M) | (0 << CP0C3_DSPP) | (0 << CP0C3_LPA) | \
+ (0 << CP0C3_VEIC) | (0 << CP0C3_VInt) | (0 << CP0C3_SP) | \
+ (0 << CP0C3_SM) | (0 << CP0C3_TL))
+
+#define MIPS_CONFIG4 \
+((0 << CP0C4_M))
+
+#define MIPS_CONFIG5 \
+((0 << CP0C5_M))
+
+/* MMU types, the first four entries have the same layout as the
+ CP0C0_MT field. */
+enum mips_mmu_types {
+ MMU_TYPE_NONE,
+ MMU_TYPE_R4000,
+ MMU_TYPE_RESERVED,
+ MMU_TYPE_FMT,
+ MMU_TYPE_R3000,
+ MMU_TYPE_R6000,
+ MMU_TYPE_R8000
+};
+
+struct mips_def_t {
+ const char *name;
+ int32_t CP0_PRid;
+ int32_t CP0_Config0;
+ int32_t CP0_Config1;
+ int32_t CP0_Config2;
+ int32_t CP0_Config3;
+ int32_t CP0_Config4;
+ int32_t CP0_Config4_rw_bitmask;
+ int32_t CP0_Config5;
+ int32_t CP0_Config5_rw_bitmask;
+ int32_t CP0_Config6;
+ int32_t CP0_Config7;
+ target_ulong CP0_LLAddr_rw_bitmask;
+ int CP0_LLAddr_shift;
+ int32_t SYNCI_Step;
+ int32_t CCRes;
+ int32_t CP0_Status_rw_bitmask;
+ int32_t CP0_TCStatus_rw_bitmask;
+ int32_t CP0_SRSCtl;
+ int32_t CP1_fcr0;
+ int32_t CP1_fcr31_rw_bitmask;
+ int32_t CP1_fcr31;
+ int32_t MSAIR;
+ int32_t SEGBITS;
+ int32_t PABITS;
+ int32_t CP0_SRSConf0_rw_bitmask;
+ int32_t CP0_SRSConf0;
+ int32_t CP0_SRSConf1_rw_bitmask;
+ int32_t CP0_SRSConf1;
+ int32_t CP0_SRSConf2_rw_bitmask;
+ int32_t CP0_SRSConf2;
+ int32_t CP0_SRSConf3_rw_bitmask;
+ int32_t CP0_SRSConf3;
+ int32_t CP0_SRSConf4_rw_bitmask;
+ int32_t CP0_SRSConf4;
+ int32_t CP0_PageGrain_rw_bitmask;
+ int32_t CP0_PageGrain;
+ int insn_flags;
+ enum mips_mmu_types mmu_type;
+};
+
+/*****************************************************************************/
+/* MIPS CPU definitions */
+static const mips_def_t mips_defs[] =
+{
+ {
+ .name = "4Kc",
+ .CP0_PRid = 0x00018000,
+ .CP0_Config0 = MIPS_CONFIG0 | (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (15 << CP0C1_MMU) |
+ (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) |
+ (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) |
+ (0 << CP0C1_CA),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3,
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x1278FF17,
+ .SEGBITS = 32,
+ .PABITS = 32,
+ .insn_flags = CPU_MIPS32,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "4Km",
+ .CP0_PRid = 0x00018300,
+ /* Config1 implemented, fixed mapping MMU,
+ no virtual icache, uncached coherency. */
+ .CP0_Config0 = MIPS_CONFIG0 | (MMU_TYPE_FMT << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 |
+ (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) |
+ (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) |
+ (1 << CP0C1_CA),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3,
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x1258FF17,
+ .SEGBITS = 32,
+ .PABITS = 32,
+ .insn_flags = CPU_MIPS32 | ASE_MIPS16,
+ .mmu_type = MMU_TYPE_FMT,
+ },
+ {
+ .name = "4KEcR1",
+ .CP0_PRid = 0x00018400,
+ .CP0_Config0 = MIPS_CONFIG0 | (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (15 << CP0C1_MMU) |
+ (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) |
+ (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) |
+ (0 << CP0C1_CA),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3,
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x1278FF17,
+ .SEGBITS = 32,
+ .PABITS = 32,
+ .insn_flags = CPU_MIPS32,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "4KEmR1",
+ .CP0_PRid = 0x00018500,
+ .CP0_Config0 = MIPS_CONFIG0 | (MMU_TYPE_FMT << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 |
+ (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) |
+ (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) |
+ (1 << CP0C1_CA),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3,
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x1258FF17,
+ .SEGBITS = 32,
+ .PABITS = 32,
+ .insn_flags = CPU_MIPS32 | ASE_MIPS16,
+ .mmu_type = MMU_TYPE_FMT,
+ },
+ {
+ .name = "4KEc",
+ .CP0_PRid = 0x00019000,
+ .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (15 << CP0C1_MMU) |
+ (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) |
+ (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) |
+ (0 << CP0C1_CA),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3 | (0 << CP0C3_VInt),
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x1278FF17,
+ .SEGBITS = 32,
+ .PABITS = 32,
+ .insn_flags = CPU_MIPS32R2,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "4KEm",
+ .CP0_PRid = 0x00019100,
+ .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
+ (MMU_TYPE_FMT << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 |
+ (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) |
+ (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) |
+ (1 << CP0C1_CA),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3,
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x1258FF17,
+ .SEGBITS = 32,
+ .PABITS = 32,
+ .insn_flags = CPU_MIPS32R2 | ASE_MIPS16,
+ .mmu_type = MMU_TYPE_FMT,
+ },
+ {
+ .name = "24Kc",
+ .CP0_PRid = 0x00019300,
+ .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (15 << CP0C1_MMU) |
+ (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) |
+ (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) |
+ (1 << CP0C1_CA),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3 | (0 << CP0C3_VInt),
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ /* No DSP implemented. */
+ .CP0_Status_rw_bitmask = 0x1278FF1F,
+ .SEGBITS = 32,
+ .PABITS = 32,
+ .insn_flags = CPU_MIPS32R2 | ASE_MIPS16,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "24KEc",
+ .CP0_PRid = 0x00019600,
+ .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (15 << CP0C1_MMU) |
+ (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) |
+ (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) |
+ (1 << CP0C1_CA),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3 | (1 << CP0C3_DSPP) | (0 << CP0C3_VInt),
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ /* we have a DSP, but no FPU */
+ .CP0_Status_rw_bitmask = 0x1378FF1F,
+ .SEGBITS = 32,
+ .PABITS = 32,
+ .insn_flags = CPU_MIPS32R2 | ASE_MIPS16 | ASE_DSP,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "24Kf",
+ .CP0_PRid = 0x00019300,
+ .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (15 << CP0C1_MMU) |
+ (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) |
+ (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) |
+ (1 << CP0C1_CA),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3 | (0 << CP0C3_VInt),
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ /* No DSP implemented. */
+ .CP0_Status_rw_bitmask = 0x3678FF1F,
+ .CP1_fcr0 = (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) |
+ (1 << FCR0_D) | (1 << FCR0_S) | (0x93 << FCR0_PRID),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
+ .SEGBITS = 32,
+ .PABITS = 32,
+ .insn_flags = CPU_MIPS32R2 | ASE_MIPS16,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "34Kf",
+ .CP0_PRid = 0x00019500,
+ .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (15 << CP0C1_MMU) |
+ (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) |
+ (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) |
+ (1 << CP0C1_CA),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3 | (1 << CP0C3_VInt) | (1 << CP0C3_MT) |
+ (1 << CP0C3_DSPP),
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 0,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x3778FF1F,
+ .CP0_TCStatus_rw_bitmask = (0 << CP0TCSt_TCU3) | (0 << CP0TCSt_TCU2) |
+ (1 << CP0TCSt_TCU1) | (1 << CP0TCSt_TCU0) |
+ (0 << CP0TCSt_TMX) | (1 << CP0TCSt_DT) |
+ (1 << CP0TCSt_DA) | (1 << CP0TCSt_A) |
+ (0x3 << CP0TCSt_TKSU) | (1 << CP0TCSt_IXMT) |
+ (0xff << CP0TCSt_TASID),
+ .CP1_fcr0 = (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) |
+ (1 << FCR0_D) | (1 << FCR0_S) | (0x95 << FCR0_PRID),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
+ .CP0_SRSCtl = (0xf << CP0SRSCtl_HSS),
+ .CP0_SRSConf0_rw_bitmask = 0x3fffffff,
+ .CP0_SRSConf0 = (1U << CP0SRSC0_M) | (0x3fe << CP0SRSC0_SRS3) |
+ (0x3fe << CP0SRSC0_SRS2) | (0x3fe << CP0SRSC0_SRS1),
+ .CP0_SRSConf1_rw_bitmask = 0x3fffffff,
+ .CP0_SRSConf1 = (1U << CP0SRSC1_M) | (0x3fe << CP0SRSC1_SRS6) |
+ (0x3fe << CP0SRSC1_SRS5) | (0x3fe << CP0SRSC1_SRS4),
+ .CP0_SRSConf2_rw_bitmask = 0x3fffffff,
+ .CP0_SRSConf2 = (1U << CP0SRSC2_M) | (0x3fe << CP0SRSC2_SRS9) |
+ (0x3fe << CP0SRSC2_SRS8) | (0x3fe << CP0SRSC2_SRS7),
+ .CP0_SRSConf3_rw_bitmask = 0x3fffffff,
+ .CP0_SRSConf3 = (1U << CP0SRSC3_M) | (0x3fe << CP0SRSC3_SRS12) |
+ (0x3fe << CP0SRSC3_SRS11) | (0x3fe << CP0SRSC3_SRS10),
+ .CP0_SRSConf4_rw_bitmask = 0x3fffffff,
+ .CP0_SRSConf4 = (0x3fe << CP0SRSC4_SRS15) |
+ (0x3fe << CP0SRSC4_SRS14) | (0x3fe << CP0SRSC4_SRS13),
+ .SEGBITS = 32,
+ .PABITS = 32,
+ .insn_flags = CPU_MIPS32R2 | ASE_MIPS16 | ASE_DSP | ASE_MT,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "74Kf",
+ .CP0_PRid = 0x00019700,
+ .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (15 << CP0C1_MMU) |
+ (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) |
+ (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA) |
+ (1 << CP0C1_CA),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3 | (1 << CP0C3_DSP2P) | (1 << CP0C3_DSPP) |
+ (1 << CP0C3_VInt),
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x3778FF1F,
+ .CP1_fcr0 = (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) |
+ (1 << FCR0_D) | (1 << FCR0_S) | (0x93 << FCR0_PRID),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
+ .SEGBITS = 32,
+ .PABITS = 32,
+ .insn_flags = CPU_MIPS32R2 | ASE_MIPS16 | ASE_DSP | ASE_DSPR2,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "M14K",
+ .CP0_PRid = 0x00019b00,
+ /* Config1 implemented, fixed mapping MMU,
+ no virtual icache, uncached coherency. */
+ .CP0_Config0 = MIPS_CONFIG0 | (0x2 << CP0C0_KU) | (0x2 << CP0C0_K23) |
+ (0x1 << CP0C0_AR) | (MMU_TYPE_FMT << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1,
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3 | (0x2 << CP0C3_ISA) | (1 << CP0C3_VInt),
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x1258FF17,
+ .SEGBITS = 32,
+ .PABITS = 32,
+ .insn_flags = CPU_MIPS32R2 | ASE_MICROMIPS,
+ .mmu_type = MMU_TYPE_FMT,
+ },
+ {
+ .name = "M14Kc",
+ /* This is the TLB-based MMU core. */
+ .CP0_PRid = 0x00019c00,
+ .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (15 << CP0C1_MMU) |
+ (0 << CP0C1_IS) | (3 << CP0C1_IL) | (1 << CP0C1_IA) |
+ (0 << CP0C1_DS) | (3 << CP0C1_DL) | (1 << CP0C1_DA),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3 | (0x2 << CP0C3_ISA) | (0 << CP0C3_VInt),
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x1278FF17,
+ .SEGBITS = 32,
+ .PABITS = 32,
+ .insn_flags = CPU_MIPS32R2 | ASE_MICROMIPS,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ /* FIXME:
+ * Config3: CMGCR, SC, PW, VZ, CTXTC, CDMM, TL
+ * Config4: MMUExtDef
+ * Config5: EVA, MRP
+ * FIR(FCR0): Has2008
+         */
+ .name = "P5600",
+ .CP0_PRid = 0x0001A800,
+ .CP0_Config0 = MIPS_CONFIG0 | (1 << CP0C0_MM) | (1 << CP0C0_AR) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (0x3F << CP0C1_MMU) |
+ (2 << CP0C1_IS) | (4 << CP0C1_IL) | (3 << CP0C1_IA) |
+ (2 << CP0C1_DS) | (4 << CP0C1_DL) | (3 << CP0C1_DA) |
+ (1 << CP0C1_PC) | (1 << CP0C1_FP),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3 | (1U << CP0C3_M) | (1 << CP0C3_MSAP) |
+ (1 << CP0C3_BP) | (1 << CP0C3_BI) | (1 << CP0C3_ULRI) |
+ (1 << CP0C3_RXI) | (1 << CP0C3_LPA) | (1 << CP0C3_VInt),
+ .CP0_Config4 = MIPS_CONFIG4 | (1U << CP0C4_M) | (2 << CP0C4_IE) |
+ (0x1c << CP0C4_KScrExist),
+ .CP0_Config4_rw_bitmask = 0,
+ .CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_MVH) | (1 << CP0C5_LLB) |
+ (1 << CP0C5_MRP),
+ .CP0_Config5_rw_bitmask = (1 << CP0C5_K) | (1 << CP0C5_CV) |
+ (1 << CP0C5_MSAEn) | (1 << CP0C5_UFE) |
+ (1 << CP0C5_FRE) | (1 << CP0C5_UFR),
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 0,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x3C68FF1F,
+ .CP0_PageGrain_rw_bitmask = (1U << CP0PG_RIE) | (1 << CP0PG_XIE) |
+ (1 << CP0PG_ELPA) | (1 << CP0PG_IEC),
+ .CP1_fcr0 = (1 << FCR0_FREP) | (1 << FCR0_UFRP) | (1 << FCR0_HAS2008) |
+ (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) |
+ (1 << FCR0_D) | (1 << FCR0_S) | (0x03 << FCR0_PRID),
+ .CP1_fcr31 = (1 << FCR31_ABS2008) | (1 << FCR31_NAN2008),
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
+ .SEGBITS = 32,
+ .PABITS = 40,
+ .insn_flags = CPU_MIPS32R5 | ASE_MSA,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ /* A generic CPU supporting MIPS32 Release 6 ISA.
+ FIXME: Support IEEE 754-2008 FP.
+ Eventually this should be replaced by a real CPU model. */
+ .name = "mips32r6-generic",
+ .CP0_PRid = 0x00010000,
+ .CP0_Config0 = MIPS_CONFIG0 | (0x2 << CP0C0_AR) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (31 << CP0C1_MMU) |
+ (2 << CP0C1_IS) | (4 << CP0C1_IL) | (3 << CP0C1_IA) |
+ (2 << CP0C1_DS) | (4 << CP0C1_DL) | (3 << CP0C1_DA) |
+ (0 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3 | (1 << CP0C3_BP) | (1 << CP0C3_BI) |
+ (2 << CP0C3_ISA) | (1 << CP0C3_ULRI) |
+ (1 << CP0C3_RXI) | (1U << CP0C3_M),
+ .CP0_Config4 = MIPS_CONFIG4 | (0xfc << CP0C4_KScrExist) |
+ (3 << CP0C4_IE) | (1U << CP0C4_M),
+ .CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_XNP) | (1 << CP0C5_LLB),
+ .CP0_Config5_rw_bitmask = (1 << CP0C5_SBRI) | (1 << CP0C5_FRE) |
+ (1 << CP0C5_UFE),
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 0,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x3058FF1F,
+ .CP0_PageGrain = (1 << CP0PG_IEC) | (1 << CP0PG_XIE) |
+ (1U << CP0PG_RIE),
+ .CP0_PageGrain_rw_bitmask = 0,
+ .CP1_fcr0 = (1 << FCR0_FREP) | (1 << FCR0_HAS2008) | (1 << FCR0_F64) |
+ (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) |
+ (1 << FCR0_S) | (0x00 << FCR0_PRID) | (0x0 << FCR0_REV),
+ .CP1_fcr31 = (1 << FCR31_ABS2008) | (1 << FCR31_NAN2008),
+ .CP1_fcr31_rw_bitmask = 0x0103FFFF,
+ .SEGBITS = 32,
+ .PABITS = 32,
+ .insn_flags = CPU_MIPS32R6 | ASE_MICROMIPS,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+#if defined(TARGET_MIPS64)
+ {
+ .name = "R4000",
+ .CP0_PRid = 0x00000400,
+ /* No L2 cache, icache size 8k, dcache size 8k, uncached coherency. */
+ .CP0_Config0 = (1 << 17) | (0x1 << 9) | (0x1 << 6) | (0x2 << CP0C0_K0),
+ /* Note: Config1 is only used internally, the R4000 has only Config0. */
+ .CP0_Config1 = (1 << CP0C1_FP) | (47 << CP0C1_MMU),
+ .CP0_LLAddr_rw_bitmask = 0xFFFFFFFF,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 16,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x3678FFFF,
+ /* The R4000 has a full 64bit FPU but doesn't use the fcr0 bits. */
+ .CP1_fcr0 = (0x5 << FCR0_PRID) | (0x0 << FCR0_REV),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0x0183FFFF,
+ .SEGBITS = 40,
+ .PABITS = 36,
+ .insn_flags = CPU_MIPS3,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "VR5432",
+ .CP0_PRid = 0x00005400,
+ /* No L2 cache, icache size 8k, dcache size 8k, uncached coherency. */
+ .CP0_Config0 = (1 << 17) | (0x1 << 9) | (0x1 << 6) | (0x2 << CP0C0_K0),
+ .CP0_Config1 = (1 << CP0C1_FP) | (47 << CP0C1_MMU),
+ .CP0_LLAddr_rw_bitmask = 0xFFFFFFFFL,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 16,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x3678FFFF,
+ /* The VR5432 has a full 64bit FPU but doesn't use the fcr0 bits. */
+ .CP1_fcr0 = (0x54 << FCR0_PRID) | (0x0 << FCR0_REV),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
+ .SEGBITS = 40,
+ .PABITS = 32,
+ .insn_flags = CPU_VR54XX,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "5Kc",
+ .CP0_PRid = 0x00018100,
+ .CP0_Config0 = MIPS_CONFIG0 | (0x2 << CP0C0_AT) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (31 << CP0C1_MMU) |
+ (1 << CP0C1_IS) | (4 << CP0C1_IL) | (1 << CP0C1_IA) |
+ (1 << CP0C1_DS) | (4 << CP0C1_DL) | (1 << CP0C1_DA) |
+ (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3,
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x12F8FFFF,
+ .SEGBITS = 42,
+ .PABITS = 36,
+ .insn_flags = CPU_MIPS64,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "5Kf",
+ .CP0_PRid = 0x00018100,
+ .CP0_Config0 = MIPS_CONFIG0 | (0x2 << CP0C0_AT) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (31 << CP0C1_MMU) |
+ (1 << CP0C1_IS) | (4 << CP0C1_IL) | (1 << CP0C1_IA) |
+ (1 << CP0C1_DS) | (4 << CP0C1_DL) | (1 << CP0C1_DA) |
+ (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3,
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x36F8FFFF,
+ /* The 5Kf has F64 / L / W but doesn't use the fcr0 bits. */
+ .CP1_fcr0 = (1 << FCR0_D) | (1 << FCR0_S) |
+ (0x81 << FCR0_PRID) | (0x0 << FCR0_REV),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
+ .SEGBITS = 42,
+ .PABITS = 36,
+ .insn_flags = CPU_MIPS64,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "20Kc",
+ /* We emulate a later version of the 20Kc, earlier ones had a broken
+ WAIT instruction. */
+ .CP0_PRid = 0x000182a0,
+ .CP0_Config0 = MIPS_CONFIG0 | (0x2 << CP0C0_AT) |
+ (MMU_TYPE_R4000 << CP0C0_MT) | (1 << CP0C0_VI),
+ .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (47 << CP0C1_MMU) |
+ (2 << CP0C1_IS) | (4 << CP0C1_IL) | (3 << CP0C1_IA) |
+ (2 << CP0C1_DS) | (4 << CP0C1_DL) | (3 << CP0C1_DA) |
+ (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3,
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 0,
+ .SYNCI_Step = 32,
+ .CCRes = 1,
+ .CP0_Status_rw_bitmask = 0x36FBFFFF,
+ /* The 20Kc has F64 / L / W but doesn't use the fcr0 bits. */
+ .CP1_fcr0 = (1 << FCR0_3D) | (1 << FCR0_PS) |
+ (1 << FCR0_D) | (1 << FCR0_S) |
+ (0x82 << FCR0_PRID) | (0x0 << FCR0_REV),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
+ .SEGBITS = 40,
+ .PABITS = 36,
+ .insn_flags = CPU_MIPS64 | ASE_MIPS3D,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ /* A generic CPU providing MIPS64 Release 2 features.
+ FIXME: Eventually this should be replaced by a real CPU model. */
+ .name = "MIPS64R2-generic",
+ .CP0_PRid = 0x00010000,
+ .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | (0x2 << CP0C0_AT) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (63 << CP0C1_MMU) |
+ (2 << CP0C1_IS) | (4 << CP0C1_IL) | (3 << CP0C1_IA) |
+ (2 << CP0C1_DS) | (4 << CP0C1_DL) | (3 << CP0C1_DA) |
+ (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3 | (1 << CP0C3_LPA),
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 0,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x36FBFFFF,
+ .CP1_fcr0 = (1 << FCR0_F64) | (1 << FCR0_3D) | (1 << FCR0_PS) |
+ (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) |
+ (1 << FCR0_S) | (0x00 << FCR0_PRID) | (0x0 << FCR0_REV),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
+ .SEGBITS = 42,
+ .PABITS = 36,
+ .insn_flags = CPU_MIPS64R2 | ASE_MIPS3D,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "5KEc",
+ .CP0_PRid = 0x00018900,
+ .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | (0x2 << CP0C0_AT) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (31 << CP0C1_MMU) |
+ (1 << CP0C1_IS) | (4 << CP0C1_IL) | (1 << CP0C1_IA) |
+ (1 << CP0C1_DS) | (4 << CP0C1_DL) | (1 << CP0C1_DA) |
+ (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3,
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x12F8FFFF,
+ .SEGBITS = 42,
+ .PABITS = 36,
+ .insn_flags = CPU_MIPS64R2,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "5KEf",
+ .CP0_PRid = 0x00018900,
+ .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | (0x2 << CP0C0_AT) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (31 << CP0C1_MMU) |
+ (1 << CP0C1_IS) | (4 << CP0C1_IL) | (1 << CP0C1_IA) |
+ (1 << CP0C1_DS) | (4 << CP0C1_DL) | (1 << CP0C1_DA) |
+ (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3,
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 4,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x36F8FFFF,
+ .CP1_fcr0 = (1 << FCR0_F64) | (1 << FCR0_L) | (1 << FCR0_W) |
+ (1 << FCR0_D) | (1 << FCR0_S) |
+ (0x89 << FCR0_PRID) | (0x0 << FCR0_REV),
+ .SEGBITS = 42,
+ .PABITS = 36,
+ .insn_flags = CPU_MIPS64R2,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "I6400",
+ .CP0_PRid = 0x1A900,
+ .CP0_Config0 = MIPS_CONFIG0 | (0x2 << CP0C0_AR) | (0x2 << CP0C0_AT) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (15 << CP0C1_MMU) |
+ (2 << CP0C1_IS) | (5 << CP0C1_IL) | (3 << CP0C1_IA) |
+ (2 << CP0C1_DS) | (5 << CP0C1_DL) | (3 << CP0C1_DA) |
+ (0 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3 | (1U << CP0C3_M) |
+ (1 << CP0C3_CMGCR) | (1 << CP0C3_MSAP) |
+ (1 << CP0C3_BP) | (1 << CP0C3_BI) | (1 << CP0C3_ULRI) |
+ (1 << CP0C3_RXI) | (1 << CP0C3_LPA) | (1 << CP0C3_VInt),
+ .CP0_Config4 = MIPS_CONFIG4 | (1U << CP0C4_M) | (3 << CP0C4_IE) |
+ (1 << CP0C4_AE) | (0xfc << CP0C4_KScrExist),
+ .CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_XNP) | (1 << CP0C5_VP) |
+ (1 << CP0C5_LLB) | (1 << CP0C5_MRP),
+ .CP0_Config5_rw_bitmask = (1 << CP0C5_MSAEn) | (1 << CP0C5_SBRI) |
+ (1 << CP0C5_FRE) | (1 << CP0C5_UFE),
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 0,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x30D8FFFF,
+ .CP0_PageGrain = (1 << CP0PG_IEC) | (1 << CP0PG_XIE) |
+ (1U << CP0PG_RIE),
+ .CP0_PageGrain_rw_bitmask = (1 << CP0PG_ELPA),
+ .CP1_fcr0 = (1 << FCR0_FREP) | (1 << FCR0_HAS2008) | (1 << FCR0_F64) |
+ (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) |
+ (1 << FCR0_S) | (0x03 << FCR0_PRID) | (0x0 << FCR0_REV),
+ .CP1_fcr31 = (1 << FCR31_ABS2008) | (1 << FCR31_NAN2008),
+ .CP1_fcr31_rw_bitmask = 0x0103FFFF,
+ .MSAIR = 0x03 << MSAIR_ProcID,
+ .SEGBITS = 48,
+ .PABITS = 48,
+ .insn_flags = CPU_MIPS64R6 | ASE_MSA,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "Loongson-2E",
+ .CP0_PRid = 0x6302,
+        /* 64KB I-cache and D-cache, 4-way, with 32-byte cache lines. */
+ .CP0_Config0 = (0x1<<17) | (0x1<<16) | (0x1<<11) | (0x1<<8) |
+ (0x1<<5) | (0x1<<4) | (0x1<<1),
+ /* Note: Config1 is only used internally,
+ Loongson-2E has only Config0. */
+ .CP0_Config1 = (1 << CP0C1_FP) | (47 << CP0C1_MMU),
+ .SYNCI_Step = 16,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x35D0FFFF,
+ .CP1_fcr0 = (0x5 << FCR0_PRID) | (0x1 << FCR0_REV),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
+ .SEGBITS = 40,
+ .PABITS = 40,
+ .insn_flags = CPU_LOONGSON2E,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "Loongson-2F",
+ .CP0_PRid = 0x6303,
+        /* 64KB I-cache and D-cache, 4-way, with 32-byte cache lines. */
+ .CP0_Config0 = (0x1<<17) | (0x1<<16) | (0x1<<11) | (0x1<<8) |
+ (0x1<<5) | (0x1<<4) | (0x1<<1),
+ /* Note: Config1 is only used internally,
+ Loongson-2F has only Config0. */
+ .CP0_Config1 = (1 << CP0C1_FP) | (47 << CP0C1_MMU),
+ .SYNCI_Step = 16,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0xF5D0FF1F, /* Bits 7:5 not writable. */
+ .CP1_fcr0 = (0x5 << FCR0_PRID) | (0x1 << FCR0_REV),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
+ .SEGBITS = 40,
+ .PABITS = 40,
+ .insn_flags = CPU_LOONGSON2F,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ /* A generic CPU providing MIPS64 ASE DSP 2 features.
+ FIXME: Eventually this should be replaced by a real CPU model. */
+ .name = "mips64dspr2",
+ .CP0_PRid = 0x00010000,
+ .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | (0x2 << CP0C0_AT) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (63 << CP0C1_MMU) |
+ (2 << CP0C1_IS) | (4 << CP0C1_IL) | (3 << CP0C1_IA) |
+ (2 << CP0C1_DS) | (4 << CP0C1_DL) | (3 << CP0C1_DA) |
+ (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP),
+ .CP0_Config2 = MIPS_CONFIG2,
+ .CP0_Config3 = MIPS_CONFIG3 | (1U << CP0C3_M) | (1 << CP0C3_DSP2P) |
+ (1 << CP0C3_DSPP) | (1 << CP0C3_LPA),
+ .CP0_LLAddr_rw_bitmask = 0,
+ .CP0_LLAddr_shift = 0,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x37FBFFFF,
+ .CP1_fcr0 = (1 << FCR0_F64) | (1 << FCR0_3D) | (1 << FCR0_PS) |
+ (1 << FCR0_L) | (1 << FCR0_W) | (1 << FCR0_D) |
+ (1 << FCR0_S) | (0x00 << FCR0_PRID) | (0x0 << FCR0_REV),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
+ .SEGBITS = 42,
+ .PABITS = 36,
+ .insn_flags = CPU_MIPS64R2 | ASE_DSP | ASE_DSPR2,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+
+#endif
+};
+
+static const mips_def_t *cpu_mips_find_by_name (const char *name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mips_defs); i++) {
+ if (strcasecmp(name, mips_defs[i].name) == 0) {
+ return &mips_defs[i];
+ }
+ }
+ return NULL;
+}
+
+void mips_cpu_list (FILE *f, fprintf_function cpu_fprintf)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mips_defs); i++) {
+ (*cpu_fprintf)(f, "MIPS '%s'\n",
+ mips_defs[i].name);
+ }
+}
+
+#ifndef CONFIG_USER_ONLY
+static void no_mmu_init (CPUMIPSState *env, const mips_def_t *def)
+{
+ env->tlb->nb_tlb = 1;
+ env->tlb->map_address = &no_mmu_map_address;
+}
+
+static void fixed_mmu_init (CPUMIPSState *env, const mips_def_t *def)
+{
+ env->tlb->nb_tlb = 1;
+ env->tlb->map_address = &fixed_mmu_map_address;
+}
+
+static void r4k_mmu_init (CPUMIPSState *env, const mips_def_t *def)
+{
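+    /* The Config1 MMU field encodes the number of TLB entries minus one. */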
+ env->tlb->nb_tlb = 1 + ((def->CP0_Config1 >> CP0C1_MMU) & 63);
+ env->tlb->map_address = &r4k_map_address;
+ env->tlb->helper_tlbwi = r4k_helper_tlbwi;
+ env->tlb->helper_tlbwr = r4k_helper_tlbwr;
+ env->tlb->helper_tlbp = r4k_helper_tlbp;
+ env->tlb->helper_tlbr = r4k_helper_tlbr;
+ env->tlb->helper_tlbinv = r4k_helper_tlbinv;
+ env->tlb->helper_tlbinvf = r4k_helper_tlbinvf;
+}
+
+static void mmu_init (CPUMIPSState *env, const mips_def_t *def)
+{
+ MIPSCPU *cpu = mips_env_get_cpu(env);
+
+ env->tlb = g_malloc0(sizeof(CPUMIPSTLBContext));
+
+ switch (def->mmu_type) {
+ case MMU_TYPE_NONE:
+ no_mmu_init(env, def);
+ break;
+ case MMU_TYPE_R4000:
+ r4k_mmu_init(env, def);
+ break;
+ case MMU_TYPE_FMT:
+ fixed_mmu_init(env, def);
+ break;
+ case MMU_TYPE_R3000:
+ case MMU_TYPE_R6000:
+ case MMU_TYPE_R8000:
+ default:
+ cpu_abort(CPU(cpu), "MMU type not supported\n");
+ }
+}
+#endif /* CONFIG_USER_ONLY */
+
+static void fpu_init (CPUMIPSState *env, const mips_def_t *def)
+{
+ int i;
+
+ for (i = 0; i < MIPS_FPU_MAX; i++)
+ env->fpus[i].fcr0 = def->CP1_fcr0;
+
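+    /* FPU context 0 starts out as the active one. */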
+ memcpy(&env->active_fpu, &env->fpus[0], sizeof(env->active_fpu));
+}
+
+static void mvp_init (CPUMIPSState *env, const mips_def_t *def)
+{
+ env->mvp = g_malloc0(sizeof(CPUMIPSMVPContext));
+
+ /* MVPConf1 implemented, TLB sharable, no gating storage support,
+ programmable cache partitioning implemented, number of allocatable
+ and sharable TLB entries, MVP has allocatable TCs, 2 VPEs
+ implemented, 5 TCs implemented. */
+ env->mvp->CP0_MVPConf0 = (1U << CP0MVPC0_M) | (1 << CP0MVPC0_TLBS) |
+ (0 << CP0MVPC0_GS) | (1 << CP0MVPC0_PCP) |
+// TODO: actually do 2 VPEs.
+// (1 << CP0MVPC0_TCA) | (0x1 << CP0MVPC0_PVPE) |
+// (0x04 << CP0MVPC0_PTC);
+ (1 << CP0MVPC0_TCA) | (0x0 << CP0MVPC0_PVPE) |
+ (0x00 << CP0MVPC0_PTC);
+#if !defined(CONFIG_USER_ONLY)
+ /* Usermode has no TLB support */
+ env->mvp->CP0_MVPConf0 |= (env->tlb->nb_tlb << CP0MVPC0_PTLBE);
+#endif
+
+    /* Allocatable CP1s have media extensions and FP support,
+       no UDI implemented, no CP2 implemented, 1 CP1 implemented. */
+ env->mvp->CP0_MVPConf1 = (1U << CP0MVPC1_CIM) | (1 << CP0MVPC1_CIF) |
+ (0x0 << CP0MVPC1_PCX) | (0x0 << CP0MVPC1_PCP2) |
+ (0x1 << CP0MVPC1_PCP1);
+}
+
+static void msa_reset(CPUMIPSState *env)
+{
+#ifdef CONFIG_USER_ONLY
+ /* MSA access enabled */
+ env->CP0_Config5 |= 1 << CP0C5_MSAEn;
+ env->CP0_Status |= (1 << CP0St_CU1) | (1 << CP0St_FR);
+#endif
+
+ /* MSA CSR:
+ - non-signaling floating point exception mode off (NX bit is 0)
+ - Cause, Enables, and Flags are all 0
+ - round to nearest / ties to even (RM bits are 0) */
+ env->active_tc.msacsr = 0;
+
+ restore_msa_fp_status(env);
+
+ /* tininess detected after rounding.*/
+ set_float_detect_tininess(float_tininess_after_rounding,
+ &env->active_tc.msa_fp_status);
+
+ /* clear float_status exception flags */
+ set_float_exception_flags(0, &env->active_tc.msa_fp_status);
+
+ /* clear float_status nan mode */
+ set_default_nan_mode(0, &env->active_tc.msa_fp_status);
+
+    /* set proper signaling bit meaning ("1" means "quiet") */
+ set_snan_bit_is_one(0, &env->active_tc.msa_fp_status);
+}
diff --git a/target/moxie/Makefile.objs b/target/moxie/Makefile.objs
new file mode 100644
index 0000000000..6381d4d636
--- /dev/null
+++ b/target/moxie/Makefile.objs
@@ -0,0 +1,2 @@
+obj-y += translate.o helper.o machine.o cpu.o
+obj-$(CONFIG_SOFTMMU) += mmu.o
diff --git a/target/moxie/cpu.c b/target/moxie/cpu.c
new file mode 100644
index 0000000000..b0be4a7551
--- /dev/null
+++ b/target/moxie/cpu.c
@@ -0,0 +1,192 @@
+/*
+ * QEMU Moxie CPU
+ *
+ * Copyright (c) 2013 Anthony Green
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "qemu-common.h"
+#include "migration/vmstate.h"
+#include "machine.h"
+#include "exec/exec-all.h"
+
+static void moxie_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ MoxieCPU *cpu = MOXIE_CPU(cs);
+
+ cpu->env.pc = value;
+}
+
+static bool moxie_cpu_has_work(CPUState *cs)
+{
+ return cs->interrupt_request & CPU_INTERRUPT_HARD;
+}
+
+static void moxie_cpu_reset(CPUState *s)
+{
+ MoxieCPU *cpu = MOXIE_CPU(s);
+ MoxieCPUClass *mcc = MOXIE_CPU_GET_CLASS(cpu);
+ CPUMoxieState *env = &cpu->env;
+
+ mcc->parent_reset(s);
+
+ memset(env, 0, sizeof(CPUMoxieState));
+ env->pc = 0x1000;
+
+ tlb_flush(s, 1);
+}
+
+static void moxie_cpu_disas_set_info(CPUState *cpu, disassemble_info *info)
+{
+ info->mach = bfd_arch_moxie;
+ info->print_insn = print_insn_moxie;
+}
+
+static void moxie_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ MoxieCPUClass *mcc = MOXIE_CPU_GET_CLASS(dev);
+ Error *local_err = NULL;
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ qemu_init_vcpu(cs);
+ cpu_reset(cs);
+
+ mcc->parent_realize(dev, errp);
+}
+
+static void moxie_cpu_initfn(Object *obj)
+{
+ CPUState *cs = CPU(obj);
+ MoxieCPU *cpu = MOXIE_CPU(obj);
+ static int inited;
+
+ cs->env_ptr = &cpu->env;
+
+ if (tcg_enabled() && !inited) {
+ inited = 1;
+ moxie_translate_init();
+ }
+}
+
+static ObjectClass *moxie_cpu_class_by_name(const char *cpu_model)
+{
+ ObjectClass *oc;
+
+ if (cpu_model == NULL) {
+ return NULL;
+ }
+
+ oc = object_class_by_name(cpu_model);
+ if (oc != NULL && (!object_class_dynamic_cast(oc, TYPE_MOXIE_CPU) ||
+ object_class_is_abstract(oc))) {
+ return NULL;
+ }
+ return oc;
+}
+
+static void moxie_cpu_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(oc);
+ MoxieCPUClass *mcc = MOXIE_CPU_CLASS(oc);
+
+ mcc->parent_realize = dc->realize;
+ dc->realize = moxie_cpu_realizefn;
+
+ mcc->parent_reset = cc->reset;
+ cc->reset = moxie_cpu_reset;
+
+ cc->class_by_name = moxie_cpu_class_by_name;
+
+ cc->has_work = moxie_cpu_has_work;
+ cc->do_interrupt = moxie_cpu_do_interrupt;
+ cc->dump_state = moxie_cpu_dump_state;
+ cc->set_pc = moxie_cpu_set_pc;
+#ifdef CONFIG_USER_ONLY
+ cc->handle_mmu_fault = moxie_cpu_handle_mmu_fault;
+#else
+ cc->get_phys_page_debug = moxie_cpu_get_phys_page_debug;
+ cc->vmsd = &vmstate_moxie_cpu;
+#endif
+ cc->disas_set_info = moxie_cpu_disas_set_info;
+}
+
+static void moxielite_initfn(Object *obj)
+{
+ /* Set cpu feature flags */
+}
+
+static void moxie_any_initfn(Object *obj)
+{
+ /* Set cpu feature flags */
+}
+
+typedef struct MoxieCPUInfo {
+ const char *name;
+ void (*initfn)(Object *obj);
+} MoxieCPUInfo;
+
+static const MoxieCPUInfo moxie_cpus[] = {
+ { .name = "MoxieLite", .initfn = moxielite_initfn },
+ { .name = "any", .initfn = moxie_any_initfn },
+};
+
+MoxieCPU *cpu_moxie_init(const char *cpu_model)
+{
+ return MOXIE_CPU(cpu_generic_init(TYPE_MOXIE_CPU, cpu_model));
+}
+
+static void cpu_register(const MoxieCPUInfo *info)
+{
+ TypeInfo type_info = {
+ .parent = TYPE_MOXIE_CPU,
+ .instance_size = sizeof(MoxieCPU),
+ .instance_init = info->initfn,
+ .class_size = sizeof(MoxieCPUClass),
+ };
+
+ type_info.name = g_strdup_printf("%s-" TYPE_MOXIE_CPU, info->name);
+ type_register(&type_info);
+ g_free((void *)type_info.name);
+}
+
+static const TypeInfo moxie_cpu_type_info = {
+ .name = TYPE_MOXIE_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(MoxieCPU),
+ .instance_init = moxie_cpu_initfn,
+ .class_size = sizeof(MoxieCPUClass),
+ .class_init = moxie_cpu_class_init,
+};
+
+static void moxie_cpu_register_types(void)
+{
+ int i;
+ type_register_static(&moxie_cpu_type_info);
+ for (i = 0; i < ARRAY_SIZE(moxie_cpus); i++) {
+ cpu_register(&moxie_cpus[i]);
+ }
+}
+
+type_init(moxie_cpu_register_types)
diff --git a/target/moxie/cpu.h b/target/moxie/cpu.h
new file mode 100644
index 0000000000..3e880facf4
--- /dev/null
+++ b/target/moxie/cpu.h
@@ -0,0 +1,143 @@
+/*
+ * Moxie emulation
+ *
+ * Copyright (c) 2008, 2010, 2013 Anthony Green
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef MOXIE_CPU_H
+#define MOXIE_CPU_H
+
+#include "qemu-common.h"
+
+#define TARGET_LONG_BITS 32
+
+#define CPUArchState struct CPUMoxieState
+
+#define MOXIE_EX_DIV0 0
+#define MOXIE_EX_BAD 1
+#define MOXIE_EX_IRQ 2
+#define MOXIE_EX_SWI 3
+#define MOXIE_EX_MMU_MISS 4
+#define MOXIE_EX_BREAK 16
+
+#include "exec/cpu-defs.h"
+#include "fpu/softfloat.h"
+
+#define TARGET_PAGE_BITS 12 /* 4k */
+
+#define TARGET_PHYS_ADDR_SPACE_BITS 32
+#define TARGET_VIRT_ADDR_SPACE_BITS 32
+
+#define NB_MMU_MODES 1
+
+typedef struct CPUMoxieState {
+
+ uint32_t flags; /* general execution flags */
+ uint32_t gregs[16]; /* general registers */
+ uint32_t sregs[256]; /* special registers */
+ uint32_t pc; /* program counter */
+ /* Instead of saving the cc value, we save the cmp arguments
+ and compute cc on demand. */
+ uint32_t cc_a; /* reg a for condition code calculation */
+ uint32_t cc_b; /* reg b for condition code calculation */
+
+ void *irq[8];
+
+ CPU_COMMON
+
+} CPUMoxieState;
+
+#include "qom/cpu.h"
+
+#define TYPE_MOXIE_CPU "moxie-cpu"
+
+#define MOXIE_CPU_CLASS(klass) \
+ OBJECT_CLASS_CHECK(MoxieCPUClass, (klass), TYPE_MOXIE_CPU)
+#define MOXIE_CPU(obj) \
+ OBJECT_CHECK(MoxieCPU, (obj), TYPE_MOXIE_CPU)
+#define MOXIE_CPU_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(MoxieCPUClass, (obj), TYPE_MOXIE_CPU)
+
+/**
+ * MoxieCPUClass:
+ * @parent_reset: The parent class' reset handler.
+ *
+ * A Moxie CPU model.
+ */
+typedef struct MoxieCPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+ void (*parent_reset)(CPUState *cpu);
+} MoxieCPUClass;
+
+/**
+ * MoxieCPU:
+ * @env: #CPUMoxieState
+ *
+ * A Moxie CPU.
+ */
+typedef struct MoxieCPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPUMoxieState env;
+} MoxieCPU;
+
+static inline MoxieCPU *moxie_env_get_cpu(CPUMoxieState *env)
+{
+ return container_of(env, MoxieCPU, env);
+}
+
+#define ENV_GET_CPU(e) CPU(moxie_env_get_cpu(e))
+
+#define ENV_OFFSET offsetof(MoxieCPU, env)
+
+MoxieCPU *cpu_moxie_init(const char *cpu_model);
+void moxie_cpu_do_interrupt(CPUState *cs);
+void moxie_cpu_dump_state(CPUState *cpu, FILE *f,
+ fprintf_function cpu_fprintf, int flags);
+hwaddr moxie_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+void moxie_translate_init(void);
+int cpu_moxie_signal_handler(int host_signum, void *pinfo,
+ void *puc);
+
+#define cpu_init(cpu_model) CPU(cpu_moxie_init(cpu_model))
+
+#define cpu_signal_handler cpu_moxie_signal_handler
+
+static inline int cpu_mmu_index(CPUMoxieState *env, bool ifetch)
+{
+ return 0;
+}
+
+#include "exec/cpu-all.h"
+
+static inline void cpu_get_tb_cpu_state(CPUMoxieState *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *flags)
+{
+ *pc = env->pc;
+ *cs_base = 0;
+ *flags = 0;
+}
+
+int moxie_cpu_handle_mmu_fault(CPUState *cpu, vaddr address,
+ int rw, int mmu_idx);
+
+#endif /* MOXIE_CPU_H */
diff --git a/target/moxie/helper.c b/target/moxie/helper.c
new file mode 100644
index 0000000000..330299f5a7
--- /dev/null
+++ b/target/moxie/helper.c
@@ -0,0 +1,162 @@
+/*
+ * Moxie helper routines.
+ *
+ * Copyright (c) 2008, 2009, 2010, 2013 Anthony Green
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "mmu.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "qemu/host-utils.h"
+#include "exec/helper-proto.h"
+
+/* Try to fill the TLB and return an exception if error. If retaddr is
+ NULL, it means that the function was called in C code (i.e. not
+ from generated code or from helper.c) */
+void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ int ret;
+
+ ret = moxie_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
+ if (unlikely(ret)) {
+ if (retaddr) {
+ cpu_restore_state(cs, retaddr);
+ }
+ }
+ cpu_loop_exit(cs);
+}
+
+void helper_raise_exception(CPUMoxieState *env, int ex)
+{
+ CPUState *cs = CPU(moxie_env_get_cpu(env));
+
+ cs->exception_index = ex;
+ /* Stash the exception type. */
+ env->sregs[2] = ex;
+ /* Stash the address where the exception occurred. */
+ cpu_restore_state(cs, GETPC());
+ env->sregs[5] = env->pc;
+    /* Jump to the exception handling routine. */
+ env->pc = env->sregs[1];
+ cpu_loop_exit(cs);
+}
+
+uint32_t helper_div(CPUMoxieState *env, uint32_t a, uint32_t b)
+{
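+    /* Division by zero raises an exception; INT_MIN / -1 would overflow,
+       so return INT_MIN directly. */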
+ if (unlikely(b == 0)) {
+ helper_raise_exception(env, MOXIE_EX_DIV0);
+ return 0;
+ }
+ if (unlikely(a == INT_MIN && b == -1)) {
+ return INT_MIN;
+ }
+
+ return (int32_t)a / (int32_t)b;
+}
+
+uint32_t helper_udiv(CPUMoxieState *env, uint32_t a, uint32_t b)
+{
+ if (unlikely(b == 0)) {
+ helper_raise_exception(env, MOXIE_EX_DIV0);
+ return 0;
+ }
+ return a / b;
+}
+
+void helper_debug(CPUMoxieState *env)
+{
+ CPUState *cs = CPU(moxie_env_get_cpu(env));
+
+ cs->exception_index = EXCP_DEBUG;
+ cpu_loop_exit(cs);
+}
+
+#if defined(CONFIG_USER_ONLY)
+
+void moxie_cpu_do_interrupt(CPUState *cs)
+{
+ cs->exception_index = -1;
+}
+
+int moxie_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
+ int rw, int mmu_idx)
+{
+ MoxieCPU *cpu = MOXIE_CPU(cs);
+
+ cs->exception_index = 0xaa;
+ cpu->env.debug1 = address;
+ cpu_dump_state(cs, stderr, fprintf, 0);
+ return 1;
+}
+
+#else /* !CONFIG_USER_ONLY */
+
+int moxie_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
+ int rw, int mmu_idx)
+{
+ MoxieCPU *cpu = MOXIE_CPU(cs);
+ CPUMoxieState *env = &cpu->env;
+ MoxieMMUResult res;
+ int prot, miss;
+ target_ulong phy;
+ int r = 1;
+
+ address &= TARGET_PAGE_MASK;
+ prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ miss = moxie_mmu_translate(&res, env, address, rw, mmu_idx);
+ if (miss) {
+ /* handle the miss. */
+ phy = 0;
+ cs->exception_index = MOXIE_EX_MMU_MISS;
+ } else {
+ phy = res.phy;
+ r = 0;
+ }
+ tlb_set_page(cs, address, phy, prot, mmu_idx, TARGET_PAGE_SIZE);
+ return r;
+}
+
+
+void moxie_cpu_do_interrupt(CPUState *cs)
+{
+ switch (cs->exception_index) {
+ case MOXIE_EX_BREAK:
+ break;
+ default:
+ break;
+ }
+}
+
+hwaddr moxie_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+ MoxieCPU *cpu = MOXIE_CPU(cs);
+ uint32_t phy = addr;
+ MoxieMMUResult res;
+ int miss;
+
+ miss = moxie_mmu_translate(&res, &cpu->env, addr, 0, 0);
+ if (!miss) {
+ phy = res.phy;
+ }
+ return phy;
+}
+#endif
diff --git a/target/moxie/helper.h b/target/moxie/helper.h
new file mode 100644
index 0000000000..d94ef7a17e
--- /dev/null
+++ b/target/moxie/helper.h
@@ -0,0 +1,5 @@
+DEF_HELPER_2(raise_exception, void, env, int)
+DEF_HELPER_1(debug, void, env)
+
+DEF_HELPER_FLAGS_3(div, TCG_CALL_NO_WG, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(udiv, TCG_CALL_NO_WG, i32, env, i32, i32)
diff --git a/target/moxie/machine.c b/target/moxie/machine.c
new file mode 100644
index 0000000000..282dcd869f
--- /dev/null
+++ b/target/moxie/machine.c
@@ -0,0 +1,22 @@
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "hw/hw.h"
+#include "hw/boards.h"
+#include "machine.h"
+#include "migration/cpu.h"
+
+const VMStateDescription vmstate_moxie_cpu = {
+ .name = "cpu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(flags, CPUMoxieState),
+ VMSTATE_UINT32_ARRAY(gregs, CPUMoxieState, 16),
+ VMSTATE_UINT32_ARRAY(sregs, CPUMoxieState, 256),
+ VMSTATE_UINT32(pc, CPUMoxieState),
+ VMSTATE_UINT32(cc_a, CPUMoxieState),
+ VMSTATE_UINT32(cc_b, CPUMoxieState),
+ VMSTATE_END_OF_LIST()
+ }
+};
diff --git a/target/moxie/machine.h b/target/moxie/machine.h
new file mode 100644
index 0000000000..a1b72907ae
--- /dev/null
+++ b/target/moxie/machine.h
@@ -0,0 +1 @@
+extern const VMStateDescription vmstate_moxie_cpu;
diff --git a/target/moxie/mmu.c b/target/moxie/mmu.c
new file mode 100644
index 0000000000..9203330b3b
--- /dev/null
+++ b/target/moxie/mmu.c
@@ -0,0 +1,33 @@
+/*
+ * Moxie mmu emulation.
+ *
+ * Copyright (c) 2008, 2013 Anthony Green
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "mmu.h"
+#include "exec/exec-all.h"
+
+int moxie_mmu_translate(MoxieMMUResult *res,
+ CPUMoxieState *env, uint32_t vaddr,
+ int rw, int mmu_idx)
+{
+ /* Perform no translation yet. */
+ res->phy = vaddr;
+ return 0;
+}
diff --git a/target/moxie/mmu.h b/target/moxie/mmu.h
new file mode 100644
index 0000000000..284a44d18e
--- /dev/null
+++ b/target/moxie/mmu.h
@@ -0,0 +1,14 @@
+#define MOXIE_MMU_ERR_EXEC 0
+#define MOXIE_MMU_ERR_READ 1
+#define MOXIE_MMU_ERR_WRITE 2
+#define MOXIE_MMU_ERR_FLUSH 3
+
+typedef struct {
+ uint32_t phy;
+ uint32_t pfn;
+ int cause_op;
+} MoxieMMUResult;
+
+int moxie_mmu_translate(MoxieMMUResult *res,
+ CPUMoxieState *env, uint32_t vaddr,
+ int rw, int mmu_idx);
diff --git a/target/moxie/translate.c b/target/moxie/translate.c
new file mode 100644
index 0000000000..0660b44c08
--- /dev/null
+++ b/target/moxie/translate.c
@@ -0,0 +1,908 @@
+/*
+ * Moxie emulation for qemu: main translation routines.
+ *
+ * Copyright (c) 2009, 2013 Anthony Green
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* For information on the Moxie architecture, see
+ * http://moxielogic.org/wiki
+ */
+
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "disas/disas.h"
+#include "tcg-op.h"
+#include "exec/cpu_ldst.h"
+
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+#include "exec/log.h"
+
+/* This is the state at translation time. */
+typedef struct DisasContext {
+ struct TranslationBlock *tb;
+ target_ulong pc, saved_pc;
+ uint32_t opcode;
+ uint32_t fp_status;
+ /* Routine used to access memory */
+ int memidx;
+ int bstate;
+ target_ulong btarget;
+ int singlestep_enabled;
+} DisasContext;
+
+enum {
+ BS_NONE = 0, /* We go out of the TB without reaching a branch or an
+ * exception condition */
+ BS_STOP = 1, /* We want to stop translation for any reason */
+ BS_BRANCH = 2, /* We reached a branch condition */
+ BS_EXCP = 3, /* We reached an exception condition */
+};
+
+static TCGv cpu_pc;
+static TCGv cpu_gregs[16];
+static TCGv_env cpu_env;
+static TCGv cc_a, cc_b;
+
+#include "exec/gen-icount.h"
+
+#define REG(x) (cpu_gregs[x])
+
+/* Extract the signed 10-bit offset from a 16-bit branch
+ instruction. */
+static int extract_branch_offset(int opcode)
+{
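+    /* The offset sits in the low 10 bits.  Shift it up against the sign bit
+       of a 16-bit value, arithmetic-shift back down to sign-extend, then
+       double it since branch offsets count 16-bit words. */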
+ return (((signed short)((opcode & ((1 << 10) - 1)) << 6)) >> 6) << 1;
+}
+
+void moxie_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
+ int flags)
+{
+ MoxieCPU *cpu = MOXIE_CPU(cs);
+ CPUMoxieState *env = &cpu->env;
+ int i;
+ cpu_fprintf(f, "pc=0x%08x\n", env->pc);
+ cpu_fprintf(f, "$fp=0x%08x $sp=0x%08x $r0=0x%08x $r1=0x%08x\n",
+ env->gregs[0], env->gregs[1], env->gregs[2], env->gregs[3]);
+ for (i = 4; i < 16; i += 4) {
+ cpu_fprintf(f, "$r%d=0x%08x $r%d=0x%08x $r%d=0x%08x $r%d=0x%08x\n",
+ i-2, env->gregs[i], i-1, env->gregs[i + 1],
+ i, env->gregs[i + 2], i+1, env->gregs[i + 3]);
+ }
+ for (i = 4; i < 16; i += 4) {
+ cpu_fprintf(f, "sr%d=0x%08x sr%d=0x%08x sr%d=0x%08x sr%d=0x%08x\n",
+ i-2, env->sregs[i], i-1, env->sregs[i + 1],
+ i, env->sregs[i + 2], i+1, env->sregs[i + 3]);
+ }
+}
+
+void moxie_translate_init(void)
+{
+ int i;
+ static int done_init;
+ static const char * const gregnames[16] = {
+ "$fp", "$sp", "$r0", "$r1",
+ "$r2", "$r3", "$r4", "$r5",
+ "$r6", "$r7", "$r8", "$r9",
+ "$r10", "$r11", "$r12", "$r13"
+ };
+
+ if (done_init) {
+ return;
+ }
+ cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+ tcg_ctx.tcg_env = cpu_env;
+ cpu_pc = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUMoxieState, pc), "$pc");
+ for (i = 0; i < 16; i++)
+ cpu_gregs[i] = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUMoxieState, gregs[i]),
+ gregnames[i]);
+
+ cc_a = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUMoxieState, cc_a), "cc_a");
+ cc_b = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUMoxieState, cc_b), "cc_b");
+
+ done_init = 1;
+}
+
+static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
+{
+ if (unlikely(ctx->singlestep_enabled)) {
+ return false;
+ }
+
+#ifndef CONFIG_USER_ONLY
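+    /* Direct block chaining is only safe while the destination stays on the
+       same guest page as the current TB. */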
+ return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
+#else
+ return true;
+#endif
+}
+
+static inline void gen_goto_tb(CPUMoxieState *env, DisasContext *ctx,
+ int n, target_ulong dest)
+{
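+    /* Chain to the next TB when possible; otherwise just set the PC and
+       return to the main loop, raising a debug exception first when
+       single-stepping. */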
+ if (use_goto_tb(ctx, dest)) {
+ tcg_gen_goto_tb(n);
+ tcg_gen_movi_i32(cpu_pc, dest);
+ tcg_gen_exit_tb((uintptr_t)ctx->tb + n);
+ } else {
+ tcg_gen_movi_i32(cpu_pc, dest);
+ if (ctx->singlestep_enabled) {
+ gen_helper_debug(cpu_env);
+ }
+ tcg_gen_exit_tb(0);
+ }
+}
+
+static int decode_opc(MoxieCPU *cpu, DisasContext *ctx)
+{
+ CPUMoxieState *env = &cpu->env;
+
+ /* Local cache for the instruction opcode. */
+ int opcode;
+ /* Set the default instruction length. */
+ int length = 2;
+
+ /* Examine the 16-bit opcode. */
+ opcode = ctx->opcode;
+
+ /* Decode instruction. */
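+    /* Bit 15 clear selects a Form 1 instruction; with bit 15 set, bit 14
+       distinguishes Form 3 (set) from Form 2 (clear). */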
+ if (opcode & (1 << 15)) {
+ if (opcode & (1 << 14)) {
+ /* This is a Form 3 instruction. */
+ int inst = (opcode >> 10 & 0xf);
+
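+/* Emit a conditional branch: if the saved cmp operands satisfy 'cond',
+   jump to the PC-relative target, otherwise fall through to the next
+   instruction.  Both paths end the current TB. */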
+#define BRANCH(cond) \
+ do { \
+ TCGLabel *l1 = gen_new_label(); \
+ tcg_gen_brcond_i32(cond, cc_a, cc_b, l1); \
+ gen_goto_tb(env, ctx, 1, ctx->pc+2); \
+ gen_set_label(l1); \
+ gen_goto_tb(env, ctx, 0, extract_branch_offset(opcode) + ctx->pc+2); \
+ ctx->bstate = BS_BRANCH; \
+ } while (0)
+
+ switch (inst) {
+ case 0x00: /* beq */
+ BRANCH(TCG_COND_EQ);
+ break;
+ case 0x01: /* bne */
+ BRANCH(TCG_COND_NE);
+ break;
+ case 0x02: /* blt */
+ BRANCH(TCG_COND_LT);
+ break;
+ case 0x03: /* bgt */
+ BRANCH(TCG_COND_GT);
+ break;
+ case 0x04: /* bltu */
+ BRANCH(TCG_COND_LTU);
+ break;
+ case 0x05: /* bgtu */
+ BRANCH(TCG_COND_GTU);
+ break;
+ case 0x06: /* bge */
+ BRANCH(TCG_COND_GE);
+ break;
+ case 0x07: /* ble */
+ BRANCH(TCG_COND_LE);
+ break;
+ case 0x08: /* bgeu */
+ BRANCH(TCG_COND_GEU);
+ break;
+ case 0x09: /* bleu */
+ BRANCH(TCG_COND_LEU);
+ break;
+ default:
+ {
+ TCGv temp = tcg_temp_new_i32();
+ tcg_gen_movi_i32(cpu_pc, ctx->pc);
+ tcg_gen_movi_i32(temp, MOXIE_EX_BAD);
+ gen_helper_raise_exception(cpu_env, temp);
+ tcg_temp_free_i32(temp);
+ }
+ break;
+ }
+ } else {
+ /* This is a Form 2 instruction. */
+ int inst = (opcode >> 12 & 0x3);
+ switch (inst) {
+ case 0x00: /* inc */
+ {
+ int a = (opcode >> 8) & 0xf;
+ unsigned int v = (opcode & 0xff);
+ tcg_gen_addi_i32(REG(a), REG(a), v);
+ }
+ break;
+ case 0x01: /* dec */
+ {
+ int a = (opcode >> 8) & 0xf;
+ unsigned int v = (opcode & 0xff);
+ tcg_gen_subi_i32(REG(a), REG(a), v);
+ }
+ break;
+ case 0x02: /* gsr */
+ {
+ int a = (opcode >> 8) & 0xf;
+ unsigned v = (opcode & 0xff);
+ tcg_gen_ld_i32(REG(a), cpu_env,
+ offsetof(CPUMoxieState, sregs[v]));
+ }
+ break;
+ case 0x03: /* ssr */
+ {
+ int a = (opcode >> 8) & 0xf;
+ unsigned v = (opcode & 0xff);
+ tcg_gen_st_i32(REG(a), cpu_env,
+ offsetof(CPUMoxieState, sregs[v]));
+ }
+ break;
+ default:
+ {
+ TCGv temp = tcg_temp_new_i32();
+ tcg_gen_movi_i32(cpu_pc, ctx->pc);
+ tcg_gen_movi_i32(temp, MOXIE_EX_BAD);
+ gen_helper_raise_exception(cpu_env, temp);
+ tcg_temp_free_i32(temp);
+ }
+ break;
+ }
+ }
+ } else {
+ /* This is a Form 1 instruction. */
+ int inst = opcode >> 8;
+ switch (inst) {
+ case 0x00: /* nop */
+ break;
+ case 0x01: /* ldi.l (immediate) */
+ {
+ int reg = (opcode >> 4) & 0xf;
+ int val = cpu_ldl_code(env, ctx->pc+2);
+ tcg_gen_movi_i32(REG(reg), val);
+ length = 6;
+ }
+ break;
+ case 0x02: /* mov (register-to-register) */
+ {
+ int dest = (opcode >> 4) & 0xf;
+ int src = opcode & 0xf;
+ tcg_gen_mov_i32(REG(dest), REG(src));
+ }
+ break;
+ case 0x03: /* jsra */
+ {
+ TCGv t1 = tcg_temp_new_i32();
+ TCGv t2 = tcg_temp_new_i32();
+
+ tcg_gen_movi_i32(t1, ctx->pc + 6);
+
+ /* Make space for the static chain and return address. */
+ tcg_gen_subi_i32(t2, REG(1), 8);
+ tcg_gen_mov_i32(REG(1), t2);
+ tcg_gen_qemu_st32(t1, REG(1), ctx->memidx);
+
+ /* Push the current frame pointer. */
+ tcg_gen_subi_i32(t2, REG(1), 4);
+ tcg_gen_mov_i32(REG(1), t2);
+ tcg_gen_qemu_st32(REG(0), REG(1), ctx->memidx);
+
+ /* Set the pc and $fp. */
+ tcg_gen_mov_i32(REG(0), REG(1));
+
+ gen_goto_tb(env, ctx, 0, cpu_ldl_code(env, ctx->pc+2));
+
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+
+ ctx->bstate = BS_BRANCH;
+ length = 6;
+ }
+ break;
+ case 0x04: /* ret */
+ {
+ TCGv t1 = tcg_temp_new_i32();
+
+ /* The new $sp is the old $fp. */
+ tcg_gen_mov_i32(REG(1), REG(0));
+
+ /* Pop the frame pointer. */
+ tcg_gen_qemu_ld32u(REG(0), REG(1), ctx->memidx);
+ tcg_gen_addi_i32(t1, REG(1), 4);
+ tcg_gen_mov_i32(REG(1), t1);
+
+
+ /* Pop the return address and skip over the static chain
+ slot. */
+ tcg_gen_qemu_ld32u(cpu_pc, REG(1), ctx->memidx);
+ tcg_gen_addi_i32(t1, REG(1), 8);
+ tcg_gen_mov_i32(REG(1), t1);
+
+ tcg_temp_free_i32(t1);
+
+ /* Jump... */
+ tcg_gen_exit_tb(0);
+
+ ctx->bstate = BS_BRANCH;
+ }
+ break;
+ case 0x05: /* add.l */
+ {
+ int a = (opcode >> 4) & 0xf;
+ int b = opcode & 0xf;
+
+ tcg_gen_add_i32(REG(a), REG(a), REG(b));
+ }
+ break;
+ case 0x06: /* push */
+ {
+ int a = (opcode >> 4) & 0xf;
+ int b = opcode & 0xf;
+
+ TCGv t1 = tcg_temp_new_i32();
+ tcg_gen_subi_i32(t1, REG(a), 4);
+ tcg_gen_mov_i32(REG(a), t1);
+ tcg_gen_qemu_st32(REG(b), REG(a), ctx->memidx);
+ tcg_temp_free_i32(t1);
+ }
+ break;
+ case 0x07: /* pop */
+ {
+ int a = (opcode >> 4) & 0xf;
+ int b = opcode & 0xf;
+ TCGv t1 = tcg_temp_new_i32();
+
+ tcg_gen_qemu_ld32u(REG(b), REG(a), ctx->memidx);
+ tcg_gen_addi_i32(t1, REG(a), 4);
+ tcg_gen_mov_i32(REG(a), t1);
+ tcg_temp_free_i32(t1);
+ }
+ break;
+ case 0x08: /* lda.l */
+ {
+ int reg = (opcode >> 4) & 0xf;
+
+ TCGv ptr = tcg_temp_new_i32();
+ tcg_gen_movi_i32(ptr, cpu_ldl_code(env, ctx->pc+2));
+ tcg_gen_qemu_ld32u(REG(reg), ptr, ctx->memidx);
+ tcg_temp_free_i32(ptr);
+
+ length = 6;
+ }
+ break;
+ case 0x09: /* sta.l */
+ {
+ int val = (opcode >> 4) & 0xf;
+
+ TCGv ptr = tcg_temp_new_i32();
+ tcg_gen_movi_i32(ptr, cpu_ldl_code(env, ctx->pc+2));
+ tcg_gen_qemu_st32(REG(val), ptr, ctx->memidx);
+ tcg_temp_free_i32(ptr);
+
+ length = 6;
+ }
+ break;
+ case 0x0a: /* ld.l (register indirect) */
+ {
+ int src = opcode & 0xf;
+ int dest = (opcode >> 4) & 0xf;
+
+ tcg_gen_qemu_ld32u(REG(dest), REG(src), ctx->memidx);
+ }
+ break;
+ case 0x0b: /* st.l */
+ {
+ int dest = (opcode >> 4) & 0xf;
+ int val = opcode & 0xf;
+
+ tcg_gen_qemu_st32(REG(val), REG(dest), ctx->memidx);
+ }
+ break;
+ case 0x0c: /* ldo.l */
+ {
+ int a = (opcode >> 4) & 0xf;
+ int b = opcode & 0xf;
+
+ TCGv t1 = tcg_temp_new_i32();
+ TCGv t2 = tcg_temp_new_i32();
+ tcg_gen_addi_i32(t1, REG(b), cpu_ldl_code(env, ctx->pc+2));
+ tcg_gen_qemu_ld32u(t2, t1, ctx->memidx);
+ tcg_gen_mov_i32(REG(a), t2);
+
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+
+ length = 6;
+ }
+ break;
+ case 0x0d: /* sto.l */
+ {
+ int a = (opcode >> 4) & 0xf;
+ int b = opcode & 0xf;
+
+ TCGv t1 = tcg_temp_new_i32();
+ TCGv t2 = tcg_temp_new_i32();
+ tcg_gen_addi_i32(t1, REG(a), cpu_ldl_code(env, ctx->pc+2));
+ tcg_gen_qemu_st32(REG(b), t1, ctx->memidx);
+
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+
+ length = 6;
+ }
+ break;
+ case 0x0e: /* cmp */
+ {
+ int a = (opcode >> 4) & 0xf;
+ int b = opcode & 0xf;
+
+ tcg_gen_mov_i32(cc_a, REG(a));
+ tcg_gen_mov_i32(cc_b, REG(b));
+ }
+ break;
+ case 0x19: /* jsr */
+ {
+ int fnreg = (opcode >> 4) & 0xf;
+
+                /* t1 holds the return address, t2 the updated stack pointer. */
+ TCGv t1 = tcg_temp_new_i32();
+ TCGv t2 = tcg_temp_new_i32();
+
+ tcg_gen_movi_i32(t1, ctx->pc+2);
+
+ /* Make space for the static chain and return address. */
+ tcg_gen_subi_i32(t2, REG(1), 8);
+ tcg_gen_mov_i32(REG(1), t2);
+ tcg_gen_qemu_st32(t1, REG(1), ctx->memidx);
+
+ /* Push the current frame pointer. */
+ tcg_gen_subi_i32(t2, REG(1), 4);
+ tcg_gen_mov_i32(REG(1), t2);
+ tcg_gen_qemu_st32(REG(0), REG(1), ctx->memidx);
+
+ /* Set the pc and $fp. */
+ tcg_gen_mov_i32(REG(0), REG(1));
+ tcg_gen_mov_i32(cpu_pc, REG(fnreg));
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+ tcg_gen_exit_tb(0);
+ ctx->bstate = BS_BRANCH;
+ }
+ break;
+ case 0x1a: /* jmpa */
+ {
+ tcg_gen_movi_i32(cpu_pc, cpu_ldl_code(env, ctx->pc+2));
+ tcg_gen_exit_tb(0);
+ ctx->bstate = BS_BRANCH;
+ length = 6;
+ }
+ break;
+ case 0x1b: /* ldi.b (immediate) */
+ {
+ int reg = (opcode >> 4) & 0xf;
+ int val = cpu_ldl_code(env, ctx->pc+2);
+ tcg_gen_movi_i32(REG(reg), val);
+ length = 6;
+ }
+ break;
+ case 0x1c: /* ld.b (register indirect) */
+ {
+ int src = opcode & 0xf;
+ int dest = (opcode >> 4) & 0xf;
+
+ tcg_gen_qemu_ld8u(REG(dest), REG(src), ctx->memidx);
+ }
+ break;
+ case 0x1d: /* lda.b */
+ {
+ int reg = (opcode >> 4) & 0xf;
+
+ TCGv ptr = tcg_temp_new_i32();
+ tcg_gen_movi_i32(ptr, cpu_ldl_code(env, ctx->pc+2));
+ tcg_gen_qemu_ld8u(REG(reg), ptr, ctx->memidx);
+ tcg_temp_free_i32(ptr);
+
+ length = 6;
+ }
+ break;
+ case 0x1e: /* st.b */
+ {
+ int dest = (opcode >> 4) & 0xf;
+ int val = opcode & 0xf;
+
+ tcg_gen_qemu_st8(REG(val), REG(dest), ctx->memidx);
+ }
+ break;
+ case 0x1f: /* sta.b */
+ {
+ int val = (opcode >> 4) & 0xf;
+
+ TCGv ptr = tcg_temp_new_i32();
+ tcg_gen_movi_i32(ptr, cpu_ldl_code(env, ctx->pc+2));
+ tcg_gen_qemu_st8(REG(val), ptr, ctx->memidx);
+ tcg_temp_free_i32(ptr);
+
+ length = 6;
+ }
+ break;
+ case 0x20: /* ldi.s (immediate) */
+ {
+ int reg = (opcode >> 4) & 0xf;
+ int val = cpu_ldl_code(env, ctx->pc+2);
+ tcg_gen_movi_i32(REG(reg), val);
+ length = 6;
+ }
+ break;
+ case 0x21: /* ld.s (register indirect) */
+ {
+ int src = opcode & 0xf;
+ int dest = (opcode >> 4) & 0xf;
+
+ tcg_gen_qemu_ld16u(REG(dest), REG(src), ctx->memidx);
+ }
+ break;
+ case 0x22: /* lda.s */
+ {
+ int reg = (opcode >> 4) & 0xf;
+
+ TCGv ptr = tcg_temp_new_i32();
+ tcg_gen_movi_i32(ptr, cpu_ldl_code(env, ctx->pc+2));
+ tcg_gen_qemu_ld16u(REG(reg), ptr, ctx->memidx);
+ tcg_temp_free_i32(ptr);
+
+ length = 6;
+ }
+ break;
+ case 0x23: /* st.s */
+ {
+ int dest = (opcode >> 4) & 0xf;
+ int val = opcode & 0xf;
+
+ tcg_gen_qemu_st16(REG(val), REG(dest), ctx->memidx);
+ }
+ break;
+ case 0x24: /* sta.s */
+ {
+ int val = (opcode >> 4) & 0xf;
+
+ TCGv ptr = tcg_temp_new_i32();
+ tcg_gen_movi_i32(ptr, cpu_ldl_code(env, ctx->pc+2));
+ tcg_gen_qemu_st16(REG(val), ptr, ctx->memidx);
+ tcg_temp_free_i32(ptr);
+
+ length = 6;
+ }
+ break;
+ case 0x25: /* jmp */
+ {
+ int reg = (opcode >> 4) & 0xf;
+ tcg_gen_mov_i32(cpu_pc, REG(reg));
+ tcg_gen_exit_tb(0);
+ ctx->bstate = BS_BRANCH;
+ }
+ break;
+ case 0x26: /* and */
+ {
+ int a = (opcode >> 4) & 0xf;
+ int b = opcode & 0xf;
+
+ tcg_gen_and_i32(REG(a), REG(a), REG(b));
+ }
+ break;
+ case 0x27: /* lshr */
+ {
+ int a = (opcode >> 4) & 0xf;
+ int b = opcode & 0xf;
+
+ TCGv sv = tcg_temp_new_i32();
+ tcg_gen_andi_i32(sv, REG(b), 0x1f);
+ tcg_gen_shr_i32(REG(a), REG(a), sv);
+ tcg_temp_free_i32(sv);
+ }
+ break;
+ case 0x28: /* ashl */
+ {
+ int a = (opcode >> 4) & 0xf;
+ int b = opcode & 0xf;
+
+ TCGv sv = tcg_temp_new_i32();
+ tcg_gen_andi_i32(sv, REG(b), 0x1f);
+ tcg_gen_shl_i32(REG(a), REG(a), sv);
+ tcg_temp_free_i32(sv);
+ }
+ break;
+ case 0x29: /* sub.l */
+ {
+ int a = (opcode >> 4) & 0xf;
+ int b = opcode & 0xf;
+
+ tcg_gen_sub_i32(REG(a), REG(a), REG(b));
+ }
+ break;
+ case 0x2a: /* neg */
+ {
+ int a = (opcode >> 4) & 0xf;
+ int b = opcode & 0xf;
+
+ tcg_gen_neg_i32(REG(a), REG(b));
+ }
+ break;
+ case 0x2b: /* or */
+ {
+ int a = (opcode >> 4) & 0xf;
+ int b = opcode & 0xf;
+
+ tcg_gen_or_i32(REG(a), REG(a), REG(b));
+ }
+ break;
+ case 0x2c: /* not */
+ {
+ int a = (opcode >> 4) & 0xf;
+ int b = opcode & 0xf;
+
+ tcg_gen_not_i32(REG(a), REG(b));
+ }
+ break;
+ case 0x2d: /* ashr */
+ {
+ int a = (opcode >> 4) & 0xf;
+ int b = opcode & 0xf;
+
+ TCGv sv = tcg_temp_new_i32();
+ tcg_gen_andi_i32(sv, REG(b), 0x1f);
+ tcg_gen_sar_i32(REG(a), REG(a), sv);
+ tcg_temp_free_i32(sv);
+ }
+ break;
+ case 0x2e: /* xor */
+ {
+ int a = (opcode >> 4) & 0xf;
+ int b = opcode & 0xf;
+
+ tcg_gen_xor_i32(REG(a), REG(a), REG(b));
+ }
+ break;
+ case 0x2f: /* mul.l */
+ {
+ int a = (opcode >> 4) & 0xf;
+ int b = opcode & 0xf;
+
+ tcg_gen_mul_i32(REG(a), REG(a), REG(b));
+ }
+ break;
+ case 0x30: /* swi */
+ {
+ int val = cpu_ldl_code(env, ctx->pc+2);
+
+ TCGv temp = tcg_temp_new_i32();
+ tcg_gen_movi_i32(temp, val);
+ tcg_gen_st_i32(temp, cpu_env,
+ offsetof(CPUMoxieState, sregs[3]));
+ tcg_gen_movi_i32(cpu_pc, ctx->pc);
+ tcg_gen_movi_i32(temp, MOXIE_EX_SWI);
+ gen_helper_raise_exception(cpu_env, temp);
+ tcg_temp_free_i32(temp);
+
+ length = 6;
+ }
+ break;
+ case 0x31: /* div.l */
+ {
+ int a = (opcode >> 4) & 0xf;
+ int b = opcode & 0xf;
+ tcg_gen_movi_i32(cpu_pc, ctx->pc);
+ gen_helper_div(REG(a), cpu_env, REG(a), REG(b));
+ }
+ break;
+ case 0x32: /* udiv.l */
+ {
+ int a = (opcode >> 4) & 0xf;
+ int b = opcode & 0xf;
+ tcg_gen_movi_i32(cpu_pc, ctx->pc);
+ gen_helper_udiv(REG(a), cpu_env, REG(a), REG(b));
+ }
+ break;
+ case 0x33: /* mod.l */
+ {
+ int a = (opcode >> 4) & 0xf;
+ int b = opcode & 0xf;
+ tcg_gen_rem_i32(REG(a), REG(a), REG(b));
+ }
+ break;
+ case 0x34: /* umod.l */
+ {
+ int a = (opcode >> 4) & 0xf;
+ int b = opcode & 0xf;
+ tcg_gen_remu_i32(REG(a), REG(a), REG(b));
+ }
+ break;
+ case 0x35: /* brk */
+ {
+ TCGv temp = tcg_temp_new_i32();
+ tcg_gen_movi_i32(cpu_pc, ctx->pc);
+ tcg_gen_movi_i32(temp, MOXIE_EX_BREAK);
+ gen_helper_raise_exception(cpu_env, temp);
+ tcg_temp_free_i32(temp);
+ }
+ break;
+ case 0x36: /* ldo.b */
+ {
+ int a = (opcode >> 4) & 0xf;
+ int b = opcode & 0xf;
+
+ TCGv t1 = tcg_temp_new_i32();
+ TCGv t2 = tcg_temp_new_i32();
+ tcg_gen_addi_i32(t1, REG(b), cpu_ldl_code(env, ctx->pc+2));
+ tcg_gen_qemu_ld8u(t2, t1, ctx->memidx);
+ tcg_gen_mov_i32(REG(a), t2);
+
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+
+ length = 6;
+ }
+ break;
+ case 0x37: /* sto.b */
+ {
+ int a = (opcode >> 4) & 0xf;
+ int b = opcode & 0xf;
+
+ TCGv t1 = tcg_temp_new_i32();
+ TCGv t2 = tcg_temp_new_i32();
+ tcg_gen_addi_i32(t1, REG(a), cpu_ldl_code(env, ctx->pc+2));
+ tcg_gen_qemu_st8(REG(b), t1, ctx->memidx);
+
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+
+ length = 6;
+ }
+ break;
+ case 0x38: /* ldo.s */
+ {
+ int a = (opcode >> 4) & 0xf;
+ int b = opcode & 0xf;
+
+ TCGv t1 = tcg_temp_new_i32();
+ TCGv t2 = tcg_temp_new_i32();
+ tcg_gen_addi_i32(t1, REG(b), cpu_ldl_code(env, ctx->pc+2));
+ tcg_gen_qemu_ld16u(t2, t1, ctx->memidx);
+ tcg_gen_mov_i32(REG(a), t2);
+
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+
+ length = 6;
+ }
+ break;
+ case 0x39: /* sto.s */
+ {
+ int a = (opcode >> 4) & 0xf;
+ int b = opcode & 0xf;
+
+ TCGv t1 = tcg_temp_new_i32();
+ TCGv t2 = tcg_temp_new_i32();
+ tcg_gen_addi_i32(t1, REG(a), cpu_ldl_code(env, ctx->pc+2));
+ tcg_gen_qemu_st16(REG(b), t1, ctx->memidx);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+
+ length = 6;
+ }
+ break;
+ default:
+ {
+ TCGv temp = tcg_temp_new_i32();
+ tcg_gen_movi_i32(cpu_pc, ctx->pc);
+ tcg_gen_movi_i32(temp, MOXIE_EX_BAD);
+ gen_helper_raise_exception(cpu_env, temp);
+ tcg_temp_free_i32(temp);
+ }
+ break;
+ }
+ }
+
+ return length;
+}
+
+/* generate intermediate code for basic block 'tb'. */
+void gen_intermediate_code(CPUMoxieState *env, struct TranslationBlock *tb)
+{
+ MoxieCPU *cpu = moxie_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ DisasContext ctx;
+ target_ulong pc_start;
+ int num_insns, max_insns;
+
+ pc_start = tb->pc;
+ ctx.pc = pc_start;
+ ctx.saved_pc = -1;
+ ctx.tb = tb;
+ ctx.memidx = 0;
+ ctx.singlestep_enabled = 0;
+ ctx.bstate = BS_NONE;
+ num_insns = 0;
+ max_insns = tb->cflags & CF_COUNT_MASK;
+ if (max_insns == 0) {
+ max_insns = CF_COUNT_MASK;
+ }
+ if (max_insns > TCG_MAX_INSNS) {
+ max_insns = TCG_MAX_INSNS;
+ }
+
+ gen_tb_start(tb);
+ do {
+ tcg_gen_insn_start(ctx.pc);
+ num_insns++;
+
+ if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
+ tcg_gen_movi_i32(cpu_pc, ctx.pc);
+ gen_helper_debug(cpu_env);
+ ctx.bstate = BS_EXCP;
+ /* The address covered by the breakpoint must be included in
+               [tb->pc, tb->pc + tb->size) in order for it to be
+ properly cleared -- thus we increment the PC here so that
+ the logic setting tb->size below does the right thing. */
+ ctx.pc += 2;
+ goto done_generating;
+ }
+
+ ctx.opcode = cpu_lduw_code(env, ctx.pc);
+ ctx.pc += decode_opc(cpu, &ctx);
+
+ if (num_insns >= max_insns) {
+ break;
+ }
+ if (cs->singlestep_enabled) {
+ break;
+ }
+ if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0) {
+ break;
+ }
+ } while (ctx.bstate == BS_NONE && !tcg_op_buf_full());
+
+ if (cs->singlestep_enabled) {
+ tcg_gen_movi_tl(cpu_pc, ctx.pc);
+ gen_helper_debug(cpu_env);
+ } else {
+ switch (ctx.bstate) {
+ case BS_STOP:
+ case BS_NONE:
+ gen_goto_tb(env, &ctx, 0, ctx.pc);
+ break;
+ case BS_EXCP:
+ tcg_gen_exit_tb(0);
+ break;
+ case BS_BRANCH:
+ default:
+ break;
+ }
+ }
+ done_generating:
+ gen_tb_end(tb, num_insns);
+
+ tb->size = ctx.pc - pc_start;
+ tb->icount = num_insns;
+}
+
+void restore_state_to_opc(CPUMoxieState *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ env->pc = data[0];
+}
diff --git a/target/openrisc/Makefile.objs b/target/openrisc/Makefile.objs
new file mode 100644
index 0000000000..397d01650e
--- /dev/null
+++ b/target/openrisc/Makefile.objs
@@ -0,0 +1,5 @@
+obj-$(CONFIG_SOFTMMU) += machine.o
+obj-y += cpu.o exception.o interrupt.o mmu.o translate.o
+obj-y += exception_helper.o fpu_helper.o int_helper.o \
+ interrupt_helper.o mmu_helper.o sys_helper.o
+obj-y += gdbstub.o
diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
new file mode 100644
index 0000000000..698e87bb25
--- /dev/null
+++ b/target/openrisc/cpu.c
@@ -0,0 +1,278 @@
+/*
+ * QEMU OpenRISC CPU
+ *
+ * Copyright (c) 2012 Jia Liu <proljc@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "qemu-common.h"
+#include "exec/exec-all.h"
+
+static void openrisc_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ OpenRISCCPU *cpu = OPENRISC_CPU(cs);
+
+ cpu->env.pc = value;
+}
+
+static bool openrisc_cpu_has_work(CPUState *cs)
+{
+ return cs->interrupt_request & (CPU_INTERRUPT_HARD |
+ CPU_INTERRUPT_TIMER);
+}
+
+/* CPUClass::reset() */
+static void openrisc_cpu_reset(CPUState *s)
+{
+ OpenRISCCPU *cpu = OPENRISC_CPU(s);
+ OpenRISCCPUClass *occ = OPENRISC_CPU_GET_CLASS(cpu);
+
+ occ->parent_reset(s);
+
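+    /* Zero the register state up to the fields that are preserved
+       across reset. */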
+#ifndef CONFIG_USER_ONLY
+ memset(&cpu->env, 0, offsetof(CPUOpenRISCState, tlb));
+#else
+ memset(&cpu->env, 0, offsetof(CPUOpenRISCState, irq));
+#endif
+
+ tlb_flush(s, 1);
+ /*tb_flush(&cpu->env); FIXME: Do we need it? */
+
+ cpu->env.pc = 0x100;
+ cpu->env.sr = SR_FO | SR_SM;
+ s->exception_index = -1;
+
+ cpu->env.upr = UPR_UP | UPR_DMP | UPR_IMP | UPR_PICP | UPR_TTP;
+ cpu->env.cpucfgr = CPUCFGR_OB32S | CPUCFGR_OF32S;
+ cpu->env.dmmucfgr = (DMMUCFGR_NTW & (0 << 2)) | (DMMUCFGR_NTS & (6 << 2));
+ cpu->env.immucfgr = (IMMUCFGR_NTW & (0 << 2)) | (IMMUCFGR_NTS & (6 << 2));
+
+#ifndef CONFIG_USER_ONLY
+ cpu->env.picmr = 0x00000000;
+ cpu->env.picsr = 0x00000000;
+
+ cpu->env.ttmr = 0x00000000;
+ cpu->env.ttcr = 0x00000000;
+#endif
+}
+
+static inline void set_feature(OpenRISCCPU *cpu, int feature)
+{
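+    /* OPENRISC_FEATURE_* bits use the CPUCFGR encoding, so the feature word
+       doubles as the value of CPUCFGR. */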
+ cpu->feature |= feature;
+ cpu->env.cpucfgr = cpu->feature;
+}
+
+static void openrisc_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ OpenRISCCPUClass *occ = OPENRISC_CPU_GET_CLASS(dev);
+ Error *local_err = NULL;
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ qemu_init_vcpu(cs);
+ cpu_reset(cs);
+
+ occ->parent_realize(dev, errp);
+}
+
+static void openrisc_cpu_initfn(Object *obj)
+{
+ CPUState *cs = CPU(obj);
+ OpenRISCCPU *cpu = OPENRISC_CPU(obj);
+ static int inited;
+
+ cs->env_ptr = &cpu->env;
+
+#ifndef CONFIG_USER_ONLY
+ cpu_openrisc_mmu_init(cpu);
+#endif
+
+ if (tcg_enabled() && !inited) {
+ inited = 1;
+ openrisc_translate_init();
+ }
+}
+
+/* CPU models */
+
+static ObjectClass *openrisc_cpu_class_by_name(const char *cpu_model)
+{
+ ObjectClass *oc;
+ char *typename;
+
+ if (cpu_model == NULL) {
+ return NULL;
+ }
+
+ typename = g_strdup_printf("%s-" TYPE_OPENRISC_CPU, cpu_model);
+ oc = object_class_by_name(typename);
+ g_free(typename);
+ if (oc != NULL && (!object_class_dynamic_cast(oc, TYPE_OPENRISC_CPU) ||
+ object_class_is_abstract(oc))) {
+ return NULL;
+ }
+ return oc;
+}
+
+static void or1200_initfn(Object *obj)
+{
+ OpenRISCCPU *cpu = OPENRISC_CPU(obj);
+
+ set_feature(cpu, OPENRISC_FEATURE_OB32S);
+ set_feature(cpu, OPENRISC_FEATURE_OF32S);
+}
+
+static void openrisc_any_initfn(Object *obj)
+{
+ OpenRISCCPU *cpu = OPENRISC_CPU(obj);
+
+ set_feature(cpu, OPENRISC_FEATURE_OB32S);
+}
+
+typedef struct OpenRISCCPUInfo {
+ const char *name;
+ void (*initfn)(Object *obj);
+} OpenRISCCPUInfo;
+
+static const OpenRISCCPUInfo openrisc_cpus[] = {
+ { .name = "or1200", .initfn = or1200_initfn },
+ { .name = "any", .initfn = openrisc_any_initfn },
+};
+
+static void openrisc_cpu_class_init(ObjectClass *oc, void *data)
+{
+ OpenRISCCPUClass *occ = OPENRISC_CPU_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(occ);
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ occ->parent_realize = dc->realize;
+ dc->realize = openrisc_cpu_realizefn;
+
+ occ->parent_reset = cc->reset;
+ cc->reset = openrisc_cpu_reset;
+
+ cc->class_by_name = openrisc_cpu_class_by_name;
+ cc->has_work = openrisc_cpu_has_work;
+ cc->do_interrupt = openrisc_cpu_do_interrupt;
+ cc->cpu_exec_interrupt = openrisc_cpu_exec_interrupt;
+ cc->dump_state = openrisc_cpu_dump_state;
+ cc->set_pc = openrisc_cpu_set_pc;
+ cc->gdb_read_register = openrisc_cpu_gdb_read_register;
+ cc->gdb_write_register = openrisc_cpu_gdb_write_register;
+#ifdef CONFIG_USER_ONLY
+ cc->handle_mmu_fault = openrisc_cpu_handle_mmu_fault;
+#else
+ cc->get_phys_page_debug = openrisc_cpu_get_phys_page_debug;
+ dc->vmsd = &vmstate_openrisc_cpu;
+#endif
+ cc->gdb_num_core_regs = 32 + 3;
+}
+
+static void cpu_register(const OpenRISCCPUInfo *info)
+{
+ TypeInfo type_info = {
+ .parent = TYPE_OPENRISC_CPU,
+ .instance_size = sizeof(OpenRISCCPU),
+ .instance_init = info->initfn,
+ .class_size = sizeof(OpenRISCCPUClass),
+ };
+
+ type_info.name = g_strdup_printf("%s-" TYPE_OPENRISC_CPU, info->name);
+ type_register(&type_info);
+ g_free((void *)type_info.name);
+}
+
+static const TypeInfo openrisc_cpu_type_info = {
+ .name = TYPE_OPENRISC_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(OpenRISCCPU),
+ .instance_init = openrisc_cpu_initfn,
+ .abstract = true,
+ .class_size = sizeof(OpenRISCCPUClass),
+ .class_init = openrisc_cpu_class_init,
+};
+
+static void openrisc_cpu_register_types(void)
+{
+ int i;
+
+ type_register_static(&openrisc_cpu_type_info);
+ for (i = 0; i < ARRAY_SIZE(openrisc_cpus); i++) {
+ cpu_register(&openrisc_cpus[i]);
+ }
+}
+
+OpenRISCCPU *cpu_openrisc_init(const char *cpu_model)
+{
+ return OPENRISC_CPU(cpu_generic_init(TYPE_OPENRISC_CPU, cpu_model));
+}
+
+/* Sort alphabetically by type name, except for "any". */
+static gint openrisc_cpu_list_compare(gconstpointer a, gconstpointer b)
+{
+ ObjectClass *class_a = (ObjectClass *)a;
+ ObjectClass *class_b = (ObjectClass *)b;
+ const char *name_a, *name_b;
+
+ name_a = object_class_get_name(class_a);
+ name_b = object_class_get_name(class_b);
+ if (strcmp(name_a, "any-" TYPE_OPENRISC_CPU) == 0) {
+ return 1;
+ } else if (strcmp(name_b, "any-" TYPE_OPENRISC_CPU) == 0) {
+ return -1;
+ } else {
+ return strcmp(name_a, name_b);
+ }
+}
+
+static void openrisc_cpu_list_entry(gpointer data, gpointer user_data)
+{
+ ObjectClass *oc = data;
+ CPUListState *s = user_data;
+ const char *typename;
+ char *name;
+
+ typename = object_class_get_name(oc);
+ name = g_strndup(typename,
+ strlen(typename) - strlen("-" TYPE_OPENRISC_CPU));
+ (*s->cpu_fprintf)(s->file, " %s\n",
+ name);
+ g_free(name);
+}
+
+void cpu_openrisc_list(FILE *f, fprintf_function cpu_fprintf)
+{
+ CPUListState s = {
+ .file = f,
+ .cpu_fprintf = cpu_fprintf,
+ };
+ GSList *list;
+
+ list = object_class_get_list(TYPE_OPENRISC_CPU, false);
+ list = g_slist_sort(list, openrisc_cpu_list_compare);
+ (*cpu_fprintf)(f, "Available CPUs:\n");
+ g_slist_foreach(list, openrisc_cpu_list_entry, &s);
+ g_slist_free(list);
+}
+
+type_init(openrisc_cpu_register_types)
diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h
new file mode 100644
index 0000000000..aaf153579a
--- /dev/null
+++ b/target/openrisc/cpu.h
@@ -0,0 +1,411 @@
+/*
+ * OpenRISC virtual CPU header.
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef OPENRISC_CPU_H
+#define OPENRISC_CPU_H
+
+#define TARGET_LONG_BITS 32
+
+#define CPUArchState struct CPUOpenRISCState
+
+/* cpu_openrisc_map_address_* in CPUOpenRISCTLBContext need this decl. */
+struct OpenRISCCPU;
+
+#include "qemu-common.h"
+#include "exec/cpu-defs.h"
+#include "fpu/softfloat.h"
+#include "qom/cpu.h"
+
+#define TYPE_OPENRISC_CPU "or32-cpu"
+
+#define OPENRISC_CPU_CLASS(klass) \
+ OBJECT_CLASS_CHECK(OpenRISCCPUClass, (klass), TYPE_OPENRISC_CPU)
+#define OPENRISC_CPU(obj) \
+ OBJECT_CHECK(OpenRISCCPU, (obj), TYPE_OPENRISC_CPU)
+#define OPENRISC_CPU_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(OpenRISCCPUClass, (obj), TYPE_OPENRISC_CPU)
+
+/**
+ * OpenRISCCPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ *
+ * An OpenRISC CPU model.
+ */
+typedef struct OpenRISCCPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+ void (*parent_reset)(CPUState *cpu);
+} OpenRISCCPUClass;
+
+#define NB_MMU_MODES 3
+
+enum {
+ MMU_NOMMU_IDX = 0,
+ MMU_SUPERVISOR_IDX = 1,
+ MMU_USER_IDX = 2,
+};
+
+#define TARGET_PAGE_BITS 13
+
+#define TARGET_PHYS_ADDR_SPACE_BITS 32
+#define TARGET_VIRT_ADDR_SPACE_BITS 32
+
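+/* FPCSR accessor macros: exception cause bits live at [17:12], enable bits
+   at [11:7] and sticky flag bits at [6:2]. */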
+#define SET_FP_CAUSE(reg, v) do {\
+ (reg) = ((reg) & ~(0x3f << 12)) | \
+ ((v & 0x3f) << 12);\
+ } while (0)
+#define GET_FP_ENABLE(reg) (((reg) >> 7) & 0x1f)
+#define UPDATE_FP_FLAGS(reg, v) do {\
+ (reg) |= ((v & 0x1f) << 2);\
+ } while (0)
+
+/* Version Register */
+#define SPR_VR 0xFFFF003F
+
+/* Internal flags, delay slot flag */
+#define D_FLAG 1
+
+/* Interrupt */
+#define NR_IRQS 32
+
+/* Unit presence register */
+enum {
+ UPR_UP = (1 << 0),
+ UPR_DCP = (1 << 1),
+ UPR_ICP = (1 << 2),
+ UPR_DMP = (1 << 3),
+ UPR_IMP = (1 << 4),
+ UPR_MP = (1 << 5),
+ UPR_DUP = (1 << 6),
+ UPR_PCUR = (1 << 7),
+ UPR_PMP = (1 << 8),
+ UPR_PICP = (1 << 9),
+ UPR_TTP = (1 << 10),
+ UPR_CUP = (255 << 24),
+};
+
+/* CPU configure register */
+enum {
+ CPUCFGR_NSGF = (15 << 0),
+ CPUCFGR_CGF = (1 << 4),
+ CPUCFGR_OB32S = (1 << 5),
+ CPUCFGR_OB64S = (1 << 6),
+ CPUCFGR_OF32S = (1 << 7),
+ CPUCFGR_OF64S = (1 << 8),
+ CPUCFGR_OV64S = (1 << 9),
+};
+
+/* DMMU configure register */
+enum {
+ DMMUCFGR_NTW = (3 << 0),
+ DMMUCFGR_NTS = (7 << 2),
+ DMMUCFGR_NAE = (7 << 5),
+ DMMUCFGR_CRI = (1 << 8),
+ DMMUCFGR_PRI = (1 << 9),
+ DMMUCFGR_TEIRI = (1 << 10),
+ DMMUCFGR_HTR = (1 << 11),
+};
+
+/* IMMU configure register */
+enum {
+ IMMUCFGR_NTW = (3 << 0),
+ IMMUCFGR_NTS = (7 << 2),
+ IMMUCFGR_NAE = (7 << 5),
+ IMMUCFGR_CRI = (1 << 8),
+ IMMUCFGR_PRI = (1 << 9),
+ IMMUCFGR_TEIRI = (1 << 10),
+ IMMUCFGR_HTR = (1 << 11),
+};
+
+/* Floating point control status register */
+enum {
+ FPCSR_FPEE = 1,
+ FPCSR_RM = (3 << 1),
+ FPCSR_OVF = (1 << 3),
+ FPCSR_UNF = (1 << 4),
+ FPCSR_SNF = (1 << 5),
+ FPCSR_QNF = (1 << 6),
+ FPCSR_ZF = (1 << 7),
+ FPCSR_IXF = (1 << 8),
+ FPCSR_IVF = (1 << 9),
+ FPCSR_INF = (1 << 10),
+ FPCSR_DZF = (1 << 11),
+};
+
+/* Exceptions indices */
+enum {
+ EXCP_RESET = 0x1,
+ EXCP_BUSERR = 0x2,
+ EXCP_DPF = 0x3,
+ EXCP_IPF = 0x4,
+ EXCP_TICK = 0x5,
+ EXCP_ALIGN = 0x6,
+ EXCP_ILLEGAL = 0x7,
+ EXCP_INT = 0x8,
+ EXCP_DTLBMISS = 0x9,
+ EXCP_ITLBMISS = 0xa,
+ EXCP_RANGE = 0xb,
+ EXCP_SYSCALL = 0xc,
+ EXCP_FPE = 0xd,
+ EXCP_TRAP = 0xe,
+ EXCP_NR,
+};
+
+/* Supervisor register */
+enum {
+ SR_SM = (1 << 0),
+ SR_TEE = (1 << 1),
+ SR_IEE = (1 << 2),
+ SR_DCE = (1 << 3),
+ SR_ICE = (1 << 4),
+ SR_DME = (1 << 5),
+ SR_IME = (1 << 6),
+ SR_LEE = (1 << 7),
+ SR_CE = (1 << 8),
+ SR_F = (1 << 9),
+ SR_CY = (1 << 10),
+ SR_OV = (1 << 11),
+ SR_OVE = (1 << 12),
+ SR_DSX = (1 << 13),
+ SR_EPH = (1 << 14),
+ SR_FO = (1 << 15),
+ SR_SUMRA = (1 << 16),
+ SR_SCE = (1 << 17),
+};
+
+/* OpenRISC Hardware Capabilities */
+enum {
+ OPENRISC_FEATURE_NSGF = (15 << 0),
+ OPENRISC_FEATURE_CGF = (1 << 4),
+ OPENRISC_FEATURE_OB32S = (1 << 5),
+ OPENRISC_FEATURE_OB64S = (1 << 6),
+ OPENRISC_FEATURE_OF32S = (1 << 7),
+ OPENRISC_FEATURE_OF64S = (1 << 8),
+ OPENRISC_FEATURE_OV64S = (1 << 9),
+};
+
+/* Tick Timer Mode Register */
+enum {
+ TTMR_TP = (0xfffffff),
+ TTMR_IP = (1 << 28),
+ TTMR_IE = (1 << 29),
+ TTMR_M = (3 << 30),
+};
+
+/* Timer Mode */
+enum {
+ TIMER_NONE = (0 << 30),
+ TIMER_INTR = (1 << 30),
+ TIMER_SHOT = (2 << 30),
+ TIMER_CONT = (3 << 30),
+};
+
+/* TLB size */
+enum {
+ DTLB_WAYS = 1,
+ DTLB_SIZE = 64,
+ DTLB_MASK = (DTLB_SIZE-1),
+ ITLB_WAYS = 1,
+ ITLB_SIZE = 64,
+ ITLB_MASK = (ITLB_SIZE-1),
+};
+
+/* TLB prot */
+enum {
+ URE = (1 << 6),
+ UWE = (1 << 7),
+ SRE = (1 << 8),
+ SWE = (1 << 9),
+
+ SXE = (1 << 6),
+ UXE = (1 << 7),
+};
+
+/* TLB lookup result codes */
+enum {
+ TLBRET_INVALID = -3,
+ TLBRET_NOMATCH = -2,
+ TLBRET_BADADDR = -1,
+ TLBRET_MATCH = 0
+};
+
+typedef struct OpenRISCTLBEntry {
+ uint32_t mr;
+ uint32_t tr;
+} OpenRISCTLBEntry;
+
+#ifndef CONFIG_USER_ONLY
+typedef struct CPUOpenRISCTLBContext {
+ OpenRISCTLBEntry itlb[ITLB_WAYS][ITLB_SIZE];
+ OpenRISCTLBEntry dtlb[DTLB_WAYS][DTLB_SIZE];
+
+ int (*cpu_openrisc_map_address_code)(struct OpenRISCCPU *cpu,
+ hwaddr *physical,
+ int *prot,
+ target_ulong address, int rw);
+ int (*cpu_openrisc_map_address_data)(struct OpenRISCCPU *cpu,
+ hwaddr *physical,
+ int *prot,
+ target_ulong address, int rw);
+} CPUOpenRISCTLBContext;
+#endif
+
+typedef struct CPUOpenRISCState {
+ target_ulong gpr[32]; /* General registers */
+ target_ulong pc; /* Program counter */
+ target_ulong npc; /* Next PC */
+ target_ulong ppc; /* Prev PC */
+ target_ulong jmp_pc; /* Jump PC */
+
+ target_ulong machi; /* Multiply register MACHI */
+ target_ulong maclo; /* Multiply register MACLO */
+
+ target_ulong fpmaddhi; /* Multiply and add float register FPMADDHI */
+ target_ulong fpmaddlo; /* Multiply and add float register FPMADDLO */
+
+ target_ulong epcr; /* Exception PC register */
+ target_ulong eear; /* Exception EA register */
+
+ uint32_t sr; /* Supervisor register */
+ uint32_t vr; /* Version register */
+ uint32_t upr; /* Unit presence register */
+ uint32_t cpucfgr; /* CPU configure register */
+ uint32_t dmmucfgr; /* DMMU configure register */
+ uint32_t immucfgr; /* IMMU configure register */
+ uint32_t esr; /* Exception supervisor register */
+ uint32_t fpcsr; /* Floating point control status register */
+ float_status fp_status;
+
+ uint32_t flags; /* cpu_flags; so far we only use it for exceptions
+ in the delay slot. */
+ uint32_t btaken; /* the SR_F bit */
+
+ CPU_COMMON
+
+ /* Fields from here on are preserved across CPU reset. */
+#ifndef CONFIG_USER_ONLY
+ CPUOpenRISCTLBContext *tlb;
+
+ QEMUTimer *timer;
+ uint32_t ttmr; /* Timer tick mode register */
+ uint32_t ttcr; /* Timer tick count register */
+
+ uint32_t picmr; /* Interrupt mask register */
+ uint32_t picsr; /* Interrupt control register */
+#endif
+ void *irq[32]; /* Interrupt irq input */
+} CPUOpenRISCState;
+
+/**
+ * OpenRISCCPU:
+ * @env: #CPUOpenRISCState
+ *
+ * An OpenRISC CPU.
+ */
+typedef struct OpenRISCCPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPUOpenRISCState env;
+
+ uint32_t feature; /* CPU Capabilities */
+} OpenRISCCPU;
+
+static inline OpenRISCCPU *openrisc_env_get_cpu(CPUOpenRISCState *env)
+{
+ return container_of(env, OpenRISCCPU, env);
+}
+
+#define ENV_GET_CPU(e) CPU(openrisc_env_get_cpu(e))
+
+#define ENV_OFFSET offsetof(OpenRISCCPU, env)
+
+OpenRISCCPU *cpu_openrisc_init(const char *cpu_model);
+
+void cpu_openrisc_list(FILE *f, fprintf_function cpu_fprintf);
+void openrisc_cpu_do_interrupt(CPUState *cpu);
+bool openrisc_cpu_exec_interrupt(CPUState *cpu, int int_req);
+void openrisc_cpu_dump_state(CPUState *cpu, FILE *f,
+ fprintf_function cpu_fprintf, int flags);
+hwaddr openrisc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+int openrisc_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
+int openrisc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+void openrisc_translate_init(void);
+int openrisc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address,
+ int rw, int mmu_idx);
+int cpu_openrisc_signal_handler(int host_signum, void *pinfo, void *puc);
+
+#define cpu_list cpu_openrisc_list
+#define cpu_signal_handler cpu_openrisc_signal_handler
+
+#ifndef CONFIG_USER_ONLY
+extern const struct VMStateDescription vmstate_openrisc_cpu;
+
+/* hw/openrisc_pic.c */
+void cpu_openrisc_pic_init(OpenRISCCPU *cpu);
+
+/* hw/openrisc_timer.c */
+void cpu_openrisc_clock_init(OpenRISCCPU *cpu);
+void cpu_openrisc_count_update(OpenRISCCPU *cpu);
+void cpu_openrisc_timer_update(OpenRISCCPU *cpu);
+void cpu_openrisc_count_start(OpenRISCCPU *cpu);
+void cpu_openrisc_count_stop(OpenRISCCPU *cpu);
+
+void cpu_openrisc_mmu_init(OpenRISCCPU *cpu);
+int cpu_openrisc_get_phys_nommu(OpenRISCCPU *cpu,
+ hwaddr *physical,
+ int *prot, target_ulong address, int rw);
+int cpu_openrisc_get_phys_code(OpenRISCCPU *cpu,
+ hwaddr *physical,
+ int *prot, target_ulong address, int rw);
+int cpu_openrisc_get_phys_data(OpenRISCCPU *cpu,
+ hwaddr *physical,
+ int *prot, target_ulong address, int rw);
+#endif
+
+#define cpu_init(cpu_model) CPU(cpu_openrisc_init(cpu_model))
+
+#include "exec/cpu-all.h"
+
+static inline void cpu_get_tb_cpu_state(CPUOpenRISCState *env,
+ target_ulong *pc,
+ target_ulong *cs_base, uint32_t *flags)
+{
+ *pc = env->pc;
+ *cs_base = 0;
+ /* D_FLAG -- branch instruction exception */
+ *flags = (env->flags & D_FLAG);
+}
+
+static inline int cpu_mmu_index(CPUOpenRISCState *env, bool ifetch)
+{
+ if (!(env->sr & SR_IME)) {
+ return MMU_NOMMU_IDX;
+ }
+ return (env->sr & SR_SM) == 0 ? MMU_USER_IDX : MMU_SUPERVISOR_IDX;
+}
+
+#define CPU_INTERRUPT_TIMER CPU_INTERRUPT_TGT_INT_0
+
+#endif /* OPENRISC_CPU_H */
diff --git a/target/openrisc/exception.c b/target/openrisc/exception.c
new file mode 100644
index 0000000000..49470be051
--- /dev/null
+++ b/target/openrisc/exception.c
@@ -0,0 +1,31 @@
+/*
+ * OpenRISC exception.
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exception.h"
+
+void QEMU_NORETURN raise_exception(OpenRISCCPU *cpu, uint32_t excp)
+{
+ CPUState *cs = CPU(cpu);
+
+ cs->exception_index = excp;
+ cpu_loop_exit(cs);
+}
diff --git a/target/openrisc/exception.h b/target/openrisc/exception.h
new file mode 100644
index 0000000000..4ec56b4653
--- /dev/null
+++ b/target/openrisc/exception.h
@@ -0,0 +1,28 @@
+/*
+ * OpenRISC exception header.
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TARGET_OPENRISC_EXCEPTION_H
+#define TARGET_OPENRISC_EXCEPTION_H
+
+#include "cpu.h"
+#include "qemu-common.h"
+
+void QEMU_NORETURN raise_exception(OpenRISCCPU *cpu, uint32_t excp);
+
+#endif /* TARGET_OPENRISC_EXCEPTION_H */
diff --git a/target/openrisc/exception_helper.c b/target/openrisc/exception_helper.c
new file mode 100644
index 0000000000..329a9e400b
--- /dev/null
+++ b/target/openrisc/exception_helper.c
@@ -0,0 +1,30 @@
+/*
+ * OpenRISC exception helper routines
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exception.h"
+
+void HELPER(exception)(CPUOpenRISCState *env, uint32_t excp)
+{
+ OpenRISCCPU *cpu = openrisc_env_get_cpu(env);
+
+ raise_exception(cpu, excp);
+}
diff --git a/target/openrisc/fpu_helper.c b/target/openrisc/fpu_helper.c
new file mode 100644
index 0000000000..c54404b80d
--- /dev/null
+++ b/target/openrisc/fpu_helper.c
@@ -0,0 +1,301 @@
+/*
+ * OpenRISC float helper routines
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ * Feng Gao <gf91597@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exception.h"
+
+static inline uint32_t ieee_ex_to_openrisc(OpenRISCCPU *cpu, int fexcp)
+{
+ int ret = 0;
+ if (fexcp) {
+ if (fexcp & float_flag_invalid) {
+ cpu->env.fpcsr |= FPCSR_IVF;
+ ret = 1;
+ }
+ if (fexcp & float_flag_overflow) {
+ cpu->env.fpcsr |= FPCSR_OVF;
+ ret = 1;
+ }
+ if (fexcp & float_flag_underflow) {
+ cpu->env.fpcsr |= FPCSR_UNF;
+ ret = 1;
+ }
+ if (fexcp & float_flag_divbyzero) {
+ cpu->env.fpcsr |= FPCSR_DZF;
+ ret = 1;
+ }
+ if (fexcp & float_flag_inexact) {
+ cpu->env.fpcsr |= FPCSR_IXF;
+ ret = 1;
+ }
+ }
+
+ return ret;
+}
+
+static inline void update_fpcsr(OpenRISCCPU *cpu)
+{
+ int tmp = ieee_ex_to_openrisc(cpu,
+ get_float_exception_flags(&cpu->env.fp_status));
+
+ SET_FP_CAUSE(cpu->env.fpcsr, tmp);
+ if ((GET_FP_ENABLE(cpu->env.fpcsr) & tmp) &&
+ (cpu->env.fpcsr & FPCSR_FPEE)) {
+ helper_exception(&cpu->env, EXCP_FPE);
+ } else {
+ UPDATE_FP_FLAGS(cpu->env.fpcsr, tmp);
+ }
+}
+
+uint64_t HELPER(itofd)(CPUOpenRISCState *env, uint64_t val)
+{
+ uint64_t itofd;
+ OpenRISCCPU *cpu = openrisc_env_get_cpu(env);
+
+ set_float_exception_flags(0, &cpu->env.fp_status);
+ itofd = int32_to_float64(val, &cpu->env.fp_status);
+ update_fpcsr(cpu);
+
+ return itofd;
+}
+
+uint32_t HELPER(itofs)(CPUOpenRISCState *env, uint32_t val)
+{
+ uint32_t itofs;
+ OpenRISCCPU *cpu = openrisc_env_get_cpu(env);
+
+ set_float_exception_flags(0, &cpu->env.fp_status);
+ itofs = int32_to_float32(val, &cpu->env.fp_status);
+ update_fpcsr(cpu);
+
+ return itofs;
+}
+
+uint64_t HELPER(ftoid)(CPUOpenRISCState *env, uint64_t val)
+{
+ uint64_t ftoid;
+ OpenRISCCPU *cpu = openrisc_env_get_cpu(env);
+
+ set_float_exception_flags(0, &cpu->env.fp_status);
+ ftoid = float32_to_int64(val, &cpu->env.fp_status);
+ update_fpcsr(cpu);
+
+ return ftoid;
+}
+
+uint32_t HELPER(ftois)(CPUOpenRISCState *env, uint32_t val)
+{
+ uint32_t ftois;
+ OpenRISCCPU *cpu = openrisc_env_get_cpu(env);
+
+ set_float_exception_flags(0, &cpu->env.fp_status);
+ ftois = float32_to_int32(val, &cpu->env.fp_status);
+ update_fpcsr(cpu);
+
+ return ftois;
+}
+
+#define FLOAT_OP(name, p) void helper_float_##name##_##p(void)
+
+#define FLOAT_CALC(name) \
+uint64_t helper_float_ ## name ## _d(CPUOpenRISCState *env, \
+ uint64_t fdt0, uint64_t fdt1) \
+{ \
+ uint64_t result; \
+ OpenRISCCPU *cpu = openrisc_env_get_cpu(env); \
+ set_float_exception_flags(0, &cpu->env.fp_status); \
+ result = float64_ ## name(fdt0, fdt1, &cpu->env.fp_status); \
+ update_fpcsr(cpu); \
+ return result; \
+} \
+ \
+uint32_t helper_float_ ## name ## _s(CPUOpenRISCState *env, \
+ uint32_t fdt0, uint32_t fdt1) \
+{ \
+ uint32_t result; \
+ OpenRISCCPU *cpu = openrisc_env_get_cpu(env); \
+ set_float_exception_flags(0, &cpu->env.fp_status); \
+ result = float32_ ## name(fdt0, fdt1, &cpu->env.fp_status); \
+ update_fpcsr(cpu); \
+ return result; \
+} \
+
+FLOAT_CALC(add)
+FLOAT_CALC(sub)
+FLOAT_CALC(mul)
+FLOAT_CALC(div)
+FLOAT_CALC(rem)
+#undef FLOAT_CALC
+
+#define FLOAT_TERNOP(name1, name2) \
+uint64_t helper_float_ ## name1 ## name2 ## _d(CPUOpenRISCState *env, \
+ uint64_t fdt0, \
+ uint64_t fdt1) \
+{ \
+ uint64_t result, temp, hi, lo; \
+ uint32_t val1, val2; \
+ OpenRISCCPU *cpu = openrisc_env_get_cpu(env); \
+ hi = env->fpmaddhi; \
+ lo = env->fpmaddlo; \
+ set_float_exception_flags(0, &cpu->env.fp_status); \
+ result = float64_ ## name1(fdt0, fdt1, &cpu->env.fp_status); \
+ lo &= 0xffffffff; \
+ hi &= 0xffffffff; \
+ temp = (hi << 32) | lo; \
+ result = float64_ ## name2(result, temp, &cpu->env.fp_status); \
+ val1 = result >> 32; \
+ val2 = (uint32_t) (result & 0xffffffff); \
+ update_fpcsr(cpu); \
+ cpu->env.fpmaddlo = val2; \
+ cpu->env.fpmaddhi = val1; \
+ return 0; \
+} \
+ \
+uint32_t helper_float_ ## name1 ## name2 ## _s(CPUOpenRISCState *env, \
+ uint32_t fdt0, uint32_t fdt1) \
+{ \
+ uint64_t result, temp, hi, lo; \
+ uint32_t val1, val2; \
+ OpenRISCCPU *cpu = openrisc_env_get_cpu(env); \
+ hi = cpu->env.fpmaddhi; \
+ lo = cpu->env.fpmaddlo; \
+ set_float_exception_flags(0, &cpu->env.fp_status); \
+ result = float64_ ## name1(fdt0, fdt1, &cpu->env.fp_status); \
+ temp = (hi << 32) | lo; \
+ result = float64_ ## name2(result, temp, &cpu->env.fp_status); \
+ val1 = result >> 32; \
+ val2 = (uint32_t) (result & 0xffffffff); \
+ update_fpcsr(cpu); \
+ cpu->env.fpmaddlo = val2; \
+ cpu->env.fpmaddhi = val1; \
+ return 0; \
+}
+
+FLOAT_TERNOP(mul, add)
+#undef FLOAT_TERNOP
+
+
+#define FLOAT_CMP(name) \
+uint64_t helper_float_ ## name ## _d(CPUOpenRISCState *env, \
+ uint64_t fdt0, uint64_t fdt1) \
+{ \
+ int res; \
+ OpenRISCCPU *cpu = openrisc_env_get_cpu(env); \
+ set_float_exception_flags(0, &cpu->env.fp_status); \
+ res = float64_ ## name(fdt0, fdt1, &cpu->env.fp_status); \
+ update_fpcsr(cpu); \
+ return res; \
+} \
+ \
+uint32_t helper_float_ ## name ## _s(CPUOpenRISCState *env, \
+ uint32_t fdt0, uint32_t fdt1)\
+{ \
+ int res; \
+ OpenRISCCPU *cpu = openrisc_env_get_cpu(env); \
+ set_float_exception_flags(0, &cpu->env.fp_status); \
+ res = float32_ ## name(fdt0, fdt1, &cpu->env.fp_status); \
+ update_fpcsr(cpu); \
+ return res; \
+}
+
+FLOAT_CMP(le)
+FLOAT_CMP(eq)
+FLOAT_CMP(lt)
+#undef FLOAT_CMP
+
+
+#define FLOAT_CMPNE(name) \
+uint64_t helper_float_ ## name ## _d(CPUOpenRISCState *env, \
+ uint64_t fdt0, uint64_t fdt1) \
+{ \
+ int res; \
+ OpenRISCCPU *cpu = openrisc_env_get_cpu(env); \
+ set_float_exception_flags(0, &cpu->env.fp_status); \
+ res = !float64_eq_quiet(fdt0, fdt1, &cpu->env.fp_status); \
+ update_fpcsr(cpu); \
+ return res; \
+} \
+ \
+uint32_t helper_float_ ## name ## _s(CPUOpenRISCState *env, \
+ uint32_t fdt0, uint32_t fdt1) \
+{ \
+ int res; \
+ OpenRISCCPU *cpu = openrisc_env_get_cpu(env); \
+ set_float_exception_flags(0, &cpu->env.fp_status); \
+ res = !float32_eq_quiet(fdt0, fdt1, &cpu->env.fp_status); \
+ update_fpcsr(cpu); \
+ return res; \
+}
+
+FLOAT_CMPNE(ne)
+#undef FLOAT_CMPNE
+
+#define FLOAT_CMPGT(name) \
+uint64_t helper_float_ ## name ## _d(CPUOpenRISCState *env, \
+ uint64_t fdt0, uint64_t fdt1) \
+{ \
+ int res; \
+ OpenRISCCPU *cpu = openrisc_env_get_cpu(env); \
+ set_float_exception_flags(0, &cpu->env.fp_status); \
+ res = !float64_le(fdt0, fdt1, &cpu->env.fp_status); \
+ update_fpcsr(cpu); \
+ return res; \
+} \
+ \
+uint32_t helper_float_ ## name ## _s(CPUOpenRISCState *env, \
+ uint32_t fdt0, uint32_t fdt1) \
+{ \
+ int res; \
+ OpenRISCCPU *cpu = openrisc_env_get_cpu(env); \
+ set_float_exception_flags(0, &cpu->env.fp_status); \
+ res = !float32_le(fdt0, fdt1, &cpu->env.fp_status); \
+ update_fpcsr(cpu); \
+ return res; \
+}
+FLOAT_CMPGT(gt)
+#undef FLOAT_CMPGT
+
+#define FLOAT_CMPGE(name) \
+uint64_t helper_float_ ## name ## _d(CPUOpenRISCState *env, \
+ uint64_t fdt0, uint64_t fdt1) \
+{ \
+ int res; \
+ OpenRISCCPU *cpu = openrisc_env_get_cpu(env); \
+ set_float_exception_flags(0, &cpu->env.fp_status); \
+ res = !float64_lt(fdt0, fdt1, &cpu->env.fp_status); \
+ update_fpcsr(cpu); \
+ return res; \
+} \
+ \
+uint32_t helper_float_ ## name ## _s(CPUOpenRISCState *env, \
+ uint32_t fdt0, uint32_t fdt1) \
+{ \
+ int res; \
+ OpenRISCCPU *cpu = openrisc_env_get_cpu(env); \
+ set_float_exception_flags(0, &cpu->env.fp_status); \
+ res = !float32_lt(fdt0, fdt1, &cpu->env.fp_status); \
+ update_fpcsr(cpu); \
+ return res; \
+}
+
+FLOAT_CMPGE(ge)
+#undef FLOAT_CMPGE
diff --git a/target/openrisc/gdbstub.c b/target/openrisc/gdbstub.c
new file mode 100644
index 0000000000..cb16e76358
--- /dev/null
+++ b/target/openrisc/gdbstub.c
@@ -0,0 +1,84 @@
+/*
+ * OpenRISC gdb server stub
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+
+int openrisc_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ OpenRISCCPU *cpu = OPENRISC_CPU(cs);
+ CPUOpenRISCState *env = &cpu->env;
+
+ if (n < 32) {
+ return gdb_get_reg32(mem_buf, env->gpr[n]);
+ } else {
+ switch (n) {
+ case 32: /* PPC */
+ return gdb_get_reg32(mem_buf, env->ppc);
+
+ case 33: /* NPC */
+ return gdb_get_reg32(mem_buf, env->npc);
+
+ case 34: /* SR */
+ return gdb_get_reg32(mem_buf, env->sr);
+
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+int openrisc_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ OpenRISCCPU *cpu = OPENRISC_CPU(cs);
+ CPUClass *cc = CPU_GET_CLASS(cs);
+ CPUOpenRISCState *env = &cpu->env;
+ uint32_t tmp;
+
+ if (n > cc->gdb_num_core_regs) {
+ return 0;
+ }
+
+ tmp = ldl_p(mem_buf);
+
+ if (n < 32) {
+ env->gpr[n] = tmp;
+ } else {
+ switch (n) {
+ case 32: /* PPC */
+ env->ppc = tmp;
+ break;
+
+ case 33: /* NPC */
+ env->npc = tmp;
+ break;
+
+ case 34: /* SR */
+ env->sr = tmp;
+ break;
+
+ default:
+ break;
+ }
+ }
+ return 4;
+}
diff --git a/target/openrisc/helper.h b/target/openrisc/helper.h
new file mode 100644
index 0000000000..f53fa21344
--- /dev/null
+++ b/target/openrisc/helper.h
@@ -0,0 +1,66 @@
+/*
+ * OpenRISC helper defines
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* exception */
+DEF_HELPER_FLAGS_2(exception, 0, void, env, i32)
+
+/* float */
+DEF_HELPER_FLAGS_2(itofd, 0, i64, env, i64)
+DEF_HELPER_FLAGS_2(itofs, 0, i32, env, i32)
+DEF_HELPER_FLAGS_2(ftoid, 0, i64, env, i64)
+DEF_HELPER_FLAGS_2(ftois, 0, i32, env, i32)
+
+#define FOP_MADD(op) \
+DEF_HELPER_FLAGS_3(float_ ## op ## _s, 0, i32, env, i32, i32) \
+DEF_HELPER_FLAGS_3(float_ ## op ## _d, 0, i64, env, i64, i64)
+FOP_MADD(muladd)
+#undef FOP_MADD
+
+#define FOP_CALC(op) \
+DEF_HELPER_FLAGS_3(float_ ## op ## _s, 0, i32, env, i32, i32) \
+DEF_HELPER_FLAGS_3(float_ ## op ## _d, 0, i64, env, i64, i64)
+FOP_CALC(add)
+FOP_CALC(sub)
+FOP_CALC(mul)
+FOP_CALC(div)
+FOP_CALC(rem)
+#undef FOP_CALC
+
+#define FOP_CMP(op) \
+DEF_HELPER_FLAGS_3(float_ ## op ## _s, 0, i32, env, i32, i32) \
+DEF_HELPER_FLAGS_3(float_ ## op ## _d, 0, i64, env, i64, i64)
+FOP_CMP(eq)
+FOP_CMP(lt)
+FOP_CMP(le)
+FOP_CMP(ne)
+FOP_CMP(gt)
+FOP_CMP(ge)
+#undef FOP_CMP
+
+/* int */
+DEF_HELPER_FLAGS_1(ff1, 0, tl, tl)
+DEF_HELPER_FLAGS_1(fl1, 0, tl, tl)
+DEF_HELPER_FLAGS_3(mul32, 0, i32, env, i32, i32)
+
+/* interrupt */
+DEF_HELPER_FLAGS_1(rfe, 0, void, env)
+
+/* sys */
+DEF_HELPER_FLAGS_4(mtspr, 0, void, env, tl, tl, tl)
+DEF_HELPER_FLAGS_4(mfspr, 0, tl, env, tl, tl, tl)
diff --git a/target/openrisc/int_helper.c b/target/openrisc/int_helper.c
new file mode 100644
index 0000000000..4d1f958901
--- /dev/null
+++ b/target/openrisc/int_helper.c
@@ -0,0 +1,80 @@
+/*
+ * OpenRISC int helper routines
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ * Feng Gao <gf91597@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exception.h"
+#include "qemu/host-utils.h"
+
+target_ulong HELPER(ff1)(target_ulong x)
+{
+/*#ifdef TARGET_OPENRISC64
+ return x ? ctz64(x) + 1 : 0;
+#else*/
+ return x ? ctz32(x) + 1 : 0;
+/*#endif*/
+}
+
+target_ulong HELPER(fl1)(target_ulong x)
+{
+/* Not used yet; enable this when we need or64. */
+/*#ifdef TARGET_OPENRISC64
+ return 64 - clz64(x);
+#else*/
+ return 32 - clz32(x);
+/*#endif*/
+}
+
+uint32_t HELPER(mul32)(CPUOpenRISCState *env,
+ uint32_t ra, uint32_t rb)
+{
+ uint64_t result;
+ uint32_t high, cy;
+
+ OpenRISCCPU *cpu = openrisc_env_get_cpu(env);
+
+ result = (uint64_t)ra * rb;
+ /* Registers in or32 are 32 bits wide, so 32 is NOT a magic number here.
+ or64 is not handled by this function and is not implemented yet;
+ TARGET_LONG_BITS would be 64 for or64 and would break this function,
+ so we do not use TARGET_LONG_BITS here. */
+ high = result >> 32;
+ cy = result >> (32 - 1);
+
+ if ((cy & 0x1) == 0x0) {
+ if (high == 0x0) {
+ return result;
+ }
+ }
+
+ if ((cy & 0x1) == 0x1) {
+ if (high == 0xffffffff) {
+ return result;
+ }
+ }
+
+ cpu->env.sr |= (SR_OV | SR_CY);
+ if (cpu->env.sr & SR_OVE) {
+ raise_exception(cpu, EXCP_RANGE);
+ }
+
+ return result;
+}
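(Editorial note, not part of the patch) The check above flags the result as out of range exactly when the top 33 bits of the 64-bit product (the high word together with bit 31) are neither all zeros nor all ones. A minimal standalone sketch of the same predicate, hypothetical and for illustration only:

    /* Returns true under exactly the condition in which HELPER(mul32)
       above sets SR_OV | SR_CY and may raise EXCP_RANGE. */
    static inline bool mul32_overflows(uint32_t ra, uint32_t rb)
    {
        uint64_t result = (uint64_t)ra * rb;
        uint32_t high = result >> 32;
        uint32_t cy = (result >> 31) & 0x1;

        return !((cy == 0 && high == 0x00000000) ||
                 (cy == 1 && high == 0xffffffff));
    }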
diff --git a/target/openrisc/interrupt.c b/target/openrisc/interrupt.c
new file mode 100644
index 0000000000..5fe3f11ffc
--- /dev/null
+++ b/target/openrisc/interrupt.c
@@ -0,0 +1,87 @@
+/*
+ * OpenRISC interrupt.
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "qemu-common.h"
+#include "exec/gdbstub.h"
+#include "qemu/host-utils.h"
+#ifndef CONFIG_USER_ONLY
+#include "hw/loader.h"
+#endif
+
+void openrisc_cpu_do_interrupt(CPUState *cs)
+{
+#ifndef CONFIG_USER_ONLY
+ OpenRISCCPU *cpu = OPENRISC_CPU(cs);
+ CPUOpenRISCState *env = &cpu->env;
+
+ env->epcr = env->pc;
+ if (env->flags & D_FLAG) {
+ env->flags &= ~D_FLAG;
+ env->sr |= SR_DSX;
+ env->epcr -= 4;
+ }
+ if (cs->exception_index == EXCP_SYSCALL) {
+ env->epcr += 4;
+ }
+
+ /* The machine state may change between user mode and supervisor mode,
+ so we need to flush the TLB when entering and exiting an exception. */
+ tlb_flush(cs, 1);
+
+ env->esr = env->sr;
+ env->sr &= ~SR_DME;
+ env->sr &= ~SR_IME;
+ env->sr |= SR_SM;
+ env->sr &= ~SR_IEE;
+ env->sr &= ~SR_TEE;
+ env->tlb->cpu_openrisc_map_address_data = &cpu_openrisc_get_phys_nommu;
+ env->tlb->cpu_openrisc_map_address_code = &cpu_openrisc_get_phys_nommu;
+
+ if (cs->exception_index > 0 && cs->exception_index < EXCP_NR) {
+ env->pc = (cs->exception_index << 8);
+ } else {
+ cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
+ }
+#endif
+
+ cs->exception_index = -1;
+}
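(Editorial note, not part of the patch) The dispatch above jumps to a fixed vector computed as exception_index << 8, so the handlers sit 0x100 bytes apart. With the EXCP_* indices defined in cpu.h earlier in this patch this gives, for example:

    /* Illustration only, derived from the enum values above:
       EXCP_RESET   (0x1) -> vector 0x100
       EXCP_BUSERR  (0x2) -> vector 0x200
       EXCP_TICK    (0x5) -> vector 0x500
       EXCP_SYSCALL (0xc) -> vector 0xc00 */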
+
+bool openrisc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ OpenRISCCPU *cpu = OPENRISC_CPU(cs);
+ CPUOpenRISCState *env = &cpu->env;
+ int idx = -1;
+
+ if ((interrupt_request & CPU_INTERRUPT_HARD) && (env->sr & SR_IEE)) {
+ idx = EXCP_INT;
+ }
+ if ((interrupt_request & CPU_INTERRUPT_TIMER) && (env->sr & SR_TEE)) {
+ idx = EXCP_TICK;
+ }
+ if (idx >= 0) {
+ cs->exception_index = idx;
+ openrisc_cpu_do_interrupt(cs);
+ return true;
+ }
+ return false;
+}
diff --git a/target/openrisc/interrupt_helper.c b/target/openrisc/interrupt_helper.c
new file mode 100644
index 0000000000..116f9109a7
--- /dev/null
+++ b/target/openrisc/interrupt_helper.c
@@ -0,0 +1,60 @@
+/*
+ * OpenRISC interrupt helper routines
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ * Feng Gao <gf91597@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+
+void HELPER(rfe)(CPUOpenRISCState *env)
+{
+ OpenRISCCPU *cpu = openrisc_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+#ifndef CONFIG_USER_ONLY
+ int need_flush_tlb = (cpu->env.sr & (SR_SM | SR_IME | SR_DME)) ^
+ (cpu->env.esr & (SR_SM | SR_IME | SR_DME));
+#endif
+ cpu->env.pc = cpu->env.epcr;
+ cpu->env.npc = cpu->env.epcr;
+ cpu->env.sr = cpu->env.esr;
+
+#ifndef CONFIG_USER_ONLY
+ if (cpu->env.sr & SR_DME) {
+ cpu->env.tlb->cpu_openrisc_map_address_data =
+ &cpu_openrisc_get_phys_data;
+ } else {
+ cpu->env.tlb->cpu_openrisc_map_address_data =
+ &cpu_openrisc_get_phys_nommu;
+ }
+
+ if (cpu->env.sr & SR_IME) {
+ cpu->env.tlb->cpu_openrisc_map_address_code =
+ &cpu_openrisc_get_phys_code;
+ } else {
+ cpu->env.tlb->cpu_openrisc_map_address_code =
+ &cpu_openrisc_get_phys_nommu;
+ }
+
+ if (need_flush_tlb) {
+ tlb_flush(cs, 1);
+ }
+#endif
+ cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
+}
diff --git a/target/openrisc/machine.c b/target/openrisc/machine.c
new file mode 100644
index 0000000000..17b0c77d6c
--- /dev/null
+++ b/target/openrisc/machine.c
@@ -0,0 +1,54 @@
+/*
+ * OpenRISC Machine
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "hw/hw.h"
+#include "hw/boards.h"
+#include "migration/cpu.h"
+
+static const VMStateDescription vmstate_env = {
+ .name = "env",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(gpr, CPUOpenRISCState, 32),
+ VMSTATE_UINT32(sr, CPUOpenRISCState),
+ VMSTATE_UINT32(epcr, CPUOpenRISCState),
+ VMSTATE_UINT32(eear, CPUOpenRISCState),
+ VMSTATE_UINT32(esr, CPUOpenRISCState),
+ VMSTATE_UINT32(fpcsr, CPUOpenRISCState),
+ VMSTATE_UINT32(pc, CPUOpenRISCState),
+ VMSTATE_UINT32(npc, CPUOpenRISCState),
+ VMSTATE_UINT32(ppc, CPUOpenRISCState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+const VMStateDescription vmstate_openrisc_cpu = {
+ .name = "cpu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_CPU(),
+ VMSTATE_STRUCT(env, OpenRISCCPU, 1, vmstate_env, CPUOpenRISCState),
+ VMSTATE_END_OF_LIST()
+ }
+};
diff --git a/target/openrisc/mmu.c b/target/openrisc/mmu.c
new file mode 100644
index 0000000000..505dcdcdc8
--- /dev/null
+++ b/target/openrisc/mmu.c
@@ -0,0 +1,238 @@
+/*
+ * OpenRISC MMU.
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ * Zhizhou Zhang <etouzh@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "qemu-common.h"
+#include "exec/gdbstub.h"
+#include "qemu/host-utils.h"
+#ifndef CONFIG_USER_ONLY
+#include "hw/loader.h"
+#endif
+
+#ifndef CONFIG_USER_ONLY
+int cpu_openrisc_get_phys_nommu(OpenRISCCPU *cpu,
+ hwaddr *physical,
+ int *prot, target_ulong address, int rw)
+{
+ *physical = address;
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return TLBRET_MATCH;
+}
+
+int cpu_openrisc_get_phys_code(OpenRISCCPU *cpu,
+ hwaddr *physical,
+ int *prot, target_ulong address, int rw)
+{
+ int vpn = address >> TARGET_PAGE_BITS;
+ int idx = vpn & ITLB_MASK;
+ int right = 0;
+
+ if ((cpu->env.tlb->itlb[0][idx].mr >> TARGET_PAGE_BITS) != vpn) {
+ return TLBRET_NOMATCH;
+ }
+ if (!(cpu->env.tlb->itlb[0][idx].mr & 1)) {
+ return TLBRET_INVALID;
+ }
+
+ if (cpu->env.sr & SR_SM) { /* supervisor mode */
+ if (cpu->env.tlb->itlb[0][idx].tr & SXE) {
+ right |= PAGE_EXEC;
+ }
+ } else {
+ if (cpu->env.tlb->itlb[0][idx].tr & UXE) {
+ right |= PAGE_EXEC;
+ }
+ }
+
+ if ((rw & 2) && ((right & PAGE_EXEC) == 0)) {
+ return TLBRET_BADADDR;
+ }
+
+ *physical = (cpu->env.tlb->itlb[0][idx].tr & TARGET_PAGE_MASK) |
+ (address & (TARGET_PAGE_SIZE-1));
+ *prot = right;
+ return TLBRET_MATCH;
+}
+
+int cpu_openrisc_get_phys_data(OpenRISCCPU *cpu,
+ hwaddr *physical,
+ int *prot, target_ulong address, int rw)
+{
+ int vpn = address >> TARGET_PAGE_BITS;
+ int idx = vpn & DTLB_MASK;
+ int right = 0;
+
+ if ((cpu->env.tlb->dtlb[0][idx].mr >> TARGET_PAGE_BITS) != vpn) {
+ return TLBRET_NOMATCH;
+ }
+ if (!(cpu->env.tlb->dtlb[0][idx].mr & 1)) {
+ return TLBRET_INVALID;
+ }
+
+ if (cpu->env.sr & SR_SM) { /* supervisor mode */
+ if (cpu->env.tlb->dtlb[0][idx].tr & SRE) {
+ right |= PAGE_READ;
+ }
+ if (cpu->env.tlb->dtlb[0][idx].tr & SWE) {
+ right |= PAGE_WRITE;
+ }
+ } else {
+ if (cpu->env.tlb->dtlb[0][idx].tr & URE) {
+ right |= PAGE_READ;
+ }
+ if (cpu->env.tlb->dtlb[0][idx].tr & UWE) {
+ right |= PAGE_WRITE;
+ }
+ }
+
+ if (!(rw & 1) && ((right & PAGE_READ) == 0)) {
+ return TLBRET_BADADDR;
+ }
+ if ((rw & 1) && ((right & PAGE_WRITE) == 0)) {
+ return TLBRET_BADADDR;
+ }
+
+ *physical = (cpu->env.tlb->dtlb[0][idx].tr & TARGET_PAGE_MASK) |
+ (address & (TARGET_PAGE_SIZE-1));
+ *prot = right;
+ return TLBRET_MATCH;
+}
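(Editorial note, not part of the patch) Both lookups treat way 0 of the TLB as a direct-mapped, 64-entry array indexed by the low bits of the virtual page number, with bit 0 of the match register acting as the valid bit. A small worked example, assuming 8 KiB pages (TARGET_PAGE_BITS == 13):

    target_ulong address = 0x12345678;
    int vpn = address >> 13;          /* 0x91a2 */
    int idx = vpn & DTLB_MASK;        /* 0x91a2 & 0x3f == 0x22, entry 34 */
    /* dtlb[0][idx] matches only if (mr >> 13) == vpn and (mr & 1) != 0 */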
+
+static int cpu_openrisc_get_phys_addr(OpenRISCCPU *cpu,
+ hwaddr *physical,
+ int *prot, target_ulong address,
+ int rw)
+{
+ int ret = TLBRET_MATCH;
+
+ if (rw == 2) { /* ITLB */
+ *physical = 0;
+ ret = cpu->env.tlb->cpu_openrisc_map_address_code(cpu, physical,
+ prot, address, rw);
+ } else { /* DTLB */
+ ret = cpu->env.tlb->cpu_openrisc_map_address_data(cpu, physical,
+ prot, address, rw);
+ }
+
+ return ret;
+}
+#endif
+
+static void cpu_openrisc_raise_mmu_exception(OpenRISCCPU *cpu,
+ target_ulong address,
+ int rw, int tlb_error)
+{
+ CPUState *cs = CPU(cpu);
+ int exception = 0;
+
+ switch (tlb_error) {
+ default:
+ if (rw == 2) {
+ exception = EXCP_IPF;
+ } else {
+ exception = EXCP_DPF;
+ }
+ break;
+#ifndef CONFIG_USER_ONLY
+ case TLBRET_BADADDR:
+ if (rw == 2) {
+ exception = EXCP_IPF;
+ } else {
+ exception = EXCP_DPF;
+ }
+ break;
+ case TLBRET_INVALID:
+ case TLBRET_NOMATCH:
+ /* No TLB match for a mapped address */
+ if (rw == 2) {
+ exception = EXCP_ITLBMISS;
+ } else {
+ exception = EXCP_DTLBMISS;
+ }
+ break;
+#endif
+ }
+
+ cs->exception_index = exception;
+ cpu->env.eear = address;
+}
+
+#ifndef CONFIG_USER_ONLY
+int openrisc_cpu_handle_mmu_fault(CPUState *cs,
+ vaddr address, int rw, int mmu_idx)
+{
+ OpenRISCCPU *cpu = OPENRISC_CPU(cs);
+ int ret = 0;
+ hwaddr physical = 0;
+ int prot = 0;
+
+ ret = cpu_openrisc_get_phys_addr(cpu, &physical, &prot,
+ address, rw);
+
+ if (ret == TLBRET_MATCH) {
+ tlb_set_page(cs, address & TARGET_PAGE_MASK,
+ physical & TARGET_PAGE_MASK, prot,
+ mmu_idx, TARGET_PAGE_SIZE);
+ ret = 0;
+ } else if (ret < 0) {
+ cpu_openrisc_raise_mmu_exception(cpu, address, rw, ret);
+ ret = 1;
+ }
+
+ return ret;
+}
+#else
+int openrisc_cpu_handle_mmu_fault(CPUState *cs,
+ vaddr address, int rw, int mmu_idx)
+{
+ OpenRISCCPU *cpu = OPENRISC_CPU(cs);
+ int ret = 0;
+
+ cpu_openrisc_raise_mmu_exception(cpu, address, rw, ret);
+ ret = 1;
+
+ return ret;
+}
+#endif
+
+#ifndef CONFIG_USER_ONLY
+hwaddr openrisc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+ OpenRISCCPU *cpu = OPENRISC_CPU(cs);
+ hwaddr phys_addr;
+ int prot;
+
+ if (cpu_openrisc_get_phys_addr(cpu, &phys_addr, &prot, addr, 0)) {
+ return -1;
+ }
+
+ return phys_addr;
+}
+
+void cpu_openrisc_mmu_init(OpenRISCCPU *cpu)
+{
+ cpu->env.tlb = g_malloc0(sizeof(CPUOpenRISCTLBContext));
+
+ cpu->env.tlb->cpu_openrisc_map_address_code = &cpu_openrisc_get_phys_nommu;
+ cpu->env.tlb->cpu_openrisc_map_address_data = &cpu_openrisc_get_phys_nommu;
+}
+#endif
diff --git a/target/openrisc/mmu_helper.c b/target/openrisc/mmu_helper.c
new file mode 100644
index 0000000000..a44d0aa51a
--- /dev/null
+++ b/target/openrisc/mmu_helper.c
@@ -0,0 +1,44 @@
+/*
+ * OpenRISC MMU helper routines
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ * Zhizhou Zhang <etouzh@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+
+#ifndef CONFIG_USER_ONLY
+
+void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ int ret;
+
+ ret = openrisc_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
+
+ if (ret) {
+ if (retaddr) {
+ /* now we have a real cpu fault. */
+ cpu_restore_state(cs, retaddr);
+ }
+ /* Raise Exception. */
+ cpu_loop_exit(cs);
+ }
+}
+#endif
diff --git a/target/openrisc/sys_helper.c b/target/openrisc/sys_helper.c
new file mode 100644
index 0000000000..a719e452be
--- /dev/null
+++ b/target/openrisc/sys_helper.c
@@ -0,0 +1,288 @@
+/*
+ * OpenRISC system instructions helper routines
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ * Zhizhou Zhang <etouzh@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+
+#define TO_SPR(group, number) (((group) << 11) + (number))
+
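(Editorial note, not part of the patch) TO_SPR packs the SPR group number into bits 15..11 and the register index into bits 10..0, which is how the case labels below are formed. A few examples:

    TO_SPR(0, 17)  == 0x0011    /* SR                   */
    TO_SPR(2, 512) == 0x1200    /* first ITLBW0MR entry */
    TO_SPR(10, 0)  == 0x5000    /* TTMR                 */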
+void HELPER(mtspr)(CPUOpenRISCState *env,
+ target_ulong ra, target_ulong rb, target_ulong offset)
+{
+#ifndef CONFIG_USER_ONLY
+ int spr = (ra | offset);
+ int idx;
+
+ OpenRISCCPU *cpu = openrisc_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
+ switch (spr) {
+ case TO_SPR(0, 0): /* VR */
+ env->vr = rb;
+ break;
+
+ case TO_SPR(0, 16): /* NPC */
+ env->npc = rb;
+ break;
+
+ case TO_SPR(0, 17): /* SR */
+ if ((env->sr & (SR_IME | SR_DME | SR_SM)) ^
+ (rb & (SR_IME | SR_DME | SR_SM))) {
+ tlb_flush(cs, 1);
+ }
+ env->sr = rb;
+ env->sr |= SR_FO; /* FO is constant, always 1 */
+ if (env->sr & SR_DME) {
+ env->tlb->cpu_openrisc_map_address_data =
+ &cpu_openrisc_get_phys_data;
+ } else {
+ env->tlb->cpu_openrisc_map_address_data =
+ &cpu_openrisc_get_phys_nommu;
+ }
+
+ if (env->sr & SR_IME) {
+ env->tlb->cpu_openrisc_map_address_code =
+ &cpu_openrisc_get_phys_code;
+ } else {
+ env->tlb->cpu_openrisc_map_address_code =
+ &cpu_openrisc_get_phys_nommu;
+ }
+ break;
+
+ case TO_SPR(0, 18): /* PPC */
+ env->ppc = rb;
+ break;
+
+ case TO_SPR(0, 32): /* EPCR */
+ env->epcr = rb;
+ break;
+
+ case TO_SPR(0, 48): /* EEAR */
+ env->eear = rb;
+ break;
+
+ case TO_SPR(0, 64): /* ESR */
+ env->esr = rb;
+ break;
+ case TO_SPR(1, 512) ... TO_SPR(1, 512+DTLB_SIZE-1): /* DTLBW0MR 0-127 */
+ idx = spr - TO_SPR(1, 512);
+ if (!(rb & 1)) {
+ tlb_flush_page(cs, env->tlb->dtlb[0][idx].mr & TARGET_PAGE_MASK);
+ }
+ env->tlb->dtlb[0][idx].mr = rb;
+ break;
+
+ case TO_SPR(1, 640) ... TO_SPR(1, 640+DTLB_SIZE-1): /* DTLBW0TR 0-127 */
+ idx = spr - TO_SPR(1, 640);
+ env->tlb->dtlb[0][idx].tr = rb;
+ break;
+ case TO_SPR(1, 768) ... TO_SPR(1, 895): /* DTLBW1MR 0-127 */
+ case TO_SPR(1, 896) ... TO_SPR(1, 1023): /* DTLBW1TR 0-127 */
+ case TO_SPR(1, 1024) ... TO_SPR(1, 1151): /* DTLBW2MR 0-127 */
+ case TO_SPR(1, 1152) ... TO_SPR(1, 1279): /* DTLBW2TR 0-127 */
+ case TO_SPR(1, 1280) ... TO_SPR(1, 1407): /* DTLBW3MR 0-127 */
+ case TO_SPR(1, 1408) ... TO_SPR(1, 1535): /* DTLBW3TR 0-127 */
+ break;
+ case TO_SPR(2, 512) ... TO_SPR(2, 512+ITLB_SIZE-1): /* ITLBW0MR 0-127 */
+ idx = spr - TO_SPR(2, 512);
+ if (!(rb & 1)) {
+ tlb_flush_page(cs, env->tlb->itlb[0][idx].mr & TARGET_PAGE_MASK);
+ }
+ env->tlb->itlb[0][idx].mr = rb;
+ break;
+
+ case TO_SPR(2, 640) ... TO_SPR(2, 640+ITLB_SIZE-1): /* ITLBW0TR 0-127 */
+ idx = spr - TO_SPR(2, 640);
+ env->tlb->itlb[0][idx].tr = rb;
+ break;
+ case TO_SPR(2, 768) ... TO_SPR(2, 895): /* ITLBW1MR 0-127 */
+ case TO_SPR(2, 896) ... TO_SPR(2, 1023): /* ITLBW1TR 0-127 */
+ case TO_SPR(2, 1024) ... TO_SPR(2, 1151): /* ITLBW2MR 0-127 */
+ case TO_SPR(2, 1152) ... TO_SPR(2, 1279): /* ITLBW2TR 0-127 */
+ case TO_SPR(2, 1280) ... TO_SPR(2, 1407): /* ITLBW3MR 0-127 */
+ case TO_SPR(2, 1408) ... TO_SPR(2, 1535): /* ITLBW3TR 0-127 */
+ break;
+ case TO_SPR(9, 0): /* PICMR */
+ env->picmr |= rb;
+ break;
+ case TO_SPR(9, 2): /* PICSR */
+ env->picsr &= ~rb;
+ break;
+ case TO_SPR(10, 0): /* TTMR */
+ {
+ if ((env->ttmr & TTMR_M) ^ (rb & TTMR_M)) {
+ switch (rb & TTMR_M) {
+ case TIMER_NONE:
+ cpu_openrisc_count_stop(cpu);
+ break;
+ case TIMER_INTR:
+ case TIMER_SHOT:
+ case TIMER_CONT:
+ cpu_openrisc_count_start(cpu);
+ break;
+ default:
+ break;
+ }
+ }
+
+ int ip = env->ttmr & TTMR_IP;
+
+ if (rb & TTMR_IP) { /* Keep IP bit. */
+ env->ttmr = (rb & ~TTMR_IP) | ip;
+ } else { /* Clear IP bit. */
+ env->ttmr = rb & ~TTMR_IP;
+ cs->interrupt_request &= ~CPU_INTERRUPT_TIMER;
+ }
+
+ cpu_openrisc_timer_update(cpu);
+ }
+ break;
+
+ case TO_SPR(10, 1): /* TTCR */
+ env->ttcr = rb;
+ if (env->ttmr & TIMER_NONE) {
+ return;
+ }
+ cpu_openrisc_timer_update(cpu);
+ break;
+ default:
+ break;
+ }
+#endif
+}
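(Editorial note, not part of the patch) As a worked example of the TTMR handling above, built only from the TTMR_* and TIMER_* enums in cpu.h: programming the tick timer for continuous mode with interrupts enabled and a period of 0x1000 cycles corresponds to a guest write of

    uint32_t ttmr = TIMER_CONT | TTMR_IE | (0x1000 & TTMR_TP);

If the timer was previously disabled (TIMER_NONE), the TO_SPR(10, 0) case sees a mode change and starts the counter; since TTMR_IP is clear in the written value, any pending timer interrupt request is also cleared.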
+
+target_ulong HELPER(mfspr)(CPUOpenRISCState *env,
+ target_ulong rd, target_ulong ra, uint32_t offset)
+{
+#ifndef CONFIG_USER_ONLY
+ int spr = (ra | offset);
+ int idx;
+
+ OpenRISCCPU *cpu = openrisc_env_get_cpu(env);
+
+ switch (spr) {
+ case TO_SPR(0, 0): /* VR */
+ return env->vr & SPR_VR;
+
+ case TO_SPR(0, 1): /* UPR */
+ return env->upr; /* TT, DM, IM, UP present */
+
+ case TO_SPR(0, 2): /* CPUCFGR */
+ return env->cpucfgr;
+
+ case TO_SPR(0, 3): /* DMMUCFGR */
+ return env->dmmucfgr; /* 1Way, 64 entries */
+
+ case TO_SPR(0, 4): /* IMMUCFGR */
+ return env->immucfgr;
+
+ case TO_SPR(0, 16): /* NPC */
+ return env->npc;
+
+ case TO_SPR(0, 17): /* SR */
+ return env->sr;
+
+ case TO_SPR(0, 18): /* PPC */
+ return env->ppc;
+
+ case TO_SPR(0, 32): /* EPCR */
+ return env->epcr;
+
+ case TO_SPR(0, 48): /* EEAR */
+ return env->eear;
+
+ case TO_SPR(0, 64): /* ESR */
+ return env->esr;
+
+ case TO_SPR(1, 512) ... TO_SPR(1, 512+DTLB_SIZE-1): /* DTLBW0MR 0-127 */
+ idx = spr - TO_SPR(1, 512);
+ return env->tlb->dtlb[0][idx].mr;
+
+ case TO_SPR(1, 640) ... TO_SPR(1, 640+DTLB_SIZE-1): /* DTLBW0TR 0-127 */
+ idx = spr - TO_SPR(1, 640);
+ return env->tlb->dtlb[0][idx].tr;
+
+ case TO_SPR(1, 768) ... TO_SPR(1, 895): /* DTLBW1MR 0-127 */
+ case TO_SPR(1, 896) ... TO_SPR(1, 1023): /* DTLBW1TR 0-127 */
+ case TO_SPR(1, 1024) ... TO_SPR(1, 1151): /* DTLBW2MR 0-127 */
+ case TO_SPR(1, 1152) ... TO_SPR(1, 1279): /* DTLBW2TR 0-127 */
+ case TO_SPR(1, 1280) ... TO_SPR(1, 1407): /* DTLBW3MR 0-127 */
+ case TO_SPR(1, 1408) ... TO_SPR(1, 1535): /* DTLBW3TR 0-127 */
+ break;
+
+ case TO_SPR(2, 512) ... TO_SPR(2, 512+ITLB_SIZE-1): /* ITLBW0MR 0-127 */
+ idx = spr - TO_SPR(2, 512);
+ return env->tlb->itlb[0][idx].mr;
+
+ case TO_SPR(2, 640) ... TO_SPR(2, 640+ITLB_SIZE-1): /* ITLBW0TR 0-127 */
+ idx = spr - TO_SPR(2, 640);
+ return env->tlb->itlb[0][idx].tr;
+
+ case TO_SPR(2, 768) ... TO_SPR(2, 895): /* ITLBW1MR 0-127 */
+ case TO_SPR(2, 896) ... TO_SPR(2, 1023): /* ITLBW1TR 0-127 */
+ case TO_SPR(2, 1024) ... TO_SPR(2, 1151): /* ITLBW2MR 0-127 */
+ case TO_SPR(2, 1152) ... TO_SPR(2, 1279): /* ITLBW2TR 0-127 */
+ case TO_SPR(2, 1280) ... TO_SPR(2, 1407): /* ITLBW3MR 0-127 */
+ case TO_SPR(2, 1408) ... TO_SPR(2, 1535): /* ITLBW3TR 0-127 */
+ break;
+
+ case TO_SPR(9, 0): /* PICMR */
+ return env->picmr;
+
+ case TO_SPR(9, 2): /* PICSR */
+ return env->picsr;
+
+ case TO_SPR(10, 0): /* TTMR */
+ return env->ttmr;
+
+ case TO_SPR(10, 1): /* TTCR */
+ cpu_openrisc_count_update(cpu);
+ return env->ttcr;
+
+ default:
+ break;
+ }
+#endif
+
+/* If we later need to add tracepoints (or debug printfs) for the return
+ * value, it may be useful to restructure the code like this:
+ *
+ *     target_ulong ret = 0;
+ *
+ *     switch (spr) {
+ *     case x:
+ *         ret = y;
+ *         break;
+ *     case z:
+ *         ret = 42;
+ *         break;
+ *     ...
+ *     }
+ *
+ * and then add something like trace_spr_read(ret) just before
+ * returning ret.
+ */
+
+ /* rd is passed in; if no SPR matched above, just hand it back unchanged. */
+ return rd;
+}
diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c
new file mode 100644
index 0000000000..229361aed1
--- /dev/null
+++ b/target/openrisc/translate.c
@@ -0,0 +1,1783 @@
+/*
+ * OpenRISC translation
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ * Feng Gao <gf91597@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "disas/disas.h"
+#include "tcg-op.h"
+#include "qemu-common.h"
+#include "qemu/log.h"
+#include "qemu/bitops.h"
+#include "exec/cpu_ldst.h"
+
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+
+#include "trace-tcg.h"
+#include "exec/log.h"
+
+
+#define OPENRISC_DISAS
+
+#ifdef OPENRISC_DISAS
+# define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
+#else
+# define LOG_DIS(...) do { } while (0)
+#endif
+
+typedef struct DisasContext {
+ TranslationBlock *tb;
+ target_ulong pc, ppc, npc;
+ uint32_t tb_flags, synced_flags, flags;
+ uint32_t is_jmp;
+ uint32_t mem_idx;
+ int singlestep_enabled;
+ uint32_t delayed_branch;
+} DisasContext;
+
+static TCGv_env cpu_env;
+static TCGv cpu_sr;
+static TCGv cpu_R[32];
+static TCGv cpu_pc;
+static TCGv jmp_pc; /* l.jr/l.jalr temp pc */
+static TCGv cpu_npc;
+static TCGv cpu_ppc;
+static TCGv_i32 env_btaken; /* bf/bnf, F flag taken */
+static TCGv_i32 fpcsr;
+static TCGv machi, maclo;
+static TCGv fpmaddhi, fpmaddlo;
+static TCGv_i32 env_flags;
+#include "exec/gen-icount.h"
+
+void openrisc_translate_init(void)
+{
+ static const char * const regnames[] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+ "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
+ };
+ int i;
+
+ cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+ tcg_ctx.tcg_env = cpu_env;
+ cpu_sr = tcg_global_mem_new(cpu_env,
+ offsetof(CPUOpenRISCState, sr), "sr");
+ env_flags = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUOpenRISCState, flags),
+ "flags");
+ cpu_pc = tcg_global_mem_new(cpu_env,
+ offsetof(CPUOpenRISCState, pc), "pc");
+ cpu_npc = tcg_global_mem_new(cpu_env,
+ offsetof(CPUOpenRISCState, npc), "npc");
+ cpu_ppc = tcg_global_mem_new(cpu_env,
+ offsetof(CPUOpenRISCState, ppc), "ppc");
+ jmp_pc = tcg_global_mem_new(cpu_env,
+ offsetof(CPUOpenRISCState, jmp_pc), "jmp_pc");
+ env_btaken = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUOpenRISCState, btaken),
+ "btaken");
+ fpcsr = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUOpenRISCState, fpcsr),
+ "fpcsr");
+ machi = tcg_global_mem_new(cpu_env,
+ offsetof(CPUOpenRISCState, machi),
+ "machi");
+ maclo = tcg_global_mem_new(cpu_env,
+ offsetof(CPUOpenRISCState, maclo),
+ "maclo");
+ fpmaddhi = tcg_global_mem_new(cpu_env,
+ offsetof(CPUOpenRISCState, fpmaddhi),
+ "fpmaddhi");
+ fpmaddlo = tcg_global_mem_new(cpu_env,
+ offsetof(CPUOpenRISCState, fpmaddlo),
+ "fpmaddlo");
+ for (i = 0; i < 32; i++) {
+ cpu_R[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUOpenRISCState, gpr[i]),
+ regnames[i]);
+ }
+}
+
+/* Writeback SR_F translation space to execution space. */
+static inline void wb_SR_F(void)
+{
+ TCGLabel *label = gen_new_label();
+ tcg_gen_andi_tl(cpu_sr, cpu_sr, ~SR_F);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, label);
+ tcg_gen_ori_tl(cpu_sr, cpu_sr, SR_F);
+ gen_set_label(label);
+}
+
+static inline int zero_extend(unsigned int val, int width)
+{
+ return val & ((1 << width) - 1);
+}
+
+static inline int sign_extend(unsigned int val, int width)
+{
+ int sval;
+
+ /* LSL */
+ val <<= TARGET_LONG_BITS - width;
+ sval = val;
+ /* ASR. */
+ sval >>= TARGET_LONG_BITS - width;
+ return sval;
+}
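(Editorial note, not part of the patch) sign_extend widens a width-bit field by shifting it to the top of the target word and arithmetic-shifting it back down. For illustration, with 32-bit target registers:

    sign_extend(0x3f, 6)   /* == -1, sign bit of the 6-bit field set */
    sign_extend(0x1f, 6)   /* == 0x1f, sign bit clear                */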
+
+static inline void gen_sync_flags(DisasContext *dc)
+{
+ /* Sync the tb dependent flag between translate and runtime. */
+ if (dc->tb_flags != dc->synced_flags) {
+ tcg_gen_movi_tl(env_flags, dc->tb_flags);
+ dc->synced_flags = dc->tb_flags;
+ }
+}
+
+static void gen_exception(DisasContext *dc, unsigned int excp)
+{
+ TCGv_i32 tmp = tcg_const_i32(excp);
+ gen_helper_exception(cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+}
+
+static void gen_illegal_exception(DisasContext *dc)
+{
+ tcg_gen_movi_tl(cpu_pc, dc->pc);
+ gen_exception(dc, EXCP_ILLEGAL);
+ dc->is_jmp = DISAS_UPDATE;
+}
+
+/* not used yet, open it when we need or64. */
+/*#ifdef TARGET_OPENRISC64
+static void check_ob64s(DisasContext *dc)
+{
+ if (!(dc->flags & CPUCFGR_OB64S)) {
+ gen_illegal_exception(dc);
+ }
+}
+
+static void check_of64s(DisasContext *dc)
+{
+ if (!(dc->flags & CPUCFGR_OF64S)) {
+ gen_illegal_exception(dc);
+ }
+}
+
+static void check_ov64s(DisasContext *dc)
+{
+ if (!(dc->flags & CPUCFGR_OV64S)) {
+ gen_illegal_exception(dc);
+ }
+}
+#endif*/
+
+static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
+{
+ if (unlikely(dc->singlestep_enabled)) {
+ return false;
+ }
+
+#ifndef CONFIG_USER_ONLY
+ return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
+#else
+ return true;
+#endif
+}
+
+static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
+{
+ if (use_goto_tb(dc, dest)) {
+ tcg_gen_movi_tl(cpu_pc, dest);
+ tcg_gen_goto_tb(n);
+ tcg_gen_exit_tb((uintptr_t)dc->tb + n);
+ } else {
+ tcg_gen_movi_tl(cpu_pc, dest);
+ if (dc->singlestep_enabled) {
+ gen_exception(dc, EXCP_DEBUG);
+ }
+ tcg_gen_exit_tb(0);
+ }
+}
+
+static void gen_jump(DisasContext *dc, uint32_t imm, uint32_t reg, uint32_t op0)
+{
+ target_ulong tmp_pc;
+ /* N26: 26-bit immediate */
+ tmp_pc = sign_extend((imm<<2), 26) + dc->pc;
+
+ switch (op0) {
+ case 0x00: /* l.j */
+ tcg_gen_movi_tl(jmp_pc, tmp_pc);
+ break;
+ case 0x01: /* l.jal */
+ tcg_gen_movi_tl(cpu_R[9], (dc->pc + 8));
+ tcg_gen_movi_tl(jmp_pc, tmp_pc);
+ break;
+ case 0x03: /* l.bnf */
+ case 0x04: /* l.bf */
+ {
+ TCGLabel *lab = gen_new_label();
+ TCGv sr_f = tcg_temp_new();
+ tcg_gen_movi_tl(jmp_pc, dc->pc+8);
+ tcg_gen_andi_tl(sr_f, cpu_sr, SR_F);
+ tcg_gen_brcondi_i32(op0 == 0x03 ? TCG_COND_EQ : TCG_COND_NE,
+ sr_f, SR_F, lab);
+ tcg_gen_movi_tl(jmp_pc, tmp_pc);
+ gen_set_label(lab);
+ tcg_temp_free(sr_f);
+ }
+ break;
+ case 0x11: /* l.jr */
+ tcg_gen_mov_tl(jmp_pc, cpu_R[reg]);
+ break;
+ case 0x12: /* l.jalr */
+ tcg_gen_movi_tl(cpu_R[9], (dc->pc + 8));
+ tcg_gen_mov_tl(jmp_pc, cpu_R[reg]);
+ break;
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+
+ dc->delayed_branch = 2;
+ dc->tb_flags |= D_FLAG;
+ gen_sync_flags(dc);
+}
+
+
+static void dec_calc(DisasContext *dc, uint32_t insn)
+{
+ uint32_t op0, op1, op2;
+ uint32_t ra, rb, rd;
+ op0 = extract32(insn, 0, 4);
+ op1 = extract32(insn, 8, 2);
+ op2 = extract32(insn, 6, 2);
+ ra = extract32(insn, 16, 5);
+ rb = extract32(insn, 11, 5);
+ rd = extract32(insn, 21, 5);
+
+ switch (op0) {
+ case 0x0000:
+ switch (op1) {
+ case 0x00: /* l.add */
+ LOG_DIS("l.add r%d, r%d, r%d\n", rd, ra, rb);
+ {
+ TCGLabel *lab = gen_new_label();
+ TCGv_i64 ta = tcg_temp_new_i64();
+ TCGv_i64 tb = tcg_temp_new_i64();
+ TCGv_i64 td = tcg_temp_local_new_i64();
+ TCGv_i32 res = tcg_temp_local_new_i32();
+ TCGv_i32 sr_ove = tcg_temp_local_new_i32();
+ tcg_gen_extu_i32_i64(ta, cpu_R[ra]);
+ tcg_gen_extu_i32_i64(tb, cpu_R[rb]);
+ tcg_gen_add_i64(td, ta, tb);
+ tcg_gen_extrl_i64_i32(res, td);
+ tcg_gen_shri_i64(td, td, 31);
+ tcg_gen_andi_i64(td, td, 0x3);
+ /* Jump to lab when no overflow. */
+ tcg_gen_brcondi_i64(TCG_COND_EQ, td, 0x0, lab);
+ tcg_gen_brcondi_i64(TCG_COND_EQ, td, 0x3, lab);
+ tcg_gen_ori_i32(cpu_sr, cpu_sr, (SR_OV | SR_CY));
+ tcg_gen_andi_i32(sr_ove, cpu_sr, SR_OVE);
+ tcg_gen_brcondi_i32(TCG_COND_NE, sr_ove, SR_OVE, lab);
+ gen_exception(dc, EXCP_RANGE);
+ gen_set_label(lab);
+ tcg_gen_mov_i32(cpu_R[rd], res);
+ tcg_temp_free_i64(ta);
+ tcg_temp_free_i64(tb);
+ tcg_temp_free_i64(td);
+ tcg_temp_free_i32(res);
+ tcg_temp_free_i32(sr_ove);
+ }
+ break;
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ case 0x0001: /* l.addc */
+ switch (op1) {
+ case 0x00:
+ LOG_DIS("l.addc r%d, r%d, r%d\n", rd, ra, rb);
+ {
+ TCGLabel *lab = gen_new_label();
+ TCGv_i64 ta = tcg_temp_new_i64();
+ TCGv_i64 tb = tcg_temp_new_i64();
+ TCGv_i64 tcy = tcg_temp_local_new_i64();
+ TCGv_i64 td = tcg_temp_local_new_i64();
+ TCGv_i32 res = tcg_temp_local_new_i32();
+ TCGv_i32 sr_cy = tcg_temp_local_new_i32();
+ TCGv_i32 sr_ove = tcg_temp_local_new_i32();
+ tcg_gen_extu_i32_i64(ta, cpu_R[ra]);
+ tcg_gen_extu_i32_i64(tb, cpu_R[rb]);
+ tcg_gen_andi_i32(sr_cy, cpu_sr, SR_CY);
+ tcg_gen_extu_i32_i64(tcy, sr_cy);
+ tcg_gen_shri_i64(tcy, tcy, 10);
+ tcg_gen_add_i64(td, ta, tb);
+ tcg_gen_add_i64(td, td, tcy);
+ tcg_gen_extrl_i64_i32(res, td);
+ tcg_gen_shri_i64(td, td, 32);
+ tcg_gen_andi_i64(td, td, 0x3);
+ /* Jump to lab when no overflow. */
+ tcg_gen_brcondi_i64(TCG_COND_EQ, td, 0x0, lab);
+ tcg_gen_brcondi_i64(TCG_COND_EQ, td, 0x3, lab);
+ tcg_gen_ori_i32(cpu_sr, cpu_sr, (SR_OV | SR_CY));
+ tcg_gen_andi_i32(sr_ove, cpu_sr, SR_OVE);
+ tcg_gen_brcondi_i32(TCG_COND_NE, sr_ove, SR_OVE, lab);
+ gen_exception(dc, EXCP_RANGE);
+ gen_set_label(lab);
+ tcg_gen_mov_i32(cpu_R[rd], res);
+ tcg_temp_free_i64(ta);
+ tcg_temp_free_i64(tb);
+ tcg_temp_free_i64(tcy);
+ tcg_temp_free_i64(td);
+ tcg_temp_free_i32(res);
+ tcg_temp_free_i32(sr_cy);
+ tcg_temp_free_i32(sr_ove);
+ }
+ break;
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ case 0x0002: /* l.sub */
+ switch (op1) {
+ case 0x00:
+ LOG_DIS("l.sub r%d, r%d, r%d\n", rd, ra, rb);
+ {
+ TCGLabel *lab = gen_new_label();
+ TCGv_i64 ta = tcg_temp_new_i64();
+ TCGv_i64 tb = tcg_temp_new_i64();
+ TCGv_i64 td = tcg_temp_local_new_i64();
+ TCGv_i32 res = tcg_temp_local_new_i32();
+ TCGv_i32 sr_ove = tcg_temp_local_new_i32();
+
+ tcg_gen_extu_i32_i64(ta, cpu_R[ra]);
+ tcg_gen_extu_i32_i64(tb, cpu_R[rb]);
+ tcg_gen_sub_i64(td, ta, tb);
+ tcg_gen_extrl_i64_i32(res, td);
+ tcg_gen_shri_i64(td, td, 31);
+ tcg_gen_andi_i64(td, td, 0x3);
+ /* Jump to lab when no overflow. */
+ tcg_gen_brcondi_i64(TCG_COND_EQ, td, 0x0, lab);
+ tcg_gen_brcondi_i64(TCG_COND_EQ, td, 0x3, lab);
+ tcg_gen_ori_i32(cpu_sr, cpu_sr, (SR_OV | SR_CY));
+ tcg_gen_andi_i32(sr_ove, cpu_sr, SR_OVE);
+ tcg_gen_brcondi_i32(TCG_COND_NE, sr_ove, SR_OVE, lab);
+ gen_exception(dc, EXCP_RANGE);
+ gen_set_label(lab);
+ tcg_gen_mov_i32(cpu_R[rd], res);
+ tcg_temp_free_i64(ta);
+ tcg_temp_free_i64(tb);
+ tcg_temp_free_i64(td);
+ tcg_temp_free_i32(res);
+ tcg_temp_free_i32(sr_ove);
+ }
+ break;
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ case 0x0003: /* l.and */
+ switch (op1) {
+ case 0x00:
+ LOG_DIS("l.and r%d, r%d, r%d\n", rd, ra, rb);
+ tcg_gen_and_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
+ break;
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ case 0x0004: /* l.or */
+ switch (op1) {
+ case 0x00:
+ LOG_DIS("l.or r%d, r%d, r%d\n", rd, ra, rb);
+ tcg_gen_or_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
+ break;
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ case 0x0005:
+ switch (op1) {
+ case 0x00: /* l.xor */
+ LOG_DIS("l.xor r%d, r%d, r%d\n", rd, ra, rb);
+ tcg_gen_xor_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
+ break;
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ case 0x0006:
+ switch (op1) {
+ case 0x03: /* l.mul */
+ LOG_DIS("l.mul r%d, r%d, r%d\n", rd, ra, rb);
+ if (ra != 0 && rb != 0) {
+ gen_helper_mul32(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
+ } else {
+ tcg_gen_movi_tl(cpu_R[rd], 0x0);
+ }
+ break;
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ case 0x0009:
+ switch (op1) {
+ case 0x03: /* l.div */
+ LOG_DIS("l.div r%d, r%d, r%d\n", rd, ra, rb);
+ {
+ TCGLabel *lab0 = gen_new_label();
+ TCGLabel *lab1 = gen_new_label();
+ TCGLabel *lab2 = gen_new_label();
+ TCGLabel *lab3 = gen_new_label();
+ TCGv_i32 sr_ove = tcg_temp_local_new_i32();
+ if (rb == 0) {
+ tcg_gen_ori_tl(cpu_sr, cpu_sr, (SR_OV | SR_CY));
+ tcg_gen_andi_tl(sr_ove, cpu_sr, SR_OVE);
+ tcg_gen_brcondi_tl(TCG_COND_NE, sr_ove, SR_OVE, lab0);
+ gen_exception(dc, EXCP_RANGE);
+ gen_set_label(lab0);
+ } else {
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_R[rb],
+ 0x00000000, lab1);
+ tcg_gen_brcondi_tl(TCG_COND_NE, cpu_R[ra],
+ 0x80000000, lab2);
+ tcg_gen_brcondi_tl(TCG_COND_NE, cpu_R[rb],
+ 0xffffffff, lab2);
+ gen_set_label(lab1);
+ tcg_gen_ori_tl(cpu_sr, cpu_sr, (SR_OV | SR_CY));
+ tcg_gen_andi_tl(sr_ove, cpu_sr, SR_OVE);
+ tcg_gen_brcondi_tl(TCG_COND_NE, sr_ove, SR_OVE, lab3);
+ gen_exception(dc, EXCP_RANGE);
+ gen_set_label(lab2);
+ tcg_gen_div_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
+ gen_set_label(lab3);
+ }
+ tcg_temp_free_i32(sr_ove);
+ }
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ case 0x000a:
+ switch (op1) {
+ case 0x03: /* l.divu */
+ LOG_DIS("l.divu r%d, r%d, r%d\n", rd, ra, rb);
+ {
+ TCGLabel *lab0 = gen_new_label();
+ TCGLabel *lab1 = gen_new_label();
+ TCGLabel *lab2 = gen_new_label();
+ TCGv_i32 sr_ove = tcg_temp_local_new_i32();
+ if (rb == 0) {
+ tcg_gen_ori_tl(cpu_sr, cpu_sr, (SR_OV | SR_CY));
+ tcg_gen_andi_tl(sr_ove, cpu_sr, SR_OVE);
+ tcg_gen_brcondi_tl(TCG_COND_NE, sr_ove, SR_OVE, lab0);
+ gen_exception(dc, EXCP_RANGE);
+ gen_set_label(lab0);
+ } else {
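+ /* For unsigned division only a zero divisor overflows. */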
+ tcg_gen_brcondi_tl(TCG_COND_NE, cpu_R[rb],
+ 0x00000000, lab1);
+ tcg_gen_ori_tl(cpu_sr, cpu_sr, (SR_OV | SR_CY));
+ tcg_gen_andi_tl(sr_ove, cpu_sr, SR_OVE);
+ tcg_gen_brcondi_tl(TCG_COND_NE, sr_ove, SR_OVE, lab2);
+ gen_exception(dc, EXCP_RANGE);
+ gen_set_label(lab1);
+ tcg_gen_divu_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
+ gen_set_label(lab2);
+ }
+ tcg_temp_free_i32(sr_ove);
+ }
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ case 0x000b:
+ switch (op1) {
+ case 0x03: /* l.mulu */
+ LOG_DIS("l.mulu r%d, r%d, r%d\n", rd, ra, rb);
+ if (rb != 0 && ra != 0) {
+ TCGv_i64 result = tcg_temp_local_new_i64();
+ TCGv_i64 tra = tcg_temp_local_new_i64();
+ TCGv_i64 trb = tcg_temp_local_new_i64();
+ TCGv_i64 high = tcg_temp_new_i64();
+ TCGv_i32 sr_ove = tcg_temp_local_new_i32();
+ TCGLabel *lab = gen_new_label();
+ /* Compute the full 64-bit product of ra and rb. */
+ tcg_gen_extu_i32_i64(tra, cpu_R[ra]);
+ tcg_gen_extu_i32_i64(trb, cpu_R[rb]);
+ tcg_gen_mul_i64(result, tra, trb);
+ tcg_temp_free_i64(tra);
+ tcg_temp_free_i64(trb);
+ tcg_gen_shri_i64(high, result, TARGET_LONG_BITS);
+ /* Overflow occurred if the high 32 bits of the product are non-zero. */
+ tcg_gen_brcondi_i64(TCG_COND_EQ, high, 0x00000000, lab);
+ tcg_gen_ori_tl(cpu_sr, cpu_sr, (SR_OV | SR_CY));
+ tcg_gen_andi_tl(sr_ove, cpu_sr, SR_OVE);
+ tcg_gen_brcondi_tl(TCG_COND_NE, sr_ove, SR_OVE, lab);
+ gen_exception(dc, EXCP_RANGE);
+ gen_set_label(lab);
+ tcg_temp_free_i64(high);
+ tcg_gen_trunc_i64_tl(cpu_R[rd], result);
+ tcg_temp_free_i64(result);
+ tcg_temp_free_i32(sr_ove);
+ } else {
+ tcg_gen_movi_tl(cpu_R[rd], 0);
+ }
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ case 0x000e:
+ switch (op1) {
+ case 0x00: /* l.cmov */
+ LOG_DIS("l.cmov r%d, r%d, r%d\n", rd, ra, rb);
+ {
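+ /* rd = SR[F] ? ra : rb, selected with a conditional branch. */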
+ TCGLabel *lab = gen_new_label();
+ TCGv res = tcg_temp_local_new();
+ TCGv sr_f = tcg_temp_new();
+ tcg_gen_andi_tl(sr_f, cpu_sr, SR_F);
+ tcg_gen_mov_tl(res, cpu_R[rb]);
+ tcg_gen_brcondi_tl(TCG_COND_NE, sr_f, SR_F, lab);
+ tcg_gen_mov_tl(res, cpu_R[ra]);
+ gen_set_label(lab);
+ tcg_gen_mov_tl(cpu_R[rd], res);
+ tcg_temp_free(sr_f);
+ tcg_temp_free(res);
+ }
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ case 0x000f:
+ switch (op1) {
+ case 0x00: /* l.ff1 */
+ LOG_DIS("l.ff1 r%d, r%d, r%d\n", rd, ra, rb);
+ gen_helper_ff1(cpu_R[rd], cpu_R[ra]);
+ break;
+ case 0x01: /* l.fl1 */
+ LOG_DIS("l.fl1 r%d, r%d, r%d\n", rd, ra, rb);
+ gen_helper_fl1(cpu_R[rd], cpu_R[ra]);
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ case 0x0008:
+ switch (op1) {
+ case 0x00:
+ switch (op2) {
+ case 0x00: /* l.sll */
+ LOG_DIS("l.sll r%d, r%d, r%d\n", rd, ra, rb);
+ tcg_gen_shl_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
+ break;
+ case 0x01: /* l.srl */
+ LOG_DIS("l.srl r%d, r%d, r%d\n", rd, ra, rb);
+ tcg_gen_shr_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
+ break;
+ case 0x02: /* l.sra */
+ LOG_DIS("l.sra r%d, r%d, r%d\n", rd, ra, rb);
+ tcg_gen_sar_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
+ break;
+ case 0x03: /* l.ror */
+ LOG_DIS("l.ror r%d, r%d, r%d\n", rd, ra, rb);
+ tcg_gen_rotr_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ case 0x000c:
+ switch (op1) {
+ case 0x00:
+ switch (op2) {
+ case 0x00: /* l.exths */
+ LOG_DIS("l.exths r%d, r%d\n", rd, ra);
+ tcg_gen_ext16s_tl(cpu_R[rd], cpu_R[ra]);
+ break;
+ case 0x01: /* l.extbs */
+ LOG_DIS("l.extbs r%d, r%d\n", rd, ra);
+ tcg_gen_ext8s_tl(cpu_R[rd], cpu_R[ra]);
+ break;
+ case 0x02: /* l.exthz */
+ LOG_DIS("l.exthz r%d, r%d\n", rd, ra);
+ tcg_gen_ext16u_tl(cpu_R[rd], cpu_R[ra]);
+ break;
+ case 0x03: /* l.extbz */
+ LOG_DIS("l.extbz r%d, r%d\n", rd, ra);
+ tcg_gen_ext8u_tl(cpu_R[rd], cpu_R[ra]);
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ case 0x000d:
+ switch (op1) {
+ case 0x00:
+ switch (op2) {
+ case 0x00: /* l.extws */
+ LOG_DIS("l.extws r%d, r%d\n", rd, ra);
+ tcg_gen_ext32s_tl(cpu_R[rd], cpu_R[ra]);
+ break;
+ case 0x01: /* l.extwz */
+ LOG_DIS("l.extwz r%d, r%d\n", rd, ra);
+ tcg_gen_ext32u_tl(cpu_R[rd], cpu_R[ra]);
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+}
+
+static void dec_misc(DisasContext *dc, uint32_t insn)
+{
+ uint32_t op0, op1;
+ uint32_t ra, rb, rd;
+#ifdef OPENRISC_DISAS
+ uint32_t L6, K5;
+#endif
+ uint32_t I16, I5, I11, N26, tmp;
+ TCGMemOp mop;
+
+ op0 = extract32(insn, 26, 6);
+ op1 = extract32(insn, 24, 2);
+ ra = extract32(insn, 16, 5);
+ rb = extract32(insn, 11, 5);
+ rd = extract32(insn, 21, 5);
+#ifdef OPENRISC_DISAS
+ L6 = extract32(insn, 5, 6);
+ K5 = extract32(insn, 0, 5);
+#endif
+ I16 = extract32(insn, 0, 16);
+ I5 = extract32(insn, 21, 5);
+ I11 = extract32(insn, 0, 11);
+ N26 = extract32(insn, 0, 26);
+ tmp = (I5<<11) + I11;
+
+ switch (op0) {
+ case 0x00: /* l.j */
+ LOG_DIS("l.j %d\n", N26);
+ gen_jump(dc, N26, 0, op0);
+ break;
+
+ case 0x01: /* l.jal */
+ LOG_DIS("l.jal %d\n", N26);
+ gen_jump(dc, N26, 0, op0);
+ break;
+
+ case 0x03: /* l.bnf */
+ LOG_DIS("l.bnf %d\n", N26);
+ gen_jump(dc, N26, 0, op0);
+ break;
+
+ case 0x04: /* l.bf */
+ LOG_DIS("l.bf %d\n", N26);
+ gen_jump(dc, N26, 0, op0);
+ break;
+
+ case 0x05:
+ switch (op1) {
+ case 0x01: /* l.nop */
+ LOG_DIS("l.nop %d\n", I16);
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ case 0x11: /* l.jr */
+ LOG_DIS("l.jr r%d\n", rb);
+ gen_jump(dc, 0, rb, op0);
+ break;
+
+ case 0x12: /* l.jalr */
+ LOG_DIS("l.jalr r%d\n", rb);
+ gen_jump(dc, 0, rb, op0);
+ break;
+
+ case 0x13: /* l.maci */
+ LOG_DIS("l.maci %d, r%d, %d\n", I5, ra, I11);
+ {
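+ /* Multiply ra by the immediate and add the sign-extended product to {machi, maclo}. */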
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i32 dst = tcg_temp_new_i32();
+ TCGv ttmp = tcg_const_tl(tmp);
+ tcg_gen_mul_tl(dst, cpu_R[ra], ttmp);
+ tcg_gen_ext_i32_i64(t1, dst);
+ tcg_gen_concat_i32_i64(t2, maclo, machi);
+ tcg_gen_add_i64(t2, t2, t1);
+ tcg_gen_extrl_i64_i32(maclo, t2);
+ tcg_gen_shri_i64(t2, t2, 32);
+ tcg_gen_extrl_i64_i32(machi, t2);
+ tcg_temp_free_i32(dst);
+ tcg_temp_free(ttmp);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ }
+ break;
+
+ case 0x09: /* l.rfe */
+ LOG_DIS("l.rfe\n");
+ {
+#if defined(CONFIG_USER_ONLY)
+ return;
+#else
+ if (dc->mem_idx == MMU_USER_IDX) {
+ gen_illegal_exception(dc);
+ return;
+ }
+ gen_helper_rfe(cpu_env);
+ dc->is_jmp = DISAS_UPDATE;
+#endif
+ }
+ break;
+
+ case 0x1c: /* l.cust1 */
+ LOG_DIS("l.cust1\n");
+ break;
+
+ case 0x1d: /* l.cust2 */
+ LOG_DIS("l.cust2\n");
+ break;
+
+ case 0x1e: /* l.cust3 */
+ LOG_DIS("l.cust3\n");
+ break;
+
+ case 0x1f: /* l.cust4 */
+ LOG_DIS("l.cust4\n");
+ break;
+
+ case 0x3c: /* l.cust5 */
+ LOG_DIS("l.cust5 r%d, r%d, r%d, %d, %d\n", rd, ra, rb, L6, K5);
+ break;
+
+ case 0x3d: /* l.cust6 */
+ LOG_DIS("l.cust6\n");
+ break;
+
+ case 0x3e: /* l.cust7 */
+ LOG_DIS("l.cust7\n");
+ break;
+
+ case 0x3f: /* l.cust8 */
+ LOG_DIS("l.cust8\n");
+ break;
+
+/* Not used yet; enable this when or64 support is needed. */
+/*#ifdef TARGET_OPENRISC64
+ case 0x20: l.ld
+ LOG_DIS("l.ld r%d, r%d, %d\n", rd, ra, I16);
+ check_ob64s(dc);
+ mop = MO_TEQ;
+ goto do_load;
+#endif*/
+
+ case 0x21: /* l.lwz */
+ LOG_DIS("l.lwz r%d, r%d, %d\n", rd, ra, I16);
+ mop = MO_TEUL;
+ goto do_load;
+
+ case 0x22: /* l.lws */
+ LOG_DIS("l.lws r%d, r%d, %d\n", rd, ra, I16);
+ mop = MO_TESL;
+ goto do_load;
+
+ case 0x23: /* l.lbz */
+ LOG_DIS("l.lbz r%d, r%d, %d\n", rd, ra, I16);
+ mop = MO_UB;
+ goto do_load;
+
+ case 0x24: /* l.lbs */
+ LOG_DIS("l.lbs r%d, r%d, %d\n", rd, ra, I16);
+ mop = MO_SB;
+ goto do_load;
+
+ case 0x25: /* l.lhz */
+ LOG_DIS("l.lhz r%d, r%d, %d\n", rd, ra, I16);
+ mop = MO_TEUW;
+ goto do_load;
+
+ case 0x26: /* l.lhs */
+ LOG_DIS("l.lhs r%d, r%d, %d\n", rd, ra, I16);
+ mop = MO_TESW;
+ goto do_load;
+
+ do_load:
+ {
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_addi_tl(t0, cpu_R[ra], sign_extend(I16, 16));
+ tcg_gen_qemu_ld_tl(cpu_R[rd], t0, dc->mem_idx, mop);
+ tcg_temp_free(t0);
+ }
+ break;
+
+ case 0x27: /* l.addi */
+ LOG_DIS("l.addi r%d, r%d, %d\n", rd, ra, I16);
+ {
+ if (I16 == 0) {
+ tcg_gen_mov_tl(cpu_R[rd], cpu_R[ra]);
+ } else {
+ TCGLabel *lab = gen_new_label();
+ TCGv_i64 ta = tcg_temp_new_i64();
+ TCGv_i64 td = tcg_temp_local_new_i64();
+ TCGv_i32 res = tcg_temp_local_new_i32();
+ TCGv_i32 sr_ove = tcg_temp_local_new_i32();
+ tcg_gen_extu_i32_i64(ta, cpu_R[ra]);
+ tcg_gen_addi_i64(td, ta, sign_extend(I16, 16));
+ tcg_gen_extrl_i64_i32(res, td);
+ tcg_gen_shri_i64(td, td, 32);
+ tcg_gen_andi_i64(td, td, 0x3);
+ /* Jump to lab when no overflow. */
+ tcg_gen_brcondi_i64(TCG_COND_EQ, td, 0x0, lab);
+ tcg_gen_brcondi_i64(TCG_COND_EQ, td, 0x3, lab);
+ tcg_gen_ori_i32(cpu_sr, cpu_sr, (SR_OV | SR_CY));
+ tcg_gen_andi_i32(sr_ove, cpu_sr, SR_OVE);
+ tcg_gen_brcondi_i32(TCG_COND_NE, sr_ove, SR_OVE, lab);
+ gen_exception(dc, EXCP_RANGE);
+ gen_set_label(lab);
+ tcg_gen_mov_i32(cpu_R[rd], res);
+ tcg_temp_free_i64(ta);
+ tcg_temp_free_i64(td);
+ tcg_temp_free_i32(res);
+ tcg_temp_free_i32(sr_ove);
+ }
+ }
+ break;
+
+ case 0x28: /* l.addic */
+ LOG_DIS("l.addic r%d, r%d, %d\n", rd, ra, I16);
+ {
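+ /* Add with carry-in: SR[CY] (bit 10 of SR) is added to ra + immediate. */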
+ TCGLabel *lab = gen_new_label();
+ TCGv_i64 ta = tcg_temp_new_i64();
+ TCGv_i64 td = tcg_temp_local_new_i64();
+ TCGv_i64 tcy = tcg_temp_local_new_i64();
+ TCGv_i32 res = tcg_temp_local_new_i32();
+ TCGv_i32 sr_cy = tcg_temp_local_new_i32();
+ TCGv_i32 sr_ove = tcg_temp_local_new_i32();
+ tcg_gen_extu_i32_i64(ta, cpu_R[ra]);
+ tcg_gen_andi_i32(sr_cy, cpu_sr, SR_CY);
+ tcg_gen_shri_i32(sr_cy, sr_cy, 10);
+ tcg_gen_extu_i32_i64(tcy, sr_cy);
+ tcg_gen_addi_i64(td, ta, sign_extend(I16, 16));
+ tcg_gen_add_i64(td, td, tcy);
+ tcg_gen_extrl_i64_i32(res, td);
+ tcg_gen_shri_i64(td, td, 32);
+ tcg_gen_andi_i64(td, td, 0x3);
+ /* Jump to lab when no overflow. */
+ tcg_gen_brcondi_i64(TCG_COND_EQ, td, 0x0, lab);
+ tcg_gen_brcondi_i64(TCG_COND_EQ, td, 0x3, lab);
+ tcg_gen_ori_i32(cpu_sr, cpu_sr, (SR_OV | SR_CY));
+ tcg_gen_andi_i32(sr_ove, cpu_sr, SR_OVE);
+ tcg_gen_brcondi_i32(TCG_COND_NE, sr_ove, SR_OVE, lab);
+ gen_exception(dc, EXCP_RANGE);
+ gen_set_label(lab);
+ tcg_gen_mov_i32(cpu_R[rd], res);
+ tcg_temp_free_i64(ta);
+ tcg_temp_free_i64(td);
+ tcg_temp_free_i64(tcy);
+ tcg_temp_free_i32(res);
+ tcg_temp_free_i32(sr_cy);
+ tcg_temp_free_i32(sr_ove);
+ }
+ break;
+
+ case 0x29: /* l.andi */
+ LOG_DIS("l.andi r%d, r%d, %d\n", rd, ra, I16);
+ tcg_gen_andi_tl(cpu_R[rd], cpu_R[ra], zero_extend(I16, 16));
+ break;
+
+ case 0x2a: /* l.ori */
+ LOG_DIS("l.ori r%d, r%d, %d\n", rd, ra, I16);
+ tcg_gen_ori_tl(cpu_R[rd], cpu_R[ra], zero_extend(I16, 16));
+ break;
+
+ case 0x2b: /* l.xori */
+ LOG_DIS("l.xori r%d, r%d, %d\n", rd, ra, I16);
+ tcg_gen_xori_tl(cpu_R[rd], cpu_R[ra], sign_extend(I16, 16));
+ break;
+
+ case 0x2c: /* l.muli */
+ LOG_DIS("l.muli r%d, r%d, %d\n", rd, ra, I16);
+ if (ra != 0 && I16 != 0) {
+ TCGv_i32 im = tcg_const_i32(I16);
+ gen_helper_mul32(cpu_R[rd], cpu_env, cpu_R[ra], im);
+ tcg_temp_free_i32(im);
+ } else {
+ tcg_gen_movi_tl(cpu_R[rd], 0x0);
+ }
+ break;
+
+ case 0x2d: /* l.mfspr */
+ LOG_DIS("l.mfspr r%d, r%d, %d\n", rd, ra, I16);
+ {
+#if defined(CONFIG_USER_ONLY)
+ return;
+#else
+ TCGv_i32 ti = tcg_const_i32(I16);
+ if (dc->mem_idx == MMU_USER_IDX) {
+ gen_illegal_exception(dc);
+ return;
+ }
+ gen_helper_mfspr(cpu_R[rd], cpu_env, cpu_R[rd], cpu_R[ra], ti);
+ tcg_temp_free_i32(ti);
+#endif
+ }
+ break;
+
+ case 0x30: /* l.mtspr */
+ LOG_DIS("l.mtspr %d, r%d, r%d, %d\n", I5, ra, rb, I11);
+ {
+#if defined(CONFIG_USER_ONLY)
+ return;
+#else
+ TCGv_i32 im = tcg_const_i32(tmp);
+ if (dc->mem_idx == MMU_USER_IDX) {
+ gen_illegal_exception(dc);
+ return;
+ }
+ gen_helper_mtspr(cpu_env, cpu_R[ra], cpu_R[rb], im);
+ tcg_temp_free_i32(im);
+#endif
+ }
+ break;
+
+/* Not used yet; enable this when or64 support is needed. */
+/*#ifdef TARGET_OPENRISC64
+ case 0x34: l.sd
+ LOG_DIS("l.sd %d, r%d, r%d, %d\n", I5, ra, rb, I11);
+ check_ob64s(dc);
+ mop = MO_TEQ;
+ goto do_store;
+#endif*/
+
+ case 0x35: /* l.sw */
+ LOG_DIS("l.sw %d, r%d, r%d, %d\n", I5, ra, rb, I11);
+ mop = MO_TEUL;
+ goto do_store;
+
+ case 0x36: /* l.sb */
+ LOG_DIS("l.sb %d, r%d, r%d, %d\n", I5, ra, rb, I11);
+ mop = MO_UB;
+ goto do_store;
+
+ case 0x37: /* l.sh */
+ LOG_DIS("l.sh %d, r%d, r%d, %d\n", I5, ra, rb, I11);
+ mop = MO_TEUW;
+ goto do_store;
+
+ do_store:
+ {
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_addi_tl(t0, cpu_R[ra], sign_extend(tmp, 16));
+ tcg_gen_qemu_st_tl(cpu_R[rb], t0, dc->mem_idx, mop);
+ tcg_temp_free(t0);
+ }
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+}
+
+static void dec_mac(DisasContext *dc, uint32_t insn)
+{
+ uint32_t op0;
+ uint32_t ra, rb;
+ op0 = extract32(insn, 0, 4);
+ ra = extract32(insn, 16, 5);
+ rb = extract32(insn, 11, 5);
+
+ switch (op0) {
+ case 0x0001: /* l.mac */
+ LOG_DIS("l.mac r%d, r%d\n", ra, rb);
+ {
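+ /* Sign-extend the 32-bit product of ra and rb and add it to {machi, maclo}. */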
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ tcg_gen_mul_tl(t0, cpu_R[ra], cpu_R[rb]);
+ tcg_gen_ext_i32_i64(t1, t0);
+ tcg_gen_concat_i32_i64(t2, maclo, machi);
+ tcg_gen_add_i64(t2, t2, t1);
+ tcg_gen_extrl_i64_i32(maclo, t2);
+ tcg_gen_shri_i64(t2, t2, 32);
+ tcg_gen_extrl_i64_i32(machi, t2);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ }
+ break;
+
+ case 0x0002: /* l.msb */
+ LOG_DIS("l.msb r%d, r%d\n", ra, rb);
+ {
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ tcg_gen_mul_tl(t0, cpu_R[ra], cpu_R[rb]);
+ tcg_gen_ext_i32_i64(t1, t0);
+ tcg_gen_concat_i32_i64(t2, maclo, machi);
+ tcg_gen_sub_i64(t2, t2, t1);
+ tcg_gen_extrl_i64_i32(maclo, t2);
+ tcg_gen_shri_i64(t2, t2, 32);
+ tcg_gen_extrl_i64_i32(machi, t2);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ }
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+}
+
+static void dec_logic(DisasContext *dc, uint32_t insn)
+{
+ uint32_t op0;
+ uint32_t rd, ra, L6;
+ op0 = extract32(insn, 6, 2);
+ rd = extract32(insn, 21, 5);
+ ra = extract32(insn, 16, 5);
+ L6 = extract32(insn, 0, 6);
+
+ switch (op0) {
+ case 0x00: /* l.slli */
+ LOG_DIS("l.slli r%d, r%d, %d\n", rd, ra, L6);
+ tcg_gen_shli_tl(cpu_R[rd], cpu_R[ra], (L6 & 0x1f));
+ break;
+
+ case 0x01: /* l.srli */
+ LOG_DIS("l.srli r%d, r%d, %d\n", rd, ra, L6);
+ tcg_gen_shri_tl(cpu_R[rd], cpu_R[ra], (L6 & 0x1f));
+ break;
+
+ case 0x02: /* l.srai */
+ LOG_DIS("l.srai r%d, r%d, %d\n", rd, ra, L6);
+ tcg_gen_sari_tl(cpu_R[rd], cpu_R[ra], (L6 & 0x1f));
+ break;
+
+ case 0x03: /* l.rori */
+ LOG_DIS("l.rori r%d, r%d, %d\n", rd, ra, L6);
+ tcg_gen_rotri_tl(cpu_R[rd], cpu_R[ra], (L6 & 0x1f));
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+}
+
+static void dec_M(DisasContext *dc, uint32_t insn)
+{
+ uint32_t op0;
+ uint32_t rd;
+ uint32_t K16;
+ op0 = extract32(insn, 16, 1);
+ rd = extract32(insn, 21, 5);
+ K16 = extract32(insn, 0, 16);
+
+ switch (op0) {
+ case 0x0: /* l.movhi */
+ LOG_DIS("l.movhi r%d, %d\n", rd, K16);
+ tcg_gen_movi_tl(cpu_R[rd], (K16 << 16));
+ break;
+
+ case 0x1: /* l.macrc */
+ LOG_DIS("l.macrc r%d\n", rd);
+ tcg_gen_mov_tl(cpu_R[rd], maclo);
+ tcg_gen_movi_tl(maclo, 0x0);
+ tcg_gen_movi_tl(machi, 0x0);
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+}
+
+static void dec_comp(DisasContext *dc, uint32_t insn)
+{
+ uint32_t op0;
+ uint32_t ra, rb;
+
+ op0 = extract32(insn, 21, 5);
+ ra = extract32(insn, 16, 5);
+ rb = extract32(insn, 11, 5);
+
+ tcg_gen_movi_i32(env_btaken, 0x0);
+ /* unsigned integers */
+ tcg_gen_ext32u_tl(cpu_R[ra], cpu_R[ra]);
+ tcg_gen_ext32u_tl(cpu_R[rb], cpu_R[rb]);
+
+ switch (op0) {
+ case 0x0: /* l.sfeq */
+ LOG_DIS("l.sfeq r%d, r%d\n", ra, rb);
+ tcg_gen_setcond_tl(TCG_COND_EQ, env_btaken, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x1: /* l.sfne */
+ LOG_DIS("l.sfne r%d, r%d\n", ra, rb);
+ tcg_gen_setcond_tl(TCG_COND_NE, env_btaken, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x2: /* l.sfgtu */
+ LOG_DIS("l.sfgtu r%d, r%d\n", ra, rb);
+ tcg_gen_setcond_tl(TCG_COND_GTU, env_btaken, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x3: /* l.sfgeu */
+ LOG_DIS("l.sfgeu r%d, r%d\n", ra, rb);
+ tcg_gen_setcond_tl(TCG_COND_GEU, env_btaken, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x4: /* l.sfltu */
+ LOG_DIS("l.sfltu r%d, r%d\n", ra, rb);
+ tcg_gen_setcond_tl(TCG_COND_LTU, env_btaken, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x5: /* l.sfleu */
+ LOG_DIS("l.sfleu r%d, r%d\n", ra, rb);
+ tcg_gen_setcond_tl(TCG_COND_LEU, env_btaken, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0xa: /* l.sfgts */
+ LOG_DIS("l.sfgts r%d, r%d\n", ra, rb);
+ tcg_gen_setcond_tl(TCG_COND_GT, env_btaken, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0xb: /* l.sfges */
+ LOG_DIS("l.sfges r%d, r%d\n", ra, rb);
+ tcg_gen_setcond_tl(TCG_COND_GE, env_btaken, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0xc: /* l.sflts */
+ LOG_DIS("l.sflts r%d, r%d\n", ra, rb);
+ tcg_gen_setcond_tl(TCG_COND_LT, env_btaken, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0xd: /* l.sfles */
+ LOG_DIS("l.sfles r%d, r%d\n", ra, rb);
+ tcg_gen_setcond_tl(TCG_COND_LE, env_btaken, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ wb_SR_F();
+}
+
+static void dec_compi(DisasContext *dc, uint32_t insn)
+{
+ uint32_t op0;
+ uint32_t ra, I16;
+
+ op0 = extract32(insn, 21, 5);
+ ra = extract32(insn, 16, 5);
+ I16 = extract32(insn, 0, 16);
+
+ tcg_gen_movi_i32(env_btaken, 0x0);
+ I16 = sign_extend(I16, 16);
+
+ switch (op0) {
+ case 0x0: /* l.sfeqi */
+ LOG_DIS("l.sfeqi r%d, %d\n", ra, I16);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, env_btaken, cpu_R[ra], I16);
+ break;
+
+ case 0x1: /* l.sfnei */
+ LOG_DIS("l.sfnei r%d, %d\n", ra, I16);
+ tcg_gen_setcondi_tl(TCG_COND_NE, env_btaken, cpu_R[ra], I16);
+ break;
+
+ case 0x2: /* l.sfgtui */
+ LOG_DIS("l.sfgtui r%d, %d\n", ra, I16);
+ tcg_gen_setcondi_tl(TCG_COND_GTU, env_btaken, cpu_R[ra], I16);
+ break;
+
+ case 0x3: /* l.sfgeui */
+ LOG_DIS("l.sfgeui r%d, %d\n", ra, I16);
+ tcg_gen_setcondi_tl(TCG_COND_GEU, env_btaken, cpu_R[ra], I16);
+ break;
+
+ case 0x4: /* l.sfltui */
+ LOG_DIS("l.sfltui r%d, %d\n", ra, I16);
+ tcg_gen_setcondi_tl(TCG_COND_LTU, env_btaken, cpu_R[ra], I16);
+ break;
+
+ case 0x5: /* l.sfleui */
+ LOG_DIS("l.sfleui r%d, %d\n", ra, I16);
+ tcg_gen_setcondi_tl(TCG_COND_LEU, env_btaken, cpu_R[ra], I16);
+ break;
+
+ case 0xa: /* l.sfgtsi */
+ LOG_DIS("l.sfgtsi r%d, %d\n", ra, I16);
+ tcg_gen_setcondi_tl(TCG_COND_GT, env_btaken, cpu_R[ra], I16);
+ break;
+
+ case 0xb: /* l.sfgesi */
+ LOG_DIS("l.sfgesi r%d, %d\n", ra, I16);
+ tcg_gen_setcondi_tl(TCG_COND_GE, env_btaken, cpu_R[ra], I16);
+ break;
+
+ case 0xc: /* l.sfltsi */
+ LOG_DIS("l.sfltsi r%d, %d\n", ra, I16);
+ tcg_gen_setcondi_tl(TCG_COND_LT, env_btaken, cpu_R[ra], I16);
+ break;
+
+ case 0xd: /* l.sflesi */
+ LOG_DIS("l.sflesi r%d, %d\n", ra, I16);
+ tcg_gen_setcondi_tl(TCG_COND_LE, env_btaken, cpu_R[ra], I16);
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ wb_SR_F();
+}
+
+static void dec_sys(DisasContext *dc, uint32_t insn)
+{
+ uint32_t op0;
+#ifdef OPENRISC_DISAS
+ uint32_t K16;
+#endif
+ op0 = extract32(insn, 16, 10);
+#ifdef OPENRISC_DISAS
+ K16 = extract32(insn, 0, 16);
+#endif
+
+ switch (op0) {
+ case 0x000: /* l.sys */
+ LOG_DIS("l.sys %d\n", K16);
+ tcg_gen_movi_tl(cpu_pc, dc->pc);
+ gen_exception(dc, EXCP_SYSCALL);
+ dc->is_jmp = DISAS_UPDATE;
+ break;
+
+ case 0x100: /* l.trap */
+ LOG_DIS("l.trap %d\n", K16);
+#if defined(CONFIG_USER_ONLY)
+ return;
+#else
+ if (dc->mem_idx == MMU_USER_IDX) {
+ gen_illegal_exception(dc);
+ return;
+ }
+ tcg_gen_movi_tl(cpu_pc, dc->pc);
+ gen_exception(dc, EXCP_TRAP);
+#endif
+ break;
+
+ case 0x300: /* l.csync */
+ LOG_DIS("l.csync\n");
+#if defined(CONFIG_USER_ONLY)
+ return;
+#else
+ if (dc->mem_idx == MMU_USER_IDX) {
+ gen_illegal_exception(dc);
+ return;
+ }
+#endif
+ break;
+
+ case 0x200: /* l.msync */
+ LOG_DIS("l.msync\n");
+#if defined(CONFIG_USER_ONLY)
+ return;
+#else
+ if (dc->mem_idx == MMU_USER_IDX) {
+ gen_illegal_exception(dc);
+ return;
+ }
+#endif
+ break;
+
+ case 0x270: /* l.psync */
+ LOG_DIS("l.psync\n");
+#if defined(CONFIG_USER_ONLY)
+ return;
+#else
+ if (dc->mem_idx == MMU_USER_IDX) {
+ gen_illegal_exception(dc);
+ return;
+ }
+#endif
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+}
+
+static void dec_float(DisasContext *dc, uint32_t insn)
+{
+ uint32_t op0;
+ uint32_t ra, rb, rd;
+ op0 = extract32(insn, 0, 8);
+ ra = extract32(insn, 16, 5);
+ rb = extract32(insn, 11, 5);
+ rd = extract32(insn, 21, 5);
+
+ switch (op0) {
+ case 0x00: /* lf.add.s */
+ LOG_DIS("lf.add.s r%d, r%d, r%d\n", rd, ra, rb);
+ gen_helper_float_add_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x01: /* lf.sub.s */
+ LOG_DIS("lf.sub.s r%d, r%d, r%d\n", rd, ra, rb);
+ gen_helper_float_sub_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x02: /* lf.mul.s */
+ LOG_DIS("lf.mul.s r%d, r%d, r%d\n", rd, ra, rb);
+ if (ra != 0 && rb != 0) {
+ gen_helper_float_mul_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
+ } else {
+ tcg_gen_ori_tl(fpcsr, fpcsr, FPCSR_ZF);
+ tcg_gen_movi_i32(cpu_R[rd], 0x0);
+ }
+ break;
+
+ case 0x03: /* lf.div.s */
+ LOG_DIS("lf.div.s r%d, r%d, r%d\n", rd, ra, rb);
+ gen_helper_float_div_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x04: /* lf.itof.s */
+ LOG_DIS("lf.itof r%d, r%d\n", rd, ra);
+ gen_helper_itofs(cpu_R[rd], cpu_env, cpu_R[ra]);
+ break;
+
+ case 0x05: /* lf.ftoi.s */
+ LOG_DIS("lf.ftoi r%d, r%d\n", rd, ra);
+ gen_helper_ftois(cpu_R[rd], cpu_env, cpu_R[ra]);
+ break;
+
+ case 0x06: /* lf.rem.s */
+ LOG_DIS("lf.rem.s r%d, r%d, r%d\n", rd, ra, rb);
+ gen_helper_float_rem_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x07: /* lf.madd.s */
+ LOG_DIS("lf.madd.s r%d, r%d, r%d\n", rd, ra, rb);
+ gen_helper_float_muladd_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x08: /* lf.sfeq.s */
+ LOG_DIS("lf.sfeq.s r%d, r%d\n", ra, rb);
+ gen_helper_float_eq_s(env_btaken, cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x09: /* lf.sfne.s */
+ LOG_DIS("lf.sfne.s r%d, r%d\n", ra, rb);
+ gen_helper_float_ne_s(env_btaken, cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x0a: /* lf.sfgt.s */
+ LOG_DIS("lf.sfgt.s r%d, r%d\n", ra, rb);
+ gen_helper_float_gt_s(env_btaken, cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x0b: /* lf.sfge.s */
+ LOG_DIS("lf.sfge.s r%d, r%d\n", ra, rb);
+ gen_helper_float_ge_s(env_btaken, cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x0c: /* lf.sflt.s */
+ LOG_DIS("lf.sflt.s r%d, r%d\n", ra, rb);
+ gen_helper_float_lt_s(env_btaken, cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x0d: /* lf.sfle.s */
+ LOG_DIS("lf.sfle.s r%d, r%d\n", ra, rb);
+ gen_helper_float_le_s(env_btaken, cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+/* Not used yet; enable this when or64 support is needed. */
+/*#ifdef TARGET_OPENRISC64
+ case 0x10: lf.add.d
+ LOG_DIS("lf.add.d r%d, r%d, r%d\n", rd, ra, rb);
+ check_of64s(dc);
+ gen_helper_float_add_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x11: lf.sub.d
+ LOG_DIS("lf.sub.d r%d, r%d, r%d\n", rd, ra, rb);
+ check_of64s(dc);
+ gen_helper_float_sub_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x12: lf.mul.d
+ LOG_DIS("lf.mul.d r%d, r%d, r%d\n", rd, ra, rb);
+ check_of64s(dc);
+ if (ra != 0 && rb != 0) {
+ gen_helper_float_mul_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
+ } else {
+ tcg_gen_ori_tl(fpcsr, fpcsr, FPCSR_ZF);
+ tcg_gen_movi_i64(cpu_R[rd], 0x0);
+ }
+ break;
+
+ case 0x13: lf.div.d
+ LOG_DIS("lf.div.d r%d, r%d, r%d\n", rd, ra, rb);
+ check_of64s(dc);
+ gen_helper_float_div_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x14: lf.itof.d
+ LOG_DIS("lf.itof r%d, r%d\n", rd, ra);
+ check_of64s(dc);
+ gen_helper_itofd(cpu_R[rd], cpu_env, cpu_R[ra]);
+ break;
+
+ case 0x15: lf.ftoi.d
+ LOG_DIS("lf.ftoi r%d, r%d\n", rd, ra);
+ check_of64s(dc);
+ gen_helper_ftoid(cpu_R[rd], cpu_env, cpu_R[ra]);
+ break;
+
+ case 0x16: lf.rem.d
+ LOG_DIS("lf.rem.d r%d, r%d, r%d\n", rd, ra, rb);
+ check_of64s(dc);
+ gen_helper_float_rem_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x17: lf.madd.d
+ LOG_DIS("lf.madd.d r%d, r%d, r%d\n", rd, ra, rb);
+ check_of64s(dc);
+ gen_helper_float_muladd_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x18: lf.sfeq.d
+ LOG_DIS("lf.sfeq.d r%d, r%d\n", ra, rb);
+ check_of64s(dc);
+ gen_helper_float_eq_d(env_btaken, cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x1a: lf.sfgt.d
+ LOG_DIS("lf.sfgt.d r%d, r%d\n", ra, rb);
+ check_of64s(dc);
+ gen_helper_float_gt_d(env_btaken, cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x1b: lf.sfge.d
+ LOG_DIS("lf.sfge.d r%d, r%d\n", ra, rb);
+ check_of64s(dc);
+ gen_helper_float_ge_d(env_btaken, cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x19: lf.sfne.d
+ LOG_DIS("lf.sfne.d r%d, r%d\n", ra, rb);
+ check_of64s(dc);
+ gen_helper_float_ne_d(env_btaken, cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x1c: lf.sflt.d
+ LOG_DIS("lf.sflt.d r%d, r%d\n", ra, rb);
+ check_of64s(dc);
+ gen_helper_float_lt_d(env_btaken, cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x1d: lf.sfle.d
+ LOG_DIS("lf.sfle.d r%d, r%d\n", ra, rb);
+ check_of64s(dc);
+ gen_helper_float_le_d(env_btaken, cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+#endif*/
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ wb_SR_F();
+}
+
+static void disas_openrisc_insn(DisasContext *dc, OpenRISCCPU *cpu)
+{
+ uint32_t op0;
+ uint32_t insn;
+ insn = cpu_ldl_code(&cpu->env, dc->pc);
+ op0 = extract32(insn, 26, 6);
+
+ switch (op0) {
+ case 0x06:
+ dec_M(dc, insn);
+ break;
+
+ case 0x08:
+ dec_sys(dc, insn);
+ break;
+
+ case 0x2e:
+ dec_logic(dc, insn);
+ break;
+
+ case 0x2f:
+ dec_compi(dc, insn);
+ break;
+
+ case 0x31:
+ dec_mac(dc, insn);
+ break;
+
+ case 0x32:
+ dec_float(dc, insn);
+ break;
+
+ case 0x38:
+ dec_calc(dc, insn);
+ break;
+
+ case 0x39:
+ dec_comp(dc, insn);
+ break;
+
+ default:
+ dec_misc(dc, insn);
+ break;
+ }
+}
+
+void gen_intermediate_code(CPUOpenRISCState *env, struct TranslationBlock *tb)
+{
+ OpenRISCCPU *cpu = openrisc_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ struct DisasContext ctx, *dc = &ctx;
+ uint32_t pc_start;
+ uint32_t next_page_start;
+ int num_insns;
+ int max_insns;
+
+ pc_start = tb->pc;
+ dc->tb = tb;
+
+ dc->is_jmp = DISAS_NEXT;
+ dc->ppc = pc_start;
+ dc->pc = pc_start;
+ dc->flags = cpu->env.cpucfgr;
+ dc->mem_idx = cpu_mmu_index(&cpu->env, false);
+ dc->synced_flags = dc->tb_flags = tb->flags;
+ dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
+ dc->singlestep_enabled = cs->singlestep_enabled;
+
+ next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
+ num_insns = 0;
+ max_insns = tb->cflags & CF_COUNT_MASK;
+
+ if (max_insns == 0) {
+ max_insns = CF_COUNT_MASK;
+ }
+ if (max_insns > TCG_MAX_INSNS) {
+ max_insns = TCG_MAX_INSNS;
+ }
+
+ gen_tb_start(tb);
+
+ do {
+ tcg_gen_insn_start(dc->pc);
+ num_insns++;
+
+ if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
+ tcg_gen_movi_tl(cpu_pc, dc->pc);
+ gen_exception(dc, EXCP_DEBUG);
+ dc->is_jmp = DISAS_UPDATE;
+ /* The address covered by the breakpoint must be included in
+ [tb->pc, tb->pc + tb->size) in order for it to be
+ properly cleared -- thus we increment the PC here so that
+ the logic setting tb->size below does the right thing. */
+ dc->pc += 4;
+ break;
+ }
+
+ if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ gen_io_start();
+ }
+ dc->ppc = dc->pc - 4;
+ dc->npc = dc->pc + 4;
+ tcg_gen_movi_tl(cpu_ppc, dc->ppc);
+ tcg_gen_movi_tl(cpu_npc, dc->npc);
+ disas_openrisc_insn(dc, cpu);
+ dc->pc = dc->npc;
+ /* delay slot */
+ if (dc->delayed_branch) {
+ dc->delayed_branch--;
+ if (!dc->delayed_branch) {
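+ /* The delay slot has been executed: commit the branch target and end the TB. */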
+ dc->tb_flags &= ~D_FLAG;
+ gen_sync_flags(dc);
+ tcg_gen_mov_tl(cpu_pc, jmp_pc);
+ tcg_gen_mov_tl(cpu_npc, jmp_pc);
+ tcg_gen_movi_tl(jmp_pc, 0);
+ tcg_gen_exit_tb(0);
+ dc->is_jmp = DISAS_JUMP;
+ break;
+ }
+ }
+ } while (!dc->is_jmp
+ && !tcg_op_buf_full()
+ && !cs->singlestep_enabled
+ && !singlestep
+ && (dc->pc < next_page_start)
+ && num_insns < max_insns);
+
+ if (tb->cflags & CF_LAST_IO) {
+ gen_io_end();
+ }
+ if (dc->is_jmp == DISAS_NEXT) {
+ dc->is_jmp = DISAS_UPDATE;
+ tcg_gen_movi_tl(cpu_pc, dc->pc);
+ }
+ if (unlikely(cs->singlestep_enabled)) {
+ if (dc->is_jmp == DISAS_NEXT) {
+ tcg_gen_movi_tl(cpu_pc, dc->pc);
+ }
+ gen_exception(dc, EXCP_DEBUG);
+ } else {
+ switch (dc->is_jmp) {
+ case DISAS_NEXT:
+ gen_goto_tb(dc, 0, dc->pc);
+ break;
+ default:
+ case DISAS_JUMP:
+ break;
+ case DISAS_UPDATE:
+ /* indicate that the hash table must be used
+ to find the next TB */
+ tcg_gen_exit_tb(0);
+ break;
+ case DISAS_TB_JUMP:
+ /* nothing more to generate */
+ break;
+ }
+ }
+
+ gen_tb_end(tb, num_insns);
+
+ tb->size = dc->pc - pc_start;
+ tb->icount = num_insns;
+
+#ifdef DEBUG_DISAS
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
+ && qemu_log_in_addr_range(pc_start)) {
+ qemu_log_lock();
+ qemu_log("----------------\n");
+ qemu_log("IN: %s\n", lookup_symbol(pc_start));
+ log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
+ qemu_log("\nisize=%d osize=%d\n",
+ dc->pc - pc_start, tcg_op_buf_count());
+ qemu_log_unlock();
+ }
+#endif
+}
+
+void openrisc_cpu_dump_state(CPUState *cs, FILE *f,
+ fprintf_function cpu_fprintf,
+ int flags)
+{
+ OpenRISCCPU *cpu = OPENRISC_CPU(cs);
+ CPUOpenRISCState *env = &cpu->env;
+ int i;
+
+ cpu_fprintf(f, "PC=%08x\n", env->pc);
+ for (i = 0; i < 32; ++i) {
+ cpu_fprintf(f, "R%02d=%08x%c", i, env->gpr[i],
+ (i % 4) == 3 ? '\n' : ' ');
+ }
+}
+
+void restore_state_to_opc(CPUOpenRISCState *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ env->pc = data[0];
+}
diff --git a/target/ppc/Makefile.objs b/target/ppc/Makefile.objs
new file mode 100644
index 0000000000..e667e69701
--- /dev/null
+++ b/target/ppc/Makefile.objs
@@ -0,0 +1,17 @@
+obj-y += cpu-models.o
+obj-y += translate.o
+ifeq ($(CONFIG_SOFTMMU),y)
+obj-y += machine.o mmu_helper.o mmu-hash32.o monitor.o
+obj-$(TARGET_PPC64) += mmu-hash64.o arch_dump.o
+endif
+obj-$(CONFIG_KVM) += kvm.o
+obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o
+obj-y += dfp_helper.o
+obj-y += excp_helper.o
+obj-y += fpu_helper.o
+obj-y += int_helper.o
+obj-y += timebase_helper.o
+obj-y += misc_helper.o
+obj-y += mem_helper.o
+obj-$(CONFIG_USER_ONLY) += user_only_helper.o
+obj-y += gdbstub.o
diff --git a/target/ppc/STATUS b/target/ppc/STATUS
new file mode 100644
index 0000000000..a4d48a7ca2
--- /dev/null
+++ b/target/ppc/STATUS
@@ -0,0 +1,550 @@
+PowerPC emulation status.
+The goal of this file is to provide a reference status to avoid regressions.
+
+===============================================================================
+PowerPC core emulation status
+
+INSN: instruction set.
+ OK => all instructions are emulated
+ KO => some insns are missing or some should be removed
+ ? => unchecked
+SPR: special purpose registers set
+ OK => all SPR registered (but some may be fake)
+ KO => some SPR are missing or should be removed
+ ? => unchecked
+MSR: MSR bits definitions
+ OK => all MSR bits properly defined
+ KO => MSR definition is incorrect
+ ? => unchecked
+IRQ: input signal definitions (mostly interrupts)
+ OK => input signals are properly defined
+ KO => input signals are not implemented (system emulation does not work)
+ ? => input signal definitions may be incorrect
+MMU: MMU model implementation
+ OK => MMU model is implemented and Linux is able to boot
+ KO => MMU model not implemented or buggy
+ ? => MMU model not tested
+EXCP: exceptions model implementation
+ OK => exception model is implemented and Linux is able to boot
+ KO => exception model not implemented or known to be buggy
+ ? => exception model may be incorrect or is untested
+
+Embedded PowerPC cores
+***
+PowerPC 401:
+INSN OK
+SPR OK 401A1
+MSR OK
+IRQ KO partially implemented
+MMU OK
+EXCP ?
+
+PowerPC 401x2:
+INSN OK
+SPR OK 401B2 401C2 401D2 401E2 401F2
+MSR OK
+IRQ KO partially implemented
+MMU OK
+EXCP ?
+
+PowerPC IOP480:
+INSN OK
+SPR OK IOP480
+MSR OK
+IRQ KO partially implemented
+MMU OK
+EXCP ?
+
+To be checked: 401G2 401B3 Cobra
+
+***
+PowerPC 403:
+INSN OK
+SPR OK 403GA 403GB
+MMU OK
+MSR OK
+IRQ KO not implemented
+EXCP ?
+
+PowerPC 403GCX:
+INSN OK
+SPR OK 403GCX
+MMU OK
+MSR OK
+IRQ KO not implemented
+EXCP ?
+
+To be checked: 403GC
+
+***
+PowerPC 405:
+Checked: 405CRa 405CRb 405CRc 405EP 405GPa 405GPb 405GPc 405GPd 405GPe 405GPR
+ Npe405H Npe405H2 Npe405L
+INSN OK
+SPR OK
+MSR OK
+IRQ OK
+MMU OK
+EXCP OK
+Remarks: Linux 2.4 boots (with at least one proprietary firmware).
+ uboot seems to freeze at boot time.
+To be checked: 405D2 405D4 405EZ 405LP Npe4GS3 STB03 STB04 STB25
+ x2vp4 x2vp7 x2vp20 x2vp50
+
+XXX: find out what the IBM e407b4 is
+
+***
+PowerPC 440:
+Checked: 440EPa 440EPb 440GXa 440GXb 440GXc 440GXf 440SP 440SP2
+INSN OK
+SPR OK
+MSR OK
+IRQ KO not implemented
+MMU ?
+EXCP ?
+
+PowerPC 440GP:
+Checked: 440GPb 440GPc
+INSN OK
+SPR OK
+MSR OK
+IRQ KO not implemented
+MMU ?
+EXCP ?
+
+PowerPC 440x4:
+Checked: 440A4 440B4 440G4 440H4
+INSN OK
+SPR OK
+MSR OK
+IRQ KO not implemented
+MMU ?
+EXCP ?
+
+PowerPC 440x5:
+Checked: 440A5 440F5 440G5 440H6 440GRa
+INSN OK
+SPR OK
+MSR OK
+IRQ KO not implemented
+MMU ?
+EXCP ?
+
+To be checked: 440EPx 440GRx 440SPE
+
+***
+PowerPC 460: (disabled: lack of detailed specifications)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+PowerPC 460F: (disabled: lack of detailed specifications)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+***
+PowerPC e200: (not implemented)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+***
+PowerPC e300: (not implemented)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+***
+PowerPC e500: (not implemented)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+***
+PowerPC e600: (not implemented)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+***
+32 bits PowerPC
+PowerPC 601: (601 601v2)
+INSN OK
+SPR OK (is HID15 present only on the 601v2?)
+MSR OK
+IRQ KO not implemented
+MMU ?
+EXCP ?
+Remarks: some instructions should have a specific behavior (not implemented)
+
+PowerPC 602: 602
+INSN OK
+SPR OK
+MSR OK
+IRQ OK
+MMU ?
+EXCP ? at least timer and external interrupt are OK
+Remarks: Linux 2.4 crashes when entering user-mode.
+ Linux 2.6.22 boots on this CPU but does not recognize it.
+
+PowerPC 603: (603)
+INSN OK
+SPR OK
+MSR OK
+IRQ OK
+MMU OK
+EXCP OK
+Remarks: Linux 2.4 boots and properly recognizes the CPU
+ Linux 2.6.22 idem.
+
+PowerPC 603e: (603e11)
+INSN OK
+SPR OK
+MSR OK
+IRQ OK
+MMU OK
+EXCP OK
+Remarks: Linux 2.4 boots and properly recognizes the CPU
+ Linux 2.6.22 idem.
+
+PowerPC G2:
+INSN OK
+SPR OK
+MSR OK
+IRQ OK
+MMU OK
+EXCP OK
+Remarks: Linux 2.4 boots, recognizes the CPU as an 82xx.
+ Linux 2.6.22 idem.
+
+PowerPC G2le:
+INSN OK
+SPR OK
+MSR OK
+IRQ OK
+MMU OK
+EXCP OK
+Remarks: Linux 2.4 does not boot. Same symptoms as the 602.
+ Linux 2.6.22 boots and properly recognizes the CPU.
+
+PowerPC 604:
+INSN OK
+SPR OK
+MSR OK
+IRQ OK
+MMU OK
+EXCP OK
+Remarks: Linux 2.4 boots and properly recognizes the CPU.
+ Linux 2.6.22 idem.
+
+PowerPC 7x0:
+INSN OK
+SPR OK
+MSR OK
+IRQ OK
+MMU OK
+EXCP OK
+Remarks: Linux 2.4 boots and properly recognizes the CPU.
+ Linux 2.6.22 idem.
+
+PowerPC 750fx:
+INSN OK
+SPR OK
+MSR OK
+IRQ OK
+MMU OK
+EXCP OK
+Remarks: Linux 2.4 boots but does not properly recognize the CPU.
+ Linux 2.6.22 boots and properly recognizes the CPU.
+
+PowerPC 7x5:
+INSN ?
+SPR ?
+MSR ?
+IRQ OK
+MMU ?
+EXCP OK
+Remarks: Linux 2.4 does not boot.
+ Linux 2.6.22 idem.
+
+PowerPC 7400:
+INSN KO Altivec missing
+SPR OK
+MSR OK
+IRQ OK
+MMU OK
+EXCP ? Altivec, ...
+Remarks: Linux 2.4 boots and properly recognizes the CPU.
+ Linux 2.6.22 idem.
+
+PowerPC 7410:
+INSN KO Altivec missing
+SPR OK
+MSR OK
+IRQ OK
+MMU OK
+EXCP ? Altivec, ...
+Remarks: Linux 2.4 boots and properly recognizes the CPU.
+ Linux 2.6.22 idem.
+ Note that the user's manual says tlbld & tlbli are implemented, but this may
+ be a mistake: TLB loads are managed by the hardware and the CPU does not
+ implement the needed registers.
+
+PowerPC 7441:
+INSN KO Altivec missing
+SPR OK
+MSR OK
+IRQ OK
+MMU OK
+EXCP ? Altivec, ...
+Remarks: Linux does not have the code to handle TLB miss on this CPU
+ Linux 2.6.22 idem.
+
+PowerPC 7450/7451:
+INSN KO Altivec missing
+SPR OK
+MSR OK
+IRQ OK
+MMU OK
+EXCP ? Altivec, ...
+Remarks: Linux does not have the code to handle TLB miss on this CPU
+ Linux 2.6.22 idem.
+
+PowerPC 7445/7447:
+INSN KO Altivec missing
+SPR OK
+MSR OK
+IRQ OK
+MMU OK
+EXCP ? Altivec, ...
+Remarks: Linux does not have the code to handle TLB miss on this CPU
+ Linux 2.6.22 idem.
+
+PowerPC 7455/7457:
+INSN KO Altivec missing
+SPR OK
+MSR OK
+IRQ OK
+MMU OK
+EXCP ? Altivec, ...
+Remarks: Linux does not have the code to handle TLB miss on this CPU
+ Linux 2.6.22 idem.
+
+64 bits PowerPC
+PowerPC 620: (disabled)
+INSN KO
+SPR KO
+MSR ?
+IRQ KO
+MMU KO
+EXCP KO
+Remarks: not much documentation for this implementation...
+
+PowerPC 970:
+INSN KO Altivec missing and more
+SPR KO
+MSR ?
+IRQ OK
+MMU OK
+EXCP KO partially implemented
+Remarks: Should be able to boot but there is no hw platform currently emulated.
+
+PowerPC 970FX:
+INSN KO Altivec missing and more
+SPR KO
+MSR ?
+IRQ OK
+MMU OK
+EXCP KO partially implemented
+Remarks: Should be able to boot but there is no hw platform currently emulated.
+
+PowerPC Cell:
+INSN KO Altivec missing and more
+SPR KO
+MSR ?
+IRQ ?
+MMU ?
+EXCP ? partially implemented
+Remarks: As the core is mostly a 970, it should be able to boot.
+ The SPEs are not implemented.
+
+PowerPC 630: (disabled: lack of detailed specifications)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+PowerPC 631: (disabled: lack of detailed specifications)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+POWER4: (disabled: lack of detailed specifications)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+POWER4+: (disabled: lack of detailed specifications)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+POWER5: (disabled: lack of detailed specifications)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+POWER5+: (disabled: lack of detailed specifications)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+POWER6: (disabled: lack of detailed specifications)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+RS64: (disabled: lack of detailed specifications)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+RS64-II: (disabled: lack of detailed specifications)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+RS64-III: (disabled: lack of detailed specifications)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+RS64-IV: (disabled: lack of detailed specifications)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+Original POWER
+POWER: (disabled: lack of detailed specifications)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+POWER2: (disabled: lack of detailed specifications)
+INSN KO
+SPR KO
+MSR KO
+IRQ KO
+MMU KO
+EXCP KO
+
+===============================================================================
+PowerPC microcontrollers emulation status
+
+Implementation should be sufficient to boot Linux:
+(there seem to be problems with uboot freezing at some point)
+- PowerPC 405CR
+- PowerPC 405EP
+
+TODO:
+- PowerPC 401 microcontrollers emulation
+- PowerPC 403 microcontrollers emulation
+- more PowerPC 405 microcontrollers emulation
+- Fixes / more features for implemented PowerPC 405 microcontrollers emulation
+- PowerPC 440 microcontrollers emulation
+- e200 microcontrollers emulation
+- e300 microcontrollers emulation
+- e500 microcontrollers emulation
+- e600 microcontrollers emulation
+
+===============================================================================
+PowerPC based platforms emulation status
+
+* PREP platform (RS/6000 7043...) - TO BE CHECKED (broken)
+- Gentoo Linux live CDROM 1.4
+- Debian Linux 3.0
+- Mandrake Linux 9
+
+* heathrow PowerMac platform (beige PowerMac) - TO BE CHECKED (broken)
+- Gentoo Linux live CDROM 1.4
+- Debian Linux 3.0
+- Mandrake Linux 9
+
+* mac99 platform (white and blue PowerMac, ...)
+- Gentoo Linux live CDROM 1.4 - boots, compiles linux kernel
+- Debian Linux woody - boots from CDROM and HDD
+- Mandrake Linux 9 - boots from CDROM, freezes during install
+- Knoppix 2003-07-13_4 boots from CDROM, problem with X configuration
+ distribution bug: X runs with a properly hand-coded configuration.
+- rock Linux 2.0 runs from CDROM
+
+* Linux 2.6 support seems badly broken (it used to boot...).
+
+* PowerPC 405EP reference boards:
+- can boot Linux 2.4 & 2.6.
+ Need to provide a flash image ready to boot for reproducible tests.
+
+TODO:
+- URGENT: fix PreP and heathrow platforms
+- PowerPC 64 reference platform
+- MCA based RS/6000 emulation
+- CHRP emulation (not PowerMac)
+- PAPR emulation
+- ePAPR emulation
+- misc PowerPC reference boards emulation
+
+===============================================================================
diff --git a/target/ppc/arch_dump.c b/target/ppc/arch_dump.c
new file mode 100644
index 0000000000..40282a1f50
--- /dev/null
+++ b/target/ppc/arch_dump.c
@@ -0,0 +1,286 @@
+/*
+ * writing ELF notes for ppc64 arch
+ *
+ *
+ * Copyright IBM, Corp. 2013
+ *
+ * Authors:
+ * Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "elf.h"
+#include "exec/cpu-all.h"
+#include "sysemu/dump.h"
+#include "sysemu/kvm.h"
+
+struct PPC64UserRegStruct {
+ uint64_t gpr[32];
+ uint64_t nip;
+ uint64_t msr;
+ uint64_t orig_gpr3;
+ uint64_t ctr;
+ uint64_t link;
+ uint64_t xer;
+ uint64_t ccr;
+ uint64_t softe;
+ uint64_t trap;
+ uint64_t dar;
+ uint64_t dsisr;
+ uint64_t result;
+} QEMU_PACKED;
+
+struct PPC64ElfPrstatus {
+ char pad1[112];
+ struct PPC64UserRegStruct pr_reg;
+ uint64_t pad2[4];
+} QEMU_PACKED;
+
+
+struct PPC64ElfFpregset {
+ uint64_t fpr[32];
+ uint64_t fpscr;
+} QEMU_PACKED;
+
+
+struct PPC64ElfVmxregset {
+ ppc_avr_t avr[32];
+ ppc_avr_t vscr;
+ union {
+ ppc_avr_t unused;
+ uint32_t value;
+ } vrsave;
+} QEMU_PACKED;
+
+struct PPC64ElfVsxregset {
+ uint64_t vsr[32];
+} QEMU_PACKED;
+
+struct PPC64ElfSperegset {
+ uint32_t evr[32];
+ uint64_t spe_acc;
+ uint32_t spe_fscr;
+} QEMU_PACKED;
+
+typedef struct noteStruct {
+ Elf64_Nhdr hdr;
+ char name[5];
+ char pad3[3];
+ union {
+ struct PPC64ElfPrstatus prstatus;
+ struct PPC64ElfFpregset fpregset;
+ struct PPC64ElfVmxregset vmxregset;
+ struct PPC64ElfVsxregset vsxregset;
+ struct PPC64ElfSperegset speregset;
+ } contents;
+} QEMU_PACKED Note;
+
+typedef struct NoteFuncArg {
+ Note note;
+ DumpState *state;
+} NoteFuncArg;
+
+static void ppc64_write_elf64_prstatus(NoteFuncArg *arg, PowerPCCPU *cpu)
+{
+ int i;
+ uint64_t cr;
+ struct PPC64ElfPrstatus *prstatus;
+ struct PPC64UserRegStruct *reg;
+ Note *note = &arg->note;
+ DumpState *s = arg->state;
+
+ note->hdr.n_type = cpu_to_dump32(s, NT_PRSTATUS);
+
+ prstatus = &note->contents.prstatus;
+ memset(prstatus, 0, sizeof(*prstatus));
+ reg = &prstatus->pr_reg;
+
+ for (i = 0; i < 32; i++) {
+ reg->gpr[i] = cpu_to_dump64(s, cpu->env.gpr[i]);
+ }
+ reg->nip = cpu_to_dump64(s, cpu->env.nip);
+ reg->msr = cpu_to_dump64(s, cpu->env.msr);
+ reg->ctr = cpu_to_dump64(s, cpu->env.ctr);
+ reg->link = cpu_to_dump64(s, cpu->env.lr);
+ reg->xer = cpu_to_dump64(s, cpu_read_xer(&cpu->env));
+
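+ /* Pack the eight 4-bit CR fields, CR field 0 in the most significant nibble. */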
+ cr = 0;
+ for (i = 0; i < 8; i++) {
+ cr |= (cpu->env.crf[i] & 15) << (4 * (7 - i));
+ }
+ reg->ccr = cpu_to_dump64(s, cr);
+}
+
+static void ppc64_write_elf64_fpregset(NoteFuncArg *arg, PowerPCCPU *cpu)
+{
+ int i;
+ struct PPC64ElfFpregset *fpregset;
+ Note *note = &arg->note;
+ DumpState *s = arg->state;
+
+ note->hdr.n_type = cpu_to_dump32(s, NT_PRFPREG);
+
+ fpregset = &note->contents.fpregset;
+ memset(fpregset, 0, sizeof(*fpregset));
+
+ for (i = 0; i < 32; i++) {
+ fpregset->fpr[i] = cpu_to_dump64(s, cpu->env.fpr[i]);
+ }
+ fpregset->fpscr = cpu_to_dump64(s, cpu->env.fpscr);
+}
+
+static void ppc64_write_elf64_vmxregset(NoteFuncArg *arg, PowerPCCPU *cpu)
+{
+ int i;
+ struct PPC64ElfVmxregset *vmxregset;
+ Note *note = &arg->note;
+ DumpState *s = arg->state;
+
+ note->hdr.n_type = cpu_to_dump32(s, NT_PPC_VMX);
+ vmxregset = &note->contents.vmxregset;
+ memset(vmxregset, 0, sizeof(*vmxregset));
+
+ for (i = 0; i < 32; i++) {
+ bool needs_byteswap;
+
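+ /* When the dump endianness differs from the host, byte-swap the 128-bit Altivec value. */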
+#ifdef HOST_WORDS_BIGENDIAN
+ needs_byteswap = s->dump_info.d_endian == ELFDATA2LSB;
+#else
+ needs_byteswap = s->dump_info.d_endian == ELFDATA2MSB;
+#endif
+
+ if (needs_byteswap) {
+ vmxregset->avr[i].u64[0] = bswap64(cpu->env.avr[i].u64[1]);
+ vmxregset->avr[i].u64[1] = bswap64(cpu->env.avr[i].u64[0]);
+ } else {
+ vmxregset->avr[i].u64[0] = cpu->env.avr[i].u64[0];
+ vmxregset->avr[i].u64[1] = cpu->env.avr[i].u64[1];
+ }
+ }
+ vmxregset->vscr.u32[3] = cpu_to_dump32(s, cpu->env.vscr);
+}
+static void ppc64_write_elf64_vsxregset(NoteFuncArg *arg, PowerPCCPU *cpu)
+{
+ int i;
+ struct PPC64ElfVsxregset *vsxregset;
+ Note *note = &arg->note;
+ DumpState *s = arg->state;
+
+ note->hdr.n_type = cpu_to_dump32(s, NT_PPC_VSX);
+ vsxregset = &note->contents.vsxregset;
+ memset(vsxregset, 0, sizeof(*vsxregset));
+
+ for (i = 0; i < 32; i++) {
+ vsxregset->vsr[i] = cpu_to_dump64(s, cpu->env.vsr[i]);
+ }
+}
+static void ppc64_write_elf64_speregset(NoteFuncArg *arg, PowerPCCPU *cpu)
+{
+ struct PPC64ElfSperegset *speregset;
+ Note *note = &arg->note;
+ DumpState *s = arg->state;
+
+ note->hdr.n_type = cpu_to_dump32(s, NT_PPC_SPE);
+ speregset = &note->contents.speregset;
+ memset(speregset, 0, sizeof(*speregset));
+
+ speregset->spe_acc = cpu_to_dump64(s, cpu->env.spe_acc);
+ speregset->spe_fscr = cpu_to_dump32(s, cpu->env.spe_fscr);
+}
+
+static const struct NoteFuncDescStruct {
+ int contents_size;
+ void (*note_contents_func)(NoteFuncArg *arg, PowerPCCPU *cpu);
+} note_func[] = {
+ {sizeof(((Note *)0)->contents.prstatus), ppc64_write_elf64_prstatus},
+ {sizeof(((Note *)0)->contents.fpregset), ppc64_write_elf64_fpregset},
+ {sizeof(((Note *)0)->contents.vmxregset), ppc64_write_elf64_vmxregset},
+ {sizeof(((Note *)0)->contents.vsxregset), ppc64_write_elf64_vsxregset},
+ {sizeof(((Note *)0)->contents.speregset), ppc64_write_elf64_speregset},
+ { 0, NULL}
+};
+
+typedef struct NoteFuncDescStruct NoteFuncDesc;
+
+int cpu_get_dump_info(ArchDumpInfo *info,
+ const struct GuestPhysBlockList *guest_phys_blocks)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(first_cpu);
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
+
+ info->d_machine = EM_PPC64;
+ info->d_class = ELFCLASS64;
+ if ((*pcc->interrupts_big_endian)(cpu)) {
+ info->d_endian = ELFDATA2MSB;
+ } else {
+ info->d_endian = ELFDATA2LSB;
+ }
+ /* 64KB is the max page size for pseries kernel */
+ if (strncmp(object_get_typename(qdev_get_machine()),
+ "pseries-", 8) == 0) {
+ info->page_size = (1U << 16);
+ }
+
+ return 0;
+}
+
+ssize_t cpu_get_note_size(int class, int machine, int nr_cpus)
+{
+ int name_size = 8; /* "CORE" or "QEMU" rounded */
+ size_t elf_note_size = 0;
+ int note_head_size;
+ const NoteFuncDesc *nf;
+
+ if (class != ELFCLASS64) {
+ return -1;
+ }
+ assert(machine == EM_PPC64);
+
+ note_head_size = sizeof(Elf64_Nhdr);
+
+ for (nf = note_func; nf->note_contents_func; nf++) {
+ elf_note_size = elf_note_size + note_head_size + name_size +
+ nf->contents_size;
+ }
+
+ return (elf_note_size) * nr_cpus;
+}
+
+static int ppc64_write_all_elf64_notes(const char *note_name,
+ WriteCoreDumpFunction f,
+ PowerPCCPU *cpu, int id,
+ void *opaque)
+{
+ NoteFuncArg arg = { .state = opaque };
+ int ret = -1;
+ int note_size;
+ const NoteFuncDesc *nf;
+
+ for (nf = note_func; nf->note_contents_func; nf++) {
+ arg.note.hdr.n_namesz = cpu_to_dump32(opaque, sizeof(arg.note.name));
+ arg.note.hdr.n_descsz = cpu_to_dump32(opaque, nf->contents_size);
+ strncpy(arg.note.name, note_name, sizeof(arg.note.name));
+
+ (*nf->note_contents_func)(&arg, cpu);
+
+ note_size =
+ sizeof(arg.note) - sizeof(arg.note.contents) + nf->contents_size;
+ ret = f(&arg.note, note_size, opaque);
+ if (ret < 0) {
+ return -1;
+ }
+ }
+ return 0;
+}
+
+int ppc64_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
+ int cpuid, void *opaque)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ return ppc64_write_all_elf64_notes("CORE", f, cpu, cpuid, opaque);
+}
diff --git a/target/ppc/cpu-models.c b/target/ppc/cpu-models.c
new file mode 100644
index 0000000000..506dee1ee8
--- /dev/null
+++ b/target/ppc/cpu-models.c
@@ -0,0 +1,1426 @@
+/*
+ * PowerPC CPU initialization for qemu.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ * Copyright 2011 Freescale Semiconductor, Inc.
+ * Copyright 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* A lot of PowerPC definitions have been included here.
+ * Most of them are not usable for now but have been kept
+ * inside "#if defined(TODO) ... #endif" statements to make tests easier.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "cpu-models.h"
+
+#if defined(CONFIG_USER_ONLY)
+#define TODO_USER_ONLY 1
+#endif
+
+/***************************************************************************/
+/* PowerPC CPU definitions */
+#define POWERPC_DEF_PREFIX(pvr, svr, type) \
+ glue(glue(glue(glue(pvr, _), svr), _), type)
+#define POWERPC_DEF_SVR(_name, _desc, _pvr, _svr, _type) \
+ static void \
+ glue(POWERPC_DEF_PREFIX(_pvr, _svr, _type), _cpu_class_init) \
+ (ObjectClass *oc, void *data) \
+ { \
+ DeviceClass *dc = DEVICE_CLASS(oc); \
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); \
+ \
+ pcc->pvr = _pvr; \
+ pcc->svr = _svr; \
+ dc->desc = _desc; \
+ } \
+ \
+ static const TypeInfo \
+ glue(POWERPC_DEF_PREFIX(_pvr, _svr, _type), _cpu_type_info) = { \
+ .name = _name "-" TYPE_POWERPC_CPU, \
+ .parent = stringify(_type) "-family-" TYPE_POWERPC_CPU, \
+ .class_init = \
+ glue(POWERPC_DEF_PREFIX(_pvr, _svr, _type), _cpu_class_init), \
+ }; \
+ \
+ static void \
+ glue(POWERPC_DEF_PREFIX(_pvr, _svr, _type), _cpu_register_types)(void) \
+ { \
+ type_register_static( \
+ &glue(POWERPC_DEF_PREFIX(_pvr, _svr, _type), _cpu_type_info)); \
+ } \
+ \
+ type_init( \
+ glue(POWERPC_DEF_PREFIX(_pvr, _svr, _type), _cpu_register_types))
+
+#define POWERPC_DEF(_name, _pvr, _type, _desc) \
+ POWERPC_DEF_SVR(_name, _desc, _pvr, POWERPC_SVR_NONE, _type)
+
+ /* Embedded PowerPC */
+ /* PowerPC 401 family */
+ POWERPC_DEF("401", CPU_POWERPC_401, 401,
+ "Generic PowerPC 401")
+ /* PowerPC 401 cores */
+ POWERPC_DEF("401A1", CPU_POWERPC_401A1, 401,
+ "PowerPC 401A1")
+ POWERPC_DEF("401B2", CPU_POWERPC_401B2, 401x2,
+ "PowerPC 401B2")
+#if defined(TODO)
+ POWERPC_DEF("401B3", CPU_POWERPC_401B3, 401x3,
+ "PowerPC 401B3")
+#endif
+ POWERPC_DEF("401C2", CPU_POWERPC_401C2, 401x2,
+ "PowerPC 401C2")
+ POWERPC_DEF("401D2", CPU_POWERPC_401D2, 401x2,
+ "PowerPC 401D2")
+ POWERPC_DEF("401E2", CPU_POWERPC_401E2, 401x2,
+ "PowerPC 401E2")
+ POWERPC_DEF("401F2", CPU_POWERPC_401F2, 401x2,
+ "PowerPC 401F2")
+ /* XXX: to be checked */
+ POWERPC_DEF("401G2", CPU_POWERPC_401G2, 401x2,
+ "PowerPC 401G2")
+ /* PowerPC 401 microcontrollers */
+#if defined(TODO)
+ POWERPC_DEF("401GF", CPU_POWERPC_401GF, 401,
+ "PowerPC 401GF")
+#endif
+ POWERPC_DEF("IOP480", CPU_POWERPC_IOP480, IOP480,
+ "IOP480 (401 microcontroller)")
+ POWERPC_DEF("Cobra", CPU_POWERPC_COBRA, 401,
+ "IBM Processor for Network Resources")
+#if defined(TODO)
+ POWERPC_DEF("Xipchip", CPU_POWERPC_XIPCHIP, 401,
+ NULL)
+#endif
+ /* PowerPC 403 family */
+ /* PowerPC 403 microcontrollers */
+ POWERPC_DEF("403GA", CPU_POWERPC_403GA, 403,
+ "PowerPC 403 GA")
+ POWERPC_DEF("403GB", CPU_POWERPC_403GB, 403,
+ "PowerPC 403 GB")
+ POWERPC_DEF("403GC", CPU_POWERPC_403GC, 403,
+ "PowerPC 403 GC")
+ POWERPC_DEF("403GCX", CPU_POWERPC_403GCX, 403GCX,
+ "PowerPC 403 GCX")
+#if defined(TODO)
+ POWERPC_DEF("403GP", CPU_POWERPC_403GP, 403,
+ "PowerPC 403 GP")
+#endif
+ /* PowerPC 405 family */
+ /* PowerPC 405 cores */
+#if defined(TODO)
+ POWERPC_DEF("405A3", CPU_POWERPC_405A3, 405,
+ "PowerPC 405 A3")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("405A4", CPU_POWERPC_405A4, 405,
+ "PowerPC 405 A4")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("405B3", CPU_POWERPC_405B3, 405,
+ "PowerPC 405 B3")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("405B4", CPU_POWERPC_405B4, 405,
+ "PowerPC 405 B4")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("405C3", CPU_POWERPC_405C3, 405,
+ "PowerPC 405 C3")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("405C4", CPU_POWERPC_405C4, 405,
+ "PowerPC 405 C4")
+#endif
+ POWERPC_DEF("405D2", CPU_POWERPC_405D2, 405,
+ "PowerPC 405 D2")
+#if defined(TODO)
+ POWERPC_DEF("405D3", CPU_POWERPC_405D3, 405,
+ "PowerPC 405 D3")
+#endif
+ POWERPC_DEF("405D4", CPU_POWERPC_405D4, 405,
+ "PowerPC 405 D4")
+#if defined(TODO)
+ POWERPC_DEF("405D5", CPU_POWERPC_405D5, 405,
+ "PowerPC 405 D5")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("405E4", CPU_POWERPC_405E4, 405,
+ "PowerPC 405 E4")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("405F4", CPU_POWERPC_405F4, 405,
+ "PowerPC 405 F4")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("405F5", CPU_POWERPC_405F5, 405,
+ "PowerPC 405 F5")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("405F6", CPU_POWERPC_405F6, 405,
+ "PowerPC 405 F6")
+#endif
+ /* PowerPC 405 microcontrollers */
+ POWERPC_DEF("405CRa", CPU_POWERPC_405CRa, 405,
+ "PowerPC 405 CRa")
+ POWERPC_DEF("405CRb", CPU_POWERPC_405CRb, 405,
+ "PowerPC 405 CRb")
+ POWERPC_DEF("405CRc", CPU_POWERPC_405CRc, 405,
+ "PowerPC 405 CRc")
+ POWERPC_DEF("405EP", CPU_POWERPC_405EP, 405,
+ "PowerPC 405 EP")
+#if defined(TODO)
+ POWERPC_DEF("405EXr", CPU_POWERPC_405EXr, 405,
+ "PowerPC 405 EXr")
+#endif
+ POWERPC_DEF("405EZ", CPU_POWERPC_405EZ, 405,
+ "PowerPC 405 EZ")
+#if defined(TODO)
+ POWERPC_DEF("405FX", CPU_POWERPC_405FX, 405,
+ "PowerPC 405 FX")
+#endif
+ POWERPC_DEF("405GPa", CPU_POWERPC_405GPa, 405,
+ "PowerPC 405 GPa")
+ POWERPC_DEF("405GPb", CPU_POWERPC_405GPb, 405,
+ "PowerPC 405 GPb")
+ POWERPC_DEF("405GPc", CPU_POWERPC_405GPc, 405,
+ "PowerPC 405 GPc")
+ POWERPC_DEF("405GPd", CPU_POWERPC_405GPd, 405,
+ "PowerPC 405 GPd")
+ POWERPC_DEF("405GPR", CPU_POWERPC_405GPR, 405,
+ "PowerPC 405 GPR")
+#if defined(TODO)
+ POWERPC_DEF("405H", CPU_POWERPC_405H, 405,
+ "PowerPC 405 H")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("405L", CPU_POWERPC_405L, 405,
+ "PowerPC 405 L")
+#endif
+ POWERPC_DEF("405LP", CPU_POWERPC_405LP, 405,
+ "PowerPC 405 LP")
+#if defined(TODO)
+ POWERPC_DEF("405PM", CPU_POWERPC_405PM, 405,
+ "PowerPC 405 PM")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("405PS", CPU_POWERPC_405PS, 405,
+ "PowerPC 405 PS")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("405S", CPU_POWERPC_405S, 405,
+ "PowerPC 405 S")
+#endif
+ POWERPC_DEF("Npe405H", CPU_POWERPC_NPE405H, 405,
+ "Npe405 H")
+ POWERPC_DEF("Npe405H2", CPU_POWERPC_NPE405H2, 405,
+ "Npe405 H2")
+ POWERPC_DEF("Npe405L", CPU_POWERPC_NPE405L, 405,
+ "Npe405 L")
+ POWERPC_DEF("Npe4GS3", CPU_POWERPC_NPE4GS3, 405,
+ "Npe4GS3")
+#if defined(TODO)
+ POWERPC_DEF("Npcxx1", CPU_POWERPC_NPCxx1, 405,
+ NULL)
+#endif
+#if defined(TODO)
+ POWERPC_DEF("Npr161", CPU_POWERPC_NPR161, 405,
+ NULL)
+#endif
+#if defined(TODO)
+ POWERPC_DEF("LC77700", CPU_POWERPC_LC77700, 405,
+ "PowerPC LC77700 (Sanyo)")
+#endif
+ /* PowerPC 401/403/405 based set-top-box microcontrollers */
+#if defined(TODO)
+ POWERPC_DEF("STB01000", CPU_POWERPC_STB01000, 401x2,
+ "STB010000")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("STB01010", CPU_POWERPC_STB01010, 401x2,
+ "STB01010")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("STB0210", CPU_POWERPC_STB0210, 401x3,
+ "STB0210")
+#endif
+ POWERPC_DEF("STB03", CPU_POWERPC_STB03, 405,
+ "STB03xx")
+#if defined(TODO)
+ POWERPC_DEF("STB043", CPU_POWERPC_STB043, 405,
+ "STB043x")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("STB045", CPU_POWERPC_STB045, 405,
+ "STB045x")
+#endif
+ POWERPC_DEF("STB04", CPU_POWERPC_STB04, 405,
+ "STB04xx")
+ POWERPC_DEF("STB25", CPU_POWERPC_STB25, 405,
+ "STB25xx")
+#if defined(TODO)
+ POWERPC_DEF("STB130", CPU_POWERPC_STB130, 405,
+ "STB130")
+#endif
+ /* Xilinx PowerPC 405 cores */
+ POWERPC_DEF("x2vp4", CPU_POWERPC_X2VP4, 405,
+ NULL)
+ POWERPC_DEF("x2vp20", CPU_POWERPC_X2VP20, 405,
+ NULL)
+#if defined(TODO)
+ POWERPC_DEF("zl10310", CPU_POWERPC_ZL10310, 405,
+ "Zarlink ZL10310")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("zl10311", CPU_POWERPC_ZL10311, 405,
+ "Zarlink ZL10311")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("zl10320", CPU_POWERPC_ZL10320, 405,
+ "Zarlink ZL10320")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("zl10321", CPU_POWERPC_ZL10321, 405,
+ "Zarlink ZL10321")
+#endif
+ /* PowerPC 440 family */
+#if defined(TODO_USER_ONLY)
+ POWERPC_DEF("440", CPU_POWERPC_440, 440GP,
+ "Generic PowerPC 440")
+#endif
+ /* PowerPC 440 cores */
+#if defined(TODO)
+ POWERPC_DEF("440A4", CPU_POWERPC_440A4, 440x4,
+ "PowerPC 440 A4")
+#endif
+ POWERPC_DEF("440-Xilinx", CPU_POWERPC_440_XILINX, 440x5,
+ "PowerPC 440 Xilinx 5")
+
+ POWERPC_DEF("440-Xilinx-w-dfpu", CPU_POWERPC_440_XILINX, 440x5wDFPU,
+ "PowerPC 440 Xilinx 5 With a Double Prec. FPU")
+#if defined(TODO)
+ POWERPC_DEF("440A5", CPU_POWERPC_440A5, 440x5,
+ "PowerPC 440 A5")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("440B4", CPU_POWERPC_440B4, 440x4,
+ "PowerPC 440 B4")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("440G4", CPU_POWERPC_440G4, 440x4,
+ "PowerPC 440 G4")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("440F5", CPU_POWERPC_440F5, 440x5,
+ "PowerPC 440 F5")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("440G5", CPU_POWERPC_440G5, 440x5,
+ "PowerPC 440 G5")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("440H4", CPU_POWERPC_440H4, 440x4,
+ "PowerPC 440H4")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("440H6", CPU_POWERPC_440H6, 440Gx5,
+ "PowerPC 440H6")
+#endif
+ /* PowerPC 440 microcontrollers */
+ POWERPC_DEF("440EPa", CPU_POWERPC_440EPa, 440EP,
+ "PowerPC 440 EPa")
+ POWERPC_DEF("440EPb", CPU_POWERPC_440EPb, 440EP,
+ "PowerPC 440 EPb")
+ POWERPC_DEF("440EPX", CPU_POWERPC_440EPX, 440EP,
+ "PowerPC 440 EPX")
+#if defined(TODO_USER_ONLY)
+ POWERPC_DEF("440GPb", CPU_POWERPC_440GPb, 440GP,
+ "PowerPC 440 GPb")
+#endif
+#if defined(TODO_USER_ONLY)
+ POWERPC_DEF("440GPc", CPU_POWERPC_440GPc, 440GP,
+ "PowerPC 440 GPc")
+#endif
+#if defined(TODO_USER_ONLY)
+ POWERPC_DEF("440GRa", CPU_POWERPC_440GRa, 440x5,
+ "PowerPC 440 GRa")
+#endif
+#if defined(TODO_USER_ONLY)
+ POWERPC_DEF("440GRX", CPU_POWERPC_440GRX, 440x5,
+ "PowerPC 440 GRX")
+#endif
+#if defined(TODO_USER_ONLY)
+ POWERPC_DEF("440GXa", CPU_POWERPC_440GXa, 440EP,
+ "PowerPC 440 GXa")
+#endif
+#if defined(TODO_USER_ONLY)
+ POWERPC_DEF("440GXb", CPU_POWERPC_440GXb, 440EP,
+ "PowerPC 440 GXb")
+#endif
+#if defined(TODO_USER_ONLY)
+ POWERPC_DEF("440GXc", CPU_POWERPC_440GXc, 440EP,
+ "PowerPC 440 GXc")
+#endif
+#if defined(TODO_USER_ONLY)
+ POWERPC_DEF("440GXf", CPU_POWERPC_440GXf, 440EP,
+ "PowerPC 440 GXf")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("440S", CPU_POWERPC_440S, 440,
+ "PowerPC 440 S")
+#endif
+#if defined(TODO_USER_ONLY)
+ POWERPC_DEF("440SP", CPU_POWERPC_440SP, 440EP,
+ "PowerPC 440 SP")
+#endif
+#if defined(TODO_USER_ONLY)
+ POWERPC_DEF("440SP2", CPU_POWERPC_440SP2, 440EP,
+ "PowerPC 440 SP2")
+#endif
+#if defined(TODO_USER_ONLY)
+ POWERPC_DEF("440SPE", CPU_POWERPC_440SPE, 440EP,
+ "PowerPC 440 SPE")
+#endif
+ /* PowerPC 460 family */
+#if defined(TODO)
+ POWERPC_DEF("464", CPU_POWERPC_464, 460,
+ "Generic PowerPC 464")
+#endif
+ /* PowerPC 464 microcontrollers */
+#if defined(TODO)
+ POWERPC_DEF("464H90", CPU_POWERPC_464H90, 460,
+ "PowerPC 464H90")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("464H90F", CPU_POWERPC_464H90F, 460F,
+ "PowerPC 464H90F")
+#endif
+ /* Freescale embedded PowerPC cores */
+ /* MPC5xx family (aka RCPU) */
+#if defined(TODO_USER_ONLY)
+ POWERPC_DEF("MPC5xx", CPU_POWERPC_MPC5xx, MPC5xx,
+ "Generic MPC5xx core")
+#endif
+ /* MPC8xx family (aka PowerQUICC) */
+#if defined(TODO_USER_ONLY)
+ POWERPC_DEF("MPC8xx", CPU_POWERPC_MPC8xx, MPC8xx,
+ "Generic MPC8xx core")
+#endif
+ /* MPC82xx family (aka PowerQUICC-II) */
+ POWERPC_DEF("G2", CPU_POWERPC_G2, G2,
+ "PowerPC G2 core")
+ POWERPC_DEF("G2H4", CPU_POWERPC_G2H4, G2,
+ "PowerPC G2 H4 core")
+ POWERPC_DEF("G2GP", CPU_POWERPC_G2gp, G2,
+ "PowerPC G2 GP core")
+ POWERPC_DEF("G2LS", CPU_POWERPC_G2ls, G2,
+ "PowerPC G2 LS core")
+ POWERPC_DEF("G2HiP3", CPU_POWERPC_G2_HIP3, G2,
+ "PowerPC G2 HiP3 core")
+ POWERPC_DEF("G2HiP4", CPU_POWERPC_G2_HIP4, G2,
+ "PowerPC G2 HiP4 core")
+ POWERPC_DEF("MPC603", CPU_POWERPC_MPC603, 603E,
+ "PowerPC MPC603 core")
+ POWERPC_DEF("G2le", CPU_POWERPC_G2LE, G2LE,
+ "PowerPC G2le core (same as G2 plus little-endian mode support)")
+ POWERPC_DEF("G2leGP", CPU_POWERPC_G2LEgp, G2LE,
+ "PowerPC G2LE GP core")
+ POWERPC_DEF("G2leLS", CPU_POWERPC_G2LEls, G2LE,
+ "PowerPC G2LE LS core")
+ POWERPC_DEF("G2leGP1", CPU_POWERPC_G2LEgp1, G2LE,
+ "PowerPC G2LE GP1 core")
+ POWERPC_DEF("G2leGP3", CPU_POWERPC_G2LEgp3, G2LE,
+ "PowerPC G2LE GP3 core")
+ /* PowerPC G2 microcontrollers */
+#if defined(TODO)
+ POWERPC_DEF_SVR("MPC5121", "MPC5121",
+ CPU_POWERPC_MPC5121, POWERPC_SVR_5121, G2LE)
+#endif
+ POWERPC_DEF_SVR("MPC5200_v10", "MPC5200 v1.0",
+ CPU_POWERPC_MPC5200_v10, POWERPC_SVR_5200_v10, G2LE)
+ POWERPC_DEF_SVR("MPC5200_v11", "MPC5200 v1.1",
+ CPU_POWERPC_MPC5200_v11, POWERPC_SVR_5200_v11, G2LE)
+ POWERPC_DEF_SVR("MPC5200_v12", "MPC5200 v1.2",
+ CPU_POWERPC_MPC5200_v12, POWERPC_SVR_5200_v12, G2LE)
+ POWERPC_DEF_SVR("MPC5200B_v20", "MPC5200B v2.0",
+ CPU_POWERPC_MPC5200B_v20, POWERPC_SVR_5200B_v20, G2LE)
+ POWERPC_DEF_SVR("MPC5200B_v21", "MPC5200B v2.1",
+ CPU_POWERPC_MPC5200B_v21, POWERPC_SVR_5200B_v21, G2LE)
+ /* e200 family */
+#if defined(TODO)
+ POWERPC_DEF_SVR("MPC55xx", "Generic MPC55xx core",
+ CPU_POWERPC_MPC55xx, POWERPC_SVR_55xx, e200)
+#endif
+#if defined(TODO)
+ POWERPC_DEF("e200z0", CPU_POWERPC_e200z0, e200,
+ "PowerPC e200z0 core")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("e200z1", CPU_POWERPC_e200z1, e200,
+ "PowerPC e200z1 core")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("e200z3", CPU_POWERPC_e200z3, e200,
+ "PowerPC e200z3 core")
+#endif
+ POWERPC_DEF("e200z5", CPU_POWERPC_e200z5, e200,
+ "PowerPC e200z5 core")
+ POWERPC_DEF("e200z6", CPU_POWERPC_e200z6, e200,
+ "PowerPC e200z6 core")
+ /* PowerPC e200 microcontrollers */
+#if defined(TODO)
+ POWERPC_DEF_SVR("MPC5514E", "MPC5514E",
+ CPU_POWERPC_MPC5514E, POWERPC_SVR_5514E, e200)
+#endif
+#if defined(TODO)
+ POWERPC_DEF_SVR("MPC5514E_v0", "MPC5514E v0",
+ CPU_POWERPC_MPC5514E_v0, POWERPC_SVR_5514E_v0, e200)
+#endif
+#if defined(TODO)
+ POWERPC_DEF_SVR("MPC5514E_v1", "MPC5514E v1",
+ CPU_POWERPC_MPC5514E_v1, POWERPC_SVR_5514E_v1, e200)
+#endif
+#if defined(TODO)
+ POWERPC_DEF_SVR("MPC5514G", "MPC5514G",
+ CPU_POWERPC_MPC5514G, POWERPC_SVR_5514G, e200)
+#endif
+#if defined(TODO)
+ POWERPC_DEF_SVR("MPC5514G_v0", "MPC5514G v0",
+ CPU_POWERPC_MPC5514G_v0, POWERPC_SVR_5514G_v0, e200)
+#endif
+#if defined(TODO)
+ POWERPC_DEF_SVR("MPC5514G_v1", "MPC5514G v1",
+ CPU_POWERPC_MPC5514G_v1, POWERPC_SVR_5514G_v1, e200)
+#endif
+#if defined(TODO)
+ POWERPC_DEF_SVR("MPC5515S", "MPC5515S",
+ CPU_POWERPC_MPC5515S, POWERPC_SVR_5515S, e200)
+#endif
+#if defined(TODO)
+ POWERPC_DEF_SVR("MPC5516E", "MPC5516E",
+ CPU_POWERPC_MPC5516E, POWERPC_SVR_5516E, e200)
+#endif
+#if defined(TODO)
+ POWERPC_DEF_SVR("MPC5516E_v0", "MPC5516E v0",
+ CPU_POWERPC_MPC5516E_v0, POWERPC_SVR_5516E_v0, e200)
+#endif
+#if defined(TODO)
+ POWERPC_DEF_SVR("MPC5516E_v1", "MPC5516E v1",
+ CPU_POWERPC_MPC5516E_v1, POWERPC_SVR_5516E_v1, e200)
+#endif
+#if defined(TODO)
+ POWERPC_DEF_SVR("MPC5516G", "MPC5516G",
+ CPU_POWERPC_MPC5516G, POWERPC_SVR_5516G, e200)
+#endif
+#if defined(TODO)
+ POWERPC_DEF_SVR("MPC5516G_v0", "MPC5516G v0",
+ CPU_POWERPC_MPC5516G_v0, POWERPC_SVR_5516G_v0, e200)
+#endif
+#if defined(TODO)
+ POWERPC_DEF_SVR("MPC5516G_v1", "MPC5516G v1",
+ CPU_POWERPC_MPC5516G_v1, POWERPC_SVR_5516G_v1, e200)
+#endif
+#if defined(TODO)
+ POWERPC_DEF_SVR("MPC5516S", "MPC5516S",
+ CPU_POWERPC_MPC5516S, POWERPC_SVR_5516S, e200)
+#endif
+#if defined(TODO)
+ POWERPC_DEF_SVR("MPC5533", "MPC5533",
+ CPU_POWERPC_MPC5533, POWERPC_SVR_5533, e200)
+#endif
+#if defined(TODO)
+ POWERPC_DEF_SVR("MPC5534", "MPC5534",
+ CPU_POWERPC_MPC5534, POWERPC_SVR_5534, e200)
+#endif
+#if defined(TODO)
+ POWERPC_DEF_SVR("MPC5553", "MPC5553",
+ CPU_POWERPC_MPC5553, POWERPC_SVR_5553, e200)
+#endif
+#if defined(TODO)
+ POWERPC_DEF_SVR("MPC5554", "MPC5554",
+ CPU_POWERPC_MPC5554, POWERPC_SVR_5554, e200)
+#endif
+#if defined(TODO)
+ POWERPC_DEF_SVR("MPC5561", "MPC5561",
+ CPU_POWERPC_MPC5561, POWERPC_SVR_5561, e200)
+#endif
+#if defined(TODO)
+ POWERPC_DEF_SVR("MPC5565", "MPC5565",
+ CPU_POWERPC_MPC5565, POWERPC_SVR_5565, e200)
+#endif
+#if defined(TODO)
+ POWERPC_DEF_SVR("MPC5566", "MPC5566",
+ CPU_POWERPC_MPC5566, POWERPC_SVR_5566, e200)
+#endif
+#if defined(TODO)
+ POWERPC_DEF_SVR("MPC5567", "MPC5567",
+ CPU_POWERPC_MPC5567, POWERPC_SVR_5567, e200)
+#endif
+ /* e300 family */
+ POWERPC_DEF("e300c1", CPU_POWERPC_e300c1, e300,
+ "PowerPC e300c1 core")
+ POWERPC_DEF("e300c2", CPU_POWERPC_e300c2, e300,
+ "PowerPC e300c2 core")
+ POWERPC_DEF("e300c3", CPU_POWERPC_e300c3, e300,
+ "PowerPC e300c3 core")
+ POWERPC_DEF("e300c4", CPU_POWERPC_e300c4, e300,
+ "PowerPC e300c4 core")
+ /* PowerPC e300 microcontrollers */
+#if defined(TODO)
+ POWERPC_DEF_SVR("MPC8313", "MPC8313",
+ CPU_POWERPC_MPC831x, POWERPC_SVR_8313, e300)
+#endif
+#if defined(TODO)
+ POWERPC_DEF_SVR("MPC8313E", "MPC8313E",
+ CPU_POWERPC_MPC831x, POWERPC_SVR_8313E, e300)
+#endif
+#if defined(TODO)
+ POWERPC_DEF_SVR("MPC8314", "MPC8314",
+ CPU_POWERPC_MPC831x, POWERPC_SVR_8314, e300)
+#endif
+#if defined(TODO)
+ POWERPC_DEF_SVR("MPC8314E", "MPC8314E",
+ CPU_POWERPC_MPC831x, POWERPC_SVR_8314E, e300)
+#endif
+#if defined(TODO)
+ POWERPC_DEF_SVR("MPC8315", "MPC8315",
+ CPU_POWERPC_MPC831x, POWERPC_SVR_8315, e300)
+#endif
+#if defined(TODO)
+ POWERPC_DEF_SVR("MPC8315E", "MPC8315E",
+ CPU_POWERPC_MPC831x, POWERPC_SVR_8315E, e300)
+#endif
+#if defined(TODO)
+ POWERPC_DEF_SVR("MPC8321", "MPC8321",
+ CPU_POWERPC_MPC832x, POWERPC_SVR_8321, e300)
+#endif
+#if defined(TODO)
+ POWERPC_DEF_SVR("MPC8321E", "MPC8321E",
+ CPU_POWERPC_MPC832x, POWERPC_SVR_8321E, e300)
+#endif
+#if defined(TODO)
+ POWERPC_DEF_SVR("MPC8323", "MPC8323",
+ CPU_POWERPC_MPC832x, POWERPC_SVR_8323, e300)
+#endif
+#if defined(TODO)
+ POWERPC_DEF_SVR("MPC8323E", "MPC8323E",
+ CPU_POWERPC_MPC832x, POWERPC_SVR_8323E, e300)
+#endif
+ POWERPC_DEF_SVR("MPC8343", "MPC8343",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8343, e300)
+ POWERPC_DEF_SVR("MPC8343A", "MPC8343A",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8343A, e300)
+ POWERPC_DEF_SVR("MPC8343E", "MPC8343E",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8343E, e300)
+ POWERPC_DEF_SVR("MPC8343EA", "MPC8343EA",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8343EA, e300)
+ POWERPC_DEF_SVR("MPC8347T", "MPC8347T",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8347T, e300)
+ POWERPC_DEF_SVR("MPC8347P", "MPC8347P",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8347P, e300)
+ POWERPC_DEF_SVR("MPC8347AT", "MPC8347AT",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8347AT, e300)
+ POWERPC_DEF_SVR("MPC8347AP", "MPC8347AP",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8347AP, e300)
+ POWERPC_DEF_SVR("MPC8347ET", "MPC8347ET",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8347ET, e300)
+ POWERPC_DEF_SVR("MPC8347EP", "MPC8343EP",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8347EP, e300)
+ POWERPC_DEF_SVR("MPC8347EAT", "MPC8347EAT",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8347EAT, e300)
+ POWERPC_DEF_SVR("MPC8347EAP", "MPC8343EAP",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8347EAP, e300)
+ POWERPC_DEF_SVR("MPC8349", "MPC8349",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8349, e300)
+ POWERPC_DEF_SVR("MPC8349A", "MPC8349A",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8349A, e300)
+ POWERPC_DEF_SVR("MPC8349E", "MPC8349E",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8349E, e300)
+ POWERPC_DEF_SVR("MPC8349EA", "MPC8349EA",
+ CPU_POWERPC_MPC834x, POWERPC_SVR_8349EA, e300)
+#if defined(TODO)
+ POWERPC_DEF_SVR("MPC8358E", "MPC8358E",
+ CPU_POWERPC_MPC835x, POWERPC_SVR_8358E, e300)
+#endif
+#if defined(TODO)
+ POWERPC_DEF_SVR("MPC8360E", "MPC8360E",
+ CPU_POWERPC_MPC836x, POWERPC_SVR_8360E, e300)
+#endif
+ POWERPC_DEF_SVR("MPC8377", "MPC8377",
+ CPU_POWERPC_MPC837x, POWERPC_SVR_8377, e300)
+ POWERPC_DEF_SVR("MPC8377E", "MPC8377E",
+ CPU_POWERPC_MPC837x, POWERPC_SVR_8377E, e300)
+ POWERPC_DEF_SVR("MPC8378", "MPC8378",
+ CPU_POWERPC_MPC837x, POWERPC_SVR_8378, e300)
+ POWERPC_DEF_SVR("MPC8378E", "MPC8378E",
+ CPU_POWERPC_MPC837x, POWERPC_SVR_8378E, e300)
+ POWERPC_DEF_SVR("MPC8379", "MPC8379",
+ CPU_POWERPC_MPC837x, POWERPC_SVR_8379, e300)
+ POWERPC_DEF_SVR("MPC8379E", "MPC8379E",
+ CPU_POWERPC_MPC837x, POWERPC_SVR_8379E, e300)
+ /* e500 family */
+ POWERPC_DEF_SVR("e500_v10", "PowerPC e500 v1.0 core",
+ CPU_POWERPC_e500v1_v10, POWERPC_SVR_E500, e500v1)
+ POWERPC_DEF_SVR("e500_v20", "PowerPC e500 v2.0 core",
+ CPU_POWERPC_e500v1_v20, POWERPC_SVR_E500, e500v1)
+ POWERPC_DEF_SVR("e500v2_v10", "PowerPC e500v2 v1.0 core",
+ CPU_POWERPC_e500v2_v10, POWERPC_SVR_E500, e500v2)
+ POWERPC_DEF_SVR("e500v2_v20", "PowerPC e500v2 v2.0 core",
+ CPU_POWERPC_e500v2_v20, POWERPC_SVR_E500, e500v2)
+ POWERPC_DEF_SVR("e500v2_v21", "PowerPC e500v2 v2.1 core",
+ CPU_POWERPC_e500v2_v21, POWERPC_SVR_E500, e500v2)
+ POWERPC_DEF_SVR("e500v2_v22", "PowerPC e500v2 v2.2 core",
+ CPU_POWERPC_e500v2_v22, POWERPC_SVR_E500, e500v2)
+ POWERPC_DEF_SVR("e500v2_v30", "PowerPC e500v2 v3.0 core",
+ CPU_POWERPC_e500v2_v30, POWERPC_SVR_E500, e500v2)
+ POWERPC_DEF_SVR("e500mc", "e500mc",
+ CPU_POWERPC_e500mc, POWERPC_SVR_E500, e500mc)
+#ifdef TARGET_PPC64
+ POWERPC_DEF_SVR("e5500", "e5500",
+ CPU_POWERPC_e5500, POWERPC_SVR_E500, e5500)
+#endif
+ /* PowerPC e500 microcontrollers */
+ POWERPC_DEF_SVR("MPC8533_v10", "MPC8533 v1.0",
+ CPU_POWERPC_MPC8533_v10, POWERPC_SVR_8533_v10, e500v2)
+ POWERPC_DEF_SVR("MPC8533_v11", "MPC8533 v1.1",
+ CPU_POWERPC_MPC8533_v11, POWERPC_SVR_8533_v11, e500v2)
+ POWERPC_DEF_SVR("MPC8533E_v10", "MPC8533E v1.0",
+ CPU_POWERPC_MPC8533E_v10, POWERPC_SVR_8533E_v10, e500v2)
+ POWERPC_DEF_SVR("MPC8533E_v11", "MPC8533E v1.1",
+ CPU_POWERPC_MPC8533E_v11, POWERPC_SVR_8533E_v11, e500v2)
+ POWERPC_DEF_SVR("MPC8540_v10", "MPC8540 v1.0",
+ CPU_POWERPC_MPC8540_v10, POWERPC_SVR_8540_v10, e500v1)
+ POWERPC_DEF_SVR("MPC8540_v20", "MPC8540 v2.0",
+ CPU_POWERPC_MPC8540_v20, POWERPC_SVR_8540_v20, e500v1)
+ POWERPC_DEF_SVR("MPC8540_v21", "MPC8540 v2.1",
+ CPU_POWERPC_MPC8540_v21, POWERPC_SVR_8540_v21, e500v1)
+ POWERPC_DEF_SVR("MPC8541_v10", "MPC8541 v1.0",
+ CPU_POWERPC_MPC8541_v10, POWERPC_SVR_8541_v10, e500v1)
+ POWERPC_DEF_SVR("MPC8541_v11", "MPC8541 v1.1",
+ CPU_POWERPC_MPC8541_v11, POWERPC_SVR_8541_v11, e500v1)
+ POWERPC_DEF_SVR("MPC8541E_v10", "MPC8541E v1.0",
+ CPU_POWERPC_MPC8541E_v10, POWERPC_SVR_8541E_v10, e500v1)
+ POWERPC_DEF_SVR("MPC8541E_v11", "MPC8541E v1.1",
+ CPU_POWERPC_MPC8541E_v11, POWERPC_SVR_8541E_v11, e500v1)
+ POWERPC_DEF_SVR("MPC8543_v10", "MPC8543 v1.0",
+ CPU_POWERPC_MPC8543_v10, POWERPC_SVR_8543_v10, e500v2)
+ POWERPC_DEF_SVR("MPC8543_v11", "MPC8543 v1.1",
+ CPU_POWERPC_MPC8543_v11, POWERPC_SVR_8543_v11, e500v2)
+ POWERPC_DEF_SVR("MPC8543_v20", "MPC8543 v2.0",
+ CPU_POWERPC_MPC8543_v20, POWERPC_SVR_8543_v20, e500v2)
+ POWERPC_DEF_SVR("MPC8543_v21", "MPC8543 v2.1",
+ CPU_POWERPC_MPC8543_v21, POWERPC_SVR_8543_v21, e500v2)
+ POWERPC_DEF_SVR("MPC8543E_v10", "MPC8543E v1.0",
+ CPU_POWERPC_MPC8543E_v10, POWERPC_SVR_8543E_v10, e500v2)
+ POWERPC_DEF_SVR("MPC8543E_v11", "MPC8543E v1.1",
+ CPU_POWERPC_MPC8543E_v11, POWERPC_SVR_8543E_v11, e500v2)
+ POWERPC_DEF_SVR("MPC8543E_v20", "MPC8543E v2.0",
+ CPU_POWERPC_MPC8543E_v20, POWERPC_SVR_8543E_v20, e500v2)
+ POWERPC_DEF_SVR("MPC8543E_v21", "MPC8543E v2.1",
+ CPU_POWERPC_MPC8543E_v21, POWERPC_SVR_8543E_v21, e500v2)
+ POWERPC_DEF_SVR("MPC8544_v10", "MPC8544 v1.0",
+ CPU_POWERPC_MPC8544_v10, POWERPC_SVR_8544_v10, e500v2)
+ POWERPC_DEF_SVR("MPC8544_v11", "MPC8544 v1.1",
+ CPU_POWERPC_MPC8544_v11, POWERPC_SVR_8544_v11, e500v2)
+ POWERPC_DEF_SVR("MPC8544E_v10", "MPC8544E v1.0",
+ CPU_POWERPC_MPC8544E_v10, POWERPC_SVR_8544E_v10, e500v2)
+ POWERPC_DEF_SVR("MPC8544E_v11", "MPC8544E v1.1",
+ CPU_POWERPC_MPC8544E_v11, POWERPC_SVR_8544E_v11, e500v2)
+ POWERPC_DEF_SVR("MPC8545_v20", "MPC8545 v2.0",
+ CPU_POWERPC_MPC8545_v20, POWERPC_SVR_8545_v20, e500v2)
+ POWERPC_DEF_SVR("MPC8545_v21", "MPC8545 v2.1",
+ CPU_POWERPC_MPC8545_v21, POWERPC_SVR_8545_v21, e500v2)
+ POWERPC_DEF_SVR("MPC8545E_v20", "MPC8545E v2.0",
+ CPU_POWERPC_MPC8545E_v20, POWERPC_SVR_8545E_v20, e500v2)
+ POWERPC_DEF_SVR("MPC8545E_v21", "MPC8545E v2.1",
+ CPU_POWERPC_MPC8545E_v21, POWERPC_SVR_8545E_v21, e500v2)
+ POWERPC_DEF_SVR("MPC8547E_v20", "MPC8547E v2.0",
+ CPU_POWERPC_MPC8547E_v20, POWERPC_SVR_8547E_v20, e500v2)
+ POWERPC_DEF_SVR("MPC8547E_v21", "MPC8547E v2.1",
+ CPU_POWERPC_MPC8547E_v21, POWERPC_SVR_8547E_v21, e500v2)
+ POWERPC_DEF_SVR("MPC8548_v10", "MPC8548 v1.0",
+ CPU_POWERPC_MPC8548_v10, POWERPC_SVR_8548_v10, e500v2)
+ POWERPC_DEF_SVR("MPC8548_v11", "MPC8548 v1.1",
+ CPU_POWERPC_MPC8548_v11, POWERPC_SVR_8548_v11, e500v2)
+ POWERPC_DEF_SVR("MPC8548_v20", "MPC8548 v2.0",
+ CPU_POWERPC_MPC8548_v20, POWERPC_SVR_8548_v20, e500v2)
+ POWERPC_DEF_SVR("MPC8548_v21", "MPC8548 v2.1",
+ CPU_POWERPC_MPC8548_v21, POWERPC_SVR_8548_v21, e500v2)
+ POWERPC_DEF_SVR("MPC8548E_v10", "MPC8548E v1.0",
+ CPU_POWERPC_MPC8548E_v10, POWERPC_SVR_8548E_v10, e500v2)
+ POWERPC_DEF_SVR("MPC8548E_v11", "MPC8548E v1.1",
+ CPU_POWERPC_MPC8548E_v11, POWERPC_SVR_8548E_v11, e500v2)
+ POWERPC_DEF_SVR("MPC8548E_v20", "MPC8548E v2.0",
+ CPU_POWERPC_MPC8548E_v20, POWERPC_SVR_8548E_v20, e500v2)
+ POWERPC_DEF_SVR("MPC8548E_v21", "MPC8548E v2.1",
+ CPU_POWERPC_MPC8548E_v21, POWERPC_SVR_8548E_v21, e500v2)
+ POWERPC_DEF_SVR("MPC8555_v10", "MPC8555 v1.0",
+ CPU_POWERPC_MPC8555_v10, POWERPC_SVR_8555_v10, e500v2)
+ POWERPC_DEF_SVR("MPC8555_v11", "MPC8555 v1.1",
+ CPU_POWERPC_MPC8555_v11, POWERPC_SVR_8555_v11, e500v2)
+ POWERPC_DEF_SVR("MPC8555E_v10", "MPC8555E v1.0",
+ CPU_POWERPC_MPC8555E_v10, POWERPC_SVR_8555E_v10, e500v2)
+ POWERPC_DEF_SVR("MPC8555E_v11", "MPC8555E v1.1",
+ CPU_POWERPC_MPC8555E_v11, POWERPC_SVR_8555E_v11, e500v2)
+ POWERPC_DEF_SVR("MPC8560_v10", "MPC8560 v1.0",
+ CPU_POWERPC_MPC8560_v10, POWERPC_SVR_8560_v10, e500v2)
+ POWERPC_DEF_SVR("MPC8560_v20", "MPC8560 v2.0",
+ CPU_POWERPC_MPC8560_v20, POWERPC_SVR_8560_v20, e500v2)
+ POWERPC_DEF_SVR("MPC8560_v21", "MPC8560 v2.1",
+ CPU_POWERPC_MPC8560_v21, POWERPC_SVR_8560_v21, e500v2)
+ POWERPC_DEF_SVR("MPC8567", "MPC8567",
+ CPU_POWERPC_MPC8567, POWERPC_SVR_8567, e500v2)
+ POWERPC_DEF_SVR("MPC8567E", "MPC8567E",
+ CPU_POWERPC_MPC8567E, POWERPC_SVR_8567E, e500v2)
+ POWERPC_DEF_SVR("MPC8568", "MPC8568",
+ CPU_POWERPC_MPC8568, POWERPC_SVR_8568, e500v2)
+ POWERPC_DEF_SVR("MPC8568E", "MPC8568E",
+ CPU_POWERPC_MPC8568E, POWERPC_SVR_8568E, e500v2)
+ POWERPC_DEF_SVR("MPC8572", "MPC8572",
+ CPU_POWERPC_MPC8572, POWERPC_SVR_8572, e500v2)
+ POWERPC_DEF_SVR("MPC8572E", "MPC8572E",
+ CPU_POWERPC_MPC8572E, POWERPC_SVR_8572E, e500v2)
+ /* e600 family */
+ POWERPC_DEF("e600", CPU_POWERPC_e600, e600,
+ "PowerPC e600 core")
+ /* PowerPC e600 microcontrollers */
+ POWERPC_DEF_SVR("MPC8610", "MPC8610",
+ CPU_POWERPC_MPC8610, POWERPC_SVR_8610, e600)
+ POWERPC_DEF_SVR("MPC8641", "MPC8641",
+ CPU_POWERPC_MPC8641, POWERPC_SVR_8641, e600)
+ POWERPC_DEF_SVR("MPC8641D", "MPC8641D",
+ CPU_POWERPC_MPC8641D, POWERPC_SVR_8641D, e600)
+ /* 32 bits "classic" PowerPC */
+ /* PowerPC 6xx family */
+ POWERPC_DEF("601_v0", CPU_POWERPC_601_v0, 601,
+ "PowerPC 601v0")
+ POWERPC_DEF("601_v1", CPU_POWERPC_601_v1, 601,
+ "PowerPC 601v1")
+ POWERPC_DEF("601_v2", CPU_POWERPC_601_v2, 601v,
+ "PowerPC 601v2")
+ POWERPC_DEF("602", CPU_POWERPC_602, 602,
+ "PowerPC 602")
+ POWERPC_DEF("603", CPU_POWERPC_603, 603,
+ "PowerPC 603")
+ POWERPC_DEF("603e_v1.1", CPU_POWERPC_603E_v11, 603E,
+ "PowerPC 603e v1.1")
+ POWERPC_DEF("603e_v1.2", CPU_POWERPC_603E_v12, 603E,
+ "PowerPC 603e v1.2")
+ POWERPC_DEF("603e_v1.3", CPU_POWERPC_603E_v13, 603E,
+ "PowerPC 603e v1.3")
+ POWERPC_DEF("603e_v1.4", CPU_POWERPC_603E_v14, 603E,
+ "PowerPC 603e v1.4")
+ POWERPC_DEF("603e_v2.2", CPU_POWERPC_603E_v22, 603E,
+ "PowerPC 603e v2.2")
+ POWERPC_DEF("603e_v3", CPU_POWERPC_603E_v3, 603E,
+ "PowerPC 603e v3")
+ POWERPC_DEF("603e_v4", CPU_POWERPC_603E_v4, 603E,
+ "PowerPC 603e v4")
+ POWERPC_DEF("603e_v4.1", CPU_POWERPC_603E_v41, 603E,
+ "PowerPC 603e v4.1")
+ POWERPC_DEF("603e7", CPU_POWERPC_603E7, 603E,
+ "PowerPC 603e (aka PID7)")
+ POWERPC_DEF("603e7t", CPU_POWERPC_603E7t, 603E,
+ "PowerPC 603e7t")
+ POWERPC_DEF("603e7v", CPU_POWERPC_603E7v, 603E,
+ "PowerPC 603e7v")
+ POWERPC_DEF("603e7v1", CPU_POWERPC_603E7v1, 603E,
+ "PowerPC 603e7v1")
+ POWERPC_DEF("603e7v2", CPU_POWERPC_603E7v2, 603E,
+ "PowerPC 603e7v2")
+ POWERPC_DEF("603p", CPU_POWERPC_603P, 603E,
+ "PowerPC 603p (aka PID7v)")
+ POWERPC_DEF("604", CPU_POWERPC_604, 604,
+ "PowerPC 604")
+ POWERPC_DEF("604e_v1.0", CPU_POWERPC_604E_v10, 604E,
+ "PowerPC 604e v1.0")
+ POWERPC_DEF("604e_v2.2", CPU_POWERPC_604E_v22, 604E,
+ "PowerPC 604e v2.2")
+ POWERPC_DEF("604e_v2.4", CPU_POWERPC_604E_v24, 604E,
+ "PowerPC 604e v2.4")
+ POWERPC_DEF("604r", CPU_POWERPC_604R, 604E,
+ "PowerPC 604r (aka PIDA)")
+#if defined(TODO)
+ POWERPC_DEF("604ev", CPU_POWERPC_604EV, 604E,
+ "PowerPC 604ev")
+#endif
+ /* PowerPC 7xx family */
+ POWERPC_DEF("740_v1.0", CPU_POWERPC_7x0_v10, 740,
+ "PowerPC 740 v1.0 (G3)")
+ POWERPC_DEF("750_v1.0", CPU_POWERPC_7x0_v10, 750,
+ "PowerPC 750 v1.0 (G3)")
+ POWERPC_DEF("740_v2.0", CPU_POWERPC_7x0_v20, 740,
+ "PowerPC 740 v2.0 (G3)")
+ POWERPC_DEF("750_v2.0", CPU_POWERPC_7x0_v20, 750,
+ "PowerPC 750 v2.0 (G3)")
+ POWERPC_DEF("740_v2.1", CPU_POWERPC_7x0_v21, 740,
+ "PowerPC 740 v2.1 (G3)")
+ POWERPC_DEF("750_v2.1", CPU_POWERPC_7x0_v21, 750,
+ "PowerPC 750 v2.1 (G3)")
+ POWERPC_DEF("740_v2.2", CPU_POWERPC_7x0_v22, 740,
+ "PowerPC 740 v2.2 (G3)")
+ POWERPC_DEF("750_v2.2", CPU_POWERPC_7x0_v22, 750,
+ "PowerPC 750 v2.2 (G3)")
+ POWERPC_DEF("740_v3.0", CPU_POWERPC_7x0_v30, 740,
+ "PowerPC 740 v3.0 (G3)")
+ POWERPC_DEF("750_v3.0", CPU_POWERPC_7x0_v30, 750,
+ "PowerPC 750 v3.0 (G3)")
+ POWERPC_DEF("740_v3.1", CPU_POWERPC_7x0_v31, 740,
+ "PowerPC 740 v3.1 (G3)")
+ POWERPC_DEF("750_v3.1", CPU_POWERPC_7x0_v31, 750,
+ "PowerPC 750 v3.1 (G3)")
+ POWERPC_DEF("740e", CPU_POWERPC_740E, 740,
+ "PowerPC 740E (G3)")
+ POWERPC_DEF("750e", CPU_POWERPC_750E, 750,
+ "PowerPC 750E (G3)")
+ POWERPC_DEF("740p", CPU_POWERPC_7x0P, 740,
+ "PowerPC 740P (G3)")
+ POWERPC_DEF("750p", CPU_POWERPC_7x0P, 750,
+ "PowerPC 750P (G3)")
+ POWERPC_DEF("750cl_v1.0", CPU_POWERPC_750CL_v10, 750cl,
+ "PowerPC 750CL v1.0")
+ POWERPC_DEF("750cl_v2.0", CPU_POWERPC_750CL_v20, 750cl,
+ "PowerPC 750CL v2.0")
+ POWERPC_DEF("750cx_v1.0", CPU_POWERPC_750CX_v10, 750cx,
+ "PowerPC 750CX v1.0 (G3 embedded)")
+ POWERPC_DEF("750cx_v2.0", CPU_POWERPC_750CX_v20, 750cx,
+ "PowerPC 750CX v2.1 (G3 embedded)")
+ POWERPC_DEF("750cx_v2.1", CPU_POWERPC_750CX_v21, 750cx,
+ "PowerPC 750CX v2.1 (G3 embedded)")
+ POWERPC_DEF("750cx_v2.2", CPU_POWERPC_750CX_v22, 750cx,
+ "PowerPC 750CX v2.2 (G3 embedded)")
+ POWERPC_DEF("750cxe_v2.1", CPU_POWERPC_750CXE_v21, 750cx,
+ "PowerPC 750CXe v2.1 (G3 embedded)")
+ POWERPC_DEF("750cxe_v2.2", CPU_POWERPC_750CXE_v22, 750cx,
+ "PowerPC 750CXe v2.2 (G3 embedded)")
+ POWERPC_DEF("750cxe_v2.3", CPU_POWERPC_750CXE_v23, 750cx,
+ "PowerPC 750CXe v2.3 (G3 embedded)")
+ POWERPC_DEF("750cxe_v2.4", CPU_POWERPC_750CXE_v24, 750cx,
+ "PowerPC 750CXe v2.4 (G3 embedded)")
+ POWERPC_DEF("750cxe_v2.4b", CPU_POWERPC_750CXE_v24b, 750cx,
+ "PowerPC 750CXe v2.4b (G3 embedded)")
+ POWERPC_DEF("750cxe_v3.0", CPU_POWERPC_750CXE_v30, 750cx,
+ "PowerPC 750CXe v3.0 (G3 embedded)")
+ POWERPC_DEF("750cxe_v3.1", CPU_POWERPC_750CXE_v31, 750cx,
+ "PowerPC 750CXe v3.1 (G3 embedded)")
+ POWERPC_DEF("750cxe_v3.1b", CPU_POWERPC_750CXE_v31b, 750cx,
+ "PowerPC 750CXe v3.1b (G3 embedded)")
+ POWERPC_DEF("750cxr", CPU_POWERPC_750CXR, 750cx,
+ "PowerPC 750CXr (G3 embedded)")
+ POWERPC_DEF("750fl", CPU_POWERPC_750FL, 750fx,
+ "PowerPC 750FL (G3 embedded)")
+ POWERPC_DEF("750fx_v1.0", CPU_POWERPC_750FX_v10, 750fx,
+ "PowerPC 750FX v1.0 (G3 embedded)")
+ POWERPC_DEF("750fx_v2.0", CPU_POWERPC_750FX_v20, 750fx,
+ "PowerPC 750FX v2.0 (G3 embedded)")
+ POWERPC_DEF("750fx_v2.1", CPU_POWERPC_750FX_v21, 750fx,
+ "PowerPC 750FX v2.1 (G3 embedded)")
+ POWERPC_DEF("750fx_v2.2", CPU_POWERPC_750FX_v22, 750fx,
+ "PowerPC 750FX v2.2 (G3 embedded)")
+ POWERPC_DEF("750fx_v2.3", CPU_POWERPC_750FX_v23, 750fx,
+ "PowerPC 750FX v2.3 (G3 embedded)")
+ POWERPC_DEF("750gl", CPU_POWERPC_750GL, 750gx,
+ "PowerPC 750GL (G3 embedded)")
+ POWERPC_DEF("750gx_v1.0", CPU_POWERPC_750GX_v10, 750gx,
+ "PowerPC 750GX v1.0 (G3 embedded)")
+ POWERPC_DEF("750gx_v1.1", CPU_POWERPC_750GX_v11, 750gx,
+ "PowerPC 750GX v1.1 (G3 embedded)")
+ POWERPC_DEF("750gx_v1.2", CPU_POWERPC_750GX_v12, 750gx,
+ "PowerPC 750GX v1.2 (G3 embedded)")
+ POWERPC_DEF("750l_v2.0", CPU_POWERPC_750L_v20, 750,
+ "PowerPC 750L v2.0 (G3 embedded)")
+ POWERPC_DEF("750l_v2.1", CPU_POWERPC_750L_v21, 750,
+ "PowerPC 750L v2.1 (G3 embedded)")
+ POWERPC_DEF("750l_v2.2", CPU_POWERPC_750L_v22, 750,
+ "PowerPC 750L v2.2 (G3 embedded)")
+ POWERPC_DEF("750l_v3.0", CPU_POWERPC_750L_v30, 750,
+ "PowerPC 750L v3.0 (G3 embedded)")
+ POWERPC_DEF("750l_v3.2", CPU_POWERPC_750L_v32, 750,
+ "PowerPC 750L v3.2 (G3 embedded)")
+ POWERPC_DEF("745_v1.0", CPU_POWERPC_7x5_v10, 745,
+ "PowerPC 745 v1.0")
+ POWERPC_DEF("755_v1.0", CPU_POWERPC_7x5_v10, 755,
+ "PowerPC 755 v1.0")
+ POWERPC_DEF("745_v1.1", CPU_POWERPC_7x5_v11, 745,
+ "PowerPC 745 v1.1")
+ POWERPC_DEF("755_v1.1", CPU_POWERPC_7x5_v11, 755,
+ "PowerPC 755 v1.1")
+ POWERPC_DEF("745_v2.0", CPU_POWERPC_7x5_v20, 745,
+ "PowerPC 745 v2.0")
+ POWERPC_DEF("755_v2.0", CPU_POWERPC_7x5_v20, 755,
+ "PowerPC 755 v2.0")
+ POWERPC_DEF("745_v2.1", CPU_POWERPC_7x5_v21, 745,
+ "PowerPC 745 v2.1")
+ POWERPC_DEF("755_v2.1", CPU_POWERPC_7x5_v21, 755,
+ "PowerPC 755 v2.1")
+ POWERPC_DEF("745_v2.2", CPU_POWERPC_7x5_v22, 745,
+ "PowerPC 745 v2.2")
+ POWERPC_DEF("755_v2.2", CPU_POWERPC_7x5_v22, 755,
+ "PowerPC 755 v2.2")
+ POWERPC_DEF("745_v2.3", CPU_POWERPC_7x5_v23, 745,
+ "PowerPC 745 v2.3")
+ POWERPC_DEF("755_v2.3", CPU_POWERPC_7x5_v23, 755,
+ "PowerPC 755 v2.3")
+ POWERPC_DEF("745_v2.4", CPU_POWERPC_7x5_v24, 745,
+ "PowerPC 745 v2.4")
+ POWERPC_DEF("755_v2.4", CPU_POWERPC_7x5_v24, 755,
+ "PowerPC 755 v2.4")
+ POWERPC_DEF("745_v2.5", CPU_POWERPC_7x5_v25, 745,
+ "PowerPC 745 v2.5")
+ POWERPC_DEF("755_v2.5", CPU_POWERPC_7x5_v25, 755,
+ "PowerPC 755 v2.5")
+ POWERPC_DEF("745_v2.6", CPU_POWERPC_7x5_v26, 745,
+ "PowerPC 745 v2.6")
+ POWERPC_DEF("755_v2.6", CPU_POWERPC_7x5_v26, 755,
+ "PowerPC 755 v2.6")
+ POWERPC_DEF("745_v2.7", CPU_POWERPC_7x5_v27, 745,
+ "PowerPC 745 v2.7")
+ POWERPC_DEF("755_v2.7", CPU_POWERPC_7x5_v27, 755,
+ "PowerPC 755 v2.7")
+ POWERPC_DEF("745_v2.8", CPU_POWERPC_7x5_v28, 745,
+ "PowerPC 745 v2.8")
+ POWERPC_DEF("755_v2.8", CPU_POWERPC_7x5_v28, 755,
+ "PowerPC 755 v2.8")
+#if defined(TODO)
+ POWERPC_DEF("745p", CPU_POWERPC_7x5P, 745,
+ "PowerPC 745P (G3)")
+ POWERPC_DEF("755p", CPU_POWERPC_7x5P, 755,
+ "PowerPC 755P (G3)")
+#endif
+ /* PowerPC 74xx family */
+ POWERPC_DEF("7400_v1.0", CPU_POWERPC_7400_v10, 7400,
+ "PowerPC 7400 v1.0 (G4)")
+ POWERPC_DEF("7400_v1.1", CPU_POWERPC_7400_v11, 7400,
+ "PowerPC 7400 v1.1 (G4)")
+ POWERPC_DEF("7400_v2.0", CPU_POWERPC_7400_v20, 7400,
+ "PowerPC 7400 v2.0 (G4)")
+ POWERPC_DEF("7400_v2.1", CPU_POWERPC_7400_v21, 7400,
+ "PowerPC 7400 v2.1 (G4)")
+ POWERPC_DEF("7400_v2.2", CPU_POWERPC_7400_v22, 7400,
+ "PowerPC 7400 v2.2 (G4)")
+ POWERPC_DEF("7400_v2.6", CPU_POWERPC_7400_v26, 7400,
+ "PowerPC 7400 v2.6 (G4)")
+ POWERPC_DEF("7400_v2.7", CPU_POWERPC_7400_v27, 7400,
+ "PowerPC 7400 v2.7 (G4)")
+ POWERPC_DEF("7400_v2.8", CPU_POWERPC_7400_v28, 7400,
+ "PowerPC 7400 v2.8 (G4)")
+ POWERPC_DEF("7400_v2.9", CPU_POWERPC_7400_v29, 7400,
+ "PowerPC 7400 v2.9 (G4)")
+ POWERPC_DEF("7410_v1.0", CPU_POWERPC_7410_v10, 7410,
+ "PowerPC 7410 v1.0 (G4)")
+ POWERPC_DEF("7410_v1.1", CPU_POWERPC_7410_v11, 7410,
+ "PowerPC 7410 v1.1 (G4)")
+ POWERPC_DEF("7410_v1.2", CPU_POWERPC_7410_v12, 7410,
+ "PowerPC 7410 v1.2 (G4)")
+ POWERPC_DEF("7410_v1.3", CPU_POWERPC_7410_v13, 7410,
+ "PowerPC 7410 v1.3 (G4)")
+ POWERPC_DEF("7410_v1.4", CPU_POWERPC_7410_v14, 7410,
+ "PowerPC 7410 v1.4 (G4)")
+ POWERPC_DEF("7448_v1.0", CPU_POWERPC_7448_v10, 7400,
+ "PowerPC 7448 v1.0 (G4)")
+ POWERPC_DEF("7448_v1.1", CPU_POWERPC_7448_v11, 7400,
+ "PowerPC 7448 v1.1 (G4)")
+ POWERPC_DEF("7448_v2.0", CPU_POWERPC_7448_v20, 7400,
+ "PowerPC 7448 v2.0 (G4)")
+ POWERPC_DEF("7448_v2.1", CPU_POWERPC_7448_v21, 7400,
+ "PowerPC 7448 v2.1 (G4)")
+ POWERPC_DEF("7450_v1.0", CPU_POWERPC_7450_v10, 7450,
+ "PowerPC 7450 v1.0 (G4)")
+ POWERPC_DEF("7450_v1.1", CPU_POWERPC_7450_v11, 7450,
+ "PowerPC 7450 v1.1 (G4)")
+ POWERPC_DEF("7450_v1.2", CPU_POWERPC_7450_v12, 7450,
+ "PowerPC 7450 v1.2 (G4)")
+ POWERPC_DEF("7450_v2.0", CPU_POWERPC_7450_v20, 7450,
+ "PowerPC 7450 v2.0 (G4)")
+ POWERPC_DEF("7450_v2.1", CPU_POWERPC_7450_v21, 7450,
+ "PowerPC 7450 v2.1 (G4)")
+ POWERPC_DEF("7441_v2.1", CPU_POWERPC_7450_v21, 7440,
+ "PowerPC 7441 v2.1 (G4)")
+ POWERPC_DEF("7441_v2.3", CPU_POWERPC_74x1_v23, 7440,
+ "PowerPC 7441 v2.3 (G4)")
+ POWERPC_DEF("7451_v2.3", CPU_POWERPC_74x1_v23, 7450,
+ "PowerPC 7451 v2.3 (G4)")
+ POWERPC_DEF("7441_v2.10", CPU_POWERPC_74x1_v210, 7440,
+ "PowerPC 7441 v2.10 (G4)")
+ POWERPC_DEF("7451_v2.10", CPU_POWERPC_74x1_v210, 7450,
+ "PowerPC 7451 v2.10 (G4)")
+ POWERPC_DEF("7445_v1.0", CPU_POWERPC_74x5_v10, 7445,
+ "PowerPC 7445 v1.0 (G4)")
+ POWERPC_DEF("7455_v1.0", CPU_POWERPC_74x5_v10, 7455,
+ "PowerPC 7455 v1.0 (G4)")
+ POWERPC_DEF("7445_v2.1", CPU_POWERPC_74x5_v21, 7445,
+ "PowerPC 7445 v2.1 (G4)")
+ POWERPC_DEF("7455_v2.1", CPU_POWERPC_74x5_v21, 7455,
+ "PowerPC 7455 v2.1 (G4)")
+ POWERPC_DEF("7445_v3.2", CPU_POWERPC_74x5_v32, 7445,
+ "PowerPC 7445 v3.2 (G4)")
+ POWERPC_DEF("7455_v3.2", CPU_POWERPC_74x5_v32, 7455,
+ "PowerPC 7455 v3.2 (G4)")
+ POWERPC_DEF("7445_v3.3", CPU_POWERPC_74x5_v33, 7445,
+ "PowerPC 7445 v3.3 (G4)")
+ POWERPC_DEF("7455_v3.3", CPU_POWERPC_74x5_v33, 7455,
+ "PowerPC 7455 v3.3 (G4)")
+ POWERPC_DEF("7445_v3.4", CPU_POWERPC_74x5_v34, 7445,
+ "PowerPC 7445 v3.4 (G4)")
+ POWERPC_DEF("7455_v3.4", CPU_POWERPC_74x5_v34, 7455,
+ "PowerPC 7455 v3.4 (G4)")
+ POWERPC_DEF("7447_v1.0", CPU_POWERPC_74x7_v10, 7445,
+ "PowerPC 7447 v1.0 (G4)")
+ POWERPC_DEF("7457_v1.0", CPU_POWERPC_74x7_v10, 7455,
+ "PowerPC 7457 v1.0 (G4)")
+ POWERPC_DEF("7447_v1.1", CPU_POWERPC_74x7_v11, 7445,
+ "PowerPC 7447 v1.1 (G4)")
+ POWERPC_DEF("7457_v1.1", CPU_POWERPC_74x7_v11, 7455,
+ "PowerPC 7457 v1.1 (G4)")
+ POWERPC_DEF("7457_v1.2", CPU_POWERPC_74x7_v12, 7455,
+ "PowerPC 7457 v1.2 (G4)")
+ POWERPC_DEF("7447A_v1.0", CPU_POWERPC_74x7A_v10, 7445,
+ "PowerPC 7447A v1.0 (G4)")
+ POWERPC_DEF("7457A_v1.0", CPU_POWERPC_74x7A_v10, 7455,
+ "PowerPC 7457A v1.0 (G4)")
+ POWERPC_DEF("7447A_v1.1", CPU_POWERPC_74x7A_v11, 7445,
+ "PowerPC 7447A v1.1 (G4)")
+ POWERPC_DEF("7457A_v1.1", CPU_POWERPC_74x7A_v11, 7455,
+ "PowerPC 7457A v1.1 (G4)")
+ POWERPC_DEF("7447A_v1.2", CPU_POWERPC_74x7A_v12, 7445,
+ "PowerPC 7447A v1.2 (G4)")
+ POWERPC_DEF("7457A_v1.2", CPU_POWERPC_74x7A_v12, 7455,
+ "PowerPC 7457A v1.2 (G4)")
+ /* 64-bit PowerPC */
+#if defined (TARGET_PPC64)
+#if defined(TODO)
+ POWERPC_DEF("620", CPU_POWERPC_620, 620,
+ "PowerPC 620")
+ POWERPC_DEF("630", CPU_POWERPC_630, 630,
+ "PowerPC 630 (POWER3)")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("631", CPU_POWERPC_631, 631,
+ "PowerPC 631 (Power 3+)")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("POWER4", CPU_POWERPC_POWER4, POWER4,
+ "POWER4")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("POWER4+", CPU_POWERPC_POWER4P, POWER4P,
+ "POWER4p")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("POWER5", CPU_POWERPC_POWER5, POWER5,
+ "POWER5")
+#endif
+ POWERPC_DEF("POWER5+_v2.1", CPU_POWERPC_POWER5P_v21, POWER5P,
+ "POWER5+ v2.1")
+#if defined(TODO)
+ POWERPC_DEF("POWER6", CPU_POWERPC_POWER6, POWER6,
+ "POWER6")
+#endif
+ POWERPC_DEF("POWER7_v2.3", CPU_POWERPC_POWER7_v23, POWER7,
+ "POWER7 v2.3")
+ POWERPC_DEF("POWER7+_v2.1", CPU_POWERPC_POWER7P_v21, POWER7,
+ "POWER7+ v2.1")
+ POWERPC_DEF("POWER8E_v2.1", CPU_POWERPC_POWER8E_v21, POWER8,
+ "POWER8E v2.1")
+ POWERPC_DEF("POWER8_v2.0", CPU_POWERPC_POWER8_v20, POWER8,
+ "POWER8 v2.0")
+ POWERPC_DEF("POWER8NVL_v1.0",CPU_POWERPC_POWER8NVL_v10, POWER8,
+ "POWER8NVL v1.0")
+ POWERPC_DEF("970_v2.2", CPU_POWERPC_970_v22, 970,
+ "PowerPC 970 v2.2")
+
+ POWERPC_DEF("POWER9_v1.0", CPU_POWERPC_POWER9_BASE, POWER9,
+ "POWER9 v1.0")
+
+ POWERPC_DEF("970fx_v1.0", CPU_POWERPC_970FX_v10, 970,
+ "PowerPC 970FX v1.0 (G5)")
+ POWERPC_DEF("970fx_v2.0", CPU_POWERPC_970FX_v20, 970,
+ "PowerPC 970FX v2.0 (G5)")
+ POWERPC_DEF("970fx_v2.1", CPU_POWERPC_970FX_v21, 970,
+ "PowerPC 970FX v2.1 (G5)")
+ POWERPC_DEF("970fx_v3.0", CPU_POWERPC_970FX_v30, 970,
+ "PowerPC 970FX v3.0 (G5)")
+ POWERPC_DEF("970fx_v3.1", CPU_POWERPC_970FX_v31, 970,
+ "PowerPC 970FX v3.1 (G5)")
+ POWERPC_DEF("970mp_v1.0", CPU_POWERPC_970MP_v10, 970,
+ "PowerPC 970MP v1.0")
+ POWERPC_DEF("970mp_v1.1", CPU_POWERPC_970MP_v11, 970,
+ "PowerPC 970MP v1.1")
+#if defined(TODO)
+ POWERPC_DEF("Cell", CPU_POWERPC_CELL, 970,
+ "PowerPC Cell")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("Cell_v1.0", CPU_POWERPC_CELL_v10, 970,
+ "PowerPC Cell v1.0")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("Cell_v2.0", CPU_POWERPC_CELL_v20, 970,
+ "PowerPC Cell v2.0")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("Cell_v3.0", CPU_POWERPC_CELL_v30, 970,
+ "PowerPC Cell v3.0")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("Cell_v3.1", CPU_POWERPC_CELL_v31, 970,
+ "PowerPC Cell v3.1")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("Cell_v3.2", CPU_POWERPC_CELL_v32, 970,
+ "PowerPC Cell v3.2")
+#endif
+#if defined(TODO)
+ /* This one seems to support the whole POWER2 instruction set
+ * and the PowerPC 64 one.
+ */
+ /* What about A10 & A30 ? */
+ POWERPC_DEF("RS64", CPU_POWERPC_RS64, RS64,
+ "RS64 (Apache/A35)")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("RS64-II", CPU_POWERPC_RS64II, RS64,
+ "RS64-II (NorthStar/A50)")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("RS64-III", CPU_POWERPC_RS64III, RS64,
+ "RS64-III (Pulsar)")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("RS64-IV", CPU_POWERPC_RS64IV, RS64,
+ "RS64-IV (IceStar/IStar/SStar)")
+#endif
+#endif /* defined (TARGET_PPC64) */
+ /* POWER */
+#if defined(TODO)
+ POWERPC_DEF("POWER", CPU_POWERPC_POWER, POWER,
+ "Original POWER")
+#endif
+#if defined(TODO)
+ POWERPC_DEF("POWER2", CPU_POWERPC_POWER2, POWER,
+ "POWER2")
+#endif
+ /* PA semi cores */
+#if defined(TODO)
+ POWERPC_DEF("PA6T", CPU_POWERPC_PA6T, PA6T,
+ "PA PA6T")
+#endif
+
+
+/***************************************************************************/
+/* PowerPC CPU aliases */
+
+PowerPCCPUAlias ppc_cpu_aliases[] = {
+ { "403", "403GC" },
+ { "405", "405D4" },
+ { "405CR", "405CRc" },
+ { "405GP", "405GPd" },
+ { "405GPe", "405CRc" },
+ { "x2vp7", "x2vp4" },
+ { "x2vp50", "x2vp20" },
+
+ { "440EP", "440EPb" },
+ { "440GP", "440GPc" },
+ { "440GR", "440GRa" },
+ { "440GX", "440GXf" },
+
+ { "RCPU", "MPC5xx" },
+ /* MPC5xx microcontrollers */
+ { "MGT560", "MPC5xx" },
+ { "MPC509", "MPC5xx" },
+ { "MPC533", "MPC5xx" },
+ { "MPC534", "MPC5xx" },
+ { "MPC555", "MPC5xx" },
+ { "MPC556", "MPC5xx" },
+ { "MPC560", "MPC5xx" },
+ { "MPC561", "MPC5xx" },
+ { "MPC562", "MPC5xx" },
+ { "MPC563", "MPC5xx" },
+ { "MPC564", "MPC5xx" },
+ { "MPC565", "MPC5xx" },
+ { "MPC566", "MPC5xx" },
+
+ { "PowerQUICC", "MPC8xx" },
+ /* MPC8xx microcontrollers */
+ { "MGT823", "MPC8xx" },
+ { "MPC821", "MPC8xx" },
+ { "MPC823", "MPC8xx" },
+ { "MPC850", "MPC8xx" },
+ { "MPC852T", "MPC8xx" },
+ { "MPC855T", "MPC8xx" },
+ { "MPC857", "MPC8xx" },
+ { "MPC859", "MPC8xx" },
+ { "MPC860", "MPC8xx" },
+ { "MPC862", "MPC8xx" },
+ { "MPC866", "MPC8xx" },
+ { "MPC870", "MPC8xx" },
+ { "MPC875", "MPC8xx" },
+ { "MPC880", "MPC8xx" },
+ { "MPC885", "MPC8xx" },
+
+ /* PowerPC MPC603 microcontrollers */
+ { "MPC8240", "603" },
+
+ { "MPC52xx", "MPC5200" },
+ { "MPC5200", "MPC5200_v12" },
+ { "MPC5200B", "MPC5200B_v21" },
+
+ { "MPC82xx", "MPC8280" },
+ { "PowerQUICC-II", "MPC82xx" },
+ { "MPC8241", "G2HiP4" },
+ { "MPC8245", "G2HiP4" },
+ { "MPC8247", "G2leGP3" },
+ { "MPC8248", "G2leGP3" },
+ { "MPC8250", "MPC8250_HiP4" },
+ { "MPC8250_HiP3", "G2HiP3" },
+ { "MPC8250_HiP4", "G2HiP4" },
+ { "MPC8255", "MPC8255_HiP4" },
+ { "MPC8255_HiP3", "G2HiP3" },
+ { "MPC8255_HiP4", "G2HiP4" },
+ { "MPC8260", "MPC8260_HiP4" },
+ { "MPC8260_HiP3", "G2HiP3" },
+ { "MPC8260_HiP4", "G2HiP4" },
+ { "MPC8264", "MPC8264_HiP4" },
+ { "MPC8264_HiP3", "G2HiP3" },
+ { "MPC8264_HiP4", "G2HiP4" },
+ { "MPC8265", "MPC8265_HiP4" },
+ { "MPC8265_HiP3", "G2HiP3" },
+ { "MPC8265_HiP4", "G2HiP4" },
+ { "MPC8266", "MPC8266_HiP4" },
+ { "MPC8266_HiP3", "G2HiP3" },
+ { "MPC8266_HiP4", "G2HiP4" },
+ { "MPC8270", "G2leGP3" },
+ { "MPC8271", "G2leGP3" },
+ { "MPC8272", "G2leGP3" },
+ { "MPC8275", "G2leGP3" },
+ { "MPC8280", "G2leGP3" },
+ { "e200", "e200z6" },
+ { "e300", "e300c3" },
+ { "MPC8347", "MPC8347T" },
+ { "MPC8347A", "MPC8347AT" },
+ { "MPC8347E", "MPC8347ET" },
+ { "MPC8347EA", "MPC8347EAT" },
+ { "e500", "e500v2_v22" },
+ { "e500v1", "e500_v20" },
+ { "e500v2", "e500v2_v22" },
+ { "MPC8533", "MPC8533_v11" },
+ { "MPC8533E", "MPC8533E_v11" },
+ { "MPC8540", "MPC8540_v21" },
+ { "MPC8541", "MPC8541_v11" },
+ { "MPC8541E", "MPC8541E_v11" },
+ { "MPC8543", "MPC8543_v21" },
+ { "MPC8543E", "MPC8543E_v21" },
+ { "MPC8544", "MPC8544_v11" },
+ { "MPC8544E", "MPC8544E_v11" },
+ { "MPC8545", "MPC8545_v21" },
+ { "MPC8545E", "MPC8545E_v21" },
+ { "MPC8547E", "MPC8547E_v21" },
+ { "MPC8548", "MPC8548_v21" },
+ { "MPC8548E", "MPC8548E_v21" },
+ { "MPC8555", "MPC8555_v11" },
+ { "MPC8555E", "MPC8555E_v11" },
+ { "MPC8560", "MPC8560_v21" },
+ { "601", "601_v2" },
+ { "601v", "601_v2" },
+ { "Vanilla", "603" },
+ { "603e", "603e_v4.1" },
+ { "Stretch", "603e" },
+ { "Vaillant", "603e7v" },
+ { "603r", "603e7t" },
+ { "Goldeneye", "603r" },
+ { "604e", "604e_v2.4" },
+ { "Sirocco", "604e" },
+ { "Mach5", "604r" },
+ { "740", "740_v3.1" },
+ { "Arthur", "740" },
+ { "750", "750_v3.1" },
+ { "Typhoon", "750" },
+ { "G3", "750" },
+ { "Conan/Doyle", "750p" },
+ { "750cl", "750cl_v2.0" },
+ { "750cx", "750cx_v2.2" },
+ { "750cxe", "750cxe_v3.1b" },
+ { "750fx", "750fx_v2.3" },
+ { "750gx", "750gx_v1.2" },
+ { "750l", "750l_v3.2" },
+ { "LoneStar", "750l" },
+ { "745", "745_v2.8" },
+ { "755", "755_v2.8" },
+ { "Goldfinger", "755" },
+ { "7400", "7400_v2.9" },
+ { "Max", "7400" },
+ { "G4", "7400" },
+ { "7410", "7410_v1.4" },
+ { "Nitro", "7410" },
+ { "7448", "7448_v2.1" },
+ { "7450", "7450_v2.1" },
+ { "Vger", "7450" },
+ { "7441", "7441_v2.3" },
+ { "7451", "7451_v2.3" },
+ { "7445", "7445_v3.2" },
+ { "7455", "7455_v3.2" },
+ { "Apollo6", "7455" },
+ { "7447", "7447_v1.2" },
+ { "7457", "7457_v1.2" },
+ { "Apollo7", "7457" },
+ { "7447A", "7447A_v1.2" },
+ { "7457A", "7457A_v1.2" },
+ { "Apollo7PM", "7457A_v1.0" },
+#if defined(TARGET_PPC64)
+ { "Trident", "620" },
+ { "POWER3", "630" },
+ { "Boxer", "POWER3" },
+ { "Dino", "POWER3" },
+ { "POWER3+", "631" },
+ { "POWER5gr", "POWER5" },
+ { "POWER5+", "POWER5+_v2.1" },
+ { "POWER5gs", "POWER5+_v2.1" },
+ { "POWER7", "POWER7_v2.3" },
+ { "POWER7+", "POWER7+_v2.1" },
+ { "POWER8E", "POWER8E_v2.1" },
+ { "POWER8", "POWER8_v2.0" },
+ { "POWER8NVL", "POWER8NVL_v1.0" },
+ { "POWER9", "POWER9_v1.0" },
+ { "970", "970_v2.2" },
+ { "970fx", "970fx_v3.1" },
+ { "970mp", "970mp_v1.1" },
+ { "Apache", "RS64" },
+ { "A35", "RS64" },
+ { "NorthStar", "RS64-II" },
+ { "A50", "RS64-II" },
+ { "Pulsar", "RS64-III" },
+ { "IceStar", "RS64-IV" },
+ { "IStar", "RS64-IV" },
+ { "SStar", "RS64-IV" },
+#endif
+ { "RIOS", "POWER" },
+ { "RSC", "POWER" },
+ { "RSC3308", "POWER" },
+ { "RSC4608", "POWER" },
+ { "RSC2", "POWER2" },
+ { "P2SC", "POWER2" },
+
+ /* Generic PowerPCs */
+#if defined(TARGET_PPC64)
+ { "ppc64", "970fx" },
+#endif
+ { "ppc32", "604" },
+ { "ppc", "ppc32" },
+ { "default", "ppc" },
+ { NULL, NULL }
+};
diff --git a/target/ppc/cpu-models.h b/target/ppc/cpu-models.h
new file mode 100644
index 0000000000..aafbbd7d5d
--- /dev/null
+++ b/target/ppc/cpu-models.h
@@ -0,0 +1,754 @@
+/*
+ * PowerPC CPU initialization for qemu.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ * Copyright 2011 Freescale Semiconductor, Inc.
+ * Copyright 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef TARGET_PPC_CPU_MODELS_H
+#define TARGET_PPC_CPU_MODELS_H
+
+/**
+ * PowerPCCPUAlias:
+ * @alias: The alias name.
+ * @model: The CPU model @alias refers to.
+ *
+ * A mapping entry from CPU @alias to CPU @model.
+ */
+typedef struct PowerPCCPUAlias {
+ const char *alias;
+ const char *model;
+ ObjectClass *oc;
+} PowerPCCPUAlias;
+
+extern PowerPCCPUAlias ppc_cpu_aliases[];
+
+/*****************************************************************************/
+/* PVR definitions for most known PowerPC */
+enum {
+ /* PowerPC 401 family */
+ /* Generic PowerPC 401 */
+#define CPU_POWERPC_401 CPU_POWERPC_401G2
+ /* PowerPC 401 cores */
+ CPU_POWERPC_401A1 = 0x00210000,
+ CPU_POWERPC_401B2 = 0x00220000,
+#if 0
+ CPU_POWERPC_401B3 = xxx,
+#endif
+ CPU_POWERPC_401C2 = 0x00230000,
+ CPU_POWERPC_401D2 = 0x00240000,
+ CPU_POWERPC_401E2 = 0x00250000,
+ CPU_POWERPC_401F2 = 0x00260000,
+ CPU_POWERPC_401G2 = 0x00270000,
+ /* PowerPC 401 microcontrollers */
+#if 0
+ CPU_POWERPC_401GF = xxx,
+#endif
+#define CPU_POWERPC_IOP480 CPU_POWERPC_401B2
+ /* IBM Processor for Network Resources */
+ CPU_POWERPC_COBRA = 0x10100000, /* XXX: 405 ? */
+#if 0
+ CPU_POWERPC_XIPCHIP = xxx,
+#endif
+ /* PowerPC 403 family */
+ /* PowerPC 403 microcontrollers */
+ CPU_POWERPC_403GA = 0x00200011,
+ CPU_POWERPC_403GB = 0x00200100,
+ CPU_POWERPC_403GC = 0x00200200,
+ CPU_POWERPC_403GCX = 0x00201400,
+#if 0
+ CPU_POWERPC_403GP = xxx,
+#endif
+ /* PowerPC 405 family */
+ /* PowerPC 405 cores */
+#if 0
+ CPU_POWERPC_405A3 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_405A4 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_405B3 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_405B4 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_405C3 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_405C4 = xxx,
+#endif
+ CPU_POWERPC_405D2 = 0x20010000,
+#if 0
+ CPU_POWERPC_405D3 = xxx,
+#endif
+ CPU_POWERPC_405D4 = 0x41810000,
+#if 0
+ CPU_POWERPC_405D5 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_405E4 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_405F4 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_405F5 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_405F6 = xxx,
+#endif
+ /* PowerPC 405 microcontrollers */
+ /* XXX: missing 0x200108a0 */
+ CPU_POWERPC_405CRa = 0x40110041,
+ CPU_POWERPC_405CRb = 0x401100C5,
+ CPU_POWERPC_405CRc = 0x40110145,
+ CPU_POWERPC_405EP = 0x51210950,
+#if 0
+ CPU_POWERPC_405EXr = xxx,
+#endif
+ CPU_POWERPC_405EZ = 0x41511460, /* 0x51210950 ? */
+#if 0
+ CPU_POWERPC_405FX = xxx,
+#endif
+ CPU_POWERPC_405GPa = 0x40110000,
+ CPU_POWERPC_405GPb = 0x40110040,
+ CPU_POWERPC_405GPc = 0x40110082,
+ CPU_POWERPC_405GPd = 0x401100C4,
+ CPU_POWERPC_405GPR = 0x50910951,
+#if 0
+ CPU_POWERPC_405H = xxx,
+#endif
+#if 0
+ CPU_POWERPC_405L = xxx,
+#endif
+ CPU_POWERPC_405LP = 0x41F10000,
+#if 0
+ CPU_POWERPC_405PM = xxx,
+#endif
+#if 0
+ CPU_POWERPC_405PS = xxx,
+#endif
+#if 0
+ CPU_POWERPC_405S = xxx,
+#endif
+ /* IBM network processors */
+ CPU_POWERPC_NPE405H = 0x414100C0,
+ CPU_POWERPC_NPE405H2 = 0x41410140,
+ CPU_POWERPC_NPE405L = 0x416100C0,
+ CPU_POWERPC_NPE4GS3 = 0x40B10000,
+#if 0
+ CPU_POWERPC_NPCxx1 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_NPR161 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_LC77700 = xxx,
+#endif
+ /* IBM STBxxx (PowerPC 401/403/405 core-based microcontrollers) */
+#if 0
+ CPU_POWERPC_STB01000 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_STB01010 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_STB0210 = xxx, /* 401B3 */
+#endif
+ CPU_POWERPC_STB03 = 0x40310000, /* 0x40130000 ? */
+#if 0
+ CPU_POWERPC_STB043 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_STB045 = xxx,
+#endif
+ CPU_POWERPC_STB04 = 0x41810000,
+ CPU_POWERPC_STB25 = 0x51510950,
+#if 0
+ CPU_POWERPC_STB130 = xxx,
+#endif
+ /* Xilinx cores */
+ CPU_POWERPC_X2VP4 = 0x20010820,
+ CPU_POWERPC_X2VP20 = 0x20010860,
+#if 0
+ CPU_POWERPC_ZL10310 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_ZL10311 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_ZL10320 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_ZL10321 = xxx,
+#endif
+ /* PowerPC 440 family */
+ /* Generic PowerPC 440 */
+#define CPU_POWERPC_440 CPU_POWERPC_440GXf
+ /* PowerPC 440 cores */
+#if 0
+ CPU_POWERPC_440A4 = xxx,
+#endif
+ CPU_POWERPC_440_XILINX = 0x7ff21910,
+#if 0
+ CPU_POWERPC_440A5 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_440B4 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_440F5 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_440G5 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_440H4 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_440H6 = xxx,
+#endif
+ /* PowerPC 440 microcontrollers */
+ CPU_POWERPC_440EPa = 0x42221850,
+ CPU_POWERPC_440EPb = 0x422218D3,
+ CPU_POWERPC_440GPb = 0x40120440,
+ CPU_POWERPC_440GPc = 0x40120481,
+#define CPU_POWERPC_440GRa CPU_POWERPC_440EPb
+ CPU_POWERPC_440GRX = 0x200008D0,
+#define CPU_POWERPC_440EPX CPU_POWERPC_440GRX
+ CPU_POWERPC_440GXa = 0x51B21850,
+ CPU_POWERPC_440GXb = 0x51B21851,
+ CPU_POWERPC_440GXc = 0x51B21892,
+ CPU_POWERPC_440GXf = 0x51B21894,
+#if 0
+ CPU_POWERPC_440S = xxx,
+#endif
+ CPU_POWERPC_440SP = 0x53221850,
+ CPU_POWERPC_440SP2 = 0x53221891,
+ CPU_POWERPC_440SPE = 0x53421890,
+ /* PowerPC 460 family */
+#if 0
+ /* Generic PowerPC 464 */
+#define CPU_POWERPC_464 CPU_POWERPC_464H90
+#endif
+ /* PowerPC 464 microcontrollers */
+#if 0
+ CPU_POWERPC_464H90 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_464H90F = xxx,
+#endif
+ /* Freescale embedded PowerPC cores */
+ /* PowerPC MPC 5xx cores (aka RCPU) */
+ CPU_POWERPC_MPC5xx = 0x00020020,
+ /* PowerPC MPC 8xx cores (aka PowerQUICC) */
+ CPU_POWERPC_MPC8xx = 0x00500000,
+ /* G2 cores (aka PowerQUICC-II) */
+ CPU_POWERPC_G2 = 0x00810011,
+ CPU_POWERPC_G2H4 = 0x80811010,
+ CPU_POWERPC_G2gp = 0x80821010,
+ CPU_POWERPC_G2ls = 0x90810010,
+ CPU_POWERPC_MPC603 = 0x00810100,
+ CPU_POWERPC_G2_HIP3 = 0x00810101,
+ CPU_POWERPC_G2_HIP4 = 0x80811014,
+ /* G2_LE core (aka PowerQUICC-II) */
+ CPU_POWERPC_G2LE = 0x80820010,
+ CPU_POWERPC_G2LEgp = 0x80822010,
+ CPU_POWERPC_G2LEls = 0xA0822010,
+ CPU_POWERPC_G2LEgp1 = 0x80822011,
+ CPU_POWERPC_G2LEgp3 = 0x80822013,
+ /* MPC52xx microcontrollers */
+ /* XXX: MPC 5121 ? */
+#define CPU_POWERPC_MPC5200_v10 CPU_POWERPC_G2LEgp1
+#define CPU_POWERPC_MPC5200_v11 CPU_POWERPC_G2LEgp1
+#define CPU_POWERPC_MPC5200_v12 CPU_POWERPC_G2LEgp1
+#define CPU_POWERPC_MPC5200B_v20 CPU_POWERPC_G2LEgp1
+#define CPU_POWERPC_MPC5200B_v21 CPU_POWERPC_G2LEgp1
+ /* e200 family */
+ /* e200 cores */
+#if 0
+ CPU_POWERPC_e200z0 = xxx,
+#endif
+#if 0
+ CPU_POWERPC_e200z1 = xxx,
+#endif
+#if 0 /* ? */
+ CPU_POWERPC_e200z3 = 0x81120000,
+#endif
+ CPU_POWERPC_e200z5 = 0x81000000,
+ CPU_POWERPC_e200z6 = 0x81120000,
+ /* MPC55xx microcontrollers */
+#define CPU_POWERPC_MPC55xx CPU_POWERPC_MPC5567
+#if 0
+#define CPU_POWERPC_MPC5514E CPU_POWERPC_MPC5514E_v1
+#define CPU_POWERPC_MPC5514E_v0 CPU_POWERPC_e200z0
+#define CPU_POWERPC_MPC5514E_v1 CPU_POWERPC_e200z1
+#define CPU_POWERPC_MPC5514G CPU_POWERPC_MPC5514G_v1
+#define CPU_POWERPC_MPC5514G_v0 CPU_POWERPC_e200z0
+#define CPU_POWERPC_MPC5514G_v1 CPU_POWERPC_e200z1
+#define CPU_POWERPC_MPC5515S CPU_POWERPC_e200z1
+#define CPU_POWERPC_MPC5516E CPU_POWERPC_MPC5516E_v1
+#define CPU_POWERPC_MPC5516E_v0 CPU_POWERPC_e200z0
+#define CPU_POWERPC_MPC5516E_v1 CPU_POWERPC_e200z1
+#define CPU_POWERPC_MPC5516G CPU_POWERPC_MPC5516G_v1
+#define CPU_POWERPC_MPC5516G_v0 CPU_POWERPC_e200z0
+#define CPU_POWERPC_MPC5516G_v1 CPU_POWERPC_e200z1
+#define CPU_POWERPC_MPC5516S CPU_POWERPC_e200z1
+#endif
+#if 0
+#define CPU_POWERPC_MPC5533 CPU_POWERPC_e200z3
+#define CPU_POWERPC_MPC5534 CPU_POWERPC_e200z3
+#endif
+#define CPU_POWERPC_MPC5553 CPU_POWERPC_e200z6
+#define CPU_POWERPC_MPC5554 CPU_POWERPC_e200z6
+#define CPU_POWERPC_MPC5561 CPU_POWERPC_e200z6
+#define CPU_POWERPC_MPC5565 CPU_POWERPC_e200z6
+#define CPU_POWERPC_MPC5566 CPU_POWERPC_e200z6
+#define CPU_POWERPC_MPC5567 CPU_POWERPC_e200z6
+ /* e300 family */
+ /* e300 cores */
+ CPU_POWERPC_e300c1 = 0x00830010,
+ CPU_POWERPC_e300c2 = 0x00840010,
+ CPU_POWERPC_e300c3 = 0x00850010,
+ CPU_POWERPC_e300c4 = 0x00860010,
+ /* MPC83xx microcontrollers */
+#define CPU_POWERPC_MPC831x CPU_POWERPC_e300c3
+#define CPU_POWERPC_MPC832x CPU_POWERPC_e300c2
+#define CPU_POWERPC_MPC834x CPU_POWERPC_e300c1
+#define CPU_POWERPC_MPC835x CPU_POWERPC_e300c1
+#define CPU_POWERPC_MPC836x CPU_POWERPC_e300c1
+#define CPU_POWERPC_MPC837x CPU_POWERPC_e300c4
+ /* e500 family */
+ /* e500 cores */
+#define CPU_POWERPC_e500 CPU_POWERPC_e500v2_v22
+ CPU_POWERPC_e500v1_v10 = 0x80200010,
+ CPU_POWERPC_e500v1_v20 = 0x80200020,
+ CPU_POWERPC_e500v2_v10 = 0x80210010,
+ CPU_POWERPC_e500v2_v11 = 0x80210011,
+ CPU_POWERPC_e500v2_v20 = 0x80210020,
+ CPU_POWERPC_e500v2_v21 = 0x80210021,
+ CPU_POWERPC_e500v2_v22 = 0x80210022,
+ CPU_POWERPC_e500v2_v30 = 0x80210030,
+ CPU_POWERPC_e500mc = 0x80230020,
+ CPU_POWERPC_e5500 = 0x80240020,
+ /* MPC85xx microcontrollers */
+#define CPU_POWERPC_MPC8533_v10 CPU_POWERPC_e500v2_v21
+#define CPU_POWERPC_MPC8533_v11 CPU_POWERPC_e500v2_v22
+#define CPU_POWERPC_MPC8533E_v10 CPU_POWERPC_e500v2_v21
+#define CPU_POWERPC_MPC8533E_v11 CPU_POWERPC_e500v2_v22
+#define CPU_POWERPC_MPC8540_v10 CPU_POWERPC_e500v1_v10
+#define CPU_POWERPC_MPC8540_v20 CPU_POWERPC_e500v1_v20
+#define CPU_POWERPC_MPC8540_v21 CPU_POWERPC_e500v1_v20
+#define CPU_POWERPC_MPC8541_v10 CPU_POWERPC_e500v1_v20
+#define CPU_POWERPC_MPC8541_v11 CPU_POWERPC_e500v1_v20
+#define CPU_POWERPC_MPC8541E_v10 CPU_POWERPC_e500v1_v20
+#define CPU_POWERPC_MPC8541E_v11 CPU_POWERPC_e500v1_v20
+#define CPU_POWERPC_MPC8543_v10 CPU_POWERPC_e500v2_v10
+#define CPU_POWERPC_MPC8543_v11 CPU_POWERPC_e500v2_v11
+#define CPU_POWERPC_MPC8543_v20 CPU_POWERPC_e500v2_v20
+#define CPU_POWERPC_MPC8543_v21 CPU_POWERPC_e500v2_v21
+#define CPU_POWERPC_MPC8543E_v10 CPU_POWERPC_e500v2_v10
+#define CPU_POWERPC_MPC8543E_v11 CPU_POWERPC_e500v2_v11
+#define CPU_POWERPC_MPC8543E_v20 CPU_POWERPC_e500v2_v20
+#define CPU_POWERPC_MPC8543E_v21 CPU_POWERPC_e500v2_v21
+#define CPU_POWERPC_MPC8544_v10 CPU_POWERPC_e500v2_v21
+#define CPU_POWERPC_MPC8544_v11 CPU_POWERPC_e500v2_v22
+#define CPU_POWERPC_MPC8544E_v10 CPU_POWERPC_e500v2_v21
+#define CPU_POWERPC_MPC8544E_v11 CPU_POWERPC_e500v2_v22
+#define CPU_POWERPC_MPC8545_v10 CPU_POWERPC_e500v2_v10
+#define CPU_POWERPC_MPC8545_v20 CPU_POWERPC_e500v2_v20
+#define CPU_POWERPC_MPC8545_v21 CPU_POWERPC_e500v2_v21
+#define CPU_POWERPC_MPC8545E_v10 CPU_POWERPC_e500v2_v10
+#define CPU_POWERPC_MPC8545E_v20 CPU_POWERPC_e500v2_v20
+#define CPU_POWERPC_MPC8545E_v21 CPU_POWERPC_e500v2_v21
+#define CPU_POWERPC_MPC8547E_v10 CPU_POWERPC_e500v2_v10
+#define CPU_POWERPC_MPC8547E_v20 CPU_POWERPC_e500v2_v20
+#define CPU_POWERPC_MPC8547E_v21 CPU_POWERPC_e500v2_v21
+#define CPU_POWERPC_MPC8548_v10 CPU_POWERPC_e500v2_v10
+#define CPU_POWERPC_MPC8548_v11 CPU_POWERPC_e500v2_v11
+#define CPU_POWERPC_MPC8548_v20 CPU_POWERPC_e500v2_v20
+#define CPU_POWERPC_MPC8548_v21 CPU_POWERPC_e500v2_v21
+#define CPU_POWERPC_MPC8548E_v10 CPU_POWERPC_e500v2_v10
+#define CPU_POWERPC_MPC8548E_v11 CPU_POWERPC_e500v2_v11
+#define CPU_POWERPC_MPC8548E_v20 CPU_POWERPC_e500v2_v20
+#define CPU_POWERPC_MPC8548E_v21 CPU_POWERPC_e500v2_v21
+#define CPU_POWERPC_MPC8555_v10 CPU_POWERPC_e500v2_v10
+#define CPU_POWERPC_MPC8555_v11 CPU_POWERPC_e500v2_v11
+#define CPU_POWERPC_MPC8555E_v10 CPU_POWERPC_e500v2_v10
+#define CPU_POWERPC_MPC8555E_v11 CPU_POWERPC_e500v2_v11
+#define CPU_POWERPC_MPC8560_v10 CPU_POWERPC_e500v2_v10
+#define CPU_POWERPC_MPC8560_v20 CPU_POWERPC_e500v2_v20
+#define CPU_POWERPC_MPC8560_v21 CPU_POWERPC_e500v2_v21
+#define CPU_POWERPC_MPC8567 CPU_POWERPC_e500v2_v22
+#define CPU_POWERPC_MPC8567E CPU_POWERPC_e500v2_v22
+#define CPU_POWERPC_MPC8568 CPU_POWERPC_e500v2_v22
+#define CPU_POWERPC_MPC8568E CPU_POWERPC_e500v2_v22
+#define CPU_POWERPC_MPC8572 CPU_POWERPC_e500v2_v30
+#define CPU_POWERPC_MPC8572E CPU_POWERPC_e500v2_v30
+ /* e600 family */
+ /* e600 cores */
+ CPU_POWERPC_e600 = 0x80040010,
+ /* MPC86xx microcontrollers */
+#define CPU_POWERPC_MPC8610 CPU_POWERPC_e600
+#define CPU_POWERPC_MPC8641 CPU_POWERPC_e600
+#define CPU_POWERPC_MPC8641D CPU_POWERPC_e600
+ /* PowerPC 6xx cores */
+ CPU_POWERPC_601_v0 = 0x00010001,
+ CPU_POWERPC_601_v1 = 0x00010001,
+ CPU_POWERPC_601_v2 = 0x00010002,
+ CPU_POWERPC_602 = 0x00050100,
+ CPU_POWERPC_603 = 0x00030100,
+ CPU_POWERPC_603E_v11 = 0x00060101,
+ CPU_POWERPC_603E_v12 = 0x00060102,
+ CPU_POWERPC_603E_v13 = 0x00060103,
+ CPU_POWERPC_603E_v14 = 0x00060104,
+ CPU_POWERPC_603E_v22 = 0x00060202,
+ CPU_POWERPC_603E_v3 = 0x00060300,
+ CPU_POWERPC_603E_v4 = 0x00060400,
+ CPU_POWERPC_603E_v41 = 0x00060401,
+ CPU_POWERPC_603E7t = 0x00071201,
+ CPU_POWERPC_603E7v = 0x00070100,
+ CPU_POWERPC_603E7v1 = 0x00070101,
+ CPU_POWERPC_603E7v2 = 0x00070201,
+ CPU_POWERPC_603E7 = 0x00070200,
+ CPU_POWERPC_603P = 0x00070000,
+ /* XXX: missing 0x00040303 (604) */
+ CPU_POWERPC_604 = 0x00040103,
+ /* XXX: missing 0x00091203 */
+ /* XXX: missing 0x00092110 */
+ /* XXX: missing 0x00092120 */
+ CPU_POWERPC_604E_v10 = 0x00090100,
+ CPU_POWERPC_604E_v22 = 0x00090202,
+ CPU_POWERPC_604E_v24 = 0x00090204,
+ /* XXX: missing 0x000a0100 */
+ /* XXX: missing 0x00093102 */
+ CPU_POWERPC_604R = 0x000a0101,
+#if 0
+ CPU_POWERPC_604EV = xxx, /* XXX: same as 604R ? */
+#endif
+ /* PowerPC 740/750 cores (aka G3) */
+ /* XXX: missing 0x00084202 */
+ CPU_POWERPC_7x0_v10 = 0x00080100,
+ CPU_POWERPC_7x0_v20 = 0x00080200,
+ CPU_POWERPC_7x0_v21 = 0x00080201,
+ CPU_POWERPC_7x0_v22 = 0x00080202,
+ CPU_POWERPC_7x0_v30 = 0x00080300,
+ CPU_POWERPC_7x0_v31 = 0x00080301,
+ CPU_POWERPC_740E = 0x00080100,
+ CPU_POWERPC_750E = 0x00080200,
+ CPU_POWERPC_7x0P = 0x10080000,
+ /* XXX: missing 0x00087010 (CL ?) */
+ CPU_POWERPC_750CL_v10 = 0x00087200,
+ CPU_POWERPC_750CL_v20 = 0x00087210, /* aka rev E */
+ CPU_POWERPC_750CX_v10 = 0x00082100,
+ CPU_POWERPC_750CX_v20 = 0x00082200,
+ CPU_POWERPC_750CX_v21 = 0x00082201,
+ CPU_POWERPC_750CX_v22 = 0x00082202,
+ CPU_POWERPC_750CXE_v21 = 0x00082211,
+ CPU_POWERPC_750CXE_v22 = 0x00082212,
+ CPU_POWERPC_750CXE_v23 = 0x00082213,
+ CPU_POWERPC_750CXE_v24 = 0x00082214,
+ CPU_POWERPC_750CXE_v24b = 0x00083214,
+ CPU_POWERPC_750CXE_v30 = 0x00082310,
+ CPU_POWERPC_750CXE_v31 = 0x00082311,
+ CPU_POWERPC_750CXE_v31b = 0x00083311,
+ CPU_POWERPC_750CXR = 0x00083410,
+ CPU_POWERPC_750FL = 0x70000203,
+ CPU_POWERPC_750FX_v10 = 0x70000100,
+ CPU_POWERPC_750FX_v20 = 0x70000200,
+ CPU_POWERPC_750FX_v21 = 0x70000201,
+ CPU_POWERPC_750FX_v22 = 0x70000202,
+ CPU_POWERPC_750FX_v23 = 0x70000203,
+ CPU_POWERPC_750GL = 0x70020102,
+ CPU_POWERPC_750GX_v10 = 0x70020100,
+ CPU_POWERPC_750GX_v11 = 0x70020101,
+ CPU_POWERPC_750GX_v12 = 0x70020102,
+ CPU_POWERPC_750L_v20 = 0x00088200,
+ CPU_POWERPC_750L_v21 = 0x00088201,
+ CPU_POWERPC_750L_v22 = 0x00088202,
+ CPU_POWERPC_750L_v30 = 0x00088300,
+ CPU_POWERPC_750L_v32 = 0x00088302,
+ /* PowerPC 745/755 cores */
+ CPU_POWERPC_7x5_v10 = 0x00083100,
+ CPU_POWERPC_7x5_v11 = 0x00083101,
+ CPU_POWERPC_7x5_v20 = 0x00083200,
+ CPU_POWERPC_7x5_v21 = 0x00083201,
+ CPU_POWERPC_7x5_v22 = 0x00083202, /* aka D */
+ CPU_POWERPC_7x5_v23 = 0x00083203, /* aka E */
+ CPU_POWERPC_7x5_v24 = 0x00083204,
+ CPU_POWERPC_7x5_v25 = 0x00083205,
+ CPU_POWERPC_7x5_v26 = 0x00083206,
+ CPU_POWERPC_7x5_v27 = 0x00083207,
+ CPU_POWERPC_7x5_v28 = 0x00083208,
+#if 0
+ CPU_POWERPC_7x5P = xxx,
+#endif
+ /* PowerPC 74xx cores (aka G4) */
+ /* XXX: missing 0x000C1101 */
+ CPU_POWERPC_7400_v10 = 0x000C0100,
+ CPU_POWERPC_7400_v11 = 0x000C0101,
+ CPU_POWERPC_7400_v20 = 0x000C0200,
+ CPU_POWERPC_7400_v21 = 0x000C0201,
+ CPU_POWERPC_7400_v22 = 0x000C0202,
+ CPU_POWERPC_7400_v26 = 0x000C0206,
+ CPU_POWERPC_7400_v27 = 0x000C0207,
+ CPU_POWERPC_7400_v28 = 0x000C0208,
+ CPU_POWERPC_7400_v29 = 0x000C0209,
+ CPU_POWERPC_7410_v10 = 0x800C1100,
+ CPU_POWERPC_7410_v11 = 0x800C1101,
+ CPU_POWERPC_7410_v12 = 0x800C1102, /* aka C */
+ CPU_POWERPC_7410_v13 = 0x800C1103, /* aka D */
+ CPU_POWERPC_7410_v14 = 0x800C1104, /* aka E */
+ CPU_POWERPC_7448_v10 = 0x80040100,
+ CPU_POWERPC_7448_v11 = 0x80040101,
+ CPU_POWERPC_7448_v20 = 0x80040200,
+ CPU_POWERPC_7448_v21 = 0x80040201,
+ CPU_POWERPC_7450_v10 = 0x80000100,
+ CPU_POWERPC_7450_v11 = 0x80000101,
+ CPU_POWERPC_7450_v12 = 0x80000102,
+ CPU_POWERPC_7450_v20 = 0x80000200, /* aka A, B, C, D: 2.04 */
+ CPU_POWERPC_7450_v21 = 0x80000201, /* aka E */
+ CPU_POWERPC_74x1_v23 = 0x80000203, /* aka G: 2.3 */
+ /* XXX: this entry might be a bug in some documentation */
+ CPU_POWERPC_74x1_v210 = 0x80000210, /* aka G: 2.3 ? */
+ CPU_POWERPC_74x5_v10 = 0x80010100,
+ /* XXX: missing 0x80010200 */
+ CPU_POWERPC_74x5_v21 = 0x80010201, /* aka C: 2.1 */
+ CPU_POWERPC_74x5_v32 = 0x80010302,
+ CPU_POWERPC_74x5_v33 = 0x80010303, /* aka F: 3.3 */
+ CPU_POWERPC_74x5_v34 = 0x80010304, /* aka G: 3.4 */
+ CPU_POWERPC_74x7_v10 = 0x80020100, /* aka A: 1.0 */
+ CPU_POWERPC_74x7_v11 = 0x80020101, /* aka B: 1.1 */
+ CPU_POWERPC_74x7_v12 = 0x80020102, /* aka C: 1.2 */
+ CPU_POWERPC_74x7A_v10 = 0x80030100, /* aka A: 1.0 */
+ CPU_POWERPC_74x7A_v11 = 0x80030101, /* aka B: 1.1 */
+ CPU_POWERPC_74x7A_v12 = 0x80030102, /* aka C: 1.2 */
+ /* 64-bit PowerPC */
+#if defined(TARGET_PPC64)
+ CPU_POWERPC_620 = 0x00140000,
+ CPU_POWERPC_630 = 0x00400000,
+ CPU_POWERPC_631 = 0x00410104,
+ CPU_POWERPC_POWER4 = 0x00350000,
+ CPU_POWERPC_POWER4P = 0x00380000,
+ /* XXX: missing 0x003A0201 */
+ CPU_POWERPC_POWER5 = 0x003A0203,
+ CPU_POWERPC_POWER5P_v21 = 0x003B0201,
+ CPU_POWERPC_POWER6 = 0x003E0000,
+ CPU_POWERPC_POWER_SERVER_MASK = 0xFFFF0000,
+ CPU_POWERPC_POWER7_BASE = 0x003F0000,
+ CPU_POWERPC_POWER7_v23 = 0x003F0203,
+ CPU_POWERPC_POWER7P_BASE = 0x004A0000,
+ CPU_POWERPC_POWER7P_v21 = 0x004A0201,
+ CPU_POWERPC_POWER8E_BASE = 0x004B0000,
+ CPU_POWERPC_POWER8E_v21 = 0x004B0201,
+ CPU_POWERPC_POWER8_BASE = 0x004D0000,
+ CPU_POWERPC_POWER8_v20 = 0x004D0200,
+ CPU_POWERPC_POWER8NVL_BASE = 0x004C0000,
+ CPU_POWERPC_POWER8NVL_v10 = 0x004C0100,
+ CPU_POWERPC_POWER9_BASE = 0x004E0000,
+ CPU_POWERPC_970_v22 = 0x00390202,
+ CPU_POWERPC_970FX_v10 = 0x00391100,
+ CPU_POWERPC_970FX_v20 = 0x003C0200,
+ CPU_POWERPC_970FX_v21 = 0x003C0201,
+ CPU_POWERPC_970FX_v30 = 0x003C0300,
+ CPU_POWERPC_970FX_v31 = 0x003C0301,
+ CPU_POWERPC_970MP_v10 = 0x00440100,
+ CPU_POWERPC_970MP_v11 = 0x00440101,
+#define CPU_POWERPC_CELL CPU_POWERPC_CELL_v32
+ CPU_POWERPC_CELL_v10 = 0x00700100,
+ CPU_POWERPC_CELL_v20 = 0x00700400,
+ CPU_POWERPC_CELL_v30 = 0x00700500,
+ CPU_POWERPC_CELL_v31 = 0x00700501,
+#define CPU_POWERPC_CELL_v32 CPU_POWERPC_CELL_v31
+ CPU_POWERPC_RS64 = 0x00330000,
+ CPU_POWERPC_RS64II = 0x00340000,
+ CPU_POWERPC_RS64III = 0x00360000,
+ CPU_POWERPC_RS64IV = 0x00370000,
+#endif /* defined(TARGET_PPC64) */
+ /* Original POWER */
+ /* XXX: should be POWER (RIOS), RSC3308, RSC4608,
+ * POWER2 (RIOS2) & RSC2 (P2SC) here
+ */
+#if 0
+ CPU_POWER = xxx, /* 0x20000 ? 0x30000 for RSC ? */
+#endif
+#if 0
+ CPU_POWER2 = xxx, /* 0x40000 ? */
+#endif
+ /* PA Semi core */
+ CPU_POWERPC_PA6T = 0x00900000,
+};
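/* Illustrative sketch, not part of the patch: in the values above the high
 * 16 bits of a PVR identify the processor family and the low 16 bits its
 * revision, e.g. splitting the 603e v4.1 entry (hypothetical helper):
 */
static inline void pvr_split_example(uint16_t *family, uint16_t *revision)
{
    uint32_t pvr = CPU_POWERPC_603E_v41;    /* 0x00060401 */
    *family   = pvr >> 16;                  /* 0x0006: 603e        */
    *revision = pvr & 0xffff;               /* 0x0401: version 4.1 */
}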
+
+/* Logical PVR definitions for sPAPR */
+enum {
+ CPU_POWERPC_LOGICAL_2_04 = 0x0F000001,
+ CPU_POWERPC_LOGICAL_2_05 = 0x0F000002,
+ CPU_POWERPC_LOGICAL_2_06 = 0x0F000003,
+ CPU_POWERPC_LOGICAL_2_06_PLUS = 0x0F100003,
+ CPU_POWERPC_LOGICAL_2_07 = 0x0F000004,
+ CPU_POWERPC_LOGICAL_2_08 = 0x0F000005,
+};
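/* Illustrative sketch, not part of the patch: the logical PVRs above all
 * share a 0x0F...... prefix, so a hypothetical "is this a logical rather
 * than a real PVR" check could be:
 */
static inline bool pvr_is_logical_example(uint32_t pvr)
{
    return (pvr >> 24) == 0x0F;
}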
+
+/* System version register (used on MPC 8xxx) */
+enum {
+ POWERPC_SVR_NONE = 0x00000000,
+ POWERPC_SVR_5200_v10 = 0x80110010,
+ POWERPC_SVR_5200_v11 = 0x80110011,
+ POWERPC_SVR_5200_v12 = 0x80110012,
+ POWERPC_SVR_5200B_v20 = 0x80110020,
+ POWERPC_SVR_5200B_v21 = 0x80110021,
+#define POWERPC_SVR_55xx POWERPC_SVR_5567
+#if 0
+ POWERPC_SVR_5533 = xxx,
+#endif
+#if 0
+ POWERPC_SVR_5534 = xxx,
+#endif
+#if 0
+ POWERPC_SVR_5553 = xxx,
+#endif
+#if 0
+ POWERPC_SVR_5554 = xxx,
+#endif
+#if 0
+ POWERPC_SVR_5561 = xxx,
+#endif
+#if 0
+ POWERPC_SVR_5565 = xxx,
+#endif
+#if 0
+ POWERPC_SVR_5566 = xxx,
+#endif
+#if 0
+ POWERPC_SVR_5567 = xxx,
+#endif
+#if 0
+ POWERPC_SVR_8313 = xxx,
+#endif
+#if 0
+ POWERPC_SVR_8313E = xxx,
+#endif
+#if 0
+ POWERPC_SVR_8314 = xxx,
+#endif
+#if 0
+ POWERPC_SVR_8314E = xxx,
+#endif
+#if 0
+ POWERPC_SVR_8315 = xxx,
+#endif
+#if 0
+ POWERPC_SVR_8315E = xxx,
+#endif
+#if 0
+ POWERPC_SVR_8321 = xxx,
+#endif
+#if 0
+ POWERPC_SVR_8321E = xxx,
+#endif
+#if 0
+ POWERPC_SVR_8323 = xxx,
+#endif
+#if 0
+ POWERPC_SVR_8323E = xxx,
+#endif
+ POWERPC_SVR_8343 = 0x80570010,
+ POWERPC_SVR_8343A = 0x80570030,
+ POWERPC_SVR_8343E = 0x80560010,
+ POWERPC_SVR_8343EA = 0x80560030,
+ POWERPC_SVR_8347P = 0x80550010, /* PBGA package */
+ POWERPC_SVR_8347T = 0x80530010, /* TBGA package */
+ POWERPC_SVR_8347AP = 0x80550030, /* PBGA package */
+ POWERPC_SVR_8347AT = 0x80530030, /* TBGA package */
+ POWERPC_SVR_8347EP = 0x80540010, /* PBGA package */
+ POWERPC_SVR_8347ET = 0x80520010, /* TBGA package */
+ POWERPC_SVR_8347EAP = 0x80540030, /* PBGA package */
+ POWERPC_SVR_8347EAT = 0x80520030, /* TBGA package */
+ POWERPC_SVR_8349 = 0x80510010,
+ POWERPC_SVR_8349A = 0x80510030,
+ POWERPC_SVR_8349E = 0x80500010,
+ POWERPC_SVR_8349EA = 0x80500030,
+#if 0
+ POWERPC_SVR_8358E = xxx,
+#endif
+#if 0
+ POWERPC_SVR_8360E = xxx,
+#endif
+#define POWERPC_SVR_E500 0x40000000
+ POWERPC_SVR_8377 = 0x80C70010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8377E = 0x80C60010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8378 = 0x80C50010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8378E = 0x80C40010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8379 = 0x80C30010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8379E = 0x80C00010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8533_v10 = 0x80340010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8533_v11 = 0x80340011 | POWERPC_SVR_E500,
+ POWERPC_SVR_8533E_v10 = 0x803C0010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8533E_v11 = 0x803C0011 | POWERPC_SVR_E500,
+ POWERPC_SVR_8540_v10 = 0x80300010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8540_v20 = 0x80300020 | POWERPC_SVR_E500,
+ POWERPC_SVR_8540_v21 = 0x80300021 | POWERPC_SVR_E500,
+ POWERPC_SVR_8541_v10 = 0x80720010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8541_v11 = 0x80720011 | POWERPC_SVR_E500,
+ POWERPC_SVR_8541E_v10 = 0x807A0010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8541E_v11 = 0x807A0011 | POWERPC_SVR_E500,
+ POWERPC_SVR_8543_v10 = 0x80320010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8543_v11 = 0x80320011 | POWERPC_SVR_E500,
+ POWERPC_SVR_8543_v20 = 0x80320020 | POWERPC_SVR_E500,
+ POWERPC_SVR_8543_v21 = 0x80320021 | POWERPC_SVR_E500,
+ POWERPC_SVR_8543E_v10 = 0x803A0010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8543E_v11 = 0x803A0011 | POWERPC_SVR_E500,
+ POWERPC_SVR_8543E_v20 = 0x803A0020 | POWERPC_SVR_E500,
+ POWERPC_SVR_8543E_v21 = 0x803A0021 | POWERPC_SVR_E500,
+ POWERPC_SVR_8544_v10 = 0x80340110 | POWERPC_SVR_E500,
+ POWERPC_SVR_8544_v11 = 0x80340111 | POWERPC_SVR_E500,
+ POWERPC_SVR_8544E_v10 = 0x803C0110 | POWERPC_SVR_E500,
+ POWERPC_SVR_8544E_v11 = 0x803C0111 | POWERPC_SVR_E500,
+ POWERPC_SVR_8545_v20 = 0x80310220 | POWERPC_SVR_E500,
+ POWERPC_SVR_8545_v21 = 0x80310221 | POWERPC_SVR_E500,
+ POWERPC_SVR_8545E_v20 = 0x80390220 | POWERPC_SVR_E500,
+ POWERPC_SVR_8545E_v21 = 0x80390221 | POWERPC_SVR_E500,
+ POWERPC_SVR_8547E_v20 = 0x80390120 | POWERPC_SVR_E500,
+ POWERPC_SVR_8547E_v21 = 0x80390121 | POWERPC_SVR_E500,
+ POWERPC_SVR_8548_v10 = 0x80310010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8548_v11 = 0x80310011 | POWERPC_SVR_E500,
+ POWERPC_SVR_8548_v20 = 0x80310020 | POWERPC_SVR_E500,
+ POWERPC_SVR_8548_v21 = 0x80310021 | POWERPC_SVR_E500,
+ POWERPC_SVR_8548E_v10 = 0x80390010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8548E_v11 = 0x80390011 | POWERPC_SVR_E500,
+ POWERPC_SVR_8548E_v20 = 0x80390020 | POWERPC_SVR_E500,
+ POWERPC_SVR_8548E_v21 = 0x80390021 | POWERPC_SVR_E500,
+ POWERPC_SVR_8555_v10 = 0x80710010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8555_v11 = 0x80710011 | POWERPC_SVR_E500,
+ POWERPC_SVR_8555E_v10 = 0x80790010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8555E_v11 = 0x80790011 | POWERPC_SVR_E500,
+ POWERPC_SVR_8560_v10 = 0x80700010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8560_v20 = 0x80700020 | POWERPC_SVR_E500,
+ POWERPC_SVR_8560_v21 = 0x80700021 | POWERPC_SVR_E500,
+ POWERPC_SVR_8567 = 0x80750111 | POWERPC_SVR_E500,
+ POWERPC_SVR_8567E = 0x807D0111 | POWERPC_SVR_E500,
+ POWERPC_SVR_8568 = 0x80750011 | POWERPC_SVR_E500,
+ POWERPC_SVR_8568E = 0x807D0011 | POWERPC_SVR_E500,
+ POWERPC_SVR_8572 = 0x80E00010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8572E = 0x80E80010 | POWERPC_SVR_E500,
+ POWERPC_SVR_8610 = 0x80A00011,
+ POWERPC_SVR_8641 = 0x80900021,
+ POWERPC_SVR_8641D = 0x80900121,
+};
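/* Illustrative sketch, not part of the patch: POWERPC_SVR_E500 is ORed into
 * the e500-family entries above as a tag bit, so consuming code can test for
 * it (and presumably mask it off again to recover the raw SVR value):
 */
static inline bool svr_is_e500_example(uint32_t svr)
{
    return (svr & POWERPC_SVR_E500) != 0;   /* raw SVR: svr & ~POWERPC_SVR_E500 */
}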
+
+#endif
diff --git a/target/ppc/cpu-qom.h b/target/ppc/cpu-qom.h
new file mode 100644
index 0000000000..d46c31a15d
--- /dev/null
+++ b/target/ppc/cpu-qom.h
@@ -0,0 +1,219 @@
+/*
+ * QEMU PowerPC CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+#ifndef QEMU_PPC_CPU_QOM_H
+#define QEMU_PPC_CPU_QOM_H
+
+#include "qom/cpu.h"
+
+#ifdef TARGET_PPC64
+#define TYPE_POWERPC_CPU "powerpc64-cpu"
+#elif defined(TARGET_PPCEMB)
+#define TYPE_POWERPC_CPU "embedded-powerpc-cpu"
+#else
+#define TYPE_POWERPC_CPU "powerpc-cpu"
+#endif
+
+#define POWERPC_CPU_CLASS(klass) \
+ OBJECT_CLASS_CHECK(PowerPCCPUClass, (klass), TYPE_POWERPC_CPU)
+#define POWERPC_CPU(obj) \
+ OBJECT_CHECK(PowerPCCPU, (obj), TYPE_POWERPC_CPU)
+#define POWERPC_CPU_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(PowerPCCPUClass, (obj), TYPE_POWERPC_CPU)
+
+typedef struct PowerPCCPU PowerPCCPU;
+typedef struct CPUPPCState CPUPPCState;
+typedef struct ppc_tb_t ppc_tb_t;
+typedef struct ppc_dcr_t ppc_dcr_t;
+
+/*****************************************************************************/
+/* MMU model */
+typedef enum powerpc_mmu_t powerpc_mmu_t;
+enum powerpc_mmu_t {
+ POWERPC_MMU_UNKNOWN = 0x00000000,
+ /* Standard 32-bit PowerPC MMU */
+ POWERPC_MMU_32B = 0x00000001,
+ /* PowerPC 6xx MMU with software TLB */
+ POWERPC_MMU_SOFT_6xx = 0x00000002,
+ /* PowerPC 74xx MMU with software TLB */
+ POWERPC_MMU_SOFT_74xx = 0x00000003,
+ /* PowerPC 4xx MMU with software TLB */
+ POWERPC_MMU_SOFT_4xx = 0x00000004,
+ /* PowerPC 4xx MMU with software TLB and zones protections */
+ POWERPC_MMU_SOFT_4xx_Z = 0x00000005,
+ /* PowerPC MMU in real mode only */
+ POWERPC_MMU_REAL = 0x00000006,
+ /* Freescale MPC8xx MMU model */
+ POWERPC_MMU_MPC8xx = 0x00000007,
+ /* BookE MMU model */
+ POWERPC_MMU_BOOKE = 0x00000008,
+ /* BookE 2.06 MMU model */
+ POWERPC_MMU_BOOKE206 = 0x00000009,
+ /* PowerPC 601 MMU model (specific BATs format) */
+ POWERPC_MMU_601 = 0x0000000A,
+#define POWERPC_MMU_64 0x00010000
+#define POWERPC_MMU_1TSEG 0x00020000
+#define POWERPC_MMU_AMR 0x00040000
+#define POWERPC_MMU_64K 0x00080000
+ /* 64-bit PowerPC MMU */
+ POWERPC_MMU_64B = POWERPC_MMU_64 | 0x00000001,
+ /* Architecture 2.03 and later (has LPCR) */
+ POWERPC_MMU_2_03 = POWERPC_MMU_64 | 0x00000002,
+ /* Architecture 2.06 variant */
+ POWERPC_MMU_2_06 = POWERPC_MMU_64 | POWERPC_MMU_1TSEG
+ | POWERPC_MMU_64K
+ | POWERPC_MMU_AMR | 0x00000003,
+ /* Architecture 2.06 "degraded" (no 1T segments) */
+ POWERPC_MMU_2_06a = POWERPC_MMU_64 | POWERPC_MMU_AMR
+ | 0x00000003,
+ /* Architecture 2.07 variant */
+ POWERPC_MMU_2_07 = POWERPC_MMU_64 | POWERPC_MMU_1TSEG
+ | POWERPC_MMU_64K
+ | POWERPC_MMU_AMR | 0x00000004,
+ /* FIXME Add POWERPC_MMU_3_00 defines */
+ /* Architecture 2.07 "degraded" (no 1T segments) */
+ POWERPC_MMU_2_07a = POWERPC_MMU_64 | POWERPC_MMU_AMR
+ | 0x00000004,
+};
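/* Illustrative sketch, not part of the patch: POWERPC_MMU_64 and the other
 * flag bits are folded into the enum values, so a property of a model can be
 * tested with a mask instead of enumerating every variant:
 */
static inline bool mmu_is_64bit_example(powerpc_mmu_t mmu_model)
{
    return (mmu_model & POWERPC_MMU_64) != 0;
}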
+
+/*****************************************************************************/
+/* Exception model */
+typedef enum powerpc_excp_t powerpc_excp_t;
+enum powerpc_excp_t {
+ POWERPC_EXCP_UNKNOWN = 0,
+ /* Standard PowerPC exception model */
+ POWERPC_EXCP_STD,
+ /* PowerPC 40x exception model */
+ POWERPC_EXCP_40x,
+ /* PowerPC 601 exception model */
+ POWERPC_EXCP_601,
+ /* PowerPC 602 exception model */
+ POWERPC_EXCP_602,
+ /* PowerPC 603 exception model */
+ POWERPC_EXCP_603,
+ /* PowerPC 603e exception model */
+ POWERPC_EXCP_603E,
+ /* PowerPC G2 exception model */
+ POWERPC_EXCP_G2,
+ /* PowerPC 604 exception model */
+ POWERPC_EXCP_604,
+ /* PowerPC 7x0 exception model */
+ POWERPC_EXCP_7x0,
+ /* PowerPC 7x5 exception model */
+ POWERPC_EXCP_7x5,
+ /* PowerPC 74xx exception model */
+ POWERPC_EXCP_74xx,
+ /* BookE exception model */
+ POWERPC_EXCP_BOOKE,
+ /* PowerPC 970 exception model */
+ POWERPC_EXCP_970,
+ /* POWER7 exception model */
+ POWERPC_EXCP_POWER7,
+ /* POWER8 exception model */
+ POWERPC_EXCP_POWER8,
+};
+
+/*****************************************************************************/
+/* PM instructions */
+typedef enum {
+ PPC_PM_DOZE,
+ PPC_PM_NAP,
+ PPC_PM_SLEEP,
+ PPC_PM_RVWINKLE,
+} powerpc_pm_insn_t;
+
+/*****************************************************************************/
+/* Input pins model */
+typedef enum powerpc_input_t powerpc_input_t;
+enum powerpc_input_t {
+ PPC_FLAGS_INPUT_UNKNOWN = 0,
+ /* PowerPC 6xx bus */
+ PPC_FLAGS_INPUT_6xx,
+ /* BookE bus */
+ PPC_FLAGS_INPUT_BookE,
+ /* PowerPC 405 bus */
+ PPC_FLAGS_INPUT_405,
+ /* PowerPC 970 bus */
+ PPC_FLAGS_INPUT_970,
+ /* PowerPC POWER7 bus */
+ PPC_FLAGS_INPUT_POWER7,
+ /* PowerPC 401 bus */
+ PPC_FLAGS_INPUT_401,
+ /* Freescale RCPU bus */
+ PPC_FLAGS_INPUT_RCPU,
+};
+
+struct ppc_segment_page_sizes;
+
+/**
+ * PowerPCCPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ *
+ * A PowerPC CPU model.
+ */
+typedef struct PowerPCCPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+ DeviceUnrealize parent_unrealize;
+ void (*parent_reset)(CPUState *cpu);
+
+ uint32_t pvr;
+ bool (*pvr_match)(struct PowerPCCPUClass *pcc, uint32_t pvr);
+ uint64_t pcr_mask; /* Available bits in PCR register */
+ uint64_t pcr_supported; /* Bits for supported PowerISA versions */
+ uint32_t svr;
+ uint64_t insns_flags;
+ uint64_t insns_flags2;
+ uint64_t msr_mask;
+ powerpc_mmu_t mmu_model;
+ powerpc_excp_t excp_model;
+ powerpc_input_t bus_model;
+ uint32_t flags;
+ int bfd_mach;
+ uint32_t l1_dcache_size, l1_icache_size;
+ const struct ppc_segment_page_sizes *sps;
+ void (*init_proc)(CPUPPCState *env);
+ int (*check_pow)(CPUPPCState *env);
+ int (*handle_mmu_fault)(PowerPCCPU *cpu, vaddr eaddr, int rwx, int mmu_idx);
+ bool (*interrupts_big_endian)(PowerPCCPU *cpu);
+} PowerPCCPUClass;
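/* Illustrative sketch, not part of the patch (assumes TARGET_PPC64 and that
 * the PVR constants from the models header are in scope): a pvr_match
 * callback might accept a whole family by masking off the revision bits:
 */
static bool power8_pvr_match_example(PowerPCCPUClass *pcc, uint32_t pvr)
{
    return (pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER8_BASE;
}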
+
+#ifndef CONFIG_USER_ONLY
+typedef struct PPCTimebase {
+ uint64_t guest_timebase;
+ int64_t time_of_the_day_ns;
+} PPCTimebase;
+
+extern const struct VMStateDescription vmstate_ppc_timebase;
+
+#define VMSTATE_PPC_TIMEBASE_V(_field, _state, _version) { \
+ .name = (stringify(_field)), \
+ .version_id = (_version), \
+ .size = sizeof(PPCTimebase), \
+ .vmsd = &vmstate_ppc_timebase, \
+ .flags = VMS_STRUCT, \
+ .offset = vmstate_offset_value(_state, _field, PPCTimebase), \
+}
+#endif
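/* Illustrative sketch, not part of the patch: VMSTATE_PPC_TIMEBASE_V is meant
 * to appear in a VMStateDescription field list; the state struct and field
 * name below are hypothetical:
 */
static const VMStateField timebase_fields_example[] = {
    VMSTATE_PPC_TIMEBASE_V(tb, HypotheticalMachineState, 1),
    VMSTATE_END_OF_LIST()
};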
+
+#endif
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
new file mode 100644
index 0000000000..2a50c43689
--- /dev/null
+++ b/target/ppc/cpu.h
@@ -0,0 +1,2454 @@
+/*
+ * PowerPC emulation cpu definitions for qemu.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef PPC_CPU_H
+#define PPC_CPU_H
+
+#include "qemu-common.h"
+
+//#define PPC_EMULATE_32BITS_HYPV
+
+#if defined (TARGET_PPC64)
+/* PowerPC 64 definitions */
+#define TARGET_LONG_BITS 64
+#define TARGET_PAGE_BITS 12
+
+/* Note that the official physical address space is 62 - M bits, where M
+ is implementation dependent. I've not looked up M for the set of
+ cpus we emulate at the system level. */
+#define TARGET_PHYS_ADDR_SPACE_BITS 62
+
+/* Note that the PPC environment architecture talks about 80-bit virtual
+ addresses, with segmentation. Obviously that's not all visible to a
+ single process, which is all we're concerned with here. */
+#ifdef TARGET_ABI32
+# define TARGET_VIRT_ADDR_SPACE_BITS 32
+#else
+# define TARGET_VIRT_ADDR_SPACE_BITS 64
+#endif
+
+#define TARGET_PAGE_BITS_64K 16
+#define TARGET_PAGE_BITS_16M 24
+
+#else /* defined (TARGET_PPC64) */
+/* PowerPC 32 definitions */
+#define TARGET_LONG_BITS 32
+
+#if defined(TARGET_PPCEMB)
+/* Specific definitions for PowerPC embedded */
+/* BookE has a 36-bit physical address space */
+#if defined(CONFIG_USER_ONLY)
+/* It looks like a lot of Linux programs assume the page size
+ * is 4 kB. This is evil, but we have to deal with it...
+ */
+#define TARGET_PAGE_BITS 12
+#else /* defined(CONFIG_USER_ONLY) */
+/* Pages can be as small as 1 kB */
+#define TARGET_PAGE_BITS 10
+#endif /* defined(CONFIG_USER_ONLY) */
+#else /* defined(TARGET_PPCEMB) */
+/* "standard" PowerPC 32 definitions */
+#define TARGET_PAGE_BITS 12
+#endif /* defined(TARGET_PPCEMB) */
+
+#define TARGET_PHYS_ADDR_SPACE_BITS 36
+#define TARGET_VIRT_ADDR_SPACE_BITS 32
+
+#endif /* defined (TARGET_PPC64) */
+
+#define CPUArchState struct CPUPPCState
+
+#include "exec/cpu-defs.h"
+#include "cpu-qom.h"
+#include "fpu/softfloat.h"
+
+#if defined (TARGET_PPC64)
+#define PPC_ELF_MACHINE EM_PPC64
+#else
+#define PPC_ELF_MACHINE EM_PPC
+#endif
+
+/*****************************************************************************/
+/* Exception vectors definitions */
+enum {
+ POWERPC_EXCP_NONE = -1,
+ /* The first 64 entries are used by the PowerPC embedded specification */
+ POWERPC_EXCP_CRITICAL = 0, /* Critical input */
+ POWERPC_EXCP_MCHECK = 1, /* Machine check exception */
+ POWERPC_EXCP_DSI = 2, /* Data storage exception */
+ POWERPC_EXCP_ISI = 3, /* Instruction storage exception */
+ POWERPC_EXCP_EXTERNAL = 4, /* External input */
+ POWERPC_EXCP_ALIGN = 5, /* Alignment exception */
+ POWERPC_EXCP_PROGRAM = 6, /* Program exception */
+ POWERPC_EXCP_FPU = 7, /* Floating-point unavailable exception */
+ POWERPC_EXCP_SYSCALL = 8, /* System call exception */
+ POWERPC_EXCP_APU = 9, /* Auxiliary processor unavailable */
+ POWERPC_EXCP_DECR = 10, /* Decrementer exception */
+ POWERPC_EXCP_FIT = 11, /* Fixed-interval timer interrupt */
+ POWERPC_EXCP_WDT = 12, /* Watchdog timer interrupt */
+ POWERPC_EXCP_DTLB = 13, /* Data TLB miss */
+ POWERPC_EXCP_ITLB = 14, /* Instruction TLB miss */
+ POWERPC_EXCP_DEBUG = 15, /* Debug interrupt */
+ /* Vectors 16 to 31 are reserved */
+ POWERPC_EXCP_SPEU = 32, /* SPE/embedded floating-point unavailable */
+ POWERPC_EXCP_EFPDI = 33, /* Embedded floating-point data interrupt */
+ POWERPC_EXCP_EFPRI = 34, /* Embedded floating-point round interrupt */
+ POWERPC_EXCP_EPERFM = 35, /* Embedded performance monitor interrupt */
+ POWERPC_EXCP_DOORI = 36, /* Embedded doorbell interrupt */
+ POWERPC_EXCP_DOORCI = 37, /* Embedded doorbell critical interrupt */
+ POWERPC_EXCP_GDOORI = 38, /* Embedded guest doorbell interrupt */
+ POWERPC_EXCP_GDOORCI = 39, /* Embedded guest doorbell critical interrupt */
+ POWERPC_EXCP_HYPPRIV = 41, /* Embedded hypervisor priv instruction */
+ /* Vectors 42 to 63 are reserved */
+ /* Exceptions defined in the PowerPC server specification */
+ /* Server doorbell variants */
+#define POWERPC_EXCP_SDOOR POWERPC_EXCP_GDOORI
+#define POWERPC_EXCP_SDOOR_HV POWERPC_EXCP_DOORI
+ POWERPC_EXCP_RESET = 64, /* System reset exception */
+ POWERPC_EXCP_DSEG = 65, /* Data segment exception */
+ POWERPC_EXCP_ISEG = 66, /* Instruction segment exception */
+ POWERPC_EXCP_HDECR = 67, /* Hypervisor decrementer exception */
+ POWERPC_EXCP_TRACE = 68, /* Trace exception */
+ POWERPC_EXCP_HDSI = 69, /* Hypervisor data storage exception */
+ POWERPC_EXCP_HISI = 70, /* Hypervisor instruction storage exception */
+ POWERPC_EXCP_HDSEG = 71, /* Hypervisor data segment exception */
+ POWERPC_EXCP_HISEG = 72, /* Hypervisor instruction segment exception */
+ POWERPC_EXCP_VPU = 73, /* Vector unavailable exception */
+ /* 40x specific exceptions */
+ POWERPC_EXCP_PIT = 74, /* Programmable interval timer interrupt */
+ /* 601 specific exceptions */
+ POWERPC_EXCP_IO = 75, /* IO error exception */
+ POWERPC_EXCP_RUNM = 76, /* Run mode exception */
+ /* 602 specific exceptions */
+ POWERPC_EXCP_EMUL = 77, /* Emulation trap exception */
+ /* 602/603 specific exceptions */
+ POWERPC_EXCP_IFTLB = 78, /* Instruction fetch TLB miss */
+ POWERPC_EXCP_DLTLB = 79, /* Data load TLB miss */
+ POWERPC_EXCP_DSTLB = 80, /* Data store TLB miss */
+ /* Exceptions available on most PowerPC */
+ POWERPC_EXCP_FPA = 81, /* Floating-point assist exception */
+ POWERPC_EXCP_DABR = 82, /* Data address breakpoint */
+ POWERPC_EXCP_IABR = 83, /* Instruction address breakpoint */
+ POWERPC_EXCP_SMI = 84, /* System management interrupt */
+ POWERPC_EXCP_PERFM = 85, /* Embedded performance monitor interrupt */
+ /* 7xx/74xx specific exceptions */
+ POWERPC_EXCP_THERM = 86, /* Thermal interrupt */
+ /* 74xx specific exceptions */
+ POWERPC_EXCP_VPUA = 87, /* Vector assist exception */
+ /* 970FX specific exceptions */
+ POWERPC_EXCP_SOFTP = 88, /* Soft patch exception */
+ POWERPC_EXCP_MAINT = 89, /* Maintenance exception */
+ /* Freescale embedded cores specific exceptions */
+ POWERPC_EXCP_MEXTBR = 90, /* Maskable external breakpoint */
+ POWERPC_EXCP_NMEXTBR = 91, /* Non-maskable external breakpoint */
+ POWERPC_EXCP_ITLBE = 92, /* Instruction TLB error */
+ POWERPC_EXCP_DTLBE = 93, /* Data TLB error */
+ /* VSX Unavailable (Power ISA 2.06 and later) */
+ POWERPC_EXCP_VSXU = 94, /* VSX Unavailable */
+ POWERPC_EXCP_FU = 95, /* Facility Unavailable */
+ /* Additional ISA 2.06 and later server exceptions */
+ POWERPC_EXCP_HV_EMU = 96, /* HV emulation assistance */
+ POWERPC_EXCP_HV_MAINT = 97, /* HMI */
+ POWERPC_EXCP_HV_FU = 98, /* Hypervisor Facility unavailable */
+ /* EOL */
+ POWERPC_EXCP_NB = 99,
+ /* QEMU exceptions: used internally during code translation */
+ POWERPC_EXCP_STOP = 0x200, /* stop translation */
+ POWERPC_EXCP_BRANCH = 0x201, /* branch instruction */
+ /* QEMU exceptions: special cases we want to stop translation */
+ POWERPC_EXCP_SYNC = 0x202, /* context synchronizing instruction */
+ POWERPC_EXCP_SYSCALL_USER = 0x203, /* System call in user mode only */
+ POWERPC_EXCP_STCX = 0x204 /* Conditional stores in user mode */
+};
+
+/* Exceptions error codes */
+enum {
+ /* Exception subtypes for POWERPC_EXCP_ALIGN */
+ POWERPC_EXCP_ALIGN_FP = 0x01, /* FP alignment exception */
+ POWERPC_EXCP_ALIGN_LST = 0x02, /* Unaligned mult/extern load/store */
+ POWERPC_EXCP_ALIGN_LE = 0x03, /* Multiple little-endian access */
+ POWERPC_EXCP_ALIGN_PROT = 0x04, /* Access cross protection boundary */
+ POWERPC_EXCP_ALIGN_BAT = 0x05, /* Access cross a BAT/seg boundary */
+ POWERPC_EXCP_ALIGN_CACHE = 0x06, /* Impossible dcbz access */
+ /* Exception subtypes for POWERPC_EXCP_PROGRAM */
+ /* FP exceptions */
+ POWERPC_EXCP_FP = 0x10,
+ POWERPC_EXCP_FP_OX = 0x01, /* FP overflow */
+ POWERPC_EXCP_FP_UX = 0x02, /* FP underflow */
+ POWERPC_EXCP_FP_ZX = 0x03, /* FP divide by zero */
+ POWERPC_EXCP_FP_XX = 0x04, /* FP inexact */
+ POWERPC_EXCP_FP_VXSNAN = 0x05, /* FP invalid SNaN op */
+ POWERPC_EXCP_FP_VXISI = 0x06, /* FP invalid infinite subtraction */
+ POWERPC_EXCP_FP_VXIDI = 0x07, /* FP invalid infinite divide */
+ POWERPC_EXCP_FP_VXZDZ = 0x08, /* FP invalid zero divide */
+ POWERPC_EXCP_FP_VXIMZ = 0x09, /* FP invalid infinite * zero */
+ POWERPC_EXCP_FP_VXVC = 0x0A, /* FP invalid compare */
+ POWERPC_EXCP_FP_VXSOFT = 0x0B, /* FP invalid operation */
+ POWERPC_EXCP_FP_VXSQRT = 0x0C, /* FP invalid square root */
+ POWERPC_EXCP_FP_VXCVI = 0x0D, /* FP invalid integer conversion */
+ /* Invalid instruction */
+ POWERPC_EXCP_INVAL = 0x20,
+ POWERPC_EXCP_INVAL_INVAL = 0x01, /* Invalid instruction */
+ POWERPC_EXCP_INVAL_LSWX = 0x02, /* Invalid lswx instruction */
+ POWERPC_EXCP_INVAL_SPR = 0x03, /* Invalid SPR access */
+ POWERPC_EXCP_INVAL_FP = 0x04, /* Unimplemented mandatory fp instr */
+ /* Privileged instruction */
+ POWERPC_EXCP_PRIV = 0x30,
+ POWERPC_EXCP_PRIV_OPC = 0x01, /* Privileged operation exception */
+ POWERPC_EXCP_PRIV_REG = 0x02, /* Privileged register exception */
+ /* Trap */
+ POWERPC_EXCP_TRAP = 0x40,
+};
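/* Illustrative sketch, not part of the patch (assumes the full CPUPPCState
 * definition below is visible): the group code and its subtype above appear
 * intended to be combined by OR when an exception is raised, e.g. reporting
 * an invalid SPR access:
 */
static inline void report_invalid_spr_example(CPUPPCState *env)
{
    env->error_code = POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_SPR;
}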
+
+#define PPC_INPUT(env) (env->bus_model)
+
+/*****************************************************************************/
+typedef struct opc_handler_t opc_handler_t;
+
+/*****************************************************************************/
+/* Types used to describe some PowerPC registers */
+typedef struct DisasContext DisasContext;
+typedef struct ppc_spr_t ppc_spr_t;
+typedef union ppc_avr_t ppc_avr_t;
+typedef union ppc_tlb_t ppc_tlb_t;
+
+/* SPR access micro-ops generations callbacks */
+struct ppc_spr_t {
+ void (*uea_read)(DisasContext *ctx, int gpr_num, int spr_num);
+ void (*uea_write)(DisasContext *ctx, int spr_num, int gpr_num);
+#if !defined(CONFIG_USER_ONLY)
+ void (*oea_read)(DisasContext *ctx, int gpr_num, int spr_num);
+ void (*oea_write)(DisasContext *ctx, int spr_num, int gpr_num);
+ void (*hea_read)(DisasContext *ctx, int gpr_num, int spr_num);
+ void (*hea_write)(DisasContext *ctx, int spr_num, int gpr_num);
+#endif
+ const char *name;
+ target_ulong default_value;
+#ifdef CONFIG_KVM
+ /* We (ab)use the fact that every SPR exposed through the KVM
+ * ONE_REG interface has an id with the KVM_REG_PPC prefix set,
+ * so an id of 0 means "don't sync this SPR with KVM" */
+ uint64_t one_reg_id;
+#endif
+};
+
+/* Altivec registers (128 bits) */
+union ppc_avr_t {
+ float32 f[4];
+ uint8_t u8[16];
+ uint16_t u16[8];
+ uint32_t u32[4];
+ int8_t s8[16];
+ int16_t s16[8];
+ int32_t s32[4];
+ uint64_t u64[2];
+ int64_t s64[2];
+#ifdef CONFIG_INT128
+ __uint128_t u128;
+#endif
+};
+
+#if !defined(CONFIG_USER_ONLY)
+/* Software TLB cache */
+typedef struct ppc6xx_tlb_t ppc6xx_tlb_t;
+struct ppc6xx_tlb_t {
+ target_ulong pte0;
+ target_ulong pte1;
+ target_ulong EPN;
+};
+
+typedef struct ppcemb_tlb_t ppcemb_tlb_t;
+struct ppcemb_tlb_t {
+ uint64_t RPN;
+ target_ulong EPN;
+ target_ulong PID;
+ target_ulong size;
+ uint32_t prot;
+ uint32_t attr; /* Storage attributes */
+};
+
+typedef struct ppcmas_tlb_t {
+ uint32_t mas8;
+ uint32_t mas1;
+ uint64_t mas2;
+ uint64_t mas7_3;
+} ppcmas_tlb_t;
+
+union ppc_tlb_t {
+ ppc6xx_tlb_t *tlb6;
+ ppcemb_tlb_t *tlbe;
+ ppcmas_tlb_t *tlbm;
+};
+
+/* possible TLB variants */
+#define TLB_NONE 0
+#define TLB_6XX 1
+#define TLB_EMB 2
+#define TLB_MAS 3
+#endif
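/* Illustrative sketch, not part of the patch (assumes the full CPUPPCState
 * definition below is visible): env->tlb_type selects which member of the
 * ppc_tlb_t union is live, e.g. for a BookE 2.06 (MAS-style) TLB:
 */
static inline ppcmas_tlb_t *booke206_tlb_entry_example(CPUPPCState *env, int i)
{
    return (env->tlb_type == TLB_MAS) ? &env->tlb.tlbm[i] : NULL;
}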
+
+#define SDR_32_HTABORG 0xFFFF0000UL
+#define SDR_32_HTABMASK 0x000001FFUL
+
+#if defined(TARGET_PPC64)
+#define SDR_64_HTABORG 0xFFFFFFFFFFFC0000ULL
+#define SDR_64_HTABSIZE 0x000000000000001FULL
+#endif /* defined(TARGET_PPC64) */
+
+typedef struct ppc_slb_t ppc_slb_t;
+struct ppc_slb_t {
+ uint64_t esid;
+ uint64_t vsid;
+ const struct ppc_one_seg_page_size *sps;
+};
+
+#define MAX_SLB_ENTRIES 64
+#define SEGMENT_SHIFT_256M 28
+#define SEGMENT_MASK_256M (~((1ULL << SEGMENT_SHIFT_256M) - 1))
+
+#define SEGMENT_SHIFT_1T 40
+#define SEGMENT_MASK_1T (~((1ULL << SEGMENT_SHIFT_1T) - 1))
+
+
+/*****************************************************************************/
+/* Machine state register bits definition */
+#define MSR_SF 63 /* Sixty-four-bit mode hflags */
+#define MSR_TAG 62 /* Tag-active mode (POWERx ?) */
+#define MSR_ISF 61 /* Sixty-four-bit interrupt mode on 630 */
+#define MSR_SHV 60 /* hypervisor state hflags */
+#define MSR_TS0 34 /* Transactional state, 2 bits (Book3s) */
+#define MSR_TS1 33
+#define MSR_TM 32 /* Transactional Memory Available (Book3s) */
+#define MSR_CM 31 /* Computation mode for BookE hflags */
+#define MSR_ICM 30 /* Interrupt computation mode for BookE */
+#define MSR_THV 29 /* hypervisor state for 32-bit PowerPC hflags */
+#define MSR_GS 28 /* guest state for BookE */
+#define MSR_UCLE 26 /* User-mode cache lock enable for BookE */
+#define MSR_VR 25 /* altivec available x hflags */
+#define MSR_SPE 25 /* SPE enable for BookE x hflags */
+#define MSR_AP 23 /* Access privilege state on 602 hflags */
+#define MSR_VSX 23 /* Vector Scalar Extension (ISA 2.06 and later) x hflags */
+#define MSR_SA 22 /* Supervisor access mode on 602 hflags */
+#define MSR_KEY 19 /* key bit on 603e */
+#define MSR_POW 18 /* Power management */
+#define MSR_TGPR 17 /* TGPR usage on 602/603 x */
+#define MSR_CE 17 /* Critical interrupt enable on embedded PowerPC x */
+#define MSR_ILE 16 /* Interrupt little-endian mode */
+#define MSR_EE 15 /* External interrupt enable */
+#define MSR_PR 14 /* Problem state hflags */
+#define MSR_FP 13 /* Floating point available hflags */
+#define MSR_ME 12 /* Machine check interrupt enable */
+#define MSR_FE0 11 /* Floating point exception mode 0 hflags */
+#define MSR_SE 10 /* Single-step trace enable x hflags */
+#define MSR_DWE 10 /* Debug wait enable on 405 x */
+#define MSR_UBLE 10 /* User BTB lock enable on e500 x */
+#define MSR_BE 9 /* Branch trace enable x hflags */
+#define MSR_DE 9 /* Debug interrupts enable on embedded PowerPC x */
+#define MSR_FE1 8 /* Floating point exception mode 1 hflags */
+#define MSR_AL 7 /* AL bit on POWER */
+#define MSR_EP 6 /* Exception prefix on 601 */
+#define MSR_IR 5 /* Instruction relocate */
+#define MSR_DR 4 /* Data relocate */
+#define MSR_IS 5 /* Instruction address space (BookE) */
+#define MSR_DS 4 /* Data address space (BookE) */
+#define MSR_PE 3 /* Protection enable on 403 */
+#define MSR_PX 2 /* Protection exclusive on 403 x */
+#define MSR_PMM 2 /* Performance monitor mark on POWER x */
+#define MSR_RI 1 /* Recoverable interrupt 1 */
+#define MSR_LE 0 /* Little-endian mode 1 hflags */
+
+/* LPCR bits */
+#define LPCR_VPM0 (1ull << (63 - 0))
+#define LPCR_VPM1 (1ull << (63 - 1))
+#define LPCR_ISL (1ull << (63 - 2))
+#define LPCR_KBV (1ull << (63 - 3))
+#define LPCR_DPFD_SHIFT (63 - 11)
+#define LPCR_DPFD (0x3ull << LPCR_DPFD_SHIFT)
+#define LPCR_VRMASD_SHIFT (63 - 16)
+#define LPCR_VRMASD (0x1full << LPCR_VRMASD_SHIFT)
+#define LPCR_RMLS_SHIFT (63 - 37)
+#define LPCR_RMLS (0xfull << LPCR_RMLS_SHIFT)
+#define LPCR_ILE (1ull << (63 - 38))
+#define LPCR_AIL_SHIFT (63 - 40) /* Alternate interrupt location */
+#define LPCR_AIL (3ull << LPCR_AIL_SHIFT)
+#define LPCR_ONL (1ull << (63 - 45))
+#define LPCR_P7_PECE0 (1ull << (63 - 49))
+#define LPCR_P7_PECE1 (1ull << (63 - 50))
+#define LPCR_P7_PECE2 (1ull << (63 - 51))
+#define LPCR_P8_PECE0 (1ull << (63 - 47))
+#define LPCR_P8_PECE1 (1ull << (63 - 48))
+#define LPCR_P8_PECE2 (1ull << (63 - 49))
+#define LPCR_P8_PECE3 (1ull << (63 - 50))
+#define LPCR_P8_PECE4 (1ull << (63 - 51))
+#define LPCR_MER (1ull << (63 - 52))
+#define LPCR_TC (1ull << (63 - 54))
+#define LPCR_LPES0 (1ull << (63 - 60))
+#define LPCR_LPES1 (1ull << (63 - 61))
+#define LPCR_RMI (1ull << (63 - 62))
+#define LPCR_HDICE (1ull << (63 - 63))
+
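/* Illustrative sketch, not part of the patch: the LPCR masks above follow the
 * Power ISA big-endian bit numbering, i.e. (1ull << (63 - n)) is architected
 * bit n; a multi-bit field such as DPFD is extracted with its shift:
 */
static inline unsigned lpcr_dpfd_example(uint64_t lpcr)
{
    return (lpcr & LPCR_DPFD) >> LPCR_DPFD_SHIFT;
}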
+#define msr_sf ((env->msr >> MSR_SF) & 1)
+#define msr_isf ((env->msr >> MSR_ISF) & 1)
+#define msr_shv ((env->msr >> MSR_SHV) & 1)
+#define msr_cm ((env->msr >> MSR_CM) & 1)
+#define msr_icm ((env->msr >> MSR_ICM) & 1)
+#define msr_thv ((env->msr >> MSR_THV) & 1)
+#define msr_gs ((env->msr >> MSR_GS) & 1)
+#define msr_ucle ((env->msr >> MSR_UCLE) & 1)
+#define msr_vr ((env->msr >> MSR_VR) & 1)
+#define msr_spe ((env->msr >> MSR_SPE) & 1)
+#define msr_ap ((env->msr >> MSR_AP) & 1)
+#define msr_vsx ((env->msr >> MSR_VSX) & 1)
+#define msr_sa ((env->msr >> MSR_SA) & 1)
+#define msr_key ((env->msr >> MSR_KEY) & 1)
+#define msr_pow ((env->msr >> MSR_POW) & 1)
+#define msr_tgpr ((env->msr >> MSR_TGPR) & 1)
+#define msr_ce ((env->msr >> MSR_CE) & 1)
+#define msr_ile ((env->msr >> MSR_ILE) & 1)
+#define msr_ee ((env->msr >> MSR_EE) & 1)
+#define msr_pr ((env->msr >> MSR_PR) & 1)
+#define msr_fp ((env->msr >> MSR_FP) & 1)
+#define msr_me ((env->msr >> MSR_ME) & 1)
+#define msr_fe0 ((env->msr >> MSR_FE0) & 1)
+#define msr_se ((env->msr >> MSR_SE) & 1)
+#define msr_dwe ((env->msr >> MSR_DWE) & 1)
+#define msr_uble ((env->msr >> MSR_UBLE) & 1)
+#define msr_be ((env->msr >> MSR_BE) & 1)
+#define msr_de ((env->msr >> MSR_DE) & 1)
+#define msr_fe1 ((env->msr >> MSR_FE1) & 1)
+#define msr_al ((env->msr >> MSR_AL) & 1)
+#define msr_ep ((env->msr >> MSR_EP) & 1)
+#define msr_ir ((env->msr >> MSR_IR) & 1)
+#define msr_dr ((env->msr >> MSR_DR) & 1)
+#define msr_is ((env->msr >> MSR_IS) & 1)
+#define msr_ds ((env->msr >> MSR_DS) & 1)
+#define msr_pe ((env->msr >> MSR_PE) & 1)
+#define msr_px ((env->msr >> MSR_PX) & 1)
+#define msr_pmm ((env->msr >> MSR_PMM) & 1)
+#define msr_ri ((env->msr >> MSR_RI) & 1)
+#define msr_le ((env->msr >> MSR_LE) & 1)
+#define msr_ts ((env->msr >> MSR_TS1) & 3)
+#define msr_tm ((env->msr >> MSR_TM) & 1)
+
+/* Hypervisor bit is more specific */
+#if defined(TARGET_PPC64)
+#define MSR_HVB (1ULL << MSR_SHV)
+#define msr_hv msr_shv
+#else
+#if defined(PPC_EMULATE_32BITS_HYPV)
+#define MSR_HVB (1ULL << MSR_THV)
+#define msr_hv msr_thv
+#else
+#define MSR_HVB (0ULL)
+#define msr_hv (0)
+#endif
+#endif
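/* Illustrative sketch, not part of the patch (assumes the full CPUPPCState
 * definition below is visible): the msr_* macros above expand against a local
 * variable named 'env', e.g. to test for problem (user) state:
 */
static inline bool in_user_mode_example(CPUPPCState *env)
{
    return msr_pr;      /* MSR[PR] set means problem (user) state */
}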
+
+/* Facility Status and Control (FSCR) bits */
+#define FSCR_EBB (63 - 56) /* Event-Based Branch Facility */
+#define FSCR_TAR (63 - 55) /* Target Address Register */
+/* Interrupt cause mask and position in FSCR. HFSCR has the same format */
+#define FSCR_IC_MASK (0xFFULL)
+#define FSCR_IC_POS (63 - 7)
+#define FSCR_IC_DSCR_SPR3 2
+#define FSCR_IC_PMU 3
+#define FSCR_IC_BHRB 4
+#define FSCR_IC_TM 5
+#define FSCR_IC_EBB 7
+#define FSCR_IC_TAR 8
+
+/* Exception state register bits definition */
+#define ESR_PIL (1 << (63 - 36)) /* Illegal Instruction */
+#define ESR_PPR (1 << (63 - 37)) /* Privileged Instruction */
+#define ESR_PTR (1 << (63 - 38)) /* Trap */
+#define ESR_FP (1 << (63 - 39)) /* Floating-Point Operation */
+#define ESR_ST (1 << (63 - 40)) /* Store Operation */
+#define ESR_AP (1 << (63 - 44)) /* Auxiliary Processor Operation */
+#define ESR_PUO (1 << (63 - 45)) /* Unimplemented Operation */
+#define ESR_BO (1 << (63 - 46)) /* Byte Ordering */
+#define ESR_PIE (1 << (63 - 47)) /* Imprecise exception */
+#define ESR_DATA (1 << (63 - 53)) /* Data Access (Embedded page table) */
+#define ESR_TLBI (1 << (63 - 54)) /* TLB Ineligible (Embedded page table) */
+#define ESR_PT (1 << (63 - 55)) /* Page Table (Embedded page table) */
+#define ESR_SPV (1 << (63 - 56)) /* SPE/VMX operation */
+#define ESR_EPID (1 << (63 - 57)) /* External Process ID operation */
+#define ESR_VLEMI (1 << (63 - 58)) /* VLE operation */
+#define ESR_MIF (1 << (63 - 62)) /* Misaligned instruction (VLE) */
+
+/* Transaction EXception And Summary Register bits */
+#define TEXASR_FAILURE_PERSISTENT (63 - 7)
+#define TEXASR_DISALLOWED (63 - 8)
+#define TEXASR_NESTING_OVERFLOW (63 - 9)
+#define TEXASR_FOOTPRINT_OVERFLOW (63 - 10)
+#define TEXASR_SELF_INDUCED_CONFLICT (63 - 11)
+#define TEXASR_NON_TRANSACTIONAL_CONFLICT (63 - 12)
+#define TEXASR_TRANSACTION_CONFLICT (63 - 13)
+#define TEXASR_TRANSLATION_INVALIDATION_CONFLICT (63 - 14)
+#define TEXASR_IMPLEMENTATION_SPECIFIC (63 - 15)
+#define TEXASR_INSTRUCTION_FETCH_CONFLICT (63 - 16)
+#define TEXASR_ABORT (63 - 31)
+#define TEXASR_SUSPENDED (63 - 32)
+#define TEXASR_PRIVILEGE_HV (63 - 34)
+#define TEXASR_PRIVILEGE_PR (63 - 35)
+#define TEXASR_FAILURE_SUMMARY (63 - 36)
+#define TEXASR_TFIAR_EXACT (63 - 37)
+#define TEXASR_ROT (63 - 38)
+#define TEXASR_TRANSACTION_LEVEL (63 - 52) /* 12 bits */
+
+enum {
+ POWERPC_FLAG_NONE = 0x00000000,
+ /* Flag selecting the meaning of MSR bit 25 (VRE/SPE) */
+ POWERPC_FLAG_SPE = 0x00000001,
+ POWERPC_FLAG_VRE = 0x00000002,
+ /* Flag selecting the meaning of MSR bit 17 (TGPR/CE) */
+ POWERPC_FLAG_TGPR = 0x00000004,
+ POWERPC_FLAG_CE = 0x00000008,
+ /* Flag selecting the meaning of MSR bit 10 (SE/DWE/UBLE) */
+ POWERPC_FLAG_SE = 0x00000010,
+ POWERPC_FLAG_DWE = 0x00000020,
+ POWERPC_FLAG_UBLE = 0x00000040,
+ /* Flag selecting the meaning of MSR bit 9 (BE/DE) */
+ POWERPC_FLAG_BE = 0x00000080,
+ POWERPC_FLAG_DE = 0x00000100,
+ /* Flag selecting the meaning of MSR bit 2 (PX/PMM) */
+ POWERPC_FLAG_PX = 0x00000200,
+ POWERPC_FLAG_PMM = 0x00000400,
+ /* Flag for special features */
+ /* Decrementer clock: RTC clock (POWER, 601) or bus clock */
+ POWERPC_FLAG_RTC_CLK = 0x00010000,
+ POWERPC_FLAG_BUS_CLK = 0x00020000,
+ /* Has CFAR */
+ POWERPC_FLAG_CFAR = 0x00040000,
+ /* Has VSX */
+ POWERPC_FLAG_VSX = 0x00080000,
+ /* Has Transaction Memory (ISA 2.07) */
+ POWERPC_FLAG_TM = 0x00100000,
+};
+
+/*****************************************************************************/
+/* Floating point status and control register */
+#define FPSCR_FX 31 /* Floating-point exception summary */
+#define FPSCR_FEX 30 /* Floating-point enabled exception summary */
+#define FPSCR_VX 29 /* Floating-point invalid operation exception summ. */
+#define FPSCR_OX 28 /* Floating-point overflow exception */
+#define FPSCR_UX 27 /* Floating-point underflow exception */
+#define FPSCR_ZX 26 /* Floating-point zero divide exception */
+#define FPSCR_XX 25 /* Floating-point inexact exception */
+#define FPSCR_VXSNAN 24 /* Floating-point invalid operation exception (sNaN) */
+#define FPSCR_VXISI 23 /* Floating-point invalid operation exception (inf - inf) */
+#define FPSCR_VXIDI 22 /* Floating-point invalid operation exception (inf / inf) */
+#define FPSCR_VXZDZ 21 /* Floating-point invalid operation exception (0 / 0) */
+#define FPSCR_VXIMZ 20 /* Floating-point invalid operation exception (inf * 0) */
+#define FPSCR_VXVC 19 /* Floating-point invalid operation exception (comp) */
+#define FPSCR_FR 18 /* Floating-point fraction rounded */
+#define FPSCR_FI 17 /* Floating-point fraction inexact */
+#define FPSCR_C 16 /* Floating-point result class descriptor */
+#define FPSCR_FL 15 /* Floating-point less than or negative */
+#define FPSCR_FG 14 /* Floating-point greater than or negative */
+#define FPSCR_FE 13 /* Floating-point equal or zero */
+#define FPSCR_FU 12 /* Floating-point unordered or NaN */
+#define FPSCR_FPCC 12 /* Floating-point condition code */
+#define FPSCR_FPRF 12 /* Floating-point result flags */
+#define FPSCR_VXSOFT 10 /* Floating-point invalid operation exception (soft) */
+#define FPSCR_VXSQRT 9 /* Floating-point invalid operation exception (sqrt) */
+#define FPSCR_VXCVI 8 /* Floating-point invalid operation exception (int) */
+#define FPSCR_VE 7 /* Floating-point invalid operation exception enable */
+#define FPSCR_OE 6 /* Floating-point overflow exception enable */
+#define FPSCR_UE 5 /* Floating-point underflow exception enable */
+#define FPSCR_ZE 4 /* Floating-point zero divide exception enable */
+#define FPSCR_XE 3 /* Floating-point inexact exception enable */
+#define FPSCR_NI 2 /* Floating-point non-IEEE mode */
+#define FPSCR_RN1 1
+#define FPSCR_RN 0 /* Floating-point rounding control */
+#define fpscr_fex (((env->fpscr) >> FPSCR_FEX) & 0x1)
+#define fpscr_vx (((env->fpscr) >> FPSCR_VX) & 0x1)
+#define fpscr_ox (((env->fpscr) >> FPSCR_OX) & 0x1)
+#define fpscr_ux (((env->fpscr) >> FPSCR_UX) & 0x1)
+#define fpscr_zx (((env->fpscr) >> FPSCR_ZX) & 0x1)
+#define fpscr_xx (((env->fpscr) >> FPSCR_XX) & 0x1)
+#define fpscr_vxsnan (((env->fpscr) >> FPSCR_VXSNAN) & 0x1)
+#define fpscr_vxisi (((env->fpscr) >> FPSCR_VXISI) & 0x1)
+#define fpscr_vxidi (((env->fpscr) >> FPSCR_VXIDI) & 0x1)
+#define fpscr_vxzdz (((env->fpscr) >> FPSCR_VXZDZ) & 0x1)
+#define fpscr_vximz (((env->fpscr) >> FPSCR_VXIMZ) & 0x1)
+#define fpscr_vxvc (((env->fpscr) >> FPSCR_VXVC) & 0x1)
+#define fpscr_fpcc (((env->fpscr) >> FPSCR_FPCC) & 0xF)
+#define fpscr_vxsoft (((env->fpscr) >> FPSCR_VXSOFT) & 0x1)
+#define fpscr_vxsqrt (((env->fpscr) >> FPSCR_VXSQRT) & 0x1)
+#define fpscr_vxcvi (((env->fpscr) >> FPSCR_VXCVI) & 0x1)
+#define fpscr_ve (((env->fpscr) >> FPSCR_VE) & 0x1)
+#define fpscr_oe (((env->fpscr) >> FPSCR_OE) & 0x1)
+#define fpscr_ue (((env->fpscr) >> FPSCR_UE) & 0x1)
+#define fpscr_ze (((env->fpscr) >> FPSCR_ZE) & 0x1)
+#define fpscr_xe (((env->fpscr) >> FPSCR_XE) & 0x1)
+#define fpscr_ni (((env->fpscr) >> FPSCR_NI) & 0x1)
+#define fpscr_rn (((env->fpscr) >> FPSCR_RN) & 0x3)
+/* Invalid operation exception summary */
+#define fpscr_ix ((env->fpscr) & ((1 << FPSCR_VXSNAN) | (1 << FPSCR_VXISI) | \
+ (1 << FPSCR_VXIDI) | (1 << FPSCR_VXZDZ) | \
+ (1 << FPSCR_VXIMZ) | (1 << FPSCR_VXVC) | \
+ (1 << FPSCR_VXSOFT) | (1 << FPSCR_VXSQRT) | \
+ (1 << FPSCR_VXCVI)))
+/* exception summary */
+#define fpscr_ex (((env->fpscr) >> FPSCR_XX) & 0x1F)
+/* enabled exception summary */
+#define fpscr_eex (((env->fpscr) >> FPSCR_XX) & ((env->fpscr) >> FPSCR_XE) & \
+ 0x1F)
+
+#define FP_FX (1ull << FPSCR_FX)
+#define FP_FEX (1ull << FPSCR_FEX)
+#define FP_VX (1ull << FPSCR_VX)
+#define FP_OX (1ull << FPSCR_OX)
+#define FP_UX (1ull << FPSCR_UX)
+#define FP_ZX (1ull << FPSCR_ZX)
+#define FP_XX (1ull << FPSCR_XX)
+#define FP_VXSNAN (1ull << FPSCR_VXSNAN)
+#define FP_VXISI (1ull << FPSCR_VXISI)
+#define FP_VXIDI (1ull << FPSCR_VXIDI)
+#define FP_VXZDZ (1ull << FPSCR_VXZDZ)
+#define FP_VXIMZ (1ull << FPSCR_VXIMZ)
+#define FP_VXVC (1ull << FPSCR_VXVC)
+#define FP_FR (1ull << FPSCR_FR)
+#define FP_FI (1ull << FPSCR_FI)
+#define FP_C (1ull << FPSCR_C)
+#define FP_FL (1ull << FPSCR_FL)
+#define FP_FG (1ull << FPSCR_FG)
+#define FP_FE (1ull << FPSCR_FE)
+#define FP_FU (1ull << FPSCR_FU)
+#define FP_FPCC (FP_FL | FP_FG | FP_FE | FP_FU)
+#define FP_FPRF (FP_C | FP_FL | FP_FG | FP_FE | FP_FU)
+#define FP_VXSOFT (1ull << FPSCR_VXSOFT)
+#define FP_VXSQRT (1ull << FPSCR_VXSQRT)
+#define FP_VXCVI (1ull << FPSCR_VXCVI)
+#define FP_VE (1ull << FPSCR_VE)
+#define FP_OE (1ull << FPSCR_OE)
+#define FP_UE (1ull << FPSCR_UE)
+#define FP_ZE (1ull << FPSCR_ZE)
+#define FP_XE (1ull << FPSCR_XE)
+#define FP_NI (1ull << FPSCR_NI)
+#define FP_RN1 (1ull << FPSCR_RN1)
+#define FP_RN (1ull << FPSCR_RN)
+
+/* the exception bits which can be cleared by mcrfs - includes FX */
+#define FP_EX_CLEAR_BITS (FP_FX | FP_OX | FP_UX | FP_ZX | \
+ FP_XX | FP_VXSNAN | FP_VXISI | FP_VXIDI | \
+ FP_VXZDZ | FP_VXIMZ | FP_VXVC | FP_VXSOFT | \
+ FP_VXSQRT | FP_VXCVI)
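/* Illustrative sketch, not part of the patch: fpscr_eex works because the
 * status bits VX..XX (bits 29..25) sit exactly five positions above their
 * enable bits VE..XE (bits 7..3), so shifting both fields down and ANDing
 * pairs each raised exception with its enable (assumes CPUPPCState below):
 */
static inline bool fp_enabled_exception_example(CPUPPCState *env)
{
    target_ulong raised  = (env->fpscr >> FPSCR_XX) & 0x1F;  /* XX ZX UX OX VX */
    target_ulong enables = (env->fpscr >> FPSCR_XE) & 0x1F;  /* XE ZE UE OE VE */
    return (raised & enables) != 0;    /* nonzero exactly when fpscr_eex is */
}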
+
+/*****************************************************************************/
+/* Vector status and control register */
+#define VSCR_NJ 16 /* Vector non-Java mode */
+#define VSCR_SAT 0 /* Vector saturation */
+#define vscr_nj (((env->vscr) >> VSCR_NJ) & 0x1)
+#define vscr_sat (((env->vscr) >> VSCR_SAT) & 0x1)
+
+/*****************************************************************************/
+/* BookE e500 MMU registers */
+
+#define MAS0_NV_SHIFT 0
+#define MAS0_NV_MASK (0xfff << MAS0_NV_SHIFT)
+
+#define MAS0_WQ_SHIFT 12
+#define MAS0_WQ_MASK (3 << MAS0_WQ_SHIFT)
+/* Write TLB entry regardless of reservation */
+#define MAS0_WQ_ALWAYS (0 << MAS0_WQ_SHIFT)
+/* Write TLB entry only if a reservation exists */
+#define MAS0_WQ_COND (1 << MAS0_WQ_SHIFT)
+/* Clear the reservation, do not write the TLB entry */
+#define MAS0_WQ_CLR_RSRV (2 << MAS0_WQ_SHIFT)
+
+#define MAS0_HES_SHIFT 14
+#define MAS0_HES (1 << MAS0_HES_SHIFT)
+
+#define MAS0_ESEL_SHIFT 16
+#define MAS0_ESEL_MASK (0xfff << MAS0_ESEL_SHIFT)
+
+#define MAS0_TLBSEL_SHIFT 28
+#define MAS0_TLBSEL_MASK (3 << MAS0_TLBSEL_SHIFT)
+#define MAS0_TLBSEL_TLB0 (0 << MAS0_TLBSEL_SHIFT)
+#define MAS0_TLBSEL_TLB1 (1 << MAS0_TLBSEL_SHIFT)
+#define MAS0_TLBSEL_TLB2 (2 << MAS0_TLBSEL_SHIFT)
+#define MAS0_TLBSEL_TLB3 (3 << MAS0_TLBSEL_SHIFT)
+
+#define MAS0_ATSEL_SHIFT 31
+#define MAS0_ATSEL (1 << MAS0_ATSEL_SHIFT)
+#define MAS0_ATSEL_TLB 0
+#define MAS0_ATSEL_LRAT MAS0_ATSEL
+
+#define MAS1_TSIZE_SHIFT 7
+#define MAS1_TSIZE_MASK (0x1f << MAS1_TSIZE_SHIFT)
+
+#define MAS1_TS_SHIFT 12
+#define MAS1_TS (1 << MAS1_TS_SHIFT)
+
+#define MAS1_IND_SHIFT 13
+#define MAS1_IND (1 << MAS1_IND_SHIFT)
+
+#define MAS1_TID_SHIFT 16
+#define MAS1_TID_MASK (0x3fff << MAS1_TID_SHIFT)
+
+#define MAS1_IPROT_SHIFT 30
+#define MAS1_IPROT (1 << MAS1_IPROT_SHIFT)
+
+#define MAS1_VALID_SHIFT 31
+#define MAS1_VALID 0x80000000
+
+#define MAS2_EPN_SHIFT 12
+#define MAS2_EPN_MASK (~0ULL << MAS2_EPN_SHIFT)
+
+#define MAS2_ACM_SHIFT 6
+#define MAS2_ACM (1 << MAS2_ACM_SHIFT)
+
+#define MAS2_VLE_SHIFT 5
+#define MAS2_VLE (1 << MAS2_VLE_SHIFT)
+
+#define MAS2_W_SHIFT 4
+#define MAS2_W (1 << MAS2_W_SHIFT)
+
+#define MAS2_I_SHIFT 3
+#define MAS2_I (1 << MAS2_I_SHIFT)
+
+#define MAS2_M_SHIFT 2
+#define MAS2_M (1 << MAS2_M_SHIFT)
+
+#define MAS2_G_SHIFT 1
+#define MAS2_G (1 << MAS2_G_SHIFT)
+
+#define MAS2_E_SHIFT 0
+#define MAS2_E (1 << MAS2_E_SHIFT)
+
+#define MAS3_RPN_SHIFT 12
+#define MAS3_RPN_MASK (0xfffff << MAS3_RPN_SHIFT)
+
+#define MAS3_U0 0x00000200
+#define MAS3_U1 0x00000100
+#define MAS3_U2 0x00000080
+#define MAS3_U3 0x00000040
+#define MAS3_UX 0x00000020
+#define MAS3_SX 0x00000010
+#define MAS3_UW 0x00000008
+#define MAS3_SW 0x00000004
+#define MAS3_UR 0x00000002
+#define MAS3_SR 0x00000001
+#define MAS3_SPSIZE_SHIFT 1
+#define MAS3_SPSIZE_MASK (0x3e << MAS3_SPSIZE_SHIFT)
+
+#define MAS4_TLBSELD_SHIFT MAS0_TLBSEL_SHIFT
+#define MAS4_TLBSELD_MASK MAS0_TLBSEL_MASK
+#define MAS4_TIDSELD_MASK 0x00030000
+#define MAS4_TIDSELD_PID0 0x00000000
+#define MAS4_TIDSELD_PID1 0x00010000
+#define MAS4_TIDSELD_PID2 0x00020000
+#define MAS4_TIDSELD_PIDZ 0x00030000
+#define MAS4_INDD 0x00008000 /* Default IND */
+#define MAS4_TSIZED_SHIFT MAS1_TSIZE_SHIFT
+#define MAS4_TSIZED_MASK MAS1_TSIZE_MASK
+#define MAS4_ACMD 0x00000040
+#define MAS4_VLED 0x00000020
+#define MAS4_WD 0x00000010
+#define MAS4_ID 0x00000008
+#define MAS4_MD 0x00000004
+#define MAS4_GD 0x00000002
+#define MAS4_ED 0x00000001
+#define MAS4_WIMGED_MASK 0x0000001f /* Default WIMGE */
+#define MAS4_WIMGED_SHIFT 0
+
+#define MAS5_SGS 0x80000000
+#define MAS5_SLPID_MASK 0x00000fff
+
+#define MAS6_SPID0 0x3fff0000
+#define MAS6_SPID1 0x00007ffe
+#define MAS6_ISIZE(x) MAS1_TSIZE(x)
+#define MAS6_SAS 0x00000001
+#define MAS6_SPID MAS6_SPID0
+#define MAS6_SIND 0x00000002 /* Indirect page */
+#define MAS6_SIND_SHIFT 1
+#define MAS6_SPID_MASK 0x3fff0000
+#define MAS6_SPID_SHIFT 16
+#define MAS6_ISIZE_MASK 0x00000f80
+#define MAS6_ISIZE_SHIFT 7
+
+#define MAS7_RPN 0xffffffff
+
+#define MAS8_TGS 0x80000000
+#define MAS8_VF 0x40000000
+#define MAS8_TLBPID 0x00000fff
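/* Illustrative sketch, not part of the patch: decoding a couple of MAS1
 * fields of a BookE (e500) TLB entry with the masks above ('mas1' is a
 * caller-supplied register value):
 */
static inline uint32_t mas1_tid_example(uint32_t mas1)
{
    if (!(mas1 & MAS1_VALID)) {
        return 0;                                        /* entry not valid */
    }
    return (mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT;     /* translation ID */
}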
+
+/* Bit definitions for MMUCFG */
+#define MMUCFG_MAVN 0x00000003 /* MMU Architecture Version Number */
+#define MMUCFG_MAVN_V1 0x00000000 /* v1.0 */
+#define MMUCFG_MAVN_V2 0x00000001 /* v2.0 */
+#define MMUCFG_NTLBS 0x0000000c /* Number of TLBs */
+#define MMUCFG_PIDSIZE 0x000007c0 /* PID Reg Size */
+#define MMUCFG_TWC 0x00008000 /* TLB Write Conditional (v2.0) */
+#define MMUCFG_LRAT 0x00010000 /* LRAT Supported (v2.0) */
+#define MMUCFG_RASIZE 0x00fe0000 /* Real Addr Size */
+#define MMUCFG_LPIDSIZE 0x0f000000 /* LPID Reg Size */
+
+/* Bit definitions for MMUCSR0 */
+#define MMUCSR0_TLB1FI 0x00000002 /* TLB1 Flash invalidate */
+#define MMUCSR0_TLB0FI 0x00000004 /* TLB0 Flash invalidate */
+#define MMUCSR0_TLB2FI 0x00000040 /* TLB2 Flash invalidate */
+#define MMUCSR0_TLB3FI 0x00000020 /* TLB3 Flash invalidate */
+#define MMUCSR0_TLBFI (MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \
+ MMUCSR0_TLB2FI | MMUCSR0_TLB3FI)
+#define MMUCSR0_TLB0PS 0x00000780 /* TLB0 Page Size */
+#define MMUCSR0_TLB1PS 0x00007800 /* TLB1 Page Size */
+#define MMUCSR0_TLB2PS 0x00078000 /* TLB2 Page Size */
+#define MMUCSR0_TLB3PS 0x00780000 /* TLB3 Page Size */
+
+/* TLBnCFG encoding */
+#define TLBnCFG_N_ENTRY 0x00000fff /* number of entries */
+#define TLBnCFG_HES 0x00002000 /* HW select supported */
+#define TLBnCFG_AVAIL 0x00004000 /* variable page size */
+#define TLBnCFG_IPROT 0x00008000 /* IPROT supported */
+#define TLBnCFG_GTWE 0x00010000 /* Guest can write */
+#define TLBnCFG_IND 0x00020000 /* IND entries supported */
+#define TLBnCFG_PT 0x00040000 /* Can load from page table */
+#define TLBnCFG_MINSIZE 0x00f00000 /* Minimum Page Size (v1.0) */
+#define TLBnCFG_MINSIZE_SHIFT 20
+#define TLBnCFG_MAXSIZE 0x000f0000 /* Maximum Page Size (v1.0) */
+#define TLBnCFG_MAXSIZE_SHIFT 16
+#define TLBnCFG_ASSOC 0xff000000 /* Associativity */
+#define TLBnCFG_ASSOC_SHIFT 24
+
+/* TLBnPS encoding */
+#define TLBnPS_4K 0x00000004
+#define TLBnPS_8K 0x00000008
+#define TLBnPS_16K 0x00000010
+#define TLBnPS_32K 0x00000020
+#define TLBnPS_64K 0x00000040
+#define TLBnPS_128K 0x00000080
+#define TLBnPS_256K 0x00000100
+#define TLBnPS_512K 0x00000200
+#define TLBnPS_1M 0x00000400
+#define TLBnPS_2M 0x00000800
+#define TLBnPS_4M 0x00001000
+#define TLBnPS_8M 0x00002000
+#define TLBnPS_16M 0x00004000
+#define TLBnPS_32M 0x00008000
+#define TLBnPS_64M 0x00010000
+#define TLBnPS_128M 0x00020000
+#define TLBnPS_256M 0x00040000
+#define TLBnPS_512M 0x00080000
+#define TLBnPS_1G 0x00100000
+#define TLBnPS_2G 0x00200000
+#define TLBnPS_4G 0x00400000
+#define TLBnPS_8G 0x00800000
+#define TLBnPS_16G 0x01000000
+#define TLBnPS_32G 0x02000000
+#define TLBnPS_64G 0x04000000
+#define TLBnPS_128G 0x08000000
+#define TLBnPS_256G 0x10000000
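/* Illustrative sketch, not part of the patch: in the TLBnPS encoding each set
 * bit n advertises support for a 2^n KiB page (bit 2 = 4K, bit 10 = 1M,
 * bit 20 = 1G), so a set bit can be turned into a size in bytes like this:
 */
static inline uint64_t tlbnps_bit_to_bytes_example(int bit)
{
    return 1024ULL << bit;      /* e.g. bit 2 -> 4096 bytes */
}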
+
+/* tlbilx action encoding */
+#define TLBILX_T_ALL 0
+#define TLBILX_T_TID 1
+#define TLBILX_T_FULLMATCH 3
+#define TLBILX_T_CLASS0 4
+#define TLBILX_T_CLASS1 5
+#define TLBILX_T_CLASS2 6
+#define TLBILX_T_CLASS3 7
+
+/* BookE 2.06 helper defines */
+
+#define BOOKE206_FLUSH_TLB0 (1 << 0)
+#define BOOKE206_FLUSH_TLB1 (1 << 1)
+#define BOOKE206_FLUSH_TLB2 (1 << 2)
+#define BOOKE206_FLUSH_TLB3 (1 << 3)
+
+/* number of possible TLBs */
+#define BOOKE206_MAX_TLBN 4
+
+/*****************************************************************************/
+/* Embedded.Processor Control */
+
+#define DBELL_TYPE_SHIFT 27
+#define DBELL_TYPE_MASK (0x1f << DBELL_TYPE_SHIFT)
+#define DBELL_TYPE_DBELL (0x00 << DBELL_TYPE_SHIFT)
+#define DBELL_TYPE_DBELL_CRIT (0x01 << DBELL_TYPE_SHIFT)
+#define DBELL_TYPE_G_DBELL (0x02 << DBELL_TYPE_SHIFT)
+#define DBELL_TYPE_G_DBELL_CRIT (0x03 << DBELL_TYPE_SHIFT)
+#define DBELL_TYPE_G_DBELL_MC (0x04 << DBELL_TYPE_SHIFT)
+
+#define DBELL_BRDCAST (1 << 26)
+#define DBELL_LPIDTAG_SHIFT 14
+#define DBELL_LPIDTAG_MASK (0xfff << DBELL_LPIDTAG_SHIFT)
+#define DBELL_PIRTAG_MASK 0x3fff
+
+/*****************************************************************************/
+/* Segment page size information, used by recent hash MMUs
+ * The format of this structure mirrors kvm_ppc_smmu_info
+ */
+
+#define PPC_PAGE_SIZES_MAX_SZ 8
+
+struct ppc_one_page_size {
+ uint32_t page_shift; /* Page shift (or 0) */
+ uint32_t pte_enc; /* Encoding in the HPTE (>>12) */
+};
+
+struct ppc_one_seg_page_size {
+ uint32_t page_shift; /* Base page shift of segment (or 0) */
+ uint32_t slb_enc; /* SLB encoding for BookS */
+ struct ppc_one_page_size enc[PPC_PAGE_SIZES_MAX_SZ];
+};
+
+struct ppc_segment_page_sizes {
+ struct ppc_one_seg_page_size sps[PPC_PAGE_SIZES_MAX_SZ];
+};
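/* Illustrative sketch, not part of the patch: entries with page_shift == 0
 * appear to be unused, so scanning for a 64 KiB base page size could look
 * like this ('sps' is a caller-supplied pointer):
 */
static inline bool has_64k_pages_example(const struct ppc_segment_page_sizes *sps)
{
    for (int i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        if (sps->sps[i].page_shift == 16) {
            return true;
        }
    }
    return false;
}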
+
+
+/*****************************************************************************/
+/* The whole PowerPC CPU context */
+#define NB_MMU_MODES 8
+
+#define PPC_CPU_OPCODES_LEN 0x40
+#define PPC_CPU_INDIRECT_OPCODES_LEN 0x20
+
+struct CPUPPCState {
+ /* First are the most commonly used resources
+ * during translated code execution
+ */
+ /* general purpose registers */
+ target_ulong gpr[32];
+ /* Storage for GPR MSB, used by the SPE extension */
+ target_ulong gprh[32];
+ /* LR */
+ target_ulong lr;
+ /* CTR */
+ target_ulong ctr;
+ /* condition register */
+ uint32_t crf[8];
+#if defined(TARGET_PPC64)
+ /* CFAR */
+ target_ulong cfar;
+#endif
+ /* XER (with SO, OV, CA split out) */
+ target_ulong xer;
+ target_ulong so;
+ target_ulong ov;
+ target_ulong ca;
+ /* Reservation address */
+ target_ulong reserve_addr;
+ /* Reservation value */
+ target_ulong reserve_val;
+ target_ulong reserve_val2;
+ /* Reservation store address */
+ target_ulong reserve_ea;
+ /* Reserved store source register and size */
+ target_ulong reserve_info;
+
+ /* These are used in supervisor mode only */
+ /* machine state register */
+ target_ulong msr;
+ /* temporary general purpose registers */
+ target_ulong tgpr[4]; /* Used to speed-up TLB assist handlers */
+
+ /* Floating point execution context */
+ float_status fp_status;
+ /* floating point registers */
+ float64 fpr[32];
+ /* floating point status and control register */
+ target_ulong fpscr;
+
+ /* Next instruction pointer */
+ target_ulong nip;
+
+ int access_type; /* when a memory exception occurs, the access
+ type is stored here */
+
+ CPU_COMMON
+
+ /* MMU context - only relevant for full system emulation */
+#if !defined(CONFIG_USER_ONLY)
+#if defined(TARGET_PPC64)
+ /* PowerPC 64 SLB area */
+ ppc_slb_t slb[MAX_SLB_ENTRIES];
+ int32_t slb_nr;
+ /* tcg TLB needs flush (deferred slb inval instruction typically) */
+#endif
+ /* segment registers */
+ hwaddr htab_base;
+ /* mask used to normalize hash value to PTEG index */
+ hwaddr htab_mask;
+ target_ulong sr[32];
+ /* externally stored hash table */
+ uint8_t *external_htab;
+ /* BATs */
+ uint32_t nb_BATs;
+ target_ulong DBAT[2][8];
+ target_ulong IBAT[2][8];
+ /* PowerPC TLB registers (for 4xx, e500 and 60x software driven TLBs) */
+ int32_t nb_tlb; /* Total number of TLB entries */
+ int tlb_per_way; /* Speed-up helper: used to avoid divisions at run time */
+ int nb_ways; /* Number of ways in the TLB set */
+ int last_way; /* Last way used, for LRU-style allocation of TLB entries */
+ int id_tlbs; /* If 1, MMU has separate TLBs for instructions & data */
+ int nb_pids; /* Number of available PID registers */
+ int tlb_type; /* Type of TLB we're dealing with */
+ ppc_tlb_t tlb; /* TLB is optional. Allocate them only if needed */
+ /* 403 dedicated access protection registers */
+ target_ulong pb[4];
+ bool tlb_dirty; /* Set when the TLB is modified */
+ bool kvm_sw_tlb; /* Set when the KVM SW TLB API is active */
+ uint32_t tlb_need_flush; /* Delayed flush needed */
+#define TLB_NEED_LOCAL_FLUSH 0x1
+#define TLB_NEED_GLOBAL_FLUSH 0x2
+#endif
+
+ /* Other registers */
+ /* Special purpose registers */
+ target_ulong spr[1024];
+ ppc_spr_t spr_cb[1024];
+ /* Altivec registers */
+ ppc_avr_t avr[32];
+ uint32_t vscr;
+ /* VSX registers */
+ uint64_t vsr[32];
+ /* SPE registers */
+ uint64_t spe_acc;
+ uint32_t spe_fscr;
+ /* SPE and Altivec can share a status since they will never be used
+ * simultaneously */
+ float_status vec_status;
+
+ /* Internal devices resources */
+ /* Time base and decrementer */
+ ppc_tb_t *tb_env;
+ /* Device control registers */
+ ppc_dcr_t *dcr_env;
+
+ int dcache_line_size;
+ int icache_line_size;
+
+ /* Those resources are used during exception processing */
+ /* CPU model definition */
+ target_ulong msr_mask;
+ powerpc_mmu_t mmu_model;
+ powerpc_excp_t excp_model;
+ powerpc_input_t bus_model;
+ int bfd_mach;
+ uint32_t flags;
+ uint64_t insns_flags;
+ uint64_t insns_flags2;
+#if defined(TARGET_PPC64)
+ struct ppc_segment_page_sizes sps;
+ ppc_slb_t vrma_slb;
+ target_ulong rmls;
+ bool ci_large_pages;
+#endif
+
+#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
+ uint64_t vpa_addr;
+ uint64_t slb_shadow_addr, slb_shadow_size;
+ uint64_t dtl_addr, dtl_size;
+#endif /* TARGET_PPC64 */
+
+ int error_code;
+ uint32_t pending_interrupts;
+#if !defined(CONFIG_USER_ONLY)
+ /* This is the IRQ controller, which is implementation dependent
+ * and only relevant when emulating a complete machine.
+ */
+ uint32_t irq_input_state;
+ void **irq_inputs;
+ /* Exception vectors */
+ target_ulong excp_vectors[POWERPC_EXCP_NB];
+ target_ulong excp_prefix;
+ target_ulong ivor_mask;
+ target_ulong ivpr_mask;
+ target_ulong hreset_vector;
+ hwaddr mpic_iack;
+ /* true when the external proxy facility mode is enabled */
+ bool mpic_proxy;
+ /* set when the processor has an HV mode, thus HV priv
+ * instructions and SPRs are disallowed if MSR:HV is 0
+ */
+ bool has_hv_mode;
+ /* On P7/P8, set while in a power-management state; resume then
+ * needs special handling (such as routing some resume causes to
+ * 0x100), so flag it here.
+ */
+ bool in_pm_state;
+#endif
+
+ /* Those resources are used only during code translation */
+ /* opcode handlers */
+ opc_handler_t *opcodes[PPC_CPU_OPCODES_LEN];
+
+ /* Those resources are used only in QEMU core */
+    target_ulong hflags; /* hflags is MSR & HFLAGS_MASK */
+ target_ulong hflags_nmsr; /* specific hflags, not coming from MSR */
+ int immu_idx; /* precomputed MMU index to speed up insn access */
+ int dmmu_idx; /* precomputed MMU index to speed up data accesses */
+
+ /* Power management */
+ int (*check_pow)(CPUPPCState *env);
+
+#if !defined(CONFIG_USER_ONLY)
+ void *load_info; /* Holds boot loading state. */
+#endif
+
+ /* booke timers */
+
+    /* Specifies the bit locations of the Time Base used to signal a timer
+     * exception (watchdog or fixed-interval timer) on a transition from 0 to 1.
+     *
+     * 0 selects the least significant bit.
+     * 63 selects the most significant bit.
+     */
+ uint8_t fit_period[4];
+ uint8_t wdt_period[4];
+
+ /* Transactional memory state */
+ target_ulong tm_gpr[32];
+ ppc_avr_t tm_vsr[64];
+ uint64_t tm_cr;
+ uint64_t tm_lr;
+ uint64_t tm_ctr;
+ uint64_t tm_fpscr;
+ uint64_t tm_amr;
+ uint64_t tm_ppr;
+ uint64_t tm_vrsave;
+ uint32_t tm_vscr;
+ uint64_t tm_dscr;
+ uint64_t tm_tar;
+};
+
+#define SET_FIT_PERIOD(a_, b_, c_, d_) \
+do { \
+ env->fit_period[0] = (a_); \
+ env->fit_period[1] = (b_); \
+ env->fit_period[2] = (c_); \
+ env->fit_period[3] = (d_); \
+ } while (0)
+
+#define SET_WDT_PERIOD(a_, b_, c_, d_) \
+do { \
+ env->wdt_period[0] = (a_); \
+ env->wdt_period[1] = (b_); \
+ env->wdt_period[2] = (c_); \
+ env->wdt_period[3] = (d_); \
+ } while (0)
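+
+/* Illustrative use only (not part of this change): a family init routine
+ * would pick the Time Base bits driving its timers, for instance
+ *     SET_FIT_PERIOD(8, 12, 16, 20);
+ *     SET_WDT_PERIOD(16, 20, 24, 28);
+ * where a higher bit number toggles less often and hence gives a longer
+ * period.
+ */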
+
+/**
+ * PowerPCCPU:
+ * @env: #CPUPPCState
+ * @cpu_dt_id: CPU index used in the device tree. KVM uses this index too
+ * @max_compat: Maximal supported logical PVR from the command line
+ * @cpu_version: Current logical PVR, zero if in "raw" mode
+ *
+ * A PowerPC CPU.
+ */
+struct PowerPCCPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPUPPCState env;
+ int cpu_dt_id;
+ uint32_t max_compat;
+ uint32_t cpu_version;
+
+ /* Fields related to migration compatibility hacks */
+ bool pre_2_8_migration;
+ target_ulong mig_msr_mask;
+ uint64_t mig_insns_flags;
+ uint64_t mig_insns_flags2;
+ uint32_t mig_nb_BATs;
+};
+
+static inline PowerPCCPU *ppc_env_get_cpu(CPUPPCState *env)
+{
+ return container_of(env, PowerPCCPU, env);
+}
+
+#define ENV_GET_CPU(e) CPU(ppc_env_get_cpu(e))
+
+#define ENV_OFFSET offsetof(PowerPCCPU, env)
+
+PowerPCCPUClass *ppc_cpu_class_by_pvr(uint32_t pvr);
+PowerPCCPUClass *ppc_cpu_class_by_pvr_mask(uint32_t pvr);
+
+void ppc_cpu_do_interrupt(CPUState *cpu);
+bool ppc_cpu_exec_interrupt(CPUState *cpu, int int_req);
+void ppc_cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
+ int flags);
+void ppc_cpu_dump_statistics(CPUState *cpu, FILE *f,
+ fprintf_function cpu_fprintf, int flags);
+hwaddr ppc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+int ppc_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
+int ppc_cpu_gdb_read_register_apple(CPUState *cpu, uint8_t *buf, int reg);
+int ppc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+int ppc_cpu_gdb_write_register_apple(CPUState *cpu, uint8_t *buf, int reg);
+int ppc64_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
+ int cpuid, void *opaque);
+#ifndef CONFIG_USER_ONLY
+void ppc_cpu_do_system_reset(CPUState *cs);
+extern const struct VMStateDescription vmstate_ppc_cpu;
+#endif
+
+/*****************************************************************************/
+PowerPCCPU *cpu_ppc_init(const char *cpu_model);
+void ppc_translate_init(void);
+const char *ppc_cpu_lookup_alias(const char *alias);
+/* You can call this signal handler from your SIGBUS and SIGSEGV
+   signal handlers to inform the virtual CPU of exceptions. Non-zero
+   is returned if the signal was handled by the virtual CPU. */
+int cpu_ppc_signal_handler (int host_signum, void *pinfo,
+ void *puc);
+#if defined(CONFIG_USER_ONLY)
+int ppc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
+ int mmu_idx);
+#endif
+
+#if !defined(CONFIG_USER_ONLY)
+void ppc_store_sdr1 (CPUPPCState *env, target_ulong value);
+#endif /* !defined(CONFIG_USER_ONLY) */
+void ppc_store_msr (CPUPPCState *env, target_ulong value);
+
+void ppc_cpu_list (FILE *f, fprintf_function cpu_fprintf);
+int ppc_get_compat_smt_threads(PowerPCCPU *cpu);
+#if defined(TARGET_PPC64)
+void ppc_set_compat(PowerPCCPU *cpu, uint32_t cpu_version, Error **errp);
+#endif
+
+/* Time-base and decrementer management */
+#ifndef NO_CPU_IO_DEFS
+uint64_t cpu_ppc_load_tbl (CPUPPCState *env);
+uint32_t cpu_ppc_load_tbu (CPUPPCState *env);
+void cpu_ppc_store_tbu (CPUPPCState *env, uint32_t value);
+void cpu_ppc_store_tbl (CPUPPCState *env, uint32_t value);
+uint64_t cpu_ppc_load_atbl (CPUPPCState *env);
+uint32_t cpu_ppc_load_atbu (CPUPPCState *env);
+void cpu_ppc_store_atbl (CPUPPCState *env, uint32_t value);
+void cpu_ppc_store_atbu (CPUPPCState *env, uint32_t value);
+bool ppc_decr_clear_on_delivery(CPUPPCState *env);
+uint32_t cpu_ppc_load_decr (CPUPPCState *env);
+void cpu_ppc_store_decr (CPUPPCState *env, uint32_t value);
+uint32_t cpu_ppc_load_hdecr (CPUPPCState *env);
+void cpu_ppc_store_hdecr (CPUPPCState *env, uint32_t value);
+uint64_t cpu_ppc_load_purr (CPUPPCState *env);
+uint32_t cpu_ppc601_load_rtcl (CPUPPCState *env);
+uint32_t cpu_ppc601_load_rtcu (CPUPPCState *env);
+#if !defined(CONFIG_USER_ONLY)
+void cpu_ppc601_store_rtcl (CPUPPCState *env, uint32_t value);
+void cpu_ppc601_store_rtcu (CPUPPCState *env, uint32_t value);
+target_ulong load_40x_pit (CPUPPCState *env);
+void store_40x_pit (CPUPPCState *env, target_ulong val);
+void store_40x_dbcr0 (CPUPPCState *env, uint32_t val);
+void store_40x_sler (CPUPPCState *env, uint32_t val);
+void store_booke_tcr (CPUPPCState *env, target_ulong val);
+void store_booke_tsr (CPUPPCState *env, target_ulong val);
+void ppc_tlb_invalidate_all (CPUPPCState *env);
+void ppc_tlb_invalidate_one (CPUPPCState *env, target_ulong addr);
+void cpu_ppc_set_papr(PowerPCCPU *cpu);
+#endif
+#endif
+
+void store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask);
+
+static inline uint64_t ppc_dump_gpr(CPUPPCState *env, int gprn)
+{
+ uint64_t gprv;
+
+ gprv = env->gpr[gprn];
+ if (env->flags & POWERPC_FLAG_SPE) {
+ /* If the CPU implements the SPE extension, we have to get the
+ * high bits of the GPR from the gprh storage area
+ */
+ gprv &= 0xFFFFFFFFULL;
+ gprv |= (uint64_t)env->gprh[gprn] << 32;
+ }
+
+ return gprv;
+}
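+
+/* For example (illustrative values): with POWERPC_FLAG_SPE set, gpr[3] ==
+ * 0x1111111122222222 and gprh[3] == 0x33333333 dump as 0x3333333322222222,
+ * because the architected upper half of an SPE GPR lives in gprh[].
+ */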
+
+/* Device control registers */
+int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp);
+int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val);
+
+#define cpu_init(cpu_model) CPU(cpu_ppc_init(cpu_model))
+
+#define cpu_signal_handler cpu_ppc_signal_handler
+#define cpu_list ppc_cpu_list
+
+/* MMU modes definitions */
+#define MMU_USER_IDX 0
+static inline int cpu_mmu_index (CPUPPCState *env, bool ifetch)
+{
+ return ifetch ? env->immu_idx : env->dmmu_idx;
+}
+
+#include "exec/cpu-all.h"
+
+/*****************************************************************************/
+/* CRF definitions */
+#define CRF_LT 3
+#define CRF_GT 2
+#define CRF_EQ 1
+#define CRF_SO 0
+#define CRF_CH (1 << CRF_LT)
+#define CRF_CL (1 << CRF_GT)
+#define CRF_CH_OR_CL (1 << CRF_EQ)
+#define CRF_CH_AND_CL (1 << CRF_SO)
+
+/* XER definitions */
+#define XER_SO 31
+#define XER_OV 30
+#define XER_CA 29
+#define XER_CMP 8
+#define XER_BC 0
+#define xer_so (env->so)
+#define xer_ov (env->ov)
+#define xer_ca (env->ca)
+#define xer_cmp ((env->xer >> XER_CMP) & 0xFF)
+#define xer_bc ((env->xer >> XER_BC) & 0x7F)
+
+/* SPR definitions */
+#define SPR_MQ (0x000)
+#define SPR_XER (0x001)
+#define SPR_601_VRTCU (0x004)
+#define SPR_601_VRTCL (0x005)
+#define SPR_601_UDECR (0x006)
+#define SPR_LR (0x008)
+#define SPR_CTR (0x009)
+#define SPR_UAMR (0x00C)
+#define SPR_DSCR (0x011)
+#define SPR_DSISR (0x012)
+#define SPR_DAR (0x013) /* DAE for PowerPC 601 */
+#define SPR_601_RTCU (0x014)
+#define SPR_601_RTCL (0x015)
+#define SPR_DECR (0x016)
+#define SPR_SDR1 (0x019)
+#define SPR_SRR0 (0x01A)
+#define SPR_SRR1 (0x01B)
+#define SPR_CFAR (0x01C)
+#define SPR_AMR (0x01D)
+#define SPR_ACOP (0x01F)
+#define SPR_BOOKE_PID (0x030)
+#define SPR_BOOKS_PID (0x030)
+#define SPR_BOOKE_DECAR (0x036)
+#define SPR_BOOKE_CSRR0 (0x03A)
+#define SPR_BOOKE_CSRR1 (0x03B)
+#define SPR_BOOKE_DEAR (0x03D)
+#define SPR_IAMR (0x03D)
+#define SPR_BOOKE_ESR (0x03E)
+#define SPR_BOOKE_IVPR (0x03F)
+#define SPR_MPC_EIE (0x050)
+#define SPR_MPC_EID (0x051)
+#define SPR_MPC_NRI (0x052)
+#define SPR_TFHAR (0x080)
+#define SPR_TFIAR (0x081)
+#define SPR_TEXASR (0x082)
+#define SPR_TEXASRU (0x083)
+#define SPR_UCTRL (0x088)
+#define SPR_MPC_CMPA (0x090)
+#define SPR_MPC_CMPB (0x091)
+#define SPR_MPC_CMPC (0x092)
+#define SPR_MPC_CMPD (0x093)
+#define SPR_MPC_ECR (0x094)
+#define SPR_MPC_DER (0x095)
+#define SPR_MPC_COUNTA (0x096)
+#define SPR_MPC_COUNTB (0x097)
+#define SPR_CTRL (0x098)
+#define SPR_MPC_CMPE (0x098)
+#define SPR_MPC_CMPF (0x099)
+#define SPR_FSCR (0x099)
+#define SPR_MPC_CMPG (0x09A)
+#define SPR_MPC_CMPH (0x09B)
+#define SPR_MPC_LCTRL1 (0x09C)
+#define SPR_MPC_LCTRL2 (0x09D)
+#define SPR_UAMOR (0x09D)
+#define SPR_MPC_ICTRL (0x09E)
+#define SPR_MPC_BAR (0x09F)
+#define SPR_PSPB (0x09F)
+#define SPR_DAWR (0x0B4)
+#define SPR_RPR (0x0BA)
+#define SPR_CIABR (0x0BB)
+#define SPR_DAWRX (0x0BC)
+#define SPR_HFSCR (0x0BE)
+#define SPR_VRSAVE (0x100)
+#define SPR_USPRG0 (0x100)
+#define SPR_USPRG1 (0x101)
+#define SPR_USPRG2 (0x102)
+#define SPR_USPRG3 (0x103)
+#define SPR_USPRG4 (0x104)
+#define SPR_USPRG5 (0x105)
+#define SPR_USPRG6 (0x106)
+#define SPR_USPRG7 (0x107)
+#define SPR_VTBL (0x10C)
+#define SPR_VTBU (0x10D)
+#define SPR_SPRG0 (0x110)
+#define SPR_SPRG1 (0x111)
+#define SPR_SPRG2 (0x112)
+#define SPR_SPRG3 (0x113)
+#define SPR_SPRG4 (0x114)
+#define SPR_SCOMC (0x114)
+#define SPR_SPRG5 (0x115)
+#define SPR_SCOMD (0x115)
+#define SPR_SPRG6 (0x116)
+#define SPR_SPRG7 (0x117)
+#define SPR_ASR (0x118)
+#define SPR_EAR (0x11A)
+#define SPR_TBL (0x11C)
+#define SPR_TBU (0x11D)
+#define SPR_TBU40 (0x11E)
+#define SPR_SVR (0x11E)
+#define SPR_BOOKE_PIR (0x11E)
+#define SPR_PVR (0x11F)
+#define SPR_HSPRG0 (0x130)
+#define SPR_BOOKE_DBSR (0x130)
+#define SPR_HSPRG1 (0x131)
+#define SPR_HDSISR (0x132)
+#define SPR_HDAR (0x133)
+#define SPR_BOOKE_EPCR (0x133)
+#define SPR_SPURR (0x134)
+#define SPR_BOOKE_DBCR0 (0x134)
+#define SPR_IBCR (0x135)
+#define SPR_PURR (0x135)
+#define SPR_BOOKE_DBCR1 (0x135)
+#define SPR_DBCR (0x136)
+#define SPR_HDEC (0x136)
+#define SPR_BOOKE_DBCR2 (0x136)
+#define SPR_HIOR (0x137)
+#define SPR_MBAR (0x137)
+#define SPR_RMOR (0x138)
+#define SPR_BOOKE_IAC1 (0x138)
+#define SPR_HRMOR (0x139)
+#define SPR_BOOKE_IAC2 (0x139)
+#define SPR_HSRR0 (0x13A)
+#define SPR_BOOKE_IAC3 (0x13A)
+#define SPR_HSRR1 (0x13B)
+#define SPR_BOOKE_IAC4 (0x13B)
+#define SPR_BOOKE_DAC1 (0x13C)
+#define SPR_MMCRH (0x13C)
+#define SPR_DABR2 (0x13D)
+#define SPR_BOOKE_DAC2 (0x13D)
+#define SPR_TFMR (0x13D)
+#define SPR_BOOKE_DVC1 (0x13E)
+#define SPR_LPCR (0x13E)
+#define SPR_BOOKE_DVC2 (0x13F)
+#define SPR_LPIDR (0x13F)
+#define SPR_BOOKE_TSR (0x150)
+#define SPR_HMER (0x150)
+#define SPR_HMEER (0x151)
+#define SPR_PCR (0x152)
+#define SPR_BOOKE_LPIDR (0x152)
+#define SPR_BOOKE_TCR (0x154)
+#define SPR_BOOKE_TLB0PS (0x158)
+#define SPR_BOOKE_TLB1PS (0x159)
+#define SPR_BOOKE_TLB2PS (0x15A)
+#define SPR_BOOKE_TLB3PS (0x15B)
+#define SPR_AMOR (0x15D)
+#define SPR_BOOKE_MAS7_MAS3 (0x174)
+#define SPR_BOOKE_IVOR0 (0x190)
+#define SPR_BOOKE_IVOR1 (0x191)
+#define SPR_BOOKE_IVOR2 (0x192)
+#define SPR_BOOKE_IVOR3 (0x193)
+#define SPR_BOOKE_IVOR4 (0x194)
+#define SPR_BOOKE_IVOR5 (0x195)
+#define SPR_BOOKE_IVOR6 (0x196)
+#define SPR_BOOKE_IVOR7 (0x197)
+#define SPR_BOOKE_IVOR8 (0x198)
+#define SPR_BOOKE_IVOR9 (0x199)
+#define SPR_BOOKE_IVOR10 (0x19A)
+#define SPR_BOOKE_IVOR11 (0x19B)
+#define SPR_BOOKE_IVOR12 (0x19C)
+#define SPR_BOOKE_IVOR13 (0x19D)
+#define SPR_BOOKE_IVOR14 (0x19E)
+#define SPR_BOOKE_IVOR15 (0x19F)
+#define SPR_BOOKE_IVOR38 (0x1B0)
+#define SPR_BOOKE_IVOR39 (0x1B1)
+#define SPR_BOOKE_IVOR40 (0x1B2)
+#define SPR_BOOKE_IVOR41 (0x1B3)
+#define SPR_BOOKE_IVOR42 (0x1B4)
+#define SPR_BOOKE_GIVOR2 (0x1B8)
+#define SPR_BOOKE_GIVOR3 (0x1B9)
+#define SPR_BOOKE_GIVOR4 (0x1BA)
+#define SPR_BOOKE_GIVOR8 (0x1BB)
+#define SPR_BOOKE_GIVOR13 (0x1BC)
+#define SPR_BOOKE_GIVOR14 (0x1BD)
+#define SPR_TIR (0x1BE)
+#define SPR_BOOKE_SPEFSCR (0x200)
+#define SPR_Exxx_BBEAR (0x201)
+#define SPR_Exxx_BBTAR (0x202)
+#define SPR_Exxx_L1CFG0 (0x203)
+#define SPR_Exxx_L1CFG1 (0x204)
+#define SPR_Exxx_NPIDR (0x205)
+#define SPR_ATBL (0x20E)
+#define SPR_ATBU (0x20F)
+#define SPR_IBAT0U (0x210)
+#define SPR_BOOKE_IVOR32 (0x210)
+#define SPR_RCPU_MI_GRA (0x210)
+#define SPR_IBAT0L (0x211)
+#define SPR_BOOKE_IVOR33 (0x211)
+#define SPR_IBAT1U (0x212)
+#define SPR_BOOKE_IVOR34 (0x212)
+#define SPR_IBAT1L (0x213)
+#define SPR_BOOKE_IVOR35 (0x213)
+#define SPR_IBAT2U (0x214)
+#define SPR_BOOKE_IVOR36 (0x214)
+#define SPR_IBAT2L (0x215)
+#define SPR_BOOKE_IVOR37 (0x215)
+#define SPR_IBAT3U (0x216)
+#define SPR_IBAT3L (0x217)
+#define SPR_DBAT0U (0x218)
+#define SPR_RCPU_L2U_GRA (0x218)
+#define SPR_DBAT0L (0x219)
+#define SPR_DBAT1U (0x21A)
+#define SPR_DBAT1L (0x21B)
+#define SPR_DBAT2U (0x21C)
+#define SPR_DBAT2L (0x21D)
+#define SPR_DBAT3U (0x21E)
+#define SPR_DBAT3L (0x21F)
+#define SPR_IBAT4U (0x230)
+#define SPR_RPCU_BBCMCR (0x230)
+#define SPR_MPC_IC_CST (0x230)
+#define SPR_Exxx_CTXCR (0x230)
+#define SPR_IBAT4L (0x231)
+#define SPR_MPC_IC_ADR (0x231)
+#define SPR_Exxx_DBCR3 (0x231)
+#define SPR_IBAT5U (0x232)
+#define SPR_MPC_IC_DAT (0x232)
+#define SPR_Exxx_DBCNT (0x232)
+#define SPR_IBAT5L (0x233)
+#define SPR_IBAT6U (0x234)
+#define SPR_IBAT6L (0x235)
+#define SPR_IBAT7U (0x236)
+#define SPR_IBAT7L (0x237)
+#define SPR_DBAT4U (0x238)
+#define SPR_RCPU_L2U_MCR (0x238)
+#define SPR_MPC_DC_CST (0x238)
+#define SPR_Exxx_ALTCTXCR (0x238)
+#define SPR_DBAT4L (0x239)
+#define SPR_MPC_DC_ADR (0x239)
+#define SPR_DBAT5U (0x23A)
+#define SPR_BOOKE_MCSRR0 (0x23A)
+#define SPR_MPC_DC_DAT (0x23A)
+#define SPR_DBAT5L (0x23B)
+#define SPR_BOOKE_MCSRR1 (0x23B)
+#define SPR_DBAT6U (0x23C)
+#define SPR_BOOKE_MCSR (0x23C)
+#define SPR_DBAT6L (0x23D)
+#define SPR_Exxx_MCAR (0x23D)
+#define SPR_DBAT7U (0x23E)
+#define SPR_BOOKE_DSRR0 (0x23E)
+#define SPR_DBAT7L (0x23F)
+#define SPR_BOOKE_DSRR1 (0x23F)
+#define SPR_BOOKE_SPRG8 (0x25C)
+#define SPR_BOOKE_SPRG9 (0x25D)
+#define SPR_BOOKE_MAS0 (0x270)
+#define SPR_BOOKE_MAS1 (0x271)
+#define SPR_BOOKE_MAS2 (0x272)
+#define SPR_BOOKE_MAS3 (0x273)
+#define SPR_BOOKE_MAS4 (0x274)
+#define SPR_BOOKE_MAS5 (0x275)
+#define SPR_BOOKE_MAS6 (0x276)
+#define SPR_BOOKE_PID1 (0x279)
+#define SPR_BOOKE_PID2 (0x27A)
+#define SPR_MPC_DPDR (0x280)
+#define SPR_MPC_IMMR (0x288)
+#define SPR_BOOKE_TLB0CFG (0x2B0)
+#define SPR_BOOKE_TLB1CFG (0x2B1)
+#define SPR_BOOKE_TLB2CFG (0x2B2)
+#define SPR_BOOKE_TLB3CFG (0x2B3)
+#define SPR_BOOKE_EPR (0x2BE)
+#define SPR_PERF0 (0x300)
+#define SPR_RCPU_MI_RBA0 (0x300)
+#define SPR_MPC_MI_CTR (0x300)
+#define SPR_POWER_USIER (0x300)
+#define SPR_PERF1 (0x301)
+#define SPR_RCPU_MI_RBA1 (0x301)
+#define SPR_POWER_UMMCR2 (0x301)
+#define SPR_PERF2 (0x302)
+#define SPR_RCPU_MI_RBA2 (0x302)
+#define SPR_MPC_MI_AP (0x302)
+#define SPR_POWER_UMMCRA (0x302)
+#define SPR_PERF3 (0x303)
+#define SPR_RCPU_MI_RBA3 (0x303)
+#define SPR_MPC_MI_EPN (0x303)
+#define SPR_POWER_UPMC1 (0x303)
+#define SPR_PERF4 (0x304)
+#define SPR_POWER_UPMC2 (0x304)
+#define SPR_PERF5 (0x305)
+#define SPR_MPC_MI_TWC (0x305)
+#define SPR_POWER_UPMC3 (0x305)
+#define SPR_PERF6 (0x306)
+#define SPR_MPC_MI_RPN (0x306)
+#define SPR_POWER_UPMC4 (0x306)
+#define SPR_PERF7 (0x307)
+#define SPR_POWER_UPMC5 (0x307)
+#define SPR_PERF8 (0x308)
+#define SPR_RCPU_L2U_RBA0 (0x308)
+#define SPR_MPC_MD_CTR (0x308)
+#define SPR_POWER_UPMC6 (0x308)
+#define SPR_PERF9 (0x309)
+#define SPR_RCPU_L2U_RBA1 (0x309)
+#define SPR_MPC_MD_CASID (0x309)
+#define SPR_970_UPMC7 (0X309)
+#define SPR_PERFA (0x30A)
+#define SPR_RCPU_L2U_RBA2 (0x30A)
+#define SPR_MPC_MD_AP (0x30A)
+#define SPR_970_UPMC8 (0X30A)
+#define SPR_PERFB (0x30B)
+#define SPR_RCPU_L2U_RBA3 (0x30B)
+#define SPR_MPC_MD_EPN (0x30B)
+#define SPR_POWER_UMMCR0 (0X30B)
+#define SPR_PERFC (0x30C)
+#define SPR_MPC_MD_TWB (0x30C)
+#define SPR_POWER_USIAR (0X30C)
+#define SPR_PERFD (0x30D)
+#define SPR_MPC_MD_TWC (0x30D)
+#define SPR_POWER_USDAR (0X30D)
+#define SPR_PERFE (0x30E)
+#define SPR_MPC_MD_RPN (0x30E)
+#define SPR_POWER_UMMCR1 (0X30E)
+#define SPR_PERFF (0x30F)
+#define SPR_MPC_MD_TW (0x30F)
+#define SPR_UPERF0 (0x310)
+#define SPR_POWER_SIER (0x310)
+#define SPR_UPERF1 (0x311)
+#define SPR_POWER_MMCR2 (0x311)
+#define SPR_UPERF2 (0x312)
+#define SPR_POWER_MMCRA (0X312)
+#define SPR_UPERF3 (0x313)
+#define SPR_POWER_PMC1 (0X313)
+#define SPR_UPERF4 (0x314)
+#define SPR_POWER_PMC2 (0X314)
+#define SPR_UPERF5 (0x315)
+#define SPR_POWER_PMC3 (0X315)
+#define SPR_UPERF6 (0x316)
+#define SPR_POWER_PMC4 (0X316)
+#define SPR_UPERF7 (0x317)
+#define SPR_POWER_PMC5 (0X317)
+#define SPR_UPERF8 (0x318)
+#define SPR_POWER_PMC6 (0X318)
+#define SPR_UPERF9 (0x319)
+#define SPR_970_PMC7 (0X319)
+#define SPR_UPERFA (0x31A)
+#define SPR_970_PMC8 (0X31A)
+#define SPR_UPERFB (0x31B)
+#define SPR_POWER_MMCR0 (0X31B)
+#define SPR_UPERFC (0x31C)
+#define SPR_POWER_SIAR (0X31C)
+#define SPR_UPERFD (0x31D)
+#define SPR_POWER_SDAR (0X31D)
+#define SPR_UPERFE (0x31E)
+#define SPR_POWER_MMCR1 (0X31E)
+#define SPR_UPERFF (0x31F)
+#define SPR_RCPU_MI_RA0 (0x320)
+#define SPR_MPC_MI_DBCAM (0x320)
+#define SPR_BESCRS (0x320)
+#define SPR_RCPU_MI_RA1 (0x321)
+#define SPR_MPC_MI_DBRAM0 (0x321)
+#define SPR_BESCRSU (0x321)
+#define SPR_RCPU_MI_RA2 (0x322)
+#define SPR_MPC_MI_DBRAM1 (0x322)
+#define SPR_BESCRR (0x322)
+#define SPR_RCPU_MI_RA3 (0x323)
+#define SPR_BESCRRU (0x323)
+#define SPR_EBBHR (0x324)
+#define SPR_EBBRR (0x325)
+#define SPR_BESCR (0x326)
+#define SPR_RCPU_L2U_RA0 (0x328)
+#define SPR_MPC_MD_DBCAM (0x328)
+#define SPR_RCPU_L2U_RA1 (0x329)
+#define SPR_MPC_MD_DBRAM0 (0x329)
+#define SPR_RCPU_L2U_RA2 (0x32A)
+#define SPR_MPC_MD_DBRAM1 (0x32A)
+#define SPR_RCPU_L2U_RA3 (0x32B)
+#define SPR_TAR (0x32F)
+#define SPR_IC (0x350)
+#define SPR_VTB (0x351)
+#define SPR_MMCRC (0x353)
+#define SPR_440_INV0 (0x370)
+#define SPR_440_INV1 (0x371)
+#define SPR_440_INV2 (0x372)
+#define SPR_440_INV3 (0x373)
+#define SPR_440_ITV0 (0x374)
+#define SPR_440_ITV1 (0x375)
+#define SPR_440_ITV2 (0x376)
+#define SPR_440_ITV3 (0x377)
+#define SPR_440_CCR1 (0x378)
+#define SPR_TACR (0x378)
+#define SPR_TCSCR (0x379)
+#define SPR_CSIGR (0x37a)
+#define SPR_DCRIPR (0x37B)
+#define SPR_POWER_SPMC1 (0x37C)
+#define SPR_POWER_SPMC2 (0x37D)
+#define SPR_POWER_MMCRS (0x37E)
+#define SPR_WORT (0x37F)
+#define SPR_PPR (0x380)
+#define SPR_750_GQR0 (0x390)
+#define SPR_440_DNV0 (0x390)
+#define SPR_750_GQR1 (0x391)
+#define SPR_440_DNV1 (0x391)
+#define SPR_750_GQR2 (0x392)
+#define SPR_440_DNV2 (0x392)
+#define SPR_750_GQR3 (0x393)
+#define SPR_440_DNV3 (0x393)
+#define SPR_750_GQR4 (0x394)
+#define SPR_440_DTV0 (0x394)
+#define SPR_750_GQR5 (0x395)
+#define SPR_440_DTV1 (0x395)
+#define SPR_750_GQR6 (0x396)
+#define SPR_440_DTV2 (0x396)
+#define SPR_750_GQR7 (0x397)
+#define SPR_440_DTV3 (0x397)
+#define SPR_750_THRM4 (0x398)
+#define SPR_750CL_HID2 (0x398)
+#define SPR_440_DVLIM (0x398)
+#define SPR_750_WPAR (0x399)
+#define SPR_440_IVLIM (0x399)
+#define SPR_TSCR (0x399)
+#define SPR_750_DMAU (0x39A)
+#define SPR_750_DMAL (0x39B)
+#define SPR_440_RSTCFG (0x39B)
+#define SPR_BOOKE_DCDBTRL (0x39C)
+#define SPR_BOOKE_DCDBTRH (0x39D)
+#define SPR_BOOKE_ICDBTRL (0x39E)
+#define SPR_BOOKE_ICDBTRH (0x39F)
+#define SPR_74XX_UMMCR2 (0x3A0)
+#define SPR_7XX_UPMC5 (0x3A1)
+#define SPR_7XX_UPMC6 (0x3A2)
+#define SPR_UBAMR (0x3A7)
+#define SPR_7XX_UMMCR0 (0x3A8)
+#define SPR_7XX_UPMC1 (0x3A9)
+#define SPR_7XX_UPMC2 (0x3AA)
+#define SPR_7XX_USIAR (0x3AB)
+#define SPR_7XX_UMMCR1 (0x3AC)
+#define SPR_7XX_UPMC3 (0x3AD)
+#define SPR_7XX_UPMC4 (0x3AE)
+#define SPR_USDA (0x3AF)
+#define SPR_40x_ZPR (0x3B0)
+#define SPR_BOOKE_MAS7 (0x3B0)
+#define SPR_74XX_MMCR2 (0x3B0)
+#define SPR_7XX_PMC5 (0x3B1)
+#define SPR_40x_PID (0x3B1)
+#define SPR_7XX_PMC6 (0x3B2)
+#define SPR_440_MMUCR (0x3B2)
+#define SPR_4xx_CCR0 (0x3B3)
+#define SPR_BOOKE_EPLC (0x3B3)
+#define SPR_405_IAC3 (0x3B4)
+#define SPR_BOOKE_EPSC (0x3B4)
+#define SPR_405_IAC4 (0x3B5)
+#define SPR_405_DVC1 (0x3B6)
+#define SPR_405_DVC2 (0x3B7)
+#define SPR_BAMR (0x3B7)
+#define SPR_7XX_MMCR0 (0x3B8)
+#define SPR_7XX_PMC1 (0x3B9)
+#define SPR_40x_SGR (0x3B9)
+#define SPR_7XX_PMC2 (0x3BA)
+#define SPR_40x_DCWR (0x3BA)
+#define SPR_7XX_SIAR (0x3BB)
+#define SPR_405_SLER (0x3BB)
+#define SPR_7XX_MMCR1 (0x3BC)
+#define SPR_405_SU0R (0x3BC)
+#define SPR_401_SKR (0x3BC)
+#define SPR_7XX_PMC3 (0x3BD)
+#define SPR_405_DBCR1 (0x3BD)
+#define SPR_7XX_PMC4 (0x3BE)
+#define SPR_SDA (0x3BF)
+#define SPR_403_VTBL (0x3CC)
+#define SPR_403_VTBU (0x3CD)
+#define SPR_DMISS (0x3D0)
+#define SPR_DCMP (0x3D1)
+#define SPR_HASH1 (0x3D2)
+#define SPR_HASH2 (0x3D3)
+#define SPR_BOOKE_ICDBDR (0x3D3)
+#define SPR_TLBMISS (0x3D4)
+#define SPR_IMISS (0x3D4)
+#define SPR_40x_ESR (0x3D4)
+#define SPR_PTEHI (0x3D5)
+#define SPR_ICMP (0x3D5)
+#define SPR_40x_DEAR (0x3D5)
+#define SPR_PTELO (0x3D6)
+#define SPR_RPA (0x3D6)
+#define SPR_40x_EVPR (0x3D6)
+#define SPR_L3PM (0x3D7)
+#define SPR_403_CDBCR (0x3D7)
+#define SPR_L3ITCR0 (0x3D8)
+#define SPR_TCR (0x3D8)
+#define SPR_40x_TSR (0x3D8)
+#define SPR_IBR (0x3DA)
+#define SPR_40x_TCR (0x3DA)
+#define SPR_ESASRR (0x3DB)
+#define SPR_40x_PIT (0x3DB)
+#define SPR_403_TBL (0x3DC)
+#define SPR_403_TBU (0x3DD)
+#define SPR_SEBR (0x3DE)
+#define SPR_40x_SRR2 (0x3DE)
+#define SPR_SER (0x3DF)
+#define SPR_40x_SRR3 (0x3DF)
+#define SPR_L3OHCR (0x3E8)
+#define SPR_L3ITCR1 (0x3E9)
+#define SPR_L3ITCR2 (0x3EA)
+#define SPR_L3ITCR3 (0x3EB)
+#define SPR_HID0 (0x3F0)
+#define SPR_40x_DBSR (0x3F0)
+#define SPR_HID1 (0x3F1)
+#define SPR_IABR (0x3F2)
+#define SPR_40x_DBCR0 (0x3F2)
+#define SPR_601_HID2 (0x3F2)
+#define SPR_Exxx_L1CSR0 (0x3F2)
+#define SPR_ICTRL (0x3F3)
+#define SPR_HID2 (0x3F3)
+#define SPR_750CL_HID4 (0x3F3)
+#define SPR_Exxx_L1CSR1 (0x3F3)
+#define SPR_440_DBDR (0x3F3)
+#define SPR_LDSTDB (0x3F4)
+#define SPR_750_TDCL (0x3F4)
+#define SPR_40x_IAC1 (0x3F4)
+#define SPR_MMUCSR0 (0x3F4)
+#define SPR_970_HID4 (0x3F4)
+#define SPR_DABR (0x3F5)
+#define DABR_MASK (~(target_ulong)0x7)
+#define SPR_Exxx_BUCSR (0x3F5)
+#define SPR_40x_IAC2 (0x3F5)
+#define SPR_601_HID5 (0x3F5)
+#define SPR_40x_DAC1 (0x3F6)
+#define SPR_MSSCR0 (0x3F6)
+#define SPR_970_HID5 (0x3F6)
+#define SPR_MSSSR0 (0x3F7)
+#define SPR_MSSCR1 (0x3F7)
+#define SPR_DABRX (0x3F7)
+#define SPR_40x_DAC2 (0x3F7)
+#define SPR_MMUCFG (0x3F7)
+#define SPR_LDSTCR (0x3F8)
+#define SPR_L2PMCR (0x3F8)
+#define SPR_750FX_HID2 (0x3F8)
+#define SPR_Exxx_L1FINV0 (0x3F8)
+#define SPR_L2CR (0x3F9)
+#define SPR_L3CR (0x3FA)
+#define SPR_750_TDCH (0x3FA)
+#define SPR_IABR2 (0x3FA)
+#define SPR_40x_DCCR (0x3FA)
+#define SPR_ICTC (0x3FB)
+#define SPR_40x_ICCR (0x3FB)
+#define SPR_THRM1 (0x3FC)
+#define SPR_403_PBL1 (0x3FC)
+#define SPR_SP (0x3FD)
+#define SPR_THRM2 (0x3FD)
+#define SPR_403_PBU1 (0x3FD)
+#define SPR_604_HID13 (0x3FD)
+#define SPR_LT (0x3FE)
+#define SPR_THRM3 (0x3FE)
+#define SPR_RCPU_FPECR (0x3FE)
+#define SPR_403_PBL2 (0x3FE)
+#define SPR_PIR (0x3FF)
+#define SPR_403_PBU2 (0x3FF)
+#define SPR_601_HID15 (0x3FF)
+#define SPR_604_HID15 (0x3FF)
+#define SPR_E500_SVR (0x3FF)
+
+/* Disable MAS Interrupt Updates for Hypervisor */
+#define EPCR_DMIUH (1 << 22)
+/* Disable Guest TLB Management Instructions */
+#define EPCR_DGTMI (1 << 23)
+/* Guest Interrupt Computation Mode */
+#define EPCR_GICM (1 << 24)
+/* Interrupt Computation Mode */
+#define EPCR_ICM (1 << 25)
+/* Disable Embedded Hypervisor Debug */
+#define EPCR_DUVD (1 << 26)
+/* Instruction Storage Interrupt Directed to Guest State */
+#define EPCR_ISIGS (1 << 27)
+/* Data Storage Interrupt Directed to Guest State */
+#define EPCR_DSIGS (1 << 28)
+/* Instruction TLB Error Interrupt Directed to Guest State */
+#define EPCR_ITLBGS (1 << 29)
+/* Data TLB Error Interrupt Directed to Guest State */
+#define EPCR_DTLBGS (1 << 30)
+/* External Input Interrupt Directed to Guest State */
+#define EPCR_EXTGS (1 << 31)
+
+#define L1CSR0_CPE 0x00010000 /* Data Cache Parity Enable */
+#define L1CSR0_CUL 0x00000400 /* (D-)Cache Unable to Lock */
+#define L1CSR0_DCLFR 0x00000100 /* D-Cache Lock Flash Reset */
+#define L1CSR0_DCFI 0x00000002 /* Data Cache Flash Invalidate */
+#define L1CSR0_DCE 0x00000001 /* Data Cache Enable */
+
+#define L1CSR1_CPE 0x00010000 /* Instruction Cache Parity Enable */
+#define L1CSR1_ICUL 0x00000400 /* I-Cache Unable to Lock */
+#define L1CSR1_ICLFR 0x00000100 /* I-Cache Lock Flash Reset */
+#define L1CSR1_ICFI 0x00000002 /* Instruction Cache Flash Invalidate */
+#define L1CSR1_ICE 0x00000001 /* Instruction Cache Enable */
+
+/* HID0 bits */
+#define HID0_DEEPNAP (1 << 24) /* pre-2.06 */
+#define HID0_DOZE (1 << 23) /* pre-2.06 */
+#define HID0_NAP (1 << 22) /* pre-2.06 */
+#define HID0_HILE (1ull << (63 - 19)) /* POWER8 */
+
+/*****************************************************************************/
+/* PowerPC Instructions types definitions */
+enum {
+ PPC_NONE = 0x0000000000000000ULL,
+ /* PowerPC base instructions set */
+ PPC_INSNS_BASE = 0x0000000000000001ULL,
+ /* integer operations instructions */
+#define PPC_INTEGER PPC_INSNS_BASE
+ /* flow control instructions */
+#define PPC_FLOW PPC_INSNS_BASE
+ /* virtual memory instructions */
+#define PPC_MEM PPC_INSNS_BASE
+ /* ld/st with reservation instructions */
+#define PPC_RES PPC_INSNS_BASE
+ /* spr/msr access instructions */
+#define PPC_MISC PPC_INSNS_BASE
+ /* Deprecated instruction sets */
+ /* Original POWER instruction set */
+ PPC_POWER = 0x0000000000000002ULL,
+ /* POWER2 instruction set extension */
+ PPC_POWER2 = 0x0000000000000004ULL,
+ /* Power RTC support */
+ PPC_POWER_RTC = 0x0000000000000008ULL,
+ /* Power-to-PowerPC bridge (601) */
+ PPC_POWER_BR = 0x0000000000000010ULL,
+ /* 64 bits PowerPC instruction set */
+ PPC_64B = 0x0000000000000020ULL,
+ /* New 64 bits extensions (PowerPC 2.0x) */
+ PPC_64BX = 0x0000000000000040ULL,
+ /* 64 bits hypervisor extensions */
+ PPC_64H = 0x0000000000000080ULL,
+ /* New wait instruction (PowerPC 2.0x) */
+ PPC_WAIT = 0x0000000000000100ULL,
+ /* Time base mftb instruction */
+ PPC_MFTB = 0x0000000000000200ULL,
+
+ /* Fixed-point unit extensions */
+ /* PowerPC 602 specific */
+ PPC_602_SPEC = 0x0000000000000400ULL,
+ /* isel instruction */
+ PPC_ISEL = 0x0000000000000800ULL,
+ /* popcntb instruction */
+ PPC_POPCNTB = 0x0000000000001000ULL,
+ /* string load / store */
+ PPC_STRING = 0x0000000000002000ULL,
+ /* real mode cache inhibited load / store */
+ PPC_CILDST = 0x0000000000004000ULL,
+
+ /* Floating-point unit extensions */
+ /* Optional floating point instructions */
+ PPC_FLOAT = 0x0000000000010000ULL,
+ /* New floating-point extensions (PowerPC 2.0x) */
+ PPC_FLOAT_EXT = 0x0000000000020000ULL,
+ PPC_FLOAT_FSQRT = 0x0000000000040000ULL,
+ PPC_FLOAT_FRES = 0x0000000000080000ULL,
+ PPC_FLOAT_FRSQRTE = 0x0000000000100000ULL,
+ PPC_FLOAT_FRSQRTES = 0x0000000000200000ULL,
+ PPC_FLOAT_FSEL = 0x0000000000400000ULL,
+ PPC_FLOAT_STFIWX = 0x0000000000800000ULL,
+
+ /* Vector/SIMD extensions */
+ /* Altivec support */
+ PPC_ALTIVEC = 0x0000000001000000ULL,
+ /* PowerPC 2.03 SPE extension */
+ PPC_SPE = 0x0000000002000000ULL,
+ /* PowerPC 2.03 SPE single-precision floating-point extension */
+ PPC_SPE_SINGLE = 0x0000000004000000ULL,
+ /* PowerPC 2.03 SPE double-precision floating-point extension */
+ PPC_SPE_DOUBLE = 0x0000000008000000ULL,
+
+ /* Optional memory control instructions */
+ PPC_MEM_TLBIA = 0x0000000010000000ULL,
+ PPC_MEM_TLBIE = 0x0000000020000000ULL,
+ PPC_MEM_TLBSYNC = 0x0000000040000000ULL,
+ /* sync instruction */
+ PPC_MEM_SYNC = 0x0000000080000000ULL,
+ /* eieio instruction */
+ PPC_MEM_EIEIO = 0x0000000100000000ULL,
+
+ /* Cache control instructions */
+ PPC_CACHE = 0x0000000200000000ULL,
+ /* icbi instruction */
+ PPC_CACHE_ICBI = 0x0000000400000000ULL,
+ /* dcbz instruction */
+ PPC_CACHE_DCBZ = 0x0000000800000000ULL,
+ /* dcba instruction */
+ PPC_CACHE_DCBA = 0x0000002000000000ULL,
+ /* Freescale cache locking instructions */
+ PPC_CACHE_LOCK = 0x0000004000000000ULL,
+
+ /* MMU related extensions */
+ /* external control instructions */
+ PPC_EXTERN = 0x0000010000000000ULL,
+ /* segment register access instructions */
+ PPC_SEGMENT = 0x0000020000000000ULL,
+ /* PowerPC 6xx TLB management instructions */
+ PPC_6xx_TLB = 0x0000040000000000ULL,
+ /* PowerPC 74xx TLB management instructions */
+ PPC_74xx_TLB = 0x0000080000000000ULL,
+ /* PowerPC 40x TLB management instructions */
+ PPC_40x_TLB = 0x0000100000000000ULL,
+ /* segment register access instructions for PowerPC 64 "bridge" */
+ PPC_SEGMENT_64B = 0x0000200000000000ULL,
+ /* SLB management */
+ PPC_SLBI = 0x0000400000000000ULL,
+
+ /* Embedded PowerPC dedicated instructions */
+ PPC_WRTEE = 0x0001000000000000ULL,
+ /* PowerPC 40x exception model */
+ PPC_40x_EXCP = 0x0002000000000000ULL,
+ /* PowerPC 405 Mac instructions */
+ PPC_405_MAC = 0x0004000000000000ULL,
+ /* PowerPC 440 specific instructions */
+ PPC_440_SPEC = 0x0008000000000000ULL,
+ /* BookE (embedded) PowerPC specification */
+ PPC_BOOKE = 0x0010000000000000ULL,
+ /* mfapidi instruction */
+ PPC_MFAPIDI = 0x0020000000000000ULL,
+ /* tlbiva instruction */
+ PPC_TLBIVA = 0x0040000000000000ULL,
+ /* tlbivax instruction */
+ PPC_TLBIVAX = 0x0080000000000000ULL,
+ /* PowerPC 4xx dedicated instructions */
+ PPC_4xx_COMMON = 0x0100000000000000ULL,
+    /* PowerPC 40x icbt instructions */
+ PPC_40x_ICBT = 0x0200000000000000ULL,
+ /* rfmci is not implemented in all BookE PowerPC */
+ PPC_RFMCI = 0x0400000000000000ULL,
+ /* rfdi instruction */
+ PPC_RFDI = 0x0800000000000000ULL,
+ /* DCR accesses */
+ PPC_DCR = 0x1000000000000000ULL,
+    /* DCR extended accesses */
+ PPC_DCRX = 0x2000000000000000ULL,
+ /* user-mode DCR access, implemented in PowerPC 460 */
+ PPC_DCRUX = 0x4000000000000000ULL,
+ /* popcntw and popcntd instructions */
+ PPC_POPCNTWD = 0x8000000000000000ULL,
+
+#define PPC_TCG_INSNS (PPC_INSNS_BASE | PPC_POWER | PPC_POWER2 \
+ | PPC_POWER_RTC | PPC_POWER_BR | PPC_64B \
+ | PPC_64BX | PPC_64H | PPC_WAIT | PPC_MFTB \
+ | PPC_602_SPEC | PPC_ISEL | PPC_POPCNTB \
+ | PPC_STRING | PPC_FLOAT | PPC_FLOAT_EXT \
+ | PPC_FLOAT_FSQRT | PPC_FLOAT_FRES \
+ | PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES \
+ | PPC_FLOAT_FSEL | PPC_FLOAT_STFIWX \
+ | PPC_ALTIVEC | PPC_SPE | PPC_SPE_SINGLE \
+ | PPC_SPE_DOUBLE | PPC_MEM_TLBIA \
+ | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC \
+ | PPC_MEM_SYNC | PPC_MEM_EIEIO \
+ | PPC_CACHE | PPC_CACHE_ICBI \
+ | PPC_CACHE_DCBZ \
+ | PPC_CACHE_DCBA | PPC_CACHE_LOCK \
+ | PPC_EXTERN | PPC_SEGMENT | PPC_6xx_TLB \
+ | PPC_74xx_TLB | PPC_40x_TLB | PPC_SEGMENT_64B \
+ | PPC_SLBI | PPC_WRTEE | PPC_40x_EXCP \
+ | PPC_405_MAC | PPC_440_SPEC | PPC_BOOKE \
+ | PPC_MFAPIDI | PPC_TLBIVA | PPC_TLBIVAX \
+ | PPC_4xx_COMMON | PPC_40x_ICBT | PPC_RFMCI \
+ | PPC_RFDI | PPC_DCR | PPC_DCRX | PPC_DCRUX \
+ | PPC_POPCNTWD | PPC_CILDST)
+
+ /* extended type values */
+
+ /* BookE 2.06 PowerPC specification */
+ PPC2_BOOKE206 = 0x0000000000000001ULL,
+ /* VSX (extensions to Altivec / VMX) */
+ PPC2_VSX = 0x0000000000000002ULL,
+ /* Decimal Floating Point (DFP) */
+ PPC2_DFP = 0x0000000000000004ULL,
+ /* Embedded.Processor Control */
+ PPC2_PRCNTL = 0x0000000000000008ULL,
+ /* Byte-reversed, indexed, double-word load and store */
+ PPC2_DBRX = 0x0000000000000010ULL,
+ /* Book I 2.05 PowerPC specification */
+ PPC2_ISA205 = 0x0000000000000020ULL,
+ /* VSX additions in ISA 2.07 */
+ PPC2_VSX207 = 0x0000000000000040ULL,
+ /* ISA 2.06B bpermd */
+ PPC2_PERM_ISA206 = 0x0000000000000080ULL,
+ /* ISA 2.06B divide extended variants */
+ PPC2_DIVE_ISA206 = 0x0000000000000100ULL,
+ /* ISA 2.06B larx/stcx. instructions */
+ PPC2_ATOMIC_ISA206 = 0x0000000000000200ULL,
+ /* ISA 2.06B floating point integer conversion */
+ PPC2_FP_CVT_ISA206 = 0x0000000000000400ULL,
+ /* ISA 2.06B floating point test instructions */
+ PPC2_FP_TST_ISA206 = 0x0000000000000800ULL,
+ /* ISA 2.07 bctar instruction */
+ PPC2_BCTAR_ISA207 = 0x0000000000001000ULL,
+ /* ISA 2.07 load/store quadword */
+ PPC2_LSQ_ISA207 = 0x0000000000002000ULL,
+ /* ISA 2.07 Altivec */
+ PPC2_ALTIVEC_207 = 0x0000000000004000ULL,
+ /* PowerISA 2.07 Book3s specification */
+ PPC2_ISA207S = 0x0000000000008000ULL,
+ /* Double precision floating point conversion for signed integer 64 */
+ PPC2_FP_CVT_S64 = 0x0000000000010000ULL,
+ /* Transactional Memory (ISA 2.07, Book II) */
+ PPC2_TM = 0x0000000000020000ULL,
+    /* Server PM instructions (ISA 2.06, Book III) */
+ PPC2_PM_ISA206 = 0x0000000000040000ULL,
+ /* POWER ISA 3.0 */
+ PPC2_ISA300 = 0x0000000000080000ULL,
+
+#define PPC_TCG_INSNS2 (PPC2_BOOKE206 | PPC2_VSX | PPC2_PRCNTL | PPC2_DBRX | \
+ PPC2_ISA205 | PPC2_VSX207 | PPC2_PERM_ISA206 | \
+ PPC2_DIVE_ISA206 | PPC2_ATOMIC_ISA206 | \
+ PPC2_FP_CVT_ISA206 | PPC2_FP_TST_ISA206 | \
+ PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | \
+ PPC2_ALTIVEC_207 | PPC2_ISA207S | PPC2_DFP | \
+ PPC2_FP_CVT_S64 | PPC2_TM | PPC2_PM_ISA206 | \
+ PPC2_ISA300)
+};
+
+/*****************************************************************************/
+/* Memory access type:
+ * may be needed for precise access rights control and precise exceptions.
+ */
+enum {
+ /* 1 bit to define user level / supervisor access */
+ ACCESS_USER = 0x00,
+ ACCESS_SUPER = 0x01,
+ /* Type of instruction that generated the access */
+ ACCESS_CODE = 0x10, /* Code fetch access */
+ ACCESS_INT = 0x20, /* Integer load/store access */
+ ACCESS_FLOAT = 0x30, /* floating point load/store access */
+ ACCESS_RES = 0x40, /* load/store with reservation */
+ ACCESS_EXT = 0x50, /* external access */
+ ACCESS_CACHE = 0x60, /* Cache manipulation */
+};
+
+/* Hardware interrupt sources:
+ * all these exceptions can be raised simultaneously.
+ */
+/* Input pins definitions */
+enum {
+ /* 6xx bus input pins */
+ PPC6xx_INPUT_HRESET = 0,
+ PPC6xx_INPUT_SRESET = 1,
+ PPC6xx_INPUT_CKSTP_IN = 2,
+ PPC6xx_INPUT_MCP = 3,
+ PPC6xx_INPUT_SMI = 4,
+ PPC6xx_INPUT_INT = 5,
+ PPC6xx_INPUT_TBEN = 6,
+ PPC6xx_INPUT_WAKEUP = 7,
+ PPC6xx_INPUT_NB,
+};
+
+enum {
+ /* Embedded PowerPC input pins */
+ PPCBookE_INPUT_HRESET = 0,
+ PPCBookE_INPUT_SRESET = 1,
+ PPCBookE_INPUT_CKSTP_IN = 2,
+ PPCBookE_INPUT_MCP = 3,
+ PPCBookE_INPUT_SMI = 4,
+ PPCBookE_INPUT_INT = 5,
+ PPCBookE_INPUT_CINT = 6,
+ PPCBookE_INPUT_NB,
+};
+
+enum {
+ /* PowerPC E500 input pins */
+ PPCE500_INPUT_RESET_CORE = 0,
+ PPCE500_INPUT_MCK = 1,
+ PPCE500_INPUT_CINT = 3,
+ PPCE500_INPUT_INT = 4,
+ PPCE500_INPUT_DEBUG = 6,
+ PPCE500_INPUT_NB,
+};
+
+enum {
+ /* PowerPC 40x input pins */
+ PPC40x_INPUT_RESET_CORE = 0,
+ PPC40x_INPUT_RESET_CHIP = 1,
+ PPC40x_INPUT_RESET_SYS = 2,
+ PPC40x_INPUT_CINT = 3,
+ PPC40x_INPUT_INT = 4,
+ PPC40x_INPUT_HALT = 5,
+ PPC40x_INPUT_DEBUG = 6,
+ PPC40x_INPUT_NB,
+};
+
+enum {
+ /* RCPU input pins */
+ PPCRCPU_INPUT_PORESET = 0,
+ PPCRCPU_INPUT_HRESET = 1,
+ PPCRCPU_INPUT_SRESET = 2,
+ PPCRCPU_INPUT_IRQ0 = 3,
+ PPCRCPU_INPUT_IRQ1 = 4,
+ PPCRCPU_INPUT_IRQ2 = 5,
+ PPCRCPU_INPUT_IRQ3 = 6,
+ PPCRCPU_INPUT_IRQ4 = 7,
+ PPCRCPU_INPUT_IRQ5 = 8,
+ PPCRCPU_INPUT_IRQ6 = 9,
+ PPCRCPU_INPUT_IRQ7 = 10,
+ PPCRCPU_INPUT_NB,
+};
+
+#if defined(TARGET_PPC64)
+enum {
+ /* PowerPC 970 input pins */
+ PPC970_INPUT_HRESET = 0,
+ PPC970_INPUT_SRESET = 1,
+ PPC970_INPUT_CKSTP = 2,
+ PPC970_INPUT_TBEN = 3,
+ PPC970_INPUT_MCP = 4,
+ PPC970_INPUT_INT = 5,
+ PPC970_INPUT_THINT = 6,
+ PPC970_INPUT_NB,
+};
+
+enum {
+ /* POWER7 input pins */
+ POWER7_INPUT_INT = 0,
+ /* POWER7 probably has other inputs, but we don't care about them
+ * for any existing machine. We can wire these up when we need
+ * them */
+ POWER7_INPUT_NB,
+};
+#endif
+
+/* Hardware exceptions definitions */
+enum {
+ /* External hardware exception sources */
+ PPC_INTERRUPT_RESET = 0, /* Reset exception */
+ PPC_INTERRUPT_WAKEUP, /* Wakeup exception */
+ PPC_INTERRUPT_MCK, /* Machine check exception */
+ PPC_INTERRUPT_EXT, /* External interrupt */
+ PPC_INTERRUPT_SMI, /* System management interrupt */
+ PPC_INTERRUPT_CEXT, /* Critical external interrupt */
+ PPC_INTERRUPT_DEBUG, /* External debug exception */
+ PPC_INTERRUPT_THERM, /* Thermal exception */
+ /* Internal hardware exception sources */
+ PPC_INTERRUPT_DECR, /* Decrementer exception */
+ PPC_INTERRUPT_HDECR, /* Hypervisor decrementer exception */
+    PPC_INTERRUPT_PIT,        /* Programmable interval timer interrupt */
+ PPC_INTERRUPT_FIT, /* Fixed interval timer interrupt */
+ PPC_INTERRUPT_WDT, /* Watchdog timer interrupt */
+ PPC_INTERRUPT_CDOORBELL, /* Critical doorbell interrupt */
+ PPC_INTERRUPT_DOORBELL, /* Doorbell interrupt */
+ PPC_INTERRUPT_PERFM, /* Performance monitor interrupt */
+    PPC_INTERRUPT_HMI,        /* Hypervisor Maintenance interrupt */
+ PPC_INTERRUPT_HDOORBELL, /* Hypervisor Doorbell interrupt */
+};
+
+/* Processor Compatibility mask (PCR) */
+enum {
+ PCR_COMPAT_2_05 = 1ull << (63-62),
+ PCR_COMPAT_2_06 = 1ull << (63-61),
+ PCR_COMPAT_2_07 = 1ull << (63-60),
+ PCR_VEC_DIS = 1ull << (63-0), /* Vec. disable (bit NA since POWER8) */
+ PCR_VSX_DIS = 1ull << (63-1), /* VSX disable (bit NA since POWER8) */
+ PCR_TM_DIS = 1ull << (63-2), /* Trans. memory disable (POWER8) */
+};
+
+/* HMER/HMEER */
+enum {
+ HMER_MALFUNCTION_ALERT = 1ull << (63 - 0),
+ HMER_PROC_RECV_DONE = 1ull << (63 - 2),
+ HMER_PROC_RECV_ERROR_MASKED = 1ull << (63 - 3),
+ HMER_TFAC_ERROR = 1ull << (63 - 4),
+ HMER_TFMR_PARITY_ERROR = 1ull << (63 - 5),
+ HMER_XSCOM_FAIL = 1ull << (63 - 8),
+ HMER_XSCOM_DONE = 1ull << (63 - 9),
+ HMER_PROC_RECV_AGAIN = 1ull << (63 - 11),
+ HMER_WARN_RISE = 1ull << (63 - 14),
+ HMER_WARN_FALL = 1ull << (63 - 15),
+ HMER_SCOM_FIR_HMI = 1ull << (63 - 16),
+ HMER_TRIG_FIR_HMI = 1ull << (63 - 17),
+ HMER_HYP_RESOURCE_ERR = 1ull << (63 - 20),
+ HMER_XSCOM_STATUS_MASK = 7ull << (63 - 23),
+ HMER_XSCOM_STATUS_LSH = (63 - 23),
+};
+
+/* Alternate Interrupt Location (AIL) */
+enum {
+ AIL_NONE = 0,
+ AIL_RESERVED = 1,
+ AIL_0001_8000 = 2,
+ AIL_C000_0000_0000_4000 = 3,
+};
+
+/*****************************************************************************/
+
+static inline target_ulong cpu_read_xer(CPUPPCState *env)
+{
+    return env->xer | (env->so << XER_SO) | (env->ov << XER_OV) |
+           (env->ca << XER_CA);
+}
+
+static inline void cpu_write_xer(CPUPPCState *env, target_ulong xer)
+{
+ env->so = (xer >> XER_SO) & 1;
+ env->ov = (xer >> XER_OV) & 1;
+ env->ca = (xer >> XER_CA) & 1;
+ env->xer = xer & ~((1u << XER_SO) | (1u << XER_OV) | (1u << XER_CA));
+}
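+
+/* SO/OV/CA live outside env->xer in their own fields, so an illustrative
+ * read-modify-write such as
+ *     cpu_write_xer(env, cpu_read_xer(env) | (1u << XER_CA));
+ * sets env->ca while leaving the remaining XER bits unchanged.
+ */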
+
+static inline void cpu_get_tb_cpu_state(CPUPPCState *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *flags)
+{
+ *pc = env->nip;
+ *cs_base = 0;
+ *flags = env->hflags;
+}
+
+void QEMU_NORETURN raise_exception(CPUPPCState *env, uint32_t exception);
+void QEMU_NORETURN raise_exception_ra(CPUPPCState *env, uint32_t exception,
+ uintptr_t raddr);
+void QEMU_NORETURN raise_exception_err(CPUPPCState *env, uint32_t exception,
+ uint32_t error_code);
+void QEMU_NORETURN raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
+ uint32_t error_code, uintptr_t raddr);
+
+#if !defined(CONFIG_USER_ONLY)
+static inline int booke206_tlbm_id(CPUPPCState *env, ppcmas_tlb_t *tlbm)
+{
+ uintptr_t tlbml = (uintptr_t)tlbm;
+ uintptr_t tlbl = (uintptr_t)env->tlb.tlbm;
+
+ return (tlbml - tlbl) / sizeof(env->tlb.tlbm[0]);
+}
+
+static inline int booke206_tlb_size(CPUPPCState *env, int tlbn)
+{
+ uint32_t tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn];
+ int r = tlbncfg & TLBnCFG_N_ENTRY;
+ return r;
+}
+
+static inline int booke206_tlb_ways(CPUPPCState *env, int tlbn)
+{
+ uint32_t tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn];
+ int r = tlbncfg >> TLBnCFG_ASSOC_SHIFT;
+ return r;
+}
+
+static inline int booke206_tlbm_to_tlbn(CPUPPCState *env, ppcmas_tlb_t *tlbm)
+{
+ int id = booke206_tlbm_id(env, tlbm);
+ int end = 0;
+ int i;
+
+ for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
+ end += booke206_tlb_size(env, i);
+ if (id < end) {
+ return i;
+ }
+ }
+
+ cpu_abort(CPU(ppc_env_get_cpu(env)), "Unknown TLBe: %d\n", id);
+ return 0;
+}
+
+static inline int booke206_tlbm_to_way(CPUPPCState *env, ppcmas_tlb_t *tlb)
+{
+ int tlbn = booke206_tlbm_to_tlbn(env, tlb);
+ int tlbid = booke206_tlbm_id(env, tlb);
+ return tlbid & (booke206_tlb_ways(env, tlbn) - 1);
+}
+
+static inline ppcmas_tlb_t *booke206_get_tlbm(CPUPPCState *env, const int tlbn,
+ target_ulong ea, int way)
+{
+ int r;
+ uint32_t ways = booke206_tlb_ways(env, tlbn);
+ int ways_bits = ctz32(ways);
+ int tlb_bits = ctz32(booke206_tlb_size(env, tlbn));
+ int i;
+
+ way &= ways - 1;
+ ea >>= MAS2_EPN_SHIFT;
+ ea &= (1 << (tlb_bits - ways_bits)) - 1;
+ r = (ea << ways_bits) | way;
+
+ if (r >= booke206_tlb_size(env, tlbn)) {
+ return NULL;
+ }
+
+ /* bump up to tlbn index */
+ for (i = 0; i < tlbn; i++) {
+ r += booke206_tlb_size(env, i);
+ }
+
+ return &env->tlb.tlbm[r];
+}
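+
+/* Reading of the lookup above: the EPN bits of "ea" select the set inside
+ * TLB "tlbn" and "way" selects the entry within that set.  As an
+ * illustrative configuration, 64 entries organised in 4 ways give
+ * 64 / 4 = 16 sets, i.e. 4 EPN bits feed the set index.
+ */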
+
+/* returns bitmap of supported page sizes for a given TLB */
+static inline uint32_t booke206_tlbnps(CPUPPCState *env, const int tlbn)
+{
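+    /* The MAV 2.0 path (reading the TLBnPS SPRs) is not wired up, so mav2
+     * below stays hard-coded to false and the bitmap is derived from the
+     * TLBnCFG MINSIZE/MAXSIZE fields instead. */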
+ bool mav2 = false;
+ uint32_t ret = 0;
+
+ if (mav2) {
+ ret = env->spr[SPR_BOOKE_TLB0PS + tlbn];
+ } else {
+ uint32_t tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn];
+ uint32_t min = (tlbncfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
+ uint32_t max = (tlbncfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;
+ int i;
+ for (i = min; i <= max; i++) {
+ ret |= (1 << (i << 1));
+ }
+ }
+
+ return ret;
+}
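+
+/* Example (illustrative numbers): with TLBnCFG_MINSIZE == 1 and
+ * TLBnCFG_MAXSIZE == 3 the loop above sets bits 2, 4 and 6, i.e. the
+ * function returns 0x54, one bit per supported page size.
+ */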
+
+#endif
+
+static inline bool msr_is_64bit(CPUPPCState *env, target_ulong msr)
+{
+ if (env->mmu_model == POWERPC_MMU_BOOKE206) {
+ return msr & (1ULL << MSR_CM);
+ }
+
+ return msr & (1ULL << MSR_SF);
+}
+
+/**
+ * Check whether register rx is in the range between start and
+ * start + nregs (as needed by the LSWX and LSWI instructions)
+ */
+static inline bool lsw_reg_in_range(int start, int nregs, int rx)
+{
+ return (start + nregs <= 32 && rx >= start && rx < start + nregs) ||
+ (start + nregs > 32 && (rx >= start || rx < start + nregs - 32));
+}
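+
+/* For instance, lsw_reg_in_range(30, 4, rx) accepts r30, r31, r0 and r1:
+ * the register range wraps from r31 back to r0, which is exactly what the
+ * second clause above handles.
+ */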
+
+extern void (*cpu_ppc_hypercall)(PowerPCCPU *);
+
+void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUPPCState *env);
+
+/**
+ * ppc_get_vcpu_dt_id:
+ * @cs: a PowerPCCPU struct.
+ *
+ * Returns a device-tree ID for a CPU.
+ */
+int ppc_get_vcpu_dt_id(PowerPCCPU *cpu);
+
+/**
+ * ppc_get_vcpu_by_dt_id:
+ * @cpu_dt_id: a device tree id
+ *
+ * Searches for a CPU by @cpu_dt_id.
+ *
+ * Returns: a PowerPCCPU struct
+ */
+PowerPCCPU *ppc_get_vcpu_by_dt_id(int cpu_dt_id);
+
+void ppc_maybe_bswap_register(CPUPPCState *env, uint8_t *mem_buf, int len);
+#endif /* PPC_CPU_H */
diff --git a/target/ppc/dfp_helper.c b/target/ppc/dfp_helper.c
new file mode 100644
index 0000000000..9164fe701b
--- /dev/null
+++ b/target/ppc/dfp_helper.c
@@ -0,0 +1,1331 @@
+/*
+ * PowerPC Decimal Floating Point (DFP) emulation helpers for QEMU.
+ *
+ * Copyright (c) 2014 IBM Corporation.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+
+#define DECNUMDIGITS 34
+#include "libdecnumber/decContext.h"
+#include "libdecnumber/decNumber.h"
+#include "libdecnumber/dpd/decimal32.h"
+#include "libdecnumber/dpd/decimal64.h"
+#include "libdecnumber/dpd/decimal128.h"
+
+#if defined(HOST_WORDS_BIGENDIAN)
+#define HI_IDX 0
+#define LO_IDX 1
+#else
+#define HI_IDX 1
+#define LO_IDX 0
+#endif
+
+struct PPC_DFP {
+ CPUPPCState *env;
+ uint64_t t64[2], a64[2], b64[2];
+ decNumber t, a, b;
+ decContext context;
+ uint8_t crbf;
+};
+
+static void dfp_prepare_rounding_mode(decContext *context, uint64_t fpscr)
+{
+ enum rounding rnd;
+
+ switch ((fpscr >> 32) & 0x7) {
+ case 0:
+ rnd = DEC_ROUND_HALF_EVEN;
+ break;
+ case 1:
+ rnd = DEC_ROUND_DOWN;
+ break;
+ case 2:
+ rnd = DEC_ROUND_CEILING;
+ break;
+ case 3:
+ rnd = DEC_ROUND_FLOOR;
+ break;
+ case 4:
+ rnd = DEC_ROUND_HALF_UP;
+ break;
+ case 5:
+ rnd = DEC_ROUND_HALF_DOWN;
+ break;
+ case 6:
+ rnd = DEC_ROUND_UP;
+ break;
+ case 7:
+ rnd = DEC_ROUND_05UP;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ decContextSetRounding(context, rnd);
+}
+
+static void dfp_set_round_mode_from_immediate(uint8_t r, uint8_t rmc,
+ struct PPC_DFP *dfp)
+{
+ enum rounding rnd;
+ if (r == 0) {
+ switch (rmc & 3) {
+ case 0:
+ rnd = DEC_ROUND_HALF_EVEN;
+ break;
+ case 1:
+ rnd = DEC_ROUND_DOWN;
+ break;
+ case 2:
+ rnd = DEC_ROUND_HALF_UP;
+ break;
+ case 3: /* use FPSCR rounding mode */
+ return;
+ default:
+ assert(0); /* cannot get here */
+ }
+ } else { /* r == 1 */
+ switch (rmc & 3) {
+ case 0:
+ rnd = DEC_ROUND_CEILING;
+ break;
+ case 1:
+ rnd = DEC_ROUND_FLOOR;
+ break;
+ case 2:
+ rnd = DEC_ROUND_UP;
+ break;
+ case 3:
+ rnd = DEC_ROUND_HALF_DOWN;
+ break;
+ default:
+ assert(0); /* cannot get here */
+ }
+ }
+ decContextSetRounding(&dfp->context, rnd);
+}
+
+static void dfp_prepare_decimal64(struct PPC_DFP *dfp, uint64_t *a,
+ uint64_t *b, CPUPPCState *env)
+{
+ decContextDefault(&dfp->context, DEC_INIT_DECIMAL64);
+ dfp_prepare_rounding_mode(&dfp->context, env->fpscr);
+ dfp->env = env;
+
+ if (a) {
+ dfp->a64[0] = *a;
+ decimal64ToNumber((decimal64 *)dfp->a64, &dfp->a);
+ } else {
+ dfp->a64[0] = 0;
+ decNumberZero(&dfp->a);
+ }
+
+ if (b) {
+ dfp->b64[0] = *b;
+ decimal64ToNumber((decimal64 *)dfp->b64, &dfp->b);
+ } else {
+ dfp->b64[0] = 0;
+ decNumberZero(&dfp->b);
+ }
+}
+
+static void dfp_prepare_decimal128(struct PPC_DFP *dfp, uint64_t *a,
+ uint64_t *b, CPUPPCState *env)
+{
+ decContextDefault(&dfp->context, DEC_INIT_DECIMAL128);
+ dfp_prepare_rounding_mode(&dfp->context, env->fpscr);
+ dfp->env = env;
+
+ if (a) {
+ dfp->a64[0] = a[HI_IDX];
+ dfp->a64[1] = a[LO_IDX];
+ decimal128ToNumber((decimal128 *)dfp->a64, &dfp->a);
+ } else {
+ dfp->a64[0] = dfp->a64[1] = 0;
+ decNumberZero(&dfp->a);
+ }
+
+ if (b) {
+ dfp->b64[0] = b[HI_IDX];
+ dfp->b64[1] = b[LO_IDX];
+ decimal128ToNumber((decimal128 *)dfp->b64, &dfp->b);
+ } else {
+ dfp->b64[0] = dfp->b64[1] = 0;
+ decNumberZero(&dfp->b);
+ }
+}
+
+static void dfp_set_FPSCR_flag(struct PPC_DFP *dfp, uint64_t flag,
+ uint64_t enabled)
+{
+ dfp->env->fpscr |= (flag | FP_FX);
+ if (dfp->env->fpscr & enabled) {
+ dfp->env->fpscr |= FP_FEX;
+ }
+}
+
+static void dfp_set_FPRF_from_FRT_with_context(struct PPC_DFP *dfp,
+ decContext *context)
+{
+ uint64_t fprf = 0;
+
+ /* construct FPRF */
+ switch (decNumberClass(&dfp->t, context)) {
+ case DEC_CLASS_SNAN:
+ fprf = 0x01;
+ break;
+ case DEC_CLASS_QNAN:
+ fprf = 0x11;
+ break;
+ case DEC_CLASS_NEG_INF:
+ fprf = 0x09;
+ break;
+ case DEC_CLASS_NEG_NORMAL:
+ fprf = 0x08;
+ break;
+ case DEC_CLASS_NEG_SUBNORMAL:
+ fprf = 0x18;
+ break;
+ case DEC_CLASS_NEG_ZERO:
+ fprf = 0x12;
+ break;
+ case DEC_CLASS_POS_ZERO:
+ fprf = 0x02;
+ break;
+ case DEC_CLASS_POS_SUBNORMAL:
+ fprf = 0x14;
+ break;
+ case DEC_CLASS_POS_NORMAL:
+ fprf = 0x04;
+ break;
+ case DEC_CLASS_POS_INF:
+ fprf = 0x05;
+ break;
+ default:
+ assert(0); /* should never get here */
+ }
+ dfp->env->fpscr &= ~(0x1F << 12);
+ dfp->env->fpscr |= (fprf << 12);
+}
+
+static void dfp_set_FPRF_from_FRT(struct PPC_DFP *dfp)
+{
+ dfp_set_FPRF_from_FRT_with_context(dfp, &dfp->context);
+}
+
+static void dfp_set_FPRF_from_FRT_short(struct PPC_DFP *dfp)
+{
+ decContext shortContext;
+ decContextDefault(&shortContext, DEC_INIT_DECIMAL32);
+ dfp_set_FPRF_from_FRT_with_context(dfp, &shortContext);
+}
+
+static void dfp_set_FPRF_from_FRT_long(struct PPC_DFP *dfp)
+{
+ decContext longContext;
+ decContextDefault(&longContext, DEC_INIT_DECIMAL64);
+ dfp_set_FPRF_from_FRT_with_context(dfp, &longContext);
+}
+
+static void dfp_check_for_OX(struct PPC_DFP *dfp)
+{
+ if (dfp->context.status & DEC_Overflow) {
+ dfp_set_FPSCR_flag(dfp, FP_OX, FP_OE);
+ }
+}
+
+static void dfp_check_for_UX(struct PPC_DFP *dfp)
+{
+ if (dfp->context.status & DEC_Underflow) {
+ dfp_set_FPSCR_flag(dfp, FP_UX, FP_UE);
+ }
+}
+
+static void dfp_check_for_XX(struct PPC_DFP *dfp)
+{
+ if (dfp->context.status & DEC_Inexact) {
+ dfp_set_FPSCR_flag(dfp, FP_XX | FP_FI, FP_XE);
+ }
+}
+
+static void dfp_check_for_ZX(struct PPC_DFP *dfp)
+{
+ if (dfp->context.status & DEC_Division_by_zero) {
+ dfp_set_FPSCR_flag(dfp, FP_ZX, FP_ZE);
+ }
+}
+
+static void dfp_check_for_VXSNAN(struct PPC_DFP *dfp)
+{
+ if (dfp->context.status & DEC_Invalid_operation) {
+ if (decNumberIsSNaN(&dfp->a) || decNumberIsSNaN(&dfp->b)) {
+ dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXSNAN, FP_VE);
+ }
+ }
+}
+
+static void dfp_check_for_VXSNAN_and_convert_to_QNaN(struct PPC_DFP *dfp)
+{
+ if (decNumberIsSNaN(&dfp->t)) {
+ dfp->t.bits &= ~DECSNAN;
+ dfp->t.bits |= DECNAN;
+ dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXSNAN, FP_VE);
+ }
+}
+
+static void dfp_check_for_VXISI(struct PPC_DFP *dfp, int testForSameSign)
+{
+ if (dfp->context.status & DEC_Invalid_operation) {
+ if (decNumberIsInfinite(&dfp->a) && decNumberIsInfinite(&dfp->b)) {
+ int same = decNumberClass(&dfp->a, &dfp->context) ==
+ decNumberClass(&dfp->b, &dfp->context);
+ if ((same && testForSameSign) || (!same && !testForSameSign)) {
+ dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXISI, FP_VE);
+ }
+ }
+ }
+}
+
+static void dfp_check_for_VXISI_add(struct PPC_DFP *dfp)
+{
+ dfp_check_for_VXISI(dfp, 0);
+}
+
+static void dfp_check_for_VXISI_subtract(struct PPC_DFP *dfp)
+{
+ dfp_check_for_VXISI(dfp, 1);
+}
+
+static void dfp_check_for_VXIMZ(struct PPC_DFP *dfp)
+{
+ if (dfp->context.status & DEC_Invalid_operation) {
+ if ((decNumberIsInfinite(&dfp->a) && decNumberIsZero(&dfp->b)) ||
+ (decNumberIsInfinite(&dfp->b) && decNumberIsZero(&dfp->a))) {
+ dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXIMZ, FP_VE);
+ }
+ }
+}
+
+static void dfp_check_for_VXZDZ(struct PPC_DFP *dfp)
+{
+ if (dfp->context.status & DEC_Division_undefined) {
+ dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXZDZ, FP_VE);
+ }
+}
+
+static void dfp_check_for_VXIDI(struct PPC_DFP *dfp)
+{
+ if (dfp->context.status & DEC_Invalid_operation) {
+ if (decNumberIsInfinite(&dfp->a) && decNumberIsInfinite(&dfp->b)) {
+ dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXIDI, FP_VE);
+ }
+ }
+}
+
+static void dfp_check_for_VXVC(struct PPC_DFP *dfp)
+{
+ if (decNumberIsNaN(&dfp->a) || decNumberIsNaN(&dfp->b)) {
+ dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXVC, FP_VE);
+ }
+}
+
+static void dfp_check_for_VXCVI(struct PPC_DFP *dfp)
+{
+ if ((dfp->context.status & DEC_Invalid_operation) &&
+ (!decNumberIsSNaN(&dfp->a)) &&
+ (!decNumberIsSNaN(&dfp->b))) {
+ dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXCVI, FP_VE);
+ }
+}
+
+static void dfp_set_CRBF_from_T(struct PPC_DFP *dfp)
+{
+ if (decNumberIsNaN(&dfp->t)) {
+ dfp->crbf = 1;
+ } else if (decNumberIsZero(&dfp->t)) {
+ dfp->crbf = 2;
+ } else if (decNumberIsNegative(&dfp->t)) {
+ dfp->crbf = 8;
+ } else {
+ dfp->crbf = 4;
+ }
+}
+
+static void dfp_set_FPCC_from_CRBF(struct PPC_DFP *dfp)
+{
+ dfp->env->fpscr &= ~(0xF << 12);
+ dfp->env->fpscr |= (dfp->crbf << 12);
+}
+
+static inline void dfp_makeQNaN(decNumber *dn)
+{
+ dn->bits &= ~DECSPECIAL;
+ dn->bits |= DECNAN;
+}
+
+static inline int dfp_get_digit(decNumber *dn, int n)
+{
+ assert(DECDPUN == 3);
+ int unit = n / DECDPUN;
+ int dig = n % DECDPUN;
+ switch (dig) {
+ case 0:
+ return dn->lsu[unit] % 10;
+ case 1:
+ return (dn->lsu[unit] / 10) % 10;
+ case 2:
+ return dn->lsu[unit] / 100;
+ }
+ g_assert_not_reached();
+}
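+
+/* Example (illustrative): with DECDPUN == 3, digit index 4 is the middle
+ * digit of lsu[1], so dfp_get_digit(dn, 4) evaluates (dn->lsu[1] / 10) % 10.
+ */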
+
+#define DFP_HELPER_TAB(op, dnop, postprocs, size) \
+void helper_##op(CPUPPCState *env, uint64_t *t, uint64_t *a, uint64_t *b) \
+{ \
+ struct PPC_DFP dfp; \
+ dfp_prepare_decimal##size(&dfp, a, b, env); \
+ dnop(&dfp.t, &dfp.a, &dfp.b, &dfp.context); \
+ decimal##size##FromNumber((decimal##size *)dfp.t64, &dfp.t, &dfp.context); \
+ postprocs(&dfp); \
+ if (size == 64) { \
+ t[0] = dfp.t64[0]; \
+ } else if (size == 128) { \
+ t[0] = dfp.t64[HI_IDX]; \
+ t[1] = dfp.t64[LO_IDX]; \
+ } \
+}
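+
+/* The expansions below, e.g. DFP_HELPER_TAB(dadd, decNumberAdd, ADD_PPs, 64),
+ * therefore generate helpers that roughly: load the A and B operands, run the
+ * decNumber operation in the prepared context, encode the result back into t
+ * and apply the named status post-processing (here ADD_PPs).
+ */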
+
+static void ADD_PPs(struct PPC_DFP *dfp)
+{
+ dfp_set_FPRF_from_FRT(dfp);
+ dfp_check_for_OX(dfp);
+ dfp_check_for_UX(dfp);
+ dfp_check_for_XX(dfp);
+ dfp_check_for_VXSNAN(dfp);
+ dfp_check_for_VXISI_add(dfp);
+}
+
+DFP_HELPER_TAB(dadd, decNumberAdd, ADD_PPs, 64)
+DFP_HELPER_TAB(daddq, decNumberAdd, ADD_PPs, 128)
+
+static void SUB_PPs(struct PPC_DFP *dfp)
+{
+ dfp_set_FPRF_from_FRT(dfp);
+ dfp_check_for_OX(dfp);
+ dfp_check_for_UX(dfp);
+ dfp_check_for_XX(dfp);
+ dfp_check_for_VXSNAN(dfp);
+ dfp_check_for_VXISI_subtract(dfp);
+}
+
+DFP_HELPER_TAB(dsub, decNumberSubtract, SUB_PPs, 64)
+DFP_HELPER_TAB(dsubq, decNumberSubtract, SUB_PPs, 128)
+
+static void MUL_PPs(struct PPC_DFP *dfp)
+{
+ dfp_set_FPRF_from_FRT(dfp);
+ dfp_check_for_OX(dfp);
+ dfp_check_for_UX(dfp);
+ dfp_check_for_XX(dfp);
+ dfp_check_for_VXSNAN(dfp);
+ dfp_check_for_VXIMZ(dfp);
+}
+
+DFP_HELPER_TAB(dmul, decNumberMultiply, MUL_PPs, 64)
+DFP_HELPER_TAB(dmulq, decNumberMultiply, MUL_PPs, 128)
+
+static void DIV_PPs(struct PPC_DFP *dfp)
+{
+ dfp_set_FPRF_from_FRT(dfp);
+ dfp_check_for_OX(dfp);
+ dfp_check_for_UX(dfp);
+ dfp_check_for_ZX(dfp);
+ dfp_check_for_XX(dfp);
+ dfp_check_for_VXSNAN(dfp);
+ dfp_check_for_VXZDZ(dfp);
+ dfp_check_for_VXIDI(dfp);
+}
+
+DFP_HELPER_TAB(ddiv, decNumberDivide, DIV_PPs, 64)
+DFP_HELPER_TAB(ddivq, decNumberDivide, DIV_PPs, 128)
+
+#define DFP_HELPER_BF_AB(op, dnop, postprocs, size) \
+uint32_t helper_##op(CPUPPCState *env, uint64_t *a, uint64_t *b) \
+{ \
+ struct PPC_DFP dfp; \
+ dfp_prepare_decimal##size(&dfp, a, b, env); \
+ dnop(&dfp.t, &dfp.a, &dfp.b, &dfp.context); \
+ decimal##size##FromNumber((decimal##size *)dfp.t64, &dfp.t, &dfp.context); \
+ postprocs(&dfp); \
+ return dfp.crbf; \
+}
+
+static void CMPU_PPs(struct PPC_DFP *dfp)
+{
+ dfp_set_CRBF_from_T(dfp);
+ dfp_set_FPCC_from_CRBF(dfp);
+ dfp_check_for_VXSNAN(dfp);
+}
+
+DFP_HELPER_BF_AB(dcmpu, decNumberCompare, CMPU_PPs, 64)
+DFP_HELPER_BF_AB(dcmpuq, decNumberCompare, CMPU_PPs, 128)
+
+static void CMPO_PPs(struct PPC_DFP *dfp)
+{
+ dfp_set_CRBF_from_T(dfp);
+ dfp_set_FPCC_from_CRBF(dfp);
+ dfp_check_for_VXSNAN(dfp);
+ dfp_check_for_VXVC(dfp);
+}
+
+DFP_HELPER_BF_AB(dcmpo, decNumberCompare, CMPO_PPs, 64)
+DFP_HELPER_BF_AB(dcmpoq, decNumberCompare, CMPO_PPs, 128)
+
+#define DFP_HELPER_TSTDC(op, size) \
+uint32_t helper_##op(CPUPPCState *env, uint64_t *a, uint32_t dcm) \
+{ \
+ struct PPC_DFP dfp; \
+ int match = 0; \
+ \
+ dfp_prepare_decimal##size(&dfp, a, 0, env); \
+ \
+ match |= (dcm & 0x20) && decNumberIsZero(&dfp.a); \
+ match |= (dcm & 0x10) && decNumberIsSubnormal(&dfp.a, &dfp.context); \
+ match |= (dcm & 0x08) && decNumberIsNormal(&dfp.a, &dfp.context); \
+ match |= (dcm & 0x04) && decNumberIsInfinite(&dfp.a); \
+ match |= (dcm & 0x02) && decNumberIsQNaN(&dfp.a); \
+ match |= (dcm & 0x01) && decNumberIsSNaN(&dfp.a); \
+ \
+ if (decNumberIsNegative(&dfp.a)) { \
+ dfp.crbf = match ? 0xA : 0x8; \
+ } else { \
+ dfp.crbf = match ? 0x2 : 0x0; \
+ } \
+ \
+ dfp_set_FPCC_from_CRBF(&dfp); \
+ return dfp.crbf; \
+}
+
+DFP_HELPER_TSTDC(dtstdc, 64)
+DFP_HELPER_TSTDC(dtstdcq, 128)
+
+#define DFP_HELPER_TSTDG(op, size) \
+uint32_t helper_##op(CPUPPCState *env, uint64_t *a, uint32_t dcm) \
+{ \
+ struct PPC_DFP dfp; \
+ int minexp, maxexp, nzero_digits, nzero_idx, is_negative, is_zero, \
+ is_extreme_exp, is_subnormal, is_normal, leftmost_is_nonzero, \
+ match; \
+ \
+ dfp_prepare_decimal##size(&dfp, a, 0, env); \
+ \
+ if ((size) == 64) { \
+ minexp = -398; \
+ maxexp = 369; \
+ nzero_digits = 16; \
+ nzero_idx = 5; \
+ } else if ((size) == 128) { \
+ minexp = -6176; \
+ maxexp = 6111; \
+ nzero_digits = 34; \
+ nzero_idx = 11; \
+ } \
+ \
+ is_negative = decNumberIsNegative(&dfp.a); \
+ is_zero = decNumberIsZero(&dfp.a); \
+ is_extreme_exp = (dfp.a.exponent == maxexp) || \
+ (dfp.a.exponent == minexp); \
+ is_subnormal = decNumberIsSubnormal(&dfp.a, &dfp.context); \
+ is_normal = decNumberIsNormal(&dfp.a, &dfp.context); \
+ leftmost_is_nonzero = (dfp.a.digits == nzero_digits) && \
+ (dfp.a.lsu[nzero_idx] != 0); \
+ match = 0; \
+ \
+ match |= (dcm & 0x20) && is_zero && !is_extreme_exp; \
+ match |= (dcm & 0x10) && is_zero && is_extreme_exp; \
+ match |= (dcm & 0x08) && \
+ (is_subnormal || (is_normal && is_extreme_exp)); \
+ match |= (dcm & 0x04) && is_normal && !is_extreme_exp && \
+ !leftmost_is_nonzero; \
+ match |= (dcm & 0x02) && is_normal && !is_extreme_exp && \
+ leftmost_is_nonzero; \
+ match |= (dcm & 0x01) && decNumberIsSpecial(&dfp.a); \
+ \
+ if (is_negative) { \
+ dfp.crbf = match ? 0xA : 0x8; \
+ } else { \
+ dfp.crbf = match ? 0x2 : 0x0; \
+ } \
+ \
+ dfp_set_FPCC_from_CRBF(&dfp); \
+ return dfp.crbf; \
+}
+
+DFP_HELPER_TSTDG(dtstdg, 64)
+DFP_HELPER_TSTDG(dtstdgq, 128)
+
+#define DFP_HELPER_TSTEX(op, size) \
+uint32_t helper_##op(CPUPPCState *env, uint64_t *a, uint64_t *b) \
+{ \
+ struct PPC_DFP dfp; \
+ int expa, expb, a_is_special, b_is_special; \
+ \
+ dfp_prepare_decimal##size(&dfp, a, b, env); \
+ \
+ expa = dfp.a.exponent; \
+ expb = dfp.b.exponent; \
+ a_is_special = decNumberIsSpecial(&dfp.a); \
+ b_is_special = decNumberIsSpecial(&dfp.b); \
+ \
+ if (a_is_special || b_is_special) { \
+ int atype = a_is_special ? (decNumberIsNaN(&dfp.a) ? 4 : 2) : 1; \
+ int btype = b_is_special ? (decNumberIsNaN(&dfp.b) ? 4 : 2) : 1; \
+ dfp.crbf = (atype ^ btype) ? 0x1 : 0x2; \
+ } else if (expa < expb) { \
+ dfp.crbf = 0x8; \
+ } else if (expa > expb) { \
+ dfp.crbf = 0x4; \
+ } else { \
+ dfp.crbf = 0x2; \
+ } \
+ \
+ dfp_set_FPCC_from_CRBF(&dfp); \
+ return dfp.crbf; \
+}
+
+DFP_HELPER_TSTEX(dtstex, 64)
+DFP_HELPER_TSTEX(dtstexq, 128)
+
+#define DFP_HELPER_TSTSF(op, size) \
+uint32_t helper_##op(CPUPPCState *env, uint64_t *a, uint64_t *b) \
+{ \
+ struct PPC_DFP dfp; \
+ unsigned k; \
+ \
+ dfp_prepare_decimal##size(&dfp, 0, b, env); \
+ \
+ k = *a & 0x3F; \
+ \
+ if (unlikely(decNumberIsSpecial(&dfp.b))) { \
+ dfp.crbf = 1; \
+ } else if (k == 0) { \
+ dfp.crbf = 4; \
+ } else if (unlikely(decNumberIsZero(&dfp.b))) { \
+ /* Zero has no sig digits */ \
+ dfp.crbf = 4; \
+ } else { \
+ unsigned nsd = dfp.b.digits; \
+ if (k < nsd) { \
+ dfp.crbf = 8; \
+ } else if (k > nsd) { \
+ dfp.crbf = 4; \
+ } else { \
+ dfp.crbf = 2; \
+ } \
+ } \
+ \
+ dfp_set_FPCC_from_CRBF(&dfp); \
+ return dfp.crbf; \
+}
+
+DFP_HELPER_TSTSF(dtstsf, 64)
+DFP_HELPER_TSTSF(dtstsfq, 128)
+
+#define DFP_HELPER_TSTSFI(op, size) \
+uint32_t helper_##op(CPUPPCState *env, uint32_t a, uint64_t *b) \
+{ \
+ struct PPC_DFP dfp; \
+ unsigned uim; \
+ \
+ dfp_prepare_decimal##size(&dfp, 0, b, env); \
+ \
+ uim = a & 0x3F; \
+ \
+ if (unlikely(decNumberIsSpecial(&dfp.b))) { \
+ dfp.crbf = 1; \
+ } else if (uim == 0) { \
+ dfp.crbf = 4; \
+ } else if (unlikely(decNumberIsZero(&dfp.b))) { \
+ /* Zero has no sig digits */ \
+ dfp.crbf = 4; \
+ } else { \
+ unsigned nsd = dfp.b.digits; \
+ if (uim < nsd) { \
+ dfp.crbf = 8; \
+ } else if (uim > nsd) { \
+ dfp.crbf = 4; \
+ } else { \
+ dfp.crbf = 2; \
+ } \
+ } \
+ \
+ dfp_set_FPCC_from_CRBF(&dfp); \
+ return dfp.crbf; \
+}
+
+DFP_HELPER_TSTSFI(dtstsfi, 64)
+DFP_HELPER_TSTSFI(dtstsfiq, 128)
+
+static void QUA_PPs(struct PPC_DFP *dfp)
+{
+ dfp_set_FPRF_from_FRT(dfp);
+ dfp_check_for_XX(dfp);
+ dfp_check_for_VXSNAN(dfp);
+ dfp_check_for_VXCVI(dfp);
+}
+
+static void dfp_quantize(uint8_t rmc, struct PPC_DFP *dfp)
+{
+ dfp_set_round_mode_from_immediate(0, rmc, dfp);
+ decNumberQuantize(&dfp->t, &dfp->b, &dfp->a, &dfp->context);
+ if (decNumberIsSNaN(&dfp->a)) {
+ dfp->t = dfp->a;
+ dfp_makeQNaN(&dfp->t);
+ } else if (decNumberIsSNaN(&dfp->b)) {
+ dfp->t = dfp->b;
+ dfp_makeQNaN(&dfp->t);
+ } else if (decNumberIsQNaN(&dfp->a)) {
+ dfp->t = dfp->a;
+ } else if (decNumberIsQNaN(&dfp->b)) {
+ dfp->t = dfp->b;
+ }
+}
+
+#define DFP_HELPER_QUAI(op, size) \
+void helper_##op(CPUPPCState *env, uint64_t *t, uint64_t *b, \
+ uint32_t te, uint32_t rmc) \
+{ \
+ struct PPC_DFP dfp; \
+ \
+ dfp_prepare_decimal##size(&dfp, 0, b, env); \
+ \
+ decNumberFromUInt32(&dfp.a, 1); \
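+    /* te is a 5-bit two's-complement immediate: shifting it left by three   \
+     * into an int8_t and arithmetic-shifting back sign-extends it.          \
+     */                                                                      \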
+ dfp.a.exponent = (int32_t)((int8_t)(te << 3) >> 3); \
+ \
+ dfp_quantize(rmc, &dfp); \
+ decimal##size##FromNumber((decimal##size *)dfp.t64, &dfp.t, \
+ &dfp.context); \
+ QUA_PPs(&dfp); \
+ \
+ if (size == 64) { \
+ t[0] = dfp.t64[0]; \
+ } else if (size == 128) { \
+ t[0] = dfp.t64[HI_IDX]; \
+ t[1] = dfp.t64[LO_IDX]; \
+ } \
+}
+
+DFP_HELPER_QUAI(dquai, 64)
+DFP_HELPER_QUAI(dquaiq, 128)
+
+#define DFP_HELPER_QUA(op, size) \
+void helper_##op(CPUPPCState *env, uint64_t *t, uint64_t *a, \
+ uint64_t *b, uint32_t rmc) \
+{ \
+ struct PPC_DFP dfp; \
+ \
+ dfp_prepare_decimal##size(&dfp, a, b, env); \
+ \
+ dfp_quantize(rmc, &dfp); \
+ decimal##size##FromNumber((decimal##size *)dfp.t64, &dfp.t, \
+ &dfp.context); \
+ QUA_PPs(&dfp); \
+ \
+ if (size == 64) { \
+ t[0] = dfp.t64[0]; \
+ } else if (size == 128) { \
+ t[0] = dfp.t64[HI_IDX]; \
+ t[1] = dfp.t64[LO_IDX]; \
+ } \
+}
+
+DFP_HELPER_QUA(dqua, 64)
+DFP_HELPER_QUA(dquaq, 128)
+
+static void _dfp_reround(uint8_t rmc, int32_t ref_sig, int32_t xmax,
+ struct PPC_DFP *dfp)
+{
+ int msd_orig, msd_rslt;
+
+ if (unlikely((ref_sig == 0) || (dfp->b.digits <= ref_sig))) {
+ dfp->t = dfp->b;
+ if (decNumberIsSNaN(&dfp->b)) {
+ dfp_makeQNaN(&dfp->t);
+ dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXSNAN, FPSCR_VE);
+ }
+ return;
+ }
+
+    /* Reround is equivalent to quantizing b with 1E(n), i.e. 1 * 10^n, */
+    /* where n = exp(b) + numDigits(b) - reference_significance. */
+
+ decNumberFromUInt32(&dfp->a, 1);
+ dfp->a.exponent = dfp->b.exponent + dfp->b.digits - ref_sig;
+
+ if (unlikely(dfp->a.exponent > xmax)) {
+ dfp->t.digits = 0;
+ dfp->t.bits &= ~DECNEG;
+ dfp_makeQNaN(&dfp->t);
+ dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXCVI, FPSCR_VE);
+ return;
+ }
+
+ dfp_quantize(rmc, dfp);
+
+ msd_orig = dfp_get_digit(&dfp->b, dfp->b.digits-1);
+ msd_rslt = dfp_get_digit(&dfp->t, dfp->t.digits-1);
+
+ /* If the quantization resulted in rounding up to the next magnitude, */
+ /* then we need to shift the significand and adjust the exponent. */
+
+ if (unlikely((msd_orig == 9) && (msd_rslt == 1))) {
+
+ decNumber negone;
+
+ decNumberFromInt32(&negone, -1);
+ decNumberShift(&dfp->t, &dfp->t, &negone, &dfp->context);
+ dfp->t.exponent++;
+
+ if (unlikely(dfp->t.exponent > xmax)) {
+ dfp_makeQNaN(&dfp->t);
+ dfp->t.digits = 0;
+ dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXCVI, FP_VE);
+ /* Inhibit XX in this case */
+ decContextClearStatus(&dfp->context, DEC_Inexact);
+ }
+ }
+}
+
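A minimal standalone sketch of the exponent arithmetic used by _dfp_reround (assumed example values, independent of decNumber): rerounding b = 1.2345, held as coefficient 12345 with exponent -4, to a reference significance of 3 quantizes against 1E(n) with n = -4 + 5 - 3 = -2, so the result keeps three significant digits (1.23 under round-to-nearest).

    #include <stdio.h>

    int main(void)
    {
        /* b = 1.2345 -> coefficient 12345, exponent -4, 5 digits */
        int exp_b = -4, digits_b = 5, ref_sig = 3;
        int n = exp_b + digits_b - ref_sig;     /* quantum exponent */

        printf("quantize against 1E%d\n", n);   /* prints "quantize against 1E-2" */
        return 0;
    }
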
+#define DFP_HELPER_RRND(op, size) \
+void helper_##op(CPUPPCState *env, uint64_t *t, uint64_t *a, \
+ uint64_t *b, uint32_t rmc) \
+{ \
+ struct PPC_DFP dfp; \
+ int32_t ref_sig = *a & 0x3F; \
+ int32_t xmax = ((size) == 64) ? 369 : 6111; \
+ \
+ dfp_prepare_decimal##size(&dfp, 0, b, env); \
+ \
+ _dfp_reround(rmc, ref_sig, xmax, &dfp); \
+ decimal##size##FromNumber((decimal##size *)dfp.t64, &dfp.t, \
+ &dfp.context); \
+ QUA_PPs(&dfp); \
+ \
+ if (size == 64) { \
+ t[0] = dfp.t64[0]; \
+ } else if (size == 128) { \
+ t[0] = dfp.t64[HI_IDX]; \
+ t[1] = dfp.t64[LO_IDX]; \
+ } \
+}
+
+DFP_HELPER_RRND(drrnd, 64)
+DFP_HELPER_RRND(drrndq, 128)
+
+#define DFP_HELPER_RINT(op, postprocs, size) \
+void helper_##op(CPUPPCState *env, uint64_t *t, uint64_t *b, \
+ uint32_t r, uint32_t rmc) \
+{ \
+ struct PPC_DFP dfp; \
+ \
+ dfp_prepare_decimal##size(&dfp, 0, b, env); \
+ \
+ dfp_set_round_mode_from_immediate(r, rmc, &dfp); \
+ decNumberToIntegralExact(&dfp.t, &dfp.b, &dfp.context); \
+ decimal##size##FromNumber((decimal##size *)dfp.t64, &dfp.t, &dfp.context); \
+ postprocs(&dfp); \
+ \
+ if (size == 64) { \
+ t[0] = dfp.t64[0]; \
+ } else if (size == 128) { \
+ t[0] = dfp.t64[HI_IDX]; \
+ t[1] = dfp.t64[LO_IDX]; \
+ } \
+}
+
+static void RINTX_PPs(struct PPC_DFP *dfp)
+{
+ dfp_set_FPRF_from_FRT(dfp);
+ dfp_check_for_XX(dfp);
+ dfp_check_for_VXSNAN(dfp);
+}
+
+DFP_HELPER_RINT(drintx, RINTX_PPs, 64)
+DFP_HELPER_RINT(drintxq, RINTX_PPs, 128)
+
+static void RINTN_PPs(struct PPC_DFP *dfp)
+{
+ dfp_set_FPRF_from_FRT(dfp);
+ dfp_check_for_VXSNAN(dfp);
+}
+
+DFP_HELPER_RINT(drintn, RINTN_PPs, 64)
+DFP_HELPER_RINT(drintnq, RINTN_PPs, 128)
+
+void helper_dctdp(CPUPPCState *env, uint64_t *t, uint64_t *b)
+{
+ struct PPC_DFP dfp;
+ uint32_t b_short = *b;
+ dfp_prepare_decimal64(&dfp, 0, 0, env);
+ decimal32ToNumber((decimal32 *)&b_short, &dfp.t);
+ decimal64FromNumber((decimal64 *)t, &dfp.t, &dfp.context);
+ dfp_set_FPRF_from_FRT(&dfp);
+}
+
+void helper_dctqpq(CPUPPCState *env, uint64_t *t, uint64_t *b)
+{
+ struct PPC_DFP dfp;
+ dfp_prepare_decimal128(&dfp, 0, 0, env);
+ decimal64ToNumber((decimal64 *)b, &dfp.t);
+
+ dfp_check_for_VXSNAN_and_convert_to_QNaN(&dfp);
+ dfp_set_FPRF_from_FRT(&dfp);
+
+ decimal128FromNumber((decimal128 *)&dfp.t64, &dfp.t, &dfp.context);
+ t[0] = dfp.t64[HI_IDX];
+ t[1] = dfp.t64[LO_IDX];
+}
+
+void helper_drsp(CPUPPCState *env, uint64_t *t, uint64_t *b)
+{
+ struct PPC_DFP dfp;
+ uint32_t t_short = 0;
+ dfp_prepare_decimal64(&dfp, 0, b, env);
+ decimal32FromNumber((decimal32 *)&t_short, &dfp.b, &dfp.context);
+ decimal32ToNumber((decimal32 *)&t_short, &dfp.t);
+
+ dfp_set_FPRF_from_FRT_short(&dfp);
+ dfp_check_for_OX(&dfp);
+ dfp_check_for_UX(&dfp);
+ dfp_check_for_XX(&dfp);
+
+ *t = t_short;
+}
+
+void helper_drdpq(CPUPPCState *env, uint64_t *t, uint64_t *b)
+{
+ struct PPC_DFP dfp;
+ dfp_prepare_decimal128(&dfp, 0, b, env);
+ decimal64FromNumber((decimal64 *)&dfp.t64, &dfp.b, &dfp.context);
+ decimal64ToNumber((decimal64 *)&dfp.t64, &dfp.t);
+
+ dfp_check_for_VXSNAN_and_convert_to_QNaN(&dfp);
+ dfp_set_FPRF_from_FRT_long(&dfp);
+ dfp_check_for_OX(&dfp);
+ dfp_check_for_UX(&dfp);
+ dfp_check_for_XX(&dfp);
+
+ decimal64FromNumber((decimal64 *)dfp.t64, &dfp.t, &dfp.context);
+ t[0] = dfp.t64[0];
+ t[1] = 0;
+}
+
+#define DFP_HELPER_CFFIX(op, size) \
+void helper_##op(CPUPPCState *env, uint64_t *t, uint64_t *b) \
+{ \
+ struct PPC_DFP dfp; \
+ dfp_prepare_decimal##size(&dfp, 0, b, env); \
+ decNumberFromInt64(&dfp.t, (int64_t)(*b)); \
+ decimal##size##FromNumber((decimal##size *)dfp.t64, &dfp.t, &dfp.context); \
+ CFFIX_PPs(&dfp); \
+ \
+ if (size == 64) { \
+ t[0] = dfp.t64[0]; \
+ } else if (size == 128) { \
+ t[0] = dfp.t64[HI_IDX]; \
+ t[1] = dfp.t64[LO_IDX]; \
+ } \
+}
+
+static void CFFIX_PPs(struct PPC_DFP *dfp)
+{
+ dfp_set_FPRF_from_FRT(dfp);
+ dfp_check_for_XX(dfp);
+}
+
+DFP_HELPER_CFFIX(dcffix, 64)
+DFP_HELPER_CFFIX(dcffixq, 128)
+
+#define DFP_HELPER_CTFIX(op, size) \
+void helper_##op(CPUPPCState *env, uint64_t *t, uint64_t *b) \
+{ \
+ struct PPC_DFP dfp; \
+ dfp_prepare_decimal##size(&dfp, 0, b, env); \
+ \
+ if (unlikely(decNumberIsSpecial(&dfp.b))) { \
+ uint64_t invalid_flags = FP_VX | FP_VXCVI; \
+ if (decNumberIsInfinite(&dfp.b)) { \
+ dfp.t64[0] = decNumberIsNegative(&dfp.b) ? INT64_MIN : INT64_MAX; \
+ } else { /* NaN */ \
+ dfp.t64[0] = INT64_MIN; \
+ if (decNumberIsSNaN(&dfp.b)) { \
+ invalid_flags |= FP_VXSNAN; \
+ } \
+ } \
+ dfp_set_FPSCR_flag(&dfp, invalid_flags, FP_VE); \
+ } else if (unlikely(decNumberIsZero(&dfp.b))) { \
+ dfp.t64[0] = 0; \
+ } else { \
+ decNumberToIntegralExact(&dfp.b, &dfp.b, &dfp.context); \
+ dfp.t64[0] = decNumberIntegralToInt64(&dfp.b, &dfp.context); \
+ if (decContextTestStatus(&dfp.context, DEC_Invalid_operation)) { \
+ dfp.t64[0] = decNumberIsNegative(&dfp.b) ? INT64_MIN : INT64_MAX; \
+ dfp_set_FPSCR_flag(&dfp, FP_VX | FP_VXCVI, FP_VE); \
+ } else { \
+ dfp_check_for_XX(&dfp); \
+ } \
+ } \
+ \
+ *t = dfp.t64[0]; \
+}
+
+DFP_HELPER_CTFIX(dctfix, 64)
+DFP_HELPER_CTFIX(dctfixq, 128)
+
+static inline void dfp_set_bcd_digit_64(uint64_t *t, uint8_t digit,
+ unsigned n)
+{
+ *t |= ((uint64_t)(digit & 0xF) << (n << 2));
+}
+
+static inline void dfp_set_bcd_digit_128(uint64_t *t, uint8_t digit,
+ unsigned n)
+{
+ t[(n & 0x10) ? HI_IDX : LO_IDX] |=
+ ((uint64_t)(digit & 0xF) << ((n & 15) << 2));
+}
+
+static inline void dfp_set_sign_64(uint64_t *t, uint8_t sgn)
+{
+ *t <<= 4;
+ *t |= (sgn & 0xF);
+}
+
+static inline void dfp_set_sign_128(uint64_t *t, uint8_t sgn)
+{
+ t[HI_IDX] <<= 4;
+ t[HI_IDX] |= (t[LO_IDX] >> 60);
+ t[LO_IDX] <<= 4;
+ t[LO_IDX] |= (sgn & 0xF);
+}
+
+#define DFP_HELPER_DEDPD(op, size) \
+void helper_##op(CPUPPCState *env, uint64_t *t, uint64_t *b, uint32_t sp) \
+{ \
+ struct PPC_DFP dfp; \
+ uint8_t digits[34]; \
+ int i, N; \
+ \
+ dfp_prepare_decimal##size(&dfp, 0, b, env); \
+ \
+ decNumberGetBCD(&dfp.b, digits); \
+ dfp.t64[0] = dfp.t64[1] = 0; \
+ N = dfp.b.digits; \
+ \
+ for (i = 0; (i < N) && (i < (size)/4); i++) { \
+ dfp_set_bcd_digit_##size(dfp.t64, digits[N-i-1], i); \
+ } \
+ \
+ if (sp & 2) { \
+ uint8_t sgn; \
+ \
+ if (decNumberIsNegative(&dfp.b)) { \
+ sgn = 0xD; \
+ } else { \
+ sgn = ((sp & 1) ? 0xF : 0xC); \
+ } \
+ dfp_set_sign_##size(dfp.t64, sgn); \
+ } \
+ \
+ if (size == 64) { \
+ t[0] = dfp.t64[0]; \
+ } else if (size == 128) { \
+ t[0] = dfp.t64[HI_IDX]; \
+ t[1] = dfp.t64[LO_IDX]; \
+ } \
+}
+
+DFP_HELPER_DEDPD(ddedpd, 64)
+DFP_HELPER_DEDPD(ddedpdq, 128)
+
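The packing done by ddedpd can be checked with a standalone sketch (assumed input -123 with SP = 0b10, independent of the helpers above): each decimal digit occupies one nibble, least significant digit lowest, and the sign nibble (0xD for negative) is appended on the right.

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t digits[] = { 1, 2, 3 };     /* most significant digit first */
        uint64_t t = 0;
        int n = sizeof(digits);

        for (int i = 0; i < n; i++) {       /* mirrors dfp_set_bcd_digit_64 */
            t |= (uint64_t)(digits[n - i - 1] & 0xF) << (i * 4);
        }
        t = (t << 4) | 0xD;                 /* mirrors dfp_set_sign_64 */

        printf("%016" PRIx64 "\n", t);      /* prints 000000000000123d */
        return 0;
    }
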
+static inline uint8_t dfp_get_bcd_digit_64(uint64_t *t, unsigned n)
+{
+ return *t >> ((n << 2) & 63) & 15;
+}
+
+static inline uint8_t dfp_get_bcd_digit_128(uint64_t *t, unsigned n)
+{
+ return t[(n & 0x10) ? HI_IDX : LO_IDX] >> ((n << 2) & 63) & 15;
+}
+
+#define DFP_HELPER_ENBCD(op, size) \
+void helper_##op(CPUPPCState *env, uint64_t *t, uint64_t *b, uint32_t s) \
+{ \
+ struct PPC_DFP dfp; \
+ uint8_t digits[32]; \
+ int n = 0, offset = 0, sgn = 0, nonzero = 0; \
+ \
+ dfp_prepare_decimal##size(&dfp, 0, b, env); \
+ \
+ decNumberZero(&dfp.t); \
+ \
+ if (s) { \
+ uint8_t sgnNibble = dfp_get_bcd_digit_##size(dfp.b64, offset++); \
+ switch (sgnNibble) { \
+ case 0xD: \
+ case 0xB: \
+ sgn = 1; \
+ break; \
+ case 0xC: \
+ case 0xF: \
+ case 0xA: \
+ case 0xE: \
+ sgn = 0; \
+ break; \
+ default: \
+ dfp_set_FPSCR_flag(&dfp, FP_VX | FP_VXCVI, FPSCR_VE); \
+ return; \
+ } \
+ } \
+ \
+ while (offset < (size)/4) { \
+ n++; \
+ digits[(size)/4-n] = dfp_get_bcd_digit_##size(dfp.b64, offset++); \
+ if (digits[(size)/4-n] > 10) { \
+ dfp_set_FPSCR_flag(&dfp, FP_VX | FP_VXCVI, FPSCR_VE); \
+ return; \
+ } else { \
+ nonzero |= (digits[(size)/4-n] > 0); \
+ } \
+ } \
+ \
+ if (nonzero) { \
+ decNumberSetBCD(&dfp.t, digits+((size)/4)-n, n); \
+ } \
+ \
+ if (s && sgn) { \
+ dfp.t.bits |= DECNEG; \
+ } \
+ decimal##size##FromNumber((decimal##size *)dfp.t64, &dfp.t, \
+ &dfp.context); \
+ dfp_set_FPRF_from_FRT(&dfp); \
+ if ((size) == 64) { \
+ t[0] = dfp.t64[0]; \
+ } else if ((size) == 128) { \
+ t[0] = dfp.t64[HI_IDX]; \
+ t[1] = dfp.t64[LO_IDX]; \
+ } \
+}
+
+DFP_HELPER_ENBCD(denbcd, 64)
+DFP_HELPER_ENBCD(denbcdq, 128)
+
+#define DFP_HELPER_XEX(op, size) \
+void helper_##op(CPUPPCState *env, uint64_t *t, uint64_t *b) \
+{ \
+ struct PPC_DFP dfp; \
+ \
+ dfp_prepare_decimal##size(&dfp, 0, b, env); \
+ \
+ if (unlikely(decNumberIsSpecial(&dfp.b))) { \
+ if (decNumberIsInfinite(&dfp.b)) { \
+ *t = -1; \
+ } else if (decNumberIsSNaN(&dfp.b)) { \
+ *t = -3; \
+ } else if (decNumberIsQNaN(&dfp.b)) { \
+ *t = -2; \
+ } else { \
+ assert(0); \
+ } \
+ } else { \
+ if ((size) == 64) { \
+ *t = dfp.b.exponent + 398; \
+ } else if ((size) == 128) { \
+ *t = dfp.b.exponent + 6176; \
+ } else { \
+ assert(0); \
+ } \
+ } \
+}
+
+DFP_HELPER_XEX(dxex, 64)
+DFP_HELPER_XEX(dxexq, 128)
+
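+/* In the DFP interchange formats the sign bit is followed by a combined
+ * combination/exponent-continuation field: 13 bits wide for decimal64 and
+ * 17 bits wide for decimal128. These helpers overwrite just that field with
+ * a raw encoding, e.g. the NaN and infinity patterns used by diex below.
+ */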
+static void dfp_set_raw_exp_64(uint64_t *t, uint64_t raw)
+{
+ *t &= 0x8003ffffffffffffULL;
+ *t |= (raw << (63-13));
+}
+
+static void dfp_set_raw_exp_128(uint64_t *t, uint64_t raw)
+{
+ t[HI_IDX] &= 0x80003fffffffffffULL;
+ t[HI_IDX] |= (raw << (63-17));
+}
+
+#define DFP_HELPER_IEX(op, size) \
+void helper_##op(CPUPPCState *env, uint64_t *t, uint64_t *a, uint64_t *b) \
+{ \
+ struct PPC_DFP dfp; \
+ uint64_t raw_qnan, raw_snan, raw_inf, max_exp; \
+ int bias; \
+ int64_t exp = *((int64_t *)a); \
+ \
+ dfp_prepare_decimal##size(&dfp, 0, b, env); \
+ \
+ if ((size) == 64) { \
+ max_exp = 767; \
+ raw_qnan = 0x1F00; \
+ raw_snan = 0x1F80; \
+ raw_inf = 0x1E00; \
+ bias = 398; \
+ } else if ((size) == 128) { \
+ max_exp = 12287; \
+ raw_qnan = 0x1f000; \
+ raw_snan = 0x1f800; \
+ raw_inf = 0x1e000; \
+ bias = 6176; \
+ } else { \
+ assert(0); \
+ } \
+ \
+ if (unlikely((exp < 0) || (exp > max_exp))) { \
+ dfp.t64[0] = dfp.b64[0]; \
+ dfp.t64[1] = dfp.b64[1]; \
+ if (exp == -1) { \
+ dfp_set_raw_exp_##size(dfp.t64, raw_inf); \
+ } else if (exp == -3) { \
+ dfp_set_raw_exp_##size(dfp.t64, raw_snan); \
+ } else { \
+ dfp_set_raw_exp_##size(dfp.t64, raw_qnan); \
+ } \
+ } else { \
+ dfp.t = dfp.b; \
+ if (unlikely(decNumberIsSpecial(&dfp.t))) { \
+ dfp.t.bits &= ~DECSPECIAL; \
+ } \
+ dfp.t.exponent = exp - bias; \
+ decimal##size##FromNumber((decimal##size *)dfp.t64, &dfp.t, \
+ &dfp.context); \
+ } \
+ if (size == 64) { \
+ t[0] = dfp.t64[0]; \
+ } else if (size == 128) { \
+ t[0] = dfp.t64[HI_IDX]; \
+ t[1] = dfp.t64[LO_IDX]; \
+ } \
+}
+
+DFP_HELPER_IEX(diex, 64)
+DFP_HELPER_IEX(diexq, 128)
+
+static void dfp_clear_lmd_from_g5msb(uint64_t *t)
+{
+
+ /* The most significant 5 bits of the PowerPC DFP format combine bits */
+ /* from the left-most decimal digit (LMD) and the biased exponent. */
+ /* This routine clears the LMD bits while preserving the exponent */
+ /* bits. See "Figure 80: Encoding of bits 0:4 of the G field for */
+ /* Finite Numbers" in the Power ISA for additional details. */
+
+ uint64_t g5msb = (*t >> 58) & 0x1F;
+
+ if ((g5msb >> 3) < 3) { /* LMD in [0-7] ? */
+ *t &= ~(7ULL << 58);
+ } else {
+ switch (g5msb & 7) {
+ case 0:
+ case 1:
+ g5msb = 0;
+ break;
+ case 2:
+ case 3:
+ g5msb = 0x8;
+ break;
+ case 4:
+ case 5:
+ g5msb = 0x10;
+ break;
+ case 6:
+ g5msb = 0x1E;
+ break;
+ case 7:
+ g5msb = 0x1F;
+ break;
+ }
+
+ *t &= ~(0x1fULL << 58);
+ *t |= (g5msb << 58);
+ }
+}
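
A standalone sanity check of the bit manipulation above (assumed combination field 0b01101, i.e. exponent MSBs 01 and left-most digit 5): clearing the LMD while preserving the exponent bits must leave 0b01000.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t t = 0x0DULL << 58;         /* combination field = 0b01101 */
        uint64_t g5msb = (t >> 58) & 0x1F;

        if ((g5msb >> 3) < 3) {             /* LMD in [0-7]: digit is in bits 60:58 */
            t &= ~(7ULL << 58);
        }
        assert(((t >> 58) & 0x1F) == 0x08); /* exponent MSBs kept, LMD cleared */
        puts("ok");
        return 0;
    }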
+
+#define DFP_HELPER_SHIFT(op, size, shift_left) \
+void helper_##op(CPUPPCState *env, uint64_t *t, uint64_t *a, \
+ uint32_t sh) \
+{ \
+ struct PPC_DFP dfp; \
+ unsigned max_digits = ((size) == 64) ? 16 : 34; \
+ \
+ dfp_prepare_decimal##size(&dfp, a, 0, env); \
+ \
+ if (sh <= max_digits) { \
+ \
+ decNumber shd; \
+ unsigned special = dfp.a.bits & DECSPECIAL; \
+ \
+ if (shift_left) { \
+ decNumberFromUInt32(&shd, sh); \
+ } else { \
+ decNumberFromInt32(&shd, -((int32_t)sh)); \
+ } \
+ \
+ dfp.a.bits &= ~DECSPECIAL; \
+ decNumberShift(&dfp.t, &dfp.a, &shd, &dfp.context); \
+ \
+ dfp.t.bits |= special; \
+ if (special && (dfp.t.digits >= max_digits)) { \
+ dfp.t.digits = max_digits - 1; \
+ } \
+ \
+ decimal##size##FromNumber((decimal##size *)dfp.t64, &dfp.t, \
+ &dfp.context); \
+ } else { \
+ if ((size) == 64) { \
+ dfp.t64[0] = dfp.a64[0] & 0xFFFC000000000000ULL; \
+ dfp_clear_lmd_from_g5msb(dfp.t64); \
+ } else { \
+ dfp.t64[HI_IDX] = dfp.a64[HI_IDX] & \
+ 0xFFFFC00000000000ULL; \
+ dfp_clear_lmd_from_g5msb(dfp.t64 + HI_IDX); \
+ dfp.t64[LO_IDX] = 0; \
+ } \
+ } \
+ \
+ if ((size) == 64) { \
+ t[0] = dfp.t64[0]; \
+ } else { \
+ t[0] = dfp.t64[HI_IDX]; \
+ t[1] = dfp.t64[LO_IDX]; \
+ } \
+}
+
+DFP_HELPER_SHIFT(dscli, 64, 1)
+DFP_HELPER_SHIFT(dscliq, 128, 1)
+DFP_HELPER_SHIFT(dscri, 64, 0)
+DFP_HELPER_SHIFT(dscriq, 128, 0)
diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c
new file mode 100644
index 0000000000..93369d4fe5
--- /dev/null
+++ b/target/ppc/excp_helper.c
@@ -0,0 +1,1142 @@
+/*
+ * PowerPC exception emulation helpers for QEMU.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+
+#include "helper_regs.h"
+
+//#define DEBUG_OP
+//#define DEBUG_SOFTWARE_TLB
+//#define DEBUG_EXCEPTIONS
+
+#ifdef DEBUG_EXCEPTIONS
+# define LOG_EXCP(...) qemu_log(__VA_ARGS__)
+#else
+# define LOG_EXCP(...) do { } while (0)
+#endif
+
+/*****************************************************************************/
+/* PowerPC Hypercall emulation */
+
+void (*cpu_ppc_hypercall)(PowerPCCPU *);
+
+/*****************************************************************************/
+/* Exception processing */
+#if defined(CONFIG_USER_ONLY)
+void ppc_cpu_do_interrupt(CPUState *cs)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+
+ cs->exception_index = POWERPC_EXCP_NONE;
+ env->error_code = 0;
+}
+
+static void ppc_hw_interrupt(CPUPPCState *env)
+{
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
+
+ cs->exception_index = POWERPC_EXCP_NONE;
+ env->error_code = 0;
+}
+#else /* defined(CONFIG_USER_ONLY) */
+static inline void dump_syscall(CPUPPCState *env)
+{
+ qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64 " r3=%016" PRIx64
+ " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64
+ " nip=" TARGET_FMT_lx "\n",
+ ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3),
+ ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5),
+ ppc_dump_gpr(env, 6), env->nip);
+}
+
+/* Note that this function should be greatly optimized
+ * when called with a constant excp, from ppc_hw_interrupt
+ */
+static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
+{
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
+ target_ulong msr, new_msr, vector;
+ int srr0, srr1, asrr0, asrr1, lev, ail;
+ bool lpes0;
+
+ qemu_log_mask(CPU_LOG_INT, "Raise exception at " TARGET_FMT_lx
+ " => %08x (%02x)\n", env->nip, excp, env->error_code);
+
+ /* new srr1 value excluding must-be-zero bits */
+ if (excp_model == POWERPC_EXCP_BOOKE) {
+ msr = env->msr;
+ } else {
+ msr = env->msr & ~0x783f0000ULL;
+ }
+
+ /* new interrupt handler msr preserves existing HV and ME unless
+     * explicitly overridden
+ */
+ new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB);
+
+ /* target registers */
+ srr0 = SPR_SRR0;
+ srr1 = SPR_SRR1;
+ asrr0 = -1;
+ asrr1 = -1;
+
+ /* check for special resume at 0x100 from doze/nap/sleep/winkle on P7/P8 */
+ if (env->in_pm_state) {
+ env->in_pm_state = false;
+
+ /* Pretend to be returning from doze always as we don't lose state */
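+        /* SRR1 bit positions here use the ISA's big-endian numbering, so
+         * (63 - n) converts architected bit n into a shift count.
+         */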
+ msr |= (0x1ull << (63 - 47));
+
+            /* Non-machine-check exceptions are routed to 0x100 with a wakeup cause
+ * encoded in SRR1
+ */
+ if (excp != POWERPC_EXCP_MCHECK) {
+ switch (excp) {
+ case POWERPC_EXCP_RESET:
+ msr |= 0x4ull << (63 - 45);
+ break;
+ case POWERPC_EXCP_EXTERNAL:
+ msr |= 0x8ull << (63 - 45);
+ break;
+ case POWERPC_EXCP_DECR:
+ msr |= 0x6ull << (63 - 45);
+ break;
+ case POWERPC_EXCP_SDOOR:
+ msr |= 0x5ull << (63 - 45);
+ break;
+ case POWERPC_EXCP_SDOOR_HV:
+ msr |= 0x3ull << (63 - 45);
+ break;
+ case POWERPC_EXCP_HV_MAINT:
+ msr |= 0xaull << (63 - 45);
+ break;
+ default:
+ cpu_abort(cs, "Unsupported exception %d in Power Save mode\n",
+ excp);
+ }
+ excp = POWERPC_EXCP_RESET;
+ }
+ }
+
+    /* Exception targeting modifiers
+ *
+ * LPES0 is supported on POWER7/8
+ * LPES1 is not supported (old iSeries mode)
+ *
+ * On anything else, we behave as if LPES0 is 1
+ * (externals don't alter MSR:HV)
+ *
+ * AIL is initialized here but can be cleared by
+ * selected exceptions
+ */
+#if defined(TARGET_PPC64)
+ if (excp_model == POWERPC_EXCP_POWER7 ||
+ excp_model == POWERPC_EXCP_POWER8) {
+ lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
+ if (excp_model == POWERPC_EXCP_POWER8) {
+ ail = (env->spr[SPR_LPCR] & LPCR_AIL) >> LPCR_AIL_SHIFT;
+ } else {
+ ail = 0;
+ }
+ } else
+#endif /* defined(TARGET_PPC64) */
+ {
+ lpes0 = true;
+ ail = 0;
+ }
+
+    /* The hypervisor emulation assistance interrupt only exists on server
+     * arch 2.05 or later. We also don't want to generate it if
+ * we don't have HVB in msr_mask (PAPR mode).
+ */
+ if (excp == POWERPC_EXCP_HV_EMU
+#if defined(TARGET_PPC64)
+ && !((env->mmu_model & POWERPC_MMU_64) && (env->msr_mask & MSR_HVB))
+#endif /* defined(TARGET_PPC64) */
+
+ ) {
+ excp = POWERPC_EXCP_PROGRAM;
+ }
+
+ switch (excp) {
+ case POWERPC_EXCP_NONE:
+ /* Should never happen */
+ return;
+ case POWERPC_EXCP_CRITICAL: /* Critical input */
+ switch (excp_model) {
+ case POWERPC_EXCP_40x:
+ srr0 = SPR_40x_SRR2;
+ srr1 = SPR_40x_SRR3;
+ break;
+ case POWERPC_EXCP_BOOKE:
+ srr0 = SPR_BOOKE_CSRR0;
+ srr1 = SPR_BOOKE_CSRR1;
+ break;
+ case POWERPC_EXCP_G2:
+ break;
+ default:
+ goto excp_invalid;
+ }
+ break;
+ case POWERPC_EXCP_MCHECK: /* Machine check exception */
+ if (msr_me == 0) {
+ /* Machine check exception is not enabled.
+ * Enter checkstop state.
+ */
+ fprintf(stderr, "Machine check while not allowed. "
+ "Entering checkstop state\n");
+ if (qemu_log_separate()) {
+ qemu_log("Machine check while not allowed. "
+ "Entering checkstop state\n");
+ }
+ cs->halted = 1;
+ cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
+ }
+ if (env->msr_mask & MSR_HVB) {
+ /* ISA specifies HV, but can be delivered to guest with HV clear
+ * (e.g., see FWNMI in PAPR).
+ */
+ new_msr |= (target_ulong)MSR_HVB;
+ }
+ ail = 0;
+
+ /* machine check exceptions don't have ME set */
+ new_msr &= ~((target_ulong)1 << MSR_ME);
+
+ /* XXX: should also have something loaded in DAR / DSISR */
+ switch (excp_model) {
+ case POWERPC_EXCP_40x:
+ srr0 = SPR_40x_SRR2;
+ srr1 = SPR_40x_SRR3;
+ break;
+ case POWERPC_EXCP_BOOKE:
+ /* FIXME: choose one or the other based on CPU type */
+ srr0 = SPR_BOOKE_MCSRR0;
+ srr1 = SPR_BOOKE_MCSRR1;
+ asrr0 = SPR_BOOKE_CSRR0;
+ asrr1 = SPR_BOOKE_CSRR1;
+ break;
+ default:
+ break;
+ }
+ break;
+ case POWERPC_EXCP_DSI: /* Data storage exception */
+ LOG_EXCP("DSI exception: DSISR=" TARGET_FMT_lx" DAR=" TARGET_FMT_lx
+ "\n", env->spr[SPR_DSISR], env->spr[SPR_DAR]);
+ break;
+ case POWERPC_EXCP_ISI: /* Instruction storage exception */
+ LOG_EXCP("ISI exception: msr=" TARGET_FMT_lx ", nip=" TARGET_FMT_lx
+ "\n", msr, env->nip);
+ msr |= env->error_code;
+ break;
+ case POWERPC_EXCP_EXTERNAL: /* External input */
+ cs = CPU(cpu);
+
+ if (!lpes0) {
+ new_msr |= (target_ulong)MSR_HVB;
+ new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
+ srr0 = SPR_HSRR0;
+ srr1 = SPR_HSRR1;
+ }
+ if (env->mpic_proxy) {
+ /* IACK the IRQ on delivery */
+ env->spr[SPR_BOOKE_EPR] = ldl_phys(cs->as, env->mpic_iack);
+ }
+ break;
+ case POWERPC_EXCP_ALIGN: /* Alignment exception */
+ /* Get rS/rD and rA from faulting opcode */
+ /* Note: the opcode fields will not be set properly for a direct
+ * store load/store, but nobody cares as nobody actually uses
+ * direct store segments.
+ */
+ env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16;
+ break;
+ case POWERPC_EXCP_PROGRAM: /* Program exception */
+ switch (env->error_code & ~0xF) {
+ case POWERPC_EXCP_FP:
+ if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) {
+ LOG_EXCP("Ignore floating point exception\n");
+ cs->exception_index = POWERPC_EXCP_NONE;
+ env->error_code = 0;
+ return;
+ }
+
+ /* FP exceptions always have NIP pointing to the faulting
+ * instruction, so always use store_next and claim we are
+ * precise in the MSR.
+ */
+ msr |= 0x00100000;
+ break;
+ case POWERPC_EXCP_INVAL:
+ LOG_EXCP("Invalid instruction at " TARGET_FMT_lx "\n", env->nip);
+ msr |= 0x00080000;
+ env->spr[SPR_BOOKE_ESR] = ESR_PIL;
+ break;
+ case POWERPC_EXCP_PRIV:
+ msr |= 0x00040000;
+ env->spr[SPR_BOOKE_ESR] = ESR_PPR;
+ break;
+ case POWERPC_EXCP_TRAP:
+ msr |= 0x00020000;
+ env->spr[SPR_BOOKE_ESR] = ESR_PTR;
+ break;
+ default:
+ /* Should never occur */
+ cpu_abort(cs, "Invalid program exception %d. Aborting\n",
+ env->error_code);
+ break;
+ }
+ break;
+ case POWERPC_EXCP_SYSCALL: /* System call exception */
+ dump_syscall(env);
+ lev = env->error_code;
+
+ /* We need to correct the NIP which in this case is supposed
+ * to point to the next instruction
+ */
+ env->nip += 4;
+
+ /* "PAPR mode" built-in hypercall emulation */
+ if ((lev == 1) && cpu_ppc_hypercall) {
+ cpu_ppc_hypercall(cpu);
+ return;
+ }
+ if (lev == 1) {
+ new_msr |= (target_ulong)MSR_HVB;
+ }
+ break;
+ case POWERPC_EXCP_FPU: /* Floating-point unavailable exception */
+ case POWERPC_EXCP_APU: /* Auxiliary processor unavailable */
+ case POWERPC_EXCP_DECR: /* Decrementer exception */
+ break;
+ case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */
+ /* FIT on 4xx */
+ LOG_EXCP("FIT exception\n");
+ break;
+ case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */
+ LOG_EXCP("WDT exception\n");
+ switch (excp_model) {
+ case POWERPC_EXCP_BOOKE:
+ srr0 = SPR_BOOKE_CSRR0;
+ srr1 = SPR_BOOKE_CSRR1;
+ break;
+ default:
+ break;
+ }
+ break;
+ case POWERPC_EXCP_DTLB: /* Data TLB error */
+ case POWERPC_EXCP_ITLB: /* Instruction TLB error */
+ break;
+ case POWERPC_EXCP_DEBUG: /* Debug interrupt */
+ switch (excp_model) {
+ case POWERPC_EXCP_BOOKE:
+ /* FIXME: choose one or the other based on CPU type */
+ srr0 = SPR_BOOKE_DSRR0;
+ srr1 = SPR_BOOKE_DSRR1;
+ asrr0 = SPR_BOOKE_CSRR0;
+ asrr1 = SPR_BOOKE_CSRR1;
+ break;
+ default:
+ break;
+ }
+ /* XXX: TODO */
+ cpu_abort(cs, "Debug exception is not implemented yet !\n");
+ break;
+ case POWERPC_EXCP_SPEU: /* SPE/embedded floating-point unavailable */
+ env->spr[SPR_BOOKE_ESR] = ESR_SPV;
+ break;
+ case POWERPC_EXCP_EFPDI: /* Embedded floating-point data interrupt */
+ /* XXX: TODO */
+ cpu_abort(cs, "Embedded floating point data exception "
+ "is not implemented yet !\n");
+ env->spr[SPR_BOOKE_ESR] = ESR_SPV;
+ break;
+ case POWERPC_EXCP_EFPRI: /* Embedded floating-point round interrupt */
+ /* XXX: TODO */
+ cpu_abort(cs, "Embedded floating point round exception "
+ "is not implemented yet !\n");
+ env->spr[SPR_BOOKE_ESR] = ESR_SPV;
+ break;
+ case POWERPC_EXCP_EPERFM: /* Embedded performance monitor interrupt */
+ /* XXX: TODO */
+ cpu_abort(cs,
+ "Performance counter exception is not implemented yet !\n");
+ break;
+ case POWERPC_EXCP_DOORI: /* Embedded doorbell interrupt */
+ break;
+ case POWERPC_EXCP_DOORCI: /* Embedded doorbell critical interrupt */
+ srr0 = SPR_BOOKE_CSRR0;
+ srr1 = SPR_BOOKE_CSRR1;
+ break;
+ case POWERPC_EXCP_RESET: /* System reset exception */
+ /* A power-saving exception sets ME, otherwise it is unchanged */
+ if (msr_pow) {
+ /* indicate that we resumed from power save mode */
+ msr |= 0x10000;
+ new_msr |= ((target_ulong)1 << MSR_ME);
+ }
+ if (env->msr_mask & MSR_HVB) {
+ /* ISA specifies HV, but can be delivered to guest with HV clear
+ * (e.g., see FWNMI in PAPR, NMI injection in QEMU).
+ */
+ new_msr |= (target_ulong)MSR_HVB;
+ } else {
+ if (msr_pow) {
+ cpu_abort(cs, "Trying to deliver power-saving system reset "
+ "exception %d with no HV support\n", excp);
+ }
+ }
+ ail = 0;
+ break;
+ case POWERPC_EXCP_DSEG: /* Data segment exception */
+ case POWERPC_EXCP_ISEG: /* Instruction segment exception */
+ case POWERPC_EXCP_TRACE: /* Trace exception */
+ break;
+ case POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */
+ case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */
+ case POWERPC_EXCP_HISI: /* Hypervisor instruction storage exception */
+ case POWERPC_EXCP_HDSEG: /* Hypervisor data segment exception */
+ case POWERPC_EXCP_HISEG: /* Hypervisor instruction segment exception */
+ case POWERPC_EXCP_HV_EMU:
+ srr0 = SPR_HSRR0;
+ srr1 = SPR_HSRR1;
+ new_msr |= (target_ulong)MSR_HVB;
+ new_msr |= env->msr & ((target_ulong)1 << MSR_RI);
+ break;
+ case POWERPC_EXCP_VPU: /* Vector unavailable exception */
+ case POWERPC_EXCP_VSXU: /* VSX unavailable exception */
+ case POWERPC_EXCP_FU: /* Facility unavailable exception */
+#ifdef TARGET_PPC64
+ env->spr[SPR_FSCR] |= ((target_ulong)env->error_code << 56);
+#endif
+ break;
+ case POWERPC_EXCP_PIT: /* Programmable interval timer interrupt */
+ LOG_EXCP("PIT exception\n");
+ break;
+ case POWERPC_EXCP_IO: /* IO error exception */
+ /* XXX: TODO */
+ cpu_abort(cs, "601 IO error exception is not implemented yet !\n");
+ break;
+ case POWERPC_EXCP_RUNM: /* Run mode exception */
+ /* XXX: TODO */
+ cpu_abort(cs, "601 run mode exception is not implemented yet !\n");
+ break;
+ case POWERPC_EXCP_EMUL: /* Emulation trap exception */
+ /* XXX: TODO */
+ cpu_abort(cs, "602 emulation trap exception "
+ "is not implemented yet !\n");
+ break;
+ case POWERPC_EXCP_IFTLB: /* Instruction fetch TLB error */
+ switch (excp_model) {
+ case POWERPC_EXCP_602:
+ case POWERPC_EXCP_603:
+ case POWERPC_EXCP_603E:
+ case POWERPC_EXCP_G2:
+ goto tlb_miss_tgpr;
+ case POWERPC_EXCP_7x5:
+ goto tlb_miss;
+ case POWERPC_EXCP_74xx:
+ goto tlb_miss_74xx;
+ default:
+ cpu_abort(cs, "Invalid instruction TLB miss exception\n");
+ break;
+ }
+ break;
+ case POWERPC_EXCP_DLTLB: /* Data load TLB miss */
+ switch (excp_model) {
+ case POWERPC_EXCP_602:
+ case POWERPC_EXCP_603:
+ case POWERPC_EXCP_603E:
+ case POWERPC_EXCP_G2:
+ goto tlb_miss_tgpr;
+ case POWERPC_EXCP_7x5:
+ goto tlb_miss;
+ case POWERPC_EXCP_74xx:
+ goto tlb_miss_74xx;
+ default:
+ cpu_abort(cs, "Invalid data load TLB miss exception\n");
+ break;
+ }
+ break;
+ case POWERPC_EXCP_DSTLB: /* Data store TLB miss */
+ switch (excp_model) {
+ case POWERPC_EXCP_602:
+ case POWERPC_EXCP_603:
+ case POWERPC_EXCP_603E:
+ case POWERPC_EXCP_G2:
+ tlb_miss_tgpr:
+ /* Swap temporary saved registers with GPRs */
+ if (!(new_msr & ((target_ulong)1 << MSR_TGPR))) {
+ new_msr |= (target_ulong)1 << MSR_TGPR;
+ hreg_swap_gpr_tgpr(env);
+ }
+ goto tlb_miss;
+ case POWERPC_EXCP_7x5:
+ tlb_miss:
+#if defined(DEBUG_SOFTWARE_TLB)
+ if (qemu_log_enabled()) {
+ const char *es;
+ target_ulong *miss, *cmp;
+ int en;
+
+ if (excp == POWERPC_EXCP_IFTLB) {
+ es = "I";
+ en = 'I';
+ miss = &env->spr[SPR_IMISS];
+ cmp = &env->spr[SPR_ICMP];
+ } else {
+ if (excp == POWERPC_EXCP_DLTLB) {
+ es = "DL";
+ } else {
+ es = "DS";
+ }
+ en = 'D';
+ miss = &env->spr[SPR_DMISS];
+ cmp = &env->spr[SPR_DCMP];
+ }
+ qemu_log("6xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
+ TARGET_FMT_lx " H1 " TARGET_FMT_lx " H2 "
+ TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
+ env->spr[SPR_HASH1], env->spr[SPR_HASH2],
+ env->error_code);
+ }
+#endif
+ msr |= env->crf[0] << 28;
+ msr |= env->error_code; /* key, D/I, S/L bits */
+            /* Set the way using an LRU mechanism */
+ msr |= ((env->last_way + 1) & (env->nb_ways - 1)) << 17;
+ break;
+ case POWERPC_EXCP_74xx:
+ tlb_miss_74xx:
+#if defined(DEBUG_SOFTWARE_TLB)
+ if (qemu_log_enabled()) {
+ const char *es;
+ target_ulong *miss, *cmp;
+ int en;
+
+ if (excp == POWERPC_EXCP_IFTLB) {
+ es = "I";
+ en = 'I';
+ miss = &env->spr[SPR_TLBMISS];
+ cmp = &env->spr[SPR_PTEHI];
+ } else {
+ if (excp == POWERPC_EXCP_DLTLB) {
+ es = "DL";
+ } else {
+ es = "DS";
+ }
+ en = 'D';
+ miss = &env->spr[SPR_TLBMISS];
+ cmp = &env->spr[SPR_PTEHI];
+ }
+ qemu_log("74xx %sTLB miss: %cM " TARGET_FMT_lx " %cC "
+ TARGET_FMT_lx " %08x\n", es, en, *miss, en, *cmp,
+ env->error_code);
+ }
+#endif
+ msr |= env->error_code; /* key bit */
+ break;
+ default:
+ cpu_abort(cs, "Invalid data store TLB miss exception\n");
+ break;
+ }
+ break;
+ case POWERPC_EXCP_FPA: /* Floating-point assist exception */
+ /* XXX: TODO */
+ cpu_abort(cs, "Floating point assist exception "
+ "is not implemented yet !\n");
+ break;
+ case POWERPC_EXCP_DABR: /* Data address breakpoint */
+ /* XXX: TODO */
+ cpu_abort(cs, "DABR exception is not implemented yet !\n");
+ break;
+ case POWERPC_EXCP_IABR: /* Instruction address breakpoint */
+ /* XXX: TODO */
+ cpu_abort(cs, "IABR exception is not implemented yet !\n");
+ break;
+ case POWERPC_EXCP_SMI: /* System management interrupt */
+ /* XXX: TODO */
+ cpu_abort(cs, "SMI exception is not implemented yet !\n");
+ break;
+ case POWERPC_EXCP_THERM: /* Thermal interrupt */
+ /* XXX: TODO */
+ cpu_abort(cs, "Thermal management exception "
+ "is not implemented yet !\n");
+ break;
+ case POWERPC_EXCP_PERFM: /* Embedded performance monitor interrupt */
+ /* XXX: TODO */
+ cpu_abort(cs,
+ "Performance counter exception is not implemented yet !\n");
+ break;
+ case POWERPC_EXCP_VPUA: /* Vector assist exception */
+ /* XXX: TODO */
+ cpu_abort(cs, "VPU assist exception is not implemented yet !\n");
+ break;
+ case POWERPC_EXCP_SOFTP: /* Soft patch exception */
+ /* XXX: TODO */
+ cpu_abort(cs,
+ "970 soft-patch exception is not implemented yet !\n");
+ break;
+ case POWERPC_EXCP_MAINT: /* Maintenance exception */
+ /* XXX: TODO */
+ cpu_abort(cs,
+ "970 maintenance exception is not implemented yet !\n");
+ break;
+ case POWERPC_EXCP_MEXTBR: /* Maskable external breakpoint */
+ /* XXX: TODO */
+ cpu_abort(cs, "Maskable external exception "
+ "is not implemented yet !\n");
+ break;
+ case POWERPC_EXCP_NMEXTBR: /* Non maskable external breakpoint */
+ /* XXX: TODO */
+ cpu_abort(cs, "Non maskable external exception "
+ "is not implemented yet !\n");
+ break;
+ default:
+ excp_invalid:
+ cpu_abort(cs, "Invalid PowerPC exception %d. Aborting\n", excp);
+ break;
+ }
+
+ /* Save PC */
+ env->spr[srr0] = env->nip;
+
+ /* Save MSR */
+ env->spr[srr1] = msr;
+
+ /* Sanity check */
+ if (!(env->msr_mask & MSR_HVB)) {
+ if (new_msr & MSR_HVB) {
+ cpu_abort(cs, "Trying to deliver HV exception (MSR) %d with "
+ "no HV support\n", excp);
+ }
+ if (srr0 == SPR_HSRR0) {
+ cpu_abort(cs, "Trying to deliver HV exception (HSRR) %d with "
+ "no HV support\n", excp);
+ }
+ }
+
+    /* If any alternate SRR registers are defined, duplicate the saved values */
+ if (asrr0 != -1) {
+ env->spr[asrr0] = env->spr[srr0];
+ }
+ if (asrr1 != -1) {
+ env->spr[asrr1] = env->spr[srr1];
+ }
+
+    /* Sort out the endianness of the interrupt; this differs depending on
+     * the CPU, the HV mode, etc.
+ */
+#ifdef TARGET_PPC64
+ if (excp_model == POWERPC_EXCP_POWER7) {
+ if (!(new_msr & MSR_HVB) && (env->spr[SPR_LPCR] & LPCR_ILE)) {
+ new_msr |= (target_ulong)1 << MSR_LE;
+ }
+ } else if (excp_model == POWERPC_EXCP_POWER8) {
+ if (new_msr & MSR_HVB) {
+ if (env->spr[SPR_HID0] & HID0_HILE) {
+ new_msr |= (target_ulong)1 << MSR_LE;
+ }
+ } else if (env->spr[SPR_LPCR] & LPCR_ILE) {
+ new_msr |= (target_ulong)1 << MSR_LE;
+ }
+ } else if (msr_ile) {
+ new_msr |= (target_ulong)1 << MSR_LE;
+ }
+#else
+ if (msr_ile) {
+ new_msr |= (target_ulong)1 << MSR_LE;
+ }
+#endif
+
+ /* Jump to handler */
+ vector = env->excp_vectors[excp];
+ if (vector == (target_ulong)-1ULL) {
+ cpu_abort(cs, "Raised an exception without defined vector %d\n",
+ excp);
+ }
+ vector |= env->excp_prefix;
+
+ /* AIL only works if there is no HV transition and we are running with
+ * translations enabled
+ */
+ if (!((msr >> MSR_IR) & 1) || !((msr >> MSR_DR) & 1) ||
+ ((new_msr & MSR_HVB) && !(msr & MSR_HVB))) {
+ ail = 0;
+ }
+ /* Handle AIL */
+ if (ail) {
+ new_msr |= (1 << MSR_IR) | (1 << MSR_DR);
+        switch (ail) {
+ case AIL_0001_8000:
+ vector |= 0x18000;
+ break;
+ case AIL_C000_0000_0000_4000:
+ vector |= 0xc000000000004000ull;
+ break;
+ default:
+ cpu_abort(cs, "Invalid AIL combination %d\n", ail);
+ break;
+ }
+ }
+
+#if defined(TARGET_PPC64)
+ if (excp_model == POWERPC_EXCP_BOOKE) {
+ if (env->spr[SPR_BOOKE_EPCR] & EPCR_ICM) {
+ /* Cat.64-bit: EPCR.ICM is copied to MSR.CM */
+ new_msr |= (target_ulong)1 << MSR_CM;
+ } else {
+ vector = (uint32_t)vector;
+ }
+ } else {
+ if (!msr_isf && !(env->mmu_model & POWERPC_MMU_64)) {
+ vector = (uint32_t)vector;
+ } else {
+ new_msr |= (target_ulong)1 << MSR_SF;
+ }
+ }
+#endif
+    /* We don't use hreg_store_msr here as we have already handled
+     * any special case that could occur. Just store MSR and update hflags.
+ *
+ * Note: We *MUST* not use hreg_store_msr() as-is anyway because it
+ * will prevent setting of the HV bit which some exceptions might need
+ * to do.
+ */
+ env->msr = new_msr & env->msr_mask;
+ hreg_compute_hflags(env);
+ env->nip = vector;
+ /* Reset exception state */
+ cs->exception_index = POWERPC_EXCP_NONE;
+ env->error_code = 0;
+
+    /* Any interrupt is context synchronizing; check if the TCG TLB
+ * needs a delayed flush on ppc64
+ */
+ check_tlb_flush(env, false);
+}
+
+void ppc_cpu_do_interrupt(CPUState *cs)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+
+ powerpc_excp(cpu, env->excp_model, cs->exception_index);
+}
+
+static void ppc_hw_interrupt(CPUPPCState *env)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+#if 0
+ CPUState *cs = CPU(cpu);
+
+ qemu_log_mask(CPU_LOG_INT, "%s: %p pending %08x req %08x me %d ee %d\n",
+ __func__, env, env->pending_interrupts,
+ cs->interrupt_request, (int)msr_me, (int)msr_ee);
+#endif
+ /* External reset */
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_RESET)) {
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_RESET);
+ powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
+ return;
+ }
+ /* Machine check exception */
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_MCK)) {
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_MCK);
+ powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_MCHECK);
+ return;
+ }
+#if 0 /* TODO */
+ /* External debug exception */
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_DEBUG)) {
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DEBUG);
+ powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DEBUG);
+ return;
+ }
+#endif
+ /* Hypervisor decrementer exception */
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_HDECR)) {
+ /* LPCR will be clear when not supported so this will work */
+ bool hdice = !!(env->spr[SPR_LPCR] & LPCR_HDICE);
+ if ((msr_ee != 0 || msr_hv == 0) && hdice) {
+ /* HDEC clears on delivery */
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
+ powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_HDECR);
+ return;
+ }
+ }
+    /* External interrupts can ignore MSR:EE under some circumstances */
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_EXT)) {
+ bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0);
+ if (msr_ee != 0 || (env->has_hv_mode && msr_hv == 0 && !lpes0)) {
+ powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_EXTERNAL);
+ return;
+ }
+ }
+ if (msr_ce != 0) {
+ /* External critical interrupt */
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_CEXT)) {
+ /* Taking a critical external interrupt does not clear the external
+ * critical interrupt status
+ */
+#if 0
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CEXT);
+#endif
+ powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_CRITICAL);
+ return;
+ }
+ }
+ if (msr_ee != 0) {
+ /* Watchdog timer on embedded PowerPC */
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_WDT)) {
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_WDT);
+ powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_WDT);
+ return;
+ }
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_CDOORBELL)) {
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_CDOORBELL);
+ powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORCI);
+ return;
+ }
+ /* Fixed interval timer on embedded PowerPC */
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_FIT)) {
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_FIT);
+ powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_FIT);
+ return;
+ }
+ /* Programmable interval timer on embedded PowerPC */
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_PIT)) {
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PIT);
+ powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PIT);
+ return;
+ }
+ /* Decrementer exception */
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_DECR)) {
+ if (ppc_decr_clear_on_delivery(env)) {
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DECR);
+ }
+ powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DECR);
+ return;
+ }
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_DOORBELL)) {
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_DOORBELL);
+ powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_DOORI);
+ return;
+ }
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_PERFM)) {
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_PERFM);
+ powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_PERFM);
+ return;
+ }
+ /* Thermal interrupt */
+ if (env->pending_interrupts & (1 << PPC_INTERRUPT_THERM)) {
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_THERM);
+ powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_THERM);
+ return;
+ }
+ }
+}
+
+void ppc_cpu_do_system_reset(CPUState *cs)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+
+ powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
+}
+#endif /* !CONFIG_USER_ONLY */
+
+bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+
+ if (interrupt_request & CPU_INTERRUPT_HARD) {
+ ppc_hw_interrupt(env);
+ if (env->pending_interrupts == 0) {
+ cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
+ }
+ return true;
+ }
+ return false;
+}
+
+#if defined(DEBUG_OP)
+static void cpu_dump_rfi(target_ulong RA, target_ulong msr)
+{
+ qemu_log("Return from exception at " TARGET_FMT_lx " with flags "
+ TARGET_FMT_lx "\n", RA, msr);
+}
+#endif
+
+/*****************************************************************************/
+/* Exceptions processing helpers */
+
+void raise_exception_err_ra(CPUPPCState *env, uint32_t exception,
+ uint32_t error_code, uintptr_t raddr)
+{
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
+
+ cs->exception_index = exception;
+ env->error_code = error_code;
+ cpu_loop_exit_restore(cs, raddr);
+}
+
+void raise_exception_err(CPUPPCState *env, uint32_t exception,
+ uint32_t error_code)
+{
+ raise_exception_err_ra(env, exception, error_code, 0);
+}
+
+void raise_exception(CPUPPCState *env, uint32_t exception)
+{
+ raise_exception_err_ra(env, exception, 0, 0);
+}
+
+void raise_exception_ra(CPUPPCState *env, uint32_t exception,
+ uintptr_t raddr)
+{
+ raise_exception_err_ra(env, exception, 0, raddr);
+}
+
+void helper_raise_exception_err(CPUPPCState *env, uint32_t exception,
+ uint32_t error_code)
+{
+ raise_exception_err_ra(env, exception, error_code, 0);
+}
+
+void helper_raise_exception(CPUPPCState *env, uint32_t exception)
+{
+ raise_exception_err_ra(env, exception, 0, 0);
+}
+
+#if !defined(CONFIG_USER_ONLY)
+void helper_store_msr(CPUPPCState *env, target_ulong val)
+{
+ uint32_t excp = hreg_store_msr(env, val, 0);
+
+ if (excp != 0) {
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
+ cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
+ raise_exception(env, excp);
+ }
+}
+
+#if defined(TARGET_PPC64)
+void helper_pminsn(CPUPPCState *env, powerpc_pm_insn_t insn)
+{
+ CPUState *cs;
+
+ cs = CPU(ppc_env_get_cpu(env));
+ cs->halted = 1;
+ env->in_pm_state = true;
+
+ /* The architecture specifies that HDEC interrupts are
+ * discarded in PM states
+ */
+ env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR);
+
+ /* Technically, nap doesn't set EE, but if we don't set it
+ * then ppc_hw_interrupt() won't deliver. We could add some
+ * other tests there based on LPCR but it's simpler to just
+ * whack EE in. It will be cleared by the 0x100 at wakeup
+ * anyway. It will still be observable by the guest in SRR1
+ * but this doesn't seem to be a problem.
+ */
+ env->msr |= (1ull << MSR_EE);
+ raise_exception(env, EXCP_HLT);
+}
+#endif /* defined(TARGET_PPC64) */
+
+static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr)
+{
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
+
+ /* MSR:POW cannot be set by any form of rfi */
+ msr &= ~(1ULL << MSR_POW);
+
+#if defined(TARGET_PPC64)
+    /* Switching to 32-bit? Crop the nip */
+ if (!msr_is_64bit(env, msr)) {
+ nip = (uint32_t)nip;
+ }
+#else
+ nip = (uint32_t)nip;
+#endif
+ /* XXX: beware: this is false if VLE is supported */
+ env->nip = nip & ~((target_ulong)0x00000003);
+ hreg_store_msr(env, msr, 1);
+#if defined(DEBUG_OP)
+ cpu_dump_rfi(env->nip, env->msr);
+#endif
+ /* No need to raise an exception here,
+ * as rfi is always the last insn of a TB
+ */
+ cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
+
+ /* Context synchronizing: check if TCG TLB needs flush */
+ check_tlb_flush(env, false);
+}
+
+void helper_rfi(CPUPPCState *env)
+{
+ do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1] & 0xfffffffful);
+}
+
+#define MSR_BOOK3S_MASK
+#if defined(TARGET_PPC64)
+void helper_rfid(CPUPPCState *env)
+{
+    /* The architecture defines a number of rules for which bits
+ * can change but in practice, we handle this in hreg_store_msr()
+ * which will be called by do_rfi(), so there is no need to filter
+ * here
+ */
+ do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1]);
+}
+
+void helper_hrfid(CPUPPCState *env)
+{
+ do_rfi(env, env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
+}
+#endif
+
+/*****************************************************************************/
+/* Embedded PowerPC specific helpers */
+void helper_40x_rfci(CPUPPCState *env)
+{
+ do_rfi(env, env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3]);
+}
+
+void helper_rfci(CPUPPCState *env)
+{
+ do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1]);
+}
+
+void helper_rfdi(CPUPPCState *env)
+{
+ /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
+ do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1]);
+}
+
+void helper_rfmci(CPUPPCState *env)
+{
+ /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
+ do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
+}
+#endif
+
+void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
+ uint32_t flags)
+{
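+    /* flags mirrors the TO field of tw: 0x10 lt, 0x08 gt, 0x04 eq,
+     * 0x02 ltu, 0x01 gtu; trap when any selected condition holds.
+     */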
+ if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) ||
+ ((int32_t)arg1 > (int32_t)arg2 && (flags & 0x08)) ||
+ ((int32_t)arg1 == (int32_t)arg2 && (flags & 0x04)) ||
+ ((uint32_t)arg1 < (uint32_t)arg2 && (flags & 0x02)) ||
+ ((uint32_t)arg1 > (uint32_t)arg2 && (flags & 0x01))))) {
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_TRAP, GETPC());
+ }
+}
+
+#if defined(TARGET_PPC64)
+void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2,
+ uint32_t flags)
+{
+ if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) ||
+ ((int64_t)arg1 > (int64_t)arg2 && (flags & 0x08)) ||
+ ((int64_t)arg1 == (int64_t)arg2 && (flags & 0x04)) ||
+ ((uint64_t)arg1 < (uint64_t)arg2 && (flags & 0x02)) ||
+ ((uint64_t)arg1 > (uint64_t)arg2 && (flags & 0x01))))) {
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_TRAP, GETPC());
+ }
+}
+#endif
+
+#if !defined(CONFIG_USER_ONLY)
+/*****************************************************************************/
+/* PowerPC 601 specific instructions (POWER bridge) */
+
+void helper_rfsvc(CPUPPCState *env)
+{
+ do_rfi(env, env->lr, env->ctr & 0x0000FFFF);
+}
+
+/* Embedded.Processor Control */
+static int dbell2irq(target_ulong rb)
+{
+ int msg = rb & DBELL_TYPE_MASK;
+ int irq = -1;
+
+ switch (msg) {
+ case DBELL_TYPE_DBELL:
+ irq = PPC_INTERRUPT_DOORBELL;
+ break;
+ case DBELL_TYPE_DBELL_CRIT:
+ irq = PPC_INTERRUPT_CDOORBELL;
+ break;
+ case DBELL_TYPE_G_DBELL:
+ case DBELL_TYPE_G_DBELL_CRIT:
+ case DBELL_TYPE_G_DBELL_MC:
+ /* XXX implement */
+ default:
+ break;
+ }
+
+ return irq;
+}
+
+void helper_msgclr(CPUPPCState *env, target_ulong rb)
+{
+ int irq = dbell2irq(rb);
+
+ if (irq < 0) {
+ return;
+ }
+
+ env->pending_interrupts &= ~(1 << irq);
+}
+
+void helper_msgsnd(target_ulong rb)
+{
+ int irq = dbell2irq(rb);
+ int pir = rb & DBELL_PIRTAG_MASK;
+ CPUState *cs;
+
+ if (irq < 0) {
+ return;
+ }
+
+ CPU_FOREACH(cs) {
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *cenv = &cpu->env;
+
+ if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) {
+ cenv->pending_interrupts |= 1 << irq;
+ cpu_interrupt(cs, CPU_INTERRUPT_HARD);
+ }
+ }
+}
+#endif
diff --git a/target/ppc/fpu_helper.c b/target/ppc/fpu_helper.c
new file mode 100644
index 0000000000..8a389e19af
--- /dev/null
+++ b/target/ppc/fpu_helper.c
@@ -0,0 +1,2789 @@
+/*
+ * PowerPC floating point and SPE emulation helpers for QEMU.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+
+#define float64_snan_to_qnan(x) ((x) | 0x0008000000000000ULL)
+#define float32_snan_to_qnan(x) ((x) | 0x00400000)
+
+/*****************************************************************************/
+/* Floating point operations helpers */
+uint64_t helper_float32_to_float64(CPUPPCState *env, uint32_t arg)
+{
+ CPU_FloatU f;
+ CPU_DoubleU d;
+
+ f.l = arg;
+ d.d = float32_to_float64(f.f, &env->fp_status);
+ return d.ll;
+}
+
+uint32_t helper_float64_to_float32(CPUPPCState *env, uint64_t arg)
+{
+ CPU_FloatU f;
+ CPU_DoubleU d;
+
+ d.ll = arg;
+ f.f = float64_to_float32(d.d, &env->fp_status);
+ return f.l;
+}
+
+static inline int isden(float64 d)
+{
+ CPU_DoubleU u;
+
+ u.d = d;
+
+ return ((u.ll >> 52) & 0x7FF) == 0;
+}
+
+static inline int ppc_float32_get_unbiased_exp(float32 f)
+{
+ return ((f >> 23) & 0xFF) - 127;
+}
+
+static inline int ppc_float64_get_unbiased_exp(float64 f)
+{
+ return ((f >> 52) & 0x7FF) - 1023;
+}
+
+void helper_compute_fprf(CPUPPCState *env, uint64_t arg)
+{
+ CPU_DoubleU farg;
+ int isneg;
+ int fprf;
+
+ farg.ll = arg;
+ isneg = float64_is_neg(farg.d);
+ if (unlikely(float64_is_any_nan(farg.d))) {
+ if (float64_is_signaling_nan(farg.d, &env->fp_status)) {
+ /* Signaling NaN: flags are undefined */
+ fprf = 0x00;
+ } else {
+ /* Quiet NaN */
+ fprf = 0x11;
+ }
+ } else if (unlikely(float64_is_infinity(farg.d))) {
+ /* +/- infinity */
+ if (isneg) {
+ fprf = 0x09;
+ } else {
+ fprf = 0x05;
+ }
+ } else {
+ if (float64_is_zero(farg.d)) {
+ /* +/- zero */
+ if (isneg) {
+ fprf = 0x12;
+ } else {
+ fprf = 0x02;
+ }
+ } else {
+ if (isden(farg.d)) {
+ /* Denormalized numbers */
+ fprf = 0x10;
+ } else {
+ /* Normalized numbers */
+ fprf = 0x00;
+ }
+ if (isneg) {
+ fprf |= 0x08;
+ } else {
+ fprf |= 0x04;
+ }
+ }
+ }
+ /* We update FPSCR_FPRF */
+ env->fpscr &= ~(0x1F << FPSCR_FPRF);
+ env->fpscr |= fprf << FPSCR_FPRF;
+}
+
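A standalone cross-check of the class codes produced by helper_compute_fprf (assumed value -1.5, using the host's IEEE-754 double layout rather than CPU_DoubleU): a negative normalized number must classify as FPRF = 0x08.

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
        double d = -1.5;
        uint64_t bits;
        memcpy(&bits, &d, sizeof(bits));

        int isneg = bits >> 63;
        int exp = (bits >> 52) & 0x7FF;
        int fprf;

        if (exp == 0x7FF) {                     /* NaN/infinity: not exercised here */
            fprf = -1;
        } else if (exp == 0 && (bits << 12) == 0) {
            fprf = isneg ? 0x12 : 0x02;         /* +/- zero */
        } else if (exp == 0) {
            fprf = isneg ? 0x18 : 0x14;         /* +/- denormal */
        } else {
            fprf = isneg ? 0x08 : 0x04;         /* +/- normalized */
        }
        assert(fprf == 0x08);
        return 0;
    }
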
+/* Floating-point invalid operations exception */
+static inline __attribute__((__always_inline__))
+uint64_t float_invalid_op_excp(CPUPPCState *env, int op, int set_fpcc)
+{
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
+ uint64_t ret = 0;
+ int ve;
+
+ ve = fpscr_ve;
+ switch (op) {
+ case POWERPC_EXCP_FP_VXSNAN:
+ env->fpscr |= 1 << FPSCR_VXSNAN;
+ break;
+ case POWERPC_EXCP_FP_VXSOFT:
+ env->fpscr |= 1 << FPSCR_VXSOFT;
+ break;
+ case POWERPC_EXCP_FP_VXISI:
+ /* Magnitude subtraction of infinities */
+ env->fpscr |= 1 << FPSCR_VXISI;
+ goto update_arith;
+ case POWERPC_EXCP_FP_VXIDI:
+ /* Division of infinity by infinity */
+ env->fpscr |= 1 << FPSCR_VXIDI;
+ goto update_arith;
+ case POWERPC_EXCP_FP_VXZDZ:
+ /* Division of zero by zero */
+ env->fpscr |= 1 << FPSCR_VXZDZ;
+ goto update_arith;
+ case POWERPC_EXCP_FP_VXIMZ:
+ /* Multiplication of zero by infinity */
+ env->fpscr |= 1 << FPSCR_VXIMZ;
+ goto update_arith;
+ case POWERPC_EXCP_FP_VXVC:
+ /* Ordered comparison of NaN */
+ env->fpscr |= 1 << FPSCR_VXVC;
+ if (set_fpcc) {
+ env->fpscr &= ~(0xF << FPSCR_FPCC);
+ env->fpscr |= 0x11 << FPSCR_FPCC;
+ }
+ /* We must update the target FPR before raising the exception */
+ if (ve != 0) {
+ cs->exception_index = POWERPC_EXCP_PROGRAM;
+ env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
+ /* Update the floating-point enabled exception summary */
+ env->fpscr |= 1 << FPSCR_FEX;
+            /* Exception is deferred */
+ ve = 0;
+ }
+ break;
+ case POWERPC_EXCP_FP_VXSQRT:
+ /* Square root of a negative number */
+ env->fpscr |= 1 << FPSCR_VXSQRT;
+ update_arith:
+ env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
+ if (ve == 0) {
+ /* Set the result to quiet NaN */
+ ret = 0x7FF8000000000000ULL;
+ if (set_fpcc) {
+ env->fpscr &= ~(0xF << FPSCR_FPCC);
+ env->fpscr |= 0x11 << FPSCR_FPCC;
+ }
+ }
+ break;
+ case POWERPC_EXCP_FP_VXCVI:
+ /* Invalid conversion */
+ env->fpscr |= 1 << FPSCR_VXCVI;
+ env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
+ if (ve == 0) {
+ /* Set the result to quiet NaN */
+ ret = 0x7FF8000000000000ULL;
+ if (set_fpcc) {
+ env->fpscr &= ~(0xF << FPSCR_FPCC);
+ env->fpscr |= 0x11 << FPSCR_FPCC;
+ }
+ }
+ break;
+ }
+ /* Update the floating-point invalid operation summary */
+ env->fpscr |= 1 << FPSCR_VX;
+ /* Update the floating-point exception summary */
+ env->fpscr |= FP_FX;
+ if (ve != 0) {
+ /* Update the floating-point enabled exception summary */
+ env->fpscr |= 1 << FPSCR_FEX;
+ if (msr_fe0 != 0 || msr_fe1 != 0) {
+ /* GETPC() works here because this is inline */
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_FP | op, GETPC());
+ }
+ }
+ return ret;
+}
+
+static inline void float_zero_divide_excp(CPUPPCState *env, uintptr_t raddr)
+{
+ env->fpscr |= 1 << FPSCR_ZX;
+ env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
+ /* Update the floating-point exception summary */
+ env->fpscr |= FP_FX;
+ if (fpscr_ze != 0) {
+ /* Update the floating-point enabled exception summary */
+ env->fpscr |= 1 << FPSCR_FEX;
+ if (msr_fe0 != 0 || msr_fe1 != 0) {
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX,
+ raddr);
+ }
+ }
+}
+
+static inline void float_overflow_excp(CPUPPCState *env)
+{
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
+
+ env->fpscr |= 1 << FPSCR_OX;
+ /* Update the floating-point exception summary */
+ env->fpscr |= FP_FX;
+ if (fpscr_oe != 0) {
+ /* XXX: should adjust the result */
+ /* Update the floating-point enabled exception summary */
+ env->fpscr |= 1 << FPSCR_FEX;
+ /* We must update the target FPR before raising the exception */
+ cs->exception_index = POWERPC_EXCP_PROGRAM;
+ env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
+ } else {
+ env->fpscr |= 1 << FPSCR_XX;
+ env->fpscr |= 1 << FPSCR_FI;
+ }
+}
+
+static inline void float_underflow_excp(CPUPPCState *env)
+{
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
+
+ env->fpscr |= 1 << FPSCR_UX;
+ /* Update the floating-point exception summary */
+ env->fpscr |= FP_FX;
+ if (fpscr_ue != 0) {
+ /* XXX: should adjust the result */
+ /* Update the floating-point enabled exception summary */
+ env->fpscr |= 1 << FPSCR_FEX;
+ /* We must update the target FPR before raising the exception */
+ cs->exception_index = POWERPC_EXCP_PROGRAM;
+ env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
+ }
+}
+
+static inline void float_inexact_excp(CPUPPCState *env)
+{
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
+
+ env->fpscr |= 1 << FPSCR_XX;
+ /* Update the floating-point exception summary */
+ env->fpscr |= FP_FX;
+ if (fpscr_xe != 0) {
+ /* Update the floating-point enabled exception summary */
+ env->fpscr |= 1 << FPSCR_FEX;
+ /* We must update the target FPR before raising the exception */
+ cs->exception_index = POWERPC_EXCP_PROGRAM;
+ env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
+ }
+}
+
+static inline void fpscr_set_rounding_mode(CPUPPCState *env)
+{
+ int rnd_type;
+
+ /* Set rounding mode */
+ switch (fpscr_rn) {
+ case 0:
+ /* Best approximation (round to nearest) */
+ rnd_type = float_round_nearest_even;
+ break;
+ case 1:
+ /* Smaller magnitude (round toward zero) */
+ rnd_type = float_round_to_zero;
+ break;
+ case 2:
+ /* Round toward +infinity */
+ rnd_type = float_round_up;
+ break;
+ default:
+ case 3:
+ /* Round toward -infinity */
+ rnd_type = float_round_down;
+ break;
+ }
+ set_float_rounding_mode(rnd_type, &env->fp_status);
+}
+
+void helper_fpscr_clrbit(CPUPPCState *env, uint32_t bit)
+{
+ int prev;
+
+ prev = (env->fpscr >> bit) & 1;
+ env->fpscr &= ~(1 << bit);
+ if (prev == 1) {
+ switch (bit) {
+ case FPSCR_RN1:
+ case FPSCR_RN:
+ fpscr_set_rounding_mode(env);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit)
+{
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
+ int prev;
+
+ prev = (env->fpscr >> bit) & 1;
+ env->fpscr |= 1 << bit;
+ if (prev == 0) {
+ switch (bit) {
+ case FPSCR_VX:
+ env->fpscr |= FP_FX;
+ if (fpscr_ve) {
+ goto raise_ve;
+ }
+ break;
+ case FPSCR_OX:
+ env->fpscr |= FP_FX;
+ if (fpscr_oe) {
+ goto raise_oe;
+ }
+ break;
+ case FPSCR_UX:
+ env->fpscr |= FP_FX;
+ if (fpscr_ue) {
+ goto raise_ue;
+ }
+ break;
+ case FPSCR_ZX:
+ env->fpscr |= FP_FX;
+ if (fpscr_ze) {
+ goto raise_ze;
+ }
+ break;
+ case FPSCR_XX:
+ env->fpscr |= FP_FX;
+ if (fpscr_xe) {
+ goto raise_xe;
+ }
+ break;
+ case FPSCR_VXSNAN:
+ case FPSCR_VXISI:
+ case FPSCR_VXIDI:
+ case FPSCR_VXZDZ:
+ case FPSCR_VXIMZ:
+ case FPSCR_VXVC:
+ case FPSCR_VXSOFT:
+ case FPSCR_VXSQRT:
+ case FPSCR_VXCVI:
+ env->fpscr |= 1 << FPSCR_VX;
+ env->fpscr |= FP_FX;
+ if (fpscr_ve != 0) {
+ goto raise_ve;
+ }
+ break;
+ case FPSCR_VE:
+ if (fpscr_vx != 0) {
+ raise_ve:
+ env->error_code = POWERPC_EXCP_FP;
+ if (fpscr_vxsnan) {
+ env->error_code |= POWERPC_EXCP_FP_VXSNAN;
+ }
+ if (fpscr_vxisi) {
+ env->error_code |= POWERPC_EXCP_FP_VXISI;
+ }
+ if (fpscr_vxidi) {
+ env->error_code |= POWERPC_EXCP_FP_VXIDI;
+ }
+ if (fpscr_vxzdz) {
+ env->error_code |= POWERPC_EXCP_FP_VXZDZ;
+ }
+ if (fpscr_vximz) {
+ env->error_code |= POWERPC_EXCP_FP_VXIMZ;
+ }
+ if (fpscr_vxvc) {
+ env->error_code |= POWERPC_EXCP_FP_VXVC;
+ }
+ if (fpscr_vxsoft) {
+ env->error_code |= POWERPC_EXCP_FP_VXSOFT;
+ }
+ if (fpscr_vxsqrt) {
+ env->error_code |= POWERPC_EXCP_FP_VXSQRT;
+ }
+ if (fpscr_vxcvi) {
+ env->error_code |= POWERPC_EXCP_FP_VXCVI;
+ }
+ goto raise_excp;
+ }
+ break;
+ case FPSCR_OE:
+ if (fpscr_ox != 0) {
+ raise_oe:
+ env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
+ goto raise_excp;
+ }
+ break;
+ case FPSCR_UE:
+ if (fpscr_ux != 0) {
+ raise_ue:
+ env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
+ goto raise_excp;
+ }
+ break;
+ case FPSCR_ZE:
+ if (fpscr_zx != 0) {
+ raise_ze:
+ env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
+ goto raise_excp;
+ }
+ break;
+ case FPSCR_XE:
+ if (fpscr_xx != 0) {
+ raise_xe:
+ env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
+ goto raise_excp;
+ }
+ break;
+ case FPSCR_RN1:
+ case FPSCR_RN:
+ fpscr_set_rounding_mode(env);
+ break;
+ default:
+ break;
+ raise_excp:
+ /* Update the floating-point enabled exception summary */
+ env->fpscr |= 1 << FPSCR_FEX;
+ /* We have to update Rc1 before raising the exception */
+ cs->exception_index = POWERPC_EXCP_PROGRAM;
+ break;
+ }
+ }
+}
+
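+/* Store to FPSCR under a nibble mask: bit i of 'mask' selects 4-bit
+ * nibble i of the FPSCR to be updated from 'arg'.  VX and FEX are never
+ * copied directly; they are recomputed from the exception and enable
+ * bits below.
+ */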
+void helper_store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
+{
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
+ target_ulong prev, new;
+ int i;
+
+ prev = env->fpscr;
+ new = (target_ulong)arg;
+ new &= ~0x60000000LL;
+ new |= prev & 0x60000000LL;
+ for (i = 0; i < sizeof(target_ulong) * 2; i++) {
+ if (mask & (1 << i)) {
+ env->fpscr &= ~(0xFLL << (4 * i));
+ env->fpscr |= new & (0xFLL << (4 * i));
+ }
+ }
+ /* Update VX and FEX */
+ if (fpscr_ix != 0) {
+ env->fpscr |= 1 << FPSCR_VX;
+ } else {
+ env->fpscr &= ~(1 << FPSCR_VX);
+ }
+ if ((fpscr_ex & fpscr_eex) != 0) {
+ env->fpscr |= 1 << FPSCR_FEX;
+ cs->exception_index = POWERPC_EXCP_PROGRAM;
+ /* XXX: we should compute it properly */
+ env->error_code = POWERPC_EXCP_FP;
+ } else {
+ env->fpscr &= ~(1 << FPSCR_FEX);
+ }
+ fpscr_set_rounding_mode(env);
+}
+
+void store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask)
+{
+ helper_store_fpscr(env, arg, mask);
+}
+
+static void do_float_check_status(CPUPPCState *env, uintptr_t raddr)
+{
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
+ int status = get_float_exception_flags(&env->fp_status);
+
+ if (status & float_flag_divbyzero) {
+ float_zero_divide_excp(env, raddr);
+ } else if (status & float_flag_overflow) {
+ float_overflow_excp(env);
+ } else if (status & float_flag_underflow) {
+ float_underflow_excp(env);
+ } else if (status & float_flag_inexact) {
+ float_inexact_excp(env);
+ }
+
+ if (cs->exception_index == POWERPC_EXCP_PROGRAM &&
+ (env->error_code & POWERPC_EXCP_FP)) {
+ /* Deferred floating-point exception after target FPR update */
+ if (msr_fe0 != 0 || msr_fe1 != 0) {
+ raise_exception_err_ra(env, cs->exception_index,
+ env->error_code, raddr);
+ }
+ }
+}
+
+static inline __attribute__((__always_inline__))
+void float_check_status(CPUPPCState *env)
+{
+ /* GETPC() works here because this is inline */
+ do_float_check_status(env, GETPC());
+}
+
+void helper_float_check_status(CPUPPCState *env)
+{
+ do_float_check_status(env, GETPC());
+}
+
+void helper_reset_fpstatus(CPUPPCState *env)
+{
+ set_float_exception_flags(0, &env->fp_status);
+}
+
+/* fadd - fadd. */
+uint64_t helper_fadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
+{
+ CPU_DoubleU farg1, farg2;
+
+ farg1.ll = arg1;
+ farg2.ll = arg2;
+
+ if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
+ float64_is_neg(farg1.d) != float64_is_neg(farg2.d))) {
+ /* Magnitude subtraction of infinities */
+ farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
+ } else {
+ if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
+ float64_is_signaling_nan(farg2.d, &env->fp_status))) {
+ /* sNaN addition */
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+ }
+ farg1.d = float64_add(farg1.d, farg2.d, &env->fp_status);
+ }
+
+ return farg1.ll;
+}
+
+/* fsub - fsub. */
+uint64_t helper_fsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
+{
+ CPU_DoubleU farg1, farg2;
+
+ farg1.ll = arg1;
+ farg2.ll = arg2;
+
+ if (unlikely(float64_is_infinity(farg1.d) && float64_is_infinity(farg2.d) &&
+ float64_is_neg(farg1.d) == float64_is_neg(farg2.d))) {
+ /* Magnitude subtraction of infinities */
+ farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
+ } else {
+ if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
+ float64_is_signaling_nan(farg2.d, &env->fp_status))) {
+ /* sNaN subtraction */
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+ }
+ farg1.d = float64_sub(farg1.d, farg2.d, &env->fp_status);
+ }
+
+ return farg1.ll;
+}
+
+/* fmul - fmul. */
+uint64_t helper_fmul(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
+{
+ CPU_DoubleU farg1, farg2;
+
+ farg1.ll = arg1;
+ farg2.ll = arg2;
+
+ if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
+ (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
+ /* Multiplication of zero by infinity */
+ farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
+ } else {
+ if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
+ float64_is_signaling_nan(farg2.d, &env->fp_status))) {
+ /* sNaN multiplication */
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+ }
+ farg1.d = float64_mul(farg1.d, farg2.d, &env->fp_status);
+ }
+
+ return farg1.ll;
+}
+
+/* fdiv - fdiv. */
+uint64_t helper_fdiv(CPUPPCState *env, uint64_t arg1, uint64_t arg2)
+{
+ CPU_DoubleU farg1, farg2;
+
+ farg1.ll = arg1;
+ farg2.ll = arg2;
+
+ if (unlikely(float64_is_infinity(farg1.d) &&
+ float64_is_infinity(farg2.d))) {
+ /* Division of infinity by infinity */
+ farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, 1);
+ } else if (unlikely(float64_is_zero(farg1.d) && float64_is_zero(farg2.d))) {
+ /* Division of zero by zero */
+ farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, 1);
+ } else {
+ if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
+ float64_is_signaling_nan(farg2.d, &env->fp_status))) {
+ /* sNaN division */
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+ }
+ farg1.d = float64_div(farg1.d, farg2.d, &env->fp_status);
+ }
+
+ return farg1.ll;
+}
+
+
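+/* Float-to-integer conversions (fctiw, fctid and friends).  'cvt' names
+ * the softfloat conversion routine and 'nanval' is the value substituted
+ * when the source is a NaN.
+ */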
+#define FPU_FCTI(op, cvt, nanval) \
+uint64_t helper_##op(CPUPPCState *env, uint64_t arg) \
+{ \
+ CPU_DoubleU farg; \
+ \
+ farg.ll = arg; \
+ farg.ll = float64_to_##cvt(farg.d, &env->fp_status); \
+ \
+ if (unlikely(env->fp_status.float_exception_flags)) { \
+ if (float64_is_any_nan(arg)) { \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1); \
+ if (float64_is_signaling_nan(arg, &env->fp_status)) { \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1); \
+ } \
+ farg.ll = nanval; \
+ } else if (env->fp_status.float_exception_flags & \
+ float_flag_invalid) { \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 1); \
+ } \
+ float_check_status(env); \
+ } \
+ return farg.ll; \
+ }
+
+FPU_FCTI(fctiw, int32, 0x80000000U)
+FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U)
+FPU_FCTI(fctiwu, uint32, 0x00000000U)
+FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U)
+FPU_FCTI(fctid, int64, 0x8000000000000000ULL)
+FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL)
+FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL)
+FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL)
+
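+/* Integer-to-float conversions (fcfid and friends).  'cvtr' is the
+ * softfloat conversion routine; when 'is_single' is set the value is
+ * converted to float32 first and then widened back to float64, so the
+ * result is correctly rounded to single precision.
+ */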
+#define FPU_FCFI(op, cvtr, is_single) \
+uint64_t helper_##op(CPUPPCState *env, uint64_t arg) \
+{ \
+ CPU_DoubleU farg; \
+ \
+ if (is_single) { \
+ float32 tmp = cvtr(arg, &env->fp_status); \
+ farg.d = float32_to_float64(tmp, &env->fp_status); \
+ } else { \
+ farg.d = cvtr(arg, &env->fp_status); \
+ } \
+ float_check_status(env); \
+ return farg.ll; \
+}
+
+FPU_FCFI(fcfid, int64_to_float64, 0)
+FPU_FCFI(fcfids, int64_to_float32, 1)
+FPU_FCFI(fcfidu, uint64_to_float64, 0)
+FPU_FCFI(fcfidus, uint64_to_float32, 1)
+
+static inline uint64_t do_fri(CPUPPCState *env, uint64_t arg,
+ int rounding_mode)
+{
+ CPU_DoubleU farg;
+
+ farg.ll = arg;
+
+ if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
+ /* sNaN round */
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+ farg.ll = arg | 0x0008000000000000ULL;
+ } else {
+ int inexact = get_float_exception_flags(&env->fp_status) &
+ float_flag_inexact;
+ set_float_rounding_mode(rounding_mode, &env->fp_status);
+ farg.ll = float64_round_to_int(farg.d, &env->fp_status);
+ /* Restore rounding mode from FPSCR */
+ fpscr_set_rounding_mode(env);
+
+ /* fri* does not set FPSCR[XX] */
+ if (!inexact) {
+ env->fp_status.float_exception_flags &= ~float_flag_inexact;
+ }
+ }
+ float_check_status(env);
+ return farg.ll;
+}
+
+uint64_t helper_frin(CPUPPCState *env, uint64_t arg)
+{
+ return do_fri(env, arg, float_round_ties_away);
+}
+
+uint64_t helper_friz(CPUPPCState *env, uint64_t arg)
+{
+ return do_fri(env, arg, float_round_to_zero);
+}
+
+uint64_t helper_frip(CPUPPCState *env, uint64_t arg)
+{
+ return do_fri(env, arg, float_round_up);
+}
+
+uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
+{
+ return do_fri(env, arg, float_round_down);
+}
+
+/* fmadd - fmadd. */
+uint64_t helper_fmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
+ uint64_t arg3)
+{
+ CPU_DoubleU farg1, farg2, farg3;
+
+ farg1.ll = arg1;
+ farg2.ll = arg2;
+ farg3.ll = arg3;
+
+ if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
+ (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
+ /* Multiplication of zero by infinity */
+ farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
+ } else {
+ if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
+ float64_is_signaling_nan(farg2.d, &env->fp_status) ||
+ float64_is_signaling_nan(farg3.d, &env->fp_status))) {
+ /* sNaN operation */
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+ }
+ /* This is the way the PowerPC specification defines it */
+ float128 ft0_128, ft1_128;
+
+ ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
+ ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
+ ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
+ if (unlikely(float128_is_infinity(ft0_128) &&
+ float64_is_infinity(farg3.d) &&
+ float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
+ /* Magnitude subtraction of infinities */
+ farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
+ } else {
+ ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
+ ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
+ farg1.d = float128_to_float64(ft0_128, &env->fp_status);
+ }
+ }
+
+ return farg1.ll;
+}
+
+/* fmsub - fmsub. */
+uint64_t helper_fmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
+ uint64_t arg3)
+{
+ CPU_DoubleU farg1, farg2, farg3;
+
+ farg1.ll = arg1;
+ farg2.ll = arg2;
+ farg3.ll = arg3;
+
+ if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
+ (float64_is_zero(farg1.d) &&
+ float64_is_infinity(farg2.d)))) {
+ /* Multiplication of zero by infinity */
+ farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
+ } else {
+ if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
+ float64_is_signaling_nan(farg2.d, &env->fp_status) ||
+ float64_is_signaling_nan(farg3.d, &env->fp_status))) {
+ /* sNaN operation */
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+ }
+ /* This is the way the PowerPC specification defines it */
+ float128 ft0_128, ft1_128;
+
+ ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
+ ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
+ ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
+ if (unlikely(float128_is_infinity(ft0_128) &&
+ float64_is_infinity(farg3.d) &&
+ float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
+ /* Magnitude subtraction of infinities */
+ farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
+ } else {
+ ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
+ ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
+ farg1.d = float128_to_float64(ft0_128, &env->fp_status);
+ }
+ }
+ return farg1.ll;
+}
+
+/* fnmadd - fnmadd. */
+uint64_t helper_fnmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
+ uint64_t arg3)
+{
+ CPU_DoubleU farg1, farg2, farg3;
+
+ farg1.ll = arg1;
+ farg2.ll = arg2;
+ farg3.ll = arg3;
+
+ if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
+ (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
+ /* Multiplication of zero by infinity */
+ farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
+ } else {
+ if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
+ float64_is_signaling_nan(farg2.d, &env->fp_status) ||
+ float64_is_signaling_nan(farg3.d, &env->fp_status))) {
+ /* sNaN operation */
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+ }
+ /* This is the way the PowerPC specification defines it */
+ float128 ft0_128, ft1_128;
+
+ ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
+ ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
+ ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
+ if (unlikely(float128_is_infinity(ft0_128) &&
+ float64_is_infinity(farg3.d) &&
+ float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
+ /* Magnitude subtraction of infinities */
+ farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
+ } else {
+ ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
+ ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
+ farg1.d = float128_to_float64(ft0_128, &env->fp_status);
+ }
+ if (likely(!float64_is_any_nan(farg1.d))) {
+ farg1.d = float64_chs(farg1.d);
+ }
+ }
+ return farg1.ll;
+}
+
+/* fnmsub - fnmsub. */
+uint64_t helper_fnmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
+ uint64_t arg3)
+{
+ CPU_DoubleU farg1, farg2, farg3;
+
+ farg1.ll = arg1;
+ farg2.ll = arg2;
+ farg3.ll = arg3;
+
+ if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
+ (float64_is_zero(farg1.d) &&
+ float64_is_infinity(farg2.d)))) {
+ /* Multiplication of zero by infinity */
+ farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
+ } else {
+ if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
+ float64_is_signaling_nan(farg2.d, &env->fp_status) ||
+ float64_is_signaling_nan(farg3.d, &env->fp_status))) {
+ /* sNaN operation */
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+ }
+ /* This is the way the PowerPC specification defines it */
+ float128 ft0_128, ft1_128;
+
+ ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
+ ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
+ ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
+ if (unlikely(float128_is_infinity(ft0_128) &&
+ float64_is_infinity(farg3.d) &&
+ float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
+ /* Magnitude subtraction of infinities */
+ farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
+ } else {
+ ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
+ ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
+ farg1.d = float128_to_float64(ft0_128, &env->fp_status);
+ }
+ if (likely(!float64_is_any_nan(farg1.d))) {
+ farg1.d = float64_chs(farg1.d);
+ }
+ }
+ return farg1.ll;
+}
+
+/* frsp - frsp. */
+uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
+{
+ CPU_DoubleU farg;
+ float32 f32;
+
+ farg.ll = arg;
+
+ if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
+ /* sNaN round to single precision */
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+ }
+ f32 = float64_to_float32(farg.d, &env->fp_status);
+ farg.d = float32_to_float64(f32, &env->fp_status);
+
+ return farg.ll;
+}
+
+/* fsqrt - fsqrt. */
+uint64_t helper_fsqrt(CPUPPCState *env, uint64_t arg)
+{
+ CPU_DoubleU farg;
+
+ farg.ll = arg;
+
+ if (unlikely(float64_is_any_nan(farg.d))) {
+ if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
+ /* sNaN square root */
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+ farg.ll = float64_snan_to_qnan(farg.ll);
+ }
+ } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
+ /* Square root of a negative nonzero number */
+ farg.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
+ } else {
+ farg.d = float64_sqrt(farg.d, &env->fp_status);
+ }
+ return farg.ll;
+}
+
+/* fre - fre. */
+uint64_t helper_fre(CPUPPCState *env, uint64_t arg)
+{
+ CPU_DoubleU farg;
+
+ farg.ll = arg;
+
+ if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
+ /* sNaN reciprocal */
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+ }
+ farg.d = float64_div(float64_one, farg.d, &env->fp_status);
+ return farg.ll;
+}
+
+/* fres - fres. */
+uint64_t helper_fres(CPUPPCState *env, uint64_t arg)
+{
+ CPU_DoubleU farg;
+ float32 f32;
+
+ farg.ll = arg;
+
+ if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
+ /* sNaN reciprocal */
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+ }
+ farg.d = float64_div(float64_one, farg.d, &env->fp_status);
+ f32 = float64_to_float32(farg.d, &env->fp_status);
+ farg.d = float32_to_float64(f32, &env->fp_status);
+
+ return farg.ll;
+}
+
+/* frsqrte - frsqrte. */
+uint64_t helper_frsqrte(CPUPPCState *env, uint64_t arg)
+{
+ CPU_DoubleU farg;
+
+ farg.ll = arg;
+
+ if (unlikely(float64_is_any_nan(farg.d))) {
+ if (unlikely(float64_is_signaling_nan(farg.d, &env->fp_status))) {
+ /* sNaN reciprocal square root */
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+ farg.ll = float64_snan_to_qnan(farg.ll);
+ }
+ } else if (unlikely(float64_is_neg(farg.d) && !float64_is_zero(farg.d))) {
+ /* Reciprocal square root of a negative nonzero number */
+ farg.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, 1);
+ } else {
+ farg.d = float64_sqrt(farg.d, &env->fp_status);
+ farg.d = float64_div(float64_one, farg.d, &env->fp_status);
+ }
+
+ return farg.ll;
+}
+
+/* fsel - fsel. */
+uint64_t helper_fsel(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
+ uint64_t arg3)
+{
+ CPU_DoubleU farg1;
+
+ farg1.ll = arg1;
+
+ if ((!float64_is_neg(farg1.d) || float64_is_zero(farg1.d)) &&
+ !float64_is_any_nan(farg1.d)) {
+ return arg2;
+ } else {
+ return arg3;
+ }
+}
+
+uint32_t helper_ftdiv(uint64_t fra, uint64_t frb)
+{
+ int fe_flag = 0;
+ int fg_flag = 0;
+
+ if (unlikely(float64_is_infinity(fra) ||
+ float64_is_infinity(frb) ||
+ float64_is_zero(frb))) {
+ fe_flag = 1;
+ fg_flag = 1;
+ } else {
+ int e_a = ppc_float64_get_unbiased_exp(fra);
+ int e_b = ppc_float64_get_unbiased_exp(frb);
+
+ if (unlikely(float64_is_any_nan(fra) ||
+ float64_is_any_nan(frb))) {
+ fe_flag = 1;
+ } else if ((e_b <= -1022) || (e_b >= 1021)) {
+ fe_flag = 1;
+ } else if (!float64_is_zero(fra) &&
+ (((e_a - e_b) >= 1023) ||
+ ((e_a - e_b) <= -1021) ||
+ (e_a <= -970))) {
+ fe_flag = 1;
+ }
+
+ if (unlikely(float64_is_zero_or_denormal(frb))) {
+ /* XB is not zero because of the above check and */
+ /* so must be denormalized. */
+ fg_flag = 1;
+ }
+ }
+
+ return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
+}
+
+uint32_t helper_ftsqrt(uint64_t frb)
+{
+ int fe_flag = 0;
+ int fg_flag = 0;
+
+ if (unlikely(float64_is_infinity(frb) || float64_is_zero(frb))) {
+ fe_flag = 1;
+ fg_flag = 1;
+ } else {
+ int e_b = ppc_float64_get_unbiased_exp(frb);
+
+ if (unlikely(float64_is_any_nan(frb))) {
+ fe_flag = 1;
+ } else if (unlikely(float64_is_zero(frb))) {
+ fe_flag = 1;
+ } else if (unlikely(float64_is_neg(frb))) {
+ fe_flag = 1;
+ } else if (!float64_is_zero(frb) && (e_b <= (-1022+52))) {
+ fe_flag = 1;
+ }
+
+ if (unlikely(float64_is_zero_or_denormal(frb))) {
+ /* XB is not zero because of the above check and */
+ /* therefore must be denormalized. */
+ fg_flag = 1;
+ }
+ }
+
+ return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0);
+}
+
+void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
+ uint32_t crfD)
+{
+ CPU_DoubleU farg1, farg2;
+ uint32_t ret = 0;
+
+ farg1.ll = arg1;
+ farg2.ll = arg2;
+
+ if (unlikely(float64_is_any_nan(farg1.d) ||
+ float64_is_any_nan(farg2.d))) {
+ ret = 0x01UL;
+ } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
+ ret = 0x08UL;
+ } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
+ ret = 0x04UL;
+ } else {
+ ret = 0x02UL;
+ }
+
+ env->fpscr &= ~(0x0F << FPSCR_FPRF);
+ env->fpscr |= ret << FPSCR_FPRF;
+ env->crf[crfD] = ret;
+ if (unlikely(ret == 0x01UL
+ && (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
+ float64_is_signaling_nan(farg2.d, &env->fp_status)))) {
+ /* sNaN comparison */
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+ }
+}
+
+void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
+ uint32_t crfD)
+{
+ CPU_DoubleU farg1, farg2;
+ uint32_t ret = 0;
+
+ farg1.ll = arg1;
+ farg2.ll = arg2;
+
+ if (unlikely(float64_is_any_nan(farg1.d) ||
+ float64_is_any_nan(farg2.d))) {
+ ret = 0x01UL;
+ } else if (float64_lt(farg1.d, farg2.d, &env->fp_status)) {
+ ret = 0x08UL;
+ } else if (!float64_le(farg1.d, farg2.d, &env->fp_status)) {
+ ret = 0x04UL;
+ } else {
+ ret = 0x02UL;
+ }
+
+ env->fpscr &= ~(0x0F << FPSCR_FPRF);
+ env->fpscr |= ret << FPSCR_FPRF;
+ env->crf[crfD] = ret;
+ if (unlikely(ret == 0x01UL)) {
+ if (float64_is_signaling_nan(farg1.d, &env->fp_status) ||
+ float64_is_signaling_nan(farg2.d, &env->fp_status)) {
+ /* sNaN comparison */
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN |
+ POWERPC_EXCP_FP_VXVC, 1);
+ } else {
+ /* qNaN comparison */
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 1);
+ }
+ }
+}
+
+/* Single-precision floating-point conversions */
+static inline uint32_t efscfsi(CPUPPCState *env, uint32_t val)
+{
+ CPU_FloatU u;
+
+ u.f = int32_to_float32(val, &env->vec_status);
+
+ return u.l;
+}
+
+static inline uint32_t efscfui(CPUPPCState *env, uint32_t val)
+{
+ CPU_FloatU u;
+
+ u.f = uint32_to_float32(val, &env->vec_status);
+
+ return u.l;
+}
+
+static inline int32_t efsctsi(CPUPPCState *env, uint32_t val)
+{
+ CPU_FloatU u;
+
+ u.l = val;
+ /* NaNs are not treated the way IEEE 754 requires */
+ if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
+ return 0;
+ }
+
+ return float32_to_int32(u.f, &env->vec_status);
+}
+
+static inline uint32_t efsctui(CPUPPCState *env, uint32_t val)
+{
+ CPU_FloatU u;
+
+ u.l = val;
+ /* NaNs are not treated the way IEEE 754 requires */
+ if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
+ return 0;
+ }
+
+ return float32_to_uint32(u.f, &env->vec_status);
+}
+
+static inline uint32_t efsctsiz(CPUPPCState *env, uint32_t val)
+{
+ CPU_FloatU u;
+
+ u.l = val;
+ /* NaNs are not treated the way IEEE 754 requires */
+ if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
+ return 0;
+ }
+
+ return float32_to_int32_round_to_zero(u.f, &env->vec_status);
+}
+
+static inline uint32_t efsctuiz(CPUPPCState *env, uint32_t val)
+{
+ CPU_FloatU u;
+
+ u.l = val;
+ /* NaNs are not treated the way IEEE 754 requires */
+ if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
+ return 0;
+ }
+
+ return float32_to_uint32_round_to_zero(u.f, &env->vec_status);
+}
+
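+/* The *cfsf/*cfuf and *ctsf/*ctuf forms below convert between floats and
+ * 32-bit signed/unsigned fractions, i.e. the integer value scaled by 2^32.
+ */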
+static inline uint32_t efscfsf(CPUPPCState *env, uint32_t val)
+{
+ CPU_FloatU u;
+ float32 tmp;
+
+ u.f = int32_to_float32(val, &env->vec_status);
+ tmp = int64_to_float32(1ULL << 32, &env->vec_status);
+ u.f = float32_div(u.f, tmp, &env->vec_status);
+
+ return u.l;
+}
+
+static inline uint32_t efscfuf(CPUPPCState *env, uint32_t val)
+{
+ CPU_FloatU u;
+ float32 tmp;
+
+ u.f = uint32_to_float32(val, &env->vec_status);
+ tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
+ u.f = float32_div(u.f, tmp, &env->vec_status);
+
+ return u.l;
+}
+
+static inline uint32_t efsctsf(CPUPPCState *env, uint32_t val)
+{
+ CPU_FloatU u;
+ float32 tmp;
+
+ u.l = val;
+ /* NaNs are not treated the way IEEE 754 requires */
+ if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
+ return 0;
+ }
+ tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
+ u.f = float32_mul(u.f, tmp, &env->vec_status);
+
+ return float32_to_int32(u.f, &env->vec_status);
+}
+
+static inline uint32_t efsctuf(CPUPPCState *env, uint32_t val)
+{
+ CPU_FloatU u;
+ float32 tmp;
+
+ u.l = val;
+ /* NaNs are not treated the way IEEE 754 requires */
+ if (unlikely(float32_is_quiet_nan(u.f, &env->vec_status))) {
+ return 0;
+ }
+ tmp = uint64_to_float32(1ULL << 32, &env->vec_status);
+ u.f = float32_mul(u.f, tmp, &env->vec_status);
+
+ return float32_to_uint32(u.f, &env->vec_status);
+}
+
+#define HELPER_SPE_SINGLE_CONV(name) \
+ uint32_t helper_e##name(CPUPPCState *env, uint32_t val) \
+ { \
+ return e##name(env, val); \
+ }
+/* efscfsi */
+HELPER_SPE_SINGLE_CONV(fscfsi);
+/* efscfui */
+HELPER_SPE_SINGLE_CONV(fscfui);
+/* efscfuf */
+HELPER_SPE_SINGLE_CONV(fscfuf);
+/* efscfsf */
+HELPER_SPE_SINGLE_CONV(fscfsf);
+/* efsctsi */
+HELPER_SPE_SINGLE_CONV(fsctsi);
+/* efsctui */
+HELPER_SPE_SINGLE_CONV(fsctui);
+/* efsctsiz */
+HELPER_SPE_SINGLE_CONV(fsctsiz);
+/* efsctuiz */
+HELPER_SPE_SINGLE_CONV(fsctuiz);
+/* efsctsf */
+HELPER_SPE_SINGLE_CONV(fsctsf);
+/* efsctuf */
+HELPER_SPE_SINGLE_CONV(fsctuf);
+
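+/* The vector (ev*) forms apply the scalar conversion independently to the
+ * upper and lower 32-bit halves of the 64-bit operand.
+ */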
+#define HELPER_SPE_VECTOR_CONV(name) \
+ uint64_t helper_ev##name(CPUPPCState *env, uint64_t val) \
+ { \
+ return ((uint64_t)e##name(env, val >> 32) << 32) | \
+ (uint64_t)e##name(env, val); \
+ }
+/* evfscfsi */
+HELPER_SPE_VECTOR_CONV(fscfsi);
+/* evfscfui */
+HELPER_SPE_VECTOR_CONV(fscfui);
+/* evfscfuf */
+HELPER_SPE_VECTOR_CONV(fscfuf);
+/* evfscfsf */
+HELPER_SPE_VECTOR_CONV(fscfsf);
+/* evfsctsi */
+HELPER_SPE_VECTOR_CONV(fsctsi);
+/* evfsctui */
+HELPER_SPE_VECTOR_CONV(fsctui);
+/* evfsctsiz */
+HELPER_SPE_VECTOR_CONV(fsctsiz);
+/* evfsctuiz */
+HELPER_SPE_VECTOR_CONV(fsctuiz);
+/* evfsctsf */
+HELPER_SPE_VECTOR_CONV(fsctsf);
+/* evfsctuf */
+HELPER_SPE_VECTOR_CONV(fsctuf);
+
+/* Single-precision floating-point arithmetic */
+static inline uint32_t efsadd(CPUPPCState *env, uint32_t op1, uint32_t op2)
+{
+ CPU_FloatU u1, u2;
+
+ u1.l = op1;
+ u2.l = op2;
+ u1.f = float32_add(u1.f, u2.f, &env->vec_status);
+ return u1.l;
+}
+
+static inline uint32_t efssub(CPUPPCState *env, uint32_t op1, uint32_t op2)
+{
+ CPU_FloatU u1, u2;
+
+ u1.l = op1;
+ u2.l = op2;
+ u1.f = float32_sub(u1.f, u2.f, &env->vec_status);
+ return u1.l;
+}
+
+static inline uint32_t efsmul(CPUPPCState *env, uint32_t op1, uint32_t op2)
+{
+ CPU_FloatU u1, u2;
+
+ u1.l = op1;
+ u2.l = op2;
+ u1.f = float32_mul(u1.f, u2.f, &env->vec_status);
+ return u1.l;
+}
+
+static inline uint32_t efsdiv(CPUPPCState *env, uint32_t op1, uint32_t op2)
+{
+ CPU_FloatU u1, u2;
+
+ u1.l = op1;
+ u2.l = op2;
+ u1.f = float32_div(u1.f, u2.f, &env->vec_status);
+ return u1.l;
+}
+
+#define HELPER_SPE_SINGLE_ARITH(name) \
+ uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
+ { \
+ return e##name(env, op1, op2); \
+ }
+/* efsadd */
+HELPER_SPE_SINGLE_ARITH(fsadd);
+/* efssub */
+HELPER_SPE_SINGLE_ARITH(fssub);
+/* efsmul */
+HELPER_SPE_SINGLE_ARITH(fsmul);
+/* efsdiv */
+HELPER_SPE_SINGLE_ARITH(fsdiv);
+
+#define HELPER_SPE_VECTOR_ARITH(name) \
+ uint64_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
+ { \
+ return ((uint64_t)e##name(env, op1 >> 32, op2 >> 32) << 32) | \
+ (uint64_t)e##name(env, op1, op2); \
+ }
+/* evfsadd */
+HELPER_SPE_VECTOR_ARITH(fsadd);
+/* evfssub */
+HELPER_SPE_VECTOR_ARITH(fssub);
+/* evfsmul */
+HELPER_SPE_VECTOR_ARITH(fsmul);
+/* evfsdiv */
+HELPER_SPE_VECTOR_ARITH(fsdiv);
+
+/* Single-precision floating-point comparisons */
+static inline uint32_t efscmplt(CPUPPCState *env, uint32_t op1, uint32_t op2)
+{
+ CPU_FloatU u1, u2;
+
+ u1.l = op1;
+ u2.l = op2;
+ return float32_lt(u1.f, u2.f, &env->vec_status) ? 4 : 0;
+}
+
+static inline uint32_t efscmpgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
+{
+ CPU_FloatU u1, u2;
+
+ u1.l = op1;
+ u2.l = op2;
+ return float32_le(u1.f, u2.f, &env->vec_status) ? 0 : 4;
+}
+
+static inline uint32_t efscmpeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
+{
+ CPU_FloatU u1, u2;
+
+ u1.l = op1;
+ u2.l = op2;
+ return float32_eq(u1.f, u2.f, &env->vec_status) ? 4 : 0;
+}
+
+static inline uint32_t efststlt(CPUPPCState *env, uint32_t op1, uint32_t op2)
+{
+ /* XXX: TODO: ignore special values (NaN, infinities, ...) */
+ return efscmplt(env, op1, op2);
+}
+
+static inline uint32_t efststgt(CPUPPCState *env, uint32_t op1, uint32_t op2)
+{
+ /* XXX: TODO: ignore special values (NaN, infinities, ...) */
+ return efscmpgt(env, op1, op2);
+}
+
+static inline uint32_t efststeq(CPUPPCState *env, uint32_t op1, uint32_t op2)
+{
+ /* XXX: TODO: ignore special values (NaN, infinities, ...) */
+ return efscmpeq(env, op1, op2);
+}
+
+#define HELPER_SINGLE_SPE_CMP(name) \
+ uint32_t helper_e##name(CPUPPCState *env, uint32_t op1, uint32_t op2) \
+ { \
+ return e##name(env, op1, op2); \
+ }
+/* efststlt */
+HELPER_SINGLE_SPE_CMP(fststlt);
+/* efststgt */
+HELPER_SINGLE_SPE_CMP(fststgt);
+/* efststeq */
+HELPER_SINGLE_SPE_CMP(fststeq);
+/* efscmplt */
+HELPER_SINGLE_SPE_CMP(fscmplt);
+/* efscmpgt */
+HELPER_SINGLE_SPE_CMP(fscmpgt);
+/* efscmpeq */
+HELPER_SINGLE_SPE_CMP(fscmpeq);
+
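+/* Merge the per-element results of an SPE vector compare into one CR
+ * field: bit 3 = high element, bit 2 = low element, bit 1 = OR of both,
+ * bit 0 = AND of both.
+ */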
+static inline uint32_t evcmp_merge(int t0, int t1)
+{
+ return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
+}
+
+#define HELPER_VECTOR_SPE_CMP(name) \
+ uint32_t helper_ev##name(CPUPPCState *env, uint64_t op1, uint64_t op2) \
+ { \
+ return evcmp_merge(e##name(env, op1 >> 32, op2 >> 32), \
+ e##name(env, op1, op2)); \
+ }
+/* evfststlt */
+HELPER_VECTOR_SPE_CMP(fststlt);
+/* evfststgt */
+HELPER_VECTOR_SPE_CMP(fststgt);
+/* evfststeq */
+HELPER_VECTOR_SPE_CMP(fststeq);
+/* evfscmplt */
+HELPER_VECTOR_SPE_CMP(fscmplt);
+/* evfscmpgt */
+HELPER_VECTOR_SPE_CMP(fscmpgt);
+/* evfscmpeq */
+HELPER_VECTOR_SPE_CMP(fscmpeq);
+
+/* Double-precision floating-point conversion */
+uint64_t helper_efdcfsi(CPUPPCState *env, uint32_t val)
+{
+ CPU_DoubleU u;
+
+ u.d = int32_to_float64(val, &env->vec_status);
+
+ return u.ll;
+}
+
+uint64_t helper_efdcfsid(CPUPPCState *env, uint64_t val)
+{
+ CPU_DoubleU u;
+
+ u.d = int64_to_float64(val, &env->vec_status);
+
+ return u.ll;
+}
+
+uint64_t helper_efdcfui(CPUPPCState *env, uint32_t val)
+{
+ CPU_DoubleU u;
+
+ u.d = uint32_to_float64(val, &env->vec_status);
+
+ return u.ll;
+}
+
+uint64_t helper_efdcfuid(CPUPPCState *env, uint64_t val)
+{
+ CPU_DoubleU u;
+
+ u.d = uint64_to_float64(val, &env->vec_status);
+
+ return u.ll;
+}
+
+uint32_t helper_efdctsi(CPUPPCState *env, uint64_t val)
+{
+ CPU_DoubleU u;
+
+ u.ll = val;
+ /* NaNs are not treated the way IEEE 754 requires */
+ if (unlikely(float64_is_any_nan(u.d))) {
+ return 0;
+ }
+
+ return float64_to_int32(u.d, &env->vec_status);
+}
+
+uint32_t helper_efdctui(CPUPPCState *env, uint64_t val)
+{
+ CPU_DoubleU u;
+
+ u.ll = val;
+ /* NaNs are not treated the way IEEE 754 requires */
+ if (unlikely(float64_is_any_nan(u.d))) {
+ return 0;
+ }
+
+ return float64_to_uint32(u.d, &env->vec_status);
+}
+
+uint32_t helper_efdctsiz(CPUPPCState *env, uint64_t val)
+{
+ CPU_DoubleU u;
+
+ u.ll = val;
+ /* NaNs are not treated the way IEEE 754 requires */
+ if (unlikely(float64_is_any_nan(u.d))) {
+ return 0;
+ }
+
+ return float64_to_int32_round_to_zero(u.d, &env->vec_status);
+}
+
+uint64_t helper_efdctsidz(CPUPPCState *env, uint64_t val)
+{
+ CPU_DoubleU u;
+
+ u.ll = val;
+ /* NaNs are not treated the way IEEE 754 requires */
+ if (unlikely(float64_is_any_nan(u.d))) {
+ return 0;
+ }
+
+ return float64_to_int64_round_to_zero(u.d, &env->vec_status);
+}
+
+uint32_t helper_efdctuiz(CPUPPCState *env, uint64_t val)
+{
+ CPU_DoubleU u;
+
+ u.ll = val;
+ /* NaNs are not treated the way IEEE 754 requires */
+ if (unlikely(float64_is_any_nan(u.d))) {
+ return 0;
+ }
+
+ return float64_to_uint32_round_to_zero(u.d, &env->vec_status);
+}
+
+uint64_t helper_efdctuidz(CPUPPCState *env, uint64_t val)
+{
+ CPU_DoubleU u;
+
+ u.ll = val;
+ /* NaNs are not treated the way IEEE 754 requires */
+ if (unlikely(float64_is_any_nan(u.d))) {
+ return 0;
+ }
+
+ return float64_to_uint64_round_to_zero(u.d, &env->vec_status);
+}
+
+uint64_t helper_efdcfsf(CPUPPCState *env, uint32_t val)
+{
+ CPU_DoubleU u;
+ float64 tmp;
+
+ u.d = int32_to_float64(val, &env->vec_status);
+ tmp = int64_to_float64(1ULL << 32, &env->vec_status);
+ u.d = float64_div(u.d, tmp, &env->vec_status);
+
+ return u.ll;
+}
+
+uint64_t helper_efdcfuf(CPUPPCState *env, uint32_t val)
+{
+ CPU_DoubleU u;
+ float64 tmp;
+
+ u.d = uint32_to_float64(val, &env->vec_status);
+ tmp = int64_to_float64(1ULL << 32, &env->vec_status);
+ u.d = float64_div(u.d, tmp, &env->vec_status);
+
+ return u.ll;
+}
+
+uint32_t helper_efdctsf(CPUPPCState *env, uint64_t val)
+{
+ CPU_DoubleU u;
+ float64 tmp;
+
+ u.ll = val;
+ /* NaNs are not treated the way IEEE 754 requires */
+ if (unlikely(float64_is_any_nan(u.d))) {
+ return 0;
+ }
+ tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
+ u.d = float64_mul(u.d, tmp, &env->vec_status);
+
+ return float64_to_int32(u.d, &env->vec_status);
+}
+
+uint32_t helper_efdctuf(CPUPPCState *env, uint64_t val)
+{
+ CPU_DoubleU u;
+ float64 tmp;
+
+ u.ll = val;
+ /* NaNs are not treated the way IEEE 754 requires */
+ if (unlikely(float64_is_any_nan(u.d))) {
+ return 0;
+ }
+ tmp = uint64_to_float64(1ULL << 32, &env->vec_status);
+ u.d = float64_mul(u.d, tmp, &env->vec_status);
+
+ return float64_to_uint32(u.d, &env->vec_status);
+}
+
+uint32_t helper_efscfd(CPUPPCState *env, uint64_t val)
+{
+ CPU_DoubleU u1;
+ CPU_FloatU u2;
+
+ u1.ll = val;
+ u2.f = float64_to_float32(u1.d, &env->vec_status);
+
+ return u2.l;
+}
+
+uint64_t helper_efdcfs(CPUPPCState *env, uint32_t val)
+{
+ CPU_DoubleU u2;
+ CPU_FloatU u1;
+
+ u1.l = val;
+ u2.d = float32_to_float64(u1.f, &env->vec_status);
+
+ return u2.ll;
+}
+
+/* Double-precision floating-point arithmetic */
+uint64_t helper_efdadd(CPUPPCState *env, uint64_t op1, uint64_t op2)
+{
+ CPU_DoubleU u1, u2;
+
+ u1.ll = op1;
+ u2.ll = op2;
+ u1.d = float64_add(u1.d, u2.d, &env->vec_status);
+ return u1.ll;
+}
+
+uint64_t helper_efdsub(CPUPPCState *env, uint64_t op1, uint64_t op2)
+{
+ CPU_DoubleU u1, u2;
+
+ u1.ll = op1;
+ u2.ll = op2;
+ u1.d = float64_sub(u1.d, u2.d, &env->vec_status);
+ return u1.ll;
+}
+
+uint64_t helper_efdmul(CPUPPCState *env, uint64_t op1, uint64_t op2)
+{
+ CPU_DoubleU u1, u2;
+
+ u1.ll = op1;
+ u2.ll = op2;
+ u1.d = float64_mul(u1.d, u2.d, &env->vec_status);
+ return u1.ll;
+}
+
+uint64_t helper_efddiv(CPUPPCState *env, uint64_t op1, uint64_t op2)
+{
+ CPU_DoubleU u1, u2;
+
+ u1.ll = op1;
+ u2.ll = op2;
+ u1.d = float64_div(u1.d, u2.d, &env->vec_status);
+ return u1.ll;
+}
+
+/* Double precision floating point helpers */
+uint32_t helper_efdtstlt(CPUPPCState *env, uint64_t op1, uint64_t op2)
+{
+ CPU_DoubleU u1, u2;
+
+ u1.ll = op1;
+ u2.ll = op2;
+ return float64_lt(u1.d, u2.d, &env->vec_status) ? 4 : 0;
+}
+
+uint32_t helper_efdtstgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
+{
+ CPU_DoubleU u1, u2;
+
+ u1.ll = op1;
+ u2.ll = op2;
+ return float64_le(u1.d, u2.d, &env->vec_status) ? 0 : 4;
+}
+
+uint32_t helper_efdtsteq(CPUPPCState *env, uint64_t op1, uint64_t op2)
+{
+ CPU_DoubleU u1, u2;
+
+ u1.ll = op1;
+ u2.ll = op2;
+ return float64_eq_quiet(u1.d, u2.d, &env->vec_status) ? 4 : 0;
+}
+
+uint32_t helper_efdcmplt(CPUPPCState *env, uint64_t op1, uint64_t op2)
+{
+ /* XXX: TODO: test special values (NaN, infinities, ...) */
+ return helper_efdtstlt(env, op1, op2);
+}
+
+uint32_t helper_efdcmpgt(CPUPPCState *env, uint64_t op1, uint64_t op2)
+{
+ /* XXX: TODO: test special values (NaN, infinities, ...) */
+ return helper_efdtstgt(env, op1, op2);
+}
+
+uint32_t helper_efdcmpeq(CPUPPCState *env, uint64_t op1, uint64_t op2)
+{
+ /* XXX: TODO: test special values (NaN, infinities, ...) */
+ return helper_efdtsteq(env, op1, op2);
+}
+
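+/* VSX helpers.  A 6-bit VSX register number is split in the opcode into
+ * five contiguous bits plus one extension bit; DECODE_SPLIT reassembles
+ * it.  For example xT() combines opcode bit 0 (as the high bit) with
+ * bits 21-25.
+ */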
+#define DECODE_SPLIT(opcode, shift1, nb1, shift2, nb2) \
+ (((((opcode) >> (shift1)) & ((1 << (nb1)) - 1)) << nb2) | \
+ (((opcode) >> (shift2)) & ((1 << (nb2)) - 1)))
+
+#define xT(opcode) DECODE_SPLIT(opcode, 0, 1, 21, 5)
+#define xA(opcode) DECODE_SPLIT(opcode, 2, 1, 16, 5)
+#define xB(opcode) DECODE_SPLIT(opcode, 1, 1, 11, 5)
+#define xC(opcode) DECODE_SPLIT(opcode, 3, 1, 6, 5)
+#define BF(opcode) (((opcode) >> (31-8)) & 7)
+
+typedef union _ppc_vsr_t {
+ uint64_t u64[2];
+ uint32_t u32[4];
+ float32 f32[4];
+ float64 f64[2];
+} ppc_vsr_t;
+
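+/* VsrD()/VsrW() index the doubleword and word elements of a VSR in
+ * big-endian element order (element 0 is the most significant) regardless
+ * of host endianness.
+ */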
+#if defined(HOST_WORDS_BIGENDIAN)
+#define VsrW(i) u32[i]
+#define VsrD(i) u64[i]
+#else
+#define VsrW(i) u32[3-(i)]
+#define VsrD(i) u64[1-(i)]
+#endif
+
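+/* VSRs 0-31 are the FPRs (most significant doubleword) paired with the
+ * vsr[] array (least significant doubleword); VSRs 32-63 are the Altivec
+ * registers.
+ */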
+static void getVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env)
+{
+ if (n < 32) {
+ vsr->VsrD(0) = env->fpr[n];
+ vsr->VsrD(1) = env->vsr[n];
+ } else {
+ vsr->u64[0] = env->avr[n-32].u64[0];
+ vsr->u64[1] = env->avr[n-32].u64[1];
+ }
+}
+
+static void putVSR(int n, ppc_vsr_t *vsr, CPUPPCState *env)
+{
+ if (n < 32) {
+ env->fpr[n] = vsr->VsrD(0);
+ env->vsr[n] = vsr->VsrD(1);
+ } else {
+ env->avr[n-32].u64[0] = vsr->u64[0];
+ env->avr[n-32].u64[1] = vsr->u64[1];
+ }
+}
+
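+/* Identity stub so that float64_to_##tp in the macros below also compiles
+ * when tp is float64.
+ */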
+#define float64_to_float64(x, env) x
+
+
+/* VSX_ADD_SUB - VSX floating point add/subtract
+ * name - instruction mnemonic
+ * op - operation (add or sub)
+ * nels - number of elements (1, 2 or 4)
+ * tp - type (float32 or float64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
+ * sfprf - set FPRF
+ * r2sp - round the result to single precision
+ */
+#define VSX_ADD_SUB(name, op, nels, tp, fld, sfprf, r2sp) \
+void helper_##name(CPUPPCState *env, uint32_t opcode) \
+{ \
+ ppc_vsr_t xt, xa, xb; \
+ int i; \
+ \
+ getVSR(xA(opcode), &xa, env); \
+ getVSR(xB(opcode), &xb, env); \
+ getVSR(xT(opcode), &xt, env); \
+ helper_reset_fpstatus(env); \
+ \
+ for (i = 0; i < nels; i++) { \
+ float_status tstat = env->fp_status; \
+ set_float_exception_flags(0, &tstat); \
+ xt.fld = tp##_##op(xa.fld, xb.fld, &tstat); \
+ env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
+ \
+ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
+ if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) { \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf); \
+ } else if (tp##_is_signaling_nan(xa.fld, &tstat) || \
+ tp##_is_signaling_nan(xb.fld, &tstat)) { \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
+ } \
+ } \
+ \
+ if (r2sp) { \
+ xt.fld = helper_frsp(env, xt.fld); \
+ } \
+ \
+ if (sfprf) { \
+ helper_compute_fprf(env, xt.fld); \
+ } \
+ } \
+ putVSR(xT(opcode), &xt, env); \
+ float_check_status(env); \
+}
+
+VSX_ADD_SUB(xsadddp, add, 1, float64, VsrD(0), 1, 0)
+VSX_ADD_SUB(xsaddsp, add, 1, float64, VsrD(0), 1, 1)
+VSX_ADD_SUB(xvadddp, add, 2, float64, VsrD(i), 0, 0)
+VSX_ADD_SUB(xvaddsp, add, 4, float32, VsrW(i), 0, 0)
+VSX_ADD_SUB(xssubdp, sub, 1, float64, VsrD(0), 1, 0)
+VSX_ADD_SUB(xssubsp, sub, 1, float64, VsrD(0), 1, 1)
+VSX_ADD_SUB(xvsubdp, sub, 2, float64, VsrD(i), 0, 0)
+VSX_ADD_SUB(xvsubsp, sub, 4, float32, VsrW(i), 0, 0)
+
+/* VSX_MUL - VSX floating point multiply
+ * op - instruction mnemonic
+ * nels - number of elements (1, 2 or 4)
+ * tp - type (float32 or float64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
+ * sfprf - set FPRF
+ * r2sp - round the result to single precision
+ */
+#define VSX_MUL(op, nels, tp, fld, sfprf, r2sp) \
+void helper_##op(CPUPPCState *env, uint32_t opcode) \
+{ \
+ ppc_vsr_t xt, xa, xb; \
+ int i; \
+ \
+ getVSR(xA(opcode), &xa, env); \
+ getVSR(xB(opcode), &xb, env); \
+ getVSR(xT(opcode), &xt, env); \
+ helper_reset_fpstatus(env); \
+ \
+ for (i = 0; i < nels; i++) { \
+ float_status tstat = env->fp_status; \
+ set_float_exception_flags(0, &tstat); \
+ xt.fld = tp##_mul(xa.fld, xb.fld, &tstat); \
+ env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
+ \
+ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
+ if ((tp##_is_infinity(xa.fld) && tp##_is_zero(xb.fld)) || \
+ (tp##_is_infinity(xb.fld) && tp##_is_zero(xa.fld))) { \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, sfprf); \
+ } else if (tp##_is_signaling_nan(xa.fld, &tstat) || \
+ tp##_is_signaling_nan(xb.fld, &tstat)) { \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
+ } \
+ } \
+ \
+ if (r2sp) { \
+ xt.fld = helper_frsp(env, xt.fld); \
+ } \
+ \
+ if (sfprf) { \
+ helper_compute_fprf(env, xt.fld); \
+ } \
+ } \
+ \
+ putVSR(xT(opcode), &xt, env); \
+ float_check_status(env); \
+}
+
+VSX_MUL(xsmuldp, 1, float64, VsrD(0), 1, 0)
+VSX_MUL(xsmulsp, 1, float64, VsrD(0), 1, 1)
+VSX_MUL(xvmuldp, 2, float64, VsrD(i), 0, 0)
+VSX_MUL(xvmulsp, 4, float32, VsrW(i), 0, 0)
+
+/* VSX_DIV - VSX floating point divide
+ * op - instruction mnemonic
+ * nels - number of elements (1, 2 or 4)
+ * tp - type (float32 or float64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
+ * sfprf - set FPRF
+ * r2sp - round the result to single precision
+ */
+#define VSX_DIV(op, nels, tp, fld, sfprf, r2sp) \
+void helper_##op(CPUPPCState *env, uint32_t opcode) \
+{ \
+ ppc_vsr_t xt, xa, xb; \
+ int i; \
+ \
+ getVSR(xA(opcode), &xa, env); \
+ getVSR(xB(opcode), &xb, env); \
+ getVSR(xT(opcode), &xt, env); \
+ helper_reset_fpstatus(env); \
+ \
+ for (i = 0; i < nels; i++) { \
+ float_status tstat = env->fp_status; \
+ set_float_exception_flags(0, &tstat); \
+ xt.fld = tp##_div(xa.fld, xb.fld, &tstat); \
+ env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
+ \
+ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
+ if (tp##_is_infinity(xa.fld) && tp##_is_infinity(xb.fld)) { \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIDI, sfprf); \
+ } else if (tp##_is_zero(xa.fld) && \
+ tp##_is_zero(xb.fld)) { \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXZDZ, sfprf); \
+ } else if (tp##_is_signaling_nan(xa.fld, &tstat) || \
+ tp##_is_signaling_nan(xb.fld, &tstat)) { \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
+ } \
+ } \
+ \
+ if (r2sp) { \
+ xt.fld = helper_frsp(env, xt.fld); \
+ } \
+ \
+ if (sfprf) { \
+ helper_compute_fprf(env, xt.fld); \
+ } \
+ } \
+ \
+ putVSR(xT(opcode), &xt, env); \
+ float_check_status(env); \
+}
+
+VSX_DIV(xsdivdp, 1, float64, VsrD(0), 1, 0)
+VSX_DIV(xsdivsp, 1, float64, VsrD(0), 1, 1)
+VSX_DIV(xvdivdp, 2, float64, VsrD(i), 0, 0)
+VSX_DIV(xvdivsp, 4, float32, VsrW(i), 0, 0)
+
+/* VSX_RE - VSX floating point reciprocal estimate
+ * op - instruction mnemonic
+ * nels - number of elements (1, 2 or 4)
+ * tp - type (float32 or float64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
+ * sfprf - set FPRF
+ * r2sp - round the result to single precision
+ */
+#define VSX_RE(op, nels, tp, fld, sfprf, r2sp) \
+void helper_##op(CPUPPCState *env, uint32_t opcode) \
+{ \
+ ppc_vsr_t xt, xb; \
+ int i; \
+ \
+ getVSR(xB(opcode), &xb, env); \
+ getVSR(xT(opcode), &xt, env); \
+ helper_reset_fpstatus(env); \
+ \
+ for (i = 0; i < nels; i++) { \
+ if (unlikely(tp##_is_signaling_nan(xb.fld, &env->fp_status))) { \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
+ } \
+ xt.fld = tp##_div(tp##_one, xb.fld, &env->fp_status); \
+ \
+ if (r2sp) { \
+ xt.fld = helper_frsp(env, xt.fld); \
+ } \
+ \
+ if (sfprf) { \
+ helper_compute_fprf(env, xt.fld); \
+ } \
+ } \
+ \
+ putVSR(xT(opcode), &xt, env); \
+ float_check_status(env); \
+}
+
+VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
+VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
+VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
+VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0)
+
+/* VSX_SQRT - VSX floating point square root
+ * op - instruction mnemonic
+ * nels - number of elements (1, 2 or 4)
+ * tp - type (float32 or float64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
+ * sfprf - set FPRF
+ * r2sp - round the result to single precision
+ */
+#define VSX_SQRT(op, nels, tp, fld, sfprf, r2sp) \
+void helper_##op(CPUPPCState *env, uint32_t opcode) \
+{ \
+ ppc_vsr_t xt, xb; \
+ int i; \
+ \
+ getVSR(xB(opcode), &xb, env); \
+ getVSR(xT(opcode), &xt, env); \
+ helper_reset_fpstatus(env); \
+ \
+ for (i = 0; i < nels; i++) { \
+ float_status tstat = env->fp_status; \
+ set_float_exception_flags(0, &tstat); \
+ xt.fld = tp##_sqrt(xb.fld, &tstat); \
+ env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
+ \
+ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
+ if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) { \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf); \
+ } else if (tp##_is_signaling_nan(xb.fld, &tstat)) { \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
+ } \
+ } \
+ \
+ if (r2sp) { \
+ xt.fld = helper_frsp(env, xt.fld); \
+ } \
+ \
+ if (sfprf) { \
+ helper_compute_fprf(env, xt.fld); \
+ } \
+ } \
+ \
+ putVSR(xT(opcode), &xt, env); \
+ float_check_status(env); \
+}
+
+VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
+VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
+VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
+VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0)
+
+/* VSX_RSQRTE - VSX floating point reciprocal square root estimate
+ * op - instruction mnemonic
+ * nels - number of elements (1, 2 or 4)
+ * tp - type (float32 or float64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
+ * sfprf - set FPRF
+ * r2sp - round the result to single precision
+ */
+#define VSX_RSQRTE(op, nels, tp, fld, sfprf, r2sp) \
+void helper_##op(CPUPPCState *env, uint32_t opcode) \
+{ \
+ ppc_vsr_t xt, xb; \
+ int i; \
+ \
+ getVSR(xB(opcode), &xb, env); \
+ getVSR(xT(opcode), &xt, env); \
+ helper_reset_fpstatus(env); \
+ \
+ for (i = 0; i < nels; i++) { \
+ float_status tstat = env->fp_status; \
+ set_float_exception_flags(0, &tstat); \
+ xt.fld = tp##_sqrt(xb.fld, &tstat); \
+ xt.fld = tp##_div(tp##_one, xt.fld, &tstat); \
+ env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
+ \
+ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
+ if (tp##_is_neg(xb.fld) && !tp##_is_zero(xb.fld)) { \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSQRT, sfprf); \
+ } else if (tp##_is_signaling_nan(xb.fld, &tstat)) { \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
+ } \
+ } \
+ \
+ if (r2sp) { \
+ xt.fld = helper_frsp(env, xt.fld); \
+ } \
+ \
+ if (sfprf) { \
+ helper_compute_fprf(env, xt.fld); \
+ } \
+ } \
+ \
+ putVSR(xT(opcode), &xt, env); \
+ float_check_status(env); \
+}
+
+VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
+VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
+VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
+VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0)
+
+/* VSX_TDIV - VSX floating point test for divide
+ * op - instruction mnemonic
+ * nels - number of elements (1, 2 or 4)
+ * tp - type (float32 or float64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
+ * emin - minimum unbiased exponent
+ * emax - maximum unbiased exponent
+ * nbits - number of fraction bits
+ */
+#define VSX_TDIV(op, nels, tp, fld, emin, emax, nbits) \
+void helper_##op(CPUPPCState *env, uint32_t opcode) \
+{ \
+ ppc_vsr_t xa, xb; \
+ int i; \
+ int fe_flag = 0; \
+ int fg_flag = 0; \
+ \
+ getVSR(xA(opcode), &xa, env); \
+ getVSR(xB(opcode), &xb, env); \
+ \
+ for (i = 0; i < nels; i++) { \
+ if (unlikely(tp##_is_infinity(xa.fld) || \
+ tp##_is_infinity(xb.fld) || \
+ tp##_is_zero(xb.fld))) { \
+ fe_flag = 1; \
+ fg_flag = 1; \
+ } else { \
+ int e_a = ppc_##tp##_get_unbiased_exp(xa.fld); \
+ int e_b = ppc_##tp##_get_unbiased_exp(xb.fld); \
+ \
+ if (unlikely(tp##_is_any_nan(xa.fld) || \
+ tp##_is_any_nan(xb.fld))) { \
+ fe_flag = 1; \
+ } else if ((e_b <= emin) || (e_b >= (emax-2))) { \
+ fe_flag = 1; \
+ } else if (!tp##_is_zero(xa.fld) && \
+ (((e_a - e_b) >= emax) || \
+ ((e_a - e_b) <= (emin+1)) || \
+ (e_a <= (emin+nbits)))) { \
+ fe_flag = 1; \
+ } \
+ \
+ if (unlikely(tp##_is_zero_or_denormal(xb.fld))) { \
+ /* XB is not zero because of the above check and */ \
+ /* so must be denormalized. */ \
+ fg_flag = 1; \
+ } \
+ } \
+ } \
+ \
+ env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
+}
+
+VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
+VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
+VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23)
+
+/* VSX_TSQRT - VSX floating point test for square root
+ * op - instruction mnemonic
+ * nels - number of elements (1, 2 or 4)
+ * tp - type (float32 or float64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
+ * emin - minimum unbiased exponent
+ * nbits - number of fraction bits
+ */
+#define VSX_TSQRT(op, nels, tp, fld, emin, nbits) \
+void helper_##op(CPUPPCState *env, uint32_t opcode) \
+{ \
+ ppc_vsr_t xa, xb; \
+ int i; \
+ int fe_flag = 0; \
+ int fg_flag = 0; \
+ \
+ getVSR(xA(opcode), &xa, env); \
+ getVSR(xB(opcode), &xb, env); \
+ \
+ for (i = 0; i < nels; i++) { \
+ if (unlikely(tp##_is_infinity(xb.fld) || \
+ tp##_is_zero(xb.fld))) { \
+ fe_flag = 1; \
+ fg_flag = 1; \
+ } else { \
+ int e_b = ppc_##tp##_get_unbiased_exp(xb.fld); \
+ \
+ if (unlikely(tp##_is_any_nan(xb.fld))) { \
+ fe_flag = 1; \
+ } else if (unlikely(tp##_is_zero(xb.fld))) { \
+ fe_flag = 1; \
+ } else if (unlikely(tp##_is_neg(xb.fld))) { \
+ fe_flag = 1; \
+ } else if (!tp##_is_zero(xb.fld) && \
+ (e_b <= (emin+nbits))) { \
+ fe_flag = 1; \
+ } \
+ \
+ if (unlikely(tp##_is_zero_or_denormal(xb.fld))) { \
+ /* XB is not zero because of the above check and */ \
+ /* therefore must be denormalized. */ \
+ fg_flag = 1; \
+ } \
+ } \
+ } \
+ \
+ env->crf[BF(opcode)] = 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); \
+}
+
+VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
+VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
+VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23)
+
+/* VSX_MADD - VSX floating point multiply/add variations
+ * op - instruction mnemonic
+ * nels - number of elements (1, 2 or 4)
+ * tp - type (float32 or float64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
+ * maddflgs - flags for the float*muladd routine that control the
+ * various forms (madd, msub, nmadd, nmsub)
+ * afrm - A form (1=A, 0=M)
+ * sfprf - set FPRF
+ * r2sp - round the result to single precision
+ */
+#define VSX_MADD(op, nels, tp, fld, maddflgs, afrm, sfprf, r2sp) \
+void helper_##op(CPUPPCState *env, uint32_t opcode) \
+{ \
+ ppc_vsr_t xt_in, xa, xb, xt_out; \
+ ppc_vsr_t *b, *c; \
+ int i; \
+ \
+ if (afrm) { /* AxB + T */ \
+ b = &xb; \
+ c = &xt_in; \
+ } else { /* AxT + B */ \
+ b = &xt_in; \
+ c = &xb; \
+ } \
+ \
+ getVSR(xA(opcode), &xa, env); \
+ getVSR(xB(opcode), &xb, env); \
+ getVSR(xT(opcode), &xt_in, env); \
+ \
+ xt_out = xt_in; \
+ \
+ helper_reset_fpstatus(env); \
+ \
+ for (i = 0; i < nels; i++) { \
+ float_status tstat = env->fp_status; \
+ set_float_exception_flags(0, &tstat); \
+ if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) {\
+ /* Avoid double rounding errors by rounding the intermediate */ \
+ /* result to odd. */ \
+ set_float_rounding_mode(float_round_to_zero, &tstat); \
+ xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld, \
+ maddflgs, &tstat); \
+ xt_out.fld |= (get_float_exception_flags(&tstat) & \
+ float_flag_inexact) != 0; \
+ } else { \
+ xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld, \
+ maddflgs, &tstat); \
+ } \
+ env->fp_status.float_exception_flags |= tstat.float_exception_flags; \
+ \
+ if (unlikely(tstat.float_exception_flags & float_flag_invalid)) { \
+ if (tp##_is_signaling_nan(xa.fld, &tstat) || \
+ tp##_is_signaling_nan(b->fld, &tstat) || \
+ tp##_is_signaling_nan(c->fld, &tstat)) { \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, sfprf); \
+ tstat.float_exception_flags &= ~float_flag_invalid; \
+ } \
+ if ((tp##_is_infinity(xa.fld) && tp##_is_zero(b->fld)) || \
+ (tp##_is_zero(xa.fld) && tp##_is_infinity(b->fld))) { \
+ xt_out.fld = float64_to_##tp(float_invalid_op_excp(env, \
+ POWERPC_EXCP_FP_VXIMZ, sfprf), &env->fp_status); \
+ tstat.float_exception_flags &= ~float_flag_invalid; \
+ } \
+ if ((tstat.float_exception_flags & float_flag_invalid) && \
+ ((tp##_is_infinity(xa.fld) || \
+ tp##_is_infinity(b->fld)) && \
+ tp##_is_infinity(c->fld))) { \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, sfprf); \
+ } \
+ } \
+ \
+ if (r2sp) { \
+ xt_out.fld = helper_frsp(env, xt_out.fld); \
+ } \
+ \
+ if (sfprf) { \
+ helper_compute_fprf(env, xt_out.fld); \
+ } \
+ } \
+ putVSR(xT(opcode), &xt_out, env); \
+ float_check_status(env); \
+}
+
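+/* Flag sets passed to the softfloat fused multiply-add routines: msub
+ * negates the addend, nmadd negates the final result and nmsub negates
+ * both.
+ */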
+#define MADD_FLGS 0
+#define MSUB_FLGS float_muladd_negate_c
+#define NMADD_FLGS float_muladd_negate_result
+#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)
+
+VSX_MADD(xsmaddadp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 0)
+VSX_MADD(xsmaddmdp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 0)
+VSX_MADD(xsmsubadp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 0)
+VSX_MADD(xsmsubmdp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 0)
+VSX_MADD(xsnmaddadp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 0)
+VSX_MADD(xsnmaddmdp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 0)
+VSX_MADD(xsnmsubadp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 0)
+VSX_MADD(xsnmsubmdp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 0)
+
+VSX_MADD(xsmaddasp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 1)
+VSX_MADD(xsmaddmsp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 1)
+VSX_MADD(xsmsubasp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 1)
+VSX_MADD(xsmsubmsp, 1, float64, VsrD(0), MSUB_FLGS, 0, 1, 1)
+VSX_MADD(xsnmaddasp, 1, float64, VsrD(0), NMADD_FLGS, 1, 1, 1)
+VSX_MADD(xsnmaddmsp, 1, float64, VsrD(0), NMADD_FLGS, 0, 1, 1)
+VSX_MADD(xsnmsubasp, 1, float64, VsrD(0), NMSUB_FLGS, 1, 1, 1)
+VSX_MADD(xsnmsubmsp, 1, float64, VsrD(0), NMSUB_FLGS, 0, 1, 1)
+
+VSX_MADD(xvmaddadp, 2, float64, VsrD(i), MADD_FLGS, 1, 0, 0)
+VSX_MADD(xvmaddmdp, 2, float64, VsrD(i), MADD_FLGS, 0, 0, 0)
+VSX_MADD(xvmsubadp, 2, float64, VsrD(i), MSUB_FLGS, 1, 0, 0)
+VSX_MADD(xvmsubmdp, 2, float64, VsrD(i), MSUB_FLGS, 0, 0, 0)
+VSX_MADD(xvnmaddadp, 2, float64, VsrD(i), NMADD_FLGS, 1, 0, 0)
+VSX_MADD(xvnmaddmdp, 2, float64, VsrD(i), NMADD_FLGS, 0, 0, 0)
+VSX_MADD(xvnmsubadp, 2, float64, VsrD(i), NMSUB_FLGS, 1, 0, 0)
+VSX_MADD(xvnmsubmdp, 2, float64, VsrD(i), NMSUB_FLGS, 0, 0, 0)
+
+VSX_MADD(xvmaddasp, 4, float32, VsrW(i), MADD_FLGS, 1, 0, 0)
+VSX_MADD(xvmaddmsp, 4, float32, VsrW(i), MADD_FLGS, 0, 0, 0)
+VSX_MADD(xvmsubasp, 4, float32, VsrW(i), MSUB_FLGS, 1, 0, 0)
+VSX_MADD(xvmsubmsp, 4, float32, VsrW(i), MSUB_FLGS, 0, 0, 0)
+VSX_MADD(xvnmaddasp, 4, float32, VsrW(i), NMADD_FLGS, 1, 0, 0)
+VSX_MADD(xvnmaddmsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0, 0)
+VSX_MADD(xvnmsubasp, 4, float32, VsrW(i), NMSUB_FLGS, 1, 0, 0)
+VSX_MADD(xvnmsubmsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0, 0)
+
+/* VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
+ * op - instruction mnemonic
+ * cmp - comparison operation
+ * exp - expected result of comparison
+ * svxvc - set VXVC bit
+ */
+#define VSX_SCALAR_CMP_DP(op, cmp, exp, svxvc) \
+void helper_##op(CPUPPCState *env, uint32_t opcode) \
+{ \
+ ppc_vsr_t xt, xa, xb; \
+ bool vxsnan_flag = false, vxvc_flag = false, vex_flag = false; \
+ \
+ getVSR(xA(opcode), &xa, env); \
+ getVSR(xB(opcode), &xb, env); \
+ getVSR(xT(opcode), &xt, env); \
+ \
+ if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) || \
+ float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) { \
+ vxsnan_flag = true; \
+ if (fpscr_ve == 0 && svxvc) { \
+ vxvc_flag = true; \
+ } \
+ } else if (svxvc) { \
+ vxvc_flag = float64_is_quiet_nan(xa.VsrD(0), &env->fp_status) || \
+ float64_is_quiet_nan(xb.VsrD(0), &env->fp_status); \
+ } \
+ if (vxsnan_flag) { \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
+ } \
+ if (vxvc_flag) { \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0); \
+ } \
+ vex_flag = fpscr_ve && (vxvc_flag || vxsnan_flag); \
+ \
+ if (!vex_flag) { \
+ if (float64_##cmp(xb.VsrD(0), xa.VsrD(0), &env->fp_status) == exp) { \
+ xt.VsrD(0) = -1; \
+ xt.VsrD(1) = 0; \
+ } else { \
+ xt.VsrD(0) = 0; \
+ xt.VsrD(1) = 0; \
+ } \
+ } \
+ putVSR(xT(opcode), &xt, env); \
+ helper_float_check_status(env); \
+}
+
+VSX_SCALAR_CMP_DP(xscmpeqdp, eq, 1, 0)
+VSX_SCALAR_CMP_DP(xscmpgedp, le, 1, 1)
+VSX_SCALAR_CMP_DP(xscmpgtdp, lt, 1, 1)
+VSX_SCALAR_CMP_DP(xscmpnedp, eq, 0, 0)
+
+#define VSX_SCALAR_CMP(op, ordered) \
+void helper_##op(CPUPPCState *env, uint32_t opcode) \
+{ \
+ ppc_vsr_t xa, xb; \
+ uint32_t cc = 0; \
+ \
+ getVSR(xA(opcode), &xa, env); \
+ getVSR(xB(opcode), &xb, env); \
+ \
+ if (unlikely(float64_is_any_nan(xa.VsrD(0)) || \
+ float64_is_any_nan(xb.VsrD(0)))) { \
+ if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) || \
+ float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) { \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
+ } \
+ if (ordered) { \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0); \
+ } \
+ cc = 1; \
+ } else { \
+ if (float64_lt(xa.VsrD(0), xb.VsrD(0), &env->fp_status)) { \
+ cc = 8; \
+ } else if (!float64_le(xa.VsrD(0), xb.VsrD(0), \
+ &env->fp_status)) { \
+ cc = 4; \
+ } else { \
+ cc = 2; \
+ } \
+ } \
+ \
+ env->fpscr &= ~(0x0F << FPSCR_FPRF); \
+ env->fpscr |= cc << FPSCR_FPRF; \
+ env->crf[BF(opcode)] = cc; \
+ \
+ float_check_status(env); \
+}
+
+VSX_SCALAR_CMP(xscmpodp, 1)
+VSX_SCALAR_CMP(xscmpudp, 0)
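+
+/* Editor's note (illustrative): the cc value computed above uses the standard
+ * PowerPC FPCC encoding, which is what lands in the FPCC bits of FPRF and in
+ * the target CR field:
+ *
+ *     cc = 8;   FL - xa <  xb
+ *     cc = 4;   FG - xa >  xb
+ *     cc = 2;   FE - xa == xb
+ *     cc = 1;   FU - unordered (at least one operand is a NaN)
+ */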
+
+/* VSX_MAX_MIN - VSX floating point maximum/minimum
+ * name - instruction mnemonic
+ * op - operation (max or min)
+ * nels - number of elements (1, 2 or 4)
+ * tp - type (float32 or float64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
+ */
+#define VSX_MAX_MIN(name, op, nels, tp, fld) \
+void helper_##name(CPUPPCState *env, uint32_t opcode) \
+{ \
+ ppc_vsr_t xt, xa, xb; \
+ int i; \
+ \
+ getVSR(xA(opcode), &xa, env); \
+ getVSR(xB(opcode), &xb, env); \
+ getVSR(xT(opcode), &xt, env); \
+ \
+ for (i = 0; i < nels; i++) { \
+ xt.fld = tp##_##op(xa.fld, xb.fld, &env->fp_status); \
+ if (unlikely(tp##_is_signaling_nan(xa.fld, &env->fp_status) || \
+ tp##_is_signaling_nan(xb.fld, &env->fp_status))) { \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
+ } \
+ } \
+ \
+ putVSR(xT(opcode), &xt, env); \
+ float_check_status(env); \
+}
+
+VSX_MAX_MIN(xsmaxdp, maxnum, 1, float64, VsrD(0))
+VSX_MAX_MIN(xvmaxdp, maxnum, 2, float64, VsrD(i))
+VSX_MAX_MIN(xvmaxsp, maxnum, 4, float32, VsrW(i))
+VSX_MAX_MIN(xsmindp, minnum, 1, float64, VsrD(0))
+VSX_MAX_MIN(xvmindp, minnum, 2, float64, VsrD(i))
+VSX_MAX_MIN(xvminsp, minnum, 4, float32, VsrW(i))
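+
+/* Editor's note (illustrative): float64_maxnum()/float64_minnum() (and the
+ * float32 variants) follow the IEEE 754-2008 maxNum/minNum rules, so a quiet
+ * NaN operand is ignored in favour of the numeric one, e.g.
+ * maxnum(QNaN, 2.0) == 2.0.  The explicit check in the macro above only adds
+ * the PowerPC VXSNAN exception when a signalling NaN is involved.
+ */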
+
+/* VSX_CMP - VSX floating point compare
+ * op - instruction mnemonic
+ * nels - number of elements (1, 2 or 4)
+ * tp - type (float32 or float64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
+ * cmp - comparison operation
+ * svxvc - set VXVC bit
+ * exp - expected result of comparison
+ */
+#define VSX_CMP(op, nels, tp, fld, cmp, svxvc, exp) \
+void helper_##op(CPUPPCState *env, uint32_t opcode) \
+{ \
+ ppc_vsr_t xt, xa, xb; \
+ int i; \
+ int all_true = 1; \
+ int all_false = 1; \
+ \
+ getVSR(xA(opcode), &xa, env); \
+ getVSR(xB(opcode), &xb, env); \
+ getVSR(xT(opcode), &xt, env); \
+ \
+ for (i = 0; i < nels; i++) { \
+ if (unlikely(tp##_is_any_nan(xa.fld) || \
+ tp##_is_any_nan(xb.fld))) { \
+ if (tp##_is_signaling_nan(xa.fld, &env->fp_status) || \
+ tp##_is_signaling_nan(xb.fld, &env->fp_status)) { \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
+ } \
+ if (svxvc) { \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0); \
+ } \
+ xt.fld = 0; \
+ all_true = 0; \
+ } else { \
+ if (tp##_##cmp(xb.fld, xa.fld, &env->fp_status) == exp) { \
+ xt.fld = -1; \
+ all_false = 0; \
+ } else { \
+ xt.fld = 0; \
+ all_true = 0; \
+ } \
+ } \
+ } \
+ \
+ putVSR(xT(opcode), &xt, env); \
+ if ((opcode >> (31-21)) & 1) { \
+ env->crf[6] = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0); \
+ } \
+ float_check_status(env); \
+ }
+
+VSX_CMP(xvcmpeqdp, 2, float64, VsrD(i), eq, 0, 1)
+VSX_CMP(xvcmpgedp, 2, float64, VsrD(i), le, 1, 1)
+VSX_CMP(xvcmpgtdp, 2, float64, VsrD(i), lt, 1, 1)
+VSX_CMP(xvcmpnedp, 2, float64, VsrD(i), eq, 0, 0)
+VSX_CMP(xvcmpeqsp, 4, float32, VsrW(i), eq, 0, 1)
+VSX_CMP(xvcmpgesp, 4, float32, VsrW(i), le, 1, 1)
+VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1)
+VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0)
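+
+/* Editor's note (illustrative): for the record forms (Rc, bit 21 in PowerPC
+ * big-endian bit numbering, i.e. (opcode >> (31 - 21)) & 1 above), CR field 6
+ * summarises the per-element results:
+ *
+ *     crf[6] = (all_true  ? 0x8 : 0)    all elements satisfied cmp
+ *            | (all_false ? 0x2 : 0);   no element satisfied cmp
+ */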
+
+/* VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion
+ * op - instruction mnemonic
+ * nels - number of elements (1, 2 or 4)
+ * stp - source type (float32 or float64)
+ * ttp - target type (float32 or float64)
+ * sfld - source vsr_t field
+ * tfld - target vsr_t field (VsrD(*) or VsrW(*))
+ * sfprf - set FPRF
+ */
+#define VSX_CVT_FP_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf) \
+void helper_##op(CPUPPCState *env, uint32_t opcode) \
+{ \
+ ppc_vsr_t xt, xb; \
+ int i; \
+ \
+ getVSR(xB(opcode), &xb, env); \
+ getVSR(xT(opcode), &xt, env); \
+ \
+ for (i = 0; i < nels; i++) { \
+ xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status); \
+ if (unlikely(stp##_is_signaling_nan(xb.sfld, \
+ &env->fp_status))) { \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
+ xt.tfld = ttp##_snan_to_qnan(xt.tfld); \
+ } \
+ if (sfprf) { \
+ helper_compute_fprf(env, ttp##_to_float64(xt.tfld, \
+ &env->fp_status)); \
+ } \
+ } \
+ \
+ putVSR(xT(opcode), &xt, env); \
+ float_check_status(env); \
+}
+
+VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1)
+VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
+VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2*i), 0)
+VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2*i), VsrD(i), 0)
+
+uint64_t helper_xscvdpspn(CPUPPCState *env, uint64_t xb)
+{
+ float_status tstat = env->fp_status;
+ set_float_exception_flags(0, &tstat);
+
+ return (uint64_t)float64_to_float32(xb, &tstat) << 32;
+}
+
+uint64_t helper_xscvspdpn(CPUPPCState *env, uint64_t xb)
+{
+ float_status tstat = env->fp_status;
+ set_float_exception_flags(0, &tstat);
+
+ return float32_to_float64(xb >> 32, &tstat);
+}
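+
+/* Editor's note (illustrative): the "n" (non-signalling) conversions above do
+ * all their work on a scratch float_status copy and never merge the resulting
+ * exception flags back, so unlike xscvdpsp/xscvspdp they leave FPSCR
+ * untouched.
+ */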
+
+/* VSX_CVT_FP_TO_INT - VSX floating point to integer conversion
+ * op - instruction mnemonic
+ * nels - number of elements (1, 2 or 4)
+ * stp - source type (float32 or float64)
+ * ttp - target type (int32, uint32, int64 or uint64)
+ * sfld - source vsr_t field
+ * tfld - target vsr_t field
+ * rnan - resulting NaN
+ */
+#define VSX_CVT_FP_TO_INT(op, nels, stp, ttp, sfld, tfld, rnan) \
+void helper_##op(CPUPPCState *env, uint32_t opcode) \
+{ \
+ ppc_vsr_t xt, xb; \
+ int i; \
+ \
+ getVSR(xB(opcode), &xb, env); \
+ getVSR(xT(opcode), &xt, env); \
+ \
+ for (i = 0; i < nels; i++) { \
+ if (unlikely(stp##_is_any_nan(xb.sfld))) { \
+ if (stp##_is_signaling_nan(xb.sfld, &env->fp_status)) { \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
+ } \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0); \
+ xt.tfld = rnan; \
+ } else { \
+ xt.tfld = stp##_to_##ttp##_round_to_zero(xb.sfld, \
+ &env->fp_status); \
+ if (env->fp_status.float_exception_flags & float_flag_invalid) { \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXCVI, 0); \
+ } \
+ } \
+ } \
+ \
+ putVSR(xT(opcode), &xt, env); \
+ float_check_status(env); \
+}
+
+VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), \
+ 0x8000000000000000ULL)
+VSX_CVT_FP_TO_INT(xscvdpsxws, 1, float64, int32, VsrD(0), VsrW(1), \
+ 0x80000000U)
+VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), 0ULL)
+VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U)
+VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), \
+ 0x8000000000000000ULL)
+VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2*i), \
+ 0x80000000U)
+VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL)
+VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2*i), 0U)
+VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2*i), VsrD(i), \
+ 0x8000000000000000ULL)
+VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U)
+VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2*i), VsrD(i), 0ULL)
+VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U)
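+
+/* Editor's note (illustrative): rnan is the value stored when the source is a
+ * NaN.  For example, with the definitions above, converting a NaN double with
+ * xscvdpsxws writes 0x80000000 into the target word and raises VXCVI (plus
+ * VXSNAN if the input was a signalling NaN).
+ */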
+
+/* VSX_CVT_INT_TO_FP - VSX integer to floating point conversion
+ * op - instruction mnemonic
+ * nels - number of elements (1, 2 or 4)
+ * stp - source type (int32, uint32, int64 or uint64)
+ * ttp - target type (float32 or float64)
+ * sfld - source vsr_t field
+ * tfld - target vsr_t field
+ * sfprf - set FPRF
+ * r2sp - round the result to single precision
+ */
+#define VSX_CVT_INT_TO_FP(op, nels, stp, ttp, sfld, tfld, sfprf, r2sp) \
+void helper_##op(CPUPPCState *env, uint32_t opcode) \
+{ \
+ ppc_vsr_t xt, xb; \
+ int i; \
+ \
+ getVSR(xB(opcode), &xb, env); \
+ getVSR(xT(opcode), &xt, env); \
+ \
+ for (i = 0; i < nels; i++) { \
+ xt.tfld = stp##_to_##ttp(xb.sfld, &env->fp_status); \
+ if (r2sp) { \
+ xt.tfld = helper_frsp(env, xt.tfld); \
+ } \
+ if (sfprf) { \
+ helper_compute_fprf(env, xt.tfld); \
+ } \
+ } \
+ \
+ putVSR(xT(opcode), &xt, env); \
+ float_check_status(env); \
+}
+
+VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
+VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
+VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
+VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
+VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2*i), VsrD(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2*i), VsrD(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2*i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2*i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0)
+VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0)
+
+/* For "use current rounding mode", define a value that will not be one of
+ * the existing rounding mode enums.
+ */
+#define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \
+ float_round_up + float_round_to_zero)
+
+/* VSX_ROUND - VSX floating point round
+ * op - instruction mnemonic
+ * nels - number of elements (1, 2 or 4)
+ * tp - type (float32 or float64)
+ * fld - vsr_t field (VsrD(*) or VsrW(*))
+ * rmode - rounding mode
+ * sfprf - set FPRF
+ */
+#define VSX_ROUND(op, nels, tp, fld, rmode, sfprf) \
+void helper_##op(CPUPPCState *env, uint32_t opcode) \
+{ \
+ ppc_vsr_t xt, xb; \
+ int i; \
+ getVSR(xB(opcode), &xb, env); \
+ getVSR(xT(opcode), &xt, env); \
+ \
+ if (rmode != FLOAT_ROUND_CURRENT) { \
+ set_float_rounding_mode(rmode, &env->fp_status); \
+ } \
+ \
+ for (i = 0; i < nels; i++) { \
+ if (unlikely(tp##_is_signaling_nan(xb.fld, \
+ &env->fp_status))) { \
+ float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0); \
+ xt.fld = tp##_snan_to_qnan(xb.fld); \
+ } else { \
+ xt.fld = tp##_round_to_int(xb.fld, &env->fp_status); \
+ } \
+ if (sfprf) { \
+ helper_compute_fprf(env, xt.fld); \
+ } \
+ } \
+ \
+ /* If this is not a "use current rounding mode" instruction, \
+ * then inhibit setting of the XX bit and restore rounding \
+ * mode from FPSCR */ \
+ if (rmode != FLOAT_ROUND_CURRENT) { \
+ fpscr_set_rounding_mode(env); \
+ env->fp_status.float_exception_flags &= ~float_flag_inexact; \
+ } \
+ \
+ putVSR(xT(opcode), &xt, env); \
+ float_check_status(env); \
+}
+
+VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1)
+VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
+VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
+VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
+VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)
+
+VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_ties_away, 0)
+VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
+VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
+VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
+VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)
+
+VSX_ROUND(xvrspi, 4, float32, VsrW(i), float_round_ties_away, 0)
+VSX_ROUND(xvrspic, 4, float32, VsrW(i), FLOAT_ROUND_CURRENT, 0)
+VSX_ROUND(xvrspim, 4, float32, VsrW(i), float_round_down, 0)
+VSX_ROUND(xvrspip, 4, float32, VsrW(i), float_round_up, 0)
+VSX_ROUND(xvrspiz, 4, float32, VsrW(i), float_round_to_zero, 0)
+
+uint64_t helper_xsrsp(CPUPPCState *env, uint64_t xb)
+{
+ helper_reset_fpstatus(env);
+
+ uint64_t xt = helper_frsp(env, xb);
+
+ helper_compute_fprf(env, xt);
+ float_check_status(env);
+ return xt;
+}
diff --git a/target/ppc/gdbstub.c b/target/ppc/gdbstub.c
new file mode 100644
index 0000000000..7a338136a8
--- /dev/null
+++ b/target/ppc/gdbstub.c
@@ -0,0 +1,321 @@
+/*
+ * PowerPC gdb server stub
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+
+static int ppc_gdb_register_len_apple(int n)
+{
+ switch (n) {
+ case 0 ... 31:
+ /* gprs */
+ return 8;
+ case 32 ... 63:
+ /* fprs */
+ return 8;
+ case 64 ... 95:
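+        /* altivec vector regs, 16 bytes each */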
+ return 16;
+ case 64+32: /* nip */
+ case 65+32: /* msr */
+ case 67+32: /* lr */
+ case 68+32: /* ctr */
+ case 69+32: /* xer */
+ case 70+32: /* fpscr */
+ return 8;
+ case 66+32: /* cr */
+ return 4;
+ default:
+ return 0;
+ }
+}
+
+static int ppc_gdb_register_len(int n)
+{
+ switch (n) {
+ case 0 ... 31:
+ /* gprs */
+ return sizeof(target_ulong);
+ case 32 ... 63:
+ /* fprs */
+ if (gdb_has_xml) {
+ return 0;
+ }
+ return 8;
+ case 66:
+ /* cr */
+ return 4;
+ case 64:
+ /* nip */
+ case 65:
+ /* msr */
+ case 67:
+ /* lr */
+ case 68:
+ /* ctr */
+ case 69:
+ /* xer */
+ return sizeof(target_ulong);
+ case 70:
+ /* fpscr */
+ if (gdb_has_xml) {
+ return 0;
+ }
+ return sizeof(target_ulong);
+ default:
+ return 0;
+ }
+}
+
+/* We need to present the registers to gdb in the "current" memory ordering.
+ For user-only mode we get this for free; TARGET_WORDS_BIGENDIAN is set to
+ the proper ordering for the binary, and cannot be changed.
+ For system mode, TARGET_WORDS_BIGENDIAN is always set, and we must check
+ the current mode of the chip to see if we're running in little-endian. */
+void ppc_maybe_bswap_register(CPUPPCState *env, uint8_t *mem_buf, int len)
+{
+#ifndef CONFIG_USER_ONLY
+ if (!msr_le) {
+ /* do nothing */
+ } else if (len == 4) {
+ bswap32s((uint32_t *)mem_buf);
+ } else if (len == 8) {
+ bswap64s((uint64_t *)mem_buf);
+ } else {
+ g_assert_not_reached();
+ }
+#endif
+}
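+
+/* Editor's example (illustrative): with MSR[LE] set, a 4-byte image such as
+ * CR = 0x12345678 is swapped to 0x78563412 before being exchanged with gdb,
+ * and 8-byte register images are swapped with bswap64s() in the same way.
+ */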
+
+/* Old gdb always expects FP registers. Newer (xml-aware) gdb only
+ * expects whatever the target description contains. Due to a
+ * historical mishap the FP registers appear in between core integer
+ * regs and PC, MSR, CR, and so forth. We hack round this by giving the
+ * FP regs zero size when talking to a newer gdb.
+ */
+
+int ppc_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ int r = ppc_gdb_register_len(n);
+
+ if (!r) {
+ return r;
+ }
+
+ if (n < 32) {
+ /* gprs */
+ gdb_get_regl(mem_buf, env->gpr[n]);
+ } else if (n < 64) {
+ /* fprs */
+ stfq_p(mem_buf, env->fpr[n-32]);
+ } else {
+ switch (n) {
+ case 64:
+ gdb_get_regl(mem_buf, env->nip);
+ break;
+ case 65:
+ gdb_get_regl(mem_buf, env->msr);
+ break;
+ case 66:
+ {
+ uint32_t cr = 0;
+ int i;
+ for (i = 0; i < 8; i++) {
+ cr |= env->crf[i] << (32 - ((i + 1) * 4));
+ }
+ gdb_get_reg32(mem_buf, cr);
+ break;
+ }
+ case 67:
+ gdb_get_regl(mem_buf, env->lr);
+ break;
+ case 68:
+ gdb_get_regl(mem_buf, env->ctr);
+ break;
+ case 69:
+ gdb_get_regl(mem_buf, env->xer);
+ break;
+ case 70:
+ gdb_get_reg32(mem_buf, env->fpscr);
+ break;
+ }
+ }
+ ppc_maybe_bswap_register(env, mem_buf, r);
+ return r;
+}
+
+int ppc_cpu_gdb_read_register_apple(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ int r = ppc_gdb_register_len_apple(n);
+
+ if (!r) {
+ return r;
+ }
+
+ if (n < 32) {
+ /* gprs */
+ gdb_get_reg64(mem_buf, env->gpr[n]);
+ } else if (n < 64) {
+ /* fprs */
+ stfq_p(mem_buf, env->fpr[n-32]);
+ } else if (n < 96) {
+ /* Altivec */
+ stq_p(mem_buf, n - 64);
+ stq_p(mem_buf + 8, 0);
+ } else {
+ switch (n) {
+ case 64 + 32:
+ gdb_get_reg64(mem_buf, env->nip);
+ break;
+ case 65 + 32:
+ gdb_get_reg64(mem_buf, env->msr);
+ break;
+ case 66 + 32:
+ {
+ uint32_t cr = 0;
+ int i;
+ for (i = 0; i < 8; i++) {
+ cr |= env->crf[i] << (32 - ((i + 1) * 4));
+ }
+ gdb_get_reg32(mem_buf, cr);
+ break;
+ }
+ case 67 + 32:
+ gdb_get_reg64(mem_buf, env->lr);
+ break;
+ case 68 + 32:
+ gdb_get_reg64(mem_buf, env->ctr);
+ break;
+ case 69 + 32:
+ gdb_get_reg64(mem_buf, env->xer);
+ break;
+ case 70 + 32:
+ gdb_get_reg64(mem_buf, env->fpscr);
+ break;
+ }
+ }
+ ppc_maybe_bswap_register(env, mem_buf, r);
+ return r;
+}
+
+int ppc_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ int r = ppc_gdb_register_len(n);
+
+ if (!r) {
+ return r;
+ }
+ ppc_maybe_bswap_register(env, mem_buf, r);
+ if (n < 32) {
+ /* gprs */
+ env->gpr[n] = ldtul_p(mem_buf);
+ } else if (n < 64) {
+ /* fprs */
+ env->fpr[n-32] = ldfq_p(mem_buf);
+ } else {
+ switch (n) {
+ case 64:
+ env->nip = ldtul_p(mem_buf);
+ break;
+ case 65:
+ ppc_store_msr(env, ldtul_p(mem_buf));
+ break;
+ case 66:
+ {
+ uint32_t cr = ldl_p(mem_buf);
+ int i;
+ for (i = 0; i < 8; i++) {
+ env->crf[i] = (cr >> (32 - ((i + 1) * 4))) & 0xF;
+ }
+ break;
+ }
+ case 67:
+ env->lr = ldtul_p(mem_buf);
+ break;
+ case 68:
+ env->ctr = ldtul_p(mem_buf);
+ break;
+ case 69:
+ env->xer = ldtul_p(mem_buf);
+ break;
+ case 70:
+ /* fpscr */
+ store_fpscr(env, ldtul_p(mem_buf), 0xffffffff);
+ break;
+ }
+ }
+ return r;
+}
+
+int ppc_cpu_gdb_write_register_apple(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ int r = ppc_gdb_register_len_apple(n);
+
+ if (!r) {
+ return r;
+ }
+ ppc_maybe_bswap_register(env, mem_buf, r);
+ if (n < 32) {
+ /* gprs */
+ env->gpr[n] = ldq_p(mem_buf);
+ } else if (n < 64) {
+ /* fprs */
+ env->fpr[n-32] = ldfq_p(mem_buf);
+ } else {
+ switch (n) {
+ case 64 + 32:
+ env->nip = ldq_p(mem_buf);
+ break;
+ case 65 + 32:
+ ppc_store_msr(env, ldq_p(mem_buf));
+ break;
+ case 66 + 32:
+ {
+ uint32_t cr = ldl_p(mem_buf);
+ int i;
+ for (i = 0; i < 8; i++) {
+ env->crf[i] = (cr >> (32 - ((i + 1) * 4))) & 0xF;
+ }
+ break;
+ }
+ case 67 + 32:
+ env->lr = ldq_p(mem_buf);
+ break;
+ case 68 + 32:
+ env->ctr = ldq_p(mem_buf);
+ break;
+ case 69 + 32:
+ env->xer = ldq_p(mem_buf);
+ break;
+ case 70 + 32:
+ /* fpscr */
+ store_fpscr(env, ldq_p(mem_buf), 0xffffffff);
+ break;
+ }
+ }
+ return r;
+}
diff --git a/target/ppc/helper.h b/target/ppc/helper.h
new file mode 100644
index 0000000000..da00f0ab49
--- /dev/null
+++ b/target/ppc/helper.h
@@ -0,0 +1,742 @@
+DEF_HELPER_FLAGS_3(raise_exception_err, TCG_CALL_NO_WG, void, env, i32, i32)
+DEF_HELPER_FLAGS_2(raise_exception, TCG_CALL_NO_WG, void, env, i32)
+DEF_HELPER_FLAGS_4(tw, TCG_CALL_NO_WG, void, env, tl, tl, i32)
+#if defined(TARGET_PPC64)
+DEF_HELPER_FLAGS_4(td, TCG_CALL_NO_WG, void, env, tl, tl, i32)
+#endif
+#if !defined(CONFIG_USER_ONLY)
+DEF_HELPER_2(store_msr, void, env, tl)
+DEF_HELPER_1(rfi, void, env)
+DEF_HELPER_1(rfsvc, void, env)
+DEF_HELPER_1(40x_rfci, void, env)
+DEF_HELPER_1(rfci, void, env)
+DEF_HELPER_1(rfdi, void, env)
+DEF_HELPER_1(rfmci, void, env)
+#if defined(TARGET_PPC64)
+DEF_HELPER_2(pminsn, void, env, i32)
+DEF_HELPER_1(rfid, void, env)
+DEF_HELPER_1(hrfid, void, env)
+DEF_HELPER_2(store_lpcr, void, env, tl)
+#endif
+DEF_HELPER_1(check_tlb_flush_local, void, env)
+DEF_HELPER_1(check_tlb_flush_global, void, env)
+#endif
+
+DEF_HELPER_3(lmw, void, env, tl, i32)
+DEF_HELPER_FLAGS_3(stmw, TCG_CALL_NO_WG, void, env, tl, i32)
+DEF_HELPER_4(lsw, void, env, tl, i32, i32)
+DEF_HELPER_5(lswx, void, env, tl, i32, i32, i32)
+DEF_HELPER_FLAGS_4(stsw, TCG_CALL_NO_WG, void, env, tl, i32, i32)
+DEF_HELPER_FLAGS_3(dcbz, TCG_CALL_NO_WG, void, env, tl, i32)
+DEF_HELPER_FLAGS_2(icbi, TCG_CALL_NO_WG, void, env, tl)
+DEF_HELPER_5(lscbx, tl, env, tl, i32, i32, i32)
+
+#if defined(TARGET_PPC64)
+DEF_HELPER_4(divdeu, i64, env, i64, i64, i32)
+DEF_HELPER_4(divde, i64, env, i64, i64, i32)
+#endif
+DEF_HELPER_4(divweu, tl, env, tl, tl, i32)
+DEF_HELPER_4(divwe, tl, env, tl, tl, i32)
+
+DEF_HELPER_FLAGS_1(cntlzw, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(cnttzw, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(popcntb, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(popcntw, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_2(cmpb, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_3(sraw, tl, env, tl, tl)
+#if defined(TARGET_PPC64)
+DEF_HELPER_FLAGS_2(cmpeqb, TCG_CALL_NO_RWG_SE, i32, tl, tl)
+DEF_HELPER_FLAGS_1(cntlzd, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(cnttzd, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_1(popcntd, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_2(bpermd, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_3(srad, tl, env, tl, tl)
+DEF_HELPER_0(darn32, tl)
+DEF_HELPER_0(darn64, tl)
+#endif
+
+DEF_HELPER_FLAGS_1(cntlsw32, TCG_CALL_NO_RWG_SE, i32, i32)
+DEF_HELPER_FLAGS_1(cntlzw32, TCG_CALL_NO_RWG_SE, i32, i32)
+DEF_HELPER_FLAGS_2(brinc, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+
+DEF_HELPER_1(float_check_status, void, env)
+DEF_HELPER_1(reset_fpstatus, void, env)
+DEF_HELPER_2(compute_fprf, void, env, i64)
+DEF_HELPER_3(store_fpscr, void, env, i64, i32)
+DEF_HELPER_2(fpscr_clrbit, void, env, i32)
+DEF_HELPER_2(fpscr_setbit, void, env, i32)
+DEF_HELPER_2(float64_to_float32, i32, env, i64)
+DEF_HELPER_2(float32_to_float64, i64, env, i32)
+
+DEF_HELPER_4(fcmpo, void, env, i64, i64, i32)
+DEF_HELPER_4(fcmpu, void, env, i64, i64, i32)
+
+DEF_HELPER_2(fctiw, i64, env, i64)
+DEF_HELPER_2(fctiwu, i64, env, i64)
+DEF_HELPER_2(fctiwz, i64, env, i64)
+DEF_HELPER_2(fctiwuz, i64, env, i64)
+DEF_HELPER_2(fcfid, i64, env, i64)
+DEF_HELPER_2(fcfidu, i64, env, i64)
+DEF_HELPER_2(fcfids, i64, env, i64)
+DEF_HELPER_2(fcfidus, i64, env, i64)
+DEF_HELPER_2(fctid, i64, env, i64)
+DEF_HELPER_2(fctidu, i64, env, i64)
+DEF_HELPER_2(fctidz, i64, env, i64)
+DEF_HELPER_2(fctiduz, i64, env, i64)
+DEF_HELPER_2(frsp, i64, env, i64)
+DEF_HELPER_2(frin, i64, env, i64)
+DEF_HELPER_2(friz, i64, env, i64)
+DEF_HELPER_2(frip, i64, env, i64)
+DEF_HELPER_2(frim, i64, env, i64)
+
+DEF_HELPER_3(fadd, i64, env, i64, i64)
+DEF_HELPER_3(fsub, i64, env, i64, i64)
+DEF_HELPER_3(fmul, i64, env, i64, i64)
+DEF_HELPER_3(fdiv, i64, env, i64, i64)
+DEF_HELPER_4(fmadd, i64, env, i64, i64, i64)
+DEF_HELPER_4(fmsub, i64, env, i64, i64, i64)
+DEF_HELPER_4(fnmadd, i64, env, i64, i64, i64)
+DEF_HELPER_4(fnmsub, i64, env, i64, i64, i64)
+DEF_HELPER_2(fsqrt, i64, env, i64)
+DEF_HELPER_2(fre, i64, env, i64)
+DEF_HELPER_2(fres, i64, env, i64)
+DEF_HELPER_2(frsqrte, i64, env, i64)
+DEF_HELPER_4(fsel, i64, env, i64, i64, i64)
+
+DEF_HELPER_FLAGS_2(ftdiv, TCG_CALL_NO_RWG_SE, i32, i64, i64)
+DEF_HELPER_FLAGS_1(ftsqrt, TCG_CALL_NO_RWG_SE, i32, i64)
+
+#define dh_alias_avr ptr
+#define dh_ctype_avr ppc_avr_t *
+#define dh_is_signed_avr dh_is_signed_ptr
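+
+/* Editor's note (illustrative): these three defines teach the DEF_HELPER
+ * machinery about the argument type name "avr": it is passed like a plain
+ * pointer at the TCG call level (dh_alias), appears as ppc_avr_t * in the
+ * generated C prototypes (dh_ctype), and inherits a pointer's signedness.
+ * An entry such as DEF_HELPER_3(vaddubm, void, avr, avr, avr) below then
+ * expands to a prototype along the lines of:
+ *
+ *     void helper_vaddubm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b);
+ */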
+
+DEF_HELPER_3(vaddubm, void, avr, avr, avr)
+DEF_HELPER_3(vadduhm, void, avr, avr, avr)
+DEF_HELPER_3(vadduwm, void, avr, avr, avr)
+DEF_HELPER_3(vaddudm, void, avr, avr, avr)
+DEF_HELPER_3(vsububm, void, avr, avr, avr)
+DEF_HELPER_3(vsubuhm, void, avr, avr, avr)
+DEF_HELPER_3(vsubuwm, void, avr, avr, avr)
+DEF_HELPER_3(vsubudm, void, avr, avr, avr)
+DEF_HELPER_3(vavgub, void, avr, avr, avr)
+DEF_HELPER_3(vavguh, void, avr, avr, avr)
+DEF_HELPER_3(vavguw, void, avr, avr, avr)
+DEF_HELPER_3(vabsdub, void, avr, avr, avr)
+DEF_HELPER_3(vabsduh, void, avr, avr, avr)
+DEF_HELPER_3(vabsduw, void, avr, avr, avr)
+DEF_HELPER_3(vavgsb, void, avr, avr, avr)
+DEF_HELPER_3(vavgsh, void, avr, avr, avr)
+DEF_HELPER_3(vavgsw, void, avr, avr, avr)
+DEF_HELPER_3(vminsb, void, avr, avr, avr)
+DEF_HELPER_3(vminsh, void, avr, avr, avr)
+DEF_HELPER_3(vminsw, void, avr, avr, avr)
+DEF_HELPER_3(vminsd, void, avr, avr, avr)
+DEF_HELPER_3(vmaxsb, void, avr, avr, avr)
+DEF_HELPER_3(vmaxsh, void, avr, avr, avr)
+DEF_HELPER_3(vmaxsw, void, avr, avr, avr)
+DEF_HELPER_3(vmaxsd, void, avr, avr, avr)
+DEF_HELPER_3(vminub, void, avr, avr, avr)
+DEF_HELPER_3(vminuh, void, avr, avr, avr)
+DEF_HELPER_3(vminuw, void, avr, avr, avr)
+DEF_HELPER_3(vminud, void, avr, avr, avr)
+DEF_HELPER_3(vmaxub, void, avr, avr, avr)
+DEF_HELPER_3(vmaxuh, void, avr, avr, avr)
+DEF_HELPER_3(vmaxuw, void, avr, avr, avr)
+DEF_HELPER_3(vmaxud, void, avr, avr, avr)
+DEF_HELPER_4(vcmpequb, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpequh, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpequw, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpequd, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpneb, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpneh, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpnew, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpnezb, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpnezh, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpnezw, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgtub, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgtuh, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgtuw, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgtud, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgtsb, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgtsh, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgtsw, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgtsd, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpeqfp, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgefp, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgtfp, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpbfp, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpequb_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpequh_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpequw_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpequd_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpneb_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpneh_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpnew_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpnezb_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpnezh_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpnezw_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgtub_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgtuh_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgtuw_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgtud_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgtsb_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgtsh_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgtsw_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgtsd_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpeqfp_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgefp_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpgtfp_dot, void, env, avr, avr, avr)
+DEF_HELPER_4(vcmpbfp_dot, void, env, avr, avr, avr)
+DEF_HELPER_3(vmrglb, void, avr, avr, avr)
+DEF_HELPER_3(vmrglh, void, avr, avr, avr)
+DEF_HELPER_3(vmrglw, void, avr, avr, avr)
+DEF_HELPER_3(vmrghb, void, avr, avr, avr)
+DEF_HELPER_3(vmrghh, void, avr, avr, avr)
+DEF_HELPER_3(vmrghw, void, avr, avr, avr)
+DEF_HELPER_3(vmulesb, void, avr, avr, avr)
+DEF_HELPER_3(vmulesh, void, avr, avr, avr)
+DEF_HELPER_3(vmulesw, void, avr, avr, avr)
+DEF_HELPER_3(vmuleub, void, avr, avr, avr)
+DEF_HELPER_3(vmuleuh, void, avr, avr, avr)
+DEF_HELPER_3(vmuleuw, void, avr, avr, avr)
+DEF_HELPER_3(vmulosb, void, avr, avr, avr)
+DEF_HELPER_3(vmulosh, void, avr, avr, avr)
+DEF_HELPER_3(vmulosw, void, avr, avr, avr)
+DEF_HELPER_3(vmuloub, void, avr, avr, avr)
+DEF_HELPER_3(vmulouh, void, avr, avr, avr)
+DEF_HELPER_3(vmulouw, void, avr, avr, avr)
+DEF_HELPER_3(vmuluwm, void, avr, avr, avr)
+DEF_HELPER_3(vsrab, void, avr, avr, avr)
+DEF_HELPER_3(vsrah, void, avr, avr, avr)
+DEF_HELPER_3(vsraw, void, avr, avr, avr)
+DEF_HELPER_3(vsrad, void, avr, avr, avr)
+DEF_HELPER_3(vsrb, void, avr, avr, avr)
+DEF_HELPER_3(vsrh, void, avr, avr, avr)
+DEF_HELPER_3(vsrw, void, avr, avr, avr)
+DEF_HELPER_3(vsrd, void, avr, avr, avr)
+DEF_HELPER_3(vslb, void, avr, avr, avr)
+DEF_HELPER_3(vslh, void, avr, avr, avr)
+DEF_HELPER_3(vslw, void, avr, avr, avr)
+DEF_HELPER_3(vsld, void, avr, avr, avr)
+DEF_HELPER_3(vslo, void, avr, avr, avr)
+DEF_HELPER_3(vsro, void, avr, avr, avr)
+DEF_HELPER_3(vsrv, void, avr, avr, avr)
+DEF_HELPER_3(vslv, void, avr, avr, avr)
+DEF_HELPER_3(vaddcuw, void, avr, avr, avr)
+DEF_HELPER_2(vprtybw, void, avr, avr)
+DEF_HELPER_2(vprtybd, void, avr, avr)
+DEF_HELPER_2(vprtybq, void, avr, avr)
+DEF_HELPER_3(vsubcuw, void, avr, avr, avr)
+DEF_HELPER_2(lvsl, void, avr, tl)
+DEF_HELPER_2(lvsr, void, avr, tl)
+DEF_HELPER_4(vaddsbs, void, env, avr, avr, avr)
+DEF_HELPER_4(vaddshs, void, env, avr, avr, avr)
+DEF_HELPER_4(vaddsws, void, env, avr, avr, avr)
+DEF_HELPER_4(vsubsbs, void, env, avr, avr, avr)
+DEF_HELPER_4(vsubshs, void, env, avr, avr, avr)
+DEF_HELPER_4(vsubsws, void, env, avr, avr, avr)
+DEF_HELPER_4(vaddubs, void, env, avr, avr, avr)
+DEF_HELPER_4(vadduhs, void, env, avr, avr, avr)
+DEF_HELPER_4(vadduws, void, env, avr, avr, avr)
+DEF_HELPER_4(vsububs, void, env, avr, avr, avr)
+DEF_HELPER_4(vsubuhs, void, env, avr, avr, avr)
+DEF_HELPER_4(vsubuws, void, env, avr, avr, avr)
+DEF_HELPER_3(vadduqm, void, avr, avr, avr)
+DEF_HELPER_4(vaddecuq, void, avr, avr, avr, avr)
+DEF_HELPER_4(vaddeuqm, void, avr, avr, avr, avr)
+DEF_HELPER_3(vaddcuq, void, avr, avr, avr)
+DEF_HELPER_3(vsubuqm, void, avr, avr, avr)
+DEF_HELPER_4(vsubecuq, void, avr, avr, avr, avr)
+DEF_HELPER_4(vsubeuqm, void, avr, avr, avr, avr)
+DEF_HELPER_3(vsubcuq, void, avr, avr, avr)
+DEF_HELPER_3(vrlb, void, avr, avr, avr)
+DEF_HELPER_3(vrlh, void, avr, avr, avr)
+DEF_HELPER_3(vrlw, void, avr, avr, avr)
+DEF_HELPER_3(vrld, void, avr, avr, avr)
+DEF_HELPER_3(vsl, void, avr, avr, avr)
+DEF_HELPER_3(vsr, void, avr, avr, avr)
+DEF_HELPER_4(vsldoi, void, avr, avr, avr, i32)
+DEF_HELPER_2(vspltisb, void, avr, i32)
+DEF_HELPER_2(vspltish, void, avr, i32)
+DEF_HELPER_2(vspltisw, void, avr, i32)
+DEF_HELPER_3(vspltb, void, avr, avr, i32)
+DEF_HELPER_3(vsplth, void, avr, avr, i32)
+DEF_HELPER_3(vspltw, void, avr, avr, i32)
+DEF_HELPER_3(vextractub, void, avr, avr, i32)
+DEF_HELPER_3(vextractuh, void, avr, avr, i32)
+DEF_HELPER_3(vextractuw, void, avr, avr, i32)
+DEF_HELPER_3(vextractd, void, avr, avr, i32)
+DEF_HELPER_3(vinsertb, void, avr, avr, i32)
+DEF_HELPER_3(vinserth, void, avr, avr, i32)
+DEF_HELPER_3(vinsertw, void, avr, avr, i32)
+DEF_HELPER_3(vinsertd, void, avr, avr, i32)
+DEF_HELPER_2(vextsb2w, void, avr, avr)
+DEF_HELPER_2(vextsh2w, void, avr, avr)
+DEF_HELPER_2(vextsb2d, void, avr, avr)
+DEF_HELPER_2(vextsh2d, void, avr, avr)
+DEF_HELPER_2(vextsw2d, void, avr, avr)
+DEF_HELPER_2(vnegw, void, avr, avr)
+DEF_HELPER_2(vnegd, void, avr, avr)
+DEF_HELPER_2(vupkhpx, void, avr, avr)
+DEF_HELPER_2(vupklpx, void, avr, avr)
+DEF_HELPER_2(vupkhsb, void, avr, avr)
+DEF_HELPER_2(vupkhsh, void, avr, avr)
+DEF_HELPER_2(vupkhsw, void, avr, avr)
+DEF_HELPER_2(vupklsb, void, avr, avr)
+DEF_HELPER_2(vupklsh, void, avr, avr)
+DEF_HELPER_2(vupklsw, void, avr, avr)
+DEF_HELPER_5(vmsumubm, void, env, avr, avr, avr, avr)
+DEF_HELPER_5(vmsummbm, void, env, avr, avr, avr, avr)
+DEF_HELPER_5(vsel, void, env, avr, avr, avr, avr)
+DEF_HELPER_5(vperm, void, env, avr, avr, avr, avr)
+DEF_HELPER_5(vpermr, void, env, avr, avr, avr, avr)
+DEF_HELPER_4(vpkshss, void, env, avr, avr, avr)
+DEF_HELPER_4(vpkshus, void, env, avr, avr, avr)
+DEF_HELPER_4(vpkswss, void, env, avr, avr, avr)
+DEF_HELPER_4(vpkswus, void, env, avr, avr, avr)
+DEF_HELPER_4(vpksdss, void, env, avr, avr, avr)
+DEF_HELPER_4(vpksdus, void, env, avr, avr, avr)
+DEF_HELPER_4(vpkuhus, void, env, avr, avr, avr)
+DEF_HELPER_4(vpkuwus, void, env, avr, avr, avr)
+DEF_HELPER_4(vpkudus, void, env, avr, avr, avr)
+DEF_HELPER_4(vpkuhum, void, env, avr, avr, avr)
+DEF_HELPER_4(vpkuwum, void, env, avr, avr, avr)
+DEF_HELPER_4(vpkudum, void, env, avr, avr, avr)
+DEF_HELPER_3(vpkpx, void, avr, avr, avr)
+DEF_HELPER_5(vmhaddshs, void, env, avr, avr, avr, avr)
+DEF_HELPER_5(vmhraddshs, void, env, avr, avr, avr, avr)
+DEF_HELPER_5(vmsumuhm, void, env, avr, avr, avr, avr)
+DEF_HELPER_5(vmsumuhs, void, env, avr, avr, avr, avr)
+DEF_HELPER_5(vmsumshm, void, env, avr, avr, avr, avr)
+DEF_HELPER_5(vmsumshs, void, env, avr, avr, avr, avr)
+DEF_HELPER_4(vmladduhm, void, avr, avr, avr, avr)
+DEF_HELPER_2(mtvscr, void, env, avr)
+DEF_HELPER_3(lvebx, void, env, avr, tl)
+DEF_HELPER_3(lvehx, void, env, avr, tl)
+DEF_HELPER_3(lvewx, void, env, avr, tl)
+DEF_HELPER_3(stvebx, void, env, avr, tl)
+DEF_HELPER_3(stvehx, void, env, avr, tl)
+DEF_HELPER_3(stvewx, void, env, avr, tl)
+DEF_HELPER_4(vsumsws, void, env, avr, avr, avr)
+DEF_HELPER_4(vsum2sws, void, env, avr, avr, avr)
+DEF_HELPER_4(vsum4sbs, void, env, avr, avr, avr)
+DEF_HELPER_4(vsum4shs, void, env, avr, avr, avr)
+DEF_HELPER_4(vsum4ubs, void, env, avr, avr, avr)
+DEF_HELPER_4(vaddfp, void, env, avr, avr, avr)
+DEF_HELPER_4(vsubfp, void, env, avr, avr, avr)
+DEF_HELPER_4(vmaxfp, void, env, avr, avr, avr)
+DEF_HELPER_4(vminfp, void, env, avr, avr, avr)
+DEF_HELPER_3(vrefp, void, env, avr, avr)
+DEF_HELPER_3(vrsqrtefp, void, env, avr, avr)
+DEF_HELPER_3(vrlwmi, void, avr, avr, avr)
+DEF_HELPER_3(vrldmi, void, avr, avr, avr)
+DEF_HELPER_3(vrldnm, void, avr, avr, avr)
+DEF_HELPER_3(vrlwnm, void, avr, avr, avr)
+DEF_HELPER_5(vmaddfp, void, env, avr, avr, avr, avr)
+DEF_HELPER_5(vnmsubfp, void, env, avr, avr, avr, avr)
+DEF_HELPER_3(vexptefp, void, env, avr, avr)
+DEF_HELPER_3(vlogefp, void, env, avr, avr)
+DEF_HELPER_3(vrfim, void, env, avr, avr)
+DEF_HELPER_3(vrfin, void, env, avr, avr)
+DEF_HELPER_3(vrfip, void, env, avr, avr)
+DEF_HELPER_3(vrfiz, void, env, avr, avr)
+DEF_HELPER_4(vcfux, void, env, avr, avr, i32)
+DEF_HELPER_4(vcfsx, void, env, avr, avr, i32)
+DEF_HELPER_4(vctuxs, void, env, avr, avr, i32)
+DEF_HELPER_4(vctsxs, void, env, avr, avr, i32)
+
+DEF_HELPER_2(vclzb, void, avr, avr)
+DEF_HELPER_2(vclzh, void, avr, avr)
+DEF_HELPER_2(vclzw, void, avr, avr)
+DEF_HELPER_2(vclzd, void, avr, avr)
+DEF_HELPER_2(vctzb, void, avr, avr)
+DEF_HELPER_2(vctzh, void, avr, avr)
+DEF_HELPER_2(vctzw, void, avr, avr)
+DEF_HELPER_2(vctzd, void, avr, avr)
+DEF_HELPER_2(vpopcntb, void, avr, avr)
+DEF_HELPER_2(vpopcnth, void, avr, avr)
+DEF_HELPER_2(vpopcntw, void, avr, avr)
+DEF_HELPER_2(vpopcntd, void, avr, avr)
+DEF_HELPER_1(vclzlsbb, tl, avr)
+DEF_HELPER_1(vctzlsbb, tl, avr)
+DEF_HELPER_3(vbpermd, void, avr, avr, avr)
+DEF_HELPER_3(vbpermq, void, avr, avr, avr)
+DEF_HELPER_2(vgbbd, void, avr, avr)
+DEF_HELPER_3(vpmsumb, void, avr, avr, avr)
+DEF_HELPER_3(vpmsumh, void, avr, avr, avr)
+DEF_HELPER_3(vpmsumw, void, avr, avr, avr)
+DEF_HELPER_3(vpmsumd, void, avr, avr, avr)
+
+DEF_HELPER_2(vsbox, void, avr, avr)
+DEF_HELPER_3(vcipher, void, avr, avr, avr)
+DEF_HELPER_3(vcipherlast, void, avr, avr, avr)
+DEF_HELPER_3(vncipher, void, avr, avr, avr)
+DEF_HELPER_3(vncipherlast, void, avr, avr, avr)
+DEF_HELPER_3(vshasigmaw, void, avr, avr, i32)
+DEF_HELPER_3(vshasigmad, void, avr, avr, i32)
+DEF_HELPER_4(vpermxor, void, avr, avr, avr, avr)
+
+DEF_HELPER_4(bcdadd, i32, avr, avr, avr, i32)
+DEF_HELPER_4(bcdsub, i32, avr, avr, avr, i32)
+DEF_HELPER_3(bcdcfn, i32, avr, avr, i32)
+DEF_HELPER_3(bcdctn, i32, avr, avr, i32)
+DEF_HELPER_3(bcdcfz, i32, avr, avr, i32)
+DEF_HELPER_3(bcdctz, i32, avr, avr, i32)
+
+DEF_HELPER_2(xsadddp, void, env, i32)
+DEF_HELPER_2(xssubdp, void, env, i32)
+DEF_HELPER_2(xsmuldp, void, env, i32)
+DEF_HELPER_2(xsdivdp, void, env, i32)
+DEF_HELPER_2(xsredp, void, env, i32)
+DEF_HELPER_2(xssqrtdp, void, env, i32)
+DEF_HELPER_2(xsrsqrtedp, void, env, i32)
+DEF_HELPER_2(xstdivdp, void, env, i32)
+DEF_HELPER_2(xstsqrtdp, void, env, i32)
+DEF_HELPER_2(xsmaddadp, void, env, i32)
+DEF_HELPER_2(xsmaddmdp, void, env, i32)
+DEF_HELPER_2(xsmsubadp, void, env, i32)
+DEF_HELPER_2(xsmsubmdp, void, env, i32)
+DEF_HELPER_2(xsnmaddadp, void, env, i32)
+DEF_HELPER_2(xsnmaddmdp, void, env, i32)
+DEF_HELPER_2(xsnmsubadp, void, env, i32)
+DEF_HELPER_2(xsnmsubmdp, void, env, i32)
+DEF_HELPER_2(xscmpeqdp, void, env, i32)
+DEF_HELPER_2(xscmpgtdp, void, env, i32)
+DEF_HELPER_2(xscmpgedp, void, env, i32)
+DEF_HELPER_2(xscmpnedp, void, env, i32)
+DEF_HELPER_2(xscmpodp, void, env, i32)
+DEF_HELPER_2(xscmpudp, void, env, i32)
+DEF_HELPER_2(xsmaxdp, void, env, i32)
+DEF_HELPER_2(xsmindp, void, env, i32)
+DEF_HELPER_2(xscvdpsp, void, env, i32)
+DEF_HELPER_2(xscvdpspn, i64, env, i64)
+DEF_HELPER_2(xscvspdp, void, env, i32)
+DEF_HELPER_2(xscvspdpn, i64, env, i64)
+DEF_HELPER_2(xscvdpsxds, void, env, i32)
+DEF_HELPER_2(xscvdpsxws, void, env, i32)
+DEF_HELPER_2(xscvdpuxds, void, env, i32)
+DEF_HELPER_2(xscvdpuxws, void, env, i32)
+DEF_HELPER_2(xscvsxddp, void, env, i32)
+DEF_HELPER_2(xscvuxdsp, void, env, i32)
+DEF_HELPER_2(xscvsxdsp, void, env, i32)
+DEF_HELPER_2(xscvuxddp, void, env, i32)
+DEF_HELPER_2(xsrdpi, void, env, i32)
+DEF_HELPER_2(xsrdpic, void, env, i32)
+DEF_HELPER_2(xsrdpim, void, env, i32)
+DEF_HELPER_2(xsrdpip, void, env, i32)
+DEF_HELPER_2(xsrdpiz, void, env, i32)
+
+DEF_HELPER_2(xsaddsp, void, env, i32)
+DEF_HELPER_2(xssubsp, void, env, i32)
+DEF_HELPER_2(xsmulsp, void, env, i32)
+DEF_HELPER_2(xsdivsp, void, env, i32)
+DEF_HELPER_2(xsresp, void, env, i32)
+DEF_HELPER_2(xsrsp, i64, env, i64)
+DEF_HELPER_2(xssqrtsp, void, env, i32)
+DEF_HELPER_2(xsrsqrtesp, void, env, i32)
+DEF_HELPER_2(xsmaddasp, void, env, i32)
+DEF_HELPER_2(xsmaddmsp, void, env, i32)
+DEF_HELPER_2(xsmsubasp, void, env, i32)
+DEF_HELPER_2(xsmsubmsp, void, env, i32)
+DEF_HELPER_2(xsnmaddasp, void, env, i32)
+DEF_HELPER_2(xsnmaddmsp, void, env, i32)
+DEF_HELPER_2(xsnmsubasp, void, env, i32)
+DEF_HELPER_2(xsnmsubmsp, void, env, i32)
+
+DEF_HELPER_2(xvadddp, void, env, i32)
+DEF_HELPER_2(xvsubdp, void, env, i32)
+DEF_HELPER_2(xvmuldp, void, env, i32)
+DEF_HELPER_2(xvdivdp, void, env, i32)
+DEF_HELPER_2(xvredp, void, env, i32)
+DEF_HELPER_2(xvsqrtdp, void, env, i32)
+DEF_HELPER_2(xvrsqrtedp, void, env, i32)
+DEF_HELPER_2(xvtdivdp, void, env, i32)
+DEF_HELPER_2(xvtsqrtdp, void, env, i32)
+DEF_HELPER_2(xvmaddadp, void, env, i32)
+DEF_HELPER_2(xvmaddmdp, void, env, i32)
+DEF_HELPER_2(xvmsubadp, void, env, i32)
+DEF_HELPER_2(xvmsubmdp, void, env, i32)
+DEF_HELPER_2(xvnmaddadp, void, env, i32)
+DEF_HELPER_2(xvnmaddmdp, void, env, i32)
+DEF_HELPER_2(xvnmsubadp, void, env, i32)
+DEF_HELPER_2(xvnmsubmdp, void, env, i32)
+DEF_HELPER_2(xvmaxdp, void, env, i32)
+DEF_HELPER_2(xvmindp, void, env, i32)
+DEF_HELPER_2(xvcmpeqdp, void, env, i32)
+DEF_HELPER_2(xvcmpgedp, void, env, i32)
+DEF_HELPER_2(xvcmpgtdp, void, env, i32)
+DEF_HELPER_2(xvcmpnedp, void, env, i32)
+DEF_HELPER_2(xvcvdpsp, void, env, i32)
+DEF_HELPER_2(xvcvdpsxds, void, env, i32)
+DEF_HELPER_2(xvcvdpsxws, void, env, i32)
+DEF_HELPER_2(xvcvdpuxds, void, env, i32)
+DEF_HELPER_2(xvcvdpuxws, void, env, i32)
+DEF_HELPER_2(xvcvsxddp, void, env, i32)
+DEF_HELPER_2(xvcvuxddp, void, env, i32)
+DEF_HELPER_2(xvcvsxwdp, void, env, i32)
+DEF_HELPER_2(xvcvuxwdp, void, env, i32)
+DEF_HELPER_2(xvrdpi, void, env, i32)
+DEF_HELPER_2(xvrdpic, void, env, i32)
+DEF_HELPER_2(xvrdpim, void, env, i32)
+DEF_HELPER_2(xvrdpip, void, env, i32)
+DEF_HELPER_2(xvrdpiz, void, env, i32)
+
+DEF_HELPER_2(xvaddsp, void, env, i32)
+DEF_HELPER_2(xvsubsp, void, env, i32)
+DEF_HELPER_2(xvmulsp, void, env, i32)
+DEF_HELPER_2(xvdivsp, void, env, i32)
+DEF_HELPER_2(xvresp, void, env, i32)
+DEF_HELPER_2(xvsqrtsp, void, env, i32)
+DEF_HELPER_2(xvrsqrtesp, void, env, i32)
+DEF_HELPER_2(xvtdivsp, void, env, i32)
+DEF_HELPER_2(xvtsqrtsp, void, env, i32)
+DEF_HELPER_2(xvmaddasp, void, env, i32)
+DEF_HELPER_2(xvmaddmsp, void, env, i32)
+DEF_HELPER_2(xvmsubasp, void, env, i32)
+DEF_HELPER_2(xvmsubmsp, void, env, i32)
+DEF_HELPER_2(xvnmaddasp, void, env, i32)
+DEF_HELPER_2(xvnmaddmsp, void, env, i32)
+DEF_HELPER_2(xvnmsubasp, void, env, i32)
+DEF_HELPER_2(xvnmsubmsp, void, env, i32)
+DEF_HELPER_2(xvmaxsp, void, env, i32)
+DEF_HELPER_2(xvminsp, void, env, i32)
+DEF_HELPER_2(xvcmpeqsp, void, env, i32)
+DEF_HELPER_2(xvcmpgesp, void, env, i32)
+DEF_HELPER_2(xvcmpgtsp, void, env, i32)
+DEF_HELPER_2(xvcmpnesp, void, env, i32)
+DEF_HELPER_2(xvcvspdp, void, env, i32)
+DEF_HELPER_2(xvcvspsxds, void, env, i32)
+DEF_HELPER_2(xvcvspsxws, void, env, i32)
+DEF_HELPER_2(xvcvspuxds, void, env, i32)
+DEF_HELPER_2(xvcvspuxws, void, env, i32)
+DEF_HELPER_2(xvcvsxdsp, void, env, i32)
+DEF_HELPER_2(xvcvuxdsp, void, env, i32)
+DEF_HELPER_2(xvcvsxwsp, void, env, i32)
+DEF_HELPER_2(xvcvuxwsp, void, env, i32)
+DEF_HELPER_2(xvrspi, void, env, i32)
+DEF_HELPER_2(xvrspic, void, env, i32)
+DEF_HELPER_2(xvrspim, void, env, i32)
+DEF_HELPER_2(xvrspip, void, env, i32)
+DEF_HELPER_2(xvrspiz, void, env, i32)
+
+DEF_HELPER_2(efscfsi, i32, env, i32)
+DEF_HELPER_2(efscfui, i32, env, i32)
+DEF_HELPER_2(efscfuf, i32, env, i32)
+DEF_HELPER_2(efscfsf, i32, env, i32)
+DEF_HELPER_2(efsctsi, i32, env, i32)
+DEF_HELPER_2(efsctui, i32, env, i32)
+DEF_HELPER_2(efsctsiz, i32, env, i32)
+DEF_HELPER_2(efsctuiz, i32, env, i32)
+DEF_HELPER_2(efsctsf, i32, env, i32)
+DEF_HELPER_2(efsctuf, i32, env, i32)
+DEF_HELPER_2(evfscfsi, i64, env, i64)
+DEF_HELPER_2(evfscfui, i64, env, i64)
+DEF_HELPER_2(evfscfuf, i64, env, i64)
+DEF_HELPER_2(evfscfsf, i64, env, i64)
+DEF_HELPER_2(evfsctsi, i64, env, i64)
+DEF_HELPER_2(evfsctui, i64, env, i64)
+DEF_HELPER_2(evfsctsiz, i64, env, i64)
+DEF_HELPER_2(evfsctuiz, i64, env, i64)
+DEF_HELPER_2(evfsctsf, i64, env, i64)
+DEF_HELPER_2(evfsctuf, i64, env, i64)
+DEF_HELPER_3(efsadd, i32, env, i32, i32)
+DEF_HELPER_3(efssub, i32, env, i32, i32)
+DEF_HELPER_3(efsmul, i32, env, i32, i32)
+DEF_HELPER_3(efsdiv, i32, env, i32, i32)
+DEF_HELPER_3(evfsadd, i64, env, i64, i64)
+DEF_HELPER_3(evfssub, i64, env, i64, i64)
+DEF_HELPER_3(evfsmul, i64, env, i64, i64)
+DEF_HELPER_3(evfsdiv, i64, env, i64, i64)
+DEF_HELPER_3(efststlt, i32, env, i32, i32)
+DEF_HELPER_3(efststgt, i32, env, i32, i32)
+DEF_HELPER_3(efststeq, i32, env, i32, i32)
+DEF_HELPER_3(efscmplt, i32, env, i32, i32)
+DEF_HELPER_3(efscmpgt, i32, env, i32, i32)
+DEF_HELPER_3(efscmpeq, i32, env, i32, i32)
+DEF_HELPER_3(evfststlt, i32, env, i64, i64)
+DEF_HELPER_3(evfststgt, i32, env, i64, i64)
+DEF_HELPER_3(evfststeq, i32, env, i64, i64)
+DEF_HELPER_3(evfscmplt, i32, env, i64, i64)
+DEF_HELPER_3(evfscmpgt, i32, env, i64, i64)
+DEF_HELPER_3(evfscmpeq, i32, env, i64, i64)
+DEF_HELPER_2(efdcfsi, i64, env, i32)
+DEF_HELPER_2(efdcfsid, i64, env, i64)
+DEF_HELPER_2(efdcfui, i64, env, i32)
+DEF_HELPER_2(efdcfuid, i64, env, i64)
+DEF_HELPER_2(efdctsi, i32, env, i64)
+DEF_HELPER_2(efdctui, i32, env, i64)
+DEF_HELPER_2(efdctsiz, i32, env, i64)
+DEF_HELPER_2(efdctsidz, i64, env, i64)
+DEF_HELPER_2(efdctuiz, i32, env, i64)
+DEF_HELPER_2(efdctuidz, i64, env, i64)
+DEF_HELPER_2(efdcfsf, i64, env, i32)
+DEF_HELPER_2(efdcfuf, i64, env, i32)
+DEF_HELPER_2(efdctsf, i32, env, i64)
+DEF_HELPER_2(efdctuf, i32, env, i64)
+DEF_HELPER_2(efscfd, i32, env, i64)
+DEF_HELPER_2(efdcfs, i64, env, i32)
+DEF_HELPER_3(efdadd, i64, env, i64, i64)
+DEF_HELPER_3(efdsub, i64, env, i64, i64)
+DEF_HELPER_3(efdmul, i64, env, i64, i64)
+DEF_HELPER_3(efddiv, i64, env, i64, i64)
+DEF_HELPER_3(efdtstlt, i32, env, i64, i64)
+DEF_HELPER_3(efdtstgt, i32, env, i64, i64)
+DEF_HELPER_3(efdtsteq, i32, env, i64, i64)
+DEF_HELPER_3(efdcmplt, i32, env, i64, i64)
+DEF_HELPER_3(efdcmpgt, i32, env, i64, i64)
+DEF_HELPER_3(efdcmpeq, i32, env, i64, i64)
+
+#if !defined(CONFIG_USER_ONLY)
+DEF_HELPER_2(4xx_tlbre_hi, tl, env, tl)
+DEF_HELPER_2(4xx_tlbre_lo, tl, env, tl)
+DEF_HELPER_3(4xx_tlbwe_hi, void, env, tl, tl)
+DEF_HELPER_3(4xx_tlbwe_lo, void, env, tl, tl)
+DEF_HELPER_2(4xx_tlbsx, tl, env, tl)
+DEF_HELPER_3(440_tlbre, tl, env, i32, tl)
+DEF_HELPER_4(440_tlbwe, void, env, i32, tl, tl)
+DEF_HELPER_2(440_tlbsx, tl, env, tl)
+DEF_HELPER_1(booke206_tlbre, void, env)
+DEF_HELPER_1(booke206_tlbwe, void, env)
+DEF_HELPER_2(booke206_tlbsx, void, env, tl)
+DEF_HELPER_2(booke206_tlbivax, void, env, tl)
+DEF_HELPER_2(booke206_tlbilx0, void, env, tl)
+DEF_HELPER_2(booke206_tlbilx1, void, env, tl)
+DEF_HELPER_2(booke206_tlbilx3, void, env, tl)
+DEF_HELPER_2(booke206_tlbflush, void, env, tl)
+DEF_HELPER_3(booke_setpid, void, env, i32, tl)
+DEF_HELPER_2(6xx_tlbd, void, env, tl)
+DEF_HELPER_2(6xx_tlbi, void, env, tl)
+DEF_HELPER_2(74xx_tlbd, void, env, tl)
+DEF_HELPER_2(74xx_tlbi, void, env, tl)
+DEF_HELPER_FLAGS_1(tlbia, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_FLAGS_2(tlbie, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(tlbiva, TCG_CALL_NO_RWG, void, env, tl)
+#if defined(TARGET_PPC64)
+DEF_HELPER_FLAGS_3(store_slb, TCG_CALL_NO_RWG, void, env, tl, tl)
+DEF_HELPER_2(load_slb_esid, tl, env, tl)
+DEF_HELPER_2(load_slb_vsid, tl, env, tl)
+DEF_HELPER_2(find_slb_vsid, tl, env, tl)
+DEF_HELPER_FLAGS_1(slbia, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_FLAGS_2(slbie, TCG_CALL_NO_RWG, void, env, tl)
+#endif
+DEF_HELPER_FLAGS_2(load_sr, TCG_CALL_NO_RWG, tl, env, tl)
+DEF_HELPER_FLAGS_3(store_sr, TCG_CALL_NO_RWG, void, env, tl, tl)
+
+DEF_HELPER_FLAGS_1(602_mfrom, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_1(msgsnd, void, tl)
+DEF_HELPER_2(msgclr, void, env, tl)
+#endif
+
+DEF_HELPER_4(dlmzb, tl, env, tl, tl, i32)
+DEF_HELPER_FLAGS_2(clcs, TCG_CALL_NO_RWG_SE, tl, env, i32)
+#if !defined(CONFIG_USER_ONLY)
+DEF_HELPER_2(rac, tl, env, tl)
+#endif
+DEF_HELPER_3(div, tl, env, tl, tl)
+DEF_HELPER_3(divo, tl, env, tl, tl)
+DEF_HELPER_3(divs, tl, env, tl, tl)
+DEF_HELPER_3(divso, tl, env, tl, tl)
+
+DEF_HELPER_2(load_dcr, tl, env, tl)
+DEF_HELPER_3(store_dcr, void, env, tl, tl)
+
+DEF_HELPER_2(load_dump_spr, void, env, i32)
+DEF_HELPER_2(store_dump_spr, void, env, i32)
+DEF_HELPER_4(fscr_facility_check, void, env, i32, i32, i32)
+DEF_HELPER_4(msr_facility_check, void, env, i32, i32, i32)
+DEF_HELPER_FLAGS_1(load_tbl, TCG_CALL_NO_RWG, tl, env)
+DEF_HELPER_FLAGS_1(load_tbu, TCG_CALL_NO_RWG, tl, env)
+DEF_HELPER_FLAGS_1(load_atbl, TCG_CALL_NO_RWG, tl, env)
+DEF_HELPER_FLAGS_1(load_atbu, TCG_CALL_NO_RWG, tl, env)
+DEF_HELPER_FLAGS_1(load_601_rtcl, TCG_CALL_NO_RWG, tl, env)
+DEF_HELPER_FLAGS_1(load_601_rtcu, TCG_CALL_NO_RWG, tl, env)
+#if !defined(CONFIG_USER_ONLY)
+#if defined(TARGET_PPC64)
+DEF_HELPER_FLAGS_1(load_purr, TCG_CALL_NO_RWG, tl, env)
+#endif
+DEF_HELPER_2(store_sdr1, void, env, tl)
+DEF_HELPER_FLAGS_2(store_tbl, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(store_tbu, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(store_atbl, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(store_atbu, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(store_601_rtcl, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(store_601_rtcu, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_1(load_decr, TCG_CALL_NO_RWG, tl, env)
+DEF_HELPER_FLAGS_2(store_decr, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_1(load_hdecr, TCG_CALL_NO_RWG, tl, env)
+DEF_HELPER_FLAGS_2(store_hdecr, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_2(store_hid0_601, void, env, tl)
+DEF_HELPER_3(store_403_pbr, void, env, i32, tl)
+DEF_HELPER_FLAGS_1(load_40x_pit, TCG_CALL_NO_RWG, tl, env)
+DEF_HELPER_FLAGS_2(store_40x_pit, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_2(store_40x_dbcr0, void, env, tl)
+DEF_HELPER_2(store_40x_sler, void, env, tl)
+DEF_HELPER_FLAGS_2(store_booke_tcr, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_FLAGS_2(store_booke_tsr, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_3(store_ibatl, void, env, i32, tl)
+DEF_HELPER_3(store_ibatu, void, env, i32, tl)
+DEF_HELPER_3(store_dbatl, void, env, i32, tl)
+DEF_HELPER_3(store_dbatu, void, env, i32, tl)
+DEF_HELPER_3(store_601_batl, void, env, i32, tl)
+DEF_HELPER_3(store_601_batu, void, env, i32, tl)
+#endif
+
+#define dh_alias_fprp ptr
+#define dh_ctype_fprp uint64_t *
+#define dh_is_signed_fprp dh_is_signed_ptr
+
+DEF_HELPER_4(dadd, void, env, fprp, fprp, fprp)
+DEF_HELPER_4(daddq, void, env, fprp, fprp, fprp)
+DEF_HELPER_4(dsub, void, env, fprp, fprp, fprp)
+DEF_HELPER_4(dsubq, void, env, fprp, fprp, fprp)
+DEF_HELPER_4(dmul, void, env, fprp, fprp, fprp)
+DEF_HELPER_4(dmulq, void, env, fprp, fprp, fprp)
+DEF_HELPER_4(ddiv, void, env, fprp, fprp, fprp)
+DEF_HELPER_4(ddivq, void, env, fprp, fprp, fprp)
+DEF_HELPER_3(dcmpo, i32, env, fprp, fprp)
+DEF_HELPER_3(dcmpoq, i32, env, fprp, fprp)
+DEF_HELPER_3(dcmpu, i32, env, fprp, fprp)
+DEF_HELPER_3(dcmpuq, i32, env, fprp, fprp)
+DEF_HELPER_3(dtstdc, i32, env, fprp, i32)
+DEF_HELPER_3(dtstdcq, i32, env, fprp, i32)
+DEF_HELPER_3(dtstdg, i32, env, fprp, i32)
+DEF_HELPER_3(dtstdgq, i32, env, fprp, i32)
+DEF_HELPER_3(dtstex, i32, env, fprp, fprp)
+DEF_HELPER_3(dtstexq, i32, env, fprp, fprp)
+DEF_HELPER_3(dtstsf, i32, env, fprp, fprp)
+DEF_HELPER_3(dtstsfq, i32, env, fprp, fprp)
+DEF_HELPER_3(dtstsfi, i32, env, i32, fprp)
+DEF_HELPER_3(dtstsfiq, i32, env, i32, fprp)
+DEF_HELPER_5(dquai, void, env, fprp, fprp, i32, i32)
+DEF_HELPER_5(dquaiq, void, env, fprp, fprp, i32, i32)
+DEF_HELPER_5(dqua, void, env, fprp, fprp, fprp, i32)
+DEF_HELPER_5(dquaq, void, env, fprp, fprp, fprp, i32)
+DEF_HELPER_5(drrnd, void, env, fprp, fprp, fprp, i32)
+DEF_HELPER_5(drrndq, void, env, fprp, fprp, fprp, i32)
+DEF_HELPER_5(drintx, void, env, fprp, fprp, i32, i32)
+DEF_HELPER_5(drintxq, void, env, fprp, fprp, i32, i32)
+DEF_HELPER_5(drintn, void, env, fprp, fprp, i32, i32)
+DEF_HELPER_5(drintnq, void, env, fprp, fprp, i32, i32)
+DEF_HELPER_3(dctdp, void, env, fprp, fprp)
+DEF_HELPER_3(dctqpq, void, env, fprp, fprp)
+DEF_HELPER_3(drsp, void, env, fprp, fprp)
+DEF_HELPER_3(drdpq, void, env, fprp, fprp)
+DEF_HELPER_3(dcffix, void, env, fprp, fprp)
+DEF_HELPER_3(dcffixq, void, env, fprp, fprp)
+DEF_HELPER_3(dctfix, void, env, fprp, fprp)
+DEF_HELPER_3(dctfixq, void, env, fprp, fprp)
+DEF_HELPER_4(ddedpd, void, env, fprp, fprp, i32)
+DEF_HELPER_4(ddedpdq, void, env, fprp, fprp, i32)
+DEF_HELPER_4(denbcd, void, env, fprp, fprp, i32)
+DEF_HELPER_4(denbcdq, void, env, fprp, fprp, i32)
+DEF_HELPER_3(dxex, void, env, fprp, fprp)
+DEF_HELPER_3(dxexq, void, env, fprp, fprp)
+DEF_HELPER_4(diex, void, env, fprp, fprp, fprp)
+DEF_HELPER_4(diexq, void, env, fprp, fprp, fprp)
+DEF_HELPER_4(dscri, void, env, fprp, fprp, i32)
+DEF_HELPER_4(dscriq, void, env, fprp, fprp, i32)
+DEF_HELPER_4(dscli, void, env, fprp, fprp, i32)
+DEF_HELPER_4(dscliq, void, env, fprp, fprp, i32)
+
+DEF_HELPER_1(tbegin, void, env)
+DEF_HELPER_FLAGS_1(fixup_thrm, TCG_CALL_NO_RWG, void, env)
diff --git a/target/ppc/helper_regs.h b/target/ppc/helper_regs.h
new file mode 100644
index 0000000000..62138163a5
--- /dev/null
+++ b/target/ppc/helper_regs.h
@@ -0,0 +1,189 @@
+/*
+ * PowerPC emulation special registers manipulation helpers for qemu.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HELPER_REGS_H
+#define HELPER_REGS_H
+
+/* Swap temporary saved registers with GPRs */
+static inline void hreg_swap_gpr_tgpr(CPUPPCState *env)
+{
+ target_ulong tmp;
+
+ tmp = env->gpr[0];
+ env->gpr[0] = env->tgpr[0];
+ env->tgpr[0] = tmp;
+ tmp = env->gpr[1];
+ env->gpr[1] = env->tgpr[1];
+ env->tgpr[1] = tmp;
+ tmp = env->gpr[2];
+ env->gpr[2] = env->tgpr[2];
+ env->tgpr[2] = tmp;
+ tmp = env->gpr[3];
+ env->gpr[3] = env->tgpr[3];
+ env->tgpr[3] = tmp;
+}
+
+static inline void hreg_compute_mem_idx(CPUPPCState *env)
+{
+ /* This is our encoding for server processors. The architecture
+ * specifies that there is no such thing as userspace with
+ * translation off; however, it appears that MacOS does it and
+ * some 32-bit CPUs support it. Weird...
+ *
+ * 0 = Guest User space virtual mode
+ * 1 = Guest Kernel space virtual mode
+ * 2 = Guest User space real mode
+ * 3 = Guest Kernel space real mode
+ * 4 = HV User space virtual mode
+ * 5 = HV Kernel space virtual mode
+ * 6 = HV User space real mode
+ * 7 = HV Kernel space real mode
+ *
+ * For BookE, we need 8 MMU modes as follows:
+ *
+ * 0 = AS 0 HV User space
+ * 1 = AS 0 HV Kernel space
+ * 2 = AS 1 HV User space
+ * 3 = AS 1 HV Kernel space
+ * 4 = AS 0 Guest User space
+ * 5 = AS 0 Guest Kernel space
+ * 6 = AS 1 Guest User space
+ * 7 = AS 1 Guest Kernel space
+ */
+ if (env->mmu_model & POWERPC_MMU_BOOKE) {
+ env->immu_idx = env->dmmu_idx = msr_pr ? 0 : 1;
+ env->immu_idx += msr_is ? 2 : 0;
+ env->dmmu_idx += msr_ds ? 2 : 0;
+ env->immu_idx += msr_gs ? 4 : 0;
+ env->dmmu_idx += msr_gs ? 4 : 0;
+ } else {
+ env->immu_idx = env->dmmu_idx = msr_pr ? 0 : 1;
+ env->immu_idx += msr_ir ? 0 : 2;
+ env->dmmu_idx += msr_dr ? 0 : 2;
+ env->immu_idx += msr_hv ? 4 : 0;
+ env->dmmu_idx += msr_hv ? 4 : 0;
+ }
+}
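+
+/* Editor's worked example (illustrative): with the server encoding above, a
+ * guest kernel running with translation on (PR=0, IR=DR=1, HV=0) gets
+ *
+ *     immu_idx = dmmu_idx = 1 + 0 + 0 = 1    (Guest Kernel space virtual)
+ *
+ * while a hypervisor kernel in real mode (PR=0, IR=DR=0, HV=1) gets
+ *
+ *     immu_idx = dmmu_idx = 1 + 2 + 4 = 7    (HV Kernel space real mode)
+ */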
+
+static inline void hreg_compute_hflags(CPUPPCState *env)
+{
+ target_ulong hflags_mask;
+
+ /* We 'forget' FE0 & FE1: we'll never generate imprecise exceptions */
+ hflags_mask = (1 << MSR_VR) | (1 << MSR_AP) | (1 << MSR_SA) |
+ (1 << MSR_PR) | (1 << MSR_FP) | (1 << MSR_SE) | (1 << MSR_BE) |
+ (1 << MSR_LE) | (1 << MSR_VSX) | (1 << MSR_IR) | (1 << MSR_DR);
+ hflags_mask |= (1ULL << MSR_CM) | (1ULL << MSR_SF) | MSR_HVB;
+ hreg_compute_mem_idx(env);
+ env->hflags = env->msr & hflags_mask;
+ /* Merge with hflags coming from other registers */
+ env->hflags |= env->hflags_nmsr;
+}
+
+static inline int hreg_store_msr(CPUPPCState *env, target_ulong value,
+ int alter_hv)
+{
+ int excp;
+#if !defined(CONFIG_USER_ONLY)
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
+#endif
+
+ excp = 0;
+ value &= env->msr_mask;
+#if !defined(CONFIG_USER_ONLY)
+ /* Neither mtmsr nor guest state can alter HV */
+ if (!alter_hv || !(env->msr & MSR_HVB)) {
+ value &= ~MSR_HVB;
+ value |= env->msr & MSR_HVB;
+ }
+ if (((value >> MSR_IR) & 1) != msr_ir ||
+ ((value >> MSR_DR) & 1) != msr_dr) {
+ cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
+ }
+ if ((env->mmu_model & POWERPC_MMU_BOOKE) &&
+ ((value >> MSR_GS) & 1) != msr_gs) {
+ cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
+ }
+ if (unlikely((env->flags & POWERPC_FLAG_TGPR) &&
+ ((value ^ env->msr) & (1 << MSR_TGPR)))) {
+ /* Swap temporary saved registers with GPRs */
+ hreg_swap_gpr_tgpr(env);
+ }
+ if (unlikely((value >> MSR_EP) & 1) != msr_ep) {
+ /* Change the exception prefix on PowerPC 601 */
+ env->excp_prefix = ((value >> MSR_EP) & 1) * 0xFFF00000;
+ }
+ /* If PR=1 then EE, IR and DR must be 1
+ *
+ * Note: We only enforce this on 64-bit server processors.
+ * It appears that:
+ * - 32-bit implementations support PR=1 and EE/DR/IR=0, and MacOS
+ * exploits it.
+ * - 64-bit embedded implementations do not need any operation to be
+ * performed when PR is set.
+ */
+ if ((env->insns_flags & PPC_SEGMENT_64B) && ((value >> MSR_PR) & 1)) {
+ value |= (1 << MSR_EE) | (1 << MSR_DR) | (1 << MSR_IR);
+ }
+#endif
+ env->msr = value;
+ hreg_compute_hflags(env);
+#if !defined(CONFIG_USER_ONLY)
+ if (unlikely(msr_pow == 1)) {
+ if (!env->pending_interrupts && (*env->check_pow)(env)) {
+ cs->halted = 1;
+ excp = EXCP_HALTED;
+ }
+ }
+#endif
+
+ return excp;
+}
+
+#if !defined(CONFIG_USER_ONLY)
+static inline void check_tlb_flush(CPUPPCState *env, bool global)
+{
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
+ if (env->tlb_need_flush & TLB_NEED_LOCAL_FLUSH) {
+ tlb_flush(cs, 1);
+ env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH;
+ }
+
+ /* Propagate TLB invalidations to other CPUs when the guest uses broadcast
+ * TLB invalidation instructions.
+ */
+ if (global && (env->tlb_need_flush & TLB_NEED_GLOBAL_FLUSH)) {
+ CPUState *other_cs;
+ CPU_FOREACH(other_cs) {
+ if (other_cs != cs) {
+ PowerPCCPU *cpu = POWERPC_CPU(other_cs);
+ CPUPPCState *other_env = &cpu->env;
+
+ other_env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH;
+ tlb_flush(other_cs, 1);
+ }
+ }
+ env->tlb_need_flush &= ~TLB_NEED_GLOBAL_FLUSH;
+ }
+}
+#else
+static inline void check_tlb_flush(CPUPPCState *env, bool global) { }
+#endif
+
+#endif /* HELPER_REGS_H */
diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c
new file mode 100644
index 0000000000..2d57c9a1c2
--- /dev/null
+++ b/target/ppc/int_helper.c
@@ -0,0 +1,3126 @@
+/*
+ * PowerPC integer and vector emulation helpers for QEMU.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internal.h"
+#include "exec/exec-all.h"
+#include "qemu/host-utils.h"
+#include "exec/helper-proto.h"
+#include "crypto/aes.h"
+
+#include "helper_regs.h"
+/*****************************************************************************/
+/* Fixed point operations helpers */
+
+target_ulong helper_divweu(CPUPPCState *env, target_ulong ra, target_ulong rb,
+ uint32_t oe)
+{
+ uint64_t rt = 0;
+ int overflow = 0;
+
+ uint64_t dividend = (uint64_t)ra << 32;
+ uint64_t divisor = (uint32_t)rb;
+
+ if (unlikely(divisor == 0)) {
+ overflow = 1;
+ } else {
+ rt = dividend / divisor;
+ overflow = rt > UINT32_MAX;
+ }
+
+ if (unlikely(overflow)) {
+ rt = 0; /* Undefined */
+ }
+
+ if (oe) {
+ if (unlikely(overflow)) {
+ env->so = env->ov = 1;
+ } else {
+ env->ov = 0;
+ }
+ }
+
+ return (target_ulong)rt;
+}
+
+target_ulong helper_divwe(CPUPPCState *env, target_ulong ra, target_ulong rb,
+ uint32_t oe)
+{
+ int64_t rt = 0;
+ int overflow = 0;
+
+ int64_t dividend = (int64_t)ra << 32;
+ int64_t divisor = (int64_t)((int32_t)rb);
+
+ if (unlikely((divisor == 0) ||
+ ((divisor == -1ull) && (dividend == INT64_MIN)))) {
+ overflow = 1;
+ } else {
+ rt = dividend / divisor;
+ overflow = rt != (int32_t)rt;
+ }
+
+ if (unlikely(overflow)) {
+ rt = 0; /* Undefined */
+ }
+
+ if (oe) {
+ if (unlikely(overflow)) {
+ env->so = env->ov = 1;
+ } else {
+ env->ov = 0;
+ }
+ }
+
+ return (target_ulong)rt;
+}
+
+#if defined(TARGET_PPC64)
+
+uint64_t helper_divdeu(CPUPPCState *env, uint64_t ra, uint64_t rb, uint32_t oe)
+{
+ uint64_t rt = 0;
+ int overflow = 0;
+
+ overflow = divu128(&rt, &ra, rb);
+
+ if (unlikely(overflow)) {
+ rt = 0; /* Undefined */
+ }
+
+ if (oe) {
+ if (unlikely(overflow)) {
+ env->so = env->ov = 1;
+ } else {
+ env->ov = 0;
+ }
+ }
+
+ return rt;
+}
+
+uint64_t helper_divde(CPUPPCState *env, uint64_t rau, uint64_t rbu, uint32_t oe)
+{
+ int64_t rt = 0;
+ int64_t ra = (int64_t)rau;
+ int64_t rb = (int64_t)rbu;
+ int overflow = divs128(&rt, &ra, rb);
+
+ if (unlikely(overflow)) {
+ rt = 0; /* Undefined */
+ }
+
+ if (oe) {
+ if (unlikely(overflow)) {
+ env->so = env->ov = 1;
+ } else {
+ env->ov = 0;
+ }
+ }
+
+ return rt;
+}
+
+#endif
+
+
+target_ulong helper_cntlzw(target_ulong t)
+{
+ return clz32(t);
+}
+
+target_ulong helper_cnttzw(target_ulong t)
+{
+ return ctz32(t);
+}
+
+#if defined(TARGET_PPC64)
+/* if x = 0xab, returns 0xabababababababab */
+#define pattern(x) (((x) & 0xff) * (~(target_ulong)0 / 0xff))
+
+/* Subtract 1 from each byte, AND with the inverse, and check whether the MSB
+ * is set in each byte.
+ * i.e. ((0x00 - 0x01) & ~(0x00)) & 0x80
+ * (0xFF & 0xFF) & 0x80 = 0x80 (zero found)
+ */
+#define haszero(v) (((v) - pattern(0x01)) & ~(v) & pattern(0x80))
+
+/* When you XOR the pattern and there is a match, that byte will be zero */
+#define hasvalue(x, n) (haszero((x) ^ pattern(n)))
+
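+/* For example, with ra = 0x42 and rb = 0x0000004200001234, pattern(0x42)
+ * replicates 0x42 into every byte, rb ^ pattern(0x42) turns the matching
+ * byte into zero, haszero() reports it, and helper_cmpeqb() returns
+ * 1 << CRF_GT.
+ */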
+uint32_t helper_cmpeqb(target_ulong ra, target_ulong rb)
+{
+ return hasvalue(rb, ra) ? 1 << CRF_GT : 0;
+}
+
+#undef pattern
+#undef haszero
+#undef hasvalue
+
+target_ulong helper_cntlzd(target_ulong t)
+{
+ return clz64(t);
+}
+
+target_ulong helper_cnttzd(target_ulong t)
+{
+ return ctz64(t);
+}
+
+/* Return an invalid random number.
+ *
+ * FIXME: Add an RNG backend or another mechanism to obtain a
+ * cryptographically suitable random number.
+ */
+target_ulong helper_darn32(void)
+{
+ return -1;
+}
+
+target_ulong helper_darn64(void)
+{
+ return -1;
+}
+
+#endif
+
+#if defined(TARGET_PPC64)
+
+uint64_t helper_bpermd(uint64_t rs, uint64_t rb)
+{
+ int i;
+ uint64_t ra = 0;
+
+ for (i = 0; i < 8; i++) {
+ int index = (rs >> (i*8)) & 0xFF;
+ if (index < 64) {
+ if (rb & (1ull << (63-index))) {
+ ra |= 1 << i;
+ }
+ }
+ }
+ return ra;
+}
+
+#endif
+
+target_ulong helper_cmpb(target_ulong rs, target_ulong rb)
+{
+ target_ulong mask = 0xff;
+ target_ulong ra = 0;
+ int i;
+
+ for (i = 0; i < sizeof(target_ulong); i++) {
+ if ((rs & mask) == (rb & mask)) {
+ ra |= mask;
+ }
+ mask <<= 8;
+ }
+ return ra;
+}
+
+/* shift right arithmetic helper */
+target_ulong helper_sraw(CPUPPCState *env, target_ulong value,
+ target_ulong shift)
+{
+ int32_t ret;
+
+ if (likely(!(shift & 0x20))) {
+ if (likely((uint32_t)shift != 0)) {
+ shift &= 0x1f;
+ ret = (int32_t)value >> shift;
+ if (likely(ret >= 0 || (value & ((1 << shift) - 1)) == 0)) {
+ env->ca = 0;
+ } else {
+ env->ca = 1;
+ }
+ } else {
+ ret = (int32_t)value;
+ env->ca = 0;
+ }
+ } else {
+ ret = (int32_t)value >> 31;
+ env->ca = (ret != 0);
+ }
+ return (target_long)ret;
+}
+
+#if defined(TARGET_PPC64)
+target_ulong helper_srad(CPUPPCState *env, target_ulong value,
+ target_ulong shift)
+{
+ int64_t ret;
+
+ if (likely(!(shift & 0x40))) {
+ if (likely((uint64_t)shift != 0)) {
+ shift &= 0x3f;
+ ret = (int64_t)value >> shift;
+ if (likely(ret >= 0 || (value & ((1ULL << shift) - 1)) == 0)) {
+ env->ca = 0;
+ } else {
+ env->ca = 1;
+ }
+ } else {
+ ret = (int64_t)value;
+ env->ca = 0;
+ }
+ } else {
+ ret = (int64_t)value >> 63;
+ env->ca = (ret != 0);
+ }
+ return ret;
+}
+#endif
+
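+/* The popcount helpers below keep per-byte (popcntb) and per-word (popcntw)
+ * population counts rather than a single total; for example,
+ * helper_popcntb(0xFF01) == 0x0801 (8 bits set in the 0xFF byte, 1 in the
+ * 0x01 byte).
+ */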
+#if defined(TARGET_PPC64)
+target_ulong helper_popcntb(target_ulong val)
+{
+ val = (val & 0x5555555555555555ULL) + ((val >> 1) &
+ 0x5555555555555555ULL);
+ val = (val & 0x3333333333333333ULL) + ((val >> 2) &
+ 0x3333333333333333ULL);
+ val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) &
+ 0x0f0f0f0f0f0f0f0fULL);
+ return val;
+}
+
+target_ulong helper_popcntw(target_ulong val)
+{
+ val = (val & 0x5555555555555555ULL) + ((val >> 1) &
+ 0x5555555555555555ULL);
+ val = (val & 0x3333333333333333ULL) + ((val >> 2) &
+ 0x3333333333333333ULL);
+ val = (val & 0x0f0f0f0f0f0f0f0fULL) + ((val >> 4) &
+ 0x0f0f0f0f0f0f0f0fULL);
+ val = (val & 0x00ff00ff00ff00ffULL) + ((val >> 8) &
+ 0x00ff00ff00ff00ffULL);
+ val = (val & 0x0000ffff0000ffffULL) + ((val >> 16) &
+ 0x0000ffff0000ffffULL);
+ return val;
+}
+
+target_ulong helper_popcntd(target_ulong val)
+{
+ return ctpop64(val);
+}
+#else
+target_ulong helper_popcntb(target_ulong val)
+{
+ val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
+ val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
+ val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
+ return val;
+}
+
+target_ulong helper_popcntw(target_ulong val)
+{
+ val = (val & 0x55555555) + ((val >> 1) & 0x55555555);
+ val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
+ val = (val & 0x0f0f0f0f) + ((val >> 4) & 0x0f0f0f0f);
+ val = (val & 0x00ff00ff) + ((val >> 8) & 0x00ff00ff);
+ val = (val & 0x0000ffff) + ((val >> 16) & 0x0000ffff);
+ return val;
+}
+#endif
+
+/*****************************************************************************/
+/* PowerPC 601 specific instructions (POWER bridge) */
+target_ulong helper_div(CPUPPCState *env, target_ulong arg1, target_ulong arg2)
+{
+ uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
+
+ if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
+ (int32_t)arg2 == 0) {
+ env->spr[SPR_MQ] = 0;
+ return INT32_MIN;
+ } else {
+ env->spr[SPR_MQ] = tmp % arg2;
+ return tmp / (int32_t)arg2;
+ }
+}
+
+target_ulong helper_divo(CPUPPCState *env, target_ulong arg1,
+ target_ulong arg2)
+{
+ uint64_t tmp = (uint64_t)arg1 << 32 | env->spr[SPR_MQ];
+
+ if (((int32_t)tmp == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
+ (int32_t)arg2 == 0) {
+ env->so = env->ov = 1;
+ env->spr[SPR_MQ] = 0;
+ return INT32_MIN;
+ } else {
+ env->spr[SPR_MQ] = tmp % arg2;
+ tmp /= (int32_t)arg2;
+ if ((int32_t)tmp != tmp) {
+ env->so = env->ov = 1;
+ } else {
+ env->ov = 0;
+ }
+ return tmp;
+ }
+}
+
+target_ulong helper_divs(CPUPPCState *env, target_ulong arg1,
+ target_ulong arg2)
+{
+ if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
+ (int32_t)arg2 == 0) {
+ env->spr[SPR_MQ] = 0;
+ return INT32_MIN;
+ } else {
+ env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
+ return (int32_t)arg1 / (int32_t)arg2;
+ }
+}
+
+target_ulong helper_divso(CPUPPCState *env, target_ulong arg1,
+ target_ulong arg2)
+{
+ if (((int32_t)arg1 == INT32_MIN && (int32_t)arg2 == (int32_t)-1) ||
+ (int32_t)arg2 == 0) {
+ env->so = env->ov = 1;
+ env->spr[SPR_MQ] = 0;
+ return INT32_MIN;
+ } else {
+ env->ov = 0;
+ env->spr[SPR_MQ] = (int32_t)arg1 % (int32_t)arg2;
+ return (int32_t)arg1 / (int32_t)arg2;
+ }
+}
+
+/*****************************************************************************/
+/* 602 specific instructions */
+/* mfrom is the craziest instruction ever seen, imho! */
+/* The real implementation uses a ROM table; do the same. */
+/* Extremely decomposed:
+ * return 256 * log10(10^(-arg / 256) + 1.0) + 0.5
+ */
+#if !defined(CONFIG_USER_ONLY)
+target_ulong helper_602_mfrom(target_ulong arg)
+{
+ if (likely(arg < 602)) {
+#include "mfrom_table.c"
+ return mfrom_ROM_table[arg];
+ } else {
+ return 0;
+ }
+}
+#endif
+
+/*****************************************************************************/
+/* Altivec extension helpers */
+#if defined(HOST_WORDS_BIGENDIAN)
+#define HI_IDX 0
+#define LO_IDX 1
+#define AVRB(i) u8[i]
+#define AVRW(i) u32[i]
+#else
+#define HI_IDX 1
+#define LO_IDX 0
+#define AVRB(i) u8[15-(i)]
+#define AVRW(i) u32[3-(i)]
+#endif
+
+#if defined(HOST_WORDS_BIGENDIAN)
+#define VECTOR_FOR_INORDER_I(index, element) \
+ for (index = 0; index < ARRAY_SIZE(r->element); index++)
+#else
+#define VECTOR_FOR_INORDER_I(index, element) \
+ for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--)
+#endif
+
+/* Saturating arithmetic helpers. */
+#define SATCVT(from, to, from_type, to_type, min, max) \
+ static inline to_type cvt##from##to(from_type x, int *sat) \
+ { \
+ to_type r; \
+ \
+ if (x < (from_type)min) { \
+ r = min; \
+ *sat = 1; \
+ } else if (x > (from_type)max) { \
+ r = max; \
+ *sat = 1; \
+ } else { \
+ r = x; \
+ } \
+ return r; \
+ }
+#define SATCVTU(from, to, from_type, to_type, min, max) \
+ static inline to_type cvt##from##to(from_type x, int *sat) \
+ { \
+ to_type r; \
+ \
+ if (x > (from_type)max) { \
+ r = max; \
+ *sat = 1; \
+ } else { \
+ r = x; \
+ } \
+ return r; \
+ }
+SATCVT(sh, sb, int16_t, int8_t, INT8_MIN, INT8_MAX)
+SATCVT(sw, sh, int32_t, int16_t, INT16_MIN, INT16_MAX)
+SATCVT(sd, sw, int64_t, int32_t, INT32_MIN, INT32_MAX)
+
+SATCVTU(uh, ub, uint16_t, uint8_t, 0, UINT8_MAX)
+SATCVTU(uw, uh, uint32_t, uint16_t, 0, UINT16_MAX)
+SATCVTU(ud, uw, uint64_t, uint32_t, 0, UINT32_MAX)
+SATCVT(sh, ub, int16_t, uint8_t, 0, UINT8_MAX)
+SATCVT(sw, uh, int32_t, uint16_t, 0, UINT16_MAX)
+SATCVT(sd, uw, int64_t, uint32_t, 0, UINT32_MAX)
+#undef SATCVT
+#undef SATCVTU
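+
+/* For example, cvtsdsw() clamps a 64-bit intermediate into the signed 32-bit
+ * range and records saturation: cvtsdsw((int64_t)INT32_MAX + 1, &sat) returns
+ * INT32_MAX and sets sat.
+ */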
+
+void helper_lvsl(ppc_avr_t *r, target_ulong sh)
+{
+ int i, j = (sh & 0xf);
+
+ VECTOR_FOR_INORDER_I(i, u8) {
+ r->u8[i] = j++;
+ }
+}
+
+void helper_lvsr(ppc_avr_t *r, target_ulong sh)
+{
+ int i, j = 0x10 - (sh & 0xf);
+
+ VECTOR_FOR_INORDER_I(i, u8) {
+ r->u8[i] = j++;
+ }
+}
+
+void helper_mtvscr(CPUPPCState *env, ppc_avr_t *r)
+{
+#if defined(HOST_WORDS_BIGENDIAN)
+ env->vscr = r->u32[3];
+#else
+ env->vscr = r->u32[0];
+#endif
+ set_flush_to_zero(vscr_nj, &env->vec_status);
+}
+
+void helper_vaddcuw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
+ r->u32[i] = ~a->u32[i] < b->u32[i];
+ }
+}
+
+/* vprtybw */
+void helper_vprtybw(ppc_avr_t *r, ppc_avr_t *b)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
+ uint64_t res = b->u32[i] ^ (b->u32[i] >> 16);
+ res ^= res >> 8;
+ r->u32[i] = res & 1;
+ }
+}
+
+/* vprtybd */
+void helper_vprtybd(ppc_avr_t *r, ppc_avr_t *b)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
+ uint64_t res = b->u64[i] ^ (b->u64[i] >> 32);
+ res ^= res >> 16;
+ res ^= res >> 8;
+ r->u64[i] = res & 1;
+ }
+}
+
+/* vprtybq */
+void helper_vprtybq(ppc_avr_t *r, ppc_avr_t *b)
+{
+ uint64_t res = b->u64[0] ^ b->u64[1];
+ res ^= res >> 32;
+ res ^= res >> 16;
+ res ^= res >> 8;
+ r->u64[LO_IDX] = res & 1;
+ r->u64[HI_IDX] = 0;
+}
+
+#define VARITH_DO(name, op, element) \
+ void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+ { \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
+ r->element[i] = a->element[i] op b->element[i]; \
+ } \
+ }
+#define VARITH(suffix, element) \
+ VARITH_DO(add##suffix, +, element) \
+ VARITH_DO(sub##suffix, -, element)
+VARITH(ubm, u8)
+VARITH(uhm, u16)
+VARITH(uwm, u32)
+VARITH(udm, u64)
+VARITH_DO(muluwm, *, u32)
+#undef VARITH_DO
+#undef VARITH
+
+#define VARITHFP(suffix, func) \
+ void helper_v##suffix(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, \
+ ppc_avr_t *b) \
+ { \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
+ r->f[i] = func(a->f[i], b->f[i], &env->vec_status); \
+ } \
+ }
+VARITHFP(addfp, float32_add)
+VARITHFP(subfp, float32_sub)
+VARITHFP(minfp, float32_min)
+VARITHFP(maxfp, float32_max)
+#undef VARITHFP
+
+#define VARITHFPFMA(suffix, type) \
+ void helper_v##suffix(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, \
+ ppc_avr_t *b, ppc_avr_t *c) \
+ { \
+ int i; \
+ for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
+ r->f[i] = float32_muladd(a->f[i], c->f[i], b->f[i], \
+ type, &env->vec_status); \
+ } \
+ }
+VARITHFPFMA(maddfp, 0);
+VARITHFPFMA(nmsubfp, float_muladd_negate_result | float_muladd_negate_c);
+#undef VARITHFPFMA
+
+#define VARITHSAT_CASE(type, op, cvt, element) \
+ { \
+ type result = (type)a->element[i] op (type)b->element[i]; \
+ r->element[i] = cvt(result, &sat); \
+ }
+
+#define VARITHSAT_DO(name, op, optype, cvt, element) \
+ void helper_v##name(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, \
+ ppc_avr_t *b) \
+ { \
+ int sat = 0; \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
+ switch (sizeof(r->element[0])) { \
+ case 1: \
+ VARITHSAT_CASE(optype, op, cvt, element); \
+ break; \
+ case 2: \
+ VARITHSAT_CASE(optype, op, cvt, element); \
+ break; \
+ case 4: \
+ VARITHSAT_CASE(optype, op, cvt, element); \
+ break; \
+ } \
+ } \
+ if (sat) { \
+ env->vscr |= (1 << VSCR_SAT); \
+ } \
+ }
+#define VARITHSAT_SIGNED(suffix, element, optype, cvt) \
+ VARITHSAT_DO(adds##suffix##s, +, optype, cvt, element) \
+ VARITHSAT_DO(subs##suffix##s, -, optype, cvt, element)
+#define VARITHSAT_UNSIGNED(suffix, element, optype, cvt) \
+ VARITHSAT_DO(addu##suffix##s, +, optype, cvt, element) \
+ VARITHSAT_DO(subu##suffix##s, -, optype, cvt, element)
+VARITHSAT_SIGNED(b, s8, int16_t, cvtshsb)
+VARITHSAT_SIGNED(h, s16, int32_t, cvtswsh)
+VARITHSAT_SIGNED(w, s32, int64_t, cvtsdsw)
+VARITHSAT_UNSIGNED(b, u8, uint16_t, cvtshub)
+VARITHSAT_UNSIGNED(h, u16, uint32_t, cvtswuh)
+VARITHSAT_UNSIGNED(w, u32, uint64_t, cvtsduw)
+#undef VARITHSAT_CASE
+#undef VARITHSAT_DO
+#undef VARITHSAT_SIGNED
+#undef VARITHSAT_UNSIGNED
+
+#define VAVG_DO(name, element, etype) \
+ void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+ { \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
+ etype x = (etype)a->element[i] + (etype)b->element[i] + 1; \
+ r->element[i] = x >> 1; \
+ } \
+ }
+
+#define VAVG(type, signed_element, signed_type, unsigned_element, \
+ unsigned_type) \
+ VAVG_DO(avgs##type, signed_element, signed_type) \
+ VAVG_DO(avgu##type, unsigned_element, unsigned_type)
+VAVG(b, s8, int16_t, u8, uint16_t)
+VAVG(h, s16, int32_t, u16, uint32_t)
+VAVG(w, s32, int64_t, u32, uint64_t)
+#undef VAVG_DO
+#undef VAVG
+
+#define VABSDU_DO(name, element) \
+void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+{ \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
+ r->element[i] = (a->element[i] > b->element[i]) ? \
+ (a->element[i] - b->element[i]) : \
+ (b->element[i] - a->element[i]); \
+ } \
+}
+
+/* VABSDU - Vector absolute difference unsigned
+ * name - instruction mnemonic suffix (b: byte, h: halfword, w: word)
+ * element - element type to access from vector
+ */
+#define VABSDU(type, element) \
+ VABSDU_DO(absdu##type, element)
+VABSDU(b, u8)
+VABSDU(h, u16)
+VABSDU(w, u32)
+#undef VABSDU_DO
+#undef VABSDU
+
+#define VCF(suffix, cvt, element) \
+ void helper_vcf##suffix(CPUPPCState *env, ppc_avr_t *r, \
+ ppc_avr_t *b, uint32_t uim) \
+ { \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
+ float32 t = cvt(b->element[i], &env->vec_status); \
+ r->f[i] = float32_scalbn(t, -uim, &env->vec_status); \
+ } \
+ }
+VCF(ux, uint32_to_float32, u32)
+VCF(sx, int32_to_float32, s32)
+#undef VCF
+
+#define VCMP_DO(suffix, compare, element, record) \
+ void helper_vcmp##suffix(CPUPPCState *env, ppc_avr_t *r, \
+ ppc_avr_t *a, ppc_avr_t *b) \
+ { \
+ uint64_t ones = (uint64_t)-1; \
+ uint64_t all = ones; \
+ uint64_t none = 0; \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
+ uint64_t result = (a->element[i] compare b->element[i] ? \
+ ones : 0x0); \
+ switch (sizeof(a->element[0])) { \
+ case 8: \
+ r->u64[i] = result; \
+ break; \
+ case 4: \
+ r->u32[i] = result; \
+ break; \
+ case 2: \
+ r->u16[i] = result; \
+ break; \
+ case 1: \
+ r->u8[i] = result; \
+ break; \
+ } \
+ all &= result; \
+ none |= result; \
+ } \
+ if (record) { \
+ env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
+ } \
+ }
+#define VCMP(suffix, compare, element) \
+ VCMP_DO(suffix, compare, element, 0) \
+ VCMP_DO(suffix##_dot, compare, element, 1)
+VCMP(equb, ==, u8)
+VCMP(equh, ==, u16)
+VCMP(equw, ==, u32)
+VCMP(equd, ==, u64)
+VCMP(gtub, >, u8)
+VCMP(gtuh, >, u16)
+VCMP(gtuw, >, u32)
+VCMP(gtud, >, u64)
+VCMP(gtsb, >, s8)
+VCMP(gtsh, >, s16)
+VCMP(gtsw, >, s32)
+VCMP(gtsd, >, s64)
+#undef VCMP_DO
+#undef VCMP
+
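+/* For the "_dot" (record) forms generated by VCMP, VCMPNE and VCMPFP, CR
+ * field 6 is set to 0x8 when every element compares true, 0x2 when no
+ * element does, and 0x0 for a mixed result.
+ */
+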
+#define VCMPNE_DO(suffix, element, etype, cmpzero, record) \
+void helper_vcmpne##suffix(CPUPPCState *env, ppc_avr_t *r, \
+ ppc_avr_t *a, ppc_avr_t *b) \
+{ \
+ etype ones = (etype)-1; \
+ etype all = ones; \
+ etype result, none = 0; \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
+ if (cmpzero) { \
+ result = ((a->element[i] == 0) \
+ || (b->element[i] == 0) \
+ || (a->element[i] != b->element[i]) ? \
+ ones : 0x0); \
+ } else { \
+ result = (a->element[i] != b->element[i]) ? ones : 0x0; \
+ } \
+ r->element[i] = result; \
+ all &= result; \
+ none |= result; \
+ } \
+ if (record) { \
+ env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
+ } \
+}
+
+/* VCMPNEZ - Vector compare not equal to zero
+ * suffix - instruction mnemonic suffix (b: byte, h: halfword, w: word)
+ * element - element type to access from vector
+ */
+#define VCMPNE(suffix, element, etype, cmpzero) \
+ VCMPNE_DO(suffix, element, etype, cmpzero, 0) \
+ VCMPNE_DO(suffix##_dot, element, etype, cmpzero, 1)
+VCMPNE(zb, u8, uint8_t, 1)
+VCMPNE(zh, u16, uint16_t, 1)
+VCMPNE(zw, u32, uint32_t, 1)
+VCMPNE(b, u8, uint8_t, 0)
+VCMPNE(h, u16, uint16_t, 0)
+VCMPNE(w, u32, uint32_t, 0)
+#undef VCMPNE_DO
+#undef VCMPNE
+
+#define VCMPFP_DO(suffix, compare, order, record) \
+ void helper_vcmp##suffix(CPUPPCState *env, ppc_avr_t *r, \
+ ppc_avr_t *a, ppc_avr_t *b) \
+ { \
+ uint32_t ones = (uint32_t)-1; \
+ uint32_t all = ones; \
+ uint32_t none = 0; \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
+ uint32_t result; \
+ int rel = float32_compare_quiet(a->f[i], b->f[i], \
+ &env->vec_status); \
+ if (rel == float_relation_unordered) { \
+ result = 0; \
+ } else if (rel compare order) { \
+ result = ones; \
+ } else { \
+ result = 0; \
+ } \
+ r->u32[i] = result; \
+ all &= result; \
+ none |= result; \
+ } \
+ if (record) { \
+ env->crf[6] = ((all != 0) << 3) | ((none == 0) << 1); \
+ } \
+ }
+#define VCMPFP(suffix, compare, order) \
+ VCMPFP_DO(suffix, compare, order, 0) \
+ VCMPFP_DO(suffix##_dot, compare, order, 1)
+VCMPFP(eqfp, ==, float_relation_equal)
+VCMPFP(gefp, !=, float_relation_less)
+VCMPFP(gtfp, ==, float_relation_greater)
+#undef VCMPFP_DO
+#undef VCMPFP
+
+static inline void vcmpbfp_internal(CPUPPCState *env, ppc_avr_t *r,
+ ppc_avr_t *a, ppc_avr_t *b, int record)
+{
+ int i;
+ int all_in = 0;
+
+ for (i = 0; i < ARRAY_SIZE(r->f); i++) {
+ int le_rel = float32_compare_quiet(a->f[i], b->f[i], &env->vec_status);
+ if (le_rel == float_relation_unordered) {
+ r->u32[i] = 0xc0000000;
+ all_in = 1;
+ } else {
+ float32 bneg = float32_chs(b->f[i]);
+ int ge_rel = float32_compare_quiet(a->f[i], bneg, &env->vec_status);
+ int le = le_rel != float_relation_greater;
+ int ge = ge_rel != float_relation_less;
+
+ r->u32[i] = ((!le) << 31) | ((!ge) << 30);
+ all_in |= (!le | !ge);
+ }
+ }
+ if (record) {
+ env->crf[6] = (all_in == 0) << 1;
+ }
+}
+
+void helper_vcmpbfp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ vcmpbfp_internal(env, r, a, b, 0);
+}
+
+void helper_vcmpbfp_dot(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
+ ppc_avr_t *b)
+{
+ vcmpbfp_internal(env, r, a, b, 1);
+}
+
+#define VCT(suffix, satcvt, element) \
+ void helper_vct##suffix(CPUPPCState *env, ppc_avr_t *r, \
+ ppc_avr_t *b, uint32_t uim) \
+ { \
+ int i; \
+ int sat = 0; \
+ float_status s = env->vec_status; \
+ \
+ set_float_rounding_mode(float_round_to_zero, &s); \
+ for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
+ if (float32_is_any_nan(b->f[i])) { \
+ r->element[i] = 0; \
+ } else { \
+ float64 t = float32_to_float64(b->f[i], &s); \
+ int64_t j; \
+ \
+ t = float64_scalbn(t, uim, &s); \
+ j = float64_to_int64(t, &s); \
+ r->element[i] = satcvt(j, &sat); \
+ } \
+ } \
+ if (sat) { \
+ env->vscr |= (1 << VSCR_SAT); \
+ } \
+ }
+VCT(uxs, cvtsduw, u32)
+VCT(sxs, cvtsdsw, s32)
+#undef VCT
+
+target_ulong helper_vclzlsbb(ppc_avr_t *r)
+{
+ target_ulong count = 0;
+ int i;
+ VECTOR_FOR_INORDER_I(i, u8) {
+ if (r->u8[i] & 0x01) {
+ break;
+ }
+ count++;
+ }
+ return count;
+}
+
+target_ulong helper_vctzlsbb(ppc_avr_t *r)
+{
+ target_ulong count = 0;
+ int i;
+#if defined(HOST_WORDS_BIGENDIAN)
+ for (i = ARRAY_SIZE(r->u8) - 1; i >= 0; i--) {
+#else
+ for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
+#endif
+ if (r->u8[i] & 0x01) {
+ break;
+ }
+ count++;
+ }
+ return count;
+}
+
+void helper_vmhaddshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
+ ppc_avr_t *b, ppc_avr_t *c)
+{
+ int sat = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
+ int32_t prod = a->s16[i] * b->s16[i];
+ int32_t t = (int32_t)c->s16[i] + (prod >> 15);
+
+ r->s16[i] = cvtswsh(t, &sat);
+ }
+
+ if (sat) {
+ env->vscr |= (1 << VSCR_SAT);
+ }
+}
+
+void helper_vmhraddshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
+ ppc_avr_t *b, ppc_avr_t *c)
+{
+ int sat = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
+ int32_t prod = a->s16[i] * b->s16[i] + 0x00004000;
+ int32_t t = (int32_t)c->s16[i] + (prod >> 15);
+ r->s16[i] = cvtswsh(t, &sat);
+ }
+
+ if (sat) {
+ env->vscr |= (1 << VSCR_SAT);
+ }
+}
+
+#define VMINMAX_DO(name, compare, element) \
+ void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+ { \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
+ if (a->element[i] compare b->element[i]) { \
+ r->element[i] = b->element[i]; \
+ } else { \
+ r->element[i] = a->element[i]; \
+ } \
+ } \
+ }
+#define VMINMAX(suffix, element) \
+ VMINMAX_DO(min##suffix, >, element) \
+ VMINMAX_DO(max##suffix, <, element)
+VMINMAX(sb, s8)
+VMINMAX(sh, s16)
+VMINMAX(sw, s32)
+VMINMAX(sd, s64)
+VMINMAX(ub, u8)
+VMINMAX(uh, u16)
+VMINMAX(uw, u32)
+VMINMAX(ud, u64)
+#undef VMINMAX_DO
+#undef VMINMAX
+
+void helper_vmladduhm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
+ int32_t prod = a->s16[i] * b->s16[i];
+ r->s16[i] = (int16_t) (prod + c->s16[i]);
+ }
+}
+
+#define VMRG_DO(name, element, highp) \
+ void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+ { \
+ ppc_avr_t result; \
+ int i; \
+ size_t n_elems = ARRAY_SIZE(r->element); \
+ \
+ for (i = 0; i < n_elems / 2; i++) { \
+ if (highp) { \
+ result.element[i*2+HI_IDX] = a->element[i]; \
+ result.element[i*2+LO_IDX] = b->element[i]; \
+ } else { \
+ result.element[n_elems - i * 2 - (1 + HI_IDX)] = \
+ b->element[n_elems - i - 1]; \
+ result.element[n_elems - i * 2 - (1 + LO_IDX)] = \
+ a->element[n_elems - i - 1]; \
+ } \
+ } \
+ *r = result; \
+ }
+#if defined(HOST_WORDS_BIGENDIAN)
+#define MRGHI 0
+#define MRGLO 1
+#else
+#define MRGHI 1
+#define MRGLO 0
+#endif
+#define VMRG(suffix, element) \
+ VMRG_DO(mrgl##suffix, element, MRGHI) \
+ VMRG_DO(mrgh##suffix, element, MRGLO)
+VMRG(b, u8)
+VMRG(h, u16)
+VMRG(w, u32)
+#undef VMRG_DO
+#undef VMRG
+#undef MRGHI
+#undef MRGLO
+
+void helper_vmsummbm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
+ ppc_avr_t *b, ppc_avr_t *c)
+{
+ int32_t prod[16];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->s8); i++) {
+ prod[i] = (int32_t)a->s8[i] * b->u8[i];
+ }
+
+ VECTOR_FOR_INORDER_I(i, s32) {
+ r->s32[i] = c->s32[i] + prod[4 * i] + prod[4 * i + 1] +
+ prod[4 * i + 2] + prod[4 * i + 3];
+ }
+}
+
+void helper_vmsumshm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
+ ppc_avr_t *b, ppc_avr_t *c)
+{
+ int32_t prod[8];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
+ prod[i] = a->s16[i] * b->s16[i];
+ }
+
+ VECTOR_FOR_INORDER_I(i, s32) {
+ r->s32[i] = c->s32[i] + prod[2 * i] + prod[2 * i + 1];
+ }
+}
+
+void helper_vmsumshs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
+ ppc_avr_t *b, ppc_avr_t *c)
+{
+ int32_t prod[8];
+ int i;
+ int sat = 0;
+
+ for (i = 0; i < ARRAY_SIZE(r->s16); i++) {
+ prod[i] = (int32_t)a->s16[i] * b->s16[i];
+ }
+
+ VECTOR_FOR_INORDER_I(i, s32) {
+ int64_t t = (int64_t)c->s32[i] + prod[2 * i] + prod[2 * i + 1];
+
+ r->u32[i] = cvtsdsw(t, &sat);
+ }
+
+ if (sat) {
+ env->vscr |= (1 << VSCR_SAT);
+ }
+}
+
+void helper_vmsumubm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
+ ppc_avr_t *b, ppc_avr_t *c)
+{
+ uint16_t prod[16];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
+ prod[i] = a->u8[i] * b->u8[i];
+ }
+
+ VECTOR_FOR_INORDER_I(i, u32) {
+ r->u32[i] = c->u32[i] + prod[4 * i] + prod[4 * i + 1] +
+ prod[4 * i + 2] + prod[4 * i + 3];
+ }
+}
+
+void helper_vmsumuhm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
+ ppc_avr_t *b, ppc_avr_t *c)
+{
+ uint32_t prod[8];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
+ prod[i] = a->u16[i] * b->u16[i];
+ }
+
+ VECTOR_FOR_INORDER_I(i, u32) {
+ r->u32[i] = c->u32[i] + prod[2 * i] + prod[2 * i + 1];
+ }
+}
+
+void helper_vmsumuhs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a,
+ ppc_avr_t *b, ppc_avr_t *c)
+{
+ uint32_t prod[8];
+ int i;
+ int sat = 0;
+
+ for (i = 0; i < ARRAY_SIZE(r->u16); i++) {
+ prod[i] = a->u16[i] * b->u16[i];
+ }
+
+ VECTOR_FOR_INORDER_I(i, s32) {
+ uint64_t t = (uint64_t)c->u32[i] + prod[2 * i] + prod[2 * i + 1];
+
+ r->u32[i] = cvtuduw(t, &sat);
+ }
+
+ if (sat) {
+ env->vscr |= (1 << VSCR_SAT);
+ }
+}
+
+#define VMUL_DO(name, mul_element, prod_element, cast, evenp) \
+ void helper_v##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+ { \
+ int i; \
+ \
+ VECTOR_FOR_INORDER_I(i, prod_element) { \
+ if (evenp) { \
+ r->prod_element[i] = \
+ (cast)a->mul_element[i * 2 + HI_IDX] * \
+ (cast)b->mul_element[i * 2 + HI_IDX]; \
+ } else { \
+ r->prod_element[i] = \
+ (cast)a->mul_element[i * 2 + LO_IDX] * \
+ (cast)b->mul_element[i * 2 + LO_IDX]; \
+ } \
+ } \
+ }
+#define VMUL(suffix, mul_element, prod_element, cast) \
+ VMUL_DO(mule##suffix, mul_element, prod_element, cast, 1) \
+ VMUL_DO(mulo##suffix, mul_element, prod_element, cast, 0)
+VMUL(sb, s8, s16, int16_t)
+VMUL(sh, s16, s32, int32_t)
+VMUL(sw, s32, s64, int64_t)
+VMUL(ub, u8, u16, uint16_t)
+VMUL(uh, u16, u32, uint32_t)
+VMUL(uw, u32, u64, uint64_t)
+#undef VMUL_DO
+#undef VMUL
+
+void helper_vperm(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
+ ppc_avr_t *c)
+{
+ ppc_avr_t result;
+ int i;
+
+ VECTOR_FOR_INORDER_I(i, u8) {
+ int s = c->u8[i] & 0x1f;
+#if defined(HOST_WORDS_BIGENDIAN)
+ int index = s & 0xf;
+#else
+ int index = 15 - (s & 0xf);
+#endif
+
+ if (s & 0x10) {
+ result.u8[i] = b->u8[index];
+ } else {
+ result.u8[i] = a->u8[index];
+ }
+ }
+ *r = result;
+}
+
+void helper_vpermr(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
+ ppc_avr_t *c)
+{
+ ppc_avr_t result;
+ int i;
+
+ VECTOR_FOR_INORDER_I(i, u8) {
+ int s = c->u8[i] & 0x1f;
+#if defined(HOST_WORDS_BIGENDIAN)
+ int index = 15 - (s & 0xf);
+#else
+ int index = s & 0xf;
+#endif
+
+ if (s & 0x10) {
+ result.u8[i] = a->u8[index];
+ } else {
+ result.u8[i] = b->u8[index];
+ }
+ }
+ *r = result;
+}
+
+#if defined(HOST_WORDS_BIGENDIAN)
+#define VBPERMQ_INDEX(avr, i) ((avr)->u8[(i)])
+#define VBPERMD_INDEX(i) (i)
+#define VBPERMQ_DW(index) (((index) & 0x40) != 0)
+#define EXTRACT_BIT(avr, i, index) (extract64((avr)->u64[i], index, 1))
+#else
+#define VBPERMQ_INDEX(avr, i) ((avr)->u8[15-(i)])
+#define VBPERMD_INDEX(i) (1 - i)
+#define VBPERMQ_DW(index) (((index) & 0x40) == 0)
+#define EXTRACT_BIT(avr, i, index) \
+ (extract64((avr)->u64[1 - i], 63 - index, 1))
+#endif
+
+void helper_vbpermd(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int i, j;
+ ppc_avr_t result = { .u64 = { 0, 0 } };
+ VECTOR_FOR_INORDER_I(i, u64) {
+ for (j = 0; j < 8; j++) {
+ int index = VBPERMQ_INDEX(b, (i * 8) + j);
+ if (index < 64 && EXTRACT_BIT(a, i, index)) {
+ result.u64[VBPERMD_INDEX(i)] |= (0x80 >> j);
+ }
+ }
+ }
+ *r = result;
+}
+
+void helper_vbpermq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int i;
+ uint64_t perm = 0;
+
+ VECTOR_FOR_INORDER_I(i, u8) {
+ int index = VBPERMQ_INDEX(b, i);
+
+ if (index < 128) {
+ uint64_t mask = (1ull << (63-(index & 0x3F)));
+ if (a->u64[VBPERMQ_DW(index)] & mask) {
+ perm |= (0x8000 >> i);
+ }
+ }
+ }
+
+ r->u64[HI_IDX] = perm;
+ r->u64[LO_IDX] = 0;
+}
+
+#undef VBPERMQ_INDEX
+#undef VBPERMQ_DW
+
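+/* Lookup table used by helper_vgbbd() (Vector Gather Bits by Bytes by
+ * Doubleword): entry x scatters the eight bits of x to the most-significant
+ * bit of each byte; the per-byte shift in the helper then moves each source
+ * bit into its destination column, giving an 8x8 bit-matrix transpose within
+ * each doubleword.
+ */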
+static const uint64_t VGBBD_MASKS[256] = {
+ 0x0000000000000000ull, /* 00 */
+ 0x0000000000000080ull, /* 01 */
+ 0x0000000000008000ull, /* 02 */
+ 0x0000000000008080ull, /* 03 */
+ 0x0000000000800000ull, /* 04 */
+ 0x0000000000800080ull, /* 05 */
+ 0x0000000000808000ull, /* 06 */
+ 0x0000000000808080ull, /* 07 */
+ 0x0000000080000000ull, /* 08 */
+ 0x0000000080000080ull, /* 09 */
+ 0x0000000080008000ull, /* 0A */
+ 0x0000000080008080ull, /* 0B */
+ 0x0000000080800000ull, /* 0C */
+ 0x0000000080800080ull, /* 0D */
+ 0x0000000080808000ull, /* 0E */
+ 0x0000000080808080ull, /* 0F */
+ 0x0000008000000000ull, /* 10 */
+ 0x0000008000000080ull, /* 11 */
+ 0x0000008000008000ull, /* 12 */
+ 0x0000008000008080ull, /* 13 */
+ 0x0000008000800000ull, /* 14 */
+ 0x0000008000800080ull, /* 15 */
+ 0x0000008000808000ull, /* 16 */
+ 0x0000008000808080ull, /* 17 */
+ 0x0000008080000000ull, /* 18 */
+ 0x0000008080000080ull, /* 19 */
+ 0x0000008080008000ull, /* 1A */
+ 0x0000008080008080ull, /* 1B */
+ 0x0000008080800000ull, /* 1C */
+ 0x0000008080800080ull, /* 1D */
+ 0x0000008080808000ull, /* 1E */
+ 0x0000008080808080ull, /* 1F */
+ 0x0000800000000000ull, /* 20 */
+ 0x0000800000000080ull, /* 21 */
+ 0x0000800000008000ull, /* 22 */
+ 0x0000800000008080ull, /* 23 */
+ 0x0000800000800000ull, /* 24 */
+ 0x0000800000800080ull, /* 25 */
+ 0x0000800000808000ull, /* 26 */
+ 0x0000800000808080ull, /* 27 */
+ 0x0000800080000000ull, /* 28 */
+ 0x0000800080000080ull, /* 29 */
+ 0x0000800080008000ull, /* 2A */
+ 0x0000800080008080ull, /* 2B */
+ 0x0000800080800000ull, /* 2C */
+ 0x0000800080800080ull, /* 2D */
+ 0x0000800080808000ull, /* 2E */
+ 0x0000800080808080ull, /* 2F */
+ 0x0000808000000000ull, /* 30 */
+ 0x0000808000000080ull, /* 31 */
+ 0x0000808000008000ull, /* 32 */
+ 0x0000808000008080ull, /* 33 */
+ 0x0000808000800000ull, /* 34 */
+ 0x0000808000800080ull, /* 35 */
+ 0x0000808000808000ull, /* 36 */
+ 0x0000808000808080ull, /* 37 */
+ 0x0000808080000000ull, /* 38 */
+ 0x0000808080000080ull, /* 39 */
+ 0x0000808080008000ull, /* 3A */
+ 0x0000808080008080ull, /* 3B */
+ 0x0000808080800000ull, /* 3C */
+ 0x0000808080800080ull, /* 3D */
+ 0x0000808080808000ull, /* 3E */
+ 0x0000808080808080ull, /* 3F */
+ 0x0080000000000000ull, /* 40 */
+ 0x0080000000000080ull, /* 41 */
+ 0x0080000000008000ull, /* 42 */
+ 0x0080000000008080ull, /* 43 */
+ 0x0080000000800000ull, /* 44 */
+ 0x0080000000800080ull, /* 45 */
+ 0x0080000000808000ull, /* 46 */
+ 0x0080000000808080ull, /* 47 */
+ 0x0080000080000000ull, /* 48 */
+ 0x0080000080000080ull, /* 49 */
+ 0x0080000080008000ull, /* 4A */
+ 0x0080000080008080ull, /* 4B */
+ 0x0080000080800000ull, /* 4C */
+ 0x0080000080800080ull, /* 4D */
+ 0x0080000080808000ull, /* 4E */
+ 0x0080000080808080ull, /* 4F */
+ 0x0080008000000000ull, /* 50 */
+ 0x0080008000000080ull, /* 51 */
+ 0x0080008000008000ull, /* 52 */
+ 0x0080008000008080ull, /* 53 */
+ 0x0080008000800000ull, /* 54 */
+ 0x0080008000800080ull, /* 55 */
+ 0x0080008000808000ull, /* 56 */
+ 0x0080008000808080ull, /* 57 */
+ 0x0080008080000000ull, /* 58 */
+ 0x0080008080000080ull, /* 59 */
+ 0x0080008080008000ull, /* 5A */
+ 0x0080008080008080ull, /* 5B */
+ 0x0080008080800000ull, /* 5C */
+ 0x0080008080800080ull, /* 5D */
+ 0x0080008080808000ull, /* 5E */
+ 0x0080008080808080ull, /* 5F */
+ 0x0080800000000000ull, /* 60 */
+ 0x0080800000000080ull, /* 61 */
+ 0x0080800000008000ull, /* 62 */
+ 0x0080800000008080ull, /* 63 */
+ 0x0080800000800000ull, /* 64 */
+ 0x0080800000800080ull, /* 65 */
+ 0x0080800000808000ull, /* 66 */
+ 0x0080800000808080ull, /* 67 */
+ 0x0080800080000000ull, /* 68 */
+ 0x0080800080000080ull, /* 69 */
+ 0x0080800080008000ull, /* 6A */
+ 0x0080800080008080ull, /* 6B */
+ 0x0080800080800000ull, /* 6C */
+ 0x0080800080800080ull, /* 6D */
+ 0x0080800080808000ull, /* 6E */
+ 0x0080800080808080ull, /* 6F */
+ 0x0080808000000000ull, /* 70 */
+ 0x0080808000000080ull, /* 71 */
+ 0x0080808000008000ull, /* 72 */
+ 0x0080808000008080ull, /* 73 */
+ 0x0080808000800000ull, /* 74 */
+ 0x0080808000800080ull, /* 75 */
+ 0x0080808000808000ull, /* 76 */
+ 0x0080808000808080ull, /* 77 */
+ 0x0080808080000000ull, /* 78 */
+ 0x0080808080000080ull, /* 79 */
+ 0x0080808080008000ull, /* 7A */
+ 0x0080808080008080ull, /* 7B */
+ 0x0080808080800000ull, /* 7C */
+ 0x0080808080800080ull, /* 7D */
+ 0x0080808080808000ull, /* 7E */
+ 0x0080808080808080ull, /* 7F */
+ 0x8000000000000000ull, /* 80 */
+ 0x8000000000000080ull, /* 81 */
+ 0x8000000000008000ull, /* 82 */
+ 0x8000000000008080ull, /* 83 */
+ 0x8000000000800000ull, /* 84 */
+ 0x8000000000800080ull, /* 85 */
+ 0x8000000000808000ull, /* 86 */
+ 0x8000000000808080ull, /* 87 */
+ 0x8000000080000000ull, /* 88 */
+ 0x8000000080000080ull, /* 89 */
+ 0x8000000080008000ull, /* 8A */
+ 0x8000000080008080ull, /* 8B */
+ 0x8000000080800000ull, /* 8C */
+ 0x8000000080800080ull, /* 8D */
+ 0x8000000080808000ull, /* 8E */
+ 0x8000000080808080ull, /* 8F */
+ 0x8000008000000000ull, /* 90 */
+ 0x8000008000000080ull, /* 91 */
+ 0x8000008000008000ull, /* 92 */
+ 0x8000008000008080ull, /* 93 */
+ 0x8000008000800000ull, /* 94 */
+ 0x8000008000800080ull, /* 95 */
+ 0x8000008000808000ull, /* 96 */
+ 0x8000008000808080ull, /* 97 */
+ 0x8000008080000000ull, /* 98 */
+ 0x8000008080000080ull, /* 99 */
+ 0x8000008080008000ull, /* 9A */
+ 0x8000008080008080ull, /* 9B */
+ 0x8000008080800000ull, /* 9C */
+ 0x8000008080800080ull, /* 9D */
+ 0x8000008080808000ull, /* 9E */
+ 0x8000008080808080ull, /* 9F */
+ 0x8000800000000000ull, /* A0 */
+ 0x8000800000000080ull, /* A1 */
+ 0x8000800000008000ull, /* A2 */
+ 0x8000800000008080ull, /* A3 */
+ 0x8000800000800000ull, /* A4 */
+ 0x8000800000800080ull, /* A5 */
+ 0x8000800000808000ull, /* A6 */
+ 0x8000800000808080ull, /* A7 */
+ 0x8000800080000000ull, /* A8 */
+ 0x8000800080000080ull, /* A9 */
+ 0x8000800080008000ull, /* AA */
+ 0x8000800080008080ull, /* AB */
+ 0x8000800080800000ull, /* AC */
+ 0x8000800080800080ull, /* AD */
+ 0x8000800080808000ull, /* AE */
+ 0x8000800080808080ull, /* AF */
+ 0x8000808000000000ull, /* B0 */
+ 0x8000808000000080ull, /* B1 */
+ 0x8000808000008000ull, /* B2 */
+ 0x8000808000008080ull, /* B3 */
+ 0x8000808000800000ull, /* B4 */
+ 0x8000808000800080ull, /* B5 */
+ 0x8000808000808000ull, /* B6 */
+ 0x8000808000808080ull, /* B7 */
+ 0x8000808080000000ull, /* B8 */
+ 0x8000808080000080ull, /* B9 */
+ 0x8000808080008000ull, /* BA */
+ 0x8000808080008080ull, /* BB */
+ 0x8000808080800000ull, /* BC */
+ 0x8000808080800080ull, /* BD */
+ 0x8000808080808000ull, /* BE */
+ 0x8000808080808080ull, /* BF */
+ 0x8080000000000000ull, /* C0 */
+ 0x8080000000000080ull, /* C1 */
+ 0x8080000000008000ull, /* C2 */
+ 0x8080000000008080ull, /* C3 */
+ 0x8080000000800000ull, /* C4 */
+ 0x8080000000800080ull, /* C5 */
+ 0x8080000000808000ull, /* C6 */
+ 0x8080000000808080ull, /* C7 */
+ 0x8080000080000000ull, /* C8 */
+ 0x8080000080000080ull, /* C9 */
+ 0x8080000080008000ull, /* CA */
+ 0x8080000080008080ull, /* CB */
+ 0x8080000080800000ull, /* CC */
+ 0x8080000080800080ull, /* CD */
+ 0x8080000080808000ull, /* CE */
+ 0x8080000080808080ull, /* CF */
+ 0x8080008000000000ull, /* D0 */
+ 0x8080008000000080ull, /* D1 */
+ 0x8080008000008000ull, /* D2 */
+ 0x8080008000008080ull, /* D3 */
+ 0x8080008000800000ull, /* D4 */
+ 0x8080008000800080ull, /* D5 */
+ 0x8080008000808000ull, /* D6 */
+ 0x8080008000808080ull, /* D7 */
+ 0x8080008080000000ull, /* D8 */
+ 0x8080008080000080ull, /* D9 */
+ 0x8080008080008000ull, /* DA */
+ 0x8080008080008080ull, /* DB */
+ 0x8080008080800000ull, /* DC */
+ 0x8080008080800080ull, /* DD */
+ 0x8080008080808000ull, /* DE */
+ 0x8080008080808080ull, /* DF */
+ 0x8080800000000000ull, /* E0 */
+ 0x8080800000000080ull, /* E1 */
+ 0x8080800000008000ull, /* E2 */
+ 0x8080800000008080ull, /* E3 */
+ 0x8080800000800000ull, /* E4 */
+ 0x8080800000800080ull, /* E5 */
+ 0x8080800000808000ull, /* E6 */
+ 0x8080800000808080ull, /* E7 */
+ 0x8080800080000000ull, /* E8 */
+ 0x8080800080000080ull, /* E9 */
+ 0x8080800080008000ull, /* EA */
+ 0x8080800080008080ull, /* EB */
+ 0x8080800080800000ull, /* EC */
+ 0x8080800080800080ull, /* ED */
+ 0x8080800080808000ull, /* EE */
+ 0x8080800080808080ull, /* EF */
+ 0x8080808000000000ull, /* F0 */
+ 0x8080808000000080ull, /* F1 */
+ 0x8080808000008000ull, /* F2 */
+ 0x8080808000008080ull, /* F3 */
+ 0x8080808000800000ull, /* F4 */
+ 0x8080808000800080ull, /* F5 */
+ 0x8080808000808000ull, /* F6 */
+ 0x8080808000808080ull, /* F7 */
+ 0x8080808080000000ull, /* F8 */
+ 0x8080808080000080ull, /* F9 */
+ 0x8080808080008000ull, /* FA */
+ 0x8080808080008080ull, /* FB */
+ 0x8080808080800000ull, /* FC */
+ 0x8080808080800080ull, /* FD */
+ 0x8080808080808000ull, /* FE */
+ 0x8080808080808080ull, /* FF */
+};
+
+void helper_vgbbd(ppc_avr_t *r, ppc_avr_t *b)
+{
+ int i;
+ uint64_t t[2] = { 0, 0 };
+
+ VECTOR_FOR_INORDER_I(i, u8) {
+#if defined(HOST_WORDS_BIGENDIAN)
+ t[i>>3] |= VGBBD_MASKS[b->u8[i]] >> (i & 7);
+#else
+ t[i>>3] |= VGBBD_MASKS[b->u8[i]] >> (7-(i & 7));
+#endif
+ }
+
+ r->u64[0] = t[0];
+ r->u64[1] = t[1];
+}
+
+#define PMSUM(name, srcfld, trgfld, trgtyp) \
+void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+{ \
+ int i, j; \
+ trgtyp prod[sizeof(ppc_avr_t)/sizeof(a->srcfld[0])]; \
+ \
+ VECTOR_FOR_INORDER_I(i, srcfld) { \
+ prod[i] = 0; \
+ for (j = 0; j < sizeof(a->srcfld[0]) * 8; j++) { \
+ if (a->srcfld[i] & (1ull<<j)) { \
+ prod[i] ^= ((trgtyp)b->srcfld[i] << j); \
+ } \
+ } \
+ } \
+ \
+ VECTOR_FOR_INORDER_I(i, trgfld) { \
+ r->trgfld[i] = prod[2*i] ^ prod[2*i+1]; \
+ } \
+}
+
+PMSUM(vpmsumb, u8, u16, uint16_t)
+PMSUM(vpmsumh, u16, u32, uint32_t)
+PMSUM(vpmsumw, u32, u64, uint64_t)
+
+void helper_vpmsumd(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+
+#ifdef CONFIG_INT128
+ int i, j;
+ __uint128_t prod[2];
+
+ VECTOR_FOR_INORDER_I(i, u64) {
+ prod[i] = 0;
+ for (j = 0; j < 64; j++) {
+ if (a->u64[i] & (1ull<<j)) {
+ prod[i] ^= (((__uint128_t)b->u64[i]) << j);
+ }
+ }
+ }
+
+ r->u128 = prod[0] ^ prod[1];
+
+#else
+ int i, j;
+ ppc_avr_t prod[2];
+
+ VECTOR_FOR_INORDER_I(i, u64) {
+ prod[i].u64[LO_IDX] = prod[i].u64[HI_IDX] = 0;
+ for (j = 0; j < 64; j++) {
+ if (a->u64[i] & (1ull<<j)) {
+ ppc_avr_t bshift;
+ if (j == 0) {
+ bshift.u64[HI_IDX] = 0;
+ bshift.u64[LO_IDX] = b->u64[i];
+ } else {
+ bshift.u64[HI_IDX] = b->u64[i] >> (64-j);
+ bshift.u64[LO_IDX] = b->u64[i] << j;
+ }
+ prod[i].u64[LO_IDX] ^= bshift.u64[LO_IDX];
+ prod[i].u64[HI_IDX] ^= bshift.u64[HI_IDX];
+ }
+ }
+ }
+
+ r->u64[LO_IDX] = prod[0].u64[LO_IDX] ^ prod[1].u64[LO_IDX];
+ r->u64[HI_IDX] = prod[0].u64[HI_IDX] ^ prod[1].u64[HI_IDX];
+#endif
+}
+
+
+#if defined(HOST_WORDS_BIGENDIAN)
+#define PKBIG 1
+#else
+#define PKBIG 0
+#endif
+void helper_vpkpx(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int i, j;
+ ppc_avr_t result;
+#if defined(HOST_WORDS_BIGENDIAN)
+ const ppc_avr_t *x[2] = { a, b };
+#else
+ const ppc_avr_t *x[2] = { b, a };
+#endif
+
+ VECTOR_FOR_INORDER_I(i, u64) {
+ VECTOR_FOR_INORDER_I(j, u32) {
+ uint32_t e = x[i]->u32[j];
+
+ result.u16[4*i+j] = (((e >> 9) & 0xfc00) |
+ ((e >> 6) & 0x3e0) |
+ ((e >> 3) & 0x1f));
+ }
+ }
+ *r = result;
+}
+
+#define VPK(suffix, from, to, cvt, dosat) \
+ void helper_vpk##suffix(CPUPPCState *env, ppc_avr_t *r, \
+ ppc_avr_t *a, ppc_avr_t *b) \
+ { \
+ int i; \
+ int sat = 0; \
+ ppc_avr_t result; \
+ ppc_avr_t *a0 = PKBIG ? a : b; \
+ ppc_avr_t *a1 = PKBIG ? b : a; \
+ \
+ VECTOR_FOR_INORDER_I(i, from) { \
+ result.to[i] = cvt(a0->from[i], &sat); \
+ result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat); \
+ } \
+ *r = result; \
+ if (dosat && sat) { \
+ env->vscr |= (1 << VSCR_SAT); \
+ } \
+ }
+#define I(x, y) (x)
+VPK(shss, s16, s8, cvtshsb, 1)
+VPK(shus, s16, u8, cvtshub, 1)
+VPK(swss, s32, s16, cvtswsh, 1)
+VPK(swus, s32, u16, cvtswuh, 1)
+VPK(sdss, s64, s32, cvtsdsw, 1)
+VPK(sdus, s64, u32, cvtsduw, 1)
+VPK(uhus, u16, u8, cvtuhub, 1)
+VPK(uwus, u32, u16, cvtuwuh, 1)
+VPK(udus, u64, u32, cvtuduw, 1)
+VPK(uhum, u16, u8, I, 0)
+VPK(uwum, u32, u16, I, 0)
+VPK(udum, u64, u32, I, 0)
+#undef I
+#undef VPK
+#undef PKBIG
+
+void helper_vrefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->f); i++) {
+ r->f[i] = float32_div(float32_one, b->f[i], &env->vec_status);
+ }
+}
+
+#define VRFI(suffix, rounding) \
+ void helper_vrfi##suffix(CPUPPCState *env, ppc_avr_t *r, \
+ ppc_avr_t *b) \
+ { \
+ int i; \
+ float_status s = env->vec_status; \
+ \
+ set_float_rounding_mode(rounding, &s); \
+ for (i = 0; i < ARRAY_SIZE(r->f); i++) { \
+ r->f[i] = float32_round_to_int(b->f[i], &s); \
+ } \
+ }
+VRFI(n, float_round_nearest_even)
+VRFI(m, float_round_down)
+VRFI(p, float_round_up)
+VRFI(z, float_round_to_zero)
+#undef VRFI
+
+#define VROTATE(suffix, element, mask) \
+ void helper_vrl##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+ { \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
+ unsigned int shift = b->element[i] & mask; \
+ r->element[i] = (a->element[i] << shift) | \
+ (a->element[i] >> (sizeof(a->element[0]) * 8 - shift)); \
+ } \
+ }
+VROTATE(b, u8, 0x7)
+VROTATE(h, u16, 0xF)
+VROTATE(w, u32, 0x1F)
+VROTATE(d, u64, 0x3F)
+#undef VROTATE
+
+void helper_vrsqrtefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->f); i++) {
+ float32 t = float32_sqrt(b->f[i], &env->vec_status);
+
+ r->f[i] = float32_div(float32_one, t, &env->vec_status);
+ }
+}
+
+#define VRLMI(name, size, element, insert) \
+void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+{ \
+ int i; \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
+ uint##size##_t src1 = a->element[i]; \
+ uint##size##_t src2 = b->element[i]; \
+ uint##size##_t src3 = r->element[i]; \
+ uint##size##_t begin, end, shift, mask, rot_val; \
+ \
+ shift = extract##size(src2, 0, 6); \
+ end = extract##size(src2, 8, 6); \
+ begin = extract##size(src2, 16, 6); \
+ rot_val = rol##size(src1, shift); \
+ mask = mask_u##size(begin, end); \
+ if (insert) { \
+ r->element[i] = (rot_val & mask) | (src3 & ~mask); \
+ } else { \
+ r->element[i] = (rot_val & mask); \
+ } \
+ } \
+}
+
+VRLMI(vrldmi, 64, u64, 1);
+VRLMI(vrlwmi, 32, u32, 1);
+VRLMI(vrldnm, 64, u64, 0);
+VRLMI(vrlwnm, 32, u32, 0);
+
+void helper_vsel(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b,
+ ppc_avr_t *c)
+{
+ r->u64[0] = (a->u64[0] & ~c->u64[0]) | (b->u64[0] & c->u64[0]);
+ r->u64[1] = (a->u64[1] & ~c->u64[1]) | (b->u64[1] & c->u64[1]);
+}
+
+void helper_vexptefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->f); i++) {
+ r->f[i] = float32_exp2(b->f[i], &env->vec_status);
+ }
+}
+
+void helper_vlogefp(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *b)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->f); i++) {
+ r->f[i] = float32_log2(b->f[i], &env->vec_status);
+ }
+}
+
+/* The specification says that the results are undefined unless all of the
+ * shift counts are identical. We check that they are, to conform to what
+ * real hardware appears to do. */
+#define VSHIFT(suffix, leftp) \
+ void helper_vs##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+ { \
+ int shift = b->u8[LO_IDX*15] & 0x7; \
+ int doit = 1; \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(r->u8); i++) { \
+ doit = doit && ((b->u8[i] & 0x7) == shift); \
+ } \
+ if (doit) { \
+ if (shift == 0) { \
+ *r = *a; \
+ } else if (leftp) { \
+ uint64_t carry = a->u64[LO_IDX] >> (64 - shift); \
+ \
+ r->u64[HI_IDX] = (a->u64[HI_IDX] << shift) | carry; \
+ r->u64[LO_IDX] = a->u64[LO_IDX] << shift; \
+ } else { \
+ uint64_t carry = a->u64[HI_IDX] << (64 - shift); \
+ \
+ r->u64[LO_IDX] = (a->u64[LO_IDX] >> shift) | carry; \
+ r->u64[HI_IDX] = a->u64[HI_IDX] >> shift; \
+ } \
+ } \
+ }
+VSHIFT(l, 1)
+VSHIFT(r, 0)
+#undef VSHIFT
+
+#define VSL(suffix, element, mask) \
+ void helper_vsl##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+ { \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
+ unsigned int shift = b->element[i] & mask; \
+ \
+ r->element[i] = a->element[i] << shift; \
+ } \
+ }
+VSL(b, u8, 0x7)
+VSL(h, u16, 0x0F)
+VSL(w, u32, 0x1F)
+VSL(d, u64, 0x3F)
+#undef VSL
+
+void helper_vslv(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int i;
+ unsigned int shift, bytes, size;
+
+ size = ARRAY_SIZE(r->u8);
+ for (i = 0; i < size; i++) {
+ shift = b->u8[i] & 0x7; /* extract shift value */
+ bytes = (a->u8[i] << 8) + /* extract adjacent bytes */
+ (((i + 1) < size) ? a->u8[i + 1] : 0);
+ r->u8[i] = (bytes << shift) >> 8; /* shift and store result */
+ }
+}
+
+void helper_vsrv(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int i;
+ unsigned int shift, bytes;
+
+ /* Walk the bytes in reverse order, as the destination and source registers
+ * can be the same. Because the vector is modified in place (saving a
+ * temporary), the reverse order guarantees that an already-computed result
+ * is never fed back into the computation.
+ */
+ for (i = ARRAY_SIZE(r->u8) - 1; i >= 0; i--) {
+ shift = b->u8[i] & 0x7; /* extract shift value */
+ bytes = ((i ? a->u8[i - 1] : 0) << 8) + a->u8[i];
+ /* extract adjacent bytes */
+ r->u8[i] = (bytes >> shift) & 0xFF; /* shift and store result */
+ }
+}
+
+void helper_vsldoi(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t shift)
+{
+ int sh = shift & 0xf;
+ int i;
+ ppc_avr_t result;
+
+#if defined(HOST_WORDS_BIGENDIAN)
+ for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
+ int index = sh + i;
+ if (index > 0xf) {
+ result.u8[i] = b->u8[index - 0x10];
+ } else {
+ result.u8[i] = a->u8[index];
+ }
+ }
+#else
+ for (i = 0; i < ARRAY_SIZE(r->u8); i++) {
+ int index = (16 - sh) + i;
+ if (index > 0xf) {
+ result.u8[i] = a->u8[index - 0x10];
+ } else {
+ result.u8[i] = b->u8[index];
+ }
+ }
+#endif
+ *r = result;
+}
+
+void helper_vslo(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int sh = (b->u8[LO_IDX*0xf] >> 3) & 0xf;
+
+#if defined(HOST_WORDS_BIGENDIAN)
+ memmove(&r->u8[0], &a->u8[sh], 16 - sh);
+ memset(&r->u8[16-sh], 0, sh);
+#else
+ memmove(&r->u8[sh], &a->u8[0], 16 - sh);
+ memset(&r->u8[0], 0, sh);
+#endif
+}
+
+/* Experimental testing shows that hardware masks the immediate. */
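+/* e.g. a vspltb immediate of 17 is reduced to 17 & 15 == 1 before the source
+ * element is selected. */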
+#define _SPLAT_MASKED(element) (splat & (ARRAY_SIZE(r->element) - 1))
+#if defined(HOST_WORDS_BIGENDIAN)
+#define SPLAT_ELEMENT(element) _SPLAT_MASKED(element)
+#else
+#define SPLAT_ELEMENT(element) \
+ (ARRAY_SIZE(r->element) - 1 - _SPLAT_MASKED(element))
+#endif
+#define VSPLT(suffix, element) \
+ void helper_vsplt##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t splat) \
+ { \
+ uint32_t s = b->element[SPLAT_ELEMENT(element)]; \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
+ r->element[i] = s; \
+ } \
+ }
+VSPLT(b, u8)
+VSPLT(h, u16)
+VSPLT(w, u32)
+#undef VSPLT
+#undef SPLAT_ELEMENT
+#undef _SPLAT_MASKED
+#if defined(HOST_WORDS_BIGENDIAN)
+#define VINSERT(suffix, element) \
+ void helper_vinsert##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \
+ { \
+ memmove(&r->u8[index], &b->u8[8 - sizeof(r->element)], \
+ sizeof(r->element[0])); \
+ }
+#else
+#define VINSERT(suffix, element) \
+ void helper_vinsert##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \
+ { \
+ uint32_t d = (16 - index) - sizeof(r->element[0]); \
+ memmove(&r->u8[d], &b->u8[8], sizeof(r->element[0])); \
+ }
+#endif
+VINSERT(b, u8)
+VINSERT(h, u16)
+VINSERT(w, u32)
+VINSERT(d, u64)
+#undef VINSERT
+#if defined(HOST_WORDS_BIGENDIAN)
+#define VEXTRACT(suffix, element) \
+ void helper_vextract##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \
+ { \
+ uint32_t es = sizeof(r->element[0]); \
+ memmove(&r->u8[8 - es], &b->u8[index], es); \
+ memset(&r->u8[8], 0, 8); \
+ memset(&r->u8[0], 0, 8 - es); \
+ }
+#else
+#define VEXTRACT(suffix, element) \
+ void helper_vextract##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \
+ { \
+ uint32_t es = sizeof(r->element[0]); \
+ uint32_t s = (16 - index) - es; \
+ memmove(&r->u8[8], &b->u8[s], es); \
+ memset(&r->u8[0], 0, 8); \
+ memset(&r->u8[8 + es], 0, 8 - es); \
+ }
+#endif
+VEXTRACT(ub, u8)
+VEXTRACT(uh, u16)
+VEXTRACT(uw, u32)
+VEXTRACT(d, u64)
+#undef VEXTRACT
+
+#define VEXT_SIGNED(name, element, mask, cast, recast) \
+void helper_##name(ppc_avr_t *r, ppc_avr_t *b) \
+{ \
+ int i; \
+ VECTOR_FOR_INORDER_I(i, element) { \
+ r->element[i] = (recast)((cast)(b->element[i] & mask)); \
+ } \
+}
+VEXT_SIGNED(vextsb2w, s32, UINT8_MAX, int8_t, int32_t)
+VEXT_SIGNED(vextsb2d, s64, UINT8_MAX, int8_t, int64_t)
+VEXT_SIGNED(vextsh2w, s32, UINT16_MAX, int16_t, int32_t)
+VEXT_SIGNED(vextsh2d, s64, UINT16_MAX, int16_t, int64_t)
+VEXT_SIGNED(vextsw2d, s64, UINT32_MAX, int32_t, int64_t)
+#undef VEXT_SIGNED
+
+#define VNEG(name, element) \
+void helper_##name(ppc_avr_t *r, ppc_avr_t *b) \
+{ \
+ int i; \
+ VECTOR_FOR_INORDER_I(i, element) { \
+ r->element[i] = -b->element[i]; \
+ } \
+}
+VNEG(vnegw, s32)
+VNEG(vnegd, s64)
+#undef VNEG
+
+#define VSPLTI(suffix, element, splat_type) \
+ void helper_vspltis##suffix(ppc_avr_t *r, uint32_t splat) \
+ { \
+ splat_type x = (int8_t)(splat << 3) >> 3; \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
+ r->element[i] = x; \
+ } \
+ }
+VSPLTI(b, s8, int8_t)
+VSPLTI(h, s16, int16_t)
+VSPLTI(w, s32, int32_t)
+#undef VSPLTI
+
+#define VSR(suffix, element, mask) \
+ void helper_vsr##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \
+ { \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(r->element); i++) { \
+ unsigned int shift = b->element[i] & mask; \
+ r->element[i] = a->element[i] >> shift; \
+ } \
+ }
+VSR(ab, s8, 0x7)
+VSR(ah, s16, 0xF)
+VSR(aw, s32, 0x1F)
+VSR(ad, s64, 0x3F)
+VSR(b, u8, 0x7)
+VSR(h, u16, 0xF)
+VSR(w, u32, 0x1F)
+VSR(d, u64, 0x3F)
+#undef VSR
+
+void helper_vsro(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int sh = (b->u8[LO_IDX * 0xf] >> 3) & 0xf;
+
+#if defined(HOST_WORDS_BIGENDIAN)
+ memmove(&r->u8[sh], &a->u8[0], 16 - sh);
+ memset(&r->u8[0], 0, sh);
+#else
+ memmove(&r->u8[0], &a->u8[sh], 16 - sh);
+ memset(&r->u8[16 - sh], 0, sh);
+#endif
+}
+
+void helper_vsubcuw(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
+ r->u32[i] = a->u32[i] >= b->u32[i];
+ }
+}
+
+void helper_vsumsws(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int64_t t;
+ int i, upper;
+ ppc_avr_t result;
+ int sat = 0;
+
+#if defined(HOST_WORDS_BIGENDIAN)
+ upper = ARRAY_SIZE(r->s32)-1;
+#else
+ upper = 0;
+#endif
+ t = (int64_t)b->s32[upper];
+ for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
+ t += a->s32[i];
+ result.s32[i] = 0;
+ }
+ result.s32[upper] = cvtsdsw(t, &sat);
+ *r = result;
+
+ if (sat) {
+ env->vscr |= (1 << VSCR_SAT);
+ }
+}
+
+void helper_vsum2sws(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int i, j, upper;
+ ppc_avr_t result;
+ int sat = 0;
+
+#if defined(HOST_WORDS_BIGENDIAN)
+ upper = 1;
+#else
+ upper = 0;
+#endif
+ for (i = 0; i < ARRAY_SIZE(r->u64); i++) {
+ int64_t t = (int64_t)b->s32[upper + i * 2];
+
+ result.u64[i] = 0;
+ for (j = 0; j < ARRAY_SIZE(r->u64); j++) {
+ t += a->s32[2 * i + j];
+ }
+ result.s32[upper + i * 2] = cvtsdsw(t, &sat);
+ }
+
+ *r = result;
+ if (sat) {
+ env->vscr |= (1 << VSCR_SAT);
+ }
+}
+
+void helper_vsum4sbs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int i, j;
+ int sat = 0;
+
+ for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
+ int64_t t = (int64_t)b->s32[i];
+
+ for (j = 0; j < ARRAY_SIZE(r->s32); j++) {
+ t += a->s8[4 * i + j];
+ }
+ r->s32[i] = cvtsdsw(t, &sat);
+ }
+
+ if (sat) {
+ env->vscr |= (1 << VSCR_SAT);
+ }
+}
+
+void helper_vsum4shs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int sat = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(r->s32); i++) {
+ int64_t t = (int64_t)b->s32[i];
+
+ t += a->s16[2 * i] + a->s16[2 * i + 1];
+ r->s32[i] = cvtsdsw(t, &sat);
+ }
+
+ if (sat) {
+ env->vscr |= (1 << VSCR_SAT);
+ }
+}
+
+void helper_vsum4ubs(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ int i, j;
+ int sat = 0;
+
+ for (i = 0; i < ARRAY_SIZE(r->u32); i++) {
+ uint64_t t = (uint64_t)b->u32[i];
+
+ for (j = 0; j < ARRAY_SIZE(r->u32); j++) {
+ t += a->u8[4 * i + j];
+ }
+ r->u32[i] = cvtuduw(t, &sat);
+ }
+
+ if (sat) {
+ env->vscr |= (1 << VSCR_SAT);
+ }
+}
+
+#if defined(HOST_WORDS_BIGENDIAN)
+#define UPKHI 1
+#define UPKLO 0
+#else
+#define UPKHI 0
+#define UPKLO 1
+#endif
+#define VUPKPX(suffix, hi) \
+ void helper_vupk##suffix(ppc_avr_t *r, ppc_avr_t *b) \
+ { \
+ int i; \
+ ppc_avr_t result; \
+ \
+ for (i = 0; i < ARRAY_SIZE(r->u32); i++) { \
+ uint16_t e = b->u16[hi ? i : i+4]; \
+ uint8_t a = (e >> 15) ? 0xff : 0; \
+ uint8_t r = (e >> 10) & 0x1f; \
+ uint8_t g = (e >> 5) & 0x1f; \
+ uint8_t b = e & 0x1f; \
+ \
+ result.u32[i] = (a << 24) | (r << 16) | (g << 8) | b; \
+ } \
+ *r = result; \
+ }
+VUPKPX(lpx, UPKLO)
+VUPKPX(hpx, UPKHI)
+#undef VUPKPX
+
+#define VUPK(suffix, unpacked, packee, hi) \
+ void helper_vupk##suffix(ppc_avr_t *r, ppc_avr_t *b) \
+ { \
+ int i; \
+ ppc_avr_t result; \
+ \
+ if (hi) { \
+ for (i = 0; i < ARRAY_SIZE(r->unpacked); i++) { \
+ result.unpacked[i] = b->packee[i]; \
+ } \
+ } else { \
+ for (i = ARRAY_SIZE(r->unpacked); i < ARRAY_SIZE(r->packee); \
+ i++) { \
+ result.unpacked[i - ARRAY_SIZE(r->unpacked)] = b->packee[i]; \
+ } \
+ } \
+ *r = result; \
+ }
+VUPK(hsb, s16, s8, UPKHI)
+VUPK(hsh, s32, s16, UPKHI)
+VUPK(hsw, s64, s32, UPKHI)
+VUPK(lsb, s16, s8, UPKLO)
+VUPK(lsh, s32, s16, UPKLO)
+VUPK(lsw, s64, s32, UPKLO)
+#undef VUPK
+#undef UPKHI
+#undef UPKLO
+
+#define VGENERIC_DO(name, element) \
+ void helper_v##name(ppc_avr_t *r, ppc_avr_t *b) \
+ { \
+ int i; \
+ \
+ VECTOR_FOR_INORDER_I(i, element) { \
+ r->element[i] = name(b->element[i]); \
+ } \
+ }
+
+#define clzb(v) ((v) ? clz32((uint32_t)(v) << 24) : 8)
+#define clzh(v) ((v) ? clz32((uint32_t)(v) << 16) : 16)
+#define clzw(v) clz32((v))
+#define clzd(v) clz64((v))
+
+VGENERIC_DO(clzb, u8)
+VGENERIC_DO(clzh, u16)
+VGENERIC_DO(clzw, u32)
+VGENERIC_DO(clzd, u64)
+
+#undef clzb
+#undef clzh
+#undef clzw
+#undef clzd
+
+#define ctzb(v) ((v) ? ctz32(v) : 8)
+#define ctzh(v) ((v) ? ctz32(v) : 16)
+#define ctzw(v) ctz32((v))
+#define ctzd(v) ctz64((v))
+
+VGENERIC_DO(ctzb, u8)
+VGENERIC_DO(ctzh, u16)
+VGENERIC_DO(ctzw, u32)
+VGENERIC_DO(ctzd, u64)
+
+#undef ctzb
+#undef ctzh
+#undef ctzw
+#undef ctzd
+
+#define popcntb(v) ctpop8(v)
+#define popcnth(v) ctpop16(v)
+#define popcntw(v) ctpop32(v)
+#define popcntd(v) ctpop64(v)
+
+VGENERIC_DO(popcntb, u8)
+VGENERIC_DO(popcnth, u16)
+VGENERIC_DO(popcntw, u32)
+VGENERIC_DO(popcntd, u64)
+
+#undef popcntb
+#undef popcnth
+#undef popcntw
+#undef popcntd
+
+#undef VGENERIC_DO
+
+#if defined(HOST_WORDS_BIGENDIAN)
+#define QW_ONE { .u64 = { 0, 1 } }
+#else
+#define QW_ONE { .u64 = { 1, 0 } }
+#endif
+
+#ifndef CONFIG_INT128
+
+static inline void avr_qw_not(ppc_avr_t *t, ppc_avr_t a)
+{
+ t->u64[0] = ~a.u64[0];
+ t->u64[1] = ~a.u64[1];
+}
+
+static int avr_qw_cmpu(ppc_avr_t a, ppc_avr_t b)
+{
+ if (a.u64[HI_IDX] < b.u64[HI_IDX]) {
+ return -1;
+ } else if (a.u64[HI_IDX] > b.u64[HI_IDX]) {
+ return 1;
+ } else if (a.u64[LO_IDX] < b.u64[LO_IDX]) {
+ return -1;
+ } else if (a.u64[LO_IDX] > b.u64[LO_IDX]) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
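+/* 128-bit add built from two 64-bit halves: ~a.lo < b.lo is true exactly
+ * when a.lo + b.lo carries out of the low doubleword.
+ */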
+static void avr_qw_add(ppc_avr_t *t, ppc_avr_t a, ppc_avr_t b)
+{
+ t->u64[LO_IDX] = a.u64[LO_IDX] + b.u64[LO_IDX];
+ t->u64[HI_IDX] = a.u64[HI_IDX] + b.u64[HI_IDX] +
+ (~a.u64[LO_IDX] < b.u64[LO_IDX]);
+}
+
+static int avr_qw_addc(ppc_avr_t *t, ppc_avr_t a, ppc_avr_t b)
+{
+ ppc_avr_t not_a;
+ t->u64[LO_IDX] = a.u64[LO_IDX] + b.u64[LO_IDX];
+ t->u64[HI_IDX] = a.u64[HI_IDX] + b.u64[HI_IDX] +
+ (~a.u64[LO_IDX] < b.u64[LO_IDX]);
+ avr_qw_not(&not_a, a);
+ return avr_qw_cmpu(not_a, b) < 0;
+}
+
+#endif
+
+void helper_vadduqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+#ifdef CONFIG_INT128
+ r->u128 = a->u128 + b->u128;
+#else
+ avr_qw_add(r, *a, *b);
+#endif
+}
+
+void helper_vaddeuqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
+{
+#ifdef CONFIG_INT128
+ r->u128 = a->u128 + b->u128 + (c->u128 & 1);
+#else
+
+ if (c->u64[LO_IDX] & 1) {
+ ppc_avr_t tmp;
+
+ tmp.u64[HI_IDX] = 0;
+ tmp.u64[LO_IDX] = c->u64[LO_IDX] & 1;
+ avr_qw_add(&tmp, *a, tmp);
+ avr_qw_add(r, tmp, *b);
+ } else {
+ avr_qw_add(r, *a, *b);
+ }
+#endif
+}
+
+void helper_vaddcuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+#ifdef CONFIG_INT128
+ r->u128 = (~a->u128 < b->u128);
+#else
+ ppc_avr_t not_a;
+
+ avr_qw_not(&not_a, *a);
+
+ r->u64[HI_IDX] = 0;
+ r->u64[LO_IDX] = (avr_qw_cmpu(not_a, *b) < 0);
+#endif
+}
+
+void helper_vaddecuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
+{
+#ifdef CONFIG_INT128
+ int carry_out = (~a->u128 < b->u128);
+ if (!carry_out && (c->u128 & 1)) {
+ carry_out = ((a->u128 + b->u128 + 1) == 0) &&
+ ((a->u128 != 0) || (b->u128 != 0));
+ }
+ r->u128 = carry_out;
+#else
+
+ int carry_in = c->u64[LO_IDX] & 1;
+ int carry_out = 0;
+ ppc_avr_t tmp;
+
+ carry_out = avr_qw_addc(&tmp, *a, *b);
+
+ if (!carry_out && carry_in) {
+ ppc_avr_t one = QW_ONE;
+ carry_out = avr_qw_addc(&tmp, tmp, one);
+ }
+ r->u64[HI_IDX] = 0;
+ r->u64[LO_IDX] = carry_out;
+#endif
+}
+
+void helper_vsubuqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+#ifdef CONFIG_INT128
+ r->u128 = a->u128 - b->u128;
+#else
+ ppc_avr_t tmp;
+ ppc_avr_t one = QW_ONE;
+
+ avr_qw_not(&tmp, *b);
+ avr_qw_add(&tmp, *a, tmp);
+ avr_qw_add(r, tmp, one);
+#endif
+}
+
+void helper_vsubeuqm(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
+{
+#ifdef CONFIG_INT128
+ r->u128 = a->u128 + ~b->u128 + (c->u128 & 1);
+#else
+ ppc_avr_t tmp, sum;
+
+ avr_qw_not(&tmp, *b);
+ avr_qw_add(&sum, *a, tmp);
+
+ tmp.u64[HI_IDX] = 0;
+ tmp.u64[LO_IDX] = c->u64[LO_IDX] & 1;
+ avr_qw_add(r, sum, tmp);
+#endif
+}
+
+void helper_vsubcuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+#ifdef CONFIG_INT128
+ r->u128 = (~a->u128 < ~b->u128) ||
+ (a->u128 + ~b->u128 == (__uint128_t)-1);
+#else
+ int carry = (avr_qw_cmpu(*a, *b) > 0);
+ if (!carry) {
+ ppc_avr_t tmp;
+ avr_qw_not(&tmp, *b);
+ avr_qw_add(&tmp, *a, tmp);
+ carry = ((tmp.s64[HI_IDX] == -1ull) && (tmp.s64[LO_IDX] == -1ull));
+ }
+ r->u64[HI_IDX] = 0;
+ r->u64[LO_IDX] = carry;
+#endif
+}
+
+void helper_vsubecuq(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
+{
+#ifdef CONFIG_INT128
+ r->u128 =
+ (~a->u128 < ~b->u128) ||
+ ((c->u128 & 1) && (a->u128 + ~b->u128 == (__uint128_t)-1));
+#else
+ int carry_in = c->u64[LO_IDX] & 1;
+ int carry_out = (avr_qw_cmpu(*a, *b) > 0);
+ if (!carry_out && carry_in) {
+ ppc_avr_t tmp;
+ avr_qw_not(&tmp, *b);
+ avr_qw_add(&tmp, *a, tmp);
+ carry_out = ((tmp.u64[HI_IDX] == -1ull) && (tmp.u64[LO_IDX] == -1ull));
+ }
+
+ r->u64[HI_IDX] = 0;
+ r->u64[LO_IDX] = carry_out;
+#endif
+}
+
+#define BCD_PLUS_PREF_1 0xC
+#define BCD_PLUS_PREF_2 0xF
+#define BCD_PLUS_ALT_1 0xA
+#define BCD_NEG_PREF 0xD
+#define BCD_NEG_ALT 0xB
+#define BCD_PLUS_ALT_2 0xE
+#define NATIONAL_PLUS 0x2B
+#define NATIONAL_NEG 0x2D
+
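+/* BCD_DIG_BYTE(n) maps digit number n (digit 0 is the rightmost nibble,
+ * which holds the sign) to the byte of the vector register containing it.
+ */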
+#if defined(HOST_WORDS_BIGENDIAN)
+#define BCD_DIG_BYTE(n) (15 - (n/2))
+#else
+#define BCD_DIG_BYTE(n) (n/2)
+#endif
+
+static int bcd_get_sgn(ppc_avr_t *bcd)
+{
+ switch (bcd->u8[BCD_DIG_BYTE(0)] & 0xF) {
+ case BCD_PLUS_PREF_1:
+ case BCD_PLUS_PREF_2:
+ case BCD_PLUS_ALT_1:
+ case BCD_PLUS_ALT_2:
+ {
+ return 1;
+ }
+
+ case BCD_NEG_PREF:
+ case BCD_NEG_ALT:
+ {
+ return -1;
+ }
+
+ default:
+ {
+ return 0;
+ }
+ }
+}
+
+static int bcd_preferred_sgn(int sgn, int ps)
+{
+ if (sgn >= 0) {
+ return (ps == 0) ? BCD_PLUS_PREF_1 : BCD_PLUS_PREF_2;
+ } else {
+ return BCD_NEG_PREF;
+ }
+}
+
+static uint8_t bcd_get_digit(ppc_avr_t *bcd, int n, int *invalid)
+{
+ uint8_t result;
+ if (n & 1) {
+ result = bcd->u8[BCD_DIG_BYTE(n)] >> 4;
+ } else {
+ result = bcd->u8[BCD_DIG_BYTE(n)] & 0xF;
+ }
+
+ if (unlikely(result > 9)) {
+ *invalid = true;
+ }
+ return result;
+}
+
+static void bcd_put_digit(ppc_avr_t *bcd, uint8_t digit, int n)
+{
+ if (n & 1) {
+ bcd->u8[BCD_DIG_BYTE(n)] &= 0x0F;
+ bcd->u8[BCD_DIG_BYTE(n)] |= (digit<<4);
+ } else {
+ bcd->u8[BCD_DIG_BYTE(n)] &= 0xF0;
+ bcd->u8[BCD_DIG_BYTE(n)] |= digit;
+ }
+}
+
+static int bcd_cmp_zero(ppc_avr_t *bcd)
+{
+ if (bcd->u64[HI_IDX] == 0 && (bcd->u64[LO_IDX] >> 4) == 0) {
+ return 1 << CRF_EQ;
+ } else {
+ return (bcd_get_sgn(bcd) == 1) ? 1 << CRF_GT : 1 << CRF_LT;
+ }
+}
+
+static uint16_t get_national_digit(ppc_avr_t *reg, int n)
+{
+#if defined(HOST_WORDS_BIGENDIAN)
+ return reg->u16[7 - n];
+#else
+ return reg->u16[n];
+#endif
+}
+
+static void set_national_digit(ppc_avr_t *reg, uint8_t val, int n)
+{
+#if defined(HOST_WORDS_BIGENDIAN)
+ reg->u16[7 - n] = val;
+#else
+ reg->u16[n] = val;
+#endif
+}
+
+static int bcd_cmp_mag(ppc_avr_t *a, ppc_avr_t *b)
+{
+ int i;
+ int invalid = 0;
+ for (i = 31; i > 0; i--) {
+ uint8_t dig_a = bcd_get_digit(a, i, &invalid);
+ uint8_t dig_b = bcd_get_digit(b, i, &invalid);
+ if (unlikely(invalid)) {
+ return 0; /* doesn't matter */
+ } else if (dig_a > dig_b) {
+ return 1;
+ } else if (dig_a < dig_b) {
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int bcd_add_mag(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b, int *invalid,
+ int *overflow)
+{
+ int carry = 0;
+ int i;
+ int is_zero = 1;
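+ /* digit 0 is the sign nibble; digits 1..31 hold the value */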
+ for (i = 1; i <= 31; i++) {
+ uint8_t digit = bcd_get_digit(a, i, invalid) +
+ bcd_get_digit(b, i, invalid) + carry;
+ is_zero &= (digit == 0);
+ if (digit > 9) {
+ carry = 1;
+ digit -= 10;
+ } else {
+ carry = 0;
+ }
+
+ bcd_put_digit(t, digit, i);
+
+ if (unlikely(*invalid)) {
+ return -1;
+ }
+ }
+
+ *overflow = carry;
+ return is_zero;
+}
+
+static int bcd_sub_mag(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b, int *invalid,
+ int *overflow)
+{
+ int carry = 0;
+ int i;
+ int is_zero = 1;
+ for (i = 1; i <= 31; i++) {
+ uint8_t digit = bcd_get_digit(a, i, invalid) -
+ bcd_get_digit(b, i, invalid) + carry;
+ is_zero &= (digit == 0);
+ if (digit & 0x80) {
+ carry = -1;
+ digit += 10;
+ } else {
+ carry = 0;
+ }
+
+ bcd_put_digit(t, digit, i);
+
+ if (unlikely(*invalid)) {
+ return -1;
+ }
+ }
+
+ *overflow = carry;
+ return is_zero;
+}
+
+uint32_t helper_bcdadd(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps)
+{
+
+ int sgna = bcd_get_sgn(a);
+ int sgnb = bcd_get_sgn(b);
+ int invalid = (sgna == 0) || (sgnb == 0);
+ int overflow = 0;
+ int zero = 0;
+ uint32_t cr = 0;
+ ppc_avr_t result = { .u64 = { 0, 0 } };
+
+ if (!invalid) {
+ if (sgna == sgnb) {
+ result.u8[BCD_DIG_BYTE(0)] = bcd_preferred_sgn(sgna, ps);
+ zero = bcd_add_mag(&result, a, b, &invalid, &overflow);
+ cr = (sgna > 0) ? 1 << CRF_GT : 1 << CRF_LT;
+ } else if (bcd_cmp_mag(a, b) > 0) {
+ result.u8[BCD_DIG_BYTE(0)] = bcd_preferred_sgn(sgna, ps);
+ zero = bcd_sub_mag(&result, a, b, &invalid, &overflow);
+ cr = (sgna > 0) ? 1 << CRF_GT : 1 << CRF_LT;
+ } else {
+ result.u8[BCD_DIG_BYTE(0)] = bcd_preferred_sgn(sgnb, ps);
+ zero = bcd_sub_mag(&result, b, a, &invalid, &overflow);
+ cr = (sgnb > 0) ? 1 << CRF_GT : 1 << CRF_LT;
+ }
+ }
+
+ if (unlikely(invalid)) {
+ result.u64[HI_IDX] = result.u64[LO_IDX] = -1;
+ cr = 1 << CRF_SO;
+ } else if (overflow) {
+ cr |= 1 << CRF_SO;
+ } else if (zero) {
+ cr = 1 << CRF_EQ;
+ }
+
+ *r = result;
+
+ return cr;
+}
+
+uint32_t helper_bcdsub(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, uint32_t ps)
+{
+ ppc_avr_t bcopy = *b;
+ int sgnb = bcd_get_sgn(b);
+ if (sgnb < 0) {
+ bcd_put_digit(&bcopy, BCD_PLUS_PREF_1, 0);
+ } else if (sgnb > 0) {
+ bcd_put_digit(&bcopy, BCD_NEG_PREF, 0);
+ }
+ /* else invalid ... defer to bcdadd code for proper handling */
+
+ return helper_bcdadd(r, a, &bcopy, ps);
+}
+
+uint32_t helper_bcdcfn(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
+{
+ int i;
+ int cr = 0;
+ uint16_t national = 0;
+ uint16_t sgnb = get_national_digit(b, 0);
+ ppc_avr_t ret = { .u64 = { 0, 0 } };
+ int invalid = (sgnb != NATIONAL_PLUS && sgnb != NATIONAL_NEG);
+
+ for (i = 1; i < 8; i++) {
+ national = get_national_digit(b, i);
+ if (unlikely(national < 0x30 || national > 0x39)) {
+ invalid = 1;
+ break;
+ }
+
+ bcd_put_digit(&ret, national & 0xf, i);
+ }
+
+ if (sgnb == NATIONAL_PLUS) {
+ bcd_put_digit(&ret, (ps == 0) ? BCD_PLUS_PREF_1 : BCD_PLUS_PREF_2, 0);
+ } else {
+ bcd_put_digit(&ret, BCD_NEG_PREF, 0);
+ }
+
+ cr = bcd_cmp_zero(&ret);
+
+ if (unlikely(invalid)) {
+ cr = 1 << CRF_SO;
+ }
+
+ *r = ret;
+
+ return cr;
+}
+
+uint32_t helper_bcdctn(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
+{
+ int i;
+ int cr = 0;
+ int sgnb = bcd_get_sgn(b);
+ int invalid = (sgnb == 0);
+ ppc_avr_t ret = { .u64 = { 0, 0 } };
+
+ int ox_flag = (b->u64[HI_IDX] != 0) || ((b->u64[LO_IDX] >> 32) != 0);
+
+ for (i = 1; i < 8; i++) {
+ set_national_digit(&ret, 0x30 + bcd_get_digit(b, i, &invalid), i);
+
+ if (unlikely(invalid)) {
+ break;
+ }
+ }
+ set_national_digit(&ret, (sgnb == -1) ? NATIONAL_NEG : NATIONAL_PLUS, 0);
+
+ cr = bcd_cmp_zero(b);
+
+ if (ox_flag) {
+ cr |= 1 << CRF_SO;
+ }
+
+ if (unlikely(invalid)) {
+ cr = 1 << CRF_SO;
+ }
+
+ *r = ret;
+
+ return cr;
+}
+
+uint32_t helper_bcdcfz(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
+{
+ int i;
+ int cr = 0;
+ int invalid = 0;
+ int zone_digit = 0;
+ int zone_lead = ps ? 0xF : 0x3;
+ int digit = 0;
+ ppc_avr_t ret = { .u64 = { 0, 0 } };
+ int sgnb = b->u8[BCD_DIG_BYTE(0)] >> 4;
+
+ if (unlikely((sgnb < 0xA) && ps)) {
+ invalid = 1;
+ }
+
+ for (i = 0; i < 16; i++) {
+ zone_digit = (i * 2) ? b->u8[BCD_DIG_BYTE(i * 2)] >> 4 : zone_lead;
+ digit = b->u8[BCD_DIG_BYTE(i * 2)] & 0xF;
+ if (unlikely(zone_digit != zone_lead || digit > 0x9)) {
+ invalid = 1;
+ break;
+ }
+
+ bcd_put_digit(&ret, digit, i + 1);
+ }
+
+ if ((ps && (sgnb == 0xB || sgnb == 0xD)) ||
+ (!ps && (sgnb & 0x4))) {
+ bcd_put_digit(&ret, BCD_NEG_PREF, 0);
+ } else {
+ bcd_put_digit(&ret, BCD_PLUS_PREF_1, 0);
+ }
+
+ cr = bcd_cmp_zero(&ret);
+
+ if (unlikely(invalid)) {
+ cr = 1 << CRF_SO;
+ }
+
+ *r = ret;
+
+ return cr;
+}
+
+uint32_t helper_bcdctz(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps)
+{
+ int i;
+ int cr = 0;
+ uint8_t digit = 0;
+ int sgnb = bcd_get_sgn(b);
+ int zone_lead = (ps) ? 0xF0 : 0x30;
+ int invalid = (sgnb == 0);
+ ppc_avr_t ret = { .u64 = { 0, 0 } };
+
+ int ox_flag = ((b->u64[HI_IDX] >> 4) != 0);
+
+ for (i = 0; i < 16; i++) {
+ digit = bcd_get_digit(b, i + 1, &invalid);
+
+ if (unlikely(invalid)) {
+ break;
+ }
+
+ ret.u8[BCD_DIG_BYTE(i * 2)] = zone_lead + digit;
+ }
+
+ if (ps) {
+ bcd_put_digit(&ret, (sgnb == 1) ? 0xC : 0xD, 1);
+ } else {
+ bcd_put_digit(&ret, (sgnb == 1) ? 0x3 : 0x7, 1);
+ }
+
+ cr = bcd_cmp_zero(b);
+
+ if (ox_flag) {
+ cr |= 1 << CRF_SO;
+ }
+
+ if (unlikely(invalid)) {
+ cr = 1 << CRF_SO;
+ }
+
+ *r = ret;
+
+ return cr;
+}
+
+void helper_vsbox(ppc_avr_t *r, ppc_avr_t *a)
+{
+ int i;
+ VECTOR_FOR_INORDER_I(i, u8) {
+ r->u8[i] = AES_sbox[a->u8[i]];
+ }
+}
+
+void helper_vcipher(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ ppc_avr_t result;
+ int i;
+
+ VECTOR_FOR_INORDER_I(i, u32) {
+ result.AVRW(i) = b->AVRW(i) ^
+ (AES_Te0[a->AVRB(AES_shifts[4*i + 0])] ^
+ AES_Te1[a->AVRB(AES_shifts[4*i + 1])] ^
+ AES_Te2[a->AVRB(AES_shifts[4*i + 2])] ^
+ AES_Te3[a->AVRB(AES_shifts[4*i + 3])]);
+ }
+ *r = result;
+}
+
+void helper_vcipherlast(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ ppc_avr_t result;
+ int i;
+
+ VECTOR_FOR_INORDER_I(i, u8) {
+ result.AVRB(i) = b->AVRB(i) ^ (AES_sbox[a->AVRB(AES_shifts[i])]);
+ }
+ *r = result;
+}
+
+void helper_vncipher(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ /*
+ * This differs from what is written in ISA V2.07. The RTL is
+ * incorrect and will be fixed in V2.07B.
+ */
+ int i;
+ ppc_avr_t tmp;
+
+ VECTOR_FOR_INORDER_I(i, u8) {
+ tmp.AVRB(i) = b->AVRB(i) ^ AES_isbox[a->AVRB(AES_ishifts[i])];
+ }
+
+ VECTOR_FOR_INORDER_I(i, u32) {
+ r->AVRW(i) =
+ AES_imc[tmp.AVRB(4*i + 0)][0] ^
+ AES_imc[tmp.AVRB(4*i + 1)][1] ^
+ AES_imc[tmp.AVRB(4*i + 2)][2] ^
+ AES_imc[tmp.AVRB(4*i + 3)][3];
+ }
+}
+
+void helper_vncipherlast(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
+{
+ ppc_avr_t result;
+ int i;
+
+ VECTOR_FOR_INORDER_I(i, u8) {
+ result.AVRB(i) = b->AVRB(i) ^ (AES_isbox[a->AVRB(AES_ishifts[i])]);
+ }
+ *r = result;
+}
+
+#define ROTRu32(v, n) (((v) >> (n)) | ((v) << (32-n)))
+#if defined(HOST_WORDS_BIGENDIAN)
+#define EL_IDX(i) (i)
+#else
+#define EL_IDX(i) (3 - (i))
+#endif
+
+void helper_vshasigmaw(ppc_avr_t *r, ppc_avr_t *a, uint32_t st_six)
+{
+ int st = (st_six & 0x10) != 0;
+ int six = st_six & 0xF;
+ int i;
+
+ VECTOR_FOR_INORDER_I(i, u32) {
+ if (st == 0) {
+ if ((six & (0x8 >> i)) == 0) {
+ r->u32[EL_IDX(i)] = ROTRu32(a->u32[EL_IDX(i)], 7) ^
+ ROTRu32(a->u32[EL_IDX(i)], 18) ^
+ (a->u32[EL_IDX(i)] >> 3);
+ } else { /* six.bit[i] == 1 */
+ r->u32[EL_IDX(i)] = ROTRu32(a->u32[EL_IDX(i)], 17) ^
+ ROTRu32(a->u32[EL_IDX(i)], 19) ^
+ (a->u32[EL_IDX(i)] >> 10);
+ }
+ } else { /* st == 1 */
+ if ((six & (0x8 >> i)) == 0) {
+ r->u32[EL_IDX(i)] = ROTRu32(a->u32[EL_IDX(i)], 2) ^
+ ROTRu32(a->u32[EL_IDX(i)], 13) ^
+ ROTRu32(a->u32[EL_IDX(i)], 22);
+ } else { /* six.bit[i] == 1 */
+ r->u32[EL_IDX(i)] = ROTRu32(a->u32[EL_IDX(i)], 6) ^
+ ROTRu32(a->u32[EL_IDX(i)], 11) ^
+ ROTRu32(a->u32[EL_IDX(i)], 25);
+ }
+ }
+ }
+}
+
+#undef ROTRu32
+#undef EL_IDX
+
+#define ROTRu64(v, n) (((v) >> (n)) | ((v) << (64-n)))
+#if defined(HOST_WORDS_BIGENDIAN)
+#define EL_IDX(i) (i)
+#else
+#define EL_IDX(i) (1 - (i))
+#endif
+
+void helper_vshasigmad(ppc_avr_t *r, ppc_avr_t *a, uint32_t st_six)
+{
+ int st = (st_six & 0x10) != 0;
+ int six = st_six & 0xF;
+ int i;
+
+ VECTOR_FOR_INORDER_I(i, u64) {
+ if (st == 0) {
+ if ((six & (0x8 >> (2*i))) == 0) {
+ r->u64[EL_IDX(i)] = ROTRu64(a->u64[EL_IDX(i)], 1) ^
+ ROTRu64(a->u64[EL_IDX(i)], 8) ^
+ (a->u64[EL_IDX(i)] >> 7);
+ } else { /* six.bit[2*i] == 1 */
+ r->u64[EL_IDX(i)] = ROTRu64(a->u64[EL_IDX(i)], 19) ^
+ ROTRu64(a->u64[EL_IDX(i)], 61) ^
+ (a->u64[EL_IDX(i)] >> 6);
+ }
+ } else { /* st == 1 */
+ if ((six & (0x8 >> (2*i))) == 0) {
+ r->u64[EL_IDX(i)] = ROTRu64(a->u64[EL_IDX(i)], 28) ^
+ ROTRu64(a->u64[EL_IDX(i)], 34) ^
+ ROTRu64(a->u64[EL_IDX(i)], 39);
+ } else { /* six.bit[2*i] == 1 */
+ r->u64[EL_IDX(i)] = ROTRu64(a->u64[EL_IDX(i)], 14) ^
+ ROTRu64(a->u64[EL_IDX(i)], 18) ^
+ ROTRu64(a->u64[EL_IDX(i)], 41);
+ }
+ }
+ }
+}
+
+#undef ROTRu64
+#undef EL_IDX
+
+void helper_vpermxor(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b, ppc_avr_t *c)
+{
+ ppc_avr_t result;
+ int i;
+
+ VECTOR_FOR_INORDER_I(i, u8) {
+ int indexA = c->u8[i] >> 4;
+ int indexB = c->u8[i] & 0xF;
+#if defined(HOST_WORDS_BIGENDIAN)
+ result.u8[i] = a->u8[indexA] ^ b->u8[indexB];
+#else
+ result.u8[i] = a->u8[15-indexA] ^ b->u8[15-indexB];
+#endif
+ }
+ *r = result;
+}
+
+#undef VECTOR_FOR_INORDER_I
+#undef HI_IDX
+#undef LO_IDX
+
+/*****************************************************************************/
+/* SPE extension helpers */
+/* Use a table to make this quicker */
+static const uint8_t hbrev[16] = {
+ 0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
+ 0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
+};
+
+static inline uint8_t byte_reverse(uint8_t val)
+{
+ return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
+}
+
+static inline uint32_t word_reverse(uint32_t val)
+{
+ return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
+ (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
+}
+
+#define MASKBITS 16 /* Random value - to be fixed (implementation dependent) */
+target_ulong helper_brinc(target_ulong arg1, target_ulong arg2)
+{
+ uint32_t a, b, d, mask;
+
+ mask = UINT32_MAX >> (32 - MASKBITS);
+ a = arg1 & mask;
+ b = arg2 & mask;
+ d = word_reverse(1 + word_reverse(a | ~b));
+ return (arg1 & ~mask) | (d & b);
+}
+
+uint32_t helper_cntlsw32(uint32_t val)
+{
+ if (val & 0x80000000) {
+ return clz32(~val);
+ } else {
+ return clz32(val);
+ }
+}
+
+uint32_t helper_cntlzw32(uint32_t val)
+{
+ return clz32(val);
+}
+
+/* 440 specific */
+target_ulong helper_dlmzb(CPUPPCState *env, target_ulong high,
+ target_ulong low, uint32_t update_Rc)
+{
+ target_ulong mask;
+ int i;
+
+ i = 1;
+ for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
+ if ((high & mask) == 0) {
+ if (update_Rc) {
+ env->crf[0] = 0x4;
+ }
+ goto done;
+ }
+ i++;
+ }
+ for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
+ if ((low & mask) == 0) {
+ if (update_Rc) {
+ env->crf[0] = 0x8;
+ }
+ goto done;
+ }
+ i++;
+ }
+ i = 8;
+ if (update_Rc) {
+ env->crf[0] = 0x2;
+ }
+ done:
+ env->xer = (env->xer & ~0x7F) | i;
+ if (update_Rc) {
+ env->crf[0] |= xer_so;
+ }
+ return i;
+}
diff --git a/target/ppc/internal.h b/target/ppc/internal.h
new file mode 100644
index 0000000000..1ff4896c45
--- /dev/null
+++ b/target/ppc/internal.h
@@ -0,0 +1,50 @@
+/*
+ * PowerPC internal definitions for QEMU.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef PPC_INTERNAL_H
+#define PPC_INTERNAL_H
+
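+/* Generate a mask with bits 'start' through 'end' set, using MSB-0 (IBM)
+ * bit numbering; if start > end the mask wraps around and is inverted.
+ */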
+#define FUNC_MASK(name, ret_type, size, max_val) \
+static inline ret_type name(uint##size##_t start, \
+ uint##size##_t end) \
+{ \
+ ret_type ret, max_bit = size - 1; \
+ \
+ if (likely(start == 0)) { \
+ ret = max_val << (max_bit - end); \
+ } else if (likely(end == max_bit)) { \
+ ret = max_val >> start; \
+ } else { \
+ ret = (((uint##size##_t)(-1ULL)) >> (start)) ^ \
+ (((uint##size##_t)(-1ULL) >> (end)) >> 1); \
+ if (unlikely(start > end)) { \
+ return ~ret; \
+ } \
+ } \
+ \
+ return ret; \
+}
+
+#if defined(TARGET_PPC64)
+FUNC_MASK(MASK, target_ulong, 64, UINT64_MAX);
+#else
+FUNC_MASK(MASK, target_ulong, 32, UINT32_MAX);
+#endif
+FUNC_MASK(mask_u32, uint32_t, 32, UINT32_MAX);
+FUNC_MASK(mask_u64, uint64_t, 64, UINT64_MAX);
+
+#endif /* PPC_INTERNAL_H */
diff --git a/target/ppc/kvm-stub.c b/target/ppc/kvm-stub.c
new file mode 100644
index 0000000000..efeafca1df
--- /dev/null
+++ b/target/ppc/kvm-stub.c
@@ -0,0 +1,20 @@
+/*
+ * QEMU KVM PPC specific function stubs
+ *
+ * Copyright Freescale Inc. 2013
+ *
+ * Author: Alexander Graf <agraf@suse.de>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "hw/ppc/openpic.h"
+
+int kvm_openpic_connect_vcpu(DeviceState *d, CPUState *cs)
+{
+ return -EINVAL;
+}
diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c
new file mode 100644
index 0000000000..9c4834c4fc
--- /dev/null
+++ b/target/ppc/kvm.c
@@ -0,0 +1,2674 @@
+/*
+ * PowerPC implementation of KVM hooks
+ *
+ * Copyright IBM Corp. 2007
+ * Copyright (C) 2011 Freescale Semiconductor, Inc.
+ *
+ * Authors:
+ * Jerone Young <jyoung5@us.ibm.com>
+ * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
+ * Hollis Blanchard <hollisb@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include <dirent.h>
+#include <sys/ioctl.h>
+#include <sys/vfs.h>
+
+#include <linux/kvm.h>
+
+#include "qemu-common.h"
+#include "qemu/error-report.h"
+#include "cpu.h"
+#include "qemu/timer.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/kvm.h"
+#include "sysemu/numa.h"
+#include "kvm_ppc.h"
+#include "sysemu/cpus.h"
+#include "sysemu/device_tree.h"
+#include "mmu-hash64.h"
+
+#include "hw/sysbus.h"
+#include "hw/ppc/spapr.h"
+#include "hw/ppc/spapr_vio.h"
+#include "hw/ppc/spapr_cpu_core.h"
+#include "hw/ppc/ppc.h"
+#include "sysemu/watchdog.h"
+#include "trace.h"
+#include "exec/gdbstub.h"
+#include "exec/memattrs.h"
+#include "sysemu/hostmem.h"
+#include "qemu/cutils.h"
+#if defined(TARGET_PPC64)
+#include "hw/ppc/spapr_cpu_core.h"
+#endif
+
+//#define DEBUG_KVM
+
+#ifdef DEBUG_KVM
+#define DPRINTF(fmt, ...) \
+ do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+ do { } while (0)
+#endif
+
+#define PROC_DEVTREE_CPU "/proc/device-tree/cpus/"
+
+const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
+ KVM_CAP_LAST_INFO
+};
+
+static int cap_interrupt_unset = false;
+static int cap_interrupt_level = false;
+static int cap_segstate;
+static int cap_booke_sregs;
+static int cap_ppc_smt;
+static int cap_ppc_rma;
+static int cap_spapr_tce;
+static int cap_spapr_multitce;
+static int cap_spapr_vfio;
+static int cap_hior;
+static int cap_one_reg;
+static int cap_epr;
+static int cap_ppc_watchdog;
+static int cap_papr;
+static int cap_htab_fd;
+static int cap_fixup_hcalls;
+static int cap_htm; /* Hardware transactional memory support */
+
+static uint32_t debug_inst_opcode;
+
+/* XXX We have a race condition where we actually have a level triggered
+ * interrupt, but the infrastructure can't expose that yet, so the guest
+ * takes but ignores it, goes to sleep and never gets notified that there's
+ * still an interrupt pending.
+ *
+ * As a quick workaround, let's just wake up again 20 ms after we injected
+ * an interrupt. That way we can assure that we're always reinjecting
+ * interrupts in case the guest swallowed them.
+ */
+static QEMUTimer *idle_timer;
+
+static void kvm_kick_cpu(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+
+ qemu_cpu_kick(CPU(cpu));
+}
+
+/* Check whether we are running with KVM-PR (instead of KVM-HV). This
+ * should only be used for fallback tests - generally we should use
+ * explicit capabilities for the features we want, rather than
+ * assuming what is/isn't available depending on the KVM variant. */
+static bool kvmppc_is_pr(KVMState *ks)
+{
+ /* Assume KVM-PR if the GET_PVINFO capability is available */
+ return kvm_check_extension(ks, KVM_CAP_PPC_GET_PVINFO) != 0;
+}
+
+static int kvm_ppc_register_host_cpu_type(void);
+
+int kvm_arch_init(MachineState *ms, KVMState *s)
+{
+ cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);
+ cap_interrupt_level = kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL);
+ cap_segstate = kvm_check_extension(s, KVM_CAP_PPC_SEGSTATE);
+ cap_booke_sregs = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_SREGS);
+ cap_ppc_smt = kvm_check_extension(s, KVM_CAP_PPC_SMT);
+ cap_ppc_rma = kvm_check_extension(s, KVM_CAP_PPC_RMA);
+ cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);
+ cap_spapr_multitce = kvm_check_extension(s, KVM_CAP_SPAPR_MULTITCE);
+ cap_spapr_vfio = false;
+ cap_one_reg = kvm_check_extension(s, KVM_CAP_ONE_REG);
+ cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);
+ cap_epr = kvm_check_extension(s, KVM_CAP_PPC_EPR);
+ cap_ppc_watchdog = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_WATCHDOG);
+ /* Note: we don't set cap_papr here, because this capability is
+ * only activated after this by kvmppc_set_papr() */
+ cap_htab_fd = kvm_check_extension(s, KVM_CAP_PPC_HTAB_FD);
+ cap_fixup_hcalls = kvm_check_extension(s, KVM_CAP_PPC_FIXUP_HCALL);
+ cap_htm = kvm_vm_check_extension(s, KVM_CAP_PPC_HTM);
+
+ if (!cap_interrupt_level) {
+ fprintf(stderr, "KVM: Couldn't find level irq capability. Expect the "
+ "VM to stall at times!\n");
+ }
+
+ kvm_ppc_register_host_cpu_type();
+
+ return 0;
+}
+
+static int kvm_arch_sync_sregs(PowerPCCPU *cpu)
+{
+ CPUPPCState *cenv = &cpu->env;
+ CPUState *cs = CPU(cpu);
+ struct kvm_sregs sregs;
+ int ret;
+
+ if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
+ /* What we're really trying to say is "if we're on BookE, we use
+ the native PVR for now". This is the only sane way to check it,
+ though it may mislead users into thinking they can run BookE
+ guests on BookS. Let's hope nobody dares enough :) */
+ return 0;
+ } else {
+ if (!cap_segstate) {
+ fprintf(stderr, "kvm error: missing PVR setting capability\n");
+ return -ENOSYS;
+ }
+ }
+
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
+ if (ret) {
+ return ret;
+ }
+
+ sregs.pvr = cenv->spr[SPR_PVR];
+ return kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
+}
+
+/* Set up a shared TLB array with KVM */
+static int kvm_booke206_tlb_init(PowerPCCPU *cpu)
+{
+ CPUPPCState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+ struct kvm_book3e_206_tlb_params params = {};
+ struct kvm_config_tlb cfg = {};
+ unsigned int entries = 0;
+ int ret, i;
+
+ if (!kvm_enabled() ||
+ !kvm_check_extension(cs->kvm_state, KVM_CAP_SW_TLB)) {
+ return 0;
+ }
+
+ assert(ARRAY_SIZE(params.tlb_sizes) == BOOKE206_MAX_TLBN);
+
+ for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
+ params.tlb_sizes[i] = booke206_tlb_size(env, i);
+ params.tlb_ways[i] = booke206_tlb_ways(env, i);
+ entries += params.tlb_sizes[i];
+ }
+
+ assert(entries == env->nb_tlb);
+ assert(sizeof(struct kvm_book3e_206_tlb_entry) == sizeof(ppcmas_tlb_t));
+
+ env->tlb_dirty = true;
+
+ cfg.array = (uintptr_t)env->tlb.tlbm;
+ cfg.array_len = sizeof(ppcmas_tlb_t) * entries;
+ cfg.params = (uintptr_t)&params;
+ cfg.mmu_type = KVM_MMU_FSL_BOOKE_NOHV;
+
+ ret = kvm_vcpu_enable_cap(cs, KVM_CAP_SW_TLB, 0, (uintptr_t)&cfg);
+ if (ret < 0) {
+ fprintf(stderr, "%s: couldn't enable KVM_CAP_SW_TLB: %s\n",
+ __func__, strerror(-ret));
+ return ret;
+ }
+
+ env->kvm_sw_tlb = true;
+ return 0;
+}
+
+
+#if defined(TARGET_PPC64)
+static void kvm_get_fallback_smmu_info(PowerPCCPU *cpu,
+ struct kvm_ppc_smmu_info *info)
+{
+ CPUPPCState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+
+ memset(info, 0, sizeof(*info));
+
+ /* We don't have the new KVM_PPC_GET_SMMU_INFO ioctl, so
+ * need to "guess" what the supported page sizes are.
+ *
+ * For that to work we make a few assumptions:
+ *
+ * - Check whether we are running "PR" KVM which only supports 4K
+ * and 16M pages, but supports them regardless of the backing
+ * store characteristics. We also don't support 1T segments.
+ *
+ * This is safe because if HV KVM ever supports that capability or PR
+ * KVM grows support for more page/segment sizes, those versions
+ * will have implemented KVM_CAP_PPC_GET_SMMU_INFO and thus we
+ * will not hit this fallback.
+ *
+ * - Else we are running HV KVM. This means we only support page
+ * sizes that fit in the backing store. Additionally we only
+ * advertise 64K pages if the processor is ARCH 2.06 and we assume
+ * P7 encodings for the SLB and hash table. Here too, we assume
+ * support for any newer processor will mean a kernel that
+ * implements KVM_CAP_PPC_GET_SMMU_INFO and thus doesn't hit
+ * this fallback.
+ */
+ if (kvmppc_is_pr(cs->kvm_state)) {
+ /* No flags */
+ info->flags = 0;
+ info->slb_size = 64;
+
+ /* Standard 4k base page size segment */
+ info->sps[0].page_shift = 12;
+ info->sps[0].slb_enc = 0;
+ info->sps[0].enc[0].page_shift = 12;
+ info->sps[0].enc[0].pte_enc = 0;
+
+ /* Standard 16M large page size segment */
+ info->sps[1].page_shift = 24;
+ info->sps[1].slb_enc = SLB_VSID_L;
+ info->sps[1].enc[0].page_shift = 24;
+ info->sps[1].enc[0].pte_enc = 0;
+ } else {
+ int i = 0;
+
+ /* HV KVM has backing store size restrictions */
+ info->flags = KVM_PPC_PAGE_SIZES_REAL;
+
+ if (env->mmu_model & POWERPC_MMU_1TSEG) {
+ info->flags |= KVM_PPC_1T_SEGMENTS;
+ }
+
+ if (env->mmu_model == POWERPC_MMU_2_06 ||
+ env->mmu_model == POWERPC_MMU_2_07) {
+ info->slb_size = 32;
+ } else {
+ info->slb_size = 64;
+ }
+
+ /* Standard 4k base page size segment */
+ info->sps[i].page_shift = 12;
+ info->sps[i].slb_enc = 0;
+ info->sps[i].enc[0].page_shift = 12;
+ info->sps[i].enc[0].pte_enc = 0;
+ i++;
+
+ /* 64K on MMU 2.06 and later */
+ if (env->mmu_model == POWERPC_MMU_2_06 ||
+ env->mmu_model == POWERPC_MMU_2_07) {
+ info->sps[i].page_shift = 16;
+ info->sps[i].slb_enc = 0x110;
+ info->sps[i].enc[0].page_shift = 16;
+ info->sps[i].enc[0].pte_enc = 1;
+ i++;
+ }
+
+ /* Standard 16M large page size segment */
+ info->sps[i].page_shift = 24;
+ info->sps[i].slb_enc = SLB_VSID_L;
+ info->sps[i].enc[0].page_shift = 24;
+ info->sps[i].enc[0].pte_enc = 0;
+ }
+}
+
+static void kvm_get_smmu_info(PowerPCCPU *cpu, struct kvm_ppc_smmu_info *info)
+{
+ CPUState *cs = CPU(cpu);
+ int ret;
+
+ if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) {
+ ret = kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_SMMU_INFO, info);
+ if (ret == 0) {
+ return;
+ }
+ }
+
+ kvm_get_fallback_smmu_info(cpu, info);
+}
+
+static long gethugepagesize(const char *mem_path)
+{
+ struct statfs fs;
+ int ret;
+
+ do {
+ ret = statfs(mem_path, &fs);
+ } while (ret != 0 && errno == EINTR);
+
+ if (ret != 0) {
+ fprintf(stderr, "Couldn't statfs() memory path: %s\n",
+ strerror(errno));
+ exit(1);
+ }
+
+#define HUGETLBFS_MAGIC 0x958458f6
+
+ if (fs.f_type != HUGETLBFS_MAGIC) {
+ /* Explicit mempath, but it's ordinary pages */
+ return getpagesize();
+ }
+
+ /* It's a hugetlbfs mount; return the huge page size */
+ return fs.f_bsize;
+}
+
+/*
+ * FIXME TOCTTOU: this iterates over memory backends' mem-path, which
+ * may or may not name the same files / on the same filesystem now as
+ * when we actually open and map them. Iterate over the file
+ * descriptors instead, and use qemu_fd_getpagesize().
+ */
+static int find_max_supported_pagesize(Object *obj, void *opaque)
+{
+ char *mem_path;
+ long *hpsize_min = opaque;
+
+ if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
+ mem_path = object_property_get_str(obj, "mem-path", NULL);
+ if (mem_path) {
+ long hpsize = gethugepagesize(mem_path);
+ if (hpsize < *hpsize_min) {
+ *hpsize_min = hpsize;
+ }
+ } else {
+ *hpsize_min = getpagesize();
+ }
+ }
+
+ return 0;
+}
+
+static long getrampagesize(void)
+{
+ long hpsize = LONG_MAX;
+ long mainrampagesize;
+ Object *memdev_root;
+
+ if (mem_path) {
+ mainrampagesize = gethugepagesize(mem_path);
+ } else {
+ mainrampagesize = getpagesize();
+ }
+
+ /* It's possible we have memory-backend objects with
+ * hugepage-backed RAM. These may get mapped into system
+ * address space via -numa parameters or memory hotplug
+ * hooks. We want to take these into account, but we
+ * also want to make sure the supported hugepage
+ * sizes are applicable across the entire range of memory
+ * we may boot from, so we take the min across all
+ * backends, and assume normal pages in cases where a
+ * backend isn't backed by hugepages.
+ */
+ memdev_root = object_resolve_path("/objects", NULL);
+ if (memdev_root) {
+ object_child_foreach(memdev_root, find_max_supported_pagesize, &hpsize);
+ }
+ if (hpsize == LONG_MAX) {
+ /* No additional memory regions found ==> Report main RAM page size */
+ return mainrampagesize;
+ }
+
+ /* If NUMA is disabled or the NUMA nodes are not backed with a
+ * memory-backend, then there is at least one node using "normal" RAM,
+ * so if its page size is smaller we have to report that size instead.
+ */
+ if (hpsize > mainrampagesize &&
+ (nb_numa_nodes == 0 || numa_info[0].node_memdev == NULL)) {
+ static bool warned;
+ if (!warned) {
+ error_report("Huge page support disabled (n/a for main memory).");
+ warned = true;
+ }
+ return mainrampagesize;
+ }
+
+ return hpsize;
+}
+
+static bool kvm_valid_page_size(uint32_t flags, long rampgsize, uint32_t shift)
+{
+ if (!(flags & KVM_PPC_PAGE_SIZES_REAL)) {
+ return true;
+ }
+
+ return (1ul << shift) <= rampgsize;
+}
+
+static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
+{
+ static struct kvm_ppc_smmu_info smmu_info;
+ static bool has_smmu_info;
+ CPUPPCState *env = &cpu->env;
+ long rampagesize;
+ int iq, ik, jq, jk;
+ bool has_64k_pages = false;
+
+ /* We only handle page sizes for 64-bit server guests for now */
+ if (!(env->mmu_model & POWERPC_MMU_64)) {
+ return;
+ }
+
+ /* Collect MMU info from kernel if not already */
+ if (!has_smmu_info) {
+ kvm_get_smmu_info(cpu, &smmu_info);
+ has_smmu_info = true;
+ }
+
+ rampagesize = getrampagesize();
+
+ /* Convert to QEMU form */
+ memset(&env->sps, 0, sizeof(env->sps));
+
+ /* If we have HV KVM, we need to forbid CI large pages if our
+ * host page size is smaller than 64K.
+ */
+ if (smmu_info.flags & KVM_PPC_PAGE_SIZES_REAL) {
+ env->ci_large_pages = getpagesize() >= 0x10000;
+ }
+
+ /*
+ * XXX This loop should be an entry wide AND of the capabilities that
+ * the selected CPU has with the capabilities that KVM supports.
+ */
+ for (ik = iq = 0; ik < KVM_PPC_PAGE_SIZES_MAX_SZ; ik++) {
+ struct ppc_one_seg_page_size *qsps = &env->sps.sps[iq];
+ struct kvm_ppc_one_seg_page_size *ksps = &smmu_info.sps[ik];
+
+ if (!kvm_valid_page_size(smmu_info.flags, rampagesize,
+ ksps->page_shift)) {
+ continue;
+ }
+ qsps->page_shift = ksps->page_shift;
+ qsps->slb_enc = ksps->slb_enc;
+ for (jk = jq = 0; jk < KVM_PPC_PAGE_SIZES_MAX_SZ; jk++) {
+ if (!kvm_valid_page_size(smmu_info.flags, rampagesize,
+ ksps->enc[jk].page_shift)) {
+ continue;
+ }
+ if (ksps->enc[jk].page_shift == 16) {
+ has_64k_pages = true;
+ }
+ qsps->enc[jq].page_shift = ksps->enc[jk].page_shift;
+ qsps->enc[jq].pte_enc = ksps->enc[jk].pte_enc;
+ if (++jq >= PPC_PAGE_SIZES_MAX_SZ) {
+ break;
+ }
+ }
+ if (++iq >= PPC_PAGE_SIZES_MAX_SZ) {
+ break;
+ }
+ }
+ env->slb_nr = smmu_info.slb_size;
+ if (!(smmu_info.flags & KVM_PPC_1T_SEGMENTS)) {
+ env->mmu_model &= ~POWERPC_MMU_1TSEG;
+ }
+ if (!has_64k_pages) {
+ env->mmu_model &= ~POWERPC_MMU_64K;
+ }
+}
+#else /* defined (TARGET_PPC64) */
+
+static inline void kvm_fixup_page_sizes(PowerPCCPU *cpu)
+{
+}
+
+#endif /* !defined (TARGET_PPC64) */
+
+unsigned long kvm_arch_vcpu_id(CPUState *cpu)
+{
+ return ppc_get_vcpu_dt_id(POWERPC_CPU(cpu));
+}
+
+/* e500 supports 2 h/w breakpoints and 2 watchpoints.
+ * book3s supports only 1 watchpoint, so array size
+ * of 4 is sufficient for now.
+ */
+#define MAX_HW_BKPTS 4
+
+static struct HWBreakpoint {
+ target_ulong addr;
+ int type;
+} hw_debug_points[MAX_HW_BKPTS];
+
+static CPUWatchpoint hw_watchpoint;
+
+/* By default, no hardware breakpoints or watchpoints are supported */
+static int max_hw_breakpoint;
+static int max_hw_watchpoint;
+static int nb_hw_breakpoint;
+static int nb_hw_watchpoint;
+
+static void kvmppc_hw_debug_points_init(CPUPPCState *cenv)
+{
+ if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
+ max_hw_breakpoint = 2;
+ max_hw_watchpoint = 2;
+ }
+
+ if ((max_hw_breakpoint + max_hw_watchpoint) > MAX_HW_BKPTS) {
+ fprintf(stderr, "Error initializing h/w breakpoints\n");
+ return;
+ }
+}
+
+int kvm_arch_init_vcpu(CPUState *cs)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *cenv = &cpu->env;
+ int ret;
+
+ /* Gather server mmu info from KVM and update the CPU state */
+ kvm_fixup_page_sizes(cpu);
+
+ /* Synchronize sregs with kvm */
+ ret = kvm_arch_sync_sregs(cpu);
+ if (ret) {
+ if (ret == -EINVAL) {
+ error_report("Register sync failed... If you're using kvm-hv.ko,"
+ " only \"-cpu host\" is possible");
+ }
+ return ret;
+ }
+
+ idle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, kvm_kick_cpu, cpu);
+
+ switch (cenv->mmu_model) {
+ case POWERPC_MMU_BOOKE206:
+ /* This target supports access to KVM's guest TLB */
+ ret = kvm_booke206_tlb_init(cpu);
+ break;
+ case POWERPC_MMU_2_07:
+ if (!cap_htm && !kvmppc_is_pr(cs->kvm_state)) {
+ /* KVM-HV has transactional memory on POWER8 also without the
+ * KVM_CAP_PPC_HTM extension, so enable it here instead. */
+ cap_htm = true;
+ }
+ break;
+ default:
+ break;
+ }
+
+ kvm_get_one_reg(cs, KVM_REG_PPC_DEBUG_INST, &debug_inst_opcode);
+ kvmppc_hw_debug_points_init(cenv);
+
+ return ret;
+}
+
+static void kvm_sw_tlb_put(PowerPCCPU *cpu)
+{
+ CPUPPCState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+ struct kvm_dirty_tlb dirty_tlb;
+ unsigned char *bitmap;
+ int ret;
+
+ if (!env->kvm_sw_tlb) {
+ return;
+ }
+
+ bitmap = g_malloc((env->nb_tlb + 7) / 8);
+ memset(bitmap, 0xFF, (env->nb_tlb + 7) / 8);
+
+ dirty_tlb.bitmap = (uintptr_t)bitmap;
+ dirty_tlb.num_dirty = env->nb_tlb;
+
+ ret = kvm_vcpu_ioctl(cs, KVM_DIRTY_TLB, &dirty_tlb);
+ if (ret) {
+ fprintf(stderr, "%s: KVM_DIRTY_TLB: %s\n",
+ __func__, strerror(-ret));
+ }
+
+ g_free(bitmap);
+}
+
+static void kvm_get_one_spr(CPUState *cs, uint64_t id, int spr)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ union {
+ uint32_t u32;
+ uint64_t u64;
+ } val;
+ struct kvm_one_reg reg = {
+ .id = id,
+ .addr = (uintptr_t) &val,
+ };
+ int ret;
+
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret != 0) {
+ trace_kvm_failed_spr_get(spr, strerror(errno));
+ } else {
+ switch (id & KVM_REG_SIZE_MASK) {
+ case KVM_REG_SIZE_U32:
+ env->spr[spr] = val.u32;
+ break;
+
+ case KVM_REG_SIZE_U64:
+ env->spr[spr] = val.u64;
+ break;
+
+ default:
+ /* Don't handle this size yet */
+ abort();
+ }
+ }
+}
+
+static void kvm_put_one_spr(CPUState *cs, uint64_t id, int spr)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ union {
+ uint32_t u32;
+ uint64_t u64;
+ } val;
+ struct kvm_one_reg reg = {
+ .id = id,
+ .addr = (uintptr_t) &val,
+ };
+ int ret;
+
+ switch (id & KVM_REG_SIZE_MASK) {
+ case KVM_REG_SIZE_U32:
+ val.u32 = env->spr[spr];
+ break;
+
+ case KVM_REG_SIZE_U64:
+ val.u64 = env->spr[spr];
+ break;
+
+ default:
+ /* Don't handle this size yet */
+ abort();
+ }
+
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret != 0) {
+ trace_kvm_failed_spr_set(spr, strerror(errno));
+ }
+}
+
+static int kvm_put_fp(CPUState *cs)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ struct kvm_one_reg reg;
+ int i;
+ int ret;
+
+ if (env->insns_flags & PPC_FLOAT) {
+ uint64_t fpscr = env->fpscr;
+ bool vsx = !!(env->insns_flags2 & PPC2_VSX);
+
+ reg.id = KVM_REG_PPC_FPSCR;
+ reg.addr = (uintptr_t)&fpscr;
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret < 0) {
+ DPRINTF("Unable to set FPSCR to KVM: %s\n", strerror(errno));
+ return ret;
+ }
+
+ for (i = 0; i < 32; i++) {
+ uint64_t vsr[2];
+
+#ifdef HOST_WORDS_BIGENDIAN
+ vsr[0] = float64_val(env->fpr[i]);
+ vsr[1] = env->vsr[i];
+#else
+ vsr[0] = env->vsr[i];
+ vsr[1] = float64_val(env->fpr[i]);
+#endif
+ reg.addr = (uintptr_t) &vsr;
+ reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);
+
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret < 0) {
+ DPRINTF("Unable to set %s%d to KVM: %s\n", vsx ? "VSR" : "FPR",
+ i, strerror(errno));
+ return ret;
+ }
+ }
+ }
+
+ if (env->insns_flags & PPC_ALTIVEC) {
+ reg.id = KVM_REG_PPC_VSCR;
+ reg.addr = (uintptr_t)&env->vscr;
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret < 0) {
+ DPRINTF("Unable to set VSCR to KVM: %s\n", strerror(errno));
+ return ret;
+ }
+
+ for (i = 0; i < 32; i++) {
+ reg.id = KVM_REG_PPC_VR(i);
+ reg.addr = (uintptr_t)&env->avr[i];
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret < 0) {
+ DPRINTF("Unable to set VR%d to KVM: %s\n", i, strerror(errno));
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int kvm_get_fp(CPUState *cs)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ struct kvm_one_reg reg;
+ int i;
+ int ret;
+
+ if (env->insns_flags & PPC_FLOAT) {
+ uint64_t fpscr;
+ bool vsx = !!(env->insns_flags2 & PPC2_VSX);
+
+ reg.id = KVM_REG_PPC_FPSCR;
+ reg.addr = (uintptr_t)&fpscr;
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret < 0) {
+ DPRINTF("Unable to get FPSCR from KVM: %s\n", strerror(errno));
+ return ret;
+ } else {
+ env->fpscr = fpscr;
+ }
+
+ for (i = 0; i < 32; i++) {
+ uint64_t vsr[2];
+
+ reg.addr = (uintptr_t) &vsr;
+ reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);
+
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret < 0) {
+ DPRINTF("Unable to get %s%d from KVM: %s\n",
+ vsx ? "VSR" : "FPR", i, strerror(errno));
+ return ret;
+ } else {
+#ifdef HOST_WORDS_BIGENDIAN
+ env->fpr[i] = vsr[0];
+ if (vsx) {
+ env->vsr[i] = vsr[1];
+ }
+#else
+ env->fpr[i] = vsr[1];
+ if (vsx) {
+ env->vsr[i] = vsr[0];
+ }
+#endif
+ }
+ }
+ }
+
+ if (env->insns_flags & PPC_ALTIVEC) {
+ reg.id = KVM_REG_PPC_VSCR;
+ reg.addr = (uintptr_t)&env->vscr;
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret < 0) {
+ DPRINTF("Unable to get VSCR from KVM: %s\n", strerror(errno));
+ return ret;
+ }
+
+ for (i = 0; i < 32; i++) {
+ reg.id = KVM_REG_PPC_VR(i);
+ reg.addr = (uintptr_t)&env->avr[i];
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret < 0) {
+ DPRINTF("Unable to get VR%d from KVM: %s\n",
+ i, strerror(errno));
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+#if defined(TARGET_PPC64)
+static int kvm_get_vpa(CPUState *cs)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ struct kvm_one_reg reg;
+ int ret;
+
+ reg.id = KVM_REG_PPC_VPA_ADDR;
+ reg.addr = (uintptr_t)&env->vpa_addr;
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret < 0) {
+ DPRINTF("Unable to get VPA address from KVM: %s\n", strerror(errno));
+ return ret;
+ }
+
+ assert((uintptr_t)&env->slb_shadow_size
+ == ((uintptr_t)&env->slb_shadow_addr + 8));
+ reg.id = KVM_REG_PPC_VPA_SLB;
+ reg.addr = (uintptr_t)&env->slb_shadow_addr;
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret < 0) {
+ DPRINTF("Unable to get SLB shadow state from KVM: %s\n",
+ strerror(errno));
+ return ret;
+ }
+
+ assert((uintptr_t)&env->dtl_size == ((uintptr_t)&env->dtl_addr + 8));
+ reg.id = KVM_REG_PPC_VPA_DTL;
+ reg.addr = (uintptr_t)&env->dtl_addr;
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret < 0) {
+ DPRINTF("Unable to get dispatch trace log state from KVM: %s\n",
+ strerror(errno));
+ return ret;
+ }
+
+ return 0;
+}
+
+static int kvm_put_vpa(CPUState *cs)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ struct kvm_one_reg reg;
+ int ret;
+
+ /* SLB shadow or DTL can't be registered unless a master VPA is
+ * registered. That means when restoring state, if a VPA *is*
+ * registered, we need to set that up first. If not, we need to
+ * deregister the others before deregistering the master VPA */
+ assert(env->vpa_addr || !(env->slb_shadow_addr || env->dtl_addr));
+
+ if (env->vpa_addr) {
+ reg.id = KVM_REG_PPC_VPA_ADDR;
+ reg.addr = (uintptr_t)&env->vpa_addr;
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret < 0) {
+ DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno));
+ return ret;
+ }
+ }
+
+ assert((uintptr_t)&env->slb_shadow_size
+ == ((uintptr_t)&env->slb_shadow_addr + 8));
+ reg.id = KVM_REG_PPC_VPA_SLB;
+ reg.addr = (uintptr_t)&env->slb_shadow_addr;
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret < 0) {
+ DPRINTF("Unable to set SLB shadow state to KVM: %s\n", strerror(errno));
+ return ret;
+ }
+
+ assert((uintptr_t)&env->dtl_size == ((uintptr_t)&env->dtl_addr + 8));
+ reg.id = KVM_REG_PPC_VPA_DTL;
+ reg.addr = (uintptr_t)&env->dtl_addr;
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret < 0) {
+ DPRINTF("Unable to set dispatch trace log state to KVM: %s\n",
+ strerror(errno));
+ return ret;
+ }
+
+ if (!env->vpa_addr) {
+ reg.id = KVM_REG_PPC_VPA_ADDR;
+ reg.addr = (uintptr_t)&env->vpa_addr;
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret < 0) {
+ DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno));
+ return ret;
+ }
+ }
+
+ return 0;
+}
+#endif /* TARGET_PPC64 */
+
+int kvmppc_put_books_sregs(PowerPCCPU *cpu)
+{
+ CPUPPCState *env = &cpu->env;
+ struct kvm_sregs sregs;
+ int i;
+
+ sregs.pvr = env->spr[SPR_PVR];
+
+ sregs.u.s.sdr1 = env->spr[SPR_SDR1];
+
+ /* Sync SLB */
+#ifdef TARGET_PPC64
+ for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
+ sregs.u.s.ppc64.slb[i].slbe = env->slb[i].esid;
+ if (env->slb[i].esid & SLB_ESID_V) {
+ sregs.u.s.ppc64.slb[i].slbe |= i;
+ }
+ sregs.u.s.ppc64.slb[i].slbv = env->slb[i].vsid;
+ }
+#endif
+
+ /* Sync SRs */
+ for (i = 0; i < 16; i++) {
+ sregs.u.s.ppc32.sr[i] = env->sr[i];
+ }
+
+ /* Sync BATs */
+ for (i = 0; i < 8; i++) {
+ /* Beware. We have to swap the upper and lower 32-bit halves here */
+ sregs.u.s.ppc32.dbat[i] = ((uint64_t)env->DBAT[0][i] << 32)
+ | env->DBAT[1][i];
+ sregs.u.s.ppc32.ibat[i] = ((uint64_t)env->IBAT[0][i] << 32)
+ | env->IBAT[1][i];
+ }
+
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
+}
+
+int kvm_arch_put_registers(CPUState *cs, int level)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ struct kvm_regs regs;
+ int ret;
+ int i;
+
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
+ if (ret < 0) {
+ return ret;
+ }
+
+ regs.ctr = env->ctr;
+ regs.lr = env->lr;
+ regs.xer = cpu_read_xer(env);
+ regs.msr = env->msr;
+ regs.pc = env->nip;
+
+ regs.srr0 = env->spr[SPR_SRR0];
+ regs.srr1 = env->spr[SPR_SRR1];
+
+ regs.sprg0 = env->spr[SPR_SPRG0];
+ regs.sprg1 = env->spr[SPR_SPRG1];
+ regs.sprg2 = env->spr[SPR_SPRG2];
+ regs.sprg3 = env->spr[SPR_SPRG3];
+ regs.sprg4 = env->spr[SPR_SPRG4];
+ regs.sprg5 = env->spr[SPR_SPRG5];
+ regs.sprg6 = env->spr[SPR_SPRG6];
+ regs.sprg7 = env->spr[SPR_SPRG7];
+
+ regs.pid = env->spr[SPR_BOOKE_PID];
+
+ for (i = 0; i < 32; i++) {
+ regs.gpr[i] = env->gpr[i];
+ }
+
+ regs.cr = 0;
+ for (i = 0; i < 8; i++) {
+ regs.cr |= (env->crf[i] & 15) << (4 * (7 - i));
+ }
+
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
+ if (ret < 0) {
+ return ret;
+ }
+
+ kvm_put_fp(cs);
+
+ if (env->tlb_dirty) {
+ kvm_sw_tlb_put(cpu);
+ env->tlb_dirty = false;
+ }
+
+ if (cap_segstate && (level >= KVM_PUT_RESET_STATE)) {
+ ret = kvmppc_put_books_sregs(cpu);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ if (cap_hior && (level >= KVM_PUT_RESET_STATE)) {
+ kvm_put_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
+ }
+
+ if (cap_one_reg) {
+ int i;
+
+ /* We deliberately ignore errors here, for kernels which have
+ * the ONE_REG calls, but don't support the specific
+ * registers, there's a reasonable chance things will still
+ * work, at least until we try to migrate. */
+ for (i = 0; i < 1024; i++) {
+ uint64_t id = env->spr_cb[i].one_reg_id;
+
+ if (id != 0) {
+ kvm_put_one_spr(cs, id, i);
+ }
+ }
+
+#ifdef TARGET_PPC64
+ if (msr_ts) {
+ for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
+ kvm_set_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
+ }
+ for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
+ kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
+ }
+ kvm_set_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
+ kvm_set_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
+ kvm_set_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
+ kvm_set_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
+ kvm_set_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
+ kvm_set_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
+ kvm_set_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
+ kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
+ kvm_set_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
+ kvm_set_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
+ }
+
+ if (cap_papr) {
+ if (kvm_put_vpa(cs) < 0) {
+ DPRINTF("Warning: Unable to set VPA information to KVM\n");
+ }
+ }
+
+ kvm_set_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
+#endif /* TARGET_PPC64 */
+ }
+
+ return ret;
+}
+
+static void kvm_sync_excp(CPUPPCState *env, int vector, int ivor)
+{
+ env->excp_vectors[vector] = env->spr[ivor] + env->spr[SPR_BOOKE_IVPR];
+}
+
+static int kvmppc_get_booke_sregs(PowerPCCPU *cpu)
+{
+ CPUPPCState *env = &cpu->env;
+ struct kvm_sregs sregs;
+ int ret;
+
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_E_BASE) {
+ env->spr[SPR_BOOKE_CSRR0] = sregs.u.e.csrr0;
+ env->spr[SPR_BOOKE_CSRR1] = sregs.u.e.csrr1;
+ env->spr[SPR_BOOKE_ESR] = sregs.u.e.esr;
+ env->spr[SPR_BOOKE_DEAR] = sregs.u.e.dear;
+ env->spr[SPR_BOOKE_MCSR] = sregs.u.e.mcsr;
+ env->spr[SPR_BOOKE_TSR] = sregs.u.e.tsr;
+ env->spr[SPR_BOOKE_TCR] = sregs.u.e.tcr;
+ env->spr[SPR_DECR] = sregs.u.e.dec;
+ env->spr[SPR_TBL] = sregs.u.e.tb & 0xffffffff;
+ env->spr[SPR_TBU] = sregs.u.e.tb >> 32;
+ env->spr[SPR_VRSAVE] = sregs.u.e.vrsave;
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_E_ARCH206) {
+ env->spr[SPR_BOOKE_PIR] = sregs.u.e.pir;
+ env->spr[SPR_BOOKE_MCSRR0] = sregs.u.e.mcsrr0;
+ env->spr[SPR_BOOKE_MCSRR1] = sregs.u.e.mcsrr1;
+ env->spr[SPR_BOOKE_DECAR] = sregs.u.e.decar;
+ env->spr[SPR_BOOKE_IVPR] = sregs.u.e.ivpr;
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_E_64) {
+ env->spr[SPR_BOOKE_EPCR] = sregs.u.e.epcr;
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_E_SPRG8) {
+ env->spr[SPR_BOOKE_SPRG8] = sregs.u.e.sprg8;
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_E_IVOR) {
+ env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0];
+ kvm_sync_excp(env, POWERPC_EXCP_CRITICAL, SPR_BOOKE_IVOR0);
+ env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1];
+ kvm_sync_excp(env, POWERPC_EXCP_MCHECK, SPR_BOOKE_IVOR1);
+ env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2];
+ kvm_sync_excp(env, POWERPC_EXCP_DSI, SPR_BOOKE_IVOR2);
+ env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3];
+ kvm_sync_excp(env, POWERPC_EXCP_ISI, SPR_BOOKE_IVOR3);
+ env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4];
+ kvm_sync_excp(env, POWERPC_EXCP_EXTERNAL, SPR_BOOKE_IVOR4);
+ env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5];
+ kvm_sync_excp(env, POWERPC_EXCP_ALIGN, SPR_BOOKE_IVOR5);
+ env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6];
+ kvm_sync_excp(env, POWERPC_EXCP_PROGRAM, SPR_BOOKE_IVOR6);
+ env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7];
+ kvm_sync_excp(env, POWERPC_EXCP_FPU, SPR_BOOKE_IVOR7);
+ env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8];
+ kvm_sync_excp(env, POWERPC_EXCP_SYSCALL, SPR_BOOKE_IVOR8);
+ env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9];
+ kvm_sync_excp(env, POWERPC_EXCP_APU, SPR_BOOKE_IVOR9);
+ env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10];
+ kvm_sync_excp(env, POWERPC_EXCP_DECR, SPR_BOOKE_IVOR10);
+ env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11];
+ kvm_sync_excp(env, POWERPC_EXCP_FIT, SPR_BOOKE_IVOR11);
+ env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12];
+ kvm_sync_excp(env, POWERPC_EXCP_WDT, SPR_BOOKE_IVOR12);
+ env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13];
+ kvm_sync_excp(env, POWERPC_EXCP_DTLB, SPR_BOOKE_IVOR13);
+ env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14];
+ kvm_sync_excp(env, POWERPC_EXCP_ITLB, SPR_BOOKE_IVOR14);
+ env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15];
+ kvm_sync_excp(env, POWERPC_EXCP_DEBUG, SPR_BOOKE_IVOR15);
+
+ if (sregs.u.e.features & KVM_SREGS_E_SPE) {
+ env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0];
+ kvm_sync_excp(env, POWERPC_EXCP_SPEU, SPR_BOOKE_IVOR32);
+ env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1];
+ kvm_sync_excp(env, POWERPC_EXCP_EFPDI, SPR_BOOKE_IVOR33);
+ env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2];
+ kvm_sync_excp(env, POWERPC_EXCP_EFPRI, SPR_BOOKE_IVOR34);
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_E_PM) {
+ env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3];
+ kvm_sync_excp(env, POWERPC_EXCP_EPERFM, SPR_BOOKE_IVOR35);
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_E_PC) {
+ env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4];
+ kvm_sync_excp(env, POWERPC_EXCP_DOORI, SPR_BOOKE_IVOR36);
+ env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5];
+ kvm_sync_excp(env, POWERPC_EXCP_DOORCI, SPR_BOOKE_IVOR37);
+ }
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_E_ARCH206_MMU) {
+ env->spr[SPR_BOOKE_MAS0] = sregs.u.e.mas0;
+ env->spr[SPR_BOOKE_MAS1] = sregs.u.e.mas1;
+ env->spr[SPR_BOOKE_MAS2] = sregs.u.e.mas2;
+ env->spr[SPR_BOOKE_MAS3] = sregs.u.e.mas7_3 & 0xffffffff;
+ env->spr[SPR_BOOKE_MAS4] = sregs.u.e.mas4;
+ env->spr[SPR_BOOKE_MAS6] = sregs.u.e.mas6;
+ env->spr[SPR_BOOKE_MAS7] = sregs.u.e.mas7_3 >> 32;
+ env->spr[SPR_MMUCFG] = sregs.u.e.mmucfg;
+ env->spr[SPR_BOOKE_TLB0CFG] = sregs.u.e.tlbcfg[0];
+ env->spr[SPR_BOOKE_TLB1CFG] = sregs.u.e.tlbcfg[1];
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_EXP) {
+ env->spr[SPR_BOOKE_EPR] = sregs.u.e.epr;
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_E_PD) {
+ env->spr[SPR_BOOKE_EPLC] = sregs.u.e.eplc;
+ env->spr[SPR_BOOKE_EPSC] = sregs.u.e.epsc;
+ }
+
+ if (sregs.u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
+ env->spr[SPR_E500_SVR] = sregs.u.e.impl.fsl.svr;
+ env->spr[SPR_Exxx_MCAR] = sregs.u.e.impl.fsl.mcar;
+ env->spr[SPR_HID0] = sregs.u.e.impl.fsl.hid0;
+
+ if (sregs.u.e.impl.fsl.features & KVM_SREGS_E_FSL_PIDn) {
+ env->spr[SPR_BOOKE_PID1] = sregs.u.e.impl.fsl.pid1;
+ env->spr[SPR_BOOKE_PID2] = sregs.u.e.impl.fsl.pid2;
+ }
+ }
+
+ return 0;
+}
+
+static int kvmppc_get_books_sregs(PowerPCCPU *cpu)
+{
+ CPUPPCState *env = &cpu->env;
+ struct kvm_sregs sregs;
+ int ret;
+ int i;
+
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (!env->external_htab) {
+ ppc_store_sdr1(env, sregs.u.s.sdr1);
+ }
+
+ /* Sync SLB */
+#ifdef TARGET_PPC64
+ /*
+ * The packed SLB array we get from KVM_GET_SREGS only contains
+ * information about valid entries. So we flush our internal copy
+ * to get rid of stale ones, then put all valid SLB entries back
+ * in.
+ */
+ memset(env->slb, 0, sizeof(env->slb));
+ for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
+ target_ulong rb = sregs.u.s.ppc64.slb[i].slbe;
+ target_ulong rs = sregs.u.s.ppc64.slb[i].slbv;
+ /*
+ * Only restore valid entries
+ */
+ if (rb & SLB_ESID_V) {
+ ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs);
+ }
+ }
+#endif
+
+ /* Sync SRs */
+ for (i = 0; i < 16; i++) {
+ env->sr[i] = sregs.u.s.ppc32.sr[i];
+ }
+
+ /* Sync BATs */
+ for (i = 0; i < 8; i++) {
+ env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff;
+ env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32;
+ env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff;
+ env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32;
+ }
+
+ return 0;
+}
+
+int kvm_arch_get_registers(CPUState *cs)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ struct kvm_regs regs;
+ uint32_t cr;
+ int i, ret;
+
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
+ if (ret < 0) {
+ return ret;
+ }
+
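+ /* Unpack the 32-bit CR image into the eight 4-bit CR fields, with
+ * crf[0] holding the most significant nibble (CR0). */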
+ cr = regs.cr;
+ for (i = 7; i >= 0; i--) {
+ env->crf[i] = cr & 15;
+ cr >>= 4;
+ }
+
+ env->ctr = regs.ctr;
+ env->lr = regs.lr;
+ cpu_write_xer(env, regs.xer);
+ env->msr = regs.msr;
+ env->nip = regs.pc;
+
+ env->spr[SPR_SRR0] = regs.srr0;
+ env->spr[SPR_SRR1] = regs.srr1;
+
+ env->spr[SPR_SPRG0] = regs.sprg0;
+ env->spr[SPR_SPRG1] = regs.sprg1;
+ env->spr[SPR_SPRG2] = regs.sprg2;
+ env->spr[SPR_SPRG3] = regs.sprg3;
+ env->spr[SPR_SPRG4] = regs.sprg4;
+ env->spr[SPR_SPRG5] = regs.sprg5;
+ env->spr[SPR_SPRG6] = regs.sprg6;
+ env->spr[SPR_SPRG7] = regs.sprg7;
+
+ env->spr[SPR_BOOKE_PID] = regs.pid;
+
+ for (i = 0; i < 32; i++) {
+ env->gpr[i] = regs.gpr[i];
+ }
+
+ kvm_get_fp(cs);
+
+ if (cap_booke_sregs) {
+ ret = kvmppc_get_booke_sregs(cpu);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ if (cap_segstate) {
+ ret = kvmppc_get_books_sregs(cpu);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ if (cap_hior) {
+ kvm_get_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
+ }
+
+ if (cap_one_reg) {
+ int i;
+
+ /* We deliberately ignore errors here: for kernels which have
+ * the ONE_REG calls but don't support the specific
+ * registers, there's a reasonable chance things will still
+ * work, at least until we try to migrate. */
+ for (i = 0; i < 1024; i++) {
+ uint64_t id = env->spr_cb[i].one_reg_id;
+
+ if (id != 0) {
+ kvm_get_one_spr(cs, id, i);
+ }
+ }
+
+#ifdef TARGET_PPC64
+ if (msr_ts) {
+ for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
+ kvm_get_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
+ }
+ for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
+ kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
+ }
+ kvm_get_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
+ kvm_get_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
+ kvm_get_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
+ kvm_get_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
+ kvm_get_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
+ kvm_get_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
+ kvm_get_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
+ kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
+ kvm_get_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
+ kvm_get_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
+ }
+
+ if (cap_papr) {
+ if (kvm_get_vpa(cs) < 0) {
+ DPRINTF("Warning: Unable to get VPA information from KVM\n");
+ }
+ }
+
+ kvm_get_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
+#endif
+ }
+
+ return 0;
+}
+
+int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
+{
+ unsigned virq = level ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;
+
+ if (irq != PPC_INTERRUPT_EXT) {
+ return 0;
+ }
+
+ if (!kvm_enabled() || !cap_interrupt_unset || !cap_interrupt_level) {
+ return 0;
+ }
+
+ kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq);
+
+ return 0;
+}
+
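+/* Select which core input pin carries the external interrupt for the
+ * CPU family this binary targets; it is used by the legacy
+ * !cap_interrupt_level injection path in kvm_arch_pre_run() below. */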
+#if defined(TARGET_PPCEMB)
+#define PPC_INPUT_INT PPC40x_INPUT_INT
+#elif defined(TARGET_PPC64)
+#define PPC_INPUT_INT PPC970_INPUT_INT
+#else
+#define PPC_INPUT_INT PPC6xx_INPUT_INT
+#endif
+
+void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ int r;
+ unsigned irq;
+
+ qemu_mutex_lock_iothread();
+
+ /* PowerPC QEMU tracks the various core input pins (interrupt, critical
+ * interrupt, reset, etc) in PPC-specific env->irq_input_state. */
+ if (!cap_interrupt_level &&
+ run->ready_for_interrupt_injection &&
+ (cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+ (env->irq_input_state & (1<<PPC_INPUT_INT)))
+ {
+ /* For now KVM disregards the 'irq' argument. However, in the
+ * future KVM could cache it in-kernel to avoid a heavyweight exit
+ * when reading the UIC.
+ */
+ irq = KVM_INTERRUPT_SET;
+
+ DPRINTF("injected interrupt %d\n", irq);
+ r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &irq);
+ if (r < 0) {
+ printf("cpu %d fail inject %x\n", cs->cpu_index, irq);
+ }
+
+ /* Always wake up soon in case the interrupt was level based */
+ timer_mod(idle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
+ (NANOSECONDS_PER_SECOND / 50));
+ }
+
+ /* We don't know if there are more interrupts pending after this. However,
+ * the guest will return to userspace in the course of handling this one
+ * anyways, so we will get a chance to deliver the rest. */
+
+ qemu_mutex_unlock_iothread();
+}
+
+MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
+{
+ return MEMTXATTRS_UNSPECIFIED;
+}
+
+int kvm_arch_process_async_events(CPUState *cs)
+{
+ return cs->halted;
+}
+
+static int kvmppc_handle_halt(PowerPCCPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
+
+ if (!(cs->interrupt_request & CPU_INTERRUPT_HARD) && (msr_ee)) {
+ cs->halted = 1;
+ cs->exception_index = EXCP_HLT;
+ }
+
+ return 0;
+}
+
+/* map dcr access to existing qemu dcr emulation */
+static int kvmppc_handle_dcr_read(CPUPPCState *env, uint32_t dcrn, uint32_t *data)
+{
+ if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0) {
+ fprintf(stderr, "Read from unhandled DCR (0x%x)\n", dcrn);
+ }
+
+ return 0;
+}
+
+static int kvmppc_handle_dcr_write(CPUPPCState *env, uint32_t dcrn, uint32_t data)
+{
+ if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0) {
+ fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);
+ }
+
+ return 0;
+}
+
+int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+ /* Mixed endian case is not handled */
+ uint32_t sc = debug_inst_opcode;
+
+ if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
+ sizeof(sc), 0) ||
+ cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 1)) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+ uint32_t sc;
+
+ if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 0) ||
+ sc != debug_inst_opcode ||
+ cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
+ sizeof(sc), 1)) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int find_hw_breakpoint(target_ulong addr, int type)
+{
+ int n;
+
+ assert((nb_hw_breakpoint + nb_hw_watchpoint)
+ <= ARRAY_SIZE(hw_debug_points));
+
+ for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
+ if (hw_debug_points[n].addr == addr &&
+ hw_debug_points[n].type == type) {
+ return n;
+ }
+ }
+
+ return -1;
+}
+
+static int find_hw_watchpoint(target_ulong addr, int *flag)
+{
+ int n;
+
+ n = find_hw_breakpoint(addr, GDB_WATCHPOINT_ACCESS);
+ if (n >= 0) {
+ *flag = BP_MEM_ACCESS;
+ return n;
+ }
+
+ n = find_hw_breakpoint(addr, GDB_WATCHPOINT_WRITE);
+ if (n >= 0) {
+ *flag = BP_MEM_WRITE;
+ return n;
+ }
+
+ n = find_hw_breakpoint(addr, GDB_WATCHPOINT_READ);
+ if (n >= 0) {
+ *flag = BP_MEM_READ;
+ return n;
+ }
+
+ return -1;
+}
+
+int kvm_arch_insert_hw_breakpoint(target_ulong addr,
+ target_ulong len, int type)
+{
+ if ((nb_hw_breakpoint + nb_hw_watchpoint) >= ARRAY_SIZE(hw_debug_points)) {
+ return -ENOBUFS;
+ }
+
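+ /* Record the new entry provisionally; it only becomes live once the
+ * matching counter below is incremented, so a rejected request simply
+ * leaves the slot free to be overwritten by the next insertion. */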
+ hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].addr = addr;
+ hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint].type = type;
+
+ switch (type) {
+ case GDB_BREAKPOINT_HW:
+ if (nb_hw_breakpoint >= max_hw_breakpoint) {
+ return -ENOBUFS;
+ }
+
+ if (find_hw_breakpoint(addr, type) >= 0) {
+ return -EEXIST;
+ }
+
+ nb_hw_breakpoint++;
+ break;
+
+ case GDB_WATCHPOINT_WRITE:
+ case GDB_WATCHPOINT_READ:
+ case GDB_WATCHPOINT_ACCESS:
+ if (nb_hw_watchpoint >= max_hw_watchpoint) {
+ return -ENOBUFS;
+ }
+
+ if (find_hw_breakpoint(addr, type) >= 0) {
+ return -EEXIST;
+ }
+
+ nb_hw_watchpoint++;
+ break;
+
+ default:
+ return -ENOSYS;
+ }
+
+ return 0;
+}
+
+int kvm_arch_remove_hw_breakpoint(target_ulong addr,
+ target_ulong len, int type)
+{
+ int n;
+
+ n = find_hw_breakpoint(addr, type);
+ if (n < 0) {
+ return -ENOENT;
+ }
+
+ switch (type) {
+ case GDB_BREAKPOINT_HW:
+ nb_hw_breakpoint--;
+ break;
+
+ case GDB_WATCHPOINT_WRITE:
+ case GDB_WATCHPOINT_READ:
+ case GDB_WATCHPOINT_ACCESS:
+ nb_hw_watchpoint--;
+ break;
+
+ default:
+ return -ENOSYS;
+ }
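+ /* Keep the array compact: move the last entry into the freed slot. */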
+ hw_debug_points[n] = hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint];
+
+ return 0;
+}
+
+void kvm_arch_remove_all_hw_breakpoints(void)
+{
+ nb_hw_breakpoint = nb_hw_watchpoint = 0;
+}
+
+void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
+{
+ int n;
+
+ /* Software Breakpoint updates */
+ if (kvm_sw_breakpoints_active(cs)) {
+ dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
+ }
+
+ assert((nb_hw_breakpoint + nb_hw_watchpoint)
+ <= ARRAY_SIZE(hw_debug_points));
+ assert((nb_hw_breakpoint + nb_hw_watchpoint) <= ARRAY_SIZE(dbg->arch.bp));
+
+ if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
+ dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
+ memset(dbg->arch.bp, 0, sizeof(dbg->arch.bp));
+ for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
+ switch (hw_debug_points[n].type) {
+ case GDB_BREAKPOINT_HW:
+ dbg->arch.bp[n].type = KVMPPC_DEBUG_BREAKPOINT;
+ break;
+ case GDB_WATCHPOINT_WRITE:
+ dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE;
+ break;
+ case GDB_WATCHPOINT_READ:
+ dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_READ;
+ break;
+ case GDB_WATCHPOINT_ACCESS:
+ dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE |
+ KVMPPC_DEBUG_WATCH_READ;
+ break;
+ default:
+ cpu_abort(cs, "Unsupported breakpoint type\n");
+ }
+ dbg->arch.bp[n].addr = hw_debug_points[n].addr;
+ }
+ }
+}
+
+static int kvm_handle_debug(PowerPCCPU *cpu, struct kvm_run *run)
+{
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
+ struct kvm_debug_exit_arch *arch_info = &run->debug.arch;
+ int handle = 0;
+ int n;
+ int flag = 0;
+
+ if (cs->singlestep_enabled) {
+ handle = 1;
+ } else if (arch_info->status) {
+ if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
+ if (arch_info->status & KVMPPC_DEBUG_BREAKPOINT) {
+ n = find_hw_breakpoint(arch_info->address, GDB_BREAKPOINT_HW);
+ if (n >= 0) {
+ handle = 1;
+ }
+ } else if (arch_info->status & (KVMPPC_DEBUG_WATCH_READ |
+ KVMPPC_DEBUG_WATCH_WRITE)) {
+ n = find_hw_watchpoint(arch_info->address, &flag);
+ if (n >= 0) {
+ handle = 1;
+ cs->watchpoint_hit = &hw_watchpoint;
+ hw_watchpoint.vaddr = hw_debug_points[n].addr;
+ hw_watchpoint.flags = flag;
+ }
+ }
+ }
+ } else if (kvm_find_sw_breakpoint(cs, arch_info->address)) {
+ handle = 1;
+ } else {
+ /* QEMU is not able to handle this debug exception, so inject a
+ * program exception into the guest: a program exception, NOT a
+ * debug exception.
+ * While QEMU is using the debug resources, the debug exception must
+ * always be enabled. To achieve this we set MSR_DE and also set
+ * MSRP_DEP so the guest cannot change MSR_DE.
+ * When emulating the debug resources for the guest, we instead want
+ * the guest to control MSR_DE (i.e. enable/disable the debug
+ * interrupt as needed). Supporting both configurations at once is
+ * not possible, so debug resources cannot be shared between QEMU
+ * and the guest on the BookE architecture. In the current design
+ * QEMU takes priority over the guest: if QEMU is using the debug
+ * resources, the guest cannot use them.
+ * For software breakpoints QEMU uses a privileged instruction, so
+ * there is no way we got here because of a guest debug exception;
+ * the only possibility is that the guest executed a privileged or
+ * illegal instruction, which is why we inject a program interrupt.
+ */
+
+ cpu_synchronize_state(cs);
+ /* env->nip is the PC, so advance it by 4 before calling
+ * ppc_cpu_do_interrupt(), which sets srr0 = env->nip - 4.
+ */
+ env->nip += 4;
+ cs->exception_index = POWERPC_EXCP_PROGRAM;
+ env->error_code = POWERPC_EXCP_INVAL;
+ ppc_cpu_do_interrupt(cs);
+ }
+
+ return handle;
+}
+
+int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ int ret;
+
+ qemu_mutex_lock_iothread();
+
+ switch (run->exit_reason) {
+ case KVM_EXIT_DCR:
+ if (run->dcr.is_write) {
+ DPRINTF("handle dcr write\n");
+ ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data);
+ } else {
+ DPRINTF("handle dcr read\n");
+ ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data);
+ }
+ break;
+ case KVM_EXIT_HLT:
+ DPRINTF("handle halt\n");
+ ret = kvmppc_handle_halt(cpu);
+ break;
+#if defined(TARGET_PPC64)
+ case KVM_EXIT_PAPR_HCALL:
+ DPRINTF("handle PAPR hypercall\n");
+ run->papr_hcall.ret = spapr_hypercall(cpu,
+ run->papr_hcall.nr,
+ run->papr_hcall.args);
+ ret = 0;
+ break;
+#endif
+ case KVM_EXIT_EPR:
+ DPRINTF("handle epr\n");
+ run->epr.epr = ldl_phys(cs->as, env->mpic_iack);
+ ret = 0;
+ break;
+ case KVM_EXIT_WATCHDOG:
+ DPRINTF("handle watchdog expiry\n");
+ watchdog_perform_action();
+ ret = 0;
+ break;
+
+ case KVM_EXIT_DEBUG:
+ DPRINTF("handle debug exception\n");
+ if (kvm_handle_debug(cpu, run)) {
+ ret = EXCP_DEBUG;
+ break;
+ }
+ /* re-enter, this exception was guest-internal */
+ ret = 0;
+ break;
+
+ default:
+ fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
+ ret = -1;
+ break;
+ }
+
+ qemu_mutex_unlock_iothread();
+ return ret;
+}
+
+int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
+{
+ CPUState *cs = CPU(cpu);
+ uint32_t bits = tsr_bits;
+ struct kvm_one_reg reg = {
+ .id = KVM_REG_PPC_OR_TSR,
+ .addr = (uintptr_t) &bits,
+ };
+
+ return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+}
+
+int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
+{
+
+ CPUState *cs = CPU(cpu);
+ uint32_t bits = tsr_bits;
+ struct kvm_one_reg reg = {
+ .id = KVM_REG_PPC_CLEAR_TSR,
+ .addr = (uintptr_t) &bits,
+ };
+
+ return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+}
+
+int kvmppc_set_tcr(PowerPCCPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
+ uint32_t tcr = env->spr[SPR_BOOKE_TCR];
+
+ struct kvm_one_reg reg = {
+ .id = KVM_REG_PPC_TCR,
+ .addr = (uintptr_t) &tcr,
+ };
+
+ return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+}
+
+int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ int ret;
+
+ if (!kvm_enabled()) {
+ return -1;
+ }
+
+ if (!cap_ppc_watchdog) {
+ printf("warning: KVM does not support watchdog");
+ return -1;
+ }
+
+ ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_BOOKE_WATCHDOG, 0);
+ if (ret < 0) {
+ fprintf(stderr, "%s: couldn't enable KVM_CAP_PPC_BOOKE_WATCHDOG: %s\n",
+ __func__, strerror(-ret));
+ return ret;
+ }
+
+ return ret;
+}
+
+static int read_cpuinfo(const char *field, char *value, int len)
+{
+ FILE *f;
+ int ret = -1;
+ int field_len = strlen(field);
+ char line[512];
+
+ f = fopen("/proc/cpuinfo", "r");
+ if (!f) {
+ return -1;
+ }
+
+ do {
+ if (!fgets(line, sizeof(line), f)) {
+ break;
+ }
+ if (!strncmp(line, field, field_len)) {
+ pstrcpy(value, len, line);
+ ret = 0;
+ break;
+ }
+ } while (*line);
+
+ fclose(f);
+
+ return ret;
+}
+
+uint32_t kvmppc_get_tbfreq(void)
+{
+ char line[512];
+ char *ns;
+ uint32_t retval = NANOSECONDS_PER_SECOND;
+
+ if (read_cpuinfo("timebase", line, sizeof(line))) {
+ return retval;
+ }
+
+ if (!(ns = strchr(line, ':'))) {
+ return retval;
+ }
+
+ ns++;
+
+ return atoi(ns);
+}
+
+bool kvmppc_get_host_serial(char **value)
+{
+ return g_file_get_contents("/proc/device-tree/system-id", value, NULL,
+ NULL);
+}
+
+bool kvmppc_get_host_model(char **value)
+{
+ return g_file_get_contents("/proc/device-tree/model", value, NULL, NULL);
+}
+
+/* Try to find a device tree node for a CPU with clock-frequency property */
+static int kvmppc_find_cpu_dt(char *buf, int buf_len)
+{
+ struct dirent *dirp;
+ DIR *dp;
+
+ if ((dp = opendir(PROC_DEVTREE_CPU)) == NULL) {
+ printf("Can't open directory " PROC_DEVTREE_CPU "\n");
+ return -1;
+ }
+
+ buf[0] = '\0';
+ while ((dirp = readdir(dp)) != NULL) {
+ FILE *f;
+ snprintf(buf, buf_len, "%s%s/clock-frequency", PROC_DEVTREE_CPU,
+ dirp->d_name);
+ f = fopen(buf, "r");
+ if (f) {
+ snprintf(buf, buf_len, "%s%s", PROC_DEVTREE_CPU, dirp->d_name);
+ fclose(f);
+ break;
+ }
+ buf[0] = '\0';
+ }
+ closedir(dp);
+ if (buf[0] == '\0') {
+ printf("Unknown host!\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static uint64_t kvmppc_read_int_dt(const char *filename)
+{
+ union {
+ uint32_t v32;
+ uint64_t v64;
+ } u;
+ FILE *f;
+ int len;
+
+ f = fopen(filename, "rb");
+ if (!f) {
+ return -1;
+ }
+
+ len = fread(&u, 1, sizeof(u), f);
+ fclose(f);
+ switch (len) {
+ case 4:
+ /* property is a 32-bit quantity */
+ return be32_to_cpu(u.v32);
+ case 8:
+ return be64_to_cpu(u.v64);
+ }
+
+ return 0;
+}
+
+/* Read a CPU node property from the host device tree that's a single
+ * integer (32-bit or 64-bit). Returns -1 (all-ones) if the node or
+ * property can't be found or opened, and 0 if the property format
+ * isn't understood. */
+static uint64_t kvmppc_read_int_cpu_dt(const char *propname)
+{
+ char buf[PATH_MAX], *tmp;
+ uint64_t val;
+
+ if (kvmppc_find_cpu_dt(buf, sizeof(buf))) {
+ return -1;
+ }
+
+ tmp = g_strdup_printf("%s/%s", buf, propname);
+ val = kvmppc_read_int_dt(tmp);
+ g_free(tmp);
+
+ return val;
+}
+
+uint64_t kvmppc_get_clockfreq(void)
+{
+ return kvmppc_read_int_cpu_dt("clock-frequency");
+}
+
+uint32_t kvmppc_get_vmx(void)
+{
+ return kvmppc_read_int_cpu_dt("ibm,vmx");
+}
+
+uint32_t kvmppc_get_dfp(void)
+{
+ return kvmppc_read_int_cpu_dt("ibm,dfp");
+}
+
+static int kvmppc_get_pvinfo(CPUPPCState *env, struct kvm_ppc_pvinfo *pvinfo)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
+ if (kvm_vm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
+ !kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_PVINFO, pvinfo)) {
+ return 0;
+ }
+
+ return 1;
+}
+
+int kvmppc_get_hasidle(CPUPPCState *env)
+{
+ struct kvm_ppc_pvinfo pvinfo;
+
+ if (!kvmppc_get_pvinfo(env, &pvinfo) &&
+ (pvinfo.flags & KVM_PPC_PVINFO_FLAGS_EV_IDLE)) {
+ return 1;
+ }
+
+ return 0;
+}
+
+int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
+{
+ uint32_t *hc = (uint32_t*)buf;
+ struct kvm_ppc_pvinfo pvinfo;
+
+ if (!kvmppc_get_pvinfo(env, &pvinfo)) {
+ memcpy(buf, pvinfo.hcall, buf_len);
+ return 0;
+ }
+
+ /*
+ * Fallback to always fail hypercalls regardless of endianness:
+ *
+ * tdi 0,r0,72 (becomes b .+8 in wrong endian, nop in good endian)
+ * li r3, -1
+ * b .+8 (becomes nop in wrong endian)
+ * bswap32(li r3, -1)
+ */
+
+ hc[0] = cpu_to_be32(0x08000048);
+ hc[1] = cpu_to_be32(0x3860ffff);
+ hc[2] = cpu_to_be32(0x48000008);
+ hc[3] = cpu_to_be32(bswap32(0x3860ffff));
+
+ return 1;
+}
+
+static inline int kvmppc_enable_hcall(KVMState *s, target_ulong hcall)
+{
+ return kvm_vm_enable_cap(s, KVM_CAP_PPC_ENABLE_HCALL, 0, hcall, 1);
+}
+
+void kvmppc_enable_logical_ci_hcalls(void)
+{
+ /*
+ * FIXME: it would be nice if we could detect the cases where
+ * we're using a device which requires the in kernel
+ * implementation of these hcalls, but the kernel lacks them and
+ * produce a warning.
+ */
+ kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_LOAD);
+ kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_STORE);
+}
+
+void kvmppc_enable_set_mode_hcall(void)
+{
+ kvmppc_enable_hcall(kvm_state, H_SET_MODE);
+}
+
+void kvmppc_enable_clear_ref_mod_hcalls(void)
+{
+ kvmppc_enable_hcall(kvm_state, H_CLEAR_REF);
+ kvmppc_enable_hcall(kvm_state, H_CLEAR_MOD);
+}
+
+void kvmppc_set_papr(PowerPCCPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ int ret;
+
+ ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_PAPR, 0);
+ if (ret) {
+ error_report("This vCPU type or KVM version does not support PAPR");
+ exit(1);
+ }
+
+ /* Update the capability flag so we sync the right information
+ * with kvm */
+ cap_papr = 1;
+}
+
+int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t cpu_version)
+{
+ return kvm_set_one_reg(CPU(cpu), KVM_REG_PPC_ARCH_COMPAT, &cpu_version);
+}
+
+void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy)
+{
+ CPUState *cs = CPU(cpu);
+ int ret;
+
+ ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_EPR, 0, mpic_proxy);
+ if (ret && mpic_proxy) {
+ error_report("This KVM version does not support EPR");
+ exit(1);
+ }
+}
+
+int kvmppc_smt_threads(void)
+{
+ return cap_ppc_smt ? cap_ppc_smt : 1;
+}
+
+#ifdef TARGET_PPC64
+off_t kvmppc_alloc_rma(void **rma)
+{
+ off_t size;
+ int fd;
+ struct kvm_allocate_rma ret;
+
+ /* If cap_ppc_rma == 0, contiguous RMA allocation is not supported
+ * if cap_ppc_rma == 1, contiguous RMA allocation is supported, but
+ * not necessary on this hardware
+ * if cap_ppc_rma == 2, contiguous RMA allocation is needed on this hardware
+ *
+ * FIXME: We should allow the user to force contiguous RMA
+ * allocation in the cap_ppc_rma==1 case.
+ */
+ if (cap_ppc_rma < 2) {
+ return 0;
+ }
+
+ fd = kvm_vm_ioctl(kvm_state, KVM_ALLOCATE_RMA, &ret);
+ if (fd < 0) {
+ fprintf(stderr, "KVM: Error on KVM_ALLOCATE_RMA: %s\n",
+ strerror(errno));
+ return -1;
+ }
+
+ size = MIN(ret.rma_size, 256ul << 20);
+
+ *rma = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
+ if (*rma == MAP_FAILED) {
+ fprintf(stderr, "KVM: Error mapping RMA: %s\n", strerror(errno));
+ return -1;
+ }
+
+ return size;
+}
+
+uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift)
+{
+ struct kvm_ppc_smmu_info info;
+ long rampagesize, best_page_shift;
+ int i;
+
+ if (cap_ppc_rma >= 2) {
+ return current_size;
+ }
+
+ /* Find the largest hardware supported page size that's less than
+ * or equal to the (logical) backing page size of guest RAM */
+ kvm_get_smmu_info(POWERPC_CPU(first_cpu), &info);
+ rampagesize = getrampagesize();
+ best_page_shift = 0;
+
+ for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
+ struct kvm_ppc_one_seg_page_size *sps = &info.sps[i];
+
+ if (!sps->page_shift) {
+ continue;
+ }
+
+ if ((sps->page_shift > best_page_shift)
+ && ((1UL << sps->page_shift) <= rampagesize)) {
+ best_page_shift = sps->page_shift;
+ }
+ }
+
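+ /* A hash table of 2^hash_shift bytes holds 2^(hash_shift - 7) HPTE
+ * groups; assuming one page mapped per group, the usable RMA is at
+ * most page_size << (hash_shift - 7). */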
+ return MIN(current_size,
+ 1ULL << (best_page_shift + hash_shift - 7));
+}
+#endif
+
+bool kvmppc_spapr_use_multitce(void)
+{
+ return cap_spapr_multitce;
+}
+
+void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t window_size, int *pfd,
+ bool need_vfio)
+{
+ struct kvm_create_spapr_tce args = {
+ .liobn = liobn,
+ .window_size = window_size,
+ };
+ long len;
+ int fd;
+ void *table;
+
+ /* Must set fd to -1 so we don't try to munmap when called for
+ * destroying the table, which the upper layers -will- do
+ */
+ *pfd = -1;
+ if (!cap_spapr_tce || (need_vfio && !cap_spapr_vfio)) {
+ return NULL;
+ }
+
+ fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE, &args);
+ if (fd < 0) {
+ fprintf(stderr, "KVM: Failed to create TCE table for liobn 0x%x\n",
+ liobn);
+ return NULL;
+ }
+
+ len = (window_size / SPAPR_TCE_PAGE_SIZE) * sizeof(uint64_t);
+ /* FIXME: round this up to page size */
+
+ table = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
+ if (table == MAP_FAILED) {
+ fprintf(stderr, "KVM: Failed to map TCE table for liobn 0x%x\n",
+ liobn);
+ close(fd);
+ return NULL;
+ }
+
+ *pfd = fd;
+ return table;
+}
+
+int kvmppc_remove_spapr_tce(void *table, int fd, uint32_t nb_table)
+{
+ long len;
+
+ if (fd < 0) {
+ return -1;
+ }
+
+ len = nb_table * sizeof(uint64_t);
+ if ((munmap(table, len) < 0) ||
+ (close(fd) < 0)) {
+ fprintf(stderr, "KVM: Unexpected error removing TCE table: %s",
+ strerror(errno));
+ /* Leak the table */
+ }
+
+ return 0;
+}
+
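+/* Returns a positive hash table shift if the kernel allocated the HTAB
+ * itself, 0 if the caller (QEMU) should allocate it, or a negative
+ * errno on failure. */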
+int kvmppc_reset_htab(int shift_hint)
+{
+ uint32_t shift = shift_hint;
+
+ if (!kvm_enabled()) {
+ /* Full emulation, tell caller to allocate htab itself */
+ return 0;
+ }
+ if (kvm_check_extension(kvm_state, KVM_CAP_PPC_ALLOC_HTAB)) {
+ int ret;
+ ret = kvm_vm_ioctl(kvm_state, KVM_PPC_ALLOCATE_HTAB, &shift);
+ if (ret == -ENOTTY) {
+ /* At least some versions of PR KVM advertise the
+ * capability, but don't implement the ioctl(). Oops.
+ * Return 0 so that we allocate the htab in qemu, as is
+ * correct for PR. */
+ return 0;
+ } else if (ret < 0) {
+ return ret;
+ }
+ return shift;
+ }
+
+ /* We have a kernel that predates the htab reset calls. For PR
+ * KVM, we need to allocate the htab ourselves; an HV KVM of
+ * this era has already allocated a 16MB fixed-size hash table. */
+ if (kvmppc_is_pr(kvm_state)) {
+ /* PR - tell caller to allocate htab */
+ return 0;
+ } else {
+ /* HV - assume 16MB kernel allocated htab */
+ return 24;
+ }
+}
+
+static inline uint32_t mfpvr(void)
+{
+ uint32_t pvr;
+
+ asm ("mfpvr %0"
+ : "=r"(pvr));
+ return pvr;
+}
+
+static void alter_insns(uint64_t *word, uint64_t flags, bool on)
+{
+ if (on) {
+ *word |= flags;
+ } else {
+ *word &= ~flags;
+ }
+}
+
+static void kvmppc_host_cpu_initfn(Object *obj)
+{
+ assert(kvm_enabled());
+}
+
+static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+ uint32_t vmx = kvmppc_get_vmx();
+ uint32_t dfp = kvmppc_get_dfp();
+ uint32_t dcache_size = kvmppc_read_int_cpu_dt("d-cache-size");
+ uint32_t icache_size = kvmppc_read_int_cpu_dt("i-cache-size");
+
+ /* Now fix up the class with information we can query from the host */
+ pcc->pvr = mfpvr();
+
+ if (vmx != -1) {
+ /* Only override when we know what the host supports */
+ alter_insns(&pcc->insns_flags, PPC_ALTIVEC, vmx > 0);
+ alter_insns(&pcc->insns_flags2, PPC2_VSX, vmx > 1);
+ }
+ if (dfp != -1) {
+ /* Only override when we know what the host supports */
+ alter_insns(&pcc->insns_flags2, PPC2_DFP, dfp);
+ }
+
+ if (dcache_size != -1) {
+ pcc->l1_dcache_size = dcache_size;
+ }
+
+ if (icache_size != -1) {
+ pcc->l1_icache_size = icache_size;
+ }
+
+ /* Reason: kvmppc_host_cpu_initfn() dies when !kvm_enabled() */
+ dc->cannot_destroy_with_object_finalize_yet = true;
+}
+
+bool kvmppc_has_cap_epr(void)
+{
+ return cap_epr;
+}
+
+bool kvmppc_has_cap_htab_fd(void)
+{
+ return cap_htab_fd;
+}
+
+bool kvmppc_has_cap_fixup_hcalls(void)
+{
+ return cap_fixup_hcalls;
+}
+
+bool kvmppc_has_cap_htm(void)
+{
+ return cap_htm;
+}
+
+static PowerPCCPUClass *ppc_cpu_get_family_class(PowerPCCPUClass *pcc)
+{
+ ObjectClass *oc = OBJECT_CLASS(pcc);
+
+ while (oc && !object_class_is_abstract(oc)) {
+ oc = object_class_get_parent(oc);
+ }
+ assert(oc);
+
+ return POWERPC_CPU_CLASS(oc);
+}
+
+PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void)
+{
+ uint32_t host_pvr = mfpvr();
+ PowerPCCPUClass *pvr_pcc;
+
+ pvr_pcc = ppc_cpu_class_by_pvr(host_pvr);
+ if (pvr_pcc == NULL) {
+ pvr_pcc = ppc_cpu_class_by_pvr_mask(host_pvr);
+ }
+
+ return pvr_pcc;
+}
+
+static int kvm_ppc_register_host_cpu_type(void)
+{
+ TypeInfo type_info = {
+ .name = TYPE_HOST_POWERPC_CPU,
+ .instance_init = kvmppc_host_cpu_initfn,
+ .class_init = kvmppc_host_cpu_class_init,
+ };
+ PowerPCCPUClass *pvr_pcc;
+ DeviceClass *dc;
+
+ pvr_pcc = kvm_ppc_get_host_cpu_class();
+ if (pvr_pcc == NULL) {
+ return -1;
+ }
+ type_info.parent = object_class_get_name(OBJECT_CLASS(pvr_pcc));
+ type_register(&type_info);
+
+ /* Register generic family CPU class for a family */
+ pvr_pcc = ppc_cpu_get_family_class(pvr_pcc);
+ dc = DEVICE_CLASS(pvr_pcc);
+ type_info.parent = object_class_get_name(OBJECT_CLASS(pvr_pcc));
+ type_info.name = g_strdup_printf("%s-"TYPE_POWERPC_CPU, dc->desc);
+ type_register(&type_info);
+
+#if defined(TARGET_PPC64)
+ type_info.name = g_strdup_printf("%s-"TYPE_SPAPR_CPU_CORE, "host");
+ type_info.parent = TYPE_SPAPR_CPU_CORE;
+ type_info.instance_size = sizeof(sPAPRCPUCore);
+ type_info.instance_init = NULL;
+ type_info.class_init = spapr_cpu_core_class_init;
+ type_info.class_data = (void *) "host";
+ type_register(&type_info);
+ g_free((void *)type_info.name);
+
+ /* Register generic spapr CPU family class for current host CPU type */
+ type_info.name = g_strdup_printf("%s-"TYPE_SPAPR_CPU_CORE, dc->desc);
+ type_info.class_data = (void *) dc->desc;
+ type_register(&type_info);
+ g_free((void *)type_info.name);
+#endif
+
+ return 0;
+}
+
+int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function)
+{
+ struct kvm_rtas_token_args args = {
+ .token = token,
+ };
+
+ if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_RTAS)) {
+ return -ENOENT;
+ }
+
+ strncpy(args.name, function, sizeof(args.name));
+
+ return kvm_vm_ioctl(kvm_state, KVM_PPC_RTAS_DEFINE_TOKEN, &args);
+}
+
+int kvmppc_get_htab_fd(bool write)
+{
+ struct kvm_get_htab_fd s = {
+ .flags = write ? KVM_GET_HTAB_WRITE : 0,
+ .start_index = 0,
+ };
+
+ if (!cap_htab_fd) {
+ fprintf(stderr, "KVM version doesn't support saving the hash table\n");
+ return -1;
+ }
+
+ return kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &s);
+}
+
+int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns)
+{
+ int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
+ uint8_t buf[bufsize];
+ ssize_t rc;
+
+ do {
+ rc = read(fd, buf, bufsize);
+ if (rc < 0) {
+ fprintf(stderr, "Error reading data from KVM HTAB fd: %s\n",
+ strerror(errno));
+ return rc;
+ } else if (rc) {
+ uint8_t *buffer = buf;
+ ssize_t n = rc;
+ while (n) {
+ struct kvm_get_htab_header *head =
+ (struct kvm_get_htab_header *) buffer;
+ size_t chunksize = sizeof(*head) +
+ HASH_PTE_SIZE_64 * head->n_valid;
+
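+ /* Each chunk is streamed as: be32 index, be16 n_valid, be16 n_invalid,
+ * then n_valid HPTEs of HASH_PTE_SIZE_64 bytes each; invalid entries
+ * carry no data. kvmppc_load_htab_chunk() expects the same layout on
+ * the destination. */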
+ qemu_put_be32(f, head->index);
+ qemu_put_be16(f, head->n_valid);
+ qemu_put_be16(f, head->n_invalid);
+ qemu_put_buffer(f, (void *)(head + 1),
+ HASH_PTE_SIZE_64 * head->n_valid);
+
+ buffer += chunksize;
+ n -= chunksize;
+ }
+ }
+ } while ((rc != 0)
+ && ((max_ns < 0)
+ || ((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) < max_ns)));
+
+ return (rc == 0) ? 1 : 0;
+}
+
+int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
+ uint16_t n_valid, uint16_t n_invalid)
+{
+ struct kvm_get_htab_header *buf;
+ size_t chunksize = sizeof(*buf) + n_valid*HASH_PTE_SIZE_64;
+ ssize_t rc;
+
+ buf = alloca(chunksize);
+ buf->index = index;
+ buf->n_valid = n_valid;
+ buf->n_invalid = n_invalid;
+
+ qemu_get_buffer(f, (void *)(buf + 1), HASH_PTE_SIZE_64*n_valid);
+
+ rc = write(fd, buf, chunksize);
+ if (rc < 0) {
+ fprintf(stderr, "Error writing KVM hash table: %s\n",
+ strerror(errno));
+ return rc;
+ }
+ if (rc != chunksize) {
+ /* We should never get a short write on a single chunk */
+ fprintf(stderr, "Short write, restoring KVM hash table\n");
+ return -1;
+ }
+ return 0;
+}
+
+bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
+{
+ return true;
+}
+
+int kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
+{
+ return 1;
+}
+
+int kvm_arch_on_sigbus(int code, void *addr)
+{
+ return 1;
+}
+
+void kvm_arch_init_irq_routing(KVMState *s)
+{
+}
+
+struct kvm_get_htab_buf {
+ struct kvm_get_htab_header header;
+ /*
+ * We require one extra entry for the read
+ */
+ target_ulong hpte[(HPTES_PER_GROUP * 2) + 1];
+};
+
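+/* Read one HPTE group through the KVM HTAB fd. The returned value is a
+ * token (the address of the hpte array inside a kvm_get_htab_buf) that
+ * the caller must release with kvmppc_hash64_free_pteg(). */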
+uint64_t kvmppc_hash64_read_pteg(PowerPCCPU *cpu, target_ulong pte_index)
+{
+ int htab_fd;
+ struct kvm_get_htab_fd ghf;
+ struct kvm_get_htab_buf *hpte_buf;
+
+ ghf.flags = 0;
+ ghf.start_index = pte_index;
+ htab_fd = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &ghf);
+ if (htab_fd < 0) {
+ goto error_out;
+ }
+
+ hpte_buf = g_malloc0(sizeof(*hpte_buf));
+ /*
+ * Read the hpte group
+ */
+ if (read(htab_fd, hpte_buf, sizeof(*hpte_buf)) < 0) {
+ goto out_close;
+ }
+
+ close(htab_fd);
+ return (uint64_t)(uintptr_t) hpte_buf->hpte;
+
+out_close:
+ g_free(hpte_buf);
+ close(htab_fd);
+error_out:
+ return 0;
+}
+
+void kvmppc_hash64_free_pteg(uint64_t token)
+{
+ struct kvm_get_htab_buf *htab_buf;
+
+ htab_buf = container_of((void *)(uintptr_t) token, struct kvm_get_htab_buf,
+ hpte);
+ g_free(htab_buf);
+ return;
+}
+
+void kvmppc_hash64_write_pte(CPUPPCState *env, target_ulong pte_index,
+ target_ulong pte0, target_ulong pte1)
+{
+ int htab_fd;
+ struct kvm_get_htab_fd ghf;
+ struct kvm_get_htab_buf hpte_buf;
+
+ ghf.flags = 0;
+ ghf.start_index = 0; /* Ignored */
+ htab_fd = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &ghf);
+ if (htab_fd < 0) {
+ goto error_out;
+ }
+
+ hpte_buf.header.n_valid = 1;
+ hpte_buf.header.n_invalid = 0;
+ hpte_buf.header.index = pte_index;
+ hpte_buf.hpte[0] = pte0;
+ hpte_buf.hpte[1] = pte1;
+ /*
+ * Write the hpte entry.
+ * CAUTION: write() has the warn_unused_result attribute. Hence we
+ * need to check the return value, even though we do nothing.
+ */
+ if (write(htab_fd, &hpte_buf, sizeof(hpte_buf)) < 0) {
+ goto out_close;
+ }
+
+out_close:
+ close(htab_fd);
+ return;
+
+error_out:
+ return;
+}
+
+int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
+ uint64_t address, uint32_t data, PCIDevice *dev)
+{
+ return 0;
+}
+
+int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
+ int vector, PCIDevice *dev)
+{
+ return 0;
+}
+
+int kvm_arch_release_virq_post(int virq)
+{
+ return 0;
+}
+
+int kvm_arch_msi_data_to_gsi(uint32_t data)
+{
+ return data & 0xffff;
+}
+
+int kvmppc_enable_hwrng(void)
+{
+ if (!kvm_enabled() || !kvm_check_extension(kvm_state, KVM_CAP_PPC_HWRNG)) {
+ return -1;
+ }
+
+ return kvmppc_enable_hcall(kvm_state, H_RANDOM);
+}
diff --git a/target/ppc/kvm_ppc.h b/target/ppc/kvm_ppc.h
new file mode 100644
index 0000000000..bd1d78bfbe
--- /dev/null
+++ b/target/ppc/kvm_ppc.h
@@ -0,0 +1,330 @@
+/*
+ * Copyright 2008 IBM Corporation.
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ *
+ * This work is licensed under the GNU GPL license version 2 or later.
+ *
+ */
+
+#ifndef KVM_PPC_H
+#define KVM_PPC_H
+
+#define TYPE_HOST_POWERPC_CPU "host-" TYPE_POWERPC_CPU
+
+#ifdef CONFIG_KVM
+
+uint32_t kvmppc_get_tbfreq(void);
+uint64_t kvmppc_get_clockfreq(void);
+uint32_t kvmppc_get_vmx(void);
+uint32_t kvmppc_get_dfp(void);
+bool kvmppc_get_host_model(char **buf);
+bool kvmppc_get_host_serial(char **buf);
+int kvmppc_get_hasidle(CPUPPCState *env);
+int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len);
+int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level);
+void kvmppc_enable_logical_ci_hcalls(void);
+void kvmppc_enable_set_mode_hcall(void);
+void kvmppc_enable_clear_ref_mod_hcalls(void);
+void kvmppc_set_papr(PowerPCCPU *cpu);
+int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t cpu_version);
+void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy);
+int kvmppc_smt_threads(void);
+int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits);
+int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits);
+int kvmppc_set_tcr(PowerPCCPU *cpu);
+int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu);
+#ifndef CONFIG_USER_ONLY
+off_t kvmppc_alloc_rma(void **rma);
+bool kvmppc_spapr_use_multitce(void);
+void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t window_size, int *pfd,
+ bool need_vfio);
+int kvmppc_remove_spapr_tce(void *table, int pfd, uint32_t window_size);
+int kvmppc_reset_htab(int shift_hint);
+uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift);
+#endif /* !CONFIG_USER_ONLY */
+bool kvmppc_has_cap_epr(void);
+int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function);
+bool kvmppc_has_cap_htab_fd(void);
+int kvmppc_get_htab_fd(bool write);
+int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns);
+int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
+ uint16_t n_valid, uint16_t n_invalid);
+uint64_t kvmppc_hash64_read_pteg(PowerPCCPU *cpu, target_ulong pte_index);
+void kvmppc_hash64_free_pteg(uint64_t token);
+
+void kvmppc_hash64_write_pte(CPUPPCState *env, target_ulong pte_index,
+ target_ulong pte0, target_ulong pte1);
+bool kvmppc_has_cap_fixup_hcalls(void);
+bool kvmppc_has_cap_htm(void);
+int kvmppc_enable_hwrng(void);
+int kvmppc_put_books_sregs(PowerPCCPU *cpu);
+PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void);
+
+#else
+
+static inline uint32_t kvmppc_get_tbfreq(void)
+{
+ return 0;
+}
+
+static inline bool kvmppc_get_host_model(char **buf)
+{
+ return false;
+}
+
+static inline bool kvmppc_get_host_serial(char **buf)
+{
+ return false;
+}
+
+static inline uint64_t kvmppc_get_clockfreq(void)
+{
+ return 0;
+}
+
+static inline uint32_t kvmppc_get_vmx(void)
+{
+ return 0;
+}
+
+static inline uint32_t kvmppc_get_dfp(void)
+{
+ return 0;
+}
+
+static inline int kvmppc_get_hasidle(CPUPPCState *env)
+{
+ return 0;
+}
+
+static inline int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
+{
+ return -1;
+}
+
+static inline int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
+{
+ return -1;
+}
+
+static inline void kvmppc_enable_logical_ci_hcalls(void)
+{
+}
+
+static inline void kvmppc_enable_set_mode_hcall(void)
+{
+}
+
+static inline void kvmppc_enable_clear_ref_mod_hcalls(void)
+{
+}
+
+static inline void kvmppc_set_papr(PowerPCCPU *cpu)
+{
+}
+
+static inline int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t cpu_version)
+{
+ return 0;
+}
+
+static inline void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy)
+{
+}
+
+static inline int kvmppc_smt_threads(void)
+{
+ return 1;
+}
+
+static inline int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
+{
+ return 0;
+}
+
+static inline int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
+{
+ return 0;
+}
+
+static inline int kvmppc_set_tcr(PowerPCCPU *cpu)
+{
+ return 0;
+}
+
+static inline int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu)
+{
+ return -1;
+}
+
+#ifndef CONFIG_USER_ONLY
+static inline off_t kvmppc_alloc_rma(void **rma)
+{
+ return 0;
+}
+
+static inline bool kvmppc_spapr_use_multitce(void)
+{
+ return false;
+}
+
+static inline void *kvmppc_create_spapr_tce(uint32_t liobn,
+ uint32_t window_size, int *fd,
+ bool need_vfio)
+{
+ return NULL;
+}
+
+static inline int kvmppc_remove_spapr_tce(void *table, int pfd,
+ uint32_t nb_table)
+{
+ return -1;
+}
+
+static inline int kvmppc_reset_htab(int shift_hint)
+{
+ return 0;
+}
+
+static inline uint64_t kvmppc_rma_size(uint64_t current_size,
+ unsigned int hash_shift)
+{
+ return ram_size;
+}
+
+#endif /* !CONFIG_USER_ONLY */
+
+static inline bool kvmppc_has_cap_epr(void)
+{
+ return false;
+}
+
+static inline int kvmppc_define_rtas_kernel_token(uint32_t token,
+ const char *function)
+{
+ return -1;
+}
+
+static inline bool kvmppc_has_cap_htab_fd(void)
+{
+ return false;
+}
+
+static inline int kvmppc_get_htab_fd(bool write)
+{
+ return -1;
+}
+
+static inline int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize,
+ int64_t max_ns)
+{
+ abort();
+}
+
+static inline int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
+ uint16_t n_valid, uint16_t n_invalid)
+{
+ abort();
+}
+
+static inline uint64_t kvmppc_hash64_read_pteg(PowerPCCPU *cpu,
+ target_ulong pte_index)
+{
+ abort();
+}
+
+static inline void kvmppc_hash64_free_pteg(uint64_t token)
+{
+ abort();
+}
+
+static inline void kvmppc_hash64_write_pte(CPUPPCState *env,
+ target_ulong pte_index,
+ target_ulong pte0, target_ulong pte1)
+{
+ abort();
+}
+
+static inline bool kvmppc_has_cap_fixup_hcalls(void)
+{
+ abort();
+}
+
+static inline bool kvmppc_has_cap_htm(void)
+{
+ return false;
+}
+
+static inline int kvmppc_enable_hwrng(void)
+{
+ return -1;
+}
+
+static inline int kvmppc_put_books_sregs(PowerPCCPU *cpu)
+{
+ abort();
+}
+
+static inline PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void)
+{
+ return NULL;
+}
+
+#endif
+
+#ifndef CONFIG_KVM
+
+#define kvmppc_eieio() do { } while (0)
+
+static inline void kvmppc_dcbst_range(PowerPCCPU *cpu, uint8_t *addr, int len)
+{
+}
+
+static inline void kvmppc_icbi_range(PowerPCCPU *cpu, uint8_t *addr, int len)
+{
+}
+
+#else /* CONFIG_KVM */
+
+#define kvmppc_eieio() \
+ do { \
+ if (kvm_enabled()) { \
+ asm volatile("eieio" : : : "memory"); \
+ } \
+ } while (0)
+
+/* Store data cache blocks back to memory */
+static inline void kvmppc_dcbst_range(PowerPCCPU *cpu, uint8_t *addr, int len)
+{
+ uint8_t *p;
+
+ for (p = addr; p < addr + len; p += cpu->env.dcache_line_size) {
+ asm volatile("dcbst 0,%0" : : "r"(p) : "memory");
+ }
+}
+
+/* Invalidate instruction cache blocks */
+static inline void kvmppc_icbi_range(PowerPCCPU *cpu, uint8_t *addr, int len)
+{
+ uint8_t *p;
+
+ for (p = addr; p < addr + len; p += cpu->env.icache_line_size) {
+ asm volatile("icbi 0,%0" : : "r"(p));
+ }
+}
+
+#endif /* CONFIG_KVM */
+
+#ifndef KVM_INTERRUPT_SET
+#define KVM_INTERRUPT_SET -1
+#endif
+
+#ifndef KVM_INTERRUPT_UNSET
+#define KVM_INTERRUPT_UNSET -2
+#endif
+
+#ifndef KVM_INTERRUPT_SET_LEVEL
+#define KVM_INTERRUPT_SET_LEVEL -3
+#endif
+
+#endif /* KVM_PPC_H */
diff --git a/target/ppc/machine.c b/target/ppc/machine.c
new file mode 100644
index 0000000000..18c16d2512
--- /dev/null
+++ b/target/ppc/machine.c
@@ -0,0 +1,615 @@
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "hw/hw.h"
+#include "hw/boards.h"
+#include "sysemu/kvm.h"
+#include "helper_regs.h"
+#include "mmu-hash64.h"
+#include "migration/cpu.h"
+
+static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
+{
+ PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
+ unsigned int i, j;
+ target_ulong sdr1;
+ uint32_t fpscr;
+ target_ulong xer;
+
+ for (i = 0; i < 32; i++) {
+ qemu_get_betls(f, &env->gpr[i]);
+ }
+#if !defined(TARGET_PPC64)
+ for (i = 0; i < 32; i++) {
+ qemu_get_betls(f, &env->gprh[i]);
+ }
+#endif
+ qemu_get_betls(f, &env->lr);
+ qemu_get_betls(f, &env->ctr);
+ for (i = 0; i < 8; i++) {
+ qemu_get_be32s(f, &env->crf[i]);
+ }
+ qemu_get_betls(f, &xer);
+ cpu_write_xer(env, xer);
+ qemu_get_betls(f, &env->reserve_addr);
+ qemu_get_betls(f, &env->msr);
+ for (i = 0; i < 4; i++) {
+ qemu_get_betls(f, &env->tgpr[i]);
+ }
+ for (i = 0; i < 32; i++) {
+ union {
+ float64 d;
+ uint64_t l;
+ } u;
+ u.l = qemu_get_be64(f);
+ env->fpr[i] = u.d;
+ }
+ qemu_get_be32s(f, &fpscr);
+ env->fpscr = fpscr;
+ qemu_get_sbe32s(f, &env->access_type);
+#if defined(TARGET_PPC64)
+ qemu_get_betls(f, &env->spr[SPR_ASR]);
+ qemu_get_sbe32s(f, &env->slb_nr);
+#endif
+ qemu_get_betls(f, &sdr1);
+ for (i = 0; i < 32; i++) {
+ qemu_get_betls(f, &env->sr[i]);
+ }
+ for (i = 0; i < 2; i++) {
+ for (j = 0; j < 8; j++) {
+ qemu_get_betls(f, &env->DBAT[i][j]);
+ }
+ }
+ for (i = 0; i < 2; i++) {
+ for (j = 0; j < 8; j++) {
+ qemu_get_betls(f, &env->IBAT[i][j]);
+ }
+ }
+ qemu_get_sbe32s(f, &env->nb_tlb);
+ qemu_get_sbe32s(f, &env->tlb_per_way);
+ qemu_get_sbe32s(f, &env->nb_ways);
+ qemu_get_sbe32s(f, &env->last_way);
+ qemu_get_sbe32s(f, &env->id_tlbs);
+ qemu_get_sbe32s(f, &env->nb_pids);
+ if (env->tlb.tlb6) {
+ /* XXX: assumes 6xx */
+ for (i = 0; i < env->nb_tlb; i++) {
+ qemu_get_betls(f, &env->tlb.tlb6[i].pte0);
+ qemu_get_betls(f, &env->tlb.tlb6[i].pte1);
+ qemu_get_betls(f, &env->tlb.tlb6[i].EPN);
+ }
+ }
+ for (i = 0; i < 4; i++) {
+ qemu_get_betls(f, &env->pb[i]);
+ }
+ for (i = 0; i < 1024; i++) {
+ qemu_get_betls(f, &env->spr[i]);
+ }
+ if (!env->external_htab) {
+ ppc_store_sdr1(env, sdr1);
+ }
+ qemu_get_be32s(f, &env->vscr);
+ qemu_get_be64s(f, &env->spe_acc);
+ qemu_get_be32s(f, &env->spe_fscr);
+ qemu_get_betls(f, &env->msr_mask);
+ qemu_get_be32s(f, &env->flags);
+ qemu_get_sbe32s(f, &env->error_code);
+ qemu_get_be32s(f, &env->pending_interrupts);
+ qemu_get_be32s(f, &env->irq_input_state);
+ for (i = 0; i < POWERPC_EXCP_NB; i++) {
+ qemu_get_betls(f, &env->excp_vectors[i]);
+ }
+ qemu_get_betls(f, &env->excp_prefix);
+ qemu_get_betls(f, &env->ivor_mask);
+ qemu_get_betls(f, &env->ivpr_mask);
+ qemu_get_betls(f, &env->hreset_vector);
+ qemu_get_betls(f, &env->nip);
+ qemu_get_betls(f, &env->hflags);
+ qemu_get_betls(f, &env->hflags_nmsr);
+ qemu_get_sbe32(f); /* Discard unused mmu_idx */
+ qemu_get_sbe32(f); /* Discard unused power_mode */
+
+ /* Recompute mmu indices */
+ hreg_compute_mem_idx(env);
+
+ return 0;
+}
+
+static int get_avr(QEMUFile *f, void *pv, size_t size)
+{
+ ppc_avr_t *v = pv;
+
+ v->u64[0] = qemu_get_be64(f);
+ v->u64[1] = qemu_get_be64(f);
+
+ return 0;
+}
+
+static void put_avr(QEMUFile *f, void *pv, size_t size)
+{
+ ppc_avr_t *v = pv;
+
+ qemu_put_be64(f, v->u64[0]);
+ qemu_put_be64(f, v->u64[1]);
+}
+
+static const VMStateInfo vmstate_info_avr = {
+ .name = "avr",
+ .get = get_avr,
+ .put = put_avr,
+};
+
+#define VMSTATE_AVR_ARRAY_V(_f, _s, _n, _v) \
+ VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_avr, ppc_avr_t)
+
+#define VMSTATE_AVR_ARRAY(_f, _s, _n) \
+ VMSTATE_AVR_ARRAY_V(_f, _s, _n, 0)
+
+static bool cpu_pre_2_8_migration(void *opaque, int version_id)
+{
+ PowerPCCPU *cpu = opaque;
+
+ return cpu->pre_2_8_migration;
+}
+
+static void cpu_pre_save(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
+ int i;
+ uint64_t insns_compat_mask =
+ PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB
+ | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES
+ | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES
+ | PPC_FLOAT_STFIWX | PPC_FLOAT_EXT
+ | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ
+ | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC
+ | PPC_64B | PPC_64BX | PPC_ALTIVEC
+ | PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD;
+ uint64_t insns_compat_mask2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX
+ | PPC2_PERM_ISA206 | PPC2_DIVE_ISA206
+ | PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206
+ | PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207
+ | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207
+ | PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | PPC2_TM;
+
+ env->spr[SPR_LR] = env->lr;
+ env->spr[SPR_CTR] = env->ctr;
+ env->spr[SPR_XER] = cpu_read_xer(env);
+#if defined(TARGET_PPC64)
+ env->spr[SPR_CFAR] = env->cfar;
+#endif
+ env->spr[SPR_BOOKE_SPEFSCR] = env->spe_fscr;
+
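+ /* Mirror the architected BATs into their SPR slots so they are
+ * migrated as part of env.spr[]: BATs 0-3 map to the
+ * SPR_xBAT0U..SPR_xBAT3L range and BATs 4-7 to the separate
+ * SPR_xBAT4U.. range. */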
+ for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
+ env->spr[SPR_DBAT0U + 2*i] = env->DBAT[0][i];
+ env->spr[SPR_DBAT0U + 2*i + 1] = env->DBAT[1][i];
+ env->spr[SPR_IBAT0U + 2*i] = env->IBAT[0][i];
+ env->spr[SPR_IBAT0U + 2*i + 1] = env->IBAT[1][i];
+ }
+ for (i = 0; (i < 4) && ((i+4) < env->nb_BATs); i++) {
+ env->spr[SPR_DBAT4U + 2*i] = env->DBAT[0][i+4];
+ env->spr[SPR_DBAT4U + 2*i + 1] = env->DBAT[1][i+4];
+ env->spr[SPR_IBAT4U + 2*i] = env->IBAT[0][i+4];
+ env->spr[SPR_IBAT4U + 2*i + 1] = env->IBAT[1][i+4];
+ }
+
+ /* Hacks for migration compatibility between 2.6, 2.7 & 2.8 */
+ if (cpu->pre_2_8_migration) {
+ cpu->mig_msr_mask = env->msr_mask;
+ cpu->mig_insns_flags = env->insns_flags & insns_compat_mask;
+ cpu->mig_insns_flags2 = env->insns_flags2 & insns_compat_mask2;
+ cpu->mig_nb_BATs = env->nb_BATs;
+ }
+}
+
+static int cpu_post_load(void *opaque, int version_id)
+{
+ PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
+ int i;
+ target_ulong msr;
+
+ /*
+ * We always ignore the source PVR. The user or management
+ * software has to take care of running QEMU in a compatible mode.
+ */
+ env->spr[SPR_PVR] = env->spr_cb[SPR_PVR].default_value;
+ env->lr = env->spr[SPR_LR];
+ env->ctr = env->spr[SPR_CTR];
+ cpu_write_xer(env, env->spr[SPR_XER]);
+#if defined(TARGET_PPC64)
+ env->cfar = env->spr[SPR_CFAR];
+#endif
+ env->spe_fscr = env->spr[SPR_BOOKE_SPEFSCR];
+
+ for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
+ env->DBAT[0][i] = env->spr[SPR_DBAT0U + 2*i];
+ env->DBAT[1][i] = env->spr[SPR_DBAT0U + 2*i + 1];
+ env->IBAT[0][i] = env->spr[SPR_IBAT0U + 2*i];
+ env->IBAT[1][i] = env->spr[SPR_IBAT0U + 2*i + 1];
+ }
+ for (i = 0; (i < 4) && ((i+4) < env->nb_BATs); i++) {
+ env->DBAT[0][i+4] = env->spr[SPR_DBAT4U + 2*i];
+ env->DBAT[1][i+4] = env->spr[SPR_DBAT4U + 2*i + 1];
+ env->IBAT[0][i+4] = env->spr[SPR_IBAT4U + 2*i];
+ env->IBAT[1][i+4] = env->spr[SPR_IBAT4U + 2*i + 1];
+ }
+
+ if (!env->external_htab) {
+ /* Restore htab_base and htab_mask variables */
+ ppc_store_sdr1(env, env->spr[SPR_SDR1]);
+ }
+
+ /* Invalidate all msr bits except MSR_TGPR/MSR_HVB before restoring */
+ msr = env->msr;
+ env->msr ^= ~((1ULL << MSR_TGPR) | MSR_HVB);
+ ppc_store_msr(env, msr);
+
+ hreg_compute_mem_idx(env);
+
+ return 0;
+}
+
+static bool fpu_needed(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+
+ return (cpu->env.insns_flags & PPC_FLOAT);
+}
+
+static const VMStateDescription vmstate_fpu = {
+ .name = "cpu/fpu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = fpu_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_FLOAT64_ARRAY(env.fpr, PowerPCCPU, 32),
+ VMSTATE_UINTTL(env.fpscr, PowerPCCPU),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static bool altivec_needed(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+
+ return (cpu->env.insns_flags & PPC_ALTIVEC);
+}
+
+static const VMStateDescription vmstate_altivec = {
+ .name = "cpu/altivec",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = altivec_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_AVR_ARRAY(env.avr, PowerPCCPU, 32),
+ VMSTATE_UINT32(env.vscr, PowerPCCPU),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static bool vsx_needed(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+
+ return (cpu->env.insns_flags2 & PPC2_VSX);
+}
+
+static const VMStateDescription vmstate_vsx = {
+ .name = "cpu/vsx",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = vsx_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64_ARRAY(env.vsr, PowerPCCPU, 32),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+#ifdef TARGET_PPC64
+/* Transactional memory state */
+static bool tm_needed(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
+ return msr_ts;
+}
+
+static const VMStateDescription vmstate_tm = {
+ .name = "cpu/tm",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .needed = tm_needed,
+ .fields = (VMStateField []) {
+ VMSTATE_UINTTL_ARRAY(env.tm_gpr, PowerPCCPU, 32),
+ VMSTATE_AVR_ARRAY(env.tm_vsr, PowerPCCPU, 64),
+ VMSTATE_UINT64(env.tm_cr, PowerPCCPU),
+ VMSTATE_UINT64(env.tm_lr, PowerPCCPU),
+ VMSTATE_UINT64(env.tm_ctr, PowerPCCPU),
+ VMSTATE_UINT64(env.tm_fpscr, PowerPCCPU),
+ VMSTATE_UINT64(env.tm_amr, PowerPCCPU),
+ VMSTATE_UINT64(env.tm_ppr, PowerPCCPU),
+ VMSTATE_UINT64(env.tm_vrsave, PowerPCCPU),
+ VMSTATE_UINT32(env.tm_vscr, PowerPCCPU),
+ VMSTATE_UINT64(env.tm_dscr, PowerPCCPU),
+ VMSTATE_UINT64(env.tm_tar, PowerPCCPU),
+ VMSTATE_END_OF_LIST()
+ },
+};
+#endif
+
+static bool sr_needed(void *opaque)
+{
+#ifdef TARGET_PPC64
+ PowerPCCPU *cpu = opaque;
+
+ return !(cpu->env.mmu_model & POWERPC_MMU_64);
+#else
+ return true;
+#endif
+}
+
+static const VMStateDescription vmstate_sr = {
+ .name = "cpu/sr",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = sr_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINTTL_ARRAY(env.sr, PowerPCCPU, 32),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+#ifdef TARGET_PPC64
+static int get_slbe(QEMUFile *f, void *pv, size_t size)
+{
+ ppc_slb_t *v = pv;
+
+ v->esid = qemu_get_be64(f);
+ v->vsid = qemu_get_be64(f);
+
+ return 0;
+}
+
+static void put_slbe(QEMUFile *f, void *pv, size_t size)
+{
+ ppc_slb_t *v = pv;
+
+ qemu_put_be64(f, v->esid);
+ qemu_put_be64(f, v->vsid);
+}
+
+static const VMStateInfo vmstate_info_slbe = {
+ .name = "slbe",
+ .get = get_slbe,
+ .put = put_slbe,
+};
+
+#define VMSTATE_SLB_ARRAY_V(_f, _s, _n, _v) \
+ VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_slbe, ppc_slb_t)
+
+#define VMSTATE_SLB_ARRAY(_f, _s, _n) \
+ VMSTATE_SLB_ARRAY_V(_f, _s, _n, 0)
+
+static bool slb_needed(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+
+ /* We don't support any of the old segment table based 64-bit CPUs */
+ return (cpu->env.mmu_model & POWERPC_MMU_64);
+}
+
+static int slb_post_load(void *opaque, int version_id)
+{
+ PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
+ int i;
+
+ /* We've pulled in the raw esid and vsid values from the migration
+ * stream, but we need to recompute the page size pointers */
+ for (i = 0; i < env->slb_nr; i++) {
+ if (ppc_store_slb(cpu, i, env->slb[i].esid, env->slb[i].vsid) < 0) {
+ /* Migration source had bad values in its SLB */
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_slb = {
+ .name = "cpu/slb",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = slb_needed,
+ .post_load = slb_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_INT32_EQUAL(env.slb_nr, PowerPCCPU),
+ VMSTATE_SLB_ARRAY(env.slb, PowerPCCPU, MAX_SLB_ENTRIES),
+ VMSTATE_END_OF_LIST()
+ }
+};
+#endif /* TARGET_PPC64 */
+
+static const VMStateDescription vmstate_tlb6xx_entry = {
+ .name = "cpu/tlb6xx_entry",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINTTL(pte0, ppc6xx_tlb_t),
+ VMSTATE_UINTTL(pte1, ppc6xx_tlb_t),
+ VMSTATE_UINTTL(EPN, ppc6xx_tlb_t),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static bool tlb6xx_needed(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
+
+ return env->nb_tlb && (env->tlb_type == TLB_6XX);
+}
+
+static const VMStateDescription vmstate_tlb6xx = {
+ .name = "cpu/tlb6xx",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = tlb6xx_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU),
+ VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlb6, PowerPCCPU,
+ env.nb_tlb,
+ vmstate_tlb6xx_entry,
+ ppc6xx_tlb_t),
+ VMSTATE_UINTTL_ARRAY(env.tgpr, PowerPCCPU, 4),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_tlbemb_entry = {
+ .name = "cpu/tlbemb_entry",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(RPN, ppcemb_tlb_t),
+ VMSTATE_UINTTL(EPN, ppcemb_tlb_t),
+ VMSTATE_UINTTL(PID, ppcemb_tlb_t),
+ VMSTATE_UINTTL(size, ppcemb_tlb_t),
+ VMSTATE_UINT32(prot, ppcemb_tlb_t),
+ VMSTATE_UINT32(attr, ppcemb_tlb_t),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static bool tlbemb_needed(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
+
+ return env->nb_tlb && (env->tlb_type == TLB_EMB);
+}
+
+static bool pbr403_needed(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+ uint32_t pvr = cpu->env.spr[SPR_PVR];
+
+ return (pvr & 0xffff0000) == 0x00200000;
+}
+
+static const VMStateDescription vmstate_pbr403 = {
+ .name = "cpu/pbr403",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = pbr403_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINTTL_ARRAY(env.pb, PowerPCCPU, 4),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static const VMStateDescription vmstate_tlbemb = {
+ .name = "cpu/tlb6xx",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = tlbemb_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU),
+ VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbe, PowerPCCPU,
+ env.nb_tlb,
+ vmstate_tlbemb_entry,
+ ppcemb_tlb_t),
+ /* 403 protection registers */
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_pbr403,
+ NULL
+ }
+};
+
+static const VMStateDescription vmstate_tlbmas_entry = {
+ .name = "cpu/tlbmas_entry",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(mas8, ppcmas_tlb_t),
+ VMSTATE_UINT32(mas1, ppcmas_tlb_t),
+ VMSTATE_UINT64(mas2, ppcmas_tlb_t),
+ VMSTATE_UINT64(mas7_3, ppcmas_tlb_t),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static bool tlbmas_needed(void *opaque)
+{
+ PowerPCCPU *cpu = opaque;
+ CPUPPCState *env = &cpu->env;
+
+ return env->nb_tlb && (env->tlb_type == TLB_MAS);
+}
+
+static const VMStateDescription vmstate_tlbmas = {
+ .name = "cpu/tlbmas",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = tlbmas_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU),
+ VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbm, PowerPCCPU,
+ env.nb_tlb,
+ vmstate_tlbmas_entry,
+ ppcmas_tlb_t),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+const VMStateDescription vmstate_ppc_cpu = {
+ .name = "cpu",
+ .version_id = 5,
+ .minimum_version_id = 5,
+ .minimum_version_id_old = 4,
+ .load_state_old = cpu_load_old,
+ .pre_save = cpu_pre_save,
+ .post_load = cpu_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_UNUSED(sizeof(target_ulong)), /* was _EQUAL(env.spr[SPR_PVR]) */
+
+ /* User mode architected state */
+ VMSTATE_UINTTL_ARRAY(env.gpr, PowerPCCPU, 32),
+#if !defined(TARGET_PPC64)
+ VMSTATE_UINTTL_ARRAY(env.gprh, PowerPCCPU, 32),
+#endif
+ VMSTATE_UINT32_ARRAY(env.crf, PowerPCCPU, 8),
+ VMSTATE_UINTTL(env.nip, PowerPCCPU),
+
+ /* SPRs */
+ VMSTATE_UINTTL_ARRAY(env.spr, PowerPCCPU, 1024),
+ VMSTATE_UINT64(env.spe_acc, PowerPCCPU),
+
+ /* Reservation */
+ VMSTATE_UINTTL(env.reserve_addr, PowerPCCPU),
+
+ /* Supervisor mode architected state */
+ VMSTATE_UINTTL(env.msr, PowerPCCPU),
+
+ /* Internal state */
+ VMSTATE_UINTTL(env.hflags_nmsr, PowerPCCPU),
+ /* FIXME: access_type? */
+
+ /* Sanity checking */
+ VMSTATE_UINTTL_TEST(mig_msr_mask, PowerPCCPU, cpu_pre_2_8_migration),
+ VMSTATE_UINT64_TEST(mig_insns_flags, PowerPCCPU, cpu_pre_2_8_migration),
+ VMSTATE_UINT64_TEST(mig_insns_flags2, PowerPCCPU,
+ cpu_pre_2_8_migration),
+ VMSTATE_UINT32_TEST(mig_nb_BATs, PowerPCCPU, cpu_pre_2_8_migration),
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_fpu,
+ &vmstate_altivec,
+ &vmstate_vsx,
+ &vmstate_sr,
+#ifdef TARGET_PPC64
+ &vmstate_tm,
+ &vmstate_slb,
+#endif /* TARGET_PPC64 */
+ &vmstate_tlb6xx,
+ &vmstate_tlbemb,
+ &vmstate_tlbmas,
+ NULL
+ }
+};
diff --git a/target/ppc/mem_helper.c b/target/ppc/mem_helper.c
new file mode 100644
index 0000000000..1ab8a6eab4
--- /dev/null
+++ b/target/ppc/mem_helper.c
@@ -0,0 +1,310 @@
+/*
+ * PowerPC memory access emulation helpers for QEMU.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "qemu/host-utils.h"
+#include "exec/helper-proto.h"
+
+#include "helper_regs.h"
+#include "exec/cpu_ldst.h"
+
+//#define DEBUG_OP
+
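+/* Data is loaded and stored in the target's configured byte order
+ * (TARGET_WORDS_BIGENDIAN). When the guest's MSR[LE] disagrees with that
+ * compiled-in byte order, the helpers below swap the value by hand, so
+ * needs_byteswap() is true exactly when MSR[LE] and the target default
+ * differ.
+ */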
+static inline bool needs_byteswap(const CPUPPCState *env)
+{
+#if defined(TARGET_WORDS_BIGENDIAN)
+ return msr_le;
+#else
+ return !msr_le;
+#endif
+}
+
+/*****************************************************************************/
+/* Memory load and stores */
+
+static inline target_ulong addr_add(CPUPPCState *env, target_ulong addr,
+ target_long arg)
+{
+#if defined(TARGET_PPC64)
+ if (!msr_is_64bit(env, env->msr)) {
+ return (uint32_t)(addr + arg);
+ } else
+#endif
+ {
+ return addr + arg;
+ }
+}
+
+void helper_lmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
+{
+ for (; reg < 32; reg++) {
+ if (needs_byteswap(env)) {
+ env->gpr[reg] = bswap32(cpu_ldl_data_ra(env, addr, GETPC()));
+ } else {
+ env->gpr[reg] = cpu_ldl_data_ra(env, addr, GETPC());
+ }
+ addr = addr_add(env, addr, 4);
+ }
+}
+
+void helper_stmw(CPUPPCState *env, target_ulong addr, uint32_t reg)
+{
+ for (; reg < 32; reg++) {
+ if (needs_byteswap(env)) {
+ cpu_stl_data_ra(env, addr, bswap32((uint32_t)env->gpr[reg]),
+ GETPC());
+ } else {
+ cpu_stl_data_ra(env, addr, (uint32_t)env->gpr[reg], GETPC());
+ }
+ addr = addr_add(env, addr, 4);
+ }
+}
+
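+/* Load string word: copy 'nb' bytes starting at 'addr' into successive
+ * GPRs, four bytes per register, wrapping from r31 back to r0. A final
+ * partial word is left-justified within a 32-bit word and zero-padded;
+ * for example, nb == 6 fills gpr[reg] with a full word and places the
+ * remaining two bytes in the two most-significant byte positions of the
+ * word loaded into gpr[reg + 1].
+ */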
+static void do_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
+ uint32_t reg, uintptr_t raddr)
+{
+ int sh;
+
+ for (; nb > 3; nb -= 4) {
+ env->gpr[reg] = cpu_ldl_data_ra(env, addr, raddr);
+ reg = (reg + 1) % 32;
+ addr = addr_add(env, addr, 4);
+ }
+ if (unlikely(nb > 0)) {
+ env->gpr[reg] = 0;
+ for (sh = 24; nb > 0; nb--, sh -= 8) {
+ env->gpr[reg] |= cpu_ldub_data_ra(env, addr, raddr) << sh;
+ addr = addr_add(env, addr, 1);
+ }
+ }
+}
+
+void helper_lsw(CPUPPCState *env, target_ulong addr, uint32_t nb, uint32_t reg)
+{
+ do_lsw(env, addr, nb, reg, GETPC());
+}
+
+/* PPC32 specification says we must generate an exception if
+ * rA is in the range of registers to be loaded.
+ * On the other hand, IBM says this is valid, but rA won't be loaded.
+ * For now, I'll follow the spec...
+ */
+void helper_lswx(CPUPPCState *env, target_ulong addr, uint32_t reg,
+ uint32_t ra, uint32_t rb)
+{
+ if (likely(xer_bc != 0)) {
+ int num_used_regs = (xer_bc + 3) / 4;
+ if (unlikely((ra != 0 && lsw_reg_in_range(reg, num_used_regs, ra)) ||
+ lsw_reg_in_range(reg, num_used_regs, rb))) {
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL |
+ POWERPC_EXCP_INVAL_LSWX, GETPC());
+ } else {
+ do_lsw(env, addr, xer_bc, reg, GETPC());
+ }
+ }
+}
+
+void helper_stsw(CPUPPCState *env, target_ulong addr, uint32_t nb,
+ uint32_t reg)
+{
+ int sh;
+
+ for (; nb > 3; nb -= 4) {
+ cpu_stl_data_ra(env, addr, env->gpr[reg], GETPC());
+ reg = (reg + 1) % 32;
+ addr = addr_add(env, addr, 4);
+ }
+ if (unlikely(nb > 0)) {
+ for (sh = 24; nb > 0; nb--, sh -= 8) {
+ cpu_stb_data_ra(env, addr, (env->gpr[reg] >> sh) & 0xFF, GETPC());
+ addr = addr_add(env, addr, 1);
+ }
+ }
+}
+
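+/* dcbz zeroes one data cache line. The line size comes from
+ * env->dcache_line_size; with, for example, a 128-byte line, mask = ~127
+ * rounds the effective address down to a 128-byte boundary. Any
+ * reservation held on that line is cancelled, and the block is cleared
+ * either through a host pointer (fast path) or 8 bytes at a time.
+ */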
+void helper_dcbz(CPUPPCState *env, target_ulong addr, uint32_t opcode)
+{
+ target_ulong mask, dcbz_size = env->dcache_line_size;
+ uint32_t i;
+ void *haddr;
+
+#if defined(TARGET_PPC64)
+ /* Check for dcbz vs dcbzl on 970 */
+ if (env->excp_model == POWERPC_EXCP_970 &&
+ !(opcode & 0x00200000) && ((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1) {
+ dcbz_size = 32;
+ }
+#endif
+
+ /* Align address */
+ mask = ~(dcbz_size - 1);
+ addr &= mask;
+
+ /* Check reservation */
+ if ((env->reserve_addr & mask) == (addr & mask)) {
+ env->reserve_addr = (target_ulong)-1ULL;
+ }
+
+ /* Try fast path translate */
+ haddr = tlb_vaddr_to_host(env, addr, MMU_DATA_STORE, env->dmmu_idx);
+ if (haddr) {
+ memset(haddr, 0, dcbz_size);
+ } else {
+ /* Slow path */
+ for (i = 0; i < dcbz_size; i += 8) {
+ cpu_stq_data_ra(env, addr + i, 0, GETPC());
+ }
+ }
+}
+
+void helper_icbi(CPUPPCState *env, target_ulong addr)
+{
+ addr &= ~(env->dcache_line_size - 1);
+ /* Invalidate one cache line:
+ * the PowerPC specification says this is to be treated like a load
+ * (not a fetch) by the MMU. To make sure that is the case,
+ * do the load "by hand".
+ */
+ cpu_ldl_data_ra(env, addr, GETPC());
+}
+
+/* XXX: to be tested */
+target_ulong helper_lscbx(CPUPPCState *env, target_ulong addr, uint32_t reg,
+ uint32_t ra, uint32_t rb)
+{
+ int i, c, d;
+
+ d = 24;
+ for (i = 0; i < xer_bc; i++) {
+ c = cpu_ldub_data_ra(env, addr, GETPC());
+ addr = addr_add(env, addr, 1);
+ /* ra (if not 0) and rb are never modified */
+ if (likely(reg != rb && (ra == 0 || reg != ra))) {
+ env->gpr[reg] = (env->gpr[reg] & ~(0xFF << d)) | (c << d);
+ }
+ if (unlikely(c == xer_cmp)) {
+ break;
+ }
+ if (likely(d != 0)) {
+ d -= 8;
+ } else {
+ d = 24;
+ reg++;
+ reg = reg & 0x1F;
+ }
+ }
+ return i;
+}
+
+/*****************************************************************************/
+/* Altivec extension helpers */
+#if defined(HOST_WORDS_BIGENDIAN)
+#define HI_IDX 0
+#define LO_IDX 1
+#else
+#define HI_IDX 1
+#define LO_IDX 0
+#endif
+
+/* We use msr_le to determine index ordering in a vector. However,
+ byteswapping is not simply controlled by msr_le. We also need to take
+ into account endianness of the target. This is done for the little-endian
+ PPC64 user-mode target. */
+
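+/* Illustrative example of the element index computation below: lvewx
+ * accesses u32 elements, so n_elems = 4 and sh = 2. An effective address
+ * with (addr & 0xf) == 8 selects element index 2; if MSR[LE] is set the
+ * index is mirrored to n_elems - 1 - index = 1. HI_IDX and LO_IDX then
+ * map that architectural element number onto the host storage order of
+ * ppc_avr_t.
+ */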
+#define LVE(name, access, swap, element) \
+ void helper_##name(CPUPPCState *env, ppc_avr_t *r, \
+ target_ulong addr) \
+ { \
+ size_t n_elems = ARRAY_SIZE(r->element); \
+ int adjust = HI_IDX*(n_elems - 1); \
+ int sh = sizeof(r->element[0]) >> 1; \
+ int index = (addr & 0xf) >> sh; \
+ if (msr_le) { \
+ index = n_elems - index - 1; \
+ } \
+ \
+ if (needs_byteswap(env)) { \
+ r->element[LO_IDX ? index : (adjust - index)] = \
+ swap(access(env, addr, GETPC())); \
+ } else { \
+ r->element[LO_IDX ? index : (adjust - index)] = \
+ access(env, addr, GETPC()); \
+ } \
+ }
+#define I(x) (x)
+LVE(lvebx, cpu_ldub_data_ra, I, u8)
+LVE(lvehx, cpu_lduw_data_ra, bswap16, u16)
+LVE(lvewx, cpu_ldl_data_ra, bswap32, u32)
+#undef I
+#undef LVE
+
+#define STVE(name, access, swap, element) \
+ void helper_##name(CPUPPCState *env, ppc_avr_t *r, \
+ target_ulong addr) \
+ { \
+ size_t n_elems = ARRAY_SIZE(r->element); \
+ int adjust = HI_IDX * (n_elems - 1); \
+ int sh = sizeof(r->element[0]) >> 1; \
+ int index = (addr & 0xf) >> sh; \
+ if (msr_le) { \
+ index = n_elems - index - 1; \
+ } \
+ \
+ if (needs_byteswap(env)) { \
+ access(env, addr, swap(r->element[LO_IDX ? index : \
+ (adjust - index)]), \
+ GETPC()); \
+ } else { \
+ access(env, addr, r->element[LO_IDX ? index : \
+ (adjust - index)], GETPC()); \
+ } \
+ }
+#define I(x) (x)
+STVE(stvebx, cpu_stb_data_ra, I, u8)
+STVE(stvehx, cpu_stw_data_ra, bswap16, u16)
+STVE(stvewx, cpu_stl_data_ra, bswap32, u32)
+#undef I
+#undef STVE
+
+#undef HI_IDX
+#undef LO_IDX
+
+void helper_tbegin(CPUPPCState *env)
+{
+ /* As a degenerate implementation, always fail tbegin. The reason
+ * given is "Nesting overflow". The "persistent" bit is set,
+ * providing a hint to the error handler to not retry. The TFIAR
+ * captures the address of the failure, which is this tbegin
+ * instruction. Instruction execution will continue with the
+ * next instruction in memory, which is precisely what we want.
+ */
+
+ env->spr[SPR_TEXASR] =
+ (1ULL << TEXASR_FAILURE_PERSISTENT) |
+ (1ULL << TEXASR_NESTING_OVERFLOW) |
+ (msr_hv << TEXASR_PRIVILEGE_HV) |
+ (msr_pr << TEXASR_PRIVILEGE_PR) |
+ (1ULL << TEXASR_FAILURE_SUMMARY) |
+ (1ULL << TEXASR_TFIAR_EXACT);
+ env->spr[SPR_TFIAR] = env->nip | (msr_hv << 1) | msr_pr;
+ env->spr[SPR_TFHAR] = env->nip + 4;
+ env->crf[0] = 0xB; /* 0b1010 = transaction failure */
+}
diff --git a/target/ppc/mfrom_table.c b/target/ppc/mfrom_table.c
new file mode 100644
index 0000000000..6a1fa375c9
--- /dev/null
+++ b/target/ppc/mfrom_table.c
@@ -0,0 +1,79 @@
+static const uint8_t mfrom_ROM_table[602] =
+{
+ 77, 77, 76, 76, 75, 75, 74, 74,
+ 73, 73, 72, 72, 71, 71, 70, 70,
+ 69, 69, 68, 68, 68, 67, 67, 66,
+ 66, 65, 65, 64, 64, 64, 63, 63,
+ 62, 62, 61, 61, 61, 60, 60, 59,
+ 59, 58, 58, 58, 57, 57, 56, 56,
+ 56, 55, 55, 54, 54, 54, 53, 53,
+ 53, 52, 52, 51, 51, 51, 50, 50,
+ 50, 49, 49, 49, 48, 48, 47, 47,
+ 47, 46, 46, 46, 45, 45, 45, 44,
+ 44, 44, 43, 43, 43, 42, 42, 42,
+ 42, 41, 41, 41, 40, 40, 40, 39,
+ 39, 39, 39, 38, 38, 38, 37, 37,
+ 37, 37, 36, 36, 36, 35, 35, 35,
+ 35, 34, 34, 34, 34, 33, 33, 33,
+ 33, 32, 32, 32, 32, 31, 31, 31,
+ 31, 30, 30, 30, 30, 29, 29, 29,
+ 29, 28, 28, 28, 28, 28, 27, 27,
+ 27, 27, 26, 26, 26, 26, 26, 25,
+ 25, 25, 25, 25, 24, 24, 24, 24,
+ 24, 23, 23, 23, 23, 23, 23, 22,
+ 22, 22, 22, 22, 21, 21, 21, 21,
+ 21, 21, 20, 20, 20, 20, 20, 20,
+ 19, 19, 19, 19, 19, 19, 19, 18,
+ 18, 18, 18, 18, 18, 17, 17, 17,
+ 17, 17, 17, 17, 16, 16, 16, 16,
+ 16, 16, 16, 16, 15, 15, 15, 15,
+ 15, 15, 15, 15, 14, 14, 14, 14,
+ 14, 14, 14, 14, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 9, 9, 9,
+ 9, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 8, 8, 8, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 8,
+ 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 0,
+};
diff --git a/target/ppc/mfrom_table_gen.c b/target/ppc/mfrom_table_gen.c
new file mode 100644
index 0000000000..631791808e
--- /dev/null
+++ b/target/ppc/mfrom_table_gen.c
@@ -0,0 +1,32 @@
+#define _GNU_SOURCE
+#include "qemu/osdep.h"
+#include <math.h>
+
+int main (void)
+{
+ double d;
+ uint8_t n;
+ int i;
+
+ printf("static const uint8_t mfrom_ROM_table[602] =\n{\n ");
+ for (i = 0; i < 602; i++) {
+ /* Extremely decomposed:
+ * T0 = 256 * log10(10^(-T0 / 256) + 1.0) + 0.5
+ */
+ d = -i;
+ d /= 256.0;
+ d = exp10(d);
+ d += 1.0;
+ d = log10(d);
+ d *= 256;
+ d += 0.5;
+ n = d;
+ printf("%3d, ", n);
+ if ((i & 7) == 7) {
+ printf("\n ");
+ }
+ }
+ printf("\n};\n");
+
+ return 0;
+}
diff --git a/target/ppc/misc_helper.c b/target/ppc/misc_helper.c
new file mode 100644
index 0000000000..1e6e705a4e
--- /dev/null
+++ b/target/ppc/misc_helper.c
@@ -0,0 +1,210 @@
+/*
+ * Miscellaneous PowerPC emulation helpers for QEMU.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+
+#include "helper_regs.h"
+
+/*****************************************************************************/
+/* SPR accesses */
+void helper_load_dump_spr(CPUPPCState *env, uint32_t sprn)
+{
+ qemu_log("Read SPR %d %03x => " TARGET_FMT_lx "\n", sprn, sprn,
+ env->spr[sprn]);
+}
+
+void helper_store_dump_spr(CPUPPCState *env, uint32_t sprn)
+{
+ qemu_log("Write SPR %d %03x <= " TARGET_FMT_lx "\n", sprn, sprn,
+ env->spr[sprn]);
+}
+
+#ifdef TARGET_PPC64
+static void raise_fu_exception(CPUPPCState *env, uint32_t bit,
+ uint32_t sprn, uint32_t cause,
+ uintptr_t raddr)
+{
+ qemu_log("Facility SPR %d is unavailable (SPR FSCR:%d)\n", sprn, bit);
+
+ env->spr[SPR_FSCR] &= ~((target_ulong)FSCR_IC_MASK << FSCR_IC_POS);
+ cause &= FSCR_IC_MASK;
+ env->spr[SPR_FSCR] |= (target_ulong)cause << FSCR_IC_POS;
+
+ raise_exception_err_ra(env, POWERPC_EXCP_FU, 0, raddr);
+}
+#endif
+
+void helper_fscr_facility_check(CPUPPCState *env, uint32_t bit,
+ uint32_t sprn, uint32_t cause)
+{
+#ifdef TARGET_PPC64
+ if (env->spr[SPR_FSCR] & (1ULL << bit)) {
+ /* Facility is enabled, continue */
+ return;
+ }
+ raise_fu_exception(env, bit, sprn, cause, GETPC());
+#endif
+}
+
+void helper_msr_facility_check(CPUPPCState *env, uint32_t bit,
+ uint32_t sprn, uint32_t cause)
+{
+#ifdef TARGET_PPC64
+ if (env->msr & (1ULL << bit)) {
+ /* Facility is enabled, continue */
+ return;
+ }
+ raise_fu_exception(env, bit, sprn, cause, GETPC());
+#endif
+}
+
+#if !defined(CONFIG_USER_ONLY)
+
+void helper_store_sdr1(CPUPPCState *env, target_ulong val)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+
+ if (!env->external_htab) {
+ if (env->spr[SPR_SDR1] != val) {
+ ppc_store_sdr1(env, val);
+ tlb_flush(CPU(cpu), 1);
+ }
+ }
+}
+
+void helper_store_hid0_601(CPUPPCState *env, target_ulong val)
+{
+ target_ulong hid0;
+
+ hid0 = env->spr[SPR_HID0];
+ if ((val ^ hid0) & 0x00000008) {
+ /* Change current endianness */
+ env->hflags &= ~(1 << MSR_LE);
+ env->hflags_nmsr &= ~(1 << MSR_LE);
+ env->hflags_nmsr |= (1 << MSR_LE) & (((val >> 3) & 1) << MSR_LE);
+ env->hflags |= env->hflags_nmsr;
+ qemu_log("%s: set endianness to %c => " TARGET_FMT_lx "\n", __func__,
+ val & 0x8 ? 'l' : 'b', env->hflags);
+ }
+ env->spr[SPR_HID0] = (uint32_t)val;
+}
+
+void helper_store_403_pbr(CPUPPCState *env, uint32_t num, target_ulong value)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+
+ if (likely(env->pb[num] != value)) {
+ env->pb[num] = value;
+ /* Should be optimized */
+ tlb_flush(CPU(cpu), 1);
+ }
+}
+
+void helper_store_40x_dbcr0(CPUPPCState *env, target_ulong val)
+{
+ store_40x_dbcr0(env, val);
+}
+
+void helper_store_40x_sler(CPUPPCState *env, target_ulong val)
+{
+ store_40x_sler(env, val);
+}
+#endif
+/*****************************************************************************/
+/* PowerPC 601 specific instructions (POWER bridge) */
+
+target_ulong helper_clcs(CPUPPCState *env, uint32_t arg)
+{
+ switch (arg) {
+ case 0x0CUL:
+ /* Instruction cache line size */
+ return env->icache_line_size;
+ case 0x0DUL:
+ /* Data cache line size */
+ return env->dcache_line_size;
+ case 0x0EUL:
+ /* Minimum cache line size */
+ return (env->icache_line_size < env->dcache_line_size) ?
+ env->icache_line_size : env->dcache_line_size;
+ case 0x0FUL:
+ /* Maximum cache line size */
+ return (env->icache_line_size > env->dcache_line_size) ?
+ env->icache_line_size : env->dcache_line_size;
+ default:
+ /* Undefined */
+ return 0;
+ }
+}
+
+/*****************************************************************************/
+/* Special registers manipulation */
+
+/* GDBstub can read and write MSR... */
+void ppc_store_msr(CPUPPCState *env, target_ulong value)
+{
+ hreg_store_msr(env, value, 0);
+}
+
+/* This code is lifted from MacOnLinux. It is called whenever
+ * THRM1, 2 or 3 is read and fixes up the values in such a way
+ * that MacOS will not hang. These registers exist on some
+ * 75x and 74xx processors.
+ */
+void helper_fixup_thrm(CPUPPCState *env)
+{
+ target_ulong v, t;
+ int i;
+
+#define THRM1_TIN (1 << 31)
+#define THRM1_TIV (1 << 30)
+#define THRM1_THRES(x) (((x) & 0x7f) << 23)
+#define THRM1_TID (1 << 2)
+#define THRM1_TIE (1 << 1)
+#define THRM1_V (1 << 0)
+#define THRM3_E (1 << 0)
+
+ if (!(env->spr[SPR_THRM3] & THRM3_E)) {
+ return;
+ }
+
+ /* Note: Thermal interrupts are unimplemented */
+ for (i = SPR_THRM1; i <= SPR_THRM2; i++) {
+ v = env->spr[i];
+ if (!(v & THRM1_V)) {
+ continue;
+ }
+ v |= THRM1_TIV;
+ v &= ~THRM1_TIN;
+ t = v & THRM1_THRES(127);
+ if ((v & THRM1_TID) && t < THRM1_THRES(24)) {
+ v |= THRM1_TIN;
+ }
+ if (!(v & THRM1_TID) && t > THRM1_THRES(24)) {
+ v |= THRM1_TIN;
+ }
+ env->spr[i] = v;
+ }
+}
diff --git a/target/ppc/mmu-hash32.c b/target/ppc/mmu-hash32.c
new file mode 100644
index 0000000000..29bace622a
--- /dev/null
+++ b/target/ppc/mmu-hash32.c
@@ -0,0 +1,568 @@
+/*
+ * PowerPC MMU, TLB and BAT emulation helpers for QEMU.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ * Copyright (c) 2013 David Gibson, IBM Corporation
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "sysemu/kvm.h"
+#include "kvm_ppc.h"
+#include "mmu-hash32.h"
+#include "exec/log.h"
+
+//#define DEBUG_BATS
+
+#ifdef DEBUG_BATS
+# define LOG_BATS(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
+#else
+# define LOG_BATS(...) do { } while (0)
+#endif
+
+struct mmu_ctx_hash32 {
+ hwaddr raddr; /* Real address */
+ int prot; /* Protection bits */
+ int key; /* Access key */
+};
+
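+/* Summary of the PP (page protection) encoding implemented below:
+ * key == 0: pp 0/1/2 grant read/write, pp 3 grants read-only
+ * key == 1: pp 0 grants no access, pp 1/3 read-only, pp 2 read/write
+ * PAGE_EXEC is added whenever the no-execute flag (nx) is clear.
+ */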
+static int ppc_hash32_pp_prot(int key, int pp, int nx)
+{
+ int prot;
+
+ if (key == 0) {
+ switch (pp) {
+ case 0x0:
+ case 0x1:
+ case 0x2:
+ prot = PAGE_READ | PAGE_WRITE;
+ break;
+
+ case 0x3:
+ prot = PAGE_READ;
+ break;
+
+ default:
+ abort();
+ }
+ } else {
+ switch (pp) {
+ case 0x0:
+ prot = 0;
+ break;
+
+ case 0x1:
+ case 0x3:
+ prot = PAGE_READ;
+ break;
+
+ case 0x2:
+ prot = PAGE_READ | PAGE_WRITE;
+ break;
+
+ default:
+ abort();
+ }
+ }
+ if (nx == 0) {
+ prot |= PAGE_EXEC;
+ }
+
+ return prot;
+}
+
+static int ppc_hash32_pte_prot(PowerPCCPU *cpu,
+ target_ulong sr, ppc_hash_pte32_t pte)
+{
+ CPUPPCState *env = &cpu->env;
+ unsigned pp, key;
+
+ key = !!(msr_pr ? (sr & SR32_KP) : (sr & SR32_KS));
+ pp = pte.pte1 & HPTE32_R_PP;
+
+ return ppc_hash32_pp_prot(key, pp, !!(sr & SR32_NX));
+}
+
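+/* The BL field of the upper BAT register selects the block size. The
+ * value returned here is the mask of effective-address bits that must
+ * match BEPI: for example, BL = 0 gives mask 0xfffe0000 (a 128 KiB
+ * block), while an all-ones BL gives mask 0xf0000000 (a 256 MiB block).
+ * A zero return means the BAT is not valid at the current privilege
+ * level.
+ */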
+static target_ulong hash32_bat_size(PowerPCCPU *cpu,
+ target_ulong batu, target_ulong batl)
+{
+ CPUPPCState *env = &cpu->env;
+
+ if ((msr_pr && !(batu & BATU32_VP))
+ || (!msr_pr && !(batu & BATU32_VS))) {
+ return 0;
+ }
+
+ return BATU32_BEPI & ~((batu & BATU32_BL) << 15);
+}
+
+static int hash32_bat_prot(PowerPCCPU *cpu,
+ target_ulong batu, target_ulong batl)
+{
+ int pp, prot;
+
+ prot = 0;
+ pp = batl & BATL32_PP;
+ if (pp != 0) {
+ prot = PAGE_READ | PAGE_EXEC;
+ if (pp == 0x2) {
+ prot |= PAGE_WRITE;
+ }
+ }
+ return prot;
+}
+
+static target_ulong hash32_bat_601_size(PowerPCCPU *cpu,
+ target_ulong batu, target_ulong batl)
+{
+ if (!(batl & BATL32_601_V)) {
+ return 0;
+ }
+
+ return BATU32_BEPI & ~((batl & BATL32_601_BL) << 17);
+}
+
+static int hash32_bat_601_prot(PowerPCCPU *cpu,
+ target_ulong batu, target_ulong batl)
+{
+ CPUPPCState *env = &cpu->env;
+ int key, pp;
+
+ pp = batu & BATU32_601_PP;
+ if (msr_pr == 0) {
+ key = !!(batu & BATU32_601_KS);
+ } else {
+ key = !!(batu & BATU32_601_KP);
+ }
+ return ppc_hash32_pp_prot(key, pp, 0);
+}
+
+static hwaddr ppc_hash32_bat_lookup(PowerPCCPU *cpu, target_ulong ea, int rwx,
+ int *prot)
+{
+ CPUPPCState *env = &cpu->env;
+ target_ulong *BATlt, *BATut;
+ int i;
+
+ LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__,
+ rwx == 2 ? 'I' : 'D', ea);
+ if (rwx == 2) {
+ BATlt = env->IBAT[1];
+ BATut = env->IBAT[0];
+ } else {
+ BATlt = env->DBAT[1];
+ BATut = env->DBAT[0];
+ }
+ for (i = 0; i < env->nb_BATs; i++) {
+ target_ulong batu = BATut[i];
+ target_ulong batl = BATlt[i];
+ target_ulong mask;
+
+ if (unlikely(env->mmu_model == POWERPC_MMU_601)) {
+ mask = hash32_bat_601_size(cpu, batu, batl);
+ } else {
+ mask = hash32_bat_size(cpu, batu, batl);
+ }
+ LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
+ " BATl " TARGET_FMT_lx "\n", __func__,
+ rwx == 2 ? 'I' : 'D', i, ea, batu, batl);
+
+ if (mask && ((ea & mask) == (batu & BATU32_BEPI))) {
+ hwaddr raddr = (batl & mask) | (ea & ~mask);
+
+ if (unlikely(env->mmu_model == POWERPC_MMU_601)) {
+ *prot = hash32_bat_601_prot(cpu, batu, batl);
+ } else {
+ *prot = hash32_bat_prot(cpu, batu, batl);
+ }
+
+ return raddr & TARGET_PAGE_MASK;
+ }
+ }
+
+ /* No hit */
+#if defined(DEBUG_BATS)
+ if (qemu_log_enabled()) {
+ LOG_BATS("no BAT match for " TARGET_FMT_lx ":\n", ea);
+ for (i = 0; i < 4; i++) {
+ target_ulong batu = BATut[i];
+ target_ulong batl = BATlt[i];
+ target_ulong bepi = batu & BATU32_BEPI;
+ target_ulong bl = (batu & BATU32_BL) << 15;
+ LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
+ " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
+ TARGET_FMT_lx "\n",
+ __func__, rwx == 2 ? 'I' : 'D', i, ea,
+ batu, batl, bepi, bl);
+ }
+ }
+#endif
+
+ return -1;
+}
+
+static int ppc_hash32_direct_store(PowerPCCPU *cpu, target_ulong sr,
+ target_ulong eaddr, int rwx,
+ hwaddr *raddr, int *prot)
+{
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
+ int key = !!(msr_pr ? (sr & SR32_KP) : (sr & SR32_KS));
+
+ qemu_log_mask(CPU_LOG_MMU, "direct store...\n");
+
+ if ((sr & 0x1FF00000) >> 20 == 0x07f) {
+ /* Memory-forced I/O controller interface access */
+ /* If T=1 and BUID=x'07F', the 601 performs a memory access
+ * to SR[28-31] LA[4-31], bypassing all protection mechanisms.
+ */
+ *raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF);
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return 0;
+ }
+
+ if (rwx == 2) {
+ /* No code fetch is allowed in direct-store areas */
+ cs->exception_index = POWERPC_EXCP_ISI;
+ env->error_code = 0x10000000;
+ return 1;
+ }
+
+ switch (env->access_type) {
+ case ACCESS_INT:
+ /* Integer load/store : only access allowed */
+ break;
+ case ACCESS_FLOAT:
+ /* Floating point load/store */
+ cs->exception_index = POWERPC_EXCP_ALIGN;
+ env->error_code = POWERPC_EXCP_ALIGN_FP;
+ env->spr[SPR_DAR] = eaddr;
+ return 1;
+ case ACCESS_RES:
+ /* lwarx, ldarx or stwcx. */
+ env->error_code = 0;
+ env->spr[SPR_DAR] = eaddr;
+ if (rwx == 1) {
+ env->spr[SPR_DSISR] = 0x06000000;
+ } else {
+ env->spr[SPR_DSISR] = 0x04000000;
+ }
+ return 1;
+ case ACCESS_CACHE:
+ /* dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi */
+ /* Should make the instruction a no-op.
+ * As it already does nothing, it's quite easy :-)
+ */
+ *raddr = eaddr;
+ return 0;
+ case ACCESS_EXT:
+ /* eciwx or ecowx */
+ cs->exception_index = POWERPC_EXCP_DSI;
+ env->error_code = 0;
+ env->spr[SPR_DAR] = eaddr;
+ if (rwx == 1) {
+ env->spr[SPR_DSISR] = 0x06100000;
+ } else {
+ env->spr[SPR_DSISR] = 0x04100000;
+ }
+ return 1;
+ default:
+ cpu_abort(cs, "ERROR: instruction should not need "
+ "address translation\n");
+ }
+ if ((rwx == 1 || key != 1) && (rwx == 0 || key != 0)) {
+ *raddr = eaddr;
+ return 0;
+ } else {
+ cs->exception_index = POWERPC_EXCP_DSI;
+ env->error_code = 0;
+ env->spr[SPR_DAR] = eaddr;
+ if (rwx == 1) {
+ env->spr[SPR_DSISR] = 0x0a000000;
+ } else {
+ env->spr[SPR_DSISR] = 0x08000000;
+ }
+ return 1;
+ }
+}
+
+hwaddr get_pteg_offset32(PowerPCCPU *cpu, hwaddr hash)
+{
+ CPUPPCState *env = &cpu->env;
+
+ return (hash * HASH_PTEG_SIZE_32) & env->htab_mask;
+}
+
+static hwaddr ppc_hash32_pteg_search(PowerPCCPU *cpu, hwaddr pteg_off,
+ bool secondary, target_ulong ptem,
+ ppc_hash_pte32_t *pte)
+{
+ hwaddr pte_offset = pteg_off;
+ target_ulong pte0, pte1;
+ int i;
+
+ for (i = 0; i < HPTES_PER_GROUP; i++) {
+ pte0 = ppc_hash32_load_hpte0(cpu, pte_offset);
+ pte1 = ppc_hash32_load_hpte1(cpu, pte_offset);
+
+ if ((pte0 & HPTE32_V_VALID)
+ && (secondary == !!(pte0 & HPTE32_V_SECONDARY))
+ && HPTE32_V_COMPARE(pte0, ptem)) {
+ pte->pte0 = pte0;
+ pte->pte1 = pte1;
+ return pte_offset;
+ }
+
+ pte_offset += HASH_PTE_SIZE_32;
+ }
+
+ return -1;
+}
+
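+/* 32-bit hash table lookup: the primary hash is the VSID XORed with the
+ * page index within the 256 MiB segment; get_pteg_offset32() turns that
+ * into a PTEG offset (hash * 64 bytes, wrapped by htab_mask). If no PTE
+ * in the primary group matches, the secondary group at offset ~hash is
+ * tried. ptem packs the VSID and the abbreviated page index (API) in the
+ * layout used for the pte0 comparison.
+ */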
+static hwaddr ppc_hash32_htab_lookup(PowerPCCPU *cpu,
+ target_ulong sr, target_ulong eaddr,
+ ppc_hash_pte32_t *pte)
+{
+ CPUPPCState *env = &cpu->env;
+ hwaddr pteg_off, pte_offset;
+ hwaddr hash;
+ uint32_t vsid, pgidx, ptem;
+
+ vsid = sr & SR32_VSID;
+ pgidx = (eaddr & ~SEGMENT_MASK_256M) >> TARGET_PAGE_BITS;
+ hash = vsid ^ pgidx;
+ ptem = (vsid << 7) | (pgidx >> 10);
+
+ /* Page address translation */
+ qemu_log_mask(CPU_LOG_MMU, "htab_base " TARGET_FMT_plx
+ " htab_mask " TARGET_FMT_plx
+ " hash " TARGET_FMT_plx "\n",
+ env->htab_base, env->htab_mask, hash);
+
+ /* Primary PTEG lookup */
+ qemu_log_mask(CPU_LOG_MMU, "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
+ " vsid=%" PRIx32 " ptem=%" PRIx32
+ " hash=" TARGET_FMT_plx "\n",
+ env->htab_base, env->htab_mask, vsid, ptem, hash);
+ pteg_off = get_pteg_offset32(cpu, hash);
+ pte_offset = ppc_hash32_pteg_search(cpu, pteg_off, 0, ptem, pte);
+ if (pte_offset == -1) {
+ /* Secondary PTEG lookup */
+ qemu_log_mask(CPU_LOG_MMU, "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
+ " vsid=%" PRIx32 " api=%" PRIx32
+ " hash=" TARGET_FMT_plx "\n", env->htab_base,
+ env->htab_mask, vsid, ptem, ~hash);
+ pteg_off = get_pteg_offset32(cpu, ~hash);
+ pte_offset = ppc_hash32_pteg_search(cpu, pteg_off, 1, ptem, pte);
+ }
+
+ return pte_offset;
+}
+
+static hwaddr ppc_hash32_pte_raddr(target_ulong sr, ppc_hash_pte32_t pte,
+ target_ulong eaddr)
+{
+ hwaddr rpn = pte.pte1 & HPTE32_R_RPN;
+ hwaddr mask = ~TARGET_PAGE_MASK;
+
+ return (rpn & ~mask) | (eaddr & mask);
+}
+
+int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
+ int mmu_idx)
+{
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
+ target_ulong sr;
+ hwaddr pte_offset;
+ ppc_hash_pte32_t pte;
+ int prot;
+ uint32_t new_pte1;
+ const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC};
+ hwaddr raddr;
+
+ assert((rwx == 0) || (rwx == 1) || (rwx == 2));
+
+ /* 1. Handle real mode accesses */
+ if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
+ /* Translation is off */
+ raddr = eaddr;
+ tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
+ PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
+ TARGET_PAGE_SIZE);
+ return 0;
+ }
+
+ /* 2. Check Block Address Translation entries (BATs) */
+ if (env->nb_BATs != 0) {
+ raddr = ppc_hash32_bat_lookup(cpu, eaddr, rwx, &prot);
+ if (raddr != -1) {
+ if (need_prot[rwx] & ~prot) {
+ if (rwx == 2) {
+ cs->exception_index = POWERPC_EXCP_ISI;
+ env->error_code = 0x08000000;
+ } else {
+ cs->exception_index = POWERPC_EXCP_DSI;
+ env->error_code = 0;
+ env->spr[SPR_DAR] = eaddr;
+ if (rwx == 1) {
+ env->spr[SPR_DSISR] = 0x0a000000;
+ } else {
+ env->spr[SPR_DSISR] = 0x08000000;
+ }
+ }
+ return 1;
+ }
+
+ tlb_set_page(cs, eaddr & TARGET_PAGE_MASK,
+ raddr & TARGET_PAGE_MASK, prot, mmu_idx,
+ TARGET_PAGE_SIZE);
+ return 0;
+ }
+ }
+
+ /* 3. Look up the Segment Register */
+ sr = env->sr[eaddr >> 28];
+
+ /* 4. Handle direct store segments */
+ if (sr & SR32_T) {
+ if (ppc_hash32_direct_store(cpu, sr, eaddr, rwx,
+ &raddr, &prot) == 0) {
+ tlb_set_page(cs, eaddr & TARGET_PAGE_MASK,
+ raddr & TARGET_PAGE_MASK, prot, mmu_idx,
+ TARGET_PAGE_SIZE);
+ return 0;
+ } else {
+ return 1;
+ }
+ }
+
+ /* 5. Check for segment level no-execute violation */
+ if ((rwx == 2) && (sr & SR32_NX)) {
+ cs->exception_index = POWERPC_EXCP_ISI;
+ env->error_code = 0x10000000;
+ return 1;
+ }
+
+ /* 6. Locate the PTE in the hash table */
+ pte_offset = ppc_hash32_htab_lookup(cpu, sr, eaddr, &pte);
+ if (pte_offset == -1) {
+ if (rwx == 2) {
+ cs->exception_index = POWERPC_EXCP_ISI;
+ env->error_code = 0x40000000;
+ } else {
+ cs->exception_index = POWERPC_EXCP_DSI;
+ env->error_code = 0;
+ env->spr[SPR_DAR] = eaddr;
+ if (rwx == 1) {
+ env->spr[SPR_DSISR] = 0x42000000;
+ } else {
+ env->spr[SPR_DSISR] = 0x40000000;
+ }
+ }
+
+ return 1;
+ }
+ qemu_log_mask(CPU_LOG_MMU,
+ "found PTE at offset %08" HWADDR_PRIx "\n", pte_offset);
+
+ /* 7. Check access permissions */
+
+ prot = ppc_hash32_pte_prot(cpu, sr, pte);
+
+ if (need_prot[rwx] & ~prot) {
+ /* Access right violation */
+ qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
+ if (rwx == 2) {
+ cs->exception_index = POWERPC_EXCP_ISI;
+ env->error_code = 0x08000000;
+ } else {
+ cs->exception_index = POWERPC_EXCP_DSI;
+ env->error_code = 0;
+ env->spr[SPR_DAR] = eaddr;
+ if (rwx == 1) {
+ env->spr[SPR_DSISR] = 0x0a000000;
+ } else {
+ env->spr[SPR_DSISR] = 0x08000000;
+ }
+ }
+ return 1;
+ }
+
+ qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");
+
+ /* 8. Update PTE referenced and changed bits if necessary */
+
+ new_pte1 = pte.pte1 | HPTE32_R_R; /* set referenced bit */
+ if (rwx == 1) {
+ new_pte1 |= HPTE32_R_C; /* set changed (dirty) bit */
+ } else {
+ /* Treat the page as read-only for now, so that a later write
+ * will pass through this function again to set the C bit */
+ prot &= ~PAGE_WRITE;
+ }
+
+ if (new_pte1 != pte.pte1) {
+ ppc_hash32_store_hpte1(cpu, pte_offset, new_pte1);
+ }
+
+ /* 9. Determine the real address from the PTE */
+
+ raddr = ppc_hash32_pte_raddr(sr, pte, eaddr);
+
+ tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
+ prot, mmu_idx, TARGET_PAGE_SIZE);
+
+ return 0;
+}
+
+hwaddr ppc_hash32_get_phys_page_debug(PowerPCCPU *cpu, target_ulong eaddr)
+{
+ CPUPPCState *env = &cpu->env;
+ target_ulong sr;
+ hwaddr pte_offset;
+ ppc_hash_pte32_t pte;
+ int prot;
+
+ if (msr_dr == 0) {
+ /* Translation is off */
+ return eaddr;
+ }
+
+ if (env->nb_BATs != 0) {
+ hwaddr raddr = ppc_hash32_bat_lookup(cpu, eaddr, 0, &prot);
+ if (raddr != -1) {
+ return raddr;
+ }
+ }
+
+ sr = env->sr[eaddr >> 28];
+
+ if (sr & SR32_T) {
+ /* FIXME: Add suitable debug support for Direct Store segments */
+ return -1;
+ }
+
+ pte_offset = ppc_hash32_htab_lookup(cpu, sr, eaddr, &pte);
+ if (pte_offset == -1) {
+ return -1;
+ }
+
+ return ppc_hash32_pte_raddr(sr, pte, eaddr) & TARGET_PAGE_MASK;
+}
diff --git a/target/ppc/mmu-hash32.h b/target/ppc/mmu-hash32.h
new file mode 100644
index 0000000000..5b9fb08d1a
--- /dev/null
+++ b/target/ppc/mmu-hash32.h
@@ -0,0 +1,112 @@
+#ifndef MMU_HASH32_H
+#define MMU_HASH32_H
+
+#ifndef CONFIG_USER_ONLY
+
+hwaddr get_pteg_offset32(PowerPCCPU *cpu, hwaddr hash);
+hwaddr ppc_hash32_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr);
+int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, vaddr address, int rw,
+ int mmu_idx);
+
+/*
+ * Segment register definitions
+ */
+
+#define SR32_T 0x80000000
+#define SR32_KS 0x40000000
+#define SR32_KP 0x20000000
+#define SR32_NX 0x10000000
+#define SR32_VSID 0x00ffffff
+
+/*
+ * Block Address Translation (BAT) definitions
+ */
+
+#define BATU32_BEPI 0xfffe0000
+#define BATU32_BL 0x00001ffc
+#define BATU32_VS 0x00000002
+#define BATU32_VP 0x00000001
+
+
+#define BATL32_BRPN 0xfffe0000
+#define BATL32_WIMG 0x00000078
+#define BATL32_PP 0x00000003
+
+/* PowerPC 601 has slightly different BAT registers */
+
+#define BATU32_601_KS 0x00000008
+#define BATU32_601_KP 0x00000004
+#define BATU32_601_PP 0x00000003
+
+#define BATL32_601_V 0x00000040
+#define BATL32_601_BL 0x0000003f
+
+/*
+ * Hash page table definitions
+ */
+
+#define HPTES_PER_GROUP 8
+#define HASH_PTE_SIZE_32 8
+#define HASH_PTEG_SIZE_32 (HASH_PTE_SIZE_32 * HPTES_PER_GROUP)
+
+#define HPTE32_V_VALID 0x80000000
+#define HPTE32_V_VSID 0x7fffff80
+#define HPTE32_V_SECONDARY 0x00000040
+#define HPTE32_V_API 0x0000003f
+#define HPTE32_V_COMPARE(x, y) (!(((x) ^ (y)) & 0x7fffffbf))
+
+#define HPTE32_R_RPN 0xfffff000
+#define HPTE32_R_R 0x00000100
+#define HPTE32_R_C 0x00000080
+#define HPTE32_R_W 0x00000040
+#define HPTE32_R_I 0x00000020
+#define HPTE32_R_M 0x00000010
+#define HPTE32_R_G 0x00000008
+#define HPTE32_R_WIMG 0x00000078
+#define HPTE32_R_PP 0x00000003
+
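+/* Each hashed page table entry is two 32-bit words: pte0 holds the valid
+ * bit, VSID, secondary-hash flag and API; pte1 holds the real page
+ * number plus the R, C, WIMG and PP bits. The helpers below read and
+ * write those words at htab_base + pte_offset in guest physical memory.
+ */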
+static inline target_ulong ppc_hash32_load_hpte0(PowerPCCPU *cpu,
+ hwaddr pte_offset)
+{
+ CPUPPCState *env = &cpu->env;
+
+ assert(!env->external_htab); /* Not supported on 32-bit for now */
+ return ldl_phys(CPU(cpu)->as, env->htab_base + pte_offset);
+}
+
+static inline target_ulong ppc_hash32_load_hpte1(PowerPCCPU *cpu,
+ hwaddr pte_offset)
+{
+ CPUPPCState *env = &cpu->env;
+
+ assert(!env->external_htab); /* Not supported on 32-bit for now */
+ return ldl_phys(CPU(cpu)->as,
+ env->htab_base + pte_offset + HASH_PTE_SIZE_32 / 2);
+}
+
+static inline void ppc_hash32_store_hpte0(PowerPCCPU *cpu,
+ hwaddr pte_offset, target_ulong pte0)
+{
+ CPUPPCState *env = &cpu->env;
+
+ assert(!env->external_htab); /* Not supported on 32-bit for now */
+ stl_phys(CPU(cpu)->as, env->htab_base + pte_offset, pte0);
+}
+
+static inline void ppc_hash32_store_hpte1(PowerPCCPU *cpu,
+ hwaddr pte_offset, target_ulong pte1)
+{
+ CPUPPCState *env = &cpu->env;
+
+ assert(!env->external_htab); /* Not supported on 32-bit for now */
+ stl_phys(CPU(cpu)->as,
+ env->htab_base + pte_offset + HASH_PTE_SIZE_32 / 2, pte1);
+}
+
+typedef struct {
+ uint32_t pte0, pte1;
+} ppc_hash_pte32_t;
+
+#endif /* CONFIG_USER_ONLY */
+
+#endif /* MMU_HASH32_H */
diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c
new file mode 100644
index 0000000000..fdb7a787bf
--- /dev/null
+++ b/target/ppc/mmu-hash64.c
@@ -0,0 +1,1059 @@
+/*
+ * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ * Copyright (c) 2013 David Gibson, IBM Corporation
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "qemu/error-report.h"
+#include "sysemu/kvm.h"
+#include "kvm_ppc.h"
+#include "mmu-hash64.h"
+#include "exec/log.h"
+
+//#define DEBUG_SLB
+
+#ifdef DEBUG_SLB
+# define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
+#else
+# define LOG_SLB(...) do { } while (0)
+#endif
+
+/*
+ * Used to indicate that a CPU has its hash page table (HPT) managed
+ * within the host kernel
+ */
+#define MMU_HASH64_KVM_MANAGED_HPT ((void *)-1)
+
+/*
+ * SLB handling
+ */
+
+static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
+{
+ CPUPPCState *env = &cpu->env;
+ uint64_t esid_256M, esid_1T;
+ int n;
+
+ LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);
+
+ esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
+ esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;
+
+ for (n = 0; n < env->slb_nr; n++) {
+ ppc_slb_t *slb = &env->slb[n];
+
+ LOG_SLB("%s: slot %d %016" PRIx64 " %016"
+ PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
+ /* We check for 1T matches on all MMUs here - if the MMU
+ * doesn't have 1T segment support, we will have prevented 1T
+ * entries from being inserted in the slbmte code. */
+ if (((slb->esid == esid_256M) &&
+ ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
+ || ((slb->esid == esid_1T) &&
+ ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) {
+ return slb;
+ }
+ }
+
+ return NULL;
+}
+
+void dump_slb(FILE *f, fprintf_function cpu_fprintf, PowerPCCPU *cpu)
+{
+ CPUPPCState *env = &cpu->env;
+ int i;
+ uint64_t slbe, slbv;
+
+ cpu_synchronize_state(CPU(cpu));
+
+ cpu_fprintf(f, "SLB\tESID\t\t\tVSID\n");
+ for (i = 0; i < env->slb_nr; i++) {
+ slbe = env->slb[i].esid;
+ slbv = env->slb[i].vsid;
+ if (slbe == 0 && slbv == 0) {
+ continue;
+ }
+ cpu_fprintf(f, "%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
+ i, slbe, slbv);
+ }
+}
+
+void helper_slbia(CPUPPCState *env)
+{
+ int n;
+
+ /* XXX: Warning: slbia never invalidates the first segment */
+ for (n = 1; n < env->slb_nr; n++) {
+ ppc_slb_t *slb = &env->slb[n];
+
+ if (slb->esid & SLB_ESID_V) {
+ slb->esid &= ~SLB_ESID_V;
+ /* XXX: given the fact that segment size is 256 MB or 1TB,
+ * and we still don't have a tlb_flush_mask(env, n, mask)
+ * in QEMU, we just invalidate all TLBs
+ */
+ env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
+ }
+ }
+}
+
+void helper_slbie(CPUPPCState *env, target_ulong addr)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ ppc_slb_t *slb;
+
+ slb = slb_lookup(cpu, addr);
+ if (!slb) {
+ return;
+ }
+
+ if (slb->esid & SLB_ESID_V) {
+ slb->esid &= ~SLB_ESID_V;
+
+ /* XXX: given the fact that segment size is 256 MB or 1TB,
+ * and we still don't have a tlb_flush_mask(env, n, mask)
+ * in QEMU, we just invalidate all TLBs
+ */
+ env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
+ }
+}
+
+int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
+ target_ulong esid, target_ulong vsid)
+{
+ CPUPPCState *env = &cpu->env;
+ ppc_slb_t *slb = &env->slb[slot];
+ const struct ppc_one_seg_page_size *sps = NULL;
+ int i;
+
+ if (slot >= env->slb_nr) {
+ return -1; /* Bad slot number */
+ }
+ if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) {
+ return -1; /* Reserved bits set */
+ }
+ if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
+ return -1; /* Bad segment size */
+ }
+ if ((vsid & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
+ return -1; /* 1T segment on MMU that doesn't support it */
+ }
+
+ for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
+ const struct ppc_one_seg_page_size *sps1 = &env->sps.sps[i];
+
+ if (!sps1->page_shift) {
+ break;
+ }
+
+ if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
+ sps = sps1;
+ break;
+ }
+ }
+
+ if (!sps) {
+ error_report("Bad page size encoding in SLB store: slot "TARGET_FMT_lu
+ " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx,
+ slot, esid, vsid);
+ return -1;
+ }
+
+ slb->esid = esid;
+ slb->vsid = vsid;
+ slb->sps = sps;
+
+ LOG_SLB("%s: %d " TARGET_FMT_lx " - " TARGET_FMT_lx " => %016" PRIx64
+ " %016" PRIx64 "\n", __func__, slot, esid, vsid,
+ slb->esid, slb->vsid);
+
+ return 0;
+}
+
+static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb,
+ target_ulong *rt)
+{
+ CPUPPCState *env = &cpu->env;
+ int slot = rb & 0xfff;
+ ppc_slb_t *slb = &env->slb[slot];
+
+ if (slot >= env->slb_nr) {
+ return -1;
+ }
+
+ *rt = slb->esid;
+ return 0;
+}
+
+static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
+ target_ulong *rt)
+{
+ CPUPPCState *env = &cpu->env;
+ int slot = rb & 0xfff;
+ ppc_slb_t *slb = &env->slb[slot];
+
+ if (slot >= env->slb_nr) {
+ return -1;
+ }
+
+ *rt = slb->vsid;
+ return 0;
+}
+
+static int ppc_find_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
+ target_ulong *rt)
+{
+ CPUPPCState *env = &cpu->env;
+ ppc_slb_t *slb;
+
+ if (!msr_is_64bit(env, env->msr)) {
+ rb &= 0xffffffff;
+ }
+ slb = slb_lookup(cpu, rb);
+ if (slb == NULL) {
+ *rt = (target_ulong)-1ul;
+ } else {
+ *rt = slb->vsid;
+ }
+ return 0;
+}
+
+void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+
+ if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) {
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL, GETPC());
+ }
+}
+
+target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ target_ulong rt = 0;
+
+ if (ppc_load_slb_esid(cpu, rb, &rt) < 0) {
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL, GETPC());
+ }
+ return rt;
+}
+
+target_ulong helper_find_slb_vsid(CPUPPCState *env, target_ulong rb)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ target_ulong rt = 0;
+
+ if (ppc_find_slb_vsid(cpu, rb, &rt) < 0) {
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL, GETPC());
+ }
+ return rt;
+}
+
+target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ target_ulong rt = 0;
+
+ if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) {
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL, GETPC());
+ }
+ return rt;
+}
+
+/*
+ * 64-bit hash table MMU handling
+ */
+void ppc_hash64_set_sdr1(PowerPCCPU *cpu, target_ulong value,
+ Error **errp)
+{
+ CPUPPCState *env = &cpu->env;
+ target_ulong htabsize = value & SDR_64_HTABSIZE;
+
+ env->spr[SPR_SDR1] = value;
+ if (htabsize > 28) {
+ error_setg(errp,
+ "Invalid HTABSIZE 0x" TARGET_FMT_lx" stored in SDR1",
+ htabsize);
+ htabsize = 28;
+ }
+ env->htab_mask = (1ULL << (htabsize + 18 - 7)) - 1;
+ env->htab_base = value & SDR_64_HTABORG;
+}
+
+void ppc_hash64_set_external_hpt(PowerPCCPU *cpu, void *hpt, int shift,
+ Error **errp)
+{
+ CPUPPCState *env = &cpu->env;
+ Error *local_err = NULL;
+
+ if (hpt) {
+ env->external_htab = hpt;
+ } else {
+ env->external_htab = MMU_HASH64_KVM_MANAGED_HPT;
+ }
+ ppc_hash64_set_sdr1(cpu, (target_ulong)(uintptr_t)hpt | (shift - 18),
+ &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ /* Not strictly necessary, but makes it clearer that an external
+ * htab is in use when debugging */
+ env->htab_base = -1;
+
+ if (kvm_enabled()) {
+ if (kvmppc_put_books_sregs(cpu) < 0) {
+ error_setg(errp, "Unable to update SDR1 in KVM");
+ }
+ }
+}
+
+static int ppc_hash64_pte_prot(PowerPCCPU *cpu,
+ ppc_slb_t *slb, ppc_hash_pte64_t pte)
+{
+ CPUPPCState *env = &cpu->env;
+ unsigned pp, key;
+ /* Some pp bit combinations have undefined behaviour, so default
+ * to no access in those cases */
+ int prot = 0;
+
+ key = !!(msr_pr ? (slb->vsid & SLB_VSID_KP)
+ : (slb->vsid & SLB_VSID_KS));
+ pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61);
+
+ if (key == 0) {
+ switch (pp) {
+ case 0x0:
+ case 0x1:
+ case 0x2:
+ prot = PAGE_READ | PAGE_WRITE;
+ break;
+
+ case 0x3:
+ case 0x6:
+ prot = PAGE_READ;
+ break;
+ }
+ } else {
+ switch (pp) {
+ case 0x0:
+ case 0x6:
+ prot = 0;
+ break;
+
+ case 0x1:
+ case 0x3:
+ prot = PAGE_READ;
+ break;
+
+ case 0x2:
+ prot = PAGE_READ | PAGE_WRITE;
+ break;
+ }
+ }
+
+ /* No execute if either noexec or guarded bits set */
+ if (!((pte.pte1 & HPTE64_R_N) || (pte.pte1 & HPTE64_R_G)
+ || (slb->vsid & SLB_VSID_N))) {
+ prot |= PAGE_EXEC;
+ }
+
+ return prot;
+}
+
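+/* Authority Mask Register (AMR) checking: the HPTE carries a storage key
+ * (HPTE64_R_KEY) selecting one of 32 two-bit fields in the AMR at bit
+ * position 2 * (31 - key). Within that pair, the 0x2 bit forbids stores
+ * and the 0x1 bit forbids loads, so the corresponding PAGE_WRITE or
+ * PAGE_READ permission is stripped below.
+ */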
+static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte)
+{
+ CPUPPCState *env = &cpu->env;
+ int key, amrbits;
+ int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+
+ /* Only recent MMUs implement Virtual Page Class Key Protection */
+ if (!(env->mmu_model & POWERPC_MMU_AMR)) {
+ return prot;
+ }
+
+ key = HPTE64_R_KEY(pte.pte1);
+ amrbits = (env->spr[SPR_AMR] >> 2*(31 - key)) & 0x3;
+
+ /* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */
+ /* env->spr[SPR_AMR]); */
+
+ /*
+ * A store is permitted if the AMR bit is 0. Remove write
+ * protection if it is set.
+ */
+ if (amrbits & 0x2) {
+ prot &= ~PAGE_WRITE;
+ }
+ /*
+ * A load is permitted if the AMR bit is 0. Remove read
+ * protection if it is set.
+ */
+ if (amrbits & 0x1) {
+ prot &= ~PAGE_READ;
+ }
+
+ return prot;
+}
+
+uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index)
+{
+ uint64_t token = 0;
+ hwaddr pte_offset;
+
+ pte_offset = pte_index * HASH_PTE_SIZE_64;
+ if (cpu->env.external_htab == MMU_HASH64_KVM_MANAGED_HPT) {
+ /*
+ * HTAB is controlled by KVM. Fetch the PTEG into a new buffer.
+ */
+ token = kvmppc_hash64_read_pteg(cpu, pte_index);
+ } else if (cpu->env.external_htab) {
+ /*
+ * HTAB is controlled by QEMU. Just point to the internally
+ * accessible PTEG.
+ */
+ token = (uint64_t)(uintptr_t) cpu->env.external_htab + pte_offset;
+ } else if (cpu->env.htab_base) {
+ token = cpu->env.htab_base + pte_offset;
+ }
+ return token;
+}
+
+void ppc_hash64_stop_access(PowerPCCPU *cpu, uint64_t token)
+{
+ if (cpu->env.external_htab == MMU_HASH64_KVM_MANAGED_HPT) {
+ kvmppc_hash64_free_pteg(token);
+ }
+}
+
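+/* For large pages (HPTE64_V_LARGE set), the actual page size is encoded
+ * in the low bits of the real page number field of pte1. This helper
+ * compares that encoding (pte_enc) against each page size the segment
+ * supports and returns the matching page shift, or 0 if the encoding is
+ * not valid for this segment.
+ */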
+static unsigned hpte_page_shift(const struct ppc_one_seg_page_size *sps,
+ uint64_t pte0, uint64_t pte1)
+{
+ int i;
+
+ if (!(pte0 & HPTE64_V_LARGE)) {
+ if (sps->page_shift != 12) {
+ /* 4kiB page in a non 4kiB segment */
+ return 0;
+ }
+ /* Normal 4kiB page */
+ return 12;
+ }
+
+ for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
+ const struct ppc_one_page_size *ps = &sps->enc[i];
+ uint64_t mask;
+
+ if (!ps->page_shift) {
+ break;
+ }
+
+ if (ps->page_shift == 12) {
+ /* L bit is set so this can't be a 4kiB page */
+ continue;
+ }
+
+ mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;
+
+ if ((pte1 & mask) == ((uint64_t)ps->pte_enc << HPTE64_R_RPN_SHIFT)) {
+ return ps->page_shift;
+ }
+ }
+
+ return 0; /* Bad page size encoding */
+}
+
+static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
+ const struct ppc_one_seg_page_size *sps,
+ target_ulong ptem,
+ ppc_hash_pte64_t *pte, unsigned *pshift)
+{
+ CPUPPCState *env = &cpu->env;
+ int i;
+ uint64_t token;
+ target_ulong pte0, pte1;
+ target_ulong pte_index;
+
+ pte_index = (hash & env->htab_mask) * HPTES_PER_GROUP;
+ token = ppc_hash64_start_access(cpu, pte_index);
+ if (!token) {
+ return -1;
+ }
+ for (i = 0; i < HPTES_PER_GROUP; i++) {
+ pte0 = ppc_hash64_load_hpte0(cpu, token, i);
+ pte1 = ppc_hash64_load_hpte1(cpu, token, i);
+
+ /* This compares V, B, H (secondary) and the AVPN */
+ if (HPTE64_V_COMPARE(pte0, ptem)) {
+ *pshift = hpte_page_shift(sps, pte0, pte1);
+ /*
+ * If there is no match, ignore the PTE, it could simply
+ * be for a different segment size encoding and the
+ * architecture specifies we should not match. Linux will
+ * potentially leave behind PTEs for the wrong base page
+ * size when demoting segments.
+ */
+ if (*pshift == 0) {
+ continue;
+ }
+ /* The matched page size is passed back to the caller via
+ * *pshift; nothing more is done with it here.
+ */
+ pte->pte0 = pte0;
+ pte->pte1 = pte1;
+ ppc_hash64_stop_access(cpu, token);
+ return (pte_index + i) * HASH_PTE_SIZE_64;
+ }
+ }
+ ppc_hash64_stop_access(cpu, token);
+ /*
+ * We didn't find a valid entry.
+ */
+ return -1;
+}
+
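+/* 64-bit hash table lookup. The hash input depends on the segment size
+ * taken from the SLB entry:
+ * 256 MiB segments: hash = VSID ^ (EPN >> page_shift)
+ * 1 TiB segments: hash = VSID ^ (VSID << 25) ^ (EPN >> page_shift)
+ * The primary PTEG index is (hash & htab_mask); the secondary group is
+ * probed with ~hash and HPTE64_V_SECONDARY set in the compare value.
+ */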
+static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
+ ppc_slb_t *slb, target_ulong eaddr,
+ ppc_hash_pte64_t *pte, unsigned *pshift)
+{
+ CPUPPCState *env = &cpu->env;
+ hwaddr pte_offset;
+ hwaddr hash;
+ uint64_t vsid, epnmask, epn, ptem;
+ const struct ppc_one_seg_page_size *sps = slb->sps;
+
+ /* The SLB store path should prevent any bad page size encodings
+ * getting in there, so: */
+ assert(sps);
+
+ /* If ISL is set in LPCR we need to clamp the page size to 4K */
+ if (env->spr[SPR_LPCR] & LPCR_ISL) {
+ /* We assume that when using TCG, 4k is first entry of SPS */
+ sps = &env->sps.sps[0];
+ assert(sps->page_shift == 12);
+ }
+
+ epnmask = ~((1ULL << sps->page_shift) - 1);
+
+ if (slb->vsid & SLB_VSID_B) {
+ /* 1TB segment */
+ vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
+ epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
+ hash = vsid ^ (vsid << 25) ^ (epn >> sps->page_shift);
+ } else {
+ /* 256M segment */
+ vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
+ epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
+ hash = vsid ^ (epn >> sps->page_shift);
+ }
+ ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);
+ ptem |= HPTE64_V_VALID;
+
+ /* Page address translation */
+ qemu_log_mask(CPU_LOG_MMU,
+ "htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
+ " hash " TARGET_FMT_plx "\n",
+ env->htab_base, env->htab_mask, hash);
+
+ /* Primary PTEG lookup */
+ qemu_log_mask(CPU_LOG_MMU,
+ "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
+ " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
+ " hash=" TARGET_FMT_plx "\n",
+ env->htab_base, env->htab_mask, vsid, ptem, hash);
+ pte_offset = ppc_hash64_pteg_search(cpu, hash, sps, ptem, pte, pshift);
+
+ if (pte_offset == -1) {
+ /* Secondary PTEG lookup */
+ ptem |= HPTE64_V_SECONDARY;
+ qemu_log_mask(CPU_LOG_MMU,
+ "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
+ " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
+ " hash=" TARGET_FMT_plx "\n", env->htab_base,
+ env->htab_mask, vsid, ptem, ~hash);
+
+ pte_offset = ppc_hash64_pteg_search(cpu, ~hash, sps, ptem, pte, pshift);
+ }
+
+ return pte_offset;
+}
+
+unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
+ uint64_t pte0, uint64_t pte1)
+{
+ CPUPPCState *env = &cpu->env;
+ int i;
+
+ if (!(pte0 & HPTE64_V_LARGE)) {
+ return 12;
+ }
+
+ /*
+ * The encodings in env->sps need to be carefully chosen so that
+ * this gives an unambiguous result.
+ */
+ for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
+ const struct ppc_one_seg_page_size *sps = &env->sps.sps[i];
+ unsigned shift;
+
+ if (!sps->page_shift) {
+ break;
+ }
+
+ shift = hpte_page_shift(sps, pte0, pte1);
+ if (shift) {
+ return shift;
+ }
+ }
+
+ return 0;
+}
+
+static void ppc_hash64_set_isi(CPUState *cs, CPUPPCState *env,
+ uint64_t error_code)
+{
+ bool vpm;
+
+ if (msr_ir) {
+ vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
+ } else {
+ vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM0);
+ }
+ if (vpm && !msr_hv) {
+ cs->exception_index = POWERPC_EXCP_HISI;
+ } else {
+ cs->exception_index = POWERPC_EXCP_ISI;
+ }
+ env->error_code = error_code;
+}
+
+static void ppc_hash64_set_dsi(CPUState *cs, CPUPPCState *env, uint64_t dar,
+ uint64_t dsisr)
+{
+ bool vpm;
+
+ if (msr_dr) {
+ vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
+ } else {
+ vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM0);
+ }
+ if (vpm && !msr_hv) {
+ cs->exception_index = POWERPC_EXCP_HDSI;
+ env->spr[SPR_HDAR] = dar;
+ env->spr[SPR_HDSISR] = dsisr;
+ } else {
+ cs->exception_index = POWERPC_EXCP_DSI;
+ env->spr[SPR_DAR] = dar;
+ env->spr[SPR_DSISR] = dsisr;
+ }
+ env->error_code = 0;
+}
+
+
+int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
+ int rwx, int mmu_idx)
+{
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
+ ppc_slb_t *slb;
+ unsigned apshift;
+ hwaddr pte_offset;
+ ppc_hash_pte64_t pte;
+ int pp_prot, amr_prot, prot;
+ uint64_t new_pte1, dsisr;
+ const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC};
+ hwaddr raddr;
+
+ assert((rwx == 0) || (rwx == 1) || (rwx == 2));
+
+ /* Note on LPCR usage: 970 uses HID4, but our special variant
+ * of store_spr copies relevant fields into env->spr[SPR_LPCR].
+ * Similarly we filter unimplemented bits when storing into
+ * LPCR depending on the MMU version. This code can thus just
+ * use the LPCR "as-is".
+ */
+
+ /* 1. Handle real mode accesses */
+ if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
+ /* Translation is supposedly "off" */
+ /* In real mode the top 4 effective address bits are (mostly) ignored */
+ raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
+
+ /* In HV mode, add HRMOR if top EA bit is clear */
+ if (msr_hv || !env->has_hv_mode) {
+ if (!(eaddr >> 63)) {
+ raddr |= env->spr[SPR_HRMOR];
+ }
+ } else {
+ /* Otherwise, check VPM for RMA vs VRMA */
+ if (env->spr[SPR_LPCR] & LPCR_VPM0) {
+ slb = &env->vrma_slb;
+ if (slb->sps) {
+ goto skip_slb_search;
+ }
+ /* Not much else to do here */
+ cs->exception_index = POWERPC_EXCP_MCHECK;
+ env->error_code = 0;
+ return 1;
+ } else if (raddr < env->rmls) {
+ /* RMA. Check bounds in RMLS */
+ raddr |= env->spr[SPR_RMOR];
+ } else {
+ /* The access failed, generate the appropriate interrupt */
+ if (rwx == 2) {
+ ppc_hash64_set_isi(cs, env, 0x08000000);
+ } else {
+ dsisr = 0x08000000;
+ if (rwx == 1) {
+ dsisr |= 0x02000000;
+ }
+ ppc_hash64_set_dsi(cs, env, eaddr, dsisr);
+ }
+ return 1;
+ }
+ }
+ tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
+ PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
+ TARGET_PAGE_SIZE);
+ return 0;
+ }
+
+ /* 2. Translation is on, so look up the SLB */
+ slb = slb_lookup(cpu, eaddr);
+ if (!slb) {
+ if (rwx == 2) {
+ cs->exception_index = POWERPC_EXCP_ISEG;
+ env->error_code = 0;
+ } else {
+ cs->exception_index = POWERPC_EXCP_DSEG;
+ env->error_code = 0;
+ env->spr[SPR_DAR] = eaddr;
+ }
+ return 1;
+ }
+
+skip_slb_search:
+
+ /* 3. Check for segment level no-execute violation */
+ if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) {
+ ppc_hash64_set_isi(cs, env, 0x10000000);
+ return 1;
+ }
+
+ /* 4. Locate the PTE in the hash table */
+ pte_offset = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte, &apshift);
+ if (pte_offset == -1) {
+ dsisr = 0x40000000;
+ if (rwx == 2) {
+ ppc_hash64_set_isi(cs, env, dsisr);
+ } else {
+ if (rwx == 1) {
+ dsisr |= 0x02000000;
+ }
+ ppc_hash64_set_dsi(cs, env, eaddr, dsisr);
+ }
+ return 1;
+ }
+ qemu_log_mask(CPU_LOG_MMU,
+ "found PTE at offset %08" HWADDR_PRIx "\n", pte_offset);
+
+ /* 5. Check access permissions */
+
+ pp_prot = ppc_hash64_pte_prot(cpu, slb, pte);
+ amr_prot = ppc_hash64_amr_prot(cpu, pte);
+ prot = pp_prot & amr_prot;
+
+ if ((need_prot[rwx] & ~prot) != 0) {
+ /* Access right violation */
+ qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
+ if (rwx == 2) {
+ ppc_hash64_set_isi(cs, env, 0x08000000);
+ } else {
+ dsisr = 0;
+ if (need_prot[rwx] & ~pp_prot) {
+ dsisr |= 0x08000000;
+ }
+ if (rwx == 1) {
+ dsisr |= 0x02000000;
+ }
+ if (need_prot[rwx] & ~amr_prot) {
+ dsisr |= 0x00200000;
+ }
+ ppc_hash64_set_dsi(cs, env, eaddr, dsisr);
+ }
+ return 1;
+ }
+
+ qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");
+
+ /* 6. Update PTE referenced and changed bits if necessary */
+
+ new_pte1 = pte.pte1 | HPTE64_R_R; /* set referenced bit */
+ if (rwx == 1) {
+ new_pte1 |= HPTE64_R_C; /* set changed (dirty) bit */
+ } else {
+ /* Treat the page as read-only for now, so that a later write
+ * will pass through this function again to set the C bit */
+ prot &= ~PAGE_WRITE;
+ }
+
+ if (new_pte1 != pte.pte1) {
+ ppc_hash64_store_hpte(cpu, pte_offset / HASH_PTE_SIZE_64,
+ pte.pte0, new_pte1);
+ }
+
+ /* 7. Determine the real address from the PTE */
+
+ raddr = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr);
+
+ tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
+ prot, mmu_idx, 1ULL << apshift);
+
+ return 0;
+}
+
+hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
+{
+ CPUPPCState *env = &cpu->env;
+ ppc_slb_t *slb;
+ hwaddr pte_offset, raddr;
+ ppc_hash_pte64_t pte;
+ unsigned apshift;
+
+ /* Handle real mode */
+ if (msr_dr == 0) {
+ /* In real mode the top 4 effective address bits are ignored */
+ raddr = addr & 0x0FFFFFFFFFFFFFFFULL;
+
+ /* In HV mode, add HRMOR if top EA bit is clear */
+ if ((msr_hv || !env->has_hv_mode) && !(addr >> 63)) {
+ return raddr | env->spr[SPR_HRMOR];
+ }
+
+ /* Otherwise, check VPM for RMA vs VRMA */
+ if (env->spr[SPR_LPCR] & LPCR_VPM0) {
+ slb = &env->vrma_slb;
+ if (!slb->sps) {
+ return -1;
+ }
+ } else if (raddr < env->rmls) {
+ /* RMA. Check bounds in RMLS */
+ return raddr | env->spr[SPR_RMOR];
+ } else {
+ return -1;
+ }
+ } else {
+ slb = slb_lookup(cpu, addr);
+ if (!slb) {
+ return -1;
+ }
+ }
+
+ pte_offset = ppc_hash64_htab_lookup(cpu, slb, addr, &pte, &apshift);
+ if (pte_offset == -1) {
+ return -1;
+ }
+
+ return deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, addr)
+ & TARGET_PAGE_MASK;
+}
+
+void ppc_hash64_store_hpte(PowerPCCPU *cpu,
+ target_ulong pte_index,
+ target_ulong pte0, target_ulong pte1)
+{
+ CPUPPCState *env = &cpu->env;
+
+ if (env->external_htab == MMU_HASH64_KVM_MANAGED_HPT) {
+ kvmppc_hash64_write_pte(env, pte_index, pte0, pte1);
+ return;
+ }
+
+ pte_index *= HASH_PTE_SIZE_64;
+ if (env->external_htab) {
+ stq_p(env->external_htab + pte_index, pte0);
+ stq_p(env->external_htab + pte_index + HASH_PTE_SIZE_64 / 2, pte1);
+ } else {
+ stq_phys(CPU(cpu)->as, env->htab_base + pte_index, pte0);
+ stq_phys(CPU(cpu)->as,
+ env->htab_base + pte_index + HASH_PTE_SIZE_64 / 2, pte1);
+ }
+}
+
+void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
+ target_ulong pte_index,
+ target_ulong pte0, target_ulong pte1)
+{
+ /*
+ * XXX: given the fact that there are too many segments to
+ * invalidate, and we still don't have a tlb_flush_mask(env, n,
+ * mask) in QEMU, we just invalidate all TLBs
+ */
+ cpu->env.tlb_need_flush = TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH;
+}
+
+void ppc_hash64_update_rmls(CPUPPCState *env)
+{
+ uint64_t lpcr = env->spr[SPR_LPCR];
+
+ /*
+ * This is the full 4-bit encoding of POWER8. Previous
+ * CPUs only support a subset of these, but the filtering
+ * is done when writing LPCR.
+ */
+ switch ((lpcr & LPCR_RMLS) >> LPCR_RMLS_SHIFT) {
+ case 0x8: /* 32MB */
+ env->rmls = 0x2000000ull;
+ break;
+ case 0x3: /* 64MB */
+ env->rmls = 0x4000000ull;
+ break;
+ case 0x7: /* 128MB */
+ env->rmls = 0x8000000ull;
+ break;
+ case 0x4: /* 256MB */
+ env->rmls = 0x10000000ull;
+ break;
+ case 0x2: /* 1GB */
+ env->rmls = 0x40000000ull;
+ break;
+ case 0x1: /* 16GB */
+ env->rmls = 0x400000000ull;
+ break;
+ default:
+ /* What to do here ??? */
+ env->rmls = 0;
+ }
+}
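A standalone sketch (not part of this patch) of how the decoded limit is used: in the real-mode paths above, a data access only falls in the RMA when the real address is below env->rmls. The decode mirrors the switch in ppc_hash64_update_rmls(); the RMLS encoding and address below are illustrative.

#include <stdint.h>
#include <stdio.h>

static uint64_t rmls_to_bytes(unsigned rmls_field)
{
    switch (rmls_field) {
    case 0x8: return 0x2000000ULL;    /*  32 MiB */
    case 0x3: return 0x4000000ULL;    /*  64 MiB */
    case 0x7: return 0x8000000ULL;    /* 128 MiB */
    case 0x4: return 0x10000000ULL;   /* 256 MiB */
    case 0x2: return 0x40000000ULL;   /*   1 GiB */
    case 0x1: return 0x400000000ULL;  /*  16 GiB */
    default:  return 0;               /* reserved encoding: no RMA */
    }
}

int main(void)
{
    uint64_t rmls  = rmls_to_bytes(0x2);   /* 1 GiB real-mode area */
    uint64_t raddr = 0x3ff00000ULL;        /* illustrative real-mode address */

    printf("RMA limit %llu MiB, access %s\n",
           (unsigned long long)(rmls >> 20),
           raddr < rmls ? "within the RMA" : "outside the RMA");
    return 0;
}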
+
+void ppc_hash64_update_vrma(CPUPPCState *env)
+{
+ const struct ppc_one_seg_page_size *sps = NULL;
+ target_ulong esid, vsid, lpcr;
+ ppc_slb_t *slb = &env->vrma_slb;
+ uint32_t vrmasd;
+ int i;
+
+ /* First clear it */
+ slb->esid = slb->vsid = 0;
+ slb->sps = NULL;
+
+ /* Is VRMA enabled ? */
+ lpcr = env->spr[SPR_LPCR];
+ if (!(lpcr & LPCR_VPM0)) {
+ return;
+ }
+
+ /* Make one up. Mostly ignore the ESID, which will not be
+ * needed for translation.
+ */
+ vsid = SLB_VSID_VRMA;
+ vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
+ vsid |= (vrmasd << 4) & (SLB_VSID_L | SLB_VSID_LP);
+ esid = SLB_ESID_V;
+
+ for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
+ const struct ppc_one_seg_page_size *sps1 = &env->sps.sps[i];
+
+ if (!sps1->page_shift) {
+ break;
+ }
+
+ if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
+ sps = sps1;
+ break;
+ }
+ }
+
+ if (!sps) {
+ error_report("Bad page size encoding esid 0x"TARGET_FMT_lx
+ " vsid 0x"TARGET_FMT_lx, esid, vsid);
+ return;
+ }
+
+ slb->vsid = vsid;
+ slb->esid = esid;
+ slb->sps = sps;
+}
+
+void helper_store_lpcr(CPUPPCState *env, target_ulong val)
+{
+ uint64_t lpcr = 0;
+
+ /* Filter out bits */
+ switch (env->mmu_model) {
+ case POWERPC_MMU_64B: /* 970 */
+ if (val & 0x40) {
+ lpcr |= LPCR_LPES0;
+ }
+ if (val & 0x8000000000000000ull) {
+ lpcr |= LPCR_LPES1;
+ }
+ if (val & 0x20) {
+ lpcr |= (0x4ull << LPCR_RMLS_SHIFT);
+ }
+ if (val & 0x4000000000000000ull) {
+ lpcr |= (0x2ull << LPCR_RMLS_SHIFT);
+ }
+ if (val & 0x2000000000000000ull) {
+ lpcr |= (0x1ull << LPCR_RMLS_SHIFT);
+ }
+ env->spr[SPR_RMOR] = ((lpcr >> 41) & 0xffffull) << 26;
+
+ /* XXX We could also write LPID from HID4 here
+ * but since we don't tag any translation on it
+ * it doesn't actually matter
+ */
+ /* XXX For proper emulation of 970 we also need
+ * to dig HRMOR out of HID5
+ */
+ break;
+ case POWERPC_MMU_2_03: /* P5p */
+ lpcr = val & (LPCR_RMLS | LPCR_ILE |
+ LPCR_LPES0 | LPCR_LPES1 |
+ LPCR_RMI | LPCR_HDICE);
+ break;
+ case POWERPC_MMU_2_06: /* P7 */
+ lpcr = val & (LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_DPFD |
+ LPCR_VRMASD | LPCR_RMLS | LPCR_ILE |
+ LPCR_P7_PECE0 | LPCR_P7_PECE1 | LPCR_P7_PECE2 |
+ LPCR_MER | LPCR_TC |
+ LPCR_LPES0 | LPCR_LPES1 | LPCR_HDICE);
+ break;
+ case POWERPC_MMU_2_07: /* P8 */
+ lpcr = val & (LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_KBV |
+ LPCR_DPFD | LPCR_VRMASD | LPCR_RMLS | LPCR_ILE |
+ LPCR_AIL | LPCR_ONL | LPCR_P8_PECE0 | LPCR_P8_PECE1 |
+ LPCR_P8_PECE2 | LPCR_P8_PECE3 | LPCR_P8_PECE4 |
+ LPCR_MER | LPCR_TC | LPCR_LPES0 | LPCR_HDICE);
+ break;
+ default:
+ ;
+ }
+ env->spr[SPR_LPCR] = lpcr;
+ ppc_hash64_update_rmls(env);
+ ppc_hash64_update_vrma(env);
+}
diff --git a/target/ppc/mmu-hash64.h b/target/ppc/mmu-hash64.h
new file mode 100644
index 0000000000..ab5d347a53
--- /dev/null
+++ b/target/ppc/mmu-hash64.h
@@ -0,0 +1,136 @@
+#ifndef MMU_HASH64_H
+#define MMU_HASH64_H
+
+#ifndef CONFIG_USER_ONLY
+
+#ifdef TARGET_PPC64
+void dump_slb(FILE *f, fprintf_function cpu_fprintf, PowerPCCPU *cpu);
+int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
+ target_ulong esid, target_ulong vsid);
+hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr);
+int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr address, int rw,
+ int mmu_idx);
+void ppc_hash64_store_hpte(PowerPCCPU *cpu, target_ulong index,
+ target_ulong pte0, target_ulong pte1);
+void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
+ target_ulong pte_index,
+ target_ulong pte0, target_ulong pte1);
+unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
+ uint64_t pte0, uint64_t pte1);
+void ppc_hash64_update_vrma(CPUPPCState *env);
+void ppc_hash64_update_rmls(CPUPPCState *env);
+#endif
+
+/*
+ * SLB definitions
+ */
+
+/* Bits in the SLB ESID word */
+#define SLB_ESID_ESID 0xFFFFFFFFF0000000ULL
+#define SLB_ESID_V 0x0000000008000000ULL /* valid */
+
+/* Bits in the SLB VSID word */
+#define SLB_VSID_SHIFT 12
+#define SLB_VSID_SHIFT_1T 24
+#define SLB_VSID_SSIZE_SHIFT 62
+#define SLB_VSID_B 0xc000000000000000ULL
+#define SLB_VSID_B_256M 0x0000000000000000ULL
+#define SLB_VSID_B_1T 0x4000000000000000ULL
+#define SLB_VSID_VSID 0x3FFFFFFFFFFFF000ULL
+#define SLB_VSID_VRMA (0x0001FFFFFF000000ULL | SLB_VSID_B_1T)
+#define SLB_VSID_PTEM (SLB_VSID_B | SLB_VSID_VSID)
+#define SLB_VSID_KS 0x0000000000000800ULL
+#define SLB_VSID_KP 0x0000000000000400ULL
+#define SLB_VSID_N 0x0000000000000200ULL /* no-execute */
+#define SLB_VSID_L 0x0000000000000100ULL
+#define SLB_VSID_C 0x0000000000000080ULL /* class */
+#define SLB_VSID_LP 0x0000000000000030ULL
+#define SLB_VSID_ATTR 0x0000000000000FFFULL
+#define SLB_VSID_LLP_MASK (SLB_VSID_L | SLB_VSID_LP)
+#define SLB_VSID_4K 0x0000000000000000ULL
+#define SLB_VSID_64K 0x0000000000000110ULL
+#define SLB_VSID_16M 0x0000000000000100ULL
+#define SLB_VSID_16G 0x0000000000000120ULL
+
+/*
+ * Hash page table definitions
+ */
+
+#define HPTES_PER_GROUP 8
+#define HASH_PTE_SIZE_64 16
+#define HASH_PTEG_SIZE_64 (HASH_PTE_SIZE_64 * HPTES_PER_GROUP)
+
+#define HPTE64_V_SSIZE_SHIFT 62
+#define HPTE64_V_AVPN_SHIFT 7
+#define HPTE64_V_AVPN 0x3fffffffffffff80ULL
+#define HPTE64_V_AVPN_VAL(x) (((x) & HPTE64_V_AVPN) >> HPTE64_V_AVPN_SHIFT)
+#define HPTE64_V_COMPARE(x, y) (!(((x) ^ (y)) & 0xffffffffffffff83ULL))
+#define HPTE64_V_LARGE 0x0000000000000004ULL
+#define HPTE64_V_SECONDARY 0x0000000000000002ULL
+#define HPTE64_V_VALID 0x0000000000000001ULL
+
+#define HPTE64_R_PP0 0x8000000000000000ULL
+#define HPTE64_R_TS 0x4000000000000000ULL
+#define HPTE64_R_KEY_HI 0x3000000000000000ULL
+#define HPTE64_R_RPN_SHIFT 12
+#define HPTE64_R_RPN 0x0ffffffffffff000ULL
+#define HPTE64_R_FLAGS 0x00000000000003ffULL
+#define HPTE64_R_PP 0x0000000000000003ULL
+#define HPTE64_R_N 0x0000000000000004ULL
+#define HPTE64_R_G 0x0000000000000008ULL
+#define HPTE64_R_M 0x0000000000000010ULL
+#define HPTE64_R_I 0x0000000000000020ULL
+#define HPTE64_R_W 0x0000000000000040ULL
+#define HPTE64_R_WIMG 0x0000000000000078ULL
+#define HPTE64_R_C 0x0000000000000080ULL
+#define HPTE64_R_R 0x0000000000000100ULL
+#define HPTE64_R_KEY_LO 0x0000000000000e00ULL
+#define HPTE64_R_KEY(x) ((((x) & HPTE64_R_KEY_HI) >> 60) | \
+ (((x) & HPTE64_R_KEY_LO) >> 9))
+
+#define HPTE64_V_1TB_SEG 0x4000000000000000ULL
+#define HPTE64_V_VRMA_MASK 0x4001ffffff000000ULL
+
+void ppc_hash64_set_sdr1(PowerPCCPU *cpu, target_ulong value,
+ Error **errp);
+void ppc_hash64_set_external_hpt(PowerPCCPU *cpu, void *hpt, int shift,
+ Error **errp);
+
+uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index);
+void ppc_hash64_stop_access(PowerPCCPU *cpu, uint64_t token);
+
+static inline target_ulong ppc_hash64_load_hpte0(PowerPCCPU *cpu,
+ uint64_t token, int index)
+{
+ CPUPPCState *env = &cpu->env;
+ uint64_t addr;
+
+ addr = token + (index * HASH_PTE_SIZE_64);
+ if (env->external_htab) {
+ return ldq_p((const void *)(uintptr_t)addr);
+ } else {
+ return ldq_phys(CPU(cpu)->as, addr);
+ }
+}
+
+static inline target_ulong ppc_hash64_load_hpte1(PowerPCCPU *cpu,
+ uint64_t token, int index)
+{
+ CPUPPCState *env = &cpu->env;
+ uint64_t addr;
+
+ addr = token + (index * HASH_PTE_SIZE_64) + HASH_PTE_SIZE_64/2;
+ if (env->external_htab) {
+ return ldq_p((const void *)(uintptr_t)addr);
+ } else {
+ return ldq_phys(CPU(cpu)->as, addr);
+ }
+}
+
+typedef struct {
+ uint64_t pte0, pte1;
+} ppc_hash_pte64_t;
+
+#endif /* CONFIG_USER_ONLY */
+
+#endif /* MMU_HASH64_H */
diff --git a/target/ppc/mmu_helper.c b/target/ppc/mmu_helper.c
new file mode 100644
index 0000000000..d09fc0a85f
--- /dev/null
+++ b/target/ppc/mmu_helper.c
@@ -0,0 +1,2907 @@
+/*
+ * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "sysemu/kvm.h"
+#include "kvm_ppc.h"
+#include "mmu-hash64.h"
+#include "mmu-hash32.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "exec/log.h"
+#include "helper_regs.h"
+
+//#define DEBUG_MMU
+//#define DEBUG_BATS
+//#define DEBUG_SOFTWARE_TLB
+//#define DUMP_PAGE_TABLES
+//#define FLUSH_ALL_TLBS
+
+#ifdef DEBUG_MMU
+# define LOG_MMU_STATE(cpu) log_cpu_state_mask(CPU_LOG_MMU, (cpu), 0)
+#else
+# define LOG_MMU_STATE(cpu) do { } while (0)
+#endif
+
+#ifdef DEBUG_SOFTWARE_TLB
+# define LOG_SWTLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
+#else
+# define LOG_SWTLB(...) do { } while (0)
+#endif
+
+#ifdef DEBUG_BATS
+# define LOG_BATS(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
+#else
+# define LOG_BATS(...) do { } while (0)
+#endif
+
+/*****************************************************************************/
+/* PowerPC MMU emulation */
+
+/* Context used internally during MMU translations */
+typedef struct mmu_ctx_t mmu_ctx_t;
+struct mmu_ctx_t {
+ hwaddr raddr; /* Real address */
+ hwaddr eaddr; /* Effective address */
+ int prot; /* Protection bits */
+ hwaddr hash[2]; /* Pagetable hash values */
+ target_ulong ptem; /* Virtual segment ID | API */
+ int key; /* Access key */
+ int nx; /* Non-execute area */
+};
+
+/* Common routines used by software and hardware TLBs emulation */
+static inline int pte_is_valid(target_ulong pte0)
+{
+ return pte0 & 0x80000000 ? 1 : 0;
+}
+
+static inline void pte_invalidate(target_ulong *pte0)
+{
+ *pte0 &= ~0x80000000;
+}
+
+#define PTE_PTEM_MASK 0x7FFFFFBF
+#define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B)
+
+static int pp_check(int key, int pp, int nx)
+{
+ int access;
+
+ /* Compute access rights */
+ access = 0;
+ if (key == 0) {
+ switch (pp) {
+ case 0x0:
+ case 0x1:
+ case 0x2:
+ access |= PAGE_WRITE;
+ /* No break here */
+ case 0x3:
+ access |= PAGE_READ;
+ break;
+ }
+ } else {
+ switch (pp) {
+ case 0x0:
+ access = 0;
+ break;
+ case 0x1:
+ case 0x3:
+ access = PAGE_READ;
+ break;
+ case 0x2:
+ access = PAGE_READ | PAGE_WRITE;
+ break;
+ }
+ }
+ if (nx == 0) {
+ access |= PAGE_EXEC;
+ }
+
+ return access;
+}
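A self-contained sketch (not part of this patch) of the PP decode performed by pp_check() above: key selects the effective protection view (derived from the Ks/Kp segment bits), pp is the 2-bit page-protection field and nx strips execute permission. The PAGE_* constants are defined locally so the example compiles on its own.

#include <stdio.h>

#define PAGE_READ  0x1
#define PAGE_WRITE 0x2
#define PAGE_EXEC  0x4

static int pp_check_sketch(int key, int pp, int nx)
{
    int access = 0;

    if (key == 0) {
        /* pp 0..2 allow write, pp 3 is read-only */
        if (pp != 0x3) {
            access |= PAGE_WRITE;
        }
        access |= PAGE_READ;
    } else {
        switch (pp) {
        case 0x0:
            access = 0;
            break;
        case 0x1:
        case 0x3:
            access = PAGE_READ;
            break;
        case 0x2:
            access = PAGE_READ | PAGE_WRITE;
            break;
        }
    }
    if (nx == 0) {
        access |= PAGE_EXEC;
    }
    return access;
}

int main(void)
{
    for (int key = 0; key <= 1; key++) {
        for (int pp = 0; pp <= 3; pp++) {
            printf("key=%d pp=%d -> prot 0x%x\n", key, pp,
                   pp_check_sketch(key, pp, 0));
        }
    }
    return 0;
}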
+
+static int check_prot(int prot, int rw, int access_type)
+{
+ int ret;
+
+ if (access_type == ACCESS_CODE) {
+ if (prot & PAGE_EXEC) {
+ ret = 0;
+ } else {
+ ret = -2;
+ }
+ } else if (rw) {
+ if (prot & PAGE_WRITE) {
+ ret = 0;
+ } else {
+ ret = -2;
+ }
+ } else {
+ if (prot & PAGE_READ) {
+ ret = 0;
+ } else {
+ ret = -2;
+ }
+ }
+
+ return ret;
+}
+
+static inline int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0,
+ target_ulong pte1, int h, int rw, int type)
+{
+ target_ulong ptem, mmask;
+ int access, ret, pteh, ptev, pp;
+
+ ret = -1;
+ /* Check validity and table match */
+ ptev = pte_is_valid(pte0);
+ pteh = (pte0 >> 6) & 1;
+ if (ptev && h == pteh) {
+ /* Check vsid & api */
+ ptem = pte0 & PTE_PTEM_MASK;
+ mmask = PTE_CHECK_MASK;
+ pp = pte1 & 0x00000003;
+ if (ptem == ctx->ptem) {
+ if (ctx->raddr != (hwaddr)-1ULL) {
+ /* all matches should have equal RPN, WIMG & PP */
+ if ((ctx->raddr & mmask) != (pte1 & mmask)) {
+ qemu_log_mask(CPU_LOG_MMU, "Bad RPN/WIMG/PP\n");
+ return -3;
+ }
+ }
+ /* Compute access rights */
+ access = pp_check(ctx->key, pp, ctx->nx);
+ /* Keep the matching PTE information */
+ ctx->raddr = pte1;
+ ctx->prot = access;
+ ret = check_prot(ctx->prot, rw, type);
+ if (ret == 0) {
+ /* Access granted */
+ qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");
+ } else {
+ /* Access right violation */
+ qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int pte_update_flags(mmu_ctx_t *ctx, target_ulong *pte1p,
+ int ret, int rw)
+{
+ int store = 0;
+
+ /* Update page flags */
+ if (!(*pte1p & 0x00000100)) {
+ /* Update accessed flag */
+ *pte1p |= 0x00000100;
+ store = 1;
+ }
+ if (!(*pte1p & 0x00000080)) {
+ if (rw == 1 && ret == 0) {
+ /* Update changed flag */
+ *pte1p |= 0x00000080;
+ store = 1;
+ } else {
+ /* Force page fault for first write access */
+ ctx->prot &= ~PAGE_WRITE;
+ }
+ }
+
+ return store;
+}
+
+/* Software driven TLB helpers */
+static inline int ppc6xx_tlb_getnum(CPUPPCState *env, target_ulong eaddr,
+ int way, int is_code)
+{
+ int nr;
+
+ /* Select TLB num in a way from address */
+ nr = (eaddr >> TARGET_PAGE_BITS) & (env->tlb_per_way - 1);
+ /* Select TLB way */
+ nr += env->tlb_per_way * way;
+ /* 6xx have separate TLBs for instructions and data */
+ if (is_code && env->id_tlbs == 1) {
+ nr += env->nb_tlb;
+ }
+
+ return nr;
+}
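A standalone sketch (not part of this patch) of the indexing done by ppc6xx_tlb_getnum(): each way is a direct-mapped array of tlb_per_way entries indexed by the low bits of the virtual page number, and the instruction entries follow the data entries when the core has split I/D TLBs. The 2-way, 32-entries-per-way geometry below is only an illustration.

#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 12

static int tlb_getnum(uint64_t eaddr, int way, int is_code,
                      int tlb_per_way, int nb_tlb, int split_id_tlbs)
{
    int nr = (eaddr >> TARGET_PAGE_BITS) & (tlb_per_way - 1);

    nr += tlb_per_way * way;
    if (is_code && split_id_tlbs) {
        nr += nb_tlb;              /* ITLB entries follow the DTLB ones */
    }
    return nr;
}

int main(void)
{
    uint64_t eaddr = 0x00345000;   /* virtual page number 0x345 */

    /* 64 data entries organised as 2 ways of 32 entries, split I/D */
    printf("data way 0 -> %d\n", tlb_getnum(eaddr, 0, 0, 32, 64, 1)); /* 5 */
    printf("data way 1 -> %d\n", tlb_getnum(eaddr, 1, 0, 32, 64, 1)); /* 37 */
    printf("code way 0 -> %d\n", tlb_getnum(eaddr, 0, 1, 32, 64, 1)); /* 69 */
    return 0;
}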
+
+static inline void ppc6xx_tlb_invalidate_all(CPUPPCState *env)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ ppc6xx_tlb_t *tlb;
+ int nr, max;
+
+ /* LOG_SWTLB("Invalidate all TLBs\n"); */
+ /* Invalidate all defined software TLB */
+ max = env->nb_tlb;
+ if (env->id_tlbs == 1) {
+ max *= 2;
+ }
+ for (nr = 0; nr < max; nr++) {
+ tlb = &env->tlb.tlb6[nr];
+ pte_invalidate(&tlb->pte0);
+ }
+ tlb_flush(CPU(cpu), 1);
+}
+
+static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState *env,
+ target_ulong eaddr,
+ int is_code, int match_epn)
+{
+#if !defined(FLUSH_ALL_TLBS)
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
+ ppc6xx_tlb_t *tlb;
+ int way, nr;
+
+ /* Invalidate ITLB + DTLB, all ways */
+ for (way = 0; way < env->nb_ways; way++) {
+ nr = ppc6xx_tlb_getnum(env, eaddr, way, is_code);
+ tlb = &env->tlb.tlb6[nr];
+ if (pte_is_valid(tlb->pte0) && (match_epn == 0 || eaddr == tlb->EPN)) {
+ LOG_SWTLB("TLB invalidate %d/%d " TARGET_FMT_lx "\n", nr,
+ env->nb_tlb, eaddr);
+ pte_invalidate(&tlb->pte0);
+ tlb_flush_page(cs, tlb->EPN);
+ }
+ }
+#else
+ /* XXX: PowerPC specification says this is valid as well */
+ ppc6xx_tlb_invalidate_all(env);
+#endif
+}
+
+static inline void ppc6xx_tlb_invalidate_virt(CPUPPCState *env,
+ target_ulong eaddr, int is_code)
+{
+ ppc6xx_tlb_invalidate_virt2(env, eaddr, is_code, 0);
+}
+
+static void ppc6xx_tlb_store(CPUPPCState *env, target_ulong EPN, int way,
+ int is_code, target_ulong pte0, target_ulong pte1)
+{
+ ppc6xx_tlb_t *tlb;
+ int nr;
+
+ nr = ppc6xx_tlb_getnum(env, EPN, way, is_code);
+ tlb = &env->tlb.tlb6[nr];
+ LOG_SWTLB("Set TLB %d/%d EPN " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
+ " PTE1 " TARGET_FMT_lx "\n", nr, env->nb_tlb, EPN, pte0, pte1);
+ /* Invalidate any pending reference in QEMU for this virtual address */
+ ppc6xx_tlb_invalidate_virt2(env, EPN, is_code, 1);
+ tlb->pte0 = pte0;
+ tlb->pte1 = pte1;
+ tlb->EPN = EPN;
+ /* Store last way for LRU mechanism */
+ env->last_way = way;
+}
+
+static inline int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx,
+ target_ulong eaddr, int rw, int access_type)
+{
+ ppc6xx_tlb_t *tlb;
+ int nr, best, way;
+ int ret;
+
+ best = -1;
+ ret = -1; /* No TLB found */
+ for (way = 0; way < env->nb_ways; way++) {
+ nr = ppc6xx_tlb_getnum(env, eaddr, way,
+ access_type == ACCESS_CODE ? 1 : 0);
+ tlb = &env->tlb.tlb6[nr];
+ /* This test "emulates" the PTE index match for hardware TLBs */
+ if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) {
+ LOG_SWTLB("TLB %d/%d %s [" TARGET_FMT_lx " " TARGET_FMT_lx
+ "] <> " TARGET_FMT_lx "\n", nr, env->nb_tlb,
+ pte_is_valid(tlb->pte0) ? "valid" : "inval",
+ tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr);
+ continue;
+ }
+ LOG_SWTLB("TLB %d/%d %s " TARGET_FMT_lx " <> " TARGET_FMT_lx " "
+ TARGET_FMT_lx " %c %c\n", nr, env->nb_tlb,
+ pte_is_valid(tlb->pte0) ? "valid" : "inval",
+ tlb->EPN, eaddr, tlb->pte1,
+ rw ? 'S' : 'L', access_type == ACCESS_CODE ? 'I' : 'D');
+ switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1, 0, rw, access_type)) {
+ case -3:
+ /* TLB inconsistency */
+ return -1;
+ case -2:
+ /* Access violation */
+ ret = -2;
+ best = nr;
+ break;
+ case -1:
+ default:
+ /* No match */
+ break;
+ case 0:
+ /* access granted */
+ /* XXX: we should go on looping to check all TLBs for consistency,
+ * but we can speed up the whole thing since the
+ * result would be undefined if the TLBs are not consistent.
+ */
+ ret = 0;
+ best = nr;
+ goto done;
+ }
+ }
+ if (best != -1) {
+ done:
+ LOG_SWTLB("found TLB at addr " TARGET_FMT_plx " prot=%01x ret=%d\n",
+ ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret);
+ /* Update page flags */
+ pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, rw);
+ }
+
+ return ret;
+}
+
+/* Perform BAT hit & translation */
+static inline void bat_size_prot(CPUPPCState *env, target_ulong *blp,
+ int *validp, int *protp, target_ulong *BATu,
+ target_ulong *BATl)
+{
+ target_ulong bl;
+ int pp, valid, prot;
+
+ bl = (*BATu & 0x00001FFC) << 15;
+ valid = 0;
+ prot = 0;
+ if (((msr_pr == 0) && (*BATu & 0x00000002)) ||
+ ((msr_pr != 0) && (*BATu & 0x00000001))) {
+ valid = 1;
+ pp = *BATl & 0x00000003;
+ if (pp != 0) {
+ prot = PAGE_READ | PAGE_EXEC;
+ if (pp == 0x2) {
+ prot |= PAGE_WRITE;
+ }
+ }
+ }
+ *blp = bl;
+ *validp = valid;
+ *protp = prot;
+}
+
+static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
+ target_ulong virtual, int rw, int type)
+{
+ target_ulong *BATlt, *BATut, *BATu, *BATl;
+ target_ulong BEPIl, BEPIu, bl;
+ int i, valid, prot;
+ int ret = -1;
+
+ LOG_BATS("%s: %cBAT v " TARGET_FMT_lx "\n", __func__,
+ type == ACCESS_CODE ? 'I' : 'D', virtual);
+ switch (type) {
+ case ACCESS_CODE:
+ BATlt = env->IBAT[1];
+ BATut = env->IBAT[0];
+ break;
+ default:
+ BATlt = env->DBAT[1];
+ BATut = env->DBAT[0];
+ break;
+ }
+ for (i = 0; i < env->nb_BATs; i++) {
+ BATu = &BATut[i];
+ BATl = &BATlt[i];
+ BEPIu = *BATu & 0xF0000000;
+ BEPIl = *BATu & 0x0FFE0000;
+ bat_size_prot(env, &bl, &valid, &prot, BATu, BATl);
+ LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
+ " BATl " TARGET_FMT_lx "\n", __func__,
+ type == ACCESS_CODE ? 'I' : 'D', i, virtual, *BATu, *BATl);
+ if ((virtual & 0xF0000000) == BEPIu &&
+ ((virtual & 0x0FFE0000) & ~bl) == BEPIl) {
+ /* BAT matches */
+ if (valid != 0) {
+ /* Get physical address */
+ ctx->raddr = (*BATl & 0xF0000000) |
+ ((virtual & 0x0FFE0000 & bl) | (*BATl & 0x0FFE0000)) |
+ (virtual & 0x0001F000);
+ /* Compute access rights */
+ ctx->prot = prot;
+ ret = check_prot(ctx->prot, rw, type);
+ if (ret == 0) {
+ LOG_BATS("BAT %d match: r " TARGET_FMT_plx " prot=%c%c\n",
+ i, ctx->raddr, ctx->prot & PAGE_READ ? 'R' : '-',
+ ctx->prot & PAGE_WRITE ? 'W' : '-');
+ }
+ break;
+ }
+ }
+ }
+ if (ret < 0) {
+#if defined(DEBUG_BATS)
+ if (qemu_log_enabled()) {
+ LOG_BATS("no BAT match for " TARGET_FMT_lx ":\n", virtual);
+ for (i = 0; i < 4; i++) {
+ BATu = &BATut[i];
+ BATl = &BATlt[i];
+ BEPIu = *BATu & 0xF0000000;
+ BEPIl = *BATu & 0x0FFE0000;
+ bl = (*BATu & 0x00001FFC) << 15;
+ LOG_BATS("%s: %cBAT%d v " TARGET_FMT_lx " BATu " TARGET_FMT_lx
+ " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
+ TARGET_FMT_lx " " TARGET_FMT_lx "\n",
+ __func__, type == ACCESS_CODE ? 'I' : 'D', i, virtual,
+ *BATu, *BATl, BEPIu, BEPIl, bl);
+ }
+ }
+#endif
+ }
+ /* No hit */
+ return ret;
+}
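A standalone sketch (not part of this patch) of the BAT hit test and address composition above. The validity/protection checks are omitted, the low 12 bits of the page offset are left to the TLB code (as in get_bat_6xx_tlb(), which only produces a page-granular address), and the BAT values are illustrative: an 8 MiB block mapping EA 0x90000000 onto PA 0x20000000.

#include <stdint.h>
#include <stdio.h>

static int bat_translate(uint32_t ea, uint32_t batu, uint32_t batl, uint32_t *pa)
{
    uint32_t bl    = (batu & 0x00001FFC) << 15;   /* block length mask, EA bits 17..27 */
    uint32_t bepiu = batu & 0xF0000000;
    uint32_t bepil = batu & 0x0FFE0000;

    if ((ea & 0xF0000000) != bepiu ||
        ((ea & 0x0FFE0000) & ~bl) != bepil) {
        return -1;                                /* no BAT hit */
    }
    /* low 12 bits (page offset) are supplied later by tlb_set_page() */
    *pa = (batl & 0xF0000000) |
          ((ea & 0x0FFE0000 & bl) | (batl & 0x0FFE0000)) |
          (ea & 0x0001F000);
    return 0;
}

int main(void)
{
    uint32_t batu = 0x900000FE;   /* BEPI=0x9000_0000, BL=8 MiB, Vs=1 */
    uint32_t batl = 0x20000002;   /* BRPN=0x2000_0000, PP=2 (read/write) */
    uint32_t pa;

    if (bat_translate(0x90123456, batu, batl, &pa) == 0) {
        printf("PA (page granular) = 0x%08x\n", pa);   /* 0x20123000 */
    }
    return 0;
}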
+
+/* Perform segment based translation */
+static inline int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx,
+ target_ulong eaddr, int rw, int type)
+{
+ hwaddr hash;
+ target_ulong vsid;
+ int ds, pr, target_page_bits;
+ int ret;
+ target_ulong sr, pgidx;
+
+ pr = msr_pr;
+ ctx->eaddr = eaddr;
+
+ sr = env->sr[eaddr >> 28];
+ ctx->key = (((sr & 0x20000000) && (pr != 0)) ||
+ ((sr & 0x40000000) && (pr == 0))) ? 1 : 0;
+ ds = sr & 0x80000000 ? 1 : 0;
+ ctx->nx = sr & 0x10000000 ? 1 : 0;
+ vsid = sr & 0x00FFFFFF;
+ target_page_bits = TARGET_PAGE_BITS;
+ qemu_log_mask(CPU_LOG_MMU,
+ "Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx
+ " nip=" TARGET_FMT_lx " lr=" TARGET_FMT_lx
+ " ir=%d dr=%d pr=%d %d t=%d\n",
+ eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, (int)msr_ir,
+ (int)msr_dr, pr != 0 ? 1 : 0, rw, type);
+ pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits;
+ hash = vsid ^ pgidx;
+ ctx->ptem = (vsid << 7) | (pgidx >> 10);
+
+ qemu_log_mask(CPU_LOG_MMU,
+ "pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx "\n",
+ ctx->key, ds, ctx->nx, vsid);
+ ret = -1;
+ if (!ds) {
+ /* Check if instruction fetch is allowed, if needed */
+ if (type != ACCESS_CODE || ctx->nx == 0) {
+ /* Page address translation */
+ qemu_log_mask(CPU_LOG_MMU, "htab_base " TARGET_FMT_plx
+ " htab_mask " TARGET_FMT_plx
+ " hash " TARGET_FMT_plx "\n",
+ env->htab_base, env->htab_mask, hash);
+ ctx->hash[0] = hash;
+ ctx->hash[1] = ~hash;
+
+ /* Initialize real address with an invalid value */
+ ctx->raddr = (hwaddr)-1ULL;
+ /* Software TLB search */
+ ret = ppc6xx_tlb_check(env, ctx, eaddr, rw, type);
+#if defined(DUMP_PAGE_TABLES)
+ if (qemu_loglevel_mask(CPU_LOG_MMU)) {
+ CPUState *cs = ENV_GET_CPU(env);
+ hwaddr curaddr;
+ uint32_t a0, a1, a2, a3;
+
+ qemu_log("Page table: " TARGET_FMT_plx " len " TARGET_FMT_plx
+ "\n", env->htab_base, env->htab_mask + 0x80);
+ for (curaddr = env->htab_base;
+ curaddr < (env->htab_base + env->htab_mask + 0x80);
+ curaddr += 16) {
+ a0 = ldl_phys(cs->as, curaddr);
+ a1 = ldl_phys(cs->as, curaddr + 4);
+ a2 = ldl_phys(cs->as, curaddr + 8);
+ a3 = ldl_phys(cs->as, curaddr + 12);
+ if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) {
+ qemu_log(TARGET_FMT_plx ": %08x %08x %08x %08x\n",
+ curaddr, a0, a1, a2, a3);
+ }
+ }
+ }
+#endif
+ } else {
+ qemu_log_mask(CPU_LOG_MMU, "No access allowed\n");
+ ret = -3;
+ }
+ } else {
+ target_ulong sr;
+
+ qemu_log_mask(CPU_LOG_MMU, "direct store...\n");
+ /* Direct-store segment : absolutely *BUGGY* for now */
+
+ /* Direct-store implies a 32-bit MMU.
+ * Check the Segment Register's bus unit ID (BUID).
+ */
+ sr = env->sr[eaddr >> 28];
+ if ((sr & 0x1FF00000) >> 20 == 0x07f) {
+ /* Memory-forced I/O controller interface access */
+ /* If T=1 and BUID=x'07F', the 601 performs a memory access
+ * to SR[28-31] LA[4-31], bypassing all protection mechanisms.
+ */
+ ctx->raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF);
+ ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return 0;
+ }
+
+ switch (type) {
+ case ACCESS_INT:
+ /* Integer load/store : only access allowed */
+ break;
+ case ACCESS_CODE:
+ /* No code fetch is allowed in direct-store areas */
+ return -4;
+ case ACCESS_FLOAT:
+ /* Floating point load/store */
+ return -4;
+ case ACCESS_RES:
+ /* lwarx, ldarx or stwcx. */
+ return -4;
+ case ACCESS_CACHE:
+ /* dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi */
+ /* Should make the instruction do a no-op.
+ * As it already does a no-op, it's quite easy :-)
+ */
+ ctx->raddr = eaddr;
+ return 0;
+ case ACCESS_EXT:
+ /* eciwx or ecowx */
+ return -4;
+ default:
+ qemu_log_mask(CPU_LOG_MMU, "ERROR: instruction should not need "
+ "address translation\n");
+ return -4;
+ }
+ if ((rw == 1 || ctx->key != 1) && (rw == 0 || ctx->key != 0)) {
+ ctx->raddr = eaddr;
+ ret = 2;
+ } else {
+ ret = -2;
+ }
+ }
+
+ return ret;
+}
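A standalone sketch (not part of this patch) of the 32-bit hashed-page-table arithmetic used above: the primary hash is the VSID xor'ed with the virtual page index, the secondary hash is its complement, and PTEM packs the VSID with the abbreviated page index for PTE comparison. The segment register value and effective address below are illustrative.

#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 12
#define SEGMENT_MASK_256M 0xF0000000UL

int main(void)
{
    uint32_t eaddr = 0xdeadb000;                 /* illustrative EA */
    uint32_t sr    = 0x00123456;                 /* SR: T=Ks=Kp=N=0, VSID=0x123456 */

    uint32_t vsid  = sr & 0x00FFFFFF;
    uint32_t pgidx = (eaddr & ~SEGMENT_MASK_256M) >> TARGET_PAGE_BITS;
    uint32_t hash  = vsid ^ pgidx;
    uint32_t ptem  = (vsid << 7) | (pgidx >> 10);

    printf("vsid=0x%06x pgidx=0x%04x\n", vsid, pgidx);
    printf("primary hash   = 0x%08x\n", hash);
    printf("secondary hash = 0x%08x\n", (uint32_t)~hash);
    printf("ptem           = 0x%08x\n", ptem);
    return 0;
}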
+
+/* Generic TLB check function for embedded PowerPC implementations */
+static int ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb,
+ hwaddr *raddrp,
+ target_ulong address, uint32_t pid, int ext,
+ int i)
+{
+ target_ulong mask;
+
+ /* Check valid flag */
+ if (!(tlb->prot & PAGE_VALID)) {
+ return -1;
+ }
+ mask = ~(tlb->size - 1);
+ LOG_SWTLB("%s: TLB %d address " TARGET_FMT_lx " PID %u <=> " TARGET_FMT_lx
+ " " TARGET_FMT_lx " %u %x\n", __func__, i, address, pid, tlb->EPN,
+ mask, (uint32_t)tlb->PID, tlb->prot);
+ /* Check PID */
+ if (tlb->PID != 0 && tlb->PID != pid) {
+ return -1;
+ }
+ /* Check effective address */
+ if ((address & mask) != tlb->EPN) {
+ return -1;
+ }
+ *raddrp = (tlb->RPN & mask) | (address & ~mask);
+ if (ext) {
+ /* Extend the physical address to 36 bits */
+ *raddrp |= (uint64_t)(tlb->RPN & 0xF) << 32;
+ }
+
+ return 0;
+}
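A standalone sketch (not part of this patch) of the match performed by ppcemb_tlb_check(): an entry of power-of-two size matches when the PID agrees (PID 0 acts as a wildcard) and the effective address falls inside the entry, and the real address keeps the offset bits below the entry size. The valid-bit check and the optional 36-bit extension are omitted, and the structure below is a simplified stand-in for ppcemb_tlb_t.

#include <stdint.h>
#include <stdio.h>

struct emb_tlb {
    uint64_t epn;    /* entry's effective address base, size-aligned */
    uint64_t rpn;    /* real address base */
    uint64_t size;   /* entry size in bytes, power of two */
    uint32_t pid;
};

static int emb_tlb_match(const struct emb_tlb *tlb, uint64_t ea,
                         uint32_t pid, uint64_t *raddr)
{
    uint64_t mask = ~(tlb->size - 1);

    if (tlb->pid != 0 && tlb->pid != pid) {
        return -1;                          /* wrong address space */
    }
    if ((ea & mask) != tlb->epn) {
        return -1;                          /* outside this entry */
    }
    *raddr = (tlb->rpn & mask) | (ea & ~mask);
    return 0;
}

int main(void)
{
    struct emb_tlb tlb = { 0x10000000, 0xc0000000, 0x01000000, 5 }; /* 16 MiB entry */
    uint64_t raddr;

    if (emb_tlb_match(&tlb, 0x10abcdef, 5, &raddr) == 0) {
        printf("raddr = 0x%08llx\n", (unsigned long long)raddr);    /* 0xc0abcdef */
    }
    return 0;
}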
+
+/* Generic TLB search function for PowerPC embedded implementations */
+static int ppcemb_tlb_search(CPUPPCState *env, target_ulong address,
+ uint32_t pid)
+{
+ ppcemb_tlb_t *tlb;
+ hwaddr raddr;
+ int i, ret;
+
+ /* Default return value is no match */
+ ret = -1;
+ for (i = 0; i < env->nb_tlb; i++) {
+ tlb = &env->tlb.tlbe[i];
+ if (ppcemb_tlb_check(env, tlb, &raddr, address, pid, 0, i) == 0) {
+ ret = i;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/* Helpers specific to PowerPC 40x implementations */
+static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ ppcemb_tlb_t *tlb;
+ int i;
+
+ for (i = 0; i < env->nb_tlb; i++) {
+ tlb = &env->tlb.tlbe[i];
+ tlb->prot &= ~PAGE_VALID;
+ }
+ tlb_flush(CPU(cpu), 1);
+}
+
+static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
+ target_ulong address, int rw,
+ int access_type)
+{
+ ppcemb_tlb_t *tlb;
+ hwaddr raddr;
+ int i, ret, zsel, zpr, pr;
+
+ ret = -1;
+ raddr = (hwaddr)-1ULL;
+ pr = msr_pr;
+ for (i = 0; i < env->nb_tlb; i++) {
+ tlb = &env->tlb.tlbe[i];
+ if (ppcemb_tlb_check(env, tlb, &raddr, address,
+ env->spr[SPR_40x_PID], 0, i) < 0) {
+ continue;
+ }
+ zsel = (tlb->attr >> 4) & 0xF;
+ zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3;
+ LOG_SWTLB("%s: TLB %d zsel %d zpr %d rw %d attr %08x\n",
+ __func__, i, zsel, zpr, rw, tlb->attr);
+ /* Check execute enable bit */
+ switch (zpr) {
+ case 0x2:
+ if (pr != 0) {
+ goto check_perms;
+ }
+ /* No break here */
+ case 0x3:
+ /* All accesses granted */
+ ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ ret = 0;
+ break;
+ case 0x0:
+ if (pr != 0) {
+ /* Raise Zone protection fault. */
+ env->spr[SPR_40x_ESR] = 1 << 22;
+ ctx->prot = 0;
+ ret = -2;
+ break;
+ }
+ /* No break here */
+ case 0x1:
+ check_perms:
+ /* Check from TLB entry */
+ ctx->prot = tlb->prot;
+ ret = check_prot(ctx->prot, rw, access_type);
+ if (ret == -2) {
+ env->spr[SPR_40x_ESR] = 0;
+ }
+ break;
+ }
+ if (ret >= 0) {
+ ctx->raddr = raddr;
+ LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
+ " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
+ ret);
+ return 0;
+ }
+ }
+ LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
+ " %d %d\n", __func__, address, raddr, ctx->prot, ret);
+
+ return ret;
+}
+
+void store_40x_sler(CPUPPCState *env, uint32_t val)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+
+ /* XXX: TO BE FIXED */
+ if (val != 0x00000000) {
+ cpu_abort(CPU(cpu), "Little-endian regions are not supported by now\n");
+ }
+ env->spr[SPR_405_SLER] = val;
+}
+
+static inline int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb,
+ hwaddr *raddr, int *prot,
+ target_ulong address, int rw,
+ int access_type, int i)
+{
+ int ret, prot2;
+
+ if (ppcemb_tlb_check(env, tlb, raddr, address,
+ env->spr[SPR_BOOKE_PID],
+ !env->nb_pids, i) >= 0) {
+ goto found_tlb;
+ }
+
+ if (env->spr[SPR_BOOKE_PID1] &&
+ ppcemb_tlb_check(env, tlb, raddr, address,
+ env->spr[SPR_BOOKE_PID1], 0, i) >= 0) {
+ goto found_tlb;
+ }
+
+ if (env->spr[SPR_BOOKE_PID2] &&
+ ppcemb_tlb_check(env, tlb, raddr, address,
+ env->spr[SPR_BOOKE_PID2], 0, i) >= 0) {
+ goto found_tlb;
+ }
+
+ LOG_SWTLB("%s: TLB entry not found\n", __func__);
+ return -1;
+
+found_tlb:
+
+ if (msr_pr != 0) {
+ prot2 = tlb->prot & 0xF;
+ } else {
+ prot2 = (tlb->prot >> 4) & 0xF;
+ }
+
+ /* Check the address space */
+ if (access_type == ACCESS_CODE) {
+ if (msr_ir != (tlb->attr & 1)) {
+ LOG_SWTLB("%s: AS doesn't match\n", __func__);
+ return -1;
+ }
+
+ *prot = prot2;
+ if (prot2 & PAGE_EXEC) {
+ LOG_SWTLB("%s: good TLB!\n", __func__);
+ return 0;
+ }
+
+ LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__, prot2);
+ ret = -3;
+ } else {
+ if (msr_dr != (tlb->attr & 1)) {
+ LOG_SWTLB("%s: AS doesn't match\n", __func__);
+ return -1;
+ }
+
+ *prot = prot2;
+ if ((!rw && prot2 & PAGE_READ) || (rw && (prot2 & PAGE_WRITE))) {
+ LOG_SWTLB("%s: found TLB!\n", __func__);
+ return 0;
+ }
+
+ LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__, prot2);
+ ret = -2;
+ }
+
+ return ret;
+}
+
+static int mmubooke_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
+ target_ulong address, int rw,
+ int access_type)
+{
+ ppcemb_tlb_t *tlb;
+ hwaddr raddr;
+ int i, ret;
+
+ ret = -1;
+ raddr = (hwaddr)-1ULL;
+ for (i = 0; i < env->nb_tlb; i++) {
+ tlb = &env->tlb.tlbe[i];
+ ret = mmubooke_check_tlb(env, tlb, &raddr, &ctx->prot, address, rw,
+ access_type, i);
+ if (!ret) {
+ break;
+ }
+ }
+
+ if (ret >= 0) {
+ ctx->raddr = raddr;
+ LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
+ " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
+ ret);
+ } else {
+ LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
+ " %d %d\n", __func__, address, raddr, ctx->prot, ret);
+ }
+
+ return ret;
+}
+
+static void booke206_flush_tlb(CPUPPCState *env, int flags,
+ const int check_iprot)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ int tlb_size;
+ int i, j;
+ ppcmas_tlb_t *tlb = env->tlb.tlbm;
+
+ for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
+ if (flags & (1 << i)) {
+ tlb_size = booke206_tlb_size(env, i);
+ for (j = 0; j < tlb_size; j++) {
+ if (!check_iprot || !(tlb[j].mas1 & MAS1_IPROT)) {
+ tlb[j].mas1 &= ~MAS1_VALID;
+ }
+ }
+ }
+ tlb += booke206_tlb_size(env, i);
+ }
+
+ tlb_flush(CPU(cpu), 1);
+}
+
+static hwaddr booke206_tlb_to_page_size(CPUPPCState *env,
+ ppcmas_tlb_t *tlb)
+{
+ int tlbm_size;
+
+ tlbm_size = (tlb->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
+
+ return 1024ULL << tlbm_size;
+}
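A tiny standalone check (not part of this patch) of the decode above: MAS1.TSIZE encodes the page size as 1 KiB shifted left by TSIZE, which is what booke206_tlb_to_page_size() computes and what the book3e_tsize_to_str[] table further below pretty-prints.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    for (int tsize = 0; tsize < 12; tsize++) {
        uint64_t bytes = 1024ULL << tsize;   /* same formula as above */

        printf("TSIZE %2d -> %llu KiB\n", tsize,
               (unsigned long long)(bytes / 1024));
    }
    return 0;
}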
+
+/* TLB check function for MAS based SoftTLBs */
+static int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb,
+ hwaddr *raddrp, target_ulong address,
+ uint32_t pid)
+{
+ hwaddr mask;
+ uint32_t tlb_pid;
+
+ if (!msr_cm) {
+ /* In 32-bit mode we can only address 32-bit EAs */
+ address = (uint32_t)address;
+ }
+
+ /* Check valid flag */
+ if (!(tlb->mas1 & MAS1_VALID)) {
+ return -1;
+ }
+
+ mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
+ LOG_SWTLB("%s: TLB ADDR=0x" TARGET_FMT_lx " PID=0x%x MAS1=0x%x MAS2=0x%"
+ PRIx64 " mask=0x%" HWADDR_PRIx " MAS7_3=0x%" PRIx64 " MAS8=0x%"
+ PRIx32 "\n", __func__, address, pid, tlb->mas1, tlb->mas2, mask,
+ tlb->mas7_3, tlb->mas8);
+
+ /* Check PID */
+ tlb_pid = (tlb->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT;
+ if (tlb_pid != 0 && tlb_pid != pid) {
+ return -1;
+ }
+
+ /* Check effective address */
+ if ((address & mask) != (tlb->mas2 & MAS2_EPN_MASK)) {
+ return -1;
+ }
+
+ if (raddrp) {
+ *raddrp = (tlb->mas7_3 & mask) | (address & ~mask);
+ }
+
+ return 0;
+}
+
+static int mmubooke206_check_tlb(CPUPPCState *env, ppcmas_tlb_t *tlb,
+ hwaddr *raddr, int *prot,
+ target_ulong address, int rw,
+ int access_type)
+{
+ int ret;
+ int prot2 = 0;
+
+ if (ppcmas_tlb_check(env, tlb, raddr, address,
+ env->spr[SPR_BOOKE_PID]) >= 0) {
+ goto found_tlb;
+ }
+
+ if (env->spr[SPR_BOOKE_PID1] &&
+ ppcmas_tlb_check(env, tlb, raddr, address,
+ env->spr[SPR_BOOKE_PID1]) >= 0) {
+ goto found_tlb;
+ }
+
+ if (env->spr[SPR_BOOKE_PID2] &&
+ ppcmas_tlb_check(env, tlb, raddr, address,
+ env->spr[SPR_BOOKE_PID2]) >= 0) {
+ goto found_tlb;
+ }
+
+ LOG_SWTLB("%s: TLB entry not found\n", __func__);
+ return -1;
+
+found_tlb:
+
+ if (msr_pr != 0) {
+ if (tlb->mas7_3 & MAS3_UR) {
+ prot2 |= PAGE_READ;
+ }
+ if (tlb->mas7_3 & MAS3_UW) {
+ prot2 |= PAGE_WRITE;
+ }
+ if (tlb->mas7_3 & MAS3_UX) {
+ prot2 |= PAGE_EXEC;
+ }
+ } else {
+ if (tlb->mas7_3 & MAS3_SR) {
+ prot2 |= PAGE_READ;
+ }
+ if (tlb->mas7_3 & MAS3_SW) {
+ prot2 |= PAGE_WRITE;
+ }
+ if (tlb->mas7_3 & MAS3_SX) {
+ prot2 |= PAGE_EXEC;
+ }
+ }
+
+ /* Check the address space and permissions */
+ if (access_type == ACCESS_CODE) {
+ if (msr_ir != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
+ LOG_SWTLB("%s: AS doesn't match\n", __func__);
+ return -1;
+ }
+
+ *prot = prot2;
+ if (prot2 & PAGE_EXEC) {
+ LOG_SWTLB("%s: good TLB!\n", __func__);
+ return 0;
+ }
+
+ LOG_SWTLB("%s: no PAGE_EXEC: %x\n", __func__, prot2);
+ ret = -3;
+ } else {
+ if (msr_dr != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
+ LOG_SWTLB("%s: AS doesn't match\n", __func__);
+ return -1;
+ }
+
+ *prot = prot2;
+ if ((!rw && prot2 & PAGE_READ) || (rw && (prot2 & PAGE_WRITE))) {
+ LOG_SWTLB("%s: found TLB!\n", __func__);
+ return 0;
+ }
+
+ LOG_SWTLB("%s: PAGE_READ/WRITE doesn't match: %x\n", __func__, prot2);
+ ret = -2;
+ }
+
+ return ret;
+}
+
+static int mmubooke206_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
+ target_ulong address, int rw,
+ int access_type)
+{
+ ppcmas_tlb_t *tlb;
+ hwaddr raddr;
+ int i, j, ret;
+
+ ret = -1;
+ raddr = (hwaddr)-1ULL;
+
+ for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
+ int ways = booke206_tlb_ways(env, i);
+
+ for (j = 0; j < ways; j++) {
+ tlb = booke206_get_tlbm(env, i, address, j);
+ if (!tlb) {
+ continue;
+ }
+ ret = mmubooke206_check_tlb(env, tlb, &raddr, &ctx->prot, address,
+ rw, access_type);
+ if (ret != -1) {
+ goto found_tlb;
+ }
+ }
+ }
+
+found_tlb:
+
+ if (ret >= 0) {
+ ctx->raddr = raddr;
+ LOG_SWTLB("%s: access granted " TARGET_FMT_lx " => " TARGET_FMT_plx
+ " %d %d\n", __func__, address, ctx->raddr, ctx->prot,
+ ret);
+ } else {
+ LOG_SWTLB("%s: access refused " TARGET_FMT_lx " => " TARGET_FMT_plx
+ " %d %d\n", __func__, address, raddr, ctx->prot, ret);
+ }
+
+ return ret;
+}
+
+static const char *book3e_tsize_to_str[32] = {
+ "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K",
+ "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M",
+ "1G", "2G", "4G", "8G", "16G", "32G", "64G", "128G", "256G", "512G",
+ "1T", "2T"
+};
+
+static void mmubooke_dump_mmu(FILE *f, fprintf_function cpu_fprintf,
+ CPUPPCState *env)
+{
+ ppcemb_tlb_t *entry;
+ int i;
+
+ if (kvm_enabled() && !env->kvm_sw_tlb) {
+ cpu_fprintf(f, "Cannot access KVM TLB\n");
+ return;
+ }
+
+ cpu_fprintf(f, "\nTLB:\n");
+ cpu_fprintf(f, "Effective Physical Size PID Prot "
+ "Attr\n");
+
+ entry = &env->tlb.tlbe[0];
+ for (i = 0; i < env->nb_tlb; i++, entry++) {
+ hwaddr ea, pa;
+ target_ulong mask;
+ uint64_t size = (uint64_t)entry->size;
+ char size_buf[20];
+
+ /* Check valid flag */
+ if (!(entry->prot & PAGE_VALID)) {
+ continue;
+ }
+
+ mask = ~(entry->size - 1);
+ ea = entry->EPN & mask;
+ pa = entry->RPN & mask;
+ /* Extend the physical address to 36 bits */
+ pa |= (hwaddr)(entry->RPN & 0xF) << 32;
+ size /= 1024;
+ if (size >= 1024) {
+ snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "M", size / 1024);
+ } else {
+ snprintf(size_buf, sizeof(size_buf), "%3" PRId64 "k", size);
+ }
+ cpu_fprintf(f, "0x%016" PRIx64 " 0x%016" PRIx64 " %s %-5u %08x %08x\n",
+ (uint64_t)ea, (uint64_t)pa, size_buf, (uint32_t)entry->PID,
+ entry->prot, entry->attr);
+ }
+
+}
+
+static void mmubooke206_dump_one_tlb(FILE *f, fprintf_function cpu_fprintf,
+ CPUPPCState *env, int tlbn, int offset,
+ int tlbsize)
+{
+ ppcmas_tlb_t *entry;
+ int i;
+
+ cpu_fprintf(f, "\nTLB%d:\n", tlbn);
+ cpu_fprintf(f, "Effective Physical Size TID TS SRWX"
+ " URWX WIMGE U0123\n");
+
+ entry = &env->tlb.tlbm[offset];
+ for (i = 0; i < tlbsize; i++, entry++) {
+ hwaddr ea, pa, size;
+ int tsize;
+
+ if (!(entry->mas1 & MAS1_VALID)) {
+ continue;
+ }
+
+ tsize = (entry->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
+ size = 1024ULL << tsize;
+ ea = entry->mas2 & ~(size - 1);
+ pa = entry->mas7_3 & ~(size - 1);
+
+ cpu_fprintf(f, "0x%016" PRIx64 " 0x%016" PRIx64 " %4s %-5u %1u S%c%c%c"
+ "U%c%c%c %c%c%c%c%c U%c%c%c%c\n",
+ (uint64_t)ea, (uint64_t)pa,
+ book3e_tsize_to_str[tsize],
+ (entry->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT,
+ (entry->mas1 & MAS1_TS) >> MAS1_TS_SHIFT,
+ entry->mas7_3 & MAS3_SR ? 'R' : '-',
+ entry->mas7_3 & MAS3_SW ? 'W' : '-',
+ entry->mas7_3 & MAS3_SX ? 'X' : '-',
+ entry->mas7_3 & MAS3_UR ? 'R' : '-',
+ entry->mas7_3 & MAS3_UW ? 'W' : '-',
+ entry->mas7_3 & MAS3_UX ? 'X' : '-',
+ entry->mas2 & MAS2_W ? 'W' : '-',
+ entry->mas2 & MAS2_I ? 'I' : '-',
+ entry->mas2 & MAS2_M ? 'M' : '-',
+ entry->mas2 & MAS2_G ? 'G' : '-',
+ entry->mas2 & MAS2_E ? 'E' : '-',
+ entry->mas7_3 & MAS3_U0 ? '0' : '-',
+ entry->mas7_3 & MAS3_U1 ? '1' : '-',
+ entry->mas7_3 & MAS3_U2 ? '2' : '-',
+ entry->mas7_3 & MAS3_U3 ? '3' : '-');
+ }
+}
+
+static void mmubooke206_dump_mmu(FILE *f, fprintf_function cpu_fprintf,
+ CPUPPCState *env)
+{
+ int offset = 0;
+ int i;
+
+ if (kvm_enabled() && !env->kvm_sw_tlb) {
+ cpu_fprintf(f, "Cannot access KVM TLB\n");
+ return;
+ }
+
+ for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
+ int size = booke206_tlb_size(env, i);
+
+ if (size == 0) {
+ continue;
+ }
+
+ mmubooke206_dump_one_tlb(f, cpu_fprintf, env, i, offset, size);
+ offset += size;
+ }
+}
+
+static void mmu6xx_dump_BATs(FILE *f, fprintf_function cpu_fprintf,
+ CPUPPCState *env, int type)
+{
+ target_ulong *BATlt, *BATut, *BATu, *BATl;
+ target_ulong BEPIl, BEPIu, bl;
+ int i;
+
+ switch (type) {
+ case ACCESS_CODE:
+ BATlt = env->IBAT[1];
+ BATut = env->IBAT[0];
+ break;
+ default:
+ BATlt = env->DBAT[1];
+ BATut = env->DBAT[0];
+ break;
+ }
+
+ for (i = 0; i < env->nb_BATs; i++) {
+ BATu = &BATut[i];
+ BATl = &BATlt[i];
+ BEPIu = *BATu & 0xF0000000;
+ BEPIl = *BATu & 0x0FFE0000;
+ bl = (*BATu & 0x00001FFC) << 15;
+ cpu_fprintf(f, "%s BAT%d BATu " TARGET_FMT_lx
+ " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " "
+ TARGET_FMT_lx " " TARGET_FMT_lx "\n",
+ type == ACCESS_CODE ? "code" : "data", i,
+ *BATu, *BATl, BEPIu, BEPIl, bl);
+ }
+}
+
+static void mmu6xx_dump_mmu(FILE *f, fprintf_function cpu_fprintf,
+ CPUPPCState *env)
+{
+ ppc6xx_tlb_t *tlb;
+ target_ulong sr;
+ int type, way, entry, i;
+
+ cpu_fprintf(f, "HTAB base = 0x%"HWADDR_PRIx"\n", env->htab_base);
+ cpu_fprintf(f, "HTAB mask = 0x%"HWADDR_PRIx"\n", env->htab_mask);
+
+ cpu_fprintf(f, "\nSegment registers:\n");
+ for (i = 0; i < 32; i++) {
+ sr = env->sr[i];
+ if (sr & 0x80000000) {
+ cpu_fprintf(f, "%02d T=%d Ks=%d Kp=%d BUID=0x%03x "
+ "CNTLR_SPEC=0x%05x\n", i,
+ sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0,
+ sr & 0x20000000 ? 1 : 0, (uint32_t)((sr >> 20) & 0x1FF),
+ (uint32_t)(sr & 0xFFFFF));
+ } else {
+ cpu_fprintf(f, "%02d T=%d Ks=%d Kp=%d N=%d VSID=0x%06x\n", i,
+ sr & 0x80000000 ? 1 : 0, sr & 0x40000000 ? 1 : 0,
+ sr & 0x20000000 ? 1 : 0, sr & 0x10000000 ? 1 : 0,
+ (uint32_t)(sr & 0x00FFFFFF));
+ }
+ }
+
+ cpu_fprintf(f, "\nBATs:\n");
+ mmu6xx_dump_BATs(f, cpu_fprintf, env, ACCESS_INT);
+ mmu6xx_dump_BATs(f, cpu_fprintf, env, ACCESS_CODE);
+
+ if (env->id_tlbs != 1) {
+ cpu_fprintf(f, "ERROR: 6xx MMU should have separated TLB"
+ " for code and data\n");
+ }
+
+ cpu_fprintf(f, "\nTLBs [EPN EPN + SIZE]\n");
+
+ for (type = 0; type < 2; type++) {
+ for (way = 0; way < env->nb_ways; way++) {
+ for (entry = env->nb_tlb * type + env->tlb_per_way * way;
+ entry < (env->nb_tlb * type + env->tlb_per_way * (way + 1));
+ entry++) {
+
+ tlb = &env->tlb.tlb6[entry];
+ cpu_fprintf(f, "%s TLB %02d/%02d way:%d %s ["
+ TARGET_FMT_lx " " TARGET_FMT_lx "]\n",
+ type ? "code" : "data", entry % env->nb_tlb,
+ env->nb_tlb, way,
+ pte_is_valid(tlb->pte0) ? "valid" : "inval",
+ tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE);
+ }
+ }
+ }
+}
+
+void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUPPCState *env)
+{
+ switch (env->mmu_model) {
+ case POWERPC_MMU_BOOKE:
+ mmubooke_dump_mmu(f, cpu_fprintf, env);
+ break;
+ case POWERPC_MMU_BOOKE206:
+ mmubooke206_dump_mmu(f, cpu_fprintf, env);
+ break;
+ case POWERPC_MMU_SOFT_6xx:
+ case POWERPC_MMU_SOFT_74xx:
+ mmu6xx_dump_mmu(f, cpu_fprintf, env);
+ break;
+#if defined(TARGET_PPC64)
+ case POWERPC_MMU_64B:
+ case POWERPC_MMU_2_03:
+ case POWERPC_MMU_2_06:
+ case POWERPC_MMU_2_06a:
+ case POWERPC_MMU_2_07:
+ case POWERPC_MMU_2_07a:
+ dump_slb(f, cpu_fprintf, ppc_env_get_cpu(env));
+ break;
+#endif
+ default:
+ qemu_log_mask(LOG_UNIMP, "%s: unimplemented\n", __func__);
+ }
+}
+
+static inline int check_physical(CPUPPCState *env, mmu_ctx_t *ctx,
+ target_ulong eaddr, int rw)
+{
+ int in_plb, ret;
+
+ ctx->raddr = eaddr;
+ ctx->prot = PAGE_READ | PAGE_EXEC;
+ ret = 0;
+ switch (env->mmu_model) {
+ case POWERPC_MMU_SOFT_6xx:
+ case POWERPC_MMU_SOFT_74xx:
+ case POWERPC_MMU_SOFT_4xx:
+ case POWERPC_MMU_REAL:
+ case POWERPC_MMU_BOOKE:
+ ctx->prot |= PAGE_WRITE;
+ break;
+
+ case POWERPC_MMU_SOFT_4xx_Z:
+ if (unlikely(msr_pe != 0)) {
+ /* The 403 family adds some particular protections,
+ * using the PBL/PBU registers for accesses with no translation.
+ */
+ in_plb =
+ /* Check PLB validity */
+ (env->pb[0] < env->pb[1] &&
+ /* and address in plb area */
+ eaddr >= env->pb[0] && eaddr < env->pb[1]) ||
+ (env->pb[2] < env->pb[3] &&
+ eaddr >= env->pb[2] && eaddr < env->pb[3]) ? 1 : 0;
+ if (in_plb ^ msr_px) {
+ /* Access in protected area */
+ if (rw == 1) {
+ /* Access is not allowed */
+ ret = -2;
+ }
+ } else {
+ /* Read-write access is allowed */
+ ctx->prot |= PAGE_WRITE;
+ }
+ }
+ break;
+
+ default:
+ /* Caller's checks mean we should never get here for other models */
+ abort();
+ return -1;
+ }
+
+ return ret;
+}
+
+static int get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx,
+ target_ulong eaddr, int rw, int access_type)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ int ret = -1;
+ bool real_mode = (access_type == ACCESS_CODE && msr_ir == 0)
+ || (access_type != ACCESS_CODE && msr_dr == 0);
+
+#if 0
+ qemu_log("%s\n", __func__);
+#endif
+
+ switch (env->mmu_model) {
+ case POWERPC_MMU_SOFT_6xx:
+ case POWERPC_MMU_SOFT_74xx:
+ if (real_mode) {
+ ret = check_physical(env, ctx, eaddr, rw);
+ } else {
+ /* Try to find a BAT */
+ if (env->nb_BATs != 0) {
+ ret = get_bat_6xx_tlb(env, ctx, eaddr, rw, access_type);
+ }
+ if (ret < 0) {
+ /* We didn't match any BAT entry or don't have BATs */
+ ret = get_segment_6xx_tlb(env, ctx, eaddr, rw, access_type);
+ }
+ }
+ break;
+
+ case POWERPC_MMU_SOFT_4xx:
+ case POWERPC_MMU_SOFT_4xx_Z:
+ if (real_mode) {
+ ret = check_physical(env, ctx, eaddr, rw);
+ } else {
+ ret = mmu40x_get_physical_address(env, ctx, eaddr,
+ rw, access_type);
+ }
+ break;
+ case POWERPC_MMU_BOOKE:
+ ret = mmubooke_get_physical_address(env, ctx, eaddr,
+ rw, access_type);
+ break;
+ case POWERPC_MMU_BOOKE206:
+ ret = mmubooke206_get_physical_address(env, ctx, eaddr, rw,
+ access_type);
+ break;
+ case POWERPC_MMU_MPC8xx:
+ /* XXX: TODO */
+ cpu_abort(CPU(cpu), "MPC8xx MMU model is not implemented\n");
+ break;
+ case POWERPC_MMU_REAL:
+ if (real_mode) {
+ ret = check_physical(env, ctx, eaddr, rw);
+ } else {
+ cpu_abort(CPU(cpu), "PowerPC in real mode do not do any translation\n");
+ }
+ return -1;
+ default:
+ cpu_abort(CPU(cpu), "Unknown or invalid MMU model\n");
+ return -1;
+ }
+#if 0
+ qemu_log("%s address " TARGET_FMT_lx " => %d " TARGET_FMT_plx "\n",
+ __func__, eaddr, ret, ctx->raddr);
+#endif
+
+ return ret;
+}
+
+hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ mmu_ctx_t ctx;
+
+ switch (env->mmu_model) {
+#if defined(TARGET_PPC64)
+ case POWERPC_MMU_64B:
+ case POWERPC_MMU_2_03:
+ case POWERPC_MMU_2_06:
+ case POWERPC_MMU_2_06a:
+ case POWERPC_MMU_2_07:
+ case POWERPC_MMU_2_07a:
+ return ppc_hash64_get_phys_page_debug(cpu, addr);
+#endif
+
+ case POWERPC_MMU_32B:
+ case POWERPC_MMU_601:
+ return ppc_hash32_get_phys_page_debug(cpu, addr);
+
+ default:
+ ;
+ }
+
+ if (unlikely(get_physical_address(env, &ctx, addr, 0, ACCESS_INT) != 0)) {
+
+ /* Some MMUs have separate TLBs for code and data. If we only try an
+ * ACCESS_INT, we may not be able to read instructions mapped by code
+ * TLBs, so we also try an ACCESS_CODE.
+ */
+ if (unlikely(get_physical_address(env, &ctx, addr, 0,
+ ACCESS_CODE) != 0)) {
+ return -1;
+ }
+ }
+
+ return ctx.raddr & TARGET_PAGE_MASK;
+}
+
+static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address,
+ int rw)
+{
+ env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
+ env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
+ env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
+ env->spr[SPR_BOOKE_MAS3] = 0;
+ env->spr[SPR_BOOKE_MAS6] = 0;
+ env->spr[SPR_BOOKE_MAS7] = 0;
+
+ /* AS */
+ if (((rw == 2) && msr_ir) || ((rw != 2) && msr_dr)) {
+ env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
+ env->spr[SPR_BOOKE_MAS6] |= MAS6_SAS;
+ }
+
+ env->spr[SPR_BOOKE_MAS1] |= MAS1_VALID;
+ env->spr[SPR_BOOKE_MAS2] |= address & MAS2_EPN_MASK;
+
+ switch (env->spr[SPR_BOOKE_MAS4] & MAS4_TIDSELD_PIDZ) {
+ case MAS4_TIDSELD_PID0:
+ env->spr[SPR_BOOKE_MAS1] |= env->spr[SPR_BOOKE_PID] << MAS1_TID_SHIFT;
+ break;
+ case MAS4_TIDSELD_PID1:
+ env->spr[SPR_BOOKE_MAS1] |= env->spr[SPR_BOOKE_PID1] << MAS1_TID_SHIFT;
+ break;
+ case MAS4_TIDSELD_PID2:
+ env->spr[SPR_BOOKE_MAS1] |= env->spr[SPR_BOOKE_PID2] << MAS1_TID_SHIFT;
+ break;
+ }
+
+ env->spr[SPR_BOOKE_MAS6] |= env->spr[SPR_BOOKE_PID] << 16;
+
+ /* next victim logic */
+ env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
+ env->last_way++;
+ env->last_way &= booke206_tlb_ways(env, 0) - 1;
+ env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
+}
+
+/* Perform address translation */
+static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address,
+ int rw, int mmu_idx)
+{
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ mmu_ctx_t ctx;
+ int access_type;
+ int ret = 0;
+
+ if (rw == 2) {
+ /* code access */
+ rw = 0;
+ access_type = ACCESS_CODE;
+ } else {
+ /* data access */
+ access_type = env->access_type;
+ }
+ ret = get_physical_address(env, &ctx, address, rw, access_type);
+ if (ret == 0) {
+ tlb_set_page(cs, address & TARGET_PAGE_MASK,
+ ctx.raddr & TARGET_PAGE_MASK, ctx.prot,
+ mmu_idx, TARGET_PAGE_SIZE);
+ ret = 0;
+ } else if (ret < 0) {
+ LOG_MMU_STATE(cs);
+ if (access_type == ACCESS_CODE) {
+ switch (ret) {
+ case -1:
+ /* No matches in page tables or TLB */
+ switch (env->mmu_model) {
+ case POWERPC_MMU_SOFT_6xx:
+ cs->exception_index = POWERPC_EXCP_IFTLB;
+ env->error_code = 1 << 18;
+ env->spr[SPR_IMISS] = address;
+ env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem;
+ goto tlb_miss;
+ case POWERPC_MMU_SOFT_74xx:
+ cs->exception_index = POWERPC_EXCP_IFTLB;
+ goto tlb_miss_74xx;
+ case POWERPC_MMU_SOFT_4xx:
+ case POWERPC_MMU_SOFT_4xx_Z:
+ cs->exception_index = POWERPC_EXCP_ITLB;
+ env->error_code = 0;
+ env->spr[SPR_40x_DEAR] = address;
+ env->spr[SPR_40x_ESR] = 0x00000000;
+ break;
+ case POWERPC_MMU_BOOKE206:
+ booke206_update_mas_tlb_miss(env, address, rw);
+ /* fall through */
+ case POWERPC_MMU_BOOKE:
+ cs->exception_index = POWERPC_EXCP_ITLB;
+ env->error_code = 0;
+ env->spr[SPR_BOOKE_DEAR] = address;
+ return -1;
+ case POWERPC_MMU_MPC8xx:
+ /* XXX: TODO */
+ cpu_abort(cs, "MPC8xx MMU model is not implemented\n");
+ break;
+ case POWERPC_MMU_REAL:
+ cpu_abort(cs, "PowerPC in real mode should never raise "
+ "any MMU exceptions\n");
+ return -1;
+ default:
+ cpu_abort(cs, "Unknown or invalid MMU model\n");
+ return -1;
+ }
+ break;
+ case -2:
+ /* Access rights violation */
+ cs->exception_index = POWERPC_EXCP_ISI;
+ env->error_code = 0x08000000;
+ break;
+ case -3:
+ /* No execute protection violation */
+ if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
+ (env->mmu_model == POWERPC_MMU_BOOKE206)) {
+ env->spr[SPR_BOOKE_ESR] = 0x00000000;
+ }
+ cs->exception_index = POWERPC_EXCP_ISI;
+ env->error_code = 0x10000000;
+ break;
+ case -4:
+ /* Direct store exception */
+ /* No code fetch is allowed in direct-store areas */
+ cs->exception_index = POWERPC_EXCP_ISI;
+ env->error_code = 0x10000000;
+ break;
+ }
+ } else {
+ switch (ret) {
+ case -1:
+ /* No matches in page tables or TLB */
+ switch (env->mmu_model) {
+ case POWERPC_MMU_SOFT_6xx:
+ if (rw == 1) {
+ cs->exception_index = POWERPC_EXCP_DSTLB;
+ env->error_code = 1 << 16;
+ } else {
+ cs->exception_index = POWERPC_EXCP_DLTLB;
+ env->error_code = 0;
+ }
+ env->spr[SPR_DMISS] = address;
+ env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem;
+ tlb_miss:
+ env->error_code |= ctx.key << 19;
+ env->spr[SPR_HASH1] = env->htab_base +
+ get_pteg_offset32(cpu, ctx.hash[0]);
+ env->spr[SPR_HASH2] = env->htab_base +
+ get_pteg_offset32(cpu, ctx.hash[1]);
+ break;
+ case POWERPC_MMU_SOFT_74xx:
+ if (rw == 1) {
+ cs->exception_index = POWERPC_EXCP_DSTLB;
+ } else {
+ cs->exception_index = POWERPC_EXCP_DLTLB;
+ }
+ tlb_miss_74xx:
+ /* Implement LRU algorithm */
+ env->error_code = ctx.key << 19;
+ env->spr[SPR_TLBMISS] = (address & ~((target_ulong)0x3)) |
+ ((env->last_way + 1) & (env->nb_ways - 1));
+ env->spr[SPR_PTEHI] = 0x80000000 | ctx.ptem;
+ break;
+ case POWERPC_MMU_SOFT_4xx:
+ case POWERPC_MMU_SOFT_4xx_Z:
+ cs->exception_index = POWERPC_EXCP_DTLB;
+ env->error_code = 0;
+ env->spr[SPR_40x_DEAR] = address;
+ if (rw) {
+ env->spr[SPR_40x_ESR] = 0x00800000;
+ } else {
+ env->spr[SPR_40x_ESR] = 0x00000000;
+ }
+ break;
+ case POWERPC_MMU_MPC8xx:
+ /* XXX: TODO */
+ cpu_abort(cs, "MPC8xx MMU model is not implemented\n");
+ break;
+ case POWERPC_MMU_BOOKE206:
+ booke206_update_mas_tlb_miss(env, address, rw);
+ /* fall through */
+ case POWERPC_MMU_BOOKE:
+ cs->exception_index = POWERPC_EXCP_DTLB;
+ env->error_code = 0;
+ env->spr[SPR_BOOKE_DEAR] = address;
+ env->spr[SPR_BOOKE_ESR] = rw ? ESR_ST : 0;
+ return -1;
+ case POWERPC_MMU_REAL:
+ cpu_abort(cs, "PowerPC in real mode should never raise "
+ "any MMU exceptions\n");
+ return -1;
+ default:
+ cpu_abort(cs, "Unknown or invalid MMU model\n");
+ return -1;
+ }
+ break;
+ case -2:
+ /* Access rights violation */
+ cs->exception_index = POWERPC_EXCP_DSI;
+ env->error_code = 0;
+ if (env->mmu_model == POWERPC_MMU_SOFT_4xx
+ || env->mmu_model == POWERPC_MMU_SOFT_4xx_Z) {
+ env->spr[SPR_40x_DEAR] = address;
+ if (rw) {
+ env->spr[SPR_40x_ESR] |= 0x00800000;
+ }
+ } else if ((env->mmu_model == POWERPC_MMU_BOOKE) ||
+ (env->mmu_model == POWERPC_MMU_BOOKE206)) {
+ env->spr[SPR_BOOKE_DEAR] = address;
+ env->spr[SPR_BOOKE_ESR] = rw ? ESR_ST : 0;
+ } else {
+ env->spr[SPR_DAR] = address;
+ if (rw == 1) {
+ env->spr[SPR_DSISR] = 0x0A000000;
+ } else {
+ env->spr[SPR_DSISR] = 0x08000000;
+ }
+ }
+ break;
+ case -4:
+ /* Direct store exception */
+ switch (access_type) {
+ case ACCESS_FLOAT:
+ /* Floating point load/store */
+ cs->exception_index = POWERPC_EXCP_ALIGN;
+ env->error_code = POWERPC_EXCP_ALIGN_FP;
+ env->spr[SPR_DAR] = address;
+ break;
+ case ACCESS_RES:
+ /* lwarx, ldarx or stwcx. */
+ cs->exception_index = POWERPC_EXCP_DSI;
+ env->error_code = 0;
+ env->spr[SPR_DAR] = address;
+ if (rw == 1) {
+ env->spr[SPR_DSISR] = 0x06000000;
+ } else {
+ env->spr[SPR_DSISR] = 0x04000000;
+ }
+ break;
+ case ACCESS_EXT:
+ /* eciwx or ecowx */
+ cs->exception_index = POWERPC_EXCP_DSI;
+ env->error_code = 0;
+ env->spr[SPR_DAR] = address;
+ if (rw == 1) {
+ env->spr[SPR_DSISR] = 0x06100000;
+ } else {
+ env->spr[SPR_DSISR] = 0x04100000;
+ }
+ break;
+ default:
+ printf("DSI: invalid exception (%d)\n", ret);
+ cs->exception_index = POWERPC_EXCP_PROGRAM;
+ env->error_code =
+ POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL;
+ env->spr[SPR_DAR] = address;
+ break;
+ }
+ break;
+ }
+ }
+#if 0
+ printf("%s: set exception to %d %02x\n", __func__,
+ cs->exception, env->error_code);
+#endif
+ ret = 1;
+ }
+
+ return ret;
+}
+
+/*****************************************************************************/
+/* BATs management */
+#if !defined(FLUSH_ALL_TLBS)
+static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu,
+ target_ulong mask)
+{
+ CPUState *cs = CPU(ppc_env_get_cpu(env));
+ target_ulong base, end, page;
+
+ base = BATu & ~0x0001FFFF;
+ end = base + mask + 0x00020000;
+ LOG_BATS("Flush BAT from " TARGET_FMT_lx " to " TARGET_FMT_lx " ("
+ TARGET_FMT_lx ")\n", base, end, mask);
+ for (page = base; page != end; page += TARGET_PAGE_SIZE) {
+ tlb_flush_page(cs, page);
+ }
+ LOG_BATS("Flush done\n");
+}
+#endif
+
+static inline void dump_store_bat(CPUPPCState *env, char ID, int ul, int nr,
+ target_ulong value)
+{
+ LOG_BATS("Set %cBAT%d%c to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n", ID,
+ nr, ul == 0 ? 'u' : 'l', value, env->nip);
+}
+
+void helper_store_ibatu(CPUPPCState *env, uint32_t nr, target_ulong value)
+{
+ target_ulong mask;
+#if defined(FLUSH_ALL_TLBS)
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+#endif
+
+ dump_store_bat(env, 'I', 0, nr, value);
+ if (env->IBAT[0][nr] != value) {
+ mask = (value << 15) & 0x0FFE0000UL;
+#if !defined(FLUSH_ALL_TLBS)
+ do_invalidate_BAT(env, env->IBAT[0][nr], mask);
+#endif
+ /* When storing valid upper BAT, mask BEPI and BRPN
+ * and invalidate all TLBs covered by this BAT
+ */
+ mask = (value << 15) & 0x0FFE0000UL;
+ env->IBAT[0][nr] = (value & 0x00001FFFUL) |
+ (value & ~0x0001FFFFUL & ~mask);
+ env->IBAT[1][nr] = (env->IBAT[1][nr] & 0x0000007B) |
+ (env->IBAT[1][nr] & ~0x0001FFFF & ~mask);
+#if !defined(FLUSH_ALL_TLBS)
+ do_invalidate_BAT(env, env->IBAT[0][nr], mask);
+#else
+ tlb_flush(CPU(cpu), 1);
+#endif
+ }
+}
+
+void helper_store_ibatl(CPUPPCState *env, uint32_t nr, target_ulong value)
+{
+ dump_store_bat(env, 'I', 1, nr, value);
+ env->IBAT[1][nr] = value;
+}
+
+void helper_store_dbatu(CPUPPCState *env, uint32_t nr, target_ulong value)
+{
+ target_ulong mask;
+#if defined(FLUSH_ALL_TLBS)
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+#endif
+
+ dump_store_bat(env, 'D', 0, nr, value);
+ if (env->DBAT[0][nr] != value) {
+ /* When storing valid upper BAT, mask BEPI and BRPN
+ * and invalidate all TLBs covered by this BAT
+ */
+ mask = (value << 15) & 0x0FFE0000UL;
+#if !defined(FLUSH_ALL_TLBS)
+ do_invalidate_BAT(env, env->DBAT[0][nr], mask);
+#endif
+ mask = (value << 15) & 0x0FFE0000UL;
+ env->DBAT[0][nr] = (value & 0x00001FFFUL) |
+ (value & ~0x0001FFFFUL & ~mask);
+ env->DBAT[1][nr] = (env->DBAT[1][nr] & 0x0000007B) |
+ (env->DBAT[1][nr] & ~0x0001FFFF & ~mask);
+#if !defined(FLUSH_ALL_TLBS)
+ do_invalidate_BAT(env, env->DBAT[0][nr], mask);
+#else
+ tlb_flush(CPU(cpu), 1);
+#endif
+ }
+}
+
+void helper_store_dbatl(CPUPPCState *env, uint32_t nr, target_ulong value)
+{
+ dump_store_bat(env, 'D', 1, nr, value);
+ env->DBAT[1][nr] = value;
+}
+
+void helper_store_601_batu(CPUPPCState *env, uint32_t nr, target_ulong value)
+{
+ target_ulong mask;
+#if defined(FLUSH_ALL_TLBS)
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ int do_inval;
+#endif
+
+ dump_store_bat(env, 'I', 0, nr, value);
+ if (env->IBAT[0][nr] != value) {
+#if defined(FLUSH_ALL_TLBS)
+ do_inval = 0;
+#endif
+ mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL;
+ if (env->IBAT[1][nr] & 0x40) {
+ /* Invalidate BAT only if it is valid */
+#if !defined(FLUSH_ALL_TLBS)
+ do_invalidate_BAT(env, env->IBAT[0][nr], mask);
+#else
+ do_inval = 1;
+#endif
+ }
+ /* When storing valid upper BAT, mask BEPI and BRPN
+ * and invalidate all TLBs covered by this BAT
+ */
+ env->IBAT[0][nr] = (value & 0x00001FFFUL) |
+ (value & ~0x0001FFFFUL & ~mask);
+ env->DBAT[0][nr] = env->IBAT[0][nr];
+ if (env->IBAT[1][nr] & 0x40) {
+#if !defined(FLUSH_ALL_TLBS)
+ do_invalidate_BAT(env, env->IBAT[0][nr], mask);
+#else
+ do_inval = 1;
+#endif
+ }
+#if defined(FLUSH_ALL_TLBS)
+ if (do_inval) {
+ tlb_flush(CPU(cpu), 1);
+ }
+#endif
+ }
+}
+
+void helper_store_601_batl(CPUPPCState *env, uint32_t nr, target_ulong value)
+{
+#if !defined(FLUSH_ALL_TLBS)
+ target_ulong mask;
+#else
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ int do_inval;
+#endif
+
+ dump_store_bat(env, 'I', 1, nr, value);
+ if (env->IBAT[1][nr] != value) {
+#if defined(FLUSH_ALL_TLBS)
+ do_inval = 0;
+#endif
+ if (env->IBAT[1][nr] & 0x40) {
+#if !defined(FLUSH_ALL_TLBS)
+ mask = (env->IBAT[1][nr] << 17) & 0x0FFE0000UL;
+ do_invalidate_BAT(env, env->IBAT[0][nr], mask);
+#else
+ do_inval = 1;
+#endif
+ }
+ if (value & 0x40) {
+#if !defined(FLUSH_ALL_TLBS)
+ mask = (value << 17) & 0x0FFE0000UL;
+ do_invalidate_BAT(env, env->IBAT[0][nr], mask);
+#else
+ do_inval = 1;
+#endif
+ }
+ env->IBAT[1][nr] = value;
+ env->DBAT[1][nr] = value;
+#if defined(FLUSH_ALL_TLBS)
+ if (do_inval) {
+ tlb_flush(CPU(cpu), 1);
+ }
+#endif
+ }
+}
+
+/*****************************************************************************/
+/* TLB management */
+void ppc_tlb_invalidate_all(CPUPPCState *env)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+
+ switch (env->mmu_model) {
+ case POWERPC_MMU_SOFT_6xx:
+ case POWERPC_MMU_SOFT_74xx:
+ ppc6xx_tlb_invalidate_all(env);
+ break;
+ case POWERPC_MMU_SOFT_4xx:
+ case POWERPC_MMU_SOFT_4xx_Z:
+ ppc4xx_tlb_invalidate_all(env);
+ break;
+ case POWERPC_MMU_REAL:
+ cpu_abort(CPU(cpu), "No TLB for PowerPC 4xx in real mode\n");
+ break;
+ case POWERPC_MMU_MPC8xx:
+ /* XXX: TODO */
+ cpu_abort(CPU(cpu), "MPC8xx MMU model is not implemented\n");
+ break;
+ case POWERPC_MMU_BOOKE:
+ tlb_flush(CPU(cpu), 1);
+ break;
+ case POWERPC_MMU_BOOKE206:
+ booke206_flush_tlb(env, -1, 0);
+ break;
+ case POWERPC_MMU_32B:
+ case POWERPC_MMU_601:
+#if defined(TARGET_PPC64)
+ case POWERPC_MMU_64B:
+ case POWERPC_MMU_2_03:
+ case POWERPC_MMU_2_06:
+ case POWERPC_MMU_2_06a:
+ case POWERPC_MMU_2_07:
+ case POWERPC_MMU_2_07a:
+#endif /* defined(TARGET_PPC64) */
+ env->tlb_need_flush = 0;
+ tlb_flush(CPU(cpu), 1);
+ break;
+ default:
+ /* XXX: TODO */
+ cpu_abort(CPU(cpu), "Unknown MMU model %d\n", env->mmu_model);
+ break;
+ }
+}
+
+void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
+{
+#if !defined(FLUSH_ALL_TLBS)
+ addr &= TARGET_PAGE_MASK;
+ switch (env->mmu_model) {
+ case POWERPC_MMU_SOFT_6xx:
+ case POWERPC_MMU_SOFT_74xx:
+ ppc6xx_tlb_invalidate_virt(env, addr, 0);
+ if (env->id_tlbs == 1) {
+ ppc6xx_tlb_invalidate_virt(env, addr, 1);
+ }
+ break;
+ case POWERPC_MMU_32B:
+ case POWERPC_MMU_601:
+ /* Actual CPUs invalidate entire congruence classes based on the
+ * geometry of their TLBs and some OSes take that into account,
+ * we just mark the TLB to be flushed later (context synchronizing
+ * event or sync instruction on 32-bit).
+ */
+ env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
+ break;
+#if defined(TARGET_PPC64)
+ case POWERPC_MMU_64B:
+ case POWERPC_MMU_2_03:
+ case POWERPC_MMU_2_06:
+ case POWERPC_MMU_2_06a:
+ case POWERPC_MMU_2_07:
+ case POWERPC_MMU_2_07a:
+        /* tlbie invalidates TLBs for all segments */
+        /* XXX: since there are too many segments to invalidate individually,
+         *      and we still don't have a tlb_flush_mask(env, n, mask) in QEMU,
+         *      we just invalidate all TLBs
+         */
+ env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
+ break;
+#endif /* defined(TARGET_PPC64) */
+ default:
+ /* Should never reach here with other MMU models */
+ assert(0);
+ }
+#else
+ ppc_tlb_invalidate_all(env);
+#endif
+}
+
+/*****************************************************************************/
+/* Special registers manipulation */
+void ppc_store_sdr1(CPUPPCState *env, target_ulong value)
+{
+ qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, value);
+ assert(!env->external_htab);
+ env->spr[SPR_SDR1] = value;
+#if defined(TARGET_PPC64)
+ if (env->mmu_model & POWERPC_MMU_64) {
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ Error *local_err = NULL;
+
+ ppc_hash64_set_sdr1(cpu, value, &local_err);
+ if (local_err) {
+ error_report_err(local_err);
+ error_free(local_err);
+ }
+ } else
+#endif /* defined(TARGET_PPC64) */
+ {
+ /* FIXME: Should check for valid HTABMASK values */
+ env->htab_mask = ((value & SDR_32_HTABMASK) << 16) | 0xFFFF;
+ env->htab_base = value & SDR_32_HTABORG;
+ }
+}
+
+/* Segment registers load and store */
+target_ulong helper_load_sr(CPUPPCState *env, target_ulong sr_num)
+{
+#if defined(TARGET_PPC64)
+ if (env->mmu_model & POWERPC_MMU_64) {
+ /* XXX */
+ return 0;
+ }
+#endif
+ return env->sr[sr_num];
+}
+
+void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
+{
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s: reg=%d " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__,
+ (int)srnum, value, env->sr[srnum]);
+#if defined(TARGET_PPC64)
+ if (env->mmu_model & POWERPC_MMU_64) {
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ uint64_t esid, vsid;
+
+ /* ESID = srnum */
+ esid = ((uint64_t)(srnum & 0xf) << 28) | SLB_ESID_V;
+
+ /* VSID = VSID */
+ vsid = (value & 0xfffffff) << 12;
+ /* flags = flags */
+ vsid |= ((value >> 27) & 0xf) << 8;
+
+ ppc_store_slb(cpu, srnum, esid, vsid);
+ } else
+#endif
+ if (env->sr[srnum] != value) {
+ env->sr[srnum] = value;
+/* Invalidating 256MB of virtual memory in 4kB pages takes far longer than
+   flushing the whole TLB. */
+#if !defined(FLUSH_ALL_TLBS) && 0
+ {
+ target_ulong page, end;
+ /* Invalidate 256 MB of virtual memory */
+ page = (16 << 20) * srnum;
+ end = page + (16 << 20);
+ for (; page != end; page += TARGET_PAGE_SIZE) {
+ tlb_flush_page(CPU(cpu), page);
+ }
+ }
+#else
+ env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
+#endif
+ }
+}
+
+/* TLB management */
+void helper_tlbia(CPUPPCState *env)
+{
+ ppc_tlb_invalidate_all(env);
+}
+
+void helper_tlbie(CPUPPCState *env, target_ulong addr)
+{
+ ppc_tlb_invalidate_one(env, addr);
+}
+
+void helper_tlbiva(CPUPPCState *env, target_ulong addr)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+
+ /* tlbiva instruction only exists on BookE */
+ assert(env->mmu_model == POWERPC_MMU_BOOKE);
+ /* XXX: TODO */
+ cpu_abort(CPU(cpu), "BookE MMU model is not implemented\n");
+}
+
+/* Software driven TLBs management */
+/* PowerPC 602/603 software TLB load instructions helpers */
+static void do_6xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
+{
+ target_ulong RPN, CMP, EPN;
+ int way;
+
+ RPN = env->spr[SPR_RPA];
+ if (is_code) {
+ CMP = env->spr[SPR_ICMP];
+ EPN = env->spr[SPR_IMISS];
+ } else {
+ CMP = env->spr[SPR_DCMP];
+ EPN = env->spr[SPR_DMISS];
+ }
+ way = (env->spr[SPR_SRR1] >> 17) & 1;
+ (void)EPN; /* avoid a compiler warning */
+ LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
+ " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
+ RPN, way);
+ /* Store this TLB */
+ ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
+ way, is_code, CMP, RPN);
+}
+
+void helper_6xx_tlbd(CPUPPCState *env, target_ulong EPN)
+{
+ do_6xx_tlb(env, EPN, 0);
+}
+
+void helper_6xx_tlbi(CPUPPCState *env, target_ulong EPN)
+{
+ do_6xx_tlb(env, EPN, 1);
+}
+
+/* PowerPC 74xx software TLB load instructions helpers */
+static void do_74xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
+{
+ target_ulong RPN, CMP, EPN;
+ int way;
+
+ RPN = env->spr[SPR_PTELO];
+ CMP = env->spr[SPR_PTEHI];
+ EPN = env->spr[SPR_TLBMISS] & ~0x3;
+ way = env->spr[SPR_TLBMISS] & 0x3;
+ (void)EPN; /* avoid a compiler warning */
+ LOG_SWTLB("%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx " PTE0 " TARGET_FMT_lx
+ " PTE1 " TARGET_FMT_lx " way %d\n", __func__, new_EPN, EPN, CMP,
+ RPN, way);
+ /* Store this TLB */
+ ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
+ way, is_code, CMP, RPN);
+}
+
+void helper_74xx_tlbd(CPUPPCState *env, target_ulong EPN)
+{
+ do_74xx_tlb(env, EPN, 0);
+}
+
+void helper_74xx_tlbi(CPUPPCState *env, target_ulong EPN)
+{
+ do_74xx_tlb(env, EPN, 1);
+}
+
+/*****************************************************************************/
+/* PowerPC 601 specific instructions (POWER bridge) */
+
+target_ulong helper_rac(CPUPPCState *env, target_ulong addr)
+{
+ mmu_ctx_t ctx;
+ int nb_BATs;
+ target_ulong ret = 0;
+
+ /* We don't have to generate many instances of this instruction,
+ * as rac is supervisor only.
+ */
+ /* XXX: FIX THIS: Pretend we have no BAT */
+ nb_BATs = env->nb_BATs;
+ env->nb_BATs = 0;
+ if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0) {
+ ret = ctx.raddr;
+ }
+ env->nb_BATs = nb_BATs;
+ return ret;
+}
+
+static inline target_ulong booke_tlb_to_page_size(int size)
+{
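+    /* The SIZE field encodes the page size as 1 KiB * 4^size
+     * (1 KiB, 4 KiB, 16 KiB, ...) */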
+ return 1024 << (2 * size);
+}
+
+static inline int booke_page_size_to_tlb(target_ulong page_size)
+{
+ int size;
+
+ switch (page_size) {
+ case 0x00000400UL:
+ size = 0x0;
+ break;
+ case 0x00001000UL:
+ size = 0x1;
+ break;
+ case 0x00004000UL:
+ size = 0x2;
+ break;
+ case 0x00010000UL:
+ size = 0x3;
+ break;
+ case 0x00040000UL:
+ size = 0x4;
+ break;
+ case 0x00100000UL:
+ size = 0x5;
+ break;
+ case 0x00400000UL:
+ size = 0x6;
+ break;
+ case 0x01000000UL:
+ size = 0x7;
+ break;
+ case 0x04000000UL:
+ size = 0x8;
+ break;
+ case 0x10000000UL:
+ size = 0x9;
+ break;
+ case 0x40000000UL:
+ size = 0xA;
+ break;
+#if defined(TARGET_PPC64)
+ case 0x000100000000ULL:
+ size = 0xB;
+ break;
+ case 0x000400000000ULL:
+ size = 0xC;
+ break;
+ case 0x001000000000ULL:
+ size = 0xD;
+ break;
+ case 0x004000000000ULL:
+ size = 0xE;
+ break;
+ case 0x010000000000ULL:
+ size = 0xF;
+ break;
+#endif
+ default:
+ size = -1;
+ break;
+ }
+
+ return size;
+}
+
+/* Helpers for 4xx TLB management */
+#define PPC4XX_TLB_ENTRY_MASK 0x0000003f /* Mask for 64 TLB entries */
+
+#define PPC4XX_TLBHI_V 0x00000040
+#define PPC4XX_TLBHI_E 0x00000020
+#define PPC4XX_TLBHI_SIZE_MIN 0
+#define PPC4XX_TLBHI_SIZE_MAX 7
+#define PPC4XX_TLBHI_SIZE_DEFAULT 1
+#define PPC4XX_TLBHI_SIZE_SHIFT 7
+#define PPC4XX_TLBHI_SIZE_MASK 0x00000007
+
+#define PPC4XX_TLBLO_EX 0x00000200
+#define PPC4XX_TLBLO_WR 0x00000100
+#define PPC4XX_TLBLO_ATTR_MASK 0x000000FF
+#define PPC4XX_TLBLO_RPN_MASK 0xFFFFFC00
+
+target_ulong helper_4xx_tlbre_hi(CPUPPCState *env, target_ulong entry)
+{
+ ppcemb_tlb_t *tlb;
+ target_ulong ret;
+ int size;
+
+ entry &= PPC4XX_TLB_ENTRY_MASK;
+ tlb = &env->tlb.tlbe[entry];
+ ret = tlb->EPN;
+ if (tlb->prot & PAGE_VALID) {
+ ret |= PPC4XX_TLBHI_V;
+ }
+ size = booke_page_size_to_tlb(tlb->size);
+ if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) {
+ size = PPC4XX_TLBHI_SIZE_DEFAULT;
+ }
+ ret |= size << PPC4XX_TLBHI_SIZE_SHIFT;
+ env->spr[SPR_40x_PID] = tlb->PID;
+ return ret;
+}
+
+target_ulong helper_4xx_tlbre_lo(CPUPPCState *env, target_ulong entry)
+{
+ ppcemb_tlb_t *tlb;
+ target_ulong ret;
+
+ entry &= PPC4XX_TLB_ENTRY_MASK;
+ tlb = &env->tlb.tlbe[entry];
+ ret = tlb->RPN;
+ if (tlb->prot & PAGE_EXEC) {
+ ret |= PPC4XX_TLBLO_EX;
+ }
+ if (tlb->prot & PAGE_WRITE) {
+ ret |= PPC4XX_TLBLO_WR;
+ }
+ return ret;
+}
+
+void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry,
+ target_ulong val)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ ppcemb_tlb_t *tlb;
+ target_ulong page, end;
+
+ LOG_SWTLB("%s entry %d val " TARGET_FMT_lx "\n", __func__, (int)entry,
+ val);
+ entry &= PPC4XX_TLB_ENTRY_MASK;
+ tlb = &env->tlb.tlbe[entry];
+ /* Invalidate previous TLB (if it's valid) */
+ if (tlb->prot & PAGE_VALID) {
+ end = tlb->EPN + tlb->size;
+ LOG_SWTLB("%s: invalidate old TLB %d start " TARGET_FMT_lx " end "
+ TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
+ for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
+ tlb_flush_page(cs, page);
+ }
+ }
+ tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
+ & PPC4XX_TLBHI_SIZE_MASK);
+ /* We cannot handle TLB size < TARGET_PAGE_SIZE.
+ * If this ever occurs, one should use the ppcemb target instead
+ * of the ppc or ppc64 one
+ */
+ if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
+        cpu_abort(cs, "TLB size " TARGET_FMT_lu " < %u "
+                  "is not supported (%d)\n",
+                  tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
+ }
+ tlb->EPN = val & ~(tlb->size - 1);
+ if (val & PPC4XX_TLBHI_V) {
+ tlb->prot |= PAGE_VALID;
+ if (val & PPC4XX_TLBHI_E) {
+ /* XXX: TO BE FIXED */
+ cpu_abort(cs,
+                      "Little-endian TLB entries are not supported for now\n");
+ }
+ } else {
+ tlb->prot &= ~PAGE_VALID;
+ }
+ tlb->PID = env->spr[SPR_40x_PID]; /* PID */
+ LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
+ " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
+ (int)entry, tlb->RPN, tlb->EPN, tlb->size,
+ tlb->prot & PAGE_READ ? 'r' : '-',
+ tlb->prot & PAGE_WRITE ? 'w' : '-',
+ tlb->prot & PAGE_EXEC ? 'x' : '-',
+ tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
+ /* Invalidate new TLB (if valid) */
+ if (tlb->prot & PAGE_VALID) {
+ end = tlb->EPN + tlb->size;
+ LOG_SWTLB("%s: invalidate TLB %d start " TARGET_FMT_lx " end "
+ TARGET_FMT_lx "\n", __func__, (int)entry, tlb->EPN, end);
+ for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
+ tlb_flush_page(cs, page);
+ }
+ }
+}
+
+void helper_4xx_tlbwe_lo(CPUPPCState *env, target_ulong entry,
+ target_ulong val)
+{
+ ppcemb_tlb_t *tlb;
+
+ LOG_SWTLB("%s entry %i val " TARGET_FMT_lx "\n", __func__, (int)entry,
+ val);
+ entry &= PPC4XX_TLB_ENTRY_MASK;
+ tlb = &env->tlb.tlbe[entry];
+ tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
+ tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK;
+ tlb->prot = PAGE_READ;
+ if (val & PPC4XX_TLBLO_EX) {
+ tlb->prot |= PAGE_EXEC;
+ }
+ if (val & PPC4XX_TLBLO_WR) {
+ tlb->prot |= PAGE_WRITE;
+ }
+ LOG_SWTLB("%s: set up TLB %d RPN " TARGET_FMT_plx " EPN " TARGET_FMT_lx
+ " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
+ (int)entry, tlb->RPN, tlb->EPN, tlb->size,
+ tlb->prot & PAGE_READ ? 'r' : '-',
+ tlb->prot & PAGE_WRITE ? 'w' : '-',
+ tlb->prot & PAGE_EXEC ? 'x' : '-',
+ tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
+}
+
+target_ulong helper_4xx_tlbsx(CPUPPCState *env, target_ulong address)
+{
+ return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
+}
+
+/* PowerPC 440 TLB management */
+void helper_440_tlbwe(CPUPPCState *env, uint32_t word, target_ulong entry,
+ target_ulong value)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ ppcemb_tlb_t *tlb;
+ target_ulong EPN, RPN, size;
+ int do_flush_tlbs;
+
+ LOG_SWTLB("%s word %d entry %d value " TARGET_FMT_lx "\n",
+ __func__, word, (int)entry, value);
+ do_flush_tlbs = 0;
+ entry &= 0x3F;
+ tlb = &env->tlb.tlbe[entry];
+ switch (word) {
+ default:
+ /* Just here to please gcc */
+ case 0:
+ EPN = value & 0xFFFFFC00;
+ if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN) {
+ do_flush_tlbs = 1;
+ }
+ tlb->EPN = EPN;
+ size = booke_tlb_to_page_size((value >> 4) & 0xF);
+ if ((tlb->prot & PAGE_VALID) && tlb->size < size) {
+ do_flush_tlbs = 1;
+ }
+ tlb->size = size;
+ tlb->attr &= ~0x1;
+ tlb->attr |= (value >> 8) & 1;
+ if (value & 0x200) {
+ tlb->prot |= PAGE_VALID;
+ } else {
+ if (tlb->prot & PAGE_VALID) {
+ tlb->prot &= ~PAGE_VALID;
+ do_flush_tlbs = 1;
+ }
+ }
+ tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
+ if (do_flush_tlbs) {
+ tlb_flush(CPU(cpu), 1);
+ }
+ break;
+ case 1:
+ RPN = value & 0xFFFFFC0F;
+ if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN) {
+ tlb_flush(CPU(cpu), 1);
+ }
+ tlb->RPN = RPN;
+ break;
+ case 2:
+ tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
+ tlb->prot = tlb->prot & PAGE_VALID;
+ if (value & 0x1) {
+ tlb->prot |= PAGE_READ << 4;
+ }
+ if (value & 0x2) {
+ tlb->prot |= PAGE_WRITE << 4;
+ }
+ if (value & 0x4) {
+ tlb->prot |= PAGE_EXEC << 4;
+ }
+ if (value & 0x8) {
+ tlb->prot |= PAGE_READ;
+ }
+ if (value & 0x10) {
+ tlb->prot |= PAGE_WRITE;
+ }
+ if (value & 0x20) {
+ tlb->prot |= PAGE_EXEC;
+ }
+ break;
+ }
+}
+
+target_ulong helper_440_tlbre(CPUPPCState *env, uint32_t word,
+ target_ulong entry)
+{
+ ppcemb_tlb_t *tlb;
+ target_ulong ret;
+ int size;
+
+ entry &= 0x3F;
+ tlb = &env->tlb.tlbe[entry];
+ switch (word) {
+ default:
+ /* Just here to please gcc */
+ case 0:
+ ret = tlb->EPN;
+ size = booke_page_size_to_tlb(tlb->size);
+ if (size < 0 || size > 0xF) {
+ size = 1;
+ }
+ ret |= size << 4;
+ if (tlb->attr & 0x1) {
+ ret |= 0x100;
+ }
+ if (tlb->prot & PAGE_VALID) {
+ ret |= 0x200;
+ }
+ env->spr[SPR_440_MMUCR] &= ~0x000000FF;
+ env->spr[SPR_440_MMUCR] |= tlb->PID;
+ break;
+ case 1:
+ ret = tlb->RPN;
+ break;
+ case 2:
+ ret = tlb->attr & ~0x1;
+ if (tlb->prot & (PAGE_READ << 4)) {
+ ret |= 0x1;
+ }
+ if (tlb->prot & (PAGE_WRITE << 4)) {
+ ret |= 0x2;
+ }
+ if (tlb->prot & (PAGE_EXEC << 4)) {
+ ret |= 0x4;
+ }
+ if (tlb->prot & PAGE_READ) {
+ ret |= 0x8;
+ }
+ if (tlb->prot & PAGE_WRITE) {
+ ret |= 0x10;
+ }
+ if (tlb->prot & PAGE_EXEC) {
+ ret |= 0x20;
+ }
+ break;
+ }
+ return ret;
+}
+
+target_ulong helper_440_tlbsx(CPUPPCState *env, target_ulong address)
+{
+ return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
+}
+
+/* PowerPC BookE 2.06 TLB management */
+
+static ppcmas_tlb_t *booke206_cur_tlb(CPUPPCState *env)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ uint32_t tlbncfg = 0;
+ int esel = (env->spr[SPR_BOOKE_MAS0] & MAS0_ESEL_MASK) >> MAS0_ESEL_SHIFT;
+ int ea = (env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK);
+ int tlb;
+
+ tlb = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
+ tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlb];
+
+ if ((tlbncfg & TLBnCFG_HES) && (env->spr[SPR_BOOKE_MAS0] & MAS0_HES)) {
+ cpu_abort(CPU(cpu), "we don't support HES yet\n");
+ }
+
+ return booke206_get_tlbm(env, tlb, ea, esel);
+}
+
+void helper_booke_setpid(CPUPPCState *env, uint32_t pidn, target_ulong pid)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+
+ env->spr[pidn] = pid;
+    /* changing PIDs means we're in a different address space now */
+ tlb_flush(CPU(cpu), 1);
+}
+
+void helper_booke206_tlbwe(CPUPPCState *env)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ uint32_t tlbncfg, tlbn;
+ ppcmas_tlb_t *tlb;
+ uint32_t size_tlb, size_ps;
+ target_ulong mask;
+
+
+ switch (env->spr[SPR_BOOKE_MAS0] & MAS0_WQ_MASK) {
+ case MAS0_WQ_ALWAYS:
+ /* good to go, write that entry */
+ break;
+ case MAS0_WQ_COND:
+ /* XXX check if reserved */
+ if (0) {
+ return;
+ }
+ break;
+ case MAS0_WQ_CLR_RSRV:
+ /* XXX clear entry */
+ return;
+ default:
+ /* no idea what to do */
+ return;
+ }
+
+ if (((env->spr[SPR_BOOKE_MAS0] & MAS0_ATSEL) == MAS0_ATSEL_LRAT) &&
+ !msr_gs) {
+ /* XXX we don't support direct LRAT setting yet */
+ fprintf(stderr, "cpu: don't support LRAT setting yet\n");
+ return;
+ }
+
+ tlbn = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
+ tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn];
+
+ tlb = booke206_cur_tlb(env);
+
+ if (!tlb) {
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL |
+ POWERPC_EXCP_INVAL_INVAL, GETPC());
+ }
+
+ /* check that we support the targeted size */
+ size_tlb = (env->spr[SPR_BOOKE_MAS1] & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
+ size_ps = booke206_tlbnps(env, tlbn);
+ if ((env->spr[SPR_BOOKE_MAS1] & MAS1_VALID) && (tlbncfg & TLBnCFG_AVAIL) &&
+ !(size_ps & (1 << size_tlb))) {
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL |
+ POWERPC_EXCP_INVAL_INVAL, GETPC());
+ }
+
+ if (msr_gs) {
+ cpu_abort(CPU(cpu), "missing HV implementation\n");
+ }
+ tlb->mas7_3 = ((uint64_t)env->spr[SPR_BOOKE_MAS7] << 32) |
+ env->spr[SPR_BOOKE_MAS3];
+ tlb->mas1 = env->spr[SPR_BOOKE_MAS1];
+
+ /* MAV 1.0 only */
+ if (!(tlbncfg & TLBnCFG_AVAIL)) {
+ /* force !AVAIL TLB entries to correct page size */
+ tlb->mas1 &= ~MAS1_TSIZE_MASK;
+ /* XXX can be configured in MMUCSR0 */
+ tlb->mas1 |= (tlbncfg & TLBnCFG_MINSIZE) >> 12;
+ }
+
+ /* Make a mask from TLB size to discard invalid bits in EPN field */
+ mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
+ /* Add a mask for page attributes */
+ mask |= MAS2_ACM | MAS2_VLE | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E;
+
+ if (!msr_cm) {
+ /* Executing a tlbwe instruction in 32-bit mode will set
+ * bits 0:31 of the TLB EPN field to zero.
+ */
+ mask &= 0xffffffff;
+ }
+
+ tlb->mas2 = env->spr[SPR_BOOKE_MAS2] & mask;
+
+ if (!(tlbncfg & TLBnCFG_IPROT)) {
+ /* no IPROT supported by TLB */
+ tlb->mas1 &= ~MAS1_IPROT;
+ }
+
+ if (booke206_tlb_to_page_size(env, tlb) == TARGET_PAGE_SIZE) {
+ tlb_flush_page(CPU(cpu), tlb->mas2 & MAS2_EPN_MASK);
+ } else {
+ tlb_flush(CPU(cpu), 1);
+ }
+}
+
+static inline void booke206_tlb_to_mas(CPUPPCState *env, ppcmas_tlb_t *tlb)
+{
+ int tlbn = booke206_tlbm_to_tlbn(env, tlb);
+ int way = booke206_tlbm_to_way(env, tlb);
+
+ env->spr[SPR_BOOKE_MAS0] = tlbn << MAS0_TLBSEL_SHIFT;
+ env->spr[SPR_BOOKE_MAS0] |= way << MAS0_ESEL_SHIFT;
+ env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
+
+ env->spr[SPR_BOOKE_MAS1] = tlb->mas1;
+ env->spr[SPR_BOOKE_MAS2] = tlb->mas2;
+ env->spr[SPR_BOOKE_MAS3] = tlb->mas7_3;
+ env->spr[SPR_BOOKE_MAS7] = tlb->mas7_3 >> 32;
+}
+
+void helper_booke206_tlbre(CPUPPCState *env)
+{
+ ppcmas_tlb_t *tlb = NULL;
+
+ tlb = booke206_cur_tlb(env);
+ if (!tlb) {
+ env->spr[SPR_BOOKE_MAS1] = 0;
+ } else {
+ booke206_tlb_to_mas(env, tlb);
+ }
+}
+
+void helper_booke206_tlbsx(CPUPPCState *env, target_ulong address)
+{
+ ppcmas_tlb_t *tlb = NULL;
+ int i, j;
+ hwaddr raddr;
+ uint32_t spid, sas;
+
+ spid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID_MASK) >> MAS6_SPID_SHIFT;
+ sas = env->spr[SPR_BOOKE_MAS6] & MAS6_SAS;
+
+ for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
+ int ways = booke206_tlb_ways(env, i);
+
+ for (j = 0; j < ways; j++) {
+ tlb = booke206_get_tlbm(env, i, address, j);
+
+ if (!tlb) {
+ continue;
+ }
+
+ if (ppcmas_tlb_check(env, tlb, &raddr, address, spid)) {
+ continue;
+ }
+
+ if (sas != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
+ continue;
+ }
+
+ booke206_tlb_to_mas(env, tlb);
+ return;
+ }
+ }
+
+ /* no entry found, fill with defaults */
+ env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
+ env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
+ env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
+ env->spr[SPR_BOOKE_MAS3] = 0;
+ env->spr[SPR_BOOKE_MAS7] = 0;
+
+ if (env->spr[SPR_BOOKE_MAS6] & MAS6_SAS) {
+ env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
+ }
+
+ env->spr[SPR_BOOKE_MAS1] |= (env->spr[SPR_BOOKE_MAS6] >> 16)
+ << MAS1_TID_SHIFT;
+
+ /* next victim logic */
+ env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
+ env->last_way++;
+ env->last_way &= booke206_tlb_ways(env, 0) - 1;
+ env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
+}
+
+static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn,
+ uint32_t ea)
+{
+ int i;
+ int ways = booke206_tlb_ways(env, tlbn);
+ target_ulong mask;
+
+ for (i = 0; i < ways; i++) {
+ ppcmas_tlb_t *tlb = booke206_get_tlbm(env, tlbn, ea, i);
+ if (!tlb) {
+ continue;
+ }
+ mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
+ if (((tlb->mas2 & MAS2_EPN_MASK) == (ea & mask)) &&
+ !(tlb->mas1 & MAS1_IPROT)) {
+ tlb->mas1 &= ~MAS1_VALID;
+ }
+ }
+}
+
+void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address)
+{
+ CPUState *cs;
+
+ if (address & 0x4) {
+ /* flush all entries */
+ if (address & 0x8) {
+ /* flush all of TLB1 */
+ booke206_flush_tlb(env, BOOKE206_FLUSH_TLB1, 1);
+ } else {
+ /* flush all of TLB0 */
+ booke206_flush_tlb(env, BOOKE206_FLUSH_TLB0, 0);
+ }
+ return;
+ }
+
+ if (address & 0x8) {
+ /* flush TLB1 entries */
+ booke206_invalidate_ea_tlb(env, 1, address);
+ CPU_FOREACH(cs) {
+ tlb_flush(cs, 1);
+ }
+ } else {
+ /* flush TLB0 entries */
+ booke206_invalidate_ea_tlb(env, 0, address);
+ CPU_FOREACH(cs) {
+ tlb_flush_page(cs, address & MAS2_EPN_MASK);
+ }
+ }
+}
+
+void helper_booke206_tlbilx0(CPUPPCState *env, target_ulong address)
+{
+ /* XXX missing LPID handling */
+ booke206_flush_tlb(env, -1, 1);
+}
+
+void helper_booke206_tlbilx1(CPUPPCState *env, target_ulong address)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ int i, j;
+ int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
+ ppcmas_tlb_t *tlb = env->tlb.tlbm;
+ int tlb_size;
+
+ /* XXX missing LPID handling */
+ for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
+ tlb_size = booke206_tlb_size(env, i);
+ for (j = 0; j < tlb_size; j++) {
+ if (!(tlb[j].mas1 & MAS1_IPROT) &&
+ ((tlb[j].mas1 & MAS1_TID_MASK) == tid)) {
+ tlb[j].mas1 &= ~MAS1_VALID;
+ }
+ }
+ tlb += booke206_tlb_size(env, i);
+ }
+ tlb_flush(CPU(cpu), 1);
+}
+
+void helper_booke206_tlbilx3(CPUPPCState *env, target_ulong address)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ int i, j;
+ ppcmas_tlb_t *tlb;
+ int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
+ int pid = tid >> MAS6_SPID_SHIFT;
+ int sgs = env->spr[SPR_BOOKE_MAS5] & MAS5_SGS;
+ int ind = (env->spr[SPR_BOOKE_MAS6] & MAS6_SIND) ? MAS1_IND : 0;
+ /* XXX check for unsupported isize and raise an invalid opcode then */
+ int size = env->spr[SPR_BOOKE_MAS6] & MAS6_ISIZE_MASK;
+ /* XXX implement MAV2 handling */
+ bool mav2 = false;
+
+ /* XXX missing LPID handling */
+ /* flush by pid and ea */
+ for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
+ int ways = booke206_tlb_ways(env, i);
+
+ for (j = 0; j < ways; j++) {
+ tlb = booke206_get_tlbm(env, i, address, j);
+ if (!tlb) {
+ continue;
+ }
+ if ((ppcmas_tlb_check(env, tlb, NULL, address, pid) != 0) ||
+ (tlb->mas1 & MAS1_IPROT) ||
+ ((tlb->mas1 & MAS1_IND) != ind) ||
+ ((tlb->mas8 & MAS8_TGS) != sgs)) {
+ continue;
+ }
+ if (mav2 && ((tlb->mas1 & MAS1_TSIZE_MASK) != size)) {
+ /* XXX only check when MMUCFG[TWC] || TLBnCFG[HES] */
+ continue;
+ }
+ /* XXX e500mc doesn't match SAS, but other cores might */
+ tlb->mas1 &= ~MAS1_VALID;
+ }
+ }
+ tlb_flush(CPU(cpu), 1);
+}
+
+void helper_booke206_tlbflush(CPUPPCState *env, target_ulong type)
+{
+ int flags = 0;
+
+ if (type & 2) {
+ flags |= BOOKE206_FLUSH_TLB1;
+ }
+
+ if (type & 4) {
+ flags |= BOOKE206_FLUSH_TLB0;
+ }
+
+ booke206_flush_tlb(env, flags, 1);
+}
+
+
+void helper_check_tlb_flush_local(CPUPPCState *env)
+{
+ check_tlb_flush(env, false);
+}
+
+void helper_check_tlb_flush_global(CPUPPCState *env)
+{
+ check_tlb_flush(env, true);
+}
+
+/*****************************************************************************/
+
+/* Try to fill the TLB and raise an exception on error. If retaddr is
+   NULL, the function was called from C code (i.e. not from generated
+   code or from helper.c). */
+/* XXX: fix it to restore all registers */
+void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cs);
+ CPUPPCState *env = &cpu->env;
+ int ret;
+
+ if (pcc->handle_mmu_fault) {
+ ret = pcc->handle_mmu_fault(cpu, addr, access_type, mmu_idx);
+ } else {
+ ret = cpu_ppc_handle_mmu_fault(env, addr, access_type, mmu_idx);
+ }
+ if (unlikely(ret != 0)) {
+ raise_exception_err_ra(env, cs->exception_index, env->error_code,
+ retaddr);
+ }
+}
diff --git a/target/ppc/monitor.c b/target/ppc/monitor.c
new file mode 100644
index 0000000000..c2d0806dd1
--- /dev/null
+++ b/target/ppc/monitor.c
@@ -0,0 +1,147 @@
+/*
+ * QEMU monitor
+ *
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "monitor/monitor.h"
+#include "monitor/hmp-target.h"
+#include "hmp.h"
+
+static target_long monitor_get_ccr (const struct MonitorDef *md, int val)
+{
+ CPUArchState *env = mon_get_cpu_env();
+ unsigned int u;
+ int i;
+
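+    /* Reassemble the architected 32-bit CR image from the eight 4-bit
+     * crf[] fields, with crf[0] in the most significant nibble */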
+ u = 0;
+    for (i = 0; i < 8; i++) {
+        u |= env->crf[i] << (32 - (4 * (i + 1)));
+    }
+
+ return u;
+}
+
+static target_long monitor_get_decr (const struct MonitorDef *md, int val)
+{
+ CPUArchState *env = mon_get_cpu_env();
+ return cpu_ppc_load_decr(env);
+}
+
+static target_long monitor_get_tbu (const struct MonitorDef *md, int val)
+{
+ CPUArchState *env = mon_get_cpu_env();
+ return cpu_ppc_load_tbu(env);
+}
+
+static target_long monitor_get_tbl (const struct MonitorDef *md, int val)
+{
+ CPUArchState *env = mon_get_cpu_env();
+ return cpu_ppc_load_tbl(env);
+}
+
+void hmp_info_tlb(Monitor *mon, const QDict *qdict)
+{
+ CPUArchState *env1 = mon_get_cpu_env();
+
+ dump_mmu((FILE*)mon, (fprintf_function)monitor_printf, env1);
+}
+
+const MonitorDef monitor_defs[] = {
+ { "fpscr", offsetof(CPUPPCState, fpscr) },
+ /* Next instruction pointer */
+ { "nip|pc", offsetof(CPUPPCState, nip) },
+ { "lr", offsetof(CPUPPCState, lr) },
+ { "ctr", offsetof(CPUPPCState, ctr) },
+ { "decr", 0, &monitor_get_decr, },
+ { "ccr|cr", 0, &monitor_get_ccr, },
+    { "xer", offsetof(CPUPPCState, xer) },
+    /* Machine state register */
+    { "msr", offsetof(CPUPPCState, msr) },
+ { "tbu", 0, &monitor_get_tbu, },
+ { "tbl", 0, &monitor_get_tbl, },
+ { NULL },
+};
+
+const MonitorDef *target_monitor_defs(void)
+{
+ return monitor_defs;
+}
+
+static int ppc_cpu_get_reg_num(const char *numstr, int maxnum, int *pregnum)
+{
+ int regnum;
+ char *endptr = NULL;
+
+ if (!*numstr) {
+ return false;
+ }
+
+ regnum = strtoul(numstr, &endptr, 10);
+ if (*endptr || (regnum >= maxnum)) {
+ return false;
+ }
+ *pregnum = regnum;
+
+ return true;
+}
+
+int target_get_monitor_def(CPUState *cs, const char *name, uint64_t *pval)
+{
+ int i, regnum;
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+
+ /* General purpose registers */
+ if ((tolower(name[0]) == 'r') &&
+ ppc_cpu_get_reg_num(name + 1, ARRAY_SIZE(env->gpr), &regnum)) {
+ *pval = env->gpr[regnum];
+ return 0;
+ }
+
+ /* Floating point registers */
+ if ((tolower(name[0]) == 'f') &&
+ ppc_cpu_get_reg_num(name + 1, ARRAY_SIZE(env->fpr), &regnum)) {
+ *pval = env->fpr[regnum];
+ return 0;
+ }
+
+ /* Special purpose registers */
+ for (i = 0; i < ARRAY_SIZE(env->spr_cb); ++i) {
+ ppc_spr_t *spr = &env->spr_cb[i];
+
+ if (spr->name && (strcasecmp(name, spr->name) == 0)) {
+ *pval = env->spr[i];
+ return 0;
+ }
+ }
+
+ /* Segment registers */
+#if !defined(CONFIG_USER_ONLY)
+ if ((strncasecmp(name, "sr", 2) == 0) &&
+ ppc_cpu_get_reg_num(name + 2, ARRAY_SIZE(env->sr), &regnum)) {
+ *pval = env->sr[regnum];
+ return 0;
+ }
+#endif
+
+ return -EINVAL;
+}
diff --git a/target/ppc/timebase_helper.c b/target/ppc/timebase_helper.c
new file mode 100644
index 0000000000..73363e08ae
--- /dev/null
+++ b/target/ppc/timebase_helper.c
@@ -0,0 +1,176 @@
+/*
+ * PowerPC emulation helpers for QEMU.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "qemu/log.h"
+
+/*****************************************************************************/
+/* SPR accesses */
+
+target_ulong helper_load_tbl(CPUPPCState *env)
+{
+ return (target_ulong)cpu_ppc_load_tbl(env);
+}
+
+target_ulong helper_load_tbu(CPUPPCState *env)
+{
+ return cpu_ppc_load_tbu(env);
+}
+
+target_ulong helper_load_atbl(CPUPPCState *env)
+{
+ return (target_ulong)cpu_ppc_load_atbl(env);
+}
+
+target_ulong helper_load_atbu(CPUPPCState *env)
+{
+ return cpu_ppc_load_atbu(env);
+}
+
+#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
+target_ulong helper_load_purr(CPUPPCState *env)
+{
+ return (target_ulong)cpu_ppc_load_purr(env);
+}
+#endif
+
+target_ulong helper_load_601_rtcl(CPUPPCState *env)
+{
+ return cpu_ppc601_load_rtcl(env);
+}
+
+target_ulong helper_load_601_rtcu(CPUPPCState *env)
+{
+ return cpu_ppc601_load_rtcu(env);
+}
+
+#if !defined(CONFIG_USER_ONLY)
+void helper_store_tbl(CPUPPCState *env, target_ulong val)
+{
+ cpu_ppc_store_tbl(env, val);
+}
+
+void helper_store_tbu(CPUPPCState *env, target_ulong val)
+{
+ cpu_ppc_store_tbu(env, val);
+}
+
+void helper_store_atbl(CPUPPCState *env, target_ulong val)
+{
+ cpu_ppc_store_atbl(env, val);
+}
+
+void helper_store_atbu(CPUPPCState *env, target_ulong val)
+{
+ cpu_ppc_store_atbu(env, val);
+}
+
+void helper_store_601_rtcl(CPUPPCState *env, target_ulong val)
+{
+ cpu_ppc601_store_rtcl(env, val);
+}
+
+void helper_store_601_rtcu(CPUPPCState *env, target_ulong val)
+{
+ cpu_ppc601_store_rtcu(env, val);
+}
+
+target_ulong helper_load_decr(CPUPPCState *env)
+{
+ return cpu_ppc_load_decr(env);
+}
+
+void helper_store_decr(CPUPPCState *env, target_ulong val)
+{
+ cpu_ppc_store_decr(env, val);
+}
+
+target_ulong helper_load_hdecr(CPUPPCState *env)
+{
+ return cpu_ppc_load_hdecr(env);
+}
+
+void helper_store_hdecr(CPUPPCState *env, target_ulong val)
+{
+ cpu_ppc_store_hdecr(env, val);
+}
+
+target_ulong helper_load_40x_pit(CPUPPCState *env)
+{
+ return load_40x_pit(env);
+}
+
+void helper_store_40x_pit(CPUPPCState *env, target_ulong val)
+{
+ store_40x_pit(env, val);
+}
+
+void helper_store_booke_tcr(CPUPPCState *env, target_ulong val)
+{
+ store_booke_tcr(env, val);
+}
+
+void helper_store_booke_tsr(CPUPPCState *env, target_ulong val)
+{
+ store_booke_tsr(env, val);
+}
+#endif
+
+/*****************************************************************************/
+/* Embedded PowerPC specific helpers */
+
+/* XXX: to be improved to check access rights when in user-mode */
+target_ulong helper_load_dcr(CPUPPCState *env, target_ulong dcrn)
+{
+ uint32_t val = 0;
+
+ if (unlikely(env->dcr_env == NULL)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "No DCR environment\n");
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL |
+ POWERPC_EXCP_INVAL_INVAL, GETPC());
+ } else if (unlikely(ppc_dcr_read(env->dcr_env,
+ (uint32_t)dcrn, &val) != 0)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "DCR read error %d %03x\n",
+ (uint32_t)dcrn, (uint32_t)dcrn);
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL |
+ POWERPC_EXCP_PRIV_REG, GETPC());
+ }
+ return val;
+}
+
+void helper_store_dcr(CPUPPCState *env, target_ulong dcrn, target_ulong val)
+{
+ if (unlikely(env->dcr_env == NULL)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "No DCR environment\n");
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL |
+ POWERPC_EXCP_INVAL_INVAL, GETPC());
+ } else if (unlikely(ppc_dcr_write(env->dcr_env, (uint32_t)dcrn,
+ (uint32_t)val) != 0)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "DCR write error %d %03x\n",
+ (uint32_t)dcrn, (uint32_t)dcrn);
+ raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
+ POWERPC_EXCP_INVAL |
+ POWERPC_EXCP_PRIV_REG, GETPC());
+ }
+}
diff --git a/target/ppc/trace-events b/target/ppc/trace-events
new file mode 100644
index 0000000000..b666156114
--- /dev/null
+++ b/target/ppc/trace-events
@@ -0,0 +1,5 @@
+# See docs/tracing.txt for syntax documentation.
+
+# target/ppc/kvm.c
+kvm_failed_spr_set(int str, const char *msg) "Warning: Unable to set SPR %d to KVM: %s"
+kvm_failed_spr_get(int str, const char *msg) "Warning: Unable to retrieve SPR %d from KVM: %s"
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
new file mode 100644
index 0000000000..59e9552d2b
--- /dev/null
+++ b/target/ppc/translate.c
@@ -0,0 +1,7200 @@
+/*
+ * PowerPC emulation for qemu: main translation routines.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ * Copyright (C) 2011 Freescale Semiconductor, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "internal.h"
+#include "disas/disas.h"
+#include "exec/exec-all.h"
+#include "tcg-op.h"
+#include "qemu/host-utils.h"
+#include "exec/cpu_ldst.h"
+
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+
+#include "trace-tcg.h"
+#include "exec/log.h"
+
+
+#define CPU_SINGLE_STEP 0x1
+#define CPU_BRANCH_STEP 0x2
+#define GDBSTUB_SINGLE_STEP 0x4
+
+/* Include definitions for instruction classes and implementation flags */
+//#define PPC_DEBUG_DISAS
+//#define DO_PPC_STATISTICS
+
+#ifdef PPC_DEBUG_DISAS
+# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
+#else
+# define LOG_DISAS(...) do { } while (0)
+#endif
+/*****************************************************************************/
+/* Code translation helpers */
+
+/* global register indexes */
+static TCGv_env cpu_env;
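+/* Buffer for the nul-terminated register names registered with TCG below,
+ * e.g. 10*3 + 22*4 bytes for the GPRs since "r0".."r9" need 3 bytes each
+ * and "r10".."r31" need 4 */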
+static char cpu_reg_names[10*3 + 22*4 /* GPR */
+ + 10*4 + 22*5 /* SPE GPRh */
+ + 10*4 + 22*5 /* FPR */
+ + 2*(10*6 + 22*7) /* AVRh, AVRl */
+ + 10*5 + 22*6 /* VSR */
+ + 8*5 /* CRF */];
+static TCGv cpu_gpr[32];
+static TCGv cpu_gprh[32];
+static TCGv_i64 cpu_fpr[32];
+static TCGv_i64 cpu_avrh[32], cpu_avrl[32];
+static TCGv_i64 cpu_vsr[32];
+static TCGv_i32 cpu_crf[8];
+static TCGv cpu_nip;
+static TCGv cpu_msr;
+static TCGv cpu_ctr;
+static TCGv cpu_lr;
+#if defined(TARGET_PPC64)
+static TCGv cpu_cfar;
+#endif
+static TCGv cpu_xer, cpu_so, cpu_ov, cpu_ca;
+static TCGv cpu_reserve;
+static TCGv cpu_fpscr;
+static TCGv_i32 cpu_access_type;
+
+#include "exec/gen-icount.h"
+
+void ppc_translate_init(void)
+{
+ int i;
+ char* p;
+ size_t cpu_reg_names_size;
+ static int done_init = 0;
+
+    if (done_init) {
+        return;
+    }
+
+ cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+ tcg_ctx.tcg_env = cpu_env;
+
+ p = cpu_reg_names;
+ cpu_reg_names_size = sizeof(cpu_reg_names);
+
+ for (i = 0; i < 8; i++) {
+ snprintf(p, cpu_reg_names_size, "crf%d", i);
+ cpu_crf[i] = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUPPCState, crf[i]), p);
+ p += 5;
+ cpu_reg_names_size -= 5;
+ }
+
+ for (i = 0; i < 32; i++) {
+ snprintf(p, cpu_reg_names_size, "r%d", i);
+ cpu_gpr[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUPPCState, gpr[i]), p);
+ p += (i < 10) ? 3 : 4;
+ cpu_reg_names_size -= (i < 10) ? 3 : 4;
+ snprintf(p, cpu_reg_names_size, "r%dH", i);
+ cpu_gprh[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUPPCState, gprh[i]), p);
+ p += (i < 10) ? 4 : 5;
+ cpu_reg_names_size -= (i < 10) ? 4 : 5;
+
+ snprintf(p, cpu_reg_names_size, "fp%d", i);
+ cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUPPCState, fpr[i]), p);
+ p += (i < 10) ? 4 : 5;
+ cpu_reg_names_size -= (i < 10) ? 4 : 5;
+
+ snprintf(p, cpu_reg_names_size, "avr%dH", i);
+#ifdef HOST_WORDS_BIGENDIAN
+ cpu_avrh[i] = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUPPCState, avr[i].u64[0]), p);
+#else
+ cpu_avrh[i] = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUPPCState, avr[i].u64[1]), p);
+#endif
+ p += (i < 10) ? 6 : 7;
+ cpu_reg_names_size -= (i < 10) ? 6 : 7;
+
+ snprintf(p, cpu_reg_names_size, "avr%dL", i);
+#ifdef HOST_WORDS_BIGENDIAN
+ cpu_avrl[i] = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUPPCState, avr[i].u64[1]), p);
+#else
+ cpu_avrl[i] = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUPPCState, avr[i].u64[0]), p);
+#endif
+ p += (i < 10) ? 6 : 7;
+ cpu_reg_names_size -= (i < 10) ? 6 : 7;
+ snprintf(p, cpu_reg_names_size, "vsr%d", i);
+ cpu_vsr[i] = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUPPCState, vsr[i]), p);
+ p += (i < 10) ? 5 : 6;
+ cpu_reg_names_size -= (i < 10) ? 5 : 6;
+ }
+
+ cpu_nip = tcg_global_mem_new(cpu_env,
+ offsetof(CPUPPCState, nip), "nip");
+
+ cpu_msr = tcg_global_mem_new(cpu_env,
+ offsetof(CPUPPCState, msr), "msr");
+
+ cpu_ctr = tcg_global_mem_new(cpu_env,
+ offsetof(CPUPPCState, ctr), "ctr");
+
+ cpu_lr = tcg_global_mem_new(cpu_env,
+ offsetof(CPUPPCState, lr), "lr");
+
+#if defined(TARGET_PPC64)
+ cpu_cfar = tcg_global_mem_new(cpu_env,
+ offsetof(CPUPPCState, cfar), "cfar");
+#endif
+
+ cpu_xer = tcg_global_mem_new(cpu_env,
+ offsetof(CPUPPCState, xer), "xer");
+ cpu_so = tcg_global_mem_new(cpu_env,
+ offsetof(CPUPPCState, so), "SO");
+ cpu_ov = tcg_global_mem_new(cpu_env,
+ offsetof(CPUPPCState, ov), "OV");
+ cpu_ca = tcg_global_mem_new(cpu_env,
+ offsetof(CPUPPCState, ca), "CA");
+
+ cpu_reserve = tcg_global_mem_new(cpu_env,
+ offsetof(CPUPPCState, reserve_addr),
+ "reserve_addr");
+
+ cpu_fpscr = tcg_global_mem_new(cpu_env,
+ offsetof(CPUPPCState, fpscr), "fpscr");
+
+ cpu_access_type = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUPPCState, access_type), "access_type");
+
+ done_init = 1;
+}
+
+/* internal defines */
+struct DisasContext {
+ struct TranslationBlock *tb;
+ target_ulong nip;
+ uint32_t opcode;
+ uint32_t exception;
+ /* Routine used to access memory */
+ bool pr, hv, dr, le_mode;
+ bool lazy_tlb_flush;
+ bool need_access_type;
+ int mem_idx;
+ int access_type;
+ /* Translation flags */
+ TCGMemOp default_tcg_memop_mask;
+#if defined(TARGET_PPC64)
+ bool sf_mode;
+ bool has_cfar;
+#endif
+ bool fpu_enabled;
+ bool altivec_enabled;
+ bool vsx_enabled;
+ bool spe_enabled;
+ bool tm_enabled;
+ ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */
+ int singlestep_enabled;
+ uint64_t insns_flags;
+ uint64_t insns_flags2;
+};
+
+/* Return true iff byteswap is needed in a scalar memop */
+static inline bool need_byteswap(const DisasContext *ctx)
+{
+#if defined(TARGET_WORDS_BIGENDIAN)
+ return ctx->le_mode;
+#else
+ return !ctx->le_mode;
+#endif
+}
+
+/* True when active word size < size of target_long. */
+#ifdef TARGET_PPC64
+# define NARROW_MODE(C) (!(C)->sf_mode)
+#else
+# define NARROW_MODE(C) 0
+#endif
+
+struct opc_handler_t {
+ /* invalid bits for instruction 1 (Rc(opcode) == 0) */
+ uint32_t inval1;
+ /* invalid bits for instruction 2 (Rc(opcode) == 1) */
+ uint32_t inval2;
+ /* instruction type */
+ uint64_t type;
+ /* extended instruction type */
+ uint64_t type2;
+ /* handler */
+ void (*handler)(DisasContext *ctx);
+#if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU)
+ const char *oname;
+#endif
+#if defined(DO_PPC_STATISTICS)
+ uint64_t count;
+#endif
+};
+
+static inline void gen_set_access_type(DisasContext *ctx, int access_type)
+{
+ if (ctx->need_access_type && ctx->access_type != access_type) {
+ tcg_gen_movi_i32(cpu_access_type, access_type);
+ ctx->access_type = access_type;
+ }
+}
+
+static inline void gen_update_nip(DisasContext *ctx, target_ulong nip)
+{
+ if (NARROW_MODE(ctx)) {
+ nip = (uint32_t)nip;
+ }
+ tcg_gen_movi_tl(cpu_nip, nip);
+}
+
+static void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t error)
+{
+ TCGv_i32 t0, t1;
+
+    /* These are all synchronous exceptions; we set the PC back to
+ * the faulting instruction
+ */
+ if (ctx->exception == POWERPC_EXCP_NONE) {
+ gen_update_nip(ctx, ctx->nip - 4);
+ }
+ t0 = tcg_const_i32(excp);
+ t1 = tcg_const_i32(error);
+ gen_helper_raise_exception_err(cpu_env, t0, t1);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+ ctx->exception = (excp);
+}
+
+static void gen_exception(DisasContext *ctx, uint32_t excp)
+{
+ TCGv_i32 t0;
+
+    /* These are all synchronous exceptions; we set the PC back to
+ * the faulting instruction
+ */
+ if (ctx->exception == POWERPC_EXCP_NONE) {
+ gen_update_nip(ctx, ctx->nip - 4);
+ }
+ t0 = tcg_const_i32(excp);
+ gen_helper_raise_exception(cpu_env, t0);
+ tcg_temp_free_i32(t0);
+ ctx->exception = (excp);
+}
+
+static void gen_exception_nip(DisasContext *ctx, uint32_t excp,
+ target_ulong nip)
+{
+ TCGv_i32 t0;
+
+ gen_update_nip(ctx, nip);
+ t0 = tcg_const_i32(excp);
+ gen_helper_raise_exception(cpu_env, t0);
+ tcg_temp_free_i32(t0);
+ ctx->exception = (excp);
+}
+
+static void gen_debug_exception(DisasContext *ctx)
+{
+ TCGv_i32 t0;
+
+    /* These are all synchronous exceptions; we set the PC back to
+ * the faulting instruction
+ */
+ if ((ctx->exception != POWERPC_EXCP_BRANCH) &&
+ (ctx->exception != POWERPC_EXCP_SYNC)) {
+ gen_update_nip(ctx, ctx->nip);
+ }
+ t0 = tcg_const_i32(EXCP_DEBUG);
+ gen_helper_raise_exception(cpu_env, t0);
+ tcg_temp_free_i32(t0);
+}
+
+static inline void gen_inval_exception(DisasContext *ctx, uint32_t error)
+{
+ /* Will be converted to program check if needed */
+ gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_INVAL | error);
+}
+
+static inline void gen_priv_exception(DisasContext *ctx, uint32_t error)
+{
+ gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_PRIV | error);
+}
+
+static inline void gen_hvpriv_exception(DisasContext *ctx, uint32_t error)
+{
+ /* Will be converted to program check if needed */
+ gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_PRIV | error);
+}
+
+/* Stop translation */
+static inline void gen_stop_exception(DisasContext *ctx)
+{
+ gen_update_nip(ctx, ctx->nip);
+ ctx->exception = POWERPC_EXCP_STOP;
+}
+
+#ifndef CONFIG_USER_ONLY
+/* No need to update nip here, as execution flow will change */
+static inline void gen_sync_exception(DisasContext *ctx)
+{
+ ctx->exception = POWERPC_EXCP_SYNC;
+}
+#endif
+
+#define GEN_HANDLER(name, opc1, opc2, opc3, inval, type) \
+GEN_OPCODE(name, opc1, opc2, opc3, inval, type, PPC_NONE)
+
+#define GEN_HANDLER_E(name, opc1, opc2, opc3, inval, type, type2) \
+GEN_OPCODE(name, opc1, opc2, opc3, inval, type, type2)
+
+#define GEN_HANDLER2(name, onam, opc1, opc2, opc3, inval, type) \
+GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, PPC_NONE)
+
+#define GEN_HANDLER2_E(name, onam, opc1, opc2, opc3, inval, type, type2) \
+GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, type2)
+
+#define GEN_HANDLER_E_2(name, opc1, opc2, opc3, opc4, inval, type, type2) \
+GEN_OPCODE3(name, opc1, opc2, opc3, opc4, inval, type, type2)
+
+#define GEN_HANDLER2_E_2(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2) \
+GEN_OPCODE4(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2)
+
+typedef struct opcode_t {
+ unsigned char opc1, opc2, opc3, opc4;
+#if HOST_LONG_BITS == 64 /* Explicitly align to 64 bits */
+ unsigned char pad[4];
+#endif
+ opc_handler_t handler;
+ const char *oname;
+} opcode_t;
+
+/* Helpers for priv. check */
+#define GEN_PRIV \
+ do { \
+ gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; \
+ } while (0)
+
+#if defined(CONFIG_USER_ONLY)
+#define CHK_HV GEN_PRIV
+#define CHK_SV GEN_PRIV
+#define CHK_HVRM GEN_PRIV
+#else
+#define CHK_HV \
+ do { \
+ if (unlikely(ctx->pr || !ctx->hv)) { \
+ GEN_PRIV; \
+ } \
+ } while (0)
+#define CHK_SV \
+ do { \
+ if (unlikely(ctx->pr)) { \
+ GEN_PRIV; \
+ } \
+ } while (0)
+#define CHK_HVRM \
+ do { \
+ if (unlikely(ctx->pr || !ctx->hv || ctx->dr)) { \
+ GEN_PRIV; \
+ } \
+ } while (0)
+#endif
+
+#define CHK_NONE
+
+
+/*****************************************************************************/
+/*** Instruction decoding ***/
+#define EXTRACT_HELPER(name, shift, nb) \
+static inline uint32_t name(uint32_t opcode) \
+{ \
+ return (opcode >> (shift)) & ((1 << (nb)) - 1); \
+}
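+/* e.g. EXTRACT_HELPER(rD, 21, 5) defines rD(opcode), which returns
+ * (opcode >> 21) & 0x1F */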
+
+#define EXTRACT_SHELPER(name, shift, nb) \
+static inline int32_t name(uint32_t opcode) \
+{ \
+ return (int16_t)((opcode >> (shift)) & ((1 << (nb)) - 1)); \
+}
+
+#define EXTRACT_HELPER_SPLIT(name, shift1, nb1, shift2, nb2) \
+static inline uint32_t name(uint32_t opcode) \
+{ \
+ return (((opcode >> (shift1)) & ((1 << (nb1)) - 1)) << nb2) | \
+ ((opcode >> (shift2)) & ((1 << (nb2)) - 1)); \
+}
+
+#define EXTRACT_HELPER_DXFORM(name, \
+ d0_bits, shift_op_d0, shift_d0, \
+ d1_bits, shift_op_d1, shift_d1, \
+ d2_bits, shift_op_d2, shift_d2) \
+static inline int16_t name(uint32_t opcode) \
+{ \
+ return \
+ (((opcode >> (shift_op_d0)) & ((1 << (d0_bits)) - 1)) << (shift_d0)) | \
+ (((opcode >> (shift_op_d1)) & ((1 << (d1_bits)) - 1)) << (shift_d1)) | \
+ (((opcode >> (shift_op_d2)) & ((1 << (d2_bits)) - 1)) << (shift_d2)); \
+}
+
+
+/* Opcode part 1 */
+EXTRACT_HELPER(opc1, 26, 6);
+/* Opcode part 2 */
+EXTRACT_HELPER(opc2, 1, 5);
+/* Opcode part 3 */
+EXTRACT_HELPER(opc3, 6, 5);
+/* Opcode part 4 */
+EXTRACT_HELPER(opc4, 16, 5);
+/* Update Cr0 flags */
+EXTRACT_HELPER(Rc, 0, 1);
+/* Update Cr6 flags (Altivec) */
+EXTRACT_HELPER(Rc21, 10, 1);
+/* Destination */
+EXTRACT_HELPER(rD, 21, 5);
+/* Source */
+EXTRACT_HELPER(rS, 21, 5);
+/* First operand */
+EXTRACT_HELPER(rA, 16, 5);
+/* Second operand */
+EXTRACT_HELPER(rB, 11, 5);
+/* Third operand */
+EXTRACT_HELPER(rC, 6, 5);
+/*** Get CRn ***/
+EXTRACT_HELPER(crfD, 23, 3);
+EXTRACT_HELPER(crfS, 18, 3);
+EXTRACT_HELPER(crbD, 21, 5);
+EXTRACT_HELPER(crbA, 16, 5);
+EXTRACT_HELPER(crbB, 11, 5);
+/* SPR / TBL */
+EXTRACT_HELPER(_SPR, 11, 10);
+static inline uint32_t SPR(uint32_t opcode)
+{
+ uint32_t sprn = _SPR(opcode);
+
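+    /* The 10-bit SPR field encodes the SPR number with its two 5-bit
+     * halves swapped; swap them back to recover the architected number */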
+ return ((sprn >> 5) & 0x1F) | ((sprn & 0x1F) << 5);
+}
+/*** Get constants ***/
+/* 16 bits signed immediate value */
+EXTRACT_SHELPER(SIMM, 0, 16);
+/* 16 bits unsigned immediate value */
+EXTRACT_HELPER(UIMM, 0, 16);
+/* 5 bits signed immediate value */
+EXTRACT_HELPER(SIMM5, 16, 5);
+/* 5 bits unsigned immediate value */
+EXTRACT_HELPER(UIMM5, 16, 5);
+/* 4 bits unsigned immediate value */
+EXTRACT_HELPER(UIMM4, 16, 4);
+/* Bit count */
+EXTRACT_HELPER(NB, 11, 5);
+/* Shift count */
+EXTRACT_HELPER(SH, 11, 5);
+/* Vector shift count */
+EXTRACT_HELPER(VSH, 6, 4);
+/* Mask start */
+EXTRACT_HELPER(MB, 6, 5);
+/* Mask end */
+EXTRACT_HELPER(ME, 1, 5);
+/* Trap operand */
+EXTRACT_HELPER(TO, 21, 5);
+
+EXTRACT_HELPER(CRM, 12, 8);
+
+#ifndef CONFIG_USER_ONLY
+EXTRACT_HELPER(SR, 16, 4);
+#endif
+
+/* mtfsf/mtfsfi */
+EXTRACT_HELPER(FPBF, 23, 3);
+EXTRACT_HELPER(FPIMM, 12, 4);
+EXTRACT_HELPER(FPL, 25, 1);
+EXTRACT_HELPER(FPFLM, 17, 8);
+EXTRACT_HELPER(FPW, 16, 1);
+
+/* addpcis */
+EXTRACT_HELPER_DXFORM(DX, 10, 6, 6, 5, 16, 1, 1, 0, 0)
+#if defined(TARGET_PPC64)
+/* darn */
+EXTRACT_HELPER(L, 16, 2);
+#endif
+
+/*** Jump target decoding ***/
+/* Immediate address */
+static inline target_ulong LI(uint32_t opcode)
+{
+ return (opcode >> 0) & 0x03FFFFFC;
+}
+
+static inline uint32_t BD(uint32_t opcode)
+{
+ return (opcode >> 0) & 0xFFFC;
+}
+
+EXTRACT_HELPER(BO, 21, 5);
+EXTRACT_HELPER(BI, 16, 5);
+/* Absolute/relative address */
+EXTRACT_HELPER(AA, 1, 1);
+/* Link */
+EXTRACT_HELPER(LK, 0, 1);
+
+/* DFP Z22-form */
+EXTRACT_HELPER(DCM, 10, 6)
+
+/* DFP Z23-form */
+EXTRACT_HELPER(RMC, 9, 2)
+
+EXTRACT_HELPER_SPLIT(xT, 0, 1, 21, 5);
+EXTRACT_HELPER_SPLIT(xS, 0, 1, 21, 5);
+EXTRACT_HELPER_SPLIT(xA, 2, 1, 16, 5);
+EXTRACT_HELPER_SPLIT(xB, 1, 1, 11, 5);
+EXTRACT_HELPER_SPLIT(xC, 3, 1, 6, 5);
+EXTRACT_HELPER(DM, 8, 2);
+EXTRACT_HELPER(UIM, 16, 2);
+EXTRACT_HELPER(SHW, 8, 2);
+EXTRACT_HELPER(SP, 19, 2);
+EXTRACT_HELPER(IMM8, 11, 8);
+
+/*****************************************************************************/
+/* PowerPC instructions table */
+
+#if defined(DO_PPC_STATISTICS)
+#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2) \
+{ \
+ .opc1 = op1, \
+ .opc2 = op2, \
+ .opc3 = op3, \
+ .opc4 = 0xff, \
+ .handler = { \
+ .inval1 = invl, \
+ .type = _typ, \
+ .type2 = _typ2, \
+ .handler = &gen_##name, \
+ .oname = stringify(name), \
+ }, \
+ .oname = stringify(name), \
+}
+#define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2) \
+{ \
+ .opc1 = op1, \
+ .opc2 = op2, \
+ .opc3 = op3, \
+ .opc4 = 0xff, \
+ .handler = { \
+ .inval1 = invl1, \
+ .inval2 = invl2, \
+ .type = _typ, \
+ .type2 = _typ2, \
+ .handler = &gen_##name, \
+ .oname = stringify(name), \
+ }, \
+ .oname = stringify(name), \
+}
+#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2) \
+{ \
+ .opc1 = op1, \
+ .opc2 = op2, \
+ .opc3 = op3, \
+ .opc4 = 0xff, \
+ .handler = { \
+ .inval1 = invl, \
+ .type = _typ, \
+ .type2 = _typ2, \
+ .handler = &gen_##name, \
+ .oname = onam, \
+ }, \
+ .oname = onam, \
+}
+#define GEN_OPCODE3(name, op1, op2, op3, op4, invl, _typ, _typ2) \
+{ \
+ .opc1 = op1, \
+ .opc2 = op2, \
+ .opc3 = op3, \
+ .opc4 = op4, \
+ .handler = { \
+ .inval1 = invl, \
+ .type = _typ, \
+ .type2 = _typ2, \
+ .handler = &gen_##name, \
+ .oname = stringify(name), \
+ }, \
+ .oname = stringify(name), \
+}
+#define GEN_OPCODE4(name, onam, op1, op2, op3, op4, invl, _typ, _typ2) \
+{ \
+ .opc1 = op1, \
+ .opc2 = op2, \
+ .opc3 = op3, \
+ .opc4 = op4, \
+ .handler = { \
+ .inval1 = invl, \
+ .type = _typ, \
+ .type2 = _typ2, \
+ .handler = &gen_##name, \
+ .oname = onam, \
+ }, \
+ .oname = onam, \
+}
+#else
+#define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2) \
+{ \
+ .opc1 = op1, \
+ .opc2 = op2, \
+ .opc3 = op3, \
+ .opc4 = 0xff, \
+ .handler = { \
+ .inval1 = invl, \
+ .type = _typ, \
+ .type2 = _typ2, \
+ .handler = &gen_##name, \
+ }, \
+ .oname = stringify(name), \
+}
+#define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2) \
+{ \
+ .opc1 = op1, \
+ .opc2 = op2, \
+ .opc3 = op3, \
+ .opc4 = 0xff, \
+ .handler = { \
+ .inval1 = invl1, \
+ .inval2 = invl2, \
+ .type = _typ, \
+ .type2 = _typ2, \
+ .handler = &gen_##name, \
+ }, \
+ .oname = stringify(name), \
+}
+#define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2) \
+{ \
+ .opc1 = op1, \
+ .opc2 = op2, \
+ .opc3 = op3, \
+ .opc4 = 0xff, \
+ .handler = { \
+ .inval1 = invl, \
+ .type = _typ, \
+ .type2 = _typ2, \
+ .handler = &gen_##name, \
+ }, \
+ .oname = onam, \
+}
+#define GEN_OPCODE3(name, op1, op2, op3, op4, invl, _typ, _typ2) \
+{ \
+ .opc1 = op1, \
+ .opc2 = op2, \
+ .opc3 = op3, \
+ .opc4 = op4, \
+ .handler = { \
+ .inval1 = invl, \
+ .type = _typ, \
+ .type2 = _typ2, \
+ .handler = &gen_##name, \
+ }, \
+ .oname = stringify(name), \
+}
+#define GEN_OPCODE4(name, onam, op1, op2, op3, op4, invl, _typ, _typ2) \
+{ \
+ .opc1 = op1, \
+ .opc2 = op2, \
+ .opc3 = op3, \
+ .opc4 = op4, \
+ .handler = { \
+ .inval1 = invl, \
+ .type = _typ, \
+ .type2 = _typ2, \
+ .handler = &gen_##name, \
+ }, \
+ .oname = onam, \
+}
+#endif
+
+/* SPR load/store helpers */
+static inline void gen_load_spr(TCGv t, int reg)
+{
+ tcg_gen_ld_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
+}
+
+static inline void gen_store_spr(int reg, TCGv t)
+{
+ tcg_gen_st_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg]));
+}
+
+/* Invalid instruction */
+static void gen_invalid(DisasContext *ctx)
+{
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+}
+
+static opc_handler_t invalid_handler = {
+ .inval1 = 0xFFFFFFFF,
+ .inval2 = 0xFFFFFFFF,
+ .type = PPC_NONE,
+ .type2 = PPC_NONE,
+ .handler = gen_invalid,
+};
+
+/*** Integer comparison ***/
+
+static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+
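+    /* Build the CR field: start from the SO bit, then OR in LT, GT and EQ
+     * according to the (signed or unsigned) comparison */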
+ tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_so);
+
+ tcg_gen_setcond_tl((s ? TCG_COND_LT: TCG_COND_LTU), t0, arg0, arg1);
+ tcg_gen_trunc_tl_i32(t1, t0);
+ tcg_gen_shli_i32(t1, t1, CRF_LT);
+ tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t1);
+
+ tcg_gen_setcond_tl((s ? TCG_COND_GT: TCG_COND_GTU), t0, arg0, arg1);
+ tcg_gen_trunc_tl_i32(t1, t0);
+ tcg_gen_shli_i32(t1, t1, CRF_GT);
+ tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t1);
+
+ tcg_gen_setcond_tl(TCG_COND_EQ, t0, arg0, arg1);
+ tcg_gen_trunc_tl_i32(t1, t0);
+ tcg_gen_shli_i32(t1, t1, CRF_EQ);
+ tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t1);
+
+ tcg_temp_free(t0);
+ tcg_temp_free_i32(t1);
+}
+
+static inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf)
+{
+ TCGv t0 = tcg_const_tl(arg1);
+ gen_op_cmp(arg0, t0, s, crf);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf)
+{
+ TCGv t0, t1;
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ if (s) {
+ tcg_gen_ext32s_tl(t0, arg0);
+ tcg_gen_ext32s_tl(t1, arg1);
+ } else {
+ tcg_gen_ext32u_tl(t0, arg0);
+ tcg_gen_ext32u_tl(t1, arg1);
+ }
+ gen_op_cmp(t0, t1, s, crf);
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_op_cmpi32(TCGv arg0, target_ulong arg1, int s, int crf)
+{
+ TCGv t0 = tcg_const_tl(arg1);
+ gen_op_cmp32(arg0, t0, s, crf);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_set_Rc0(DisasContext *ctx, TCGv reg)
+{
+ if (NARROW_MODE(ctx)) {
+ gen_op_cmpi32(reg, 0, 1, 0);
+ } else {
+ gen_op_cmpi(reg, 0, 1, 0);
+ }
+}
+
+/* cmp */
+static void gen_cmp(DisasContext *ctx)
+{
+ if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
+ gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
+ 1, crfD(ctx->opcode));
+ } else {
+ gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
+ 1, crfD(ctx->opcode));
+ }
+}
+
+/* cmpi */
+static void gen_cmpi(DisasContext *ctx)
+{
+ if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
+ gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
+ 1, crfD(ctx->opcode));
+ } else {
+ gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], SIMM(ctx->opcode),
+ 1, crfD(ctx->opcode));
+ }
+}
+
+/* cmpl */
+static void gen_cmpl(DisasContext *ctx)
+{
+ if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
+ gen_op_cmp(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
+ 0, crfD(ctx->opcode));
+ } else {
+ gen_op_cmp32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
+ 0, crfD(ctx->opcode));
+ }
+}
+
+/* cmpli */
+static void gen_cmpli(DisasContext *ctx)
+{
+ if ((ctx->opcode & 0x00200000) && (ctx->insns_flags & PPC_64B)) {
+ gen_op_cmpi(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
+ 0, crfD(ctx->opcode));
+ } else {
+ gen_op_cmpi32(cpu_gpr[rA(ctx->opcode)], UIMM(ctx->opcode),
+ 0, crfD(ctx->opcode));
+ }
+}
+
+/* cmprb - range comparison: isupper, isalpha, islower */
+static void gen_cmprb(DisasContext *ctx)
+{
+ TCGv_i32 src1 = tcg_temp_new_i32();
+ TCGv_i32 src2 = tcg_temp_new_i32();
+ TCGv_i32 src2lo = tcg_temp_new_i32();
+ TCGv_i32 src2hi = tcg_temp_new_i32();
+ TCGv_i32 crf = cpu_crf[crfD(ctx->opcode)];
+
+ tcg_gen_trunc_tl_i32(src1, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_trunc_tl_i32(src2, cpu_gpr[rB(ctx->opcode)]);
+
+ tcg_gen_andi_i32(src1, src1, 0xFF);
+ tcg_gen_ext8u_i32(src2lo, src2);
+ tcg_gen_shri_i32(src2, src2, 8);
+ tcg_gen_ext8u_i32(src2hi, src2);
+
+ tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1);
+ tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi);
+ tcg_gen_and_i32(crf, src2lo, src2hi);
+
+ if (ctx->opcode & 0x00200000) {
+ tcg_gen_shri_i32(src2, src2, 8);
+ tcg_gen_ext8u_i32(src2lo, src2);
+ tcg_gen_shri_i32(src2, src2, 8);
+ tcg_gen_ext8u_i32(src2hi, src2);
+ tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1);
+ tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi);
+ tcg_gen_and_i32(src2lo, src2lo, src2hi);
+ tcg_gen_or_i32(crf, crf, src2lo);
+ }
+ tcg_gen_shli_i32(crf, crf, CRF_GT);
+ tcg_temp_free_i32(src1);
+ tcg_temp_free_i32(src2);
+ tcg_temp_free_i32(src2lo);
+ tcg_temp_free_i32(src2hi);
+}
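+
+/* Illustrative use of the range check above: with the low byte of rA
+ * holding 'q' (0x71) and the low two bytes of rB holding 0x7a61 ('z','a'),
+ * both LEU tests succeed (0x61 <= 0x71 <= 0x7a), so the in-range result
+ * lands in the CRF_GT bit of the target CR field.  The second range
+ * (bytes 2-3 of rB) is only consulted when the L bit (0x00200000) is set.
+ */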
+
+#if defined(TARGET_PPC64)
+/* cmpeqb */
+static void gen_cmpeqb(DisasContext *ctx)
+{
+ gen_helper_cmpeqb(cpu_crf[crfD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rB(ctx->opcode)]);
+}
+#endif
+
+/* isel (PowerPC 2.03 specification) */
+static void gen_isel(DisasContext *ctx)
+{
+ uint32_t bi = rC(ctx->opcode);
+ uint32_t mask = 0x08 >> (bi & 0x03);
+ TCGv t0 = tcg_temp_new();
+ TCGv zr;
+
+ tcg_gen_extu_i32_tl(t0, cpu_crf[bi >> 2]);
+ tcg_gen_andi_tl(t0, t0, mask);
+
+ zr = tcg_const_tl(0);
+ tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr[rD(ctx->opcode)], t0, zr,
+ rA(ctx->opcode) ? cpu_gpr[rA(ctx->opcode)] : zr,
+ cpu_gpr[rB(ctx->opcode)]);
+ tcg_temp_free(zr);
+ tcg_temp_free(t0);
+}
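+
+/* Note on the bit selection above: bi is the CR bit number (0-31), so
+ * bi >> 2 picks the 4-bit CR field and 0x08 >> (bi & 0x03) isolates the
+ * LT/GT/EQ/SO bit within it; e.g. bi == 2 tests CR0[EQ] (mask 0x02).
+ */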
+
+/* cmpb: PowerPC 2.05 specification */
+static void gen_cmpb(DisasContext *ctx)
+{
+ gen_helper_cmpb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)],
+ cpu_gpr[rB(ctx->opcode)]);
+}
+
+/*** Integer arithmetic ***/
+
+static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0,
+ TCGv arg1, TCGv arg2, int sub)
+{
+ TCGv t0 = tcg_temp_new();
+
+ tcg_gen_xor_tl(cpu_ov, arg0, arg2);
+ tcg_gen_xor_tl(t0, arg1, arg2);
+ if (sub) {
+ tcg_gen_and_tl(cpu_ov, cpu_ov, t0);
+ } else {
+ tcg_gen_andc_tl(cpu_ov, cpu_ov, t0);
+ }
+ tcg_temp_free(t0);
+ if (NARROW_MODE(ctx)) {
+ tcg_gen_ext32s_tl(cpu_ov, cpu_ov);
+ }
+ tcg_gen_shri_tl(cpu_ov, cpu_ov, TARGET_LONG_BITS - 1);
+ tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
+}
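+
+/* The XOR sequence above is the usual sign-bit overflow rule: an addition
+ * overflows iff the operands share a sign and the result's sign differs,
+ * i.e. (res ^ arg2) & ~(arg1 ^ arg2) has its top bit set; for subtraction
+ * (res = arg2 - arg1) the complement is dropped.  The final shift moves
+ * that top bit into bit 0 of cpu_ov.
+ */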
+
+/* Common add function */
+static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1,
+ TCGv arg2, bool add_ca, bool compute_ca,
+ bool compute_ov, bool compute_rc0)
+{
+ TCGv t0 = ret;
+
+ if (compute_ca || compute_ov) {
+ t0 = tcg_temp_new();
+ }
+
+ if (compute_ca) {
+ if (NARROW_MODE(ctx)) {
+ /* Caution: a non-obvious corner case of the spec is that we
+ must produce the *entire* 64-bit addition, but produce the
+ carry into bit 32. */
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_xor_tl(t1, arg1, arg2); /* add without carry */
+ tcg_gen_add_tl(t0, arg1, arg2);
+ if (add_ca) {
+ tcg_gen_add_tl(t0, t0, cpu_ca);
+ }
+ tcg_gen_xor_tl(cpu_ca, t0, t1); /* bits changed w/ carry */
+ tcg_temp_free(t1);
+ tcg_gen_shri_tl(cpu_ca, cpu_ca, 32); /* extract bit 32 */
+ tcg_gen_andi_tl(cpu_ca, cpu_ca, 1);
+ } else {
+ TCGv zero = tcg_const_tl(0);
+ if (add_ca) {
+ tcg_gen_add2_tl(t0, cpu_ca, arg1, zero, cpu_ca, zero);
+ tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, arg2, zero);
+ } else {
+ tcg_gen_add2_tl(t0, cpu_ca, arg1, zero, arg2, zero);
+ }
+ tcg_temp_free(zero);
+ }
+ } else {
+ tcg_gen_add_tl(t0, arg1, arg2);
+ if (add_ca) {
+ tcg_gen_add_tl(t0, t0, cpu_ca);
+ }
+ }
+
+ if (compute_ov) {
+ gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 0);
+ }
+ if (unlikely(compute_rc0)) {
+ gen_set_Rc0(ctx, t0);
+ }
+
+ if (!TCGV_EQUAL(t0, ret)) {
+ tcg_gen_mov_tl(ret, t0);
+ tcg_temp_free(t0);
+ }
+}
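+
+/* Sketch of the narrow-mode carry computation above: with t1 = arg1 ^ arg2
+ * (the sum with every carry suppressed), t0 ^ t1 has a 1 in each bit that
+ * received a carry-in, so bit 32 of it is exactly the carry out of bit 31.
+ * E.g. for arg1 = arg2 = 0x0000000080000000, t0 = 0x100000000, t1 = 0 and
+ * hence CA = 1.
+ */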
+/* Add functions with two operands */
+#define GEN_INT_ARITH_ADD(name, opc3, add_ca, compute_ca, compute_ov) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \
+ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
+ add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \
+}
+/* Add functions with one operand and one immediate */
+#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, \
+ add_ca, compute_ca, compute_ov) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv t0 = tcg_const_tl(const_val); \
+ gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \
+ cpu_gpr[rA(ctx->opcode)], t0, \
+ add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \
+ tcg_temp_free(t0); \
+}
+
+/* add add. addo addo. */
+GEN_INT_ARITH_ADD(add, 0x08, 0, 0, 0)
+GEN_INT_ARITH_ADD(addo, 0x18, 0, 0, 1)
+/* addc addc. addco addco. */
+GEN_INT_ARITH_ADD(addc, 0x00, 0, 1, 0)
+GEN_INT_ARITH_ADD(addco, 0x10, 0, 1, 1)
+/* adde adde. addeo addeo. */
+GEN_INT_ARITH_ADD(adde, 0x04, 1, 1, 0)
+GEN_INT_ARITH_ADD(addeo, 0x14, 1, 1, 1)
+/* addme addme. addmeo addmeo. */
+GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, 1, 1, 0)
+GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, 1, 1, 1)
+/* addze addze. addzeo addzeo.*/
+GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, 1, 1, 0)
+GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, 1, 1, 1)
+/* addi */
+static void gen_addi(DisasContext *ctx)
+{
+ target_long simm = SIMM(ctx->opcode);
+
+ if (rA(ctx->opcode) == 0) {
+ /* li case */
+ tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm);
+ } else {
+ tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)],
+ cpu_gpr[rA(ctx->opcode)], simm);
+ }
+}
+/* addic addic.*/
+static inline void gen_op_addic(DisasContext *ctx, bool compute_rc0)
+{
+ TCGv c = tcg_const_tl(SIMM(ctx->opcode));
+ gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+ c, 0, 1, 0, compute_rc0);
+ tcg_temp_free(c);
+}
+
+static void gen_addic(DisasContext *ctx)
+{
+ gen_op_addic(ctx, 0);
+}
+
+static void gen_addic_(DisasContext *ctx)
+{
+ gen_op_addic(ctx, 1);
+}
+
+/* addis */
+static void gen_addis(DisasContext *ctx)
+{
+ target_long simm = SIMM(ctx->opcode);
+
+ if (rA(ctx->opcode) == 0) {
+ /* lis case */
+ tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], simm << 16);
+ } else {
+ tcg_gen_addi_tl(cpu_gpr[rD(ctx->opcode)],
+ cpu_gpr[rA(ctx->opcode)], simm << 16);
+ }
+}
+
+/* addpcis */
+static void gen_addpcis(DisasContext *ctx)
+{
+ target_long d = DX(ctx->opcode);
+
+ tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], ctx->nip + (d << 16));
+}
+
+static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1,
+ TCGv arg2, int sign, int compute_ov)
+{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 t3 = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(t0, arg1);
+ tcg_gen_trunc_tl_i32(t1, arg2);
+ if (sign) {
+ tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t0, INT_MIN);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, -1);
+ tcg_gen_and_i32(t2, t2, t3);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, 0);
+ tcg_gen_or_i32(t2, t2, t3);
+ tcg_gen_movi_i32(t3, 0);
+ tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_div_i32(t3, t0, t1);
+ tcg_gen_extu_i32_tl(ret, t3);
+ } else {
+ tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t1, 0);
+ tcg_gen_movi_i32(t3, 0);
+ tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_divu_i32(t3, t0, t1);
+ tcg_gen_extu_i32_tl(ret, t3);
+ }
+ if (compute_ov) {
+ tcg_gen_extu_i32_tl(cpu_ov, t2);
+ tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
+ }
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, ret);
+}
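+
+/* In the code generated above, t2 flags the two cases a host division must
+ * never see (divide by zero, and INT_MIN / -1 when signed); the movcond
+ * then substitutes the harmless divisor 1 so no trapping divide is
+ * performed, and t2 doubles as the OV value when compute_ov is requested.
+ */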
+/* Div functions */
+#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)], \
+ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
+ sign, compute_ov); \
+}
+/* divwu divwu. divwuo divwuo. */
+GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0);
+GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1);
+/* divw divw. divwo divwo. */
+GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0);
+GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1);
+
+/* div[wd]eu[o][.] */
+#define GEN_DIVE(name, hlpr, compute_ov) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 t0 = tcg_const_i32(compute_ov); \
+ gen_helper_##hlpr(cpu_gpr[rD(ctx->opcode)], cpu_env, \
+ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); \
+ tcg_temp_free_i32(t0); \
+ if (unlikely(Rc(ctx->opcode) != 0)) { \
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); \
+ } \
+}
+
+GEN_DIVE(divweu, divweu, 0);
+GEN_DIVE(divweuo, divweu, 1);
+GEN_DIVE(divwe, divwe, 0);
+GEN_DIVE(divweo, divwe, 1);
+
+#if defined(TARGET_PPC64)
+static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1,
+ TCGv arg2, int sign, int compute_ov)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+
+ tcg_gen_mov_i64(t0, arg1);
+ tcg_gen_mov_i64(t1, arg2);
+ if (sign) {
+ tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t0, INT64_MIN);
+ tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, -1);
+ tcg_gen_and_i64(t2, t2, t3);
+ tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, 0);
+ tcg_gen_or_i64(t2, t2, t3);
+ tcg_gen_movi_i64(t3, 0);
+ tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_div_i64(ret, t0, t1);
+ } else {
+ tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t1, 0);
+ tcg_gen_movi_i64(t3, 0);
+ tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_divu_i64(ret, t0, t1);
+ }
+ if (compute_ov) {
+ tcg_gen_mov_tl(cpu_ov, t2);
+ tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
+ }
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t3);
+
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, ret);
+}
+
+#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ gen_op_arith_divd(ctx, cpu_gpr[rD(ctx->opcode)], \
+ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
+ sign, compute_ov); \
+}
+/* divdu divdu. divduo divduo. */
+GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0);
+GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1);
+/* divd divd. divdo divdo. */
+GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0);
+GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1);
+
+GEN_DIVE(divdeu, divdeu, 0);
+GEN_DIVE(divdeuo, divdeu, 1);
+GEN_DIVE(divde, divde, 0);
+GEN_DIVE(divdeo, divde, 1);
+#endif
+
+static inline void gen_op_arith_modw(DisasContext *ctx, TCGv ret, TCGv arg1,
+ TCGv arg2, int sign)
+{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(t0, arg1);
+ tcg_gen_trunc_tl_i32(t1, arg2);
+ if (sign) {
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 t3 = tcg_temp_new_i32();
+ tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t0, INT_MIN);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, -1);
+ tcg_gen_and_i32(t2, t2, t3);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, 0);
+ tcg_gen_or_i32(t2, t2, t3);
+ tcg_gen_movi_i32(t3, 0);
+ tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_rem_i32(t3, t0, t1);
+ tcg_gen_ext_i32_tl(ret, t3);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ } else {
+ TCGv_i32 t2 = tcg_const_i32(1);
+ TCGv_i32 t3 = tcg_const_i32(0);
+ tcg_gen_movcond_i32(TCG_COND_EQ, t1, t1, t3, t2, t1);
+ tcg_gen_remu_i32(t3, t0, t1);
+ tcg_gen_extu_i32_tl(ret, t3);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ }
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+}
+
+#define GEN_INT_ARITH_MODW(name, opc3, sign) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ gen_op_arith_modw(ctx, cpu_gpr[rD(ctx->opcode)], \
+ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
+ sign); \
+}
+
+GEN_INT_ARITH_MODW(moduw, 0x08, 0);
+GEN_INT_ARITH_MODW(modsw, 0x18, 1);
+
+#if defined(TARGET_PPC64)
+static inline void gen_op_arith_modd(DisasContext *ctx, TCGv ret, TCGv arg1,
+ TCGv arg2, int sign)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
+ tcg_gen_mov_i64(t0, arg1);
+ tcg_gen_mov_i64(t1, arg2);
+ if (sign) {
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+ tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t0, INT64_MIN);
+ tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, -1);
+ tcg_gen_and_i64(t2, t2, t3);
+ tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, 0);
+ tcg_gen_or_i64(t2, t2, t3);
+ tcg_gen_movi_i64(t3, 0);
+ tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1);
+ tcg_gen_rem_i64(ret, t0, t1);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t3);
+ } else {
+ TCGv_i64 t2 = tcg_const_i64(1);
+ TCGv_i64 t3 = tcg_const_i64(0);
+ tcg_gen_movcond_i64(TCG_COND_EQ, t1, t1, t3, t2, t1);
+ tcg_gen_remu_i64(ret, t0, t1);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t3);
+ }
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+}
+
+#define GEN_INT_ARITH_MODD(name, opc3, sign) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ gen_op_arith_modd(ctx, cpu_gpr[rD(ctx->opcode)], \
+ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
+ sign); \
+}
+
+GEN_INT_ARITH_MODD(modud, 0x08, 0);
+GEN_INT_ARITH_MODD(modsd, 0x18, 1);
+#endif
+
+/* mulhw mulhw. */
+static void gen_mulhw(DisasContext *ctx)
+{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_muls2_i32(t0, t1, t0, t1);
+ tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+
+/* mulhwu mulhwu. */
+static void gen_mulhwu(DisasContext *ctx)
+{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_mulu2_i32(t0, t1, t0, t1);
+ tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+
+/* mullw mullw. */
+static void gen_mullw(DisasContext *ctx)
+{
+#if defined(TARGET_PPC64)
+ TCGv_i64 t0, t1;
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+ tcg_gen_ext32s_tl(t0, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_ext32s_tl(t1, cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_mul_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+#else
+ tcg_gen_mul_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rB(ctx->opcode)]);
+#endif
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+
+/* mullwo mullwo. */
+static void gen_mullwo(DisasContext *ctx)
+{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_muls2_i32(t0, t1, t0, t1);
+#if defined(TARGET_PPC64)
+ tcg_gen_concat_i32_i64(cpu_gpr[rD(ctx->opcode)], t0, t1);
+#else
+ tcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], t0);
+#endif
+
+ tcg_gen_sari_i32(t0, t0, 31);
+ tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t1);
+ tcg_gen_extu_i32_tl(cpu_ov, t0);
+ tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
+
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+
+/* mulli */
+static void gen_mulli(DisasContext *ctx)
+{
+ tcg_gen_muli_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+ SIMM(ctx->opcode));
+}
+
+#if defined(TARGET_PPC64)
+/* mulhd mulhd. */
+static void gen_mulhd(DisasContext *ctx)
+{
+ TCGv lo = tcg_temp_new();
+ tcg_gen_muls2_tl(lo, cpu_gpr[rD(ctx->opcode)],
+ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ tcg_temp_free(lo);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
+}
+
+/* mulhdu mulhdu. */
+static void gen_mulhdu(DisasContext *ctx)
+{
+ TCGv lo = tcg_temp_new();
+ tcg_gen_mulu2_tl(lo, cpu_gpr[rD(ctx->opcode)],
+ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ tcg_temp_free(lo);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
+}
+
+/* mulld mulld. */
+static void gen_mulld(DisasContext *ctx)
+{
+ tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rB(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+
+/* mulldo mulldo. */
+static void gen_mulldo(DisasContext *ctx)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
+ tcg_gen_muls2_i64(t0, t1, cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_mov_i64(cpu_gpr[rD(ctx->opcode)], t0);
+
+ tcg_gen_sari_i64(t0, t0, 63);
+ tcg_gen_setcond_i64(TCG_COND_NE, cpu_ov, t0, t1);
+ tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov);
+
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+ }
+}
+#endif
+
+/* Common subf function */
+static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1,
+ TCGv arg2, bool add_ca, bool compute_ca,
+ bool compute_ov, bool compute_rc0)
+{
+ TCGv t0 = ret;
+
+ if (compute_ca || compute_ov) {
+ t0 = tcg_temp_new();
+ }
+
+ if (compute_ca) {
+ /* dest = ~arg1 + arg2 [+ ca]. */
+ if (NARROW_MODE(ctx)) {
+ /* Caution: a non-obvious corner case of the spec is that we
+ must produce the *entire* 64-bit addition, but produce the
+ carry into bit 32. */
+ TCGv inv1 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_not_tl(inv1, arg1);
+ if (add_ca) {
+ tcg_gen_add_tl(t0, arg2, cpu_ca);
+ } else {
+ tcg_gen_addi_tl(t0, arg2, 1);
+ }
+ tcg_gen_xor_tl(t1, arg2, inv1); /* add without carry */
+ tcg_gen_add_tl(t0, t0, inv1);
+ tcg_temp_free(inv1);
+ tcg_gen_xor_tl(cpu_ca, t0, t1); /* bits changed w/ carry */
+ tcg_temp_free(t1);
+ tcg_gen_shri_tl(cpu_ca, cpu_ca, 32); /* extract bit 32 */
+ tcg_gen_andi_tl(cpu_ca, cpu_ca, 1);
+ } else if (add_ca) {
+ TCGv zero, inv1 = tcg_temp_new();
+ tcg_gen_not_tl(inv1, arg1);
+ zero = tcg_const_tl(0);
+ tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero);
+ tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero);
+ tcg_temp_free(zero);
+ tcg_temp_free(inv1);
+ } else {
+ tcg_gen_setcond_tl(TCG_COND_GEU, cpu_ca, arg2, arg1);
+ tcg_gen_sub_tl(t0, arg2, arg1);
+ }
+ } else if (add_ca) {
+ /* Since we're ignoring carry-out, we can simplify the
+ standard ~arg1 + arg2 + ca to arg2 - arg1 + ca - 1. */
+ tcg_gen_sub_tl(t0, arg2, arg1);
+ tcg_gen_add_tl(t0, t0, cpu_ca);
+ tcg_gen_subi_tl(t0, t0, 1);
+ } else {
+ tcg_gen_sub_tl(t0, arg2, arg1);
+ }
+
+ if (compute_ov) {
+ gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 1);
+ }
+ if (unlikely(compute_rc0)) {
+ gen_set_Rc0(ctx, t0);
+ }
+
+ if (!TCGV_EQUAL(t0, ret)) {
+ tcg_gen_mov_tl(ret, t0);
+ tcg_temp_free(t0);
+ }
+}
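+
+/* The simplification used above rests on the two's-complement identity
+ * ~x == -x - 1, so the architected ~arg1 + arg2 + ca becomes
+ * arg2 - arg1 + ca - 1; with the implicit carry-in of 1 (plain subf) this
+ * collapses to the bare arg2 - arg1 emitted in the simplest path.
+ */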
+/* Sub functions with two operands */
+#define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \
+ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \
+ add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \
+}
+/* Sub functions with one operand and one immediate */
+#define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val, \
+ add_ca, compute_ca, compute_ov) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv t0 = tcg_const_tl(const_val); \
+ gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \
+ cpu_gpr[rA(ctx->opcode)], t0, \
+ add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \
+ tcg_temp_free(t0); \
+}
+/* subf subf. subfo subfo. */
+GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
+GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
+/* subfc subfc. subfco subfco. */
+GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
+GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
+/* subfe subfe. subfeo subfeo. */
+GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
+GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
+/* subfme subfme. subfmeo subfmeo. */
+GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
+GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
+/* subfze subfze. subfzeo subfzeo.*/
+GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
+GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)
+
+/* subfic */
+static void gen_subfic(DisasContext *ctx)
+{
+ TCGv c = tcg_const_tl(SIMM(ctx->opcode));
+ gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+ c, 0, 1, 0, 0);
+ tcg_temp_free(c);
+}
+
+/* neg neg. nego nego. */
+static inline void gen_op_arith_neg(DisasContext *ctx, bool compute_ov)
+{
+ TCGv zero = tcg_const_tl(0);
+ gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+ zero, 0, 0, compute_ov, Rc(ctx->opcode));
+ tcg_temp_free(zero);
+}
+
+static void gen_neg(DisasContext *ctx)
+{
+ gen_op_arith_neg(ctx, 0);
+}
+
+static void gen_nego(DisasContext *ctx)
+{
+ gen_op_arith_neg(ctx, 1);
+}
+
+/*** Integer logical ***/
+#define GEN_LOGICAL2(name, tcg_op, opc, type) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], \
+ cpu_gpr[rB(ctx->opcode)]); \
+ if (unlikely(Rc(ctx->opcode) != 0)) \
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); \
+}
+
+#define GEN_LOGICAL1(name, tcg_op, opc, type) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); \
+ if (unlikely(Rc(ctx->opcode) != 0)) \
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); \
+}
+
+/* and & and. */
+GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER);
+/* andc & andc. */
+GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER);
+
+/* andi. */
+static void gen_andi_(DisasContext *ctx)
+{
+ tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], UIMM(ctx->opcode));
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* andis. */
+static void gen_andis_(DisasContext *ctx)
+{
+ tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], UIMM(ctx->opcode) << 16);
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* cntlzw */
+static void gen_cntlzw(DisasContext *ctx)
+{
+ gen_helper_cntlzw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* cnttzw */
+static void gen_cnttzw(DisasContext *ctx)
+{
+ gen_helper_cnttzw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
+/* eqv & eqv. */
+GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER);
+/* extsb & extsb. */
+GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER);
+/* extsh & extsh. */
+GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER);
+/* nand & nand. */
+GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER);
+/* nor & nor. */
+GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER);
+
+#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
+static void gen_pause(DisasContext *ctx)
+{
+ TCGv_i32 t0 = tcg_const_i32(0);
+ tcg_gen_st_i32(t0, cpu_env,
+ -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));
+ tcg_temp_free_i32(t0);
+
+ /* Stop translation, this gives other CPUs a chance to run */
+ gen_exception_nip(ctx, EXCP_HLT, ctx->nip);
+}
+#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */
+
+/* or & or. */
+static void gen_or(DisasContext *ctx)
+{
+ int rs, ra, rb;
+
+ rs = rS(ctx->opcode);
+ ra = rA(ctx->opcode);
+ rb = rB(ctx->opcode);
+ /* Optimisation for mr. ri case */
+ if (rs != ra || rs != rb) {
+ if (rs != rb)
+ tcg_gen_or_tl(cpu_gpr[ra], cpu_gpr[rs], cpu_gpr[rb]);
+ else
+ tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rs]);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[ra]);
+ } else if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rs]);
+#if defined(TARGET_PPC64)
+ } else if (rs != 0) { /* 0 is nop */
+ int prio = 0;
+
+ switch (rs) {
+ case 1:
+ /* Set process priority to low */
+ prio = 2;
+ break;
+ case 6:
+ /* Set process priority to medium-low */
+ prio = 3;
+ break;
+ case 2:
+ /* Set process priority to normal */
+ prio = 4;
+ break;
+#if !defined(CONFIG_USER_ONLY)
+ case 31:
+ if (!ctx->pr) {
+ /* Set process priority to very low */
+ prio = 1;
+ }
+ break;
+ case 5:
+ if (!ctx->pr) {
+ /* Set process priority to medium-high */
+ prio = 5;
+ }
+ break;
+ case 3:
+ if (!ctx->pr) {
+ /* Set process priority to high */
+ prio = 6;
+ }
+ break;
+ case 7:
+ if (ctx->hv && !ctx->pr) {
+ /* Set process priority to very high */
+ prio = 7;
+ }
+ break;
+#endif
+ default:
+ break;
+ }
+ if (prio) {
+ TCGv t0 = tcg_temp_new();
+ gen_load_spr(t0, SPR_PPR);
+ tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL);
+ tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50);
+ gen_store_spr(SPR_PPR, t0);
+ tcg_temp_free(t0);
+ }
+#if !defined(CONFIG_USER_ONLY)
+ /* Pause out of TCG otherwise spin loops with smt_low eat too much
+ * CPU and the kernel hangs. This applies to all encodings other
+ * than no-op, e.g., miso(rs=26), yield(27), mdoio(29), mdoom(30),
+ * and all currently undefined.
+ */
+ gen_pause(ctx);
+#endif
+#endif
+ }
+}
+/* orc & orc. */
+GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER);
+
+/* xor & xor. */
+static void gen_xor(DisasContext *ctx)
+{
+ /* Optimisation for "set to zero" case */
+ if (rS(ctx->opcode) != rB(ctx->opcode))
+ tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ else
+ tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* ori */
+static void gen_ori(DisasContext *ctx)
+{
+ target_ulong uimm = UIMM(ctx->opcode);
+
+ if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
+ return;
+ }
+ tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
+}
+
+/* oris */
+static void gen_oris(DisasContext *ctx)
+{
+ target_ulong uimm = UIMM(ctx->opcode);
+
+ if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
+ /* NOP */
+ return;
+ }
+ tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm << 16);
+}
+
+/* xori */
+static void gen_xori(DisasContext *ctx)
+{
+ target_ulong uimm = UIMM(ctx->opcode);
+
+ if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
+ /* NOP */
+ return;
+ }
+ tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm);
+}
+
+/* xoris */
+static void gen_xoris(DisasContext *ctx)
+{
+ target_ulong uimm = UIMM(ctx->opcode);
+
+ if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) {
+ /* NOP */
+ return;
+ }
+ tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm << 16);
+}
+
+/* popcntb : PowerPC 2.03 specification */
+static void gen_popcntb(DisasContext *ctx)
+{
+ gen_helper_popcntb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+}
+
+static void gen_popcntw(DisasContext *ctx)
+{
+ gen_helper_popcntw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+}
+
+#if defined(TARGET_PPC64)
+/* popcntd: PowerPC 2.06 specification */
+static void gen_popcntd(DisasContext *ctx)
+{
+ gen_helper_popcntd(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+}
+#endif
+
+/* prtyw: PowerPC 2.05 specification */
+static void gen_prtyw(DisasContext *ctx)
+{
+ TCGv ra = cpu_gpr[rA(ctx->opcode)];
+ TCGv rs = cpu_gpr[rS(ctx->opcode)];
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, rs, 16);
+ tcg_gen_xor_tl(ra, rs, t0);
+ tcg_gen_shri_tl(t0, ra, 8);
+ tcg_gen_xor_tl(ra, ra, t0);
+ tcg_gen_andi_tl(ra, ra, (target_ulong)0x100000001ULL);
+ tcg_temp_free(t0);
+}
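+
+/* The xor folding above accumulates bits 0, 8, 16 and 24 of each 32-bit
+ * word into bit 0 of that word, i.e. the parity of the low-order bit of
+ * each byte as prtyw requires; the final andi keeps only bit 0 of each
+ * word.
+ */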
+
+#if defined(TARGET_PPC64)
+/* prtyd: PowerPC 2.05 specification */
+static void gen_prtyd(DisasContext *ctx)
+{
+ TCGv ra = cpu_gpr[rA(ctx->opcode)];
+ TCGv rs = cpu_gpr[rS(ctx->opcode)];
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, rs, 32);
+ tcg_gen_xor_tl(ra, rs, t0);
+ tcg_gen_shri_tl(t0, ra, 16);
+ tcg_gen_xor_tl(ra, ra, t0);
+ tcg_gen_shri_tl(t0, ra, 8);
+ tcg_gen_xor_tl(ra, ra, t0);
+ tcg_gen_andi_tl(ra, ra, 1);
+ tcg_temp_free(t0);
+}
+#endif
+
+#if defined(TARGET_PPC64)
+/* bpermd */
+static void gen_bpermd(DisasContext *ctx)
+{
+ gen_helper_bpermd(cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+}
+#endif
+
+#if defined(TARGET_PPC64)
+/* extsw & extsw. */
+GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B);
+
+/* cntlzd */
+static void gen_cntlzd(DisasContext *ctx)
+{
+ gen_helper_cntlzd(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* cnttzd */
+static void gen_cnttzd(DisasContext *ctx)
+{
+ gen_helper_cnttzd(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
+/* darn */
+static void gen_darn(DisasContext *ctx)
+{
+ int l = L(ctx->opcode);
+
+ if (l == 0) {
+ gen_helper_darn32(cpu_gpr[rD(ctx->opcode)]);
+ } else if (l <= 2) {
+ /* Return 64-bit random for both CRN and RRN */
+ gen_helper_darn64(cpu_gpr[rD(ctx->opcode)]);
+ } else {
+ tcg_gen_movi_i64(cpu_gpr[rD(ctx->opcode)], -1);
+ }
+}
+#endif
+
+/*** Integer rotate ***/
+
+/* rlwimi & rlwimi. */
+static void gen_rlwimi(DisasContext *ctx)
+{
+ TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
+ TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
+ uint32_t sh = SH(ctx->opcode);
+ uint32_t mb = MB(ctx->opcode);
+ uint32_t me = ME(ctx->opcode);
+
+ if (sh == (31-me) && mb <= me) {
+ tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
+ } else {
+ target_ulong mask;
+ TCGv t1;
+
+#if defined(TARGET_PPC64)
+ mb += 32;
+ me += 32;
+#endif
+ mask = MASK(mb, me);
+
+ t1 = tcg_temp_new();
+ if (mask <= 0xffffffffu) {
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t0, t_rs);
+ tcg_gen_rotli_i32(t0, t0, sh);
+ tcg_gen_extu_i32_tl(t1, t0);
+ tcg_temp_free_i32(t0);
+ } else {
+#if defined(TARGET_PPC64)
+ tcg_gen_deposit_i64(t1, t_rs, t_rs, 32, 32);
+ tcg_gen_rotli_i64(t1, t1, sh);
+#else
+ g_assert_not_reached();
+#endif
+ }
+
+ tcg_gen_andi_tl(t1, t1, mask);
+ tcg_gen_andi_tl(t_ra, t_ra, ~mask);
+ tcg_gen_or_tl(t_ra, t_ra, t1);
+ tcg_temp_free(t1);
+ }
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, t_ra);
+ }
+}
+
+/* rlwinm & rlwinm. */
+static void gen_rlwinm(DisasContext *ctx)
+{
+ TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
+ TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
+ uint32_t sh = SH(ctx->opcode);
+ uint32_t mb = MB(ctx->opcode);
+ uint32_t me = ME(ctx->opcode);
+
+ if (mb == 0 && me == (31 - sh)) {
+ tcg_gen_shli_tl(t_ra, t_rs, sh);
+ tcg_gen_ext32u_tl(t_ra, t_ra);
+ } else if (sh != 0 && me == 31 && sh == (32 - mb)) {
+ tcg_gen_ext32u_tl(t_ra, t_rs);
+ tcg_gen_shri_tl(t_ra, t_ra, mb);
+ } else {
+ target_ulong mask;
+#if defined(TARGET_PPC64)
+ mb += 32;
+ me += 32;
+#endif
+ mask = MASK(mb, me);
+
+ if (mask <= 0xffffffffu) {
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t0, t_rs);
+ tcg_gen_rotli_i32(t0, t0, sh);
+ tcg_gen_andi_i32(t0, t0, mask);
+ tcg_gen_extu_i32_tl(t_ra, t0);
+ tcg_temp_free_i32(t0);
+ } else {
+#if defined(TARGET_PPC64)
+ tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32);
+ tcg_gen_rotli_i64(t_ra, t_ra, sh);
+ tcg_gen_andi_i64(t_ra, t_ra, mask);
+#else
+ g_assert_not_reached();
+#endif
+ }
+ }
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, t_ra);
+ }
+}
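+
+/* A worked instance of the general path above (illustrative):
+ * rlwinm rA,rS,0,24,31 has sh = 0, mb = 24, me = 31, so MASK(mb, me)
+ * (after the +32 adjustment on 64-bit targets) is 0xff and the sequence
+ * reduces to a rotate by 0 followed by an AND with 0xff, the classic
+ * low-byte extraction.
+ */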
+
+/* rlwnm & rlwnm. */
+static void gen_rlwnm(DisasContext *ctx)
+{
+ TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
+ TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
+ TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
+ uint32_t mb = MB(ctx->opcode);
+ uint32_t me = ME(ctx->opcode);
+ target_ulong mask;
+
+#if defined(TARGET_PPC64)
+ mb += 32;
+ me += 32;
+#endif
+ mask = MASK(mb, me);
+
+ if (mask <= 0xffffffffu) {
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t0, t_rb);
+ tcg_gen_trunc_tl_i32(t1, t_rs);
+ tcg_gen_andi_i32(t0, t0, 0x1f);
+ tcg_gen_rotl_i32(t1, t1, t0);
+ tcg_gen_extu_i32_tl(t_ra, t1);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+ } else {
+#if defined(TARGET_PPC64)
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ tcg_gen_andi_i64(t0, t_rb, 0x1f);
+ tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32);
+ tcg_gen_rotl_i64(t_ra, t_ra, t0);
+ tcg_temp_free_i64(t0);
+#else
+ g_assert_not_reached();
+#endif
+ }
+
+ tcg_gen_andi_tl(t_ra, t_ra, mask);
+
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, t_ra);
+ }
+}
+
+#if defined(TARGET_PPC64)
+#define GEN_PPC64_R2(name, opc1, opc2) \
+static void glue(gen_, name##0)(DisasContext *ctx) \
+{ \
+ gen_##name(ctx, 0); \
+} \
+ \
+static void glue(gen_, name##1)(DisasContext *ctx) \
+{ \
+ gen_##name(ctx, 1); \
+}
+#define GEN_PPC64_R4(name, opc1, opc2) \
+static void glue(gen_, name##0)(DisasContext *ctx) \
+{ \
+ gen_##name(ctx, 0, 0); \
+} \
+ \
+static void glue(gen_, name##1)(DisasContext *ctx) \
+{ \
+ gen_##name(ctx, 0, 1); \
+} \
+ \
+static void glue(gen_, name##2)(DisasContext *ctx) \
+{ \
+ gen_##name(ctx, 1, 0); \
+} \
+ \
+static void glue(gen_, name##3)(DisasContext *ctx) \
+{ \
+ gen_##name(ctx, 1, 1); \
+}
+
+static void gen_rldinm(DisasContext *ctx, int mb, int me, int sh)
+{
+ TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
+ TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
+
+ if (sh != 0 && mb == 0 && me == (63 - sh)) {
+ tcg_gen_shli_tl(t_ra, t_rs, sh);
+ } else if (sh != 0 && me == 63 && sh == (64 - mb)) {
+ tcg_gen_shri_tl(t_ra, t_rs, mb);
+ } else {
+ tcg_gen_rotli_tl(t_ra, t_rs, sh);
+ tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
+ }
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, t_ra);
+ }
+}
+
+/* rldicl - rldicl. */
+static inline void gen_rldicl(DisasContext *ctx, int mbn, int shn)
+{
+ uint32_t sh, mb;
+
+ sh = SH(ctx->opcode) | (shn << 5);
+ mb = MB(ctx->opcode) | (mbn << 5);
+ gen_rldinm(ctx, mb, 63, sh);
+}
+GEN_PPC64_R4(rldicl, 0x1E, 0x00);
+
+/* rldicr - rldicr. */
+static inline void gen_rldicr(DisasContext *ctx, int men, int shn)
+{
+ uint32_t sh, me;
+
+ sh = SH(ctx->opcode) | (shn << 5);
+ me = MB(ctx->opcode) | (men << 5);
+ gen_rldinm(ctx, 0, me, sh);
+}
+GEN_PPC64_R4(rldicr, 0x1E, 0x02);
+
+/* rldic - rldic. */
+static inline void gen_rldic(DisasContext *ctx, int mbn, int shn)
+{
+ uint32_t sh, mb;
+
+ sh = SH(ctx->opcode) | (shn << 5);
+ mb = MB(ctx->opcode) | (mbn << 5);
+ gen_rldinm(ctx, mb, 63 - sh, sh);
+}
+GEN_PPC64_R4(rldic, 0x1E, 0x04);
+
+static void gen_rldnm(DisasContext *ctx, int mb, int me)
+{
+ TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
+ TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
+ TCGv t_rb = cpu_gpr[rB(ctx->opcode)];
+ TCGv t0;
+
+ t0 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, t_rb, 0x3f);
+ tcg_gen_rotl_tl(t_ra, t_rs, t0);
+ tcg_temp_free(t0);
+
+ tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me));
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, t_ra);
+ }
+}
+
+/* rldcl - rldcl. */
+static inline void gen_rldcl(DisasContext *ctx, int mbn)
+{
+ uint32_t mb;
+
+ mb = MB(ctx->opcode) | (mbn << 5);
+ gen_rldnm(ctx, mb, 63);
+}
+GEN_PPC64_R2(rldcl, 0x1E, 0x08);
+
+/* rldcr - rldcr. */
+static inline void gen_rldcr(DisasContext *ctx, int men)
+{
+ uint32_t me;
+
+ me = MB(ctx->opcode) | (men << 5);
+ gen_rldnm(ctx, 0, me);
+}
+GEN_PPC64_R2(rldcr, 0x1E, 0x09);
+
+/* rldimi - rldimi. */
+static void gen_rldimi(DisasContext *ctx, int mbn, int shn)
+{
+ TCGv t_ra = cpu_gpr[rA(ctx->opcode)];
+ TCGv t_rs = cpu_gpr[rS(ctx->opcode)];
+ uint32_t sh = SH(ctx->opcode) | (shn << 5);
+ uint32_t mb = MB(ctx->opcode) | (mbn << 5);
+ uint32_t me = 63 - sh;
+
+ if (mb <= me) {
+ tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1);
+ } else {
+ target_ulong mask = MASK(mb, me);
+ TCGv t1 = tcg_temp_new();
+
+ tcg_gen_rotli_tl(t1, t_rs, sh);
+ tcg_gen_andi_tl(t1, t1, mask);
+ tcg_gen_andi_tl(t_ra, t_ra, ~mask);
+ tcg_gen_or_tl(t_ra, t_ra, t1);
+ tcg_temp_free(t1);
+ }
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, t_ra);
+ }
+}
+GEN_PPC64_R4(rldimi, 0x1E, 0x06);
+#endif
+
+/*** Integer shift ***/
+
+/* slw & slw. */
+static void gen_slw(DisasContext *ctx)
+{
+ TCGv t0, t1;
+
+ t0 = tcg_temp_new();
+ /* AND rS with a mask that is 0 when rB >= 0x20 */
+#if defined(TARGET_PPC64)
+ tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
+ tcg_gen_sari_tl(t0, t0, 0x3f);
+#else
+ tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
+ tcg_gen_sari_tl(t0, t0, 0x1f);
+#endif
+ tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
+ t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
+ tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+ tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
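+
+/* The mask trick above: shifting rB left so that bit 5 of the shift amount
+ * lands in the sign bit, then arithmetically shifting it back, yields an
+ * all-ones value whenever a shift of 32 or more is requested, so the andc
+ * forces the result to 0 as slw requires.  srw, sld and srd below reuse
+ * the same idiom.
+ */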
+
+/* sraw & sraw. */
+static void gen_sraw(DisasContext *ctx)
+{
+ gen_helper_sraw(cpu_gpr[rA(ctx->opcode)], cpu_env,
+ cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* srawi & srawi. */
+static void gen_srawi(DisasContext *ctx)
+{
+ int sh = SH(ctx->opcode);
+ TCGv dst = cpu_gpr[rA(ctx->opcode)];
+ TCGv src = cpu_gpr[rS(ctx->opcode)];
+ if (sh == 0) {
+ tcg_gen_ext32s_tl(dst, src);
+ tcg_gen_movi_tl(cpu_ca, 0);
+ } else {
+ TCGv t0;
+ tcg_gen_ext32s_tl(dst, src);
+ tcg_gen_andi_tl(cpu_ca, dst, (1ULL << sh) - 1);
+ t0 = tcg_temp_new();
+ tcg_gen_sari_tl(t0, dst, TARGET_LONG_BITS - 1);
+ tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
+ tcg_temp_free(t0);
+ tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
+ tcg_gen_sari_tl(dst, dst, sh);
+ }
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, dst);
+ }
+}
+
+/* srw & srw. */
+static void gen_srw(DisasContext *ctx)
+{
+ TCGv t0, t1;
+
+ t0 = tcg_temp_new();
+ /* AND rS with a mask that is 0 when rB >= 0x20 */
+#if defined(TARGET_PPC64)
+ tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x3a);
+ tcg_gen_sari_tl(t0, t0, 0x3f);
+#else
+ tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1a);
+ tcg_gen_sari_tl(t0, t0, 0x1f);
+#endif
+ tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
+ tcg_gen_ext32u_tl(t0, t0);
+ t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1f);
+ tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+#if defined(TARGET_PPC64)
+/* sld & sld. */
+static void gen_sld(DisasContext *ctx)
+{
+ TCGv t0, t1;
+
+ t0 = tcg_temp_new();
+ /* AND rS with a mask that is 0 when rB >= 0x40 */
+ tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
+ tcg_gen_sari_tl(t0, t0, 0x3f);
+ tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
+ t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
+ tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* srad & srad. */
+static void gen_srad(DisasContext *ctx)
+{
+ gen_helper_srad(cpu_gpr[rA(ctx->opcode)], cpu_env,
+ cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+/* sradi & sradi. */
+static inline void gen_sradi(DisasContext *ctx, int n)
+{
+ int sh = SH(ctx->opcode) + (n << 5);
+ TCGv dst = cpu_gpr[rA(ctx->opcode)];
+ TCGv src = cpu_gpr[rS(ctx->opcode)];
+ if (sh == 0) {
+ tcg_gen_mov_tl(dst, src);
+ tcg_gen_movi_tl(cpu_ca, 0);
+ } else {
+ TCGv t0;
+ tcg_gen_andi_tl(cpu_ca, src, (1ULL << sh) - 1);
+ t0 = tcg_temp_new();
+ tcg_gen_sari_tl(t0, src, TARGET_LONG_BITS - 1);
+ tcg_gen_and_tl(cpu_ca, cpu_ca, t0);
+ tcg_temp_free(t0);
+ tcg_gen_setcondi_tl(TCG_COND_NE, cpu_ca, cpu_ca, 0);
+ tcg_gen_sari_tl(dst, src, sh);
+ }
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, dst);
+ }
+}
+
+static void gen_sradi0(DisasContext *ctx)
+{
+ gen_sradi(ctx, 0);
+}
+
+static void gen_sradi1(DisasContext *ctx)
+{
+ gen_sradi(ctx, 1);
+}
+
+/* extswsli & extswsli. */
+static inline void gen_extswsli(DisasContext *ctx, int n)
+{
+ int sh = SH(ctx->opcode) + (n << 5);
+ TCGv dst = cpu_gpr[rA(ctx->opcode)];
+ TCGv src = cpu_gpr[rS(ctx->opcode)];
+
+ tcg_gen_ext32s_tl(dst, src);
+ tcg_gen_shli_tl(dst, dst, sh);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_Rc0(ctx, dst);
+ }
+}
+
+static void gen_extswsli0(DisasContext *ctx)
+{
+ gen_extswsli(ctx, 0);
+}
+
+static void gen_extswsli1(DisasContext *ctx)
+{
+ gen_extswsli(ctx, 1);
+}
+
+/* srd & srd. */
+static void gen_srd(DisasContext *ctx)
+{
+ TCGv t0, t1;
+
+ t0 = tcg_temp_new();
+ /* AND rS with a mask that is 0 when rB >= 0x40 */
+ tcg_gen_shli_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x39);
+ tcg_gen_sari_tl(t0, t0, 0x3f);
+ tcg_gen_andc_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
+ t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x3f);
+ tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+#endif
+
+/*** Addressing modes ***/
+/* Register indirect with immediate index: EA = (rA|0) + SIMM */
+static inline void gen_addr_imm_index(DisasContext *ctx, TCGv EA,
+ target_long maskl)
+{
+ target_long simm = SIMM(ctx->opcode);
+
+ simm &= ~maskl;
+ if (rA(ctx->opcode) == 0) {
+ if (NARROW_MODE(ctx)) {
+ simm = (uint32_t)simm;
+ }
+ tcg_gen_movi_tl(EA, simm);
+ } else if (likely(simm != 0)) {
+ tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], simm);
+ if (NARROW_MODE(ctx)) {
+ tcg_gen_ext32u_tl(EA, EA);
+ }
+ } else {
+ if (NARROW_MODE(ctx)) {
+ tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
+ } else {
+ tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
+ }
+ }
+}
+
+static inline void gen_addr_reg_index(DisasContext *ctx, TCGv EA)
+{
+ if (rA(ctx->opcode) == 0) {
+ if (NARROW_MODE(ctx)) {
+ tcg_gen_ext32u_tl(EA, cpu_gpr[rB(ctx->opcode)]);
+ } else {
+ tcg_gen_mov_tl(EA, cpu_gpr[rB(ctx->opcode)]);
+ }
+ } else {
+ tcg_gen_add_tl(EA, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ if (NARROW_MODE(ctx)) {
+ tcg_gen_ext32u_tl(EA, EA);
+ }
+ }
+}
+
+static inline void gen_addr_register(DisasContext *ctx, TCGv EA)
+{
+ if (rA(ctx->opcode) == 0) {
+ tcg_gen_movi_tl(EA, 0);
+ } else if (NARROW_MODE(ctx)) {
+ tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
+ } else {
+ tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
+static inline void gen_addr_add(DisasContext *ctx, TCGv ret, TCGv arg1,
+ target_long val)
+{
+ tcg_gen_addi_tl(ret, arg1, val);
+ if (NARROW_MODE(ctx)) {
+ tcg_gen_ext32u_tl(ret, ret);
+ }
+}
+
+static inline void gen_check_align(DisasContext *ctx, TCGv EA, int mask)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGv t0 = tcg_temp_new();
+ TCGv_i32 t1, t2;
+ tcg_gen_andi_tl(t0, EA, mask);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
+ t1 = tcg_const_i32(POWERPC_EXCP_ALIGN);
+ t2 = tcg_const_i32(ctx->opcode & 0x03FF0000);
+ gen_update_nip(ctx, ctx->nip - 4);
+ gen_helper_raise_exception_err(cpu_env, t1, t2);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+ gen_set_label(l1);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_align_no_le(DisasContext *ctx)
+{
+ gen_exception_err(ctx, POWERPC_EXCP_ALIGN,
+ (ctx->opcode & 0x03FF0000) | POWERPC_EXCP_ALIGN_LE);
+}
+
+/*** Integer load ***/
+#define DEF_MEMOP(op) ((op) | ctx->default_tcg_memop_mask)
+#define BSWAP_MEMOP(op) ((op) | (ctx->default_tcg_memop_mask ^ MO_BSWAP))
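+
+/* ctx->default_tcg_memop_mask follows the guest's current endianness, so
+ * DEF_MEMOP yields a natural-order access while BSWAP_MEMOP flips the
+ * MO_BSWAP bit to get the opposite order; the latter backs the
+ * byte-reversed accesses (lhbrx, lwbrx, sthbrx, stwbrx and friends).
+ */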
+
+#define GEN_QEMU_LOAD_TL(ldop, op) \
+static void glue(gen_qemu_, ldop)(DisasContext *ctx, \
+ TCGv val, \
+ TCGv addr) \
+{ \
+ tcg_gen_qemu_ld_tl(val, addr, ctx->mem_idx, op); \
+}
+
+GEN_QEMU_LOAD_TL(ld8u, DEF_MEMOP(MO_UB))
+GEN_QEMU_LOAD_TL(ld16u, DEF_MEMOP(MO_UW))
+GEN_QEMU_LOAD_TL(ld16s, DEF_MEMOP(MO_SW))
+GEN_QEMU_LOAD_TL(ld32u, DEF_MEMOP(MO_UL))
+GEN_QEMU_LOAD_TL(ld32s, DEF_MEMOP(MO_SL))
+
+GEN_QEMU_LOAD_TL(ld16ur, BSWAP_MEMOP(MO_UW))
+GEN_QEMU_LOAD_TL(ld32ur, BSWAP_MEMOP(MO_UL))
+
+#define GEN_QEMU_LOAD_64(ldop, op) \
+static void glue(gen_qemu_, glue(ldop, _i64))(DisasContext *ctx, \
+ TCGv_i64 val, \
+ TCGv addr) \
+{ \
+ tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, op); \
+}
+
+GEN_QEMU_LOAD_64(ld8u, DEF_MEMOP(MO_UB))
+GEN_QEMU_LOAD_64(ld16u, DEF_MEMOP(MO_UW))
+GEN_QEMU_LOAD_64(ld32u, DEF_MEMOP(MO_UL))
+GEN_QEMU_LOAD_64(ld32s, DEF_MEMOP(MO_SL))
+GEN_QEMU_LOAD_64(ld64, DEF_MEMOP(MO_Q))
+
+#if defined(TARGET_PPC64)
+GEN_QEMU_LOAD_64(ld64ur, BSWAP_MEMOP(MO_Q))
+#endif
+
+#define GEN_QEMU_STORE_TL(stop, op) \
+static void glue(gen_qemu_, stop)(DisasContext *ctx, \
+ TCGv val, \
+ TCGv addr) \
+{ \
+ tcg_gen_qemu_st_tl(val, addr, ctx->mem_idx, op); \
+}
+
+GEN_QEMU_STORE_TL(st8, DEF_MEMOP(MO_UB))
+GEN_QEMU_STORE_TL(st16, DEF_MEMOP(MO_UW))
+GEN_QEMU_STORE_TL(st32, DEF_MEMOP(MO_UL))
+
+GEN_QEMU_STORE_TL(st16r, BSWAP_MEMOP(MO_UW))
+GEN_QEMU_STORE_TL(st32r, BSWAP_MEMOP(MO_UL))
+
+#define GEN_QEMU_STORE_64(stop, op) \
+static void glue(gen_qemu_, glue(stop, _i64))(DisasContext *ctx, \
+ TCGv_i64 val, \
+ TCGv addr) \
+{ \
+ tcg_gen_qemu_st_i64(val, addr, ctx->mem_idx, op); \
+}
+
+GEN_QEMU_STORE_64(st8, DEF_MEMOP(MO_UB))
+GEN_QEMU_STORE_64(st16, DEF_MEMOP(MO_UW))
+GEN_QEMU_STORE_64(st32, DEF_MEMOP(MO_UL))
+GEN_QEMU_STORE_64(st64, DEF_MEMOP(MO_Q))
+
+#if defined(TARGET_PPC64)
+GEN_QEMU_STORE_64(st64r, BSWAP_MEMOP(MO_Q))
+#endif
+
+#define GEN_LD(name, ldop, opc, type) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_imm_index(ctx, EA, 0); \
+ gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_LDU(name, ldop, opc, type) \
+static void glue(gen_, name##u)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(rA(ctx->opcode) == 0 || \
+ rA(ctx->opcode) == rD(ctx->opcode))) { \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ if (type == PPC_64B) \
+ gen_addr_imm_index(ctx, EA, 0x03); \
+ else \
+ gen_addr_imm_index(ctx, EA, 0); \
+ gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_LDUX(name, ldop, opc2, opc3, type) \
+static void glue(gen_, name##ux)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(rA(ctx->opcode) == 0 || \
+ rA(ctx->opcode) == rD(ctx->opcode))) { \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_LDX_E(name, ldop, opc2, opc3, type, type2, chk) \
+static void glue(gen_, name##x)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ chk; \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_LDX(name, ldop, opc2, opc3, type) \
+ GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE, CHK_NONE)
+
+#define GEN_LDX_HVRM(name, ldop, opc2, opc3, type) \
+ GEN_LDX_E(name, ldop, opc2, opc3, type, PPC_NONE, CHK_HVRM)
+
+#define GEN_LDS(name, ldop, op, type) \
+GEN_LD(name, ldop, op | 0x20, type); \
+GEN_LDU(name, ldop, op | 0x21, type); \
+GEN_LDUX(name, ldop, 0x17, op | 0x01, type); \
+GEN_LDX(name, ldop, 0x17, op | 0x00, type)
+
+/* lbz lbzu lbzux lbzx */
+GEN_LDS(lbz, ld8u, 0x02, PPC_INTEGER);
+/* lha lhau lhaux lhax */
+GEN_LDS(lha, ld16s, 0x0A, PPC_INTEGER);
+/* lhz lhzu lhzux lhzx */
+GEN_LDS(lhz, ld16u, 0x08, PPC_INTEGER);
+/* lwz lwzu lwzux lwzx */
+GEN_LDS(lwz, ld32u, 0x00, PPC_INTEGER);
+#if defined(TARGET_PPC64)
+/* lwaux */
+GEN_LDUX(lwa, ld32s, 0x15, 0x0B, PPC_64B);
+/* lwax */
+GEN_LDX(lwa, ld32s, 0x15, 0x0A, PPC_64B);
+/* ldux */
+GEN_LDUX(ld, ld64_i64, 0x15, 0x01, PPC_64B);
+/* ldx */
+GEN_LDX(ld, ld64_i64, 0x15, 0x00, PPC_64B);
+
+/* CI load/store variants */
+GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST)
+GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x15, PPC_CILDST)
+GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST)
+GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST)
+
+static void gen_ld(DisasContext *ctx)
+{
+ TCGv EA;
+ if (Rc(ctx->opcode)) {
+ if (unlikely(rA(ctx->opcode) == 0 ||
+ rA(ctx->opcode) == rD(ctx->opcode))) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ return;
+ }
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+ gen_addr_imm_index(ctx, EA, 0x03);
+ if (ctx->opcode & 0x02) {
+ /* lwa (lwau is undefined) */
+ gen_qemu_ld32s(ctx, cpu_gpr[rD(ctx->opcode)], EA);
+ } else {
+ /* ld - ldu */
+ gen_qemu_ld64_i64(ctx, cpu_gpr[rD(ctx->opcode)], EA);
+ }
+ if (Rc(ctx->opcode))
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
+ tcg_temp_free(EA);
+}
+
+/* lq */
+static void gen_lq(DisasContext *ctx)
+{
+ int ra, rd;
+ TCGv EA;
+
+ /* lq is a legal user mode instruction starting in ISA 2.07 */
+ bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
+ bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
+
+ if (!legal_in_user_mode && ctx->pr) {
+ gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+
+ if (!le_is_supported && ctx->le_mode) {
+ gen_align_no_le(ctx);
+ return;
+ }
+ ra = rA(ctx->opcode);
+ rd = rD(ctx->opcode);
+ if (unlikely((rd & 1) || rd == ra)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ return;
+ }
+
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+ gen_addr_imm_index(ctx, EA, 0x0F);
+
+ /* We only need to swap the high and low halves; gen_qemu_ld64_i64 does
+ the necessary 64-bit byteswap already. */
+ if (unlikely(ctx->le_mode)) {
+ gen_qemu_ld64_i64(ctx, cpu_gpr[rd + 1], EA);
+ gen_addr_add(ctx, EA, EA, 8);
+ gen_qemu_ld64_i64(ctx, cpu_gpr[rd], EA);
+ } else {
+ gen_qemu_ld64_i64(ctx, cpu_gpr[rd], EA);
+ gen_addr_add(ctx, EA, EA, 8);
+ gen_qemu_ld64_i64(ctx, cpu_gpr[rd + 1], EA);
+ }
+ tcg_temp_free(EA);
+}
+#endif
+
+/*** Integer store ***/
+#define GEN_ST(name, stop, opc, type) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_imm_index(ctx, EA, 0); \
+ gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_STU(name, stop, opc, type) \
+static void glue(gen_, stop##u)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(rA(ctx->opcode) == 0)) { \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ if (type == PPC_64B) \
+ gen_addr_imm_index(ctx, EA, 0x03); \
+ else \
+ gen_addr_imm_index(ctx, EA, 0); \
+ gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_STUX(name, stop, opc2, opc3, type) \
+static void glue(gen_, name##ux)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(rA(ctx->opcode) == 0)) { \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_STX_E(name, stop, opc2, opc3, type, type2, chk) \
+static void glue(gen_, name##x)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ chk; \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+#define GEN_STX(name, stop, opc2, opc3, type) \
+ GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_NONE)
+
+#define GEN_STX_HVRM(name, stop, opc2, opc3, type) \
+ GEN_STX_E(name, stop, opc2, opc3, type, PPC_NONE, CHK_HVRM)
+
+#define GEN_STS(name, stop, op, type) \
+GEN_ST(name, stop, op | 0x20, type); \
+GEN_STU(name, stop, op | 0x21, type); \
+GEN_STUX(name, stop, 0x17, op | 0x01, type); \
+GEN_STX(name, stop, 0x17, op | 0x00, type)
+
+/* stb stbu stbux stbx */
+GEN_STS(stb, st8, 0x06, PPC_INTEGER);
+/* sth sthu sthux sthx */
+GEN_STS(sth, st16, 0x0C, PPC_INTEGER);
+/* stw stwu stwux stwx */
+GEN_STS(stw, st32, 0x04, PPC_INTEGER);
+#if defined(TARGET_PPC64)
+GEN_STUX(std, st64_i64, 0x15, 0x05, PPC_64B);
+GEN_STX(std, st64_i64, 0x15, 0x04, PPC_64B);
+GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
+GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
+GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
+GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
+
+static void gen_std(DisasContext *ctx)
+{
+ int rs;
+ TCGv EA;
+
+ rs = rS(ctx->opcode);
+ if ((ctx->opcode & 0x3) == 0x2) { /* stq */
+ bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
+ bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0;
+
+ if (!(ctx->insns_flags & PPC_64BX)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ }
+
+ if (!legal_in_user_mode && ctx->pr) {
+ gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC);
+ return;
+ }
+
+ if (!le_is_supported && ctx->le_mode) {
+ gen_align_no_le(ctx);
+ return;
+ }
+
+ if (unlikely(rs & 1)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+ gen_addr_imm_index(ctx, EA, 0x03);
+
+ /* We only need to swap the high and low halves; gen_qemu_st64_i64 does
+ the necessary 64-bit byteswap already. */
+ if (unlikely(ctx->le_mode)) {
+ gen_qemu_st64_i64(ctx, cpu_gpr[rs + 1], EA);
+ gen_addr_add(ctx, EA, EA, 8);
+ gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA);
+ } else {
+ gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA);
+ gen_addr_add(ctx, EA, EA, 8);
+ gen_qemu_st64_i64(ctx, cpu_gpr[rs + 1], EA);
+ }
+ tcg_temp_free(EA);
+ } else {
+ /* std / stdu */
+ if (Rc(ctx->opcode)) {
+ if (unlikely(rA(ctx->opcode) == 0)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ return;
+ }
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+ gen_addr_imm_index(ctx, EA, 0x03);
+ gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA);
+ if (Rc(ctx->opcode))
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
+ tcg_temp_free(EA);
+ }
+}
+#endif
+/*** Integer load and store with byte reverse ***/
+
+/* lhbrx */
+GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER);
+
+/* lwbrx */
+GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER);
+
+#if defined(TARGET_PPC64)
+/* ldbrx */
+GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE);
+/* stdbrx */
+GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE);
+#endif /* TARGET_PPC64 */
+
+/* sthbrx */
+GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER);
+/* stwbrx */
+GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER);
+
+/*** Integer load and store multiple ***/
+
+/* lmw */
+static void gen_lmw(DisasContext *ctx)
+{
+ TCGv t0;
+ TCGv_i32 t1;
+
+ if (ctx->le_mode) {
+ gen_align_no_le(ctx);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ t0 = tcg_temp_new();
+ t1 = tcg_const_i32(rD(ctx->opcode));
+ gen_addr_imm_index(ctx, t0, 0);
+ gen_helper_lmw(cpu_env, t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free_i32(t1);
+}
+
+/* stmw */
+static void gen_stmw(DisasContext *ctx)
+{
+ TCGv t0;
+ TCGv_i32 t1;
+
+ if (ctx->le_mode) {
+ gen_align_no_le(ctx);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ t0 = tcg_temp_new();
+ t1 = tcg_const_i32(rS(ctx->opcode));
+ gen_addr_imm_index(ctx, t0, 0);
+ gen_helper_stmw(cpu_env, t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free_i32(t1);
+}
+
+/*** Integer load and store strings ***/
+
+/* lswi */
+/* The PowerPC32 specification says we must generate an exception if
+ * rA is in the range of registers to be loaded.
+ * On the other hand, IBM says this is valid, but rA won't be loaded.
+ * For now, we follow the spec...
+ */
+static void gen_lswi(DisasContext *ctx)
+{
+ TCGv t0;
+ TCGv_i32 t1, t2;
+ int nb = NB(ctx->opcode);
+ int start = rD(ctx->opcode);
+ int ra = rA(ctx->opcode);
+ int nr;
+
+ if (ctx->le_mode) {
+ gen_align_no_le(ctx);
+ return;
+ }
+ if (nb == 0)
+ nb = 32;
+ nr = (nb + 3) / 4;
+ if (unlikely(lsw_reg_in_range(start, nr, ra))) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ t0 = tcg_temp_new();
+ gen_addr_register(ctx, t0);
+ t1 = tcg_const_i32(nb);
+ t2 = tcg_const_i32(start);
+ gen_helper_lsw(cpu_env, t0, t1, t2);
+ tcg_temp_free(t0);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+}
+
+/* lswx */
+static void gen_lswx(DisasContext *ctx)
+{
+ TCGv t0;
+ TCGv_i32 t1, t2, t3;
+
+ if (ctx->le_mode) {
+ gen_align_no_le(ctx);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ t1 = tcg_const_i32(rD(ctx->opcode));
+ t2 = tcg_const_i32(rA(ctx->opcode));
+ t3 = tcg_const_i32(rB(ctx->opcode));
+ gen_helper_lswx(cpu_env, t0, t1, t2, t3);
+ tcg_temp_free(t0);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+}
+
+/* stswi */
+static void gen_stswi(DisasContext *ctx)
+{
+ TCGv t0;
+ TCGv_i32 t1, t2;
+ int nb = NB(ctx->opcode);
+
+ if (ctx->le_mode) {
+ gen_align_no_le(ctx);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ t0 = tcg_temp_new();
+ gen_addr_register(ctx, t0);
+ if (nb == 0)
+ nb = 32;
+ t1 = tcg_const_i32(nb);
+ t2 = tcg_const_i32(rS(ctx->opcode));
+ gen_helper_stsw(cpu_env, t0, t1, t2);
+ tcg_temp_free(t0);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+}
+
+/* stswx */
+static void gen_stswx(DisasContext *ctx)
+{
+ TCGv t0;
+ TCGv_i32 t1, t2;
+
+ if (ctx->le_mode) {
+ gen_align_no_le(ctx);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ t1 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t1, cpu_xer);
+ tcg_gen_andi_i32(t1, t1, 0x7F);
+ t2 = tcg_const_i32(rS(ctx->opcode));
+ gen_helper_stsw(cpu_env, t0, t1, t2);
+ tcg_temp_free(t0);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+}
+
+/*** Memory synchronisation ***/
+/* eieio */
+static void gen_eieio(DisasContext *ctx)
+{
+}
+
+#if !defined(CONFIG_USER_ONLY)
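+/* Emit a lazy TLB flush check: when CPUPPCState.tlb_need_flush is non-zero,
+ * call the global or local flush helper, otherwise fall through. Only emitted
+ * for CPUs that use lazy TLB flushing (ctx->lazy_tlb_flush).
+ */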
+static inline void gen_check_tlb_flush(DisasContext *ctx, bool global)
+{
+ TCGv_i32 t;
+ TCGLabel *l;
+
+ if (!ctx->lazy_tlb_flush) {
+ return;
+ }
+ l = gen_new_label();
+ t = tcg_temp_new_i32();
+ tcg_gen_ld_i32(t, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
+ tcg_gen_brcondi_i32(TCG_COND_EQ, t, 0, l);
+ if (global) {
+ gen_helper_check_tlb_flush_global(cpu_env);
+ } else {
+ gen_helper_check_tlb_flush_local(cpu_env);
+ }
+ gen_set_label(l);
+ tcg_temp_free_i32(t);
+}
+#else
+static inline void gen_check_tlb_flush(DisasContext *ctx, bool global) { }
+#endif
+
+/* isync */
+static void gen_isync(DisasContext *ctx)
+{
+ /*
+ * We need to check for a pending TLB flush. This can only happen in
+ * kernel mode, so check MSR_PR.
+ */
+ if (!ctx->pr) {
+ gen_check_tlb_flush(ctx, false);
+ }
+ gen_stop_exception(ctx);
+}
+
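+/* Extract the access size in bytes encoded in a TCGMemOp value. */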
+#define MEMOP_GET_SIZE(x) (1 << ((x) & MO_SIZE))
+
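+/* Load-and-reserve: load the value into rD, record the reservation address in
+ * cpu_reserve and the loaded value in CPUPPCState.reserve_val for the matching
+ * store-conditional.
+ */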
+#define LARX(name, memop) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv t0; \
+ TCGv gpr = cpu_gpr[rD(ctx->opcode)]; \
+ int len = MEMOP_GET_SIZE(memop); \
+ gen_set_access_type(ctx, ACCESS_RES); \
+ t0 = tcg_temp_local_new(); \
+ gen_addr_reg_index(ctx, t0); \
+ if ((len) > 1) { \
+ gen_check_align(ctx, t0, (len)-1); \
+ } \
+ tcg_gen_qemu_ld_tl(gpr, t0, ctx->mem_idx, memop); \
+ tcg_gen_mov_tl(cpu_reserve, t0); \
+ tcg_gen_st_tl(gpr, cpu_env, offsetof(CPUPPCState, reserve_val)); \
+ tcg_temp_free(t0); \
+}
+
+/* lbarx / lharx / lwarx */
+LARX(lbarx, DEF_MEMOP(MO_UB))
+LARX(lharx, DEF_MEMOP(MO_UW))
+LARX(lwarx, DEF_MEMOP(MO_UL))
+
+#if defined(CONFIG_USER_ONLY)
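+/* In user-only mode the store-conditional is not resolved inline: the access
+ * size and source register are packed into reserve_info and a POWERPC_EXCP_STCX
+ * exception is raised so the store can be completed by the user-mode exception
+ * handling loop.
+ */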
+static void gen_conditional_store(DisasContext *ctx, TCGv EA,
+ int reg, int memop)
+{
+ TCGv t0 = tcg_temp_new();
+
+ tcg_gen_st_tl(EA, cpu_env, offsetof(CPUPPCState, reserve_ea));
+ tcg_gen_movi_tl(t0, (MEMOP_GET_SIZE(memop) << 5) | reg);
+ tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, reserve_info));
+ tcg_temp_free(t0);
+ gen_exception_err(ctx, POWERPC_EXCP_STCX, 0);
+}
+#else
+static void gen_conditional_store(DisasContext *ctx, TCGv EA,
+ int reg, int memop)
+{
+ TCGLabel *l1;
+
+ tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
+ l1 = gen_new_label();
+ tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, l1);
+ tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 1 << CRF_EQ);
+ tcg_gen_qemu_st_tl(cpu_gpr[reg], EA, ctx->mem_idx, memop);
+ gen_set_label(l1);
+ tcg_gen_movi_tl(cpu_reserve, -1);
+}
+#endif
+
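+/* Store-conditional: the softmmu version of gen_conditional_store() above
+ * compares the effective address against cpu_reserve, performs the store and
+ * sets CR0.EQ only on a match, and always clears the reservation afterwards.
+ */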
+#define STCX(name, memop) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv t0; \
+ int len = MEMOP_GET_SIZE(memop); \
+ gen_set_access_type(ctx, ACCESS_RES); \
+ t0 = tcg_temp_local_new(); \
+ gen_addr_reg_index(ctx, t0); \
+ if (len > 1) { \
+ gen_check_align(ctx, t0, (len) - 1); \
+ } \
+ gen_conditional_store(ctx, t0, rS(ctx->opcode), memop); \
+ tcg_temp_free(t0); \
+}
+
+STCX(stbcx_, DEF_MEMOP(MO_UB))
+STCX(sthcx_, DEF_MEMOP(MO_UW))
+STCX(stwcx_, DEF_MEMOP(MO_UL))
+
+#if defined(TARGET_PPC64)
+/* ldarx */
+LARX(ldarx, DEF_MEMOP(MO_Q))
+/* stdcx. */
+STCX(stdcx_, DEF_MEMOP(MO_Q))
+
+/* lqarx */
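+/* rD must be even and distinct from rA and rB; the two doublewords are loaded
+ * into the even/odd register pair (halves swapped in little-endian mode), the
+ * reservation address goes to cpu_reserve and the values to reserve_val and
+ * reserve_val2.
+ */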
+static void gen_lqarx(DisasContext *ctx)
+{
+ TCGv EA;
+ int rd = rD(ctx->opcode);
+ TCGv gpr1, gpr2;
+
+ if (unlikely((rd & 1) || (rd == rA(ctx->opcode)) ||
+ (rd == rB(ctx->opcode)))) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ return;
+ }
+
+ gen_set_access_type(ctx, ACCESS_RES);
+ EA = tcg_temp_local_new();
+ gen_addr_reg_index(ctx, EA);
+ gen_check_align(ctx, EA, 15);
+ if (unlikely(ctx->le_mode)) {
+ gpr1 = cpu_gpr[rd+1];
+ gpr2 = cpu_gpr[rd];
+ } else {
+ gpr1 = cpu_gpr[rd];
+ gpr2 = cpu_gpr[rd+1];
+ }
+ tcg_gen_qemu_ld_i64(gpr1, EA, ctx->mem_idx, DEF_MEMOP(MO_Q));
+ tcg_gen_mov_tl(cpu_reserve, EA);
+ gen_addr_add(ctx, EA, EA, 8);
+ tcg_gen_qemu_ld_i64(gpr2, EA, ctx->mem_idx, DEF_MEMOP(MO_Q));
+
+ tcg_gen_st_tl(gpr1, cpu_env, offsetof(CPUPPCState, reserve_val));
+ tcg_gen_st_tl(gpr2, cpu_env, offsetof(CPUPPCState, reserve_val2));
+ tcg_temp_free(EA);
+}
+
+/* stqcx. */
+static void gen_stqcx_(DisasContext *ctx)
+{
+ TCGv EA;
+ int reg = rS(ctx->opcode);
+ int len = 16;
+#if !defined(CONFIG_USER_ONLY)
+ TCGLabel *l1;
+ TCGv gpr1, gpr2;
+#endif
+
+ if (unlikely((rD(ctx->opcode) & 1))) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_RES);
+ EA = tcg_temp_local_new();
+ gen_addr_reg_index(ctx, EA);
+ if (len > 1) {
+ gen_check_align(ctx, EA, (len) - 1);
+ }
+
+#if defined(CONFIG_USER_ONLY)
+ gen_conditional_store(ctx, EA, reg, 16);
+#else
+ tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
+ l1 = gen_new_label();
+ tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, l1);
+ tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 1 << CRF_EQ);
+
+ if (unlikely(ctx->le_mode)) {
+ gpr1 = cpu_gpr[reg + 1];
+ gpr2 = cpu_gpr[reg];
+ } else {
+ gpr1 = cpu_gpr[reg];
+ gpr2 = cpu_gpr[reg + 1];
+ }
+ tcg_gen_qemu_st_tl(gpr1, EA, ctx->mem_idx, DEF_MEMOP(MO_Q));
+ gen_addr_add(ctx, EA, EA, 8);
+ tcg_gen_qemu_st_tl(gpr2, EA, ctx->mem_idx, DEF_MEMOP(MO_Q));
+
+ gen_set_label(l1);
+ tcg_gen_movi_tl(cpu_reserve, -1);
+#endif
+ tcg_temp_free(EA);
+}
+
+#endif /* defined(TARGET_PPC64) */
+
+/* sync */
+static void gen_sync(DisasContext *ctx)
+{
+ uint32_t l = (ctx->opcode >> 21) & 3;
+
+ /*
+ * We may need to check for a pending TLB flush.
+ *
+ * We do this on ptesync (l == 2) on ppc64 and any sync on ppc32.
+ *
+ * Additionally, this can only happen in kernel mode, so check
+ * MSR_PR as well.
+ */
+ if (((l == 2) || !(ctx->insns_flags & PPC_64B)) && !ctx->pr) {
+ gen_check_tlb_flush(ctx, true);
+ }
+}
+
+/* wait */
+static void gen_wait(DisasContext *ctx)
+{
+ TCGv_i32 t0 = tcg_const_i32(1);
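+ /* cpu_env points at PowerPCCPU::env, so subtracting offsetof(PowerPCCPU, env)
+ * rebases it to the start of the CPU object before storing into
+ * CPUState::halted. */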
+ tcg_gen_st_i32(t0, cpu_env,
+ -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted));
+ tcg_temp_free_i32(t0);
+ /* Stop translation, as the CPU is supposed to sleep from now */
+ gen_exception_nip(ctx, EXCP_HLT, ctx->nip);
+}
+
+#if defined(TARGET_PPC64)
+static void gen_doze(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv_i32 t;
+
+ CHK_HV;
+ t = tcg_const_i32(PPC_PM_DOZE);
+ gen_helper_pminsn(cpu_env, t);
+ tcg_temp_free_i32(t);
+ gen_stop_exception(ctx);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+static void gen_nap(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv_i32 t;
+
+ CHK_HV;
+ t = tcg_const_i32(PPC_PM_NAP);
+ gen_helper_pminsn(cpu_env, t);
+ tcg_temp_free_i32(t);
+ gen_stop_exception(ctx);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+static void gen_sleep(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv_i32 t;
+
+ CHK_HV;
+ t = tcg_const_i32(PPC_PM_SLEEP);
+ gen_helper_pminsn(cpu_env, t);
+ tcg_temp_free_i32(t);
+ gen_stop_exception(ctx);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+static void gen_rvwinkle(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv_i32 t;
+
+ CHK_HV;
+ t = tcg_const_i32(PPC_PM_RVWINKLE);
+ gen_helper_pminsn(cpu_env, t);
+ tcg_temp_free_i32(t);
+ gen_stop_exception(ctx);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+#endif /* #if defined(TARGET_PPC64) */
+
+static inline void gen_update_cfar(DisasContext *ctx, target_ulong nip)
+{
+#if defined(TARGET_PPC64)
+ if (ctx->has_cfar)
+ tcg_gen_movi_tl(cpu_cfar, nip);
+#endif
+}
+
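+/* Direct TB chaining is only allowed when not single-stepping and, in softmmu
+ * mode, when the destination lies on the same page as the current TB. */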
+static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
+{
+ if (unlikely(ctx->singlestep_enabled)) {
+ return false;
+ }
+
+#ifndef CONFIG_USER_ONLY
+ return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
+#else
+ return true;
+#endif
+}
+
+/*** Branch ***/
+static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
+{
+ if (NARROW_MODE(ctx)) {
+ dest = (uint32_t) dest;
+ }
+ if (use_goto_tb(ctx, dest)) {
+ tcg_gen_goto_tb(n);
+ tcg_gen_movi_tl(cpu_nip, dest & ~3);
+ tcg_gen_exit_tb((uintptr_t)ctx->tb + n);
+ } else {
+ tcg_gen_movi_tl(cpu_nip, dest & ~3);
+ if (unlikely(ctx->singlestep_enabled)) {
+ if ((ctx->singlestep_enabled &
+ (CPU_BRANCH_STEP | CPU_SINGLE_STEP)) &&
+ (ctx->exception == POWERPC_EXCP_BRANCH ||
+ ctx->exception == POWERPC_EXCP_TRACE)) {
+ gen_exception_nip(ctx, POWERPC_EXCP_TRACE, dest);
+ }
+ if (ctx->singlestep_enabled & GDBSTUB_SINGLE_STEP) {
+ gen_debug_exception(ctx);
+ }
+ }
+ tcg_gen_exit_tb(0);
+ }
+}
+
+static inline void gen_setlr(DisasContext *ctx, target_ulong nip)
+{
+ if (NARROW_MODE(ctx)) {
+ nip = (uint32_t)nip;
+ }
+ tcg_gen_movi_tl(cpu_lr, nip);
+}
+
+/* b ba bl bla */
+static void gen_b(DisasContext *ctx)
+{
+ target_ulong li, target;
+
+ ctx->exception = POWERPC_EXCP_BRANCH;
+ /* sign extend LI */
+ li = LI(ctx->opcode);
+ li = (li ^ 0x02000000) - 0x02000000;
+ if (likely(AA(ctx->opcode) == 0)) {
+ target = ctx->nip + li - 4;
+ } else {
+ target = li;
+ }
+ if (LK(ctx->opcode)) {
+ gen_setlr(ctx, ctx->nip);
+ }
+ gen_update_cfar(ctx, ctx->nip - 4);
+ gen_goto_tb(ctx, 0, target);
+}
+
+#define BCOND_IM 0
+#define BCOND_LR 1
+#define BCOND_CTR 2
+#define BCOND_TAR 3
+
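+/* BO bit 0x4 set means "do not decrement and test CTR" and bit 0x10 set means
+ * "do not test the CR bit selected by BI"; bits 0x2 and 0x8 choose the sense
+ * of the CTR and CR tests respectively.
+ */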
+static inline void gen_bcond(DisasContext *ctx, int type)
+{
+ uint32_t bo = BO(ctx->opcode);
+ TCGLabel *l1;
+ TCGv target;
+
+ ctx->exception = POWERPC_EXCP_BRANCH;
+ if (type == BCOND_LR || type == BCOND_CTR || type == BCOND_TAR) {
+ target = tcg_temp_local_new();
+ if (type == BCOND_CTR)
+ tcg_gen_mov_tl(target, cpu_ctr);
+ else if (type == BCOND_TAR)
+ gen_load_spr(target, SPR_TAR);
+ else
+ tcg_gen_mov_tl(target, cpu_lr);
+ } else {
+ TCGV_UNUSED(target);
+ }
+ if (LK(ctx->opcode))
+ gen_setlr(ctx, ctx->nip);
+ l1 = gen_new_label();
+ if ((bo & 0x4) == 0) {
+ /* Decrement and test CTR */
+ TCGv temp = tcg_temp_new();
+ if (unlikely(type == BCOND_CTR)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ return;
+ }
+ tcg_gen_subi_tl(cpu_ctr, cpu_ctr, 1);
+ if (NARROW_MODE(ctx)) {
+ tcg_gen_ext32u_tl(temp, cpu_ctr);
+ } else {
+ tcg_gen_mov_tl(temp, cpu_ctr);
+ }
+ if (bo & 0x2) {
+ tcg_gen_brcondi_tl(TCG_COND_NE, temp, 0, l1);
+ } else {
+ tcg_gen_brcondi_tl(TCG_COND_EQ, temp, 0, l1);
+ }
+ tcg_temp_free(temp);
+ }
+ if ((bo & 0x10) == 0) {
+ /* Test CR */
+ uint32_t bi = BI(ctx->opcode);
+ uint32_t mask = 0x08 >> (bi & 0x03);
+ TCGv_i32 temp = tcg_temp_new_i32();
+
+ if (bo & 0x8) {
+ tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, temp, 0, l1);
+ } else {
+ tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask);
+ tcg_gen_brcondi_i32(TCG_COND_NE, temp, 0, l1);
+ }
+ tcg_temp_free_i32(temp);
+ }
+ gen_update_cfar(ctx, ctx->nip - 4);
+ if (type == BCOND_IM) {
+ target_ulong li = (target_long)((int16_t)(BD(ctx->opcode)));
+ if (likely(AA(ctx->opcode) == 0)) {
+ gen_goto_tb(ctx, 0, ctx->nip + li - 4);
+ } else {
+ gen_goto_tb(ctx, 0, li);
+ }
+ if ((bo & 0x14) != 0x14) {
+ gen_set_label(l1);
+ gen_goto_tb(ctx, 1, ctx->nip);
+ }
+ } else {
+ if (NARROW_MODE(ctx)) {
+ tcg_gen_andi_tl(cpu_nip, target, (uint32_t)~3);
+ } else {
+ tcg_gen_andi_tl(cpu_nip, target, ~3);
+ }
+ tcg_gen_exit_tb(0);
+ if ((bo & 0x14) != 0x14) {
+ gen_set_label(l1);
+ gen_update_nip(ctx, ctx->nip);
+ tcg_gen_exit_tb(0);
+ }
+ }
+ if (type == BCOND_LR || type == BCOND_CTR || type == BCOND_TAR) {
+ tcg_temp_free(target);
+ }
+}
+
+static void gen_bc(DisasContext *ctx)
+{
+ gen_bcond(ctx, BCOND_IM);
+}
+
+static void gen_bcctr(DisasContext *ctx)
+{
+ gen_bcond(ctx, BCOND_CTR);
+}
+
+static void gen_bclr(DisasContext *ctx)
+{
+ gen_bcond(ctx, BCOND_LR);
+}
+
+static void gen_bctar(DisasContext *ctx)
+{
+ gen_bcond(ctx, BCOND_TAR);
+}
+
+/*** Condition register logical ***/
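+/* Each CR field is held as a 4-bit value in cpu_crf[]. The CR logical ops shift
+ * the crbA and crbB bits into the bit position of crbD, apply the operation and
+ * merge the single result bit back into the destination field.
+ */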
+#define GEN_CRLOGIC(name, tcg_op, opc) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ uint8_t bitmask; \
+ int sh; \
+ TCGv_i32 t0, t1; \
+ sh = (crbD(ctx->opcode) & 0x03) - (crbA(ctx->opcode) & 0x03); \
+ t0 = tcg_temp_new_i32(); \
+ if (sh > 0) \
+ tcg_gen_shri_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2], sh); \
+ else if (sh < 0) \
+ tcg_gen_shli_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2], -sh); \
+ else \
+ tcg_gen_mov_i32(t0, cpu_crf[crbA(ctx->opcode) >> 2]); \
+ t1 = tcg_temp_new_i32(); \
+ sh = (crbD(ctx->opcode) & 0x03) - (crbB(ctx->opcode) & 0x03); \
+ if (sh > 0) \
+ tcg_gen_shri_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2], sh); \
+ else if (sh < 0) \
+ tcg_gen_shli_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2], -sh); \
+ else \
+ tcg_gen_mov_i32(t1, cpu_crf[crbB(ctx->opcode) >> 2]); \
+ tcg_op(t0, t0, t1); \
+ bitmask = 0x08 >> (crbD(ctx->opcode) & 0x03); \
+ tcg_gen_andi_i32(t0, t0, bitmask); \
+ tcg_gen_andi_i32(t1, cpu_crf[crbD(ctx->opcode) >> 2], ~bitmask); \
+ tcg_gen_or_i32(cpu_crf[crbD(ctx->opcode) >> 2], t0, t1); \
+ tcg_temp_free_i32(t0); \
+ tcg_temp_free_i32(t1); \
+}
+
+/* crand */
+GEN_CRLOGIC(crand, tcg_gen_and_i32, 0x08);
+/* crandc */
+GEN_CRLOGIC(crandc, tcg_gen_andc_i32, 0x04);
+/* creqv */
+GEN_CRLOGIC(creqv, tcg_gen_eqv_i32, 0x09);
+/* crnand */
+GEN_CRLOGIC(crnand, tcg_gen_nand_i32, 0x07);
+/* crnor */
+GEN_CRLOGIC(crnor, tcg_gen_nor_i32, 0x01);
+/* cror */
+GEN_CRLOGIC(cror, tcg_gen_or_i32, 0x0E);
+/* crorc */
+GEN_CRLOGIC(crorc, tcg_gen_orc_i32, 0x0D);
+/* crxor */
+GEN_CRLOGIC(crxor, tcg_gen_xor_i32, 0x06);
+
+/* mcrf */
+static void gen_mcrf(DisasContext *ctx)
+{
+ tcg_gen_mov_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfS(ctx->opcode)]);
+}
+
+/*** System linkage ***/
+
+/* rfi (supervisor only) */
+static void gen_rfi(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ /* This instruction doesn't exist anymore on 64-bit server
+ * processors compliant with arch 2.x
+ */
+ if (ctx->insns_flags & PPC_SEGMENT_64B) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ return;
+ }
+ /* Restore CPU state */
+ CHK_SV;
+ gen_update_cfar(ctx, ctx->nip - 4);
+ gen_helper_rfi(cpu_env);
+ gen_sync_exception(ctx);
+#endif
+}
+
+#if defined(TARGET_PPC64)
+static void gen_rfid(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ /* Restore CPU state */
+ CHK_SV;
+ gen_update_cfar(ctx, ctx->nip - 4);
+ gen_helper_rfid(cpu_env);
+ gen_sync_exception(ctx);
+#endif
+}
+
+static void gen_hrfid(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ /* Restore CPU state */
+ CHK_HV;
+ gen_helper_hrfid(cpu_env);
+ gen_sync_exception(ctx);
+#endif
+}
+#endif
+
+/* sc */
+#if defined(CONFIG_USER_ONLY)
+#define POWERPC_SYSCALL POWERPC_EXCP_SYSCALL_USER
+#else
+#define POWERPC_SYSCALL POWERPC_EXCP_SYSCALL
+#endif
+static void gen_sc(DisasContext *ctx)
+{
+ uint32_t lev;
+
+ lev = (ctx->opcode >> 5) & 0x7F;
+ gen_exception_err(ctx, POWERPC_SYSCALL, lev);
+}
+
+/*** Trap ***/
+
+/* Check for unconditional traps (always or never) */
+static bool check_unconditional_trap(DisasContext *ctx)
+{
+ /* Trap never */
+ if (TO(ctx->opcode) == 0) {
+ return true;
+ }
+ /* Trap always */
+ if (TO(ctx->opcode) == 31) {
+ gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
+ return true;
+ }
+ return false;
+}
+
+/* tw */
+static void gen_tw(DisasContext *ctx)
+{
+ TCGv_i32 t0;
+
+ if (check_unconditional_trap(ctx)) {
+ return;
+ }
+ t0 = tcg_const_i32(TO(ctx->opcode));
+ gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
+ t0);
+ tcg_temp_free_i32(t0);
+}
+
+/* twi */
+static void gen_twi(DisasContext *ctx)
+{
+ TCGv t0;
+ TCGv_i32 t1;
+
+ if (check_unconditional_trap(ctx)) {
+ return;
+ }
+ t0 = tcg_const_tl(SIMM(ctx->opcode));
+ t1 = tcg_const_i32(TO(ctx->opcode));
+ gen_helper_tw(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free_i32(t1);
+}
+
+#if defined(TARGET_PPC64)
+/* td */
+static void gen_td(DisasContext *ctx)
+{
+ TCGv_i32 t0;
+
+ if (check_unconditional_trap(ctx)) {
+ return;
+ }
+ t0 = tcg_const_i32(TO(ctx->opcode));
+ gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)],
+ t0);
+ tcg_temp_free_i32(t0);
+}
+
+/* tdi */
+static void gen_tdi(DisasContext *ctx)
+{
+ TCGv t0;
+ TCGv_i32 t1;
+
+ if (check_unconditional_trap(ctx)) {
+ return;
+ }
+ t0 = tcg_const_tl(SIMM(ctx->opcode));
+ t1 = tcg_const_i32(TO(ctx->opcode));
+ gen_helper_td(cpu_env, cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free_i32(t1);
+}
+#endif
+
+/*** Processor control ***/
+
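+/* XER is kept split across TCG globals: the SO, OV and CA bits live in cpu_so,
+ * cpu_ov and cpu_ca while the remaining bits stay in cpu_xer. Reads recombine
+ * them and writes scatter them back.
+ */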
+static void gen_read_xer(TCGv dst)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ tcg_gen_mov_tl(dst, cpu_xer);
+ tcg_gen_shli_tl(t0, cpu_so, XER_SO);
+ tcg_gen_shli_tl(t1, cpu_ov, XER_OV);
+ tcg_gen_shli_tl(t2, cpu_ca, XER_CA);
+ tcg_gen_or_tl(t0, t0, t1);
+ tcg_gen_or_tl(dst, dst, t2);
+ tcg_gen_or_tl(dst, dst, t0);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+}
+
+static void gen_write_xer(TCGv src)
+{
+ tcg_gen_andi_tl(cpu_xer, src,
+ ~((1u << XER_SO) | (1u << XER_OV) | (1u << XER_CA)));
+ tcg_gen_shri_tl(cpu_so, src, XER_SO);
+ tcg_gen_shri_tl(cpu_ov, src, XER_OV);
+ tcg_gen_shri_tl(cpu_ca, src, XER_CA);
+ tcg_gen_andi_tl(cpu_so, cpu_so, 1);
+ tcg_gen_andi_tl(cpu_ov, cpu_ov, 1);
+ tcg_gen_andi_tl(cpu_ca, cpu_ca, 1);
+}
+
+/* mcrxr */
+static void gen_mcrxr(DisasContext *ctx)
+{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ TCGv_i32 dst = cpu_crf[crfD(ctx->opcode)];
+
+ tcg_gen_trunc_tl_i32(t0, cpu_so);
+ tcg_gen_trunc_tl_i32(t1, cpu_ov);
+ tcg_gen_trunc_tl_i32(dst, cpu_ca);
+ tcg_gen_shli_i32(t0, t0, 3);
+ tcg_gen_shli_i32(t1, t1, 2);
+ tcg_gen_shli_i32(dst, dst, 1);
+ tcg_gen_or_i32(dst, dst, t0);
+ tcg_gen_or_i32(dst, dst, t1);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+
+ tcg_gen_movi_tl(cpu_so, 0);
+ tcg_gen_movi_tl(cpu_ov, 0);
+ tcg_gen_movi_tl(cpu_ca, 0);
+}
+
+/* mfcr mfocrf */
+static void gen_mfcr(DisasContext *ctx)
+{
+ uint32_t crm, crn;
+
+ if (likely(ctx->opcode & 0x00100000)) {
+ crm = CRM(ctx->opcode);
+ if (likely(crm && ((crm & (crm - 1)) == 0))) {
+ crn = ctz32 (crm);
+ tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], cpu_crf[7 - crn]);
+ tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)],
+ cpu_gpr[rD(ctx->opcode)], crn * 4);
+ }
+ } else {
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_mov_i32(t0, cpu_crf[0]);
+ tcg_gen_shli_i32(t0, t0, 4);
+ tcg_gen_or_i32(t0, t0, cpu_crf[1]);
+ tcg_gen_shli_i32(t0, t0, 4);
+ tcg_gen_or_i32(t0, t0, cpu_crf[2]);
+ tcg_gen_shli_i32(t0, t0, 4);
+ tcg_gen_or_i32(t0, t0, cpu_crf[3]);
+ tcg_gen_shli_i32(t0, t0, 4);
+ tcg_gen_or_i32(t0, t0, cpu_crf[4]);
+ tcg_gen_shli_i32(t0, t0, 4);
+ tcg_gen_or_i32(t0, t0, cpu_crf[5]);
+ tcg_gen_shli_i32(t0, t0, 4);
+ tcg_gen_or_i32(t0, t0, cpu_crf[6]);
+ tcg_gen_shli_i32(t0, t0, 4);
+ tcg_gen_or_i32(t0, t0, cpu_crf[7]);
+ tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free_i32(t0);
+ }
+}
+
+/* mfmsr */
+static void gen_mfmsr(DisasContext *ctx)
+{
+ CHK_SV;
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_msr);
+}
+
+static void spr_noaccess(DisasContext *ctx, int gprn, int sprn)
+{
+#if 0
+ sprn = ((sprn >> 5) & 0x1F) | ((sprn & 0x1F) << 5);
+ printf("ERROR: try to access SPR %d !\n", sprn);
+#endif
+}
+#define SPR_NOACCESS (&spr_noaccess)
+
+/* mfspr */
+static inline void gen_op_mfspr(DisasContext *ctx)
+{
+ void (*read_cb)(DisasContext *ctx, int gprn, int sprn);
+ uint32_t sprn = SPR(ctx->opcode);
+
+#if defined(CONFIG_USER_ONLY)
+ read_cb = ctx->spr_cb[sprn].uea_read;
+#else
+ if (ctx->pr) {
+ read_cb = ctx->spr_cb[sprn].uea_read;
+ } else if (ctx->hv) {
+ read_cb = ctx->spr_cb[sprn].hea_read;
+ } else {
+ read_cb = ctx->spr_cb[sprn].oea_read;
+ }
+#endif
+ if (likely(read_cb != NULL)) {
+ if (likely(read_cb != SPR_NOACCESS)) {
+ (*read_cb)(ctx, rD(ctx->opcode), sprn);
+ } else {
+ /* Privilege exception */
+ /* This is a hack to avoid warnings when running Linux:
+ * this OS breaks the PowerPC virtualisation model,
+ * allowing userland applications to read the PVR
+ */
+ if (sprn != SPR_PVR) {
+ fprintf(stderr, "Trying to read privileged spr %d (0x%03x) at "
+ TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4);
+ if (qemu_log_separate()) {
+ qemu_log("Trying to read privileged spr %d (0x%03x) at "
+ TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4);
+ }
+ }
+ gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
+ }
+ } else {
+ /* ISA 2.07 defines these as no-ops */
+ if ((ctx->insns_flags2 & PPC2_ISA207S) &&
+ (sprn >= 808 && sprn <= 811)) {
+ /* This is a nop */
+ return;
+ }
+ /* Not defined */
+ fprintf(stderr, "Trying to read invalid spr %d (0x%03x) at "
+ TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4);
+ if (qemu_log_separate()) {
+ qemu_log("Trying to read invalid spr %d (0x%03x) at "
+ TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4);
+ }
+
+ /* The behaviour depends on MSR:PR and SPR# bit 0x10;
+ * it can generate a priv, a hv emu or a no-op
+ */
+ if (sprn & 0x10) {
+ if (ctx->pr) {
+ gen_priv_exception(ctx, POWERPC_EXCP_INVAL_SPR);
+ }
+ } else {
+ if (ctx->pr || sprn == 0 || sprn == 4 || sprn == 5 || sprn == 6) {
+ gen_hvpriv_exception(ctx, POWERPC_EXCP_INVAL_SPR);
+ }
+ }
+ }
+}
+
+static void gen_mfspr(DisasContext *ctx)
+{
+ gen_op_mfspr(ctx);
+}
+
+/* mftb */
+static void gen_mftb(DisasContext *ctx)
+{
+ gen_op_mfspr(ctx);
+}
+
+/* mtcrf mtocrf */
+static void gen_mtcrf(DisasContext *ctx)
+{
+ uint32_t crm, crn;
+
+ crm = CRM(ctx->opcode);
+ if (likely((ctx->opcode & 0x00100000))) {
+ if (crm && ((crm & (crm - 1)) == 0)) {
+ TCGv_i32 temp = tcg_temp_new_i32();
+ crn = ctz32 (crm);
+ tcg_gen_trunc_tl_i32(temp, cpu_gpr[rS(ctx->opcode)]);
+ tcg_gen_shri_i32(temp, temp, crn * 4);
+ tcg_gen_andi_i32(cpu_crf[7 - crn], temp, 0xf);
+ tcg_temp_free_i32(temp);
+ }
+ } else {
+ TCGv_i32 temp = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(temp, cpu_gpr[rS(ctx->opcode)]);
+ for (crn = 0 ; crn < 8 ; crn++) {
+ if (crm & (1 << crn)) {
+ tcg_gen_shri_i32(cpu_crf[7 - crn], temp, crn * 4);
+ tcg_gen_andi_i32(cpu_crf[7 - crn], cpu_crf[7 - crn], 0xf);
+ }
+ }
+ tcg_temp_free_i32(temp);
+ }
+}
+
+/* mtmsr */
+#if defined(TARGET_PPC64)
+static void gen_mtmsrd(DisasContext *ctx)
+{
+ CHK_SV;
+
+#if !defined(CONFIG_USER_ONLY)
+ if (ctx->opcode & 0x00010000) {
+ /* Special form that does not need any synchronisation */
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], (1 << MSR_RI) | (1 << MSR_EE));
+ tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(target_ulong)((1 << MSR_RI) | (1 << MSR_EE)));
+ tcg_gen_or_tl(cpu_msr, cpu_msr, t0);
+ tcg_temp_free(t0);
+ } else {
+ /* XXX: we need to update nip before the store:
+ * if we enter power saving mode, we will exit the loop
+ * directly from ppc_store_msr
+ */
+ gen_update_nip(ctx, ctx->nip);
+ gen_helper_store_msr(cpu_env, cpu_gpr[rS(ctx->opcode)]);
+ /* Must stop the translation as machine state (may have) changed */
+ /* Note that mtmsr is not always defined as context-synchronizing */
+ gen_stop_exception(ctx);
+ }
+#endif /* !defined(CONFIG_USER_ONLY) */
+}
+#endif /* defined(TARGET_PPC64) */
+
+static void gen_mtmsr(DisasContext *ctx)
+{
+ CHK_SV;
+
+#if !defined(CONFIG_USER_ONLY)
+ if (ctx->opcode & 0x00010000) {
+ /* Special form that does not need any synchronisation */
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], (1 << MSR_RI) | (1 << MSR_EE));
+ tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(target_ulong)((1 << MSR_RI) | (1 << MSR_EE)));
+ tcg_gen_or_tl(cpu_msr, cpu_msr, t0);
+ tcg_temp_free(t0);
+ } else {
+ TCGv msr = tcg_temp_new();
+
+ /* XXX: we need to update nip before the store:
+ * if we enter power saving mode, we will exit the loop
+ * directly from ppc_store_msr
+ */
+ gen_update_nip(ctx, ctx->nip);
+#if defined(TARGET_PPC64)
+ tcg_gen_deposit_tl(msr, cpu_msr, cpu_gpr[rS(ctx->opcode)], 0, 32);
+#else
+ tcg_gen_mov_tl(msr, cpu_gpr[rS(ctx->opcode)]);
+#endif
+ gen_helper_store_msr(cpu_env, msr);
+ tcg_temp_free(msr);
+ /* Must stop the translation as machine state (may have) changed */
+ /* Note that mtmsr is not always defined as context-synchronizing */
+ gen_stop_exception(ctx);
+ }
+#endif
+}
+
+/* mtspr */
+static void gen_mtspr(DisasContext *ctx)
+{
+ void (*write_cb)(DisasContext *ctx, int sprn, int gprn);
+ uint32_t sprn = SPR(ctx->opcode);
+
+#if defined(CONFIG_USER_ONLY)
+ write_cb = ctx->spr_cb[sprn].uea_write;
+#else
+ if (ctx->pr) {
+ write_cb = ctx->spr_cb[sprn].uea_write;
+ } else if (ctx->hv) {
+ write_cb = ctx->spr_cb[sprn].hea_write;
+ } else {
+ write_cb = ctx->spr_cb[sprn].oea_write;
+ }
+#endif
+ if (likely(write_cb != NULL)) {
+ if (likely(write_cb != SPR_NOACCESS)) {
+ (*write_cb)(ctx, sprn, rS(ctx->opcode));
+ } else {
+ /* Privilege exception */
+ fprintf(stderr, "Trying to write privileged spr %d (0x%03x) at "
+ TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4);
+ if (qemu_log_separate()) {
+ qemu_log("Trying to write privileged spr %d (0x%03x) at "
+ TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4);
+ }
+ gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG);
+ }
+ } else {
+ /* ISA 2.07 defines these as no-ops */
+ if ((ctx->insns_flags2 & PPC2_ISA207S) &&
+ (sprn >= 808 && sprn <= 811)) {
+ /* This is a nop */
+ return;
+ }
+
+ /* Not defined */
+ if (qemu_log_separate()) {
+ qemu_log("Trying to write invalid spr %d (0x%03x) at "
+ TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4);
+ }
+ fprintf(stderr, "Trying to write invalid spr %d (0x%03x) at "
+ TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4);
+
+ /* The behaviour depends on MSR:PR and SPR# bit 0x10;
+ * it can generate a priv, a hv emu or a no-op
+ */
+ if (sprn & 0x10) {
+ if (ctx->pr) {
+ gen_priv_exception(ctx, POWERPC_EXCP_INVAL_SPR);
+ }
+ } else {
+ if (ctx->pr || sprn == 0) {
+ gen_hvpriv_exception(ctx, POWERPC_EXCP_INVAL_SPR);
+ }
+ }
+ }
+}
+
+#if defined(TARGET_PPC64)
+/* setb */
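+/* Set rD to -1, 1 or 0 according to whether the LT bit, the GT bit or neither
+ * is set in the selected CR field (field value >= 8 means LT, >= 4 means GT).
+ */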
+static void gen_setb(DisasContext *ctx)
+{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 t8 = tcg_temp_new_i32();
+ TCGv_i32 tm1 = tcg_temp_new_i32();
+ int crf = crfS(ctx->opcode);
+
+ tcg_gen_setcondi_i32(TCG_COND_GEU, t0, cpu_crf[crf], 4);
+ tcg_gen_movi_i32(t8, 8);
+ tcg_gen_movi_i32(tm1, -1);
+ tcg_gen_movcond_i32(TCG_COND_GEU, t0, cpu_crf[crf], t8, tm1, t0);
+ tcg_gen_ext_i32_tl(cpu_gpr[rD(ctx->opcode)], t0);
+
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t8);
+ tcg_temp_free_i32(tm1);
+}
+#endif
+
+/*** Cache management ***/
+
+/* dcbf */
+static void gen_dcbf(DisasContext *ctx)
+{
+ /* XXX: specification says this is treated as a load by the MMU */
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_CACHE);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_qemu_ld8u(ctx, t0, t0);
+ tcg_temp_free(t0);
+}
+
+/* dcbi (Supervisor only) */
+static void gen_dcbi(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv EA, val;
+
+ CHK_SV;
+ EA = tcg_temp_new();
+ gen_set_access_type(ctx, ACCESS_CACHE);
+ gen_addr_reg_index(ctx, EA);
+ val = tcg_temp_new();
+ /* XXX: specification says this should be treated as a store by the MMU */
+ gen_qemu_ld8u(ctx, val, EA);
+ gen_qemu_st8(ctx, val, EA);
+ tcg_temp_free(val);
+ tcg_temp_free(EA);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* dcbst */
+static void gen_dcbst(DisasContext *ctx)
+{
+ /* XXX: specification says this is treated as a load by the MMU */
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_CACHE);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_qemu_ld8u(ctx, t0, t0);
+ tcg_temp_free(t0);
+}
+
+/* dcbt */
+static void gen_dcbt(DisasContext *ctx)
+{
+ /* interpreted as no-op */
+ /* XXX: specification says this is treated as a load by the MMU
+ * but does not generate any exception
+ */
+}
+
+/* dcbtst */
+static void gen_dcbtst(DisasContext *ctx)
+{
+ /* interpreted as no-op */
+ /* XXX: specification says this is treated as a load by the MMU
+ * but does not generate any exception
+ */
+}
+
+/* dcbtls */
+static void gen_dcbtls(DisasContext *ctx)
+{
+ /* Always fails locking the cache */
+ TCGv t0 = tcg_temp_new();
+ gen_load_spr(t0, SPR_Exxx_L1CSR0);
+ tcg_gen_ori_tl(t0, t0, L1CSR0_CUL);
+ gen_store_spr(SPR_Exxx_L1CSR0, t0);
+ tcg_temp_free(t0);
+}
+
+/* dcbz */
+static void gen_dcbz(DisasContext *ctx)
+{
+ TCGv tcgv_addr;
+ TCGv_i32 tcgv_op;
+
+ gen_set_access_type(ctx, ACCESS_CACHE);
+ tcgv_addr = tcg_temp_new();
+ tcgv_op = tcg_const_i32(ctx->opcode & 0x03FF000);
+ gen_addr_reg_index(ctx, tcgv_addr);
+ gen_helper_dcbz(cpu_env, tcgv_addr, tcgv_op);
+ tcg_temp_free(tcgv_addr);
+ tcg_temp_free_i32(tcgv_op);
+}
+
+/* dst / dstt */
+static void gen_dst(DisasContext *ctx)
+{
+ if (rA(ctx->opcode) == 0) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ } else {
+ /* interpreted as no-op */
+ }
+}
+
+/* dstst / dststt */
+static void gen_dstst(DisasContext *ctx)
+{
+ if (rA(ctx->opcode) == 0) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ } else {
+ /* interpreted as no-op */
+ }
+}
+
+/* dss / dssall */
+static void gen_dss(DisasContext *ctx)
+{
+ /* interpreted as no-op */
+}
+
+/* icbi */
+static void gen_icbi(DisasContext *ctx)
+{
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_CACHE);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_helper_icbi(cpu_env, t0);
+ tcg_temp_free(t0);
+}
+
+/* Optional: */
+/* dcba */
+static void gen_dcba(DisasContext *ctx)
+{
+ /* interpreted as no-op */
+ /* XXX: specification says this is treated as a store by the MMU
+ * but does not generate any exception
+ */
+}
+
+/*** Segment register manipulation ***/
+/* Supervisor only: */
+
+/* mfsr */
+static void gen_mfsr(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv t0;
+
+ CHK_SV;
+ t0 = tcg_const_tl(SR(ctx->opcode));
+ gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
+ tcg_temp_free(t0);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* mfsrin */
+static void gen_mfsrin(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv t0;
+
+ CHK_SV;
+ t0 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, cpu_gpr[rB(ctx->opcode)], 28);
+ tcg_gen_andi_tl(t0, t0, 0xF);
+ gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
+ tcg_temp_free(t0);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* mtsr */
+static void gen_mtsr(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv t0;
+
+ CHK_SV;
+ t0 = tcg_const_tl(SR(ctx->opcode));
+ gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
+ tcg_temp_free(t0);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* mtsrin */
+static void gen_mtsrin(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv t0;
+ CHK_SV;
+
+ t0 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, cpu_gpr[rB(ctx->opcode)], 28);
+ tcg_gen_andi_tl(t0, t0, 0xF);
+ gen_helper_store_sr(cpu_env, t0, cpu_gpr[rD(ctx->opcode)]);
+ tcg_temp_free(t0);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+#if defined(TARGET_PPC64)
+/* Specific implementation for PowerPC 64 "bridge" emulation using SLB */
+
+/* mfsr */
+static void gen_mfsr_64b(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv t0;
+
+ CHK_SV;
+ t0 = tcg_const_tl(SR(ctx->opcode));
+ gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
+ tcg_temp_free(t0);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* mfsrin */
+static void gen_mfsrin_64b(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv t0;
+
+ CHK_SV;
+ t0 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, cpu_gpr[rB(ctx->opcode)], 28);
+ tcg_gen_andi_tl(t0, t0, 0xF);
+ gen_helper_load_sr(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
+ tcg_temp_free(t0);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* mtsr */
+static void gen_mtsr_64b(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv t0;
+
+ CHK_SV;
+ t0 = tcg_const_tl(SR(ctx->opcode));
+ gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
+ tcg_temp_free(t0);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* mtsrin */
+static void gen_mtsrin_64b(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv t0;
+
+ CHK_SV;
+ t0 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, cpu_gpr[rB(ctx->opcode)], 28);
+ tcg_gen_andi_tl(t0, t0, 0xF);
+ gen_helper_store_sr(cpu_env, t0, cpu_gpr[rS(ctx->opcode)]);
+ tcg_temp_free(t0);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* slbmte */
+static void gen_slbmte(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+
+ gen_helper_store_slb(cpu_env, cpu_gpr[rB(ctx->opcode)],
+ cpu_gpr[rS(ctx->opcode)]);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+static void gen_slbmfee(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+
+ gen_helper_load_slb_esid(cpu_gpr[rS(ctx->opcode)], cpu_env,
+ cpu_gpr[rB(ctx->opcode)]);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+static void gen_slbmfev(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+
+ gen_helper_load_slb_vsid(cpu_gpr[rS(ctx->opcode)], cpu_env,
+ cpu_gpr[rB(ctx->opcode)]);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+static void gen_slbfee_(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+#else
+ TCGLabel *l1, *l2;
+
+ if (unlikely(ctx->pr)) {
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+ return;
+ }
+ gen_helper_find_slb_vsid(cpu_gpr[rS(ctx->opcode)], cpu_env,
+ cpu_gpr[rB(ctx->opcode)]);
+ l1 = gen_new_label();
+ l2 = gen_new_label();
+ tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rS(ctx->opcode)], -1, l1);
+ tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 1 << CRF_EQ);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_movi_tl(cpu_gpr[rS(ctx->opcode)], 0);
+ gen_set_label(l2);
+#endif
+}
+#endif /* defined(TARGET_PPC64) */
+
+/*** Lookaside buffer management ***/
+/* Optional & supervisor only: */
+
+/* tlbia */
+static void gen_tlbia(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_HV;
+
+ gen_helper_tlbia(cpu_env);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* tlbiel */
+static void gen_tlbiel(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+
+ gen_helper_tlbie(cpu_env, cpu_gpr[rB(ctx->opcode)]);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* tlbie */
+static void gen_tlbie(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv_i32 t1;
+ CHK_HV;
+
+ if (NARROW_MODE(ctx)) {
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_ext32u_tl(t0, cpu_gpr[rB(ctx->opcode)]);
+ gen_helper_tlbie(cpu_env, t0);
+ tcg_temp_free(t0);
+ } else {
+ gen_helper_tlbie(cpu_env, cpu_gpr[rB(ctx->opcode)]);
+ }
+ t1 = tcg_temp_new_i32();
+ tcg_gen_ld_i32(t1, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
+ tcg_gen_ori_i32(t1, t1, TLB_NEED_GLOBAL_FLUSH);
+ tcg_gen_st_i32(t1, cpu_env, offsetof(CPUPPCState, tlb_need_flush));
+ tcg_temp_free_i32(t1);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* tlbsync */
+static void gen_tlbsync(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_HV;
+
+ /* BookS does both ptesync and tlbsync, so make tlbsync a nop for server */
+ if (ctx->insns_flags & PPC_BOOKE) {
+ gen_check_tlb_flush(ctx, true);
+ }
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+#if defined(TARGET_PPC64)
+/* slbia */
+static void gen_slbia(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+
+ gen_helper_slbia(cpu_env);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* slbie */
+static void gen_slbie(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+
+ gen_helper_slbie(cpu_env, cpu_gpr[rB(ctx->opcode)]);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+#endif /* defined(TARGET_PPC64) */
+
+/*** External control ***/
+/* Optional: */
+
+/* eciwx */
+static void gen_eciwx(DisasContext *ctx)
+{
+ TCGv t0;
+ /* Should check EAR[E] ! */
+ gen_set_access_type(ctx, ACCESS_EXT);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_check_align(ctx, t0, 0x03);
+ gen_qemu_ld32u(ctx, cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+}
+
+/* ecowx */
+static void gen_ecowx(DisasContext *ctx)
+{
+ TCGv t0;
+ /* Should check EAR[E] ! */
+ gen_set_access_type(ctx, ACCESS_EXT);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_check_align(ctx, t0, 0x03);
+ gen_qemu_st32(ctx, cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+}
+
+/* PowerPC 601 specific instructions */
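+/* Many of these legacy POWER/601 operations use the MQ SPR to hold the second
+ * half of a 64-bit intermediate result or the bits shifted out of a register.
+ */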
+
+/* abs - abs. */
+static void gen_abs(DisasContext *ctx)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_GE, cpu_gpr[rA(ctx->opcode)], 0, l1);
+ tcg_gen_neg_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ gen_set_label(l2);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+
+/* abso - abso. */
+static void gen_abso(DisasContext *ctx)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ TCGLabel *l3 = gen_new_label();
+ /* Start with XER OV disabled, the most likely case */
+ tcg_gen_movi_tl(cpu_ov, 0);
+ tcg_gen_brcondi_tl(TCG_COND_GE, cpu_gpr[rA(ctx->opcode)], 0, l2);
+ tcg_gen_brcondi_tl(TCG_COND_NE, cpu_gpr[rA(ctx->opcode)], 0x80000000, l1);
+ tcg_gen_movi_tl(cpu_ov, 1);
+ tcg_gen_movi_tl(cpu_so, 1);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_neg_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_br(l3);
+ gen_set_label(l2);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ gen_set_label(l3);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+
+/* clcs */
+static void gen_clcs(DisasContext *ctx)
+{
+ TCGv_i32 t0 = tcg_const_i32(rA(ctx->opcode));
+ gen_helper_clcs(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
+ tcg_temp_free_i32(t0);
+ /* Rc=1 sets CR0 to an undefined state */
+}
+
+/* div - div. */
+static void gen_div(DisasContext *ctx)
+{
+ gen_helper_div(cpu_gpr[rD(ctx->opcode)], cpu_env, cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rB(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+
+/* divo - divo. */
+static void gen_divo(DisasContext *ctx)
+{
+ gen_helper_divo(cpu_gpr[rD(ctx->opcode)], cpu_env, cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rB(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+
+/* divs - divs. */
+static void gen_divs(DisasContext *ctx)
+{
+ gen_helper_divs(cpu_gpr[rD(ctx->opcode)], cpu_env, cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rB(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+
+/* divso - divso. */
+static void gen_divso(DisasContext *ctx)
+{
+ gen_helper_divso(cpu_gpr[rD(ctx->opcode)], cpu_env,
+ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+
+/* doz - doz. */
+static void gen_doz(DisasContext *ctx)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ tcg_gen_brcond_tl(TCG_COND_GE, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], l1);
+ tcg_gen_sub_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], 0);
+ gen_set_label(l2);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+
+/* dozo - dozo. */
+static void gen_dozo(DisasContext *ctx)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ /* Start with XER OV disabled, the most likely case */
+ tcg_gen_movi_tl(cpu_ov, 0);
+ tcg_gen_brcond_tl(TCG_COND_GE, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], l1);
+ tcg_gen_sub_tl(t0, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_xor_tl(t1, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_xor_tl(t2, cpu_gpr[rA(ctx->opcode)], t0);
+ tcg_gen_andc_tl(t1, t1, t2);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l2);
+ tcg_gen_movi_tl(cpu_ov, 1);
+ tcg_gen_movi_tl(cpu_so, 1);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], 0);
+ gen_set_label(l2);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+
+/* dozi */
+static void gen_dozi(DisasContext *ctx)
+{
+ target_long simm = SIMM(ctx->opcode);
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_LT, cpu_gpr[rA(ctx->opcode)], simm, l1);
+ tcg_gen_subfi_tl(cpu_gpr[rD(ctx->opcode)], simm, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], 0);
+ gen_set_label(l2);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+
+/* lscbx - lscbx. */
+static void gen_lscbx(DisasContext *ctx)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv_i32 t1 = tcg_const_i32(rD(ctx->opcode));
+ TCGv_i32 t2 = tcg_const_i32(rA(ctx->opcode));
+ TCGv_i32 t3 = tcg_const_i32(rB(ctx->opcode));
+
+ gen_addr_reg_index(ctx, t0);
+ gen_helper_lscbx(t0, cpu_env, t0, t1, t2, t3);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t3);
+ tcg_gen_andi_tl(cpu_xer, cpu_xer, ~0x7F);
+ tcg_gen_or_tl(cpu_xer, cpu_xer, t0);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, t0);
+ tcg_temp_free(t0);
+}
+
+/* maskg - maskg. */
+static void gen_maskg(DisasContext *ctx)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ tcg_gen_movi_tl(t3, 0xFFFFFFFF);
+ tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_andi_tl(t1, cpu_gpr[rS(ctx->opcode)], 0x1F);
+ tcg_gen_addi_tl(t2, t0, 1);
+ tcg_gen_shr_tl(t2, t3, t2);
+ tcg_gen_shr_tl(t3, t3, t1);
+ tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], t2, t3);
+ tcg_gen_brcond_tl(TCG_COND_GE, t0, t1, l1);
+ tcg_gen_neg_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ gen_set_label(l1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ tcg_temp_free(t3);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* maskir - maskir. */
+static void gen_maskir(DisasContext *ctx)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_and_tl(t0, cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_andc_tl(t1, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* mul - mul. */
+static void gen_mul(DisasContext *ctx)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv t2 = tcg_temp_new();
+ tcg_gen_extu_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_extu_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_mul_i64(t0, t0, t1);
+ tcg_gen_trunc_i64_tl(t2, t0);
+ gen_store_spr(SPR_MQ, t2);
+ tcg_gen_shri_i64(t1, t0, 32);
+ tcg_gen_trunc_i64_tl(cpu_gpr[rD(ctx->opcode)], t1);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free(t2);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+
+/* mulo - mulo. */
+static void gen_mulo(DisasContext *ctx)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv t2 = tcg_temp_new();
+ /* Start with XER OV disabled, the most likely case */
+ tcg_gen_movi_tl(cpu_ov, 0);
+ tcg_gen_extu_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_extu_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_mul_i64(t0, t0, t1);
+ tcg_gen_trunc_i64_tl(t2, t0);
+ gen_store_spr(SPR_MQ, t2);
+ tcg_gen_shri_i64(t1, t0, 32);
+ tcg_gen_trunc_i64_tl(cpu_gpr[rD(ctx->opcode)], t1);
+ tcg_gen_ext32s_i64(t1, t0);
+ tcg_gen_brcond_i64(TCG_COND_EQ, t0, t1, l1);
+ tcg_gen_movi_tl(cpu_ov, 1);
+ tcg_gen_movi_tl(cpu_so, 1);
+ gen_set_label(l1);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free(t2);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+
+/* nabs - nabs. */
+static void gen_nabs(DisasContext *ctx)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_GT, cpu_gpr[rA(ctx->opcode)], 0, l1);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_neg_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ gen_set_label(l2);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+
+/* nabso - nabso. */
+static void gen_nabso(DisasContext *ctx)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_GT, cpu_gpr[rA(ctx->opcode)], 0, l1);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_neg_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ gen_set_label(l2);
+ /* nabs never overflows */
+ tcg_gen_movi_tl(cpu_ov, 0);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]);
+}
+
+/* rlmi - rlmi. */
+static void gen_rlmi(DisasContext *ctx)
+{
+ uint32_t mb = MB(ctx->opcode);
+ uint32_t me = ME(ctx->opcode);
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_rotl_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
+ tcg_gen_andi_tl(t0, t0, MASK(mb, me));
+ tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], ~MASK(mb, me));
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* rrib - rrib. */
+static void gen_rrib(DisasContext *ctx)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_movi_tl(t1, 0x80000000);
+ tcg_gen_shr_tl(t1, t1, t0);
+ tcg_gen_shr_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
+ tcg_gen_and_tl(t0, t0, t1);
+ tcg_gen_andc_tl(t1, cpu_gpr[rA(ctx->opcode)], t1);
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* sle - sle. */
+static void gen_sle(DisasContext *ctx)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_shl_tl(t0, cpu_gpr[rS(ctx->opcode)], t1);
+ tcg_gen_subfi_tl(t1, 32, t1);
+ tcg_gen_shr_tl(t1, cpu_gpr[rS(ctx->opcode)], t1);
+ tcg_gen_or_tl(t1, t0, t1);
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
+ gen_store_spr(SPR_MQ, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* sleq - sleq. */
+static void gen_sleq(DisasContext *ctx)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_movi_tl(t2, 0xFFFFFFFF);
+ tcg_gen_shl_tl(t2, t2, t0);
+ tcg_gen_rotl_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
+ gen_load_spr(t1, SPR_MQ);
+ gen_store_spr(SPR_MQ, t0);
+ tcg_gen_and_tl(t0, t0, t2);
+ tcg_gen_andc_tl(t1, t1, t2);
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* sliq - sliq. */
+static void gen_sliq(DisasContext *ctx)
+{
+ int sh = SH(ctx->opcode);
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_shli_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
+ tcg_gen_shri_tl(t1, cpu_gpr[rS(ctx->opcode)], 32 - sh);
+ tcg_gen_or_tl(t1, t0, t1);
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
+ gen_store_spr(SPR_MQ, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* slliq - slliq. */
+static void gen_slliq(DisasContext *ctx)
+{
+ int sh = SH(ctx->opcode);
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_rotli_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
+ gen_load_spr(t1, SPR_MQ);
+ gen_store_spr(SPR_MQ, t0);
+ tcg_gen_andi_tl(t0, t0, (0xFFFFFFFFU << sh));
+ tcg_gen_andi_tl(t1, t1, ~(0xFFFFFFFFU << sh));
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* sllq - sllq. */
+static void gen_sllq(DisasContext *ctx)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ TCGv t0 = tcg_temp_local_new();
+ TCGv t1 = tcg_temp_local_new();
+ TCGv t2 = tcg_temp_local_new();
+ tcg_gen_andi_tl(t2, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_movi_tl(t1, 0xFFFFFFFF);
+ tcg_gen_shl_tl(t1, t1, t2);
+ tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x20);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
+ gen_load_spr(t0, SPR_MQ);
+ tcg_gen_and_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_shl_tl(t0, cpu_gpr[rS(ctx->opcode)], t2);
+ gen_load_spr(t2, SPR_MQ);
+ tcg_gen_andc_tl(t1, t2, t1);
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ gen_set_label(l2);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* slq - slq. */
+static void gen_slq(DisasContext *ctx)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_shl_tl(t0, cpu_gpr[rS(ctx->opcode)], t1);
+ tcg_gen_subfi_tl(t1, 32, t1);
+ tcg_gen_shr_tl(t1, cpu_gpr[rS(ctx->opcode)], t1);
+ tcg_gen_or_tl(t1, t0, t1);
+ gen_store_spr(SPR_MQ, t1);
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x20);
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1);
+ tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
+ gen_set_label(l1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* sraiq - sraiq. */
+static void gen_sraiq(DisasContext *ctx)
+{
+ int sh = SH(ctx->opcode);
+ TCGLabel *l1 = gen_new_label();
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
+ tcg_gen_shli_tl(t1, cpu_gpr[rS(ctx->opcode)], 32 - sh);
+ tcg_gen_or_tl(t0, t0, t1);
+ gen_store_spr(SPR_MQ, t0);
+ tcg_gen_movi_tl(cpu_ca, 0);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1);
+ tcg_gen_brcondi_tl(TCG_COND_GE, cpu_gpr[rS(ctx->opcode)], 0, l1);
+ tcg_gen_movi_tl(cpu_ca, 1);
+ gen_set_label(l1);
+ tcg_gen_sari_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], sh);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* sraq - sraq. */
+static void gen_sraq(DisasContext *ctx)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_local_new();
+ TCGv t2 = tcg_temp_local_new();
+ tcg_gen_andi_tl(t2, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_shr_tl(t0, cpu_gpr[rS(ctx->opcode)], t2);
+ tcg_gen_sar_tl(t1, cpu_gpr[rS(ctx->opcode)], t2);
+ tcg_gen_subfi_tl(t2, 32, t2);
+ tcg_gen_shl_tl(t2, cpu_gpr[rS(ctx->opcode)], t2);
+ tcg_gen_or_tl(t0, t0, t2);
+ gen_store_spr(SPR_MQ, t0);
+ tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x20);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, l1);
+ tcg_gen_mov_tl(t2, cpu_gpr[rS(ctx->opcode)]);
+ tcg_gen_sari_tl(t1, cpu_gpr[rS(ctx->opcode)], 31);
+ gen_set_label(l1);
+ tcg_temp_free(t0);
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t1);
+ tcg_gen_movi_tl(cpu_ca, 0);
+ tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l2);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, l2);
+ tcg_gen_movi_tl(cpu_ca, 1);
+ gen_set_label(l2);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* sre - sre. */
+static void gen_sre(DisasContext *ctx)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_shr_tl(t0, cpu_gpr[rS(ctx->opcode)], t1);
+ tcg_gen_subfi_tl(t1, 32, t1);
+ tcg_gen_shl_tl(t1, cpu_gpr[rS(ctx->opcode)], t1);
+ tcg_gen_or_tl(t1, t0, t1);
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
+ gen_store_spr(SPR_MQ, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* srea - srea. */
+static void gen_srea(DisasContext *ctx)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_rotr_tl(t0, cpu_gpr[rS(ctx->opcode)], t1);
+ gen_store_spr(SPR_MQ, t0);
+ tcg_gen_sar_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* sreq */
+static void gen_sreq(DisasContext *ctx)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_movi_tl(t1, 0xFFFFFFFF);
+ tcg_gen_shr_tl(t1, t1, t0);
+ tcg_gen_rotr_tl(t0, cpu_gpr[rS(ctx->opcode)], t0);
+ gen_load_spr(t2, SPR_MQ);
+ gen_store_spr(SPR_MQ, t0);
+ tcg_gen_and_tl(t0, t0, t1);
+ tcg_gen_andc_tl(t2, t2, t1);
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t2);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* sriq */
+static void gen_sriq(DisasContext *ctx)
+{
+ int sh = SH(ctx->opcode);
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
+ tcg_gen_shli_tl(t1, cpu_gpr[rS(ctx->opcode)], 32 - sh);
+ tcg_gen_or_tl(t1, t0, t1);
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
+ gen_store_spr(SPR_MQ, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* srliq */
+static void gen_srliq(DisasContext *ctx)
+{
+ int sh = SH(ctx->opcode);
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_rotri_tl(t0, cpu_gpr[rS(ctx->opcode)], sh);
+ gen_load_spr(t1, SPR_MQ);
+ gen_store_spr(SPR_MQ, t0);
+ tcg_gen_andi_tl(t0, t0, (0xFFFFFFFFU >> sh));
+ tcg_gen_andi_tl(t1, t1, ~(0xFFFFFFFFU >> sh));
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* srlq */
+static void gen_srlq(DisasContext *ctx)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ TCGv t0 = tcg_temp_local_new();
+ TCGv t1 = tcg_temp_local_new();
+ TCGv t2 = tcg_temp_local_new();
+ tcg_gen_andi_tl(t2, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_movi_tl(t1, 0xFFFFFFFF);
+ tcg_gen_shr_tl(t2, t1, t2);
+ tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x20);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1);
+ gen_load_spr(t0, SPR_MQ);
+ tcg_gen_and_tl(cpu_gpr[rA(ctx->opcode)], t0, t2);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_shr_tl(t0, cpu_gpr[rS(ctx->opcode)], t2);
+ tcg_gen_and_tl(t0, t0, t2);
+ gen_load_spr(t1, SPR_MQ);
+ tcg_gen_andc_tl(t1, t1, t2);
+ tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1);
+ gen_set_label(l2);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* srq */
+static void gen_srq(DisasContext *ctx)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x1F);
+ tcg_gen_shr_tl(t0, cpu_gpr[rS(ctx->opcode)], t1);
+ tcg_gen_subfi_tl(t1, 32, t1);
+ tcg_gen_shl_tl(t1, cpu_gpr[rS(ctx->opcode)], t1);
+ tcg_gen_or_tl(t1, t0, t1);
+ gen_store_spr(SPR_MQ, t1);
+ tcg_gen_andi_tl(t1, cpu_gpr[rB(ctx->opcode)], 0x20);
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], t0);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, l1);
+ tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0);
+ gen_set_label(l1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ if (unlikely(Rc(ctx->opcode) != 0))
+ gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]);
+}
+
+/* PowerPC 602 specific instructions */
+
+/* dsa */
+static void gen_dsa(DisasContext *ctx)
+{
+ /* XXX: TODO */
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+}
+
+/* esa */
+static void gen_esa(DisasContext *ctx)
+{
+ /* XXX: TODO */
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+}
+
+/* mfrom */
+static void gen_mfrom(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+ gen_helper_602_mfrom(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* 602 - 603 - G2 TLB management */
+
+/* tlbld */
+static void gen_tlbld_6xx(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+ gen_helper_6xx_tlbd(cpu_env, cpu_gpr[rB(ctx->opcode)]);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* tlbli */
+static void gen_tlbli_6xx(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+ gen_helper_6xx_tlbi(cpu_env, cpu_gpr[rB(ctx->opcode)]);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* 74xx TLB management */
+
+/* tlbld */
+static void gen_tlbld_74xx(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+ gen_helper_74xx_tlbd(cpu_env, cpu_gpr[rB(ctx->opcode)]);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* tlbli */
+static void gen_tlbli_74xx(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+ gen_helper_74xx_tlbi(cpu_env, cpu_gpr[rB(ctx->opcode)]);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* POWER instructions not in PowerPC 601 */
+
+/* clf */
+static void gen_clf(DisasContext *ctx)
+{
+ /* Cache line flush: implemented as no-op */
+}
+
+/* cli */
+static void gen_cli(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ /* Cache line invalidate: privileged and treated as no-op */
+ CHK_SV;
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* dclst */
+static void gen_dclst(DisasContext *ctx)
+{
+ /* Data cache line store: treated as no-op */
+}
+
+static void gen_mfsri(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ int ra = rA(ctx->opcode);
+ int rd = rD(ctx->opcode);
+ TCGv t0;
+
+ CHK_SV;
+ t0 = tcg_temp_new();
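+ /* The top nibble of the effective address selects a segment register,
+ * which is loaded into rD and copied to rA when rA is neither r0 nor rD.
+ */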
+ gen_addr_reg_index(ctx, t0);
+ tcg_gen_shri_tl(t0, t0, 28);
+ tcg_gen_andi_tl(t0, t0, 0xF);
+ gen_helper_load_sr(cpu_gpr[rd], cpu_env, t0);
+ tcg_temp_free(t0);
+ if (ra != 0 && ra != rd)
+ tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rd]);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+static void gen_rac(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv t0;
+
+ CHK_SV;
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_helper_rac(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
+ tcg_temp_free(t0);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+static void gen_rfsvc(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+
+ gen_helper_rfsvc(cpu_env);
+ gen_sync_exception(ctx);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* svc is not implemented for now */
+
+/* BookE specific instructions */
+
+/* XXX: not implemented on 440 ? */
+static void gen_mfapidi(DisasContext *ctx)
+{
+ /* XXX: TODO */
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+}
+
+/* XXX: not implemented on 440 ? */
+static void gen_tlbiva(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv t0;
+
+ CHK_SV;
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_helper_tlbiva(cpu_env, cpu_gpr[rB(ctx->opcode)]);
+ tcg_temp_free(t0);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* All 405 MAC instructions are translated here */
+static inline void gen_405_mulladd_insn(DisasContext *ctx, int opc2, int opc3,
+ int ra, int rb, int rt, int Rc)
+{
+ TCGv t0, t1;
+
+ t0 = tcg_temp_local_new();
+ t1 = tcg_temp_local_new();
+
+ switch (opc3 & 0x0D) {
+ case 0x05:
+ /* macchw - macchw. - macchwo - macchwo. */
+ /* macchws - macchws. - macchwso - macchwso. */
+ /* nmacchw - nmacchw. - nmacchwo - nmacchwo. */
+ /* nmacchws - nmacchws. - nmacchwso - nmacchwso. */
+ /* mulchw - mulchw. */
+ tcg_gen_ext16s_tl(t0, cpu_gpr[ra]);
+ tcg_gen_sari_tl(t1, cpu_gpr[rb], 16);
+ tcg_gen_ext16s_tl(t1, t1);
+ break;
+ case 0x04:
+ /* macchwu - macchwu. - macchwuo - macchwuo. */
+ /* macchwsu - macchwsu. - macchwsuo - macchwsuo. */
+ /* mulchwu - mulchwu. */
+ tcg_gen_ext16u_tl(t0, cpu_gpr[ra]);
+ tcg_gen_shri_tl(t1, cpu_gpr[rb], 16);
+ tcg_gen_ext16u_tl(t1, t1);
+ break;
+ case 0x01:
+ /* machhw - machhw. - machhwo - machhwo. */
+ /* machhws - machhws. - machhwso - machhwso. */
+ /* nmachhw - nmachhw. - nmachhwo - nmachhwo. */
+ /* nmachhws - nmachhws. - nmachhwso - nmachhwso. */
+ /* mulhhw - mulhhw. */
+ tcg_gen_sari_tl(t0, cpu_gpr[ra], 16);
+ tcg_gen_ext16s_tl(t0, t0);
+ tcg_gen_sari_tl(t1, cpu_gpr[rb], 16);
+ tcg_gen_ext16s_tl(t1, t1);
+ break;
+ case 0x00:
+ /* machhwu - machhwu. - machhwuo - machhwuo. */
+ /* machhwsu - machhwsu. - machhwsuo - machhwsuo. */
+ /* mulhhwu - mulhhwu. */
+ tcg_gen_shri_tl(t0, cpu_gpr[ra], 16);
+ tcg_gen_ext16u_tl(t0, t0);
+ tcg_gen_shri_tl(t1, cpu_gpr[rb], 16);
+ tcg_gen_ext16u_tl(t1, t1);
+ break;
+ case 0x0D:
+ /* maclhw - maclhw. - maclhwo - maclhwo. */
+ /* maclhws - maclhws. - maclhwso - maclhwso. */
+ /* nmaclhw - nmaclhw. - nmaclhwo - nmaclhwo. */
+ /* nmaclhws - nmaclhws. - nmaclhwso - nmaclhwso. */
+ /* mullhw - mullhw. */
+ tcg_gen_ext16s_tl(t0, cpu_gpr[ra]);
+ tcg_gen_ext16s_tl(t1, cpu_gpr[rb]);
+ break;
+ case 0x0C:
+ /* maclhwu - maclhwu. - maclhwuo - maclhwuo. */
+ /* maclhwsu - maclhwsu. - maclhwsuo - maclhwsuo. */
+ /* mullhwu - mullhwu. */
+ tcg_gen_ext16u_tl(t0, cpu_gpr[ra]);
+ tcg_gen_ext16u_tl(t1, cpu_gpr[rb]);
+ break;
+ }
+ if (opc2 & 0x04) {
+ /* (n)multiply-and-accumulate (0x0C / 0x0E) */
+ tcg_gen_mul_tl(t1, t0, t1);
+ if (opc2 & 0x02) {
+ /* nmultiply-and-accumulate (0x0E) */
+ tcg_gen_sub_tl(t0, cpu_gpr[rt], t1);
+ } else {
+ /* multiply-and-accumulate (0x0C) */
+ tcg_gen_add_tl(t0, cpu_gpr[rt], t1);
+ }
+
+ if (opc3 & 0x12) {
+ /* Check overflow and/or saturate */
+ TCGLabel *l1 = gen_new_label();
+
+ if (opc3 & 0x10) {
+ /* Start with XER OV disabled, the most likely case */
+ tcg_gen_movi_tl(cpu_ov, 0);
+ }
+ if (opc3 & 0x01) {
+ /* Signed */
+ tcg_gen_xor_tl(t1, cpu_gpr[rt], t1);
+ tcg_gen_brcondi_tl(TCG_COND_GE, t1, 0, l1);
+ tcg_gen_xor_tl(t1, cpu_gpr[rt], t0);
+ tcg_gen_brcondi_tl(TCG_COND_LT, t1, 0, l1);
+ if (opc3 & 0x02) {
+ /* Saturate */
+ tcg_gen_sari_tl(t0, cpu_gpr[rt], 31);
+ tcg_gen_xori_tl(t0, t0, 0x7fffffff);
+ }
+ } else {
+ /* Unsigned */
+ tcg_gen_brcond_tl(TCG_COND_GEU, t0, t1, l1);
+ if (opc3 & 0x02) {
+ /* Saturate */
+ tcg_gen_movi_tl(t0, UINT32_MAX);
+ }
+ }
+ if (opc3 & 0x10) {
+ /* Check overflow */
+ tcg_gen_movi_tl(cpu_ov, 1);
+ tcg_gen_movi_tl(cpu_so, 1);
+ }
+ gen_set_label(l1);
+ tcg_gen_mov_tl(cpu_gpr[rt], t0);
+ }
+ } else {
+ tcg_gen_mul_tl(cpu_gpr[rt], t0, t1);
+ }
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ if (unlikely(Rc != 0)) {
+ /* Update Rc0 */
+ gen_set_Rc0(ctx, cpu_gpr[rt]);
+ }
+}
+
+#define GEN_MAC_HANDLER(name, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ gen_405_mulladd_insn(ctx, opc2, opc3, rA(ctx->opcode), rB(ctx->opcode), \
+ rD(ctx->opcode), Rc(ctx->opcode)); \
+}
+
+/* macchw - macchw. */
+GEN_MAC_HANDLER(macchw, 0x0C, 0x05);
+/* macchwo - macchwo. */
+GEN_MAC_HANDLER(macchwo, 0x0C, 0x15);
+/* macchws - macchws. */
+GEN_MAC_HANDLER(macchws, 0x0C, 0x07);
+/* macchwso - macchwso. */
+GEN_MAC_HANDLER(macchwso, 0x0C, 0x17);
+/* macchwsu - macchwsu. */
+GEN_MAC_HANDLER(macchwsu, 0x0C, 0x06);
+/* macchwsuo - macchwsuo. */
+GEN_MAC_HANDLER(macchwsuo, 0x0C, 0x16);
+/* macchwu - macchwu. */
+GEN_MAC_HANDLER(macchwu, 0x0C, 0x04);
+/* macchwuo - macchwuo. */
+GEN_MAC_HANDLER(macchwuo, 0x0C, 0x14);
+/* machhw - machhw. */
+GEN_MAC_HANDLER(machhw, 0x0C, 0x01);
+/* machhwo - machhwo. */
+GEN_MAC_HANDLER(machhwo, 0x0C, 0x11);
+/* machhws - machhws. */
+GEN_MAC_HANDLER(machhws, 0x0C, 0x03);
+/* machhwso - machhwso. */
+GEN_MAC_HANDLER(machhwso, 0x0C, 0x13);
+/* machhwsu - machhwsu. */
+GEN_MAC_HANDLER(machhwsu, 0x0C, 0x02);
+/* machhwsuo - machhwsuo. */
+GEN_MAC_HANDLER(machhwsuo, 0x0C, 0x12);
+/* machhwu - machhwu. */
+GEN_MAC_HANDLER(machhwu, 0x0C, 0x00);
+/* machhwuo - machhwuo. */
+GEN_MAC_HANDLER(machhwuo, 0x0C, 0x10);
+/* maclhw - maclhw. */
+GEN_MAC_HANDLER(maclhw, 0x0C, 0x0D);
+/* maclhwo - maclhwo. */
+GEN_MAC_HANDLER(maclhwo, 0x0C, 0x1D);
+/* maclhws - maclhws. */
+GEN_MAC_HANDLER(maclhws, 0x0C, 0x0F);
+/* maclhwso - maclhwso. */
+GEN_MAC_HANDLER(maclhwso, 0x0C, 0x1F);
+/* maclhwu - maclhwu. */
+GEN_MAC_HANDLER(maclhwu, 0x0C, 0x0C);
+/* maclhwuo - maclhwuo. */
+GEN_MAC_HANDLER(maclhwuo, 0x0C, 0x1C);
+/* maclhwsu - maclhwsu. */
+GEN_MAC_HANDLER(maclhwsu, 0x0C, 0x0E);
+/* maclhwsuo - maclhwsuo. */
+GEN_MAC_HANDLER(maclhwsuo, 0x0C, 0x1E);
+/* nmacchw - nmacchw. */
+GEN_MAC_HANDLER(nmacchw, 0x0E, 0x05);
+/* nmacchwo - nmacchwo. */
+GEN_MAC_HANDLER(nmacchwo, 0x0E, 0x15);
+/* nmacchws - nmacchws. */
+GEN_MAC_HANDLER(nmacchws, 0x0E, 0x07);
+/* nmacchwso - nmacchwso. */
+GEN_MAC_HANDLER(nmacchwso, 0x0E, 0x17);
+/* nmachhw - nmachhw. */
+GEN_MAC_HANDLER(nmachhw, 0x0E, 0x01);
+/* nmachhwo - nmachhwo. */
+GEN_MAC_HANDLER(nmachhwo, 0x0E, 0x11);
+/* nmachhws - nmachhws. */
+GEN_MAC_HANDLER(nmachhws, 0x0E, 0x03);
+/* nmachhwso - nmachhwso. */
+GEN_MAC_HANDLER(nmachhwso, 0x0E, 0x13);
+/* nmaclhw - nmaclhw. */
+GEN_MAC_HANDLER(nmaclhw, 0x0E, 0x0D);
+/* nmaclhwo - nmaclhwo. */
+GEN_MAC_HANDLER(nmaclhwo, 0x0E, 0x1D);
+/* nmaclhws - nmaclhws. */
+GEN_MAC_HANDLER(nmaclhws, 0x0E, 0x0F);
+/* nmaclhwso - nmaclhwso. */
+GEN_MAC_HANDLER(nmaclhwso, 0x0E, 0x1F);
+
+/* mulchw - mulchw. */
+GEN_MAC_HANDLER(mulchw, 0x08, 0x05);
+/* mulchwu - mulchwu. */
+GEN_MAC_HANDLER(mulchwu, 0x08, 0x04);
+/* mulhhw - mulhhw. */
+GEN_MAC_HANDLER(mulhhw, 0x08, 0x01);
+/* mulhhwu - mulhhwu. */
+GEN_MAC_HANDLER(mulhhwu, 0x08, 0x00);
+/* mullhw - mullhw. */
+GEN_MAC_HANDLER(mullhw, 0x08, 0x0D);
+/* mullhwu - mullhwu. */
+GEN_MAC_HANDLER(mullhwu, 0x08, 0x0C);
+
+/* mfdcr */
+static void gen_mfdcr(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv dcrn;
+
+ CHK_SV;
+ dcrn = tcg_const_tl(SPR(ctx->opcode));
+ gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env, dcrn);
+ tcg_temp_free(dcrn);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* mtdcr */
+static void gen_mtdcr(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv dcrn;
+
+ CHK_SV;
+ dcrn = tcg_const_tl(SPR(ctx->opcode));
+ gen_helper_store_dcr(cpu_env, dcrn, cpu_gpr[rS(ctx->opcode)]);
+ tcg_temp_free(dcrn);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* mfdcrx */
+/* XXX: not implemented on 440 ? */
+static void gen_mfdcrx(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+ gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env,
+ cpu_gpr[rA(ctx->opcode)]);
+ /* Note: if the Rc update flag is set, the state of Rc0 is undefined */
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* mtdcrx */
+/* XXX: not implemented on 440 ? */
+static void gen_mtdcrx(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+ gen_helper_store_dcr(cpu_env, cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rS(ctx->opcode)]);
+ /* Note: if the Rc update flag is set, the state of Rc0 is undefined */
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* mfdcrux (PPC 460) : user-mode access to DCR */
+static void gen_mfdcrux(DisasContext *ctx)
+{
+ gen_helper_load_dcr(cpu_gpr[rD(ctx->opcode)], cpu_env,
+ cpu_gpr[rA(ctx->opcode)]);
+ /* Note: if the Rc update flag is set, the state of Rc0 is undefined */
+}
+
+/* mtdcrux (PPC 460) : user-mode access to DCR */
+static void gen_mtdcrux(DisasContext *ctx)
+{
+ gen_helper_store_dcr(cpu_env, cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rS(ctx->opcode)]);
+ /* Note: if the Rc update flag is set, the state of Rc0 is undefined */
+}
+
+/* dccci */
+static void gen_dccci(DisasContext *ctx)
+{
+ CHK_SV;
+ /* interpreted as no-op */
+}
+
+/* dcread */
+static void gen_dcread(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv EA, val;
+
+ CHK_SV;
+ gen_set_access_type(ctx, ACCESS_CACHE);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ val = tcg_temp_new();
+ gen_qemu_ld32u(ctx, val, EA);
+ tcg_temp_free(val);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], EA);
+ tcg_temp_free(EA);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* icbt */
+static void gen_icbt_40x(DisasContext *ctx)
+{
+ /* interpreted as no-op */
+ /* XXX: the specification says this is treated as a load by the MMU
+ * but does not generate any exception
+ */
+}
+
+/* iccci */
+static void gen_iccci(DisasContext *ctx)
+{
+ CHK_SV;
+ /* interpreted as no-op */
+}
+
+/* icread */
+static void gen_icread(DisasContext *ctx)
+{
+ CHK_SV;
+ /* interpreted as no-op */
+}
+
+/* rfci (supervisor only) */
+static void gen_rfci_40x(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+ /* Restore CPU state */
+ gen_helper_40x_rfci(cpu_env);
+ gen_sync_exception(ctx);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+static void gen_rfci(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+ /* Restore CPU state */
+ gen_helper_rfci(cpu_env);
+ gen_sync_exception(ctx);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* BookE specific */
+
+/* XXX: not implemented on 440 ? */
+static void gen_rfdi(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+ /* Restore CPU state */
+ gen_helper_rfdi(cpu_env);
+ gen_sync_exception(ctx);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* XXX: not implemented on 440 ? */
+static void gen_rfmci(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+ /* Restore CPU state */
+ gen_helper_rfmci(cpu_env);
+ gen_sync_exception(ctx);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* TLB management - PowerPC 405 implementation */
+
+/* tlbre */
+static void gen_tlbre_40x(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+ switch (rB(ctx->opcode)) {
+ case 0:
+ gen_helper_4xx_tlbre_hi(cpu_gpr[rD(ctx->opcode)], cpu_env,
+ cpu_gpr[rA(ctx->opcode)]);
+ break;
+ case 1:
+ gen_helper_4xx_tlbre_lo(cpu_gpr[rD(ctx->opcode)], cpu_env,
+ cpu_gpr[rA(ctx->opcode)]);
+ break;
+ default:
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ break;
+ }
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* tlbsx - tlbsx. */
+static void gen_tlbsx_40x(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv t0;
+
+ CHK_SV;
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_helper_4xx_tlbsx(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
+ tcg_temp_free(t0);
+ if (Rc(ctx->opcode)) {
+ TCGLabel *l1 = gen_new_label();
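+ /* With the record bit set, CR0 starts from XER[SO] and gets its EQ
+ * bit set unless the helper returned -1 (no matching TLB entry).
+ */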
+ tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rD(ctx->opcode)], -1, l1);
+ tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 0x02);
+ gen_set_label(l1);
+ }
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* tlbwe */
+static void gen_tlbwe_40x(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+
+ switch (rB(ctx->opcode)) {
+ case 0:
+ gen_helper_4xx_tlbwe_hi(cpu_env, cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rS(ctx->opcode)]);
+ break;
+ case 1:
+ gen_helper_4xx_tlbwe_lo(cpu_env, cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rS(ctx->opcode)]);
+ break;
+ default:
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ break;
+ }
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* TLB management - PowerPC 440 implementation */
+
+/* tlbre */
+static void gen_tlbre_440(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+
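+ /* rB selects which of the three TLB entry words (0, 1 or 2) the
+ * helper reads; any other value is an invalid form.
+ */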
+ switch (rB(ctx->opcode)) {
+ case 0:
+ case 1:
+ case 2:
+ {
+ TCGv_i32 t0 = tcg_const_i32(rB(ctx->opcode));
+ gen_helper_440_tlbre(cpu_gpr[rD(ctx->opcode)], cpu_env,
+ t0, cpu_gpr[rA(ctx->opcode)]);
+ tcg_temp_free_i32(t0);
+ }
+ break;
+ default:
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ break;
+ }
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* tlbsx - tlbsx. */
+static void gen_tlbsx_440(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv t0;
+
+ CHK_SV;
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_helper_440_tlbsx(cpu_gpr[rD(ctx->opcode)], cpu_env, t0);
+ tcg_temp_free(t0);
+ if (Rc(ctx->opcode)) {
+ TCGLabel *l1 = gen_new_label();
+ tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr[rD(ctx->opcode)], -1, l1);
+ tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 0x02);
+ gen_set_label(l1);
+ }
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* tlbwe */
+static void gen_tlbwe_440(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+ switch (rB(ctx->opcode)) {
+ case 0:
+ case 1:
+ case 2:
+ {
+ TCGv_i32 t0 = tcg_const_i32(rB(ctx->opcode));
+ gen_helper_440_tlbwe(cpu_env, t0, cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rS(ctx->opcode)]);
+ tcg_temp_free_i32(t0);
+ }
+ break;
+ default:
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ break;
+ }
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* TLB management - PowerPC BookE 2.06 implementation */
+
+/* tlbre */
+static void gen_tlbre_booke206(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+ gen_helper_booke206_tlbre(cpu_env);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* tlbsx - tlbsx. */
+static void gen_tlbsx_booke206(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv t0;
+
+ CHK_SV;
+ if (rA(ctx->opcode)) {
+ t0 = tcg_temp_new();
+ tcg_gen_mov_tl(t0, cpu_gpr[rD(ctx->opcode)]);
+ } else {
+ t0 = tcg_const_tl(0);
+ }
+
+ tcg_gen_add_tl(t0, t0, cpu_gpr[rB(ctx->opcode)]);
+ gen_helper_booke206_tlbsx(cpu_env, t0);
+ tcg_temp_free(t0);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* tlbwe */
+static void gen_tlbwe_booke206(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+ gen_helper_booke206_tlbwe(cpu_env);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+static void gen_tlbivax_booke206(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv t0;
+
+ CHK_SV;
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_helper_booke206_tlbivax(cpu_env, t0);
+ tcg_temp_free(t0);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+static void gen_tlbilx_booke206(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv t0;
+
+ CHK_SV;
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+
+ switch ((ctx->opcode >> 21) & 0x3) {
+ case 0:
+ gen_helper_booke206_tlbilx0(cpu_env, t0);
+ break;
+ case 1:
+ gen_helper_booke206_tlbilx1(cpu_env, t0);
+ break;
+ case 3:
+ gen_helper_booke206_tlbilx3(cpu_env, t0);
+ break;
+ default:
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ break;
+ }
+
+ tcg_temp_free(t0);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+
+/* wrtee */
+static void gen_wrtee(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ TCGv t0;
+
+ CHK_SV;
+ t0 = tcg_temp_new();
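+ /* Copy only the EE bit from the source GPR into MSR, leaving the
+ * remaining MSR bits untouched.
+ */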
+ tcg_gen_andi_tl(t0, cpu_gpr[rD(ctx->opcode)], (1 << MSR_EE));
+ tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(1 << MSR_EE));
+ tcg_gen_or_tl(cpu_msr, cpu_msr, t0);
+ tcg_temp_free(t0);
+ /* Stop translation to have a chance to raise an exception
+ * if we just set msr_ee to 1
+ */
+ gen_stop_exception(ctx);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* wrteei */
+static void gen_wrteei(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
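+ /* The immediate E field sits at bit 0x00008000 of the opcode: set or
+ * clear MSR[EE] accordingly.
+ */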
+ if (ctx->opcode & 0x00008000) {
+ tcg_gen_ori_tl(cpu_msr, cpu_msr, (1 << MSR_EE));
+ /* Stop translation to have a chance to raise an exception */
+ gen_stop_exception(ctx);
+ } else {
+ tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(1 << MSR_EE));
+ }
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+/* PowerPC 440 specific instructions */
+
+/* dlmzb */
+static void gen_dlmzb(DisasContext *ctx)
+{
+ TCGv_i32 t0 = tcg_const_i32(Rc(ctx->opcode));
+ gen_helper_dlmzb(cpu_gpr[rA(ctx->opcode)], cpu_env,
+ cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0);
+ tcg_temp_free_i32(t0);
+}
+
+/* mbar replaces eieio on 440 */
+static void gen_mbar(DisasContext *ctx)
+{
+ /* interpreted as no-op */
+}
+
+/* msync replaces sync on 440 */
+static void gen_msync_4xx(DisasContext *ctx)
+{
+ /* interpreted as no-op */
+}
+
+/* icbt */
+static void gen_icbt_440(DisasContext *ctx)
+{
+ /* interpreted as no-op */
+ /* XXX: the specification says this is treated as a load by the MMU
+ * but does not generate any exception
+ */
+}
+
+/* Embedded.Processor Control */
+
+static void gen_msgclr(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+ gen_helper_msgclr(cpu_env, cpu_gpr[rB(ctx->opcode)]);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+static void gen_msgsnd(DisasContext *ctx)
+{
+#if defined(CONFIG_USER_ONLY)
+ GEN_PRIV;
+#else
+ CHK_SV;
+ gen_helper_msgsnd(cpu_gpr[rB(ctx->opcode)]);
+#endif /* defined(CONFIG_USER_ONLY) */
+}
+
+
+#if defined(TARGET_PPC64)
+static void gen_maddld(DisasContext *ctx)
+{
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
+ tcg_gen_mul_i64(t1, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_add_i64(cpu_gpr[rD(ctx->opcode)], t1, cpu_gpr[rC(ctx->opcode)]);
+ tcg_temp_free_i64(t1);
+}
+
+/* maddhd maddhdu */
+static void gen_maddhd_maddhdu(DisasContext *ctx)
+{
+ TCGv_i64 lo = tcg_temp_new_i64();
+ TCGv_i64 hi = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
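+ /* The bit read by Rc() selects the unsigned form: mulu2 with a zero
+ * high addend word, versus muls2 with rC sign-extended into t1. rD then
+ * receives the high 64 bits of rA * rB + rC.
+ */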
+ if (Rc(ctx->opcode)) {
+ tcg_gen_mulu2_i64(lo, hi, cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_movi_i64(t1, 0);
+ } else {
+ tcg_gen_muls2_i64(lo, hi, cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_sari_i64(t1, cpu_gpr[rC(ctx->opcode)], 63);
+ }
+ tcg_gen_add2_i64(t1, cpu_gpr[rD(ctx->opcode)], lo, hi,
+ cpu_gpr[rC(ctx->opcode)], t1);
+ tcg_temp_free_i64(lo);
+ tcg_temp_free_i64(hi);
+ tcg_temp_free_i64(t1);
+}
+#endif /* defined(TARGET_PPC64) */
+
+static void gen_tbegin(DisasContext *ctx)
+{
+ if (unlikely(!ctx->tm_enabled)) {
+ gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);
+ return;
+ }
+ gen_helper_tbegin(cpu_env);
+}
+
+#define GEN_TM_NOOP(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->tm_enabled)) { \
+ gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM); \
+ return; \
+ } \
+ /* Because tbegin always fails in QEMU, these user \
+ * space instructions all have a simple implementation: \
+ * \
+ * CR[0] = 0b0 || MSR[TS] || 0b0 \
+ * = 0b0 || 0b00 || 0b0 \
+ */ \
+ tcg_gen_movi_i32(cpu_crf[0], 0); \
+}
+
+GEN_TM_NOOP(tend);
+GEN_TM_NOOP(tabort);
+GEN_TM_NOOP(tabortwc);
+GEN_TM_NOOP(tabortwci);
+GEN_TM_NOOP(tabortdc);
+GEN_TM_NOOP(tabortdci);
+GEN_TM_NOOP(tsr);
+
+static void gen_tcheck(DisasContext *ctx)
+{
+ if (unlikely(!ctx->tm_enabled)) {
+ gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM);
+ return;
+ }
+ /* Because tbegin always fails, the tcheck implementation
+ * is simple:
+ *
+ * CR[CRF] = TDOOMED || MSR[TS] || 0b0
+ * = 0b1 || 0b00 || 0b0
+ */
+ tcg_gen_movi_i32(cpu_crf[crfD(ctx->opcode)], 0x8);
+}
+
+#if defined(CONFIG_USER_ONLY)
+#define GEN_TM_PRIV_NOOP(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); \
+}
+
+#else
+
+#define GEN_TM_PRIV_NOOP(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ CHK_SV; \
+ if (unlikely(!ctx->tm_enabled)) { \
+ gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM); \
+ return; \
+ } \
+ /* Because tbegin always fails, the implementation is \
+ * simple: \
+ * \
+ * CR[0] = 0b0 || MSR[TS] || 0b0 \
+ * = 0b0 || 0b00 || 0b0 \
+ */ \
+ tcg_gen_movi_i32(cpu_crf[0], 0); \
+}
+
+#endif
+
+GEN_TM_PRIV_NOOP(treclaim);
+GEN_TM_PRIV_NOOP(trechkpt);
+
+#include "translate/fp-impl.inc.c"
+
+#include "translate/vmx-impl.inc.c"
+
+#include "translate/vsx-impl.inc.c"
+
+#include "translate/dfp-impl.inc.c"
+
+#include "translate/spe-impl.inc.c"
+
+static opcode_t opcodes[] = {
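+/* Each GEN_HANDLER entry is (name, opc1, opc2, opc3, invalid-bit mask,
+ * instruction-family flag); the _E variants take a second flag word and
+ * the "2" variants let the handler and the mnemonic use different names.
+ */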
+GEN_HANDLER(invalid, 0x00, 0x00, 0x00, 0xFFFFFFFF, PPC_NONE),
+GEN_HANDLER(cmp, 0x1F, 0x00, 0x00, 0x00400000, PPC_INTEGER),
+GEN_HANDLER(cmpi, 0x0B, 0xFF, 0xFF, 0x00400000, PPC_INTEGER),
+GEN_HANDLER(cmpl, 0x1F, 0x00, 0x01, 0x00400001, PPC_INTEGER),
+GEN_HANDLER(cmpli, 0x0A, 0xFF, 0xFF, 0x00400000, PPC_INTEGER),
+#if defined(TARGET_PPC64)
+GEN_HANDLER_E(cmpeqb, 0x1F, 0x00, 0x07, 0x00600000, PPC_NONE, PPC2_ISA300),
+#endif
+GEN_HANDLER_E(cmpb, 0x1F, 0x1C, 0x0F, 0x00000001, PPC_NONE, PPC2_ISA205),
+GEN_HANDLER_E(cmprb, 0x1F, 0x00, 0x06, 0x00400001, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER(isel, 0x1F, 0x0F, 0xFF, 0x00000001, PPC_ISEL),
+GEN_HANDLER(addi, 0x0E, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(addic, 0x0C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER2(addic_, "addic.", 0x0D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(addis, 0x0F, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER_E(addpcis, 0x13, 0x2, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER(mulhw, 0x1F, 0x0B, 0x02, 0x00000400, PPC_INTEGER),
+GEN_HANDLER(mulhwu, 0x1F, 0x0B, 0x00, 0x00000400, PPC_INTEGER),
+GEN_HANDLER(mullw, 0x1F, 0x0B, 0x07, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(mullwo, 0x1F, 0x0B, 0x17, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(mulli, 0x07, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+#if defined(TARGET_PPC64)
+GEN_HANDLER(mulld, 0x1F, 0x09, 0x07, 0x00000000, PPC_64B),
+#endif
+GEN_HANDLER(neg, 0x1F, 0x08, 0x03, 0x0000F800, PPC_INTEGER),
+GEN_HANDLER(nego, 0x1F, 0x08, 0x13, 0x0000F800, PPC_INTEGER),
+GEN_HANDLER(subfic, 0x08, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER2(andi_, "andi.", 0x1C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER2(andis_, "andis.", 0x1D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(cntlzw, 0x1F, 0x1A, 0x00, 0x00000000, PPC_INTEGER),
+GEN_HANDLER_E(cnttzw, 0x1F, 0x1A, 0x10, 0x00000000, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER(or, 0x1F, 0x1C, 0x0D, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(xor, 0x1F, 0x1C, 0x09, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(ori, 0x18, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(oris, 0x19, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(xori, 0x1A, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(xoris, 0x1B, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(popcntb, 0x1F, 0x1A, 0x03, 0x0000F801, PPC_POPCNTB),
+GEN_HANDLER(popcntw, 0x1F, 0x1A, 0x0b, 0x0000F801, PPC_POPCNTWD),
+GEN_HANDLER_E(prtyw, 0x1F, 0x1A, 0x04, 0x0000F801, PPC_NONE, PPC2_ISA205),
+#if defined(TARGET_PPC64)
+GEN_HANDLER(popcntd, 0x1F, 0x1A, 0x0F, 0x0000F801, PPC_POPCNTWD),
+GEN_HANDLER(cntlzd, 0x1F, 0x1A, 0x01, 0x00000000, PPC_64B),
+GEN_HANDLER_E(cnttzd, 0x1F, 0x1A, 0x11, 0x00000000, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(darn, 0x1F, 0x13, 0x17, 0x001CF801, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(prtyd, 0x1F, 0x1A, 0x05, 0x0000F801, PPC_NONE, PPC2_ISA205),
+GEN_HANDLER_E(bpermd, 0x1F, 0x1C, 0x07, 0x00000001, PPC_NONE, PPC2_PERM_ISA206),
+#endif
+GEN_HANDLER(rlwimi, 0x14, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(rlwinm, 0x15, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(rlwnm, 0x17, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(slw, 0x1F, 0x18, 0x00, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(sraw, 0x1F, 0x18, 0x18, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(srawi, 0x1F, 0x18, 0x19, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(srw, 0x1F, 0x18, 0x10, 0x00000000, PPC_INTEGER),
+#if defined(TARGET_PPC64)
+GEN_HANDLER(sld, 0x1F, 0x1B, 0x00, 0x00000000, PPC_64B),
+GEN_HANDLER(srad, 0x1F, 0x1A, 0x18, 0x00000000, PPC_64B),
+GEN_HANDLER2(sradi0, "sradi", 0x1F, 0x1A, 0x19, 0x00000000, PPC_64B),
+GEN_HANDLER2(sradi1, "sradi", 0x1F, 0x1B, 0x19, 0x00000000, PPC_64B),
+GEN_HANDLER(srd, 0x1F, 0x1B, 0x10, 0x00000000, PPC_64B),
+GEN_HANDLER2_E(extswsli0, "extswsli", 0x1F, 0x1A, 0x1B, 0x00000000,
+ PPC_NONE, PPC2_ISA300),
+GEN_HANDLER2_E(extswsli1, "extswsli", 0x1F, 0x1B, 0x1B, 0x00000000,
+ PPC_NONE, PPC2_ISA300),
+#endif
+#if defined(TARGET_PPC64)
+GEN_HANDLER(ld, 0x3A, 0xFF, 0xFF, 0x00000000, PPC_64B),
+GEN_HANDLER(lq, 0x38, 0xFF, 0xFF, 0x00000000, PPC_64BX),
+GEN_HANDLER(std, 0x3E, 0xFF, 0xFF, 0x00000000, PPC_64B),
+#endif
+GEN_HANDLER(lmw, 0x2E, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(stmw, 0x2F, 0xFF, 0xFF, 0x00000000, PPC_INTEGER),
+GEN_HANDLER(lswi, 0x1F, 0x15, 0x12, 0x00000001, PPC_STRING),
+GEN_HANDLER(lswx, 0x1F, 0x15, 0x10, 0x00000001, PPC_STRING),
+GEN_HANDLER(stswi, 0x1F, 0x15, 0x16, 0x00000001, PPC_STRING),
+GEN_HANDLER(stswx, 0x1F, 0x15, 0x14, 0x00000001, PPC_STRING),
+GEN_HANDLER(eieio, 0x1F, 0x16, 0x1A, 0x03FFF801, PPC_MEM_EIEIO),
+GEN_HANDLER(isync, 0x13, 0x16, 0x04, 0x03FFF801, PPC_MEM),
+GEN_HANDLER_E(lbarx, 0x1F, 0x14, 0x01, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
+GEN_HANDLER_E(lharx, 0x1F, 0x14, 0x03, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
+GEN_HANDLER(lwarx, 0x1F, 0x14, 0x00, 0x00000000, PPC_RES),
+GEN_HANDLER_E(stbcx_, 0x1F, 0x16, 0x15, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
+GEN_HANDLER_E(sthcx_, 0x1F, 0x16, 0x16, 0, PPC_NONE, PPC2_ATOMIC_ISA206),
+GEN_HANDLER2(stwcx_, "stwcx.", 0x1F, 0x16, 0x04, 0x00000000, PPC_RES),
+#if defined(TARGET_PPC64)
+GEN_HANDLER(ldarx, 0x1F, 0x14, 0x02, 0x00000000, PPC_64B),
+GEN_HANDLER_E(lqarx, 0x1F, 0x14, 0x08, 0, PPC_NONE, PPC2_LSQ_ISA207),
+GEN_HANDLER2(stdcx_, "stdcx.", 0x1F, 0x16, 0x06, 0x00000000, PPC_64B),
+GEN_HANDLER_E(stqcx_, 0x1F, 0x16, 0x05, 0, PPC_NONE, PPC2_LSQ_ISA207),
+#endif
+GEN_HANDLER(sync, 0x1F, 0x16, 0x12, 0x039FF801, PPC_MEM_SYNC),
+GEN_HANDLER(wait, 0x1F, 0x1E, 0x01, 0x03FFF801, PPC_WAIT),
+GEN_HANDLER(b, 0x12, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
+GEN_HANDLER(bc, 0x10, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
+GEN_HANDLER(bcctr, 0x13, 0x10, 0x10, 0x00000000, PPC_FLOW),
+GEN_HANDLER(bclr, 0x13, 0x10, 0x00, 0x00000000, PPC_FLOW),
+GEN_HANDLER_E(bctar, 0x13, 0x10, 0x11, 0x0000E000, PPC_NONE, PPC2_BCTAR_ISA207),
+GEN_HANDLER(mcrf, 0x13, 0x00, 0xFF, 0x00000001, PPC_INTEGER),
+GEN_HANDLER(rfi, 0x13, 0x12, 0x01, 0x03FF8001, PPC_FLOW),
+#if defined(TARGET_PPC64)
+GEN_HANDLER(rfid, 0x13, 0x12, 0x00, 0x03FF8001, PPC_64B),
+GEN_HANDLER_E(doze, 0x13, 0x12, 0x0c, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
+GEN_HANDLER_E(nap, 0x13, 0x12, 0x0d, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
+GEN_HANDLER_E(sleep, 0x13, 0x12, 0x0e, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
+GEN_HANDLER_E(rvwinkle, 0x13, 0x12, 0x0f, 0x03FFF801, PPC_NONE, PPC2_PM_ISA206),
+GEN_HANDLER(hrfid, 0x13, 0x12, 0x08, 0x03FF8001, PPC_64H),
+#endif
+GEN_HANDLER(sc, 0x11, 0xFF, 0xFF, 0x03FFF01D, PPC_FLOW),
+GEN_HANDLER(tw, 0x1F, 0x04, 0x00, 0x00000001, PPC_FLOW),
+GEN_HANDLER(twi, 0x03, 0xFF, 0xFF, 0x00000000, PPC_FLOW),
+#if defined(TARGET_PPC64)
+GEN_HANDLER(td, 0x1F, 0x04, 0x02, 0x00000001, PPC_64B),
+GEN_HANDLER(tdi, 0x02, 0xFF, 0xFF, 0x00000000, PPC_64B),
+#endif
+GEN_HANDLER(mcrxr, 0x1F, 0x00, 0x10, 0x007FF801, PPC_MISC),
+GEN_HANDLER(mfcr, 0x1F, 0x13, 0x00, 0x00000801, PPC_MISC),
+GEN_HANDLER(mfmsr, 0x1F, 0x13, 0x02, 0x001FF801, PPC_MISC),
+GEN_HANDLER(mfspr, 0x1F, 0x13, 0x0A, 0x00000001, PPC_MISC),
+GEN_HANDLER(mftb, 0x1F, 0x13, 0x0B, 0x00000001, PPC_MFTB),
+GEN_HANDLER(mtcrf, 0x1F, 0x10, 0x04, 0x00000801, PPC_MISC),
+#if defined(TARGET_PPC64)
+GEN_HANDLER(mtmsrd, 0x1F, 0x12, 0x05, 0x001EF801, PPC_64B),
+GEN_HANDLER_E(setb, 0x1F, 0x00, 0x04, 0x0003F801, PPC_NONE, PPC2_ISA300),
+#endif
+GEN_HANDLER(mtmsr, 0x1F, 0x12, 0x04, 0x001EF801, PPC_MISC),
+GEN_HANDLER(mtspr, 0x1F, 0x13, 0x0E, 0x00000000, PPC_MISC),
+GEN_HANDLER(dcbf, 0x1F, 0x16, 0x02, 0x03C00001, PPC_CACHE),
+GEN_HANDLER(dcbi, 0x1F, 0x16, 0x0E, 0x03E00001, PPC_CACHE),
+GEN_HANDLER(dcbst, 0x1F, 0x16, 0x01, 0x03E00001, PPC_CACHE),
+GEN_HANDLER(dcbt, 0x1F, 0x16, 0x08, 0x00000001, PPC_CACHE),
+GEN_HANDLER(dcbtst, 0x1F, 0x16, 0x07, 0x00000001, PPC_CACHE),
+GEN_HANDLER_E(dcbtls, 0x1F, 0x06, 0x05, 0x02000001, PPC_BOOKE, PPC2_BOOKE206),
+GEN_HANDLER(dcbz, 0x1F, 0x16, 0x1F, 0x03C00001, PPC_CACHE_DCBZ),
+GEN_HANDLER(dst, 0x1F, 0x16, 0x0A, 0x01800001, PPC_ALTIVEC),
+GEN_HANDLER(dstst, 0x1F, 0x16, 0x0B, 0x02000001, PPC_ALTIVEC),
+GEN_HANDLER(dss, 0x1F, 0x16, 0x19, 0x019FF801, PPC_ALTIVEC),
+GEN_HANDLER(icbi, 0x1F, 0x16, 0x1E, 0x03E00001, PPC_CACHE_ICBI),
+GEN_HANDLER(dcba, 0x1F, 0x16, 0x17, 0x03E00001, PPC_CACHE_DCBA),
+GEN_HANDLER(mfsr, 0x1F, 0x13, 0x12, 0x0010F801, PPC_SEGMENT),
+GEN_HANDLER(mfsrin, 0x1F, 0x13, 0x14, 0x001F0001, PPC_SEGMENT),
+GEN_HANDLER(mtsr, 0x1F, 0x12, 0x06, 0x0010F801, PPC_SEGMENT),
+GEN_HANDLER(mtsrin, 0x1F, 0x12, 0x07, 0x001F0001, PPC_SEGMENT),
+#if defined(TARGET_PPC64)
+GEN_HANDLER2(mfsr_64b, "mfsr", 0x1F, 0x13, 0x12, 0x0010F801, PPC_SEGMENT_64B),
+GEN_HANDLER2(mfsrin_64b, "mfsrin", 0x1F, 0x13, 0x14, 0x001F0001,
+ PPC_SEGMENT_64B),
+GEN_HANDLER2(mtsr_64b, "mtsr", 0x1F, 0x12, 0x06, 0x0010F801, PPC_SEGMENT_64B),
+GEN_HANDLER2(mtsrin_64b, "mtsrin", 0x1F, 0x12, 0x07, 0x001F0001,
+ PPC_SEGMENT_64B),
+GEN_HANDLER2(slbmte, "slbmte", 0x1F, 0x12, 0x0C, 0x001F0001, PPC_SEGMENT_64B),
+GEN_HANDLER2(slbmfee, "slbmfee", 0x1F, 0x13, 0x1C, 0x001F0001, PPC_SEGMENT_64B),
+GEN_HANDLER2(slbmfev, "slbmfev", 0x1F, 0x13, 0x1A, 0x001F0001, PPC_SEGMENT_64B),
+GEN_HANDLER2(slbfee_, "slbfee.", 0x1F, 0x13, 0x1E, 0x001F0000, PPC_SEGMENT_64B),
+#endif
+GEN_HANDLER(tlbia, 0x1F, 0x12, 0x0B, 0x03FFFC01, PPC_MEM_TLBIA),
+/* XXX: these instructions will need to be handled differently for
+ * different ISA versions */
+GEN_HANDLER(tlbiel, 0x1F, 0x12, 0x08, 0x001F0001, PPC_MEM_TLBIE),
+GEN_HANDLER(tlbie, 0x1F, 0x12, 0x09, 0x001F0001, PPC_MEM_TLBIE),
+GEN_HANDLER(tlbsync, 0x1F, 0x16, 0x11, 0x03FFF801, PPC_MEM_TLBSYNC),
+#if defined(TARGET_PPC64)
+GEN_HANDLER(slbia, 0x1F, 0x12, 0x0F, 0x031FFC01, PPC_SLBI),
+GEN_HANDLER(slbie, 0x1F, 0x12, 0x0D, 0x03FF0001, PPC_SLBI),
+#endif
+GEN_HANDLER(eciwx, 0x1F, 0x16, 0x0D, 0x00000001, PPC_EXTERN),
+GEN_HANDLER(ecowx, 0x1F, 0x16, 0x09, 0x00000001, PPC_EXTERN),
+GEN_HANDLER(abs, 0x1F, 0x08, 0x0B, 0x0000F800, PPC_POWER_BR),
+GEN_HANDLER(abso, 0x1F, 0x08, 0x1B, 0x0000F800, PPC_POWER_BR),
+GEN_HANDLER(clcs, 0x1F, 0x10, 0x13, 0x0000F800, PPC_POWER_BR),
+GEN_HANDLER(div, 0x1F, 0x0B, 0x0A, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(divo, 0x1F, 0x0B, 0x1A, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(divs, 0x1F, 0x0B, 0x0B, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(divso, 0x1F, 0x0B, 0x1B, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(doz, 0x1F, 0x08, 0x08, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(dozo, 0x1F, 0x08, 0x18, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(dozi, 0x09, 0xFF, 0xFF, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(lscbx, 0x1F, 0x15, 0x08, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(maskg, 0x1F, 0x1D, 0x00, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(maskir, 0x1F, 0x1D, 0x10, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(mul, 0x1F, 0x0B, 0x03, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(mulo, 0x1F, 0x0B, 0x13, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(nabs, 0x1F, 0x08, 0x0F, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(nabso, 0x1F, 0x08, 0x1F, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(rlmi, 0x16, 0xFF, 0xFF, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(rrib, 0x1F, 0x19, 0x10, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(sle, 0x1F, 0x19, 0x04, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(sleq, 0x1F, 0x19, 0x06, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(sliq, 0x1F, 0x18, 0x05, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(slliq, 0x1F, 0x18, 0x07, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(sllq, 0x1F, 0x18, 0x06, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(slq, 0x1F, 0x18, 0x04, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(sraiq, 0x1F, 0x18, 0x1D, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(sraq, 0x1F, 0x18, 0x1C, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(sre, 0x1F, 0x19, 0x14, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(srea, 0x1F, 0x19, 0x1C, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(sreq, 0x1F, 0x19, 0x16, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(sriq, 0x1F, 0x18, 0x15, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(srliq, 0x1F, 0x18, 0x17, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(srlq, 0x1F, 0x18, 0x16, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(srq, 0x1F, 0x18, 0x14, 0x00000000, PPC_POWER_BR),
+GEN_HANDLER(dsa, 0x1F, 0x14, 0x13, 0x03FFF801, PPC_602_SPEC),
+GEN_HANDLER(esa, 0x1F, 0x14, 0x12, 0x03FFF801, PPC_602_SPEC),
+GEN_HANDLER(mfrom, 0x1F, 0x09, 0x08, 0x03E0F801, PPC_602_SPEC),
+GEN_HANDLER2(tlbld_6xx, "tlbld", 0x1F, 0x12, 0x1E, 0x03FF0001, PPC_6xx_TLB),
+GEN_HANDLER2(tlbli_6xx, "tlbli", 0x1F, 0x12, 0x1F, 0x03FF0001, PPC_6xx_TLB),
+GEN_HANDLER2(tlbld_74xx, "tlbld", 0x1F, 0x12, 0x1E, 0x03FF0001, PPC_74xx_TLB),
+GEN_HANDLER2(tlbli_74xx, "tlbli", 0x1F, 0x12, 0x1F, 0x03FF0001, PPC_74xx_TLB),
+GEN_HANDLER(clf, 0x1F, 0x16, 0x03, 0x03E00000, PPC_POWER),
+GEN_HANDLER(cli, 0x1F, 0x16, 0x0F, 0x03E00000, PPC_POWER),
+GEN_HANDLER(dclst, 0x1F, 0x16, 0x13, 0x03E00000, PPC_POWER),
+GEN_HANDLER(mfsri, 0x1F, 0x13, 0x13, 0x00000001, PPC_POWER),
+GEN_HANDLER(rac, 0x1F, 0x12, 0x19, 0x00000001, PPC_POWER),
+GEN_HANDLER(rfsvc, 0x13, 0x12, 0x02, 0x03FFF0001, PPC_POWER),
+GEN_HANDLER(lfq, 0x38, 0xFF, 0xFF, 0x00000003, PPC_POWER2),
+GEN_HANDLER(lfqu, 0x39, 0xFF, 0xFF, 0x00000003, PPC_POWER2),
+GEN_HANDLER(lfqux, 0x1F, 0x17, 0x19, 0x00000001, PPC_POWER2),
+GEN_HANDLER(lfqx, 0x1F, 0x17, 0x18, 0x00000001, PPC_POWER2),
+GEN_HANDLER(stfq, 0x3C, 0xFF, 0xFF, 0x00000003, PPC_POWER2),
+GEN_HANDLER(stfqu, 0x3D, 0xFF, 0xFF, 0x00000003, PPC_POWER2),
+GEN_HANDLER(stfqux, 0x1F, 0x17, 0x1D, 0x00000001, PPC_POWER2),
+GEN_HANDLER(stfqx, 0x1F, 0x17, 0x1C, 0x00000001, PPC_POWER2),
+GEN_HANDLER(mfapidi, 0x1F, 0x13, 0x08, 0x0000F801, PPC_MFAPIDI),
+GEN_HANDLER(tlbiva, 0x1F, 0x12, 0x18, 0x03FFF801, PPC_TLBIVA),
+GEN_HANDLER(mfdcr, 0x1F, 0x03, 0x0A, 0x00000001, PPC_DCR),
+GEN_HANDLER(mtdcr, 0x1F, 0x03, 0x0E, 0x00000001, PPC_DCR),
+GEN_HANDLER(mfdcrx, 0x1F, 0x03, 0x08, 0x00000000, PPC_DCRX),
+GEN_HANDLER(mtdcrx, 0x1F, 0x03, 0x0C, 0x00000000, PPC_DCRX),
+GEN_HANDLER(mfdcrux, 0x1F, 0x03, 0x09, 0x00000000, PPC_DCRUX),
+GEN_HANDLER(mtdcrux, 0x1F, 0x03, 0x0D, 0x00000000, PPC_DCRUX),
+GEN_HANDLER(dccci, 0x1F, 0x06, 0x0E, 0x03E00001, PPC_4xx_COMMON),
+GEN_HANDLER(dcread, 0x1F, 0x06, 0x0F, 0x00000001, PPC_4xx_COMMON),
+GEN_HANDLER2(icbt_40x, "icbt", 0x1F, 0x06, 0x08, 0x03E00001, PPC_40x_ICBT),
+GEN_HANDLER(iccci, 0x1F, 0x06, 0x1E, 0x00000001, PPC_4xx_COMMON),
+GEN_HANDLER(icread, 0x1F, 0x06, 0x1F, 0x03E00001, PPC_4xx_COMMON),
+GEN_HANDLER2(rfci_40x, "rfci", 0x13, 0x13, 0x01, 0x03FF8001, PPC_40x_EXCP),
+GEN_HANDLER_E(rfci, 0x13, 0x13, 0x01, 0x03FF8001, PPC_BOOKE, PPC2_BOOKE206),
+GEN_HANDLER(rfdi, 0x13, 0x07, 0x01, 0x03FF8001, PPC_RFDI),
+GEN_HANDLER(rfmci, 0x13, 0x06, 0x01, 0x03FF8001, PPC_RFMCI),
+GEN_HANDLER2(tlbre_40x, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, PPC_40x_TLB),
+GEN_HANDLER2(tlbsx_40x, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, PPC_40x_TLB),
+GEN_HANDLER2(tlbwe_40x, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, PPC_40x_TLB),
+GEN_HANDLER2(tlbre_440, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001, PPC_BOOKE),
+GEN_HANDLER2(tlbsx_440, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000, PPC_BOOKE),
+GEN_HANDLER2(tlbwe_440, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001, PPC_BOOKE),
+GEN_HANDLER2_E(tlbre_booke206, "tlbre", 0x1F, 0x12, 0x1D, 0x00000001,
+ PPC_NONE, PPC2_BOOKE206),
+GEN_HANDLER2_E(tlbsx_booke206, "tlbsx", 0x1F, 0x12, 0x1C, 0x00000000,
+ PPC_NONE, PPC2_BOOKE206),
+GEN_HANDLER2_E(tlbwe_booke206, "tlbwe", 0x1F, 0x12, 0x1E, 0x00000001,
+ PPC_NONE, PPC2_BOOKE206),
+GEN_HANDLER2_E(tlbivax_booke206, "tlbivax", 0x1F, 0x12, 0x18, 0x00000001,
+ PPC_NONE, PPC2_BOOKE206),
+GEN_HANDLER2_E(tlbilx_booke206, "tlbilx", 0x1F, 0x12, 0x00, 0x03800001,
+ PPC_NONE, PPC2_BOOKE206),
+GEN_HANDLER2_E(msgsnd, "msgsnd", 0x1F, 0x0E, 0x06, 0x03ff0001,
+ PPC_NONE, PPC2_PRCNTL),
+GEN_HANDLER2_E(msgclr, "msgclr", 0x1F, 0x0E, 0x07, 0x03ff0001,
+ PPC_NONE, PPC2_PRCNTL),
+GEN_HANDLER(wrtee, 0x1F, 0x03, 0x04, 0x000FFC01, PPC_WRTEE),
+GEN_HANDLER(wrteei, 0x1F, 0x03, 0x05, 0x000E7C01, PPC_WRTEE),
+GEN_HANDLER(dlmzb, 0x1F, 0x0E, 0x02, 0x00000000, PPC_440_SPEC),
+GEN_HANDLER_E(mbar, 0x1F, 0x16, 0x1a, 0x001FF801,
+ PPC_BOOKE, PPC2_BOOKE206),
+GEN_HANDLER(msync_4xx, 0x1F, 0x16, 0x12, 0x03FFF801, PPC_BOOKE),
+GEN_HANDLER2_E(icbt_440, "icbt", 0x1F, 0x16, 0x00, 0x03E00001,
+ PPC_BOOKE, PPC2_BOOKE206),
+GEN_HANDLER(lvsl, 0x1f, 0x06, 0x00, 0x00000001, PPC_ALTIVEC),
+GEN_HANDLER(lvsr, 0x1f, 0x06, 0x01, 0x00000001, PPC_ALTIVEC),
+GEN_HANDLER(mfvscr, 0x04, 0x2, 0x18, 0x001ff800, PPC_ALTIVEC),
+GEN_HANDLER(mtvscr, 0x04, 0x2, 0x19, 0x03ff0000, PPC_ALTIVEC),
+GEN_HANDLER(vmladduhm, 0x04, 0x11, 0xFF, 0x00000000, PPC_ALTIVEC),
+#if defined(TARGET_PPC64)
+GEN_HANDLER_E(maddhd_maddhdu, 0x04, 0x18, 0xFF, 0x00000000, PPC_NONE,
+ PPC2_ISA300),
+GEN_HANDLER_E(maddld, 0x04, 0x19, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA300),
+#endif
+
+#undef GEN_INT_ARITH_ADD
+#undef GEN_INT_ARITH_ADD_CONST
+#define GEN_INT_ARITH_ADD(name, opc3, add_ca, compute_ca, compute_ov) \
+GEN_HANDLER(name, 0x1F, 0x0A, opc3, 0x00000000, PPC_INTEGER),
+#define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, \
+ add_ca, compute_ca, compute_ov) \
+GEN_HANDLER(name, 0x1F, 0x0A, opc3, 0x0000F800, PPC_INTEGER),
+GEN_INT_ARITH_ADD(add, 0x08, 0, 0, 0)
+GEN_INT_ARITH_ADD(addo, 0x18, 0, 0, 1)
+GEN_INT_ARITH_ADD(addc, 0x00, 0, 1, 0)
+GEN_INT_ARITH_ADD(addco, 0x10, 0, 1, 1)
+GEN_INT_ARITH_ADD(adde, 0x04, 1, 1, 0)
+GEN_INT_ARITH_ADD(addeo, 0x14, 1, 1, 1)
+GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, 1, 1, 0)
+GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, 1, 1, 1)
+GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, 1, 1, 0)
+GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, 1, 1, 1)
+
+#undef GEN_INT_ARITH_DIVW
+#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov) \
+GEN_HANDLER(name, 0x1F, 0x0B, opc3, 0x00000000, PPC_INTEGER)
+GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0),
+GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1),
+GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0),
+GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1),
+GEN_HANDLER_E(divwe, 0x1F, 0x0B, 0x0D, 0, PPC_NONE, PPC2_DIVE_ISA206),
+GEN_HANDLER_E(divweo, 0x1F, 0x0B, 0x1D, 0, PPC_NONE, PPC2_DIVE_ISA206),
+GEN_HANDLER_E(divweu, 0x1F, 0x0B, 0x0C, 0, PPC_NONE, PPC2_DIVE_ISA206),
+GEN_HANDLER_E(divweuo, 0x1F, 0x0B, 0x1C, 0, PPC_NONE, PPC2_DIVE_ISA206),
+GEN_HANDLER_E(modsw, 0x1F, 0x0B, 0x18, 0x00000001, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(moduw, 0x1F, 0x0B, 0x08, 0x00000001, PPC_NONE, PPC2_ISA300),
+
+#if defined(TARGET_PPC64)
+#undef GEN_INT_ARITH_DIVD
+#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov) \
+GEN_HANDLER(name, 0x1F, 0x09, opc3, 0x00000000, PPC_64B)
+GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0),
+GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1),
+GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0),
+GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1),
+
+GEN_HANDLER_E(divdeu, 0x1F, 0x09, 0x0C, 0, PPC_NONE, PPC2_DIVE_ISA206),
+GEN_HANDLER_E(divdeuo, 0x1F, 0x09, 0x1C, 0, PPC_NONE, PPC2_DIVE_ISA206),
+GEN_HANDLER_E(divde, 0x1F, 0x09, 0x0D, 0, PPC_NONE, PPC2_DIVE_ISA206),
+GEN_HANDLER_E(divdeo, 0x1F, 0x09, 0x1D, 0, PPC_NONE, PPC2_DIVE_ISA206),
+GEN_HANDLER_E(modsd, 0x1F, 0x09, 0x18, 0x00000001, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(modud, 0x1F, 0x09, 0x08, 0x00000001, PPC_NONE, PPC2_ISA300),
+
+#undef GEN_INT_ARITH_MUL_HELPER
+#define GEN_INT_ARITH_MUL_HELPER(name, opc3) \
+GEN_HANDLER(name, 0x1F, 0x09, opc3, 0x00000000, PPC_64B)
+GEN_INT_ARITH_MUL_HELPER(mulhdu, 0x00),
+GEN_INT_ARITH_MUL_HELPER(mulhd, 0x02),
+GEN_INT_ARITH_MUL_HELPER(mulldo, 0x17),
+#endif
+
+#undef GEN_INT_ARITH_SUBF
+#undef GEN_INT_ARITH_SUBF_CONST
+#define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov) \
+GEN_HANDLER(name, 0x1F, 0x08, opc3, 0x00000000, PPC_INTEGER),
+#define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val, \
+ add_ca, compute_ca, compute_ov) \
+GEN_HANDLER(name, 0x1F, 0x08, opc3, 0x0000F800, PPC_INTEGER),
+GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0)
+GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1)
+GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0)
+GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1)
+GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0)
+GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1)
+GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0)
+GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1)
+GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0)
+GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1)
+
+#undef GEN_LOGICAL1
+#undef GEN_LOGICAL2
+#define GEN_LOGICAL2(name, tcg_op, opc, type) \
+GEN_HANDLER(name, 0x1F, 0x1C, opc, 0x00000000, type)
+#define GEN_LOGICAL1(name, tcg_op, opc, type) \
+GEN_HANDLER(name, 0x1F, 0x1A, opc, 0x00000000, type)
+GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER),
+GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER),
+GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER),
+GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER),
+GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER),
+GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER),
+GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER),
+GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER),
+#if defined(TARGET_PPC64)
+GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B),
+#endif
+
+#if defined(TARGET_PPC64)
+#undef GEN_PPC64_R2
+#undef GEN_PPC64_R4
+#define GEN_PPC64_R2(name, opc1, opc2) \
+GEN_HANDLER2(name##0, stringify(name), opc1, opc2, 0xFF, 0x00000000, PPC_64B),\
+GEN_HANDLER2(name##1, stringify(name), opc1, opc2 | 0x10, 0xFF, 0x00000000, \
+ PPC_64B)
+#define GEN_PPC64_R4(name, opc1, opc2) \
+GEN_HANDLER2(name##0, stringify(name), opc1, opc2, 0xFF, 0x00000000, PPC_64B),\
+GEN_HANDLER2(name##1, stringify(name), opc1, opc2 | 0x01, 0xFF, 0x00000000, \
+ PPC_64B), \
+GEN_HANDLER2(name##2, stringify(name), opc1, opc2 | 0x10, 0xFF, 0x00000000, \
+ PPC_64B), \
+GEN_HANDLER2(name##3, stringify(name), opc1, opc2 | 0x11, 0xFF, 0x00000000, \
+ PPC_64B)
+GEN_PPC64_R4(rldicl, 0x1E, 0x00),
+GEN_PPC64_R4(rldicr, 0x1E, 0x02),
+GEN_PPC64_R4(rldic, 0x1E, 0x04),
+GEN_PPC64_R2(rldcl, 0x1E, 0x08),
+GEN_PPC64_R2(rldcr, 0x1E, 0x09),
+GEN_PPC64_R4(rldimi, 0x1E, 0x06),
+#endif
+
+#undef GEN_LD
+#undef GEN_LDU
+#undef GEN_LDUX
+#undef GEN_LDX_E
+#undef GEN_LDS
+#define GEN_LD(name, ldop, opc, type) \
+GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type),
+#define GEN_LDU(name, ldop, opc, type) \
+GEN_HANDLER(name##u, opc, 0xFF, 0xFF, 0x00000000, type),
+#define GEN_LDUX(name, ldop, opc2, opc3, type) \
+GEN_HANDLER(name##ux, 0x1F, opc2, opc3, 0x00000001, type),
+#define GEN_LDX_E(name, ldop, opc2, opc3, type, type2, chk) \
+GEN_HANDLER_E(name##x, 0x1F, opc2, opc3, 0x00000001, type, type2),
+#define GEN_LDS(name, ldop, op, type) \
+GEN_LD(name, ldop, op | 0x20, type) \
+GEN_LDU(name, ldop, op | 0x21, type) \
+GEN_LDUX(name, ldop, 0x17, op | 0x01, type) \
+GEN_LDX(name, ldop, 0x17, op | 0x00, type)
+
+GEN_LDS(lbz, ld8u, 0x02, PPC_INTEGER)
+GEN_LDS(lha, ld16s, 0x0A, PPC_INTEGER)
+GEN_LDS(lhz, ld16u, 0x08, PPC_INTEGER)
+GEN_LDS(lwz, ld32u, 0x00, PPC_INTEGER)
+#if defined(TARGET_PPC64)
+GEN_LDUX(lwa, ld32s, 0x15, 0x0B, PPC_64B)
+GEN_LDX(lwa, ld32s, 0x15, 0x0A, PPC_64B)
+GEN_LDUX(ld, ld64_i64, 0x15, 0x01, PPC_64B)
+GEN_LDX(ld, ld64_i64, 0x15, 0x00, PPC_64B)
+GEN_LDX_E(ldbr, ld64ur_i64, 0x14, 0x10, PPC_NONE, PPC2_DBRX, CHK_NONE)
+
+/* HV/P7 and later only */
+GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST)
+GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x18, PPC_CILDST)
+GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST)
+GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST)
+#endif
+GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER)
+GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER)
+
+#undef GEN_ST
+#undef GEN_STU
+#undef GEN_STUX
+#undef GEN_STX_E
+#undef GEN_STS
+#define GEN_ST(name, stop, opc, type) \
+GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type),
+#define GEN_STU(name, stop, opc, type) \
+GEN_HANDLER(stop##u, opc, 0xFF, 0xFF, 0x00000000, type),
+#define GEN_STUX(name, stop, opc2, opc3, type) \
+GEN_HANDLER(name##ux, 0x1F, opc2, opc3, 0x00000001, type),
+#define GEN_STX_E(name, stop, opc2, opc3, type, type2, chk) \
+GEN_HANDLER_E(name##x, 0x1F, opc2, opc3, 0x00000001, type, type2),
+#define GEN_STS(name, stop, op, type) \
+GEN_ST(name, stop, op | 0x20, type) \
+GEN_STU(name, stop, op | 0x21, type) \
+GEN_STUX(name, stop, 0x17, op | 0x01, type) \
+GEN_STX(name, stop, 0x17, op | 0x00, type)
+
+GEN_STS(stb, st8, 0x06, PPC_INTEGER)
+GEN_STS(sth, st16, 0x0C, PPC_INTEGER)
+GEN_STS(stw, st32, 0x04, PPC_INTEGER)
+#if defined(TARGET_PPC64)
+GEN_STUX(std, st64_i64, 0x15, 0x05, PPC_64B)
+GEN_STX(std, st64_i64, 0x15, 0x04, PPC_64B)
+GEN_STX_E(stdbr, st64r_i64, 0x14, 0x14, PPC_NONE, PPC2_DBRX, CHK_NONE)
+GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST)
+GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST)
+GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST)
+GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST)
+#endif
+GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER)
+GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER)
+
+#undef GEN_CRLOGIC
+#define GEN_CRLOGIC(name, tcg_op, opc) \
+GEN_HANDLER(name, 0x13, 0x01, opc, 0x00000001, PPC_INTEGER)
+GEN_CRLOGIC(crand, tcg_gen_and_i32, 0x08),
+GEN_CRLOGIC(crandc, tcg_gen_andc_i32, 0x04),
+GEN_CRLOGIC(creqv, tcg_gen_eqv_i32, 0x09),
+GEN_CRLOGIC(crnand, tcg_gen_nand_i32, 0x07),
+GEN_CRLOGIC(crnor, tcg_gen_nor_i32, 0x01),
+GEN_CRLOGIC(cror, tcg_gen_or_i32, 0x0E),
+GEN_CRLOGIC(crorc, tcg_gen_orc_i32, 0x0D),
+GEN_CRLOGIC(crxor, tcg_gen_xor_i32, 0x06),
+
+#undef GEN_MAC_HANDLER
+#define GEN_MAC_HANDLER(name, opc2, opc3) \
+GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_405_MAC)
+GEN_MAC_HANDLER(macchw, 0x0C, 0x05),
+GEN_MAC_HANDLER(macchwo, 0x0C, 0x15),
+GEN_MAC_HANDLER(macchws, 0x0C, 0x07),
+GEN_MAC_HANDLER(macchwso, 0x0C, 0x17),
+GEN_MAC_HANDLER(macchwsu, 0x0C, 0x06),
+GEN_MAC_HANDLER(macchwsuo, 0x0C, 0x16),
+GEN_MAC_HANDLER(macchwu, 0x0C, 0x04),
+GEN_MAC_HANDLER(macchwuo, 0x0C, 0x14),
+GEN_MAC_HANDLER(machhw, 0x0C, 0x01),
+GEN_MAC_HANDLER(machhwo, 0x0C, 0x11),
+GEN_MAC_HANDLER(machhws, 0x0C, 0x03),
+GEN_MAC_HANDLER(machhwso, 0x0C, 0x13),
+GEN_MAC_HANDLER(machhwsu, 0x0C, 0x02),
+GEN_MAC_HANDLER(machhwsuo, 0x0C, 0x12),
+GEN_MAC_HANDLER(machhwu, 0x0C, 0x00),
+GEN_MAC_HANDLER(machhwuo, 0x0C, 0x10),
+GEN_MAC_HANDLER(maclhw, 0x0C, 0x0D),
+GEN_MAC_HANDLER(maclhwo, 0x0C, 0x1D),
+GEN_MAC_HANDLER(maclhws, 0x0C, 0x0F),
+GEN_MAC_HANDLER(maclhwso, 0x0C, 0x1F),
+GEN_MAC_HANDLER(maclhwu, 0x0C, 0x0C),
+GEN_MAC_HANDLER(maclhwuo, 0x0C, 0x1C),
+GEN_MAC_HANDLER(maclhwsu, 0x0C, 0x0E),
+GEN_MAC_HANDLER(maclhwsuo, 0x0C, 0x1E),
+GEN_MAC_HANDLER(nmacchw, 0x0E, 0x05),
+GEN_MAC_HANDLER(nmacchwo, 0x0E, 0x15),
+GEN_MAC_HANDLER(nmacchws, 0x0E, 0x07),
+GEN_MAC_HANDLER(nmacchwso, 0x0E, 0x17),
+GEN_MAC_HANDLER(nmachhw, 0x0E, 0x01),
+GEN_MAC_HANDLER(nmachhwo, 0x0E, 0x11),
+GEN_MAC_HANDLER(nmachhws, 0x0E, 0x03),
+GEN_MAC_HANDLER(nmachhwso, 0x0E, 0x13),
+GEN_MAC_HANDLER(nmaclhw, 0x0E, 0x0D),
+GEN_MAC_HANDLER(nmaclhwo, 0x0E, 0x1D),
+GEN_MAC_HANDLER(nmaclhws, 0x0E, 0x0F),
+GEN_MAC_HANDLER(nmaclhwso, 0x0E, 0x1F),
+GEN_MAC_HANDLER(mulchw, 0x08, 0x05),
+GEN_MAC_HANDLER(mulchwu, 0x08, 0x04),
+GEN_MAC_HANDLER(mulhhw, 0x08, 0x01),
+GEN_MAC_HANDLER(mulhhwu, 0x08, 0x00),
+GEN_MAC_HANDLER(mullhw, 0x08, 0x0D),
+GEN_MAC_HANDLER(mullhwu, 0x08, 0x0C),
+
+GEN_HANDLER2_E(tbegin, "tbegin", 0x1F, 0x0E, 0x14, 0x01DFF800, \
+ PPC_NONE, PPC2_TM),
+GEN_HANDLER2_E(tend, "tend", 0x1F, 0x0E, 0x15, 0x01FFF800, \
+ PPC_NONE, PPC2_TM),
+GEN_HANDLER2_E(tabort, "tabort", 0x1F, 0x0E, 0x1C, 0x03E0F800, \
+ PPC_NONE, PPC2_TM),
+GEN_HANDLER2_E(tabortwc, "tabortwc", 0x1F, 0x0E, 0x18, 0x00000000, \
+ PPC_NONE, PPC2_TM),
+GEN_HANDLER2_E(tabortwci, "tabortwci", 0x1F, 0x0E, 0x1A, 0x00000000, \
+ PPC_NONE, PPC2_TM),
+GEN_HANDLER2_E(tabortdc, "tabortdc", 0x1F, 0x0E, 0x19, 0x00000000, \
+ PPC_NONE, PPC2_TM),
+GEN_HANDLER2_E(tabortdci, "tabortdci", 0x1F, 0x0E, 0x1B, 0x00000000, \
+ PPC_NONE, PPC2_TM),
+GEN_HANDLER2_E(tsr, "tsr", 0x1F, 0x0E, 0x17, 0x03DFF800, \
+ PPC_NONE, PPC2_TM),
+GEN_HANDLER2_E(tcheck, "tcheck", 0x1F, 0x0E, 0x16, 0x007FF800, \
+ PPC_NONE, PPC2_TM),
+GEN_HANDLER2_E(treclaim, "treclaim", 0x1F, 0x0E, 0x1D, 0x03E0F800, \
+ PPC_NONE, PPC2_TM),
+GEN_HANDLER2_E(trechkpt, "trechkpt", 0x1F, 0x0E, 0x1F, 0x03FFF800, \
+ PPC_NONE, PPC2_TM),
+
+#include "translate/fp-ops.inc.c"
+
+#include "translate/vmx-ops.inc.c"
+
+#include "translate/vsx-ops.inc.c"
+
+#include "translate/dfp-ops.inc.c"
+
+#include "translate/spe-ops.inc.c"
+};
+
+#include "helper_regs.h"
+#include "translate_init.c"
+
+/*****************************************************************************/
+/* Misc PowerPC helpers */
+void ppc_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
+ int flags)
+{
+#define RGPL 4
+#define RFPL 4
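+/* RGPL/RFPL: number of GPR/FPR values printed per line of the dump */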
+
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ int i;
+
+ cpu_fprintf(f, "NIP " TARGET_FMT_lx " LR " TARGET_FMT_lx " CTR "
+ TARGET_FMT_lx " XER " TARGET_FMT_lx " CPU#%d\n",
+ env->nip, env->lr, env->ctr, cpu_read_xer(env),
+ cs->cpu_index);
+ cpu_fprintf(f, "MSR " TARGET_FMT_lx " HID0 " TARGET_FMT_lx " HF "
+ TARGET_FMT_lx " iidx %d didx %d\n",
+ env->msr, env->spr[SPR_HID0],
+ env->hflags, env->immu_idx, env->dmmu_idx);
+#if !defined(NO_TIMER_DUMP)
+ cpu_fprintf(f, "TB %08" PRIu32 " %08" PRIu64
+#if !defined(CONFIG_USER_ONLY)
+ " DECR %08" PRIu32
+#endif
+ "\n",
+ cpu_ppc_load_tbu(env), cpu_ppc_load_tbl(env)
+#if !defined(CONFIG_USER_ONLY)
+ , cpu_ppc_load_decr(env)
+#endif
+ );
+#endif
+ for (i = 0; i < 32; i++) {
+ if ((i & (RGPL - 1)) == 0)
+ cpu_fprintf(f, "GPR%02d", i);
+ cpu_fprintf(f, " %016" PRIx64, ppc_dump_gpr(env, i));
+ if ((i & (RGPL - 1)) == (RGPL - 1))
+ cpu_fprintf(f, "\n");
+ }
+ cpu_fprintf(f, "CR ");
+ for (i = 0; i < 8; i++)
+ cpu_fprintf(f, "%01x", env->crf[i]);
+ cpu_fprintf(f, " [");
+ for (i = 0; i < 8; i++) {
+ char a = '-';
+ if (env->crf[i] & 0x08)
+ a = 'L';
+ else if (env->crf[i] & 0x04)
+ a = 'G';
+ else if (env->crf[i] & 0x02)
+ a = 'E';
+ cpu_fprintf(f, " %c%c", a, env->crf[i] & 0x01 ? 'O' : ' ');
+ }
+ cpu_fprintf(f, " ] RES " TARGET_FMT_lx "\n",
+ env->reserve_addr);
+ for (i = 0; i < 32; i++) {
+ if ((i & (RFPL - 1)) == 0)
+ cpu_fprintf(f, "FPR%02d", i);
+ cpu_fprintf(f, " %016" PRIx64, *((uint64_t *)&env->fpr[i]));
+ if ((i & (RFPL - 1)) == (RFPL - 1))
+ cpu_fprintf(f, "\n");
+ }
+ cpu_fprintf(f, "FPSCR " TARGET_FMT_lx "\n", env->fpscr);
+#if !defined(CONFIG_USER_ONLY)
+ cpu_fprintf(f, " SRR0 " TARGET_FMT_lx " SRR1 " TARGET_FMT_lx
+ " PVR " TARGET_FMT_lx " VRSAVE " TARGET_FMT_lx "\n",
+ env->spr[SPR_SRR0], env->spr[SPR_SRR1],
+ env->spr[SPR_PVR], env->spr[SPR_VRSAVE]);
+
+ cpu_fprintf(f, "SPRG0 " TARGET_FMT_lx " SPRG1 " TARGET_FMT_lx
+ " SPRG2 " TARGET_FMT_lx " SPRG3 " TARGET_FMT_lx "\n",
+ env->spr[SPR_SPRG0], env->spr[SPR_SPRG1],
+ env->spr[SPR_SPRG2], env->spr[SPR_SPRG3]);
+
+ cpu_fprintf(f, "SPRG4 " TARGET_FMT_lx " SPRG5 " TARGET_FMT_lx
+ " SPRG6 " TARGET_FMT_lx " SPRG7 " TARGET_FMT_lx "\n",
+ env->spr[SPR_SPRG4], env->spr[SPR_SPRG5],
+ env->spr[SPR_SPRG6], env->spr[SPR_SPRG7]);
+
+#if defined(TARGET_PPC64)
+ if (env->excp_model == POWERPC_EXCP_POWER7 ||
+ env->excp_model == POWERPC_EXCP_POWER8) {
+ cpu_fprintf(f, "HSRR0 " TARGET_FMT_lx " HSRR1 " TARGET_FMT_lx "\n",
+ env->spr[SPR_HSRR0], env->spr[SPR_HSRR1]);
+ }
+#endif
+ if (env->excp_model == POWERPC_EXCP_BOOKE) {
+ cpu_fprintf(f, "CSRR0 " TARGET_FMT_lx " CSRR1 " TARGET_FMT_lx
+ " MCSRR0 " TARGET_FMT_lx " MCSRR1 " TARGET_FMT_lx "\n",
+ env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1],
+ env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1]);
+
+ cpu_fprintf(f, " TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx
+ " ESR " TARGET_FMT_lx " DEAR " TARGET_FMT_lx "\n",
+ env->spr[SPR_BOOKE_TCR], env->spr[SPR_BOOKE_TSR],
+ env->spr[SPR_BOOKE_ESR], env->spr[SPR_BOOKE_DEAR]);
+
+ cpu_fprintf(f, " PIR " TARGET_FMT_lx " DECAR " TARGET_FMT_lx
+ " IVPR " TARGET_FMT_lx " EPCR " TARGET_FMT_lx "\n",
+ env->spr[SPR_BOOKE_PIR], env->spr[SPR_BOOKE_DECAR],
+ env->spr[SPR_BOOKE_IVPR], env->spr[SPR_BOOKE_EPCR]);
+
+ cpu_fprintf(f, " MCSR " TARGET_FMT_lx " SPRG8 " TARGET_FMT_lx
+ " EPR " TARGET_FMT_lx "\n",
+ env->spr[SPR_BOOKE_MCSR], env->spr[SPR_BOOKE_SPRG8],
+ env->spr[SPR_BOOKE_EPR]);
+
+ /* FSL-specific */
+ cpu_fprintf(f, " MCAR " TARGET_FMT_lx " PID1 " TARGET_FMT_lx
+ " PID2 " TARGET_FMT_lx " SVR " TARGET_FMT_lx "\n",
+ env->spr[SPR_Exxx_MCAR], env->spr[SPR_BOOKE_PID1],
+ env->spr[SPR_BOOKE_PID2], env->spr[SPR_E500_SVR]);
+
+ /*
+ * IVORs are left out as they are large and do not change often --
+ * they can be read with "p $ivor0", "p $ivor1", etc.
+ */
+ }
+
+#if defined(TARGET_PPC64)
+ if (env->flags & POWERPC_FLAG_CFAR) {
+ cpu_fprintf(f, " CFAR " TARGET_FMT_lx"\n", env->cfar);
+ }
+#endif
+
+ switch (env->mmu_model) {
+ case POWERPC_MMU_32B:
+ case POWERPC_MMU_601:
+ case POWERPC_MMU_SOFT_6xx:
+ case POWERPC_MMU_SOFT_74xx:
+#if defined(TARGET_PPC64)
+ case POWERPC_MMU_64B:
+ case POWERPC_MMU_2_03:
+ case POWERPC_MMU_2_06:
+ case POWERPC_MMU_2_06a:
+ case POWERPC_MMU_2_07:
+ case POWERPC_MMU_2_07a:
+#endif
+ cpu_fprintf(f, " SDR1 " TARGET_FMT_lx " DAR " TARGET_FMT_lx
+ " DSISR " TARGET_FMT_lx "\n", env->spr[SPR_SDR1],
+ env->spr[SPR_DAR], env->spr[SPR_DSISR]);
+ break;
+ case POWERPC_MMU_BOOKE206:
+ cpu_fprintf(f, " MAS0 " TARGET_FMT_lx " MAS1 " TARGET_FMT_lx
+ " MAS2 " TARGET_FMT_lx " MAS3 " TARGET_FMT_lx "\n",
+ env->spr[SPR_BOOKE_MAS0], env->spr[SPR_BOOKE_MAS1],
+ env->spr[SPR_BOOKE_MAS2], env->spr[SPR_BOOKE_MAS3]);
+
+ cpu_fprintf(f, " MAS4 " TARGET_FMT_lx " MAS6 " TARGET_FMT_lx
+ " MAS7 " TARGET_FMT_lx " PID " TARGET_FMT_lx "\n",
+ env->spr[SPR_BOOKE_MAS4], env->spr[SPR_BOOKE_MAS6],
+ env->spr[SPR_BOOKE_MAS7], env->spr[SPR_BOOKE_PID]);
+
+ cpu_fprintf(f, "MMUCFG " TARGET_FMT_lx " TLB0CFG " TARGET_FMT_lx
+ " TLB1CFG " TARGET_FMT_lx "\n",
+ env->spr[SPR_MMUCFG], env->spr[SPR_BOOKE_TLB0CFG],
+ env->spr[SPR_BOOKE_TLB1CFG]);
+ break;
+ default:
+ break;
+ }
+#endif
+
+#undef RGPL
+#undef RFPL
+}
+
+void ppc_cpu_dump_statistics(CPUState *cs, FILE *f,
+ fprintf_function cpu_fprintf, int flags)
+{
+#if defined(DO_PPC_STATISTICS)
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ opc_handler_t **t1, **t2, **t3, *handler;
+ int op1, op2, op3;
+
+ t1 = cpu->env.opcodes;
+ for (op1 = 0; op1 < 64; op1++) {
+ handler = t1[op1];
+ if (is_indirect_opcode(handler)) {
+ t2 = ind_table(handler);
+ for (op2 = 0; op2 < 32; op2++) {
+ handler = t2[op2];
+ if (is_indirect_opcode(handler)) {
+ t3 = ind_table(handler);
+ for (op3 = 0; op3 < 32; op3++) {
+ handler = t3[op3];
+ if (handler->count == 0)
+ continue;
+ cpu_fprintf(f, "%02x %02x %02x (%02x %04d) %16s: "
+ "%016" PRIx64 " %" PRId64 "\n",
+ op1, op2, op3, op1, (op3 << 5) | op2,
+ handler->oname,
+ handler->count, handler->count);
+ }
+ } else {
+ if (handler->count == 0)
+ continue;
+ cpu_fprintf(f, "%02x %02x (%02x %04d) %16s: "
+ "%016" PRIx64 " %" PRId64 "\n",
+ op1, op2, op1, op2, handler->oname,
+ handler->count, handler->count);
+ }
+ }
+ } else {
+ if (handler->count == 0)
+ continue;
+ cpu_fprintf(f, "%02x (%02x ) %16s: %016" PRIx64
+ " %" PRId64 "\n",
+ op1, op1, handler->oname,
+ handler->count, handler->count);
+ }
+ }
+#endif
+}
+
+/*****************************************************************************/
+void gen_intermediate_code(CPUPPCState *env, struct TranslationBlock *tb)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ DisasContext ctx, *ctxp = &ctx;
+ opc_handler_t **table, *handler;
+ target_ulong pc_start;
+ int num_insns;
+ int max_insns;
+
+ pc_start = tb->pc;
+ ctx.nip = pc_start;
+ ctx.tb = tb;
+ ctx.exception = POWERPC_EXCP_NONE;
+ ctx.spr_cb = env->spr_cb;
+ ctx.pr = msr_pr;
+ ctx.mem_idx = env->dmmu_idx;
+ ctx.dr = msr_dr;
+#if !defined(CONFIG_USER_ONLY)
+ ctx.hv = msr_hv || !env->has_hv_mode;
+#endif
+ ctx.insns_flags = env->insns_flags;
+ ctx.insns_flags2 = env->insns_flags2;
+ ctx.access_type = -1;
+ ctx.need_access_type = !(env->mmu_model & POWERPC_MMU_64B);
+ ctx.le_mode = !!(env->hflags & (1 << MSR_LE));
+ ctx.default_tcg_memop_mask = ctx.le_mode ? MO_LE : MO_BE;
+#if defined(TARGET_PPC64)
+ ctx.sf_mode = msr_is_64bit(env, env->msr);
+ ctx.has_cfar = !!(env->flags & POWERPC_FLAG_CFAR);
+#endif
+ if (env->mmu_model == POWERPC_MMU_32B ||
+ env->mmu_model == POWERPC_MMU_601 ||
+ (env->mmu_model & POWERPC_MMU_64B))
+ ctx.lazy_tlb_flush = true;
+
+ ctx.fpu_enabled = !!msr_fp;
+ if ((env->flags & POWERPC_FLAG_SPE) && msr_spe)
+ ctx.spe_enabled = !!msr_spe;
+ else
+ ctx.spe_enabled = false;
+ if ((env->flags & POWERPC_FLAG_VRE) && msr_vr)
+ ctx.altivec_enabled = !!msr_vr;
+ else
+ ctx.altivec_enabled = false;
+ if ((env->flags & POWERPC_FLAG_VSX) && msr_vsx) {
+ ctx.vsx_enabled = !!msr_vsx;
+ } else {
+ ctx.vsx_enabled = false;
+ }
+#if defined(TARGET_PPC64)
+ if ((env->flags & POWERPC_FLAG_TM) && msr_tm) {
+ ctx.tm_enabled = !!msr_tm;
+ } else {
+ ctx.tm_enabled = false;
+ }
+#endif
+ if ((env->flags & POWERPC_FLAG_SE) && msr_se)
+ ctx.singlestep_enabled = CPU_SINGLE_STEP;
+ else
+ ctx.singlestep_enabled = 0;
+ if ((env->flags & POWERPC_FLAG_BE) && msr_be)
+ ctx.singlestep_enabled |= CPU_BRANCH_STEP;
+ if (unlikely(cs->singlestep_enabled)) {
+ ctx.singlestep_enabled |= GDBSTUB_SINGLE_STEP;
+ }
+#if defined (DO_SINGLE_STEP) && 0
+ /* Single step trace mode */
+ msr_se = 1;
+#endif
+ num_insns = 0;
+ max_insns = tb->cflags & CF_COUNT_MASK;
+ if (max_insns == 0) {
+ max_insns = CF_COUNT_MASK;
+ }
+ if (max_insns > TCG_MAX_INSNS) {
+ max_insns = TCG_MAX_INSNS;
+ }
+
+ gen_tb_start(tb);
+ tcg_clear_temp_count();
+ /* Set env in case of segfault during code fetch */
+ while (ctx.exception == POWERPC_EXCP_NONE && !tcg_op_buf_full()) {
+ tcg_gen_insn_start(ctx.nip);
+ num_insns++;
+
+ if (unlikely(cpu_breakpoint_test(cs, ctx.nip, BP_ANY))) {
+ gen_debug_exception(ctxp);
+ /* The address covered by the breakpoint must be included in
+               [tb->pc, tb->pc + tb->size) in order for it to be
+ properly cleared -- thus we increment the PC here so that
+ the logic setting tb->size below does the right thing. */
+ ctx.nip += 4;
+ break;
+ }
+
+ LOG_DISAS("----------------\n");
+ LOG_DISAS("nip=" TARGET_FMT_lx " super=%d ir=%d\n",
+ ctx.nip, ctx.mem_idx, (int)msr_ir);
+ if (num_insns == max_insns && (tb->cflags & CF_LAST_IO))
+ gen_io_start();
+ if (unlikely(need_byteswap(&ctx))) {
+ ctx.opcode = bswap32(cpu_ldl_code(env, ctx.nip));
+ } else {
+ ctx.opcode = cpu_ldl_code(env, ctx.nip);
+ }
+ LOG_DISAS("translate opcode %08x (%02x %02x %02x %02x) (%s)\n",
+ ctx.opcode, opc1(ctx.opcode), opc2(ctx.opcode),
+ opc3(ctx.opcode), opc4(ctx.opcode),
+ ctx.le_mode ? "little" : "big");
+ ctx.nip += 4;
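+        /*
+         * Decode through the opcode tables: opc1 selects the top-level
+         * entry, which may chain through up to three further indirect
+         * tables indexed by opc2, opc3 and opc4 before a concrete
+         * handler is reached.
+         */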
+ table = env->opcodes;
+ handler = table[opc1(ctx.opcode)];
+ if (is_indirect_opcode(handler)) {
+ table = ind_table(handler);
+ handler = table[opc2(ctx.opcode)];
+ if (is_indirect_opcode(handler)) {
+ table = ind_table(handler);
+ handler = table[opc3(ctx.opcode)];
+ if (is_indirect_opcode(handler)) {
+ table = ind_table(handler);
+ handler = table[opc4(ctx.opcode)];
+ }
+ }
+ }
+        /* Is opcode *REALLY* valid? */
+ if (unlikely(handler->handler == &gen_invalid)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "invalid/unsupported opcode: "
+ "%02x - %02x - %02x - %02x (%08x) "
+ TARGET_FMT_lx " %d\n",
+ opc1(ctx.opcode), opc2(ctx.opcode),
+ opc3(ctx.opcode), opc4(ctx.opcode),
+ ctx.opcode, ctx.nip - 4, (int)msr_ir);
+ } else {
+ uint32_t inval;
+
+            if (unlikely(handler->type & (PPC_SPE | PPC_SPE_SINGLE |
+                                          PPC_SPE_DOUBLE) &&
+                         Rc(ctx->opcode))) {
+ inval = handler->inval2;
+ } else {
+ inval = handler->inval1;
+ }
+
+ if (unlikely((ctx.opcode & inval) != 0)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "invalid bits: %08x for opcode: "
+ "%02x - %02x - %02x - %02x (%08x) "
+ TARGET_FMT_lx "\n", ctx.opcode & inval,
+ opc1(ctx.opcode), opc2(ctx.opcode),
+ opc3(ctx.opcode), opc4(ctx.opcode),
+ ctx.opcode, ctx.nip - 4);
+ gen_inval_exception(ctxp, POWERPC_EXCP_INVAL_INVAL);
+ break;
+ }
+ }
+ (*(handler->handler))(&ctx);
+#if defined(DO_PPC_STATISTICS)
+ handler->count++;
+#endif
+ /* Check trace mode exceptions */
+ if (unlikely(ctx.singlestep_enabled & CPU_SINGLE_STEP &&
+ (ctx.nip <= 0x100 || ctx.nip > 0xF00) &&
+ ctx.exception != POWERPC_SYSCALL &&
+ ctx.exception != POWERPC_EXCP_TRAP &&
+ ctx.exception != POWERPC_EXCP_BRANCH)) {
+ gen_exception_nip(ctxp, POWERPC_EXCP_TRACE, ctx.nip);
+ } else if (unlikely(((ctx.nip & (TARGET_PAGE_SIZE - 1)) == 0) ||
+ (cs->singlestep_enabled) ||
+ singlestep ||
+ num_insns >= max_insns)) {
+ /* if we reach a page boundary or are single stepping, stop
+ * generation
+ */
+ break;
+ }
+ if (tcg_check_temp_count()) {
+ fprintf(stderr, "Opcode %02x %02x %02x %02x (%08x) leaked "
+ "temporaries\n", opc1(ctx.opcode), opc2(ctx.opcode),
+ opc3(ctx.opcode), opc4(ctx.opcode), ctx.opcode);
+ exit(1);
+ }
+ }
+ if (tb->cflags & CF_LAST_IO)
+ gen_io_end();
+ if (ctx.exception == POWERPC_EXCP_NONE) {
+ gen_goto_tb(&ctx, 0, ctx.nip);
+ } else if (ctx.exception != POWERPC_EXCP_BRANCH) {
+ if (unlikely(cs->singlestep_enabled)) {
+ gen_debug_exception(ctxp);
+ }
+ /* Generate the return instruction */
+ tcg_gen_exit_tb(0);
+ }
+ gen_tb_end(tb, num_insns);
+
+ tb->size = ctx.nip - pc_start;
+ tb->icount = num_insns;
+
+#if defined(DEBUG_DISAS)
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
+ && qemu_log_in_addr_range(pc_start)) {
+ int flags;
+ flags = env->bfd_mach;
+ flags |= ctx.le_mode << 16;
+ qemu_log_lock();
+ qemu_log("IN: %s\n", lookup_symbol(pc_start));
+ log_target_disas(cs, pc_start, ctx.nip - pc_start, flags);
+ qemu_log("\n");
+ qemu_log_unlock();
+ }
+#endif
+}
+
+void restore_state_to_opc(CPUPPCState *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ env->nip = data[0];
+}
diff --git a/target/ppc/translate/dfp-impl.inc.c b/target/ppc/translate/dfp-impl.inc.c
new file mode 100644
index 0000000000..178d3044a7
--- /dev/null
+++ b/target/ppc/translate/dfp-impl.inc.c
@@ -0,0 +1,232 @@
+/*** Decimal Floating Point ***/
+
+static inline TCGv_ptr gen_fprp_ptr(int reg)
+{
+ TCGv_ptr r = tcg_temp_new_ptr();
+ tcg_gen_addi_ptr(r, cpu_env, offsetof(CPUPPCState, fpr[reg]));
+ return r;
+}
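+
+/*
+ * The GEN_DFP_* macros below expand into one translator per DFP instruction
+ * form.  Each generated function checks that the FPU is enabled, records the
+ * current instruction address in NIP (so a helper-raised exception points at
+ * the right instruction), builds pointers/immediates for the operands, calls
+ * the matching helper, and copies the FPSCR summary bits into CR1 when the
+ * record bit (Rc) is set.
+ */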
+
+#define GEN_DFP_T_A_B_Rc(name) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_ptr rd, ra, rb; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_update_nip(ctx, ctx->nip - 4); \
+ rd = gen_fprp_ptr(rD(ctx->opcode)); \
+ ra = gen_fprp_ptr(rA(ctx->opcode)); \
+ rb = gen_fprp_ptr(rB(ctx->opcode)); \
+ gen_helper_##name(cpu_env, rd, ra, rb); \
+ if (unlikely(Rc(ctx->opcode) != 0)) { \
+ gen_set_cr1_from_fpscr(ctx); \
+ } \
+ tcg_temp_free_ptr(rd); \
+ tcg_temp_free_ptr(ra); \
+ tcg_temp_free_ptr(rb); \
+}
+
+#define GEN_DFP_BF_A_B(name) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_ptr ra, rb; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_update_nip(ctx, ctx->nip - 4); \
+ ra = gen_fprp_ptr(rA(ctx->opcode)); \
+ rb = gen_fprp_ptr(rB(ctx->opcode)); \
+ gen_helper_##name(cpu_crf[crfD(ctx->opcode)], \
+ cpu_env, ra, rb); \
+ tcg_temp_free_ptr(ra); \
+ tcg_temp_free_ptr(rb); \
+}
+
+#define GEN_DFP_BF_I_B(name) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 uim; \
+ TCGv_ptr rb; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_update_nip(ctx, ctx->nip - 4); \
+ uim = tcg_const_i32(UIMM5(ctx->opcode)); \
+ rb = gen_fprp_ptr(rB(ctx->opcode)); \
+ gen_helper_##name(cpu_crf[crfD(ctx->opcode)], \
+ cpu_env, uim, rb); \
+ tcg_temp_free_i32(uim); \
+ tcg_temp_free_ptr(rb); \
+}
+
+#define GEN_DFP_BF_A_DCM(name) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_ptr ra; \
+ TCGv_i32 dcm; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_update_nip(ctx, ctx->nip - 4); \
+ ra = gen_fprp_ptr(rA(ctx->opcode)); \
+ dcm = tcg_const_i32(DCM(ctx->opcode)); \
+ gen_helper_##name(cpu_crf[crfD(ctx->opcode)], \
+ cpu_env, ra, dcm); \
+ tcg_temp_free_ptr(ra); \
+ tcg_temp_free_i32(dcm); \
+}
+
+#define GEN_DFP_T_B_U32_U32_Rc(name, u32f1, u32f2) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_ptr rt, rb; \
+ TCGv_i32 u32_1, u32_2; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_update_nip(ctx, ctx->nip - 4); \
+ rt = gen_fprp_ptr(rD(ctx->opcode)); \
+ rb = gen_fprp_ptr(rB(ctx->opcode)); \
+ u32_1 = tcg_const_i32(u32f1(ctx->opcode)); \
+ u32_2 = tcg_const_i32(u32f2(ctx->opcode)); \
+ gen_helper_##name(cpu_env, rt, rb, u32_1, u32_2); \
+ if (unlikely(Rc(ctx->opcode) != 0)) { \
+ gen_set_cr1_from_fpscr(ctx); \
+ } \
+ tcg_temp_free_ptr(rt); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_i32(u32_1); \
+ tcg_temp_free_i32(u32_2); \
+}
+
+#define GEN_DFP_T_A_B_I32_Rc(name, i32fld) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_ptr rt, ra, rb; \
+ TCGv_i32 i32; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_update_nip(ctx, ctx->nip - 4); \
+ rt = gen_fprp_ptr(rD(ctx->opcode)); \
+ ra = gen_fprp_ptr(rA(ctx->opcode)); \
+ rb = gen_fprp_ptr(rB(ctx->opcode)); \
+ i32 = tcg_const_i32(i32fld(ctx->opcode)); \
+ gen_helper_##name(cpu_env, rt, ra, rb, i32); \
+ if (unlikely(Rc(ctx->opcode) != 0)) { \
+ gen_set_cr1_from_fpscr(ctx); \
+ } \
+ tcg_temp_free_ptr(rt); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(ra); \
+ tcg_temp_free_i32(i32); \
+}
+
+#define GEN_DFP_T_B_Rc(name) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_ptr rt, rb; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_update_nip(ctx, ctx->nip - 4); \
+ rt = gen_fprp_ptr(rD(ctx->opcode)); \
+ rb = gen_fprp_ptr(rB(ctx->opcode)); \
+ gen_helper_##name(cpu_env, rt, rb); \
+ if (unlikely(Rc(ctx->opcode) != 0)) { \
+ gen_set_cr1_from_fpscr(ctx); \
+ } \
+ tcg_temp_free_ptr(rt); \
+ tcg_temp_free_ptr(rb); \
+}
+
+#define GEN_DFP_T_FPR_I32_Rc(name, fprfld, i32fld) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_ptr rt, rs; \
+ TCGv_i32 i32; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_update_nip(ctx, ctx->nip - 4); \
+ rt = gen_fprp_ptr(rD(ctx->opcode)); \
+ rs = gen_fprp_ptr(fprfld(ctx->opcode)); \
+ i32 = tcg_const_i32(i32fld(ctx->opcode)); \
+ gen_helper_##name(cpu_env, rt, rs, i32); \
+ if (unlikely(Rc(ctx->opcode) != 0)) { \
+ gen_set_cr1_from_fpscr(ctx); \
+ } \
+ tcg_temp_free_ptr(rt); \
+ tcg_temp_free_ptr(rs); \
+ tcg_temp_free_i32(i32); \
+}
+
+GEN_DFP_T_A_B_Rc(dadd)
+GEN_DFP_T_A_B_Rc(daddq)
+GEN_DFP_T_A_B_Rc(dsub)
+GEN_DFP_T_A_B_Rc(dsubq)
+GEN_DFP_T_A_B_Rc(dmul)
+GEN_DFP_T_A_B_Rc(dmulq)
+GEN_DFP_T_A_B_Rc(ddiv)
+GEN_DFP_T_A_B_Rc(ddivq)
+GEN_DFP_BF_A_B(dcmpu)
+GEN_DFP_BF_A_B(dcmpuq)
+GEN_DFP_BF_A_B(dcmpo)
+GEN_DFP_BF_A_B(dcmpoq)
+GEN_DFP_BF_A_DCM(dtstdc)
+GEN_DFP_BF_A_DCM(dtstdcq)
+GEN_DFP_BF_A_DCM(dtstdg)
+GEN_DFP_BF_A_DCM(dtstdgq)
+GEN_DFP_BF_A_B(dtstex)
+GEN_DFP_BF_A_B(dtstexq)
+GEN_DFP_BF_A_B(dtstsf)
+GEN_DFP_BF_A_B(dtstsfq)
+GEN_DFP_BF_I_B(dtstsfi)
+GEN_DFP_BF_I_B(dtstsfiq)
+GEN_DFP_T_B_U32_U32_Rc(dquai, SIMM5, RMC)
+GEN_DFP_T_B_U32_U32_Rc(dquaiq, SIMM5, RMC)
+GEN_DFP_T_A_B_I32_Rc(dqua, RMC)
+GEN_DFP_T_A_B_I32_Rc(dquaq, RMC)
+GEN_DFP_T_A_B_I32_Rc(drrnd, RMC)
+GEN_DFP_T_A_B_I32_Rc(drrndq, RMC)
+GEN_DFP_T_B_U32_U32_Rc(drintx, FPW, RMC)
+GEN_DFP_T_B_U32_U32_Rc(drintxq, FPW, RMC)
+GEN_DFP_T_B_U32_U32_Rc(drintn, FPW, RMC)
+GEN_DFP_T_B_U32_U32_Rc(drintnq, FPW, RMC)
+GEN_DFP_T_B_Rc(dctdp)
+GEN_DFP_T_B_Rc(dctqpq)
+GEN_DFP_T_B_Rc(drsp)
+GEN_DFP_T_B_Rc(drdpq)
+GEN_DFP_T_B_Rc(dcffix)
+GEN_DFP_T_B_Rc(dcffixq)
+GEN_DFP_T_B_Rc(dctfix)
+GEN_DFP_T_B_Rc(dctfixq)
+GEN_DFP_T_FPR_I32_Rc(ddedpd, rB, SP)
+GEN_DFP_T_FPR_I32_Rc(ddedpdq, rB, SP)
+GEN_DFP_T_FPR_I32_Rc(denbcd, rB, SP)
+GEN_DFP_T_FPR_I32_Rc(denbcdq, rB, SP)
+GEN_DFP_T_B_Rc(dxex)
+GEN_DFP_T_B_Rc(dxexq)
+GEN_DFP_T_A_B_Rc(diex)
+GEN_DFP_T_A_B_Rc(diexq)
+GEN_DFP_T_FPR_I32_Rc(dscli, rA, DCM)
+GEN_DFP_T_FPR_I32_Rc(dscliq, rA, DCM)
+GEN_DFP_T_FPR_I32_Rc(dscri, rA, DCM)
+GEN_DFP_T_FPR_I32_Rc(dscriq, rA, DCM)
+
+#undef GEN_DFP_T_A_B_Rc
+#undef GEN_DFP_BF_A_B
+#undef GEN_DFP_BF_A_DCM
+#undef GEN_DFP_T_B_U32_U32_Rc
+#undef GEN_DFP_T_A_B_I32_Rc
+#undef GEN_DFP_T_B_Rc
+#undef GEN_DFP_T_FPR_I32_Rc
diff --git a/target/ppc/translate/dfp-ops.inc.c b/target/ppc/translate/dfp-ops.inc.c
new file mode 100644
index 0000000000..6ef38e5712
--- /dev/null
+++ b/target/ppc/translate/dfp-ops.inc.c
@@ -0,0 +1,165 @@
+#define _GEN_DFP_LONG(name, op1, op2, mask) \
+GEN_HANDLER_E(name, 0x3B, op1, op2, mask, PPC_NONE, PPC2_DFP)
+
+#define _GEN_DFP_LONG_300(name, op1, op2, mask) \
+GEN_HANDLER_E(name, 0x3B, op1, op2, mask, PPC_NONE, PPC2_ISA300)
+
+#define _GEN_DFP_LONGx2(name, op1, op2, mask) \
+GEN_HANDLER_E(name, 0x3B, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \
+GEN_HANDLER_E(name, 0x3B, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP)
+
+#define _GEN_DFP_LONGx4(name, op1, op2, mask) \
+GEN_HANDLER_E(name, 0x3B, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \
+GEN_HANDLER_E(name, 0x3B, op1, 0x08 | op2, mask, PPC_NONE, PPC2_DFP), \
+GEN_HANDLER_E(name, 0x3B, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP), \
+GEN_HANDLER_E(name, 0x3B, op1, 0x18 | op2, mask, PPC_NONE, PPC2_DFP)
+
+#define _GEN_DFP_QUAD(name, op1, op2, mask) \
+GEN_HANDLER_E(name, 0x3F, op1, op2, mask, PPC_NONE, PPC2_DFP)
+
+#define _GEN_DFP_QUAD_300(name, op1, op2, mask) \
+GEN_HANDLER_E(name, 0x3F, op1, op2, mask, PPC_NONE, PPC2_ISA300)
+
+#define _GEN_DFP_QUADx2(name, op1, op2, mask) \
+GEN_HANDLER_E(name, 0x3F, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \
+GEN_HANDLER_E(name, 0x3F, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP)
+
+#define _GEN_DFP_QUADx4(name, op1, op2, mask) \
+GEN_HANDLER_E(name, 0x3F, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \
+GEN_HANDLER_E(name, 0x3F, op1, 0x08 | op2, mask, PPC_NONE, PPC2_DFP), \
+GEN_HANDLER_E(name, 0x3F, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP), \
+GEN_HANDLER_E(name, 0x3F, op1, 0x18 | op2, mask, PPC_NONE, PPC2_DFP)
+
+#define GEN_DFP_T_A_B_Rc(name, op1, op2) \
+_GEN_DFP_LONG(name, op1, op2, 0x00000000)
+
+#define GEN_DFP_Tp_Ap_Bp_Rc(name, op1, op2) \
+_GEN_DFP_QUAD(name, op1, op2, 0x00210800)
+
+#define GEN_DFP_Tp_A_Bp_Rc(name, op1, op2) \
+_GEN_DFP_QUAD(name, op1, op2, 0x00200800)
+
+#define GEN_DFP_T_B_Rc(name, op1, op2) \
+_GEN_DFP_LONG(name, op1, op2, 0x001F0000)
+
+#define GEN_DFP_Tp_Bp_Rc(name, op1, op2) \
+_GEN_DFP_QUAD(name, op1, op2, 0x003F0800)
+
+#define GEN_DFP_Tp_B_Rc(name, op1, op2) \
+_GEN_DFP_QUAD(name, op1, op2, 0x003F0000)
+
+#define GEN_DFP_T_Bp_Rc(name, op1, op2) \
+_GEN_DFP_QUAD(name, op1, op2, 0x001F0800)
+
+#define GEN_DFP_BF_A_B(name, op1, op2) \
+_GEN_DFP_LONG(name, op1, op2, 0x00000001)
+
+#define GEN_DFP_BF_A_B_300(name, op1, op2) \
+_GEN_DFP_LONG_300(name, op1, op2, 0x00400001)
+
+#define GEN_DFP_BF_Ap_Bp(name, op1, op2) \
+_GEN_DFP_QUAD(name, op1, op2, 0x00610801)
+
+#define GEN_DFP_BF_A_Bp(name, op1, op2) \
+_GEN_DFP_QUAD(name, op1, op2, 0x00600801)
+
+#define GEN_DFP_BF_A_Bp_300(name, op1, op2) \
+_GEN_DFP_QUAD_300(name, op1, op2, 0x00400001)
+
+#define GEN_DFP_BF_A_DCM(name, op1, op2) \
+_GEN_DFP_LONGx2(name, op1, op2, 0x00600001)
+
+#define GEN_DFP_BF_Ap_DCM(name, op1, op2) \
+_GEN_DFP_QUADx2(name, op1, op2, 0x00610001)
+
+#define GEN_DFP_T_A_B_RMC_Rc(name, op1, op2) \
+_GEN_DFP_LONGx4(name, op1, op2, 0x00000000)
+
+#define GEN_DFP_Tp_Ap_Bp_RMC_Rc(name, op1, op2) \
+_GEN_DFP_QUADx4(name, op1, op2, 0x02010800)
+
+#define GEN_DFP_Tp_A_Bp_RMC_Rc(name, op1, op2) \
+_GEN_DFP_QUADx4(name, op1, op2, 0x02000800)
+
+#define GEN_DFP_TE_T_B_RMC_Rc(name, op1, op2) \
+_GEN_DFP_LONGx4(name, op1, op2, 0x00000000)
+
+#define GEN_DFP_TE_Tp_Bp_RMC_Rc(name, op1, op2) \
+_GEN_DFP_QUADx4(name, op1, op2, 0x00200800)
+
+#define GEN_DFP_R_T_B_RMC_Rc(name, op1, op2) \
+_GEN_DFP_LONGx4(name, op1, op2, 0x001E0000)
+
+#define GEN_DFP_R_Tp_Bp_RMC_Rc(name, op1, op2) \
+_GEN_DFP_QUADx4(name, op1, op2, 0x003E0800)
+
+#define GEN_DFP_SP_T_B_Rc(name, op1, op2) \
+_GEN_DFP_LONG(name, op1, op2, 0x00070000)
+
+#define GEN_DFP_SP_Tp_Bp_Rc(name, op1, op2) \
+_GEN_DFP_QUAD(name, op1, op2, 0x00270800)
+
+#define GEN_DFP_S_T_B_Rc(name, op1, op2) \
+_GEN_DFP_LONG(name, op1, op2, 0x000F0000)
+
+#define GEN_DFP_S_Tp_Bp_Rc(name, op1, op2) \
+_GEN_DFP_QUAD(name, op1, op2, 0x002F0800)
+
+#define GEN_DFP_T_A_SH_Rc(name, op1, op2) \
+_GEN_DFP_LONGx2(name, op1, op2, 0x00000000)
+
+#define GEN_DFP_Tp_Ap_SH_Rc(name, op1, op2) \
+_GEN_DFP_QUADx2(name, op1, op2, 0x00210000)
+
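+/*
+ * The _GEN_DFP_LONG* macros above register handlers under major opcode 0x3B
+ * (the single-FPR, 64-bit forms) and the _GEN_DFP_QUAD* macros under 0x3F
+ * (the FPR-pair, 128-bit forms); the *_300 variants gate the ISA 3.00
+ * instructions.  The x2/x4 variants register the same handler under two or
+ * four opc3 values, presumably because immediate fields such as RMC, SH and
+ * DCM extend into those opcode bits.
+ */
+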
+GEN_DFP_T_A_B_Rc(dadd, 0x02, 0x00),
+GEN_DFP_Tp_Ap_Bp_Rc(daddq, 0x02, 0x00),
+GEN_DFP_T_A_B_Rc(dsub, 0x02, 0x10),
+GEN_DFP_Tp_Ap_Bp_Rc(dsubq, 0x02, 0x10),
+GEN_DFP_T_A_B_Rc(dmul, 0x02, 0x01),
+GEN_DFP_Tp_Ap_Bp_Rc(dmulq, 0x02, 0x01),
+GEN_DFP_T_A_B_Rc(ddiv, 0x02, 0x11),
+GEN_DFP_Tp_Ap_Bp_Rc(ddivq, 0x02, 0x11),
+GEN_DFP_BF_A_B(dcmpu, 0x02, 0x14),
+GEN_DFP_BF_Ap_Bp(dcmpuq, 0x02, 0x14),
+GEN_DFP_BF_A_B(dcmpo, 0x02, 0x04),
+GEN_DFP_BF_Ap_Bp(dcmpoq, 0x02, 0x04),
+GEN_DFP_BF_A_DCM(dtstdc, 0x02, 0x06),
+GEN_DFP_BF_Ap_DCM(dtstdcq, 0x02, 0x06),
+GEN_DFP_BF_A_DCM(dtstdg, 0x02, 0x07),
+GEN_DFP_BF_Ap_DCM(dtstdgq, 0x02, 0x07),
+GEN_DFP_BF_A_B(dtstex, 0x02, 0x05),
+GEN_DFP_BF_Ap_Bp(dtstexq, 0x02, 0x05),
+GEN_DFP_BF_A_B(dtstsf, 0x02, 0x15),
+GEN_DFP_BF_A_Bp(dtstsfq, 0x02, 0x15),
+GEN_DFP_BF_A_B_300(dtstsfi, 0x03, 0x15),
+GEN_DFP_BF_A_Bp_300(dtstsfiq, 0x03, 0x15),
+GEN_DFP_TE_T_B_RMC_Rc(dquai, 0x03, 0x02),
+GEN_DFP_TE_Tp_Bp_RMC_Rc(dquaiq, 0x03, 0x02),
+GEN_DFP_T_A_B_RMC_Rc(dqua, 0x03, 0x00),
+GEN_DFP_Tp_Ap_Bp_RMC_Rc(dquaq, 0x03, 0x00),
+GEN_DFP_T_A_B_RMC_Rc(drrnd, 0x03, 0x01),
+GEN_DFP_Tp_A_Bp_RMC_Rc(drrndq, 0x03, 0x01),
+GEN_DFP_R_T_B_RMC_Rc(drintx, 0x03, 0x03),
+GEN_DFP_R_Tp_Bp_RMC_Rc(drintxq, 0x03, 0x03),
+GEN_DFP_R_T_B_RMC_Rc(drintn, 0x03, 0x07),
+GEN_DFP_R_Tp_Bp_RMC_Rc(drintnq, 0x03, 0x07),
+GEN_DFP_T_B_Rc(dctdp, 0x02, 0x08),
+GEN_DFP_Tp_B_Rc(dctqpq, 0x02, 0x08),
+GEN_DFP_T_B_Rc(drsp, 0x02, 0x18),
+GEN_DFP_Tp_Bp_Rc(drdpq, 0x02, 0x18),
+GEN_DFP_T_B_Rc(dcffix, 0x02, 0x19),
+GEN_DFP_Tp_B_Rc(dcffixq, 0x02, 0x19),
+GEN_DFP_T_B_Rc(dctfix, 0x02, 0x09),
+GEN_DFP_T_Bp_Rc(dctfixq, 0x02, 0x09),
+GEN_DFP_SP_T_B_Rc(ddedpd, 0x02, 0x0a),
+GEN_DFP_SP_Tp_Bp_Rc(ddedpdq, 0x02, 0x0a),
+GEN_DFP_S_T_B_Rc(denbcd, 0x02, 0x1a),
+GEN_DFP_S_Tp_Bp_Rc(denbcdq, 0x02, 0x1a),
+GEN_DFP_T_B_Rc(dxex, 0x02, 0x0b),
+GEN_DFP_T_Bp_Rc(dxexq, 0x02, 0x0b),
+GEN_DFP_T_A_B_Rc(diex, 0x02, 0x1b),
+GEN_DFP_Tp_A_Bp_Rc(diexq, 0x02, 0x1b),
+GEN_DFP_T_A_SH_Rc(dscli, 0x02, 0x02),
+GEN_DFP_Tp_Ap_SH_Rc(dscliq, 0x02, 0x02),
+GEN_DFP_T_A_SH_Rc(dscri, 0x02, 0x03),
+GEN_DFP_Tp_Ap_SH_Rc(dscriq, 0x02, 0x03),
diff --git a/target/ppc/translate/fp-impl.inc.c b/target/ppc/translate/fp-impl.inc.c
new file mode 100644
index 0000000000..872af7b56f
--- /dev/null
+++ b/target/ppc/translate/fp-impl.inc.c
@@ -0,0 +1,1070 @@
+/*
+ * translate-fp.c
+ *
+ * Standard FPU translation
+ */
+
+static inline void gen_reset_fpstatus(void)
+{
+ gen_helper_reset_fpstatus(cpu_env);
+}
+
+static inline void gen_compute_fprf(TCGv_i64 arg)
+{
+ gen_helper_compute_fprf(cpu_env, arg);
+ gen_helper_float_check_status(cpu_env);
+}
+
+#if defined(TARGET_PPC64)
+static void gen_set_cr1_from_fpscr(DisasContext *ctx)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(tmp, cpu_fpscr);
+ tcg_gen_shri_i32(cpu_crf[1], tmp, 28);
+ tcg_temp_free_i32(tmp);
+}
+#else
+static void gen_set_cr1_from_fpscr(DisasContext *ctx)
+{
+ tcg_gen_shri_tl(cpu_crf[1], cpu_fpscr, 28);
+}
+#endif
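+
+/*
+ * With Rc set, the floating-point record forms copy the FPSCR summary bits
+ * (FX, FEX, VX, OX) into CR field 1; gen_set_cr1_from_fpscr() does this by
+ * moving the top nibble of the 32-bit FPSCR image into cpu_crf[1].
+ */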
+
+/*** Floating-Point arithmetic ***/
+#define _GEN_FLOAT_ACB(name, op, op1, op2, isfloat, set_fprf, type) \
+static void gen_f##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_reset_fpstatus(); \
+ gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env, \
+ cpu_fpr[rA(ctx->opcode)], \
+ cpu_fpr[rC(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]); \
+ if (isfloat) { \
+ gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, \
+ cpu_fpr[rD(ctx->opcode)]); \
+ } \
+ if (set_fprf) { \
+ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]); \
+ } \
+ if (unlikely(Rc(ctx->opcode) != 0)) { \
+ gen_set_cr1_from_fpscr(ctx); \
+ } \
+}
+
+#define GEN_FLOAT_ACB(name, op2, set_fprf, type) \
+_GEN_FLOAT_ACB(name, name, 0x3F, op2, 0, set_fprf, type); \
+_GEN_FLOAT_ACB(name##s, name, 0x3B, op2, 1, set_fprf, type);
+
+#define _GEN_FLOAT_AB(name, op, op1, op2, inval, isfloat, set_fprf, type) \
+static void gen_f##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_reset_fpstatus(); \
+ gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env, \
+ cpu_fpr[rA(ctx->opcode)], \
+ cpu_fpr[rB(ctx->opcode)]); \
+ if (isfloat) { \
+ gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, \
+ cpu_fpr[rD(ctx->opcode)]); \
+ } \
+ if (set_fprf) { \
+ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]); \
+ } \
+ if (unlikely(Rc(ctx->opcode) != 0)) { \
+ gen_set_cr1_from_fpscr(ctx); \
+ } \
+}
+#define GEN_FLOAT_AB(name, op2, inval, set_fprf, type) \
+_GEN_FLOAT_AB(name, name, 0x3F, op2, inval, 0, set_fprf, type); \
+_GEN_FLOAT_AB(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);
+
+#define _GEN_FLOAT_AC(name, op, op1, op2, inval, isfloat, set_fprf, type) \
+static void gen_f##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_reset_fpstatus(); \
+ gen_helper_f##op(cpu_fpr[rD(ctx->opcode)], cpu_env, \
+ cpu_fpr[rA(ctx->opcode)], \
+ cpu_fpr[rC(ctx->opcode)]); \
+ if (isfloat) { \
+ gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env, \
+ cpu_fpr[rD(ctx->opcode)]); \
+ } \
+ if (set_fprf) { \
+ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]); \
+ } \
+ if (unlikely(Rc(ctx->opcode) != 0)) { \
+ gen_set_cr1_from_fpscr(ctx); \
+ } \
+}
+#define GEN_FLOAT_AC(name, op2, inval, set_fprf, type) \
+_GEN_FLOAT_AC(name, name, 0x3F, op2, inval, 0, set_fprf, type); \
+_GEN_FLOAT_AC(name##s, name, 0x3B, op2, inval, 1, set_fprf, type);
+
+#define GEN_FLOAT_B(name, op2, op3, set_fprf, type) \
+static void gen_f##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_reset_fpstatus(); \
+ gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_env, \
+ cpu_fpr[rB(ctx->opcode)]); \
+ if (set_fprf) { \
+ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]); \
+ } \
+ if (unlikely(Rc(ctx->opcode) != 0)) { \
+ gen_set_cr1_from_fpscr(ctx); \
+ } \
+}
+
+#define GEN_FLOAT_BS(name, op1, op2, set_fprf, type) \
+static void gen_f##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_reset_fpstatus(); \
+ gen_helper_f##name(cpu_fpr[rD(ctx->opcode)], cpu_env, \
+ cpu_fpr[rB(ctx->opcode)]); \
+ if (set_fprf) { \
+ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]); \
+ } \
+ if (unlikely(Rc(ctx->opcode) != 0)) { \
+ gen_set_cr1_from_fpscr(ctx); \
+ } \
+}
+
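+/*
+ * The macros above cover the arithmetic operand forms: ACB (three sources),
+ * AB and AC (two sources) and B/BS (one source).  The "isfloat" variants are
+ * the single-precision instructions, which additionally round the result
+ * with frsp; "set_fprf" selects whether FPRF and the FP status are
+ * recomputed after the operation.
+ */
+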
+/* fadd - fadds */
+GEN_FLOAT_AB(add, 0x15, 0x000007C0, 1, PPC_FLOAT);
+/* fdiv - fdivs */
+GEN_FLOAT_AB(div, 0x12, 0x000007C0, 1, PPC_FLOAT);
+/* fmul - fmuls */
+GEN_FLOAT_AC(mul, 0x19, 0x0000F800, 1, PPC_FLOAT);
+
+/* fre */
+GEN_FLOAT_BS(re, 0x3F, 0x18, 1, PPC_FLOAT_EXT);
+
+/* fres */
+GEN_FLOAT_BS(res, 0x3B, 0x18, 1, PPC_FLOAT_FRES);
+
+/* frsqrte */
+GEN_FLOAT_BS(rsqrte, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE);
+
+/* frsqrtes */
+static void gen_frsqrtes(DisasContext *ctx)
+{
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ gen_reset_fpstatus();
+ gen_helper_frsqrte(cpu_fpr[rD(ctx->opcode)], cpu_env,
+ cpu_fpr[rB(ctx->opcode)]);
+ gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env,
+ cpu_fpr[rD(ctx->opcode)]);
+ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_cr1_from_fpscr(ctx);
+ }
+}
+
+/* fsel */
+_GEN_FLOAT_ACB(sel, sel, 0x3F, 0x17, 0, 0, PPC_FLOAT_FSEL);
+/* fsub - fsubs */
+GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT);
+/* Optional: */
+
+/* fsqrt */
+static void gen_fsqrt(DisasContext *ctx)
+{
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ gen_reset_fpstatus();
+ gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_env,
+ cpu_fpr[rB(ctx->opcode)]);
+ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_cr1_from_fpscr(ctx);
+ }
+}
+
+static void gen_fsqrts(DisasContext *ctx)
+{
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ gen_reset_fpstatus();
+ gen_helper_fsqrt(cpu_fpr[rD(ctx->opcode)], cpu_env,
+ cpu_fpr[rB(ctx->opcode)]);
+ gen_helper_frsp(cpu_fpr[rD(ctx->opcode)], cpu_env,
+ cpu_fpr[rD(ctx->opcode)]);
+ gen_compute_fprf(cpu_fpr[rD(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ gen_set_cr1_from_fpscr(ctx);
+ }
+}
+
+/*** Floating-Point multiply-and-add ***/
+/* fmadd - fmadds */
+GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT);
+/* fmsub - fmsubs */
+GEN_FLOAT_ACB(msub, 0x1C, 1, PPC_FLOAT);
+/* fnmadd - fnmadds */
+GEN_FLOAT_ACB(nmadd, 0x1F, 1, PPC_FLOAT);
+/* fnmsub - fnmsubs */
+GEN_FLOAT_ACB(nmsub, 0x1E, 1, PPC_FLOAT);
+
+/*** Floating-Point round & convert ***/
+/* fctiw */
+GEN_FLOAT_B(ctiw, 0x0E, 0x00, 0, PPC_FLOAT);
+/* fctiwu */
+GEN_FLOAT_B(ctiwu, 0x0E, 0x04, 0, PPC2_FP_CVT_ISA206);
+/* fctiwz */
+GEN_FLOAT_B(ctiwz, 0x0F, 0x00, 0, PPC_FLOAT);
+/* fctiwuz */
+GEN_FLOAT_B(ctiwuz, 0x0F, 0x04, 0, PPC2_FP_CVT_ISA206);
+/* frsp */
+GEN_FLOAT_B(rsp, 0x0C, 0x00, 1, PPC_FLOAT);
+/* fcfid */
+GEN_FLOAT_B(cfid, 0x0E, 0x1A, 1, PPC2_FP_CVT_S64);
+/* fcfids */
+GEN_FLOAT_B(cfids, 0x0E, 0x1A, 0, PPC2_FP_CVT_ISA206);
+/* fcfidu */
+GEN_FLOAT_B(cfidu, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206);
+/* fcfidus */
+GEN_FLOAT_B(cfidus, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206);
+/* fctid */
+GEN_FLOAT_B(ctid, 0x0E, 0x19, 0, PPC2_FP_CVT_S64);
+/* fctidu */
+GEN_FLOAT_B(ctidu, 0x0E, 0x1D, 0, PPC2_FP_CVT_ISA206);
+/* fctidz */
+GEN_FLOAT_B(ctidz, 0x0F, 0x19, 0, PPC2_FP_CVT_S64);
+/* fctiduz */
+GEN_FLOAT_B(ctiduz, 0x0F, 0x1D, 0, PPC2_FP_CVT_ISA206);
+
+/* frin */
+GEN_FLOAT_B(rin, 0x08, 0x0C, 1, PPC_FLOAT_EXT);
+/* friz */
+GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT);
+/* frip */
+GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT);
+/* frim */
+GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT);
+
+static void gen_ftdiv(DisasContext *ctx)
+{
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ gen_helper_ftdiv(cpu_crf[crfD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)],
+ cpu_fpr[rB(ctx->opcode)]);
+}
+
+static void gen_ftsqrt(DisasContext *ctx)
+{
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ gen_helper_ftsqrt(cpu_crf[crfD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
+}
+
+/*** Floating-Point compare ***/
+
+/* fcmpo */
+static void gen_fcmpo(DisasContext *ctx)
+{
+ TCGv_i32 crf;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ gen_reset_fpstatus();
+ crf = tcg_const_i32(crfD(ctx->opcode));
+ gen_helper_fcmpo(cpu_env, cpu_fpr[rA(ctx->opcode)],
+ cpu_fpr[rB(ctx->opcode)], crf);
+ tcg_temp_free_i32(crf);
+ gen_helper_float_check_status(cpu_env);
+}
+
+/* fcmpu */
+static void gen_fcmpu(DisasContext *ctx)
+{
+ TCGv_i32 crf;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ gen_reset_fpstatus();
+ crf = tcg_const_i32(crfD(ctx->opcode));
+ gen_helper_fcmpu(cpu_env, cpu_fpr[rA(ctx->opcode)],
+ cpu_fpr[rB(ctx->opcode)], crf);
+ tcg_temp_free_i32(crf);
+ gen_helper_float_check_status(cpu_env);
+}
+
+/*** Floating-point move ***/
+/* fabs */
+/* XXX: beware that fabs never checks for NaNs nor updates FPSCR */
+static void gen_fabs(DisasContext *ctx)
+{
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ tcg_gen_andi_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)],
+ ~(1ULL << 63));
+ if (unlikely(Rc(ctx->opcode))) {
+ gen_set_cr1_from_fpscr(ctx);
+ }
+}
+
+/* fmr - fmr. */
+/* XXX: beware that fmr never checks for NaNs nor updates FPSCR */
+static void gen_fmr(DisasContext *ctx)
+{
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ tcg_gen_mov_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)]);
+ if (unlikely(Rc(ctx->opcode))) {
+ gen_set_cr1_from_fpscr(ctx);
+ }
+}
+
+/* fnabs */
+/* XXX: beware that fnabs never checks for NaNs nor updates FPSCR */
+static void gen_fnabs(DisasContext *ctx)
+{
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ tcg_gen_ori_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)],
+ 1ULL << 63);
+ if (unlikely(Rc(ctx->opcode))) {
+ gen_set_cr1_from_fpscr(ctx);
+ }
+}
+
+/* fneg */
+/* XXX: beware that fneg never checks for NaNs nor updates FPSCR */
+static void gen_fneg(DisasContext *ctx)
+{
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ tcg_gen_xori_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rB(ctx->opcode)],
+ 1ULL << 63);
+ if (unlikely(Rc(ctx->opcode))) {
+ gen_set_cr1_from_fpscr(ctx);
+ }
+}
+
+/* fcpsgn: PowerPC 2.05 specification */
+/* XXX: beware that fcpsgn never checks for NaNs nor updates FPSCR */
+static void gen_fcpsgn(DisasContext *ctx)
+{
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ tcg_gen_deposit_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)],
+ cpu_fpr[rB(ctx->opcode)], 0, 63);
+ if (unlikely(Rc(ctx->opcode))) {
+ gen_set_cr1_from_fpscr(ctx);
+ }
+}
+
+static void gen_fmrgew(DisasContext *ctx)
+{
+ TCGv_i64 b0;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ b0 = tcg_temp_new_i64();
+ tcg_gen_shri_i64(b0, cpu_fpr[rB(ctx->opcode)], 32);
+ tcg_gen_deposit_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpr[rA(ctx->opcode)],
+ b0, 0, 32);
+ tcg_temp_free_i64(b0);
+}
+
+static void gen_fmrgow(DisasContext *ctx)
+{
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ tcg_gen_deposit_i64(cpu_fpr[rD(ctx->opcode)],
+ cpu_fpr[rB(ctx->opcode)],
+ cpu_fpr[rA(ctx->opcode)],
+ 32, 32);
+}
+
+/*** Floating-Point status & ctrl register ***/
+
+/* mcrfs */
+static void gen_mcrfs(DisasContext *ctx)
+{
+ TCGv tmp = tcg_temp_new();
+ TCGv_i32 tmask;
+ TCGv_i64 tnew_fpscr = tcg_temp_new_i64();
+ int bfa;
+ int nibble;
+ int shift;
+
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ bfa = crfS(ctx->opcode);
+ nibble = 7 - bfa;
+ shift = 4 * nibble;
+ tcg_gen_shri_tl(tmp, cpu_fpscr, shift);
+ tcg_gen_trunc_tl_i32(cpu_crf[crfD(ctx->opcode)], tmp);
+ tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], 0xf);
+ tcg_temp_free(tmp);
+ tcg_gen_extu_tl_i64(tnew_fpscr, cpu_fpscr);
+ /* Only the exception bits (including FX) should be cleared if read */
+ tcg_gen_andi_i64(tnew_fpscr, tnew_fpscr, ~((0xF << shift) & FP_EX_CLEAR_BITS));
+ /* FEX and VX need to be updated, so don't set fpscr directly */
+ tmask = tcg_const_i32(1 << nibble);
+ gen_helper_store_fpscr(cpu_env, tnew_fpscr, tmask);
+ tcg_temp_free_i32(tmask);
+ tcg_temp_free_i64(tnew_fpscr);
+}
+
+/* mffs */
+static void gen_mffs(DisasContext *ctx)
+{
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ gen_reset_fpstatus();
+ tcg_gen_extu_tl_i64(cpu_fpr[rD(ctx->opcode)], cpu_fpscr);
+ if (unlikely(Rc(ctx->opcode))) {
+ gen_set_cr1_from_fpscr(ctx);
+ }
+}
+
+/* mtfsb0 */
+static void gen_mtfsb0(DisasContext *ctx)
+{
+ uint8_t crb;
+
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ crb = 31 - crbD(ctx->opcode);
+ gen_reset_fpstatus();
+ if (likely(crb != FPSCR_FEX && crb != FPSCR_VX)) {
+ TCGv_i32 t0;
+ t0 = tcg_const_i32(crb);
+ gen_helper_fpscr_clrbit(cpu_env, t0);
+ tcg_temp_free_i32(t0);
+ }
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
+ tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
+ }
+}
+
+/* mtfsb1 */
+static void gen_mtfsb1(DisasContext *ctx)
+{
+ uint8_t crb;
+
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ crb = 31 - crbD(ctx->opcode);
+ gen_reset_fpstatus();
+ /* XXX: we pretend we can only do IEEE floating-point computations */
+ if (likely(crb != FPSCR_FEX && crb != FPSCR_VX && crb != FPSCR_NI)) {
+ TCGv_i32 t0;
+ t0 = tcg_const_i32(crb);
+ gen_helper_fpscr_setbit(cpu_env, t0);
+ tcg_temp_free_i32(t0);
+ }
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
+ tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
+ }
+    /* We can raise a deferred exception */
+ gen_helper_float_check_status(cpu_env);
+}
+
+/* mtfsf */
+static void gen_mtfsf(DisasContext *ctx)
+{
+ TCGv_i32 t0;
+ int flm, l, w;
+
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ flm = FPFLM(ctx->opcode);
+ l = FPL(ctx->opcode);
+ w = FPW(ctx->opcode);
+ if (unlikely(w & !(ctx->insns_flags2 & PPC2_ISA205))) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ return;
+ }
+ gen_reset_fpstatus();
+ if (l) {
+ t0 = tcg_const_i32((ctx->insns_flags2 & PPC2_ISA205) ? 0xffff : 0xff);
+ } else {
+ t0 = tcg_const_i32(flm << (w * 8));
+ }
+ gen_helper_store_fpscr(cpu_env, cpu_fpr[rB(ctx->opcode)], t0);
+ tcg_temp_free_i32(t0);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
+ tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
+ }
+    /* We can raise a deferred exception */
+ gen_helper_float_check_status(cpu_env);
+}
+
+/* mtfsfi */
+static void gen_mtfsfi(DisasContext *ctx)
+{
+ int bf, sh, w;
+ TCGv_i64 t0;
+ TCGv_i32 t1;
+
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ w = FPW(ctx->opcode);
+ bf = FPBF(ctx->opcode);
+ if (unlikely(w & !(ctx->insns_flags2 & PPC2_ISA205))) {
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+ return;
+ }
+ sh = (8 * w) + 7 - bf;
+ gen_reset_fpstatus();
+ t0 = tcg_const_i64(((uint64_t)FPIMM(ctx->opcode)) << (4 * sh));
+ t1 = tcg_const_i32(1 << sh);
+ gen_helper_store_fpscr(cpu_env, t0, t1);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i32(t1);
+ if (unlikely(Rc(ctx->opcode) != 0)) {
+ tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr);
+ tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX);
+ }
+    /* We can raise a deferred exception */
+ gen_helper_float_check_status(cpu_env);
+}
+
+/*** Floating-point load ***/
+#define GEN_LDF(name, ldop, opc, type) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
+ EA = tcg_temp_new(); \
+ gen_addr_imm_index(ctx, EA, 0); \
+ gen_qemu_##ldop(ctx, cpu_fpr[rD(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_LDUF(name, ldop, opc, type) \
+static void glue(gen_, name##u)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ if (unlikely(rA(ctx->opcode) == 0)) { \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
+ EA = tcg_temp_new(); \
+ gen_addr_imm_index(ctx, EA, 0); \
+ gen_qemu_##ldop(ctx, cpu_fpr[rD(ctx->opcode)], EA); \
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_LDUXF(name, ldop, opc, type) \
+static void glue(gen_, name##ux)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ if (unlikely(rA(ctx->opcode) == 0)) { \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##ldop(ctx, cpu_fpr[rD(ctx->opcode)], EA); \
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_LDXF(name, ldop, opc2, opc3, type) \
+static void glue(gen_, name##x)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##ldop(ctx, cpu_fpr[rD(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_LDFS(name, ldop, op, type) \
+GEN_LDF(name, ldop, op | 0x20, type); \
+GEN_LDUF(name, ldop, op | 0x21, type); \
+GEN_LDUXF(name, ldop, op | 0x01, type); \
+GEN_LDXF(name, ldop, 0x17, op | 0x00, type)
+
+static inline void gen_qemu_ld32fs(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ gen_qemu_ld32u(ctx, t0, arg2);
+ tcg_gen_trunc_tl_i32(t1, t0);
+ tcg_temp_free(t0);
+ gen_helper_float32_to_float64(arg1, cpu_env, t1);
+ tcg_temp_free_i32(t1);
+}
+
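+/*
+ * Single-precision loads fetch a 32-bit value and widen it to the 64-bit
+ * double-precision representation kept in the FPR, using the
+ * float32_to_float64 helper above (gen_qemu_ld32fs).
+ */
+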
+/* lfd lfdu lfdux lfdx */
+GEN_LDFS(lfd, ld64_i64, 0x12, PPC_FLOAT);
+/* lfs lfsu lfsux lfsx */
+GEN_LDFS(lfs, ld32fs, 0x10, PPC_FLOAT);
+
+/* lfdp */
+static void gen_lfdp(DisasContext *ctx)
+{
+ TCGv EA;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ EA = tcg_temp_new();
+ gen_addr_imm_index(ctx, EA, 0);
+    /* We only need to swap the high and low halves. gen_qemu_ld64_i64 does
+       the necessary 64-bit byteswap already. */
+ if (unlikely(ctx->le_mode)) {
+ gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
+ tcg_gen_addi_tl(EA, EA, 8);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
+ } else {
+ gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
+ tcg_gen_addi_tl(EA, EA, 8);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
+ }
+ tcg_temp_free(EA);
+}
+
+/* lfdpx */
+static void gen_lfdpx(DisasContext *ctx)
+{
+ TCGv EA;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+    /* We only need to swap the high and low halves. gen_qemu_ld64_i64 does
+       the necessary 64-bit byteswap already. */
+ if (unlikely(ctx->le_mode)) {
+ gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
+ tcg_gen_addi_tl(EA, EA, 8);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
+ } else {
+ gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
+ tcg_gen_addi_tl(EA, EA, 8);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
+ }
+ tcg_temp_free(EA);
+}
+
+/* lfiwax */
+static void gen_lfiwax(DisasContext *ctx)
+{
+ TCGv EA;
+ TCGv t0;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ EA = tcg_temp_new();
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ gen_qemu_ld32s(ctx, t0, EA);
+ tcg_gen_ext_tl_i64(cpu_fpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(EA);
+ tcg_temp_free(t0);
+}
+
+/* lfiwzx */
+static void gen_lfiwzx(DisasContext *ctx)
+{
+ TCGv EA;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ gen_qemu_ld32u_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
+ tcg_temp_free(EA);
+}
+
+/*** Floating-point store ***/
+#define GEN_STF(name, stop, opc, type) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
+ EA = tcg_temp_new(); \
+ gen_addr_imm_index(ctx, EA, 0); \
+ gen_qemu_##stop(ctx, cpu_fpr[rS(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_STUF(name, stop, opc, type) \
+static void glue(gen_, name##u)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ if (unlikely(rA(ctx->opcode) == 0)) { \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
+ EA = tcg_temp_new(); \
+ gen_addr_imm_index(ctx, EA, 0); \
+ gen_qemu_##stop(ctx, cpu_fpr[rS(ctx->opcode)], EA); \
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_STUXF(name, stop, opc, type) \
+static void glue(gen_, name##ux)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ if (unlikely(rA(ctx->opcode) == 0)) { \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##stop(ctx, cpu_fpr[rS(ctx->opcode)], EA); \
+ tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_STXF(name, stop, opc2, opc3, type) \
+static void glue(gen_, name##x)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##stop(ctx, cpu_fpr[rS(ctx->opcode)], EA); \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_STFS(name, stop, op, type) \
+GEN_STF(name, stop, op | 0x20, type); \
+GEN_STUF(name, stop, op | 0x21, type); \
+GEN_STUXF(name, stop, op | 0x01, type); \
+GEN_STXF(name, stop, 0x17, op | 0x00, type)
+
+static inline void gen_qemu_st32fs(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
+{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv t1 = tcg_temp_new();
+ gen_helper_float64_to_float32(t0, cpu_env, arg1);
+ tcg_gen_extu_i32_tl(t1, t0);
+ tcg_temp_free_i32(t0);
+ gen_qemu_st32(ctx, t1, arg2);
+ tcg_temp_free(t1);
+}
+
+/* stfd stfdu stfdux stfdx */
+GEN_STFS(stfd, st64_i64, 0x16, PPC_FLOAT);
+/* stfs stfsu stfsux stfsx */
+GEN_STFS(stfs, st32fs, 0x14, PPC_FLOAT);
+
+/* stfdp */
+static void gen_stfdp(DisasContext *ctx)
+{
+ TCGv EA;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ EA = tcg_temp_new();
+ gen_addr_imm_index(ctx, EA, 0);
+    /* We only need to swap the high and low halves. gen_qemu_st64_i64 does
+       the necessary 64-bit byteswap already. */
+ if (unlikely(ctx->le_mode)) {
+ gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
+ tcg_gen_addi_tl(EA, EA, 8);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
+ } else {
+ gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
+ tcg_gen_addi_tl(EA, EA, 8);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
+ }
+ tcg_temp_free(EA);
+}
+
+/* stfdpx */
+static void gen_stfdpx(DisasContext *ctx)
+{
+ TCGv EA;
+ if (unlikely(!ctx->fpu_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_FPU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+    /* We only need to swap the high and low halves. gen_qemu_st64_i64 does
+       the necessary 64-bit byteswap already. */
+ if (unlikely(ctx->le_mode)) {
+ gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
+ tcg_gen_addi_tl(EA, EA, 8);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
+ } else {
+ gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode)], EA);
+ tcg_gen_addi_tl(EA, EA, 8);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rD(ctx->opcode) + 1], EA);
+ }
+ tcg_temp_free(EA);
+}
+
+/* Optional: */
+static inline void gen_qemu_st32fiw(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
+{
+ TCGv t0 = tcg_temp_new();
+    tcg_gen_trunc_i64_tl(t0, arg1);
+ gen_qemu_st32(ctx, t0, arg2);
+ tcg_temp_free(t0);
+}
+/* stfiwx */
+GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX);
+
+/* POWER2 specific instructions */
+/* Quad manipulation (load/store two floats at a time) */
+
+/* lfq */
+static void gen_lfq(DisasContext *ctx)
+{
+ int rd = rD(ctx->opcode);
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ gen_addr_imm_index(ctx, t0, 0);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[rd], t0);
+ gen_addr_add(ctx, t0, t0, 8);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[(rd + 1) % 32], t0);
+ tcg_temp_free(t0);
+}
+
+/* lfqu */
+static void gen_lfqu(DisasContext *ctx)
+{
+ int ra = rA(ctx->opcode);
+ int rd = rD(ctx->opcode);
+ TCGv t0, t1;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ gen_addr_imm_index(ctx, t0, 0);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[rd], t0);
+ gen_addr_add(ctx, t1, t0, 8);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[(rd + 1) % 32], t1);
+ if (ra != 0)
+ tcg_gen_mov_tl(cpu_gpr[ra], t0);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
+/* lfqux */
+static void gen_lfqux(DisasContext *ctx)
+{
+ int ra = rA(ctx->opcode);
+ int rd = rD(ctx->opcode);
+    TCGv t0, t1;
+    gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[rd], t0);
+ t1 = tcg_temp_new();
+ gen_addr_add(ctx, t1, t0, 8);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[(rd + 1) % 32], t1);
+ tcg_temp_free(t1);
+ if (ra != 0)
+ tcg_gen_mov_tl(cpu_gpr[ra], t0);
+ tcg_temp_free(t0);
+}
+
+/* lfqx */
+static void gen_lfqx(DisasContext *ctx)
+{
+ int rd = rD(ctx->opcode);
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[rd], t0);
+ gen_addr_add(ctx, t0, t0, 8);
+ gen_qemu_ld64_i64(ctx, cpu_fpr[(rd + 1) % 32], t0);
+ tcg_temp_free(t0);
+}
+
+/* stfq */
+static void gen_stfq(DisasContext *ctx)
+{
+ int rd = rD(ctx->opcode);
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ gen_addr_imm_index(ctx, t0, 0);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rd], t0);
+ gen_addr_add(ctx, t0, t0, 8);
+ gen_qemu_st64_i64(ctx, cpu_fpr[(rd + 1) % 32], t0);
+ tcg_temp_free(t0);
+}
+
+/* stfqu */
+static void gen_stfqu(DisasContext *ctx)
+{
+ int ra = rA(ctx->opcode);
+ int rd = rD(ctx->opcode);
+ TCGv t0, t1;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ gen_addr_imm_index(ctx, t0, 0);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rd], t0);
+ t1 = tcg_temp_new();
+ gen_addr_add(ctx, t1, t0, 8);
+ gen_qemu_st64_i64(ctx, cpu_fpr[(rd + 1) % 32], t1);
+ tcg_temp_free(t1);
+ if (ra != 0)
+ tcg_gen_mov_tl(cpu_gpr[ra], t0);
+ tcg_temp_free(t0);
+}
+
+/* stfqux */
+static void gen_stfqux(DisasContext *ctx)
+{
+ int ra = rA(ctx->opcode);
+ int rd = rD(ctx->opcode);
+ TCGv t0, t1;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rd], t0);
+ t1 = tcg_temp_new();
+ gen_addr_add(ctx, t1, t0, 8);
+ gen_qemu_st64_i64(ctx, cpu_fpr[(rd + 1) % 32], t1);
+ tcg_temp_free(t1);
+ if (ra != 0)
+ tcg_gen_mov_tl(cpu_gpr[ra], t0);
+ tcg_temp_free(t0);
+}
+
+/* stfqx */
+static void gen_stfqx(DisasContext *ctx)
+{
+ int rd = rD(ctx->opcode);
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_qemu_st64_i64(ctx, cpu_fpr[rd], t0);
+ gen_addr_add(ctx, t0, t0, 8);
+ gen_qemu_st64_i64(ctx, cpu_fpr[(rd + 1) % 32], t0);
+ tcg_temp_free(t0);
+}
+
+#undef _GEN_FLOAT_ACB
+#undef GEN_FLOAT_ACB
+#undef _GEN_FLOAT_AB
+#undef GEN_FLOAT_AB
+#undef _GEN_FLOAT_AC
+#undef GEN_FLOAT_AC
+#undef GEN_FLOAT_B
+#undef GEN_FLOAT_BS
+
+#undef GEN_LDF
+#undef GEN_LDUF
+#undef GEN_LDUXF
+#undef GEN_LDXF
+#undef GEN_LDFS
+
+#undef GEN_STF
+#undef GEN_STUF
+#undef GEN_STUXF
+#undef GEN_STXF
+#undef GEN_STFS
diff --git a/target/ppc/translate/fp-ops.inc.c b/target/ppc/translate/fp-ops.inc.c
new file mode 100644
index 0000000000..d36ab4ecc3
--- /dev/null
+++ b/target/ppc/translate/fp-ops.inc.c
@@ -0,0 +1,111 @@
+#define _GEN_FLOAT_ACB(name, op, op1, op2, isfloat, set_fprf, type) \
+GEN_HANDLER(f##name, op1, op2, 0xFF, 0x00000000, type)
+#define GEN_FLOAT_ACB(name, op2, set_fprf, type) \
+_GEN_FLOAT_ACB(name, name, 0x3F, op2, 0, set_fprf, type), \
+_GEN_FLOAT_ACB(name##s, name, 0x3B, op2, 1, set_fprf, type)
+#define _GEN_FLOAT_AB(name, op, op1, op2, inval, isfloat, set_fprf, type) \
+GEN_HANDLER(f##name, op1, op2, 0xFF, inval, type)
+#define GEN_FLOAT_AB(name, op2, inval, set_fprf, type) \
+_GEN_FLOAT_AB(name, name, 0x3F, op2, inval, 0, set_fprf, type), \
+_GEN_FLOAT_AB(name##s, name, 0x3B, op2, inval, 1, set_fprf, type)
+#define _GEN_FLOAT_AC(name, op, op1, op2, inval, isfloat, set_fprf, type) \
+GEN_HANDLER(f##name, op1, op2, 0xFF, inval, type)
+#define GEN_FLOAT_AC(name, op2, inval, set_fprf, type) \
+_GEN_FLOAT_AC(name, name, 0x3F, op2, inval, 0, set_fprf, type), \
+_GEN_FLOAT_AC(name##s, name, 0x3B, op2, inval, 1, set_fprf, type)
+#define GEN_FLOAT_B(name, op2, op3, set_fprf, type) \
+GEN_HANDLER(f##name, 0x3F, op2, op3, 0x001F0000, type)
+#define GEN_FLOAT_BS(name, op1, op2, set_fprf, type) \
+GEN_HANDLER(f##name, op1, op2, 0xFF, 0x001F07C0, type)
+
+GEN_FLOAT_AB(add, 0x15, 0x000007C0, 1, PPC_FLOAT),
+GEN_FLOAT_AB(div, 0x12, 0x000007C0, 1, PPC_FLOAT),
+GEN_FLOAT_AC(mul, 0x19, 0x0000F800, 1, PPC_FLOAT),
+GEN_FLOAT_BS(re, 0x3F, 0x18, 1, PPC_FLOAT_EXT),
+GEN_FLOAT_BS(res, 0x3B, 0x18, 1, PPC_FLOAT_FRES),
+GEN_FLOAT_BS(rsqrte, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE),
+_GEN_FLOAT_ACB(sel, sel, 0x3F, 0x17, 0, 0, PPC_FLOAT_FSEL),
+GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT),
+GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT),
+GEN_FLOAT_ACB(msub, 0x1C, 1, PPC_FLOAT),
+GEN_FLOAT_ACB(nmadd, 0x1F, 1, PPC_FLOAT),
+GEN_FLOAT_ACB(nmsub, 0x1E, 1, PPC_FLOAT),
+GEN_HANDLER_E(ftdiv, 0x3F, 0x00, 0x04, 1, PPC_NONE, PPC2_FP_TST_ISA206),
+GEN_HANDLER_E(ftsqrt, 0x3F, 0x00, 0x05, 1, PPC_NONE, PPC2_FP_TST_ISA206),
+GEN_FLOAT_B(ctiw, 0x0E, 0x00, 0, PPC_FLOAT),
+GEN_HANDLER_E(fctiwu, 0x3F, 0x0E, 0x04, 0, PPC_NONE, PPC2_FP_CVT_ISA206),
+GEN_FLOAT_B(ctiwz, 0x0F, 0x00, 0, PPC_FLOAT),
+GEN_HANDLER_E(fctiwuz, 0x3F, 0x0F, 0x04, 0, PPC_NONE, PPC2_FP_CVT_ISA206),
+GEN_FLOAT_B(rsp, 0x0C, 0x00, 1, PPC_FLOAT),
+GEN_HANDLER_E(fcfid, 0x3F, 0x0E, 0x1A, 0x001F0000, PPC_NONE, PPC2_FP_CVT_S64),
+GEN_HANDLER_E(fcfids, 0x3B, 0x0E, 0x1A, 0, PPC_NONE, PPC2_FP_CVT_ISA206),
+GEN_HANDLER_E(fcfidu, 0x3F, 0x0E, 0x1E, 0, PPC_NONE, PPC2_FP_CVT_ISA206),
+GEN_HANDLER_E(fcfidus, 0x3B, 0x0E, 0x1E, 0, PPC_NONE, PPC2_FP_CVT_ISA206),
+GEN_HANDLER_E(fctid, 0x3F, 0x0E, 0x19, 0x001F0000, PPC_NONE, PPC2_FP_CVT_S64),
+GEN_HANDLER_E(fctidu, 0x3F, 0x0E, 0x1D, 0, PPC_NONE, PPC2_FP_CVT_ISA206),
+GEN_HANDLER_E(fctidz, 0x3F, 0x0F, 0x19, 0x001F0000, PPC_NONE, PPC2_FP_CVT_S64),
+GEN_HANDLER_E(fctiduz, 0x3F, 0x0F, 0x1D, 0, PPC_NONE, PPC2_FP_CVT_ISA206),
+GEN_FLOAT_B(rin, 0x08, 0x0C, 1, PPC_FLOAT_EXT),
+GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT),
+GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT),
+GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT),
+
+#define GEN_LDF(name, ldop, opc, type) \
+GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type),
+#define GEN_LDUF(name, ldop, opc, type) \
+GEN_HANDLER(name##u, opc, 0xFF, 0xFF, 0x00000000, type),
+#define GEN_LDUXF(name, ldop, opc, type) \
+GEN_HANDLER(name##ux, 0x1F, 0x17, opc, 0x00000001, type),
+#define GEN_LDXF(name, ldop, opc2, opc3, type) \
+GEN_HANDLER(name##x, 0x1F, opc2, opc3, 0x00000001, type),
+#define GEN_LDFS(name, ldop, op, type) \
+GEN_LDF(name, ldop, op | 0x20, type) \
+GEN_LDUF(name, ldop, op | 0x21, type) \
+GEN_LDUXF(name, ldop, op | 0x01, type) \
+GEN_LDXF(name, ldop, 0x17, op | 0x00, type)
+
+GEN_LDFS(lfd, ld64, 0x12, PPC_FLOAT)
+GEN_LDFS(lfs, ld32fs, 0x10, PPC_FLOAT)
+GEN_HANDLER_E(lfiwax, 0x1f, 0x17, 0x1a, 0x00000001, PPC_NONE, PPC2_ISA205),
+GEN_HANDLER_E(lfiwzx, 0x1f, 0x17, 0x1b, 0x1, PPC_NONE, PPC2_FP_CVT_ISA206),
+GEN_HANDLER_E(lfdp, 0x39, 0xFF, 0xFF, 0x00200003, PPC_NONE, PPC2_ISA205),
+GEN_HANDLER_E(lfdpx, 0x1F, 0x17, 0x18, 0x00200001, PPC_NONE, PPC2_ISA205),
+
+#define GEN_STF(name, stop, opc, type) \
+GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type),
+#define GEN_STUF(name, stop, opc, type) \
+GEN_HANDLER(name##u, opc, 0xFF, 0xFF, 0x00000000, type),
+#define GEN_STUXF(name, stop, opc, type) \
+GEN_HANDLER(name##ux, 0x1F, 0x17, opc, 0x00000001, type),
+#define GEN_STXF(name, stop, opc2, opc3, type) \
+GEN_HANDLER(name##x, 0x1F, opc2, opc3, 0x00000001, type),
+#define GEN_STFS(name, stop, op, type) \
+GEN_STF(name, stop, op | 0x20, type) \
+GEN_STUF(name, stop, op | 0x21, type) \
+GEN_STUXF(name, stop, op | 0x01, type) \
+GEN_STXF(name, stop, 0x17, op | 0x00, type)
+
+GEN_STFS(stfd, st64_i64, 0x16, PPC_FLOAT)
+GEN_STFS(stfs, st32fs, 0x14, PPC_FLOAT)
+GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX)
+GEN_HANDLER_E(stfdp, 0x3D, 0xFF, 0xFF, 0x00200003, PPC_NONE, PPC2_ISA205),
+GEN_HANDLER_E(stfdpx, 0x1F, 0x17, 0x1C, 0x00200001, PPC_NONE, PPC2_ISA205),
+
+GEN_HANDLER(frsqrtes, 0x3B, 0x1A, 0xFF, 0x001F07C0, PPC_FLOAT_FRSQRTES),
+GEN_HANDLER(fsqrt, 0x3F, 0x16, 0xFF, 0x001F07C0, PPC_FLOAT_FSQRT),
+GEN_HANDLER(fsqrts, 0x3B, 0x16, 0xFF, 0x001F07C0, PPC_FLOAT_FSQRT),
+GEN_HANDLER(fcmpo, 0x3F, 0x00, 0x01, 0x00600001, PPC_FLOAT),
+GEN_HANDLER(fcmpu, 0x3F, 0x00, 0x00, 0x00600001, PPC_FLOAT),
+GEN_HANDLER(fabs, 0x3F, 0x08, 0x08, 0x001F0000, PPC_FLOAT),
+GEN_HANDLER(fmr, 0x3F, 0x08, 0x02, 0x001F0000, PPC_FLOAT),
+GEN_HANDLER(fnabs, 0x3F, 0x08, 0x04, 0x001F0000, PPC_FLOAT),
+GEN_HANDLER(fneg, 0x3F, 0x08, 0x01, 0x001F0000, PPC_FLOAT),
+GEN_HANDLER_E(fcpsgn, 0x3F, 0x08, 0x00, 0x00000000, PPC_NONE, PPC2_ISA205),
+GEN_HANDLER_E(fmrgew, 0x3F, 0x06, 0x1E, 0x00000001, PPC_NONE, PPC2_VSX207),
+GEN_HANDLER_E(fmrgow, 0x3F, 0x06, 0x1A, 0x00000001, PPC_NONE, PPC2_VSX207),
+GEN_HANDLER(mcrfs, 0x3F, 0x00, 0x02, 0x0063F801, PPC_FLOAT),
+GEN_HANDLER(mffs, 0x3F, 0x07, 0x12, 0x001FF800, PPC_FLOAT),
+GEN_HANDLER(mtfsb0, 0x3F, 0x06, 0x02, 0x001FF800, PPC_FLOAT),
+GEN_HANDLER(mtfsb1, 0x3F, 0x06, 0x01, 0x001FF800, PPC_FLOAT),
+GEN_HANDLER(mtfsf, 0x3F, 0x07, 0x16, 0x00000000, PPC_FLOAT),
+GEN_HANDLER(mtfsfi, 0x3F, 0x06, 0x04, 0x006e0800, PPC_FLOAT),
diff --git a/target/ppc/translate/spe-impl.inc.c b/target/ppc/translate/spe-impl.inc.c
new file mode 100644
index 0000000000..8c1c16c63e
--- /dev/null
+++ b/target/ppc/translate/spe-impl.inc.c
@@ -0,0 +1,1229 @@
+/*
+ * translate/spe-impl.c
+ *
+ * Freescale SPE extension translation
+ */
+
+/*** SPE extension ***/
+/* Register moves */
+
+static inline void gen_evmra(DisasContext *ctx)
+{
+
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+
+ TCGv_i64 tmp = tcg_temp_new_i64();
+
+ /* tmp := rA_lo + rA_hi << 32 */
+ tcg_gen_concat_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]);
+
+ /* spe_acc := tmp */
+ tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUPPCState, spe_acc));
+ tcg_temp_free_i64(tmp);
+
+ /* rD := rA */
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]);
+}
+
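+/* The 64-bit SPE view of a GPR is split across two 32-bit TCG globals:
+ * cpu_gpr[reg] holds the low word and cpu_gprh[reg] the high word. */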
+static inline void gen_load_gpr64(TCGv_i64 t, int reg)
+{
+ tcg_gen_concat_tl_i64(t, cpu_gpr[reg], cpu_gprh[reg]);
+}
+
+static inline void gen_store_gpr64(int reg, TCGv_i64 t)
+{
+ tcg_gen_extr_i64_tl(cpu_gpr[reg], cpu_gprh[reg], t);
+}
+
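+/* Each SPE opcode slot carries a pair of instructions; the low opcode bit
+ * (extracted here with Rc) selects which of the two handlers to emit. */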
+#define GEN_SPE(name0, name1, opc2, opc3, inval0, inval1, type) \
+static void glue(gen_, name0##_##name1)(DisasContext *ctx) \
+{ \
+ if (Rc(ctx->opcode)) \
+ gen_##name1(ctx); \
+ else \
+ gen_##name0(ctx); \
+}
+
+/* Handler for undefined SPE opcodes */
+static inline void gen_speundef(DisasContext *ctx)
+{
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);
+}
+
+/* SPE logic */
+#define GEN_SPEOP_LOGIC2(name, tcg_op) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_SPEU); \
+ return; \
+ } \
+ tcg_op(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], \
+ cpu_gpr[rB(ctx->opcode)]); \
+ tcg_op(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)], \
+ cpu_gprh[rB(ctx->opcode)]); \
+}
+
+GEN_SPEOP_LOGIC2(evand, tcg_gen_and_tl);
+GEN_SPEOP_LOGIC2(evandc, tcg_gen_andc_tl);
+GEN_SPEOP_LOGIC2(evxor, tcg_gen_xor_tl);
+GEN_SPEOP_LOGIC2(evor, tcg_gen_or_tl);
+GEN_SPEOP_LOGIC2(evnor, tcg_gen_nor_tl);
+GEN_SPEOP_LOGIC2(eveqv, tcg_gen_eqv_tl);
+GEN_SPEOP_LOGIC2(evorc, tcg_gen_orc_tl);
+GEN_SPEOP_LOGIC2(evnand, tcg_gen_nand_tl);
+
+/* SPE logic immediate */
+#define GEN_SPEOP_TCG_LOGIC_IMM2(name, tcg_opi) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 t0; \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_SPEU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i32(); \
+ \
+ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); \
+ tcg_opi(t0, t0, rB(ctx->opcode)); \
+ tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); \
+ \
+ tcg_gen_trunc_tl_i32(t0, cpu_gprh[rA(ctx->opcode)]); \
+ tcg_opi(t0, t0, rB(ctx->opcode)); \
+ tcg_gen_extu_i32_tl(cpu_gprh[rD(ctx->opcode)], t0); \
+ \
+ tcg_temp_free_i32(t0); \
+}
+GEN_SPEOP_TCG_LOGIC_IMM2(evslwi, tcg_gen_shli_i32);
+GEN_SPEOP_TCG_LOGIC_IMM2(evsrwiu, tcg_gen_shri_i32);
+GEN_SPEOP_TCG_LOGIC_IMM2(evsrwis, tcg_gen_sari_i32);
+GEN_SPEOP_TCG_LOGIC_IMM2(evrlwi, tcg_gen_rotli_i32);
+
+/* SPE arithmetic */
+#define GEN_SPEOP_ARITH1(name, tcg_op) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 t0; \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_SPEU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i32(); \
+ \
+ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); \
+ tcg_op(t0, t0); \
+ tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); \
+ \
+ tcg_gen_trunc_tl_i32(t0, cpu_gprh[rA(ctx->opcode)]); \
+ tcg_op(t0, t0); \
+ tcg_gen_extu_i32_tl(cpu_gprh[rD(ctx->opcode)], t0); \
+ \
+ tcg_temp_free_i32(t0); \
+}
+
+static inline void gen_op_evabs(TCGv_i32 ret, TCGv_i32 arg1)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+
+ tcg_gen_brcondi_i32(TCG_COND_GE, arg1, 0, l1);
+ tcg_gen_neg_i32(ret, arg1);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_mov_i32(ret, arg1);
+ gen_set_label(l2);
+}
+GEN_SPEOP_ARITH1(evabs, gen_op_evabs);
+GEN_SPEOP_ARITH1(evneg, tcg_gen_neg_i32);
+GEN_SPEOP_ARITH1(evextsb, tcg_gen_ext8s_i32);
+GEN_SPEOP_ARITH1(evextsh, tcg_gen_ext16s_i32);
+static inline void gen_op_evrndw(TCGv_i32 ret, TCGv_i32 arg1)
+{
+ tcg_gen_addi_i32(ret, arg1, 0x8000);
+ tcg_gen_ext16u_i32(ret, ret);
+}
+GEN_SPEOP_ARITH1(evrndw, gen_op_evrndw);
+GEN_SPEOP_ARITH1(evcntlsw, gen_helper_cntlsw32);
+GEN_SPEOP_ARITH1(evcntlzw, gen_helper_cntlzw32);
+
+#define GEN_SPEOP_ARITH2(name, tcg_op) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 t0, t1; \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_SPEU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i32(); \
+ t1 = tcg_temp_new_i32(); \
+ \
+ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); \
+ tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \
+ tcg_op(t0, t0, t1); \
+ tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); \
+ \
+ tcg_gen_trunc_tl_i32(t0, cpu_gprh[rA(ctx->opcode)]); \
+ tcg_gen_trunc_tl_i32(t1, cpu_gprh[rB(ctx->opcode)]); \
+ tcg_op(t0, t0, t1); \
+ tcg_gen_extu_i32_tl(cpu_gprh[rD(ctx->opcode)], t0); \
+ \
+ tcg_temp_free_i32(t0); \
+ tcg_temp_free_i32(t1); \
+}
+
+static inline void gen_op_evsrwu(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ TCGv_i32 t0 = tcg_temp_local_new_i32();
+
+ /* No error here: 6 bits are used */
+ tcg_gen_andi_i32(t0, arg2, 0x3F);
+ tcg_gen_brcondi_i32(TCG_COND_GE, t0, 32, l1);
+ tcg_gen_shr_i32(ret, arg1, t0);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_movi_i32(ret, 0);
+ gen_set_label(l2);
+ tcg_temp_free_i32(t0);
+}
+GEN_SPEOP_ARITH2(evsrwu, gen_op_evsrwu);
+static inline void gen_op_evsrws(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ TCGv_i32 t0 = tcg_temp_local_new_i32();
+
+ /* No error here: 6 bits are used */
+ tcg_gen_andi_i32(t0, arg2, 0x3F);
+ tcg_gen_brcondi_i32(TCG_COND_GE, t0, 32, l1);
+ tcg_gen_sar_i32(ret, arg1, t0);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_movi_i32(ret, 0);
+ gen_set_label(l2);
+ tcg_temp_free_i32(t0);
+}
+GEN_SPEOP_ARITH2(evsrws, gen_op_evsrws);
+static inline void gen_op_evslw(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ TCGv_i32 t0 = tcg_temp_local_new_i32();
+
+ /* No error here: 6 bits are used */
+ tcg_gen_andi_i32(t0, arg2, 0x3F);
+ tcg_gen_brcondi_i32(TCG_COND_GE, t0, 32, l1);
+ tcg_gen_shl_i32(ret, arg1, t0);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_movi_i32(ret, 0);
+ gen_set_label(l2);
+ tcg_temp_free_i32(t0);
+}
+GEN_SPEOP_ARITH2(evslw, gen_op_evslw);
+static inline void gen_op_evrlw(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_andi_i32(t0, arg2, 0x1F);
+ tcg_gen_rotl_i32(ret, arg1, t0);
+ tcg_temp_free_i32(t0);
+}
+GEN_SPEOP_ARITH2(evrlw, gen_op_evrlw);
+static inline void gen_evmergehi(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]);
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]);
+}
+GEN_SPEOP_ARITH2(evaddw, tcg_gen_add_i32);
+static inline void gen_op_evsubf(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
+{
+ tcg_gen_sub_i32(ret, arg2, arg1);
+}
+GEN_SPEOP_ARITH2(evsubfw, gen_op_evsubf);
+
+/* SPE arithmetic immediate */
+#define GEN_SPEOP_ARITH_IMM2(name, tcg_op) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 t0; \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_SPEU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i32(); \
+ \
+ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rB(ctx->opcode)]); \
+ tcg_op(t0, t0, rA(ctx->opcode)); \
+ tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); \
+ \
+ tcg_gen_trunc_tl_i32(t0, cpu_gprh[rB(ctx->opcode)]); \
+ tcg_op(t0, t0, rA(ctx->opcode)); \
+ tcg_gen_extu_i32_tl(cpu_gprh[rD(ctx->opcode)], t0); \
+ \
+ tcg_temp_free_i32(t0); \
+}
+GEN_SPEOP_ARITH_IMM2(evaddiw, tcg_gen_addi_i32);
+GEN_SPEOP_ARITH_IMM2(evsubifw, tcg_gen_subi_i32);
+
+/* SPE comparison */
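+/* Each compare sets the target CR field from the per-word results: CL for the
+ * low word, CH for the high word, plus their OR and AND summary bits. */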
+#define GEN_SPEOP_COMP(name, tcg_cond) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_SPEU); \
+ return; \
+ } \
+ TCGLabel *l1 = gen_new_label(); \
+ TCGLabel *l2 = gen_new_label(); \
+ TCGLabel *l3 = gen_new_label(); \
+ TCGLabel *l4 = gen_new_label(); \
+ \
+ tcg_gen_ext32s_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); \
+ tcg_gen_ext32s_tl(cpu_gpr[rB(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); \
+ tcg_gen_ext32s_tl(cpu_gprh[rA(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]); \
+ tcg_gen_ext32s_tl(cpu_gprh[rB(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]); \
+ \
+ tcg_gen_brcond_tl(tcg_cond, cpu_gpr[rA(ctx->opcode)], \
+ cpu_gpr[rB(ctx->opcode)], l1); \
+ tcg_gen_movi_i32(cpu_crf[crfD(ctx->opcode)], 0); \
+ tcg_gen_br(l2); \
+ gen_set_label(l1); \
+ tcg_gen_movi_i32(cpu_crf[crfD(ctx->opcode)], \
+ CRF_CL | CRF_CH_OR_CL | CRF_CH_AND_CL); \
+ gen_set_label(l2); \
+ tcg_gen_brcond_tl(tcg_cond, cpu_gprh[rA(ctx->opcode)], \
+ cpu_gprh[rB(ctx->opcode)], l3); \
+ tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], \
+ ~(CRF_CH | CRF_CH_AND_CL)); \
+ tcg_gen_br(l4); \
+ gen_set_label(l3); \
+ tcg_gen_ori_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], \
+ CRF_CH | CRF_CH_OR_CL); \
+ gen_set_label(l4); \
+}
+GEN_SPEOP_COMP(evcmpgtu, TCG_COND_GTU);
+GEN_SPEOP_COMP(evcmpgts, TCG_COND_GT);
+GEN_SPEOP_COMP(evcmpltu, TCG_COND_LTU);
+GEN_SPEOP_COMP(evcmplts, TCG_COND_LT);
+GEN_SPEOP_COMP(evcmpeq, TCG_COND_EQ);
+
+/* SPE misc */
+static inline void gen_brinc(DisasContext *ctx)
+{
+ /* Note: brinc is usable even if SPE is disabled */
+ gen_helper_brinc(cpu_gpr[rD(ctx->opcode)],
+ cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+}
+static inline void gen_evmergelo(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+}
+static inline void gen_evmergehilo(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]);
+}
+static inline void gen_evmergelohi(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ if (rD(ctx->opcode) == rA(ctx->opcode)) {
+ TCGv tmp = tcg_temp_new();
+ tcg_gen_mov_tl(tmp, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]);
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], tmp);
+ tcg_temp_free(tmp);
+ } else {
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]);
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+static inline void gen_evsplati(DisasContext *ctx)
+{
+ uint64_t imm = ((int32_t)(rA(ctx->opcode) << 27)) >> 27;
+
+ tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], imm);
+ tcg_gen_movi_tl(cpu_gprh[rD(ctx->opcode)], imm);
+}
+static inline void gen_evsplatfi(DisasContext *ctx)
+{
+ uint64_t imm = rA(ctx->opcode) << 27;
+
+ tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], imm);
+ tcg_gen_movi_tl(cpu_gprh[rD(ctx->opcode)], imm);
+}
+
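+/* evsel: each word of rD is taken from rA or rB according to two bits of the
+ * CR field named by the low three opcode bits (bit set selects rA). */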
+static inline void gen_evsel(DisasContext *ctx)
+{
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+ TCGLabel *l3 = gen_new_label();
+ TCGLabel *l4 = gen_new_label();
+ TCGv_i32 t0 = tcg_temp_local_new_i32();
+
+ tcg_gen_andi_i32(t0, cpu_crf[ctx->opcode & 0x07], 1 << 3);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, l1);
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]);
+ tcg_gen_br(l2);
+ gen_set_label(l1);
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rB(ctx->opcode)]);
+ gen_set_label(l2);
+ tcg_gen_andi_i32(t0, cpu_crf[ctx->opcode & 0x07], 1 << 2);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, t0, 0, l3);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_br(l4);
+ gen_set_label(l3);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+ gen_set_label(l4);
+ tcg_temp_free_i32(t0);
+}
+
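+/* evsel is registered four times (evsel0..evsel3 in spe-ops.inc.c) because
+ * part of the CR-field selector falls in the opcode bits used for dispatch. */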
+static void gen_evsel0(DisasContext *ctx)
+{
+ gen_evsel(ctx);
+}
+
+static void gen_evsel1(DisasContext *ctx)
+{
+ gen_evsel(ctx);
+}
+
+static void gen_evsel2(DisasContext *ctx)
+{
+ gen_evsel(ctx);
+}
+
+static void gen_evsel3(DisasContext *ctx)
+{
+ gen_evsel(ctx);
+}
+
+/* Multiply */
+
+static inline void gen_evmwumi(DisasContext *ctx)
+{
+ TCGv_i64 t0, t1;
+
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+
+ /* t0 := rA; t1 := rB */
+ tcg_gen_extu_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_ext32u_i64(t0, t0);
+ tcg_gen_extu_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_ext32u_i64(t1, t1);
+
+ tcg_gen_mul_i64(t0, t0, t1); /* t0 := rA * rB */
+
+ gen_store_gpr64(rD(ctx->opcode), t0); /* rD := t0 */
+
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+}
+
+static inline void gen_evmwumia(DisasContext *ctx)
+{
+ TCGv_i64 tmp;
+
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+
+ gen_evmwumi(ctx); /* rD := rA * rB */
+
+ tmp = tcg_temp_new_i64();
+
+ /* acc := rD */
+ gen_load_gpr64(tmp, rD(ctx->opcode));
+ tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUPPCState, spe_acc));
+ tcg_temp_free_i64(tmp);
+}
+
+static inline void gen_evmwumiaa(DisasContext *ctx)
+{
+ TCGv_i64 acc;
+ TCGv_i64 tmp;
+
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+
+ gen_evmwumi(ctx); /* rD := rA * rB */
+
+ acc = tcg_temp_new_i64();
+ tmp = tcg_temp_new_i64();
+
+ /* tmp := rD */
+ gen_load_gpr64(tmp, rD(ctx->opcode));
+
+ /* Load acc */
+ tcg_gen_ld_i64(acc, cpu_env, offsetof(CPUPPCState, spe_acc));
+
+ /* acc := tmp + acc */
+ tcg_gen_add_i64(acc, acc, tmp);
+
+ /* Store acc */
+ tcg_gen_st_i64(acc, cpu_env, offsetof(CPUPPCState, spe_acc));
+
+ /* rD := acc */
+ gen_store_gpr64(rD(ctx->opcode), acc);
+
+ tcg_temp_free_i64(acc);
+ tcg_temp_free_i64(tmp);
+}
+
+static inline void gen_evmwsmi(DisasContext *ctx)
+{
+ TCGv_i64 t0, t1;
+
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+
+ /* t0 := rA; t1 := rB */
+ tcg_gen_extu_tl_i64(t0, cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_ext32s_i64(t0, t0);
+ tcg_gen_extu_tl_i64(t1, cpu_gpr[rB(ctx->opcode)]);
+ tcg_gen_ext32s_i64(t1, t1);
+
+ tcg_gen_mul_i64(t0, t0, t1); /* t0 := rA * rB */
+
+ gen_store_gpr64(rD(ctx->opcode), t0); /* rD := t0 */
+
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+}
+
+static inline void gen_evmwsmia(DisasContext *ctx)
+{
+ TCGv_i64 tmp;
+
+ gen_evmwsmi(ctx); /* rD := rA * rB */
+
+ tmp = tcg_temp_new_i64();
+
+ /* acc := rD */
+ gen_load_gpr64(tmp, rD(ctx->opcode));
+ tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUPPCState, spe_acc));
+
+ tcg_temp_free_i64(tmp);
+}
+
+static inline void gen_evmwsmiaa(DisasContext *ctx)
+{
+    TCGv_i64 acc;
+    TCGv_i64 tmp;
+
+ gen_evmwsmi(ctx); /* rD := rA * rB */
+
+ acc = tcg_temp_new_i64();
+ tmp = tcg_temp_new_i64();
+
+ /* tmp := rD */
+ gen_load_gpr64(tmp, rD(ctx->opcode));
+
+ /* Load acc */
+ tcg_gen_ld_i64(acc, cpu_env, offsetof(CPUPPCState, spe_acc));
+
+ /* acc := tmp + acc */
+ tcg_gen_add_i64(acc, acc, tmp);
+
+ /* Store acc */
+ tcg_gen_st_i64(acc, cpu_env, offsetof(CPUPPCState, spe_acc));
+
+ /* rD := acc */
+ gen_store_gpr64(rD(ctx->opcode), acc);
+
+ tcg_temp_free_i64(acc);
+ tcg_temp_free_i64(tmp);
+}
+
+GEN_SPE(evaddw, speundef, 0x00, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); ////
+GEN_SPE(evaddiw, speundef, 0x01, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE);
+GEN_SPE(evsubfw, speundef, 0x02, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); ////
+GEN_SPE(evsubifw, speundef, 0x03, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE);
+GEN_SPE(evabs, evneg, 0x04, 0x08, 0x0000F800, 0x0000F800, PPC_SPE); ////
+GEN_SPE(evextsb, evextsh, 0x05, 0x08, 0x0000F800, 0x0000F800, PPC_SPE); ////
+GEN_SPE(evrndw, evcntlzw, 0x06, 0x08, 0x0000F800, 0x0000F800, PPC_SPE); ////
+GEN_SPE(evcntlsw, brinc, 0x07, 0x08, 0x0000F800, 0x00000000, PPC_SPE); //
+GEN_SPE(evmra, speundef, 0x02, 0x13, 0x0000F800, 0xFFFFFFFF, PPC_SPE);
+GEN_SPE(speundef, evand, 0x08, 0x08, 0xFFFFFFFF, 0x00000000, PPC_SPE); ////
+GEN_SPE(evandc, speundef, 0x09, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); ////
+GEN_SPE(evxor, evor, 0x0B, 0x08, 0x00000000, 0x00000000, PPC_SPE); ////
+GEN_SPE(evnor, eveqv, 0x0C, 0x08, 0x00000000, 0x00000000, PPC_SPE); ////
+GEN_SPE(evmwumi, evmwsmi, 0x0C, 0x11, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(evmwumia, evmwsmia, 0x1C, 0x11, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(evmwumiaa, evmwsmiaa, 0x0C, 0x15, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evorc, 0x0D, 0x08, 0xFFFFFFFF, 0x00000000, PPC_SPE); ////
+GEN_SPE(evnand, speundef, 0x0F, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); ////
+GEN_SPE(evsrwu, evsrws, 0x10, 0x08, 0x00000000, 0x00000000, PPC_SPE); ////
+GEN_SPE(evsrwiu, evsrwis, 0x11, 0x08, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(evslw, speundef, 0x12, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE); ////
+GEN_SPE(evslwi, speundef, 0x13, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE);
+GEN_SPE(evrlw, evsplati, 0x14, 0x08, 0x00000000, 0x0000F800, PPC_SPE); //
+GEN_SPE(evrlwi, evsplatfi, 0x15, 0x08, 0x00000000, 0x0000F800, PPC_SPE);
+GEN_SPE(evmergehi, evmergelo, 0x16, 0x08, 0x00000000, 0x00000000, PPC_SPE); ////
+GEN_SPE(evmergehilo, evmergelohi, 0x17, 0x08, 0x00000000, 0x00000000, PPC_SPE); ////
+GEN_SPE(evcmpgtu, evcmpgts, 0x18, 0x08, 0x00600000, 0x00600000, PPC_SPE); ////
+GEN_SPE(evcmpltu, evcmplts, 0x19, 0x08, 0x00600000, 0x00600000, PPC_SPE); ////
+GEN_SPE(evcmpeq, speundef, 0x1A, 0x08, 0x00600000, 0xFFFFFFFF, PPC_SPE); ////
+
+/* SPE loads and stores */
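+/* EA for the immediate-indexed forms: the rB field is reused as a 5-bit
+ * unsigned offset, scaled by the access size (sh) and added to rA (or used
+ * directly when rA is 0). */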
+static inline void gen_addr_spe_imm_index(DisasContext *ctx, TCGv EA, int sh)
+{
+ target_ulong uimm = rB(ctx->opcode);
+
+ if (rA(ctx->opcode) == 0) {
+ tcg_gen_movi_tl(EA, uimm << sh);
+ } else {
+ tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], uimm << sh);
+ if (NARROW_MODE(ctx)) {
+ tcg_gen_ext32u_tl(EA, EA);
+ }
+ }
+}
+
+static inline void gen_op_evldd(DisasContext *ctx, TCGv addr)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ gen_qemu_ld64_i64(ctx, t0, addr);
+ gen_store_gpr64(rD(ctx->opcode), t0);
+ tcg_temp_free_i64(t0);
+}
+
+static inline void gen_op_evldw(DisasContext *ctx, TCGv addr)
+{
+ gen_qemu_ld32u(ctx, cpu_gprh[rD(ctx->opcode)], addr);
+ gen_addr_add(ctx, addr, addr, 4);
+ gen_qemu_ld32u(ctx, cpu_gpr[rD(ctx->opcode)], addr);
+}
+
+static inline void gen_op_evldh(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_shli_tl(cpu_gprh[rD(ctx->opcode)], t0, 16);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_or_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rD(ctx->opcode)], t0);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
+    tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 16);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_op_evlhhesplat(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_shli_tl(t0, t0, 16);
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_op_evlhhousplat(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_op_evlhhossplat(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_qemu_ld16s(ctx, t0, addr);
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_op_evlwhe(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_shli_tl(cpu_gprh[rD(ctx->opcode)], t0, 16);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 16);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_op_evlwhou(DisasContext *ctx, TCGv addr)
+{
+ gen_qemu_ld16u(ctx, cpu_gprh[rD(ctx->opcode)], addr);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, cpu_gpr[rD(ctx->opcode)], addr);
+}
+
+static inline void gen_op_evlwhos(DisasContext *ctx, TCGv addr)
+{
+ gen_qemu_ld16s(ctx, cpu_gprh[rD(ctx->opcode)], addr);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16s(ctx, cpu_gpr[rD(ctx->opcode)], addr);
+}
+
+static inline void gen_op_evlwwsplat(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_qemu_ld32u(ctx, t0, addr);
+ tcg_gen_mov_tl(cpu_gprh[rD(ctx->opcode)], t0);
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_op_evlwhsplat(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_shli_tl(cpu_gprh[rD(ctx->opcode)], t0, 16);
+ tcg_gen_or_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rD(ctx->opcode)], t0);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
+ tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 16);
+    tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_op_evstdd(DisasContext *ctx, TCGv addr)
+{
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ gen_load_gpr64(t0, rS(ctx->opcode));
+ gen_qemu_st64_i64(ctx, t0, addr);
+ tcg_temp_free_i64(t0);
+}
+
+static inline void gen_op_evstdw(DisasContext *ctx, TCGv addr)
+{
+ gen_qemu_st32(ctx, cpu_gprh[rS(ctx->opcode)], addr);
+ gen_addr_add(ctx, addr, addr, 4);
+ gen_qemu_st32(ctx, cpu_gpr[rS(ctx->opcode)], addr);
+}
+
+static inline void gen_op_evstdh(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, cpu_gprh[rS(ctx->opcode)], 16);
+ gen_qemu_st16(ctx, t0, addr);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_st16(ctx, cpu_gprh[rS(ctx->opcode)], addr);
+ gen_addr_add(ctx, addr, addr, 2);
+ tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 16);
+ gen_qemu_st16(ctx, t0, addr);
+ tcg_temp_free(t0);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_st16(ctx, cpu_gpr[rS(ctx->opcode)], addr);
+}
+
+static inline void gen_op_evstwhe(DisasContext *ctx, TCGv addr)
+{
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_shri_tl(t0, cpu_gprh[rS(ctx->opcode)], 16);
+ gen_qemu_st16(ctx, t0, addr);
+ gen_addr_add(ctx, addr, addr, 2);
+ tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 16);
+ gen_qemu_st16(ctx, t0, addr);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_op_evstwho(DisasContext *ctx, TCGv addr)
+{
+ gen_qemu_st16(ctx, cpu_gprh[rS(ctx->opcode)], addr);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_st16(ctx, cpu_gpr[rS(ctx->opcode)], addr);
+}
+
+static inline void gen_op_evstwwe(DisasContext *ctx, TCGv addr)
+{
+ gen_qemu_st32(ctx, cpu_gprh[rS(ctx->opcode)], addr);
+}
+
+static inline void gen_op_evstwwo(DisasContext *ctx, TCGv addr)
+{
+ gen_qemu_st32(ctx, cpu_gpr[rS(ctx->opcode)], addr);
+}
+
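+/* Common SPE load/store skeleton: compute the EA (immediate-indexed form when
+ * Rc is set, register-indexed otherwise) and defer to the gen_op_* helper. */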
+#define GEN_SPEOP_LDST(name, opc2, sh) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv t0; \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_SPEU); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ t0 = tcg_temp_new(); \
+ if (Rc(ctx->opcode)) { \
+ gen_addr_spe_imm_index(ctx, t0, sh); \
+ } else { \
+ gen_addr_reg_index(ctx, t0); \
+ } \
+ gen_op_##name(ctx, t0); \
+ tcg_temp_free(t0); \
+}
+
+GEN_SPEOP_LDST(evldd, 0x00, 3);
+GEN_SPEOP_LDST(evldw, 0x01, 3);
+GEN_SPEOP_LDST(evldh, 0x02, 3);
+GEN_SPEOP_LDST(evlhhesplat, 0x04, 1);
+GEN_SPEOP_LDST(evlhhousplat, 0x06, 1);
+GEN_SPEOP_LDST(evlhhossplat, 0x07, 1);
+GEN_SPEOP_LDST(evlwhe, 0x08, 2);
+GEN_SPEOP_LDST(evlwhou, 0x0A, 2);
+GEN_SPEOP_LDST(evlwhos, 0x0B, 2);
+GEN_SPEOP_LDST(evlwwsplat, 0x0C, 2);
+GEN_SPEOP_LDST(evlwhsplat, 0x0E, 2);
+
+GEN_SPEOP_LDST(evstdd, 0x10, 3);
+GEN_SPEOP_LDST(evstdw, 0x11, 3);
+GEN_SPEOP_LDST(evstdh, 0x12, 3);
+GEN_SPEOP_LDST(evstwhe, 0x18, 2);
+GEN_SPEOP_LDST(evstwho, 0x1A, 2);
+GEN_SPEOP_LDST(evstwwe, 0x1C, 2);
+GEN_SPEOP_LDST(evstwwo, 0x1E, 2);
+
+/* Multiply and add - TODO */
+#if 0
+GEN_SPE(speundef, evmhessf, 0x01, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE);//
+GEN_SPE(speundef, evmhossf, 0x03, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmheumi, evmhesmi, 0x04, 0x10, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhesmf, 0x05, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmhoumi, evmhosmi, 0x06, 0x10, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhosmf, 0x07, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhessfa, 0x11, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhossfa, 0x13, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmheumia, evmhesmia, 0x14, 0x10, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhesmfa, 0x15, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmhoumia, evmhosmia, 0x16, 0x10, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhosmfa, 0x17, 0x10, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+
+GEN_SPE(speundef, evmwhssf, 0x03, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmwlumi, speundef, 0x04, 0x11, 0x00000000, 0xFFFFFFFF, PPC_SPE);
+GEN_SPE(evmwhumi, evmwhsmi, 0x06, 0x11, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwhsmf, 0x07, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwssf, 0x09, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwsmf, 0x0D, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwhssfa, 0x13, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmwlumia, speundef, 0x14, 0x11, 0x00000000, 0xFFFFFFFF, PPC_SPE);
+GEN_SPE(evmwhumia, evmwhsmia, 0x16, 0x11, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwhsmfa, 0x17, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwssfa, 0x19, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwsmfa, 0x1D, 0x11, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+
+GEN_SPE(evadduiaaw, evaddsiaaw, 0x00, 0x13, 0x0000F800, 0x0000F800, PPC_SPE);
+GEN_SPE(evsubfusiaaw, evsubfssiaaw, 0x01, 0x13, 0x0000F800, 0x0000F800, PPC_SPE);
+GEN_SPE(evaddumiaaw, evaddsmiaaw, 0x04, 0x13, 0x0000F800, 0x0000F800, PPC_SPE);
+GEN_SPE(evsubfumiaaw, evsubfsmiaaw, 0x05, 0x13, 0x0000F800, 0x0000F800, PPC_SPE);
+GEN_SPE(evdivws, evdivwu, 0x06, 0x13, 0x00000000, 0x00000000, PPC_SPE);
+
+GEN_SPE(evmheusiaaw, evmhessiaaw, 0x00, 0x14, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhessfaaw, 0x01, 0x14, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmhousiaaw, evmhossiaaw, 0x02, 0x14, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhossfaaw, 0x03, 0x14, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmheumiaaw, evmhesmiaaw, 0x04, 0x14, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhesmfaaw, 0x05, 0x14, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmhoumiaaw, evmhosmiaaw, 0x06, 0x14, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhosmfaaw, 0x07, 0x14, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmhegumiaa, evmhegsmiaa, 0x14, 0x14, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhegsmfaa, 0x15, 0x14, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmhogumiaa, evmhogsmiaa, 0x16, 0x14, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhogsmfaa, 0x17, 0x14, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+
+GEN_SPE(evmwlusiaaw, evmwlssiaaw, 0x00, 0x15, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(evmwlumiaaw, evmwlsmiaaw, 0x04, 0x15, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwssfaa, 0x09, 0x15, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwsmfaa, 0x0D, 0x15, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+
+GEN_SPE(evmheusianw, evmhessianw, 0x00, 0x16, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhessfanw, 0x01, 0x16, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmhousianw, evmhossianw, 0x02, 0x16, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhossfanw, 0x03, 0x16, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmheumianw, evmhesmianw, 0x04, 0x16, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhesmfanw, 0x05, 0x16, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmhoumianw, evmhosmianw, 0x06, 0x16, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhosmfanw, 0x07, 0x16, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmhegumian, evmhegsmian, 0x14, 0x16, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhegsmfan, 0x15, 0x16, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmhigumian, evmhigsmian, 0x16, 0x16, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmhogsmfan, 0x17, 0x16, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+
+GEN_SPE(evmwlusianw, evmwlssianw, 0x00, 0x17, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(evmwlumianw, evmwlsmianw, 0x04, 0x17, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwssfan, 0x09, 0x17, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+GEN_SPE(evmwumian, evmwsmian, 0x0C, 0x17, 0x00000000, 0x00000000, PPC_SPE);
+GEN_SPE(speundef, evmwsmfan, 0x0D, 0x17, 0xFFFFFFFF, 0x00000000, PPC_SPE);
+#endif
+
+/*** SPE floating-point extension ***/
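+/* Conversion wrappers: the _M_N suffix gives the destination and source widths
+ * in bits; every variant calls the per-instruction helper with cpu_env. */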
+#define GEN_SPEFPUOP_CONV_32_32(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 t0 = tcg_temp_new_i32(); \
+ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rB(ctx->opcode)]); \
+ gen_helper_##name(t0, cpu_env, t0); \
+ tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); \
+ tcg_temp_free_i32(t0); \
+}
+#define GEN_SPEFPUOP_CONV_32_64(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i64 t0 = tcg_temp_new_i64(); \
+ TCGv_i32 t1 = tcg_temp_new_i32(); \
+ gen_load_gpr64(t0, rB(ctx->opcode)); \
+ gen_helper_##name(t1, cpu_env, t0); \
+ tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1); \
+ tcg_temp_free_i64(t0); \
+ tcg_temp_free_i32(t1); \
+}
+#define GEN_SPEFPUOP_CONV_64_32(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i64 t0 = tcg_temp_new_i64(); \
+ TCGv_i32 t1 = tcg_temp_new_i32(); \
+ tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \
+ gen_helper_##name(t0, cpu_env, t1); \
+ gen_store_gpr64(rD(ctx->opcode), t0); \
+ tcg_temp_free_i64(t0); \
+ tcg_temp_free_i32(t1); \
+}
+#define GEN_SPEFPUOP_CONV_64_64(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i64 t0 = tcg_temp_new_i64(); \
+ gen_load_gpr64(t0, rB(ctx->opcode)); \
+ gen_helper_##name(t0, cpu_env, t0); \
+ gen_store_gpr64(rD(ctx->opcode), t0); \
+ tcg_temp_free_i64(t0); \
+}
+#define GEN_SPEFPUOP_ARITH2_32_32(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 t0, t1; \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_SPEU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i32(); \
+ t1 = tcg_temp_new_i32(); \
+ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); \
+ tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \
+ gen_helper_##name(t0, cpu_env, t0, t1); \
+ tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); \
+ \
+ tcg_temp_free_i32(t0); \
+ tcg_temp_free_i32(t1); \
+}
+#define GEN_SPEFPUOP_ARITH2_64_64(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i64 t0, t1; \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_SPEU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i64(); \
+ t1 = tcg_temp_new_i64(); \
+ gen_load_gpr64(t0, rA(ctx->opcode)); \
+ gen_load_gpr64(t1, rB(ctx->opcode)); \
+ gen_helper_##name(t0, cpu_env, t0, t1); \
+ gen_store_gpr64(rD(ctx->opcode), t0); \
+ tcg_temp_free_i64(t0); \
+ tcg_temp_free_i64(t1); \
+}
+#define GEN_SPEFPUOP_COMP_32(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 t0, t1; \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_SPEU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i32(); \
+ t1 = tcg_temp_new_i32(); \
+ \
+ tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); \
+ tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); \
+ gen_helper_##name(cpu_crf[crfD(ctx->opcode)], cpu_env, t0, t1); \
+ \
+ tcg_temp_free_i32(t0); \
+ tcg_temp_free_i32(t1); \
+}
+#define GEN_SPEFPUOP_COMP_64(name) \
+static inline void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i64 t0, t1; \
+ if (unlikely(!ctx->spe_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_SPEU); \
+ return; \
+ } \
+ t0 = tcg_temp_new_i64(); \
+ t1 = tcg_temp_new_i64(); \
+ gen_load_gpr64(t0, rA(ctx->opcode)); \
+ gen_load_gpr64(t1, rB(ctx->opcode)); \
+ gen_helper_##name(cpu_crf[crfD(ctx->opcode)], cpu_env, t0, t1); \
+ tcg_temp_free_i64(t0); \
+ tcg_temp_free_i64(t1); \
+}
+
+/* Single precision floating-point vector operations */
+/* Arithmetic */
+GEN_SPEFPUOP_ARITH2_64_64(evfsadd);
+GEN_SPEFPUOP_ARITH2_64_64(evfssub);
+GEN_SPEFPUOP_ARITH2_64_64(evfsmul);
+GEN_SPEFPUOP_ARITH2_64_64(evfsdiv);
+static inline void gen_evfsabs(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ tcg_gen_andi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+ ~0x80000000);
+ tcg_gen_andi_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)],
+ ~0x80000000);
+}
+static inline void gen_evfsnabs(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ tcg_gen_ori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+ 0x80000000);
+ tcg_gen_ori_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)],
+ 0x80000000);
+}
+static inline void gen_evfsneg(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ tcg_gen_xori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)],
+ 0x80000000);
+ tcg_gen_xori_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)],
+ 0x80000000);
+}
+
+/* Conversion */
+GEN_SPEFPUOP_CONV_64_64(evfscfui);
+GEN_SPEFPUOP_CONV_64_64(evfscfsi);
+GEN_SPEFPUOP_CONV_64_64(evfscfuf);
+GEN_SPEFPUOP_CONV_64_64(evfscfsf);
+GEN_SPEFPUOP_CONV_64_64(evfsctui);
+GEN_SPEFPUOP_CONV_64_64(evfsctsi);
+GEN_SPEFPUOP_CONV_64_64(evfsctuf);
+GEN_SPEFPUOP_CONV_64_64(evfsctsf);
+GEN_SPEFPUOP_CONV_64_64(evfsctuiz);
+GEN_SPEFPUOP_CONV_64_64(evfsctsiz);
+
+/* Comparison */
+GEN_SPEFPUOP_COMP_64(evfscmpgt);
+GEN_SPEFPUOP_COMP_64(evfscmplt);
+GEN_SPEFPUOP_COMP_64(evfscmpeq);
+GEN_SPEFPUOP_COMP_64(evfststgt);
+GEN_SPEFPUOP_COMP_64(evfststlt);
+GEN_SPEFPUOP_COMP_64(evfststeq);
+
+/* Opcodes definitions */
+GEN_SPE(evfsadd, evfssub, 0x00, 0x0A, 0x00000000, 0x00000000, PPC_SPE_SINGLE); //
+GEN_SPE(evfsabs, evfsnabs, 0x02, 0x0A, 0x0000F800, 0x0000F800, PPC_SPE_SINGLE); //
+GEN_SPE(evfsneg, speundef, 0x03, 0x0A, 0x0000F800, 0xFFFFFFFF, PPC_SPE_SINGLE); //
+GEN_SPE(evfsmul, evfsdiv, 0x04, 0x0A, 0x00000000, 0x00000000, PPC_SPE_SINGLE); //
+GEN_SPE(evfscmpgt, evfscmplt, 0x06, 0x0A, 0x00600000, 0x00600000, PPC_SPE_SINGLE); //
+GEN_SPE(evfscmpeq, speundef, 0x07, 0x0A, 0x00600000, 0xFFFFFFFF, PPC_SPE_SINGLE); //
+GEN_SPE(evfscfui, evfscfsi, 0x08, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(evfscfuf, evfscfsf, 0x09, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(evfsctui, evfsctsi, 0x0A, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(evfsctuf, evfsctsf, 0x0B, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(evfsctuiz, speundef, 0x0C, 0x0A, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE); //
+GEN_SPE(evfsctsiz, speundef, 0x0D, 0x0A, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE); //
+GEN_SPE(evfststgt, evfststlt, 0x0E, 0x0A, 0x00600000, 0x00600000, PPC_SPE_SINGLE); //
+GEN_SPE(evfststeq, speundef, 0x0F, 0x0A, 0x00600000, 0xFFFFFFFF, PPC_SPE_SINGLE); //
+
+/* Single precision floating-point operations */
+/* Arithmetic */
+GEN_SPEFPUOP_ARITH2_32_32(efsadd);
+GEN_SPEFPUOP_ARITH2_32_32(efssub);
+GEN_SPEFPUOP_ARITH2_32_32(efsmul);
+GEN_SPEFPUOP_ARITH2_32_32(efsdiv);
+static inline void gen_efsabs(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ tcg_gen_andi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], (target_long)~0x80000000LL);
+}
+static inline void gen_efsnabs(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ tcg_gen_ori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x80000000);
+}
+static inline void gen_efsneg(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ tcg_gen_xori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x80000000);
+}
+
+/* Conversion */
+GEN_SPEFPUOP_CONV_32_32(efscfui);
+GEN_SPEFPUOP_CONV_32_32(efscfsi);
+GEN_SPEFPUOP_CONV_32_32(efscfuf);
+GEN_SPEFPUOP_CONV_32_32(efscfsf);
+GEN_SPEFPUOP_CONV_32_32(efsctui);
+GEN_SPEFPUOP_CONV_32_32(efsctsi);
+GEN_SPEFPUOP_CONV_32_32(efsctuf);
+GEN_SPEFPUOP_CONV_32_32(efsctsf);
+GEN_SPEFPUOP_CONV_32_32(efsctuiz);
+GEN_SPEFPUOP_CONV_32_32(efsctsiz);
+GEN_SPEFPUOP_CONV_32_64(efscfd);
+
+/* Comparison */
+GEN_SPEFPUOP_COMP_32(efscmpgt);
+GEN_SPEFPUOP_COMP_32(efscmplt);
+GEN_SPEFPUOP_COMP_32(efscmpeq);
+GEN_SPEFPUOP_COMP_32(efststgt);
+GEN_SPEFPUOP_COMP_32(efststlt);
+GEN_SPEFPUOP_COMP_32(efststeq);
+
+/* Opcodes definitions */
+GEN_SPE(efsadd, efssub, 0x00, 0x0B, 0x00000000, 0x00000000, PPC_SPE_SINGLE); //
+GEN_SPE(efsabs, efsnabs, 0x02, 0x0B, 0x0000F800, 0x0000F800, PPC_SPE_SINGLE); //
+GEN_SPE(efsneg, speundef, 0x03, 0x0B, 0x0000F800, 0xFFFFFFFF, PPC_SPE_SINGLE); //
+GEN_SPE(efsmul, efsdiv, 0x04, 0x0B, 0x00000000, 0x00000000, PPC_SPE_SINGLE); //
+GEN_SPE(efscmpgt, efscmplt, 0x06, 0x0B, 0x00600000, 0x00600000, PPC_SPE_SINGLE); //
+GEN_SPE(efscmpeq, efscfd, 0x07, 0x0B, 0x00600000, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(efscfui, efscfsi, 0x08, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(efscfuf, efscfsf, 0x09, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(efsctui, efsctsi, 0x0A, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(efsctuf, efsctsf, 0x0B, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE); //
+GEN_SPE(efsctuiz, speundef, 0x0C, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE); //
+GEN_SPE(efsctsiz, speundef, 0x0D, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE); //
+GEN_SPE(efststgt, efststlt, 0x0E, 0x0B, 0x00600000, 0x00600000, PPC_SPE_SINGLE); //
+GEN_SPE(efststeq, speundef, 0x0F, 0x0B, 0x00600000, 0xFFFFFFFF, PPC_SPE_SINGLE); //
+
+/* Double precision floating-point operations */
+/* Arithmetic */
+GEN_SPEFPUOP_ARITH2_64_64(efdadd);
+GEN_SPEFPUOP_ARITH2_64_64(efdsub);
+GEN_SPEFPUOP_ARITH2_64_64(efdmul);
+GEN_SPEFPUOP_ARITH2_64_64(efddiv);
+static inline void gen_efdabs(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_andi_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)],
+ ~0x80000000);
+}
+static inline void gen_efdnabs(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_ori_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)],
+ 0x80000000);
+}
+static inline void gen_efdneg(DisasContext *ctx)
+{
+ if (unlikely(!ctx->spe_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_SPEU);
+ return;
+ }
+ tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]);
+ tcg_gen_xori_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rA(ctx->opcode)],
+ 0x80000000);
+}
+
+/* Conversion */
+GEN_SPEFPUOP_CONV_64_32(efdcfui);
+GEN_SPEFPUOP_CONV_64_32(efdcfsi);
+GEN_SPEFPUOP_CONV_64_32(efdcfuf);
+GEN_SPEFPUOP_CONV_64_32(efdcfsf);
+GEN_SPEFPUOP_CONV_32_64(efdctui);
+GEN_SPEFPUOP_CONV_32_64(efdctsi);
+GEN_SPEFPUOP_CONV_32_64(efdctuf);
+GEN_SPEFPUOP_CONV_32_64(efdctsf);
+GEN_SPEFPUOP_CONV_32_64(efdctuiz);
+GEN_SPEFPUOP_CONV_32_64(efdctsiz);
+GEN_SPEFPUOP_CONV_64_32(efdcfs);
+GEN_SPEFPUOP_CONV_64_64(efdcfuid);
+GEN_SPEFPUOP_CONV_64_64(efdcfsid);
+GEN_SPEFPUOP_CONV_64_64(efdctuidz);
+GEN_SPEFPUOP_CONV_64_64(efdctsidz);
+
+/* Comparison */
+GEN_SPEFPUOP_COMP_64(efdcmpgt);
+GEN_SPEFPUOP_COMP_64(efdcmplt);
+GEN_SPEFPUOP_COMP_64(efdcmpeq);
+GEN_SPEFPUOP_COMP_64(efdtstgt);
+GEN_SPEFPUOP_COMP_64(efdtstlt);
+GEN_SPEFPUOP_COMP_64(efdtsteq);
+
+/* Opcodes definitions */
+GEN_SPE(efdadd, efdsub, 0x10, 0x0B, 0x00000000, 0x00000000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdcfuid, efdcfsid, 0x11, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdabs, efdnabs, 0x12, 0x0B, 0x0000F800, 0x0000F800, PPC_SPE_DOUBLE); //
+GEN_SPE(efdneg, speundef, 0x13, 0x0B, 0x0000F800, 0xFFFFFFFF, PPC_SPE_DOUBLE); //
+GEN_SPE(efdmul, efddiv, 0x14, 0x0B, 0x00000000, 0x00000000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdctuidz, efdctsidz, 0x15, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdcmpgt, efdcmplt, 0x16, 0x0B, 0x00600000, 0x00600000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdcmpeq, efdcfs, 0x17, 0x0B, 0x00600000, 0x00180000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdcfui, efdcfsi, 0x18, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdcfuf, efdcfsf, 0x19, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdctui, efdctsi, 0x1A, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdctuf, efdctsf, 0x1B, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdctuiz, speundef, 0x1C, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_DOUBLE); //
+GEN_SPE(efdctsiz, speundef, 0x1D, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_DOUBLE); //
+GEN_SPE(efdtstgt, efdtstlt, 0x1E, 0x0B, 0x00600000, 0x00600000, PPC_SPE_DOUBLE); //
+GEN_SPE(efdtsteq, speundef, 0x1F, 0x0B, 0x00600000, 0xFFFFFFFF, PPC_SPE_DOUBLE); //
+
+#undef GEN_SPE
+#undef GEN_SPEOP_LDST
diff --git a/target/ppc/translate/spe-ops.inc.c b/target/ppc/translate/spe-ops.inc.c
new file mode 100644
index 0000000000..7efe8b8746
--- /dev/null
+++ b/target/ppc/translate/spe-ops.inc.c
@@ -0,0 +1,105 @@
+GEN_HANDLER2(evsel0, "evsel", 0x04, 0x1c, 0x09, 0x00000000, PPC_SPE),
+GEN_HANDLER2(evsel1, "evsel", 0x04, 0x1d, 0x09, 0x00000000, PPC_SPE),
+GEN_HANDLER2(evsel2, "evsel", 0x04, 0x1e, 0x09, 0x00000000, PPC_SPE),
+GEN_HANDLER2(evsel3, "evsel", 0x04, 0x1f, 0x09, 0x00000000, PPC_SPE),
+
+#define GEN_SPE(name0, name1, opc2, opc3, inval0, inval1, type) \
+ GEN_OPCODE_DUAL(name0##_##name1, 0x04, opc2, opc3, inval0, inval1, type, PPC_NONE)
+GEN_SPE(evaddw, speundef, 0x00, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE),
+GEN_SPE(evaddiw, speundef, 0x01, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE),
+GEN_SPE(evsubfw, speundef, 0x02, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE),
+GEN_SPE(evsubifw, speundef, 0x03, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE),
+GEN_SPE(evabs, evneg, 0x04, 0x08, 0x0000F800, 0x0000F800, PPC_SPE),
+GEN_SPE(evextsb, evextsh, 0x05, 0x08, 0x0000F800, 0x0000F800, PPC_SPE),
+GEN_SPE(evrndw, evcntlzw, 0x06, 0x08, 0x0000F800, 0x0000F800, PPC_SPE),
+GEN_SPE(evcntlsw, brinc, 0x07, 0x08, 0x0000F800, 0x00000000, PPC_SPE),
+GEN_SPE(evmra, speundef, 0x02, 0x13, 0x0000F800, 0xFFFFFFFF, PPC_SPE),
+GEN_SPE(speundef, evand, 0x08, 0x08, 0xFFFFFFFF, 0x00000000, PPC_SPE),
+GEN_SPE(evandc, speundef, 0x09, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE),
+GEN_SPE(evxor, evor, 0x0B, 0x08, 0x00000000, 0x00000000, PPC_SPE),
+GEN_SPE(evnor, eveqv, 0x0C, 0x08, 0x00000000, 0x00000000, PPC_SPE),
+GEN_SPE(evmwumi, evmwsmi, 0x0C, 0x11, 0x00000000, 0x00000000, PPC_SPE),
+GEN_SPE(evmwumia, evmwsmia, 0x1C, 0x11, 0x00000000, 0x00000000, PPC_SPE),
+GEN_SPE(evmwumiaa, evmwsmiaa, 0x0C, 0x15, 0x00000000, 0x00000000, PPC_SPE),
+GEN_SPE(speundef, evorc, 0x0D, 0x08, 0xFFFFFFFF, 0x00000000, PPC_SPE),
+GEN_SPE(evnand, speundef, 0x0F, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE),
+GEN_SPE(evsrwu, evsrws, 0x10, 0x08, 0x00000000, 0x00000000, PPC_SPE),
+GEN_SPE(evsrwiu, evsrwis, 0x11, 0x08, 0x00000000, 0x00000000, PPC_SPE),
+GEN_SPE(evslw, speundef, 0x12, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE),
+GEN_SPE(evslwi, speundef, 0x13, 0x08, 0x00000000, 0xFFFFFFFF, PPC_SPE),
+GEN_SPE(evrlw, evsplati, 0x14, 0x08, 0x00000000, 0x0000F800, PPC_SPE),
+GEN_SPE(evrlwi, evsplatfi, 0x15, 0x08, 0x00000000, 0x0000F800, PPC_SPE),
+GEN_SPE(evmergehi, evmergelo, 0x16, 0x08, 0x00000000, 0x00000000, PPC_SPE),
+GEN_SPE(evmergehilo, evmergelohi, 0x17, 0x08, 0x00000000, 0x00000000, PPC_SPE),
+GEN_SPE(evcmpgtu, evcmpgts, 0x18, 0x08, 0x00600000, 0x00600000, PPC_SPE),
+GEN_SPE(evcmpltu, evcmplts, 0x19, 0x08, 0x00600000, 0x00600000, PPC_SPE),
+GEN_SPE(evcmpeq, speundef, 0x1A, 0x08, 0x00600000, 0xFFFFFFFF, PPC_SPE),
+
+GEN_SPE(evfsadd, evfssub, 0x00, 0x0A, 0x00000000, 0x00000000, PPC_SPE_SINGLE),
+GEN_SPE(evfsabs, evfsnabs, 0x02, 0x0A, 0x0000F800, 0x0000F800, PPC_SPE_SINGLE),
+GEN_SPE(evfsneg, speundef, 0x03, 0x0A, 0x0000F800, 0xFFFFFFFF, PPC_SPE_SINGLE),
+GEN_SPE(evfsmul, evfsdiv, 0x04, 0x0A, 0x00000000, 0x00000000, PPC_SPE_SINGLE),
+GEN_SPE(evfscmpgt, evfscmplt, 0x06, 0x0A, 0x00600000, 0x00600000, PPC_SPE_SINGLE),
+GEN_SPE(evfscmpeq, speundef, 0x07, 0x0A, 0x00600000, 0xFFFFFFFF, PPC_SPE_SINGLE),
+GEN_SPE(evfscfui, evfscfsi, 0x08, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(evfscfuf, evfscfsf, 0x09, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(evfsctui, evfsctsi, 0x0A, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(evfsctuf, evfsctsf, 0x0B, 0x0A, 0x00180000, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(evfsctuiz, speundef, 0x0C, 0x0A, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE),
+GEN_SPE(evfsctsiz, speundef, 0x0D, 0x0A, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE),
+GEN_SPE(evfststgt, evfststlt, 0x0E, 0x0A, 0x00600000, 0x00600000, PPC_SPE_SINGLE),
+GEN_SPE(evfststeq, speundef, 0x0F, 0x0A, 0x00600000, 0xFFFFFFFF, PPC_SPE_SINGLE),
+
+GEN_SPE(efsadd, efssub, 0x00, 0x0B, 0x00000000, 0x00000000, PPC_SPE_SINGLE),
+GEN_SPE(efsabs, efsnabs, 0x02, 0x0B, 0x0000F800, 0x0000F800, PPC_SPE_SINGLE),
+GEN_SPE(efsneg, speundef, 0x03, 0x0B, 0x0000F800, 0xFFFFFFFF, PPC_SPE_SINGLE),
+GEN_SPE(efsmul, efsdiv, 0x04, 0x0B, 0x00000000, 0x00000000, PPC_SPE_SINGLE),
+GEN_SPE(efscmpgt, efscmplt, 0x06, 0x0B, 0x00600000, 0x00600000, PPC_SPE_SINGLE),
+GEN_SPE(efscmpeq, efscfd, 0x07, 0x0B, 0x00600000, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(efscfui, efscfsi, 0x08, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(efscfuf, efscfsf, 0x09, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(efsctui, efsctsi, 0x0A, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(efsctuf, efsctsf, 0x0B, 0x0B, 0x00180000, 0x00180000, PPC_SPE_SINGLE),
+GEN_SPE(efsctuiz, speundef, 0x0C, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE),
+GEN_SPE(efsctsiz, speundef, 0x0D, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_SINGLE),
+GEN_SPE(efststgt, efststlt, 0x0E, 0x0B, 0x00600000, 0x00600000, PPC_SPE_SINGLE),
+GEN_SPE(efststeq, speundef, 0x0F, 0x0B, 0x00600000, 0xFFFFFFFF, PPC_SPE_SINGLE),
+
+GEN_SPE(efdadd, efdsub, 0x10, 0x0B, 0x00000000, 0x00000000, PPC_SPE_DOUBLE),
+GEN_SPE(efdcfuid, efdcfsid, 0x11, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE),
+GEN_SPE(efdabs, efdnabs, 0x12, 0x0B, 0x0000F800, 0x0000F800, PPC_SPE_DOUBLE),
+GEN_SPE(efdneg, speundef, 0x13, 0x0B, 0x0000F800, 0xFFFFFFFF, PPC_SPE_DOUBLE),
+GEN_SPE(efdmul, efddiv, 0x14, 0x0B, 0x00000000, 0x00000000, PPC_SPE_DOUBLE),
+GEN_SPE(efdctuidz, efdctsidz, 0x15, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE),
+GEN_SPE(efdcmpgt, efdcmplt, 0x16, 0x0B, 0x00600000, 0x00600000, PPC_SPE_DOUBLE),
+GEN_SPE(efdcmpeq, efdcfs, 0x17, 0x0B, 0x00600000, 0x00180000, PPC_SPE_DOUBLE),
+GEN_SPE(efdcfui, efdcfsi, 0x18, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE),
+GEN_SPE(efdcfuf, efdcfsf, 0x19, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE),
+GEN_SPE(efdctui, efdctsi, 0x1A, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE),
+GEN_SPE(efdctuf, efdctsf, 0x1B, 0x0B, 0x00180000, 0x00180000, PPC_SPE_DOUBLE),
+GEN_SPE(efdctuiz, speundef, 0x1C, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_DOUBLE),
+GEN_SPE(efdctsiz, speundef, 0x1D, 0x0B, 0x00180000, 0xFFFFFFFF, PPC_SPE_DOUBLE),
+GEN_SPE(efdtstgt, efdtstlt, 0x1E, 0x0B, 0x00600000, 0x00600000, PPC_SPE_DOUBLE),
+GEN_SPE(efdtsteq, speundef, 0x1F, 0x0B, 0x00600000, 0xFFFFFFFF, PPC_SPE_DOUBLE),
+
+#define GEN_SPEOP_LDST(name, opc2, sh) \
+GEN_HANDLER(name, 0x04, opc2, 0x0C, 0x00000000, PPC_SPE)
+GEN_SPEOP_LDST(evldd, 0x00, 3),
+GEN_SPEOP_LDST(evldw, 0x01, 3),
+GEN_SPEOP_LDST(evldh, 0x02, 3),
+GEN_SPEOP_LDST(evlhhesplat, 0x04, 1),
+GEN_SPEOP_LDST(evlhhousplat, 0x06, 1),
+GEN_SPEOP_LDST(evlhhossplat, 0x07, 1),
+GEN_SPEOP_LDST(evlwhe, 0x08, 2),
+GEN_SPEOP_LDST(evlwhou, 0x0A, 2),
+GEN_SPEOP_LDST(evlwhos, 0x0B, 2),
+GEN_SPEOP_LDST(evlwwsplat, 0x0C, 2),
+GEN_SPEOP_LDST(evlwhsplat, 0x0E, 2),
+
+GEN_SPEOP_LDST(evstdd, 0x10, 3),
+GEN_SPEOP_LDST(evstdw, 0x11, 3),
+GEN_SPEOP_LDST(evstdh, 0x12, 3),
+GEN_SPEOP_LDST(evstwhe, 0x18, 2),
+GEN_SPEOP_LDST(evstwho, 0x1A, 2),
+GEN_SPEOP_LDST(evstwwe, 0x1C, 2),
+GEN_SPEOP_LDST(evstwwo, 0x1E, 2),
diff --git a/target/ppc/translate/vmx-impl.inc.c b/target/ppc/translate/vmx-impl.inc.c
new file mode 100644
index 0000000000..7143eb3a39
--- /dev/null
+++ b/target/ppc/translate/vmx-impl.inc.c
@@ -0,0 +1,1113 @@
+/*
+ * translate/vmx-impl.c
+ *
+ * Altivec/VMX translation
+ */
+
+/*** Altivec vector extension ***/
+/* Altivec register moves */
+
+static inline TCGv_ptr gen_avr_ptr(int reg)
+{
+ TCGv_ptr r = tcg_temp_new_ptr();
+ tcg_gen_addi_ptr(r, cpu_env, offsetof(CPUPPCState, avr[reg]));
+ return r;
+}
+
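+/* Full vector loads/stores force 16-byte EA alignment and are split into two
+ * 64-bit accesses whose order depends on the guest endianness. */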
+#define GEN_VR_LDX(name, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ tcg_gen_andi_tl(EA, EA, ~0xf); \
+    /* We only need to swap high and low halves. gen_qemu_ld64_i64 does the \
+       necessary 64-bit byteswap already. */                                 \
+ if (ctx->le_mode) { \
+ gen_qemu_ld64_i64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
+ tcg_gen_addi_tl(EA, EA, 8); \
+ gen_qemu_ld64_i64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
+ } else { \
+ gen_qemu_ld64_i64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
+ tcg_gen_addi_tl(EA, EA, 8); \
+ gen_qemu_ld64_i64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
+ } \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_VR_STX(name, opc2, opc3) \
+static void gen_st##name(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ tcg_gen_andi_tl(EA, EA, ~0xf); \
+    /* We only need to swap high and low halves. gen_qemu_st64_i64 does the \
+       necessary 64-bit byteswap already. */                                 \
+ if (ctx->le_mode) { \
+ gen_qemu_st64_i64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
+ tcg_gen_addi_tl(EA, EA, 8); \
+ gen_qemu_st64_i64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
+ } else { \
+ gen_qemu_st64_i64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
+ tcg_gen_addi_tl(EA, EA, 8); \
+ gen_qemu_st64_i64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
+ } \
+ tcg_temp_free(EA); \
+}
+
+#define GEN_VR_LVE(name, opc2, opc3, size) \
+static void gen_lve##name(DisasContext *ctx) \
+ { \
+ TCGv EA; \
+ TCGv_ptr rs; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ if (size > 1) { \
+ tcg_gen_andi_tl(EA, EA, ~(size - 1)); \
+ } \
+ rs = gen_avr_ptr(rS(ctx->opcode)); \
+ gen_helper_lve##name(cpu_env, rs, EA); \
+ tcg_temp_free(EA); \
+ tcg_temp_free_ptr(rs); \
+ }
+
+#define GEN_VR_STVE(name, opc2, opc3, size) \
+static void gen_stve##name(DisasContext *ctx) \
+ { \
+ TCGv EA; \
+ TCGv_ptr rs; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ if (size > 1) { \
+ tcg_gen_andi_tl(EA, EA, ~(size - 1)); \
+ } \
+ rs = gen_avr_ptr(rS(ctx->opcode)); \
+ gen_helper_stve##name(cpu_env, rs, EA); \
+ tcg_temp_free(EA); \
+ tcg_temp_free_ptr(rs); \
+ }
+
+GEN_VR_LDX(lvx, 0x07, 0x03);
+/* As we don't emulate the cache, lvxl is strictly equivalent to lvx */
+GEN_VR_LDX(lvxl, 0x07, 0x0B);
+
+GEN_VR_LVE(bx, 0x07, 0x00, 1);
+GEN_VR_LVE(hx, 0x07, 0x01, 2);
+GEN_VR_LVE(wx, 0x07, 0x02, 4);
+
+GEN_VR_STX(svx, 0x07, 0x07);
+/* As we don't emulate the cache, stvxl is strictly equivalent to stvx */
+GEN_VR_STX(svxl, 0x07, 0x0F);
+
+GEN_VR_STVE(bx, 0x07, 0x04, 1);
+GEN_VR_STVE(hx, 0x07, 0x05, 2);
+GEN_VR_STVE(wx, 0x07, 0x06, 4);
+
+static void gen_lvsl(DisasContext *ctx)
+{
+ TCGv_ptr rd;
+ TCGv EA;
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ rd = gen_avr_ptr(rD(ctx->opcode));
+ gen_helper_lvsl(rd, EA);
+ tcg_temp_free(EA);
+ tcg_temp_free_ptr(rd);
+}
+
+static void gen_lvsr(DisasContext *ctx)
+{
+ TCGv_ptr rd;
+ TCGv EA;
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ rd = gen_avr_ptr(rD(ctx->opcode));
+ gen_helper_lvsr(rd, EA);
+ tcg_temp_free(EA);
+ tcg_temp_free_ptr(rd);
+}
+
+static void gen_mfvscr(DisasContext *ctx)
+{
+ TCGv_i32 t;
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ tcg_gen_movi_i64(cpu_avrh[rD(ctx->opcode)], 0);
+ t = tcg_temp_new_i32();
+ tcg_gen_ld_i32(t, cpu_env, offsetof(CPUPPCState, vscr));
+ tcg_gen_extu_i32_i64(cpu_avrl[rD(ctx->opcode)], t);
+ tcg_temp_free_i32(t);
+}
+
+static void gen_mtvscr(DisasContext *ctx)
+{
+ TCGv_ptr p;
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ p = gen_avr_ptr(rB(ctx->opcode));
+ gen_helper_mtvscr(cpu_env, p);
+ tcg_temp_free_ptr(p);
+}
+
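+/* vmul10[e][c]uq: multiply the 128-bit value in vrA by ten using 64-bit
+ * mulu2/add2 steps; add_cin adds a carry-in digit from the low nibble of vrB,
+ * ret_carry returns the carry-out in the low doubleword of vrD instead. */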
+#define GEN_VX_VMUL10(name, add_cin, ret_carry) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv_i64 t0 = tcg_temp_new_i64(); \
+ TCGv_i64 t1 = tcg_temp_new_i64(); \
+ TCGv_i64 t2 = tcg_temp_new_i64(); \
+ TCGv_i64 ten, z; \
+ \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ \
+ ten = tcg_const_i64(10); \
+ z = tcg_const_i64(0); \
+ \
+ if (add_cin) { \
+ tcg_gen_mulu2_i64(t0, t1, cpu_avrl[rA(ctx->opcode)], ten); \
+ tcg_gen_andi_i64(t2, cpu_avrl[rB(ctx->opcode)], 0xF); \
+ tcg_gen_add2_i64(cpu_avrl[rD(ctx->opcode)], t2, t0, t1, t2, z); \
+ } else { \
+ tcg_gen_mulu2_i64(cpu_avrl[rD(ctx->opcode)], t2, \
+ cpu_avrl[rA(ctx->opcode)], ten); \
+ } \
+ \
+ if (ret_carry) { \
+ tcg_gen_mulu2_i64(t0, t1, cpu_avrh[rA(ctx->opcode)], ten); \
+ tcg_gen_add2_i64(t0, cpu_avrl[rD(ctx->opcode)], t0, t1, t2, z); \
+ tcg_gen_movi_i64(cpu_avrh[rD(ctx->opcode)], 0); \
+ } else { \
+ tcg_gen_mul_i64(t0, cpu_avrh[rA(ctx->opcode)], ten); \
+ tcg_gen_add_i64(cpu_avrh[rD(ctx->opcode)], t0, t2); \
+ } \
+ \
+ tcg_temp_free_i64(t0); \
+ tcg_temp_free_i64(t1); \
+ tcg_temp_free_i64(t2); \
+ tcg_temp_free_i64(ten); \
+ tcg_temp_free_i64(z); \
+} \
+
+GEN_VX_VMUL10(vmul10uq, 0, 0);
+GEN_VX_VMUL10(vmul10euq, 1, 0);
+GEN_VX_VMUL10(vmul10cuq, 0, 1);
+GEN_VX_VMUL10(vmul10ecuq, 1, 1);
+
+/* Logical operations */
+#define GEN_VX_LOGICAL(name, tcg_op, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ tcg_op(cpu_avrh[rD(ctx->opcode)], cpu_avrh[rA(ctx->opcode)], cpu_avrh[rB(ctx->opcode)]); \
+ tcg_op(cpu_avrl[rD(ctx->opcode)], cpu_avrl[rA(ctx->opcode)], cpu_avrl[rB(ctx->opcode)]); \
+}
+
+GEN_VX_LOGICAL(vand, tcg_gen_and_i64, 2, 16);
+GEN_VX_LOGICAL(vandc, tcg_gen_andc_i64, 2, 17);
+GEN_VX_LOGICAL(vor, tcg_gen_or_i64, 2, 18);
+GEN_VX_LOGICAL(vxor, tcg_gen_xor_i64, 2, 19);
+GEN_VX_LOGICAL(vnor, tcg_gen_nor_i64, 2, 20);
+GEN_VX_LOGICAL(veqv, tcg_gen_eqv_i64, 2, 26);
+GEN_VX_LOGICAL(vnand, tcg_gen_nand_i64, 2, 22);
+GEN_VX_LOGICAL(vorc, tcg_gen_orc_i64, 2, 21);
+
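+/* Generic VX-form wrappers: pass AVR register pointers to an out-of-line
+ * helper; the _ENV variant also passes cpu_env for helpers that need CPU
+ * state, and GEN_VXFORM3 adds a third source register (vrC). */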
+#define GEN_VXFORM(name, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv_ptr ra, rb, rd; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ ra = gen_avr_ptr(rA(ctx->opcode)); \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##name(rd, ra, rb); \
+ tcg_temp_free_ptr(ra); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+}
+
+#define GEN_VXFORM_ENV(name, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv_ptr ra, rb, rd; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ ra = gen_avr_ptr(rA(ctx->opcode)); \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##name(cpu_env, rd, ra, rb); \
+ tcg_temp_free_ptr(ra); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+}
+
+#define GEN_VXFORM3(name, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+{ \
+ TCGv_ptr ra, rb, rc, rd; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ ra = gen_avr_ptr(rA(ctx->opcode)); \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rc = gen_avr_ptr(rC(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##name(rd, ra, rb, rc); \
+ tcg_temp_free_ptr(ra); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rc); \
+ tcg_temp_free_ptr(rd); \
+}
+
+/*
+ * Support for Altivec instruction pairs that use bit 31 (Rc) as
+ * an opcode bit. In general, these pairs come from different
+ * versions of the ISA, so we must also support a pair of flags for
+ * each instruction.
+ */
+#define GEN_VXFORM_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1) \
+static void glue(gen_, name0##_##name1)(DisasContext *ctx) \
+{ \
+ if ((Rc(ctx->opcode) == 0) && \
+ ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \
+ gen_##name0(ctx); \
+ } else if ((Rc(ctx->opcode) == 1) && \
+ ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \
+ gen_##name1(ctx); \
+ } else { \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
+ } \
+}
+
+/* As GEN_VXFORM_DUAL, but each instruction also carries a mask of invalid opcode bits */
+#define GEN_VXFORM_DUAL_EXT(name0, flg0, flg2_0, inval0, \
+ name1, flg1, flg2_1, inval1) \
+static void glue(gen_, name0##_##name1)(DisasContext *ctx) \
+{ \
+ if ((Rc(ctx->opcode) == 0) && \
+ ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0)) && \
+ !(ctx->opcode & inval0)) { \
+ gen_##name0(ctx); \
+ } else if ((Rc(ctx->opcode) == 1) && \
+ ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1)) && \
+ !(ctx->opcode & inval1)) { \
+ gen_##name1(ctx); \
+ } else { \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
+ } \
+}
+
+GEN_VXFORM(vaddubm, 0, 0);
+GEN_VXFORM_DUAL_EXT(vaddubm, PPC_ALTIVEC, PPC_NONE, 0, \
+ vmul10cuq, PPC_NONE, PPC2_ISA300, 0x0000F800)
+GEN_VXFORM(vadduhm, 0, 1);
+GEN_VXFORM_DUAL(vadduhm, PPC_ALTIVEC, PPC_NONE, \
+ vmul10ecuq, PPC_NONE, PPC2_ISA300)
+GEN_VXFORM(vadduwm, 0, 2);
+GEN_VXFORM(vaddudm, 0, 3);
+GEN_VXFORM(vsububm, 0, 16);
+GEN_VXFORM(vsubuhm, 0, 17);
+GEN_VXFORM(vsubuwm, 0, 18);
+GEN_VXFORM(vsubudm, 0, 19);
+GEN_VXFORM(vmaxub, 1, 0);
+GEN_VXFORM(vmaxuh, 1, 1);
+GEN_VXFORM(vmaxuw, 1, 2);
+GEN_VXFORM(vmaxud, 1, 3);
+GEN_VXFORM(vmaxsb, 1, 4);
+GEN_VXFORM(vmaxsh, 1, 5);
+GEN_VXFORM(vmaxsw, 1, 6);
+GEN_VXFORM(vmaxsd, 1, 7);
+GEN_VXFORM(vminub, 1, 8);
+GEN_VXFORM(vminuh, 1, 9);
+GEN_VXFORM(vminuw, 1, 10);
+GEN_VXFORM(vminud, 1, 11);
+GEN_VXFORM(vminsb, 1, 12);
+GEN_VXFORM(vminsh, 1, 13);
+GEN_VXFORM(vminsw, 1, 14);
+GEN_VXFORM(vminsd, 1, 15);
+GEN_VXFORM(vavgub, 1, 16);
+GEN_VXFORM(vabsdub, 1, 16);
+GEN_VXFORM_DUAL(vavgub, PPC_ALTIVEC, PPC_NONE, \
+ vabsdub, PPC_NONE, PPC2_ISA300)
+GEN_VXFORM(vavguh, 1, 17);
+GEN_VXFORM(vabsduh, 1, 17);
+GEN_VXFORM_DUAL(vavguh, PPC_ALTIVEC, PPC_NONE, \
+ vabsduh, PPC_NONE, PPC2_ISA300)
+GEN_VXFORM(vavguw, 1, 18);
+GEN_VXFORM(vabsduw, 1, 18);
+GEN_VXFORM_DUAL(vavguw, PPC_ALTIVEC, PPC_NONE, \
+ vabsduw, PPC_NONE, PPC2_ISA300)
+GEN_VXFORM(vavgsb, 1, 20);
+GEN_VXFORM(vavgsh, 1, 21);
+GEN_VXFORM(vavgsw, 1, 22);
+GEN_VXFORM(vmrghb, 6, 0);
+GEN_VXFORM(vmrghh, 6, 1);
+GEN_VXFORM(vmrghw, 6, 2);
+GEN_VXFORM(vmrglb, 6, 4);
+GEN_VXFORM(vmrglh, 6, 5);
+GEN_VXFORM(vmrglw, 6, 6);
+
+static void gen_vmrgew(DisasContext *ctx)
+{
+ TCGv_i64 tmp;
+ int VT, VA, VB;
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ VT = rD(ctx->opcode);
+ VA = rA(ctx->opcode);
+ VB = rB(ctx->opcode);
+ tmp = tcg_temp_new_i64();
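+    /* vD = { vA.w0, vB.w0, vA.w2, vB.w2 }: merge the even
+       (big-endian numbered) words of vA and vB. */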
+ tcg_gen_shri_i64(tmp, cpu_avrh[VB], 32);
+ tcg_gen_deposit_i64(cpu_avrh[VT], cpu_avrh[VA], tmp, 0, 32);
+ tcg_gen_shri_i64(tmp, cpu_avrl[VB], 32);
+ tcg_gen_deposit_i64(cpu_avrl[VT], cpu_avrl[VA], tmp, 0, 32);
+ tcg_temp_free_i64(tmp);
+}
+
+static void gen_vmrgow(DisasContext *ctx)
+{
+ int VT, VA, VB;
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ VT = rD(ctx->opcode);
+ VA = rA(ctx->opcode);
+ VB = rB(ctx->opcode);
+
+ tcg_gen_deposit_i64(cpu_avrh[VT], cpu_avrh[VB], cpu_avrh[VA], 32, 32);
+ tcg_gen_deposit_i64(cpu_avrl[VT], cpu_avrl[VB], cpu_avrl[VA], 32, 32);
+}
+
+GEN_VXFORM(vmuloub, 4, 0);
+GEN_VXFORM(vmulouh, 4, 1);
+GEN_VXFORM(vmulouw, 4, 2);
+GEN_VXFORM(vmuluwm, 4, 2);
+GEN_VXFORM_DUAL(vmulouw, PPC_ALTIVEC, PPC_NONE,
+ vmuluwm, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXFORM(vmulosb, 4, 4);
+GEN_VXFORM(vmulosh, 4, 5);
+GEN_VXFORM(vmulosw, 4, 6);
+GEN_VXFORM(vmuleub, 4, 8);
+GEN_VXFORM(vmuleuh, 4, 9);
+GEN_VXFORM(vmuleuw, 4, 10);
+GEN_VXFORM(vmulesb, 4, 12);
+GEN_VXFORM(vmulesh, 4, 13);
+GEN_VXFORM(vmulesw, 4, 14);
+GEN_VXFORM(vslb, 2, 4);
+GEN_VXFORM(vslh, 2, 5);
+GEN_VXFORM(vslw, 2, 6);
+GEN_VXFORM(vrlwnm, 2, 6);
+GEN_VXFORM_DUAL(vslw, PPC_ALTIVEC, PPC_NONE, \
+ vrlwnm, PPC_NONE, PPC2_ISA300)
+GEN_VXFORM(vsld, 2, 23);
+GEN_VXFORM(vsrb, 2, 8);
+GEN_VXFORM(vsrh, 2, 9);
+GEN_VXFORM(vsrw, 2, 10);
+GEN_VXFORM(vsrd, 2, 27);
+GEN_VXFORM(vsrab, 2, 12);
+GEN_VXFORM(vsrah, 2, 13);
+GEN_VXFORM(vsraw, 2, 14);
+GEN_VXFORM(vsrad, 2, 15);
+GEN_VXFORM(vsrv, 2, 28);
+GEN_VXFORM(vslv, 2, 29);
+GEN_VXFORM(vslo, 6, 16);
+GEN_VXFORM(vsro, 6, 17);
+GEN_VXFORM(vaddcuw, 0, 6);
+GEN_VXFORM(vsubcuw, 0, 22);
+GEN_VXFORM_ENV(vaddubs, 0, 8);
+GEN_VXFORM_DUAL_EXT(vaddubs, PPC_ALTIVEC, PPC_NONE, 0, \
+ vmul10uq, PPC_NONE, PPC2_ISA300, 0x0000F800)
+GEN_VXFORM_ENV(vadduhs, 0, 9);
+GEN_VXFORM_DUAL(vadduhs, PPC_ALTIVEC, PPC_NONE, \
+ vmul10euq, PPC_NONE, PPC2_ISA300)
+GEN_VXFORM_ENV(vadduws, 0, 10);
+GEN_VXFORM_ENV(vaddsbs, 0, 12);
+GEN_VXFORM_ENV(vaddshs, 0, 13);
+GEN_VXFORM_ENV(vaddsws, 0, 14);
+GEN_VXFORM_ENV(vsububs, 0, 24);
+GEN_VXFORM_ENV(vsubuhs, 0, 25);
+GEN_VXFORM_ENV(vsubuws, 0, 26);
+GEN_VXFORM_ENV(vsubsbs, 0, 28);
+GEN_VXFORM_ENV(vsubshs, 0, 29);
+GEN_VXFORM_ENV(vsubsws, 0, 30);
+GEN_VXFORM(vadduqm, 0, 4);
+GEN_VXFORM(vaddcuq, 0, 5);
+GEN_VXFORM3(vaddeuqm, 30, 0);
+GEN_VXFORM3(vaddecuq, 30, 0);
+GEN_VXFORM_DUAL(vaddeuqm, PPC_NONE, PPC2_ALTIVEC_207, \
+ vaddecuq, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXFORM(vsubuqm, 0, 20);
+GEN_VXFORM(vsubcuq, 0, 21);
+GEN_VXFORM3(vsubeuqm, 31, 0);
+GEN_VXFORM3(vsubecuq, 31, 0);
+GEN_VXFORM_DUAL(vsubeuqm, PPC_NONE, PPC2_ALTIVEC_207, \
+ vsubecuq, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXFORM(vrlb, 2, 0);
+GEN_VXFORM(vrlh, 2, 1);
+GEN_VXFORM(vrlw, 2, 2);
+GEN_VXFORM(vrlwmi, 2, 2);
+GEN_VXFORM_DUAL(vrlw, PPC_ALTIVEC, PPC_NONE, \
+ vrlwmi, PPC_NONE, PPC2_ISA300)
+GEN_VXFORM(vrld, 2, 3);
+GEN_VXFORM(vrldmi, 2, 3);
+GEN_VXFORM_DUAL(vrld, PPC_NONE, PPC2_ALTIVEC_207, \
+ vrldmi, PPC_NONE, PPC2_ISA300)
+GEN_VXFORM(vsl, 2, 7);
+GEN_VXFORM(vrldnm, 2, 7);
+GEN_VXFORM_DUAL(vsl, PPC_ALTIVEC, PPC_NONE, \
+ vrldnm, PPC_NONE, PPC2_ISA300)
+GEN_VXFORM(vsr, 2, 11);
+GEN_VXFORM_ENV(vpkuhum, 7, 0);
+GEN_VXFORM_ENV(vpkuwum, 7, 1);
+GEN_VXFORM_ENV(vpkudum, 7, 17);
+GEN_VXFORM_ENV(vpkuhus, 7, 2);
+GEN_VXFORM_ENV(vpkuwus, 7, 3);
+GEN_VXFORM_ENV(vpkudus, 7, 19);
+GEN_VXFORM_ENV(vpkshus, 7, 4);
+GEN_VXFORM_ENV(vpkswus, 7, 5);
+GEN_VXFORM_ENV(vpksdus, 7, 21);
+GEN_VXFORM_ENV(vpkshss, 7, 6);
+GEN_VXFORM_ENV(vpkswss, 7, 7);
+GEN_VXFORM_ENV(vpksdss, 7, 23);
+GEN_VXFORM(vpkpx, 7, 12);
+GEN_VXFORM_ENV(vsum4ubs, 4, 24);
+GEN_VXFORM_ENV(vsum4sbs, 4, 28);
+GEN_VXFORM_ENV(vsum4shs, 4, 25);
+GEN_VXFORM_ENV(vsum2sws, 4, 26);
+GEN_VXFORM_ENV(vsumsws, 4, 30);
+GEN_VXFORM_ENV(vaddfp, 5, 0);
+GEN_VXFORM_ENV(vsubfp, 5, 1);
+GEN_VXFORM_ENV(vmaxfp, 5, 16);
+GEN_VXFORM_ENV(vminfp, 5, 17);
+
+#define GEN_VXRFORM1(opname, name, str, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ TCGv_ptr ra, rb, rd; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ ra = gen_avr_ptr(rA(ctx->opcode)); \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##opname(cpu_env, rd, ra, rb); \
+ tcg_temp_free_ptr(ra); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+ }
+
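+/* Generate both the base handler and the record (".") handler for each
+   VXR-form instruction; the record form's generated name carries a
+   trailing underscore. */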
+#define GEN_VXRFORM(name, opc2, opc3) \
+ GEN_VXRFORM1(name, name, #name, opc2, opc3) \
+ GEN_VXRFORM1(name##_dot, name##_, #name ".", opc2, (opc3 | (0x1 << 4)))
+
+/*
+ * Support for Altivec instructions that use bit 31 (Rc) as an opcode
+ * bit but also use bit 21 as an actual Rc bit. In general, these pairs
+ * come from different versions of the ISA, so we must also support a
+ * pair of flags for each instruction.
+ */
+#define GEN_VXRFORM_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1) \
+static void glue(gen_, name0##_##name1)(DisasContext *ctx) \
+{ \
+ if ((Rc(ctx->opcode) == 0) && \
+ ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \
+ if (Rc21(ctx->opcode) == 0) { \
+ gen_##name0(ctx); \
+ } else { \
+ gen_##name0##_(ctx); \
+ } \
+ } else if ((Rc(ctx->opcode) == 1) && \
+ ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \
+ if (Rc21(ctx->opcode) == 0) { \
+ gen_##name1(ctx); \
+ } else { \
+ gen_##name1##_(ctx); \
+ } \
+ } else { \
+ gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \
+ } \
+}
+
+GEN_VXRFORM(vcmpequb, 3, 0)
+GEN_VXRFORM(vcmpequh, 3, 1)
+GEN_VXRFORM(vcmpequw, 3, 2)
+GEN_VXRFORM(vcmpequd, 3, 3)
+GEN_VXRFORM(vcmpnezb, 3, 4)
+GEN_VXRFORM(vcmpnezh, 3, 5)
+GEN_VXRFORM(vcmpnezw, 3, 6)
+GEN_VXRFORM(vcmpgtsb, 3, 12)
+GEN_VXRFORM(vcmpgtsh, 3, 13)
+GEN_VXRFORM(vcmpgtsw, 3, 14)
+GEN_VXRFORM(vcmpgtsd, 3, 15)
+GEN_VXRFORM(vcmpgtub, 3, 8)
+GEN_VXRFORM(vcmpgtuh, 3, 9)
+GEN_VXRFORM(vcmpgtuw, 3, 10)
+GEN_VXRFORM(vcmpgtud, 3, 11)
+GEN_VXRFORM(vcmpeqfp, 3, 3)
+GEN_VXRFORM(vcmpgefp, 3, 7)
+GEN_VXRFORM(vcmpgtfp, 3, 11)
+GEN_VXRFORM(vcmpbfp, 3, 15)
+GEN_VXRFORM(vcmpneb, 3, 0)
+GEN_VXRFORM(vcmpneh, 3, 1)
+GEN_VXRFORM(vcmpnew, 3, 2)
+
+GEN_VXRFORM_DUAL(vcmpequb, PPC_ALTIVEC, PPC_NONE, \
+ vcmpneb, PPC_NONE, PPC2_ISA300)
+GEN_VXRFORM_DUAL(vcmpequh, PPC_ALTIVEC, PPC_NONE, \
+ vcmpneh, PPC_NONE, PPC2_ISA300)
+GEN_VXRFORM_DUAL(vcmpequw, PPC_ALTIVEC, PPC_NONE, \
+ vcmpnew, PPC_NONE, PPC2_ISA300)
+GEN_VXRFORM_DUAL(vcmpeqfp, PPC_ALTIVEC, PPC_NONE, \
+ vcmpequd, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXRFORM_DUAL(vcmpbfp, PPC_ALTIVEC, PPC_NONE, \
+ vcmpgtsd, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXRFORM_DUAL(vcmpgtfp, PPC_ALTIVEC, PPC_NONE, \
+ vcmpgtud, PPC_NONE, PPC2_ALTIVEC_207)
+
+#define GEN_VXFORM_SIMM(name, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ TCGv_ptr rd; \
+ TCGv_i32 simm; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ simm = tcg_const_i32(SIMM5(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##name(rd, simm); \
+ tcg_temp_free_i32(simm); \
+ tcg_temp_free_ptr(rd); \
+ }
+
+GEN_VXFORM_SIMM(vspltisb, 6, 12);
+GEN_VXFORM_SIMM(vspltish, 6, 13);
+GEN_VXFORM_SIMM(vspltisw, 6, 14);
+
+#define GEN_VXFORM_NOA(name, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ TCGv_ptr rb, rd; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##name(rd, rb); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+ }
+
+#define GEN_VXFORM_NOA_ENV(name, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ TCGv_ptr rb, rd; \
+ \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##name(cpu_env, rd, rb); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+ }
+
+#define GEN_VXFORM_NOA_2(name, opc2, opc3, opc4) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ TCGv_ptr rb, rd; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##name(rd, rb); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+ }
+
+#define GEN_VXFORM_NOA_3(name, opc2, opc3, opc4) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ TCGv_ptr rb; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ gen_helper_##name(cpu_gpr[rD(ctx->opcode)], rb); \
+ tcg_temp_free_ptr(rb); \
+ }
+GEN_VXFORM_NOA(vupkhsb, 7, 8);
+GEN_VXFORM_NOA(vupkhsh, 7, 9);
+GEN_VXFORM_NOA(vupkhsw, 7, 25);
+GEN_VXFORM_NOA(vupklsb, 7, 10);
+GEN_VXFORM_NOA(vupklsh, 7, 11);
+GEN_VXFORM_NOA(vupklsw, 7, 27);
+GEN_VXFORM_NOA(vupkhpx, 7, 13);
+GEN_VXFORM_NOA(vupklpx, 7, 15);
+GEN_VXFORM_NOA_ENV(vrefp, 5, 4);
+GEN_VXFORM_NOA_ENV(vrsqrtefp, 5, 5);
+GEN_VXFORM_NOA_ENV(vexptefp, 5, 6);
+GEN_VXFORM_NOA_ENV(vlogefp, 5, 7);
+GEN_VXFORM_NOA_ENV(vrfim, 5, 11);
+GEN_VXFORM_NOA_ENV(vrfin, 5, 8);
+GEN_VXFORM_NOA_ENV(vrfip, 5, 10);
+GEN_VXFORM_NOA_ENV(vrfiz, 5, 9);
+GEN_VXFORM_NOA(vprtybw, 1, 24);
+GEN_VXFORM_NOA(vprtybd, 1, 24);
+GEN_VXFORM_NOA(vprtybq, 1, 24);
+
+#define GEN_VXFORM_UIMM(name, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ TCGv_ptr rb, rd; \
+ TCGv_i32 uimm; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ uimm = tcg_const_i32(UIMM5(ctx->opcode)); \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##name(rd, rb, uimm); \
+ tcg_temp_free_i32(uimm); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+ }
+
+#define GEN_VXFORM_UIMM_ENV(name, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ TCGv_ptr rb, rd; \
+ TCGv_i32 uimm; \
+ \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ uimm = tcg_const_i32(UIMM5(ctx->opcode)); \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##name(cpu_env, rd, rb, uimm); \
+ tcg_temp_free_i32(uimm); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+ }
+
+#define GEN_VXFORM_UIMM_SPLAT(name, opc2, opc3, splat_max) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ TCGv_ptr rb, rd; \
+ uint8_t uimm = UIMM4(ctx->opcode); \
+ TCGv_i32 t0; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ if (uimm > splat_max) { \
+ uimm = 0; \
+ } \
+ t0 = tcg_temp_new_i32(); \
+ tcg_gen_movi_i32(t0, uimm); \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ gen_helper_##name(rd, rb, t0); \
+ tcg_temp_free_i32(t0); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+ }
+
+GEN_VXFORM_UIMM(vspltb, 6, 8);
+GEN_VXFORM_UIMM(vsplth, 6, 9);
+GEN_VXFORM_UIMM(vspltw, 6, 10);
+GEN_VXFORM_UIMM_SPLAT(vextractub, 6, 8, 15);
+GEN_VXFORM_UIMM_SPLAT(vextractuh, 6, 9, 14);
+GEN_VXFORM_UIMM_SPLAT(vextractuw, 6, 10, 12);
+GEN_VXFORM_UIMM_SPLAT(vextractd, 6, 11, 8);
+GEN_VXFORM_UIMM_SPLAT(vinsertb, 6, 12, 15);
+GEN_VXFORM_UIMM_SPLAT(vinserth, 6, 13, 14);
+GEN_VXFORM_UIMM_SPLAT(vinsertw, 6, 14, 12);
+GEN_VXFORM_UIMM_SPLAT(vinsertd, 6, 15, 8);
+GEN_VXFORM_UIMM_ENV(vcfux, 5, 12);
+GEN_VXFORM_UIMM_ENV(vcfsx, 5, 13);
+GEN_VXFORM_UIMM_ENV(vctuxs, 5, 14);
+GEN_VXFORM_UIMM_ENV(vctsxs, 5, 15);
+GEN_VXFORM_DUAL(vspltb, PPC_ALTIVEC, PPC_NONE,
+ vextractub, PPC_NONE, PPC2_ISA300);
+GEN_VXFORM_DUAL(vsplth, PPC_ALTIVEC, PPC_NONE,
+ vextractuh, PPC_NONE, PPC2_ISA300);
+GEN_VXFORM_DUAL(vspltw, PPC_ALTIVEC, PPC_NONE,
+ vextractuw, PPC_NONE, PPC2_ISA300);
+GEN_VXFORM_DUAL(vspltisb, PPC_ALTIVEC, PPC_NONE,
+ vinsertb, PPC_NONE, PPC2_ISA300);
+GEN_VXFORM_DUAL(vspltish, PPC_ALTIVEC, PPC_NONE,
+ vinserth, PPC_NONE, PPC2_ISA300);
+GEN_VXFORM_DUAL(vspltisw, PPC_ALTIVEC, PPC_NONE,
+ vinsertw, PPC_NONE, PPC2_ISA300);
+
+static void gen_vsldoi(DisasContext *ctx)
+{
+ TCGv_ptr ra, rb, rd;
+ TCGv_i32 sh;
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ ra = gen_avr_ptr(rA(ctx->opcode));
+ rb = gen_avr_ptr(rB(ctx->opcode));
+ rd = gen_avr_ptr(rD(ctx->opcode));
+ sh = tcg_const_i32(VSH(ctx->opcode));
+ gen_helper_vsldoi(rd, ra, rb, sh);
+ tcg_temp_free_ptr(ra);
+ tcg_temp_free_ptr(rb);
+ tcg_temp_free_ptr(rd);
+ tcg_temp_free_i32(sh);
+}
+
+#define GEN_VAFORM_PAIRED(name0, name1, opc2) \
+static void glue(gen_, name0##_##name1)(DisasContext *ctx) \
+ { \
+ TCGv_ptr ra, rb, rc, rd; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ ra = gen_avr_ptr(rA(ctx->opcode)); \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rc = gen_avr_ptr(rC(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ if (Rc(ctx->opcode)) { \
+ gen_helper_##name1(cpu_env, rd, ra, rb, rc); \
+ } else { \
+ gen_helper_##name0(cpu_env, rd, ra, rb, rc); \
+ } \
+ tcg_temp_free_ptr(ra); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rc); \
+ tcg_temp_free_ptr(rd); \
+ }
+
+GEN_VAFORM_PAIRED(vmhaddshs, vmhraddshs, 16)
+
+static void gen_vmladduhm(DisasContext *ctx)
+{
+ TCGv_ptr ra, rb, rc, rd;
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ ra = gen_avr_ptr(rA(ctx->opcode));
+ rb = gen_avr_ptr(rB(ctx->opcode));
+ rc = gen_avr_ptr(rC(ctx->opcode));
+ rd = gen_avr_ptr(rD(ctx->opcode));
+ gen_helper_vmladduhm(rd, ra, rb, rc);
+ tcg_temp_free_ptr(ra);
+ tcg_temp_free_ptr(rb);
+ tcg_temp_free_ptr(rc);
+ tcg_temp_free_ptr(rd);
+}
+
+static void gen_vpermr(DisasContext *ctx)
+{
+ TCGv_ptr ra, rb, rc, rd;
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ ra = gen_avr_ptr(rA(ctx->opcode));
+ rb = gen_avr_ptr(rB(ctx->opcode));
+ rc = gen_avr_ptr(rC(ctx->opcode));
+ rd = gen_avr_ptr(rD(ctx->opcode));
+ gen_helper_vpermr(cpu_env, rd, ra, rb, rc);
+ tcg_temp_free_ptr(ra);
+ tcg_temp_free_ptr(rb);
+ tcg_temp_free_ptr(rc);
+ tcg_temp_free_ptr(rd);
+}
+
+GEN_VAFORM_PAIRED(vmsumubm, vmsummbm, 18)
+GEN_VAFORM_PAIRED(vmsumuhm, vmsumuhs, 19)
+GEN_VAFORM_PAIRED(vmsumshm, vmsumshs, 20)
+GEN_VAFORM_PAIRED(vsel, vperm, 21)
+GEN_VAFORM_PAIRED(vmaddfp, vnmsubfp, 23)
+
+GEN_VXFORM_NOA(vclzb, 1, 28)
+GEN_VXFORM_NOA(vclzh, 1, 29)
+GEN_VXFORM_NOA(vclzw, 1, 30)
+GEN_VXFORM_NOA(vclzd, 1, 31)
+GEN_VXFORM_NOA_2(vnegw, 1, 24, 6)
+GEN_VXFORM_NOA_2(vnegd, 1, 24, 7)
+GEN_VXFORM_NOA_2(vextsb2w, 1, 24, 16)
+GEN_VXFORM_NOA_2(vextsh2w, 1, 24, 17)
+GEN_VXFORM_NOA_2(vextsb2d, 1, 24, 24)
+GEN_VXFORM_NOA_2(vextsh2d, 1, 24, 25)
+GEN_VXFORM_NOA_2(vextsw2d, 1, 24, 26)
+GEN_VXFORM_NOA_2(vctzb, 1, 24, 28)
+GEN_VXFORM_NOA_2(vctzh, 1, 24, 29)
+GEN_VXFORM_NOA_2(vctzw, 1, 24, 30)
+GEN_VXFORM_NOA_2(vctzd, 1, 24, 31)
+GEN_VXFORM_NOA_3(vclzlsbb, 1, 24, 0)
+GEN_VXFORM_NOA_3(vctzlsbb, 1, 24, 1)
+GEN_VXFORM_NOA(vpopcntb, 1, 28)
+GEN_VXFORM_NOA(vpopcnth, 1, 29)
+GEN_VXFORM_NOA(vpopcntw, 1, 30)
+GEN_VXFORM_NOA(vpopcntd, 1, 31)
+GEN_VXFORM_DUAL(vclzb, PPC_NONE, PPC2_ALTIVEC_207, \
+ vpopcntb, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXFORM_DUAL(vclzh, PPC_NONE, PPC2_ALTIVEC_207, \
+ vpopcnth, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXFORM_DUAL(vclzw, PPC_NONE, PPC2_ALTIVEC_207, \
+ vpopcntw, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXFORM_DUAL(vclzd, PPC_NONE, PPC2_ALTIVEC_207, \
+ vpopcntd, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXFORM(vbpermd, 6, 23);
+GEN_VXFORM(vbpermq, 6, 21);
+GEN_VXFORM_NOA(vgbbd, 6, 20);
+GEN_VXFORM(vpmsumb, 4, 16)
+GEN_VXFORM(vpmsumh, 4, 17)
+GEN_VXFORM(vpmsumw, 4, 18)
+GEN_VXFORM(vpmsumd, 4, 19)
+
+#define GEN_BCD(op) \
+static void gen_##op(DisasContext *ctx) \
+{ \
+ TCGv_ptr ra, rb, rd; \
+ TCGv_i32 ps; \
+ \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ \
+ ra = gen_avr_ptr(rA(ctx->opcode)); \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ \
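+ /* The 0x200 opcode bit is the PS (preferred sign) field; CR field 6 receives the status returned by the helper. */ \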
+ ps = tcg_const_i32((ctx->opcode & 0x200) != 0); \
+ \
+ gen_helper_##op(cpu_crf[6], rd, ra, rb, ps); \
+ \
+ tcg_temp_free_ptr(ra); \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+ tcg_temp_free_i32(ps); \
+}
+
+#define GEN_BCD2(op) \
+static void gen_##op(DisasContext *ctx) \
+{ \
+ TCGv_ptr rd, rb; \
+ TCGv_i32 ps; \
+ \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ \
+ rb = gen_avr_ptr(rB(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ \
+ ps = tcg_const_i32((ctx->opcode & 0x200) != 0); \
+ \
+ gen_helper_##op(cpu_crf[6], rd, rb, ps); \
+ \
+ tcg_temp_free_ptr(rb); \
+ tcg_temp_free_ptr(rd); \
+ tcg_temp_free_i32(ps); \
+}
+
+GEN_BCD(bcdadd)
+GEN_BCD(bcdsub)
+GEN_BCD2(bcdcfn)
+GEN_BCD2(bcdctn)
+GEN_BCD2(bcdcfz)
+GEN_BCD2(bcdctz)
+
+static void gen_xpnd04_1(DisasContext *ctx)
+{
+ switch (opc4(ctx->opcode)) {
+ case 4:
+ gen_bcdctz(ctx);
+ break;
+ case 5:
+ gen_bcdctn(ctx);
+ break;
+ case 6:
+ gen_bcdcfz(ctx);
+ break;
+ case 7:
+ gen_bcdcfn(ctx);
+ break;
+ default:
+ gen_invalid(ctx);
+ break;
+ }
+}
+
+static void gen_xpnd04_2(DisasContext *ctx)
+{
+ switch (opc4(ctx->opcode)) {
+ case 4:
+ gen_bcdctz(ctx);
+ break;
+ case 6:
+ gen_bcdcfz(ctx);
+ break;
+ case 7:
+ gen_bcdcfn(ctx);
+ break;
+ default:
+ gen_invalid(ctx);
+ break;
+ }
+}
+
+GEN_VXFORM_DUAL(vsubcuw, PPC_ALTIVEC, PPC_NONE, \
+ xpnd04_1, PPC_NONE, PPC2_ISA300)
+GEN_VXFORM_DUAL(vsubsws, PPC_ALTIVEC, PPC_NONE, \
+ xpnd04_2, PPC_NONE, PPC2_ISA300)
+
+GEN_VXFORM_DUAL(vsububm, PPC_ALTIVEC, PPC_NONE, \
+ bcdadd, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXFORM_DUAL(vsububs, PPC_ALTIVEC, PPC_NONE, \
+ bcdadd, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXFORM_DUAL(vsubuhm, PPC_ALTIVEC, PPC_NONE, \
+ bcdsub, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXFORM_DUAL(vsubuhs, PPC_ALTIVEC, PPC_NONE, \
+ bcdsub, PPC_NONE, PPC2_ALTIVEC_207)
+
+static void gen_vsbox(DisasContext *ctx)
+{
+ TCGv_ptr ra, rd;
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ ra = gen_avr_ptr(rA(ctx->opcode));
+ rd = gen_avr_ptr(rD(ctx->opcode));
+ gen_helper_vsbox(rd, ra);
+ tcg_temp_free_ptr(ra);
+ tcg_temp_free_ptr(rd);
+}
+
+GEN_VXFORM(vcipher, 4, 20)
+GEN_VXFORM(vcipherlast, 4, 20)
+GEN_VXFORM(vncipher, 4, 21)
+GEN_VXFORM(vncipherlast, 4, 21)
+
+GEN_VXFORM_DUAL(vcipher, PPC_NONE, PPC2_ALTIVEC_207,
+ vcipherlast, PPC_NONE, PPC2_ALTIVEC_207)
+GEN_VXFORM_DUAL(vncipher, PPC_NONE, PPC2_ALTIVEC_207,
+ vncipherlast, PPC_NONE, PPC2_ALTIVEC_207)
+
+#define VSHASIGMA(op) \
+static void gen_##op(DisasContext *ctx) \
+{ \
+ TCGv_ptr ra, rd; \
+ TCGv_i32 st_six; \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ ra = gen_avr_ptr(rA(ctx->opcode)); \
+ rd = gen_avr_ptr(rD(ctx->opcode)); \
+ st_six = tcg_const_i32(rB(ctx->opcode)); \
+ gen_helper_##op(rd, ra, st_six); \
+ tcg_temp_free_ptr(ra); \
+ tcg_temp_free_ptr(rd); \
+ tcg_temp_free_i32(st_six); \
+}
+
+VSHASIGMA(vshasigmaw)
+VSHASIGMA(vshasigmad)
+
+GEN_VXFORM3(vpermxor, 22, 0xFF)
+GEN_VXFORM_DUAL(vsldoi, PPC_ALTIVEC, PPC_NONE,
+ vpermxor, PPC_NONE, PPC2_ALTIVEC_207)
+
+#undef GEN_VR_LDX
+#undef GEN_VR_STX
+#undef GEN_VR_LVE
+#undef GEN_VR_STVE
+
+#undef GEN_VX_LOGICAL
+#undef GEN_VX_LOGICAL_207
+#undef GEN_VXFORM
+#undef GEN_VXFORM_207
+#undef GEN_VXFORM_DUAL
+#undef GEN_VXRFORM_DUAL
+#undef GEN_VXRFORM1
+#undef GEN_VXRFORM
+#undef GEN_VXFORM_SIMM
+#undef GEN_VXFORM_NOA
+#undef GEN_VXFORM_UIMM
+#undef GEN_VAFORM_PAIRED
+
+#undef GEN_BCD2
diff --git a/target/ppc/translate/vmx-ops.inc.c b/target/ppc/translate/vmx-ops.inc.c
new file mode 100644
index 0000000000..f02b3bed50
--- /dev/null
+++ b/target/ppc/translate/vmx-ops.inc.c
@@ -0,0 +1,294 @@
+#define GEN_VR_LDX(name, opc2, opc3) \
+GEN_HANDLER(name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC)
+#define GEN_VR_STX(name, opc2, opc3) \
+GEN_HANDLER(st##name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC)
+#define GEN_VR_LVE(name, opc2, opc3) \
+ GEN_HANDLER(lve##name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC)
+#define GEN_VR_STVE(name, opc2, opc3) \
+ GEN_HANDLER(stve##name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC)
+GEN_VR_LDX(lvx, 0x07, 0x03),
+GEN_VR_LDX(lvxl, 0x07, 0x0B),
+GEN_VR_LVE(bx, 0x07, 0x00),
+GEN_VR_LVE(hx, 0x07, 0x01),
+GEN_VR_LVE(wx, 0x07, 0x02),
+GEN_VR_STX(svx, 0x07, 0x07),
+GEN_VR_STX(svxl, 0x07, 0x0F),
+GEN_VR_STVE(bx, 0x07, 0x04),
+GEN_VR_STVE(hx, 0x07, 0x05),
+GEN_VR_STVE(wx, 0x07, 0x06),
+
+#define GEN_VX_LOGICAL(name, tcg_op, opc2, opc3) \
+GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_ALTIVEC)
+
+#define GEN_VX_LOGICAL_207(name, tcg_op, opc2, opc3) \
+GEN_HANDLER_E(name, 0x04, opc2, opc3, 0x00000000, PPC_NONE, PPC2_ALTIVEC_207)
+
+GEN_VX_LOGICAL(vand, tcg_gen_and_i64, 2, 16),
+GEN_VX_LOGICAL(vandc, tcg_gen_andc_i64, 2, 17),
+GEN_VX_LOGICAL(vor, tcg_gen_or_i64, 2, 18),
+GEN_VX_LOGICAL(vxor, tcg_gen_xor_i64, 2, 19),
+GEN_VX_LOGICAL(vnor, tcg_gen_nor_i64, 2, 20),
+GEN_VX_LOGICAL_207(veqv, tcg_gen_eqv_i64, 2, 26),
+GEN_VX_LOGICAL_207(vnand, tcg_gen_nand_i64, 2, 22),
+GEN_VX_LOGICAL_207(vorc, tcg_gen_orc_i64, 2, 21),
+
+#define GEN_VXFORM(name, opc2, opc3) \
+GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_ALTIVEC)
+
+#define GEN_VXFORM_207(name, opc2, opc3) \
+GEN_HANDLER_E(name, 0x04, opc2, opc3, 0x00000000, PPC_NONE, PPC2_ALTIVEC_207)
+
+#define GEN_VXFORM_300(name, opc2, opc3) \
+GEN_HANDLER_E(name, 0x04, opc2, opc3, 0x00000000, PPC_NONE, PPC2_ISA300)
+
+#define GEN_VXFORM_300_EXT(name, opc2, opc3, inval) \
+GEN_HANDLER_E(name, 0x04, opc2, opc3, inval, PPC_NONE, PPC2_ISA300)
+
+#define GEN_VXFORM_300_EO(name, opc2, opc3, opc4) \
+GEN_HANDLER_E_2(name, 0x04, opc2, opc3, opc4, 0x00000000, PPC_NONE, \
+ PPC2_ISA300)
+
+#define GEN_VXFORM_DUAL(name0, name1, opc2, opc3, type0, type1) \
+GEN_HANDLER_E(name0##_##name1, 0x4, opc2, opc3, 0x00000000, type0, type1)
+
+#define GEN_VXRFORM_DUAL(name0, name1, opc2, opc3, tp0, tp1) \
+GEN_HANDLER_E(name0##_##name1, 0x4, opc2, opc3, 0x00000000, tp0, tp1), \
+GEN_HANDLER_E(name0##_##name1, 0x4, opc2, (opc3 | 0x10), 0x00000000, tp0, tp1),
+
+GEN_VXFORM_DUAL(vaddubm, vmul10cuq, 0, 0, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM_DUAL(vadduhm, vmul10ecuq, 0, 1, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM(vadduwm, 0, 2),
+GEN_VXFORM_207(vaddudm, 0, 3),
+GEN_VXFORM_DUAL(vsububm, bcdadd, 0, 16, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM_DUAL(vsubuhm, bcdsub, 0, 17, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM(vsubuwm, 0, 18),
+GEN_VXFORM_207(vsubudm, 0, 19),
+GEN_VXFORM(vmaxub, 1, 0),
+GEN_VXFORM(vmaxuh, 1, 1),
+GEN_VXFORM(vmaxuw, 1, 2),
+GEN_VXFORM_207(vmaxud, 1, 3),
+GEN_VXFORM(vmaxsb, 1, 4),
+GEN_VXFORM(vmaxsh, 1, 5),
+GEN_VXFORM(vmaxsw, 1, 6),
+GEN_VXFORM_207(vmaxsd, 1, 7),
+GEN_VXFORM(vminub, 1, 8),
+GEN_VXFORM(vminuh, 1, 9),
+GEN_VXFORM(vminuw, 1, 10),
+GEN_VXFORM_207(vminud, 1, 11),
+GEN_VXFORM(vminsb, 1, 12),
+GEN_VXFORM(vminsh, 1, 13),
+GEN_VXFORM(vminsw, 1, 14),
+GEN_VXFORM_207(vminsd, 1, 15),
+GEN_VXFORM_DUAL(vavgub, vabsdub, 1, 16, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM_DUAL(vavguh, vabsduh, 1, 17, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM_DUAL(vavguw, vabsduw, 1, 18, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM(vavgsb, 1, 20),
+GEN_VXFORM(vavgsh, 1, 21),
+GEN_VXFORM(vavgsw, 1, 22),
+GEN_VXFORM(vmrghb, 6, 0),
+GEN_VXFORM(vmrghh, 6, 1),
+GEN_VXFORM(vmrghw, 6, 2),
+GEN_VXFORM(vmrglb, 6, 4),
+GEN_VXFORM(vmrglh, 6, 5),
+GEN_VXFORM(vmrglw, 6, 6),
+GEN_VXFORM_207(vmrgew, 6, 30),
+GEN_VXFORM_207(vmrgow, 6, 26),
+GEN_VXFORM(vmuloub, 4, 0),
+GEN_VXFORM(vmulouh, 4, 1),
+GEN_VXFORM_DUAL(vmulouw, vmuluwm, 4, 2, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM(vmulosb, 4, 4),
+GEN_VXFORM(vmulosh, 4, 5),
+GEN_VXFORM_207(vmulosw, 4, 6),
+GEN_VXFORM(vmuleub, 4, 8),
+GEN_VXFORM(vmuleuh, 4, 9),
+GEN_VXFORM_207(vmuleuw, 4, 10),
+GEN_VXFORM(vmulesb, 4, 12),
+GEN_VXFORM(vmulesh, 4, 13),
+GEN_VXFORM_207(vmulesw, 4, 14),
+GEN_VXFORM(vslb, 2, 4),
+GEN_VXFORM(vslh, 2, 5),
+GEN_VXFORM_DUAL(vslw, vrlwnm, 2, 6, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM_207(vsld, 2, 23),
+GEN_VXFORM(vsrb, 2, 8),
+GEN_VXFORM(vsrh, 2, 9),
+GEN_VXFORM(vsrw, 2, 10),
+GEN_VXFORM_207(vsrd, 2, 27),
+GEN_VXFORM(vsrab, 2, 12),
+GEN_VXFORM(vsrah, 2, 13),
+GEN_VXFORM(vsraw, 2, 14),
+GEN_VXFORM_207(vsrad, 2, 15),
+GEN_VXFORM_300(vsrv, 2, 28),
+GEN_VXFORM_300(vslv, 2, 29),
+GEN_VXFORM(vslo, 6, 16),
+GEN_VXFORM(vsro, 6, 17),
+GEN_VXFORM(vaddcuw, 0, 6),
+GEN_HANDLER_E_2(vprtybw, 0x4, 0x1, 0x18, 8, 0, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E_2(vprtybd, 0x4, 0x1, 0x18, 9, 0, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E_2(vprtybq, 0x4, 0x1, 0x18, 10, 0, PPC_NONE, PPC2_ISA300),
+
+GEN_VXFORM_DUAL(vsubcuw, xpnd04_1, 0, 22, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM_DUAL(vaddubs, vmul10uq, 0, 8, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM_DUAL(vadduhs, vmul10euq, 0, 9, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM(vadduws, 0, 10),
+GEN_VXFORM(vaddsbs, 0, 12),
+GEN_VXFORM(vaddshs, 0, 13),
+GEN_VXFORM(vaddsws, 0, 14),
+GEN_VXFORM_DUAL(vsububs, bcdadd, 0, 24, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM_DUAL(vsubuhs, bcdsub, 0, 25, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM(vsubuws, 0, 26),
+GEN_VXFORM(vsubsbs, 0, 28),
+GEN_VXFORM(vsubshs, 0, 29),
+GEN_VXFORM_DUAL(vsubsws, xpnd04_2, 0, 30, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM_207(vadduqm, 0, 4),
+GEN_VXFORM_207(vaddcuq, 0, 5),
+GEN_VXFORM_DUAL(vaddeuqm, vaddecuq, 30, 0xFF, PPC_NONE, PPC2_ALTIVEC_207),
+GEN_VXFORM_207(vsubuqm, 0, 20),
+GEN_VXFORM_207(vsubcuq, 0, 21),
+GEN_VXFORM_DUAL(vsubeuqm, vsubecuq, 31, 0xFF, PPC_NONE, PPC2_ALTIVEC_207),
+GEN_VXFORM(vrlb, 2, 0),
+GEN_VXFORM(vrlh, 2, 1),
+GEN_VXFORM_DUAL(vrlw, vrlwmi, 2, 2, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM_DUAL(vrld, vrldmi, 2, 3, PPC_NONE, PPC2_ALTIVEC_207),
+GEN_VXFORM_DUAL(vsl, vrldnm, 2, 7, PPC_ALTIVEC, PPC_NONE),
+GEN_VXFORM(vsr, 2, 11),
+GEN_VXFORM(vpkuhum, 7, 0),
+GEN_VXFORM(vpkuwum, 7, 1),
+GEN_VXFORM_207(vpkudum, 7, 17),
+GEN_VXFORM(vpkuhus, 7, 2),
+GEN_VXFORM(vpkuwus, 7, 3),
+GEN_VXFORM_207(vpkudus, 7, 19),
+GEN_VXFORM(vpkshus, 7, 4),
+GEN_VXFORM(vpkswus, 7, 5),
+GEN_VXFORM_207(vpksdus, 7, 21),
+GEN_VXFORM(vpkshss, 7, 6),
+GEN_VXFORM(vpkswss, 7, 7),
+GEN_VXFORM_207(vpksdss, 7, 23),
+GEN_VXFORM(vpkpx, 7, 12),
+GEN_VXFORM(vsum4ubs, 4, 24),
+GEN_VXFORM(vsum4sbs, 4, 28),
+GEN_VXFORM(vsum4shs, 4, 25),
+GEN_VXFORM(vsum2sws, 4, 26),
+GEN_VXFORM(vsumsws, 4, 30),
+GEN_VXFORM(vaddfp, 5, 0),
+GEN_VXFORM(vsubfp, 5, 1),
+GEN_VXFORM(vmaxfp, 5, 16),
+GEN_VXFORM(vminfp, 5, 17),
+
+#define GEN_VXRFORM1(opname, name, str, opc2, opc3) \
+ GEN_HANDLER2(name, str, 0x4, opc2, opc3, 0x00000000, PPC_ALTIVEC),
+#define GEN_VXRFORM1_300(opname, name, str, opc2, opc3) \
+GEN_HANDLER2_E(name, str, 0x4, opc2, opc3, 0x00000000, PPC_NONE, PPC2_ISA300),
+#define GEN_VXRFORM(name, opc2, opc3) \
+ GEN_VXRFORM1(name, name, #name, opc2, opc3) \
+ GEN_VXRFORM1(name##_dot, name##_, #name ".", opc2, (opc3 | (0x1 << 4)))
+#define GEN_VXRFORM_300(name, opc2, opc3) \
+ GEN_VXRFORM1_300(name, name, #name, opc2, opc3) \
+ GEN_VXRFORM1_300(name##_dot, name##_, #name ".", opc2, (opc3 | (0x1 << 4)))
+
+GEN_VXRFORM_300(vcmpnezb, 3, 4)
+GEN_VXRFORM_300(vcmpnezh, 3, 5)
+GEN_VXRFORM_300(vcmpnezw, 3, 6)
+GEN_VXRFORM(vcmpgtsb, 3, 12)
+GEN_VXRFORM(vcmpgtsh, 3, 13)
+GEN_VXRFORM(vcmpgtsw, 3, 14)
+GEN_VXRFORM(vcmpgtub, 3, 8)
+GEN_VXRFORM(vcmpgtuh, 3, 9)
+GEN_VXRFORM(vcmpgtuw, 3, 10)
+GEN_VXRFORM_DUAL(vcmpeqfp, vcmpequd, 3, 3, PPC_ALTIVEC, PPC_NONE)
+GEN_VXRFORM(vcmpgefp, 3, 7)
+GEN_VXRFORM_DUAL(vcmpgtfp, vcmpgtud, 3, 11, PPC_ALTIVEC, PPC_NONE)
+GEN_VXRFORM_DUAL(vcmpbfp, vcmpgtsd, 3, 15, PPC_ALTIVEC, PPC_NONE)
+GEN_VXRFORM_DUAL(vcmpequb, vcmpneb, 3, 0, PPC_ALTIVEC, PPC_NONE)
+GEN_VXRFORM_DUAL(vcmpequh, vcmpneh, 3, 1, PPC_ALTIVEC, PPC_NONE)
+GEN_VXRFORM_DUAL(vcmpequw, vcmpnew, 3, 2, PPC_ALTIVEC, PPC_NONE)
+
+#define GEN_VXFORM_DUAL_INV(name0, name1, opc2, opc3, inval0, inval1, type) \
+GEN_OPCODE_DUAL(name0##_##name1, 0x04, opc2, opc3, inval0, inval1, type, \
+ PPC_NONE)
+GEN_VXFORM_DUAL_INV(vspltb, vextractub, 6, 8, 0x00000000, 0x100000,
+ PPC_ALTIVEC),
+GEN_VXFORM_DUAL_INV(vsplth, vextractuh, 6, 9, 0x00000000, 0x100000,
+ PPC_ALTIVEC),
+GEN_VXFORM_DUAL_INV(vspltw, vextractuw, 6, 10, 0x00000000, 0x100000,
+ PPC_ALTIVEC),
+GEN_VXFORM_300_EXT(vextractd, 6, 11, 0x100000),
+GEN_VXFORM_DUAL_INV(vspltisb, vinsertb, 6, 12, 0x00000000, 0x100000,
+ PPC_ALTIVEC),
+GEN_VXFORM_DUAL_INV(vspltish, vinserth, 6, 13, 0x00000000, 0x100000,
+ PPC_ALTIVEC),
+GEN_VXFORM_DUAL_INV(vspltisw, vinsertw, 6, 14, 0x00000000, 0x100000,
+ PPC_ALTIVEC),
+GEN_VXFORM_300_EXT(vinsertd, 6, 15, 0x100000),
+GEN_VXFORM_300_EO(vnegw, 0x01, 0x18, 0x06),
+GEN_VXFORM_300_EO(vnegd, 0x01, 0x18, 0x07),
+GEN_VXFORM_300_EO(vextsb2w, 0x01, 0x18, 0x10),
+GEN_VXFORM_300_EO(vextsh2w, 0x01, 0x18, 0x11),
+GEN_VXFORM_300_EO(vextsb2d, 0x01, 0x18, 0x18),
+GEN_VXFORM_300_EO(vextsh2d, 0x01, 0x18, 0x19),
+GEN_VXFORM_300_EO(vextsw2d, 0x01, 0x18, 0x1A),
+GEN_VXFORM_300_EO(vctzb, 0x01, 0x18, 0x1C),
+GEN_VXFORM_300_EO(vctzh, 0x01, 0x18, 0x1D),
+GEN_VXFORM_300_EO(vctzw, 0x01, 0x18, 0x1E),
+GEN_VXFORM_300_EO(vctzd, 0x01, 0x18, 0x1F),
+GEN_VXFORM_300_EO(vclzlsbb, 0x01, 0x18, 0x0),
+GEN_VXFORM_300_EO(vctzlsbb, 0x01, 0x18, 0x1),
+GEN_VXFORM_300(vpermr, 0x1D, 0xFF),
+
+#define GEN_VXFORM_NOA(name, opc2, opc3) \
+ GEN_HANDLER(name, 0x04, opc2, opc3, 0x001f0000, PPC_ALTIVEC)
+GEN_VXFORM_NOA(vupkhsb, 7, 8),
+GEN_VXFORM_NOA(vupkhsh, 7, 9),
+GEN_VXFORM_207(vupkhsw, 7, 25),
+GEN_VXFORM_NOA(vupklsb, 7, 10),
+GEN_VXFORM_NOA(vupklsh, 7, 11),
+GEN_VXFORM_207(vupklsw, 7, 27),
+GEN_VXFORM_NOA(vupkhpx, 7, 13),
+GEN_VXFORM_NOA(vupklpx, 7, 15),
+GEN_VXFORM_NOA(vrefp, 5, 4),
+GEN_VXFORM_NOA(vrsqrtefp, 5, 5),
+GEN_VXFORM_NOA(vexptefp, 5, 6),
+GEN_VXFORM_NOA(vlogefp, 5, 7),
+GEN_VXFORM_NOA(vrfim, 5, 11),
+GEN_VXFORM_NOA(vrfin, 5, 8),
+GEN_VXFORM_NOA(vrfip, 5, 10),
+GEN_VXFORM_NOA(vrfiz, 5, 9),
+
+#define GEN_VXFORM_UIMM(name, opc2, opc3) \
+ GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_ALTIVEC)
+GEN_VXFORM_UIMM(vcfux, 5, 12),
+GEN_VXFORM_UIMM(vcfsx, 5, 13),
+GEN_VXFORM_UIMM(vctuxs, 5, 14),
+GEN_VXFORM_UIMM(vctsxs, 5, 15),
+
+
+#define GEN_VAFORM_PAIRED(name0, name1, opc2) \
+ GEN_HANDLER(name0##_##name1, 0x04, opc2, 0xFF, 0x00000000, PPC_ALTIVEC)
+GEN_VAFORM_PAIRED(vmhaddshs, vmhraddshs, 16),
+GEN_VAFORM_PAIRED(vmsumubm, vmsummbm, 18),
+GEN_VAFORM_PAIRED(vmsumuhm, vmsumuhs, 19),
+GEN_VAFORM_PAIRED(vmsumshm, vmsumshs, 20),
+GEN_VAFORM_PAIRED(vsel, vperm, 21),
+GEN_VAFORM_PAIRED(vmaddfp, vnmsubfp, 23),
+
+GEN_VXFORM_DUAL(vclzb, vpopcntb, 1, 28, PPC_NONE, PPC2_ALTIVEC_207),
+GEN_VXFORM_DUAL(vclzh, vpopcnth, 1, 29, PPC_NONE, PPC2_ALTIVEC_207),
+GEN_VXFORM_DUAL(vclzw, vpopcntw, 1, 30, PPC_NONE, PPC2_ALTIVEC_207),
+GEN_VXFORM_DUAL(vclzd, vpopcntd, 1, 31, PPC_NONE, PPC2_ALTIVEC_207),
+
+GEN_VXFORM_300(vbpermd, 6, 23),
+GEN_VXFORM_207(vbpermq, 6, 21),
+GEN_VXFORM_207(vgbbd, 6, 20),
+GEN_VXFORM_207(vpmsumb, 4, 16),
+GEN_VXFORM_207(vpmsumh, 4, 17),
+GEN_VXFORM_207(vpmsumw, 4, 18),
+GEN_VXFORM_207(vpmsumd, 4, 19),
+
+GEN_VXFORM_207(vsbox, 4, 23),
+
+GEN_VXFORM_DUAL(vcipher, vcipherlast, 4, 20, PPC_NONE, PPC2_ALTIVEC_207),
+GEN_VXFORM_DUAL(vncipher, vncipherlast, 4, 21, PPC_NONE, PPC2_ALTIVEC_207),
+
+GEN_VXFORM_207(vshasigmaw, 1, 26),
+GEN_VXFORM_207(vshasigmad, 1, 27),
+
+GEN_VXFORM_DUAL(vsldoi, vpermxor, 22, 0xFF, PPC_ALTIVEC, PPC_NONE),
diff --git a/target/ppc/translate/vsx-impl.inc.c b/target/ppc/translate/vsx-impl.inc.c
new file mode 100644
index 0000000000..5a27be4bd4
--- /dev/null
+++ b/target/ppc/translate/vsx-impl.inc.c
@@ -0,0 +1,1009 @@
+/*** VSX extension ***/
+
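+/*
+ * VSRs 0-31 are composed of the corresponding FPR (high doubleword) and a
+ * separate low doubleword held in cpu_vsr[]; VSRs 32-63 alias the Altivec
+ * registers (cpu_avrh/cpu_avrl).
+ */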
+static inline TCGv_i64 cpu_vsrh(int n)
+{
+ if (n < 32) {
+ return cpu_fpr[n];
+ } else {
+ return cpu_avrh[n-32];
+ }
+}
+
+static inline TCGv_i64 cpu_vsrl(int n)
+{
+ if (n < 32) {
+ return cpu_vsr[n];
+ } else {
+ return cpu_avrl[n-32];
+ }
+}
+
+#define VSX_LOAD_SCALAR(name, operation) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##operation(ctx, cpu_vsrh(xT(ctx->opcode)), EA); \
+ /* NOTE: cpu_vsrl(xT) is left undefined by these scalar loads */ \
+ tcg_temp_free(EA); \
+}
+
+VSX_LOAD_SCALAR(lxsdx, ld64_i64)
+VSX_LOAD_SCALAR(lxsiwax, ld32s_i64)
+VSX_LOAD_SCALAR(lxsibzx, ld8u_i64)
+VSX_LOAD_SCALAR(lxsihzx, ld16u_i64)
+VSX_LOAD_SCALAR(lxsiwzx, ld32u_i64)
+VSX_LOAD_SCALAR(lxsspx, ld32fs)
+
+static void gen_lxvd2x(DisasContext *ctx)
+{
+ TCGv EA;
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ gen_qemu_ld64_i64(ctx, cpu_vsrh(xT(ctx->opcode)), EA);
+ tcg_gen_addi_tl(EA, EA, 8);
+ gen_qemu_ld64_i64(ctx, cpu_vsrl(xT(ctx->opcode)), EA);
+ tcg_temp_free(EA);
+}
+
+static void gen_lxvdsx(DisasContext *ctx)
+{
+ TCGv EA;
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ gen_qemu_ld64_i64(ctx, cpu_vsrh(xT(ctx->opcode)), EA);
+ tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrh(xT(ctx->opcode)));
+ tcg_temp_free(EA);
+}
+
+static void gen_lxvw4x(DisasContext *ctx)
+{
+ TCGv EA;
+ TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
+ TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+
+ gen_addr_reg_index(ctx, EA);
+ if (ctx->le_mode) {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
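+        /* A 64-bit LE load byte-reverses the whole doubleword; swapping its
+           two words afterwards leaves each 32-bit word individually
+           byte-reversed, as lxvw4x requires in LE mode. */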
+ tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEQ);
+ tcg_gen_shri_i64(t1, t0, 32);
+ tcg_gen_deposit_i64(xth, t1, t0, 32, 32);
+ tcg_gen_addi_tl(EA, EA, 8);
+ tcg_gen_qemu_ld_i64(t0, EA, ctx->mem_idx, MO_LEQ);
+ tcg_gen_shri_i64(t1, t0, 32);
+ tcg_gen_deposit_i64(xtl, t1, t0, 32, 32);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ } else {
+ tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_addi_tl(EA, EA, 8);
+ tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEQ);
+ }
+ tcg_temp_free(EA);
+}
+
+static void gen_bswap16x8(TCGv_i64 outh, TCGv_i64 outl,
+ TCGv_i64 inh, TCGv_i64 inl)
+{
+ TCGv_i64 mask = tcg_const_i64(0x00FF00FF00FF00FF);
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
+ /* outh = ((inh & mask) << 8) | ((inh >> 8) & mask) */
+ tcg_gen_and_i64(t0, inh, mask);
+ tcg_gen_shli_i64(t0, t0, 8);
+ tcg_gen_shri_i64(t1, inh, 8);
+ tcg_gen_and_i64(t1, t1, mask);
+ tcg_gen_or_i64(outh, t0, t1);
+
+ /* outl = ((inl & mask) << 8) | ((inl >> 8) & mask) */
+ tcg_gen_and_i64(t0, inl, mask);
+ tcg_gen_shli_i64(t0, t0, 8);
+ tcg_gen_shri_i64(t1, inl, 8);
+ tcg_gen_and_i64(t1, t1, mask);
+ tcg_gen_or_i64(outl, t0, t1);
+
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(mask);
+}
+
+static void gen_bswap32x4(TCGv_i64 outh, TCGv_i64 outl,
+ TCGv_i64 inh, TCGv_i64 inl)
+{
+ TCGv_i64 hi = tcg_temp_new_i64();
+ TCGv_i64 lo = tcg_temp_new_i64();
+
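+    /* Byte-reverse each doubleword, then swap its two words back so that
+       only the bytes within each 32-bit word end up reversed. */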
+ tcg_gen_bswap64_i64(hi, inh);
+ tcg_gen_bswap64_i64(lo, inl);
+ tcg_gen_shri_i64(outh, hi, 32);
+ tcg_gen_deposit_i64(outh, outh, hi, 32, 32);
+ tcg_gen_shri_i64(outl, lo, 32);
+ tcg_gen_deposit_i64(outl, outl, lo, 32, 32);
+
+ tcg_temp_free_i64(hi);
+ tcg_temp_free_i64(lo);
+}
+static void gen_lxvh8x(DisasContext *ctx)
+{
+ TCGv EA;
+ TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
+ TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_addi_tl(EA, EA, 8);
+ tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEQ);
+ if (ctx->le_mode) {
+ gen_bswap16x8(xth, xtl, xth, xtl);
+ }
+ tcg_temp_free(EA);
+}
+
+static void gen_lxvb16x(DisasContext *ctx)
+{
+ TCGv EA;
+ TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
+ TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_addi_tl(EA, EA, 8);
+ tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEQ);
+ tcg_temp_free(EA);
+}
+
+#define VSX_STORE_SCALAR(name, operation) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv EA; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##operation(ctx, cpu_vsrh(xS(ctx->opcode)), EA); \
+ tcg_temp_free(EA); \
+}
+
+VSX_STORE_SCALAR(stxsdx, st64_i64)
+
+VSX_STORE_SCALAR(stxsibx, st8_i64)
+VSX_STORE_SCALAR(stxsihx, st16_i64)
+VSX_STORE_SCALAR(stxsiwx, st32_i64)
+VSX_STORE_SCALAR(stxsspx, st32fs)
+
+static void gen_stxvd2x(DisasContext *ctx)
+{
+ TCGv EA;
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ gen_qemu_st64_i64(ctx, cpu_vsrh(xS(ctx->opcode)), EA);
+ tcg_gen_addi_tl(EA, EA, 8);
+ gen_qemu_st64_i64(ctx, cpu_vsrl(xS(ctx->opcode)), EA);
+ tcg_temp_free(EA);
+}
+
+static void gen_stxvw4x(DisasContext *ctx)
+{
+ TCGv_i64 xsh = cpu_vsrh(xS(ctx->opcode));
+ TCGv_i64 xsl = cpu_vsrl(xS(ctx->opcode));
+ TCGv EA;
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ if (ctx->le_mode) {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
+ tcg_gen_shri_i64(t0, xsh, 32);
+ tcg_gen_deposit_i64(t1, t0, xsh, 32, 32);
+ tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEQ);
+ tcg_gen_addi_tl(EA, EA, 8);
+ tcg_gen_shri_i64(t0, xsl, 32);
+ tcg_gen_deposit_i64(t1, t0, xsl, 32, 32);
+ tcg_gen_qemu_st_i64(t1, EA, ctx->mem_idx, MO_LEQ);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+ } else {
+ tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_addi_tl(EA, EA, 8);
+ tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEQ);
+ }
+ tcg_temp_free(EA);
+}
+
+static void gen_stxvh8x(DisasContext *ctx)
+{
+ TCGv_i64 xsh = cpu_vsrh(xS(ctx->opcode));
+ TCGv_i64 xsl = cpu_vsrl(xS(ctx->opcode));
+ TCGv EA;
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ if (ctx->le_mode) {
+ TCGv_i64 outh = tcg_temp_new_i64();
+ TCGv_i64 outl = tcg_temp_new_i64();
+
+ gen_bswap16x8(outh, outl, xsh, xsl);
+ tcg_gen_qemu_st_i64(outh, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_addi_tl(EA, EA, 8);
+ tcg_gen_qemu_st_i64(outl, EA, ctx->mem_idx, MO_BEQ);
+ tcg_temp_free_i64(outh);
+ tcg_temp_free_i64(outl);
+ } else {
+ tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_addi_tl(EA, EA, 8);
+ tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEQ);
+ }
+ tcg_temp_free(EA);
+}
+
+static void gen_stxvb16x(DisasContext *ctx)
+{
+ TCGv_i64 xsh = cpu_vsrh(xS(ctx->opcode));
+ TCGv_i64 xsl = cpu_vsrl(xS(ctx->opcode));
+ TCGv EA;
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ gen_set_access_type(ctx, ACCESS_INT);
+ EA = tcg_temp_new();
+ gen_addr_reg_index(ctx, EA);
+ tcg_gen_qemu_st_i64(xsh, EA, ctx->mem_idx, MO_BEQ);
+ tcg_gen_addi_tl(EA, EA, 8);
+ tcg_gen_qemu_st_i64(xsl, EA, ctx->mem_idx, MO_BEQ);
+ tcg_temp_free(EA);
+}
+
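+/* GPR <-> VSR word moves: the FP half of the register file (VSRs 0-31)
+   requires the FPU to be enabled, the Altivec half (VSRs 32-63) requires
+   Altivec. */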
+#define MV_VSRW(name, tcgop1, tcgop2, target, source) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i64 tmp; \
+ if (xS(ctx->opcode) < 32) { \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ } else { \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ } \
+ tmp = tcg_temp_new_i64(); \
+ tcg_gen_##tcgop1(tmp, source); \
+ tcg_gen_##tcgop2(target, tmp); \
+ tcg_temp_free_i64(tmp); \
+}
+
+
+MV_VSRW(mfvsrwz, ext32u_i64, trunc_i64_tl, cpu_gpr[rA(ctx->opcode)], \
+ cpu_vsrh(xS(ctx->opcode)))
+MV_VSRW(mtvsrwa, extu_tl_i64, ext32s_i64, cpu_vsrh(xT(ctx->opcode)), \
+ cpu_gpr[rA(ctx->opcode)])
+MV_VSRW(mtvsrwz, extu_tl_i64, ext32u_i64, cpu_vsrh(xT(ctx->opcode)), \
+ cpu_gpr[rA(ctx->opcode)])
+
+#if defined(TARGET_PPC64)
+#define MV_VSRD(name, target, source) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ if (xS(ctx->opcode) < 32) { \
+ if (unlikely(!ctx->fpu_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_FPU); \
+ return; \
+ } \
+ } else { \
+ if (unlikely(!ctx->altivec_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VPU); \
+ return; \
+ } \
+ } \
+ tcg_gen_mov_i64(target, source); \
+}
+
+MV_VSRD(mfvsrd, cpu_gpr[rA(ctx->opcode)], cpu_vsrh(xS(ctx->opcode)))
+MV_VSRD(mtvsrd, cpu_vsrh(xT(ctx->opcode)), cpu_gpr[rA(ctx->opcode)])
+
+static void gen_mfvsrld(DisasContext *ctx)
+{
+ if (xS(ctx->opcode) < 32) {
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ } else {
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ }
+
+ tcg_gen_mov_i64(cpu_gpr[rA(ctx->opcode)], cpu_vsrl(xS(ctx->opcode)));
+}
+
+static void gen_mtvsrdd(DisasContext *ctx)
+{
+ if (xT(ctx->opcode) < 32) {
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ } else {
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ }
+
+ if (!rA(ctx->opcode)) {
+ tcg_gen_movi_i64(cpu_vsrh(xT(ctx->opcode)), 0);
+ } else {
+ tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), cpu_gpr[rA(ctx->opcode)]);
+ }
+
+ tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_gpr[rB(ctx->opcode)]);
+}
+
+static void gen_mtvsrws(DisasContext *ctx)
+{
+ if (xT(ctx->opcode) < 32) {
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ } else {
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ }
+
+ tcg_gen_deposit_i64(cpu_vsrl(xT(ctx->opcode)), cpu_gpr[rA(ctx->opcode)],
+ cpu_gpr[rA(ctx->opcode)], 32, 32);
+ tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), cpu_vsrl(xT(ctx->opcode)));
+}
+
+#endif
+
+static void gen_xxpermdi(DisasContext *ctx)
+{
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+
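+    /* If xT overlaps xA or xB, read the DM-selected doublewords into
+       temporaries before writing xT. */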
+ if (unlikely((xT(ctx->opcode) == xA(ctx->opcode)) ||
+ (xT(ctx->opcode) == xB(ctx->opcode)))) {
+ TCGv_i64 xh, xl;
+
+ xh = tcg_temp_new_i64();
+ xl = tcg_temp_new_i64();
+
+ if ((DM(ctx->opcode) & 2) == 0) {
+ tcg_gen_mov_i64(xh, cpu_vsrh(xA(ctx->opcode)));
+ } else {
+ tcg_gen_mov_i64(xh, cpu_vsrl(xA(ctx->opcode)));
+ }
+ if ((DM(ctx->opcode) & 1) == 0) {
+ tcg_gen_mov_i64(xl, cpu_vsrh(xB(ctx->opcode)));
+ } else {
+ tcg_gen_mov_i64(xl, cpu_vsrl(xB(ctx->opcode)));
+ }
+
+ tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xh);
+ tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), xl);
+
+ tcg_temp_free_i64(xh);
+ tcg_temp_free_i64(xl);
+ } else {
+ if ((DM(ctx->opcode) & 2) == 0) {
+ tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), cpu_vsrh(xA(ctx->opcode)));
+ } else {
+ tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), cpu_vsrl(xA(ctx->opcode)));
+ }
+ if ((DM(ctx->opcode) & 1) == 0) {
+ tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrh(xB(ctx->opcode)));
+ } else {
+ tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrl(xB(ctx->opcode)));
+ }
+ }
+}
+
+#define OP_ABS 1
+#define OP_NABS 2
+#define OP_NEG 3
+#define OP_CPSGN 4
+#define SGN_MASK_DP 0x8000000000000000ull
+#define SGN_MASK_SP 0x8000000080000000ull
+
+#define VSX_SCALAR_MOVE(name, op, sgn_mask) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ TCGv_i64 xb, sgm; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ xb = tcg_temp_new_i64(); \
+ sgm = tcg_temp_new_i64(); \
+ tcg_gen_mov_i64(xb, cpu_vsrh(xB(ctx->opcode))); \
+ tcg_gen_movi_i64(sgm, sgn_mask); \
+ switch (op) { \
+ case OP_ABS: { \
+ tcg_gen_andc_i64(xb, xb, sgm); \
+ break; \
+ } \
+ case OP_NABS: { \
+ tcg_gen_or_i64(xb, xb, sgm); \
+ break; \
+ } \
+ case OP_NEG: { \
+ tcg_gen_xor_i64(xb, xb, sgm); \
+ break; \
+ } \
+ case OP_CPSGN: { \
+ TCGv_i64 xa = tcg_temp_new_i64(); \
+ tcg_gen_mov_i64(xa, cpu_vsrh(xA(ctx->opcode))); \
+ tcg_gen_and_i64(xa, xa, sgm); \
+ tcg_gen_andc_i64(xb, xb, sgm); \
+ tcg_gen_or_i64(xb, xb, xa); \
+ tcg_temp_free_i64(xa); \
+ break; \
+ } \
+ } \
+ tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xb); \
+ tcg_temp_free_i64(xb); \
+ tcg_temp_free_i64(sgm); \
+ }
+
+VSX_SCALAR_MOVE(xsabsdp, OP_ABS, SGN_MASK_DP)
+VSX_SCALAR_MOVE(xsnabsdp, OP_NABS, SGN_MASK_DP)
+VSX_SCALAR_MOVE(xsnegdp, OP_NEG, SGN_MASK_DP)
+VSX_SCALAR_MOVE(xscpsgndp, OP_CPSGN, SGN_MASK_DP)
+
+#define VSX_VECTOR_MOVE(name, op, sgn_mask) \
+static void glue(gen_, name)(DisasContext *ctx) \
+ { \
+ TCGv_i64 xbh, xbl, sgm; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ xbh = tcg_temp_new_i64(); \
+ xbl = tcg_temp_new_i64(); \
+ sgm = tcg_temp_new_i64(); \
+ tcg_gen_mov_i64(xbh, cpu_vsrh(xB(ctx->opcode))); \
+ tcg_gen_mov_i64(xbl, cpu_vsrl(xB(ctx->opcode))); \
+ tcg_gen_movi_i64(sgm, sgn_mask); \
+ switch (op) { \
+ case OP_ABS: { \
+ tcg_gen_andc_i64(xbh, xbh, sgm); \
+ tcg_gen_andc_i64(xbl, xbl, sgm); \
+ break; \
+ } \
+ case OP_NABS: { \
+ tcg_gen_or_i64(xbh, xbh, sgm); \
+ tcg_gen_or_i64(xbl, xbl, sgm); \
+ break; \
+ } \
+ case OP_NEG: { \
+ tcg_gen_xor_i64(xbh, xbh, sgm); \
+ tcg_gen_xor_i64(xbl, xbl, sgm); \
+ break; \
+ } \
+ case OP_CPSGN: { \
+ TCGv_i64 xah = tcg_temp_new_i64(); \
+ TCGv_i64 xal = tcg_temp_new_i64(); \
+ tcg_gen_mov_i64(xah, cpu_vsrh(xA(ctx->opcode))); \
+ tcg_gen_mov_i64(xal, cpu_vsrl(xA(ctx->opcode))); \
+ tcg_gen_and_i64(xah, xah, sgm); \
+ tcg_gen_and_i64(xal, xal, sgm); \
+ tcg_gen_andc_i64(xbh, xbh, sgm); \
+ tcg_gen_andc_i64(xbl, xbl, sgm); \
+ tcg_gen_or_i64(xbh, xbh, xah); \
+ tcg_gen_or_i64(xbl, xbl, xal); \
+ tcg_temp_free_i64(xah); \
+ tcg_temp_free_i64(xal); \
+ break; \
+ } \
+ } \
+ tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xbh); \
+ tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), xbl); \
+ tcg_temp_free_i64(xbh); \
+ tcg_temp_free_i64(xbl); \
+ tcg_temp_free_i64(sgm); \
+ }
+
+VSX_VECTOR_MOVE(xvabsdp, OP_ABS, SGN_MASK_DP)
+VSX_VECTOR_MOVE(xvnabsdp, OP_NABS, SGN_MASK_DP)
+VSX_VECTOR_MOVE(xvnegdp, OP_NEG, SGN_MASK_DP)
+VSX_VECTOR_MOVE(xvcpsgndp, OP_CPSGN, SGN_MASK_DP)
+VSX_VECTOR_MOVE(xvabssp, OP_ABS, SGN_MASK_SP)
+VSX_VECTOR_MOVE(xvnabssp, OP_NABS, SGN_MASK_SP)
+VSX_VECTOR_MOVE(xvnegsp, OP_NEG, SGN_MASK_SP)
+VSX_VECTOR_MOVE(xvcpsgnsp, OP_CPSGN, SGN_MASK_SP)
+
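+/* Most VSX arithmetic is done out of line: the generated code passes the
+   raw opcode to the helper, which decodes the XT/XA/XB fields itself. */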
+#define GEN_VSX_HELPER_2(name, op1, op2, inval, type) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ TCGv_i32 opc; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ opc = tcg_const_i32(ctx->opcode); \
+ gen_helper_##name(cpu_env, opc); \
+ tcg_temp_free_i32(opc); \
+}
+
+#define GEN_VSX_HELPER_XT_XB_ENV(name, op1, op2, inval, type) \
+static void gen_##name(DisasContext *ctx) \
+{ \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ gen_helper_##name(cpu_vsrh(xT(ctx->opcode)), cpu_env, \
+ cpu_vsrh(xB(ctx->opcode))); \
+}
+
+GEN_VSX_HELPER_2(xsadddp, 0x00, 0x04, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xssubdp, 0x00, 0x05, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsmuldp, 0x00, 0x06, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsdivdp, 0x00, 0x07, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsredp, 0x14, 0x05, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xssqrtdp, 0x16, 0x04, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsrsqrtedp, 0x14, 0x04, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xstdivdp, 0x14, 0x07, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xstsqrtdp, 0x14, 0x06, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsmaddadp, 0x04, 0x04, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsmaddmdp, 0x04, 0x05, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsmsubadp, 0x04, 0x06, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsmsubmdp, 0x04, 0x07, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsnmaddadp, 0x04, 0x14, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsnmaddmdp, 0x04, 0x15, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsnmsubadp, 0x04, 0x16, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsnmsubmdp, 0x04, 0x17, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xscmpeqdp, 0x0C, 0x00, 0, PPC2_ISA300)
+GEN_VSX_HELPER_2(xscmpgtdp, 0x0C, 0x01, 0, PPC2_ISA300)
+GEN_VSX_HELPER_2(xscmpgedp, 0x0C, 0x02, 0, PPC2_ISA300)
+GEN_VSX_HELPER_2(xscmpnedp, 0x0C, 0x03, 0, PPC2_ISA300)
+GEN_VSX_HELPER_2(xscmpodp, 0x0C, 0x05, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xscmpudp, 0x0C, 0x04, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsmaxdp, 0x00, 0x14, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsmindp, 0x00, 0x15, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xscvdpsp, 0x12, 0x10, 0, PPC2_VSX)
+GEN_VSX_HELPER_XT_XB_ENV(xscvdpspn, 0x16, 0x10, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xscvspdp, 0x12, 0x14, 0, PPC2_VSX)
+GEN_VSX_HELPER_XT_XB_ENV(xscvspdpn, 0x16, 0x14, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xscvdpsxds, 0x10, 0x15, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xscvdpsxws, 0x10, 0x05, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xscvdpuxds, 0x10, 0x14, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xscvdpuxws, 0x10, 0x04, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xscvsxddp, 0x10, 0x17, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xscvuxddp, 0x10, 0x16, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsrdpi, 0x12, 0x04, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsrdpic, 0x16, 0x06, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsrdpim, 0x12, 0x07, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsrdpip, 0x12, 0x06, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xsrdpiz, 0x12, 0x05, 0, PPC2_VSX)
+GEN_VSX_HELPER_XT_XB_ENV(xsrsp, 0x12, 0x11, 0, PPC2_VSX207)
+
+GEN_VSX_HELPER_2(xsaddsp, 0x00, 0x00, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xssubsp, 0x00, 0x01, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xsmulsp, 0x00, 0x02, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xsdivsp, 0x00, 0x03, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xsresp, 0x14, 0x01, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xssqrtsp, 0x16, 0x00, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xsrsqrtesp, 0x14, 0x00, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xsmaddasp, 0x04, 0x00, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xsmaddmsp, 0x04, 0x01, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xsmsubasp, 0x04, 0x02, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xsmsubmsp, 0x04, 0x03, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xsnmaddasp, 0x04, 0x10, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xsnmaddmsp, 0x04, 0x11, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xsnmsubasp, 0x04, 0x12, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xsnmsubmsp, 0x04, 0x13, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xscvsxdsp, 0x10, 0x13, 0, PPC2_VSX207)
+GEN_VSX_HELPER_2(xscvuxdsp, 0x10, 0x12, 0, PPC2_VSX207)
+
+GEN_VSX_HELPER_2(xvadddp, 0x00, 0x0C, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvsubdp, 0x00, 0x0D, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvmuldp, 0x00, 0x0E, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvdivdp, 0x00, 0x0F, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvredp, 0x14, 0x0D, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvsqrtdp, 0x16, 0x0C, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvrsqrtedp, 0x14, 0x0C, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvtdivdp, 0x14, 0x0F, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvtsqrtdp, 0x14, 0x0E, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvmaddadp, 0x04, 0x0C, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvmaddmdp, 0x04, 0x0D, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvmsubadp, 0x04, 0x0E, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvmsubmdp, 0x04, 0x0F, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvnmaddadp, 0x04, 0x1C, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvnmaddmdp, 0x04, 0x1D, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvnmsubadp, 0x04, 0x1E, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvnmsubmdp, 0x04, 0x1F, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvmaxdp, 0x00, 0x1C, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvmindp, 0x00, 0x1D, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcmpeqdp, 0x0C, 0x0C, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcmpgtdp, 0x0C, 0x0D, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcmpgedp, 0x0C, 0x0E, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcmpnedp, 0x0C, 0x0F, 0, PPC2_ISA300)
+GEN_VSX_HELPER_2(xvcvdpsp, 0x12, 0x18, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcvdpsxds, 0x10, 0x1D, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcvdpsxws, 0x10, 0x0D, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcvdpuxds, 0x10, 0x1C, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcvdpuxws, 0x10, 0x0C, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcvsxddp, 0x10, 0x1F, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcvuxddp, 0x10, 0x1E, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcvsxwdp, 0x10, 0x0F, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcvuxwdp, 0x10, 0x0E, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvrdpi, 0x12, 0x0C, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvrdpic, 0x16, 0x0E, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvrdpim, 0x12, 0x0F, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvrdpip, 0x12, 0x0E, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvrdpiz, 0x12, 0x0D, 0, PPC2_VSX)
+
+GEN_VSX_HELPER_2(xvaddsp, 0x00, 0x08, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvsubsp, 0x00, 0x09, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvmulsp, 0x00, 0x0A, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvdivsp, 0x00, 0x0B, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvresp, 0x14, 0x09, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvsqrtsp, 0x16, 0x08, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvrsqrtesp, 0x14, 0x08, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvtdivsp, 0x14, 0x0B, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvtsqrtsp, 0x14, 0x0A, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvmaddasp, 0x04, 0x08, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvmaddmsp, 0x04, 0x09, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvmsubasp, 0x04, 0x0A, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvmsubmsp, 0x04, 0x0B, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvnmaddasp, 0x04, 0x18, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvnmaddmsp, 0x04, 0x19, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvnmsubasp, 0x04, 0x1A, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvnmsubmsp, 0x04, 0x1B, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvmaxsp, 0x00, 0x18, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvminsp, 0x00, 0x19, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcmpeqsp, 0x0C, 0x08, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcmpgtsp, 0x0C, 0x09, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcmpgesp, 0x0C, 0x0A, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcmpnesp, 0x0C, 0x0B, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcvspdp, 0x12, 0x1C, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcvspsxds, 0x10, 0x19, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcvspsxws, 0x10, 0x09, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcvspuxds, 0x10, 0x18, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcvspuxws, 0x10, 0x08, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcvsxdsp, 0x10, 0x1B, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcvuxdsp, 0x10, 0x1A, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcvsxwsp, 0x10, 0x0B, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvcvuxwsp, 0x10, 0x0A, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvrspi, 0x12, 0x08, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvrspic, 0x16, 0x0A, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvrspim, 0x12, 0x0B, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvrspip, 0x12, 0x0A, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xvrspiz, 0x12, 0x09, 0, PPC2_VSX)
+
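+/* ISA 3.0 VSX byte-reverse operations: xxbrh, xxbrw, xxbrd and xxbrq reverse
+ * the bytes within each halfword, word, doubleword or the full quadword.
+ */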
+static void gen_xxbrd(DisasContext *ctx)
+{
+ TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
+ TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));
+ TCGv_i64 xbh = cpu_vsrh(xB(ctx->opcode));
+ TCGv_i64 xbl = cpu_vsrl(xB(ctx->opcode));
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ tcg_gen_bswap64_i64(xth, xbh);
+ tcg_gen_bswap64_i64(xtl, xbl);
+}
+
+static void gen_xxbrh(DisasContext *ctx)
+{
+ TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
+ TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));
+ TCGv_i64 xbh = cpu_vsrh(xB(ctx->opcode));
+ TCGv_i64 xbl = cpu_vsrl(xB(ctx->opcode));
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ gen_bswap16x8(xth, xtl, xbh, xbl);
+}
+
+static void gen_xxbrq(DisasContext *ctx)
+{
+ TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
+ TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));
+ TCGv_i64 xbh = cpu_vsrh(xB(ctx->opcode));
+ TCGv_i64 xbl = cpu_vsrl(xB(ctx->opcode));
+ TCGv_i64 t0 = tcg_temp_new_i64();
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ tcg_gen_bswap64_i64(t0, xbl);
+ tcg_gen_bswap64_i64(xtl, xbh);
+ tcg_gen_mov_i64(xth, t0);
+ tcg_temp_free_i64(t0);
+}
+
+static void gen_xxbrw(DisasContext *ctx)
+{
+ TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
+ TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));
+ TCGv_i64 xbh = cpu_vsrh(xB(ctx->opcode));
+ TCGv_i64 xbl = cpu_vsrl(xB(ctx->opcode));
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ gen_bswap32x4(xth, xtl, xbh, xbl);
+}
+
+#define VSX_LOGICAL(name, tcg_op) \
+static void glue(gen_, name)(DisasContext * ctx) \
+ { \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ tcg_op(cpu_vsrh(xT(ctx->opcode)), cpu_vsrh(xA(ctx->opcode)), \
+ cpu_vsrh(xB(ctx->opcode))); \
+ tcg_op(cpu_vsrl(xT(ctx->opcode)), cpu_vsrl(xA(ctx->opcode)), \
+ cpu_vsrl(xB(ctx->opcode))); \
+ }
+
+VSX_LOGICAL(xxland, tcg_gen_and_i64)
+VSX_LOGICAL(xxlandc, tcg_gen_andc_i64)
+VSX_LOGICAL(xxlor, tcg_gen_or_i64)
+VSX_LOGICAL(xxlxor, tcg_gen_xor_i64)
+VSX_LOGICAL(xxlnor, tcg_gen_nor_i64)
+VSX_LOGICAL(xxleqv, tcg_gen_eqv_i64)
+VSX_LOGICAL(xxlnand, tcg_gen_nand_i64)
+VSX_LOGICAL(xxlorc, tcg_gen_orc_i64)
+
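+/* xxmrghw/xxmrglw interleave the 32-bit words of the high (resp. low)
+ * doublewords of VSR[XA] and VSR[XB] into VSR[XT].
+ */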
+#define VSX_XXMRG(name, high) \
+static void glue(gen_, name)(DisasContext * ctx) \
+ { \
+ TCGv_i64 a0, a1, b0, b1; \
+ if (unlikely(!ctx->vsx_enabled)) { \
+ gen_exception(ctx, POWERPC_EXCP_VSXU); \
+ return; \
+ } \
+ a0 = tcg_temp_new_i64(); \
+ a1 = tcg_temp_new_i64(); \
+ b0 = tcg_temp_new_i64(); \
+ b1 = tcg_temp_new_i64(); \
+ if (high) { \
+ tcg_gen_mov_i64(a0, cpu_vsrh(xA(ctx->opcode))); \
+ tcg_gen_mov_i64(a1, cpu_vsrh(xA(ctx->opcode))); \
+ tcg_gen_mov_i64(b0, cpu_vsrh(xB(ctx->opcode))); \
+ tcg_gen_mov_i64(b1, cpu_vsrh(xB(ctx->opcode))); \
+ } else { \
+ tcg_gen_mov_i64(a0, cpu_vsrl(xA(ctx->opcode))); \
+ tcg_gen_mov_i64(a1, cpu_vsrl(xA(ctx->opcode))); \
+ tcg_gen_mov_i64(b0, cpu_vsrl(xB(ctx->opcode))); \
+ tcg_gen_mov_i64(b1, cpu_vsrl(xB(ctx->opcode))); \
+ } \
+ tcg_gen_shri_i64(a0, a0, 32); \
+ tcg_gen_shri_i64(b0, b0, 32); \
+ tcg_gen_deposit_i64(cpu_vsrh(xT(ctx->opcode)), \
+ b0, a0, 32, 32); \
+ tcg_gen_deposit_i64(cpu_vsrl(xT(ctx->opcode)), \
+ b1, a1, 32, 32); \
+ tcg_temp_free_i64(a0); \
+ tcg_temp_free_i64(a1); \
+ tcg_temp_free_i64(b0); \
+ tcg_temp_free_i64(b1); \
+ }
+
+VSX_XXMRG(xxmrghw, 1)
+VSX_XXMRG(xxmrglw, 0)
+
+static void gen_xxsel(DisasContext * ctx)
+{
+ TCGv_i64 a, b, c;
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ a = tcg_temp_new_i64();
+ b = tcg_temp_new_i64();
+ c = tcg_temp_new_i64();
+
+ tcg_gen_mov_i64(a, cpu_vsrh(xA(ctx->opcode)));
+ tcg_gen_mov_i64(b, cpu_vsrh(xB(ctx->opcode)));
+ tcg_gen_mov_i64(c, cpu_vsrh(xC(ctx->opcode)));
+
+ tcg_gen_and_i64(b, b, c);
+ tcg_gen_andc_i64(a, a, c);
+ tcg_gen_or_i64(cpu_vsrh(xT(ctx->opcode)), a, b);
+
+ tcg_gen_mov_i64(a, cpu_vsrl(xA(ctx->opcode)));
+ tcg_gen_mov_i64(b, cpu_vsrl(xB(ctx->opcode)));
+ tcg_gen_mov_i64(c, cpu_vsrl(xC(ctx->opcode)));
+
+ tcg_gen_and_i64(b, b, c);
+ tcg_gen_andc_i64(a, a, c);
+ tcg_gen_or_i64(cpu_vsrl(xT(ctx->opcode)), a, b);
+
+ tcg_temp_free_i64(a);
+ tcg_temp_free_i64(b);
+ tcg_temp_free_i64(c);
+}
+
+static void gen_xxspltw(DisasContext *ctx)
+{
+ TCGv_i64 b, b2;
+ TCGv_i64 vsr = (UIM(ctx->opcode) & 2) ?
+ cpu_vsrl(xB(ctx->opcode)) :
+ cpu_vsrh(xB(ctx->opcode));
+
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+
+ b = tcg_temp_new_i64();
+ b2 = tcg_temp_new_i64();
+
+ if (UIM(ctx->opcode) & 1) {
+ tcg_gen_ext32u_i64(b, vsr);
+ } else {
+ tcg_gen_shri_i64(b, vsr, 32);
+ }
+
+ tcg_gen_shli_i64(b2, b, 32);
+ tcg_gen_or_i64(cpu_vsrh(xT(ctx->opcode)), b, b2);
+ tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrh(xT(ctx->opcode)));
+
+ tcg_temp_free_i64(b);
+ tcg_temp_free_i64(b2);
+}
+
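+/* Replicate the low byte of x into all eight bytes of a 64-bit value,
+ * e.g. pattern(0xAB) == 0xABABABABABABABABULL.
+ */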
+#define pattern(x) (((x) & 0xff) * (~(uint64_t)0 / 0xff))
+
+static void gen_xxspltib(DisasContext *ctx)
+{
+ unsigned char uim8 = IMM8(ctx->opcode);
+ if (xS(ctx->opcode) < 32) {
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
+ }
+ } else {
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ }
+ tcg_gen_movi_i64(cpu_vsrh(xT(ctx->opcode)), pattern(uim8));
+ tcg_gen_movi_i64(cpu_vsrl(xT(ctx->opcode)), pattern(uim8));
+}
+
+static void gen_xxsldwi(DisasContext *ctx)
+{
+ TCGv_i64 xth, xtl;
+ if (unlikely(!ctx->vsx_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VSXU);
+ return;
+ }
+ xth = tcg_temp_new_i64();
+ xtl = tcg_temp_new_i64();
+
+ switch (SHW(ctx->opcode)) {
+ case 0: {
+ tcg_gen_mov_i64(xth, cpu_vsrh(xA(ctx->opcode)));
+ tcg_gen_mov_i64(xtl, cpu_vsrl(xA(ctx->opcode)));
+ break;
+ }
+ case 1: {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ tcg_gen_mov_i64(xth, cpu_vsrh(xA(ctx->opcode)));
+ tcg_gen_shli_i64(xth, xth, 32);
+ tcg_gen_mov_i64(t0, cpu_vsrl(xA(ctx->opcode)));
+ tcg_gen_shri_i64(t0, t0, 32);
+ tcg_gen_or_i64(xth, xth, t0);
+ tcg_gen_mov_i64(xtl, cpu_vsrl(xA(ctx->opcode)));
+ tcg_gen_shli_i64(xtl, xtl, 32);
+ tcg_gen_mov_i64(t0, cpu_vsrh(xB(ctx->opcode)));
+ tcg_gen_shri_i64(t0, t0, 32);
+ tcg_gen_or_i64(xtl, xtl, t0);
+ tcg_temp_free_i64(t0);
+ break;
+ }
+ case 2: {
+ tcg_gen_mov_i64(xth, cpu_vsrl(xA(ctx->opcode)));
+ tcg_gen_mov_i64(xtl, cpu_vsrh(xB(ctx->opcode)));
+ break;
+ }
+ case 3: {
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ tcg_gen_mov_i64(xth, cpu_vsrl(xA(ctx->opcode)));
+ tcg_gen_shli_i64(xth, xth, 32);
+ tcg_gen_mov_i64(t0, cpu_vsrh(xB(ctx->opcode)));
+ tcg_gen_shri_i64(t0, t0, 32);
+ tcg_gen_or_i64(xth, xth, t0);
+ tcg_gen_mov_i64(xtl, cpu_vsrh(xB(ctx->opcode)));
+ tcg_gen_shli_i64(xtl, xtl, 32);
+ tcg_gen_mov_i64(t0, cpu_vsrl(xB(ctx->opcode)));
+ tcg_gen_shri_i64(t0, t0, 32);
+ tcg_gen_or_i64(xtl, xtl, t0);
+ tcg_temp_free_i64(t0);
+ break;
+ }
+ }
+
+ tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), xth);
+ tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), xtl);
+
+ tcg_temp_free_i64(xth);
+ tcg_temp_free_i64(xtl);
+}
+
+#undef GEN_XX2FORM
+#undef GEN_XX3FORM
+#undef GEN_XX2IFORM
+#undef GEN_XX3_RC_FORM
+#undef GEN_XX3FORM_DM
+#undef VSX_LOGICAL
diff --git a/target/ppc/translate/vsx-ops.inc.c b/target/ppc/translate/vsx-ops.inc.c
new file mode 100644
index 0000000000..3d9104155a
--- /dev/null
+++ b/target/ppc/translate/vsx-ops.inc.c
@@ -0,0 +1,300 @@
+GEN_HANDLER_E(lxsdx, 0x1F, 0x0C, 0x12, 0, PPC_NONE, PPC2_VSX),
+GEN_HANDLER_E(lxsiwax, 0x1F, 0x0C, 0x02, 0, PPC_NONE, PPC2_VSX207),
+GEN_HANDLER_E(lxsiwzx, 0x1F, 0x0C, 0x00, 0, PPC_NONE, PPC2_VSX207),
+GEN_HANDLER_E(lxsibzx, 0x1F, 0x0D, 0x18, 0, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(lxsihzx, 0x1F, 0x0D, 0x19, 0, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(lxsspx, 0x1F, 0x0C, 0x10, 0, PPC_NONE, PPC2_VSX207),
+GEN_HANDLER_E(lxvd2x, 0x1F, 0x0C, 0x1A, 0, PPC_NONE, PPC2_VSX),
+GEN_HANDLER_E(lxvdsx, 0x1F, 0x0C, 0x0A, 0, PPC_NONE, PPC2_VSX),
+GEN_HANDLER_E(lxvw4x, 0x1F, 0x0C, 0x18, 0, PPC_NONE, PPC2_VSX),
+GEN_HANDLER_E(lxvh8x, 0x1F, 0x0C, 0x19, 0, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(lxvb16x, 0x1F, 0x0C, 0x1B, 0, PPC_NONE, PPC2_ISA300),
+
+GEN_HANDLER_E(stxsdx, 0x1F, 0xC, 0x16, 0, PPC_NONE, PPC2_VSX),
+GEN_HANDLER_E(stxsibx, 0x1F, 0xD, 0x1C, 0, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(stxsihx, 0x1F, 0xD, 0x1D, 0, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(stxsiwx, 0x1F, 0xC, 0x04, 0, PPC_NONE, PPC2_VSX207),
+GEN_HANDLER_E(stxsspx, 0x1F, 0xC, 0x14, 0, PPC_NONE, PPC2_VSX207),
+GEN_HANDLER_E(stxvd2x, 0x1F, 0xC, 0x1E, 0, PPC_NONE, PPC2_VSX),
+GEN_HANDLER_E(stxvw4x, 0x1F, 0xC, 0x1C, 0, PPC_NONE, PPC2_VSX),
+GEN_HANDLER_E(stxvh8x, 0x1F, 0x0C, 0x1D, 0, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(stxvb16x, 0x1F, 0x0C, 0x1F, 0, PPC_NONE, PPC2_ISA300),
+
+GEN_HANDLER_E(mfvsrwz, 0x1F, 0x13, 0x03, 0x0000F800, PPC_NONE, PPC2_VSX207),
+GEN_HANDLER_E(mtvsrwa, 0x1F, 0x13, 0x06, 0x0000F800, PPC_NONE, PPC2_VSX207),
+GEN_HANDLER_E(mtvsrwz, 0x1F, 0x13, 0x07, 0x0000F800, PPC_NONE, PPC2_VSX207),
+#if defined(TARGET_PPC64)
+GEN_HANDLER_E(mfvsrd, 0x1F, 0x13, 0x01, 0x0000F800, PPC_NONE, PPC2_VSX207),
+GEN_HANDLER_E(mtvsrd, 0x1F, 0x13, 0x05, 0x0000F800, PPC_NONE, PPC2_VSX207),
+GEN_HANDLER_E(mfvsrld, 0x1F, 0x13, 0x09, 0x0000F800, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(mtvsrdd, 0x1F, 0x13, 0x0D, 0x0, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(mtvsrws, 0x1F, 0x13, 0x0C, 0x0000F800, PPC_NONE, PPC2_ISA300),
+#endif
+
+#define GEN_XX1FORM(name, opc2, opc3, fl2) \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0, opc3, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 0, PPC_NONE, fl2)
+
+#define GEN_XX2FORM(name, opc2, opc3, fl2) \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0, opc3, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 0, PPC_NONE, fl2)
+
+#define GEN_XX2FORM_EO(name, opc2, opc3, opc4, fl2) \
+GEN_HANDLER2_E_2(name, #name, 0x3C, opc2 | 0, opc3, opc4, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E_2(name, #name, 0x3C, opc2 | 1, opc3, opc4, 0, PPC_NONE, fl2)
+
+#define GEN_XX3FORM(name, opc2, opc3, fl2) \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0, opc3, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 2, opc3, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 3, opc3, 0, PPC_NONE, fl2)
+
+#define GEN_XX2IFORM(name, opc2, opc3, fl2) \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0, opc3, 1, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 1, opc3, 1, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 2, opc3, 1, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 3, opc3, 1, PPC_NONE, fl2)
+
+#define GEN_XX3_RC_FORM(name, opc2, opc3, fl2) \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x00, opc3 | 0x00, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x01, opc3 | 0x00, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x02, opc3 | 0x00, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x03, opc3 | 0x00, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x00, opc3 | 0x10, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x01, opc3 | 0x10, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x02, opc3 | 0x10, 0, PPC_NONE, fl2), \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2 | 0x03, opc3 | 0x10, 0, PPC_NONE, fl2)
+
+#define GEN_XX3FORM_DM(name, opc2, opc3) \
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x00, opc3|0x00, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x01, opc3|0x00, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x02, opc3|0x00, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x03, opc3|0x00, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x00, opc3|0x04, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x01, opc3|0x04, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x02, opc3|0x04, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x03, opc3|0x04, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x00, opc3|0x08, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x01, opc3|0x08, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x02, opc3|0x08, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x03, opc3|0x08, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x00, opc3|0x0C, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x01, opc3|0x0C, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x02, opc3|0x0C, 0, PPC_NONE, PPC2_VSX),\
+GEN_HANDLER2_E(name, #name, 0x3C, opc2|0x03, opc3|0x0C, 0, PPC_NONE, PPC2_VSX)
+
+GEN_XX2FORM(xsabsdp, 0x12, 0x15, PPC2_VSX),
+GEN_XX2FORM(xsnabsdp, 0x12, 0x16, PPC2_VSX),
+GEN_XX2FORM(xsnegdp, 0x12, 0x17, PPC2_VSX),
+GEN_XX3FORM(xscpsgndp, 0x00, 0x16, PPC2_VSX),
+
+GEN_XX2FORM(xvabsdp, 0x12, 0x1D, PPC2_VSX),
+GEN_XX2FORM(xvnabsdp, 0x12, 0x1E, PPC2_VSX),
+GEN_XX2FORM(xvnegdp, 0x12, 0x1F, PPC2_VSX),
+GEN_XX3FORM(xvcpsgndp, 0x00, 0x1E, PPC2_VSX),
+GEN_XX2FORM(xvabssp, 0x12, 0x19, PPC2_VSX),
+GEN_XX2FORM(xvnabssp, 0x12, 0x1A, PPC2_VSX),
+GEN_XX2FORM(xvnegsp, 0x12, 0x1B, PPC2_VSX),
+GEN_XX3FORM(xvcpsgnsp, 0x00, 0x1A, PPC2_VSX),
+
+GEN_XX3FORM(xsadddp, 0x00, 0x04, PPC2_VSX),
+GEN_XX3FORM(xssubdp, 0x00, 0x05, PPC2_VSX),
+GEN_XX3FORM(xsmuldp, 0x00, 0x06, PPC2_VSX),
+GEN_XX3FORM(xsdivdp, 0x00, 0x07, PPC2_VSX),
+GEN_XX2FORM(xsredp, 0x14, 0x05, PPC2_VSX),
+GEN_XX2FORM(xssqrtdp, 0x16, 0x04, PPC2_VSX),
+GEN_XX2FORM(xsrsqrtedp, 0x14, 0x04, PPC2_VSX),
+GEN_XX3FORM(xstdivdp, 0x14, 0x07, PPC2_VSX),
+GEN_XX2FORM(xstsqrtdp, 0x14, 0x06, PPC2_VSX),
+GEN_XX3FORM(xsmaddadp, 0x04, 0x04, PPC2_VSX),
+GEN_XX3FORM(xsmaddmdp, 0x04, 0x05, PPC2_VSX),
+GEN_XX3FORM(xsmsubadp, 0x04, 0x06, PPC2_VSX),
+GEN_XX3FORM(xsmsubmdp, 0x04, 0x07, PPC2_VSX),
+GEN_XX3FORM(xsnmaddadp, 0x04, 0x14, PPC2_VSX),
+GEN_XX3FORM(xsnmaddmdp, 0x04, 0x15, PPC2_VSX),
+GEN_XX3FORM(xsnmsubadp, 0x04, 0x16, PPC2_VSX),
+GEN_XX3FORM(xsnmsubmdp, 0x04, 0x17, PPC2_VSX),
+GEN_XX3FORM(xscmpeqdp, 0x0C, 0x00, PPC2_ISA300),
+GEN_XX3FORM(xscmpgtdp, 0x0C, 0x01, PPC2_ISA300),
+GEN_XX3FORM(xscmpgedp, 0x0C, 0x02, PPC2_ISA300),
+GEN_XX3FORM(xscmpnedp, 0x0C, 0x03, PPC2_ISA300),
+GEN_XX2IFORM(xscmpodp, 0x0C, 0x05, PPC2_VSX),
+GEN_XX2IFORM(xscmpudp, 0x0C, 0x04, PPC2_VSX),
+GEN_XX3FORM(xsmaxdp, 0x00, 0x14, PPC2_VSX),
+GEN_XX3FORM(xsmindp, 0x00, 0x15, PPC2_VSX),
+GEN_XX2FORM(xscvdpsp, 0x12, 0x10, PPC2_VSX),
+GEN_XX2FORM(xscvdpspn, 0x16, 0x10, PPC2_VSX207),
+GEN_XX2FORM(xscvspdp, 0x12, 0x14, PPC2_VSX),
+GEN_XX2FORM(xscvspdpn, 0x16, 0x14, PPC2_VSX207),
+GEN_XX2FORM(xscvdpsxds, 0x10, 0x15, PPC2_VSX),
+GEN_XX2FORM(xscvdpsxws, 0x10, 0x05, PPC2_VSX),
+GEN_XX2FORM(xscvdpuxds, 0x10, 0x14, PPC2_VSX),
+GEN_XX2FORM(xscvdpuxws, 0x10, 0x04, PPC2_VSX),
+GEN_XX2FORM(xscvsxddp, 0x10, 0x17, PPC2_VSX),
+GEN_XX2FORM(xscvuxddp, 0x10, 0x16, PPC2_VSX),
+GEN_XX2FORM(xsrdpi, 0x12, 0x04, PPC2_VSX),
+GEN_XX2FORM(xsrdpic, 0x16, 0x06, PPC2_VSX),
+GEN_XX2FORM(xsrdpim, 0x12, 0x07, PPC2_VSX),
+GEN_XX2FORM(xsrdpip, 0x12, 0x06, PPC2_VSX),
+GEN_XX2FORM(xsrdpiz, 0x12, 0x05, PPC2_VSX),
+
+GEN_XX3FORM(xsaddsp, 0x00, 0x00, PPC2_VSX207),
+GEN_XX3FORM(xssubsp, 0x00, 0x01, PPC2_VSX207),
+GEN_XX3FORM(xsmulsp, 0x00, 0x02, PPC2_VSX207),
+GEN_XX3FORM(xsdivsp, 0x00, 0x03, PPC2_VSX207),
+GEN_XX2FORM(xsresp, 0x14, 0x01, PPC2_VSX207),
+GEN_XX2FORM(xsrsp, 0x12, 0x11, PPC2_VSX207),
+GEN_XX2FORM(xssqrtsp, 0x16, 0x00, PPC2_VSX207),
+GEN_XX2FORM(xsrsqrtesp, 0x14, 0x00, PPC2_VSX207),
+GEN_XX3FORM(xsmaddasp, 0x04, 0x00, PPC2_VSX207),
+GEN_XX3FORM(xsmaddmsp, 0x04, 0x01, PPC2_VSX207),
+GEN_XX3FORM(xsmsubasp, 0x04, 0x02, PPC2_VSX207),
+GEN_XX3FORM(xsmsubmsp, 0x04, 0x03, PPC2_VSX207),
+GEN_XX3FORM(xsnmaddasp, 0x04, 0x10, PPC2_VSX207),
+GEN_XX3FORM(xsnmaddmsp, 0x04, 0x11, PPC2_VSX207),
+GEN_XX3FORM(xsnmsubasp, 0x04, 0x12, PPC2_VSX207),
+GEN_XX3FORM(xsnmsubmsp, 0x04, 0x13, PPC2_VSX207),
+GEN_XX2FORM(xscvsxdsp, 0x10, 0x13, PPC2_VSX207),
+GEN_XX2FORM(xscvuxdsp, 0x10, 0x12, PPC2_VSX207),
+
+GEN_XX3FORM(xvadddp, 0x00, 0x0C, PPC2_VSX),
+GEN_XX3FORM(xvsubdp, 0x00, 0x0D, PPC2_VSX),
+GEN_XX3FORM(xvmuldp, 0x00, 0x0E, PPC2_VSX),
+GEN_XX3FORM(xvdivdp, 0x00, 0x0F, PPC2_VSX),
+GEN_XX2FORM(xvredp, 0x14, 0x0D, PPC2_VSX),
+GEN_XX2FORM(xvsqrtdp, 0x16, 0x0C, PPC2_VSX),
+GEN_XX2FORM(xvrsqrtedp, 0x14, 0x0C, PPC2_VSX),
+GEN_XX3FORM(xvtdivdp, 0x14, 0x0F, PPC2_VSX),
+GEN_XX2FORM(xvtsqrtdp, 0x14, 0x0E, PPC2_VSX),
+GEN_XX3FORM(xvmaddadp, 0x04, 0x0C, PPC2_VSX),
+GEN_XX3FORM(xvmaddmdp, 0x04, 0x0D, PPC2_VSX),
+GEN_XX3FORM(xvmsubadp, 0x04, 0x0E, PPC2_VSX),
+GEN_XX3FORM(xvmsubmdp, 0x04, 0x0F, PPC2_VSX),
+GEN_XX3FORM(xvnmaddadp, 0x04, 0x1C, PPC2_VSX),
+GEN_XX3FORM(xvnmaddmdp, 0x04, 0x1D, PPC2_VSX),
+GEN_XX3FORM(xvnmsubadp, 0x04, 0x1E, PPC2_VSX),
+GEN_XX3FORM(xvnmsubmdp, 0x04, 0x1F, PPC2_VSX),
+GEN_XX3FORM(xvmaxdp, 0x00, 0x1C, PPC2_VSX),
+GEN_XX3FORM(xvmindp, 0x00, 0x1D, PPC2_VSX),
+GEN_XX3_RC_FORM(xvcmpeqdp, 0x0C, 0x0C, PPC2_VSX),
+GEN_XX3_RC_FORM(xvcmpgtdp, 0x0C, 0x0D, PPC2_VSX),
+GEN_XX3_RC_FORM(xvcmpgedp, 0x0C, 0x0E, PPC2_VSX),
+GEN_XX3_RC_FORM(xvcmpnedp, 0x0C, 0x0F, PPC2_ISA300),
+GEN_XX2FORM(xvcvdpsp, 0x12, 0x18, PPC2_VSX),
+GEN_XX2FORM(xvcvdpsxds, 0x10, 0x1D, PPC2_VSX),
+GEN_XX2FORM(xvcvdpsxws, 0x10, 0x0D, PPC2_VSX),
+GEN_XX2FORM(xvcvdpuxds, 0x10, 0x1C, PPC2_VSX),
+GEN_XX2FORM(xvcvdpuxws, 0x10, 0x0C, PPC2_VSX),
+GEN_XX2FORM(xvcvsxddp, 0x10, 0x1F, PPC2_VSX),
+GEN_XX2FORM(xvcvuxddp, 0x10, 0x1E, PPC2_VSX),
+GEN_XX2FORM(xvcvsxwdp, 0x10, 0x0F, PPC2_VSX),
+GEN_XX2FORM(xvcvuxwdp, 0x10, 0x0E, PPC2_VSX),
+GEN_XX2FORM(xvrdpi, 0x12, 0x0C, PPC2_VSX),
+GEN_XX2FORM(xvrdpic, 0x16, 0x0E, PPC2_VSX),
+GEN_XX2FORM(xvrdpim, 0x12, 0x0F, PPC2_VSX),
+GEN_XX2FORM(xvrdpip, 0x12, 0x0E, PPC2_VSX),
+GEN_XX2FORM(xvrdpiz, 0x12, 0x0D, PPC2_VSX),
+
+GEN_XX3FORM(xvaddsp, 0x00, 0x08, PPC2_VSX),
+GEN_XX3FORM(xvsubsp, 0x00, 0x09, PPC2_VSX),
+GEN_XX3FORM(xvmulsp, 0x00, 0x0A, PPC2_VSX),
+GEN_XX3FORM(xvdivsp, 0x00, 0x0B, PPC2_VSX),
+GEN_XX2FORM(xvresp, 0x14, 0x09, PPC2_VSX),
+GEN_XX2FORM(xvsqrtsp, 0x16, 0x08, PPC2_VSX),
+GEN_XX2FORM(xvrsqrtesp, 0x14, 0x08, PPC2_VSX),
+GEN_XX3FORM(xvtdivsp, 0x14, 0x0B, PPC2_VSX),
+GEN_XX2FORM(xvtsqrtsp, 0x14, 0x0A, PPC2_VSX),
+GEN_XX3FORM(xvmaddasp, 0x04, 0x08, PPC2_VSX),
+GEN_XX3FORM(xvmaddmsp, 0x04, 0x09, PPC2_VSX),
+GEN_XX3FORM(xvmsubasp, 0x04, 0x0A, PPC2_VSX),
+GEN_XX3FORM(xvmsubmsp, 0x04, 0x0B, PPC2_VSX),
+GEN_XX3FORM(xvnmaddasp, 0x04, 0x18, PPC2_VSX),
+GEN_XX3FORM(xvnmaddmsp, 0x04, 0x19, PPC2_VSX),
+GEN_XX3FORM(xvnmsubasp, 0x04, 0x1A, PPC2_VSX),
+GEN_XX3FORM(xvnmsubmsp, 0x04, 0x1B, PPC2_VSX),
+GEN_XX3FORM(xvmaxsp, 0x00, 0x18, PPC2_VSX),
+GEN_XX3FORM(xvminsp, 0x00, 0x19, PPC2_VSX),
+GEN_XX3_RC_FORM(xvcmpeqsp, 0x0C, 0x08, PPC2_VSX),
+GEN_XX3_RC_FORM(xvcmpgtsp, 0x0C, 0x09, PPC2_VSX),
+GEN_XX3_RC_FORM(xvcmpgesp, 0x0C, 0x0A, PPC2_VSX),
+GEN_XX3_RC_FORM(xvcmpnesp, 0x0C, 0x0B, PPC2_ISA300),
+GEN_XX2FORM(xvcvspdp, 0x12, 0x1C, PPC2_VSX),
+GEN_XX2FORM(xvcvspsxds, 0x10, 0x19, PPC2_VSX),
+GEN_XX2FORM(xvcvspsxws, 0x10, 0x09, PPC2_VSX),
+GEN_XX2FORM(xvcvspuxds, 0x10, 0x18, PPC2_VSX),
+GEN_XX2FORM(xvcvspuxws, 0x10, 0x08, PPC2_VSX),
+GEN_XX2FORM(xvcvsxdsp, 0x10, 0x1B, PPC2_VSX),
+GEN_XX2FORM(xvcvuxdsp, 0x10, 0x1A, PPC2_VSX),
+GEN_XX2FORM(xvcvsxwsp, 0x10, 0x0B, PPC2_VSX),
+GEN_XX2FORM(xvcvuxwsp, 0x10, 0x0A, PPC2_VSX),
+GEN_XX2FORM(xvrspi, 0x12, 0x08, PPC2_VSX),
+GEN_XX2FORM(xvrspic, 0x16, 0x0A, PPC2_VSX),
+GEN_XX2FORM(xvrspim, 0x12, 0x0B, PPC2_VSX),
+GEN_XX2FORM(xvrspip, 0x12, 0x0A, PPC2_VSX),
+GEN_XX2FORM(xvrspiz, 0x12, 0x09, PPC2_VSX),
+GEN_XX2FORM_EO(xxbrh, 0x16, 0x1D, 0x07, PPC2_ISA300),
+GEN_XX2FORM_EO(xxbrw, 0x16, 0x1D, 0x0F, PPC2_ISA300),
+GEN_XX2FORM_EO(xxbrd, 0x16, 0x1D, 0x17, PPC2_ISA300),
+GEN_XX2FORM_EO(xxbrq, 0x16, 0x1D, 0x1F, PPC2_ISA300),
+
+#define VSX_LOGICAL(name, opc2, opc3, fl2) \
+GEN_XX3FORM(name, opc2, opc3, fl2)
+
+VSX_LOGICAL(xxland, 0x8, 0x10, PPC2_VSX),
+VSX_LOGICAL(xxlandc, 0x8, 0x11, PPC2_VSX),
+VSX_LOGICAL(xxlor, 0x8, 0x12, PPC2_VSX),
+VSX_LOGICAL(xxlxor, 0x8, 0x13, PPC2_VSX),
+VSX_LOGICAL(xxlnor, 0x8, 0x14, PPC2_VSX),
+VSX_LOGICAL(xxleqv, 0x8, 0x17, PPC2_VSX207),
+VSX_LOGICAL(xxlnand, 0x8, 0x16, PPC2_VSX207),
+VSX_LOGICAL(xxlorc, 0x8, 0x15, PPC2_VSX207),
+GEN_XX3FORM(xxmrghw, 0x08, 0x02, PPC2_VSX),
+GEN_XX3FORM(xxmrglw, 0x08, 0x06, PPC2_VSX),
+GEN_XX2FORM(xxspltw, 0x08, 0x0A, PPC2_VSX),
+GEN_XX1FORM(xxspltib, 0x08, 0x0B, PPC2_ISA300),
+GEN_XX3FORM_DM(xxsldwi, 0x08, 0x00),
+
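+/* xxsel encodes register operands in the bits used as opc2/opc3 by the
+ * decoder, so the same handler is registered for every combination below.
+ */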
+#define GEN_XXSEL_ROW(opc3) \
+GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x18, opc3, 0, PPC_NONE, PPC2_VSX), \
+GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x19, opc3, 0, PPC_NONE, PPC2_VSX), \
+GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1A, opc3, 0, PPC_NONE, PPC2_VSX), \
+GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1B, opc3, 0, PPC_NONE, PPC2_VSX), \
+GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1C, opc3, 0, PPC_NONE, PPC2_VSX), \
+GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1D, opc3, 0, PPC_NONE, PPC2_VSX), \
+GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1E, opc3, 0, PPC_NONE, PPC2_VSX), \
+GEN_HANDLER2_E(xxsel, "xxsel", 0x3C, 0x1F, opc3, 0, PPC_NONE, PPC2_VSX), \
+
+GEN_XXSEL_ROW(0x00)
+GEN_XXSEL_ROW(0x01)
+GEN_XXSEL_ROW(0x02)
+GEN_XXSEL_ROW(0x03)
+GEN_XXSEL_ROW(0x04)
+GEN_XXSEL_ROW(0x05)
+GEN_XXSEL_ROW(0x06)
+GEN_XXSEL_ROW(0x07)
+GEN_XXSEL_ROW(0x08)
+GEN_XXSEL_ROW(0x09)
+GEN_XXSEL_ROW(0x0A)
+GEN_XXSEL_ROW(0x0B)
+GEN_XXSEL_ROW(0x0C)
+GEN_XXSEL_ROW(0x0D)
+GEN_XXSEL_ROW(0x0E)
+GEN_XXSEL_ROW(0x0F)
+GEN_XXSEL_ROW(0x10)
+GEN_XXSEL_ROW(0x11)
+GEN_XXSEL_ROW(0x12)
+GEN_XXSEL_ROW(0x13)
+GEN_XXSEL_ROW(0x14)
+GEN_XXSEL_ROW(0x15)
+GEN_XXSEL_ROW(0x16)
+GEN_XXSEL_ROW(0x17)
+GEN_XXSEL_ROW(0x18)
+GEN_XXSEL_ROW(0x19)
+GEN_XXSEL_ROW(0x1A)
+GEN_XXSEL_ROW(0x1B)
+GEN_XXSEL_ROW(0x1C)
+GEN_XXSEL_ROW(0x1D)
+GEN_XXSEL_ROW(0x1E)
+GEN_XXSEL_ROW(0x1F)
+
+GEN_XX3FORM_DM(xxpermdi, 0x08, 0x01),
diff --git a/target/ppc/translate_init.c b/target/ppc/translate_init.c
new file mode 100644
index 0000000000..626e03186c
--- /dev/null
+++ b/target/ppc/translate_init.c
@@ -0,0 +1,10601 @@
+/*
+ * PowerPC CPU initialization for QEMU.
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ * Copyright 2011 Freescale Semiconductor, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "disas/bfd.h"
+#include "exec/gdbstub.h"
+#include "sysemu/kvm.h"
+#include "kvm_ppc.h"
+#include "sysemu/arch_init.h"
+#include "sysemu/cpus.h"
+#include "cpu-models.h"
+#include "mmu-hash32.h"
+#include "mmu-hash64.h"
+#include "qemu/error-report.h"
+#include "qapi/visitor.h"
+#include "hw/qdev-properties.h"
+#include "hw/ppc/ppc.h"
+
+//#define PPC_DUMP_CPU
+//#define PPC_DEBUG_SPR
+//#define PPC_DUMP_SPR_ACCESSES
+/* #define USE_APPLE_GDB */
+
+/* Generic callbacks:
+ * do nothing but store/retrieve the SPR value
+ */
+static void spr_load_dump_spr(int sprn)
+{
+#ifdef PPC_DUMP_SPR_ACCESSES
+ TCGv_i32 t0 = tcg_const_i32(sprn);
+ gen_helper_load_dump_spr(cpu_env, t0);
+ tcg_temp_free_i32(t0);
+#endif
+}
+
+static void spr_read_generic (DisasContext *ctx, int gprn, int sprn)
+{
+ gen_load_spr(cpu_gpr[gprn], sprn);
+ spr_load_dump_spr(sprn);
+}
+
+static void spr_store_dump_spr(int sprn)
+{
+#ifdef PPC_DUMP_SPR_ACCESSES
+ TCGv_i32 t0 = tcg_const_i32(sprn);
+ gen_helper_store_dump_spr(cpu_env, t0);
+ tcg_temp_free_i32(t0);
+#endif
+}
+
+static void spr_write_generic (DisasContext *ctx, int sprn, int gprn)
+{
+ gen_store_spr(sprn, cpu_gpr[gprn]);
+ spr_store_dump_spr(sprn);
+}
+
+#if !defined(CONFIG_USER_ONLY)
+static void spr_write_generic32(DisasContext *ctx, int sprn, int gprn)
+{
+#ifdef TARGET_PPC64
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_ext32u_tl(t0, cpu_gpr[gprn]);
+ gen_store_spr(sprn, t0);
+ tcg_temp_free(t0);
+ spr_store_dump_spr(sprn);
+#else
+ spr_write_generic(ctx, sprn, gprn);
+#endif
+}
+
+static void spr_write_clear (DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ gen_load_spr(t0, sprn);
+ tcg_gen_neg_tl(t1, cpu_gpr[gprn]);
+ tcg_gen_and_tl(t0, t0, t1);
+ gen_store_spr(sprn, t0);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+}
+
+static void spr_access_nop(DisasContext *ctx, int sprn, int gprn)
+{
+}
+
+#endif
+
+/* SPR common to all PowerPC */
+/* XER */
+static void spr_read_xer (DisasContext *ctx, int gprn, int sprn)
+{
+ gen_read_xer(cpu_gpr[gprn]);
+}
+
+static void spr_write_xer (DisasContext *ctx, int sprn, int gprn)
+{
+ gen_write_xer(cpu_gpr[gprn]);
+}
+
+/* LR */
+static void spr_read_lr (DisasContext *ctx, int gprn, int sprn)
+{
+ tcg_gen_mov_tl(cpu_gpr[gprn], cpu_lr);
+}
+
+static void spr_write_lr (DisasContext *ctx, int sprn, int gprn)
+{
+ tcg_gen_mov_tl(cpu_lr, cpu_gpr[gprn]);
+}
+
+/* CFAR */
+#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
+static void spr_read_cfar (DisasContext *ctx, int gprn, int sprn)
+{
+ tcg_gen_mov_tl(cpu_gpr[gprn], cpu_cfar);
+}
+
+static void spr_write_cfar (DisasContext *ctx, int sprn, int gprn)
+{
+ tcg_gen_mov_tl(cpu_cfar, cpu_gpr[gprn]);
+}
+#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */
+
+/* CTR */
+static void spr_read_ctr (DisasContext *ctx, int gprn, int sprn)
+{
+ tcg_gen_mov_tl(cpu_gpr[gprn], cpu_ctr);
+}
+
+static void spr_write_ctr (DisasContext *ctx, int sprn, int gprn)
+{
+ tcg_gen_mov_tl(cpu_ctr, cpu_gpr[gprn]);
+}
+
+/* User read access to SPR */
+/* USPRx */
+/* UMMCRx */
+/* UPMCx */
+/* USIA */
+/* UDECR */
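+/* Each user-mode alias simply reads the corresponding privileged SPR,
+ * located 0x10 higher in the SPR numbering.
+ */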
+static void spr_read_ureg (DisasContext *ctx, int gprn, int sprn)
+{
+ gen_load_spr(cpu_gpr[gprn], sprn + 0x10);
+}
+
+#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
+static void spr_write_ureg(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_store_spr(sprn + 0x10, cpu_gpr[gprn]);
+}
+#endif
+
+/* SPR common to all non-embedded PowerPC */
+/* DECR */
+#if !defined(CONFIG_USER_ONLY)
+static void spr_read_decr (DisasContext *ctx, int gprn, int sprn)
+{
+ if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_load_decr(cpu_gpr[gprn], cpu_env);
+ if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_end();
+ gen_stop_exception(ctx);
+ }
+}
+
+static void spr_write_decr (DisasContext *ctx, int sprn, int gprn)
+{
+ if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_store_decr(cpu_env, cpu_gpr[gprn]);
+ if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_end();
+ gen_stop_exception(ctx);
+ }
+}
+#endif
+
+/* SPR common to all non-embedded PowerPC, except 601 */
+/* Time base */
+static void spr_read_tbl (DisasContext *ctx, int gprn, int sprn)
+{
+ if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_load_tbl(cpu_gpr[gprn], cpu_env);
+ if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_end();
+ gen_stop_exception(ctx);
+ }
+}
+
+static void spr_read_tbu (DisasContext *ctx, int gprn, int sprn)
+{
+ if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_load_tbu(cpu_gpr[gprn], cpu_env);
+ if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_end();
+ gen_stop_exception(ctx);
+ }
+}
+
+__attribute__ (( unused ))
+static void spr_read_atbl (DisasContext *ctx, int gprn, int sprn)
+{
+ gen_helper_load_atbl(cpu_gpr[gprn], cpu_env);
+}
+
+__attribute__ (( unused ))
+static void spr_read_atbu (DisasContext *ctx, int gprn, int sprn)
+{
+ gen_helper_load_atbu(cpu_gpr[gprn], cpu_env);
+}
+
+#if !defined(CONFIG_USER_ONLY)
+static void spr_write_tbl (DisasContext *ctx, int sprn, int gprn)
+{
+ if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_store_tbl(cpu_env, cpu_gpr[gprn]);
+ if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_end();
+ gen_stop_exception(ctx);
+ }
+}
+
+static void spr_write_tbu (DisasContext *ctx, int sprn, int gprn)
+{
+ if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_store_tbu(cpu_env, cpu_gpr[gprn]);
+ if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_end();
+ gen_stop_exception(ctx);
+ }
+}
+
+__attribute__ (( unused ))
+static void spr_write_atbl (DisasContext *ctx, int sprn, int gprn)
+{
+ gen_helper_store_atbl(cpu_env, cpu_gpr[gprn]);
+}
+
+__attribute__ (( unused ))
+static void spr_write_atbu (DisasContext *ctx, int sprn, int gprn)
+{
+ gen_helper_store_atbu(cpu_env, cpu_gpr[gprn]);
+}
+
+#if defined(TARGET_PPC64)
+__attribute__ (( unused ))
+static void spr_read_purr (DisasContext *ctx, int gprn, int sprn)
+{
+ gen_helper_load_purr(cpu_gpr[gprn], cpu_env);
+}
+
+/* HDECR */
+static void spr_read_hdecr(DisasContext *ctx, int gprn, int sprn)
+{
+ if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_load_hdecr(cpu_gpr[gprn], cpu_env);
+ if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_end();
+ gen_stop_exception(ctx);
+ }
+}
+
+static void spr_write_hdecr(DisasContext *ctx, int sprn, int gprn)
+{
+ if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_store_hdecr(cpu_env, cpu_gpr[gprn]);
+ if (ctx->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_end();
+ gen_stop_exception(ctx);
+ }
+}
+
+#endif
+#endif
+
+#if !defined(CONFIG_USER_ONLY)
+/* IBAT0U...IBAT7U */
+/* IBAT0L...IBAT7L */
+static void spr_read_ibat (DisasContext *ctx, int gprn, int sprn)
+{
+ tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2]));
+}
+
+static void spr_read_ibat_h (DisasContext *ctx, int gprn, int sprn)
+{
+ tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, IBAT[sprn & 1][((sprn - SPR_IBAT4U) / 2) + 4]));
+}
+
+static void spr_write_ibatu (DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0U) / 2);
+ gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]);
+ tcg_temp_free_i32(t0);
+}
+
+static void spr_write_ibatu_h (DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_IBAT4U) / 2) + 4);
+ gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]);
+ tcg_temp_free_i32(t0);
+}
+
+static void spr_write_ibatl (DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0L) / 2);
+ gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]);
+ tcg_temp_free_i32(t0);
+}
+
+static void spr_write_ibatl_h (DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_IBAT4L) / 2) + 4);
+ gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]);
+ tcg_temp_free_i32(t0);
+}
+
+/* DBAT0U...DBAT7U */
+/* DBAT0L...DBAT7L */
+static void spr_read_dbat (DisasContext *ctx, int gprn, int sprn)
+{
+ tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, DBAT[sprn & 1][(sprn - SPR_DBAT0U) / 2]));
+}
+
+static void spr_read_dbat_h (DisasContext *ctx, int gprn, int sprn)
+{
+ tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, DBAT[sprn & 1][((sprn - SPR_DBAT4U) / 2) + 4]));
+}
+
+static void spr_write_dbatu (DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv_i32 t0 = tcg_const_i32((sprn - SPR_DBAT0U) / 2);
+ gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]);
+ tcg_temp_free_i32(t0);
+}
+
+static void spr_write_dbatu_h (DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_DBAT4U) / 2) + 4);
+ gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]);
+ tcg_temp_free_i32(t0);
+}
+
+static void spr_write_dbatl (DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv_i32 t0 = tcg_const_i32((sprn - SPR_DBAT0L) / 2);
+ gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]);
+ tcg_temp_free_i32(t0);
+}
+
+static void spr_write_dbatl_h (DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_DBAT4L) / 2) + 4);
+ gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]);
+ tcg_temp_free_i32(t0);
+}
+
+/* SDR1 */
+static void spr_write_sdr1 (DisasContext *ctx, int sprn, int gprn)
+{
+ gen_helper_store_sdr1(cpu_env, cpu_gpr[gprn]);
+}
+
+/* 64-bit PowerPC specific SPRs */
+#if defined(TARGET_PPC64)
+static void spr_read_hior (DisasContext *ctx, int gprn, int sprn)
+{
+ tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, excp_prefix));
+}
+
+static void spr_write_hior (DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0x3FFFFF00000ULL);
+ tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_prefix));
+ tcg_temp_free(t0);
+}
+#endif
+#endif
+
+/* PowerPC 601 specific registers */
+/* RTC */
+static void spr_read_601_rtcl (DisasContext *ctx, int gprn, int sprn)
+{
+ gen_helper_load_601_rtcl(cpu_gpr[gprn], cpu_env);
+}
+
+static void spr_read_601_rtcu (DisasContext *ctx, int gprn, int sprn)
+{
+ gen_helper_load_601_rtcu(cpu_gpr[gprn], cpu_env);
+}
+
+#if !defined(CONFIG_USER_ONLY)
+static void spr_write_601_rtcu (DisasContext *ctx, int sprn, int gprn)
+{
+ gen_helper_store_601_rtcu(cpu_env, cpu_gpr[gprn]);
+}
+
+static void spr_write_601_rtcl (DisasContext *ctx, int sprn, int gprn)
+{
+ gen_helper_store_601_rtcl(cpu_env, cpu_gpr[gprn]);
+}
+
+static void spr_write_hid0_601 (DisasContext *ctx, int sprn, int gprn)
+{
+ gen_helper_store_hid0_601(cpu_env, cpu_gpr[gprn]);
+ /* Must stop the translation as endianness may have changed */
+ gen_stop_exception(ctx);
+}
+#endif
+
+/* Unified BATs */
+#if !defined(CONFIG_USER_ONLY)
+static void spr_read_601_ubat (DisasContext *ctx, int gprn, int sprn)
+{
+ tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2]));
+}
+
+static void spr_write_601_ubatu (DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0U) / 2);
+ gen_helper_store_601_batl(cpu_env, t0, cpu_gpr[gprn]);
+ tcg_temp_free_i32(t0);
+}
+
+static void spr_write_601_ubatl (DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0U) / 2);
+ gen_helper_store_601_batu(cpu_env, t0, cpu_gpr[gprn]);
+ tcg_temp_free_i32(t0);
+}
+#endif
+
+/* PowerPC 40x specific registers */
+#if !defined(CONFIG_USER_ONLY)
+static void spr_read_40x_pit (DisasContext *ctx, int gprn, int sprn)
+{
+ gen_helper_load_40x_pit(cpu_gpr[gprn], cpu_env);
+}
+
+static void spr_write_40x_pit (DisasContext *ctx, int sprn, int gprn)
+{
+ gen_helper_store_40x_pit(cpu_env, cpu_gpr[gprn]);
+}
+
+static void spr_write_40x_dbcr0 (DisasContext *ctx, int sprn, int gprn)
+{
+ gen_helper_store_40x_dbcr0(cpu_env, cpu_gpr[gprn]);
+ /* We must stop translation as we may have rebooted */
+ gen_stop_exception(ctx);
+}
+
+static void spr_write_40x_sler (DisasContext *ctx, int sprn, int gprn)
+{
+ gen_helper_store_40x_sler(cpu_env, cpu_gpr[gprn]);
+}
+
+static void spr_write_booke_tcr (DisasContext *ctx, int sprn, int gprn)
+{
+ gen_helper_store_booke_tcr(cpu_env, cpu_gpr[gprn]);
+}
+
+static void spr_write_booke_tsr (DisasContext *ctx, int sprn, int gprn)
+{
+ gen_helper_store_booke_tsr(cpu_env, cpu_gpr[gprn]);
+}
+#endif
+
+/* PowerPC 403 specific registers */
+/* PBL1 / PBU1 / PBL2 / PBU2 */
+#if !defined(CONFIG_USER_ONLY)
+static void spr_read_403_pbr (DisasContext *ctx, int gprn, int sprn)
+{
+ tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, pb[sprn - SPR_403_PBL1]));
+}
+
+static void spr_write_403_pbr (DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv_i32 t0 = tcg_const_i32(sprn - SPR_403_PBL1);
+ gen_helper_store_403_pbr(cpu_env, t0, cpu_gpr[gprn]);
+ tcg_temp_free_i32(t0);
+}
+
+static void spr_write_pir (DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0xF);
+ gen_store_spr(SPR_PIR, t0);
+ tcg_temp_free(t0);
+}
+#endif
+
+/* SPE specific registers */
+static void spr_read_spefscr (DisasContext *ctx, int gprn, int sprn)
+{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_ld_i32(t0, cpu_env, offsetof(CPUPPCState, spe_fscr));
+ tcg_gen_extu_i32_tl(cpu_gpr[gprn], t0);
+ tcg_temp_free_i32(t0);
+}
+
+static void spr_write_spefscr (DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t0, cpu_gpr[gprn]);
+ tcg_gen_st_i32(t0, cpu_env, offsetof(CPUPPCState, spe_fscr));
+ tcg_temp_free_i32(t0);
+}
+
+#if !defined(CONFIG_USER_ONLY)
+/* Callback used to write the exception vector base */
+static void spr_write_excp_prefix (DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUPPCState, ivpr_mask));
+ tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]);
+ tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_prefix));
+ gen_store_spr(sprn, t0);
+ tcg_temp_free(t0);
+}
+
+static void spr_write_excp_vector (DisasContext *ctx, int sprn, int gprn)
+{
+ int sprn_offs;
+
+ if (sprn >= SPR_BOOKE_IVOR0 && sprn <= SPR_BOOKE_IVOR15) {
+ sprn_offs = sprn - SPR_BOOKE_IVOR0;
+ } else if (sprn >= SPR_BOOKE_IVOR32 && sprn <= SPR_BOOKE_IVOR37) {
+ sprn_offs = sprn - SPR_BOOKE_IVOR32 + 32;
+ } else if (sprn >= SPR_BOOKE_IVOR38 && sprn <= SPR_BOOKE_IVOR42) {
+ sprn_offs = sprn - SPR_BOOKE_IVOR38 + 38;
+ } else {
+ printf("Trying to write an unknown exception vector %d %03x\n",
+ sprn, sprn);
+ gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG);
+ return;
+ }
+
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUPPCState, ivor_mask));
+ tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]);
+ tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_vectors[sprn_offs]));
+ gen_store_spr(sprn, t0);
+ tcg_temp_free(t0);
+}
+#endif
+
+static inline void vscr_init (CPUPPCState *env, uint32_t val)
+{
+ env->vscr = val;
+ /* Altivec always uses round-to-nearest */
+ set_float_rounding_mode(float_round_nearest_even, &env->vec_status);
+ set_flush_to_zero(vscr_nj, &env->vec_status);
+}
+
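+/* spr_register_kvm*() degrade gracefully: user-only builds drop the
+ * supervisor/hypervisor callbacks, and builds without KVM drop the
+ * one-reg id used to sync the SPR with the kernel.
+ */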
+#ifdef CONFIG_USER_ONLY
+#define spr_register_kvm(env, num, name, uea_read, uea_write, \
+ oea_read, oea_write, one_reg_id, initial_value) \
+ _spr_register(env, num, name, uea_read, uea_write, initial_value)
+#define spr_register_kvm_hv(env, num, name, uea_read, uea_write, \
+ oea_read, oea_write, hea_read, hea_write, \
+ one_reg_id, initial_value) \
+ _spr_register(env, num, name, uea_read, uea_write, initial_value)
+#else
+#if !defined(CONFIG_KVM)
+#define spr_register_kvm(env, num, name, uea_read, uea_write, \
+ oea_read, oea_write, one_reg_id, initial_value) \
+ _spr_register(env, num, name, uea_read, uea_write, \
+ oea_read, oea_write, oea_read, oea_write, initial_value)
+#define spr_register_kvm_hv(env, num, name, uea_read, uea_write, \
+ oea_read, oea_write, hea_read, hea_write, \
+ one_reg_id, initial_value) \
+ _spr_register(env, num, name, uea_read, uea_write, \
+ oea_read, oea_write, hea_read, hea_write, initial_value)
+#else
+#define spr_register_kvm(env, num, name, uea_read, uea_write, \
+ oea_read, oea_write, one_reg_id, initial_value) \
+ _spr_register(env, num, name, uea_read, uea_write, \
+ oea_read, oea_write, oea_read, oea_write, \
+ one_reg_id, initial_value)
+#define spr_register_kvm_hv(env, num, name, uea_read, uea_write, \
+ oea_read, oea_write, hea_read, hea_write, \
+ one_reg_id, initial_value) \
+ _spr_register(env, num, name, uea_read, uea_write, \
+ oea_read, oea_write, hea_read, hea_write, \
+ one_reg_id, initial_value)
+#endif
+#endif
+
+#define spr_register(env, num, name, uea_read, uea_write, \
+ oea_read, oea_write, initial_value) \
+ spr_register_kvm(env, num, name, uea_read, uea_write, \
+ oea_read, oea_write, 0, initial_value)
+
+#define spr_register_hv(env, num, name, uea_read, uea_write, \
+ oea_read, oea_write, hea_read, hea_write, \
+ initial_value) \
+ spr_register_kvm_hv(env, num, name, uea_read, uea_write, \
+ oea_read, oea_write, hea_read, hea_write, \
+ 0, initial_value)
+
+static inline void _spr_register(CPUPPCState *env, int num,
+ const char *name,
+ void (*uea_read)(DisasContext *ctx, int gprn, int sprn),
+ void (*uea_write)(DisasContext *ctx, int sprn, int gprn),
+#if !defined(CONFIG_USER_ONLY)
+
+ void (*oea_read)(DisasContext *ctx, int gprn, int sprn),
+ void (*oea_write)(DisasContext *ctx, int sprn, int gprn),
+ void (*hea_read)(DisasContext *opaque, int gprn, int sprn),
+ void (*hea_write)(DisasContext *opaque, int sprn, int gprn),
+#endif
+#if defined(CONFIG_KVM)
+ uint64_t one_reg_id,
+#endif
+ target_ulong initial_value)
+{
+ ppc_spr_t *spr;
+
+ spr = &env->spr_cb[num];
+ if (spr->name != NULL || env->spr[num] != 0x00000000 ||
+#if !defined(CONFIG_USER_ONLY)
+ spr->oea_read != NULL || spr->oea_write != NULL ||
+#endif
+ spr->uea_read != NULL || spr->uea_write != NULL) {
+ printf("Error: Trying to register SPR %d (%03x) twice !\n", num, num);
+ exit(1);
+ }
+#if defined(PPC_DEBUG_SPR)
+ printf("*** register spr %d (%03x) %s val " TARGET_FMT_lx "\n", num, num,
+ name, initial_value);
+#endif
+ spr->name = name;
+ spr->uea_read = uea_read;
+ spr->uea_write = uea_write;
+#if !defined(CONFIG_USER_ONLY)
+ spr->oea_read = oea_read;
+ spr->oea_write = oea_write;
+ spr->hea_read = hea_read;
+ spr->hea_write = hea_write;
+#endif
+#if defined(CONFIG_KVM)
+ spr->one_reg_id = one_reg_id;
+#endif
+ env->spr[num] = spr->default_value = initial_value;
+}
+
+/* Generic PowerPC SPRs */
+static void gen_spr_generic (CPUPPCState *env)
+{
+ /* Integer processing */
+ spr_register(env, SPR_XER, "XER",
+ &spr_read_xer, &spr_write_xer,
+ &spr_read_xer, &spr_write_xer,
+ 0x00000000);
+ /* Branch control */
+ spr_register(env, SPR_LR, "LR",
+ &spr_read_lr, &spr_write_lr,
+ &spr_read_lr, &spr_write_lr,
+ 0x00000000);
+ spr_register(env, SPR_CTR, "CTR",
+ &spr_read_ctr, &spr_write_ctr,
+ &spr_read_ctr, &spr_write_ctr,
+ 0x00000000);
+ /* Interrupt processing */
+ spr_register(env, SPR_SRR0, "SRR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SRR1, "SRR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Processor control */
+ spr_register(env, SPR_SPRG0, "SPRG0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG1, "SPRG1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG2, "SPRG2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG3, "SPRG3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+/* SPR common to all non-embedded PowerPC, including 601 */
+static void gen_spr_ne_601 (CPUPPCState *env)
+{
+ /* Exception processing */
+ spr_register_kvm(env, SPR_DSISR, "DSISR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_DSISR, 0x00000000);
+ spr_register_kvm(env, SPR_DAR, "DAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_DAR, 0x00000000);
+ /* Timer */
+ spr_register(env, SPR_DECR, "DECR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_decr, &spr_write_decr,
+ 0x00000000);
+ /* Memory management */
+ spr_register(env, SPR_SDR1, "SDR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_sdr1,
+ 0x00000000);
+}
+
+/* BATs 0-3 */
+static void gen_low_BATs (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ spr_register(env, SPR_IBAT0U, "IBAT0U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat, &spr_write_ibatu,
+ 0x00000000);
+ spr_register(env, SPR_IBAT0L, "IBAT0L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat, &spr_write_ibatl,
+ 0x00000000);
+ spr_register(env, SPR_IBAT1U, "IBAT1U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat, &spr_write_ibatu,
+ 0x00000000);
+ spr_register(env, SPR_IBAT1L, "IBAT1L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat, &spr_write_ibatl,
+ 0x00000000);
+ spr_register(env, SPR_IBAT2U, "IBAT2U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat, &spr_write_ibatu,
+ 0x00000000);
+ spr_register(env, SPR_IBAT2L, "IBAT2L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat, &spr_write_ibatl,
+ 0x00000000);
+ spr_register(env, SPR_IBAT3U, "IBAT3U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat, &spr_write_ibatu,
+ 0x00000000);
+ spr_register(env, SPR_IBAT3L, "IBAT3L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat, &spr_write_ibatl,
+ 0x00000000);
+ spr_register(env, SPR_DBAT0U, "DBAT0U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat, &spr_write_dbatu,
+ 0x00000000);
+ spr_register(env, SPR_DBAT0L, "DBAT0L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat, &spr_write_dbatl,
+ 0x00000000);
+ spr_register(env, SPR_DBAT1U, "DBAT1U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat, &spr_write_dbatu,
+ 0x00000000);
+ spr_register(env, SPR_DBAT1L, "DBAT1L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat, &spr_write_dbatl,
+ 0x00000000);
+ spr_register(env, SPR_DBAT2U, "DBAT2U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat, &spr_write_dbatu,
+ 0x00000000);
+ spr_register(env, SPR_DBAT2L, "DBAT2L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat, &spr_write_dbatl,
+ 0x00000000);
+ spr_register(env, SPR_DBAT3U, "DBAT3U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat, &spr_write_dbatu,
+ 0x00000000);
+ spr_register(env, SPR_DBAT3L, "DBAT3L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat, &spr_write_dbatl,
+ 0x00000000);
+ env->nb_BATs += 4;
+#endif
+}
+
+/* BATs 4-7 */
+static void gen_high_BATs (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ spr_register(env, SPR_IBAT4U, "IBAT4U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat_h, &spr_write_ibatu_h,
+ 0x00000000);
+ spr_register(env, SPR_IBAT4L, "IBAT4L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat_h, &spr_write_ibatl_h,
+ 0x00000000);
+ spr_register(env, SPR_IBAT5U, "IBAT5U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat_h, &spr_write_ibatu_h,
+ 0x00000000);
+ spr_register(env, SPR_IBAT5L, "IBAT5L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat_h, &spr_write_ibatl_h,
+ 0x00000000);
+ spr_register(env, SPR_IBAT6U, "IBAT6U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat_h, &spr_write_ibatu_h,
+ 0x00000000);
+ spr_register(env, SPR_IBAT6L, "IBAT6L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat_h, &spr_write_ibatl_h,
+ 0x00000000);
+ spr_register(env, SPR_IBAT7U, "IBAT7U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat_h, &spr_write_ibatu_h,
+ 0x00000000);
+ spr_register(env, SPR_IBAT7L, "IBAT7L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_ibat_h, &spr_write_ibatl_h,
+ 0x00000000);
+ spr_register(env, SPR_DBAT4U, "DBAT4U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat_h, &spr_write_dbatu_h,
+ 0x00000000);
+ spr_register(env, SPR_DBAT4L, "DBAT4L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat_h, &spr_write_dbatl_h,
+ 0x00000000);
+ spr_register(env, SPR_DBAT5U, "DBAT5U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat_h, &spr_write_dbatu_h,
+ 0x00000000);
+ spr_register(env, SPR_DBAT5L, "DBAT5L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat_h, &spr_write_dbatl_h,
+ 0x00000000);
+ spr_register(env, SPR_DBAT6U, "DBAT6U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat_h, &spr_write_dbatu_h,
+ 0x00000000);
+ spr_register(env, SPR_DBAT6L, "DBAT6L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat_h, &spr_write_dbatl_h,
+ 0x00000000);
+ spr_register(env, SPR_DBAT7U, "DBAT7U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat_h, &spr_write_dbatu_h,
+ 0x00000000);
+ spr_register(env, SPR_DBAT7L, "DBAT7L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_dbat_h, &spr_write_dbatl_h,
+ 0x00000000);
+ env->nb_BATs += 4;
+#endif
+}
+
+/* Generic PowerPC time base */
+static void gen_tbl (CPUPPCState *env)
+{
+ spr_register(env, SPR_VTBL, "TBL",
+ &spr_read_tbl, SPR_NOACCESS,
+ &spr_read_tbl, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_TBL, "TBL",
+ &spr_read_tbl, SPR_NOACCESS,
+ &spr_read_tbl, &spr_write_tbl,
+ 0x00000000);
+ spr_register(env, SPR_VTBU, "TBU",
+ &spr_read_tbu, SPR_NOACCESS,
+ &spr_read_tbu, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_TBU, "TBU",
+ &spr_read_tbu, SPR_NOACCESS,
+ &spr_read_tbu, &spr_write_tbu,
+ 0x00000000);
+}
+
+/* Software table search registers */
+static void gen_6xx_7xx_soft_tlb (CPUPPCState *env, int nb_tlbs, int nb_ways)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = nb_tlbs;
+ env->nb_ways = nb_ways;
+ env->id_tlbs = 1;
+ env->tlb_type = TLB_6XX;
+ spr_register(env, SPR_DMISS, "DMISS",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_DCMP, "DCMP",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_HASH1, "HASH1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_HASH2, "HASH2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_IMISS, "IMISS",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_ICMP, "ICMP",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_RPA, "RPA",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+#endif
+}
+
+/* SPR common to MPC755 and G2 */
+static void gen_spr_G2_755 (CPUPPCState *env)
+{
+ /* SGPRs */
+ spr_register(env, SPR_SPRG4, "SPRG4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG5, "SPRG5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG6, "SPRG6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG7, "SPRG7",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+/* SPR common to all 7xx PowerPC implementations */
+static void gen_spr_7xx (CPUPPCState *env)
+{
+ /* Breakpoints */
+ /* XXX : not implemented */
+ spr_register_kvm(env, SPR_DABR, "DABR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_DABR, 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_IABR, "IABR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Cache management */
+ /* XXX : not implemented */
+ spr_register(env, SPR_ICTC, "ICTC",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Performance monitors */
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_MMCR0, "MMCR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_MMCR1, "MMCR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC1, "PMC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC2, "PMC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC3, "PMC3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC4, "PMC4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_SIAR, "SIAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_UMMCR0, "UMMCR0",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_UMMCR1, "UMMCR1",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_UPMC1, "UPMC1",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_UPMC2, "UPMC2",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_UPMC3, "UPMC3",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_UPMC4, "UPMC4",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_USIAR, "USIAR",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* External access control */
+ /* XXX : not implemented */
+ spr_register(env, SPR_EAR, "EAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+#ifdef TARGET_PPC64
+#ifndef CONFIG_USER_ONLY
+static void spr_write_amr(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+
+ /* Note, the HV=1 PR=0 case is handled earlier by simply using
+ * spr_write_generic for HV mode in the SPR table
+ */
+
+ /* Build insertion mask into t1 based on context */
+ if (ctx->pr) {
+ gen_load_spr(t1, SPR_UAMOR);
+ } else {
+ gen_load_spr(t1, SPR_AMOR);
+ }
+
+ /* Mask new bits into t2 */
+ tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);
+
+ /* Load AMR and clear new bits in t0 */
+ gen_load_spr(t0, SPR_AMR);
+ tcg_gen_andc_tl(t0, t0, t1);
+
+ /* OR in the new bits and write the result back */
+ tcg_gen_or_tl(t0, t0, t2);
+ gen_store_spr(SPR_AMR, t0);
+ spr_store_dump_spr(SPR_AMR);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+}
+
+static void spr_write_uamor(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+
+ /* Note, the HV=1 case is handled earlier by simply using
+ * spr_write_generic for HV mode in the SPR table
+ */
+
+ /* Build insertion mask into t1 based on context */
+ gen_load_spr(t1, SPR_AMOR);
+
+ /* Mask new bits into t2 */
+ tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);
+
+ /* Load AMR and clear new bits in t0 */
+ gen_load_spr(t0, SPR_UAMOR);
+ tcg_gen_andc_tl(t0, t0, t1);
+
+ /* OR in the new bits and write the result back */
+ tcg_gen_or_tl(t0, t0, t2);
+ gen_store_spr(SPR_UAMOR, t0);
+ spr_store_dump_spr(SPR_UAMOR);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+}
+
+static void spr_write_iamr(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+
+ /* Note, the HV=1 case is handled earlier by simply using
+ * spr_write_generic for HV mode in the SPR table
+ */
+
+ /* Build insertion mask into t1 based on context */
+ gen_load_spr(t1, SPR_AMOR);
+
+ /* Mask new bits into t2 */
+ tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]);
+
+ /* Load IAMR and clear new bits in t0 */
+ gen_load_spr(t0, SPR_IAMR);
+ tcg_gen_andc_tl(t0, t0, t1);
+
+ /* OR in the new bits and write it out */
+ tcg_gen_or_tl(t0, t0, t2);
+ gen_store_spr(SPR_IAMR, t0);
+ spr_store_dump_spr(SPR_IAMR);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+}
+#endif /* CONFIG_USER_ONLY */
+
+static void gen_spr_amr(CPUPPCState *env, bool has_iamr)
+{
+#ifndef CONFIG_USER_ONLY
+ /* Virtual Page Class Key protection */
+ /* The AMR is accessible either via SPR 13 or SPR 29: 13 is
+ * userspace accessible, 29 is privileged. So we only need to set
+ * the KVM ONE_REG id on one of them; we use 29. */
+ spr_register(env, SPR_UAMR, "UAMR",
+ &spr_read_generic, &spr_write_amr,
+ &spr_read_generic, &spr_write_amr,
+ 0);
+ spr_register_kvm_hv(env, SPR_AMR, "AMR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_amr,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_AMR, 0);
+ spr_register_kvm_hv(env, SPR_UAMOR, "UAMOR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_uamor,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_UAMOR, 0);
+ spr_register_hv(env, SPR_AMOR, "AMOR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0);
+ if (has_iamr) {
+ spr_register_kvm_hv(env, SPR_IAMR, "IAMR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_iamr,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_IAMR, 0);
+ }
+#endif /* !CONFIG_USER_ONLY */
+}
+#endif /* TARGET_PPC64 */
+
+#ifndef CONFIG_USER_ONLY
+static void spr_read_thrm(DisasContext *ctx, int gprn, int sprn)
+{
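+ /* Refresh the emulated thermal-assist state before the guest sees
+ * the THRMx value.
+ */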
+ gen_helper_fixup_thrm(cpu_env);
+ gen_load_spr(cpu_gpr[gprn], sprn);
+ spr_load_dump_spr(sprn);
+}
+#endif /* !CONFIG_USER_ONLY */
+
+static void gen_spr_thrm (CPUPPCState *env)
+{
+ /* Thermal management */
+ /* XXX : not implemented */
+ spr_register(env, SPR_THRM1, "THRM1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_thrm, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_THRM2, "THRM2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_thrm, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_THRM3, "THRM3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_thrm, &spr_write_generic,
+ 0x00000000);
+}
+
+/* SPR specific to PowerPC 604 implementation */
+static void gen_spr_604 (CPUPPCState *env)
+{
+ /* Processor identification */
+ spr_register(env, SPR_PIR, "PIR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_pir,
+ 0x00000000);
+ /* Breakpoints */
+ /* XXX : not implemented */
+ spr_register(env, SPR_IABR, "IABR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register_kvm(env, SPR_DABR, "DABR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_DABR, 0x00000000);
+ /* Performance counters */
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_MMCR0, "MMCR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC1, "PMC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC2, "PMC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_SIAR, "SIAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_SDA, "SDA",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ /* External access control */
+ /* XXX : not implemented */
+ spr_register(env, SPR_EAR, "EAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+/* SPR specific to PowerPC 603 implementation */
+static void gen_spr_603 (CPUPPCState *env)
+{
+ /* External access control */
+ /* XXX : not implemented */
+ spr_register(env, SPR_EAR, "EAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Breakpoints */
+ /* XXX : not implemented */
+ spr_register(env, SPR_IABR, "IABR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+/* SPR specific to PowerPC G2 implementation */
+static void gen_spr_G2 (CPUPPCState *env)
+{
+ /* Memory base address */
+ /* MBAR */
+ /* XXX : not implemented */
+ spr_register(env, SPR_MBAR, "MBAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Exception processing */
+ spr_register(env, SPR_BOOKE_CSRR0, "CSRR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_CSRR1, "CSRR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Breakpoints */
+ /* XXX : not implemented */
+ spr_register(env, SPR_DABR, "DABR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_DABR2, "DABR2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_IABR, "IABR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_IABR2, "IABR2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_IBCR, "IBCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_DBCR, "DBCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+/* SPR specific to PowerPC 602 implementation */
+static void gen_spr_602 (CPUPPCState *env)
+{
+ /* ESA registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_SER, "SER",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_SEBR, "SEBR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_ESASRR, "ESASRR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Floating point status */
+ /* XXX : not implemented */
+ spr_register(env, SPR_SP, "SP",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_LT, "LT",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Watchdog timer */
+ /* XXX : not implemented */
+ spr_register(env, SPR_TCR, "TCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Interrupt base */
+ spr_register(env, SPR_IBR, "IBR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_IABR, "IABR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+/* SPR specific to PowerPC 601 implementation */
+static void gen_spr_601 (CPUPPCState *env)
+{
+ /* Multiplication/division register */
+ /* MQ */
+ spr_register(env, SPR_MQ, "MQ",
+ &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* RTC registers */
+ spr_register(env, SPR_601_RTCU, "RTCU",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, &spr_write_601_rtcu,
+ 0x00000000);
+ spr_register(env, SPR_601_VRTCU, "RTCU",
+ &spr_read_601_rtcu, SPR_NOACCESS,
+ &spr_read_601_rtcu, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_601_RTCL, "RTCL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, &spr_write_601_rtcl,
+ 0x00000000);
+ spr_register(env, SPR_601_VRTCL, "RTCL",
+ &spr_read_601_rtcl, SPR_NOACCESS,
+ &spr_read_601_rtcl, SPR_NOACCESS,
+ 0x00000000);
+ /* Timer */
+#if 0 /* ? */
+ spr_register(env, SPR_601_UDECR, "UDECR",
+ &spr_read_decr, SPR_NOACCESS,
+ &spr_read_decr, SPR_NOACCESS,
+ 0x00000000);
+#endif
+ /* External access control */
+ /* XXX : not implemented */
+ spr_register(env, SPR_EAR, "EAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+#if !defined(CONFIG_USER_ONLY)
+ spr_register(env, SPR_IBAT0U, "IBAT0U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_601_ubat, &spr_write_601_ubatu,
+ 0x00000000);
+ spr_register(env, SPR_IBAT0L, "IBAT0L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_601_ubat, &spr_write_601_ubatl,
+ 0x00000000);
+ spr_register(env, SPR_IBAT1U, "IBAT1U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_601_ubat, &spr_write_601_ubatu,
+ 0x00000000);
+ spr_register(env, SPR_IBAT1L, "IBAT1L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_601_ubat, &spr_write_601_ubatl,
+ 0x00000000);
+ spr_register(env, SPR_IBAT2U, "IBAT2U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_601_ubat, &spr_write_601_ubatu,
+ 0x00000000);
+ spr_register(env, SPR_IBAT2L, "IBAT2L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_601_ubat, &spr_write_601_ubatl,
+ 0x00000000);
+ spr_register(env, SPR_IBAT3U, "IBAT3U",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_601_ubat, &spr_write_601_ubatu,
+ 0x00000000);
+ spr_register(env, SPR_IBAT3L, "IBAT3L",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_601_ubat, &spr_write_601_ubatl,
+ 0x00000000);
+ env->nb_BATs = 4;
+#endif
+}
+
+static void gen_spr_74xx (CPUPPCState *env)
+{
+ /* Processor identification */
+ spr_register(env, SPR_PIR, "PIR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_pir,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_74XX_MMCR2, "MMCR2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_74XX_UMMCR2, "UMMCR2",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX: not implemented */
+ spr_register(env, SPR_BAMR, "BAMR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MSSCR0, "MSSCR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Altivec */
+ spr_register(env, SPR_VRSAVE, "VRSAVE",
+ &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_L2CR, "L2CR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, spr_access_nop,
+ 0x00000000);
+ /* Not strictly an SPR */
+ vscr_init(env, 0x00010000);
+}
+
+static void gen_l3_ctrl (CPUPPCState *env)
+{
+ /* L3CR */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L3CR, "L3CR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* L3ITCR0 */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L3ITCR0, "L3ITCR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* L3PM */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L3PM, "L3PM",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+static void gen_74xx_soft_tlb (CPUPPCState *env, int nb_tlbs, int nb_ways)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = nb_tlbs;
+ env->nb_ways = nb_ways;
+ env->id_tlbs = 1;
+ env->tlb_type = TLB_6XX;
+ /* XXX : not implemented */
+ spr_register(env, SPR_PTEHI, "PTEHI",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_PTELO, "PTELO",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_TLBMISS, "TLBMISS",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+#endif
+}
+
+#if !defined(CONFIG_USER_ONLY)
+static void spr_write_e500_l1csr0 (DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv t0 = tcg_temp_new();
+
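+ /* Only the enable and parity-enable bits (DCE/CPE) are kept on write;
+ * all other bits are masked off.
+ */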
+ tcg_gen_andi_tl(t0, cpu_gpr[gprn], L1CSR0_DCE | L1CSR0_CPE);
+ gen_store_spr(sprn, t0);
+ tcg_temp_free(t0);
+}
+
+static void spr_write_e500_l1csr1(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv t0 = tcg_temp_new();
+
+ tcg_gen_andi_tl(t0, cpu_gpr[gprn], L1CSR1_ICE | L1CSR1_CPE);
+ gen_store_spr(sprn, t0);
+ tcg_temp_free(t0);
+}
+
+static void spr_write_booke206_mmucsr0 (DisasContext *ctx, int sprn, int gprn)
+{
+ gen_helper_booke206_tlbflush(cpu_env, cpu_gpr[gprn]);
+}
+
+static void spr_write_booke_pid (DisasContext *ctx, int sprn, int gprn)
+{
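+ /* The SPR number is passed to the helper so it can tell PID, PID1
+ * and PID2 apart.
+ */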
+ TCGv_i32 t0 = tcg_const_i32(sprn);
+ gen_helper_booke_setpid(cpu_env, t0, cpu_gpr[gprn]);
+ tcg_temp_free_i32(t0);
+}
+#endif
+
+static void gen_spr_usprgh (CPUPPCState *env)
+{
+ spr_register(env, SPR_USPRG4, "USPRG4",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_USPRG5, "USPRG5",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_USPRG6, "USPRG6",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_USPRG7, "USPRG7",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+}
+
+/* PowerPC BookE SPR */
+static void gen_spr_BookE (CPUPPCState *env, uint64_t ivor_mask)
+{
+ const char *ivor_names[64] = {
+ "IVOR0", "IVOR1", "IVOR2", "IVOR3",
+ "IVOR4", "IVOR5", "IVOR6", "IVOR7",
+ "IVOR8", "IVOR9", "IVOR10", "IVOR11",
+ "IVOR12", "IVOR13", "IVOR14", "IVOR15",
+ "IVOR16", "IVOR17", "IVOR18", "IVOR19",
+ "IVOR20", "IVOR21", "IVOR22", "IVOR23",
+ "IVOR24", "IVOR25", "IVOR26", "IVOR27",
+ "IVOR28", "IVOR29", "IVOR30", "IVOR31",
+ "IVOR32", "IVOR33", "IVOR34", "IVOR35",
+ "IVOR36", "IVOR37", "IVOR38", "IVOR39",
+ "IVOR40", "IVOR41", "IVOR42", "IVOR43",
+ "IVOR44", "IVOR45", "IVOR46", "IVOR47",
+ "IVOR48", "IVOR49", "IVOR50", "IVOR51",
+ "IVOR52", "IVOR53", "IVOR54", "IVOR55",
+ "IVOR56", "IVOR57", "IVOR58", "IVOR59",
+ "IVOR60", "IVOR61", "IVOR62", "IVOR63",
+ };
+#define SPR_BOOKE_IVORxx (-1)
+ int ivor_sprn[64] = {
+ SPR_BOOKE_IVOR0, SPR_BOOKE_IVOR1, SPR_BOOKE_IVOR2, SPR_BOOKE_IVOR3,
+ SPR_BOOKE_IVOR4, SPR_BOOKE_IVOR5, SPR_BOOKE_IVOR6, SPR_BOOKE_IVOR7,
+ SPR_BOOKE_IVOR8, SPR_BOOKE_IVOR9, SPR_BOOKE_IVOR10, SPR_BOOKE_IVOR11,
+ SPR_BOOKE_IVOR12, SPR_BOOKE_IVOR13, SPR_BOOKE_IVOR14, SPR_BOOKE_IVOR15,
+ SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx,
+ SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx,
+ SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx,
+ SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx,
+ SPR_BOOKE_IVOR32, SPR_BOOKE_IVOR33, SPR_BOOKE_IVOR34, SPR_BOOKE_IVOR35,
+ SPR_BOOKE_IVOR36, SPR_BOOKE_IVOR37, SPR_BOOKE_IVOR38, SPR_BOOKE_IVOR39,
+ SPR_BOOKE_IVOR40, SPR_BOOKE_IVOR41, SPR_BOOKE_IVOR42, SPR_BOOKE_IVORxx,
+ SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx,
+ SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx,
+ SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx,
+ SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx,
+ SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx, SPR_BOOKE_IVORxx,
+ };
+ int i;
+
+ /* Interrupt processing */
+ spr_register(env, SPR_BOOKE_CSRR0, "CSRR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_CSRR1, "CSRR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Debug */
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC1, "IAC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC2, "IAC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DAC1, "DAC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DAC2, "DAC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DBCR0, "DBCR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_40x_dbcr0,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DBCR1, "DBCR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DBCR2, "DBCR2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DBSR, "DBSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_clear,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_DEAR, "DEAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_ESR, "ESR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_IVPR, "IVPR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_excp_prefix,
+ 0x00000000);
+ /* Exception vectors */
+ for (i = 0; i < 64; i++) {
+ if (ivor_mask & (1ULL << i)) {
+ if (ivor_sprn[i] == SPR_BOOKE_IVORxx) {
+ fprintf(stderr, "ERROR: IVOR %d SPR is not defined\n", i);
+ exit(1);
+ }
+ spr_register(env, ivor_sprn[i], ivor_names[i],
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_excp_vector,
+ 0x00000000);
+ }
+ }
+ spr_register(env, SPR_BOOKE_PID, "PID",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_booke_pid,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_TCR, "TCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_booke_tcr,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_TSR, "TSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_booke_tsr,
+ 0x00000000);
+ /* Timer */
+ spr_register(env, SPR_DECR, "DECR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_decr, &spr_write_decr,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_DECAR, "DECAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, &spr_write_generic,
+ 0x00000000);
+ /* SPRGs */
+ spr_register(env, SPR_USPRG0, "USPRG0",
+ &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG4, "SPRG4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG5, "SPRG5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG6, "SPRG6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG7, "SPRG7",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+static inline uint32_t gen_tlbncfg(uint32_t assoc, uint32_t minsize,
+ uint32_t maxsize, uint32_t flags,
+ uint32_t nentries)
+{
+ return (assoc << TLBnCFG_ASSOC_SHIFT) |
+ (minsize << TLBnCFG_MINSIZE_SHIFT) |
+ (maxsize << TLBnCFG_MAXSIZE_SHIFT) |
+ flags | nentries;
+}
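+
+/* A caller typically builds one such word per TLB array before handing the
+ * tlbncfg[] table to gen_spr_BookE206() below; for example (values are
+ * illustrative only):
+ *
+ *     tlbncfg[0] = gen_tlbncfg(2, 1, 1, 0, 64);
+ *     tlbncfg[1] = gen_tlbncfg(16, 1, 9, TLBnCFG_AVAIL | TLBnCFG_IPROT, 16);
+ *
+ * describes a 64-entry two-way TLB0 plus a 16-entry fully associative TLB1.
+ */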
+
+/* BookE 2.06 storage control registers */
+static void gen_spr_BookE206(CPUPPCState *env, uint32_t mas_mask,
+ uint32_t *tlbncfg)
+{
+#if !defined(CONFIG_USER_ONLY)
+ const char *mas_names[8] = {
+ "MAS0", "MAS1", "MAS2", "MAS3", "MAS4", "MAS5", "MAS6", "MAS7",
+ };
+ int mas_sprn[8] = {
+ SPR_BOOKE_MAS0, SPR_BOOKE_MAS1, SPR_BOOKE_MAS2, SPR_BOOKE_MAS3,
+ SPR_BOOKE_MAS4, SPR_BOOKE_MAS5, SPR_BOOKE_MAS6, SPR_BOOKE_MAS7,
+ };
+ int i;
+
+ /* TLB assist registers */
+ /* XXX : not implemented */
+ for (i = 0; i < 8; i++) {
+ void (*uea_write)(DisasContext *ctx, int sprn, int gprn) =
+ &spr_write_generic32;
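+ /* MAS2 holds an effective address (the EPN), so on 64-bit (PPC_64B)
+ * implementations it must be written at full width; the remaining MAS
+ * registers are 32-bit.
+ */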
+ if (i == 2 && (mas_mask & (1 << i)) && (env->insns_flags & PPC_64B)) {
+ uea_write = &spr_write_generic;
+ }
+ if (mas_mask & (1 << i)) {
+ spr_register(env, mas_sprn[i], mas_names[i],
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, uea_write,
+ 0x00000000);
+ }
+ }
+ if (env->nb_pids > 1) {
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_PID1, "PID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_booke_pid,
+ 0x00000000);
+ }
+ if (env->nb_pids > 2) {
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_PID2, "PID2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_booke_pid,
+ 0x00000000);
+ }
+ /* XXX : not implemented */
+ spr_register(env, SPR_MMUCFG, "MMUCFG",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000); /* TOFIX */
+ switch (env->nb_ways) {
+ case 4:
+ spr_register(env, SPR_BOOKE_TLB3CFG, "TLB3CFG",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ tlbncfg[3]);
+ /* Fallthru */
+ case 3:
+ spr_register(env, SPR_BOOKE_TLB2CFG, "TLB2CFG",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ tlbncfg[2]);
+ /* Fallthru */
+ case 2:
+ spr_register(env, SPR_BOOKE_TLB1CFG, "TLB1CFG",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ tlbncfg[1]);
+ /* Fallthru */
+ case 1:
+ spr_register(env, SPR_BOOKE_TLB0CFG, "TLB0CFG",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ tlbncfg[0]);
+ /* Fallthru */
+ case 0:
+ default:
+ break;
+ }
+#endif
+
+ gen_spr_usprgh(env);
+}
+
+/* SPR specific to PowerPC 440 implementation */
+static void gen_spr_440 (CPUPPCState *env)
+{
+ /* Cache control */
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_DNV0, "DNV0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_DNV1, "DNV1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_DNV2, "DNV2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_DNV3, "DNV3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_DTV0, "DTV0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_DTV1, "DTV1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_DTV2, "DTV2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_DTV3, "DTV3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_DVLIM, "DVLIM",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_INV0, "INV0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_INV1, "INV1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_INV2, "INV2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_INV3, "INV3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_ITV0, "ITV0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_ITV1, "ITV1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_ITV2, "ITV2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_ITV3, "ITV3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_IVLIM, "IVLIM",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Cache debug */
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DCDBTRH, "DCDBTRH",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DCDBTRL, "DCDBTRL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_ICDBDR, "ICDBDR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_ICDBTRH, "ICDBTRH",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_ICDBTRL, "ICDBTRL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_DBDR, "DBDR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Processor control */
+ spr_register(env, SPR_4xx_CCR0, "CCR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_440_RSTCFG, "RSTCFG",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ /* Storage control */
+ spr_register(env, SPR_440_MMUCR, "MMUCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+/* SPR shared between PowerPC 40x implementations */
+static void gen_spr_40x (CPUPPCState *env)
+{
+ /* Cache */
+ /* not emulated, as QEMU does not emulate caches */
+ spr_register(env, SPR_40x_DCCR, "DCCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* not emulated, as QEMU does not emulate caches */
+ spr_register(env, SPR_40x_ICCR, "ICCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* not emulated, as QEMU does not emulate caches */
+ spr_register(env, SPR_BOOKE_ICDBDR, "ICDBDR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ /* Exception */
+ spr_register(env, SPR_40x_DEAR, "DEAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_40x_ESR, "ESR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_40x_EVPR, "EVPR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_excp_prefix,
+ 0x00000000);
+ spr_register(env, SPR_40x_SRR2, "SRR2",
+ &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_40x_SRR3, "SRR3",
+ &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Timers */
+ spr_register(env, SPR_40x_PIT, "PIT",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_40x_pit, &spr_write_40x_pit,
+ 0x00000000);
+ spr_register(env, SPR_40x_TCR, "TCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_booke_tcr,
+ 0x00000000);
+ spr_register(env, SPR_40x_TSR, "TSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_booke_tsr,
+ 0x00000000);
+}
+
+/* SPR specific to PowerPC 405 implementation */
+static void gen_spr_405 (CPUPPCState *env)
+{
+ /* MMU */
+ spr_register(env, SPR_40x_PID, "PID",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_4xx_CCR0, "CCR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00700000);
+ /* Debug interface */
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_DBCR0, "DBCR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_40x_dbcr0,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_405_DBCR1, "DBCR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_DBSR, "DBSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_clear,
+ /* Last reset was system reset */
+ 0x00000300);
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_DAC1, "DAC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_40x_DAC2, "DAC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_405_DVC1, "DVC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_405_DVC2, "DVC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_IAC1, "IAC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_40x_IAC2, "IAC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_405_IAC3, "IAC3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_405_IAC4, "IAC4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Storage control */
+ /* XXX: TODO: not implemented */
+ spr_register(env, SPR_405_SLER, "SLER",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_40x_sler,
+ 0x00000000);
+ spr_register(env, SPR_40x_ZPR, "ZPR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_405_SU0R, "SU0R",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* SPRG */
+ spr_register(env, SPR_USPRG0, "USPRG0",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_SPRG4, "SPRG4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG5, "SPRG5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG6, "SPRG6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_SPRG7, "SPRG7",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ gen_spr_usprgh(env);
+}
+
+/* SPR shared between PowerPC 401 & 403 implementations */
+static void gen_spr_401_403 (CPUPPCState *env)
+{
+ /* Time base */
+ spr_register(env, SPR_403_VTBL, "TBL",
+ &spr_read_tbl, SPR_NOACCESS,
+ &spr_read_tbl, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_403_TBL, "TBL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, &spr_write_tbl,
+ 0x00000000);
+ spr_register(env, SPR_403_VTBU, "TBU",
+ &spr_read_tbu, SPR_NOACCESS,
+ &spr_read_tbu, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_403_TBU, "TBU",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, &spr_write_tbu,
+ 0x00000000);
+ /* Debug */
+ /* not emulated, as QEMU does not emulate caches */
+ spr_register(env, SPR_403_CDBCR, "CDBCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+/* SPR specific to PowerPC 401 implementation */
+static void gen_spr_401 (CPUPPCState *env)
+{
+ /* Debug interface */
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_DBCR0, "DBCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_40x_dbcr0,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_DBSR, "DBSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_clear,
+ /* Last reset was system reset */
+ 0x00000300);
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_DAC1, "DAC",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_IAC1, "IAC",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Storage control */
+ /* XXX: TODO: not implemented */
+ spr_register(env, SPR_405_SLER, "SLER",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_40x_sler,
+ 0x00000000);
+ /* not emulated, as QEMU never does speculative access */
+ spr_register(env, SPR_40x_SGR, "SGR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0xFFFFFFFF);
+ /* not emulated, as QEMU does not emulate caches */
+ spr_register(env, SPR_40x_DCWR, "DCWR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+static void gen_spr_401x2 (CPUPPCState *env)
+{
+ gen_spr_401(env);
+ spr_register(env, SPR_40x_PID, "PID",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_40x_ZPR, "ZPR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+/* SPR specific to PowerPC 403 implementation */
+static void gen_spr_403 (CPUPPCState *env)
+{
+ /* Debug interface */
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_DBCR0, "DBCR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_40x_dbcr0,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_DBSR, "DBSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_clear,
+ /* Last reset was system reset */
+ 0x00000300);
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_DAC1, "DAC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_DAC2, "DAC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_IAC1, "IAC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_40x_IAC2, "IAC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+static void gen_spr_403_real (CPUPPCState *env)
+{
+ spr_register(env, SPR_403_PBL1, "PBL1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_403_pbr, &spr_write_403_pbr,
+ 0x00000000);
+ spr_register(env, SPR_403_PBU1, "PBU1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_403_pbr, &spr_write_403_pbr,
+ 0x00000000);
+ spr_register(env, SPR_403_PBL2, "PBL2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_403_pbr, &spr_write_403_pbr,
+ 0x00000000);
+ spr_register(env, SPR_403_PBU2, "PBU2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_403_pbr, &spr_write_403_pbr,
+ 0x00000000);
+}
+
+static void gen_spr_403_mmu (CPUPPCState *env)
+{
+ /* MMU */
+ spr_register(env, SPR_40x_PID, "PID",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_40x_ZPR, "ZPR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+/* SPR specific to PowerPC compression coprocessor extension */
+static void gen_spr_compress (CPUPPCState *env)
+{
+ /* XXX : not implemented */
+ spr_register(env, SPR_401_SKR, "SKR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+static void gen_spr_5xx_8xx (CPUPPCState *env)
+{
+ /* Exception processing */
+ spr_register_kvm(env, SPR_DSISR, "DSISR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_DSISR, 0x00000000);
+ spr_register_kvm(env, SPR_DAR, "DAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_DAR, 0x00000000);
+ /* Timer */
+ spr_register(env, SPR_DECR, "DECR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_decr, &spr_write_decr,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_EIE, "EIE",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_EID, "EID",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_NRI, "NRI",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_CMPA, "CMPA",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_CMPB, "CMPB",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_CMPC, "CMPC",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_CMPD, "CMPD",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_ECR, "ECR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_DER, "DER",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_COUNTA, "COUNTA",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_COUNTB, "COUNTB",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_CMPE, "CMPE",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_CMPF, "CMPF",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_CMPG, "CMPG",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_CMPH, "CMPH",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_LCTRL1, "LCTRL1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_LCTRL2, "LCTRL2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_BAR, "BAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_DPDR, "DPDR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_IMMR, "IMMR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+static void gen_spr_5xx (CPUPPCState *env)
+{
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_MI_GRA, "MI_GRA",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_L2U_GRA, "L2U_GRA",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RPCU_BBCMCR, "L2U_BBCMCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_L2U_MCR, "L2U_MCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_MI_RBA0, "MI_RBA0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_MI_RBA1, "MI_RBA1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_MI_RBA2, "MI_RBA2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_MI_RBA3, "MI_RBA3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_L2U_RBA0, "L2U_RBA0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_L2U_RBA1, "L2U_RBA1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_L2U_RBA2, "L2U_RBA2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_L2U_RBA3, "L2U_RBA3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_MI_RA0, "MI_RA0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_MI_RA1, "MI_RA1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_MI_RA2, "MI_RA2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_MI_RA3, "MI_RA3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_L2U_RA0, "L2U_RA0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_L2U_RA1, "L2U_RA1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_L2U_RA2, "L2U_RA2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_L2U_RA3, "L2U_RA3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_RCPU_FPECR, "FPECR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+static void gen_spr_8xx (CPUPPCState *env)
+{
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_IC_CST, "IC_CST",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_IC_ADR, "IC_ADR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_IC_DAT, "IC_DAT",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_DC_CST, "DC_CST",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_DC_ADR, "DC_ADR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_DC_DAT, "DC_DAT",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MI_CTR, "MI_CTR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MI_AP, "MI_AP",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MI_EPN, "MI_EPN",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MI_TWC, "MI_TWC",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MI_RPN, "MI_RPN",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MI_DBCAM, "MI_DBCAM",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MI_DBRAM0, "MI_DBRAM0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MI_DBRAM1, "MI_DBRAM1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MD_CTR, "MD_CTR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MD_CASID, "MD_CASID",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MD_AP, "MD_AP",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MD_EPN, "MD_EPN",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MD_TWB, "MD_TWB",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MD_TWC, "MD_TWC",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MD_RPN, "MD_RPN",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MD_TW, "MD_TW",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MD_DBCAM, "MD_DBCAM",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MD_DBRAM0, "MD_DBRAM0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MPC_MD_DBRAM1, "MD_DBRAM1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+/* XXX: TODO:
+ * AMR => SPR 29 (Power 2.04)
+ * CTRL => SPR 136 (Power 2.04)
+ * CTRL => SPR 152 (Power 2.04)
+ * SCOMC => SPR 276 (64 bits ?)
+ * SCOMD => SPR 277 (64 bits ?)
+ * TBU40 => SPR 286 (Power 2.04 hypv)
+ * HSPRG0 => SPR 304 (Power 2.04 hypv)
+ * HSPRG1 => SPR 305 (Power 2.04 hypv)
+ * HDSISR => SPR 306 (Power 2.04 hypv)
+ * HDAR => SPR 307 (Power 2.04 hypv)
+ * PURR => SPR 309 (Power 2.04 hypv)
+ * HDEC => SPR 310 (Power 2.04 hypv)
+ * HIOR => SPR 311 (hypv)
+ * RMOR => SPR 312 (970)
+ * HRMOR => SPR 313 (Power 2.04 hypv)
+ * HSRR0 => SPR 314 (Power 2.04 hypv)
+ * HSRR1 => SPR 315 (Power 2.04 hypv)
+ * LPIDR => SPR 317 (970)
+ * EPR => SPR 702 (Power 2.04 emb)
+ * perf => 768-783 (Power 2.04)
+ * perf => 784-799 (Power 2.04)
+ * PPR => SPR 896 (Power 2.04)
+ * EPLC => SPR 947 (Power 2.04 emb)
+ * EPSC => SPR 948 (Power 2.04 emb)
+ * DABRX => 1015 (Power 2.04 hypv)
+ * FPECR => SPR 1022 (?)
+ * ... and more (thermal management, performance counters, ...)
+ */
+
+/*****************************************************************************/
+/* Exception vectors models */
+static void init_excp_4xx_real (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_CRITICAL] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_PIT] = 0x00001000;
+ env->excp_vectors[POWERPC_EXCP_FIT] = 0x00001010;
+ env->excp_vectors[POWERPC_EXCP_WDT] = 0x00001020;
+ env->excp_vectors[POWERPC_EXCP_DEBUG] = 0x00002000;
+ env->ivor_mask = 0x0000FFF0UL;
+ env->ivpr_mask = 0xFFFF0000UL;
+ /* Hardware reset vector */
+ env->hreset_vector = 0xFFFFFFFCUL;
+#endif
+}
+
+static void init_excp_4xx_softmmu (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_CRITICAL] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_PIT] = 0x00001000;
+ env->excp_vectors[POWERPC_EXCP_FIT] = 0x00001010;
+ env->excp_vectors[POWERPC_EXCP_WDT] = 0x00001020;
+ env->excp_vectors[POWERPC_EXCP_DTLB] = 0x00001100;
+ env->excp_vectors[POWERPC_EXCP_ITLB] = 0x00001200;
+ env->excp_vectors[POWERPC_EXCP_DEBUG] = 0x00002000;
+ env->ivor_mask = 0x0000FFF0UL;
+ env->ivpr_mask = 0xFFFF0000UL;
+ /* Hardware reset vector */
+ env->hreset_vector = 0xFFFFFFFCUL;
+#endif
+}
+
+static void init_excp_MPC5xx (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_FPA] = 0x00000E00;
+ env->excp_vectors[POWERPC_EXCP_EMUL] = 0x00001000;
+ env->excp_vectors[POWERPC_EXCP_DABR] = 0x00001C00;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001C00;
+ env->excp_vectors[POWERPC_EXCP_MEXTBR] = 0x00001E00;
+ env->excp_vectors[POWERPC_EXCP_NMEXTBR] = 0x00001F00;
+ env->ivor_mask = 0x0000FFF0UL;
+ env->ivpr_mask = 0xFFFF0000UL;
+ /* Hardware reset vector */
+ env->hreset_vector = 0x00000100UL;
+#endif
+}
+
+static void init_excp_MPC8xx (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_FPA] = 0x00000E00;
+ env->excp_vectors[POWERPC_EXCP_EMUL] = 0x00001000;
+ env->excp_vectors[POWERPC_EXCP_ITLB] = 0x00001100;
+ env->excp_vectors[POWERPC_EXCP_DTLB] = 0x00001200;
+ env->excp_vectors[POWERPC_EXCP_ITLBE] = 0x00001300;
+ env->excp_vectors[POWERPC_EXCP_DTLBE] = 0x00001400;
+ env->excp_vectors[POWERPC_EXCP_DABR] = 0x00001C00;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001C00;
+ env->excp_vectors[POWERPC_EXCP_MEXTBR] = 0x00001E00;
+ env->excp_vectors[POWERPC_EXCP_NMEXTBR] = 0x00001F00;
+ env->ivor_mask = 0x0000FFF0UL;
+ env->ivpr_mask = 0xFFFF0000UL;
+ /* Hardware reset vector */
+ env->hreset_vector = 0x00000100UL;
+#endif
+}
+
+static void init_excp_G2 (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_CRITICAL] = 0x00000A00;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_IFTLB] = 0x00001000;
+ env->excp_vectors[POWERPC_EXCP_DLTLB] = 0x00001100;
+ env->excp_vectors[POWERPC_EXCP_DSTLB] = 0x00001200;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
+ env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400;
+ /* Hardware reset vector */
+ env->hreset_vector = 0x00000100UL;
+#endif
+}
+
+static void init_excp_e200(CPUPPCState *env, target_ulong ivpr_mask)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000FFC;
+ env->excp_vectors[POWERPC_EXCP_CRITICAL] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_APU] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_FIT] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_WDT] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_DTLB] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_ITLB] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_DEBUG] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_SPEU] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_EFPDI] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_EFPRI] = 0x00000000;
+ env->ivor_mask = 0x0000FFF7UL;
+ env->ivpr_mask = ivpr_mask;
+ /* Hardware reset vector */
+ env->hreset_vector = 0xFFFFFFFCUL;
+#endif
+}
+
+static void init_excp_BookE (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_CRITICAL] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_APU] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_FIT] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_WDT] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_DTLB] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_ITLB] = 0x00000000;
+ env->excp_vectors[POWERPC_EXCP_DEBUG] = 0x00000000;
+ env->ivor_mask = 0x0000FFF0UL;
+ env->ivpr_mask = 0xFFFF0000UL;
+ /* Hardware reset vector */
+ env->hreset_vector = 0xFFFFFFFCUL;
+#endif
+}
+
+static void init_excp_601 (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_IO] = 0x00000A00;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_RUNM] = 0x00002000;
+ /* Hardware reset vector */
+ env->hreset_vector = 0x00000100UL;
+#endif
+}
+
+static void init_excp_602 (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ /* XXX: exception prefix has a special behavior on 602 */
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_IFTLB] = 0x00001000;
+ env->excp_vectors[POWERPC_EXCP_DLTLB] = 0x00001100;
+ env->excp_vectors[POWERPC_EXCP_DSTLB] = 0x00001200;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
+ env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400;
+ env->excp_vectors[POWERPC_EXCP_WDT] = 0x00001500;
+ env->excp_vectors[POWERPC_EXCP_EMUL] = 0x00001600;
+ /* Hardware reset vector */
+ env->hreset_vector = 0x00000100UL;
+#endif
+}
+
+static void init_excp_603 (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_IFTLB] = 0x00001000;
+ env->excp_vectors[POWERPC_EXCP_DLTLB] = 0x00001100;
+ env->excp_vectors[POWERPC_EXCP_DSTLB] = 0x00001200;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
+ env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400;
+ /* Hardware reset vector */
+ env->hreset_vector = 0x00000100UL;
+#endif
+}
+
+static void init_excp_604 (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
+ env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400;
+ /* Hardware reset vector */
+ env->hreset_vector = 0x00000100UL;
+#endif
+}
+
+static void init_excp_7x0 (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
+ env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400;
+ env->excp_vectors[POWERPC_EXCP_THERM] = 0x00001700;
+ /* Hardware reset vector */
+ env->hreset_vector = 0x00000100UL;
+#endif
+}
+
+static void init_excp_750cl (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
+ env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400;
+ /* Hardware reset vector */
+ env->hreset_vector = 0x00000100UL;
+#endif
+}
+
+static void init_excp_750cx (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
+ env->excp_vectors[POWERPC_EXCP_THERM] = 0x00001700;
+ /* Hardware reset vector */
+ env->hreset_vector = 0x00000100UL;
+#endif
+}
+
+/* XXX: Check if this is correct */
+static void init_excp_7x5 (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00;
+ env->excp_vectors[POWERPC_EXCP_IFTLB] = 0x00001000;
+ env->excp_vectors[POWERPC_EXCP_DLTLB] = 0x00001100;
+ env->excp_vectors[POWERPC_EXCP_DSTLB] = 0x00001200;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
+ env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400;
+ env->excp_vectors[POWERPC_EXCP_THERM] = 0x00001700;
+ /* Hardware reset vector */
+ env->hreset_vector = 0x00000100UL;
+#endif
+}
+
+static void init_excp_7400 (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00;
+ env->excp_vectors[POWERPC_EXCP_VPU] = 0x00000F20;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
+ env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400;
+ env->excp_vectors[POWERPC_EXCP_VPUA] = 0x00001600;
+ env->excp_vectors[POWERPC_EXCP_THERM] = 0x00001700;
+ /* Hardware reset vector */
+ env->hreset_vector = 0x00000100UL;
+#endif
+}
+
+static void init_excp_7450 (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00;
+ env->excp_vectors[POWERPC_EXCP_VPU] = 0x00000F20;
+ env->excp_vectors[POWERPC_EXCP_IFTLB] = 0x00001000;
+ env->excp_vectors[POWERPC_EXCP_DLTLB] = 0x00001100;
+ env->excp_vectors[POWERPC_EXCP_DSTLB] = 0x00001200;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
+ env->excp_vectors[POWERPC_EXCP_SMI] = 0x00001400;
+ env->excp_vectors[POWERPC_EXCP_VPUA] = 0x00001600;
+ /* Hardware reset vector */
+ env->hreset_vector = 0x00000100UL;
+#endif
+}
+
+#if defined (TARGET_PPC64)
+static void init_excp_970 (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_DSEG] = 0x00000380;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_ISEG] = 0x00000480;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_HDECR] = 0x00000980;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00;
+ env->excp_vectors[POWERPC_EXCP_VPU] = 0x00000F20;
+ env->excp_vectors[POWERPC_EXCP_IABR] = 0x00001300;
+ env->excp_vectors[POWERPC_EXCP_MAINT] = 0x00001600;
+ env->excp_vectors[POWERPC_EXCP_VPUA] = 0x00001700;
+ env->excp_vectors[POWERPC_EXCP_THERM] = 0x00001800;
+ /* Hardware reset vector */
+ env->hreset_vector = 0x0000000000000100ULL;
+#endif
+}
+
+static void init_excp_POWER7 (CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000100;
+ env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000200;
+ env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000300;
+ env->excp_vectors[POWERPC_EXCP_DSEG] = 0x00000380;
+ env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000400;
+ env->excp_vectors[POWERPC_EXCP_ISEG] = 0x00000480;
+ env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x00000500;
+ env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x00000600;
+ env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x00000700;
+ env->excp_vectors[POWERPC_EXCP_FPU] = 0x00000800;
+ env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000900;
+ env->excp_vectors[POWERPC_EXCP_HDECR] = 0x00000980;
+ env->excp_vectors[POWERPC_EXCP_SYSCALL] = 0x00000C00;
+ env->excp_vectors[POWERPC_EXCP_TRACE] = 0x00000D00;
+ env->excp_vectors[POWERPC_EXCP_HDSI] = 0x00000E00;
+ env->excp_vectors[POWERPC_EXCP_HISI] = 0x00000E20;
+ env->excp_vectors[POWERPC_EXCP_HV_EMU] = 0x00000E40;
+ env->excp_vectors[POWERPC_EXCP_HV_MAINT] = 0x00000E60;
+ env->excp_vectors[POWERPC_EXCP_PERFM] = 0x00000F00;
+ env->excp_vectors[POWERPC_EXCP_VPU] = 0x00000F20;
+ env->excp_vectors[POWERPC_EXCP_VSXU] = 0x00000F40;
+ /* Hardware reset vector */
+ env->hreset_vector = 0x0000000000000100ULL;
+#endif
+}
+
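+/* POWER8 keeps the POWER7 vector layout and only adds the doorbell and
+ * facility unavailable interrupt vectors on top of it. */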
+static void init_excp_POWER8(CPUPPCState *env)
+{
+ init_excp_POWER7(env);
+
+#if !defined(CONFIG_USER_ONLY)
+ env->excp_vectors[POWERPC_EXCP_SDOOR] = 0x00000A00;
+ env->excp_vectors[POWERPC_EXCP_FU] = 0x00000F60;
+ env->excp_vectors[POWERPC_EXCP_HV_FU] = 0x00000F80;
+ env->excp_vectors[POWERPC_EXCP_SDOOR_HV] = 0x00000E80;
+#endif
+}
+
+#endif
+
+/*****************************************************************************/
+/* Power management enable checks */
+static int check_pow_none (CPUPPCState *env)
+{
+ return 0;
+}
+
+static int check_pow_nocheck (CPUPPCState *env)
+{
+ return 1;
+}
+
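+/* Power-saving modes may only be entered when one of the doze/nap/sleep
+ * enable bits in HID0 is set; the 74xx variant below checks the narrower
+ * mask implemented by those parts. */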
+static int check_pow_hid0 (CPUPPCState *env)
+{
+ if (env->spr[SPR_HID0] & 0x00E00000) {
+ return 1;
+ }
+
+ return 0;
+}
+
+static int check_pow_hid0_74xx (CPUPPCState *env)
+{
+ if (env->spr[SPR_HID0] & 0x00600000) {
+ return 1;
+ }
+
+ return 0;
+}
+
+static bool ppc_cpu_interrupts_big_endian_always(PowerPCCPU *cpu)
+{
+ return true;
+}
+
+#ifdef TARGET_PPC64
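+/* For CPUs using this hook, the endianness of interrupt entry follows
+ * LPCR[ILE] rather than being fixed. */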
+static bool ppc_cpu_interrupts_big_endian_lpcr(PowerPCCPU *cpu)
+{
+ return !(cpu->env.spr[SPR_LPCR] & LPCR_ILE);
+}
+#endif
+
+/*****************************************************************************/
+/* PowerPC implementations definitions */
+
+#define POWERPC_FAMILY(_name) \
+ static void \
+ glue(glue(ppc_, _name), _cpu_family_class_init)(ObjectClass *, void *); \
+ \
+ static const TypeInfo \
+ glue(glue(ppc_, _name), _cpu_family_type_info) = { \
+ .name = stringify(_name) "-family-" TYPE_POWERPC_CPU, \
+ .parent = TYPE_POWERPC_CPU, \
+ .abstract = true, \
+ .class_init = glue(glue(ppc_, _name), _cpu_family_class_init), \
+ }; \
+ \
+ static void glue(glue(ppc_, _name), _cpu_family_register_types)(void) \
+ { \
+ type_register_static( \
+ &glue(glue(ppc_, _name), _cpu_family_type_info)); \
+ } \
+ \
+ type_init(glue(glue(ppc_, _name), _cpu_family_register_types)) \
+ \
+ static void glue(glue(ppc_, _name), _cpu_family_class_init)
+
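+/*
+ * Each POWERPC_FAMILY(name)(ObjectClass *oc, void *data) { ... } block below
+ * thus registers an abstract "<name>-family-" TYPE_POWERPC_CPU QOM type and
+ * supplies the body of that type's class_init function.
+ */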
+static void init_proc_401 (CPUPPCState *env)
+{
+ gen_spr_40x(env);
+ gen_spr_401_403(env);
+ gen_spr_401(env);
+ init_excp_4xx_real(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc40x_irq_init(ppc_env_get_cpu(env));
+
+ SET_FIT_PERIOD(12, 16, 20, 24);
+ SET_WDT_PERIOD(16, 20, 24, 28);
+}
+
+POWERPC_FAMILY(401)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 401";
+ pcc->init_proc = init_proc_401;
+ pcc->check_pow = check_pow_nocheck;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING |
+ PPC_WRTEE | PPC_DCR |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT |
+ PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_4xx_COMMON | PPC_40x_EXCP;
+ pcc->msr_mask = (1ull << MSR_KEY) |
+ (1ull << MSR_POW) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_REAL;
+ pcc->excp_model = POWERPC_EXCP_40x;
+ pcc->bus_model = PPC_FLAGS_INPUT_401;
+ pcc->bfd_mach = bfd_mach_ppc_403;
+ pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DE |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_401x2 (CPUPPCState *env)
+{
+ gen_spr_40x(env);
+ gen_spr_401_403(env);
+ gen_spr_401x2(env);
+ gen_spr_compress(env);
+ /* Memory management */
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = 64;
+ env->nb_ways = 1;
+ env->id_tlbs = 0;
+ env->tlb_type = TLB_EMB;
+#endif
+ init_excp_4xx_softmmu(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc40x_irq_init(ppc_env_get_cpu(env));
+
+ SET_FIT_PERIOD(12, 16, 20, 24);
+ SET_WDT_PERIOD(16, 20, 24, 28);
+}
+
+POWERPC_FAMILY(401x2)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 401x2";
+ pcc->init_proc = init_proc_401x2;
+ pcc->check_pow = check_pow_nocheck;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_DCR | PPC_WRTEE |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT |
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC |
+ PPC_4xx_COMMON | PPC_40x_EXCP;
+ pcc->msr_mask = (1ull << 20) |
+ (1ull << MSR_KEY) |
+ (1ull << MSR_POW) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_SOFT_4xx_Z;
+ pcc->excp_model = POWERPC_EXCP_40x;
+ pcc->bus_model = PPC_FLAGS_INPUT_401;
+ pcc->bfd_mach = bfd_mach_ppc_403;
+ pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DE |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_401x3 (CPUPPCState *env)
+{
+ gen_spr_40x(env);
+ gen_spr_401_403(env);
+ gen_spr_401(env);
+ gen_spr_401x2(env);
+ gen_spr_compress(env);
+ init_excp_4xx_softmmu(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc40x_irq_init(ppc_env_get_cpu(env));
+
+ SET_FIT_PERIOD(12, 16, 20, 24);
+ SET_WDT_PERIOD(16, 20, 24, 28);
+}
+
+POWERPC_FAMILY(401x3)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 401x3";
+ pcc->init_proc = init_proc_401x3;
+ pcc->check_pow = check_pow_nocheck;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_DCR | PPC_WRTEE |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT |
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC |
+ PPC_4xx_COMMON | PPC_40x_EXCP;
+ pcc->msr_mask = (1ull << 20) |
+ (1ull << MSR_KEY) |
+ (1ull << MSR_POW) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_DWE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_SOFT_4xx_Z;
+ pcc->excp_model = POWERPC_EXCP_40x;
+ pcc->bus_model = PPC_FLAGS_INPUT_401;
+ pcc->bfd_mach = bfd_mach_ppc_403;
+ pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DE |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_IOP480 (CPUPPCState *env)
+{
+ gen_spr_40x(env);
+ gen_spr_401_403(env);
+ gen_spr_401x2(env);
+ gen_spr_compress(env);
+ /* Memory management */
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = 64;
+ env->nb_ways = 1;
+ env->id_tlbs = 0;
+ env->tlb_type = TLB_EMB;
+#endif
+ init_excp_4xx_softmmu(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc40x_irq_init(ppc_env_get_cpu(env));
+
+ SET_FIT_PERIOD(8, 12, 16, 20);
+ SET_WDT_PERIOD(16, 20, 24, 28);
+}
+
+POWERPC_FAMILY(IOP480)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "IOP480";
+ pcc->init_proc = init_proc_IOP480;
+ pcc->check_pow = check_pow_nocheck;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING |
+ PPC_DCR | PPC_WRTEE |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT |
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC |
+ PPC_4xx_COMMON | PPC_40x_EXCP;
+ pcc->msr_mask = (1ull << 20) |
+ (1ull << MSR_KEY) |
+ (1ull << MSR_POW) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_SOFT_4xx_Z;
+ pcc->excp_model = POWERPC_EXCP_40x;
+ pcc->bus_model = PPC_FLAGS_INPUT_401;
+ pcc->bfd_mach = bfd_mach_ppc_403;
+ pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DE |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_403 (CPUPPCState *env)
+{
+ gen_spr_40x(env);
+ gen_spr_401_403(env);
+ gen_spr_403(env);
+ gen_spr_403_real(env);
+ init_excp_4xx_real(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc40x_irq_init(ppc_env_get_cpu(env));
+
+ SET_FIT_PERIOD(8, 12, 16, 20);
+ SET_WDT_PERIOD(16, 20, 24, 28);
+}
+
+POWERPC_FAMILY(403)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 403";
+ pcc->init_proc = init_proc_403;
+ pcc->check_pow = check_pow_nocheck;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING |
+ PPC_DCR | PPC_WRTEE |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT |
+ PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_4xx_COMMON | PPC_40x_EXCP;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_PE) |
+ (1ull << MSR_PX) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_REAL;
+ pcc->excp_model = POWERPC_EXCP_40x;
+ pcc->bus_model = PPC_FLAGS_INPUT_401;
+ pcc->bfd_mach = bfd_mach_ppc_403;
+ pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_PX |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_403GCX (CPUPPCState *env)
+{
+ gen_spr_40x(env);
+ gen_spr_401_403(env);
+ gen_spr_403(env);
+ gen_spr_403_real(env);
+ gen_spr_403_mmu(env);
+ /* Bus access control */
+ /* not emulated, as QEMU never does speculative access */
+ spr_register(env, SPR_40x_SGR, "SGR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0xFFFFFFFF);
+ /* not emulated, as QEMU does not emulate caches */
+ spr_register(env, SPR_40x_DCWR, "DCWR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = 64;
+ env->nb_ways = 1;
+ env->id_tlbs = 0;
+ env->tlb_type = TLB_EMB;
+#endif
+ init_excp_4xx_softmmu(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc40x_irq_init(ppc_env_get_cpu(env));
+
+ SET_FIT_PERIOD(8, 12, 16, 20);
+ SET_WDT_PERIOD(16, 20, 24, 28);
+}
+
+POWERPC_FAMILY(403GCX)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 403 GCX";
+ pcc->init_proc = init_proc_403GCX;
+ pcc->check_pow = check_pow_nocheck;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING |
+ PPC_DCR | PPC_WRTEE |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT |
+ PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC |
+ PPC_4xx_COMMON | PPC_40x_EXCP;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_PE) |
+ (1ull << MSR_PX) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_SOFT_4xx_Z;
+ pcc->excp_model = POWERPC_EXCP_40x;
+ pcc->bus_model = PPC_FLAGS_INPUT_401;
+ pcc->bfd_mach = bfd_mach_ppc_403;
+ pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_PX |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_405 (CPUPPCState *env)
+{
+ /* Time base */
+ gen_tbl(env);
+ gen_spr_40x(env);
+ gen_spr_405(env);
+ /* Bus access control */
+ /* not emulated, as QEMU never does speculative access */
+ spr_register(env, SPR_40x_SGR, "SGR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0xFFFFFFFF);
+ /* not emulated, as QEMU does not emulate caches */
+ spr_register(env, SPR_40x_DCWR, "DCWR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = 64;
+ env->nb_ways = 1;
+ env->id_tlbs = 0;
+ env->tlb_type = TLB_EMB;
+#endif
+ init_excp_4xx_softmmu(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc40x_irq_init(ppc_env_get_cpu(env));
+
+ SET_FIT_PERIOD(8, 12, 16, 20);
+ SET_WDT_PERIOD(16, 20, 24, 28);
+}
+
+POWERPC_FAMILY(405)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 405";
+ pcc->init_proc = init_proc_405;
+ pcc->check_pow = check_pow_nocheck;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_DCR | PPC_WRTEE |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT |
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_40x_TLB | PPC_MEM_TLBIA | PPC_MEM_TLBSYNC |
+ PPC_4xx_COMMON | PPC_405_MAC | PPC_40x_EXCP;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_DWE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR);
+ pcc->mmu_model = POWERPC_MMU_SOFT_4xx;
+ pcc->excp_model = POWERPC_EXCP_40x;
+ pcc->bus_model = PPC_FLAGS_INPUT_405;
+ pcc->bfd_mach = bfd_mach_ppc_403;
+ pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DWE |
+ POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_440EP (CPUPPCState *env)
+{
+ /* Time base */
+ gen_tbl(env);
+ gen_spr_BookE(env, 0x000000000000FFFFULL);
+ gen_spr_440(env);
+ gen_spr_usprgh(env);
+ /* Processor identification */
+ spr_register(env, SPR_BOOKE_PIR, "PIR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_pir,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC3, "IAC3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC4, "IAC4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DVC1, "DVC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DVC2, "DVC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_MCSR, "MCSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_MCSRR0, "MCSRR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_MCSRR1, "MCSRR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_CCR1, "CCR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = 64;
+ env->nb_ways = 1;
+ env->id_tlbs = 0;
+ env->tlb_type = TLB_EMB;
+#endif
+ init_excp_BookE(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ ppc40x_irq_init(ppc_env_get_cpu(env));
+
+ SET_FIT_PERIOD(12, 16, 20, 24);
+ SET_WDT_PERIOD(20, 24, 28, 32);
+}
+
+POWERPC_FAMILY(440EP)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 440 EP";
+ pcc->init_proc = init_proc_440EP;
+ pcc->check_pow = check_pow_nocheck;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING |
+ PPC_FLOAT | PPC_FLOAT_FRES | PPC_FLOAT_FSEL |
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
+ PPC_FLOAT_STFIWX |
+ PPC_DCR | PPC_WRTEE | PPC_RFMCI |
+ PPC_CACHE | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
+ PPC_MEM_TLBSYNC | PPC_MFTB |
+ PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC |
+ PPC_440_SPEC;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_DWE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR);
+ pcc->mmu_model = POWERPC_MMU_BOOKE;
+ pcc->excp_model = POWERPC_EXCP_BOOKE;
+ pcc->bus_model = PPC_FLAGS_INPUT_BookE;
+ pcc->bfd_mach = bfd_mach_ppc_403;
+ pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DWE |
+ POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_440GP (CPUPPCState *env)
+{
+ /* Time base */
+ gen_tbl(env);
+ gen_spr_BookE(env, 0x000000000000FFFFULL);
+ gen_spr_440(env);
+ gen_spr_usprgh(env);
+ /* Processor identification */
+ spr_register(env, SPR_BOOKE_PIR, "PIR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_pir,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC3, "IAC3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC4, "IAC4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DVC1, "DVC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DVC2, "DVC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = 64;
+ env->nb_ways = 1;
+ env->id_tlbs = 0;
+ env->tlb_type = TLB_EMB;
+#endif
+ init_excp_BookE(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* XXX: TODO: allocate internal IRQ controller */
+
+ SET_FIT_PERIOD(12, 16, 20, 24);
+ SET_WDT_PERIOD(20, 24, 28, 32);
+}
+
+POWERPC_FAMILY(440GP)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 440 GP";
+ pcc->init_proc = init_proc_440GP;
+ pcc->check_pow = check_pow_nocheck;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING |
+ PPC_DCR | PPC_DCRX | PPC_WRTEE | PPC_MFAPIDI |
+ PPC_CACHE | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
+ PPC_MEM_TLBSYNC | PPC_TLBIVA | PPC_MFTB |
+ PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC |
+ PPC_440_SPEC;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_DWE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR);
+ pcc->mmu_model = POWERPC_MMU_BOOKE;
+ pcc->excp_model = POWERPC_EXCP_BOOKE;
+ pcc->bus_model = PPC_FLAGS_INPUT_BookE;
+ pcc->bfd_mach = bfd_mach_ppc_403;
+ pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DWE |
+ POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_440x4 (CPUPPCState *env)
+{
+ /* Time base */
+ gen_tbl(env);
+ gen_spr_BookE(env, 0x000000000000FFFFULL);
+ gen_spr_440(env);
+ gen_spr_usprgh(env);
+ /* Processor identification */
+ spr_register(env, SPR_BOOKE_PIR, "PIR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_pir,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC3, "IAC3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC4, "IAC4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DVC1, "DVC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DVC2, "DVC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = 64;
+ env->nb_ways = 1;
+ env->id_tlbs = 0;
+ env->tlb_type = TLB_EMB;
+#endif
+ init_excp_BookE(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* XXX: TODO: allocate internal IRQ controller */
+
+ SET_FIT_PERIOD(12, 16, 20, 24);
+ SET_WDT_PERIOD(20, 24, 28, 32);
+}
+
+POWERPC_FAMILY(440x4)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 440x4";
+ pcc->init_proc = init_proc_440x4;
+ pcc->check_pow = check_pow_nocheck;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING |
+ PPC_DCR | PPC_WRTEE |
+ PPC_CACHE | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
+ PPC_MEM_TLBSYNC | PPC_MFTB |
+ PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC |
+ PPC_440_SPEC;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_DWE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR);
+ pcc->mmu_model = POWERPC_MMU_BOOKE;
+ pcc->excp_model = POWERPC_EXCP_BOOKE;
+ pcc->bus_model = PPC_FLAGS_INPUT_BookE;
+ pcc->bfd_mach = bfd_mach_ppc_403;
+ pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DWE |
+ POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_440x5 (CPUPPCState *env)
+{
+ /* Time base */
+ gen_tbl(env);
+ gen_spr_BookE(env, 0x000000000000FFFFULL);
+ gen_spr_440(env);
+ gen_spr_usprgh(env);
+ /* Processor identification */
+ spr_register(env, SPR_BOOKE_PIR, "PIR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_pir,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC3, "IAC3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC4, "IAC4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DVC1, "DVC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DVC2, "DVC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_MCSR, "MCSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_MCSRR0, "MCSRR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_MCSRR1, "MCSRR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_CCR1, "CCR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = 64;
+ env->nb_ways = 1;
+ env->id_tlbs = 0;
+ env->tlb_type = TLB_EMB;
+#endif
+ init_excp_BookE(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ ppc40x_irq_init(ppc_env_get_cpu(env));
+
+ SET_FIT_PERIOD(12, 16, 20, 24);
+ SET_WDT_PERIOD(20, 24, 28, 32);
+}
+
+POWERPC_FAMILY(440x5)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 440x5";
+ pcc->init_proc = init_proc_440x5;
+ pcc->check_pow = check_pow_nocheck;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING |
+ PPC_DCR | PPC_WRTEE | PPC_RFMCI |
+ PPC_CACHE | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
+ PPC_MEM_TLBSYNC | PPC_MFTB |
+ PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC |
+ PPC_440_SPEC;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_DWE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR);
+ pcc->mmu_model = POWERPC_MMU_BOOKE;
+ pcc->excp_model = POWERPC_EXCP_BOOKE;
+ pcc->bus_model = PPC_FLAGS_INPUT_BookE;
+ pcc->bfd_mach = bfd_mach_ppc_403;
+ pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DWE |
+ POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK;
+}
+
+POWERPC_FAMILY(440x5wDFPU)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 440x5 with double precision FPU";
+ pcc->init_proc = init_proc_440x5;
+ pcc->check_pow = check_pow_nocheck;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING |
+ PPC_FLOAT | PPC_FLOAT_FSQRT |
+ PPC_FLOAT_STFIWX |
+ PPC_DCR | PPC_WRTEE | PPC_RFMCI |
+ PPC_CACHE | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
+ PPC_MEM_TLBSYNC | PPC_MFTB |
+ PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC |
+ PPC_440_SPEC;
+ pcc->insns_flags2 = PPC2_FP_CVT_S64;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_DWE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR);
+ pcc->mmu_model = POWERPC_MMU_BOOKE;
+ pcc->excp_model = POWERPC_EXCP_BOOKE;
+ pcc->bus_model = PPC_FLAGS_INPUT_BookE;
+ pcc->bfd_mach = bfd_mach_ppc_403;
+ pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DWE |
+ POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_460 (CPUPPCState *env)
+{
+ /* Time base */
+ gen_tbl(env);
+ gen_spr_BookE(env, 0x000000000000FFFFULL);
+ gen_spr_440(env);
+ gen_spr_usprgh(env);
+ /* Processor identification */
+ spr_register(env, SPR_BOOKE_PIR, "PIR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_pir,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC3, "IAC3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC4, "IAC4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DVC1, "DVC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DVC2, "DVC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_MCSR, "MCSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_MCSRR0, "MCSRR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_MCSRR1, "MCSRR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_CCR1, "CCR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_DCRIPR, "SPR_DCRIPR",
+ &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = 64;
+ env->nb_ways = 1;
+ env->id_tlbs = 0;
+ env->tlb_type = TLB_EMB;
+#endif
+ init_excp_BookE(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* XXX: TODO: allocate internal IRQ controller */
+
+ SET_FIT_PERIOD(12, 16, 20, 24);
+ SET_WDT_PERIOD(20, 24, 28, 32);
+}
+
+POWERPC_FAMILY(460)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 460 (guessed)";
+ pcc->init_proc = init_proc_460;
+ pcc->check_pow = check_pow_nocheck;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING |
+ PPC_DCR | PPC_DCRX | PPC_DCRUX |
+ PPC_WRTEE | PPC_MFAPIDI | PPC_MFTB |
+ PPC_CACHE | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
+ PPC_MEM_TLBSYNC | PPC_TLBIVA |
+ PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC |
+ PPC_440_SPEC;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_DWE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR);
+ pcc->mmu_model = POWERPC_MMU_BOOKE;
+ pcc->excp_model = POWERPC_EXCP_BOOKE;
+ pcc->bus_model = PPC_FLAGS_INPUT_BookE;
+ pcc->bfd_mach = bfd_mach_ppc_403;
+ pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DWE |
+ POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_460F (CPUPPCState *env)
+{
+ /* Time base */
+ gen_tbl(env);
+ gen_spr_BookE(env, 0x000000000000FFFFULL);
+ gen_spr_440(env);
+ gen_spr_usprgh(env);
+ /* Processor identification */
+ spr_register(env, SPR_BOOKE_PIR, "PIR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_pir,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC3, "IAC3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC4, "IAC4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DVC1, "DVC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_DVC2, "DVC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_MCSR, "MCSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_MCSRR0, "MCSRR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_MCSRR1, "MCSRR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_440_CCR1, "CCR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_DCRIPR, "SPR_DCRIPR",
+ &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = 64;
+ env->nb_ways = 1;
+ env->id_tlbs = 0;
+ env->tlb_type = TLB_EMB;
+#endif
+ init_excp_BookE(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* XXX: TODO: allocate internal IRQ controller */
+
+ SET_FIT_PERIOD(12, 16, 20, 24);
+ SET_WDT_PERIOD(20, 24, 28, 32);
+}
+
+POWERPC_FAMILY(460F)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 460F (guessed)";
+ pcc->init_proc = init_proc_460F;
+ pcc->check_pow = check_pow_nocheck;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING |
+ PPC_FLOAT | PPC_FLOAT_FRES | PPC_FLOAT_FSEL |
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
+ PPC_FLOAT_STFIWX | PPC_MFTB |
+ PPC_DCR | PPC_DCRX | PPC_DCRUX |
+ PPC_WRTEE | PPC_MFAPIDI |
+ PPC_CACHE | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
+ PPC_MEM_TLBSYNC | PPC_TLBIVA |
+ PPC_BOOKE | PPC_4xx_COMMON | PPC_405_MAC |
+ PPC_440_SPEC;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_DWE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR);
+ pcc->mmu_model = POWERPC_MMU_BOOKE;
+ pcc->excp_model = POWERPC_EXCP_BOOKE;
+ pcc->bus_model = PPC_FLAGS_INPUT_BookE;
+ pcc->bfd_mach = bfd_mach_ppc_403;
+ pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DWE |
+ POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_MPC5xx (CPUPPCState *env)
+{
+ /* Time base */
+ gen_tbl(env);
+ gen_spr_5xx_8xx(env);
+ gen_spr_5xx(env);
+ init_excp_MPC5xx(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* XXX: TODO: allocate internal IRQ controller */
+}
+
+POWERPC_FAMILY(MPC5xx)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "Freescale 5xx cores (aka RCPU)";
+ pcc->init_proc = init_proc_MPC5xx;
+ pcc->check_pow = check_pow_none;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING |
+ PPC_MEM_EIEIO | PPC_MEM_SYNC |
+ PPC_CACHE_ICBI | PPC_FLOAT | PPC_FLOAT_STFIWX |
+ PPC_MFTB;
+ pcc->msr_mask = (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_REAL;
+ pcc->excp_model = POWERPC_EXCP_603;
+ pcc->bus_model = PPC_FLAGS_INPUT_RCPU;
+ pcc->bfd_mach = bfd_mach_ppc_505;
+ pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_MPC8xx (CPUPPCState *env)
+{
+ /* Time base */
+ gen_tbl(env);
+ gen_spr_5xx_8xx(env);
+ gen_spr_8xx(env);
+ init_excp_MPC8xx(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* XXX: TODO: allocate internal IRQ controller */
+}
+
+POWERPC_FAMILY(MPC8xx)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "Freescale 8xx cores (aka PowerQUICC)";
+ pcc->init_proc = init_proc_MPC8xx;
+ pcc->check_pow = check_pow_none;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING |
+ PPC_MEM_EIEIO | PPC_MEM_SYNC |
+ PPC_CACHE_ICBI | PPC_MFTB;
+ pcc->msr_mask = (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_MPC8xx;
+ pcc->excp_model = POWERPC_EXCP_603;
+ pcc->bus_model = PPC_FLAGS_INPUT_RCPU;
+ pcc->bfd_mach = bfd_mach_ppc_860;
+ pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+/* Freescale 82xx cores (aka PowerQUICC-II) */
+
+static void init_proc_G2 (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_G2_755(env);
+ gen_spr_G2(env);
+ /* Time base */
+ gen_tbl(env);
+ /* External access control */
+ /* XXX : not implemented */
+ spr_register(env, SPR_EAR, "EAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Hardware implementation register */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID2, "HID2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ gen_high_BATs(env);
+ gen_6xx_7xx_soft_tlb(env, 64, 2);
+ init_excp_G2(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(ppc_env_get_cpu(env));
+}
+
+POWERPC_FAMILY(G2)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC G2";
+ pcc->init_proc = init_proc_G2;
+ pcc->check_pow = check_pow_hid0;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB |
+ PPC_SEGMENT | PPC_EXTERN;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_TGPR) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_AL) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_RI);
+ pcc->mmu_model = POWERPC_MMU_SOFT_6xx;
+ pcc->excp_model = POWERPC_EXCP_G2;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_ec603e;
+ pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_G2LE (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_G2_755(env);
+ gen_spr_G2(env);
+ /* Time base */
+ gen_tbl(env);
+ /* External access control */
+ /* XXX : not implemented */
+ spr_register(env, SPR_EAR, "EAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Hardware implementation register */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID2, "HID2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+
+ /* Memory management */
+ gen_low_BATs(env);
+ gen_high_BATs(env);
+ gen_6xx_7xx_soft_tlb(env, 64, 2);
+ init_excp_G2(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(ppc_env_get_cpu(env));
+}
+
+POWERPC_FAMILY(G2LE)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC G2LE";
+ pcc->init_proc = init_proc_G2LE;
+ pcc->check_pow = check_pow_hid0;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB |
+ PPC_SEGMENT | PPC_EXTERN;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_TGPR) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_AL) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_SOFT_6xx;
+ pcc->excp_model = POWERPC_EXCP_G2;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_ec603e;
+ pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_e200 (CPUPPCState *env)
+{
+ /* Time base */
+ gen_tbl(env);
+ gen_spr_BookE(env, 0x000000070000FFFFULL);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_SPEFSCR, "SPEFSCR",
+ &spr_read_spefscr, &spr_write_spefscr,
+ &spr_read_spefscr, &spr_write_spefscr,
+ 0x00000000);
+ /* Memory management */
+ gen_spr_BookE206(env, 0x0000005D, NULL);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_ALTCTXCR, "ALTCTXCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_BUCSR, "BUCSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_CTXCR, "CTXCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_DBCNT, "DBCNT",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_DBCR3, "DBCR3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_L1CFG0, "L1CFG0",
+ &spr_read_generic, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_L1CSR0, "L1CSR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_L1FINV0, "L1FINV0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_TLB0CFG, "TLB0CFG",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_TLB1CFG, "TLB1CFG",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC3, "IAC3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_IAC4, "IAC4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MMUCSR0, "MMUCSR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000); /* TOFIX */
+ spr_register(env, SPR_BOOKE_DSRR0, "DSRR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_DSRR1, "DSRR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = 64;
+ env->nb_ways = 1;
+ env->id_tlbs = 0;
+ env->tlb_type = TLB_EMB;
+#endif
+ init_excp_e200(env, 0xFFFF0000UL);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* XXX: TODO: allocate internal IRQ controller */
+}
+
+POWERPC_FAMILY(e200)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "e200 core";
+ pcc->init_proc = init_proc_e200;
+ pcc->check_pow = check_pow_hid0;
+ /* XXX: unimplemented instructions:
+ * dcblc
+ * dcbtlst
+ * dcbtstls
+ * icblc
+ * icbtls
+ * tlbivax
+ * all SPE multiply-accumulate instructions
+ */
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL |
+ PPC_SPE | PPC_SPE_SINGLE |
+ PPC_WRTEE | PPC_RFDI |
+ PPC_CACHE | PPC_CACHE_LOCK | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
+ PPC_MEM_TLBSYNC | PPC_TLBIVAX |
+ PPC_BOOKE;
+ pcc->msr_mask = (1ull << MSR_UCLE) |
+ (1ull << MSR_SPE) |
+ (1ull << MSR_POW) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_DWE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR);
+ pcc->mmu_model = POWERPC_MMU_BOOKE206;
+ pcc->excp_model = POWERPC_EXCP_BOOKE;
+ pcc->bus_model = PPC_FLAGS_INPUT_BookE;
+ pcc->bfd_mach = bfd_mach_ppc_860;
+ pcc->flags = POWERPC_FLAG_SPE | POWERPC_FLAG_CE |
+ POWERPC_FLAG_UBLE | POWERPC_FLAG_DE |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_e300 (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_603(env);
+ /* Time base */
+ gen_tbl(env);
+ /* hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID2, "HID2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Breakpoints */
+ /* XXX : not implemented */
+ spr_register(env, SPR_DABR, "DABR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_DABR2, "DABR2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_IABR2, "IABR2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_IBCR, "IBCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_DBCR, "DBCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ gen_high_BATs(env);
+ gen_6xx_7xx_soft_tlb(env, 64, 2);
+ init_excp_603(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(ppc_env_get_cpu(env));
+}
+
+POWERPC_FAMILY(e300)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "e300 core";
+ pcc->init_proc = init_proc_e300;
+ pcc->check_pow = check_pow_hid0;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB |
+ PPC_SEGMENT | PPC_EXTERN;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_TGPR) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_AL) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_SOFT_6xx;
+ pcc->excp_model = POWERPC_EXCP_603;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_603;
+ pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK;
+}
+
+#if !defined(CONFIG_USER_ONLY)
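+/* MAS7_MAS3 is a 64-bit alias SPR that maps MAS7 onto the upper 32 bits and
+ * MAS3 onto the lower 32 bits; the helpers below split a write into the two
+ * underlying SPRs and recombine them on read.
+ */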
+static void spr_write_mas73(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv val = tcg_temp_new();
+ tcg_gen_ext32u_tl(val, cpu_gpr[gprn]);
+ gen_store_spr(SPR_BOOKE_MAS3, val);
+ tcg_gen_shri_tl(val, cpu_gpr[gprn], 32);
+ gen_store_spr(SPR_BOOKE_MAS7, val);
+ tcg_temp_free(val);
+}
+
+static void spr_read_mas73(DisasContext *ctx, int gprn, int sprn)
+{
+ TCGv mas7 = tcg_temp_new();
+ TCGv mas3 = tcg_temp_new();
+ gen_load_spr(mas7, SPR_BOOKE_MAS7);
+ tcg_gen_shli_tl(mas7, mas7, 32);
+ gen_load_spr(mas3, SPR_BOOKE_MAS3);
+ tcg_gen_or_tl(cpu_gpr[gprn], mas3, mas7);
+ tcg_temp_free(mas3);
+ tcg_temp_free(mas7);
+}
+
+#endif
+
+enum fsl_e500_version {
+ fsl_e500v1,
+ fsl_e500v2,
+ fsl_e500mc,
+ fsl_e5500,
+};
+
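+/* All FSL e500 variants share this init routine; the version argument selects
+ * the TLB geometry, the cache-line size and the few version-specific SPRs.
+ */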
+static void init_proc_e500 (CPUPPCState *env, int version)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ uint32_t tlbncfg[2];
+ uint64_t ivor_mask;
+ uint64_t ivpr_mask = 0xFFFF0000ULL;
+ uint32_t l1cfg0 = 0x3800 /* 8 ways */
+ | 0x0020; /* 32 kb */
+ uint32_t l1cfg1 = 0x3800 /* 8 ways */
+ | 0x0020; /* 32 kb */
+#if !defined(CONFIG_USER_ONLY)
+ int i;
+#endif
+
+ /* Time base */
+ gen_tbl(env);
+ /*
+ * XXX The e500 doesn't implement IVOR7 and IVOR9, but doesn't
+ * complain when accessing them.
+ * gen_spr_BookE(env, 0x0000000F0000FD7FULL);
+ */
+ switch (version) {
+ case fsl_e500v1:
+ case fsl_e500v2:
+ default:
+ ivor_mask = 0x0000000F0000FFFFULL;
+ break;
+ case fsl_e500mc:
+ case fsl_e5500:
+ ivor_mask = 0x000003FE0000FFFFULL;
+ break;
+ }
+ gen_spr_BookE(env, ivor_mask);
+ /* Processor identification */
+ spr_register(env, SPR_BOOKE_PIR, "PIR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_pir,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_SPEFSCR, "SPEFSCR",
+ &spr_read_spefscr, &spr_write_spefscr,
+ &spr_read_spefscr, &spr_write_spefscr,
+ 0x00000000);
+#if !defined(CONFIG_USER_ONLY)
+ /* Memory management */
+ env->nb_pids = 3;
+ env->nb_ways = 2;
+ env->id_tlbs = 0;
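+ /* TLB0 holds fixed-size (4 KiB) pages in a large set-associative array;
+ * TLB1 is a small fully-associative array with variable page sizes and
+ * IPROT-protected entries (TLBnCFG_AVAIL | TLBnCFG_IPROT).
+ */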
+ switch (version) {
+ case fsl_e500v1:
+ tlbncfg[0] = gen_tlbncfg(2, 1, 1, 0, 256);
+ tlbncfg[1] = gen_tlbncfg(16, 1, 9, TLBnCFG_AVAIL | TLBnCFG_IPROT, 16);
+ break;
+ case fsl_e500v2:
+ tlbncfg[0] = gen_tlbncfg(4, 1, 1, 0, 512);
+ tlbncfg[1] = gen_tlbncfg(16, 1, 12, TLBnCFG_AVAIL | TLBnCFG_IPROT, 16);
+ break;
+ case fsl_e500mc:
+ case fsl_e5500:
+ tlbncfg[0] = gen_tlbncfg(4, 1, 1, 0, 512);
+ tlbncfg[1] = gen_tlbncfg(64, 1, 12, TLBnCFG_AVAIL | TLBnCFG_IPROT, 64);
+ break;
+ default:
+ cpu_abort(CPU(cpu), "Unknown CPU: " TARGET_FMT_lx "\n", env->spr[SPR_PVR]);
+ }
+#endif
+ /* Cache sizes */
+ switch (version) {
+ case fsl_e500v1:
+ case fsl_e500v2:
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ break;
+ case fsl_e500mc:
+ case fsl_e5500:
+ env->dcache_line_size = 64;
+ env->icache_line_size = 64;
+ l1cfg0 |= 0x1000000; /* 64 byte cache block size */
+ l1cfg1 |= 0x1000000; /* 64 byte cache block size */
+ break;
+ default:
+ cpu_abort(CPU(cpu), "Unknown CPU: " TARGET_FMT_lx "\n", env->spr[SPR_PVR]);
+ }
+ gen_spr_BookE206(env, 0x000000DF, tlbncfg);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_BBEAR, "BBEAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_BBTAR, "BBTAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_MCAR, "MCAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_BOOKE_MCSR, "MCSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_NPIDR, "NPIDR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_BUCSR, "BUCSR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
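+ /* L1CFG0/L1CFG1 are read-only; their reset values come from the l1cfg0 and
+ * l1cfg1 words computed above (ways, size and, on e500mc/e5500, the
+ * 64-byte block size bit).
+ */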
+ /* XXX : not implemented */
+ spr_register(env, SPR_Exxx_L1CFG0, "L1CFG0",
+ &spr_read_generic, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ l1cfg0);
+ spr_register(env, SPR_Exxx_L1CFG1, "L1CFG1",
+ &spr_read_generic, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ l1cfg1);
+ spr_register(env, SPR_Exxx_L1CSR0, "L1CSR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_e500_l1csr0,
+ 0x00000000);
+ spr_register(env, SPR_Exxx_L1CSR1, "L1CSR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_e500_l1csr1,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_MCSRR0, "MCSRR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_MCSRR1, "MCSRR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_MMUCSR0, "MMUCSR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_booke206_mmucsr0,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_EPR, "EPR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX: this would be better abstracted into Emb.xxx features */
+ if (version == fsl_e5500) {
+ spr_register(env, SPR_BOOKE_EPCR, "EPCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BOOKE_MAS7_MAS3, "MAS7_MAS3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_mas73, &spr_write_mas73,
+ 0x00000000);
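+ /* On the 64-bit e5500 only the low 16 bits of IVPR are fixed to zero,
+ * so widen the writable mask beyond the 32-bit default.
+ */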
+ ivpr_mask = (target_ulong)~0xFFFFULL;
+ }
+
+#if !defined(CONFIG_USER_ONLY)
+ env->nb_tlb = 0;
+ env->tlb_type = TLB_MAS;
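+ /* The total number of software TLB entries is the sum over all
+ * BookE 2.06 TLB arrays.
+ */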
+ for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
+ env->nb_tlb += booke206_tlb_size(env, i);
+ }
+#endif
+
+ init_excp_e200(env, ivpr_mask);
+ /* Allocate hardware IRQ controller */
+ ppce500_irq_init(ppc_env_get_cpu(env));
+}
+
+static void init_proc_e500v1(CPUPPCState *env)
+{
+ init_proc_e500(env, fsl_e500v1);
+}
+
+POWERPC_FAMILY(e500v1)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "e500v1 core";
+ pcc->init_proc = init_proc_e500v1;
+ pcc->check_pow = check_pow_hid0;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL |
+ PPC_SPE | PPC_SPE_SINGLE |
+ PPC_WRTEE | PPC_RFDI |
+ PPC_CACHE | PPC_CACHE_LOCK | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
+ PPC_MEM_TLBSYNC | PPC_TLBIVAX | PPC_MEM_SYNC;
+ pcc->insns_flags2 = PPC2_BOOKE206;
+ pcc->msr_mask = (1ull << MSR_UCLE) |
+ (1ull << MSR_SPE) |
+ (1ull << MSR_POW) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_DWE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR);
+ pcc->mmu_model = POWERPC_MMU_BOOKE206;
+ pcc->excp_model = POWERPC_EXCP_BOOKE;
+ pcc->bus_model = PPC_FLAGS_INPUT_BookE;
+ pcc->bfd_mach = bfd_mach_ppc_860;
+ pcc->flags = POWERPC_FLAG_SPE | POWERPC_FLAG_CE |
+ POWERPC_FLAG_UBLE | POWERPC_FLAG_DE |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_e500v2(CPUPPCState *env)
+{
+ init_proc_e500(env, fsl_e500v2);
+}
+
+POWERPC_FAMILY(e500v2)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "e500v2 core";
+ pcc->init_proc = init_proc_e500v2;
+ pcc->check_pow = check_pow_hid0;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL |
+ PPC_SPE | PPC_SPE_SINGLE | PPC_SPE_DOUBLE |
+ PPC_WRTEE | PPC_RFDI |
+ PPC_CACHE | PPC_CACHE_LOCK | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
+ PPC_MEM_TLBSYNC | PPC_TLBIVAX | PPC_MEM_SYNC;
+ pcc->insns_flags2 = PPC2_BOOKE206;
+ pcc->msr_mask = (1ull << MSR_UCLE) |
+ (1ull << MSR_SPE) |
+ (1ull << MSR_POW) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_DWE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR);
+ pcc->mmu_model = POWERPC_MMU_BOOKE206;
+ pcc->excp_model = POWERPC_EXCP_BOOKE;
+ pcc->bus_model = PPC_FLAGS_INPUT_BookE;
+ pcc->bfd_mach = bfd_mach_ppc_860;
+ pcc->flags = POWERPC_FLAG_SPE | POWERPC_FLAG_CE |
+ POWERPC_FLAG_UBLE | POWERPC_FLAG_DE |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_e500mc(CPUPPCState *env)
+{
+ init_proc_e500(env, fsl_e500mc);
+}
+
+POWERPC_FAMILY(e500mc)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "e500mc core";
+ pcc->init_proc = init_proc_e500mc;
+ pcc->check_pow = check_pow_none;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_MFTB |
+ PPC_WRTEE | PPC_RFDI | PPC_RFMCI |
+ PPC_CACHE | PPC_CACHE_LOCK | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
+ PPC_FLOAT | PPC_FLOAT_FRES |
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_FSEL |
+ PPC_FLOAT_STFIWX | PPC_WAIT |
+ PPC_MEM_TLBSYNC | PPC_TLBIVAX | PPC_MEM_SYNC;
+ pcc->insns_flags2 = PPC2_BOOKE206 | PPC2_PRCNTL;
+ pcc->msr_mask = (1ull << MSR_GS) |
+ (1ull << MSR_UCLE) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PX) |
+ (1ull << MSR_RI);
+ pcc->mmu_model = POWERPC_MMU_BOOKE206;
+ pcc->excp_model = POWERPC_EXCP_BOOKE;
+ pcc->bus_model = PPC_FLAGS_INPUT_BookE;
+ /* FIXME: figure out the correct flag for e500mc */
+ pcc->bfd_mach = bfd_mach_ppc_e500;
+ pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DE |
+ POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK;
+}
+
+#ifdef TARGET_PPC64
+static void init_proc_e5500(CPUPPCState *env)
+{
+ init_proc_e500(env, fsl_e5500);
+}
+
+POWERPC_FAMILY(e5500)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "e5500 core";
+ pcc->init_proc = init_proc_e5500;
+ pcc->check_pow = check_pow_none;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_MFTB |
+ PPC_WRTEE | PPC_RFDI | PPC_RFMCI |
+ PPC_CACHE | PPC_CACHE_LOCK | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBZ | PPC_CACHE_DCBA |
+ PPC_FLOAT | PPC_FLOAT_FRES |
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_FSEL |
+ PPC_FLOAT_STFIWX | PPC_WAIT |
+ PPC_MEM_TLBSYNC | PPC_TLBIVAX | PPC_MEM_SYNC |
+ PPC_64B | PPC_POPCNTB | PPC_POPCNTWD;
+ pcc->insns_flags2 = PPC2_BOOKE206 | PPC2_PRCNTL | PPC2_PERM_ISA206 | \
+ PPC2_FP_CVT_S64;
+ pcc->msr_mask = (1ull << MSR_CM) |
+ (1ull << MSR_GS) |
+ (1ull << MSR_UCLE) |
+ (1ull << MSR_CE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PX) |
+ (1ull << MSR_RI);
+ pcc->mmu_model = POWERPC_MMU_BOOKE206;
+ pcc->excp_model = POWERPC_EXCP_BOOKE;
+ pcc->bus_model = PPC_FLAGS_INPUT_BookE;
+ /* FIXME: figure out the correct flag for e5500 */
+ pcc->bfd_mach = bfd_mach_ppc_e500;
+ pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_DE |
+ POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK;
+}
+#endif
+
+/* Non-embedded PowerPC */
+
+/* POWER: same as the 601, but without mfmsr and mfsr */
+POWERPC_FAMILY(POWER)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "POWER";
+ /* pcc->insns_flags = XXX_TODO; */
+ /* POWER RSC (from RAD6000) */
+ pcc->msr_mask = (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_AL) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR);
+}
+
+#define POWERPC_MSRR_601 (0x0000000000001040ULL)
+
+static void init_proc_601 (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_601(env);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_hid0_601,
+ 0x80010080);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_601_HID2, "HID2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_601_HID5, "HID5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ init_excp_601(env);
+ /* XXX: beware that the dcache line size is 64 bytes,
+ * but dcbz operates on 32-byte "sectors".
+ * XXX: this breaks the clcs instruction!
+ */
+ env->dcache_line_size = 32;
+ env->icache_line_size = 64;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(ppc_env_get_cpu(env));
+}
+
+POWERPC_FAMILY(601)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 601";
+ pcc->init_proc = init_proc_601;
+ pcc->check_pow = check_pow_none;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_POWER_BR |
+ PPC_FLOAT |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE |
+ PPC_SEGMENT | PPC_EXTERN;
+ pcc->msr_mask = (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR);
+ pcc->mmu_model = POWERPC_MMU_601;
+#if defined(CONFIG_SOFTMMU)
+ pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault;
+#endif
+ pcc->excp_model = POWERPC_EXCP_601;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_601;
+ pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_RTC_CLK;
+}
+
+#define POWERPC_MSRR_601v (0x0000000000001040ULL)
+
+static void init_proc_601v (CPUPPCState *env)
+{
+ init_proc_601(env);
+ /* XXX : not implemented */
+ spr_register(env, SPR_601_HID15, "HID15",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+POWERPC_FAMILY(601v)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 601v";
+ pcc->init_proc = init_proc_601v;
+ pcc->check_pow = check_pow_none;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_POWER_BR |
+ PPC_FLOAT |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE |
+ PPC_SEGMENT | PPC_EXTERN;
+ pcc->msr_mask = (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR);
+ pcc->mmu_model = POWERPC_MMU_601;
+#if defined(CONFIG_SOFTMMU)
+ pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault;
+#endif
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_601;
+ pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_RTC_CLK;
+}
+
+static void init_proc_602 (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_602(env);
+ /* Time base */
+ gen_tbl(env);
+ /* hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ gen_6xx_7xx_soft_tlb(env, 64, 2);
+ init_excp_602(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(ppc_env_get_cpu(env));
+}
+
+POWERPC_FAMILY(602)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 602";
+ pcc->init_proc = init_proc_602;
+ pcc->check_pow = check_pow_hid0;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_6xx_TLB | PPC_MEM_TLBSYNC |
+ PPC_SEGMENT | PPC_602_SPEC;
+ pcc->msr_mask = (1ull << MSR_VSX) |
+ (1ull << MSR_SA) |
+ (1ull << MSR_POW) |
+ (1ull << MSR_TGPR) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ /* XXX: the 602 MMU is quite specific; it should get a special case of its own */
+ pcc->mmu_model = POWERPC_MMU_SOFT_6xx;
+ pcc->excp_model = POWERPC_EXCP_602;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_602;
+ pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_603 (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_603(env);
+ /* Time base */
+ gen_tbl(env);
+ /* hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ gen_6xx_7xx_soft_tlb(env, 64, 2);
+ init_excp_603(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(ppc_env_get_cpu(env));
+}
+
+POWERPC_FAMILY(603)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 603";
+ pcc->init_proc = init_proc_603;
+ pcc->check_pow = check_pow_hid0;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB |
+ PPC_SEGMENT | PPC_EXTERN;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_TGPR) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_SOFT_6xx;
+ pcc->excp_model = POWERPC_EXCP_603;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_603;
+ pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_603E (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_603(env);
+ /* Time base */
+ gen_tbl(env);
+ /* hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ gen_6xx_7xx_soft_tlb(env, 64, 2);
+ init_excp_603(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(ppc_env_get_cpu(env));
+}
+
+POWERPC_FAMILY(603E)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 603e";
+ pcc->init_proc = init_proc_603E;
+ pcc->check_pow = check_pow_hid0;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB |
+ PPC_SEGMENT | PPC_EXTERN;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_TGPR) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_SOFT_6xx;
+ pcc->excp_model = POWERPC_EXCP_603E;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_ec603e;
+ pcc->flags = POWERPC_FLAG_TGPR | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_604 (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_604(env);
+ /* Time base */
+ gen_tbl(env);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ init_excp_604(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(ppc_env_get_cpu(env));
+}
+
+POWERPC_FAMILY(604)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 604";
+ pcc->init_proc = init_proc_604;
+ pcc->check_pow = check_pow_nocheck;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_SEGMENT | PPC_EXTERN;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_32B;
+#if defined(CONFIG_SOFTMMU)
+ pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault;
+#endif
+ pcc->excp_model = POWERPC_EXCP_604;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_604;
+ pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE |
+ POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_604E (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_604(env);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_MMCR1, "MMCR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC3, "PMC3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC4, "PMC4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Time base */
+ gen_tbl(env);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ init_excp_604(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(ppc_env_get_cpu(env));
+}
+
+POWERPC_FAMILY(604E)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 604E";
+ pcc->init_proc = init_proc_604E;
+ pcc->check_pow = check_pow_nocheck;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_SEGMENT | PPC_EXTERN;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_32B;
+#if defined(CONFIG_SOFTMMU)
+ pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault;
+#endif
+ pcc->excp_model = POWERPC_EXCP_604;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_604;
+ pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE |
+ POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_740 (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_7xx(env);
+ /* Time base */
+ gen_tbl(env);
+ /* Thermal management */
+ gen_spr_thrm(env);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ init_excp_7x0(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(ppc_env_get_cpu(env));
+}
+
+POWERPC_FAMILY(740)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 740";
+ pcc->init_proc = init_proc_740;
+ pcc->check_pow = check_pow_hid0;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_SEGMENT | PPC_EXTERN;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_32B;
+#if defined(CONFIG_SOFTMMU)
+ pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault;
+#endif
+ pcc->excp_model = POWERPC_EXCP_7x0;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_750;
+ pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE |
+ POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_750 (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_7xx(env);
+ /* XXX : not implemented */
+ spr_register(env, SPR_L2CR, "L2CR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, spr_access_nop,
+ 0x00000000);
+ /* Time base */
+ gen_tbl(env);
+ /* Thermal management */
+ gen_spr_thrm(env);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ /* XXX: high BATs are also present, but they are known to be buggy on
+ * die revision 1.x
+ */
+ init_excp_7x0(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(ppc_env_get_cpu(env));
+}
+
+POWERPC_FAMILY(750)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 750";
+ pcc->init_proc = init_proc_750;
+ pcc->check_pow = check_pow_hid0;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_SEGMENT | PPC_EXTERN;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_32B;
+#if defined(CONFIG_SOFTMMU)
+ pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault;
+#endif
+ pcc->excp_model = POWERPC_EXCP_7x0;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_750;
+ pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE |
+ POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_750cl (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_7xx(env);
+ /* XXX : not implemented */
+ spr_register(env, SPR_L2CR, "L2CR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, spr_access_nop,
+ 0x00000000);
+ /* Time base */
+ gen_tbl(env);
+ /* Thermal management */
+ /* These registers are fake on the 750CL */
+ spr_register(env, SPR_THRM1, "THRM1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_THRM2, "THRM2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_THRM3, "THRM3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX: not implemented */
+ spr_register(env, SPR_750_TDCL, "TDCL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_750_TDCH, "TDCH",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* DMA */
+ /* XXX : not implemented */
+ spr_register(env, SPR_750_WPAR, "WPAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_750_DMAL, "DMAL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_750_DMAU, "DMAU",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750CL_HID2, "HID2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750CL_HID4, "HID4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Quantization registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_750_GQR0, "GQR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750_GQR1, "GQR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750_GQR2, "GQR2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750_GQR3, "GQR3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750_GQR4, "GQR4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750_GQR5, "GQR5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750_GQR6, "GQR6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750_GQR7, "GQR7",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ /* PowerPC 750cl has 8 DBATs and 8 IBATs */
+ gen_high_BATs(env);
+ init_excp_750cl(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(ppc_env_get_cpu(env));
+}
+
+POWERPC_FAMILY(750cl)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 750 CL";
+ pcc->init_proc = init_proc_750cl;
+ pcc->check_pow = check_pow_hid0;
+ /* XXX: not implemented:
+ * cache lock instructions:
+ * dcbz_l
+ * floating point paired instructions:
+ * psq_lux
+ * psq_lx
+ * psq_stux
+ * psq_stx
+ * ps_abs
+ * ps_add
+ * ps_cmpo0
+ * ps_cmpo1
+ * ps_cmpu0
+ * ps_cmpu1
+ * ps_div
+ * ps_madd
+ * ps_madds0
+ * ps_madds1
+ * ps_merge00
+ * ps_merge01
+ * ps_merge10
+ * ps_merge11
+ * ps_mr
+ * ps_msub
+ * ps_mul
+ * ps_muls0
+ * ps_muls1
+ * ps_nabs
+ * ps_neg
+ * ps_nmadd
+ * ps_nmsub
+ * ps_res
+ * ps_rsqrte
+ * ps_sel
+ * ps_sub
+ * ps_sum0
+ * ps_sum1
+ */
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_SEGMENT | PPC_EXTERN;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_32B;
+#if defined(CONFIG_SOFTMMU)
+ pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault;
+#endif
+ pcc->excp_model = POWERPC_EXCP_7x0;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_750;
+ pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE |
+ POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_750cx (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_7xx(env);
+ /* XXX : not implemented */
+ spr_register(env, SPR_L2CR, "L2CR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, spr_access_nop,
+ 0x00000000);
+ /* Time base */
+ gen_tbl(env);
+ /* Thermal management */
+ gen_spr_thrm(env);
+ /* This register is not implemented but is present for compatibility */
+ spr_register(env, SPR_SDA, "SDA",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ /* PowerPC 750cx has 8 DBATs and 8 IBATs */
+ gen_high_BATs(env);
+ init_excp_750cx(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(ppc_env_get_cpu(env));
+}
+
+POWERPC_FAMILY(750cx)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 750CX";
+ pcc->init_proc = init_proc_750cx;
+ pcc->check_pow = check_pow_hid0;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_SEGMENT | PPC_EXTERN;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_32B;
+#if defined(CONFIG_SOFTMMU)
+ pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault;
+#endif
+ pcc->excp_model = POWERPC_EXCP_7x0;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_750;
+ pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE |
+ POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_750fx (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_7xx(env);
+ /* XXX : not implemented */
+ spr_register(env, SPR_L2CR, "L2CR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, spr_access_nop,
+ 0x00000000);
+ /* Time base */
+ gen_tbl(env);
+ /* Thermal management */
+ gen_spr_thrm(env);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750_THRM4, "THRM4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750FX_HID2, "HID2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ /* PowerPC 750fx & 750gx have 8 DBATs and 8 IBATs */
+ gen_high_BATs(env);
+ init_excp_7x0(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(ppc_env_get_cpu(env));
+}
+
+POWERPC_FAMILY(750fx)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 750FX";
+ pcc->init_proc = init_proc_750fx;
+ pcc->check_pow = check_pow_hid0;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_SEGMENT | PPC_EXTERN;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_32B;
+#if defined(CONFIG_SOFTMMU)
+ pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault;
+#endif
+ pcc->excp_model = POWERPC_EXCP_7x0;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_750;
+ pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE |
+ POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_750gx (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_7xx(env);
+ /* XXX : not implemented (XXX: different from 750fx) */
+ spr_register(env, SPR_L2CR, "L2CR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, spr_access_nop,
+ 0x00000000);
+ /* Time base */
+ gen_tbl(env);
+ /* Thermal management */
+ gen_spr_thrm(env);
+ /* XXX : not implemented */
+ spr_register(env, SPR_750_THRM4, "THRM4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Hardware implementation registers */
+ /* XXX : not implemented (XXX: different from 750fx) */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented (XXX: different from 750fx) */
+ spr_register(env, SPR_750FX_HID2, "HID2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ /* PowerPC 750fx & 750gx have 8 DBATs and 8 IBATs */
+ gen_high_BATs(env);
+ init_excp_7x0(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(ppc_env_get_cpu(env));
+}
+
+POWERPC_FAMILY(750gx)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 750GX";
+ pcc->init_proc = init_proc_750gx;
+ pcc->check_pow = check_pow_hid0;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_SEGMENT | PPC_EXTERN;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_32B;
+#if defined(CONFIG_SOFTMMU)
+ pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault;
+#endif
+ pcc->excp_model = POWERPC_EXCP_7x0;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_750;
+ pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE |
+ POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_745 (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_7xx(env);
+ gen_spr_G2_755(env);
+ /* Time base */
+ gen_tbl(env);
+ /* Thermal management */
+ gen_spr_thrm(env);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID2, "HID2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ gen_high_BATs(env);
+ gen_6xx_7xx_soft_tlb(env, 64, 2);
+ init_excp_7x5(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(ppc_env_get_cpu(env));
+}
+
+POWERPC_FAMILY(745)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 745";
+ pcc->init_proc = init_proc_745;
+ pcc->check_pow = check_pow_hid0;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB |
+ PPC_SEGMENT | PPC_EXTERN;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_SOFT_6xx;
+ pcc->excp_model = POWERPC_EXCP_7x5;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_750;
+ pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE |
+ POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_755 (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_7xx(env);
+ gen_spr_G2_755(env);
+ /* Time base */
+ gen_tbl(env);
+ /* L2 cache control */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L2CR, "L2CR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, spr_access_nop,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_L2PMCR, "L2PMCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Thermal management */
+ gen_spr_thrm(env);
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID2, "HID2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ gen_high_BATs(env);
+ gen_6xx_7xx_soft_tlb(env, 64, 2);
+ init_excp_7x5(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(ppc_env_get_cpu(env));
+}
+
+POWERPC_FAMILY(755)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 755";
+ pcc->init_proc = init_proc_755;
+ pcc->check_pow = check_pow_hid0;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC | PPC_6xx_TLB |
+ PPC_SEGMENT | PPC_EXTERN;
+ pcc->msr_mask = (1ull << MSR_POW) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_SOFT_6xx;
+ pcc->excp_model = POWERPC_EXCP_7x5;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_750;
+ pcc->flags = POWERPC_FLAG_SE | POWERPC_FLAG_BE |
+ POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_7400 (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_7xx(env);
+ /* Time base */
+ gen_tbl(env);
+ /* 74xx specific SPR */
+ gen_spr_74xx(env);
+ /* XXX : not implemented */
+ spr_register(env, SPR_UBAMR, "UBAMR",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX: this does not seem to be implemented on all revisions. */
+ /* XXX : not implemented */
+ spr_register(env, SPR_MSSCR1, "MSSCR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Thermal management */
+ gen_spr_thrm(env);
+ /* Memory management */
+ gen_low_BATs(env);
+ init_excp_7400(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(ppc_env_get_cpu(env));
+}
+
+POWERPC_FAMILY(7400)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 7400 (aka G4)";
+ pcc->init_proc = init_proc_7400;
+ pcc->check_pow = check_pow_hid0;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
+ PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBA | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_MEM_TLBIA |
+ PPC_SEGMENT | PPC_EXTERN |
+ PPC_ALTIVEC;
+ pcc->msr_mask = (1ull << MSR_VR) |
+ (1ull << MSR_POW) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_32B;
+#if defined(CONFIG_SOFTMMU)
+ pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault;
+#endif
+ pcc->excp_model = POWERPC_EXCP_74xx;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_7400;
+ pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_7410 (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_7xx(env);
+ /* Time base */
+ gen_tbl(env);
+ /* 74xx specific SPR */
+ gen_spr_74xx(env);
+ /* XXX : not implemented */
+ spr_register(env, SPR_UBAMR, "UBAMR",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* Thermal management */
+ gen_spr_thrm(env);
+ /* L2PMCR */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L2PMCR, "L2PMCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* LDSTDB */
+ /* XXX : not implemented */
+ spr_register(env, SPR_LDSTDB, "LDSTDB",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ init_excp_7400(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(ppc_env_get_cpu(env));
+}
+
+POWERPC_FAMILY(7410)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 7410 (aka G4)";
+ pcc->init_proc = init_proc_7410;
+ pcc->check_pow = check_pow_hid0;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
+ PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBA | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_MEM_TLBIA |
+ PPC_SEGMENT | PPC_EXTERN |
+ PPC_ALTIVEC;
+ pcc->msr_mask = (1ull << MSR_VR) |
+ (1ull << MSR_POW) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_32B;
+#if defined(CONFIG_SOFTMMU)
+ pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault;
+#endif
+ pcc->excp_model = POWERPC_EXCP_74xx;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_7400;
+ pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_7440 (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_7xx(env);
+ /* Time base */
+ gen_tbl(env);
+ /* 74xx specific SPR */
+ gen_spr_74xx(env);
+ /* XXX : not implemented */
+ spr_register(env, SPR_UBAMR, "UBAMR",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* LDSTCR */
+ /* XXX : not implemented */
+ spr_register(env, SPR_LDSTCR, "LDSTCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* ICTRL */
+ /* XXX : not implemented */
+ spr_register(env, SPR_ICTRL, "ICTRL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* MSSSR0 */
+ /* XXX : not implemented */
+ spr_register(env, SPR_MSSSR0, "MSSSR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* PMC */
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC5, "PMC5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_UPMC5, "UPMC5",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC6, "PMC6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_UPMC6, "UPMC6",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ gen_74xx_soft_tlb(env, 128, 2);
+ init_excp_7450(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(ppc_env_get_cpu(env));
+}
+
+POWERPC_FAMILY(7440)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 7440 (aka G4)";
+ pcc->init_proc = init_proc_7440;
+ pcc->check_pow = check_pow_hid0_74xx;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
+ PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBA | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_MEM_TLBIA | PPC_74xx_TLB |
+ PPC_SEGMENT | PPC_EXTERN |
+ PPC_ALTIVEC;
+ pcc->msr_mask = (1ull << MSR_VR) |
+ (1ull << MSR_POW) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_SOFT_74xx;
+ pcc->excp_model = POWERPC_EXCP_74xx;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_7400;
+ pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_7450 (CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_7xx(env);
+ /* Time base */
+ gen_tbl(env);
+ /* 74xx specific SPR */
+ gen_spr_74xx(env);
+ /* Level 3 cache control */
+ gen_l3_ctrl(env);
+ /* L3ITCR1 */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L3ITCR1, "L3ITCR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* L3ITCR2 */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L3ITCR2, "L3ITCR2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* L3ITCR3 */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L3ITCR3, "L3ITCR3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* L3OHCR */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L3OHCR, "L3OHCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_UBAMR, "UBAMR",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* LDSTCR */
+ /* XXX : not implemented */
+ spr_register(env, SPR_LDSTCR, "LDSTCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* ICTRL */
+ /* XXX : not implemented */
+ spr_register(env, SPR_ICTRL, "ICTRL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* MSSSR0 */
+ /* XXX : not implemented */
+ spr_register(env, SPR_MSSSR0, "MSSSR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* PMC */
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC5, "PMC5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_UPMC5, "UPMC5",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC6, "PMC6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_UPMC6, "UPMC6",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ gen_74xx_soft_tlb(env, 128, 2);
+ init_excp_7450(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(ppc_env_get_cpu(env));
+}
+
+POWERPC_FAMILY(7450)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 7450 (aka G4)";
+ pcc->init_proc = init_proc_7450;
+ pcc->check_pow = check_pow_hid0_74xx;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
+ PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBA | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_MEM_TLBIA | PPC_74xx_TLB |
+ PPC_SEGMENT | PPC_EXTERN |
+ PPC_ALTIVEC;
+ pcc->msr_mask = (1ull << MSR_VR) |
+ (1ull << MSR_POW) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_SOFT_74xx;
+ pcc->excp_model = POWERPC_EXCP_74xx;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_7400;
+ pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_7445(CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_7xx(env);
+ /* Time base */
+ gen_tbl(env);
+ /* 74xx specific SPR */
+ gen_spr_74xx(env);
+ /* LDSTCR */
+ /* XXX : not implemented */
+ spr_register(env, SPR_LDSTCR, "LDSTCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* ICTRL */
+ /* XXX : not implemented */
+ spr_register(env, SPR_ICTRL, "ICTRL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* MSSSR0 */
+ /* XXX : not implemented */
+ spr_register(env, SPR_MSSSR0, "MSSSR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* PMC */
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC5, "PMC5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_UPMC5, "UPMC5",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC6, "PMC6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_UPMC6, "UPMC6",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* SPRGs */
+ spr_register(env, SPR_SPRG4, "SPRG4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG4, "USPRG4",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_SPRG5, "SPRG5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG5, "USPRG5",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_SPRG6, "SPRG6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG6, "USPRG6",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_SPRG7, "SPRG7",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG7, "USPRG7",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ gen_high_BATs(env);
+ gen_74xx_soft_tlb(env, 128, 2);
+ init_excp_7450(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(ppc_env_get_cpu(env));
+}
+
+POWERPC_FAMILY(7445)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 7445 (aka G4)";
+ pcc->init_proc = init_proc_7445;
+ pcc->check_pow = check_pow_hid0_74xx;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
+ PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBA | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_MEM_TLBIA | PPC_74xx_TLB |
+ PPC_SEGMENT | PPC_EXTERN |
+ PPC_ALTIVEC;
+ pcc->msr_mask = (1ull << MSR_VR) |
+ (1ull << MSR_POW) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_SOFT_74xx;
+ pcc->excp_model = POWERPC_EXCP_74xx;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_7400;
+ pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_7455(CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_7xx(env);
+ /* Time base */
+ gen_tbl(env);
+ /* 74xx specific SPR */
+ gen_spr_74xx(env);
+ /* Level 3 cache control */
+ gen_l3_ctrl(env);
+ /* LDSTCR */
+ /* XXX : not implemented */
+ spr_register(env, SPR_LDSTCR, "LDSTCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* ICTRL */
+ /* XXX : not implemented */
+ spr_register(env, SPR_ICTRL, "ICTRL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* MSSSR0 */
+ /* XXX : not implemented */
+ spr_register(env, SPR_MSSSR0, "MSSSR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* PMC */
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC5, "PMC5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_UPMC5, "UPMC5",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC6, "PMC6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_UPMC6, "UPMC6",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* SPRGs */
+ spr_register(env, SPR_SPRG4, "SPRG4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG4, "USPRG4",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_SPRG5, "SPRG5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG5, "USPRG5",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_SPRG6, "SPRG6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG6, "USPRG6",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_SPRG7, "SPRG7",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG7, "USPRG7",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ gen_high_BATs(env);
+ gen_74xx_soft_tlb(env, 128, 2);
+ init_excp_7450(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(ppc_env_get_cpu(env));
+}
+
+POWERPC_FAMILY(7455)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 7455 (aka G4)";
+ pcc->init_proc = init_proc_7455;
+ pcc->check_pow = check_pow_hid0_74xx;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
+ PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBA | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_MEM_TLBIA | PPC_74xx_TLB |
+ PPC_SEGMENT | PPC_EXTERN |
+ PPC_ALTIVEC;
+ pcc->msr_mask = (1ull << MSR_VR) |
+ (1ull << MSR_POW) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_SOFT_74xx;
+ pcc->excp_model = POWERPC_EXCP_74xx;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_7400;
+ pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_7457(CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_7xx(env);
+ /* Time base */
+ gen_tbl(env);
+ /* 74xx specific SPR */
+ gen_spr_74xx(env);
+ /* Level 3 cache control */
+ gen_l3_ctrl(env);
+ /* L3ITCR1 */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L3ITCR1, "L3ITCR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* L3ITCR2 */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L3ITCR2, "L3ITCR2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* L3ITCR3 */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L3ITCR3, "L3ITCR3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* L3OHCR */
+ /* XXX : not implemented */
+ spr_register(env, SPR_L3OHCR, "L3OHCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* LDSTCR */
+ /* XXX : not implemented */
+ spr_register(env, SPR_LDSTCR, "LDSTCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* ICTRL */
+ /* XXX : not implemented */
+ spr_register(env, SPR_ICTRL, "ICTRL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* MSSSR0 */
+ /* XXX : not implemented */
+ spr_register(env, SPR_MSSSR0, "MSSSR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* PMC */
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC5, "PMC5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_UPMC5, "UPMC5",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC6, "PMC6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_UPMC6, "UPMC6",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* SPRGs */
+ spr_register(env, SPR_SPRG4, "SPRG4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG4, "USPRG4",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_SPRG5, "SPRG5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG5, "USPRG5",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_SPRG6, "SPRG6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG6, "USPRG6",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_SPRG7, "SPRG7",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG7, "USPRG7",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ gen_high_BATs(env);
+ gen_74xx_soft_tlb(env, 128, 2);
+ init_excp_7450(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(ppc_env_get_cpu(env));
+}
+
+POWERPC_FAMILY(7457)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 7457 (aka G4)";
+ pcc->init_proc = init_proc_7457;
+ pcc->check_pow = check_pow_hid0_74xx;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
+ PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBA | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_MEM_TLBIA | PPC_74xx_TLB |
+ PPC_SEGMENT | PPC_EXTERN |
+ PPC_ALTIVEC;
+ pcc->msr_mask = (1ull << MSR_VR) |
+ (1ull << MSR_POW) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_SOFT_74xx;
+ pcc->excp_model = POWERPC_EXCP_74xx;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_7400;
+ pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+static void init_proc_e600(CPUPPCState *env)
+{
+ gen_spr_ne_601(env);
+ gen_spr_7xx(env);
+ /* Time base */
+ gen_tbl(env);
+ /* 74xx specific SPR */
+ gen_spr_74xx(env);
+ /* XXX : not implemented */
+ spr_register(env, SPR_UBAMR, "UBAMR",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_LDSTCR, "LDSTCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_ICTRL, "ICTRL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_MSSSR0, "MSSSR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC5, "PMC5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_UPMC5, "UPMC5",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_PMC6, "PMC6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ /* XXX : not implemented */
+ spr_register(env, SPR_7XX_UPMC6, "UPMC6",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* SPRGs */
+ spr_register(env, SPR_SPRG4, "SPRG4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG4, "USPRG4",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_SPRG5, "SPRG5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG5, "USPRG5",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_SPRG6, "SPRG6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG6, "USPRG6",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ spr_register(env, SPR_SPRG7, "SPRG7",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_USPRG7, "USPRG7",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+ /* Memory management */
+ gen_low_BATs(env);
+ gen_high_BATs(env);
+ gen_74xx_soft_tlb(env, 128, 2);
+ init_excp_7450(env);
+ env->dcache_line_size = 32;
+ env->icache_line_size = 32;
+ /* Allocate hardware IRQ controller */
+ ppc6xx_irq_init(ppc_env_get_cpu(env));
+}
+
+POWERPC_FAMILY(e600)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC e600";
+ pcc->init_proc = init_proc_e600;
+ pcc->check_pow = check_pow_hid0_74xx;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
+ PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI |
+ PPC_CACHE_DCBA | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_MEM_TLBIA | PPC_74xx_TLB |
+ PPC_SEGMENT | PPC_EXTERN |
+ PPC_ALTIVEC;
+ pcc->insns_flags2 = PPC_NONE;
+ pcc->msr_mask = (1ull << MSR_VR) |
+ (1ull << MSR_POW) |
+ (1ull << MSR_ILE) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_EP) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_32B;
+#if defined(CONFIG_SOFTMMU)
+ pcc->handle_mmu_fault = ppc_hash32_handle_mmu_fault;
+#endif
+ pcc->excp_model = POWERPC_EXCP_74xx;
+ pcc->bus_model = PPC_FLAGS_INPUT_6xx;
+ pcc->bfd_mach = bfd_mach_ppc_7400;
+ pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
+ POWERPC_FLAG_BUS_CLK;
+}
+
+#if defined(TARGET_PPC64)
+#if defined(CONFIG_USER_ONLY)
+#define POWERPC970_HID5_INIT 0x00000080
+#else
+#define POWERPC970_HID5_INIT 0x00000000
+#endif
+
+enum BOOK3S_CPU_TYPE {
+ BOOK3S_CPU_970,
+ BOOK3S_CPU_POWER5PLUS,
+ BOOK3S_CPU_POWER6,
+ BOOK3S_CPU_POWER7,
+ BOOK3S_CPU_POWER8,
+ BOOK3S_CPU_POWER9
+};
+
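+/*
+ * Facility-gated SPR accesses: these emit a call into the corresponding
+ * FSCR/MSR facility check helper, which raises a facility unavailable
+ * interrupt with the given cause if the required facility bit is clear.
+ */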
+static void gen_fscr_facility_check(DisasContext *ctx, int facility_sprn,
+ int bit, int sprn, int cause)
+{
+ TCGv_i32 t1 = tcg_const_i32(bit);
+ TCGv_i32 t2 = tcg_const_i32(sprn);
+ TCGv_i32 t3 = tcg_const_i32(cause);
+
+ gen_helper_fscr_facility_check(cpu_env, t1, t2, t3);
+
+ tcg_temp_free_i32(t3);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t1);
+}
+
+static void gen_msr_facility_check(DisasContext *ctx, int facility_sprn,
+ int bit, int sprn, int cause)
+{
+ TCGv_i32 t1 = tcg_const_i32(bit);
+ TCGv_i32 t2 = tcg_const_i32(sprn);
+ TCGv_i32 t3 = tcg_const_i32(cause);
+
+ gen_helper_msr_facility_check(cpu_env, t1, t2, t3);
+
+ tcg_temp_free_i32(t3);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(t1);
+}
+
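+/*
+ * Access the upper 32 bits of the 64-bit SPR registered at sprn - 1.
+ * Used for the "upper" SPR aliases (e.g. TEXASRU, BESCRSU) that shadow
+ * the top half of their full-width counterpart.
+ */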
+static void spr_read_prev_upper32(DisasContext *ctx, int gprn, int sprn)
+{
+ TCGv spr_up = tcg_temp_new();
+ TCGv spr = tcg_temp_new();
+
+ gen_load_spr(spr, sprn - 1);
+ tcg_gen_shri_tl(spr_up, spr, 32);
+ tcg_gen_ext32u_tl(cpu_gpr[gprn], spr_up);
+
+ tcg_temp_free(spr);
+ tcg_temp_free(spr_up);
+}
+
+static void spr_write_prev_upper32(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv spr = tcg_temp_new();
+
+ gen_load_spr(spr, sprn - 1);
+ tcg_gen_deposit_tl(spr, spr, cpu_gpr[gprn], 32, 32);
+ gen_store_spr(sprn - 1, spr);
+
+ tcg_temp_free(spr);
+}
+
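+/* The 970 can only enter a power-saving state if at least one of the
+ * doze, nap or deep nap enable bits is set in HID0. */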
+static int check_pow_970(CPUPPCState *env)
+{
+ if (env->spr[SPR_HID0] & (HID0_DEEPNAP | HID0_DOZE | HID0_NAP)) {
+ return 1;
+ }
+
+ return 0;
+}
+
+static void gen_spr_970_hid(CPUPPCState *env)
+{
+ /* Hardware implementation registers */
+ /* XXX : not implemented */
+ spr_register(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_clear,
+ 0x60000000);
+ spr_register(env, SPR_HID1, "HID1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_970_HID5, "HID5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ POWERPC970_HID5_INIT);
+}
+
+static void gen_spr_970_hior(CPUPPCState *env)
+{
+ spr_register(env, SPR_HIOR, "SPR_HIOR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_hior, &spr_write_hior,
+ 0x00000000);
+}
+
+static void gen_spr_book3s_common(CPUPPCState *env)
+{
+ spr_register(env, SPR_CTRL, "SPR_CTRL",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_UCTRL, "SPR_UCTRL",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, SPR_NOACCESS,
+ 0x00000000);
+}
+
+static void gen_spr_book3s_altivec(CPUPPCState *env)
+{
+ if (!(env->insns_flags & PPC_ALTIVEC)) {
+ return;
+ }
+
+ spr_register_kvm(env, SPR_VRSAVE, "VRSAVE",
+ &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_VRSAVE, 0x00000000);
+
+ /* Can't find information on what this should be on reset. This
+ * value is the one used by 74xx processors. */
+ vscr_init(env, 0x00010000);
+}
+
+static void gen_spr_book3s_dbg(CPUPPCState *env)
+{
+ /*
+ * TODO: different specs define different scopes for these,
+ * will have to address this:
+ * 970: super/write and super/read
+ * powerisa 2.03..2.04: hypv/write and super/read.
+ * powerisa 2.05 and newer: hypv/write and hypv/read.
+ */
+ spr_register_kvm(env, SPR_DABR, "DABR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_DABR, 0x00000000);
+ spr_register_kvm(env, SPR_DABRX, "DABRX",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_DABRX, 0x00000000);
+}
+
+static void gen_spr_book3s_207_dbg(CPUPPCState *env)
+{
+ spr_register_kvm_hv(env, SPR_DAWR, "DAWR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_DAWR, 0x00000000);
+ spr_register_kvm_hv(env, SPR_DAWRX, "DAWRX",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_DAWRX, 0x00000000);
+ spr_register_kvm_hv(env, SPR_CIABR, "CIABR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_CIABR, 0x00000000);
+}
+
+static void gen_spr_970_dbg(CPUPPCState *env)
+{
+ /* Breakpoints */
+ spr_register(env, SPR_IABR, "IABR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
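+/*
+ * Supervisor-level performance monitor SPRs common to Book3S CPUs.
+ * Each one also carries its KVM one-reg ID so the state can be kept in
+ * sync with an in-kernel PMU implementation.
+ */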
+static void gen_spr_book3s_pmu_sup(CPUPPCState *env)
+{
+ spr_register_kvm(env, SPR_POWER_MMCR0, "MMCR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_MMCR0, 0x00000000);
+ spr_register_kvm(env, SPR_POWER_MMCR1, "MMCR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_MMCR1, 0x00000000);
+ spr_register_kvm(env, SPR_POWER_MMCRA, "MMCRA",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_MMCRA, 0x00000000);
+ spr_register_kvm(env, SPR_POWER_PMC1, "PMC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_PMC1, 0x00000000);
+ spr_register_kvm(env, SPR_POWER_PMC2, "PMC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_PMC2, 0x00000000);
+ spr_register_kvm(env, SPR_POWER_PMC3, "PMC3",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_PMC3, 0x00000000);
+ spr_register_kvm(env, SPR_POWER_PMC4, "PMC4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_PMC4, 0x00000000);
+ spr_register_kvm(env, SPR_POWER_PMC5, "PMC5",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_PMC5, 0x00000000);
+ spr_register_kvm(env, SPR_POWER_PMC6, "PMC6",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_PMC6, 0x00000000);
+ spr_register_kvm(env, SPR_POWER_SIAR, "SIAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_SIAR, 0x00000000);
+ spr_register_kvm(env, SPR_POWER_SDAR, "SDAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_SDAR, 0x00000000);
+}
+
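+/*
+ * User-level aliases of the PMU SPRs: accesses are redirected to the
+ * underlying privileged registers through the ureg handlers.
+ */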
+static void gen_spr_book3s_pmu_user(CPUPPCState *env)
+{
+ spr_register(env, SPR_POWER_UMMCR0, "UMMCR0",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, &spr_write_ureg,
+ 0x00000000);
+ spr_register(env, SPR_POWER_UMMCR1, "UMMCR1",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, &spr_write_ureg,
+ 0x00000000);
+ spr_register(env, SPR_POWER_UMMCRA, "UMMCRA",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, &spr_write_ureg,
+ 0x00000000);
+ spr_register(env, SPR_POWER_UPMC1, "UPMC1",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, &spr_write_ureg,
+ 0x00000000);
+ spr_register(env, SPR_POWER_UPMC2, "UPMC2",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, &spr_write_ureg,
+ 0x00000000);
+ spr_register(env, SPR_POWER_UPMC3, "UPMC3",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, &spr_write_ureg,
+ 0x00000000);
+ spr_register(env, SPR_POWER_UPMC4, "UPMC4",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, &spr_write_ureg,
+ 0x00000000);
+ spr_register(env, SPR_POWER_UPMC5, "UPMC5",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, &spr_write_ureg,
+ 0x00000000);
+ spr_register(env, SPR_POWER_UPMC6, "UPMC6",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, &spr_write_ureg,
+ 0x00000000);
+ spr_register(env, SPR_POWER_USIAR, "USIAR",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, &spr_write_ureg,
+ 0x00000000);
+ spr_register(env, SPR_POWER_USDAR, "USDAR",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, &spr_write_ureg,
+ 0x00000000);
+}
+
+static void gen_spr_970_pmu_sup(CPUPPCState *env)
+{
+ spr_register_kvm(env, SPR_970_PMC7, "PMC7",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_PMC7, 0x00000000);
+ spr_register_kvm(env, SPR_970_PMC8, "PMC8",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_PMC8, 0x00000000);
+}
+
+static void gen_spr_970_pmu_user(CPUPPCState *env)
+{
+ spr_register(env, SPR_970_UPMC7, "UPMC7",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, &spr_write_ureg,
+ 0x00000000);
+ spr_register(env, SPR_970_UPMC8, "UPMC8",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, &spr_write_ureg,
+ 0x00000000);
+}
+
+static void gen_spr_power8_pmu_sup(CPUPPCState *env)
+{
+ spr_register_kvm(env, SPR_POWER_MMCR2, "MMCR2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_MMCR2, 0x00000000);
+ spr_register_kvm(env, SPR_POWER_MMCRS, "MMCRS",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_MMCRS, 0x00000000);
+ spr_register_kvm(env, SPR_POWER_SIER, "SIER",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_SIER, 0x00000000);
+ spr_register_kvm(env, SPR_POWER_SPMC1, "SPMC1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_SPMC1, 0x00000000);
+ spr_register_kvm(env, SPR_POWER_SPMC2, "SPMC2",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_SPMC2, 0x00000000);
+ spr_register_kvm(env, SPR_TACR, "TACR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_TACR, 0x00000000);
+ spr_register_kvm(env, SPR_TCSCR, "TCSCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_TCSCR, 0x00000000);
+ spr_register_kvm(env, SPR_CSIGR, "CSIGR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_CSIGR, 0x00000000);
+}
+
+static void gen_spr_power8_pmu_user(CPUPPCState *env)
+{
+ spr_register(env, SPR_POWER_UMMCR2, "UMMCR2",
+ &spr_read_ureg, SPR_NOACCESS,
+ &spr_read_ureg, &spr_write_ureg,
+ 0x00000000);
+ spr_register(env, SPR_POWER_USIER, "USIER",
+ &spr_read_generic, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+static void gen_spr_power5p_ear(CPUPPCState *env)
+{
+ /* External access control */
+ spr_register(env, SPR_EAR, "EAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+#if !defined(CONFIG_USER_ONLY)
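+/*
+ * HMER writes are ANDed with the current contents, so software can only
+ * clear bits that are already set; it cannot set new ones.
+ */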
+static void spr_write_hmer(DisasContext *ctx, int sprn, int gprn)
+{
+ TCGv hmer = tcg_temp_new();
+
+ gen_load_spr(hmer, sprn);
+ tcg_gen_and_tl(hmer, cpu_gpr[gprn], hmer);
+ gen_store_spr(sprn, hmer);
+ spr_store_dump_spr(sprn);
+ tcg_temp_free(hmer);
+}
+
+static void spr_write_lpcr(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_helper_store_lpcr(cpu_env, cpu_gpr[gprn]);
+}
+
+static void spr_write_970_hid4(DisasContext *ctx, int sprn, int gprn)
+{
+#if defined(TARGET_PPC64)
+ spr_write_generic(ctx, sprn, gprn);
+ gen_helper_store_lpcr(cpu_env, cpu_gpr[gprn]);
+#endif
+}
+
+#endif /* !defined(CONFIG_USER_ONLY) */
+
+static void gen_spr_970_lpar(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ /* Logical partitioning */
+ /* PPC970: HID4 is effectively the LPCR */
+ spr_register(env, SPR_970_HID4, "HID4",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_970_hid4,
+ 0x00000000);
+#endif
+}
+
+static void gen_spr_power5p_lpar(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ /* Logical partitioning */
+ spr_register_kvm_hv(env, SPR_LPCR, "LPCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_lpcr,
+ KVM_REG_PPC_LPCR, LPCR_LPES0 | LPCR_LPES1);
+ spr_register_hv(env, SPR_HDEC, "HDEC",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_hdecr, &spr_write_hdecr, 0);
+#endif
+}
+
+static void gen_spr_book3s_ids(CPUPPCState *env)
+{
+ /* FIXME: Will need to deal with thread- vs core-only SPRs */
+
+ /* Processor identification */
+ spr_register_hv(env, SPR_PIR, "PIR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, NULL,
+ 0x00000000);
+ spr_register_hv(env, SPR_HID0, "HID0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_TSCR, "TSCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_HMER, "HMER",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_hmer,
+ 0x00000000);
+ spr_register_hv(env, SPR_HMEER, "HMEER",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_TFMR, "TFMR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_LPIDR, "LPIDR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_HFSCR, "HFSCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_MMCRC, "MMCRC",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_MMCRH, "MMCRH",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_HSPRG0, "HSPRG0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_HSPRG1, "HSPRG1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_HSRR0, "HSRR0",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_HSRR1, "HSRR1",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_HDAR, "HDAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_HDSISR, "HDSISR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_RMOR, "RMOR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register_hv(env, SPR_HRMOR, "HRMOR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+}
+
+static void gen_spr_power8_ids(CPUPPCState *env)
+{
+ /* Thread identification */
+ spr_register(env, SPR_TIR, "TIR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ 0x00000000);
+}
+
+static void gen_spr_book3s_purr(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ /* PURR & SPURR: Hack - treat these as aliases for the TB for now */
+ spr_register_kvm(env, SPR_PURR, "PURR",
+ &spr_read_purr, SPR_NOACCESS,
+ &spr_read_purr, SPR_NOACCESS,
+ KVM_REG_PPC_PURR, 0x00000000);
+ spr_register_kvm(env, SPR_SPURR, "SPURR",
+ &spr_read_purr, SPR_NOACCESS,
+ &spr_read_purr, SPR_NOACCESS,
+ KVM_REG_PPC_SPURR, 0x00000000);
+#endif
+}
+
+static void gen_spr_power6_dbg(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ spr_register(env, SPR_CFAR, "SPR_CFAR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_cfar, &spr_write_cfar,
+ 0x00000000);
+#endif
+}
+
+static void gen_spr_power5p_common(CPUPPCState *env)
+{
+ spr_register_kvm(env, SPR_PPR, "PPR",
+ &spr_read_generic, &spr_write_generic,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_PPR, 0x00000000);
+}
+
+static void gen_spr_power6_common(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ spr_register_kvm(env, SPR_DSCR, "SPR_DSCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_DSCR, 0x00000000);
+#endif
+ /*
+ * Register PCR to report POWERPC_EXCP_PRIV_REG instead of
+ * POWERPC_EXCP_INVAL_SPR.
+ */
+ spr_register(env, SPR_PCR, "PCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ 0x00000000);
+}
+
+static void spr_read_tar(DisasContext *ctx, int gprn, int sprn)
+{
+ gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_TAR, sprn, FSCR_IC_TAR);
+ spr_read_generic(ctx, gprn, sprn);
+}
+
+static void spr_write_tar(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_TAR, sprn, FSCR_IC_TAR);
+ spr_write_generic(ctx, sprn, gprn);
+}
+
+static void gen_spr_power8_tce_address_control(CPUPPCState *env)
+{
+ spr_register_kvm(env, SPR_TAR, "TAR",
+ &spr_read_tar, &spr_write_tar,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_TAR, 0x00000000);
+}
+
+static void spr_read_tm(DisasContext *ctx, int gprn, int sprn)
+{
+ gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
+ spr_read_generic(ctx, gprn, sprn);
+}
+
+static void spr_write_tm(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
+ spr_write_generic(ctx, sprn, gprn);
+}
+
+static void spr_read_tm_upper32(DisasContext *ctx, int gprn, int sprn)
+{
+ gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
+ spr_read_prev_upper32(ctx, gprn, sprn);
+}
+
+static void spr_write_tm_upper32(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM);
+ spr_write_prev_upper32(ctx, sprn, gprn);
+}
+
+static void gen_spr_power8_tm(CPUPPCState *env)
+{
+ spr_register_kvm(env, SPR_TFHAR, "TFHAR",
+ &spr_read_tm, &spr_write_tm,
+ &spr_read_tm, &spr_write_tm,
+ KVM_REG_PPC_TFHAR, 0x00000000);
+ spr_register_kvm(env, SPR_TFIAR, "TFIAR",
+ &spr_read_tm, &spr_write_tm,
+ &spr_read_tm, &spr_write_tm,
+ KVM_REG_PPC_TFIAR, 0x00000000);
+ spr_register_kvm(env, SPR_TEXASR, "TEXASR",
+ &spr_read_tm, &spr_write_tm,
+ &spr_read_tm, &spr_write_tm,
+ KVM_REG_PPC_TEXASR, 0x00000000);
+ spr_register(env, SPR_TEXASRU, "TEXASRU",
+ &spr_read_tm_upper32, &spr_write_tm_upper32,
+ &spr_read_tm_upper32, &spr_write_tm_upper32,
+ 0x00000000);
+}
+
+static void spr_read_ebb(DisasContext *ctx, int gprn, int sprn)
+{
+ gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
+ spr_read_generic(ctx, gprn, sprn);
+}
+
+static void spr_write_ebb(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
+ spr_write_generic(ctx, sprn, gprn);
+}
+
+static void spr_read_ebb_upper32(DisasContext *ctx, int gprn, int sprn)
+{
+ gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
+ spr_read_prev_upper32(ctx, gprn, sprn);
+}
+
+static void spr_write_ebb_upper32(DisasContext *ctx, int sprn, int gprn)
+{
+ gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB);
+ spr_write_prev_upper32(ctx, sprn, gprn);
+}
+
+static void gen_spr_power8_ebb(CPUPPCState *env)
+{
+ spr_register(env, SPR_BESCRS, "BESCRS",
+ &spr_read_ebb, &spr_write_ebb,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BESCRSU, "BESCRSU",
+ &spr_read_ebb_upper32, &spr_write_ebb_upper32,
+ &spr_read_prev_upper32, &spr_write_prev_upper32,
+ 0x00000000);
+ spr_register(env, SPR_BESCRR, "BESCRR",
+ &spr_read_ebb, &spr_write_ebb,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000000);
+ spr_register(env, SPR_BESCRRU, "BESCRRU",
+ &spr_read_ebb_upper32, &spr_write_ebb_upper32,
+ &spr_read_prev_upper32, &spr_write_prev_upper32,
+ 0x00000000);
+ spr_register_kvm(env, SPR_EBBHR, "EBBHR",
+ &spr_read_ebb, &spr_write_ebb,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_EBBHR, 0x00000000);
+ spr_register_kvm(env, SPR_EBBRR, "EBBRR",
+ &spr_read_ebb, &spr_write_ebb,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_EBBRR, 0x00000000);
+ spr_register_kvm(env, SPR_BESCR, "BESCR",
+ &spr_read_ebb, &spr_write_ebb,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_BESCR, 0x00000000);
+}
+
+/* Virtual Time Base */
+static void gen_spr_vtb(CPUPPCState *env)
+{
+ spr_register(env, SPR_VTB, "VTB",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_tbl, SPR_NOACCESS,
+ 0x00000000);
+}
+
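+/*
+ * FSCR: in user-only mode there is no OS to enable facilities, so the
+ * TAR facility is enabled by default; full-system emulation starts with
+ * all facilities disabled and leaves enabling them to the guest.
+ */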
+static void gen_spr_power8_fscr(CPUPPCState *env)
+{
+#if defined(CONFIG_USER_ONLY)
+ target_ulong initval = 1ULL << FSCR_TAR;
+#else
+ target_ulong initval = 0;
+#endif
+ spr_register_kvm(env, SPR_FSCR, "FSCR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_FSCR, initval);
+}
+
+static void gen_spr_power8_pspb(CPUPPCState *env)
+{
+ spr_register_kvm(env, SPR_PSPB, "PSPB",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic32,
+ KVM_REG_PPC_PSPB, 0);
+}
+
+static void gen_spr_power8_ic(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ spr_register_hv(env, SPR_IC, "IC",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0);
+#endif
+}
+
+static void gen_spr_power8_book4(CPUPPCState *env)
+{
+ /* Add a number of P8 book4 registers */
+#if !defined(CONFIG_USER_ONLY)
+ spr_register_kvm(env, SPR_ACOP, "ACOP",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_ACOP, 0);
+ spr_register_kvm(env, SPR_BOOKS_PID, "PID",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_PID, 0);
+ spr_register_kvm(env, SPR_WORT, "WORT",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_WORT, 0);
+#endif
+}
+
+static void gen_spr_power7_book4(CPUPPCState *env)
+{
+ /* Add a number of P7 book4 registers */
+#if !defined(CONFIG_USER_ONLY)
+ spr_register_kvm(env, SPR_ACOP, "ACOP",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_ACOP, 0);
+ spr_register_kvm(env, SPR_BOOKS_PID, "PID",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ KVM_REG_PPC_PID, 0);
+#endif
+}
+
+static void gen_spr_power8_rpr(CPUPPCState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ spr_register_hv(env, SPR_RPR, "RPR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, &spr_write_generic,
+ 0x00000103070F1F3F);
+#endif
+}
+
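+/*
+ * Common init for 64-bit Book3S CPUs: register the SPR sets shared by
+ * all server processors, then layer on the per-generation extras
+ * selected by 'version'.
+ */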
+static void init_proc_book3s_64(CPUPPCState *env, int version)
+{
+ gen_spr_ne_601(env);
+ gen_tbl(env);
+ gen_spr_book3s_altivec(env);
+ gen_spr_book3s_pmu_sup(env);
+ gen_spr_book3s_pmu_user(env);
+ gen_spr_book3s_common(env);
+
+ switch (version) {
+ case BOOK3S_CPU_970:
+ case BOOK3S_CPU_POWER5PLUS:
+ gen_spr_970_hid(env);
+ gen_spr_970_hior(env);
+ gen_low_BATs(env);
+ gen_spr_970_pmu_sup(env);
+ gen_spr_970_pmu_user(env);
+ break;
+ case BOOK3S_CPU_POWER7:
+ case BOOK3S_CPU_POWER8:
+ case BOOK3S_CPU_POWER9:
+ gen_spr_book3s_ids(env);
+ gen_spr_amr(env, version >= BOOK3S_CPU_POWER8);
+ gen_spr_book3s_purr(env);
+ env->ci_large_pages = true;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ if (version >= BOOK3S_CPU_POWER5PLUS) {
+ gen_spr_power5p_common(env);
+ gen_spr_power5p_lpar(env);
+ gen_spr_power5p_ear(env);
+ } else {
+ gen_spr_970_lpar(env);
+ }
+ if (version == BOOK3S_CPU_970) {
+ gen_spr_970_dbg(env);
+ }
+ if (version >= BOOK3S_CPU_POWER6) {
+ gen_spr_power6_common(env);
+ gen_spr_power6_dbg(env);
+ }
+ if (version == BOOK3S_CPU_POWER7) {
+ gen_spr_power7_book4(env);
+ }
+ if (version >= BOOK3S_CPU_POWER8) {
+ gen_spr_power8_tce_address_control(env);
+ gen_spr_power8_ids(env);
+ gen_spr_power8_ebb(env);
+ gen_spr_power8_fscr(env);
+ gen_spr_power8_pmu_sup(env);
+ gen_spr_power8_pmu_user(env);
+ gen_spr_power8_tm(env);
+ gen_spr_power8_pspb(env);
+ gen_spr_vtb(env);
+ gen_spr_power8_ic(env);
+ gen_spr_power8_book4(env);
+ gen_spr_power8_rpr(env);
+ }
+ if (version < BOOK3S_CPU_POWER8) {
+ gen_spr_book3s_dbg(env);
+ } else {
+ gen_spr_book3s_207_dbg(env);
+ }
+#if !defined(CONFIG_USER_ONLY)
+ switch (version) {
+ case BOOK3S_CPU_970:
+ case BOOK3S_CPU_POWER5PLUS:
+ env->slb_nr = 64;
+ break;
+ case BOOK3S_CPU_POWER7:
+ case BOOK3S_CPU_POWER8:
+ case BOOK3S_CPU_POWER9:
+ default:
+ env->slb_nr = 32;
+ break;
+ }
+#endif
+ /* Allocate hardware IRQ controller */
+ switch (version) {
+ case BOOK3S_CPU_970:
+ case BOOK3S_CPU_POWER5PLUS:
+ init_excp_970(env);
+ ppc970_irq_init(ppc_env_get_cpu(env));
+ break;
+ case BOOK3S_CPU_POWER7:
+ init_excp_POWER7(env);
+ ppcPOWER7_irq_init(ppc_env_get_cpu(env));
+ break;
+ case BOOK3S_CPU_POWER8:
+ case BOOK3S_CPU_POWER9:
+ init_excp_POWER8(env);
+ ppcPOWER7_irq_init(ppc_env_get_cpu(env));
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ env->dcache_line_size = 128;
+ env->icache_line_size = 128;
+}
+
+static void init_proc_970(CPUPPCState *env)
+{
+ init_proc_book3s_64(env, BOOK3S_CPU_970);
+}
+
+POWERPC_FAMILY(970)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->desc = "PowerPC 970";
+ pcc->init_proc = init_proc_970;
+ pcc->check_pow = check_pow_970;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
+ PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_64B | PPC_ALTIVEC |
+ PPC_SEGMENT_64B | PPC_SLBI;
+ pcc->insns_flags2 = PPC2_FP_CVT_S64;
+ pcc->msr_mask = (1ull << MSR_SF) |
+ (1ull << MSR_VR) |
+ (1ull << MSR_POW) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI);
+ pcc->mmu_model = POWERPC_MMU_64B;
+#if defined(CONFIG_SOFTMMU)
+ pcc->handle_mmu_fault = ppc_hash64_handle_mmu_fault;
+#endif
+ pcc->excp_model = POWERPC_EXCP_970;
+ pcc->bus_model = PPC_FLAGS_INPUT_970;
+ pcc->bfd_mach = bfd_mach_ppc64;
+ pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
+ POWERPC_FLAG_BUS_CLK;
+ pcc->l1_dcache_size = 0x8000;
+ pcc->l1_icache_size = 0x10000;
+}
+
+static void init_proc_power5plus(CPUPPCState *env)
+{
+ init_proc_book3s_64(env, BOOK3S_CPU_POWER5PLUS);
+}
+
+POWERPC_FAMILY(POWER5P)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->fw_name = "PowerPC,POWER5";
+ dc->desc = "POWER5+";
+ pcc->init_proc = init_proc_power5plus;
+ pcc->check_pow = check_pow_970;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
+ PPC_FLOAT_STFIWX |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_64B |
+ PPC_SEGMENT_64B | PPC_SLBI;
+ pcc->insns_flags2 = PPC2_FP_CVT_S64;
+ pcc->msr_mask = (1ull << MSR_SF) |
+ (1ull << MSR_VR) |
+ (1ull << MSR_POW) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI);
+ pcc->mmu_model = POWERPC_MMU_2_03;
+#if defined(CONFIG_SOFTMMU)
+ pcc->handle_mmu_fault = ppc_hash64_handle_mmu_fault;
+#endif
+ pcc->excp_model = POWERPC_EXCP_970;
+ pcc->bus_model = PPC_FLAGS_INPUT_970;
+ pcc->bfd_mach = bfd_mach_ppc64;
+ pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
+ POWERPC_FLAG_BUS_CLK;
+ pcc->l1_dcache_size = 0x8000;
+ pcc->l1_icache_size = 0x10000;
+}
+
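+/*
+ * "compat" property: maps the strings power6/power7/power8 onto the
+ * logical PVRs for ISA v2.05, v2.06 and v2.07 respectively.
+ */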
+static void powerpc_get_compat(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ char *value = (char *)"";
+ Property *prop = opaque;
+ uint32_t *max_compat = qdev_get_prop_ptr(DEVICE(obj), prop);
+
+ switch (*max_compat) {
+ case CPU_POWERPC_LOGICAL_2_05:
+ value = (char *)"power6";
+ break;
+ case CPU_POWERPC_LOGICAL_2_06:
+ value = (char *)"power7";
+ break;
+ case CPU_POWERPC_LOGICAL_2_07:
+ value = (char *)"power8";
+ break;
+ case 0:
+ break;
+ default:
+ error_report("Internal error: compat is set to %x", *max_compat);
+ abort();
+ break;
+ }
+
+ visit_type_str(v, name, &value, errp);
+}
+
+static void powerpc_set_compat(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ Error *error = NULL;
+ char *value = NULL;
+ Property *prop = opaque;
+ uint32_t *max_compat = qdev_get_prop_ptr(DEVICE(obj), prop);
+
+ visit_type_str(v, name, &value, &error);
+ if (error) {
+ error_propagate(errp, error);
+ return;
+ }
+
+ if (strcmp(value, "power6") == 0) {
+ *max_compat = CPU_POWERPC_LOGICAL_2_05;
+ } else if (strcmp(value, "power7") == 0) {
+ *max_compat = CPU_POWERPC_LOGICAL_2_06;
+ } else if (strcmp(value, "power8") == 0) {
+ *max_compat = CPU_POWERPC_LOGICAL_2_07;
+ } else {
+ error_setg(errp, "Invalid compatibility mode \"%s\"", value);
+ }
+
+ g_free(value);
+}
+
+static PropertyInfo powerpc_compat_propinfo = {
+ .name = "str",
+ .description = "compatibility mode, power6/power7/power8",
+ .get = powerpc_get_compat,
+ .set = powerpc_set_compat,
+};
+
+#define DEFINE_PROP_POWERPC_COMPAT(_n, _s, _f) \
+ DEFINE_PROP(_n, _s, _f, powerpc_compat_propinfo, uint32_t)
+
+static Property powerpc_servercpu_properties[] = {
+ DEFINE_PROP_POWERPC_COMPAT("compat", PowerPCCPU, max_compat),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+#ifdef CONFIG_SOFTMMU
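+/*
+ * Hash MMU segment/page size combinations supported by POWER7/POWER8:
+ * each entry gives a base segment page size, its SLB encoding and the
+ * PTE encodings of the actual page sizes usable within such a segment.
+ */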
+static const struct ppc_segment_page_sizes POWER7_POWER8_sps = {
+ .sps = {
+ {
+ .page_shift = 12, /* 4K */
+ .slb_enc = 0,
+ .enc = { { .page_shift = 12, .pte_enc = 0 },
+ { .page_shift = 16, .pte_enc = 0x7 },
+ { .page_shift = 24, .pte_enc = 0x38 }, },
+ },
+ {
+ .page_shift = 16, /* 64K */
+ .slb_enc = SLB_VSID_64K,
+ .enc = { { .page_shift = 16, .pte_enc = 0x1 },
+ { .page_shift = 24, .pte_enc = 0x8 }, },
+ },
+ {
+ .page_shift = 24, /* 16M */
+ .slb_enc = SLB_VSID_16M,
+ .enc = { { .page_shift = 24, .pte_enc = 0 }, },
+ },
+ {
+ .page_shift = 34, /* 16G */
+ .slb_enc = SLB_VSID_16G,
+ .enc = { { .page_shift = 34, .pte_enc = 0x3 }, },
+ },
+ }
+};
+#endif /* CONFIG_SOFTMMU */
+
+static void init_proc_POWER7(CPUPPCState *env)
+{
+ init_proc_book3s_64(env, BOOK3S_CPU_POWER7);
+}
+
+static bool ppc_pvr_match_power7(PowerPCCPUClass *pcc, uint32_t pvr)
+{
+ if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER7P_BASE) {
+ return true;
+ }
+ if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER7_BASE) {
+ return true;
+ }
+ return false;
+}
+
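+/*
+ * While the CPU is halted (e.g. napping), only the interrupts whose
+ * wakeup is enabled through the LPCR PECE bits (plus system reset)
+ * count as pending work.
+ */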
+static bool cpu_has_work_POWER7(CPUState *cs)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+
+ if (cs->halted) {
+ if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) {
+ return false;
+ }
+ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_EXT)) &&
+ (env->spr[SPR_LPCR] & LPCR_P7_PECE0)) {
+ return true;
+ }
+ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DECR)) &&
+ (env->spr[SPR_LPCR] & LPCR_P7_PECE1)) {
+ return true;
+ }
+ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_MCK)) &&
+ (env->spr[SPR_LPCR] & LPCR_P7_PECE2)) {
+ return true;
+ }
+ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HMI)) &&
+ (env->spr[SPR_LPCR] & LPCR_P7_PECE2)) {
+ return true;
+ }
+ if (env->pending_interrupts & (1u << PPC_INTERRUPT_RESET)) {
+ return true;
+ }
+ return false;
+ } else {
+ return msr_ee && (cs->interrupt_request & CPU_INTERRUPT_HARD);
+ }
+}
+
+POWERPC_FAMILY(POWER7)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(oc);
+
+ dc->fw_name = "PowerPC,POWER7";
+ dc->desc = "POWER7";
+ dc->props = powerpc_servercpu_properties;
+ pcc->pvr_match = ppc_pvr_match_power7;
+ pcc->pcr_mask = PCR_VEC_DIS | PCR_VSX_DIS | PCR_COMPAT_2_05;
+ pcc->pcr_supported = PCR_COMPAT_2_06 | PCR_COMPAT_2_05;
+ pcc->init_proc = init_proc_POWER7;
+ pcc->check_pow = check_pow_nocheck;
+ cc->has_work = cpu_has_work_POWER7;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
+ PPC_FLOAT_FRSQRTES |
+ PPC_FLOAT_STFIWX |
+ PPC_FLOAT_EXT |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_64B | PPC_64H | PPC_64BX | PPC_ALTIVEC |
+ PPC_SEGMENT_64B | PPC_SLBI |
+ PPC_POPCNTB | PPC_POPCNTWD |
+ PPC_CILDST;
+ pcc->insns_flags2 = PPC2_VSX | PPC2_DFP | PPC2_DBRX | PPC2_ISA205 |
+ PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 |
+ PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 |
+ PPC2_FP_TST_ISA206 | PPC2_FP_CVT_S64 |
+ PPC2_PM_ISA206;
+ pcc->msr_mask = (1ull << MSR_SF) |
+ (1ull << MSR_VR) |
+ (1ull << MSR_VSX) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_2_06;
+#if defined(CONFIG_SOFTMMU)
+ pcc->handle_mmu_fault = ppc_hash64_handle_mmu_fault;
+ pcc->sps = &POWER7_POWER8_sps;
+#endif
+ pcc->excp_model = POWERPC_EXCP_POWER7;
+ pcc->bus_model = PPC_FLAGS_INPUT_POWER7;
+ pcc->bfd_mach = bfd_mach_ppc64;
+ pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
+ POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR |
+ POWERPC_FLAG_VSX;
+ pcc->l1_dcache_size = 0x8000;
+ pcc->l1_icache_size = 0x8000;
+ pcc->interrupts_big_endian = ppc_cpu_interrupts_big_endian_lpcr;
+}
+
+static void init_proc_POWER8(CPUPPCState *env)
+{
+ init_proc_book3s_64(env, BOOK3S_CPU_POWER8);
+}
+
+static bool ppc_pvr_match_power8(PowerPCCPUClass *pcc, uint32_t pvr)
+{
+ if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER8NVL_BASE) {
+ return true;
+ }
+ if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER8E_BASE) {
+ return true;
+ }
+ if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER8_BASE) {
+ return true;
+ }
+ return false;
+}
+
+static bool cpu_has_work_POWER8(CPUState *cs)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+
+ if (cs->halted) {
+ if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) {
+ return false;
+ }
+ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_EXT)) &&
+ (env->spr[SPR_LPCR] & LPCR_P8_PECE2)) {
+ return true;
+ }
+ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DECR)) &&
+ (env->spr[SPR_LPCR] & LPCR_P8_PECE3)) {
+ return true;
+ }
+ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_MCK)) &&
+ (env->spr[SPR_LPCR] & LPCR_P8_PECE4)) {
+ return true;
+ }
+ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HMI)) &&
+ (env->spr[SPR_LPCR] & LPCR_P8_PECE4)) {
+ return true;
+ }
+ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_DOORBELL)) &&
+ (env->spr[SPR_LPCR] & LPCR_P8_PECE0)) {
+ return true;
+ }
+ if ((env->pending_interrupts & (1u << PPC_INTERRUPT_HDOORBELL)) &&
+ (env->spr[SPR_LPCR] & LPCR_P8_PECE1)) {
+ return true;
+ }
+ if (env->pending_interrupts & (1u << PPC_INTERRUPT_RESET)) {
+ return true;
+ }
+ return false;
+ } else {
+ return msr_ee && (cs->interrupt_request & CPU_INTERRUPT_HARD);
+ }
+}
+
+POWERPC_FAMILY(POWER8)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(oc);
+
+ dc->fw_name = "PowerPC,POWER8";
+ dc->desc = "POWER8";
+ dc->props = powerpc_servercpu_properties;
+ pcc->pvr_match = ppc_pvr_match_power8;
+ pcc->pcr_mask = PCR_TM_DIS | PCR_COMPAT_2_06 | PCR_COMPAT_2_05;
+ pcc->pcr_supported = PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_COMPAT_2_05;
+ pcc->init_proc = init_proc_POWER8;
+ pcc->check_pow = check_pow_nocheck;
+ cc->has_work = cpu_has_work_POWER8;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
+ PPC_FLOAT_FRSQRTES |
+ PPC_FLOAT_STFIWX |
+ PPC_FLOAT_EXT |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_64B | PPC_64H | PPC_64BX | PPC_ALTIVEC |
+ PPC_SEGMENT_64B | PPC_SLBI |
+ PPC_POPCNTB | PPC_POPCNTWD |
+ PPC_CILDST;
+ pcc->insns_flags2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX |
+ PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 |
+ PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 |
+ PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 |
+ PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 |
+ PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 |
+ PPC2_TM | PPC2_PM_ISA206;
+ pcc->msr_mask = (1ull << MSR_SF) |
+ (1ull << MSR_SHV) |
+ (1ull << MSR_TM) |
+ (1ull << MSR_VR) |
+ (1ull << MSR_VSX) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ pcc->mmu_model = POWERPC_MMU_2_07;
+#if defined(CONFIG_SOFTMMU)
+ pcc->handle_mmu_fault = ppc_hash64_handle_mmu_fault;
+ pcc->sps = &POWER7_POWER8_sps;
+#endif
+ pcc->excp_model = POWERPC_EXCP_POWER8;
+ pcc->bus_model = PPC_FLAGS_INPUT_POWER7;
+ pcc->bfd_mach = bfd_mach_ppc64;
+ pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
+ POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR |
+ POWERPC_FLAG_VSX | POWERPC_FLAG_TM;
+ pcc->l1_dcache_size = 0x8000;
+ pcc->l1_icache_size = 0x8000;
+ pcc->interrupts_big_endian = ppc_cpu_interrupts_big_endian_lpcr;
+}
+
+static void init_proc_POWER9(CPUPPCState *env)
+{
+ init_proc_book3s_64(env, BOOK3S_CPU_POWER9);
+}
+
+static bool ppc_pvr_match_power9(PowerPCCPUClass *pcc, uint32_t pvr)
+{
+ if ((pvr & CPU_POWERPC_POWER_SERVER_MASK) == CPU_POWERPC_POWER9_BASE) {
+ return true;
+ }
+ return false;
+}
+
+POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ dc->fw_name = "PowerPC,POWER9";
+ dc->desc = "POWER9";
+ dc->props = powerpc_servercpu_properties;
+ pcc->pvr_match = ppc_pvr_match_power9;
+ pcc->pcr_mask = PCR_COMPAT_2_05 | PCR_COMPAT_2_06 | PCR_COMPAT_2_07;
+ pcc->init_proc = init_proc_POWER9;
+ pcc->check_pow = check_pow_nocheck;
+ pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB |
+ PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
+ PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
+ PPC_FLOAT_FRSQRTES |
+ PPC_FLOAT_STFIWX |
+ PPC_FLOAT_EXT |
+ PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
+ PPC_MEM_SYNC | PPC_MEM_EIEIO |
+ PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+ PPC_64B | PPC_64BX | PPC_ALTIVEC |
+ PPC_SEGMENT_64B | PPC_SLBI |
+ PPC_POPCNTB | PPC_POPCNTWD |
+ PPC_CILDST;
+ pcc->insns_flags2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX |
+ PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 |
+ PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206 |
+ PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207 |
+ PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 |
+ PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 |
+ PPC2_TM | PPC2_PM_ISA206 | PPC2_ISA300;
+ pcc->msr_mask = (1ull << MSR_SF) |
+ (1ull << MSR_TM) |
+ (1ull << MSR_VR) |
+ (1ull << MSR_VSX) |
+ (1ull << MSR_EE) |
+ (1ull << MSR_PR) |
+ (1ull << MSR_FP) |
+ (1ull << MSR_ME) |
+ (1ull << MSR_FE0) |
+ (1ull << MSR_SE) |
+ (1ull << MSR_DE) |
+ (1ull << MSR_FE1) |
+ (1ull << MSR_IR) |
+ (1ull << MSR_DR) |
+ (1ull << MSR_PMM) |
+ (1ull << MSR_RI) |
+ (1ull << MSR_LE);
+ /* Using 2.07 defines until new radix model is added. */
+ pcc->mmu_model = POWERPC_MMU_2_07;
+#if defined(CONFIG_SOFTMMU)
+ pcc->handle_mmu_fault = ppc_hash64_handle_mmu_fault;
+ /* segment page sizes remain the same as on POWER7/POWER8 */
+ pcc->sps = &POWER7_POWER8_sps;
+#endif
+ pcc->excp_model = POWERPC_EXCP_POWER8;
+ pcc->bus_model = PPC_FLAGS_INPUT_POWER7;
+ pcc->bfd_mach = bfd_mach_ppc64;
+ pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
+ POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
+ POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR |
+ POWERPC_FLAG_VSX | POWERPC_FLAG_TM;
+ pcc->l1_dcache_size = 0x8000;
+ pcc->l1_icache_size = 0x8000;
+ pcc->interrupts_big_endian = ppc_cpu_interrupts_big_endian_lpcr;
+}
+
+#if !defined(CONFIG_USER_ONLY)
+
+void cpu_ppc_set_papr(PowerPCCPU *cpu)
+{
+ CPUPPCState *env = &cpu->env;
+ ppc_spr_t *lpcr = &env->spr_cb[SPR_LPCR];
+ ppc_spr_t *amor = &env->spr_cb[SPR_AMOR];
+
+ /* PAPR always has exception vectors in RAM, not ROM. To ensure this,
+ * MSR[IP] should never be set.
+ *
+ * We also disallow setting of MSR_HV.
+ */
+ env->msr_mask &= ~((1ull << MSR_EP) | MSR_HVB);
+
+ /* Set the emulated LPCR to not send interrupts to the hypervisor. Note
+ * that under KVM, the actual HW LPCR will be set differently by KVM
+ * itself; the settings below ensure proper operation with TCG in the
+ * absence of a real hypervisor.
+ *
+ * Clearing VPM0 will also cause us to use RMOR in mmu-hash64.c for
+ * real mode accesses, which thankfully defaults to 0 and isn't
+ * accessible in guest mode.
+ */
+ lpcr->default_value &= ~(LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_KBV);
+ lpcr->default_value |= LPCR_LPES0 | LPCR_LPES1;
+
+ /* Set RMLS to the max (ie, 16G) */
+ lpcr->default_value &= ~LPCR_RMLS;
+ lpcr->default_value |= 1ull << LPCR_RMLS_SHIFT;
+
+ /* P7 and P8 have slightly different PECE bits, mostly because P8 adds
+ * bits 47 and 48, which are reserved on P7. Here we set them all, which
+ * will work as expected for both implementations.
+ */
+ lpcr->default_value |= LPCR_P8_PECE0 | LPCR_P8_PECE1 | LPCR_P8_PECE2 |
+ LPCR_P8_PECE3 | LPCR_P8_PECE4;
+
+ /* A CPU reset should follow, but update the active value
+ * just in case...
+ */
+ env->spr[SPR_LPCR] = lpcr->default_value;
+
+ /* Set a full AMOR so guest can use the AMR as it sees fit */
+ env->spr[SPR_AMOR] = amor->default_value = 0xffffffffffffffffull;
+
+ /* Update some env bits based on new LPCR value */
+ ppc_hash64_update_rmls(env);
+ ppc_hash64_update_vrma(env);
+
+ /* Tell KVM that we're in PAPR mode */
+ if (kvm_enabled()) {
+ kvmppc_set_papr(cpu);
+ }
+}
+
+#endif /* !defined(CONFIG_USER_ONLY) */
+
+#endif /* defined (TARGET_PPC64) */
+
+/*****************************************************************************/
+/* Generic CPU instantiation routine */
+static void init_ppc_proc(PowerPCCPU *cpu)
+{
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
+ CPUPPCState *env = &cpu->env;
+#if !defined(CONFIG_USER_ONLY)
+ int i;
+
+ env->irq_inputs = NULL;
+ /* Set all exception vectors to an invalid address */
+ for (i = 0; i < POWERPC_EXCP_NB; i++)
+ env->excp_vectors[i] = (target_ulong)(-1ULL);
+ env->ivor_mask = 0x00000000;
+ env->ivpr_mask = 0x00000000;
+ /* Default MMU definitions */
+ env->nb_BATs = 0;
+ env->nb_tlb = 0;
+ env->nb_ways = 0;
+ env->tlb_type = TLB_NONE;
+#endif
+ /* Register SPR common to all PowerPC implementations */
+ gen_spr_generic(env);
+ spr_register(env, SPR_PVR, "PVR",
+ /* Linux permits userspace to read PVR */
+#if defined(CONFIG_LINUX_USER)
+ &spr_read_generic,
+#else
+ SPR_NOACCESS,
+#endif
+ SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ pcc->pvr);
+ /* Register SVR if it's defined to anything other than POWERPC_SVR_NONE */
+ if (pcc->svr != POWERPC_SVR_NONE) {
+ if (pcc->svr & POWERPC_SVR_E500) {
+ spr_register(env, SPR_E500_SVR, "SVR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ pcc->svr & ~POWERPC_SVR_E500);
+ } else {
+ spr_register(env, SPR_SVR, "SVR",
+ SPR_NOACCESS, SPR_NOACCESS,
+ &spr_read_generic, SPR_NOACCESS,
+ pcc->svr);
+ }
+ }
+ /* PowerPC implementation specific initialisations (SPRs, timers, ...) */
+ (*pcc->init_proc)(env);
+
+ /* MSR bits & flags consistency checks */
+ if (env->msr_mask & (1 << 25)) {
+ switch (env->flags & (POWERPC_FLAG_SPE | POWERPC_FLAG_VRE)) {
+ case POWERPC_FLAG_SPE:
+ case POWERPC_FLAG_VRE:
+ break;
+ default:
+ fprintf(stderr, "PowerPC MSR definition inconsistency\n"
+ "Should define POWERPC_FLAG_SPE or POWERPC_FLAG_VRE\n");
+ exit(1);
+ }
+ } else if (env->flags & (POWERPC_FLAG_SPE | POWERPC_FLAG_VRE)) {
+ fprintf(stderr, "PowerPC MSR definition inconsistency\n"
+ "Should not define POWERPC_FLAG_SPE nor POWERPC_FLAG_VRE\n");
+ exit(1);
+ }
+ if (env->msr_mask & (1 << 17)) {
+ switch (env->flags & (POWERPC_FLAG_TGPR | POWERPC_FLAG_CE)) {
+ case POWERPC_FLAG_TGPR:
+ case POWERPC_FLAG_CE:
+ break;
+ default:
+ fprintf(stderr, "PowerPC MSR definition inconsistency\n"
+ "Should define POWERPC_FLAG_TGPR or POWERPC_FLAG_CE\n");
+ exit(1);
+ }
+ } else if (env->flags & (POWERPC_FLAG_TGPR | POWERPC_FLAG_CE)) {
+ fprintf(stderr, "PowerPC MSR definition inconsistency\n"
+ "Should not define POWERPC_FLAG_TGPR nor POWERPC_FLAG_CE\n");
+ exit(1);
+ }
+ if (env->msr_mask & (1 << 10)) {
+ switch (env->flags & (POWERPC_FLAG_SE | POWERPC_FLAG_DWE |
+ POWERPC_FLAG_UBLE)) {
+ case POWERPC_FLAG_SE:
+ case POWERPC_FLAG_DWE:
+ case POWERPC_FLAG_UBLE:
+ break;
+ default:
+ fprintf(stderr, "PowerPC MSR definition inconsistency\n"
+ "Should define POWERPC_FLAG_SE or POWERPC_FLAG_DWE or "
+ "POWERPC_FLAG_UBLE\n");
+ exit(1);
+ }
+ } else if (env->flags & (POWERPC_FLAG_SE | POWERPC_FLAG_DWE |
+ POWERPC_FLAG_UBLE)) {
+ fprintf(stderr, "PowerPC MSR definition inconsistency\n"
+ "Should not define POWERPC_FLAG_SE nor POWERPC_FLAG_DWE nor "
+ "POWERPC_FLAG_UBLE\n");
+ exit(1);
+ }
+ if (env->msr_mask & (1 << 9)) {
+ switch (env->flags & (POWERPC_FLAG_BE | POWERPC_FLAG_DE)) {
+ case POWERPC_FLAG_BE:
+ case POWERPC_FLAG_DE:
+ break;
+ default:
+ fprintf(stderr, "PowerPC MSR definition inconsistency\n"
+ "Should define POWERPC_FLAG_BE or POWERPC_FLAG_DE\n");
+ exit(1);
+ }
+ } else if (env->flags & (POWERPC_FLAG_BE | POWERPC_FLAG_DE)) {
+ fprintf(stderr, "PowerPC MSR definition inconsistency\n"
+ "Should not define POWERPC_FLAG_BE nor POWERPC_FLAG_DE\n");
+ exit(1);
+ }
+ if (env->msr_mask & (1 << 2)) {
+ switch (env->flags & (POWERPC_FLAG_PX | POWERPC_FLAG_PMM)) {
+ case POWERPC_FLAG_PX:
+ case POWERPC_FLAG_PMM:
+ break;
+ default:
+ fprintf(stderr, "PowerPC MSR definition inconsistency\n"
+ "Should define POWERPC_FLAG_PX or POWERPC_FLAG_PMM\n");
+ exit(1);
+ }
+ } else if (env->flags & (POWERPC_FLAG_PX | POWERPC_FLAG_PMM)) {
+ fprintf(stderr, "PowerPC MSR definition inconsistency\n"
+ "Should not define POWERPC_FLAG_PX nor POWERPC_FLAG_PMM\n");
+ exit(1);
+ }
+ if ((env->flags & (POWERPC_FLAG_RTC_CLK | POWERPC_FLAG_BUS_CLK)) == 0) {
+ fprintf(stderr, "PowerPC flags inconsistency\n"
+ "Should define the time-base and decrementer clock source\n");
+ exit(1);
+ }
+ /* Allocate TLBs buffer when needed */
+#if !defined(CONFIG_USER_ONLY)
+ if (env->nb_tlb != 0) {
+ int nb_tlb = env->nb_tlb;
+ if (env->id_tlbs != 0)
+ nb_tlb *= 2;
+ switch (env->tlb_type) {
+ case TLB_6XX:
+ env->tlb.tlb6 = g_malloc0(nb_tlb * sizeof(ppc6xx_tlb_t));
+ break;
+ case TLB_EMB:
+ env->tlb.tlbe = g_malloc0(nb_tlb * sizeof(ppcemb_tlb_t));
+ break;
+ case TLB_MAS:
+ env->tlb.tlbm = g_malloc0(nb_tlb * sizeof(ppcmas_tlb_t));
+ break;
+ }
+ /* Pre-compute some useful values */
+ env->tlb_per_way = env->nb_tlb / env->nb_ways;
+ }
+ if (env->irq_inputs == NULL) {
+ fprintf(stderr, "WARNING: no internal IRQ controller registered.\n"
+ " Expect QEMU to crash very soon!\n");
+ }
+#endif
+ if (env->check_pow == NULL) {
+ fprintf(stderr, "WARNING: no power management check handler "
+ "registered.\n"
+ " Expect QEMU to crash very soon!\n");
+ }
+}
+
+#if defined(PPC_DUMP_CPU)
+static void dump_ppc_sprs (CPUPPCState *env)
+{
+ ppc_spr_t *spr;
+#if !defined(CONFIG_USER_ONLY)
+ uint32_t sr, sw;
+#endif
+ uint32_t ur, uw;
+ int i, j, n;
+
+ printf("Special purpose registers:\n");
+ for (i = 0; i < 32; i++) {
+ for (j = 0; j < 32; j++) {
+ n = (i << 5) | j;
+ spr = &env->spr_cb[n];
+ uw = spr->uea_write != NULL && spr->uea_write != SPR_NOACCESS;
+ ur = spr->uea_read != NULL && spr->uea_read != SPR_NOACCESS;
+#if !defined(CONFIG_USER_ONLY)
+ sw = spr->oea_write != NULL && spr->oea_write != SPR_NOACCESS;
+ sr = spr->oea_read != NULL && spr->oea_read != SPR_NOACCESS;
+ if (sw || sr || uw || ur) {
+ printf("SPR: %4d (%03x) %-8s s%c%c u%c%c\n",
+ (i << 5) | j, (i << 5) | j, spr->name,
+ sw ? 'w' : '-', sr ? 'r' : '-',
+ uw ? 'w' : '-', ur ? 'r' : '-');
+ }
+#else
+ if (uw || ur) {
+ printf("SPR: %4d (%03x) %-8s u%c%c\n",
+ (i << 5) | j, (i << 5) | j, spr->name,
+ uw ? 'w' : '-', ur ? 'r' : '-');
+ }
+#endif
+ }
+ }
+ fflush(stdout);
+ fflush(stderr);
+}
+#endif
+
+/*****************************************************************************/
+
+/* Opcode types */
+enum {
+ PPC_DIRECT = 0, /* Opcode routine */
+ PPC_INDIRECT = 1, /* Indirect opcode table */
+};
+
+#define PPC_OPCODE_MASK 0x3
+
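+/* Indirect tables are stored with PPC_INDIRECT ORed into the low bits of
+ * the entry pointer; is_indirect_opcode() tests that tag and ind_table()
+ * masks it off to recover the real sub-table pointer.
+ */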
+static inline int is_indirect_opcode (void *handler)
+{
+ return ((uintptr_t)handler & PPC_OPCODE_MASK) == PPC_INDIRECT;
+}
+
+static inline opc_handler_t **ind_table(void *handler)
+{
+ return (opc_handler_t **)((uintptr_t)handler & ~PPC_OPCODE_MASK);
+}
+
+/* Instruction table creation */
+/* Opcodes tables creation */
+static void fill_new_table (opc_handler_t **table, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ table[i] = &invalid_handler;
+}
+
+static int create_new_table (opc_handler_t **table, unsigned char idx)
+{
+ opc_handler_t **tmp;
+
+ tmp = g_new(opc_handler_t *, PPC_CPU_INDIRECT_OPCODES_LEN);
+ fill_new_table(tmp, PPC_CPU_INDIRECT_OPCODES_LEN);
+ table[idx] = (opc_handler_t *)((uintptr_t)tmp | PPC_INDIRECT);
+
+ return 0;
+}
+
+static int insert_in_table (opc_handler_t **table, unsigned char idx,
+ opc_handler_t *handler)
+{
+ if (table[idx] != &invalid_handler)
+ return -1;
+ table[idx] = handler;
+
+ return 0;
+}
+
+static int register_direct_insn (opc_handler_t **ppc_opcodes,
+ unsigned char idx, opc_handler_t *handler)
+{
+ if (insert_in_table(ppc_opcodes, idx, handler) < 0) {
+ printf("*** ERROR: opcode %02x already assigned in main "
+ "opcode table\n", idx);
+#if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU)
+ printf(" Registered handler '%s' - new handler '%s'\n",
+ ppc_opcodes[idx]->oname, handler->oname);
+#endif
+ return -1;
+ }
+
+ return 0;
+}
+
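+/* Register @handler at [idx1][idx2], creating the intermediate indirect
+ * table if needed. A NULL handler only ensures that the indirect table
+ * exists.
+ */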
+static int register_ind_in_table (opc_handler_t **table,
+ unsigned char idx1, unsigned char idx2,
+ opc_handler_t *handler)
+{
+ if (table[idx1] == &invalid_handler) {
+ if (create_new_table(table, idx1) < 0) {
+ printf("*** ERROR: unable to create indirect table "
+ "idx=%02x\n", idx1);
+ return -1;
+ }
+ } else {
+ if (!is_indirect_opcode(table[idx1])) {
+ printf("*** ERROR: idx %02x already assigned to a direct "
+ "opcode\n", idx1);
+#if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU)
+ printf(" Registered handler '%s' - new handler '%s'\n",
+ ind_table(table[idx1])[idx2]->oname, handler->oname);
+#endif
+ return -1;
+ }
+ }
+ if (handler != NULL &&
+ insert_in_table(ind_table(table[idx1]), idx2, handler) < 0) {
+ printf("*** ERROR: opcode %02x already assigned in "
+ "opcode table %02x\n", idx2, idx1);
+#if defined(DO_PPC_STATISTICS) || defined(PPC_DUMP_CPU)
+ printf(" Registered handler '%s' - new handler '%s'\n",
+ ind_table(table[idx1])[idx2]->oname, handler->oname);
+#endif
+ return -1;
+ }
+
+ return 0;
+}
+
+static int register_ind_insn (opc_handler_t **ppc_opcodes,
+ unsigned char idx1, unsigned char idx2,
+ opc_handler_t *handler)
+{
+ return register_ind_in_table(ppc_opcodes, idx1, idx2, handler);
+}
+
+static int register_dblind_insn (opc_handler_t **ppc_opcodes,
+ unsigned char idx1, unsigned char idx2,
+ unsigned char idx3, opc_handler_t *handler)
+{
+ if (register_ind_in_table(ppc_opcodes, idx1, idx2, NULL) < 0) {
+ printf("*** ERROR: unable to join indirect table idx "
+ "[%02x-%02x]\n", idx1, idx2);
+ return -1;
+ }
+ if (register_ind_in_table(ind_table(ppc_opcodes[idx1]), idx2, idx3,
+ handler) < 0) {
+ printf("*** ERROR: unable to insert opcode "
+ "[%02x-%02x-%02x]\n", idx1, idx2, idx3);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int register_trplind_insn(opc_handler_t **ppc_opcodes,
+ unsigned char idx1, unsigned char idx2,
+ unsigned char idx3, unsigned char idx4,
+ opc_handler_t *handler)
+{
+ opc_handler_t **table;
+
+ if (register_ind_in_table(ppc_opcodes, idx1, idx2, NULL) < 0) {
+ printf("*** ERROR: unable to join indirect table idx "
+ "[%02x-%02x]\n", idx1, idx2);
+ return -1;
+ }
+ table = ind_table(ppc_opcodes[idx1]);
+ if (register_ind_in_table(table, idx2, idx3, NULL) < 0) {
+ printf("*** ERROR: unable to join 2nd-level indirect table idx "
+ "[%02x-%02x-%02x]\n", idx1, idx2, idx3);
+ return -1;
+ }
+ table = ind_table(table[idx2]);
+ if (register_ind_in_table(table, idx3, idx4, handler) < 0) {
+ printf("*** ERROR: unable to insert opcode "
+ "[%02x-%02x-%02x-%02x]\n", idx1, idx2, idx3, idx4);
+ return -1;
+ }
+ return 0;
+}
+
+static int register_insn (opc_handler_t **ppc_opcodes, opcode_t *insn)
+{
+ if (insn->opc2 != 0xFF) {
+ if (insn->opc3 != 0xFF) {
+ if (insn->opc4 != 0xFF) {
+ if (register_trplind_insn(ppc_opcodes, insn->opc1, insn->opc2,
+ insn->opc3, insn->opc4,
+ &insn->handler) < 0) {
+ return -1;
+ }
+ } else {
+ if (register_dblind_insn(ppc_opcodes, insn->opc1, insn->opc2,
+ insn->opc3, &insn->handler) < 0)
+ return -1;
+ }
+ } else {
+ if (register_ind_insn(ppc_opcodes, insn->opc1,
+ insn->opc2, &insn->handler) < 0)
+ return -1;
+ }
+ } else {
+ if (register_direct_insn(ppc_opcodes, insn->opc1, &insn->handler) < 0)
+ return -1;
+ }
+
+ return 0;
+}
+
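+/* Count the valid handlers in @table, freeing and invalidating any
+ * indirect sub-table that turns out to contain no valid entry.
+ */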
+static int test_opcode_table (opc_handler_t **table, int len)
+{
+ int i, count, tmp;
+
+ for (i = 0, count = 0; i < len; i++) {
+ /* Consistency fixup */
+ if (table[i] == NULL)
+ table[i] = &invalid_handler;
+ if (table[i] != &invalid_handler) {
+ if (is_indirect_opcode(table[i])) {
+ tmp = test_opcode_table(ind_table(table[i]),
+ PPC_CPU_INDIRECT_OPCODES_LEN);
+ if (tmp == 0) {
+ free(table[i]);
+ table[i] = &invalid_handler;
+ } else {
+ count++;
+ }
+ } else {
+ count++;
+ }
+ }
+ }
+
+ return count;
+}
+
+static void fix_opcode_tables (opc_handler_t **ppc_opcodes)
+{
+ if (test_opcode_table(ppc_opcodes, PPC_CPU_OPCODES_LEN) == 0)
+ printf("*** WARNING: no opcode defined!\n");
+}
+
+/*****************************************************************************/
+static void create_ppc_opcodes(PowerPCCPU *cpu, Error **errp)
+{
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
+ CPUPPCState *env = &cpu->env;
+ opcode_t *opc;
+
+ fill_new_table(env->opcodes, PPC_CPU_OPCODES_LEN);
+ for (opc = opcodes; opc < &opcodes[ARRAY_SIZE(opcodes)]; opc++) {
+ if (((opc->handler.type & pcc->insns_flags) != 0) ||
+ ((opc->handler.type2 & pcc->insns_flags2) != 0)) {
+ if (register_insn(env->opcodes, opc) < 0) {
+ error_setg(errp, "ERROR initializing PowerPC instruction "
+ "0x%02x 0x%02x 0x%02x", opc->opc1, opc->opc2,
+ opc->opc3);
+ return;
+ }
+ }
+ }
+ fix_opcode_tables(env->opcodes);
+ fflush(stdout);
+ fflush(stderr);
+}
+
+#if defined(PPC_DUMP_CPU)
+static void dump_ppc_insns (CPUPPCState *env)
+{
+ opc_handler_t **table, *handler;
+ const char *p, *q;
+ uint8_t opc1, opc2, opc3, opc4;
+
+ printf("Instructions set:\n");
+ /* opc1 is 6 bits long */
+ for (opc1 = 0x00; opc1 < PPC_CPU_OPCODES_LEN; opc1++) {
+ table = env->opcodes;
+ handler = table[opc1];
+ if (is_indirect_opcode(handler)) {
+ /* opc2 is 5 bits long */
+ for (opc2 = 0; opc2 < PPC_CPU_INDIRECT_OPCODES_LEN; opc2++) {
+ table = env->opcodes;
+ handler = env->opcodes[opc1];
+ table = ind_table(handler);
+ handler = table[opc2];
+ if (is_indirect_opcode(handler)) {
+ table = ind_table(handler);
+ /* opc3 is 5 bits long */
+ for (opc3 = 0; opc3 < PPC_CPU_INDIRECT_OPCODES_LEN;
+ opc3++) {
+ handler = table[opc3];
+ if (is_indirect_opcode(handler)) {
+ table = ind_table(handler);
+ /* opc4 is 5 bits long */
+ for (opc4 = 0; opc4 < PPC_CPU_INDIRECT_OPCODES_LEN;
+ opc4++) {
+ handler = table[opc4];
+ if (handler->handler != &gen_invalid) {
+ printf("INSN: %02x %02x %02x %02x -- "
+ "(%02d %04d %02d) : %s\n",
+ opc1, opc2, opc3, opc4,
+ opc1, (opc3 << 5) | opc2, opc4,
+ handler->oname);
+ }
+ }
+ } else {
+ if (handler->handler != &gen_invalid) {
+ /* Special hack to properly dump SPE insns */
+ p = strchr(handler->oname, '_');
+ if (p == NULL) {
+ printf("INSN: %02x %02x %02x (%02d %04d) : "
+ "%s\n",
+ opc1, opc2, opc3, opc1,
+ (opc3 << 5) | opc2,
+ handler->oname);
+ } else {
+ q = "speundef";
+ if ((p - handler->oname) != strlen(q)
+ || (memcmp(handler->oname, q, strlen(q))
+ != 0)) {
+ /* First instruction */
+ printf("INSN: %02x %02x %02x"
+ "(%02d %04d) : %.*s\n",
+ opc1, opc2 << 1, opc3, opc1,
+ (opc3 << 6) | (opc2 << 1),
+ (int)(p - handler->oname),
+ handler->oname);
+ }
+ if (strcmp(p + 1, q) != 0) {
+ /* Second instruction */
+ printf("INSN: %02x %02x %02x "
+ "(%02d %04d) : %s\n", opc1,
+ (opc2 << 1) | 1, opc3, opc1,
+ (opc3 << 6) | (opc2 << 1) | 1,
+ p + 1);
+ }
+ }
+ }
+ }
+ }
+ } else {
+ if (handler->handler != &gen_invalid) {
+ printf("INSN: %02x %02x -- (%02d %04d) : %s\n",
+ opc1, opc2, opc1, opc2, handler->oname);
+ }
+ }
+ }
+ } else {
+ if (handler->handler != &gen_invalid) {
+ printf("INSN: %02x -- -- (%02d ----) : %s\n",
+ opc1, opc1, handler->oname);
+ }
+ }
+ }
+}
+#endif
+
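+/* The two 64-bit halves of an Altivec register are stored in host order;
+ * swap them for gdb whenever the guest endianness (MSR_LE) differs from
+ * the host's.
+ */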
+static bool avr_need_swap(CPUPPCState *env)
+{
+#ifdef HOST_WORDS_BIGENDIAN
+ return msr_le;
+#else
+ return !msr_le;
+#endif
+}
+
+static int gdb_get_float_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
+{
+ if (n < 32) {
+ stfq_p(mem_buf, env->fpr[n]);
+ ppc_maybe_bswap_register(env, mem_buf, 8);
+ return 8;
+ }
+ if (n == 32) {
+ stl_p(mem_buf, env->fpscr);
+ ppc_maybe_bswap_register(env, mem_buf, 4);
+ return 4;
+ }
+ return 0;
+}
+
+static int gdb_set_float_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
+{
+ if (n < 32) {
+ ppc_maybe_bswap_register(env, mem_buf, 8);
+ env->fpr[n] = ldfq_p(mem_buf);
+ return 8;
+ }
+ if (n == 32) {
+ ppc_maybe_bswap_register(env, mem_buf, 4);
+ helper_store_fpscr(env, ldl_p(mem_buf), 0xffffffff);
+ return 4;
+ }
+ return 0;
+}
+
+static int gdb_get_avr_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
+{
+ if (n < 32) {
+ if (!avr_need_swap(env)) {
+ stq_p(mem_buf, env->avr[n].u64[0]);
+ stq_p(mem_buf+8, env->avr[n].u64[1]);
+ } else {
+ stq_p(mem_buf, env->avr[n].u64[1]);
+ stq_p(mem_buf+8, env->avr[n].u64[0]);
+ }
+ ppc_maybe_bswap_register(env, mem_buf, 8);
+ ppc_maybe_bswap_register(env, mem_buf + 8, 8);
+ return 16;
+ }
+ if (n == 32) {
+ stl_p(mem_buf, env->vscr);
+ ppc_maybe_bswap_register(env, mem_buf, 4);
+ return 4;
+ }
+ if (n == 33) {
+ stl_p(mem_buf, (uint32_t)env->spr[SPR_VRSAVE]);
+ ppc_maybe_bswap_register(env, mem_buf, 4);
+ return 4;
+ }
+ return 0;
+}
+
+static int gdb_set_avr_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
+{
+ if (n < 32) {
+ ppc_maybe_bswap_register(env, mem_buf, 8);
+ ppc_maybe_bswap_register(env, mem_buf + 8, 8);
+ if (!avr_need_swap(env)) {
+ env->avr[n].u64[0] = ldq_p(mem_buf);
+ env->avr[n].u64[1] = ldq_p(mem_buf+8);
+ } else {
+ env->avr[n].u64[1] = ldq_p(mem_buf);
+ env->avr[n].u64[0] = ldq_p(mem_buf+8);
+ }
+ return 16;
+ }
+ if (n == 32) {
+ ppc_maybe_bswap_register(env, mem_buf, 4);
+ env->vscr = ldl_p(mem_buf);
+ return 4;
+ }
+ if (n == 33) {
+ ppc_maybe_bswap_register(env, mem_buf, 4);
+ env->spr[SPR_VRSAVE] = (target_ulong)ldl_p(mem_buf);
+ return 4;
+ }
+ return 0;
+}
+
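+/* SPE exposes the upper 32 bits of each GPR: on 64-bit targets they are
+ * the top half of gpr[n], on 32-bit targets they live in gprh[n].
+ */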
+static int gdb_get_spe_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
+{
+ if (n < 32) {
+#if defined(TARGET_PPC64)
+ stl_p(mem_buf, env->gpr[n] >> 32);
+ ppc_maybe_bswap_register(env, mem_buf, 4);
+#else
+ stl_p(mem_buf, env->gprh[n]);
+#endif
+ return 4;
+ }
+ if (n == 32) {
+ stq_p(mem_buf, env->spe_acc);
+ ppc_maybe_bswap_register(env, mem_buf, 8);
+ return 8;
+ }
+ if (n == 33) {
+ stl_p(mem_buf, env->spe_fscr);
+ ppc_maybe_bswap_register(env, mem_buf, 4);
+ return 4;
+ }
+ return 0;
+}
+
+static int gdb_set_spe_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
+{
+ if (n < 32) {
+#if defined(TARGET_PPC64)
+ target_ulong lo = (uint32_t)env->gpr[n];
+ target_ulong hi;
+
+ ppc_maybe_bswap_register(env, mem_buf, 4);
+
+ hi = (target_ulong)ldl_p(mem_buf) << 32;
+ env->gpr[n] = lo | hi;
+#else
+ env->gprh[n] = ldl_p(mem_buf);
+#endif
+ return 4;
+ }
+ if (n == 32) {
+ ppc_maybe_bswap_register(env, mem_buf, 8);
+ env->spe_acc = ldq_p(mem_buf);
+ return 8;
+ }
+ if (n == 33) {
+ ppc_maybe_bswap_register(env, mem_buf, 4);
+ env->spe_fscr = ldl_p(mem_buf);
+ return 4;
+ }
+ return 0;
+}
+
+static int gdb_get_vsx_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
+{
+ if (n < 32) {
+ stq_p(mem_buf, env->vsr[n]);
+ ppc_maybe_bswap_register(env, mem_buf, 8);
+ return 8;
+ }
+ return 0;
+}
+
+static int gdb_set_vsx_reg(CPUPPCState *env, uint8_t *mem_buf, int n)
+{
+ if (n < 32) {
+ ppc_maybe_bswap_register(env, mem_buf, 8);
+ env->vsr[n] = ldq_p(mem_buf);
+ return 8;
+ }
+ return 0;
+}
+
+static int ppc_fixup_cpu(PowerPCCPU *cpu)
+{
+ CPUPPCState *env = &cpu->env;
+
+ /* TCG doesn't (yet) emulate some groups of instructions that
+ * are implemented on some otherwise supported CPUs (e.g. VSX
+ * and decimal floating point instructions on POWER7). We
+ * remove unsupported instruction groups from the cpu state's
+ * instruction masks and hope the guest can cope. For at
+ * least the pseries machine, the unavailability of these
+ * instructions can be advertised to the guest via the device
+ * tree. */
+ if ((env->insns_flags & ~PPC_TCG_INSNS)
+ || (env->insns_flags2 & ~PPC_TCG_INSNS2)) {
+ fprintf(stderr, "Warning: Disabling some instructions which are not "
+ "emulated by TCG (0x%" PRIx64 ", 0x%" PRIx64 ")\n",
+ env->insns_flags & ~PPC_TCG_INSNS,
+ env->insns_flags2 & ~PPC_TCG_INSNS2);
+ }
+ env->insns_flags &= PPC_TCG_INSNS;
+ env->insns_flags2 &= PPC_TCG_INSNS2;
+ return 0;
+}
+
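+/* The ppcemb target only supports CPUs with a BookE or 4xx MMU. */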
+static inline bool ppc_cpu_is_valid(PowerPCCPUClass *pcc)
+{
+#ifdef TARGET_PPCEMB
+ return pcc->mmu_model == POWERPC_MMU_BOOKE ||
+ pcc->mmu_model == POWERPC_MMU_SOFT_4xx ||
+ pcc->mmu_model == POWERPC_MMU_SOFT_4xx_Z;
+#else
+ return true;
+#endif
+}
+
+static void ppc_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ PowerPCCPU *cpu = POWERPC_CPU(dev);
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
+ Error *local_err = NULL;
+#if !defined(CONFIG_USER_ONLY)
+ int max_smt = kvmppc_smt_threads();
+#endif
+
+#if !defined(CONFIG_USER_ONLY)
+ if (smp_threads > max_smt) {
+ error_setg(errp, "Cannot support more than %d threads on PPC with %s",
+ max_smt, kvm_enabled() ? "KVM" : "TCG");
+ return;
+ }
+ if (!is_power_of_2(smp_threads)) {
+ error_setg(errp, "Cannot support %d threads on PPC with %s; "
+ "the thread count must be a power of 2.",
+ smp_threads, kvm_enabled() ? "KVM" : "TCG");
+ return;
+ }
+#endif
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+#if !defined(CONFIG_USER_ONLY)
+ cpu->cpu_dt_id = (cs->cpu_index / smp_threads) * max_smt
+ + (cs->cpu_index % smp_threads);
+
+ if (kvm_enabled() && !kvm_vcpu_id_is_valid(cpu->cpu_dt_id)) {
+ error_setg(errp, "Can't create CPU with id %d in KVM", cpu->cpu_dt_id);
+ error_append_hint(errp, "Adjust the number of cpus to %d "
+ "or try to raise the number of threads per core\n",
+ cpu->cpu_dt_id * smp_threads / max_smt);
+ return;
+ }
+#endif
+
+ if (tcg_enabled()) {
+ if (ppc_fixup_cpu(cpu) != 0) {
+ error_setg(errp, "Unable to emulate selected CPU with TCG");
+ return;
+ }
+ }
+
+#if defined(TARGET_PPCEMB)
+ if (!ppc_cpu_is_valid(pcc)) {
+ error_setg(errp, "CPU does not possess a BookE or 4xx MMU. "
+ "Please use qemu-system-ppc or qemu-system-ppc64 instead "
+ "or choose another CPU model.");
+ return;
+ }
+#endif
+
+ create_ppc_opcodes(cpu, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+ init_ppc_proc(cpu);
+
+ if (pcc->insns_flags & PPC_FLOAT) {
+ gdb_register_coprocessor(cs, gdb_get_float_reg, gdb_set_float_reg,
+ 33, "power-fpu.xml", 0);
+ }
+ if (pcc->insns_flags & PPC_ALTIVEC) {
+ gdb_register_coprocessor(cs, gdb_get_avr_reg, gdb_set_avr_reg,
+ 34, "power-altivec.xml", 0);
+ }
+ if (pcc->insns_flags & PPC_SPE) {
+ gdb_register_coprocessor(cs, gdb_get_spe_reg, gdb_set_spe_reg,
+ 34, "power-spe.xml", 0);
+ }
+ if (pcc->insns_flags2 & PPC2_VSX) {
+ gdb_register_coprocessor(cs, gdb_get_vsx_reg, gdb_set_vsx_reg,
+ 32, "power-vsx.xml", 0);
+ }
+
+ qemu_init_vcpu(cs);
+
+ pcc->parent_realize(dev, errp);
+
+#if defined(PPC_DUMP_CPU)
+ {
+ CPUPPCState *env = &cpu->env;
+ const char *mmu_model, *excp_model, *bus_model;
+ switch (env->mmu_model) {
+ case POWERPC_MMU_32B:
+ mmu_model = "PowerPC 32";
+ break;
+ case POWERPC_MMU_SOFT_6xx:
+ mmu_model = "PowerPC 6xx/7xx with software driven TLBs";
+ break;
+ case POWERPC_MMU_SOFT_74xx:
+ mmu_model = "PowerPC 74xx with software driven TLBs";
+ break;
+ case POWERPC_MMU_SOFT_4xx:
+ mmu_model = "PowerPC 4xx with software driven TLBs";
+ break;
+ case POWERPC_MMU_SOFT_4xx_Z:
+ mmu_model = "PowerPC 4xx with software driven TLBs "
+ "and zones protections";
+ break;
+ case POWERPC_MMU_REAL:
+ mmu_model = "PowerPC real mode only";
+ break;
+ case POWERPC_MMU_MPC8xx:
+ mmu_model = "PowerPC MPC8xx";
+ break;
+ case POWERPC_MMU_BOOKE:
+ mmu_model = "PowerPC BookE";
+ break;
+ case POWERPC_MMU_BOOKE206:
+ mmu_model = "PowerPC BookE 2.06";
+ break;
+ case POWERPC_MMU_601:
+ mmu_model = "PowerPC 601";
+ break;
+#if defined (TARGET_PPC64)
+ case POWERPC_MMU_64B:
+ mmu_model = "PowerPC 64";
+ break;
+#endif
+ default:
+ mmu_model = "Unknown or invalid";
+ break;
+ }
+ switch (env->excp_model) {
+ case POWERPC_EXCP_STD:
+ excp_model = "PowerPC";
+ break;
+ case POWERPC_EXCP_40x:
+ excp_model = "PowerPC 40x";
+ break;
+ case POWERPC_EXCP_601:
+ excp_model = "PowerPC 601";
+ break;
+ case POWERPC_EXCP_602:
+ excp_model = "PowerPC 602";
+ break;
+ case POWERPC_EXCP_603:
+ excp_model = "PowerPC 603";
+ break;
+ case POWERPC_EXCP_603E:
+ excp_model = "PowerPC 603e";
+ break;
+ case POWERPC_EXCP_604:
+ excp_model = "PowerPC 604";
+ break;
+ case POWERPC_EXCP_7x0:
+ excp_model = "PowerPC 740/750";
+ break;
+ case POWERPC_EXCP_7x5:
+ excp_model = "PowerPC 745/755";
+ break;
+ case POWERPC_EXCP_74xx:
+ excp_model = "PowerPC 74xx";
+ break;
+ case POWERPC_EXCP_BOOKE:
+ excp_model = "PowerPC BookE";
+ break;
+#if defined (TARGET_PPC64)
+ case POWERPC_EXCP_970:
+ excp_model = "PowerPC 970";
+ break;
+#endif
+ default:
+ excp_model = "Unknown or invalid";
+ break;
+ }
+ switch (env->bus_model) {
+ case PPC_FLAGS_INPUT_6xx:
+ bus_model = "PowerPC 6xx";
+ break;
+ case PPC_FLAGS_INPUT_BookE:
+ bus_model = "PowerPC BookE";
+ break;
+ case PPC_FLAGS_INPUT_405:
+ bus_model = "PowerPC 405";
+ break;
+ case PPC_FLAGS_INPUT_401:
+ bus_model = "PowerPC 401/403";
+ break;
+ case PPC_FLAGS_INPUT_RCPU:
+ bus_model = "RCPU / MPC8xx";
+ break;
+#if defined (TARGET_PPC64)
+ case PPC_FLAGS_INPUT_970:
+ bus_model = "PowerPC 970";
+ break;
+#endif
+ default:
+ bus_model = "Unknown or invalid";
+ break;
+ }
+ printf("PowerPC %-12s : PVR %08x MSR %016" PRIx64 "\n"
+ " MMU model : %s\n",
+ object_class_get_name(OBJECT_CLASS(pcc)),
+ pcc->pvr, pcc->msr_mask, mmu_model);
+#if !defined(CONFIG_USER_ONLY)
+ if (env->tlb.tlb6) {
+ printf(" %d %s TLB in %d ways\n",
+ env->nb_tlb, env->id_tlbs ? "split" : "merged",
+ env->nb_ways);
+ }
+#endif
+ printf(" Exceptions model : %s\n"
+ " Bus model : %s\n",
+ excp_model, bus_model);
+ printf(" MSR features :\n");
+ if (env->flags & POWERPC_FLAG_SPE)
+ printf(" signal processing engine enable"
+ "\n");
+ else if (env->flags & POWERPC_FLAG_VRE)
+ printf(" vector processor enable\n");
+ if (env->flags & POWERPC_FLAG_TGPR)
+ printf(" temporary GPRs\n");
+ else if (env->flags & POWERPC_FLAG_CE)
+ printf(" critical input enable\n");
+ if (env->flags & POWERPC_FLAG_SE)
+ printf(" single-step trace mode\n");
+ else if (env->flags & POWERPC_FLAG_DWE)
+ printf(" debug wait enable\n");
+ else if (env->flags & POWERPC_FLAG_UBLE)
+ printf(" user BTB lock enable\n");
+ if (env->flags & POWERPC_FLAG_BE)
+ printf(" branch-step trace mode\n");
+ else if (env->flags & POWERPC_FLAG_DE)
+ printf(" debug interrupt enable\n");
+ if (env->flags & POWERPC_FLAG_PX)
+ printf(" inclusive protection\n");
+ else if (env->flags & POWERPC_FLAG_PMM)
+ printf(" performance monitor mark\n");
+ if (env->flags == POWERPC_FLAG_NONE)
+ printf(" none\n");
+ printf(" Time-base/decrementer clock source: %s\n",
+ env->flags & POWERPC_FLAG_RTC_CLK ? "RTC clock" : "bus clock");
+ dump_ppc_insns(env);
+ dump_ppc_sprs(env);
+ fflush(stdout);
+ }
+#endif
+}
+
+static void ppc_cpu_unrealizefn(DeviceState *dev, Error **errp)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(dev);
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
+ CPUPPCState *env = &cpu->env;
+ Error *local_err = NULL;
+ opc_handler_t **table, **table_2;
+ int i, j, k;
+
+ pcc->parent_unrealize(dev, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
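+ /* Free the dynamically allocated indirect opcode sub-tables, masking
+ * off the PPC_INDIRECT tag before each g_free(). */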
+ for (i = 0; i < PPC_CPU_OPCODES_LEN; i++) {
+ if (env->opcodes[i] == &invalid_handler) {
+ continue;
+ }
+ if (is_indirect_opcode(env->opcodes[i])) {
+ table = ind_table(env->opcodes[i]);
+ for (j = 0; j < PPC_CPU_INDIRECT_OPCODES_LEN; j++) {
+ if (table[j] == &invalid_handler) {
+ continue;
+ }
+ if (is_indirect_opcode(table[j])) {
+ table_2 = ind_table(table[j]);
+ for (k = 0; k < PPC_CPU_INDIRECT_OPCODES_LEN; k++) {
+ if (table_2[k] != &invalid_handler &&
+ is_indirect_opcode(table_2[k])) {
+ g_free((opc_handler_t *)((uintptr_t)table_2[k] &
+ ~PPC_INDIRECT));
+ }
+ }
+ g_free((opc_handler_t *)((uintptr_t)table[j] &
+ ~PPC_INDIRECT));
+ }
+ }
+ g_free((opc_handler_t *)((uintptr_t)env->opcodes[i] &
+ ~PPC_INDIRECT));
+ }
+ }
+}
+
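+/* Number of SMT threads usable under the selected compatibility mode:
+ * the logical 2.05/2.06/2.07 modes cap it at 2, 4 and 8 respectively.
+ */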
+int ppc_get_compat_smt_threads(PowerPCCPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ int ret = MIN(cs->nr_threads, kvmppc_smt_threads());
+
+ switch (cpu->cpu_version) {
+ case CPU_POWERPC_LOGICAL_2_05:
+ ret = MIN(ret, 2);
+ break;
+ case CPU_POWERPC_LOGICAL_2_06:
+ ret = MIN(ret, 4);
+ break;
+ case CPU_POWERPC_LOGICAL_2_07:
+ ret = MIN(ret, 8);
+ break;
+ }
+
+ return ret;
+}
+
+#ifdef TARGET_PPC64
+void ppc_set_compat(PowerPCCPU *cpu, uint32_t cpu_version, Error **errp)
+{
+ int ret = 0;
+ CPUPPCState *env = &cpu->env;
+ PowerPCCPUClass *host_pcc;
+
+ cpu->cpu_version = cpu_version;
+
+ switch (cpu_version) {
+ case CPU_POWERPC_LOGICAL_2_05:
+ env->spr[SPR_PCR] = PCR_TM_DIS | PCR_VSX_DIS | PCR_COMPAT_2_07 |
+ PCR_COMPAT_2_06 | PCR_COMPAT_2_05;
+ break;
+ case CPU_POWERPC_LOGICAL_2_06:
+ case CPU_POWERPC_LOGICAL_2_06_PLUS:
+ env->spr[SPR_PCR] = PCR_TM_DIS | PCR_COMPAT_2_07 | PCR_COMPAT_2_06;
+ break;
+ case CPU_POWERPC_LOGICAL_2_07:
+ env->spr[SPR_PCR] = PCR_COMPAT_2_07;
+ break;
+ default:
+ env->spr[SPR_PCR] = 0;
+ break;
+ }
+
+ host_pcc = kvm_ppc_get_host_cpu_class();
+ if (host_pcc) {
+ env->spr[SPR_PCR] &= host_pcc->pcr_mask;
+ }
+
+ if (kvm_enabled()) {
+ ret = kvmppc_set_compat(cpu, cpu->cpu_version);
+ if (ret < 0) {
+ error_setg_errno(errp, -ret,
+ "Unable to set CPU compatibility mode in KVM");
+ }
+ }
+}
+#endif
+
+static gint ppc_cpu_compare_class_pvr(gconstpointer a, gconstpointer b)
+{
+ ObjectClass *oc = (ObjectClass *)a;
+ uint32_t pvr = *(uint32_t *)b;
+ PowerPCCPUClass *pcc = (PowerPCCPUClass *)a;
+
+ /* -cpu host does a PVR lookup during construction */
+ if (unlikely(strcmp(object_class_get_name(oc),
+ TYPE_HOST_POWERPC_CPU) == 0)) {
+ return -1;
+ }
+
+ if (!ppc_cpu_is_valid(pcc)) {
+ return -1;
+ }
+
+ return pcc->pvr == pvr ? 0 : -1;
+}
+
+PowerPCCPUClass *ppc_cpu_class_by_pvr(uint32_t pvr)
+{
+ GSList *list, *item;
+ PowerPCCPUClass *pcc = NULL;
+
+ list = object_class_get_list(TYPE_POWERPC_CPU, false);
+ item = g_slist_find_custom(list, &pvr, ppc_cpu_compare_class_pvr);
+ if (item != NULL) {
+ pcc = POWERPC_CPU_CLASS(item->data);
+ }
+ g_slist_free(list);
+
+ return pcc;
+}
+
+static gint ppc_cpu_compare_class_pvr_mask(gconstpointer a, gconstpointer b)
+{
+ ObjectClass *oc = (ObjectClass *)a;
+ uint32_t pvr = *(uint32_t *)b;
+ PowerPCCPUClass *pcc = (PowerPCCPUClass *)a;
+
+ /* -cpu host does a PVR lookup during construction */
+ if (unlikely(strcmp(object_class_get_name(oc),
+ TYPE_HOST_POWERPC_CPU) == 0)) {
+ return -1;
+ }
+
+ if (!ppc_cpu_is_valid(pcc)) {
+ return -1;
+ }
+
+ if (pcc->pvr_match(pcc, pvr)) {
+ return 0;
+ }
+
+ return -1;
+}
+
+PowerPCCPUClass *ppc_cpu_class_by_pvr_mask(uint32_t pvr)
+{
+ GSList *list, *item;
+ PowerPCCPUClass *pcc = NULL;
+
+ list = object_class_get_list(TYPE_POWERPC_CPU, true);
+ item = g_slist_find_custom(list, &pvr, ppc_cpu_compare_class_pvr_mask);
+ if (item != NULL) {
+ pcc = POWERPC_CPU_CLASS(item->data);
+ }
+ g_slist_free(list);
+
+ return pcc;
+}
+
+static gint ppc_cpu_compare_class_name(gconstpointer a, gconstpointer b)
+{
+ ObjectClass *oc = (ObjectClass *)a;
+ const char *name = b;
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ if (strncasecmp(name, object_class_get_name(oc), strlen(name)) == 0 &&
+ ppc_cpu_is_valid(pcc) &&
+ strcmp(object_class_get_name(oc) + strlen(name),
+ "-" TYPE_POWERPC_CPU) == 0) {
+ return 0;
+ }
+ return -1;
+}
+
+
+static ObjectClass *ppc_cpu_class_by_name(const char *name);
+
+static ObjectClass *ppc_cpu_class_by_alias(PowerPCCPUAlias *alias)
+{
+ ObjectClass *invalid_class = (void*)ppc_cpu_class_by_alias;
+
+ /* Cache target class lookups in the alias table */
+ if (!alias->oc) {
+ alias->oc = ppc_cpu_class_by_name(alias->model);
+ if (!alias->oc) {
+ /* Fast check for non-existing aliases */
+ alias->oc = invalid_class;
+ }
+ }
+
+ if (alias->oc == invalid_class) {
+ return NULL;
+ } else {
+ return alias->oc;
+ }
+}
+
+static ObjectClass *ppc_cpu_class_by_name(const char *name)
+{
+ GSList *list, *item;
+ ObjectClass *ret = NULL;
+ const char *p;
+ int i, len;
+
+ /* Check if the given name is a PVR */
+ len = strlen(name);
+ if (len == 10 && name[0] == '0' && name[1] == 'x') {
+ p = name + 2;
+ goto check_pvr;
+ } else if (len == 8) {
+ p = name;
+ check_pvr:
+ for (i = 0; i < 8; i++) {
+ if (!qemu_isxdigit(*p++))
+ break;
+ }
+ if (i == 8) {
+ return OBJECT_CLASS(ppc_cpu_class_by_pvr(strtoul(name, NULL, 16)));
+ }
+ }
+
+ list = object_class_get_list(TYPE_POWERPC_CPU, false);
+ item = g_slist_find_custom(list, name, ppc_cpu_compare_class_name);
+ if (item != NULL) {
+ ret = OBJECT_CLASS(item->data);
+ }
+ g_slist_free(list);
+
+ if (ret) {
+ return ret;
+ }
+
+ for (i = 0; ppc_cpu_aliases[i].alias != NULL; i++) {
+ if (strcmp(ppc_cpu_aliases[i].alias, name) == 0) {
+ return ppc_cpu_class_by_alias(&ppc_cpu_aliases[i]);
+ }
+ }
+
+ return NULL;
+}
+
+const char *ppc_cpu_lookup_alias(const char *alias)
+{
+ int ai;
+
+ for (ai = 0; ppc_cpu_aliases[ai].alias != NULL; ai++) {
+ if (strcmp(ppc_cpu_aliases[ai].alias, alias) == 0) {
+ return ppc_cpu_aliases[ai].model;
+ }
+ }
+
+ return NULL;
+}
+
+PowerPCCPU *cpu_ppc_init(const char *cpu_model)
+{
+ return POWERPC_CPU(cpu_generic_init(TYPE_POWERPC_CPU, cpu_model));
+}
+
+/* Sort by PVR, ordering special case "host" last. */
+static gint ppc_cpu_list_compare(gconstpointer a, gconstpointer b)
+{
+ ObjectClass *oc_a = (ObjectClass *)a;
+ ObjectClass *oc_b = (ObjectClass *)b;
+ PowerPCCPUClass *pcc_a = POWERPC_CPU_CLASS(oc_a);
+ PowerPCCPUClass *pcc_b = POWERPC_CPU_CLASS(oc_b);
+ const char *name_a = object_class_get_name(oc_a);
+ const char *name_b = object_class_get_name(oc_b);
+
+ if (strcmp(name_a, TYPE_HOST_POWERPC_CPU) == 0) {
+ return 1;
+ } else if (strcmp(name_b, TYPE_HOST_POWERPC_CPU) == 0) {
+ return -1;
+ } else {
+ /* Avoid an integer overflow during subtraction */
+ if (pcc_a->pvr < pcc_b->pvr) {
+ return -1;
+ } else if (pcc_a->pvr > pcc_b->pvr) {
+ return 1;
+ } else {
+ return 0;
+ }
+ }
+}
+
+static void ppc_cpu_list_entry(gpointer data, gpointer user_data)
+{
+ ObjectClass *oc = data;
+ CPUListState *s = user_data;
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+ const char *typename = object_class_get_name(oc);
+ char *name;
+ int i;
+
+ if (!ppc_cpu_is_valid(pcc)) {
+ return;
+ }
+ if (unlikely(strcmp(typename, TYPE_HOST_POWERPC_CPU) == 0)) {
+ return;
+ }
+
+ name = g_strndup(typename,
+ strlen(typename) - strlen("-" TYPE_POWERPC_CPU));
+ (*s->cpu_fprintf)(s->file, "PowerPC %-16s PVR %08x\n",
+ name, pcc->pvr);
+ for (i = 0; ppc_cpu_aliases[i].alias != NULL; i++) {
+ PowerPCCPUAlias *alias = &ppc_cpu_aliases[i];
+ ObjectClass *alias_oc = ppc_cpu_class_by_alias(alias);
+
+ if (alias_oc != oc) {
+ continue;
+ }
+ (*s->cpu_fprintf)(s->file, "PowerPC %-16s (alias for %s)\n",
+ alias->alias, name);
+ }
+ g_free(name);
+}
+
+void ppc_cpu_list(FILE *f, fprintf_function cpu_fprintf)
+{
+ CPUListState s = {
+ .file = f,
+ .cpu_fprintf = cpu_fprintf,
+ };
+ GSList *list;
+
+ list = object_class_get_list(TYPE_POWERPC_CPU, false);
+ list = g_slist_sort(list, ppc_cpu_list_compare);
+ g_slist_foreach(list, ppc_cpu_list_entry, &s);
+ g_slist_free(list);
+
+#ifdef CONFIG_KVM
+ cpu_fprintf(f, "\n");
+ cpu_fprintf(f, "PowerPC %-16s\n", "host");
+#endif
+}
+
+static void ppc_cpu_defs_entry(gpointer data, gpointer user_data)
+{
+ ObjectClass *oc = data;
+ CpuDefinitionInfoList **first = user_data;
+ const char *typename;
+ CpuDefinitionInfoList *entry;
+ CpuDefinitionInfo *info;
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+
+ if (!ppc_cpu_is_valid(pcc)) {
+ return;
+ }
+
+ typename = object_class_get_name(oc);
+ info = g_malloc0(sizeof(*info));
+ info->name = g_strndup(typename,
+ strlen(typename) - strlen("-" TYPE_POWERPC_CPU));
+
+ entry = g_malloc0(sizeof(*entry));
+ entry->value = info;
+ entry->next = *first;
+ *first = entry;
+}
+
+CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
+{
+ CpuDefinitionInfoList *cpu_list = NULL;
+ GSList *list;
+ int i;
+
+ list = object_class_get_list(TYPE_POWERPC_CPU, false);
+ g_slist_foreach(list, ppc_cpu_defs_entry, &cpu_list);
+ g_slist_free(list);
+
+ for (i = 0; ppc_cpu_aliases[i].alias != NULL; i++) {
+ PowerPCCPUAlias *alias = &ppc_cpu_aliases[i];
+ ObjectClass *oc;
+ CpuDefinitionInfoList *entry;
+ CpuDefinitionInfo *info;
+
+ oc = ppc_cpu_class_by_alias(alias);
+ if (oc == NULL) {
+ continue;
+ }
+
+ info = g_malloc0(sizeof(*info));
+ info->name = g_strdup(alias->alias);
+
+ entry = g_malloc0(sizeof(*entry));
+ entry->value = info;
+ entry->next = cpu_list;
+ cpu_list = entry;
+ }
+
+ return cpu_list;
+}
+
+static void ppc_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+
+ cpu->env.nip = value;
+}
+
+static bool ppc_cpu_has_work(CPUState *cs)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+
+ return msr_ee && (cs->interrupt_request & CPU_INTERRUPT_HARD);
+}
+
+static void ppc_cpu_exec_enter(CPUState *cs)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+
+ env->reserve_addr = -1;
+}
+
+/* CPUClass::reset() */
+static void ppc_cpu_reset(CPUState *s)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(s);
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
+ CPUPPCState *env = &cpu->env;
+ target_ulong msr;
+ int i;
+
+ pcc->parent_reset(s);
+
+ msr = (target_ulong)0;
+ msr |= (target_ulong)MSR_HVB;
+ msr |= (target_ulong)0 << MSR_AP; /* TO BE CHECKED */
+ msr |= (target_ulong)0 << MSR_SA; /* TO BE CHECKED */
+ msr |= (target_ulong)1 << MSR_EP;
+#if defined(DO_SINGLE_STEP) && 0
+ /* Single step trace mode */
+ msr |= (target_ulong)1 << MSR_SE;
+ msr |= (target_ulong)1 << MSR_BE;
+#endif
+#if defined(CONFIG_USER_ONLY)
+ msr |= (target_ulong)1 << MSR_FP; /* Allow floating point usage */
+ msr |= (target_ulong)1 << MSR_VR; /* Allow altivec usage */
+ msr |= (target_ulong)1 << MSR_VSX; /* Allow VSX usage */
+ msr |= (target_ulong)1 << MSR_SPE; /* Allow SPE usage */
+ msr |= (target_ulong)1 << MSR_PR;
+#if defined(TARGET_PPC64)
+ msr |= (target_ulong)1 << MSR_TM; /* Transactional memory */
+#endif
+#if !defined(TARGET_WORDS_BIGENDIAN)
+ msr |= (target_ulong)1 << MSR_LE; /* Little-endian user mode */
+ if (!((env->msr_mask >> MSR_LE) & 1)) {
+ fprintf(stderr, "Selected CPU does not support little-endian.\n");
+ exit(1);
+ }
+#endif
+#endif
+
+#if defined(TARGET_PPC64)
+ if (env->mmu_model & POWERPC_MMU_64) {
+ msr |= (1ULL << MSR_SF);
+ }
+#endif
+
+ hreg_store_msr(env, msr, 1);
+
+#if !defined(CONFIG_USER_ONLY)
+ env->nip = env->hreset_vector | env->excp_prefix;
+ if (env->mmu_model != POWERPC_MMU_REAL) {
+ ppc_tlb_invalidate_all(env);
+ }
+#endif
+
+ hreg_compute_hflags(env);
+ env->reserve_addr = (target_ulong)-1ULL;
+ /* Be sure no exception or interrupt is pending */
+ env->pending_interrupts = 0;
+ s->exception_index = POWERPC_EXCP_NONE;
+ env->error_code = 0;
+
+#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
+ env->vpa_addr = 0;
+ env->slb_shadow_addr = 0;
+ env->slb_shadow_size = 0;
+ env->dtl_addr = 0;
+ env->dtl_size = 0;
+#endif /* TARGET_PPC64 */
+
+ for (i = 0; i < ARRAY_SIZE(env->spr_cb); i++) {
+ ppc_spr_t *spr = &env->spr_cb[i];
+
+ if (!spr->name) {
+ continue;
+ }
+ env->spr[i] = spr->default_value;
+ }
+
+ /* Flush all TLBs */
+ tlb_flush(s, 1);
+}
+
+#ifndef CONFIG_USER_ONLY
+static bool ppc_cpu_is_big_endian(CPUState *cs)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+
+ cpu_synchronize_state(cs);
+
+ return !msr_le;
+}
+#endif
+
+static void ppc_cpu_initfn(Object *obj)
+{
+ CPUState *cs = CPU(obj);
+ PowerPCCPU *cpu = POWERPC_CPU(obj);
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
+ CPUPPCState *env = &cpu->env;
+
+ cs->env_ptr = env;
+
+ env->msr_mask = pcc->msr_mask;
+ env->mmu_model = pcc->mmu_model;
+ env->excp_model = pcc->excp_model;
+ env->bus_model = pcc->bus_model;
+ env->insns_flags = pcc->insns_flags;
+ env->insns_flags2 = pcc->insns_flags2;
+ env->flags = pcc->flags;
+ env->bfd_mach = pcc->bfd_mach;
+ env->check_pow = pcc->check_pow;
+
+ /* Mark HV mode as supported if the CPU has an MSR_HV bit
+ * in the msr_mask. The mask can later be cleared by PAPR
+ * mode, but HV mode support will remain, ensuring that the
+ * guest cannot use privileged instructions in PAPR mode.
+ * For the 970 we currently simply don't set HV in msr_mask,
+ * thus simulating an "Apple mode" 970. If we ever want to
+ * support 970 HV mode, we'll have to add a processor attribute
+ * of some sort.
+ */
+#if !defined(CONFIG_USER_ONLY)
+ env->has_hv_mode = !!(env->msr_mask & MSR_HVB);
+#endif
+
+#if defined(TARGET_PPC64)
+ if (pcc->sps) {
+ env->sps = *pcc->sps;
+ } else if (env->mmu_model & POWERPC_MMU_64) {
+ /* Use default sets of page sizes. We don't support MPSS */
+ static const struct ppc_segment_page_sizes defsps_4k = {
+ .sps = {
+ { .page_shift = 12, /* 4K */
+ .slb_enc = 0,
+ .enc = { { .page_shift = 12, .pte_enc = 0 } }
+ },
+ { .page_shift = 24, /* 16M */
+ .slb_enc = 0x100,
+ .enc = { { .page_shift = 24, .pte_enc = 0 } }
+ },
+ },
+ };
+ static const struct ppc_segment_page_sizes defsps_64k = {
+ .sps = {
+ { .page_shift = 12, /* 4K */
+ .slb_enc = 0,
+ .enc = { { .page_shift = 12, .pte_enc = 0 } }
+ },
+ { .page_shift = 16, /* 64K */
+ .slb_enc = 0x110,
+ .enc = { { .page_shift = 16, .pte_enc = 1 } }
+ },
+ { .page_shift = 24, /* 16M */
+ .slb_enc = 0x100,
+ .enc = { { .page_shift = 24, .pte_enc = 0 } }
+ },
+ },
+ };
+ env->sps = (env->mmu_model & POWERPC_MMU_64K) ? defsps_64k : defsps_4k;
+ }
+#endif /* defined(TARGET_PPC64) */
+
+ if (tcg_enabled()) {
+ ppc_translate_init();
+ }
+}
+
+static bool ppc_pvr_match_default(PowerPCCPUClass *pcc, uint32_t pvr)
+{
+ return pcc->pvr == pvr;
+}
+
+static gchar *ppc_gdb_arch_name(CPUState *cs)
+{
+#if defined(TARGET_PPC64)
+ return g_strdup("powerpc:common64");
+#else
+ return g_strdup("powerpc:common");
+#endif
+}
+
+static Property ppc_cpu_properties[] = {
+ DEFINE_PROP_BOOL("pre-2.8-migration", PowerPCCPU, pre_2_8_migration, false),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void ppc_cpu_class_init(ObjectClass *oc, void *data)
+{
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(oc);
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ pcc->parent_realize = dc->realize;
+ pcc->parent_unrealize = dc->unrealize;
+ pcc->pvr_match = ppc_pvr_match_default;
+ pcc->interrupts_big_endian = ppc_cpu_interrupts_big_endian_always;
+ dc->realize = ppc_cpu_realizefn;
+ dc->unrealize = ppc_cpu_unrealizefn;
+ dc->props = ppc_cpu_properties;
+
+ pcc->parent_reset = cc->reset;
+ cc->reset = ppc_cpu_reset;
+
+ cc->class_by_name = ppc_cpu_class_by_name;
+ cc->has_work = ppc_cpu_has_work;
+ cc->do_interrupt = ppc_cpu_do_interrupt;
+ cc->cpu_exec_interrupt = ppc_cpu_exec_interrupt;
+ cc->dump_state = ppc_cpu_dump_state;
+ cc->dump_statistics = ppc_cpu_dump_statistics;
+ cc->set_pc = ppc_cpu_set_pc;
+ cc->gdb_read_register = ppc_cpu_gdb_read_register;
+ cc->gdb_write_register = ppc_cpu_gdb_write_register;
+#ifdef CONFIG_USER_ONLY
+ cc->handle_mmu_fault = ppc_cpu_handle_mmu_fault;
+#else
+ cc->get_phys_page_debug = ppc_cpu_get_phys_page_debug;
+ cc->vmsd = &vmstate_ppc_cpu;
+#if defined(TARGET_PPC64)
+ cc->write_elf64_note = ppc64_cpu_write_elf64_note;
+#endif
+#endif
+ cc->cpu_exec_enter = ppc_cpu_exec_enter;
+
+ cc->gdb_num_core_regs = 71;
+
+#ifdef USE_APPLE_GDB
+ cc->gdb_read_register = ppc_cpu_gdb_read_register_apple;
+ cc->gdb_write_register = ppc_cpu_gdb_write_register_apple;
+ cc->gdb_num_core_regs = 71 + 32;
+#endif
+
+ cc->gdb_arch_name = ppc_gdb_arch_name;
+#if defined(TARGET_PPC64)
+ cc->gdb_core_xml_file = "power64-core.xml";
+#else
+ cc->gdb_core_xml_file = "power-core.xml";
+#endif
+#ifndef CONFIG_USER_ONLY
+ cc->virtio_is_big_endian = ppc_cpu_is_big_endian;
+#endif
+
+ dc->fw_name = "PowerPC,UNKNOWN";
+}
+
+static const TypeInfo ppc_cpu_type_info = {
+ .name = TYPE_POWERPC_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(PowerPCCPU),
+ .instance_init = ppc_cpu_initfn,
+ .abstract = true,
+ .class_size = sizeof(PowerPCCPUClass),
+ .class_init = ppc_cpu_class_init,
+};
+
+static void ppc_cpu_register_types(void)
+{
+ type_register_static(&ppc_cpu_type_info);
+}
+
+type_init(ppc_cpu_register_types)
diff --git a/target/ppc/user_only_helper.c b/target/ppc/user_only_helper.c
new file mode 100644
index 0000000000..6aff34713f
--- /dev/null
+++ b/target/ppc/user_only_helper.c
@@ -0,0 +1,47 @@
+/*
+ * PowerPC MMU stub handling for user mode emulation
+ *
+ * Copyright (c) 2003-2007 Jocelyn Mayer
+ * Copyright (c) 2013 David Gibson, IBM Corporation.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+
+int ppc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
+ int mmu_idx)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ int exception, error_code;
+
+ if (rw == 2) {
+ exception = POWERPC_EXCP_ISI;
+ error_code = 0x40000000;
+ } else {
+ exception = POWERPC_EXCP_DSI;
+ error_code = 0x40000000;
+ if (rw) {
+ error_code |= 0x02000000;
+ }
+ env->spr[SPR_DAR] = address;
+ env->spr[SPR_DSISR] = error_code;
+ }
+ cs->exception_index = exception;
+ env->error_code = error_code;
+
+ return 1;
+}
diff --git a/target/s390x/Makefile.objs b/target/s390x/Makefile.objs
new file mode 100644
index 0000000000..c573633bd1
--- /dev/null
+++ b/target/s390x/Makefile.objs
@@ -0,0 +1,25 @@
+obj-y += translate.o helper.o cpu.o interrupt.o
+obj-y += int_helper.o fpu_helper.o cc_helper.o mem_helper.o misc_helper.o
+obj-y += gdbstub.o cpu_models.o cpu_features.o
+obj-$(CONFIG_SOFTMMU) += machine.o ioinst.o arch_dump.o mmu_helper.o
+obj-$(CONFIG_KVM) += kvm.o
+
+# build and run feature list generator
+feat-src = $(SRC_PATH)/target/$(TARGET_BASE_ARCH)/
+feat-dst = $(BUILD_DIR)/$(TARGET_DIR)
+ifneq ($(MAKECMDGOALS),clean)
+GENERATED_HEADERS += $(feat-dst)gen-features.h
+endif
+
+$(feat-dst)gen-features.h: $(feat-dst)gen-features.h-timestamp
+ @cmp $< $@ >/dev/null 2>&1 || cp $< $@
+$(feat-dst)gen-features.h-timestamp: $(feat-dst)gen-features
+ $(call quiet-command,$< >$@,"GEN","$(TARGET_DIR)gen-features.h")
+
+$(feat-dst)gen-features: $(feat-src)gen-features.c
+ $(call quiet-command,$(HOST_CC) $(QEMU_INCLUDES) -o $@ $<,"CC","$(TARGET_DIR)gen-features")
+
+clean-target:
+ rm -f gen-features.h-timestamp
+ rm -f gen-features.h
+ rm -f gen-features
diff --git a/target/s390x/arch_dump.c b/target/s390x/arch_dump.c
new file mode 100644
index 0000000000..4731869f6b
--- /dev/null
+++ b/target/s390x/arch_dump.c
@@ -0,0 +1,249 @@
+/*
+ * writing ELF notes for s390x arch
+ *
+ *
+ * Copyright IBM Corp. 2012, 2013
+ *
+ * Ekaterina Tumanova <tumanova@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "elf.h"
+#include "exec/cpu-all.h"
+#include "sysemu/dump.h"
+#include "sysemu/kvm.h"
+
+
+struct S390xUserRegsStruct {
+ uint64_t psw[2];
+ uint64_t gprs[16];
+ uint32_t acrs[16];
+} QEMU_PACKED;
+
+typedef struct S390xUserRegsStruct S390xUserRegs;
+
+struct S390xElfPrstatusStruct {
+ uint8_t pad1[32];
+ uint32_t pid;
+ uint8_t pad2[76];
+ S390xUserRegs regs;
+ uint8_t pad3[16];
+} QEMU_PACKED;
+
+typedef struct S390xElfPrstatusStruct S390xElfPrstatus;
+
+struct S390xElfFpregsetStruct {
+ uint32_t fpc;
+ uint32_t pad;
+ uint64_t fprs[16];
+} QEMU_PACKED;
+
+typedef struct S390xElfFpregsetStruct S390xElfFpregset;
+
+struct S390xElfVregsLoStruct {
+ uint64_t vregs[16];
+} QEMU_PACKED;
+
+typedef struct S390xElfVregsLoStruct S390xElfVregsLo;
+
+struct S390xElfVregsHiStruct {
+ uint64_t vregs[16][2];
+} QEMU_PACKED;
+
+typedef struct S390xElfVregsHiStruct S390xElfVregsHi;
+
+typedef struct noteStruct {
+ Elf64_Nhdr hdr;
+ char name[5];
+ char pad3[3];
+ union {
+ S390xElfPrstatus prstatus;
+ S390xElfFpregset fpregset;
+ S390xElfVregsLo vregslo;
+ S390xElfVregsHi vregshi;
+ uint32_t prefix;
+ uint64_t timer;
+ uint64_t todcmp;
+ uint32_t todpreg;
+ uint64_t ctrs[16];
+ } contents;
+} QEMU_PACKED Note;
+
+static void s390x_write_elf64_prstatus(Note *note, S390CPU *cpu)
+{
+ int i;
+ S390xUserRegs *regs;
+
+ note->hdr.n_type = cpu_to_be32(NT_PRSTATUS);
+
+ regs = &(note->contents.prstatus.regs);
+ regs->psw[0] = cpu_to_be64(cpu->env.psw.mask);
+ regs->psw[1] = cpu_to_be64(cpu->env.psw.addr);
+ for (i = 0; i <= 15; i++) {
+ regs->acrs[i] = cpu_to_be32(cpu->env.aregs[i]);
+ regs->gprs[i] = cpu_to_be64(cpu->env.regs[i]);
+ }
+}
+
+static void s390x_write_elf64_fpregset(Note *note, S390CPU *cpu)
+{
+ int i;
+ CPUS390XState *cs = &cpu->env;
+
+ note->hdr.n_type = cpu_to_be32(NT_FPREGSET);
+ note->contents.fpregset.fpc = cpu_to_be32(cpu->env.fpc);
+ for (i = 0; i <= 15; i++) {
+ note->contents.fpregset.fprs[i] = cpu_to_be64(get_freg(cs, i)->ll);
+ }
+}
+
+static void s390x_write_elf64_vregslo(Note *note, S390CPU *cpu)
+{
+ int i;
+
+ note->hdr.n_type = cpu_to_be32(NT_S390_VXRS_LOW);
+ for (i = 0; i <= 15; i++) {
+ note->contents.vregslo.vregs[i] = cpu_to_be64(cpu->env.vregs[i][1].ll);
+ }
+}
+
+static void s390x_write_elf64_vregshi(Note *note, S390CPU *cpu)
+{
+ int i;
+ S390xElfVregsHi *temp_vregshi;
+
+ temp_vregshi = &note->contents.vregshi;
+
+ note->hdr.n_type = cpu_to_be32(NT_S390_VXRS_HIGH);
+ for (i = 0; i <= 15; i++) {
+ temp_vregshi->vregs[i][0] = cpu_to_be64(cpu->env.vregs[i + 16][0].ll);
+ temp_vregshi->vregs[i][1] = cpu_to_be64(cpu->env.vregs[i + 16][1].ll);
+ }
+}
+
+static void s390x_write_elf64_timer(Note *note, S390CPU *cpu)
+{
+ note->hdr.n_type = cpu_to_be32(NT_S390_TIMER);
+ note->contents.timer = cpu_to_be64((uint64_t)(cpu->env.cputm));
+}
+
+static void s390x_write_elf64_todcmp(Note *note, S390CPU *cpu)
+{
+ note->hdr.n_type = cpu_to_be32(NT_S390_TODCMP);
+ note->contents.todcmp = cpu_to_be64((uint64_t)(cpu->env.ckc));
+}
+
+static void s390x_write_elf64_todpreg(Note *note, S390CPU *cpu)
+{
+ note->hdr.n_type = cpu_to_be32(NT_S390_TODPREG);
+ note->contents.todpreg = cpu_to_be32((uint32_t)(cpu->env.todpr));
+}
+
+static void s390x_write_elf64_ctrs(Note *note, S390CPU *cpu)
+{
+ int i;
+
+ note->hdr.n_type = cpu_to_be32(NT_S390_CTRS);
+
+ for (i = 0; i <= 15; i++) {
+ note->contents.ctrs[i] = cpu_to_be64(cpu->env.cregs[i]);
+ }
+}
+
+static void s390x_write_elf64_prefix(Note *note, S390CPU *cpu)
+{
+ note->hdr.n_type = cpu_to_be32(NT_S390_PREFIX);
+ note->contents.prefix = cpu_to_be32((uint32_t)(cpu->env.psa));
+}
+
+
+static const struct NoteFuncDescStruct {
+ int contents_size;
+ void (*note_contents_func)(Note *note, S390CPU *cpu);
+} note_func[] = {
+ {sizeof(((Note *)0)->contents.prstatus), s390x_write_elf64_prstatus},
+ {sizeof(((Note *)0)->contents.prefix), s390x_write_elf64_prefix},
+ {sizeof(((Note *)0)->contents.fpregset), s390x_write_elf64_fpregset},
+ {sizeof(((Note *)0)->contents.ctrs), s390x_write_elf64_ctrs},
+ {sizeof(((Note *)0)->contents.timer), s390x_write_elf64_timer},
+ {sizeof(((Note *)0)->contents.todcmp), s390x_write_elf64_todcmp},
+ {sizeof(((Note *)0)->contents.todpreg), s390x_write_elf64_todpreg},
+ {sizeof(((Note *)0)->contents.vregslo), s390x_write_elf64_vregslo},
+ {sizeof(((Note *)0)->contents.vregshi), s390x_write_elf64_vregshi},
+ { 0, NULL}
+};
+
+typedef struct NoteFuncDescStruct NoteFuncDesc;
+
+
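+/* Write one ELF note per note_func[] entry for the given CPU, sizing each
+ * note to the actual length of its contents.
+ */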
+static int s390x_write_all_elf64_notes(const char *note_name,
+ WriteCoreDumpFunction f,
+ S390CPU *cpu, int id,
+ void *opaque)
+{
+ Note note;
+ const NoteFuncDesc *nf;
+ int note_size;
+ int ret = -1;
+
+ for (nf = note_func; nf->note_contents_func; nf++) {
+ memset(&note, 0, sizeof(note));
+ note.hdr.n_namesz = cpu_to_be32(sizeof(note.name));
+ note.hdr.n_descsz = cpu_to_be32(nf->contents_size);
+ strncpy(note.name, note_name, sizeof(note.name));
+ (*nf->note_contents_func)(&note, cpu);
+
+ note_size = sizeof(note) - sizeof(note.contents) + nf->contents_size;
+ ret = f(&note, note_size, opaque);
+
+ if (ret < 0) {
+ return -1;
+ }
+
+ }
+
+ return 0;
+}
+
+
+int s390_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
+ int cpuid, void *opaque)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ return s390x_write_all_elf64_notes("CORE", f, cpu, cpuid, opaque);
+}
+
+int cpu_get_dump_info(ArchDumpInfo *info,
+ const struct GuestPhysBlockList *guest_phys_blocks)
+{
+ info->d_machine = EM_S390;
+ info->d_endian = ELFDATA2MSB;
+ info->d_class = ELFCLASS64;
+
+ return 0;
+}
+
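+/* Size of all per-CPU notes: header + name + contents for each note type,
+ * multiplied by the number of CPUs.
+ */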
+ssize_t cpu_get_note_size(int class, int machine, int nr_cpus)
+{
+ int name_size = 8; /* "CORE" or "QEMU" rounded */
+ size_t elf_note_size = 0;
+ int note_head_size;
+ const NoteFuncDesc *nf;
+
+ assert(class == ELFCLASS64);
+ assert(machine == EM_S390);
+
+ note_head_size = sizeof(Elf64_Nhdr);
+
+ for (nf = note_func; nf->note_contents_func; nf++) {
+ elf_note_size = elf_note_size + note_head_size + name_size +
+ nf->contents_size;
+ }
+
+ return (elf_note_size) * nr_cpus;
+}
diff --git a/target/s390x/cc_helper.c b/target/s390x/cc_helper.c
new file mode 100644
index 0000000000..1cf855133e
--- /dev/null
+++ b/target/s390x/cc_helper.c
@@ -0,0 +1,570 @@
+/*
+ * S/390 condition code helper routines
+ *
+ * Copyright (c) 2009 Ulrich Hecht
+ * Copyright (c) 2009 Alexander Graf
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "qemu/host-utils.h"
+
+/* #define DEBUG_HELPER */
+#ifdef DEBUG_HELPER
+#define HELPER_LOG(x...) qemu_log(x)
+#else
+#define HELPER_LOG(x...)
+#endif
+
+static uint32_t cc_calc_ltgt_32(int32_t src, int32_t dst)
+{
+ if (src == dst) {
+ return 0;
+ } else if (src < dst) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
+static uint32_t cc_calc_ltgt0_32(int32_t dst)
+{
+ return cc_calc_ltgt_32(dst, 0);
+}
+
+static uint32_t cc_calc_ltgt_64(int64_t src, int64_t dst)
+{
+ if (src == dst) {
+ return 0;
+ } else if (src < dst) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
+static uint32_t cc_calc_ltgt0_64(int64_t dst)
+{
+ return cc_calc_ltgt_64(dst, 0);
+}
+
+static uint32_t cc_calc_ltugtu_32(uint32_t src, uint32_t dst)
+{
+ if (src == dst) {
+ return 0;
+ } else if (src < dst) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
+static uint32_t cc_calc_ltugtu_64(uint64_t src, uint64_t dst)
+{
+ if (src == dst) {
+ return 0;
+ } else if (src < dst) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
+static uint32_t cc_calc_tm_32(uint32_t val, uint32_t mask)
+{
+ uint32_t r = val & mask;
+
+ if (r == 0) {
+ return 0;
+ } else if (r == mask) {
+ return 3;
+ } else {
+ return 1;
+ }
+}
+
+static uint32_t cc_calc_tm_64(uint64_t val, uint64_t mask)
+{
+ uint64_t r = val & mask;
+
+ if (r == 0) {
+ return 0;
+ } else if (r == mask) {
+ return 3;
+ } else {
+ int top = clz64(mask);
+ if ((int64_t)(val << top) < 0) {
+ return 2;
+ } else {
+ return 1;
+ }
+ }
+}
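The test-under-mask rule above gives cc 0 when all selected bits are zero, cc 3 when they are all one, and otherwise cc 1 or 2 depending on the leftmost selected bit. A standalone restatement, using the GCC/Clang builtin __builtin_clzll in place of clz64:

#include <stdint.h>
#include <stdio.h>

static unsigned tm64_cc(uint64_t val, uint64_t mask)
{
    uint64_t r = val & mask;

    if (r == 0) {
        return 0;                /* all selected bits are zero */
    }
    if (r == mask) {
        return 3;                /* all selected bits are one */
    }
    /* mixed: cc 2 if the leftmost selected bit of val is one, else cc 1 */
    return ((int64_t)(val << __builtin_clzll(mask)) < 0) ? 2 : 1;
}

int main(void)
{
    printf("%u %u %u %u\n",
           tm64_cc(0x00, 0xf0),  /* 0 */
           tm64_cc(0xf0, 0xf0),  /* 3 */
           tm64_cc(0x80, 0xf0),  /* 2 */
           tm64_cc(0x10, 0xf0)); /* 1 */
    return 0;
}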
+
+static uint32_t cc_calc_nz(uint64_t dst)
+{
+ return !!dst;
+}
+
+static uint32_t cc_calc_add_64(int64_t a1, int64_t a2, int64_t ar)
+{
+ if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
+ return 3; /* overflow */
+ } else {
+ if (ar < 0) {
+ return 1;
+ } else if (ar > 0) {
+ return 2;
+ } else {
+ return 0;
+ }
+ }
+}
+
+static uint32_t cc_calc_addu_64(uint64_t a1, uint64_t a2, uint64_t ar)
+{
+ return (ar != 0) + 2 * (ar < a1);
+}
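The logical-add cc above packs two independent facts into two bits: bit 0 says the result is nonzero, bit 1 says a carry came out (detected as ar < a1). A minimal standalone check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t a1 = UINT64_MAX, a2 = 1;
    uint64_t ar = a1 + a2;                  /* wraps around to 0 */
    unsigned cc = (ar != 0) + 2 * (ar < a1);

    printf("cc = %u\n", cc);                /* 2: zero result, carry out */
    return 0;
}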
+
+static uint32_t cc_calc_addc_64(uint64_t a1, uint64_t a2, uint64_t ar)
+{
+ /* Recover a2 + carry_in. */
+ uint64_t a2c = ar - a1;
+ /* Check for a2+carry_in overflow, then a1+a2c overflow. */
+ int carry_out = (a2c < a2) || (ar < a1);
+
+ return (ar != 0) + 2 * carry_out;
+}
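The add-with-carry variant only sees the two operands and the final sum, so it first recovers a2 + carry_in as ar - a1 and then checks both partial sums for wrap-around. A standalone sketch of the same trick:

#include <stdint.h>
#include <stdio.h>

/* Given a1, a2 and ar = a1 + a2 + carry_in, decide whether a carry came out. */
static int addc_carry_out(uint64_t a1, uint64_t a2, uint64_t ar)
{
    uint64_t a2c = ar - a1;               /* a2 + carry_in, recovered from ar */

    return (a2c < a2) || (ar < a1);       /* wrap in either partial sum */
}

int main(void)
{
    uint64_t a1 = UINT64_MAX, a2 = 0;
    uint64_t ar = a1 + a2 + 1;            /* carry_in = 1, sum wraps to 0 */

    printf("carry out: %d\n", addc_carry_out(a1, a2, ar));   /* 1 */
    return 0;
}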
+
+static uint32_t cc_calc_sub_64(int64_t a1, int64_t a2, int64_t ar)
+{
+ if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
+ return 3; /* overflow */
+ } else {
+ if (ar < 0) {
+ return 1;
+ } else if (ar > 0) {
+ return 2;
+ } else {
+ return 0;
+ }
+ }
+}
+
+static uint32_t cc_calc_subu_64(uint64_t a1, uint64_t a2, uint64_t ar)
+{
+ if (ar == 0) {
+ return 2;
+ } else {
+ if (a2 > a1) {
+ return 1;
+ } else {
+ return 3;
+ }
+ }
+}
+
+static uint32_t cc_calc_subb_64(uint64_t a1, uint64_t a2, uint64_t ar)
+{
+ int borrow_out;
+
+ if (ar != a1 - a2) { /* difference means borrow-in */
+ borrow_out = (a2 >= a1);
+ } else {
+ borrow_out = (a2 > a1);
+ }
+
+ return (ar != 0) + 2 * !borrow_out;
+}
+
+static uint32_t cc_calc_abs_64(int64_t dst)
+{
+ if ((uint64_t)dst == 0x8000000000000000ULL) {
+ return 3;
+ } else if (dst) {
+ return 2;
+ } else {
+ return 0;
+ }
+}
+
+static uint32_t cc_calc_nabs_64(int64_t dst)
+{
+ return !!dst;
+}
+
+static uint32_t cc_calc_comp_64(int64_t dst)
+{
+ if ((uint64_t)dst == 0x8000000000000000ULL) {
+ return 3;
+ } else if (dst < 0) {
+ return 1;
+ } else if (dst > 0) {
+ return 2;
+ } else {
+ return 0;
+ }
+}
+
+
+static uint32_t cc_calc_add_32(int32_t a1, int32_t a2, int32_t ar)
+{
+ if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
+ return 3; /* overflow */
+ } else {
+ if (ar < 0) {
+ return 1;
+ } else if (ar > 0) {
+ return 2;
+ } else {
+ return 0;
+ }
+ }
+}
+
+static uint32_t cc_calc_addu_32(uint32_t a1, uint32_t a2, uint32_t ar)
+{
+ return (ar != 0) + 2 * (ar < a1);
+}
+
+static uint32_t cc_calc_addc_32(uint32_t a1, uint32_t a2, uint32_t ar)
+{
+ /* Recover a2 + carry_in. */
+ uint32_t a2c = ar - a1;
+ /* Check for a2+carry_in overflow, then a1+a2c overflow. */
+ int carry_out = (a2c < a2) || (ar < a1);
+
+ return (ar != 0) + 2 * carry_out;
+}
+
+static uint32_t cc_calc_sub_32(int32_t a1, int32_t a2, int32_t ar)
+{
+ if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
+ return 3; /* overflow */
+ } else {
+ if (ar < 0) {
+ return 1;
+ } else if (ar > 0) {
+ return 2;
+ } else {
+ return 0;
+ }
+ }
+}
+
+static uint32_t cc_calc_subu_32(uint32_t a1, uint32_t a2, uint32_t ar)
+{
+ if (ar == 0) {
+ return 2;
+ } else {
+ if (a2 > a1) {
+ return 1;
+ } else {
+ return 3;
+ }
+ }
+}
+
+static uint32_t cc_calc_subb_32(uint32_t a1, uint32_t a2, uint32_t ar)
+{
+ int borrow_out;
+
+ if (ar != a1 - a2) { /* difference means borrow-in */
+ borrow_out = (a2 >= a1);
+ } else {
+ borrow_out = (a2 > a1);
+ }
+
+ return (ar != 0) + 2 * !borrow_out;
+}
+
+static uint32_t cc_calc_abs_32(int32_t dst)
+{
+ if ((uint32_t)dst == 0x80000000UL) {
+ return 3;
+ } else if (dst) {
+ return 2;
+ } else {
+ return 0;
+ }
+}
+
+static uint32_t cc_calc_nabs_32(int32_t dst)
+{
+ return !!dst;
+}
+
+static uint32_t cc_calc_comp_32(int32_t dst)
+{
+ if ((uint32_t)dst == 0x80000000UL) {
+ return 3;
+ } else if (dst < 0) {
+ return 1;
+ } else if (dst > 0) {
+ return 2;
+ } else {
+ return 0;
+ }
+}
+
+/* calculate condition code for insert character under mask insn */
+static uint32_t cc_calc_icm(uint64_t mask, uint64_t val)
+{
+ if ((val & mask) == 0) {
+ return 0;
+ } else {
+ int top = clz64(mask);
+ if ((int64_t)(val << top) < 0) {
+ return 1;
+ } else {
+ return 2;
+ }
+ }
+}
+
+static uint32_t cc_calc_sla_32(uint32_t src, int shift)
+{
+ uint32_t mask = ((1U << shift) - 1U) << (32 - shift);
+ uint32_t sign = 1U << 31;
+ uint32_t match;
+ int32_t r;
+
+ /* Check if the sign bit stays the same. */
+ if (src & sign) {
+ match = mask;
+ } else {
+ match = 0;
+ }
+ if ((src & mask) != match) {
+ /* Overflow. */
+ return 3;
+ }
+
+ r = ((src << shift) & ~sign) | (src & sign);
+ if (r == 0) {
+ return 0;
+ } else if (r < 0) {
+ return 1;
+ }
+ return 2;
+}
+
+static uint32_t cc_calc_sla_64(uint64_t src, int shift)
+{
+ uint64_t mask = ((1ULL << shift) - 1ULL) << (64 - shift);
+ uint64_t sign = 1ULL << 63;
+ uint64_t match;
+ int64_t r;
+
+ /* Check if the sign bit stays the same. */
+ if (src & sign) {
+ match = mask;
+ } else {
+ match = 0;
+ }
+ if ((src & mask) != match) {
+ /* Overflow. */
+ return 3;
+ }
+
+ r = ((src << shift) & ~sign) | (src & sign);
+ if (r == 0) {
+ return 0;
+ } else if (r < 0) {
+ return 1;
+ }
+ return 2;
+}
+
+static uint32_t cc_calc_flogr(uint64_t dst)
+{
+ return dst ? 2 : 0;
+}
+
+static uint32_t do_calc_cc(CPUS390XState *env, uint32_t cc_op,
+ uint64_t src, uint64_t dst, uint64_t vr)
+{
+ S390CPU *cpu = s390_env_get_cpu(env);
+ uint32_t r = 0;
+
+ switch (cc_op) {
+ case CC_OP_CONST0:
+ case CC_OP_CONST1:
+ case CC_OP_CONST2:
+ case CC_OP_CONST3:
+ /* cc_op value _is_ cc */
+ r = cc_op;
+ break;
+ case CC_OP_LTGT0_32:
+ r = cc_calc_ltgt0_32(dst);
+ break;
+ case CC_OP_LTGT0_64:
+ r = cc_calc_ltgt0_64(dst);
+ break;
+ case CC_OP_LTGT_32:
+ r = cc_calc_ltgt_32(src, dst);
+ break;
+ case CC_OP_LTGT_64:
+ r = cc_calc_ltgt_64(src, dst);
+ break;
+ case CC_OP_LTUGTU_32:
+ r = cc_calc_ltugtu_32(src, dst);
+ break;
+ case CC_OP_LTUGTU_64:
+ r = cc_calc_ltugtu_64(src, dst);
+ break;
+ case CC_OP_TM_32:
+ r = cc_calc_tm_32(src, dst);
+ break;
+ case CC_OP_TM_64:
+ r = cc_calc_tm_64(src, dst);
+ break;
+ case CC_OP_NZ:
+ r = cc_calc_nz(dst);
+ break;
+ case CC_OP_ADD_64:
+ r = cc_calc_add_64(src, dst, vr);
+ break;
+ case CC_OP_ADDU_64:
+ r = cc_calc_addu_64(src, dst, vr);
+ break;
+ case CC_OP_ADDC_64:
+ r = cc_calc_addc_64(src, dst, vr);
+ break;
+ case CC_OP_SUB_64:
+ r = cc_calc_sub_64(src, dst, vr);
+ break;
+ case CC_OP_SUBU_64:
+ r = cc_calc_subu_64(src, dst, vr);
+ break;
+ case CC_OP_SUBB_64:
+ r = cc_calc_subb_64(src, dst, vr);
+ break;
+ case CC_OP_ABS_64:
+ r = cc_calc_abs_64(dst);
+ break;
+ case CC_OP_NABS_64:
+ r = cc_calc_nabs_64(dst);
+ break;
+ case CC_OP_COMP_64:
+ r = cc_calc_comp_64(dst);
+ break;
+
+ case CC_OP_ADD_32:
+ r = cc_calc_add_32(src, dst, vr);
+ break;
+ case CC_OP_ADDU_32:
+ r = cc_calc_addu_32(src, dst, vr);
+ break;
+ case CC_OP_ADDC_32:
+ r = cc_calc_addc_32(src, dst, vr);
+ break;
+ case CC_OP_SUB_32:
+ r = cc_calc_sub_32(src, dst, vr);
+ break;
+ case CC_OP_SUBU_32:
+ r = cc_calc_subu_32(src, dst, vr);
+ break;
+ case CC_OP_SUBB_32:
+ r = cc_calc_subb_32(src, dst, vr);
+ break;
+ case CC_OP_ABS_32:
+ r = cc_calc_abs_32(dst);
+ break;
+ case CC_OP_NABS_32:
+ r = cc_calc_nabs_32(dst);
+ break;
+ case CC_OP_COMP_32:
+ r = cc_calc_comp_32(dst);
+ break;
+
+ case CC_OP_ICM:
+ r = cc_calc_icm(src, dst);
+ break;
+ case CC_OP_SLA_32:
+ r = cc_calc_sla_32(src, dst);
+ break;
+ case CC_OP_SLA_64:
+ r = cc_calc_sla_64(src, dst);
+ break;
+ case CC_OP_FLOGR:
+ r = cc_calc_flogr(dst);
+ break;
+
+ case CC_OP_NZ_F32:
+ r = set_cc_nz_f32(dst);
+ break;
+ case CC_OP_NZ_F64:
+ r = set_cc_nz_f64(dst);
+ break;
+ case CC_OP_NZ_F128:
+ r = set_cc_nz_f128(make_float128(src, dst));
+ break;
+
+ default:
+ cpu_abort(CPU(cpu), "Unknown CC operation: %s\n", cc_name(cc_op));
+ }
+
+ HELPER_LOG("%s: %15s 0x%016lx 0x%016lx 0x%016lx = %d\n", __func__,
+ cc_name(cc_op), src, dst, vr, r);
+ return r;
+}
+
+uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
+ uint64_t vr)
+{
+ return do_calc_cc(env, cc_op, src, dst, vr);
+}
+
+uint32_t HELPER(calc_cc)(CPUS390XState *env, uint32_t cc_op, uint64_t src,
+ uint64_t dst, uint64_t vr)
+{
+ return do_calc_cc(env, cc_op, src, dst, vr);
+}
+
+#ifndef CONFIG_USER_ONLY
+void HELPER(load_psw)(CPUS390XState *env, uint64_t mask, uint64_t addr)
+{
+ load_psw(env, mask, addr);
+ cpu_loop_exit(CPU(s390_env_get_cpu(env)));
+}
+
+void HELPER(sacf)(CPUS390XState *env, uint64_t a1)
+{
+ HELPER_LOG("%s: %16" PRIx64 "\n", __func__, a1);
+
+ switch (a1 & 0xf00) {
+ case 0x000:
+ env->psw.mask &= ~PSW_MASK_ASC;
+ env->psw.mask |= PSW_ASC_PRIMARY;
+ break;
+ case 0x100:
+ env->psw.mask &= ~PSW_MASK_ASC;
+ env->psw.mask |= PSW_ASC_SECONDARY;
+ break;
+ case 0x300:
+ env->psw.mask &= ~PSW_MASK_ASC;
+ env->psw.mask |= PSW_ASC_HOME;
+ break;
+ default:
+ HELPER_LOG("unknown sacf mode: %" PRIx64 "\n", a1);
+ program_interrupt(env, PGM_SPECIFICATION, 2);
+ break;
+ }
+}
+#endif
diff --git a/target/s390x/cpu-qom.h b/target/s390x/cpu-qom.h
new file mode 100644
index 0000000000..4e936e7788
--- /dev/null
+++ b/target/s390x/cpu-qom.h
@@ -0,0 +1,66 @@
+/*
+ * QEMU S/390 CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+#ifndef QEMU_S390_CPU_QOM_H
+#define QEMU_S390_CPU_QOM_H
+
+#include "qom/cpu.h"
+#include "cpu_models.h"
+
+#define TYPE_S390_CPU "s390-cpu"
+
+#define S390_CPU_CLASS(klass) \
+ OBJECT_CLASS_CHECK(S390CPUClass, (klass), TYPE_S390_CPU)
+#define S390_CPU(obj) \
+ OBJECT_CHECK(S390CPU, (obj), TYPE_S390_CPU)
+#define S390_CPU_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(S390CPUClass, (obj), TYPE_S390_CPU)
+
+/**
+ * S390CPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ * @load_normal: Performs a load normal.
+ * @cpu_reset: Performs a CPU reset.
+ * @initial_cpu_reset: Performs an initial CPU reset.
+ *
+ * An S/390 CPU model.
+ */
+typedef struct S390CPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+ const S390CPUDef *cpu_def;
+ bool kvm_required;
+ bool is_static;
+ bool is_migration_safe;
+ const char *desc;
+
+ int64_t next_cpu_id;
+
+ DeviceRealize parent_realize;
+ void (*parent_reset)(CPUState *cpu);
+ void (*load_normal)(CPUState *cpu);
+ void (*cpu_reset)(CPUState *cpu);
+ void (*initial_cpu_reset)(CPUState *cpu);
+} S390CPUClass;
+
+typedef struct S390CPU S390CPU;
+
+#endif
diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c
new file mode 100644
index 0000000000..0a39d31237
--- /dev/null
+++ b/target/s390x/cpu.c
@@ -0,0 +1,462 @@
+/*
+ * QEMU S/390 CPU
+ *
+ * Copyright (c) 2009 Ulrich Hecht
+ * Copyright (c) 2011 Alexander Graf
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ * Copyright (c) 2012 IBM Corp.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ * Contributions after 2012-12-11 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "qemu-common.h"
+#include "qemu/cutils.h"
+#include "qemu/timer.h"
+#include "qemu/error-report.h"
+#include "trace.h"
+#include "qapi/visitor.h"
+#include "migration/vmstate.h"
+#include "exec/exec-all.h"
+#ifndef CONFIG_USER_ONLY
+#include "hw/hw.h"
+#include "sysemu/arch_init.h"
+#include "sysemu/sysemu.h"
+#include "hw/s390x/sclp.h"
+#endif
+
+#define CR0_RESET 0xE0UL
+#define CR14_RESET 0xC2000000UL
+
+static void s390_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ S390CPU *cpu = S390_CPU(cs);
+
+ cpu->env.psw.addr = value;
+}
+
+static bool s390_cpu_has_work(CPUState *cs)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ CPUS390XState *env = &cpu->env;
+
+ return (cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+ (env->psw.mask & PSW_MASK_EXT);
+}
+
+#if !defined(CONFIG_USER_ONLY)
+/* S390CPUClass::load_normal() */
+static void s390_cpu_load_normal(CPUState *s)
+{
+ S390CPU *cpu = S390_CPU(s);
+ cpu->env.psw.addr = ldl_phys(s->as, 4) & PSW_MASK_ESA_ADDR;
+ cpu->env.psw.mask = PSW_MASK_32 | PSW_MASK_64;
+ s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
+}
+#endif
+
+/* S390CPUClass::cpu_reset() */
+static void s390_cpu_reset(CPUState *s)
+{
+ S390CPU *cpu = S390_CPU(s);
+ S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
+ CPUS390XState *env = &cpu->env;
+
+ env->pfault_token = -1UL;
+ scc->parent_reset(s);
+ cpu->env.sigp_order = 0;
+ s390_cpu_set_state(CPU_STATE_STOPPED, cpu);
+ tlb_flush(s, 1);
+}
+
+/* S390CPUClass::initial_reset() */
+static void s390_cpu_initial_reset(CPUState *s)
+{
+ S390CPU *cpu = S390_CPU(s);
+ CPUS390XState *env = &cpu->env;
+ int i;
+
+ s390_cpu_reset(s);
+ /* initial reset does not touch regs, fregs and aregs */
+ memset(&env->fpc, 0, offsetof(CPUS390XState, cpu_num) -
+ offsetof(CPUS390XState, fpc));
+
+ /* architectured initial values for CR 0 and 14 */
+ env->cregs[0] = CR0_RESET;
+ env->cregs[14] = CR14_RESET;
+
+ /* architectured initial value for Breaking-Event-Address register */
+ env->gbea = 1;
+
+ env->pfault_token = -1UL;
+ env->ext_index = -1;
+ for (i = 0; i < ARRAY_SIZE(env->io_index); i++) {
+ env->io_index[i] = -1;
+ }
+
+ /* tininess for underflow is detected before rounding */
+ set_float_detect_tininess(float_tininess_before_rounding,
+ &env->fpu_status);
+
+ /* Reset state inside the kernel that we cannot access yet from QEMU. */
+ if (kvm_enabled()) {
+ kvm_s390_reset_vcpu(cpu);
+ }
+ tlb_flush(s, 1);
+}
+
+/* CPUClass:reset() */
+static void s390_cpu_full_reset(CPUState *s)
+{
+ S390CPU *cpu = S390_CPU(s);
+ S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
+ CPUS390XState *env = &cpu->env;
+ int i;
+
+ scc->parent_reset(s);
+ cpu->env.sigp_order = 0;
+ s390_cpu_set_state(CPU_STATE_STOPPED, cpu);
+
+ memset(env, 0, offsetof(CPUS390XState, cpu_num));
+
+ /* architectured initial values for CR 0 and 14 */
+ env->cregs[0] = CR0_RESET;
+ env->cregs[14] = CR14_RESET;
+
+ /* architectured initial value for Breaking-Event-Address register */
+ env->gbea = 1;
+
+ env->pfault_token = -1UL;
+ env->ext_index = -1;
+ for (i = 0; i < ARRAY_SIZE(env->io_index); i++) {
+ env->io_index[i] = -1;
+ }
+
+ /* tininess for underflow is detected before rounding */
+ set_float_detect_tininess(float_tininess_before_rounding,
+ &env->fpu_status);
+
+ /* Reset state inside the kernel that we cannot access yet from QEMU. */
+ if (kvm_enabled()) {
+ kvm_s390_reset_vcpu(cpu);
+ }
+ tlb_flush(s, 1);
+}
+
+#if !defined(CONFIG_USER_ONLY)
+static void s390_cpu_machine_reset_cb(void *opaque)
+{
+ S390CPU *cpu = opaque;
+
+ run_on_cpu(CPU(cpu), s390_do_cpu_full_reset, RUN_ON_CPU_NULL);
+}
+#endif
+
+static void s390_cpu_disas_set_info(CPUState *cpu, disassemble_info *info)
+{
+ info->mach = bfd_mach_s390_64;
+ info->print_insn = print_insn_s390;
+}
+
+static void s390_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ S390CPUClass *scc = S390_CPU_GET_CLASS(dev);
+ S390CPU *cpu = S390_CPU(dev);
+ CPUS390XState *env = &cpu->env;
+ Error *err = NULL;
+
+ /* the model has to be realized before qemu_init_vcpu() due to kvm */
+ s390_realize_cpu_model(cs, &err);
+ if (err) {
+ goto out;
+ }
+
+#if !defined(CONFIG_USER_ONLY)
+ if (cpu->id >= max_cpus) {
+ error_setg(&err, "Unable to add CPU: %" PRIi64
+ ", max allowed: %d", cpu->id, max_cpus - 1);
+ goto out;
+ }
+#endif
+ if (cpu_exists(cpu->id)) {
+ error_setg(&err, "Unable to add CPU: %" PRIi64
+ ", it already exists", cpu->id);
+ goto out;
+ }
+ if (cpu->id != scc->next_cpu_id) {
+ error_setg(&err, "Unable to add CPU: %" PRIi64
+ ", The next available id is %" PRIi64, cpu->id,
+ scc->next_cpu_id);
+ goto out;
+ }
+
+ cpu_exec_realizefn(cs, &err);
+ if (err != NULL) {
+ goto out;
+ }
+ scc->next_cpu_id++;
+
+#if !defined(CONFIG_USER_ONLY)
+ qemu_register_reset(s390_cpu_machine_reset_cb, cpu);
+#endif
+ env->cpu_num = cpu->id;
+ s390_cpu_gdb_init(cs);
+ qemu_init_vcpu(cs);
+#if !defined(CONFIG_USER_ONLY)
+ run_on_cpu(cs, s390_do_cpu_full_reset, RUN_ON_CPU_NULL);
+#else
+ cpu_reset(cs);
+#endif
+
+ scc->parent_realize(dev, &err);
+
+#if !defined(CONFIG_USER_ONLY)
+ if (dev->hotplugged) {
+ raise_irq_cpu_hotplug();
+ }
+#endif
+
+out:
+ error_propagate(errp, err);
+}
+
+static void s390x_cpu_get_id(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ S390CPU *cpu = S390_CPU(obj);
+ int64_t value = cpu->id;
+
+ visit_type_int(v, name, &value, errp);
+}
+
+static void s390x_cpu_set_id(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ S390CPU *cpu = S390_CPU(obj);
+ DeviceState *dev = DEVICE(obj);
+ const int64_t min = 0;
+ const int64_t max = UINT32_MAX;
+ Error *err = NULL;
+ int64_t value;
+
+ if (dev->realized) {
+ error_setg(errp, "Attempt to set property '%s' on '%s' after "
+ "it was realized", name, object_get_typename(obj));
+ return;
+ }
+
+ visit_type_int(v, name, &value, &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+ if (value < min || value > max) {
+ error_setg(errp, "Property %s.%s doesn't take value %" PRId64
+ " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
+ object_get_typename(obj), name, value, min, max);
+ return;
+ }
+ cpu->id = value;
+}
+
+static void s390_cpu_initfn(Object *obj)
+{
+ CPUState *cs = CPU(obj);
+ S390CPU *cpu = S390_CPU(obj);
+ CPUS390XState *env = &cpu->env;
+ static bool inited;
+#if !defined(CONFIG_USER_ONLY)
+ struct tm tm;
+#endif
+
+ cs->env_ptr = env;
+ cs->halted = 1;
+ cs->exception_index = EXCP_HLT;
+ object_property_add(OBJECT(cpu), "id", "int64_t", s390x_cpu_get_id,
+ s390x_cpu_set_id, NULL, NULL, NULL);
+ s390_cpu_model_register_props(obj);
+#if !defined(CONFIG_USER_ONLY)
+ qemu_get_timedate(&tm, 0);
+ env->tod_offset = TOD_UNIX_EPOCH +
+ (time2tod(mktimegm(&tm)) * 1000000000ULL);
+ env->tod_basetime = 0;
+ env->tod_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_tod_timer, cpu);
+ env->cpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_cpu_timer, cpu);
+ s390_cpu_set_state(CPU_STATE_STOPPED, cpu);
+#endif
+
+ if (tcg_enabled() && !inited) {
+ inited = true;
+ s390x_translate_init();
+ }
+}
+
+static void s390_cpu_finalize(Object *obj)
+{
+#if !defined(CONFIG_USER_ONLY)
+ S390CPU *cpu = S390_CPU(obj);
+
+ qemu_unregister_reset(s390_cpu_machine_reset_cb, cpu);
+ g_free(cpu->irqstate);
+#endif
+}
+
+#if !defined(CONFIG_USER_ONLY)
+static bool disabled_wait(CPUState *cpu)
+{
+ return cpu->halted && !(S390_CPU(cpu)->env.psw.mask &
+ (PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK));
+}
+
+static unsigned s390_count_running_cpus(void)
+{
+ CPUState *cpu;
+ int nr_running = 0;
+
+ CPU_FOREACH(cpu) {
+ uint8_t state = S390_CPU(cpu)->env.cpu_state;
+ if (state == CPU_STATE_OPERATING ||
+ state == CPU_STATE_LOAD) {
+ if (!disabled_wait(cpu)) {
+ nr_running++;
+ }
+ }
+ }
+
+ return nr_running;
+}
+
+unsigned int s390_cpu_halt(S390CPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ trace_cpu_halt(cs->cpu_index);
+
+ if (!cs->halted) {
+ cs->halted = 1;
+ cs->exception_index = EXCP_HLT;
+ }
+
+ return s390_count_running_cpus();
+}
+
+void s390_cpu_unhalt(S390CPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ trace_cpu_unhalt(cs->cpu_index);
+
+ if (cs->halted) {
+ cs->halted = 0;
+ cs->exception_index = -1;
+ }
+}
+
+unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu)
+{
+ trace_cpu_set_state(CPU(cpu)->cpu_index, cpu_state);
+
+ switch (cpu_state) {
+ case CPU_STATE_STOPPED:
+ case CPU_STATE_CHECK_STOP:
+ /* halt the cpu for common infrastructure */
+ s390_cpu_halt(cpu);
+ break;
+ case CPU_STATE_OPERATING:
+ case CPU_STATE_LOAD:
+ /* unhalt the cpu for common infrastructure */
+ s390_cpu_unhalt(cpu);
+ break;
+ default:
+ error_report("Requested CPU state is not a valid S390 CPU state: %u",
+ cpu_state);
+ exit(1);
+ }
+ if (kvm_enabled() && cpu->env.cpu_state != cpu_state) {
+ kvm_s390_set_cpu_state(cpu, cpu_state);
+ }
+ cpu->env.cpu_state = cpu_state;
+
+ return s390_count_running_cpus();
+}
+#endif
+
+static gchar *s390_gdb_arch_name(CPUState *cs)
+{
+ return g_strdup("s390:64-bit");
+}
+
+static void s390_cpu_class_init(ObjectClass *oc, void *data)
+{
+ S390CPUClass *scc = S390_CPU_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(scc);
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ scc->next_cpu_id = 0;
+ scc->parent_realize = dc->realize;
+ dc->realize = s390_cpu_realizefn;
+
+ scc->parent_reset = cc->reset;
+#if !defined(CONFIG_USER_ONLY)
+ scc->load_normal = s390_cpu_load_normal;
+#endif
+ scc->cpu_reset = s390_cpu_reset;
+ scc->initial_cpu_reset = s390_cpu_initial_reset;
+ cc->reset = s390_cpu_full_reset;
+ cc->class_by_name = s390_cpu_class_by_name;
+ cc->has_work = s390_cpu_has_work;
+ cc->do_interrupt = s390_cpu_do_interrupt;
+ cc->dump_state = s390_cpu_dump_state;
+ cc->set_pc = s390_cpu_set_pc;
+ cc->gdb_read_register = s390_cpu_gdb_read_register;
+ cc->gdb_write_register = s390_cpu_gdb_write_register;
+#ifdef CONFIG_USER_ONLY
+ cc->handle_mmu_fault = s390_cpu_handle_mmu_fault;
+#else
+ cc->get_phys_page_debug = s390_cpu_get_phys_page_debug;
+ cc->vmsd = &vmstate_s390_cpu;
+ cc->write_elf64_note = s390_cpu_write_elf64_note;
+ cc->cpu_exec_interrupt = s390_cpu_exec_interrupt;
+ cc->debug_excp_handler = s390x_cpu_debug_excp_handler;
+#endif
+ cc->disas_set_info = s390_cpu_disas_set_info;
+
+ cc->gdb_num_core_regs = S390_NUM_CORE_REGS;
+ cc->gdb_core_xml_file = "s390x-core64.xml";
+ cc->gdb_arch_name = s390_gdb_arch_name;
+
+ s390_cpu_model_class_register_props(oc);
+}
+
+static const TypeInfo s390_cpu_type_info = {
+ .name = TYPE_S390_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(S390CPU),
+ .instance_init = s390_cpu_initfn,
+ .instance_finalize = s390_cpu_finalize,
+ .abstract = true,
+ .class_size = sizeof(S390CPUClass),
+ .class_init = s390_cpu_class_init,
+};
+
+static void s390_cpu_register_types(void)
+{
+ type_register_static(&s390_cpu_type_info);
+}
+
+type_init(s390_cpu_register_types)
diff --git a/target/s390x/cpu.h b/target/s390x/cpu.h
new file mode 100644
index 0000000000..fd36a25cf5
--- /dev/null
+++ b/target/s390x/cpu.h
@@ -0,0 +1,1296 @@
+/*
+ * S/390 virtual CPU header
+ *
+ * Copyright (c) 2009 Ulrich Hecht
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * Contributions after 2012-10-29 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ *
+ * You should have received a copy of the GNU (Lesser) General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef S390X_CPU_H
+#define S390X_CPU_H
+
+#include "qemu-common.h"
+#include "cpu-qom.h"
+
+#define TARGET_LONG_BITS 64
+
+#define ELF_MACHINE_UNAME "S390X"
+
+#define CPUArchState struct CPUS390XState
+
+#include "exec/cpu-defs.h"
+#define TARGET_PAGE_BITS 12
+
+#define TARGET_PHYS_ADDR_SPACE_BITS 64
+#define TARGET_VIRT_ADDR_SPACE_BITS 64
+
+#include "exec/cpu-all.h"
+
+#include "fpu/softfloat.h"
+
+#define NB_MMU_MODES 3
+#define TARGET_INSN_START_EXTRA_WORDS 1
+
+#define MMU_MODE0_SUFFIX _primary
+#define MMU_MODE1_SUFFIX _secondary
+#define MMU_MODE2_SUFFIX _home
+
+#define MMU_USER_IDX 0
+
+#define MAX_EXT_QUEUE 16
+#define MAX_IO_QUEUE 16
+#define MAX_MCHK_QUEUE 16
+
+#define PSW_MCHK_MASK 0x0004000000000000
+#define PSW_IO_MASK 0x0200000000000000
+
+typedef struct PSW {
+ uint64_t mask;
+ uint64_t addr;
+} PSW;
+
+typedef struct ExtQueue {
+ uint32_t code;
+ uint32_t param;
+ uint64_t param64;
+} ExtQueue;
+
+typedef struct IOIntQueue {
+ uint16_t id;
+ uint16_t nr;
+ uint32_t parm;
+ uint32_t word;
+} IOIntQueue;
+
+typedef struct MchkQueue {
+ uint16_t type;
+} MchkQueue;
+
+typedef struct CPUS390XState {
+ uint64_t regs[16]; /* GP registers */
+ /*
+ * The floating point registers are part of the vector registers.
+ * vregs[0][0] -> vregs[15][0] are 16 floating point registers
+ */
+ CPU_DoubleU vregs[32][2]; /* vector registers */
+ uint32_t aregs[16]; /* access registers */
+
+ uint32_t fpc; /* floating-point control register */
+ uint32_t cc_op;
+
+ float_status fpu_status; /* passed to softfloat lib */
+
+ /* The low part of a 128-bit return, or remainder of a divide. */
+ uint64_t retxl;
+
+ PSW psw;
+
+ uint64_t cc_src;
+ uint64_t cc_dst;
+ uint64_t cc_vr;
+
+ uint64_t __excp_addr;
+ uint64_t psa;
+
+ uint32_t int_pgm_code;
+ uint32_t int_pgm_ilen;
+
+ uint32_t int_svc_code;
+ uint32_t int_svc_ilen;
+
+ uint64_t per_address;
+ uint16_t per_perc_atmid;
+
+ uint64_t cregs[16]; /* control registers */
+
+ ExtQueue ext_queue[MAX_EXT_QUEUE];
+ IOIntQueue io_queue[MAX_IO_QUEUE][8];
+ MchkQueue mchk_queue[MAX_MCHK_QUEUE];
+
+ int pending_int;
+ int ext_index;
+ int io_index[8];
+ int mchk_index;
+
+ uint64_t ckc;
+ uint64_t cputm;
+ uint32_t todpr;
+
+ uint64_t pfault_token;
+ uint64_t pfault_compare;
+ uint64_t pfault_select;
+
+ uint64_t gbea;
+ uint64_t pp;
+
+ uint8_t riccb[64];
+
+ CPU_COMMON
+
+ /* reset does memset(0) up to here */
+
+ uint32_t cpu_num;
+ uint32_t machine_type;
+
+ uint64_t tod_offset;
+ uint64_t tod_basetime;
+ QEMUTimer *tod_timer;
+
+ QEMUTimer *cpu_timer;
+
+ /*
+ * The cpu state represents the logical state of a cpu. In contrast to other
+ * architectures, there is a difference between a halt and a stop on s390.
+ * If all cpus are either stopped (including check stop) or in the disabled
+ * wait state, the vm can be shut down.
+ */
+#define CPU_STATE_UNINITIALIZED 0x00
+#define CPU_STATE_STOPPED 0x01
+#define CPU_STATE_CHECK_STOP 0x02
+#define CPU_STATE_OPERATING 0x03
+#define CPU_STATE_LOAD 0x04
+ uint8_t cpu_state;
+
+ /* currently processed sigp order */
+ uint8_t sigp_order;
+
+} CPUS390XState;
+
+static inline CPU_DoubleU *get_freg(CPUS390XState *cs, int nr)
+{
+ return &cs->vregs[nr][0];
+}
+
+/**
+ * S390CPU:
+ * @env: #CPUS390XState.
+ *
+ * An S/390 CPU.
+ */
+struct S390CPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPUS390XState env;
+ int64_t id;
+ S390CPUModel *model;
+ /* needed for live migration */
+ void *irqstate;
+ uint32_t irqstate_saved_size;
+};
+
+static inline S390CPU *s390_env_get_cpu(CPUS390XState *env)
+{
+ return container_of(env, S390CPU, env);
+}
+
+#define ENV_GET_CPU(e) CPU(s390_env_get_cpu(e))
+
+#define ENV_OFFSET offsetof(S390CPU, env)
+
+#ifndef CONFIG_USER_ONLY
+extern const struct VMStateDescription vmstate_s390_cpu;
+#endif
+
+void s390_cpu_do_interrupt(CPUState *cpu);
+bool s390_cpu_exec_interrupt(CPUState *cpu, int int_req);
+void s390_cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
+ int flags);
+int s390_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
+ int cpuid, void *opaque);
+
+hwaddr s390_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+hwaddr s390_cpu_get_phys_addr_debug(CPUState *cpu, vaddr addr);
+int s390_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
+int s390_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+void s390_cpu_gdb_init(CPUState *cs);
+void s390x_cpu_debug_excp_handler(CPUState *cs);
+
+#include "sysemu/kvm.h"
+
+/* distinguish between 24 bit and 31 bit addressing */
+#define HIGH_ORDER_BIT 0x80000000
+
+/* Interrupt Codes */
+/* Program Interrupts */
+#define PGM_OPERATION 0x0001
+#define PGM_PRIVILEGED 0x0002
+#define PGM_EXECUTE 0x0003
+#define PGM_PROTECTION 0x0004
+#define PGM_ADDRESSING 0x0005
+#define PGM_SPECIFICATION 0x0006
+#define PGM_DATA 0x0007
+#define PGM_FIXPT_OVERFLOW 0x0008
+#define PGM_FIXPT_DIVIDE 0x0009
+#define PGM_DEC_OVERFLOW 0x000a
+#define PGM_DEC_DIVIDE 0x000b
+#define PGM_HFP_EXP_OVERFLOW 0x000c
+#define PGM_HFP_EXP_UNDERFLOW 0x000d
+#define PGM_HFP_SIGNIFICANCE 0x000e
+#define PGM_HFP_DIVIDE 0x000f
+#define PGM_SEGMENT_TRANS 0x0010
+#define PGM_PAGE_TRANS 0x0011
+#define PGM_TRANS_SPEC 0x0012
+#define PGM_SPECIAL_OP 0x0013
+#define PGM_OPERAND 0x0015
+#define PGM_TRACE_TABLE 0x0016
+#define PGM_SPACE_SWITCH 0x001c
+#define PGM_HFP_SQRT 0x001d
+#define PGM_PC_TRANS_SPEC 0x001f
+#define PGM_AFX_TRANS 0x0020
+#define PGM_ASX_TRANS 0x0021
+#define PGM_LX_TRANS 0x0022
+#define PGM_EX_TRANS 0x0023
+#define PGM_PRIM_AUTH 0x0024
+#define PGM_SEC_AUTH 0x0025
+#define PGM_ALET_SPEC 0x0028
+#define PGM_ALEN_SPEC 0x0029
+#define PGM_ALE_SEQ 0x002a
+#define PGM_ASTE_VALID 0x002b
+#define PGM_ASTE_SEQ 0x002c
+#define PGM_EXT_AUTH 0x002d
+#define PGM_STACK_FULL 0x0030
+#define PGM_STACK_EMPTY 0x0031
+#define PGM_STACK_SPEC 0x0032
+#define PGM_STACK_TYPE 0x0033
+#define PGM_STACK_OP 0x0034
+#define PGM_ASCE_TYPE 0x0038
+#define PGM_REG_FIRST_TRANS 0x0039
+#define PGM_REG_SEC_TRANS 0x003a
+#define PGM_REG_THIRD_TRANS 0x003b
+#define PGM_MONITOR 0x0040
+#define PGM_PER 0x0080
+#define PGM_CRYPTO 0x0119
+
+/* External Interrupts */
+#define EXT_INTERRUPT_KEY 0x0040
+#define EXT_CLOCK_COMP 0x1004
+#define EXT_CPU_TIMER 0x1005
+#define EXT_MALFUNCTION 0x1200
+#define EXT_EMERGENCY 0x1201
+#define EXT_EXTERNAL_CALL 0x1202
+#define EXT_ETR 0x1406
+#define EXT_SERVICE 0x2401
+#define EXT_VIRTIO 0x2603
+
+/* PSW defines */
+#undef PSW_MASK_PER
+#undef PSW_MASK_DAT
+#undef PSW_MASK_IO
+#undef PSW_MASK_EXT
+#undef PSW_MASK_KEY
+#undef PSW_SHIFT_KEY
+#undef PSW_MASK_MCHECK
+#undef PSW_MASK_WAIT
+#undef PSW_MASK_PSTATE
+#undef PSW_MASK_ASC
+#undef PSW_MASK_CC
+#undef PSW_MASK_PM
+#undef PSW_MASK_64
+#undef PSW_MASK_32
+#undef PSW_MASK_ESA_ADDR
+
+#define PSW_MASK_PER 0x4000000000000000ULL
+#define PSW_MASK_DAT 0x0400000000000000ULL
+#define PSW_MASK_IO 0x0200000000000000ULL
+#define PSW_MASK_EXT 0x0100000000000000ULL
+#define PSW_MASK_KEY 0x00F0000000000000ULL
+#define PSW_SHIFT_KEY 56
+#define PSW_MASK_MCHECK 0x0004000000000000ULL
+#define PSW_MASK_WAIT 0x0002000000000000ULL
+#define PSW_MASK_PSTATE 0x0001000000000000ULL
+#define PSW_MASK_ASC 0x0000C00000000000ULL
+#define PSW_MASK_CC 0x0000300000000000ULL
+#define PSW_MASK_PM 0x00000F0000000000ULL
+#define PSW_MASK_64 0x0000000100000000ULL
+#define PSW_MASK_32 0x0000000080000000ULL
+#define PSW_MASK_ESA_ADDR 0x000000007fffffffULL
+
+#undef PSW_ASC_PRIMARY
+#undef PSW_ASC_ACCREG
+#undef PSW_ASC_SECONDARY
+#undef PSW_ASC_HOME
+
+#define PSW_ASC_PRIMARY 0x0000000000000000ULL
+#define PSW_ASC_ACCREG 0x0000400000000000ULL
+#define PSW_ASC_SECONDARY 0x0000800000000000ULL
+#define PSW_ASC_HOME 0x0000C00000000000ULL
+
+/* tb flags */
+
+#define FLAG_MASK_PER (PSW_MASK_PER >> 32)
+#define FLAG_MASK_DAT (PSW_MASK_DAT >> 32)
+#define FLAG_MASK_IO (PSW_MASK_IO >> 32)
+#define FLAG_MASK_EXT (PSW_MASK_EXT >> 32)
+#define FLAG_MASK_KEY (PSW_MASK_KEY >> 32)
+#define FLAG_MASK_MCHECK (PSW_MASK_MCHECK >> 32)
+#define FLAG_MASK_WAIT (PSW_MASK_WAIT >> 32)
+#define FLAG_MASK_PSTATE (PSW_MASK_PSTATE >> 32)
+#define FLAG_MASK_ASC (PSW_MASK_ASC >> 32)
+#define FLAG_MASK_CC (PSW_MASK_CC >> 32)
+#define FLAG_MASK_PM (PSW_MASK_PM >> 32)
+#define FLAG_MASK_64 (PSW_MASK_64 >> 32)
+#define FLAG_MASK_32 0x00001000
+
+/* Control register 0 bits */
+#define CR0_LOWPROT 0x0000000010000000ULL
+#define CR0_EDAT 0x0000000000800000ULL
+
+/* MMU */
+#define MMU_PRIMARY_IDX 0
+#define MMU_SECONDARY_IDX 1
+#define MMU_HOME_IDX 2
+
+static inline int cpu_mmu_index(CPUS390XState *env, bool ifetch)
+{
+ switch (env->psw.mask & PSW_MASK_ASC) {
+ case PSW_ASC_PRIMARY:
+ return MMU_PRIMARY_IDX;
+ case PSW_ASC_SECONDARY:
+ return MMU_SECONDARY_IDX;
+ case PSW_ASC_HOME:
+ return MMU_HOME_IDX;
+ case PSW_ASC_ACCREG:
+ /* Fallthrough: access register mode is not yet supported */
+ default:
+ abort();
+ }
+}
+
+static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
+{
+ switch (mmu_idx) {
+ case MMU_PRIMARY_IDX:
+ return PSW_ASC_PRIMARY;
+ case MMU_SECONDARY_IDX:
+ return PSW_ASC_SECONDARY;
+ case MMU_HOME_IDX:
+ return PSW_ASC_HOME;
+ default:
+ abort();
+ }
+}
+
+static inline void cpu_get_tb_cpu_state(CPUS390XState* env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *flags)
+{
+ *pc = env->psw.addr;
+ *cs_base = 0;
+ *flags = ((env->psw.mask >> 32) & ~FLAG_MASK_CC) |
+ ((env->psw.mask & PSW_MASK_32) ? FLAG_MASK_32 : 0);
+}
+
+#define MAX_ILEN 6
+
+/* While the PoO talks about the ILC (a number between 1 and 3), what is
+ actually stored in LowCore is shifted left one bit (an even number between
+ 2 and 6). As this is the actual length of the insn and therefore more
+ useful, that is what we want to pass around and manipulate. To make sure
+ that we have applied this distinction universally, rename the "ILC" to
+ "ILEN". */
+static inline int get_ilen(uint8_t opc)
+{
+ switch (opc >> 6) {
+ case 0:
+ return 2;
+ case 1:
+ case 2:
+ return 4;
+ default:
+ return 6;
+ }
+}
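The length rule above comes from the top two bits of the first opcode byte. A few concrete opcodes, checked with a standalone copy of the same switch:

#include <stdint.h>
#include <stdio.h>

static int toy_ilen(uint8_t opc)
{
    switch (opc >> 6) {
    case 0:
        return 2;
    case 1:
    case 2:
        return 4;
    default:
        return 6;
    }
}

int main(void)
{
    printf("BCR 0x07 -> %d bytes\n", toy_ilen(0x07));  /* 2 */
    printf("BC  0x47 -> %d bytes\n", toy_ilen(0x47));  /* 4 */
    printf("LG  0xe3 -> %d bytes\n", toy_ilen(0xe3));  /* 6 */
    return 0;
}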
+
+/* PER bits from control register 9 */
+#define PER_CR9_EVENT_BRANCH 0x80000000
+#define PER_CR9_EVENT_IFETCH 0x40000000
+#define PER_CR9_EVENT_STORE 0x20000000
+#define PER_CR9_EVENT_STORE_REAL 0x08000000
+#define PER_CR9_EVENT_NULLIFICATION 0x01000000
+#define PER_CR9_CONTROL_BRANCH_ADDRESS 0x00800000
+#define PER_CR9_CONTROL_ALTERATION 0x00200000
+
+/* PER bits from the PER CODE/ATMID/AI in lowcore */
+#define PER_CODE_EVENT_BRANCH 0x8000
+#define PER_CODE_EVENT_IFETCH 0x4000
+#define PER_CODE_EVENT_STORE 0x2000
+#define PER_CODE_EVENT_STORE_REAL 0x0800
+#define PER_CODE_EVENT_NULLIFICATION 0x0100
+
+/* Compute the ATMID field that is stored in the per_perc_atmid lowcore
+ entry when a PER exception is triggered. */
+static inline uint8_t get_per_atmid(CPUS390XState *env)
+{
+ return ((env->psw.mask & PSW_MASK_64) ? (1 << 7) : 0) |
+ (1 << 6) |
+ ((env->psw.mask & PSW_MASK_32) ? (1 << 5) : 0) |
+ ((env->psw.mask & PSW_MASK_DAT) ? (1 << 4) : 0) |
+ ((env->psw.mask & PSW_ASC_SECONDARY) ? (1 << 3) : 0) |
+ ((env->psw.mask & PSW_ASC_ACCREG) ? (1 << 2) : 0);
+}
+
+/* Check if an address is within the PER starting address and the PER
+ ending address. The address range might loop. */
+static inline bool get_per_in_range(CPUS390XState *env, uint64_t addr)
+{
+ if (env->cregs[10] <= env->cregs[11]) {
+ return env->cregs[10] <= addr && addr <= env->cregs[11];
+ } else {
+ return env->cregs[10] <= addr || addr <= env->cregs[11];
+ }
+}
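The range check above is inclusive and, when cr10 > cr11, wraps around the top of the address space. A standalone restatement with made-up control-register values:

#include <stdint.h>
#include <stdio.h>

static int per_in_range(uint64_t start, uint64_t end, uint64_t addr)
{
    if (start <= end) {
        return start <= addr && addr <= end;     /* ordinary inclusive range */
    }
    return start <= addr || addr <= end;         /* range wraps past 2^64 - 1 */
}

int main(void)
{
    uint64_t start = 0xffffffffffff0000ULL, end = 0x0fffULL;

    printf("%d %d %d\n",
           per_in_range(start, end, 0xffffffffffffff00ULL),  /* 1: high part */
           per_in_range(start, end, 0x0010ULL),              /* 1: wrapped low part */
           per_in_range(start, end, 0x100000ULL));           /* 0: outside */
    return 0;
}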
+
+#ifndef CONFIG_USER_ONLY
+/* In several cases of runtime exceptions, we haven't recorded the true
+ instruction length. Use these codes when raising exceptions in order
+ to re-compute the length by examining the insn in memory. */
+#define ILEN_LATER 0x20
+#define ILEN_LATER_INC 0x21
+void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen);
+#endif
+
+S390CPU *cpu_s390x_init(const char *cpu_model);
+S390CPU *s390x_new_cpu(const char *cpu_model, int64_t id, Error **errp);
+S390CPU *cpu_s390x_create(const char *cpu_model, Error **errp);
+void s390x_translate_init(void);
+
+/* You can call this signal handler from your SIGBUS and SIGSEGV
+ signal handlers to inform the virtual CPU of exceptions. Non-zero
+ is returned if the signal was handled by the virtual CPU. */
+int cpu_s390x_signal_handler(int host_signum, void *pinfo,
+ void *puc);
+int s390_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
+ int mmu_idx);
+
+
+#ifndef CONFIG_USER_ONLY
+void do_restart_interrupt(CPUS390XState *env);
+
+static inline hwaddr decode_basedisp_s(CPUS390XState *env, uint32_t ipb,
+ uint8_t *ar)
+{
+ hwaddr addr = 0;
+ uint8_t reg;
+
+ reg = ipb >> 28;
+ if (reg > 0) {
+ addr = env->regs[reg];
+ }
+ addr += (ipb >> 16) & 0xfff;
+ if (ar) {
+ *ar = reg;
+ }
+
+ return addr;
+}
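As used above, the base register number sits in the top four bits of the IPB word and the 12-bit displacement in the twelve bits just below it. A standalone decode of a hypothetical IPB value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t regs[16] = { 0 };
    regs[12] = 0x10000;                 /* pretend contents of base register r12 */

    uint32_t ipb = 0xc02a0000;          /* hypothetical: base r12, displacement 0x02a */
    unsigned reg = ipb >> 28;
    uint64_t addr = (reg ? regs[reg] : 0) + ((ipb >> 16) & 0xfff);

    printf("effective address: 0x%llx\n", (unsigned long long)addr);   /* 0x1002a */
    return 0;
}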
+
+/* Base/displacement are at the same locations. */
+#define decode_basedisp_rs decode_basedisp_s
+
+/* helper functions for run_on_cpu() */
+static inline void s390_do_cpu_reset(CPUState *cs, run_on_cpu_data arg)
+{
+ S390CPUClass *scc = S390_CPU_GET_CLASS(cs);
+
+ scc->cpu_reset(cs);
+}
+static inline void s390_do_cpu_full_reset(CPUState *cs, run_on_cpu_data arg)
+{
+ cpu_reset(cs);
+}
+
+void s390x_tod_timer(void *opaque);
+void s390x_cpu_timer(void *opaque);
+
+int s390_virtio_hypercall(CPUS390XState *env);
+
+#ifdef CONFIG_KVM
+void kvm_s390_service_interrupt(uint32_t parm);
+void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq);
+void kvm_s390_floating_interrupt(struct kvm_s390_irq *irq);
+int kvm_s390_inject_flic(struct kvm_s390_irq *irq);
+void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code);
+int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf,
+ int len, bool is_write);
+int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_clock);
+int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_clock);
+#else
+static inline void kvm_s390_service_interrupt(uint32_t parm)
+{
+}
+static inline int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_low)
+{
+ return -ENOSYS;
+}
+static inline int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_low)
+{
+ return -ENOSYS;
+}
+static inline int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar,
+ void *hostbuf, int len, bool is_write)
+{
+ return -ENOSYS;
+}
+static inline void kvm_s390_access_exception(S390CPU *cpu, uint16_t code,
+ uint64_t te_code)
+{
+}
+#endif
+
+static inline int s390_get_clock(uint8_t *tod_high, uint64_t *tod_low)
+{
+ if (kvm_enabled()) {
+ return kvm_s390_get_clock(tod_high, tod_low);
+ }
+ /* Fixme TCG */
+ *tod_high = 0;
+ *tod_low = 0;
+ return 0;
+}
+
+static inline int s390_set_clock(uint8_t *tod_high, uint64_t *tod_low)
+{
+ if (kvm_enabled()) {
+ return kvm_s390_set_clock(tod_high, tod_low);
+ }
+ /* Fixme TCG */
+ return 0;
+}
+
+S390CPU *s390_cpu_addr2state(uint16_t cpu_addr);
+unsigned int s390_cpu_halt(S390CPU *cpu);
+void s390_cpu_unhalt(S390CPU *cpu);
+unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu);
+static inline uint8_t s390_cpu_get_state(S390CPU *cpu)
+{
+ return cpu->env.cpu_state;
+}
+
+void gtod_save(QEMUFile *f, void *opaque);
+int gtod_load(QEMUFile *f, void *opaque, int version_id);
+
+void cpu_inject_ext(S390CPU *cpu, uint32_t code, uint32_t param,
+ uint64_t param64);
+
+/* ioinst.c */
+void ioinst_handle_xsch(S390CPU *cpu, uint64_t reg1);
+void ioinst_handle_csch(S390CPU *cpu, uint64_t reg1);
+void ioinst_handle_hsch(S390CPU *cpu, uint64_t reg1);
+void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb);
+void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb);
+void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb);
+void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb);
+int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb);
+void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb);
+int ioinst_handle_tpi(S390CPU *cpu, uint32_t ipb);
+void ioinst_handle_schm(S390CPU *cpu, uint64_t reg1, uint64_t reg2,
+ uint32_t ipb);
+void ioinst_handle_rsch(S390CPU *cpu, uint64_t reg1);
+void ioinst_handle_rchp(S390CPU *cpu, uint64_t reg1);
+void ioinst_handle_sal(S390CPU *cpu, uint64_t reg1);
+
+/* service interrupts are floating, therefore we must not pass a cpustate */
+void s390_sclp_extint(uint32_t parm);
+
+#else
+static inline unsigned int s390_cpu_halt(S390CPU *cpu)
+{
+ return 0;
+}
+
+static inline void s390_cpu_unhalt(S390CPU *cpu)
+{
+}
+
+static inline unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu)
+{
+ return 0;
+}
+#endif
+
+extern void subsystem_reset(void);
+
+#define cpu_init(model) CPU(cpu_s390x_init(model))
+#define cpu_signal_handler cpu_s390x_signal_handler
+
+void s390_cpu_list(FILE *f, fprintf_function cpu_fprintf);
+#define cpu_list s390_cpu_list
+void s390_cpu_model_register_props(Object *obj);
+void s390_cpu_model_class_register_props(ObjectClass *oc);
+void s390_realize_cpu_model(CPUState *cs, Error **errp);
+ObjectClass *s390_cpu_class_by_name(const char *name);
+
+#define EXCP_EXT 1 /* external interrupt */
+#define EXCP_SVC 2 /* supervisor call (syscall) */
+#define EXCP_PGM 3 /* program interruption */
+#define EXCP_IO 7 /* I/O interrupt */
+#define EXCP_MCHK 8 /* machine check */
+
+#define INTERRUPT_EXT (1 << 0)
+#define INTERRUPT_TOD (1 << 1)
+#define INTERRUPT_CPUTIMER (1 << 2)
+#define INTERRUPT_IO (1 << 3)
+#define INTERRUPT_MCHK (1 << 4)
+
+/* Program Status Word. */
+#define S390_PSWM_REGNUM 0
+#define S390_PSWA_REGNUM 1
+/* General Purpose Registers. */
+#define S390_R0_REGNUM 2
+#define S390_R1_REGNUM 3
+#define S390_R2_REGNUM 4
+#define S390_R3_REGNUM 5
+#define S390_R4_REGNUM 6
+#define S390_R5_REGNUM 7
+#define S390_R6_REGNUM 8
+#define S390_R7_REGNUM 9
+#define S390_R8_REGNUM 10
+#define S390_R9_REGNUM 11
+#define S390_R10_REGNUM 12
+#define S390_R11_REGNUM 13
+#define S390_R12_REGNUM 14
+#define S390_R13_REGNUM 15
+#define S390_R14_REGNUM 16
+#define S390_R15_REGNUM 17
+/* Total Core Registers. */
+#define S390_NUM_CORE_REGS 18
+
+/* CC optimization */
+
+/* Instead of computing the condition codes after each instruction,
+ * QEMU just stores the result (called CC_DST), the type of operation
+ * (called CC_OP) and whatever operands are needed (CC_SRC and possibly
+ * CC_VR). When the condition codes are needed, the condition codes can
+ * be calculated using this information. Condition codes are not generated
+ * if they are only needed for conditional branches.
+ */
+enum cc_op {
+ CC_OP_CONST0 = 0, /* CC is 0 */
+ CC_OP_CONST1, /* CC is 1 */
+ CC_OP_CONST2, /* CC is 2 */
+ CC_OP_CONST3, /* CC is 3 */
+
+ CC_OP_DYNAMIC, /* CC calculation defined by env->cc_op */
+ CC_OP_STATIC, /* CC value is env->cc_op */
+
+ CC_OP_NZ, /* env->cc_dst != 0 */
+ CC_OP_LTGT_32, /* signed less/greater than (32bit) */
+ CC_OP_LTGT_64, /* signed less/greater than (64bit) */
+ CC_OP_LTUGTU_32, /* unsigned less/greater than (32bit) */
+ CC_OP_LTUGTU_64, /* unsigned less/greater than (64bit) */
+ CC_OP_LTGT0_32, /* signed less/greater than 0 (32bit) */
+ CC_OP_LTGT0_64, /* signed less/greater than 0 (64bit) */
+
+ CC_OP_ADD_64, /* overflow on add (64bit) */
+ CC_OP_ADDU_64, /* overflow on unsigned add (64bit) */
+ CC_OP_ADDC_64, /* overflow on unsigned add-carry (64bit) */
+ CC_OP_SUB_64, /* overflow on subtraction (64bit) */
+ CC_OP_SUBU_64, /* overflow on unsigned subtraction (64bit) */
+ CC_OP_SUBB_64, /* overflow on unsigned sub-borrow (64bit) */
+ CC_OP_ABS_64, /* sign eval on abs (64bit) */
+ CC_OP_NABS_64, /* sign eval on nabs (64bit) */
+
+ CC_OP_ADD_32, /* overflow on add (32bit) */
+ CC_OP_ADDU_32, /* overflow on unsigned add (32bit) */
+ CC_OP_ADDC_32, /* overflow on unsigned add-carry (32bit) */
+ CC_OP_SUB_32, /* overflow on subtraction (32bit) */
+ CC_OP_SUBU_32, /* overflow on unsigned subtraction (32bit) */
+ CC_OP_SUBB_32, /* overflow on unsigned sub-borrow (32bit) */
+ CC_OP_ABS_32, /* sign eval on abs (32bit) */
+ CC_OP_NABS_32, /* sign eval on nabs (32bit) */
+
+ CC_OP_COMP_32, /* complement */
+ CC_OP_COMP_64, /* complement */
+
+ CC_OP_TM_32, /* test under mask (32bit) */
+ CC_OP_TM_64, /* test under mask (64bit) */
+
+ CC_OP_NZ_F32, /* FP dst != 0 (32bit) */
+ CC_OP_NZ_F64, /* FP dst != 0 (64bit) */
+ CC_OP_NZ_F128, /* FP dst != 0 (128bit) */
+
+ CC_OP_ICM, /* insert characters under mask */
+ CC_OP_SLA_32, /* Calculate shift left signed (32bit) */
+ CC_OP_SLA_64, /* Calculate shift left signed (64bit) */
+ CC_OP_FLOGR, /* find leftmost one */
+ CC_OP_MAX
+};
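The enum above is the heart of the lazy scheme described in the preceding comment: the translator stores an operation tag plus the raw operands, and the cc is only materialised on demand. A toy standalone version of that flow (hypothetical names, only one operation implemented):

#include <stdint.h>
#include <stdio.h>

enum { TOY_CC_OP_ADD_64 };

struct toy_cc_state {
    int op;                                     /* which rule to apply later */
    uint64_t src, dst, vr;                      /* deferred operands/result */
};

static unsigned toy_calc_cc(const struct toy_cc_state *s)
{
    switch (s->op) {
    case TOY_CC_OP_ADD_64: {
        int64_t a1 = s->src, a2 = s->dst, ar = s->vr;

        if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
            return 3;                           /* signed overflow */
        }
        return ar < 0 ? 1 : ar > 0 ? 2 : 0;
    }
    default:
        return 0;
    }
}

int main(void)
{
    /* "Execute" an add, remember only the operands... */
    struct toy_cc_state s = { TOY_CC_OP_ADD_64, 5, (uint64_t)-5, 0 };

    /* ...and compute the cc only when it is finally needed. */
    printf("cc = %u\n", toy_calc_cc(&s));       /* 0: result is zero */
    return 0;
}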
+
+static const char *cc_names[] = {
+ [CC_OP_CONST0] = "CC_OP_CONST0",
+ [CC_OP_CONST1] = "CC_OP_CONST1",
+ [CC_OP_CONST2] = "CC_OP_CONST2",
+ [CC_OP_CONST3] = "CC_OP_CONST3",
+ [CC_OP_DYNAMIC] = "CC_OP_DYNAMIC",
+ [CC_OP_STATIC] = "CC_OP_STATIC",
+ [CC_OP_NZ] = "CC_OP_NZ",
+ [CC_OP_LTGT_32] = "CC_OP_LTGT_32",
+ [CC_OP_LTGT_64] = "CC_OP_LTGT_64",
+ [CC_OP_LTUGTU_32] = "CC_OP_LTUGTU_32",
+ [CC_OP_LTUGTU_64] = "CC_OP_LTUGTU_64",
+ [CC_OP_LTGT0_32] = "CC_OP_LTGT0_32",
+ [CC_OP_LTGT0_64] = "CC_OP_LTGT0_64",
+ [CC_OP_ADD_64] = "CC_OP_ADD_64",
+ [CC_OP_ADDU_64] = "CC_OP_ADDU_64",
+ [CC_OP_ADDC_64] = "CC_OP_ADDC_64",
+ [CC_OP_SUB_64] = "CC_OP_SUB_64",
+ [CC_OP_SUBU_64] = "CC_OP_SUBU_64",
+ [CC_OP_SUBB_64] = "CC_OP_SUBB_64",
+ [CC_OP_ABS_64] = "CC_OP_ABS_64",
+ [CC_OP_NABS_64] = "CC_OP_NABS_64",
+ [CC_OP_ADD_32] = "CC_OP_ADD_32",
+ [CC_OP_ADDU_32] = "CC_OP_ADDU_32",
+ [CC_OP_ADDC_32] = "CC_OP_ADDC_32",
+ [CC_OP_SUB_32] = "CC_OP_SUB_32",
+ [CC_OP_SUBU_32] = "CC_OP_SUBU_32",
+ [CC_OP_SUBB_32] = "CC_OP_SUBB_32",
+ [CC_OP_ABS_32] = "CC_OP_ABS_32",
+ [CC_OP_NABS_32] = "CC_OP_NABS_32",
+ [CC_OP_COMP_32] = "CC_OP_COMP_32",
+ [CC_OP_COMP_64] = "CC_OP_COMP_64",
+ [CC_OP_TM_32] = "CC_OP_TM_32",
+ [CC_OP_TM_64] = "CC_OP_TM_64",
+ [CC_OP_NZ_F32] = "CC_OP_NZ_F32",
+ [CC_OP_NZ_F64] = "CC_OP_NZ_F64",
+ [CC_OP_NZ_F128] = "CC_OP_NZ_F128",
+ [CC_OP_ICM] = "CC_OP_ICM",
+ [CC_OP_SLA_32] = "CC_OP_SLA_32",
+ [CC_OP_SLA_64] = "CC_OP_SLA_64",
+ [CC_OP_FLOGR] = "CC_OP_FLOGR",
+};
+
+static inline const char *cc_name(int cc_op)
+{
+ return cc_names[cc_op];
+}
+
+static inline void setcc(S390CPU *cpu, uint64_t cc)
+{
+ CPUS390XState *env = &cpu->env;
+
+ env->psw.mask &= ~(3ull << 44);
+ env->psw.mask |= (cc & 3) << 44;
+ env->cc_op = cc;
+}
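setcc() above writes the two cc bits into PSW mask bits 44-45, which is exactly the field covered by PSW_MASK_CC. A quick standalone sanity check:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint64_t psw_mask_cc = 0x0000300000000000ULL;

    assert((3ULL << 44) == psw_mask_cc);        /* same two bits as PSW_MASK_CC */

    uint64_t mask = 0;                          /* pretend PSW mask, cc currently 0 */
    uint64_t cc = 2;

    mask = (mask & ~(3ULL << 44)) | ((cc & 3) << 44);
    printf("psw.mask with cc=2: 0x%016llx\n", (unsigned long long)mask);
    return 0;
}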
+
+typedef struct LowCore
+{
+ /* prefix area: defined by architecture */
+ uint32_t ccw1[2]; /* 0x000 */
+ uint32_t ccw2[4]; /* 0x008 */
+ uint8_t pad1[0x80-0x18]; /* 0x018 */
+ uint32_t ext_params; /* 0x080 */
+ uint16_t cpu_addr; /* 0x084 */
+ uint16_t ext_int_code; /* 0x086 */
+ uint16_t svc_ilen; /* 0x088 */
+ uint16_t svc_code; /* 0x08a */
+ uint16_t pgm_ilen; /* 0x08c */
+ uint16_t pgm_code; /* 0x08e */
+ uint32_t data_exc_code; /* 0x090 */
+ uint16_t mon_class_num; /* 0x094 */
+ uint16_t per_perc_atmid; /* 0x096 */
+ uint64_t per_address; /* 0x098 */
+ uint8_t exc_access_id; /* 0x0a0 */
+ uint8_t per_access_id; /* 0x0a1 */
+ uint8_t op_access_id; /* 0x0a2 */
+ uint8_t ar_access_id; /* 0x0a3 */
+ uint8_t pad2[0xA8-0xA4]; /* 0x0a4 */
+ uint64_t trans_exc_code; /* 0x0a8 */
+ uint64_t monitor_code; /* 0x0b0 */
+ uint16_t subchannel_id; /* 0x0b8 */
+ uint16_t subchannel_nr; /* 0x0ba */
+ uint32_t io_int_parm; /* 0x0bc */
+ uint32_t io_int_word; /* 0x0c0 */
+ uint8_t pad3[0xc8-0xc4]; /* 0x0c4 */
+ uint32_t stfl_fac_list; /* 0x0c8 */
+ uint8_t pad4[0xe8-0xcc]; /* 0x0cc */
+ uint32_t mcck_interruption_code[2]; /* 0x0e8 */
+ uint8_t pad5[0xf4-0xf0]; /* 0x0f0 */
+ uint32_t external_damage_code; /* 0x0f4 */
+ uint64_t failing_storage_address; /* 0x0f8 */
+ uint8_t pad6[0x110-0x100]; /* 0x100 */
+ uint64_t per_breaking_event_addr; /* 0x110 */
+ uint8_t pad7[0x120-0x118]; /* 0x118 */
+ PSW restart_old_psw; /* 0x120 */
+ PSW external_old_psw; /* 0x130 */
+ PSW svc_old_psw; /* 0x140 */
+ PSW program_old_psw; /* 0x150 */
+ PSW mcck_old_psw; /* 0x160 */
+ PSW io_old_psw; /* 0x170 */
+ uint8_t pad8[0x1a0-0x180]; /* 0x180 */
+ PSW restart_new_psw; /* 0x1a0 */
+ PSW external_new_psw; /* 0x1b0 */
+ PSW svc_new_psw; /* 0x1c0 */
+ PSW program_new_psw; /* 0x1d0 */
+ PSW mcck_new_psw; /* 0x1e0 */
+ PSW io_new_psw; /* 0x1f0 */
+ PSW return_psw; /* 0x200 */
+ uint8_t irb[64]; /* 0x210 */
+ uint64_t sync_enter_timer; /* 0x250 */
+ uint64_t async_enter_timer; /* 0x258 */
+ uint64_t exit_timer; /* 0x260 */
+ uint64_t last_update_timer; /* 0x268 */
+ uint64_t user_timer; /* 0x270 */
+ uint64_t system_timer; /* 0x278 */
+ uint64_t last_update_clock; /* 0x280 */
+ uint64_t steal_clock; /* 0x288 */
+ PSW return_mcck_psw; /* 0x290 */
+ uint8_t pad9[0xc00-0x2a0]; /* 0x2a0 */
+ /* System info area */
+ uint64_t save_area[16]; /* 0xc00 */
+ uint8_t pad10[0xd40-0xc80]; /* 0xc80 */
+ uint64_t kernel_stack; /* 0xd40 */
+ uint64_t thread_info; /* 0xd48 */
+ uint64_t async_stack; /* 0xd50 */
+ uint64_t kernel_asce; /* 0xd58 */
+ uint64_t user_asce; /* 0xd60 */
+ uint64_t panic_stack; /* 0xd68 */
+ uint64_t user_exec_asce; /* 0xd70 */
+ uint8_t pad11[0xdc0-0xd78]; /* 0xd78 */
+
+ /* SMP info area: defined by DJB */
+ uint64_t clock_comparator; /* 0xdc0 */
+ uint64_t ext_call_fast; /* 0xdc8 */
+ uint64_t percpu_offset; /* 0xdd0 */
+ uint64_t current_task; /* 0xdd8 */
+ uint32_t softirq_pending; /* 0xde0 */
+ uint32_t pad_0x0de4; /* 0xde4 */
+ uint64_t int_clock; /* 0xde8 */
+ uint8_t pad12[0xe00-0xdf0]; /* 0xdf0 */
+
+ /* 0xe00 is used as indicator for dump tools */
+ /* whether the kernel died with panic() or not */
+ uint32_t panic_magic; /* 0xe00 */
+
+ uint8_t pad13[0x11b8-0xe04]; /* 0xe04 */
+
+ /* 64 bit extparam used for pfault, diag 250 etc */
+ uint64_t ext_params2; /* 0x11B8 */
+
+ uint8_t pad14[0x1200-0x11C0]; /* 0x11C0 */
+
+ /* System info area */
+
+ uint64_t floating_pt_save_area[16]; /* 0x1200 */
+ uint64_t gpregs_save_area[16]; /* 0x1280 */
+ uint32_t st_status_fixed_logout[4]; /* 0x1300 */
+ uint8_t pad15[0x1318-0x1310]; /* 0x1310 */
+ uint32_t prefixreg_save_area; /* 0x1318 */
+ uint32_t fpt_creg_save_area; /* 0x131c */
+ uint8_t pad16[0x1324-0x1320]; /* 0x1320 */
+ uint32_t tod_progreg_save_area; /* 0x1324 */
+ uint32_t cpu_timer_save_area[2]; /* 0x1328 */
+ uint32_t clock_comp_save_area[2]; /* 0x1330 */
+ uint8_t pad17[0x1340-0x1338]; /* 0x1338 */
+ uint32_t access_regs_save_area[16]; /* 0x1340 */
+ uint64_t cregs_save_area[16]; /* 0x1380 */
+
+ /* align to the top of the prefix area */
+
+ uint8_t pad18[0x2000-0x1400]; /* 0x1400 */
+} QEMU_PACKED LowCore;
+
+/* STSI */
+#define STSI_LEVEL_MASK 0x00000000f0000000ULL
+#define STSI_LEVEL_CURRENT 0x0000000000000000ULL
+#define STSI_LEVEL_1 0x0000000010000000ULL
+#define STSI_LEVEL_2 0x0000000020000000ULL
+#define STSI_LEVEL_3 0x0000000030000000ULL
+#define STSI_R0_RESERVED_MASK 0x000000000fffff00ULL
+#define STSI_R0_SEL1_MASK 0x00000000000000ffULL
+#define STSI_R1_RESERVED_MASK 0x00000000ffff0000ULL
+#define STSI_R1_SEL2_MASK 0x000000000000ffffULL
+
+/* Basic Machine Configuration */
+struct sysib_111 {
+ uint32_t res1[8];
+ uint8_t manuf[16];
+ uint8_t type[4];
+ uint8_t res2[12];
+ uint8_t model[16];
+ uint8_t sequence[16];
+ uint8_t plant[4];
+ uint8_t res3[156];
+};
+
+/* Basic Machine CPU */
+struct sysib_121 {
+ uint32_t res1[80];
+ uint8_t sequence[16];
+ uint8_t plant[4];
+ uint8_t res2[2];
+ uint16_t cpu_addr;
+ uint8_t res3[152];
+};
+
+/* Basic Machine CPUs */
+struct sysib_122 {
+ uint8_t res1[32];
+ uint32_t capability;
+ uint16_t total_cpus;
+ uint16_t active_cpus;
+ uint16_t standby_cpus;
+ uint16_t reserved_cpus;
+ uint16_t adjustments[2026];
+};
+
+/* LPAR CPU */
+struct sysib_221 {
+ uint32_t res1[80];
+ uint8_t sequence[16];
+ uint8_t plant[4];
+ uint16_t cpu_id;
+ uint16_t cpu_addr;
+ uint8_t res3[152];
+};
+
+/* LPAR CPUs */
+struct sysib_222 {
+ uint32_t res1[32];
+ uint16_t lpar_num;
+ uint8_t res2;
+ uint8_t lcpuc;
+ uint16_t total_cpus;
+ uint16_t conf_cpus;
+ uint16_t standby_cpus;
+ uint16_t reserved_cpus;
+ uint8_t name[8];
+ uint32_t caf;
+ uint8_t res3[16];
+ uint16_t dedicated_cpus;
+ uint16_t shared_cpus;
+ uint8_t res4[180];
+};
+
+/* VM CPUs */
+struct sysib_322 {
+ uint8_t res1[31];
+ uint8_t count;
+ struct {
+ uint8_t res2[4];
+ uint16_t total_cpus;
+ uint16_t conf_cpus;
+ uint16_t standby_cpus;
+ uint16_t reserved_cpus;
+ uint8_t name[8];
+ uint32_t caf;
+ uint8_t cpi[16];
+ uint8_t res5[3];
+ uint8_t ext_name_encoding;
+ uint32_t res3;
+ uint8_t uuid[16];
+ } vm[8];
+ uint8_t res4[1504];
+ uint8_t ext_names[8][256];
+};
+
+/* MMU defines */
+#define _ASCE_ORIGIN ~0xfffULL /* segment table origin */
+#define _ASCE_SUBSPACE 0x200 /* subspace group control */
+#define _ASCE_PRIVATE_SPACE 0x100 /* private space control */
+#define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */
+#define _ASCE_SPACE_SWITCH 0x40 /* space switch event */
+#define _ASCE_REAL_SPACE 0x20 /* real space control */
+#define _ASCE_TYPE_MASK 0x0c /* asce table type mask */
+#define _ASCE_TYPE_REGION1 0x0c /* region first table type */
+#define _ASCE_TYPE_REGION2 0x08 /* region second table type */
+#define _ASCE_TYPE_REGION3 0x04 /* region third table type */
+#define _ASCE_TYPE_SEGMENT 0x00 /* segment table type */
+#define _ASCE_TABLE_LENGTH 0x03 /* region table length */
+
+#define _REGION_ENTRY_ORIGIN ~0xfffULL /* region/segment table origin */
+#define _REGION_ENTRY_RO 0x200 /* region/segment protection bit */
+#define _REGION_ENTRY_TF 0xc0 /* region/segment table offset */
+#define _REGION_ENTRY_INV 0x20 /* invalid region table entry */
+#define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */
+#define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */
+#define _REGION_ENTRY_TYPE_R2 0x08 /* region second table type */
+#define _REGION_ENTRY_TYPE_R3 0x04 /* region third table type */
+#define _REGION_ENTRY_LENGTH 0x03 /* region third length */
+
+#define _SEGMENT_ENTRY_ORIGIN ~0x7ffULL /* segment table origin */
+#define _SEGMENT_ENTRY_FC 0x400 /* format control */
+#define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */
+#define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */
+
+#define _PAGE_RO 0x200 /* HW read-only bit */
+#define _PAGE_INVALID 0x400 /* HW invalid bit */
+#define _PAGE_RES0 0x800 /* bit must be zero */
+
+#define SK_C (0x1 << 1)
+#define SK_R (0x1 << 2)
+#define SK_F (0x1 << 3)
+#define SK_ACC_MASK (0xf << 4)
+
+/* SIGP order codes */
+#define SIGP_SENSE 0x01
+#define SIGP_EXTERNAL_CALL 0x02
+#define SIGP_EMERGENCY 0x03
+#define SIGP_START 0x04
+#define SIGP_STOP 0x05
+#define SIGP_RESTART 0x06
+#define SIGP_STOP_STORE_STATUS 0x09
+#define SIGP_INITIAL_CPU_RESET 0x0b
+#define SIGP_CPU_RESET 0x0c
+#define SIGP_SET_PREFIX 0x0d
+#define SIGP_STORE_STATUS_ADDR 0x0e
+#define SIGP_SET_ARCH 0x12
+#define SIGP_STORE_ADTL_STATUS 0x17
+
+/* SIGP condition codes */
+#define SIGP_CC_ORDER_CODE_ACCEPTED 0
+#define SIGP_CC_STATUS_STORED 1
+#define SIGP_CC_BUSY 2
+#define SIGP_CC_NOT_OPERATIONAL 3
+
+/* SIGP status bits */
+#define SIGP_STAT_EQUIPMENT_CHECK 0x80000000UL
+#define SIGP_STAT_INCORRECT_STATE 0x00000200UL
+#define SIGP_STAT_INVALID_PARAMETER 0x00000100UL
+#define SIGP_STAT_EXT_CALL_PENDING 0x00000080UL
+#define SIGP_STAT_STOPPED 0x00000040UL
+#define SIGP_STAT_OPERATOR_INTERV 0x00000020UL
+#define SIGP_STAT_CHECK_STOP 0x00000010UL
+#define SIGP_STAT_INOPERATIVE 0x00000004UL
+#define SIGP_STAT_INVALID_ORDER 0x00000002UL
+#define SIGP_STAT_RECEIVER_CHECK 0x00000001UL
+
+/* SIGP SET ARCHITECTURE modes */
+#define SIGP_MODE_ESA_S390 0
+#define SIGP_MODE_Z_ARCH_TRANS_ALL_PSW 1
+#define SIGP_MODE_Z_ARCH_TRANS_CUR_PSW 2
+
+void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr);
+int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
+ target_ulong *raddr, int *flags, bool exc);
+int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code);
+uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
+ uint64_t vr);
+void s390_cpu_recompute_watchpoints(CPUState *cs);
+
+int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
+ int len, bool is_write);
+
+#define s390_cpu_virt_mem_read(cpu, laddr, ar, dest, len) \
+ s390_cpu_virt_mem_rw(cpu, laddr, ar, dest, len, false)
+#define s390_cpu_virt_mem_write(cpu, laddr, ar, dest, len) \
+ s390_cpu_virt_mem_rw(cpu, laddr, ar, dest, len, true)
+#define s390_cpu_virt_mem_check_write(cpu, laddr, ar, len) \
+ s390_cpu_virt_mem_rw(cpu, laddr, ar, NULL, len, true)
+
+/* The value of the TOD clock for 1.1.1970. */
+#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
+
+/* Converts ns to s390's clock format */
+static inline uint64_t time2tod(uint64_t ns)
+{
+ return (ns << 9) / 125;
+}
+
+/* Converts s390's clock format to ns */
+static inline uint64_t tod2time(uint64_t t)
+{
+ return (t * 125) >> 9;
+}
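A quick sanity check on the arithmetic above: the TOD clock advances 4096 units per microsecond (bit 51 ticks once per microsecond), so one second corresponds to 4,096,000,000 TOD units, and the two conversions invert each other exactly whenever the nanosecond value is a multiple of 125. The standalone sketch below (plain C, no QEMU headers; the two local helpers merely restate the inline functions above under different names) demonstrates this:

#include <assert.h>
#include <stdint.h>

/* restatement of the time2tod()/tod2time() arithmetic above */
static uint64_t ns_to_tod(uint64_t ns)  { return (ns << 9) / 125; }
static uint64_t tod_to_ns(uint64_t tod) { return (tod * 125) >> 9; }

int main(void)
{
    /* 1 second = 1,000,000,000 ns = 4,096,000,000 TOD units */
    assert(ns_to_tod(1000000000ULL) == 4096000000ULL);
    assert(tod_to_ns(4096000000ULL) == 1000000000ULL);

    /* exact round trip for multiples of 125 ns, e.g. one day */
    assert(tod_to_ns(ns_to_tod(86400ULL * 1000000000ULL)) == 86400ULL * 1000000000ULL);
    return 0;
}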
+
+/* from s390-virtio-ccw */
+#define MEM_SECTION_SIZE 0x10000000UL
+#define MAX_AVAIL_SLOTS 32
+
+/* fpu_helper.c */
+uint32_t set_cc_nz_f32(float32 v);
+uint32_t set_cc_nz_f64(float64 v);
+uint32_t set_cc_nz_f128(float128 v);
+
+/* misc_helper.c */
+#ifndef CONFIG_USER_ONLY
+int handle_diag_288(CPUS390XState *env, uint64_t r1, uint64_t r3);
+void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3);
+#endif
+void program_interrupt(CPUS390XState *env, uint32_t code, int ilen);
+void QEMU_NORETURN runtime_exception(CPUS390XState *env, int excp,
+ uintptr_t retaddr);
+
+#ifdef CONFIG_KVM
+void kvm_s390_io_interrupt(uint16_t subchannel_id,
+ uint16_t subchannel_nr, uint32_t io_int_parm,
+ uint32_t io_int_word);
+void kvm_s390_crw_mchk(void);
+void kvm_s390_enable_css_support(S390CPU *cpu);
+int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
+ int vq, bool assign);
+int kvm_s390_cpu_restart(S390CPU *cpu);
+int kvm_s390_get_memslot_count(KVMState *s);
+void kvm_s390_cmma_reset(void);
+int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state);
+void kvm_s390_reset_vcpu(S390CPU *cpu);
+int kvm_s390_set_mem_limit(KVMState *s, uint64_t new_limit, uint64_t *hw_limit);
+void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu);
+int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu);
+int kvm_s390_get_ri(void);
+void kvm_s390_crypto_reset(void);
+#else
+static inline void kvm_s390_io_interrupt(uint16_t subchannel_id,
+ uint16_t subchannel_nr,
+ uint32_t io_int_parm,
+ uint32_t io_int_word)
+{
+}
+static inline void kvm_s390_crw_mchk(void)
+{
+}
+static inline void kvm_s390_enable_css_support(S390CPU *cpu)
+{
+}
+static inline int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier,
+ uint32_t sch, int vq,
+ bool assign)
+{
+ return -ENOSYS;
+}
+static inline int kvm_s390_cpu_restart(S390CPU *cpu)
+{
+ return -ENOSYS;
+}
+static inline void kvm_s390_cmma_reset(void)
+{
+}
+static inline int kvm_s390_get_memslot_count(KVMState *s)
+{
+ return MAX_AVAIL_SLOTS;
+}
+static inline int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state)
+{
+ return -ENOSYS;
+}
+static inline void kvm_s390_reset_vcpu(S390CPU *cpu)
+{
+}
+static inline int kvm_s390_set_mem_limit(KVMState *s, uint64_t new_limit,
+ uint64_t *hw_limit)
+{
+ return 0;
+}
+static inline void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu)
+{
+}
+static inline int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu)
+{
+ return 0;
+}
+static inline int kvm_s390_get_ri(void)
+{
+ return 0;
+}
+static inline void kvm_s390_crypto_reset(void)
+{
+}
+#endif
+
+static inline int s390_set_memory_limit(uint64_t new_limit, uint64_t *hw_limit)
+{
+ if (kvm_enabled()) {
+ return kvm_s390_set_mem_limit(kvm_state, new_limit, hw_limit);
+ }
+ return 0;
+}
+
+static inline void s390_cmma_reset(void)
+{
+ if (kvm_enabled()) {
+ kvm_s390_cmma_reset();
+ }
+}
+
+static inline int s390_cpu_restart(S390CPU *cpu)
+{
+ if (kvm_enabled()) {
+ return kvm_s390_cpu_restart(cpu);
+ }
+ return -ENOSYS;
+}
+
+static inline int s390_get_memslot_count(KVMState *s)
+{
+ if (kvm_enabled()) {
+ return kvm_s390_get_memslot_count(s);
+ } else {
+ return MAX_AVAIL_SLOTS;
+ }
+}
+
+void s390_io_interrupt(uint16_t subchannel_id, uint16_t subchannel_nr,
+ uint32_t io_int_parm, uint32_t io_int_word);
+void s390_crw_mchk(void);
+
+static inline int s390_assign_subch_ioeventfd(EventNotifier *notifier,
+ uint32_t sch_id, int vq,
+ bool assign)
+{
+ return kvm_s390_assign_subch_ioeventfd(notifier, sch_id, vq, assign);
+}
+
+static inline void s390_crypto_reset(void)
+{
+ if (kvm_enabled()) {
+ kvm_s390_crypto_reset();
+ }
+}
+
+/* machine check interruption code */
+
+/* subclasses */
+#define MCIC_SC_SD 0x8000000000000000ULL
+#define MCIC_SC_PD 0x4000000000000000ULL
+#define MCIC_SC_SR 0x2000000000000000ULL
+#define MCIC_SC_CD 0x0800000000000000ULL
+#define MCIC_SC_ED 0x0400000000000000ULL
+#define MCIC_SC_DG 0x0100000000000000ULL
+#define MCIC_SC_W 0x0080000000000000ULL
+#define MCIC_SC_CP 0x0040000000000000ULL
+#define MCIC_SC_SP 0x0020000000000000ULL
+#define MCIC_SC_CK 0x0010000000000000ULL
+
+/* subclass modifiers */
+#define MCIC_SCM_B 0x0002000000000000ULL
+#define MCIC_SCM_DA 0x0000000020000000ULL
+#define MCIC_SCM_AP 0x0000000000080000ULL
+
+/* storage errors */
+#define MCIC_SE_SE 0x0000800000000000ULL
+#define MCIC_SE_SC 0x0000400000000000ULL
+#define MCIC_SE_KE 0x0000200000000000ULL
+#define MCIC_SE_DS 0x0000100000000000ULL
+#define MCIC_SE_IE 0x0000000080000000ULL
+
+/* validity bits */
+#define MCIC_VB_WP 0x0000080000000000ULL
+#define MCIC_VB_MS 0x0000040000000000ULL
+#define MCIC_VB_PM 0x0000020000000000ULL
+#define MCIC_VB_IA 0x0000010000000000ULL
+#define MCIC_VB_FA 0x0000008000000000ULL
+#define MCIC_VB_VR 0x0000004000000000ULL
+#define MCIC_VB_EC 0x0000002000000000ULL
+#define MCIC_VB_FP 0x0000001000000000ULL
+#define MCIC_VB_GR 0x0000000800000000ULL
+#define MCIC_VB_CR 0x0000000400000000ULL
+#define MCIC_VB_ST 0x0000000100000000ULL
+#define MCIC_VB_AR 0x0000000040000000ULL
+#define MCIC_VB_PR 0x0000000000200000ULL
+#define MCIC_VB_FC 0x0000000000100000ULL
+#define MCIC_VB_CT 0x0000000000020000ULL
+#define MCIC_VB_CC 0x0000000000010000ULL
+
+#endif
diff --git a/target/s390x/cpu_features.c b/target/s390x/cpu_features.c
new file mode 100644
index 0000000000..42fd9d792b
--- /dev/null
+++ b/target/s390x/cpu_features.c
@@ -0,0 +1,404 @@
+/*
+ * CPU features/facilities for s390x
+ *
+ * Copyright 2016 IBM Corp.
+ *
+ * Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/module.h"
+#include "cpu_features.h"
+#include "gen-features.h"
+
+#define FEAT_INIT(_name, _type, _bit, _desc) \
+ { \
+ .name = _name, \
+ .type = _type, \
+ .bit = _bit, \
+ .desc = _desc, \
+ }
+
+/* indexed by feature number for easy lookup */
+static const S390FeatDef s390_features[] = {
+ FEAT_INIT("esan3", S390_FEAT_TYPE_STFL, 0, "Instructions marked as n3"),
+ FEAT_INIT("zarch", S390_FEAT_TYPE_STFL, 1, "z/Architecture architectural mode"),
+ FEAT_INIT("dateh", S390_FEAT_TYPE_STFL, 3, "DAT-enhancement facility"),
+ FEAT_INIT("idtes", S390_FEAT_TYPE_STFL, 4, "IDTE selective TLB segment-table clearing"),
+ FEAT_INIT("idter", S390_FEAT_TYPE_STFL, 5, "IDTE selective TLB region-table clearing"),
+ FEAT_INIT("asnlxr", S390_FEAT_TYPE_STFL, 6, "ASN-and-LX reuse facility"),
+ FEAT_INIT("stfle", S390_FEAT_TYPE_STFL, 7, "Store-facility-list-extended facility"),
+ FEAT_INIT("edat", S390_FEAT_TYPE_STFL, 8, "Enhanced-DAT facility"),
+ FEAT_INIT("srs", S390_FEAT_TYPE_STFL, 9, "Sense-running-status facility"),
+ FEAT_INIT("csske", S390_FEAT_TYPE_STFL, 10, "Conditional-SSKE facility"),
+ FEAT_INIT("ctop", S390_FEAT_TYPE_STFL, 11, "Configuration-topology facility"),
+ FEAT_INIT("ipter", S390_FEAT_TYPE_STFL, 13, "IPTE-range facility"),
+ FEAT_INIT("nonqks", S390_FEAT_TYPE_STFL, 14, "Nonquiescing key-setting facility"),
+ FEAT_INIT("etf2", S390_FEAT_TYPE_STFL, 16, "Extended-translation facility 2"),
+ FEAT_INIT("msa-base", S390_FEAT_TYPE_STFL, 17, "Message-security-assist facility (excluding subfunctions)"),
+ FEAT_INIT("ldisp", S390_FEAT_TYPE_STFL, 18, "Long-displacement facility"),
+ FEAT_INIT("ldisphp", S390_FEAT_TYPE_STFL, 19, "Long-displacement facility has high performance"),
+ FEAT_INIT("hfpm", S390_FEAT_TYPE_STFL, 20, "HFP-multiply-add/subtract facility"),
+ FEAT_INIT("eimm", S390_FEAT_TYPE_STFL, 21, "Extended-immediate facility"),
+ FEAT_INIT("etf3", S390_FEAT_TYPE_STFL, 22, "Extended-translation facility 3"),
+ FEAT_INIT("hfpue", S390_FEAT_TYPE_STFL, 23, "HFP-unnormalized-extension facility"),
+ FEAT_INIT("etf2eh", S390_FEAT_TYPE_STFL, 24, "ETF2-enhancement facility"),
+ FEAT_INIT("stckf", S390_FEAT_TYPE_STFL, 25, "Store-clock-fast facility"),
+ FEAT_INIT("parseh", S390_FEAT_TYPE_STFL, 26, "Parsing-enhancement facility"),
+ FEAT_INIT("mvcos", S390_FEAT_TYPE_STFL, 27, "Move-with-optional-specification facility"),
+ FEAT_INIT("tods-base", S390_FEAT_TYPE_STFL, 28, "TOD-clock-steering facility (excluding subfunctions)"),
+ FEAT_INIT("etf3eh", S390_FEAT_TYPE_STFL, 30, "ETF3-enhancement facility"),
+ FEAT_INIT("ectg", S390_FEAT_TYPE_STFL, 31, "Extract-CPU-time facility"),
+ FEAT_INIT("csst", S390_FEAT_TYPE_STFL, 32, "Compare-and-swap-and-store facility"),
+ FEAT_INIT("csst2", S390_FEAT_TYPE_STFL, 33, "Compare-and-swap-and-store facility 2"),
+ FEAT_INIT("ginste", S390_FEAT_TYPE_STFL, 34, "General-instructions-extension facility"),
+ FEAT_INIT("exrl", S390_FEAT_TYPE_STFL, 35, "Execute-extensions facility"),
+ FEAT_INIT("emon", S390_FEAT_TYPE_STFL, 36, "Enhanced-monitor facility"),
+ FEAT_INIT("fpe", S390_FEAT_TYPE_STFL, 37, "Floating-point extension facility"),
+ FEAT_INIT("sprogp", S390_FEAT_TYPE_STFL, 40, "Set-program-parameters facility"),
+ FEAT_INIT("fpseh", S390_FEAT_TYPE_STFL, 41, "Floating-point-support-enhancement facilities"),
+ FEAT_INIT("dfp", S390_FEAT_TYPE_STFL, 42, "DFP (decimal-floating-point) facility"),
+ FEAT_INIT("dfphp", S390_FEAT_TYPE_STFL, 43, "DFP (decimal-floating-point) facility has high performance"),
+ FEAT_INIT("pfpo", S390_FEAT_TYPE_STFL, 44, "PFPO instruction"),
+ FEAT_INIT("stfle45", S390_FEAT_TYPE_STFL, 45, "Various facilities introduced with z196"),
+ FEAT_INIT("cmpsceh", S390_FEAT_TYPE_STFL, 47, "CMPSC-enhancement facility"),
+ FEAT_INIT("dfpzc", S390_FEAT_TYPE_STFL, 48, "Decimal-floating-point zoned-conversion facility"),
+ FEAT_INIT("stfle49", S390_FEAT_TYPE_STFL, 49, "Various facilities introduced with zEC12"),
+ FEAT_INIT("cte", S390_FEAT_TYPE_STFL, 50, "Constrained transactional-execution facility"),
+ FEAT_INIT("ltlbc", S390_FEAT_TYPE_STFL, 51, "Local-TLB-clearing facility"),
+ FEAT_INIT("iacc2", S390_FEAT_TYPE_STFL, 52, "Interlocked-access facility 2"),
+ FEAT_INIT("stfle53", S390_FEAT_TYPE_STFL, 53, "Various facilities introduced with z13"),
+ FEAT_INIT("msa5-base", S390_FEAT_TYPE_STFL, 57, "Message-security-assist-extension-5 facility (excluding subfunctions)"),
+ FEAT_INIT("ri", S390_FEAT_TYPE_STFL, 64, "CPU runtime-instrumentation facility"),
+ FEAT_INIT("te", S390_FEAT_TYPE_STFL, 73, "Transactional-execution facility"),
+ FEAT_INIT("sthyi", S390_FEAT_TYPE_STFL, 74, "Store-hypervisor-information facility"),
+ FEAT_INIT("aefsi", S390_FEAT_TYPE_STFL, 75, "Access-exception-fetch/store-indication facility"),
+ FEAT_INIT("msa3-base", S390_FEAT_TYPE_STFL, 76, "Message-security-assist-extension-3 facility (excluding subfunctions)"),
+ FEAT_INIT("msa4-base", S390_FEAT_TYPE_STFL, 77, "Message-security-assist-extension-4 facility (excluding subfunctions)"),
+ FEAT_INIT("edat2", S390_FEAT_TYPE_STFL, 78, "Enhanced-DAT facility 2"),
+ FEAT_INIT("dfppc", S390_FEAT_TYPE_STFL, 80, "Decimal-floating-point packed-conversion facility"),
+ FEAT_INIT("vx", S390_FEAT_TYPE_STFL, 129, "Vector facility"),
+
+ FEAT_INIT("gsls", S390_FEAT_TYPE_SCLP_CONF_CHAR, 40, "SIE: Guest-storage-limit-suppression facility"),
+ FEAT_INIT("esop", S390_FEAT_TYPE_SCLP_CONF_CHAR, 46, "Enhanced-suppression-on-protection facility"),
+
+ FEAT_INIT("64bscao", S390_FEAT_TYPE_SCLP_CONF_CHAR_EXT, 0, "SIE: 64-bit-SCAO facility"),
+ FEAT_INIT("cmma", S390_FEAT_TYPE_SCLP_CONF_CHAR_EXT, 1, "SIE: Collaborative-memory-management assist"),
+ FEAT_INIT("pfmfi", S390_FEAT_TYPE_SCLP_CONF_CHAR_EXT, 9, "SIE: PFMF interpretation facility"),
+ FEAT_INIT("ibs", S390_FEAT_TYPE_SCLP_CONF_CHAR_EXT, 10, "SIE: Interlock-and-broadcast-suppression facility"),
+
+ FEAT_INIT("sief2", S390_FEAT_TYPE_SCLP_CPU, 4, "SIE: interception format 2 (Virtual SIE)"),
+ FEAT_INIT("skey", S390_FEAT_TYPE_SCLP_CPU, 5, "SIE: Storage-key facility"),
+ FEAT_INIT("gpereh", S390_FEAT_TYPE_SCLP_CPU, 10, "SIE: Guest-PER enhancement facility"),
+ FEAT_INIT("siif", S390_FEAT_TYPE_SCLP_CPU, 11, "SIE: Shared IPTE-interlock facility"),
+ FEAT_INIT("sigpif", S390_FEAT_TYPE_SCLP_CPU, 12, "SIE: SIGP interpretation facility"),
+ FEAT_INIT("ib", S390_FEAT_TYPE_SCLP_CPU, 42, "SIE: Intervention bypass facility"),
+ FEAT_INIT("cei", S390_FEAT_TYPE_SCLP_CPU, 43, "SIE: Conditional-external-interception facility"),
+
+ FEAT_INIT("dateh2", S390_FEAT_TYPE_MISC, 0, "DAT-enhancement facility 2"),
+ FEAT_INIT("cmm", S390_FEAT_TYPE_MISC, 0, "Collaborative-memory-management facility"),
+
+ FEAT_INIT("plo-cl", S390_FEAT_TYPE_PLO, 0, "PLO Compare and load (32 bit in general registers)"),
+ FEAT_INIT("plo-clg", S390_FEAT_TYPE_PLO, 1, "PLO Compare and load (64 bit in parameter list)"),
+ FEAT_INIT("plo-clgr", S390_FEAT_TYPE_PLO, 2, "PLO Compare and load (32 bit in general registers)"),
+ FEAT_INIT("plo-clx", S390_FEAT_TYPE_PLO, 3, "PLO Compare and load (128 bit in parameter list)"),
+ FEAT_INIT("plo-cs", S390_FEAT_TYPE_PLO, 4, "PLO Compare and swap (32 bit in general registers)"),
+ FEAT_INIT("plo-csg", S390_FEAT_TYPE_PLO, 5, "PLO Compare and swap (64 bit in parameter list)"),
+ FEAT_INIT("plo-csgr", S390_FEAT_TYPE_PLO, 6, "PLO Compare and swap (32 bit in general registers)"),
+ FEAT_INIT("plo-csx", S390_FEAT_TYPE_PLO, 7, "PLO Compare and swap (128 bit in parameter list)"),
+ FEAT_INIT("plo-dcs", S390_FEAT_TYPE_PLO, 8, "PLO Double compare and swap (32 bit in general registers)"),
+ FEAT_INIT("plo-dcsg", S390_FEAT_TYPE_PLO, 9, "PLO Double compare and swap (64 bit in parameter list)"),
+ FEAT_INIT("plo-dcsgr", S390_FEAT_TYPE_PLO, 10, "PLO Double compare and swap (32 bit in general registers)"),
+ FEAT_INIT("plo-dcsx", S390_FEAT_TYPE_PLO, 11, "PLO Double compare and swap (128 bit in parameter list)"),
+ FEAT_INIT("plo-csst", S390_FEAT_TYPE_PLO, 12, "PLO Compare and swap and store (32 bit in general registers)"),
+ FEAT_INIT("plo-csstg", S390_FEAT_TYPE_PLO, 13, "PLO Compare and swap and store (64 bit in parameter list)"),
+ FEAT_INIT("plo-csstgr", S390_FEAT_TYPE_PLO, 14, "PLO Compare and swap and store (32 bit in general registers)"),
+ FEAT_INIT("plo-csstx", S390_FEAT_TYPE_PLO, 15, "PLO Compare and swap and store (128 bit in parameter list)"),
+ FEAT_INIT("plo-csdst", S390_FEAT_TYPE_PLO, 16, "PLO Compare and swap and double store (32 bit in general registers)"),
+ FEAT_INIT("plo-csdstg", S390_FEAT_TYPE_PLO, 17, "PLO Compare and swap and double store (64 bit in parameter list)"),
+ FEAT_INIT("plo-csdstgr", S390_FEAT_TYPE_PLO, 18, "PLO Compare and swap and double store (32 bit in general registers)"),
+ FEAT_INIT("plo-csdstx", S390_FEAT_TYPE_PLO, 19, "PLO Compare and swap and double store (128 bit in parameter list)"),
+ FEAT_INIT("plo-cstst", S390_FEAT_TYPE_PLO, 20, "PLO Compare and swap and triple store (32 bit in general registers)"),
+ FEAT_INIT("plo-cststg", S390_FEAT_TYPE_PLO, 21, "PLO Compare and swap and triple store (64 bit in parameter list)"),
+ FEAT_INIT("plo-cststgr", S390_FEAT_TYPE_PLO, 22, "PLO Compare and swap and triple store (32 bit in general registers)"),
+ FEAT_INIT("plo-cststx", S390_FEAT_TYPE_PLO, 23, "PLO Compare and swap and triple store (128 bit in parameter list)"),
+
+ FEAT_INIT("ptff-qto", S390_FEAT_TYPE_PTFF, 1, "PTFF Query TOD Offset"),
+ FEAT_INIT("ptff-qsi", S390_FEAT_TYPE_PTFF, 2, "PTFF Query Steering Information"),
+ FEAT_INIT("ptff-qpc", S390_FEAT_TYPE_PTFF, 3, "PTFF Query Physical Clock"),
+ FEAT_INIT("ptff-qui", S390_FEAT_TYPE_PTFF, 4, "PTFF Query UTC Information"),
+ FEAT_INIT("ptff-qtou", S390_FEAT_TYPE_PTFF, 5, "PTFF Query TOD Offset User"),
+ FEAT_INIT("ptff-sto", S390_FEAT_TYPE_PTFF, 65, "PTFF Set TOD Offset"),
+ FEAT_INIT("ptff-stou", S390_FEAT_TYPE_PTFF, 69, "PTFF Set TOD Offset User"),
+
+ FEAT_INIT("kmac-dea", S390_FEAT_TYPE_KMAC, 1, "KMAC DEA"),
+ FEAT_INIT("kmac-tdea-128", S390_FEAT_TYPE_KMAC, 2, "KMAC TDEA-128"),
+ FEAT_INIT("kmac-tdea-192", S390_FEAT_TYPE_KMAC, 3, "KMAC TDEA-192"),
+ FEAT_INIT("kmac-edea", S390_FEAT_TYPE_KMAC, 9, "KMAC Encrypted-DEA"),
+ FEAT_INIT("kmac-etdea-128", S390_FEAT_TYPE_KMAC, 10, "KMAC Encrypted-TDEA-128"),
+ FEAT_INIT("kmac-etdea-192", S390_FEAT_TYPE_KMAC, 11, "KMAC Encrypted-TDEA-192"),
+ FEAT_INIT("kmac-aes-128", S390_FEAT_TYPE_KMAC, 18, "KMAC AES-128"),
+ FEAT_INIT("kmac-aes-192", S390_FEAT_TYPE_KMAC, 19, "KMAC AES-192"),
+ FEAT_INIT("kmac-aes-256", S390_FEAT_TYPE_KMAC, 20, "KMAC AES-256"),
+ FEAT_INIT("kmac-eaes-128", S390_FEAT_TYPE_KMAC, 26, "KMAC Encrypted-AES-128"),
+ FEAT_INIT("kmac-eaes-192", S390_FEAT_TYPE_KMAC, 27, "KMAC Encrypted-AES-192"),
+ FEAT_INIT("kmac-eaes-256", S390_FEAT_TYPE_KMAC, 28, "KMAC Encrypted-AES-256"),
+
+ FEAT_INIT("kmc-dea", S390_FEAT_TYPE_KMC, 1, "KMC DEA"),
+ FEAT_INIT("kmc-tdea-128", S390_FEAT_TYPE_KMC, 2, "KMC TDEA-128"),
+ FEAT_INIT("kmc-tdea-192", S390_FEAT_TYPE_KMC, 3, "KMC TDEA-192"),
+ FEAT_INIT("kmc-edea", S390_FEAT_TYPE_KMC, 9, "KMC Encrypted-DEA"),
+ FEAT_INIT("kmc-etdea-128", S390_FEAT_TYPE_KMC, 10, "KMC Encrypted-TDEA-128"),
+ FEAT_INIT("kmc-etdea-192", S390_FEAT_TYPE_KMC, 11, "KMC Encrypted-TDEA-192"),
+ FEAT_INIT("kmc-aes-128", S390_FEAT_TYPE_KMC, 18, "KMC AES-128"),
+ FEAT_INIT("kmc-aes-192", S390_FEAT_TYPE_KMC, 19, "KMC AES-192"),
+ FEAT_INIT("kmc-aes-256", S390_FEAT_TYPE_KMC, 20, "KMC AES-256"),
+ FEAT_INIT("kmc-eaes-128", S390_FEAT_TYPE_KMC, 26, "KMC Encrypted-AES-128"),
+ FEAT_INIT("kmc-eaes-192", S390_FEAT_TYPE_KMC, 27, "KMC Encrypted-AES-192"),
+ FEAT_INIT("kmc-eaes-256", S390_FEAT_TYPE_KMC, 28, "KMC Encrypted-AES-256"),
+ FEAT_INIT("kmc-prng", S390_FEAT_TYPE_KMC, 67, "KMC PRNG"),
+
+ FEAT_INIT("km-dea", S390_FEAT_TYPE_KM, 1, "KM DEA"),
+ FEAT_INIT("km-tdea-128", S390_FEAT_TYPE_KM, 2, "KM TDEA-128"),
+ FEAT_INIT("km-tdea-192", S390_FEAT_TYPE_KM, 3, "KM TDEA-192"),
+ FEAT_INIT("km-edea", S390_FEAT_TYPE_KM, 9, "KM Encrypted-DEA"),
+ FEAT_INIT("km-etdea-128", S390_FEAT_TYPE_KM, 10, "KM Encrypted-TDEA-128"),
+ FEAT_INIT("km-etdea-192", S390_FEAT_TYPE_KM, 11, "KM Encrypted-TDEA-192"),
+ FEAT_INIT("km-aes-128", S390_FEAT_TYPE_KM, 18, "KM AES-128"),
+ FEAT_INIT("km-aes-192", S390_FEAT_TYPE_KM, 19, "KM AES-192"),
+ FEAT_INIT("km-aes-256", S390_FEAT_TYPE_KM, 20, "KM AES-256"),
+ FEAT_INIT("km-eaes-128", S390_FEAT_TYPE_KM, 26, "KM Encrypted-AES-128"),
+ FEAT_INIT("km-eaes-192", S390_FEAT_TYPE_KM, 27, "KM Encrypted-AES-192"),
+ FEAT_INIT("km-eaes-256", S390_FEAT_TYPE_KM, 28, "KM Encrypted-AES-256"),
+ FEAT_INIT("km-xts-aes-128", S390_FEAT_TYPE_KM, 50, "KM XTS-AES-128"),
+ FEAT_INIT("km-xts-aes-256", S390_FEAT_TYPE_KM, 52, "KM XTS-AES-256"),
+ FEAT_INIT("km-xts-eaes-128", S390_FEAT_TYPE_KM, 58, "KM XTS-Encrypted-AES-128"),
+ FEAT_INIT("km-xts-eaes-256", S390_FEAT_TYPE_KM, 60, "KM XTS-Encrypted-AES-256"),
+
+ FEAT_INIT("kimd-sha-1", S390_FEAT_TYPE_KIMD, 1, "KIMD SHA-1"),
+ FEAT_INIT("kimd-sha-256", S390_FEAT_TYPE_KIMD, 2, "KIMD SHA-256"),
+ FEAT_INIT("kimd-sha-512", S390_FEAT_TYPE_KIMD, 3, "KIMD SHA-512"),
+ FEAT_INIT("kimd-ghash", S390_FEAT_TYPE_KIMD, 65, "KIMD GHASH"),
+ FEAT_INIT("klmd-sha-1", S390_FEAT_TYPE_KLMD, 1, "KLMD SHA-1"),
+ FEAT_INIT("klmd-sha-256", S390_FEAT_TYPE_KLMD, 2, "KLMD SHA-256"),
+ FEAT_INIT("klmd-sha-512", S390_FEAT_TYPE_KLMD, 3, "KLMD SHA-512"),
+
+ FEAT_INIT("pckmo-edea", S390_FEAT_TYPE_PCKMO, 1, "PCKMO Encrypted-DEA-Key"),
+ FEAT_INIT("pckmo-etdea-128", S390_FEAT_TYPE_PCKMO, 2, "PCKMO Encrypted-TDEA-128-Key"),
+ FEAT_INIT("pckmo-etdea-192", S390_FEAT_TYPE_PCKMO, 3, "PCKMO Encrypted-TDEA-192-Key"),
+ FEAT_INIT("pckmo-aes-128", S390_FEAT_TYPE_PCKMO, 18, "PCKMO Encrypted-AES-128-Key"),
+ FEAT_INIT("pckmo-aes-192", S390_FEAT_TYPE_PCKMO, 19, "PCKMO Encrypted-AES-192-Key"),
+ FEAT_INIT("pckmo-aes-256", S390_FEAT_TYPE_PCKMO, 20, "PCKMO Encrypted-AES-256-Key"),
+
+ FEAT_INIT("kmctr-dea", S390_FEAT_TYPE_KMCTR, 1, "KMCTR DEA"),
+ FEAT_INIT("kmctr-tdea-128", S390_FEAT_TYPE_KMCTR, 2, "KMCTR TDEA-128"),
+ FEAT_INIT("kmctr-tdea-192", S390_FEAT_TYPE_KMCTR, 3, "KMCTR TDEA-192"),
+ FEAT_INIT("kmctr-edea", S390_FEAT_TYPE_KMCTR, 9, "KMCTR Encrypted-DEA"),
+ FEAT_INIT("kmctr-etdea-128", S390_FEAT_TYPE_KMCTR, 10, "KMCTR Encrypted-TDEA-128"),
+ FEAT_INIT("kmctr-etdea-192", S390_FEAT_TYPE_KMCTR, 11, "KMCTR Encrypted-TDEA-192"),
+ FEAT_INIT("kmctr-aes-128", S390_FEAT_TYPE_KMCTR, 18, "KMCTR AES-128"),
+ FEAT_INIT("kmctr-aes-192", S390_FEAT_TYPE_KMCTR, 19, "KMCTR AES-192"),
+ FEAT_INIT("kmctr-aes-256", S390_FEAT_TYPE_KMCTR, 20, "KMCTR AES-256"),
+ FEAT_INIT("kmctr-eaes-128", S390_FEAT_TYPE_KMCTR, 26, "KMCTR Encrypted-AES-128"),
+ FEAT_INIT("kmctr-eaes-192", S390_FEAT_TYPE_KMCTR, 27, "KMCTR Encrypted-AES-192"),
+ FEAT_INIT("kmctr-eaes-256", S390_FEAT_TYPE_KMCTR, 28, "KMCTR Encrypted-AES-256"),
+
+ FEAT_INIT("kmf-dea", S390_FEAT_TYPE_KMF, 1, "KMF DEA"),
+ FEAT_INIT("kmf-tdea-128", S390_FEAT_TYPE_KMF, 2, "KMF TDEA-128"),
+ FEAT_INIT("kmf-tdea-192", S390_FEAT_TYPE_KMF, 3, "KMF TDEA-192"),
+ FEAT_INIT("kmf-edea", S390_FEAT_TYPE_KMF, 9, "KMF Encrypted-DEA"),
+ FEAT_INIT("kmf-etdea-128", S390_FEAT_TYPE_KMF, 10, "KMF Encrypted-TDEA-128"),
+ FEAT_INIT("kmf-etdea-192", S390_FEAT_TYPE_KMF, 11, "KMF Encrypted-TDEA-192"),
+ FEAT_INIT("kmf-aes-128", S390_FEAT_TYPE_KMF, 18, "KMF AES-128"),
+ FEAT_INIT("kmf-aes-192", S390_FEAT_TYPE_KMF, 19, "KMF AES-192"),
+ FEAT_INIT("kmf-aes-256", S390_FEAT_TYPE_KMF, 20, "KMF AES-256"),
+ FEAT_INIT("kmf-eaes-128", S390_FEAT_TYPE_KMF, 26, "KMF Encrypted-AES-128"),
+ FEAT_INIT("kmf-eaes-192", S390_FEAT_TYPE_KMF, 27, "KMF Encrypted-AES-192"),
+ FEAT_INIT("kmf-eaes-256", S390_FEAT_TYPE_KMF, 28, "KMF Encrypted-AES-256"),
+
+ FEAT_INIT("kmo-dea", S390_FEAT_TYPE_KMO, 1, "KMO DEA"),
+ FEAT_INIT("kmo-tdea-128", S390_FEAT_TYPE_KMO, 2, "KMO TDEA-128"),
+ FEAT_INIT("kmo-tdea-192", S390_FEAT_TYPE_KMO, 3, "KMO TDEA-192"),
+ FEAT_INIT("kmo-edea", S390_FEAT_TYPE_KMO, 9, "KMO Encrypted-DEA"),
+ FEAT_INIT("kmo-etdea-128", S390_FEAT_TYPE_KMO, 10, "KMO Encrypted-TDEA-128"),
+ FEAT_INIT("kmo-etdea-192", S390_FEAT_TYPE_KMO, 11, "KMO Encrypted-TDEA-192"),
+ FEAT_INIT("kmo-aes-128", S390_FEAT_TYPE_KMO, 18, "KMO AES-128"),
+ FEAT_INIT("kmo-aes-192", S390_FEAT_TYPE_KMO, 19, "KMO AES-192"),
+ FEAT_INIT("kmo-aes-256", S390_FEAT_TYPE_KMO, 20, "KMO AES-256"),
+ FEAT_INIT("kmo-eaes-128", S390_FEAT_TYPE_KMO, 26, "KMO Encrypted-AES-128"),
+ FEAT_INIT("kmo-eaes-192", S390_FEAT_TYPE_KMO, 27, "KMO Encrypted-AES-192"),
+ FEAT_INIT("kmo-eaes-256", S390_FEAT_TYPE_KMO, 28, "KMO Encrypted-AES-256"),
+
+ FEAT_INIT("pcc-cmac-dea", S390_FEAT_TYPE_PCC, 1, "PCC Compute-Last-Block-CMAC-Using-DEA"),
+ FEAT_INIT("pcc-cmac-tdea-128", S390_FEAT_TYPE_PCC, 2, "PCC Compute-Last-Block-CMAC-Using-TDEA-128"),
+ FEAT_INIT("pcc-cmac-tdea-192", S390_FEAT_TYPE_PCC, 3, "PCC Compute-Last-Block-CMAC-Using-TDEA-192"),
+ FEAT_INIT("pcc-cmac-edea", S390_FEAT_TYPE_PCC, 9, "PCC Compute-Last-Block-CMAC-Using-Encrypted-DEA"),
+ FEAT_INIT("pcc-cmac-etdea-128", S390_FEAT_TYPE_PCC, 10, "PCC Compute-Last-Block-CMAC-Using-Encrypted-TDEA-128"),
+ FEAT_INIT("pcc-cmac-etdea-192", S390_FEAT_TYPE_PCC, 11, "PCC Compute-Last-Block-CMAC-Using-EncryptedTDEA-192"),
+ FEAT_INIT("pcc-cmac-aes-128", S390_FEAT_TYPE_PCC, 18, "PCC Compute-Last-Block-CMAC-Using-AES-128"),
+ FEAT_INIT("pcc-cmac-aes-192", S390_FEAT_TYPE_PCC, 19, "PCC Compute-Last-Block-CMAC-Using-AES-192"),
+ FEAT_INIT("pcc-cmac-eaes-256", S390_FEAT_TYPE_PCC, 20, "PCC Compute-Last-Block-CMAC-Using-AES-256"),
+ FEAT_INIT("pcc-cmac-eaes-128", S390_FEAT_TYPE_PCC, 26, "PCC Compute-Last-Block-CMAC-Using-Encrypted-AES-128"),
+ FEAT_INIT("pcc-cmac-eaes-192", S390_FEAT_TYPE_PCC, 27, "PCC Compute-Last-Block-CMAC-Using-Encrypted-AES-192"),
+ FEAT_INIT("pcc-cmac-eaes-256", S390_FEAT_TYPE_PCC, 28, "PCC Compute-Last-Block-CMAC-Using-Encrypted-AES-256"),
+ FEAT_INIT("pcc-xts-aes-128", S390_FEAT_TYPE_PCC, 50, "PCC Compute-XTS-Parameter-Using-AES-128"),
+ FEAT_INIT("pcc-xts-aes-256", S390_FEAT_TYPE_PCC, 52, "PCC Compute-XTS-Parameter-Using-AES-256"),
+ FEAT_INIT("pcc-xts-eaes-128", S390_FEAT_TYPE_PCC, 58, "PCC Compute-XTS-Parameter-Using-Encrypted-AES-128"),
+ FEAT_INIT("pcc-xts-eaes-256", S390_FEAT_TYPE_PCC, 60, "PCC Compute-XTS-Parameter-Using-Encrypted-AES-256"),
+
+ FEAT_INIT("ppno-sha-512-drng", S390_FEAT_TYPE_PPNO, 3, "PPNO SHA-512-DRNG"),
+};
+
+const S390FeatDef *s390_feat_def(S390Feat feat)
+{
+ return &s390_features[feat];
+}
+
+S390Feat s390_feat_by_type_and_bit(S390FeatType type, int bit)
+{
+ S390Feat feat;
+
+ for (feat = 0; feat < ARRAY_SIZE(s390_features); feat++) {
+ if (s390_features[feat].type == type &&
+ s390_features[feat].bit == bit) {
+ return feat;
+ }
+ }
+ return S390_FEAT_MAX;
+}
+
+void s390_init_feat_bitmap(const S390FeatInit init, S390FeatBitmap bitmap)
+{
+ int i, j;
+
+ for (i = 0; i < (S390_FEAT_MAX / 64 + 1); i++) {
+ if (init[i]) {
+ for (j = 0; j < 64; j++) {
+ if (init[i] & 1ULL << j) {
+ set_bit(i * 64 + j, bitmap);
+ }
+ }
+ }
+ }
+}
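The S390FeatInit arrays consumed here are emitted by the gen-features build step as packed 64-bit words, with word i, bit j standing for feature number i * 64 + j; the loop above merely expands that packing into the runtime bitmap. A rough standalone sketch of the same expansion on plain arrays (the sizes and toy init values below are made up for illustration):

#include <assert.h>
#include <stdint.h>

/* toy stand-in: 130 "features", packed into 64-bit init words as in S390FeatInit */
#define NR_FEAT 130
#define NR_WORDS ((NR_FEAT / 64) + 1)

int main(void)
{
    uint64_t init[NR_WORDS] = { 0 };
    uint8_t feat[NR_FEAT] = { 0 };
    int i, j;

    init[0] = 1ULL << 7;    /* feature 7   (word 0, bit 7) */
    init[2] = 1ULL << 1;    /* feature 129 (word 2, bit 1) */

    /* same expansion as the loop above: word i, bit j -> feature i * 64 + j */
    for (i = 0; i < NR_WORDS; i++) {
        for (j = 0; j < 64; j++) {
            if (init[i] & (1ULL << j)) {
                feat[i * 64 + j] = 1;
            }
        }
    }

    assert(feat[7] && feat[129]);
    return 0;
}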
+
+void s390_fill_feat_block(const S390FeatBitmap features, S390FeatType type,
+ uint8_t *data)
+{
+ S390Feat feat;
+ int bit_nr;
+
+ if (type == S390_FEAT_TYPE_STFL && test_bit(S390_FEAT_ZARCH, features)) {
+ /* z/Architecture is always active if around */
+ data[0] |= 0x20;
+ }
+
+ feat = find_first_bit(features, S390_FEAT_MAX);
+ while (feat < S390_FEAT_MAX) {
+ if (s390_features[feat].type == type) {
+ bit_nr = s390_features[feat].bit;
+ /* big endian on uint8_t array */
+ data[bit_nr / 8] |= 0x80 >> (bit_nr % 8);
+ }
+ feat = find_next_bit(features, S390_FEAT_MAX, feat + 1);
+ }
+}
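The facility blocks handed to the guest are MSB-first byte arrays: feature bit n lands in byte n / 8 under the mask 0x80 >> (n % 8), which is also why the hardcoded z/Architecture indication (STFL bit 2) is written as 0x20 into byte 0 above. A minimal sketch of just that bit placement, with set_be_bit() as a hypothetical helper restating the expression used in the loop:

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* place big-endian (MSB-first) bit 'nr' into a byte array */
static void set_be_bit(uint8_t *data, int nr)
{
    data[nr / 8] |= 0x80 >> (nr % 8);
}

int main(void)
{
    uint8_t stfl[16];

    memset(stfl, 0, sizeof(stfl));
    set_be_bit(stfl, 2);        /* STFL bit 2: z/Architecture active */
    set_be_bit(stfl, 76);       /* STFL bit 76: msa3-base (see the table above) */

    assert(stfl[0] == 0x20);    /* matches the hardcoded data[0] |= 0x20 */
    assert(stfl[9] == 0x08);    /* bit 76 -> byte 9, mask 0x80 >> 4 */
    return 0;
}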
+
+void s390_add_from_feat_block(S390FeatBitmap features, S390FeatType type,
+ uint8_t *data)
+{
+ int nr_bits, le_bit;
+
+ switch (type) {
+ case S390_FEAT_TYPE_STFL:
+ nr_bits = 2048;
+ break;
+ case S390_FEAT_TYPE_PLO:
+ nr_bits = 256;
+ break;
+ default:
+ /* all cpu subfunctions have 128 bit */
+ nr_bits = 128;
+ };
+
+ le_bit = find_first_bit((unsigned long *) data, nr_bits);
+ while (le_bit < nr_bits) {
+ /* convert the bit number to a big endian bit nr */
+ S390Feat feat = s390_feat_by_type_and_bit(type, BE_BIT_NR(le_bit));
+ /* ignore unknown bits */
+ if (feat < S390_FEAT_MAX) {
+ set_bit(feat, features);
+ }
+ le_bit = find_next_bit((unsigned long *) data, nr_bits, le_bit + 1);
+ }
+}
+
+void s390_feat_bitmap_to_ascii(const S390FeatBitmap features, void *opaque,
+ void (*fn)(const char *name, void *opaque))
+{
+ S390FeatBitmap bitmap, tmp;
+ S390FeatGroup group;
+ S390Feat feat;
+
+ bitmap_copy(bitmap, features, S390_FEAT_MAX);
+
+ /* process whole groups first */
+ for (group = 0; group < S390_FEAT_GROUP_MAX; group++) {
+ const S390FeatGroupDef *def = s390_feat_group_def(group);
+
+ bitmap_and(tmp, bitmap, def->feat, S390_FEAT_MAX);
+ if (bitmap_equal(tmp, def->feat, S390_FEAT_MAX)) {
+ bitmap_andnot(bitmap, bitmap, def->feat, S390_FEAT_MAX);
+ fn(def->name, opaque);
+ }
+ }
+
+ /* report leftovers as separate features */
+ feat = find_first_bit(bitmap, S390_FEAT_MAX);
+ while (feat < S390_FEAT_MAX) {
+ fn(s390_feat_def(feat)->name, opaque);
+ feat = find_next_bit(bitmap, S390_FEAT_MAX, feat + 1);
+ };
+}
+
+#define FEAT_GROUP_INIT(_name, _group, _desc) \
+ { \
+ .name = _name, \
+ .desc = _desc, \
+ .init = { S390_FEAT_GROUP_LIST_ ## _group }, \
+ }
+
+/* indexed by feature group number for easy lookup */
+static S390FeatGroupDef s390_feature_groups[] = {
+ FEAT_GROUP_INIT("plo", PLO, "Perform-locked-operation facility"),
+ FEAT_GROUP_INIT("tods", TOD_CLOCK_STEERING, "Tod-clock-steering facility"),
+ FEAT_GROUP_INIT("gen13ptff", GEN13_PTFF, "PTFF enhancements introduced with z13"),
+ FEAT_GROUP_INIT("msa", MSA, "Message-security-assist facility"),
+ FEAT_GROUP_INIT("msa1", MSA_EXT_1, "Message-security-assist-extension 1 facility"),
+ FEAT_GROUP_INIT("msa2", MSA_EXT_2, "Message-security-assist-extension 2 facility"),
+ FEAT_GROUP_INIT("msa3", MSA_EXT_3, "Message-security-assist-extension 3 facility"),
+ FEAT_GROUP_INIT("msa4", MSA_EXT_4, "Message-security-assist-extension 4 facility"),
+ FEAT_GROUP_INIT("msa5", MSA_EXT_5, "Message-security-assist-extension 5 facility"),
+};
+
+const S390FeatGroupDef *s390_feat_group_def(S390FeatGroup group)
+{
+ return &s390_feature_groups[group];
+}
+
+static void init_groups(void)
+{
+ int i;
+
+ /* initialize all bitmaps from the generated data */
+ for (i = 0; i < ARRAY_SIZE(s390_feature_groups); i++) {
+ s390_init_feat_bitmap(s390_feature_groups[i].init,
+ s390_feature_groups[i].feat);
+ }
+}
+
+type_init(init_groups)
diff --git a/target/s390x/cpu_features.h b/target/s390x/cpu_features.h
new file mode 100644
index 0000000000..d669121786
--- /dev/null
+++ b/target/s390x/cpu_features.h
@@ -0,0 +1,93 @@
+/*
+ * CPU features/facilities helper structs and utility functions for s390
+ *
+ * Copyright 2016 IBM Corp.
+ *
+ * Author(s): Michael Mueller <mimu@linux.vnet.ibm.com>
+ * David Hildenbrand <dahi@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#ifndef TARGET_S390X_CPU_FEATURES_H
+#define TARGET_S390X_CPU_FEATURES_H
+
+#include "qemu/bitmap.h"
+#include "cpu_features_def.h"
+
+/* CPU features are announced via different ways */
+typedef enum {
+ S390_FEAT_TYPE_STFL,
+ S390_FEAT_TYPE_SCLP_CONF_CHAR,
+ S390_FEAT_TYPE_SCLP_CONF_CHAR_EXT,
+ S390_FEAT_TYPE_SCLP_CPU,
+ S390_FEAT_TYPE_MISC,
+ S390_FEAT_TYPE_PLO,
+ S390_FEAT_TYPE_PTFF,
+ S390_FEAT_TYPE_KMAC,
+ S390_FEAT_TYPE_KMC,
+ S390_FEAT_TYPE_KM,
+ S390_FEAT_TYPE_KIMD,
+ S390_FEAT_TYPE_KLMD,
+ S390_FEAT_TYPE_PCKMO,
+ S390_FEAT_TYPE_KMCTR,
+ S390_FEAT_TYPE_KMF,
+ S390_FEAT_TYPE_KMO,
+ S390_FEAT_TYPE_PCC,
+ S390_FEAT_TYPE_PPNO,
+} S390FeatType;
+
+/* Definition of a CPU feature */
+typedef struct {
+ const char *name; /* name exposed to the user */
+ const char *desc; /* description exposed to the user */
+ S390FeatType type; /* feature type (way of indication) */
+ int bit; /* bit within the feature type area (fixed) */
+} S390FeatDef;
+
+/* use ordinary bitmap operations to work with features */
+typedef unsigned long S390FeatBitmap[BITS_TO_LONGS(S390_FEAT_MAX)];
+
+/* 64bit based bitmap used to init S390FeatBitmap from generated data */
+typedef uint64_t S390FeatInit[S390_FEAT_MAX / 64 + 1];
+
+const S390FeatDef *s390_feat_def(S390Feat feat);
+S390Feat s390_feat_by_type_and_bit(S390FeatType type, int bit);
+void s390_init_feat_bitmap(const S390FeatInit init, S390FeatBitmap bitmap);
+void s390_fill_feat_block(const S390FeatBitmap features, S390FeatType type,
+ uint8_t *data);
+void s390_add_from_feat_block(S390FeatBitmap features, S390FeatType type,
+ uint8_t *data);
+void s390_feat_bitmap_to_ascii(const S390FeatBitmap features, void *opaque,
+ void (*fn)(const char *name, void *opaque));
+
+/* static groups that will never change */
+typedef enum {
+ S390_FEAT_GROUP_PLO,
+ S390_FEAT_GROUP_TOD_CLOCK_STEERING,
+ S390_FEAT_GROUP_GEN13_PTFF_ENH,
+ S390_FEAT_GROUP_MSA,
+ S390_FEAT_GROUP_MSA_EXT_1,
+ S390_FEAT_GROUP_MSA_EXT_2,
+ S390_FEAT_GROUP_MSA_EXT_3,
+ S390_FEAT_GROUP_MSA_EXT_4,
+ S390_FEAT_GROUP_MSA_EXT_5,
+ S390_FEAT_GROUP_MAX,
+} S390FeatGroup;
+
+/* Definition of a CPU feature group */
+typedef struct {
+ const char *name; /* name exposed to the user */
+ const char *desc; /* description exposed to the user */
+ S390FeatBitmap feat; /* features contained in the group */
+ S390FeatInit init; /* used to init feat from generated data */
+} S390FeatGroupDef;
+
+const S390FeatGroupDef *s390_feat_group_def(S390FeatGroup group);
+
+#define BE_BIT_NR(BIT) (BIT ^ (BITS_PER_LONG - 1))
+#define BE_BIT(BIT) (1ULL << BE_BIT_NR(BIT))
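BE_BIT_NR() bridges the little-endian bit numbering used by the bitmap helpers (find_first_bit() scans unsigned longs starting at the least significant bit) and the MSB-first numbering of the architected facility blocks: within each 64-bit word the index is simply XORed with 63, while the word offset is preserved. A small standalone sketch of that mapping, assuming a 64-bit long:

#include <assert.h>

#define BITS_PER_LONG 64
/* mirror of the BE_BIT_NR() macro above: XOR flips the bit index within one word */
#define BE_BIT_NR(bit) ((bit) ^ (BITS_PER_LONG - 1))

int main(void)
{
    /* LSB of word 0 (little-endian bit 0) is the last big-endian bit of that word */
    assert(BE_BIT_NR(0) == 63);
    /* MSB of word 0 (little-endian bit 63) is big-endian bit 0 */
    assert(BE_BIT_NR(63) == 0);
    /* the word offset is preserved: LE bit 64 (LSB of word 1) maps to BE bit 127 */
    assert(BE_BIT_NR(64) == 127);
    /* the mapping is its own inverse */
    assert(BE_BIT_NR(BE_BIT_NR(42)) == 42);
    return 0;
}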
+
+#endif /* TARGET_S390X_CPU_FEATURES_H */
diff --git a/target/s390x/cpu_features_def.h b/target/s390x/cpu_features_def.h
new file mode 100644
index 0000000000..aa5ab8d371
--- /dev/null
+++ b/target/s390x/cpu_features_def.h
@@ -0,0 +1,231 @@
+/*
+ * CPU features/facilities for s390
+ *
+ * Copyright 2016 IBM Corp.
+ *
+ * Author(s): Michael Mueller <mimu@linux.vnet.ibm.com>
+ * David Hildenbrand <dahi@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#ifndef TARGET_S390X_CPU_FEATURES_DEF_H
+#define TARGET_S390X_CPU_FEATURES_DEF_H
+
+typedef enum {
+ S390_FEAT_ESAN3 = 0,
+ S390_FEAT_ZARCH,
+ S390_FEAT_DAT_ENH,
+ S390_FEAT_IDTE_SEGMENT,
+ S390_FEAT_IDTE_REGION,
+ S390_FEAT_ASN_LX_REUSE,
+ S390_FEAT_STFLE,
+ S390_FEAT_EDAT,
+ S390_FEAT_SENSE_RUNNING_STATUS,
+ S390_FEAT_CONDITIONAL_SSKE,
+ S390_FEAT_CONFIGURATION_TOPOLOGY,
+ S390_FEAT_IPTE_RANGE,
+ S390_FEAT_NONQ_KEY_SETTING,
+ S390_FEAT_EXTENDED_TRANSLATION_2,
+ S390_FEAT_MSA,
+ S390_FEAT_LONG_DISPLACEMENT,
+ S390_FEAT_LONG_DISPLACEMENT_FAST,
+ S390_FEAT_HFP_MADDSUB,
+ S390_FEAT_EXTENDED_IMMEDIATE,
+ S390_FEAT_EXTENDED_TRANSLATION_3,
+ S390_FEAT_HFP_UNNORMALIZED_EXT,
+ S390_FEAT_ETF2_ENH,
+ S390_FEAT_STORE_CLOCK_FAST,
+ S390_FEAT_PARSING_ENH,
+ S390_FEAT_MOVE_WITH_OPTIONAL_SPEC,
+ S390_FEAT_TOD_CLOCK_STEERING,
+ S390_FEAT_ETF3_ENH,
+ S390_FEAT_EXTRACT_CPU_TIME,
+ S390_FEAT_COMPARE_AND_SWAP_AND_STORE,
+ S390_FEAT_COMPARE_AND_SWAP_AND_STORE_2,
+ S390_FEAT_GENERAL_INSTRUCTIONS_EXT,
+ S390_FEAT_EXECUTE_EXT,
+ S390_FEAT_ENHANCED_MONITOR,
+ S390_FEAT_FLOATING_POINT_EXT,
+ S390_FEAT_SET_PROGRAM_PARAMETERS,
+ S390_FEAT_FLOATING_POINT_SUPPPORT_ENH,
+ S390_FEAT_DFP,
+ S390_FEAT_DFP_FAST,
+ S390_FEAT_PFPO,
+ S390_FEAT_STFLE_45,
+ S390_FEAT_CMPSC_ENH,
+ S390_FEAT_DFP_ZONED_CONVERSION,
+ S390_FEAT_STFLE_49,
+ S390_FEAT_CONSTRAINT_TRANSACTIONAL_EXE,
+ S390_FEAT_LOCAL_TLB_CLEARING,
+ S390_FEAT_INTERLOCKED_ACCESS_2,
+ S390_FEAT_STFLE_53,
+ S390_FEAT_MSA_EXT_5,
+ S390_FEAT_RUNTIME_INSTRUMENTATION,
+ S390_FEAT_TRANSACTIONAL_EXE,
+ S390_FEAT_STORE_HYPERVISOR_INFO,
+ S390_FEAT_ACCESS_EXCEPTION_FS_INDICATION,
+ S390_FEAT_MSA_EXT_3,
+ S390_FEAT_MSA_EXT_4,
+ S390_FEAT_EDAT_2,
+ S390_FEAT_DFP_PACKED_CONVERSION,
+ S390_FEAT_VECTOR,
+ S390_FEAT_SIE_GSLS,
+ S390_FEAT_ESOP,
+ S390_FEAT_SIE_64BSCAO,
+ S390_FEAT_SIE_CMMA,
+ S390_FEAT_SIE_PFMFI,
+ S390_FEAT_SIE_IBS,
+ S390_FEAT_SIE_F2,
+ S390_FEAT_SIE_SKEY,
+ S390_FEAT_SIE_GPERE,
+ S390_FEAT_SIE_SIIF,
+ S390_FEAT_SIE_SIGPIF,
+ S390_FEAT_SIE_IB,
+ S390_FEAT_SIE_CEI,
+ S390_FEAT_DAT_ENH_2,
+ S390_FEAT_CMM,
+ S390_FEAT_PLO_CL,
+ S390_FEAT_PLO_CLG,
+ S390_FEAT_PLO_CLGR,
+ S390_FEAT_PLO_CLX,
+ S390_FEAT_PLO_CS,
+ S390_FEAT_PLO_CSG,
+ S390_FEAT_PLO_CSGR,
+ S390_FEAT_PLO_CSX,
+ S390_FEAT_PLO_DCS,
+ S390_FEAT_PLO_DCSG,
+ S390_FEAT_PLO_DCSGR,
+ S390_FEAT_PLO_DCSX,
+ S390_FEAT_PLO_CSST,
+ S390_FEAT_PLO_CSSTG,
+ S390_FEAT_PLO_CSSTGR,
+ S390_FEAT_PLO_CSSTX,
+ S390_FEAT_PLO_CSDST,
+ S390_FEAT_PLO_CSDSTG,
+ S390_FEAT_PLO_CSDSTGR,
+ S390_FEAT_PLO_CSDSTX,
+ S390_FEAT_PLO_CSTST,
+ S390_FEAT_PLO_CSTSTG,
+ S390_FEAT_PLO_CSTSTGR,
+ S390_FEAT_PLO_CSTSTX,
+ S390_FEAT_PTFF_QTO,
+ S390_FEAT_PTFF_QSI,
+ S390_FEAT_PTFF_QPT,
+ S390_FEAT_PTFF_QUI,
+ S390_FEAT_PTFF_QTOU,
+ S390_FEAT_PTFF_STO,
+ S390_FEAT_PTFF_STOU,
+ S390_FEAT_KMAC_DEA,
+ S390_FEAT_KMAC_TDEA_128,
+ S390_FEAT_KMAC_TDEA_192,
+ S390_FEAT_KMAC_EDEA,
+ S390_FEAT_KMAC_ETDEA_128,
+ S390_FEAT_KMAC_ETDEA_192,
+ S390_FEAT_KMAC_AES_128,
+ S390_FEAT_KMAC_AES_192,
+ S390_FEAT_KMAC_AES_256,
+ S390_FEAT_KMAC_EAES_128,
+ S390_FEAT_KMAC_EAES_192,
+ S390_FEAT_KMAC_EAES_256,
+ S390_FEAT_KMC_DEA,
+ S390_FEAT_KMC_TDEA_128,
+ S390_FEAT_KMC_TDEA_192,
+ S390_FEAT_KMC_EDEA,
+ S390_FEAT_KMC_ETDEA_128,
+ S390_FEAT_KMC_ETDEA_192,
+ S390_FEAT_KMC_AES_128,
+ S390_FEAT_KMC_AES_192,
+ S390_FEAT_KMC_AES_256,
+ S390_FEAT_KMC_EAES_128,
+ S390_FEAT_KMC_EAES_192,
+ S390_FEAT_KMC_EAES_256,
+ S390_FEAT_KMC_PRNG,
+ S390_FEAT_KM_DEA,
+ S390_FEAT_KM_TDEA_128,
+ S390_FEAT_KM_TDEA_192,
+ S390_FEAT_KM_EDEA,
+ S390_FEAT_KM_ETDEA_128,
+ S390_FEAT_KM_ETDEA_192,
+ S390_FEAT_KM_AES_128,
+ S390_FEAT_KM_AES_192,
+ S390_FEAT_KM_AES_256,
+ S390_FEAT_KM_EAES_128,
+ S390_FEAT_KM_EAES_192,
+ S390_FEAT_KM_EAES_256,
+ S390_FEAT_KM_XTS_AES_128,
+ S390_FEAT_KM_XTS_AES_256,
+ S390_FEAT_KM_XTS_EAES_128,
+ S390_FEAT_KM_XTS_EAES_256,
+ S390_FEAT_KIMD_SHA_1,
+ S390_FEAT_KIMD_SHA_256,
+ S390_FEAT_KIMD_SHA_512,
+ S390_FEAT_KIMD_GHASH,
+ S390_FEAT_KLMD_SHA_1,
+ S390_FEAT_KLMD_SHA_256,
+ S390_FEAT_KLMD_SHA_512,
+ S390_FEAT_PCKMO_EDEA,
+ S390_FEAT_PCKMO_ETDEA_128,
+ S390_FEAT_PCKMO_ETDEA_256,
+ S390_FEAT_PCKMO_AES_128,
+ S390_FEAT_PCKMO_AES_192,
+ S390_FEAT_PCKMO_AES_256,
+ S390_FEAT_KMCTR_DEA,
+ S390_FEAT_KMCTR_TDEA_128,
+ S390_FEAT_KMCTR_TDEA_192,
+ S390_FEAT_KMCTR_EDEA,
+ S390_FEAT_KMCTR_ETDEA_128,
+ S390_FEAT_KMCTR_ETDEA_192,
+ S390_FEAT_KMCTR_AES_128,
+ S390_FEAT_KMCTR_AES_192,
+ S390_FEAT_KMCTR_AES_256,
+ S390_FEAT_KMCTR_EAES_128,
+ S390_FEAT_KMCTR_EAES_192,
+ S390_FEAT_KMCTR_EAES_256,
+ S390_FEAT_KMF_DEA,
+ S390_FEAT_KMF_TDEA_128,
+ S390_FEAT_KMF_TDEA_192,
+ S390_FEAT_KMF_EDEA,
+ S390_FEAT_KMF_ETDEA_128,
+ S390_FEAT_KMF_ETDEA_192,
+ S390_FEAT_KMF_AES_128,
+ S390_FEAT_KMF_AES_192,
+ S390_FEAT_KMF_AES_256,
+ S390_FEAT_KMF_EAES_128,
+ S390_FEAT_KMF_EAES_192,
+ S390_FEAT_KMF_EAES_256,
+ S390_FEAT_KMO_DEA,
+ S390_FEAT_KMO_TDEA_128,
+ S390_FEAT_KMO_TDEA_192,
+ S390_FEAT_KMO_EDEA,
+ S390_FEAT_KMO_ETDEA_128,
+ S390_FEAT_KMO_ETDEA_192,
+ S390_FEAT_KMO_AES_128,
+ S390_FEAT_KMO_AES_192,
+ S390_FEAT_KMO_AES_256,
+ S390_FEAT_KMO_EAES_128,
+ S390_FEAT_KMO_EAES_192,
+ S390_FEAT_KMO_EAES_256,
+ S390_FEAT_PCC_CMAC_DEA,
+ S390_FEAT_PCC_CMAC_TDEA_128,
+ S390_FEAT_PCC_CMAC_TDEA_192,
+ S390_FEAT_PCC_CMAC_ETDEA_128,
+ S390_FEAT_PCC_CMAC_ETDEA_192,
+ S390_FEAT_PCC_CMAC_TDEA,
+ S390_FEAT_PCC_CMAC_AES_128,
+ S390_FEAT_PCC_CMAC_AES_192,
+ S390_FEAT_PCC_CMAC_AES_256,
+ S390_FEAT_PCC_CMAC_EAES_128,
+ S390_FEAT_PCC_CMAC_EAES_192,
+ S390_FEAT_PCC_CMAC_EAES_256,
+ S390_FEAT_PCC_XTS_AES_128,
+ S390_FEAT_PCC_XTS_AES_256,
+ S390_FEAT_PCC_XTS_EAES_128,
+ S390_FEAT_PCC_XTS_EAES_256,
+ S390_FEAT_PPNO_SHA_512_DRNG,
+ S390_FEAT_MAX,
+} S390Feat;
+
+#endif /* TARGET_S390X_CPU_FEATURES_DEF_H */
diff --git a/target/s390x/cpu_models.c b/target/s390x/cpu_models.c
new file mode 100644
index 0000000000..c1e729df5e
--- /dev/null
+++ b/target/s390x/cpu_models.c
@@ -0,0 +1,1100 @@
+/*
+ * CPU models for s390x
+ *
+ * Copyright 2016 IBM Corp.
+ *
+ * Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "gen-features.h"
+#include "qapi/error.h"
+#include "qapi/visitor.h"
+#include "qemu/error-report.h"
+#include "qapi/qmp/qerror.h"
+#include "qapi/qobject-input-visitor.h"
+#include "qapi/qmp/qbool.h"
+#ifndef CONFIG_USER_ONLY
+#include "sysemu/arch_init.h"
+#endif
+
+#define CPUDEF_INIT(_type, _gen, _ec_ga, _mha_pow, _hmfai, _name, _desc) \
+ { \
+ .name = _name, \
+ .type = _type, \
+ .gen = _gen, \
+ .ec_ga = _ec_ga, \
+ .mha_pow = _mha_pow, \
+ .hmfai = _hmfai, \
+ .desc = _desc, \
+ .base_init = { S390_FEAT_LIST_GEN ## _gen ## _GA ## _ec_ga ## _BASE }, \
+ .default_init = { S390_FEAT_LIST_GEN ## _gen ## _GA ## _ec_ga ## _DEFAULT }, \
+ .full_init = { S390_FEAT_LIST_GEN ## _gen ## _GA ## _ec_ga ## _FULL }, \
+ }
+
+/*
+ * CPU definition list in order of release. For now, base features of a
+ * following release are always a superset of base features of the previous
+ * release. The same holds for the other feature sets.
+ * A BC release always follows the corresponding EC release.
+ */
+static S390CPUDef s390_cpu_defs[] = {
+ CPUDEF_INIT(0x2064, 7, 1, 38, 0x00000000U, "z900", "IBM zSeries 900 GA1"),
+ CPUDEF_INIT(0x2064, 7, 2, 38, 0x00000000U, "z900.2", "IBM zSeries 900 GA2"),
+ CPUDEF_INIT(0x2064, 7, 3, 38, 0x00000000U, "z900.3", "IBM zSeries 900 GA3"),
+ CPUDEF_INIT(0x2066, 7, 3, 38, 0x00000000U, "z800", "IBM zSeries 800 GA1"),
+ CPUDEF_INIT(0x2084, 8, 1, 38, 0x00000000U, "z990", "IBM zSeries 990 GA1"),
+ CPUDEF_INIT(0x2084, 8, 2, 38, 0x00000000U, "z990.2", "IBM zSeries 990 GA2"),
+ CPUDEF_INIT(0x2084, 8, 3, 38, 0x00000000U, "z990.3", "IBM zSeries 990 GA3"),
+ CPUDEF_INIT(0x2086, 8, 3, 38, 0x00000000U, "z890", "IBM zSeries 890 GA1"),
+ CPUDEF_INIT(0x2084, 8, 4, 38, 0x00000000U, "z990.4", "IBM zSeries 990 GA4"),
+ CPUDEF_INIT(0x2086, 8, 4, 38, 0x00000000U, "z890.2", "IBM zSeries 890 GA2"),
+ CPUDEF_INIT(0x2084, 8, 5, 38, 0x00000000U, "z990.5", "IBM zSeries 990 GA5"),
+ CPUDEF_INIT(0x2086, 8, 5, 38, 0x00000000U, "z890.3", "IBM zSeries 890 GA3"),
+ CPUDEF_INIT(0x2094, 9, 1, 40, 0x00000000U, "z9EC", "IBM System z9 EC GA1"),
+ CPUDEF_INIT(0x2094, 9, 2, 40, 0x00000000U, "z9EC.2", "IBM System z9 EC GA2"),
+ CPUDEF_INIT(0x2096, 9, 2, 40, 0x00000000U, "z9BC", "IBM System z9 BC GA1"),
+ CPUDEF_INIT(0x2094, 9, 3, 40, 0x00000000U, "z9EC.3", "IBM System z9 EC GA3"),
+ CPUDEF_INIT(0x2096, 9, 3, 40, 0x00000000U, "z9BC.2", "IBM System z9 BC GA2"),
+ CPUDEF_INIT(0x2097, 10, 1, 43, 0x00000000U, "z10EC", "IBM System z10 EC GA1"),
+ CPUDEF_INIT(0x2097, 10, 2, 43, 0x00000000U, "z10EC.2", "IBM System z10 EC GA2"),
+ CPUDEF_INIT(0x2098, 10, 2, 43, 0x00000000U, "z10BC", "IBM System z10 BC GA1"),
+ CPUDEF_INIT(0x2097, 10, 3, 43, 0x00000000U, "z10EC.3", "IBM System z10 EC GA3"),
+ CPUDEF_INIT(0x2098, 10, 3, 43, 0x00000000U, "z10BC.2", "IBM System z10 BC GA2"),
+ CPUDEF_INIT(0x2817, 11, 1, 44, 0x08000000U, "z196", "IBM zEnterprise 196 GA1"),
+ CPUDEF_INIT(0x2817, 11, 2, 44, 0x08000000U, "z196.2", "IBM zEnterprise 196 GA2"),
+ CPUDEF_INIT(0x2818, 11, 2, 44, 0x08000000U, "z114", "IBM zEnterprise 114 GA1"),
+ CPUDEF_INIT(0x2827, 12, 1, 44, 0x08000000U, "zEC12", "IBM zEnterprise EC12 GA1"),
+ CPUDEF_INIT(0x2827, 12, 2, 44, 0x08000000U, "zEC12.2", "IBM zEnterprise EC12 GA2"),
+ CPUDEF_INIT(0x2828, 12, 2, 44, 0x08000000U, "zBC12", "IBM zEnterprise BC12 GA1"),
+ CPUDEF_INIT(0x2964, 13, 1, 47, 0x08000000U, "z13", "IBM z13 GA1"),
+ CPUDEF_INIT(0x2964, 13, 2, 47, 0x08000000U, "z13.2", "IBM z13 GA2"),
+ CPUDEF_INIT(0x2965, 13, 2, 47, 0x08000000U, "z13s", "IBM z13s GA1"),
+};
+
+uint32_t s390_get_hmfai(void)
+{
+ static S390CPU *cpu;
+
+ if (!cpu) {
+ cpu = S390_CPU(qemu_get_cpu(0));
+ }
+
+ if (!cpu || !cpu->model) {
+ return 0;
+ }
+ return cpu->model->def->hmfai;
+}
+
+uint8_t s390_get_mha_pow(void)
+{
+ static S390CPU *cpu;
+
+ if (!cpu) {
+ cpu = S390_CPU(qemu_get_cpu(0));
+ }
+
+ if (!cpu || !cpu->model) {
+ return 0;
+ }
+ return cpu->model->def->mha_pow;
+}
+
+uint32_t s390_get_ibc_val(void)
+{
+ uint16_t unblocked_ibc, lowest_ibc;
+ static S390CPU *cpu;
+
+ if (!cpu) {
+ cpu = S390_CPU(qemu_get_cpu(0));
+ }
+
+ if (!cpu || !cpu->model) {
+ return 0;
+ }
+ unblocked_ibc = s390_ibc_from_cpu_model(cpu->model);
+ lowest_ibc = cpu->model->lowest_ibc;
+ /* the lowest_ibc always has to be <= unblocked_ibc */
+ if (!lowest_ibc || lowest_ibc > unblocked_ibc) {
+ return 0;
+ }
+ return ((uint32_t) lowest_ibc << 16) | unblocked_ibc;
+}
+
+void s390_get_feat_block(S390FeatType type, uint8_t *data)
+{
+ static S390CPU *cpu;
+
+ if (!cpu) {
+ cpu = S390_CPU(qemu_get_cpu(0));
+ }
+
+ if (!cpu || !cpu->model) {
+ return;
+ }
+ s390_fill_feat_block(cpu->model->features, type, data);
+}
+
+bool s390_has_feat(S390Feat feat)
+{
+ static S390CPU *cpu;
+
+ if (!cpu) {
+ cpu = S390_CPU(qemu_get_cpu(0));
+ }
+
+ if (!cpu || !cpu->model) {
+#ifdef CONFIG_KVM
+ if (kvm_enabled()) {
+ if (feat == S390_FEAT_VECTOR) {
+ return kvm_check_extension(kvm_state,
+ KVM_CAP_S390_VECTOR_REGISTERS);
+ }
+ if (feat == S390_FEAT_RUNTIME_INSTRUMENTATION) {
+ return kvm_s390_get_ri();
+ }
+ if (feat == S390_FEAT_MSA_EXT_3) {
+ return true;
+ }
+ }
+#endif
+ return 0;
+ }
+ return test_bit(feat, cpu->model->features);
+}
+
+uint8_t s390_get_gen_for_cpu_type(uint16_t type)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(s390_cpu_defs); i++) {
+ if (s390_cpu_defs[i].type == type) {
+ return s390_cpu_defs[i].gen;
+ }
+ }
+ return 0;
+}
+
+const S390CPUDef *s390_find_cpu_def(uint16_t type, uint8_t gen, uint8_t ec_ga,
+ S390FeatBitmap features)
+{
+ const S390CPUDef *last_compatible = NULL;
+ int i;
+
+ if (!gen) {
+ ec_ga = 0;
+ }
+ if (!gen && type) {
+ gen = s390_get_gen_for_cpu_type(type);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(s390_cpu_defs); i++) {
+ const S390CPUDef *def = &s390_cpu_defs[i];
+ S390FeatBitmap missing;
+
+ /* don't even try newer generations if we know the generation */
+ if (gen) {
+ if (def->gen > gen) {
+ break;
+ } else if (def->gen == gen && ec_ga && def->ec_ga > ec_ga) {
+ break;
+ }
+ }
+
+ if (features) {
+ /* see if the model satisfies the minimum features */
+ bitmap_andnot(missing, def->base_feat, features, S390_FEAT_MAX);
+ if (!bitmap_empty(missing, S390_FEAT_MAX)) {
+ break;
+ }
+ }
+
+ /* stop the search if we found the exact model */
+ if (def->type == type && def->ec_ga == ec_ga) {
+ return def;
+ }
+ last_compatible = def;
+ }
+ return last_compatible;
+}
+
+struct S390PrintCpuListInfo {
+ FILE *f;
+ fprintf_function print;
+};
+
+static void print_cpu_model_list(ObjectClass *klass, void *opaque)
+{
+ struct S390PrintCpuListInfo *info = opaque;
+ S390CPUClass *scc = S390_CPU_CLASS(klass);
+ char *name = g_strdup(object_class_get_name(klass));
+ const char *details = "";
+
+ if (scc->is_static) {
+ details = "(static, migration-safe)";
+ } else if (scc->is_migration_safe) {
+ details = "(migration-safe)";
+ }
+
+ /* strip off the -s390-cpu */
+ g_strrstr(name, "-" TYPE_S390_CPU)[0] = 0;
+ (*info->print)(info->f, "s390 %-15s %-35s %s\n", name, scc->desc,
+ details);
+ g_free(name);
+}
+
+void s390_cpu_list(FILE *f, fprintf_function print)
+{
+ struct S390PrintCpuListInfo info = {
+ .f = f,
+ .print = print,
+ };
+ S390FeatGroup group;
+ S390Feat feat;
+
+ object_class_foreach(print_cpu_model_list, TYPE_S390_CPU, false, &info);
+
+ (*print)(f, "\nRecognized feature flags:\n");
+ for (feat = 0; feat < S390_FEAT_MAX; feat++) {
+ const S390FeatDef *def = s390_feat_def(feat);
+
+ (*print)(f, "%-20s %-50s\n", def->name, def->desc);
+ }
+
+ (*print)(f, "\nRecognized feature groups:\n");
+ for (group = 0; group < S390_FEAT_GROUP_MAX; group++) {
+ const S390FeatGroupDef *def = s390_feat_group_def(group);
+
+ (*print)(f, "%-20s %-50s\n", def->name, def->desc);
+ }
+}
+
+#ifndef CONFIG_USER_ONLY
+static void create_cpu_model_list(ObjectClass *klass, void *opaque)
+{
+ CpuDefinitionInfoList **cpu_list = opaque;
+ CpuDefinitionInfoList *entry;
+ CpuDefinitionInfo *info;
+ char *name = g_strdup(object_class_get_name(klass));
+ S390CPUClass *scc = S390_CPU_CLASS(klass);
+
+ /* strip off the -s390-cpu */
+ g_strrstr(name, "-" TYPE_S390_CPU)[0] = 0;
+ info = g_malloc0(sizeof(*info));
+ info->name = name;
+ info->has_migration_safe = true;
+ info->migration_safe = scc->is_migration_safe;
+ info->q_static = scc->is_static;
+
+ entry = g_malloc0(sizeof(*entry));
+ entry->value = info;
+ entry->next = *cpu_list;
+ *cpu_list = entry;
+}
+
+CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
+{
+ CpuDefinitionInfoList *list = NULL;
+
+ object_class_foreach(create_cpu_model_list, TYPE_S390_CPU, false, &list);
+
+ return list;
+}
+
+static void cpu_model_from_info(S390CPUModel *model, const CpuModelInfo *info,
+ Error **errp)
+{
+ const QDict *qdict = NULL;
+ const QDictEntry *e;
+ Visitor *visitor;
+ ObjectClass *oc;
+ S390CPU *cpu;
+ Object *obj;
+
+ if (info->props) {
+ qdict = qobject_to_qdict(info->props);
+ if (!qdict) {
+ error_setg(errp, QERR_INVALID_PARAMETER_TYPE, "props", "dict");
+ return;
+ }
+ }
+
+ oc = cpu_class_by_name(TYPE_S390_CPU, info->name);
+ if (!oc) {
+ error_setg(errp, "The CPU definition \'%s\' is unknown.", info->name);
+ return;
+ }
+ if (S390_CPU_CLASS(oc)->kvm_required && !kvm_enabled()) {
+ error_setg(errp, "The CPU definition '%s' requires KVM", info->name);
+ return;
+ }
+ obj = object_new(object_class_get_name(oc));
+ cpu = S390_CPU(obj);
+
+ if (!cpu->model) {
+ error_setg(errp, "Details about the host CPU model are not available, "
+ "it cannot be used.");
+ object_unref(obj);
+ return;
+ }
+
+ if (qdict) {
+ visitor = qobject_input_visitor_new(info->props, true);
+ visit_start_struct(visitor, NULL, NULL, 0, errp);
+ if (*errp) {
+ object_unref(obj);
+ return;
+ }
+ for (e = qdict_first(qdict); e; e = qdict_next(qdict, e)) {
+ object_property_set(obj, visitor, e->key, errp);
+ if (*errp) {
+ break;
+ }
+ }
+ if (!*errp) {
+ visit_check_struct(visitor, errp);
+ }
+ visit_end_struct(visitor, NULL);
+ visit_free(visitor);
+ if (*errp) {
+ object_unref(obj);
+ return;
+ }
+ }
+
+ /* copy the model and throw the cpu away */
+ memcpy(model, cpu->model, sizeof(*model));
+ object_unref(obj);
+}
+
+static void qdict_add_disabled_feat(const char *name, void *opaque)
+{
+ qdict_put((QDict *) opaque, name, qbool_from_bool(false));
+}
+
+static void qdict_add_enabled_feat(const char *name, void *opaque)
+{
+ qdict_put((QDict *) opaque, name, qbool_from_bool(true));
+}
+
+/* convert S390CPUDef into a static CpuModelInfo */
+static void cpu_info_from_model(CpuModelInfo *info, const S390CPUModel *model,
+ bool delta_changes)
+{
+ QDict *qdict = qdict_new();
+ S390FeatBitmap bitmap;
+
+ /* always fall back to the static base model */
+ info->name = g_strdup_printf("%s-base", model->def->name);
+
+ if (delta_changes) {
+ /* features deleted from the base feature set */
+ bitmap_andnot(bitmap, model->def->base_feat, model->features,
+ S390_FEAT_MAX);
+ if (!bitmap_empty(bitmap, S390_FEAT_MAX)) {
+ s390_feat_bitmap_to_ascii(bitmap, qdict, qdict_add_disabled_feat);
+ }
+
+ /* features added to the base feature set */
+ bitmap_andnot(bitmap, model->features, model->def->base_feat,
+ S390_FEAT_MAX);
+ if (!bitmap_empty(bitmap, S390_FEAT_MAX)) {
+ s390_feat_bitmap_to_ascii(bitmap, qdict, qdict_add_enabled_feat);
+ }
+ } else {
+ /* expand all features */
+ s390_feat_bitmap_to_ascii(model->features, qdict,
+ qdict_add_enabled_feat);
+ bitmap_complement(bitmap, model->features, S390_FEAT_MAX);
+ s390_feat_bitmap_to_ascii(bitmap, qdict, qdict_add_disabled_feat);
+ }
+
+ if (!qdict_size(qdict)) {
+ QDECREF(qdict);
+ } else {
+ info->props = QOBJECT(qdict);
+ info->has_props = true;
+ }
+}
+
+CpuModelExpansionInfo *arch_query_cpu_model_expansion(CpuModelExpansionType type,
+ CpuModelInfo *model,
+ Error **errp)
+{
+ CpuModelExpansionInfo *expansion_info = NULL;
+ S390CPUModel s390_model;
+ bool delta_changes = false;
+
+ /* convert it to our internal representation */
+ cpu_model_from_info(&s390_model, model, errp);
+ if (*errp) {
+ return NULL;
+ }
+
+ if (type == CPU_MODEL_EXPANSION_TYPE_STATIC) {
+ delta_changes = true;
+ } else if (type != CPU_MODEL_EXPANSION_TYPE_FULL) {
+ error_setg(errp, "The requested expansion type is not supported.");
+ return NULL;
+ }
+
+ /* convert it back to a static representation */
+ expansion_info = g_malloc0(sizeof(*expansion_info));
+ expansion_info->model = g_malloc0(sizeof(*expansion_info->model));
+ cpu_info_from_model(expansion_info->model, &s390_model, delta_changes);
+ return expansion_info;
+}
+
+static void list_add_feat(const char *name, void *opaque)
+{
+ strList **last = (strList **) opaque;
+ strList *entry;
+
+ entry = g_malloc0(sizeof(*entry));
+ entry->value = g_strdup(name);
+ entry->next = *last;
+ *last = entry;
+}
+
+CpuModelCompareInfo *arch_query_cpu_model_comparison(CpuModelInfo *infoa,
+ CpuModelInfo *infob,
+ Error **errp)
+{
+ CpuModelCompareResult feat_result, gen_result;
+ CpuModelCompareInfo *compare_info;
+ S390FeatBitmap missing, added;
+ S390CPUModel modela, modelb;
+
+ /* convert both models to our internal representation */
+ cpu_model_from_info(&modela, infoa, errp);
+ if (*errp) {
+ return NULL;
+ }
+ cpu_model_from_info(&modelb, infob, errp);
+ if (*errp) {
+ return NULL;
+ }
+ compare_info = g_malloc0(sizeof(*compare_info));
+
+ /* check the cpu generation and ga level */
+ if (modela.def->gen == modelb.def->gen) {
+ if (modela.def->ec_ga == modelb.def->ec_ga) {
+ /* ec and corresponding bc are identical */
+ gen_result = CPU_MODEL_COMPARE_RESULT_IDENTICAL;
+ } else if (modela.def->ec_ga < modelb.def->ec_ga) {
+ gen_result = CPU_MODEL_COMPARE_RESULT_SUBSET;
+ } else {
+ gen_result = CPU_MODEL_COMPARE_RESULT_SUPERSET;
+ }
+ } else if (modela.def->gen < modelb.def->gen) {
+ gen_result = CPU_MODEL_COMPARE_RESULT_SUBSET;
+ } else {
+ gen_result = CPU_MODEL_COMPARE_RESULT_SUPERSET;
+ }
+ if (gen_result != CPU_MODEL_COMPARE_RESULT_IDENTICAL) {
+ /* both models cannot be made identical */
+ list_add_feat("type", &compare_info->responsible_properties);
+ }
+
+ /* check the feature set */
+ if (bitmap_equal(modela.features, modelb.features, S390_FEAT_MAX)) {
+ feat_result = CPU_MODEL_COMPARE_RESULT_IDENTICAL;
+ } else {
+ bitmap_andnot(missing, modela.features, modelb.features, S390_FEAT_MAX);
+ s390_feat_bitmap_to_ascii(missing,
+ &compare_info->responsible_properties,
+ list_add_feat);
+ bitmap_andnot(added, modelb.features, modela.features, S390_FEAT_MAX);
+ s390_feat_bitmap_to_ascii(added, &compare_info->responsible_properties,
+ list_add_feat);
+ if (bitmap_empty(missing, S390_FEAT_MAX)) {
+ feat_result = CPU_MODEL_COMPARE_RESULT_SUBSET;
+ } else if (bitmap_empty(added, S390_FEAT_MAX)) {
+ feat_result = CPU_MODEL_COMPARE_RESULT_SUPERSET;
+ } else {
+ feat_result = CPU_MODEL_COMPARE_RESULT_INCOMPATIBLE;
+ }
+ }
+
+ /* combine the results */
+ if (gen_result == feat_result) {
+ compare_info->result = gen_result;
+ } else if (feat_result == CPU_MODEL_COMPARE_RESULT_IDENTICAL) {
+ compare_info->result = gen_result;
+ } else if (gen_result == CPU_MODEL_COMPARE_RESULT_IDENTICAL) {
+ compare_info->result = feat_result;
+ } else {
+ compare_info->result = CPU_MODEL_COMPARE_RESULT_INCOMPATIBLE;
+ }
+ return compare_info;
+}
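The feature half of the comparison above reduces to two set differences: if model A contains nothing that B lacks, A is a subset; if B adds nothing on top of A, A is a superset; if both differences are non-empty, the models are incompatible. A toy sketch of that decision on plain 32-bit masks (the enum and helper below are illustrative stand-ins, not the QAPI types used here):

#include <assert.h>
#include <stdint.h>

enum cmp { IDENTICAL, SUBSET, SUPERSET, INCOMPATIBLE };

/* compare two feature sets the same way feat_result is derived above */
static enum cmp compare_feats(uint32_t a, uint32_t b)
{
    uint32_t missing = a & ~b;  /* features of A that B lacks */
    uint32_t added = b & ~a;    /* features B has on top of A */

    if (!missing && !added) {
        return IDENTICAL;
    }
    if (!missing) {
        return SUBSET;          /* every feature of A is also in B */
    }
    if (!added) {
        return SUPERSET;
    }
    return INCOMPATIBLE;
}

int main(void)
{
    assert(compare_feats(0x0f, 0x0f) == IDENTICAL);
    assert(compare_feats(0x0f, 0x3f) == SUBSET);
    assert(compare_feats(0x3f, 0x0f) == SUPERSET);
    assert(compare_feats(0x33, 0x0f) == INCOMPATIBLE);
    return 0;
}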
+
+CpuModelBaselineInfo *arch_query_cpu_model_baseline(CpuModelInfo *infoa,
+ CpuModelInfo *infob,
+ Error **errp)
+{
+ CpuModelBaselineInfo *baseline_info;
+ S390CPUModel modela, modelb, model;
+ uint16_t cpu_type;
+ uint8_t max_gen_ga;
+ uint8_t max_gen;
+
+ /* convert both models to our internal representation */
+ cpu_model_from_info(&modela, infoa, errp);
+ if (*errp) {
+ return NULL;
+ }
+
+ cpu_model_from_info(&modelb, infob, errp);
+ if (*errp) {
+ return NULL;
+ }
+
+ /* features both models support */
+ bitmap_and(model.features, modela.features, modelb.features, S390_FEAT_MAX);
+
+ /* detect the maximum model, ignoring the feature sets */
+ if (modela.def->gen == modelb.def->gen) {
+ if (modela.def->type == modelb.def->type) {
+ cpu_type = modela.def->type;
+ } else {
+ cpu_type = 0;
+ }
+ max_gen = modela.def->gen;
+ max_gen_ga = MIN(modela.def->ec_ga, modelb.def->ec_ga);
+ } else if (modela.def->gen > modelb.def->gen) {
+ cpu_type = modelb.def->type;
+ max_gen = modelb.def->gen;
+ max_gen_ga = modelb.def->ec_ga;
+ } else {
+ cpu_type = modela.def->type;
+ max_gen = modela.def->gen;
+ max_gen_ga = modela.def->ec_ga;
+ }
+
+ model.def = s390_find_cpu_def(cpu_type, max_gen, max_gen_ga,
+ model.features);
+ /* strip off features not part of the max model */
+ bitmap_and(model.features, model.features, model.def->full_feat,
+ S390_FEAT_MAX);
+
+ baseline_info = g_malloc0(sizeof(*baseline_info));
+ baseline_info->model = g_malloc0(sizeof(*baseline_info->model));
+ cpu_info_from_model(baseline_info->model, &model, true);
+ return baseline_info;
+}
+#endif
+
+static void check_consistency(const S390CPUModel *model)
+{
+ static int dep[][2] = {
+ { S390_FEAT_IPTE_RANGE, S390_FEAT_DAT_ENH },
+ { S390_FEAT_IDTE_SEGMENT, S390_FEAT_DAT_ENH },
+ { S390_FEAT_IDTE_REGION, S390_FEAT_DAT_ENH },
+ { S390_FEAT_IDTE_REGION, S390_FEAT_IDTE_SEGMENT },
+ { S390_FEAT_LOCAL_TLB_CLEARING, S390_FEAT_DAT_ENH},
+ { S390_FEAT_LONG_DISPLACEMENT_FAST, S390_FEAT_LONG_DISPLACEMENT },
+ { S390_FEAT_DFP_FAST, S390_FEAT_DFP },
+ { S390_FEAT_TRANSACTIONAL_EXE, S390_FEAT_STFLE_49 },
+ { S390_FEAT_EDAT_2, S390_FEAT_EDAT},
+ { S390_FEAT_MSA_EXT_5, S390_FEAT_KIMD_SHA_512 },
+ { S390_FEAT_MSA_EXT_5, S390_FEAT_KLMD_SHA_512 },
+ { S390_FEAT_MSA_EXT_4, S390_FEAT_MSA_EXT_3 },
+ { S390_FEAT_SIE_CMMA, S390_FEAT_CMM },
+ { S390_FEAT_SIE_CMMA, S390_FEAT_SIE_GSLS },
+ { S390_FEAT_SIE_PFMFI, S390_FEAT_EDAT },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dep); i++) {
+ if (test_bit(dep[i][0], model->features) &&
+ !test_bit(dep[i][1], model->features)) {
+ error_report("Warning: \'%s\' requires \'%s\'.",
+ s390_feat_def(dep[i][0])->name,
+ s390_feat_def(dep[i][1])->name);
+ }
+ }
+}
+
+static void error_prepend_missing_feat(const char *name, void *opaque)
+{
+ error_prepend((Error **) opaque, "%s ", name);
+}
+
+static void check_compatibility(const S390CPUModel *max_model,
+ const S390CPUModel *model, Error **errp)
+{
+ S390FeatBitmap missing;
+
+ if (model->def->gen > max_model->def->gen) {
+ error_setg(errp, "Selected CPU generation is too new. Maximum "
+ "supported model in the configuration: \'%s\'",
+ max_model->def->name);
+ return;
+ } else if (model->def->gen == max_model->def->gen &&
+ model->def->ec_ga > max_model->def->ec_ga) {
+ error_setg(errp, "Selected CPU GA level is too new. Maximum "
+ "supported model in the configuration: \'%s\'",
+ max_model->def->name);
+ return;
+ }
+
+ /* detect the missing features to properly report them */
+ bitmap_andnot(missing, model->features, max_model->features, S390_FEAT_MAX);
+ if (bitmap_empty(missing, S390_FEAT_MAX)) {
+ return;
+ }
+
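+    /* seed the error, then prepend the missing feature names and the message */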
+ error_setg(errp, " ");
+ s390_feat_bitmap_to_ascii(missing, errp, error_prepend_missing_feat);
+ error_prepend(errp, "Some features requested in the CPU model are not "
+ "available in the configuration: ");
+}
+
+static S390CPUModel *get_max_cpu_model(Error **errp)
+{
+#ifndef CONFIG_USER_ONLY
+ static S390CPUModel max_model;
+ static bool cached;
+
+ if (cached) {
+ return &max_model;
+ }
+
+ if (kvm_enabled()) {
+ kvm_s390_get_host_cpu_model(&max_model, errp);
+ } else {
+        /* TCG emulates a z900 */
+ max_model.def = &s390_cpu_defs[0];
+ bitmap_copy(max_model.features, max_model.def->default_feat,
+ S390_FEAT_MAX);
+ }
+ if (!*errp) {
+ cached = true;
+ return &max_model;
+ }
+#endif
+ return NULL;
+}
+
+static inline void apply_cpu_model(const S390CPUModel *model, Error **errp)
+{
+#ifndef CONFIG_USER_ONLY
+ static S390CPUModel applied_model;
+ static bool applied;
+
+ /*
+ * We have the same model for all VCPUs. KVM can only be configured before
+ * any VCPUs are defined in KVM.
+ */
+ if (applied) {
+ if (model && memcmp(&applied_model, model, sizeof(S390CPUModel))) {
+ error_setg(errp, "Mixed CPU models are not supported on s390x.");
+ }
+ return;
+ }
+
+ if (kvm_enabled()) {
+ kvm_s390_apply_cpu_model(model, errp);
+ } else if (model) {
+        /* FIXME TCG - use data for stidp/stfl */
+ }
+
+ if (!*errp) {
+ applied = true;
+ if (model) {
+ applied_model = *model;
+ }
+ }
+#endif
+}
+
+void s390_realize_cpu_model(CPUState *cs, Error **errp)
+{
+ S390CPUClass *xcc = S390_CPU_GET_CLASS(cs);
+ S390CPU *cpu = S390_CPU(cs);
+ const S390CPUModel *max_model;
+
+ if (xcc->kvm_required && !kvm_enabled()) {
+ error_setg(errp, "CPU definition requires KVM");
+ return;
+ }
+
+ if (!cpu->model) {
+        /* no host model support -> perform compatibility handling */
+ apply_cpu_model(NULL, errp);
+ return;
+ }
+
+ max_model = get_max_cpu_model(errp);
+ if (*errp) {
+ error_prepend(errp, "CPU models are not available: ");
+ return;
+ }
+
+ /* copy over properties that can vary */
+ cpu->model->lowest_ibc = max_model->lowest_ibc;
+ cpu->model->cpu_id = max_model->cpu_id;
+ cpu->model->cpu_ver = max_model->cpu_ver;
+
+ check_consistency(cpu->model);
+ check_compatibility(max_model, cpu->model, errp);
+ if (*errp) {
+ return;
+ }
+
+ apply_cpu_model(cpu->model, errp);
+}
+
+static void get_feature(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ S390Feat feat = (S390Feat) opaque;
+ S390CPU *cpu = S390_CPU(obj);
+ bool value;
+
+ if (!cpu->model) {
+ error_setg(errp, "Details about the host CPU model are not available, "
+ "features cannot be queried.");
+ return;
+ }
+
+ value = test_bit(feat, cpu->model->features);
+ visit_type_bool(v, name, &value, errp);
+}
+
+static void set_feature(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ S390Feat feat = (S390Feat) opaque;
+ DeviceState *dev = DEVICE(obj);
+ S390CPU *cpu = S390_CPU(obj);
+ bool value;
+
+ if (dev->realized) {
+ error_setg(errp, "Attempt to set property '%s' on '%s' after "
+ "it was realized", name, object_get_typename(obj));
+ return;
+ } else if (!cpu->model) {
+ error_setg(errp, "Details about the host CPU model are not available, "
+ "features cannot be changed.");
+ return;
+ }
+
+ visit_type_bool(v, name, &value, errp);
+ if (*errp) {
+ return;
+ }
+ if (value) {
+ if (!test_bit(feat, cpu->model->def->full_feat)) {
+ error_setg(errp, "Feature '%s' is not available for CPU model '%s',"
+ " it was introduced with later models.",
+ name, cpu->model->def->name);
+ return;
+ }
+ set_bit(feat, cpu->model->features);
+ } else {
+ clear_bit(feat, cpu->model->features);
+ }
+}
+
+static void get_feature_group(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ S390FeatGroup group = (S390FeatGroup) opaque;
+ const S390FeatGroupDef *def = s390_feat_group_def(group);
+ S390CPU *cpu = S390_CPU(obj);
+ S390FeatBitmap tmp;
+ bool value;
+
+ if (!cpu->model) {
+ error_setg(errp, "Details about the host CPU model are not available, "
+ "features cannot be queried.");
+ return;
+ }
+
+ /* a group is enabled if all features are enabled */
+ bitmap_and(tmp, cpu->model->features, def->feat, S390_FEAT_MAX);
+ value = bitmap_equal(tmp, def->feat, S390_FEAT_MAX);
+ visit_type_bool(v, name, &value, errp);
+}
+
+static void set_feature_group(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ S390FeatGroup group = (S390FeatGroup) opaque;
+ const S390FeatGroupDef *def = s390_feat_group_def(group);
+ DeviceState *dev = DEVICE(obj);
+ S390CPU *cpu = S390_CPU(obj);
+ bool value;
+
+ if (dev->realized) {
+ error_setg(errp, "Attempt to set property '%s' on '%s' after "
+ "it was realized", name, object_get_typename(obj));
+ return;
+ } else if (!cpu->model) {
+ error_setg(errp, "Details about the host CPU model are not available, "
+ "features cannot be changed.");
+ return;
+ }
+
+ visit_type_bool(v, name, &value, errp);
+ if (*errp) {
+ return;
+ }
+ if (value) {
+ /* groups are added in one shot, so an intersect is sufficient */
+ if (!bitmap_intersects(def->feat, cpu->model->def->full_feat,
+ S390_FEAT_MAX)) {
+ error_setg(errp, "Group '%s' is not available for CPU model '%s',"
+ " it was introduced with later models.",
+ name, cpu->model->def->name);
+ return;
+ }
+ bitmap_or(cpu->model->features, cpu->model->features, def->feat,
+ S390_FEAT_MAX);
+ } else {
+ bitmap_andnot(cpu->model->features, cpu->model->features, def->feat,
+ S390_FEAT_MAX);
+ }
+}
+
+void s390_cpu_model_register_props(Object *obj)
+{
+ S390FeatGroup group;
+ S390Feat feat;
+
+ for (feat = 0; feat < S390_FEAT_MAX; feat++) {
+ const S390FeatDef *def = s390_feat_def(feat);
+ object_property_add(obj, def->name, "bool", get_feature,
+ set_feature, NULL, (void *) feat, NULL);
+        object_property_set_description(obj, def->name, def->desc, NULL);
+ }
+ for (group = 0; group < S390_FEAT_GROUP_MAX; group++) {
+ const S390FeatGroupDef *def = s390_feat_group_def(group);
+ object_property_add(obj, def->name, "bool", get_feature_group,
+ set_feature_group, NULL, (void *) group, NULL);
+        object_property_set_description(obj, def->name, def->desc, NULL);
+ }
+}
+
+static void s390_cpu_model_initfn(Object *obj)
+{
+ S390CPU *cpu = S390_CPU(obj);
+ S390CPUClass *xcc = S390_CPU_GET_CLASS(cpu);
+
+ cpu->model = g_malloc0(sizeof(*cpu->model));
+ /* copy the model, so we can modify it */
+ cpu->model->def = xcc->cpu_def;
+ if (xcc->is_static) {
+ /* base model - features will never change */
+ bitmap_copy(cpu->model->features, cpu->model->def->base_feat,
+ S390_FEAT_MAX);
+ } else {
+ /* latest model - features can change */
+ bitmap_copy(cpu->model->features,
+ cpu->model->def->default_feat, S390_FEAT_MAX);
+ }
+}
+
+#ifdef CONFIG_KVM
+static void s390_host_cpu_model_initfn(Object *obj)
+{
+ S390CPU *cpu = S390_CPU(obj);
+ Error *err = NULL;
+
+ if (!kvm_enabled() || !kvm_s390_cpu_models_supported()) {
+ return;
+ }
+
+ cpu->model = g_malloc0(sizeof(*cpu->model));
+ kvm_s390_get_host_cpu_model(cpu->model, &err);
+ if (err) {
+ error_report_err(err);
+ g_free(cpu->model);
+        /* fall back to unsupported CPU models */
+ cpu->model = NULL;
+ }
+}
+#endif
+
+static void s390_qemu_cpu_model_initfn(Object *obj)
+{
+ S390CPU *cpu = S390_CPU(obj);
+
+ cpu->model = g_malloc0(sizeof(*cpu->model));
+ /* TCG emulates a z900 */
+ cpu->model->def = &s390_cpu_defs[0];
+ bitmap_copy(cpu->model->features, cpu->model->def->default_feat,
+ S390_FEAT_MAX);
+}
+
+static void s390_cpu_model_finalize(Object *obj)
+{
+ S390CPU *cpu = S390_CPU(obj);
+
+ g_free(cpu->model);
+ cpu->model = NULL;
+}
+
+static bool get_is_migration_safe(Object *obj, Error **errp)
+{
+ return S390_CPU_GET_CLASS(obj)->is_migration_safe;
+}
+
+static bool get_is_static(Object *obj, Error **errp)
+{
+ return S390_CPU_GET_CLASS(obj)->is_static;
+}
+
+static char *get_description(Object *obj, Error **errp)
+{
+ return g_strdup(S390_CPU_GET_CLASS(obj)->desc);
+}
+
+void s390_cpu_model_class_register_props(ObjectClass *oc)
+{
+ object_class_property_add_bool(oc, "migration-safe", get_is_migration_safe,
+ NULL, NULL);
+ object_class_property_add_bool(oc, "static", get_is_static,
+ NULL, NULL);
+ object_class_property_add_str(oc, "description", get_description, NULL,
+ NULL);
+}
+
+#ifdef CONFIG_KVM
+static void s390_host_cpu_model_class_init(ObjectClass *oc, void *data)
+{
+ S390CPUClass *xcc = S390_CPU_CLASS(oc);
+
+ xcc->kvm_required = true;
+ xcc->desc = "KVM only: All recognized features";
+}
+#endif
+
+static void s390_base_cpu_model_class_init(ObjectClass *oc, void *data)
+{
+ S390CPUClass *xcc = S390_CPU_CLASS(oc);
+
+ /* all base models are migration safe */
+ xcc->cpu_def = (const S390CPUDef *) data;
+ xcc->is_migration_safe = true;
+ xcc->is_static = true;
+ xcc->desc = xcc->cpu_def->desc;
+}
+
+static void s390_cpu_model_class_init(ObjectClass *oc, void *data)
+{
+ S390CPUClass *xcc = S390_CPU_CLASS(oc);
+
+ /* model that can change between QEMU versions */
+ xcc->cpu_def = (const S390CPUDef *) data;
+ xcc->is_migration_safe = true;
+ xcc->desc = xcc->cpu_def->desc;
+}
+
+static void s390_qemu_cpu_model_class_init(ObjectClass *oc, void *data)
+{
+ S390CPUClass *xcc = S390_CPU_CLASS(oc);
+
+ xcc->is_migration_safe = true;
+ xcc->desc = g_strdup_printf("QEMU Virtual CPU version %s",
+ qemu_hw_version());
+}
+
+#define S390_CPU_TYPE_SUFFIX "-" TYPE_S390_CPU
+#define S390_CPU_TYPE_NAME(name) (name S390_CPU_TYPE_SUFFIX)
+
+/* Generate type name for a cpu model. Caller has to free the string. */
+static char *s390_cpu_type_name(const char *model_name)
+{
+ return g_strdup_printf(S390_CPU_TYPE_NAME("%s"), model_name);
+}
+
+/* Generate type name for a base cpu model. Caller has to free the string. */
+static char *s390_base_cpu_type_name(const char *model_name)
+{
+ return g_strdup_printf(S390_CPU_TYPE_NAME("%s-base"), model_name);
+}
+
+ObjectClass *s390_cpu_class_by_name(const char *name)
+{
+ char *typename = s390_cpu_type_name(name);
+ ObjectClass *oc;
+
+ oc = object_class_by_name(typename);
+ g_free(typename);
+ return oc;
+}
+
+static const TypeInfo qemu_s390_cpu_type_info = {
+ .name = S390_CPU_TYPE_NAME("qemu"),
+ .parent = TYPE_S390_CPU,
+ .instance_init = s390_qemu_cpu_model_initfn,
+ .instance_finalize = s390_cpu_model_finalize,
+ .class_init = s390_qemu_cpu_model_class_init,
+};
+
+#ifdef CONFIG_KVM
+static const TypeInfo host_s390_cpu_type_info = {
+ .name = S390_CPU_TYPE_NAME("host"),
+ .parent = TYPE_S390_CPU,
+ .instance_init = s390_host_cpu_model_initfn,
+ .instance_finalize = s390_cpu_model_finalize,
+ .class_init = s390_host_cpu_model_class_init,
+};
+#endif
+
+static void register_types(void)
+{
+ int i;
+
+    /* initialize all bitmaps from the generated data */
+ for (i = 0; i < ARRAY_SIZE(s390_cpu_defs); i++) {
+ s390_init_feat_bitmap(s390_cpu_defs[i].base_init,
+ s390_cpu_defs[i].base_feat);
+ s390_init_feat_bitmap(s390_cpu_defs[i].default_init,
+ s390_cpu_defs[i].default_feat);
+ s390_init_feat_bitmap(s390_cpu_defs[i].full_init,
+ s390_cpu_defs[i].full_feat);
+ }
+
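+    /* register one static "-base" model and one model that may change
+       between QEMU versions, per CPU definition */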
+ for (i = 0; i < ARRAY_SIZE(s390_cpu_defs); i++) {
+ char *base_name = s390_base_cpu_type_name(s390_cpu_defs[i].name);
+ TypeInfo ti_base = {
+ .name = base_name,
+ .parent = TYPE_S390_CPU,
+ .instance_init = s390_cpu_model_initfn,
+ .instance_finalize = s390_cpu_model_finalize,
+ .class_init = s390_base_cpu_model_class_init,
+ .class_data = (void *) &s390_cpu_defs[i],
+ };
+ char *name = s390_cpu_type_name(s390_cpu_defs[i].name);
+ TypeInfo ti = {
+ .name = name,
+ .parent = TYPE_S390_CPU,
+ .instance_init = s390_cpu_model_initfn,
+ .instance_finalize = s390_cpu_model_finalize,
+ .class_init = s390_cpu_model_class_init,
+ .class_data = (void *) &s390_cpu_defs[i],
+ };
+
+ type_register_static(&ti_base);
+ type_register_static(&ti);
+ g_free(base_name);
+ g_free(name);
+ }
+
+ type_register_static(&qemu_s390_cpu_type_info);
+#ifdef CONFIG_KVM
+ type_register_static(&host_s390_cpu_type_info);
+#endif
+}
+
+type_init(register_types)
diff --git a/target/s390x/cpu_models.h b/target/s390x/cpu_models.h
new file mode 100644
index 0000000000..136a602313
--- /dev/null
+++ b/target/s390x/cpu_models.h
@@ -0,0 +1,119 @@
+/*
+ * CPU models for s390x
+ *
+ * Copyright 2016 IBM Corp.
+ *
+ * Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#ifndef TARGET_S390X_CPU_MODELS_H
+#define TARGET_S390X_CPU_MODELS_H
+
+#include "cpu_features.h"
+#include "qom/cpu.h"
+
+/* static CPU definition */
+typedef struct S390CPUDef {
+ const char *name; /* name exposed to the user */
+ const char *desc; /* description exposed to the user */
+ uint8_t gen; /* hw generation identification */
+ uint16_t type; /* cpu type identification */
+ uint8_t ec_ga; /* EC GA version (on which also the BC is based) */
+    uint8_t mha_pow;          /* Maximum Host Address Power, mha = 2^pow-1 */
+ uint32_t hmfai; /* hypervisor-managed facilities */
+ /* base/min features, must never be changed between QEMU versions */
+ S390FeatBitmap base_feat;
+ /* used to init base_feat from generated data */
+ S390FeatInit base_init;
+    /* default features, QEMU version specific */
+ S390FeatBitmap default_feat;
+ /* used to init default_feat from generated data */
+ S390FeatInit default_init;
+ /* max allowed features, QEMU version specific */
+ S390FeatBitmap full_feat;
+ /* used to init full_feat from generated data */
+ S390FeatInit full_init;
+} S390CPUDef;
+
+/* CPU model based on a CPU definition */
+typedef struct S390CPUModel {
+ const S390CPUDef *def;
+ S390FeatBitmap features;
+ /* values copied from the "host" model, can change during migration */
+ uint16_t lowest_ibc; /* lowest IBC that the hardware supports */
+ uint32_t cpu_id; /* CPU id */
+ uint8_t cpu_ver; /* CPU version, usually "ff" for kvm */
+} S390CPUModel;
+
+/*
+ * CPU ID
+ *
+ * bits 0-7: Zeroes (ff for kvm)
+ * bits 8-31: CPU ID (serial number)
+ * bits 32-47: Machine type
+ * bits 48-63: Zeroes
+ */
+#define cpuid_type(x) (((x) >> 16) & 0xffff)
+#define cpuid_id(x) (((x) >> 32) & 0xffffff)
+#define cpuid_ver(x) (((x) >> 56) & 0xff)
+
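+/* helpers to pick apart the combined IBC value (lowest and unblocked IBC) */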
+#define lowest_ibc(x) (((uint32_t)(x) >> 16) & 0xfff)
+#define unblocked_ibc(x) ((uint32_t)(x) & 0xfff)
+#define has_ibc(x) (lowest_ibc(x) != 0)
+
+#define S390_GEN_Z10 0xa
+#define ibc_gen(x)     ((x) == 0 ? 0 : (((x) >> 4) + S390_GEN_Z10))
+#define ibc_ec_ga(x)   ((x) & 0xf)
+
+uint32_t s390_get_hmfai(void);
+uint8_t s390_get_mha_pow(void);
+uint32_t s390_get_ibc_val(void);
+static inline uint16_t s390_ibc_from_cpu_model(const S390CPUModel *model)
+{
+ uint16_t ibc = 0;
+
+ if (model->def->gen >= S390_GEN_Z10) {
+ ibc = ((model->def->gen - S390_GEN_Z10) << 4) + model->def->ec_ga;
+ }
+ return ibc;
+}
+void s390_get_feat_block(S390FeatType type, uint8_t *data);
+bool s390_has_feat(S390Feat feat);
+uint8_t s390_get_gen_for_cpu_type(uint16_t type);
+static inline bool s390_known_cpu_type(uint16_t type)
+{
+ return s390_get_gen_for_cpu_type(type) != 0;
+}
+static inline uint64_t s390_cpuid_from_cpu_model(const S390CPUModel *model)
+{
+ return ((uint64_t)model->cpu_ver << 56) |
+ ((uint64_t)model->cpu_id << 32) |
+ ((uint64_t)model->def->type << 16);
+}
+S390CPUDef const *s390_find_cpu_def(uint16_t type, uint8_t gen, uint8_t ec_ga,
+ S390FeatBitmap features);
+
+#ifdef CONFIG_KVM
+bool kvm_s390_cpu_models_supported(void);
+void kvm_s390_get_host_cpu_model(S390CPUModel *model, Error **errp);
+void kvm_s390_apply_cpu_model(const S390CPUModel *model, Error **errp);
+#else
+static inline void kvm_s390_get_host_cpu_model(S390CPUModel *model,
+ Error **errp)
+{
+}
+static inline void kvm_s390_apply_cpu_model(const S390CPUModel *model,
+ Error **errp)
+{
+}
+static inline bool kvm_s390_cpu_models_supported(void)
+{
+ return false;
+}
+#endif
+
+#endif /* TARGET_S390X_CPU_MODELS_H */
diff --git a/target/s390x/fpu_helper.c b/target/s390x/fpu_helper.c
new file mode 100644
index 0000000000..e604e9f7be
--- /dev/null
+++ b/target/s390x/fpu_helper.c
@@ -0,0 +1,749 @@
+/*
+ * S/390 FPU helper routines
+ *
+ * Copyright (c) 2009 Ulrich Hecht
+ * Copyright (c) 2009 Alexander Graf
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "exec/helper-proto.h"
+
+/* #define DEBUG_HELPER */
+#ifdef DEBUG_HELPER
+#define HELPER_LOG(x...) qemu_log(x)
+#else
+#define HELPER_LOG(x...)
+#endif
+
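+/*
+ * Return a 128-bit float result: the low 64 bits go to env->retxl, the
+ * high 64 bits become the helper's return value.
+ */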
+#define RET128(F) (env->retxl = F.low, F.high)
+
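+/*
+ * Relocate the single flag bit "from" within "mask" to bit position "to";
+ * used to translate softfloat exception flags into s390 DXC bits.
+ */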
+#define convert_bit(mask, from, to) \
+ (to < from \
+ ? (mask / (from / to)) & to \
+ : (mask & from) * (to / from))
+
+static void ieee_exception(CPUS390XState *env, uint32_t dxc, uintptr_t retaddr)
+{
+ /* Install the DXC code. */
+ env->fpc = (env->fpc & ~0xff00) | (dxc << 8);
+ /* Trap. */
+ runtime_exception(env, PGM_DATA, retaddr);
+}
+
+/* Should be called after any operation that may raise IEEE exceptions. */
+static void handle_exceptions(CPUS390XState *env, uintptr_t retaddr)
+{
+ unsigned s390_exc, qemu_exc;
+
+ /* Get the exceptions raised by the current operation. Reset the
+ fpu_status contents so that the next operation has a clean slate. */
+ qemu_exc = env->fpu_status.float_exception_flags;
+ if (qemu_exc == 0) {
+ return;
+ }
+ env->fpu_status.float_exception_flags = 0;
+
+ /* Convert softfloat exception bits to s390 exception bits. */
+ s390_exc = 0;
+ s390_exc |= convert_bit(qemu_exc, float_flag_invalid, 0x80);
+ s390_exc |= convert_bit(qemu_exc, float_flag_divbyzero, 0x40);
+ s390_exc |= convert_bit(qemu_exc, float_flag_overflow, 0x20);
+ s390_exc |= convert_bit(qemu_exc, float_flag_underflow, 0x10);
+ s390_exc |= convert_bit(qemu_exc, float_flag_inexact, 0x08);
+
+ /* Install the exceptions that we raised. */
+ env->fpc |= s390_exc << 16;
+
+ /* Send signals for enabled exceptions. */
+ s390_exc &= env->fpc >> 24;
+ if (s390_exc) {
+ ieee_exception(env, s390_exc, retaddr);
+ }
+}
+
+static inline int float_comp_to_cc(CPUS390XState *env, int float_compare)
+{
+ S390CPU *cpu = s390_env_get_cpu(env);
+
+ switch (float_compare) {
+ case float_relation_equal:
+ return 0;
+ case float_relation_less:
+ return 1;
+ case float_relation_greater:
+ return 2;
+ case float_relation_unordered:
+ return 3;
+ default:
+ cpu_abort(CPU(cpu), "unknown return value for float compare\n");
+ }
+}
+
+/* condition codes for unary FP ops */
+uint32_t set_cc_nz_f32(float32 v)
+{
+ if (float32_is_any_nan(v)) {
+ return 3;
+ } else if (float32_is_zero(v)) {
+ return 0;
+ } else if (float32_is_neg(v)) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
+uint32_t set_cc_nz_f64(float64 v)
+{
+ if (float64_is_any_nan(v)) {
+ return 3;
+ } else if (float64_is_zero(v)) {
+ return 0;
+ } else if (float64_is_neg(v)) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
+uint32_t set_cc_nz_f128(float128 v)
+{
+ if (float128_is_any_nan(v)) {
+ return 3;
+ } else if (float128_is_zero(v)) {
+ return 0;
+ } else if (float128_is_neg(v)) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
+/* 32-bit FP addition */
+uint64_t HELPER(aeb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+ float32 ret = float32_add(f1, f2, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return ret;
+}
+
+/* 64-bit FP addition */
+uint64_t HELPER(adb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+ float64 ret = float64_add(f1, f2, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return ret;
+}
+
+/* 128-bit FP addition */
+uint64_t HELPER(axb)(CPUS390XState *env, uint64_t ah, uint64_t al,
+ uint64_t bh, uint64_t bl)
+{
+ float128 ret = float128_add(make_float128(ah, al),
+ make_float128(bh, bl),
+ &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return RET128(ret);
+}
+
+/* 32-bit FP subtraction */
+uint64_t HELPER(seb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+ float32 ret = float32_sub(f1, f2, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return ret;
+}
+
+/* 64-bit FP subtraction */
+uint64_t HELPER(sdb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+ float64 ret = float64_sub(f1, f2, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return ret;
+}
+
+/* 128-bit FP subtraction */
+uint64_t HELPER(sxb)(CPUS390XState *env, uint64_t ah, uint64_t al,
+ uint64_t bh, uint64_t bl)
+{
+ float128 ret = float128_sub(make_float128(ah, al),
+ make_float128(bh, bl),
+ &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return RET128(ret);
+}
+
+/* 32-bit FP division */
+uint64_t HELPER(deb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+ float32 ret = float32_div(f1, f2, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return ret;
+}
+
+/* 64-bit FP division */
+uint64_t HELPER(ddb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+ float64 ret = float64_div(f1, f2, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return ret;
+}
+
+/* 128-bit FP division */
+uint64_t HELPER(dxb)(CPUS390XState *env, uint64_t ah, uint64_t al,
+ uint64_t bh, uint64_t bl)
+{
+ float128 ret = float128_div(make_float128(ah, al),
+ make_float128(bh, bl),
+ &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return RET128(ret);
+}
+
+/* 32-bit FP multiplication */
+uint64_t HELPER(meeb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+ float32 ret = float32_mul(f1, f2, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return ret;
+}
+
+/* 64-bit FP multiplication */
+uint64_t HELPER(mdb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+ float64 ret = float64_mul(f1, f2, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return ret;
+}
+
+/* 64/32-bit FP multiplication */
+uint64_t HELPER(mdeb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+ float64 ret = float32_to_float64(f2, &env->fpu_status);
+ ret = float64_mul(f1, ret, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return ret;
+}
+
+/* 128-bit FP multiplication */
+uint64_t HELPER(mxb)(CPUS390XState *env, uint64_t ah, uint64_t al,
+ uint64_t bh, uint64_t bl)
+{
+ float128 ret = float128_mul(make_float128(ah, al),
+ make_float128(bh, bl),
+ &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return RET128(ret);
+}
+
+/* 128/64-bit FP multiplication */
+uint64_t HELPER(mxdb)(CPUS390XState *env, uint64_t ah, uint64_t al,
+ uint64_t f2)
+{
+ float128 ret = float64_to_float128(f2, &env->fpu_status);
+ ret = float128_mul(make_float128(ah, al), ret, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return RET128(ret);
+}
+
+/* convert 32-bit float to 64-bit float */
+uint64_t HELPER(ldeb)(CPUS390XState *env, uint64_t f2)
+{
+ float64 ret = float32_to_float64(f2, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return float64_maybe_silence_nan(ret, &env->fpu_status);
+}
+
+/* convert 128-bit float to 64-bit float */
+uint64_t HELPER(ldxb)(CPUS390XState *env, uint64_t ah, uint64_t al)
+{
+ float64 ret = float128_to_float64(make_float128(ah, al), &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return float64_maybe_silence_nan(ret, &env->fpu_status);
+}
+
+/* convert 64-bit float to 128-bit float */
+uint64_t HELPER(lxdb)(CPUS390XState *env, uint64_t f2)
+{
+ float128 ret = float64_to_float128(f2, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return RET128(float128_maybe_silence_nan(ret, &env->fpu_status));
+}
+
+/* convert 32-bit float to 128-bit float */
+uint64_t HELPER(lxeb)(CPUS390XState *env, uint64_t f2)
+{
+ float128 ret = float32_to_float128(f2, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return RET128(float128_maybe_silence_nan(ret, &env->fpu_status));
+}
+
+/* convert 64-bit float to 32-bit float */
+uint64_t HELPER(ledb)(CPUS390XState *env, uint64_t f2)
+{
+ float32 ret = float64_to_float32(f2, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return float32_maybe_silence_nan(ret, &env->fpu_status);
+}
+
+/* convert 128-bit float to 32-bit float */
+uint64_t HELPER(lexb)(CPUS390XState *env, uint64_t ah, uint64_t al)
+{
+ float32 ret = float128_to_float32(make_float128(ah, al), &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return float32_maybe_silence_nan(ret, &env->fpu_status);
+}
+
+/* 32-bit FP compare */
+uint32_t HELPER(ceb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+ int cmp = float32_compare_quiet(f1, f2, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return float_comp_to_cc(env, cmp);
+}
+
+/* 64-bit FP compare */
+uint32_t HELPER(cdb)(CPUS390XState *env, uint64_t f1, uint64_t f2)
+{
+ int cmp = float64_compare_quiet(f1, f2, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return float_comp_to_cc(env, cmp);
+}
+
+/* 128-bit FP compare */
+uint32_t HELPER(cxb)(CPUS390XState *env, uint64_t ah, uint64_t al,
+ uint64_t bh, uint64_t bl)
+{
+ int cmp = float128_compare_quiet(make_float128(ah, al),
+ make_float128(bh, bl),
+ &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return float_comp_to_cc(env, cmp);
+}
+
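+/*
+ * Install the rounding mode selected by the m3 field and return the
+ * previous softfloat rounding mode so the caller can restore it.
+ */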
+static int swap_round_mode(CPUS390XState *env, int m3)
+{
+ int ret = env->fpu_status.float_rounding_mode;
+ switch (m3) {
+ case 0:
+ /* current mode */
+ break;
+ case 1:
+        /* biased round to nearest */
+ case 4:
+ /* round to nearest */
+ set_float_rounding_mode(float_round_nearest_even, &env->fpu_status);
+ break;
+ case 5:
+ /* round to zero */
+ set_float_rounding_mode(float_round_to_zero, &env->fpu_status);
+ break;
+ case 6:
+ /* round to +inf */
+ set_float_rounding_mode(float_round_up, &env->fpu_status);
+ break;
+ case 7:
+ /* round to -inf */
+ set_float_rounding_mode(float_round_down, &env->fpu_status);
+ break;
+ }
+ return ret;
+}
+
+/* convert 64-bit int to 32-bit float */
+uint64_t HELPER(cegb)(CPUS390XState *env, int64_t v2, uint32_t m3)
+{
+ int hold = swap_round_mode(env, m3);
+ float32 ret = int64_to_float32(v2, &env->fpu_status);
+ set_float_rounding_mode(hold, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return ret;
+}
+
+/* convert 64-bit int to 64-bit float */
+uint64_t HELPER(cdgb)(CPUS390XState *env, int64_t v2, uint32_t m3)
+{
+ int hold = swap_round_mode(env, m3);
+ float64 ret = int64_to_float64(v2, &env->fpu_status);
+ set_float_rounding_mode(hold, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return ret;
+}
+
+/* convert 64-bit int to 128-bit float */
+uint64_t HELPER(cxgb)(CPUS390XState *env, int64_t v2, uint32_t m3)
+{
+ int hold = swap_round_mode(env, m3);
+ float128 ret = int64_to_float128(v2, &env->fpu_status);
+ set_float_rounding_mode(hold, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return RET128(ret);
+}
+
+/* convert 64-bit uint to 32-bit float */
+uint64_t HELPER(celgb)(CPUS390XState *env, uint64_t v2, uint32_t m3)
+{
+ int hold = swap_round_mode(env, m3);
+ float32 ret = uint64_to_float32(v2, &env->fpu_status);
+ set_float_rounding_mode(hold, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return ret;
+}
+
+/* convert 64-bit uint to 64-bit float */
+uint64_t HELPER(cdlgb)(CPUS390XState *env, uint64_t v2, uint32_t m3)
+{
+ int hold = swap_round_mode(env, m3);
+ float64 ret = uint64_to_float64(v2, &env->fpu_status);
+ set_float_rounding_mode(hold, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return ret;
+}
+
+/* convert 64-bit uint to 128-bit float */
+uint64_t HELPER(cxlgb)(CPUS390XState *env, uint64_t v2, uint32_t m3)
+{
+ int hold = swap_round_mode(env, m3);
+ float128 ret = uint64_to_float128(v2, &env->fpu_status);
+ set_float_rounding_mode(hold, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return RET128(ret);
+}
+
+/* convert 32-bit float to 64-bit int */
+uint64_t HELPER(cgeb)(CPUS390XState *env, uint64_t v2, uint32_t m3)
+{
+ int hold = swap_round_mode(env, m3);
+ int64_t ret = float32_to_int64(v2, &env->fpu_status);
+ set_float_rounding_mode(hold, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return ret;
+}
+
+/* convert 64-bit float to 64-bit int */
+uint64_t HELPER(cgdb)(CPUS390XState *env, uint64_t v2, uint32_t m3)
+{
+ int hold = swap_round_mode(env, m3);
+ int64_t ret = float64_to_int64(v2, &env->fpu_status);
+ set_float_rounding_mode(hold, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return ret;
+}
+
+/* convert 128-bit float to 64-bit int */
+uint64_t HELPER(cgxb)(CPUS390XState *env, uint64_t h, uint64_t l, uint32_t m3)
+{
+ int hold = swap_round_mode(env, m3);
+ float128 v2 = make_float128(h, l);
+ int64_t ret = float128_to_int64(v2, &env->fpu_status);
+ set_float_rounding_mode(hold, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return ret;
+}
+
+/* convert 32-bit float to 32-bit int */
+uint64_t HELPER(cfeb)(CPUS390XState *env, uint64_t v2, uint32_t m3)
+{
+ int hold = swap_round_mode(env, m3);
+ int32_t ret = float32_to_int32(v2, &env->fpu_status);
+ set_float_rounding_mode(hold, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return ret;
+}
+
+/* convert 64-bit float to 32-bit int */
+uint64_t HELPER(cfdb)(CPUS390XState *env, uint64_t v2, uint32_t m3)
+{
+ int hold = swap_round_mode(env, m3);
+ int32_t ret = float64_to_int32(v2, &env->fpu_status);
+ set_float_rounding_mode(hold, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return ret;
+}
+
+/* convert 128-bit float to 32-bit int */
+uint64_t HELPER(cfxb)(CPUS390XState *env, uint64_t h, uint64_t l, uint32_t m3)
+{
+ int hold = swap_round_mode(env, m3);
+ float128 v2 = make_float128(h, l);
+ int32_t ret = float128_to_int32(v2, &env->fpu_status);
+ set_float_rounding_mode(hold, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return ret;
+}
+
+/* convert 32-bit float to 64-bit uint */
+uint64_t HELPER(clgeb)(CPUS390XState *env, uint64_t v2, uint32_t m3)
+{
+ int hold = swap_round_mode(env, m3);
+ uint64_t ret;
+ v2 = float32_to_float64(v2, &env->fpu_status);
+ ret = float64_to_uint64(v2, &env->fpu_status);
+ set_float_rounding_mode(hold, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return ret;
+}
+
+/* convert 64-bit float to 64-bit uint */
+uint64_t HELPER(clgdb)(CPUS390XState *env, uint64_t v2, uint32_t m3)
+{
+ int hold = swap_round_mode(env, m3);
+ uint64_t ret = float64_to_uint64(v2, &env->fpu_status);
+ set_float_rounding_mode(hold, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return ret;
+}
+
+/* convert 128-bit float to 64-bit uint */
+uint64_t HELPER(clgxb)(CPUS390XState *env, uint64_t h, uint64_t l, uint32_t m3)
+{
+ int hold = swap_round_mode(env, m3);
+ float128 v2 = make_float128(h, l);
+ /* ??? Not 100% correct. */
+ uint64_t ret = float128_to_int64(v2, &env->fpu_status);
+ set_float_rounding_mode(hold, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return ret;
+}
+
+/* convert 32-bit float to 32-bit uint */
+uint64_t HELPER(clfeb)(CPUS390XState *env, uint64_t v2, uint32_t m3)
+{
+ int hold = swap_round_mode(env, m3);
+ uint32_t ret = float32_to_uint32(v2, &env->fpu_status);
+ set_float_rounding_mode(hold, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return ret;
+}
+
+/* convert 64-bit float to 32-bit uint */
+uint64_t HELPER(clfdb)(CPUS390XState *env, uint64_t v2, uint32_t m3)
+{
+ int hold = swap_round_mode(env, m3);
+ uint32_t ret = float64_to_uint32(v2, &env->fpu_status);
+ set_float_rounding_mode(hold, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return ret;
+}
+
+/* convert 128-bit float to 32-bit uint */
+uint64_t HELPER(clfxb)(CPUS390XState *env, uint64_t h, uint64_t l, uint32_t m3)
+{
+ int hold = swap_round_mode(env, m3);
+ float128 v2 = make_float128(h, l);
+ /* Not 100% correct. */
+ uint32_t ret = float128_to_int64(v2, &env->fpu_status);
+ set_float_rounding_mode(hold, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return ret;
+}
+
+/* round to integer 32-bit */
+uint64_t HELPER(fieb)(CPUS390XState *env, uint64_t f2, uint32_t m3)
+{
+ int hold = swap_round_mode(env, m3);
+ float32 ret = float32_round_to_int(f2, &env->fpu_status);
+ set_float_rounding_mode(hold, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return ret;
+}
+
+/* round to integer 64-bit */
+uint64_t HELPER(fidb)(CPUS390XState *env, uint64_t f2, uint32_t m3)
+{
+ int hold = swap_round_mode(env, m3);
+ float64 ret = float64_round_to_int(f2, &env->fpu_status);
+ set_float_rounding_mode(hold, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return ret;
+}
+
+/* round to integer 128-bit */
+uint64_t HELPER(fixb)(CPUS390XState *env, uint64_t ah, uint64_t al, uint32_t m3)
+{
+ int hold = swap_round_mode(env, m3);
+ float128 ret = float128_round_to_int(make_float128(ah, al),
+ &env->fpu_status);
+ set_float_rounding_mode(hold, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return RET128(ret);
+}
+
+/* 32-bit FP multiply and add */
+uint64_t HELPER(maeb)(CPUS390XState *env, uint64_t f1,
+ uint64_t f2, uint64_t f3)
+{
+ float32 ret = float32_muladd(f2, f3, f1, 0, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return ret;
+}
+
+/* 64-bit FP multiply and add */
+uint64_t HELPER(madb)(CPUS390XState *env, uint64_t f1,
+ uint64_t f2, uint64_t f3)
+{
+ float64 ret = float64_muladd(f2, f3, f1, 0, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return ret;
+}
+
+/* 32-bit FP multiply and subtract */
+uint64_t HELPER(mseb)(CPUS390XState *env, uint64_t f1,
+ uint64_t f2, uint64_t f3)
+{
+ float32 ret = float32_muladd(f2, f3, f1, float_muladd_negate_c,
+ &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return ret;
+}
+
+/* 64-bit FP multiply and subtract */
+uint64_t HELPER(msdb)(CPUS390XState *env, uint64_t f1,
+ uint64_t f2, uint64_t f3)
+{
+ float64 ret = float64_muladd(f2, f3, f1, float_muladd_negate_c,
+ &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return ret;
+}
+
+/* test data class 32-bit */
+uint32_t HELPER(tceb)(CPUS390XState *env, uint64_t f1, uint64_t m2)
+{
+ float32 v1 = f1;
+ int neg = float32_is_neg(v1);
+ uint32_t cc = 0;
+
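+    /*
+     * Each data class has a pair of mask bits, one per sign; "- neg"
+     * selects the bit for the negative variant when the operand is negative.
+     */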
+ if ((float32_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
+ (float32_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
+ (float32_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
+ (float32_is_signaling_nan(v1, &env->fpu_status) &&
+ (m2 & (1 << (1-neg))))) {
+ cc = 1;
+ } else if (m2 & (1 << (9-neg))) {
+ /* assume normalized number */
+ cc = 1;
+ }
+ /* FIXME: denormalized? */
+ return cc;
+}
+
+/* test data class 64-bit */
+uint32_t HELPER(tcdb)(CPUS390XState *env, uint64_t v1, uint64_t m2)
+{
+ int neg = float64_is_neg(v1);
+ uint32_t cc = 0;
+
+ if ((float64_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
+ (float64_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
+ (float64_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
+ (float64_is_signaling_nan(v1, &env->fpu_status) &&
+ (m2 & (1 << (1-neg))))) {
+ cc = 1;
+ } else if (m2 & (1 << (9-neg))) {
+ /* assume normalized number */
+ cc = 1;
+ }
+ /* FIXME: denormalized? */
+ return cc;
+}
+
+/* test data class 128-bit */
+uint32_t HELPER(tcxb)(CPUS390XState *env, uint64_t ah,
+ uint64_t al, uint64_t m2)
+{
+ float128 v1 = make_float128(ah, al);
+ int neg = float128_is_neg(v1);
+ uint32_t cc = 0;
+
+ if ((float128_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
+ (float128_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
+ (float128_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
+ (float128_is_signaling_nan(v1, &env->fpu_status) &&
+ (m2 & (1 << (1-neg))))) {
+ cc = 1;
+ } else if (m2 & (1 << (9-neg))) {
+ /* assume normalized number */
+ cc = 1;
+ }
+ /* FIXME: denormalized? */
+ return cc;
+}
+
+/* square root 32-bit */
+uint64_t HELPER(sqeb)(CPUS390XState *env, uint64_t f2)
+{
+ float32 ret = float32_sqrt(f2, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return ret;
+}
+
+/* square root 64-bit */
+uint64_t HELPER(sqdb)(CPUS390XState *env, uint64_t f2)
+{
+ float64 ret = float64_sqrt(f2, &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return ret;
+}
+
+/* square root 128-bit */
+uint64_t HELPER(sqxb)(CPUS390XState *env, uint64_t ah, uint64_t al)
+{
+ float128 ret = float128_sqrt(make_float128(ah, al), &env->fpu_status);
+ handle_exceptions(env, GETPC());
+ return RET128(ret);
+}
+
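+/* map the two rounding-mode bits of the FPC to softfloat rounding modes */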
+static const int fpc_to_rnd[4] = {
+ float_round_nearest_even,
+ float_round_to_zero,
+ float_round_up,
+ float_round_down
+};
+
+/* set fpc */
+void HELPER(sfpc)(CPUS390XState *env, uint64_t fpc)
+{
+ /* Install everything in the main FPC. */
+ env->fpc = fpc;
+
+ /* Install the rounding mode in the shadow fpu_status. */
+ set_float_rounding_mode(fpc_to_rnd[fpc & 3], &env->fpu_status);
+}
+
+/* set fpc and signal */
+void HELPER(sfas)(CPUS390XState *env, uint64_t val)
+{
+ uint32_t signalling = env->fpc;
+ uint32_t source = val;
+ uint32_t s390_exc;
+
+ /* The contents of the source operand are placed in the FPC register;
+ then the flags in the FPC register are set to the logical OR of the
+ signalling flags and the source flags. */
+ env->fpc = source | (signalling & 0x00ff0000);
+ set_float_rounding_mode(fpc_to_rnd[source & 3], &env->fpu_status);
+
+ /* If any signalling flag is 1 and the corresponding source mask
+       is also 1, a simulated-ieee-exception trap occurs. */
+ s390_exc = (signalling >> 16) & (source >> 24);
+ if (s390_exc) {
+ ieee_exception(env, s390_exc | 3, GETPC());
+ }
+}
diff --git a/target/s390x/gdbstub.c b/target/s390x/gdbstub.c
new file mode 100644
index 0000000000..3d223dec97
--- /dev/null
+++ b/target/s390x/gdbstub.c
@@ -0,0 +1,313 @@
+/*
+ * s390x gdb server stub
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/gdbstub.h"
+#include "qemu/bitops.h"
+
+int s390_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ CPUS390XState *env = &cpu->env;
+ uint64_t val;
+ int cc_op;
+
+ switch (n) {
+ case S390_PSWM_REGNUM:
+ if (tcg_enabled()) {
+ cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
+ env->cc_vr);
+ val = deposit64(env->psw.mask, 44, 2, cc_op);
+ return gdb_get_regl(mem_buf, val);
+ }
+ return gdb_get_regl(mem_buf, env->psw.mask);
+ case S390_PSWA_REGNUM:
+ return gdb_get_regl(mem_buf, env->psw.addr);
+ case S390_R0_REGNUM ... S390_R15_REGNUM:
+ return gdb_get_regl(mem_buf, env->regs[n - S390_R0_REGNUM]);
+ }
+ return 0;
+}
+
+int s390_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ CPUS390XState *env = &cpu->env;
+ target_ulong tmpl = ldtul_p(mem_buf);
+
+ switch (n) {
+ case S390_PSWM_REGNUM:
+ env->psw.mask = tmpl;
+ if (tcg_enabled()) {
+ env->cc_op = extract64(tmpl, 44, 2);
+ }
+ break;
+ case S390_PSWA_REGNUM:
+ env->psw.addr = tmpl;
+ break;
+ case S390_R0_REGNUM ... S390_R15_REGNUM:
+ env->regs[n - S390_R0_REGNUM] = tmpl;
+ break;
+ default:
+ return 0;
+ }
+ return 8;
+}
+
+/* the values represent the positions in s390-acr.xml */
+#define S390_A0_REGNUM 0
+#define S390_A15_REGNUM 15
+/* total number of registers in s390-acr.xml */
+#define S390_NUM_AC_REGS 16
+
+static int cpu_read_ac_reg(CPUS390XState *env, uint8_t *mem_buf, int n)
+{
+ switch (n) {
+ case S390_A0_REGNUM ... S390_A15_REGNUM:
+ return gdb_get_reg32(mem_buf, env->aregs[n]);
+ default:
+ return 0;
+ }
+}
+
+static int cpu_write_ac_reg(CPUS390XState *env, uint8_t *mem_buf, int n)
+{
+ switch (n) {
+ case S390_A0_REGNUM ... S390_A15_REGNUM:
+ env->aregs[n] = ldl_p(mem_buf);
+ cpu_synchronize_post_init(ENV_GET_CPU(env));
+ return 4;
+ default:
+ return 0;
+ }
+}
+
+/* the values represent the positions in s390-fpr.xml */
+#define S390_FPC_REGNUM 0
+#define S390_F0_REGNUM 1
+#define S390_F15_REGNUM 16
+/* total number of registers in s390-fpr.xml */
+#define S390_NUM_FP_REGS 17
+
+static int cpu_read_fp_reg(CPUS390XState *env, uint8_t *mem_buf, int n)
+{
+ switch (n) {
+ case S390_FPC_REGNUM:
+ return gdb_get_reg32(mem_buf, env->fpc);
+ case S390_F0_REGNUM ... S390_F15_REGNUM:
+ return gdb_get_reg64(mem_buf, get_freg(env, n - S390_F0_REGNUM)->ll);
+ default:
+ return 0;
+ }
+}
+
+static int cpu_write_fp_reg(CPUS390XState *env, uint8_t *mem_buf, int n)
+{
+ switch (n) {
+ case S390_FPC_REGNUM:
+ env->fpc = ldl_p(mem_buf);
+ return 4;
+ case S390_F0_REGNUM ... S390_F15_REGNUM:
+ get_freg(env, n - S390_F0_REGNUM)->ll = ldtul_p(mem_buf);
+ return 8;
+ default:
+ return 0;
+ }
+}
+
+/* the values represent the positions in s390-vx.xml */
+#define S390_V0L_REGNUM 0
+#define S390_V15L_REGNUM 15
+#define S390_V16_REGNUM 16
+#define S390_V31_REGNUM 31
+/* total number of registers in s390-vx.xml */
+#define S390_NUM_VREGS 32
+
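+/*
+ * Only the low halves of v0-v15 are transferred here; their high halves
+ * overlap the floating point registers and are handled via s390-fpr.xml.
+ */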
+static int cpu_read_vreg(CPUS390XState *env, uint8_t *mem_buf, int n)
+{
+ int ret;
+
+ switch (n) {
+ case S390_V0L_REGNUM ... S390_V15L_REGNUM:
+ ret = gdb_get_reg64(mem_buf, env->vregs[n][1].ll);
+ break;
+ case S390_V16_REGNUM ... S390_V31_REGNUM:
+ ret = gdb_get_reg64(mem_buf, env->vregs[n][0].ll);
+ ret += gdb_get_reg64(mem_buf + 8, env->vregs[n][1].ll);
+ break;
+ default:
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int cpu_write_vreg(CPUS390XState *env, uint8_t *mem_buf, int n)
+{
+ switch (n) {
+ case S390_V0L_REGNUM ... S390_V15L_REGNUM:
+ env->vregs[n][1].ll = ldtul_p(mem_buf + 8);
+ return 8;
+ case S390_V16_REGNUM ... S390_V31_REGNUM:
+ env->vregs[n][0].ll = ldtul_p(mem_buf);
+ env->vregs[n][1].ll = ldtul_p(mem_buf + 8);
+ return 16;
+ default:
+ return 0;
+ }
+}
+
+/* the values represent the positions in s390-cr.xml */
+#define S390_C0_REGNUM 0
+#define S390_C15_REGNUM 15
+/* total number of registers in s390-cr.xml */
+#define S390_NUM_C_REGS 16
+
+#ifndef CONFIG_USER_ONLY
+static int cpu_read_c_reg(CPUS390XState *env, uint8_t *mem_buf, int n)
+{
+ switch (n) {
+ case S390_C0_REGNUM ... S390_C15_REGNUM:
+ return gdb_get_regl(mem_buf, env->cregs[n]);
+ default:
+ return 0;
+ }
+}
+
+static int cpu_write_c_reg(CPUS390XState *env, uint8_t *mem_buf, int n)
+{
+ switch (n) {
+ case S390_C0_REGNUM ... S390_C15_REGNUM:
+ env->cregs[n] = ldtul_p(mem_buf);
+ if (tcg_enabled()) {
+ tlb_flush(ENV_GET_CPU(env), 1);
+ }
+ cpu_synchronize_post_init(ENV_GET_CPU(env));
+ return 8;
+ default:
+ return 0;
+ }
+}
+
+/* the values represent the positions in s390-virt.xml */
+#define S390_VIRT_CKC_REGNUM 0
+#define S390_VIRT_CPUTM_REGNUM 1
+#define S390_VIRT_BEA_REGNUM 2
+#define S390_VIRT_PREFIX_REGNUM 3
+#define S390_VIRT_PP_REGNUM 4
+#define S390_VIRT_PFT_REGNUM 5
+#define S390_VIRT_PFS_REGNUM 6
+#define S390_VIRT_PFC_REGNUM 7
+/* total number of registers in s390-virt.xml */
+#define S390_NUM_VIRT_REGS 8
+
+static int cpu_read_virt_reg(CPUS390XState *env, uint8_t *mem_buf, int n)
+{
+ switch (n) {
+ case S390_VIRT_CKC_REGNUM:
+ return gdb_get_regl(mem_buf, env->ckc);
+ case S390_VIRT_CPUTM_REGNUM:
+ return gdb_get_regl(mem_buf, env->cputm);
+ case S390_VIRT_BEA_REGNUM:
+ return gdb_get_regl(mem_buf, env->gbea);
+ case S390_VIRT_PREFIX_REGNUM:
+ return gdb_get_regl(mem_buf, env->psa);
+ case S390_VIRT_PP_REGNUM:
+ return gdb_get_regl(mem_buf, env->pp);
+ case S390_VIRT_PFT_REGNUM:
+ return gdb_get_regl(mem_buf, env->pfault_token);
+ case S390_VIRT_PFS_REGNUM:
+ return gdb_get_regl(mem_buf, env->pfault_select);
+ case S390_VIRT_PFC_REGNUM:
+ return gdb_get_regl(mem_buf, env->pfault_compare);
+ default:
+ return 0;
+ }
+}
+
+static int cpu_write_virt_reg(CPUS390XState *env, uint8_t *mem_buf, int n)
+{
+ switch (n) {
+ case S390_VIRT_CKC_REGNUM:
+ env->ckc = ldtul_p(mem_buf);
+ cpu_synchronize_post_init(ENV_GET_CPU(env));
+ return 8;
+ case S390_VIRT_CPUTM_REGNUM:
+ env->cputm = ldtul_p(mem_buf);
+ cpu_synchronize_post_init(ENV_GET_CPU(env));
+ return 8;
+ case S390_VIRT_BEA_REGNUM:
+ env->gbea = ldtul_p(mem_buf);
+ cpu_synchronize_post_init(ENV_GET_CPU(env));
+ return 8;
+ case S390_VIRT_PREFIX_REGNUM:
+ env->psa = ldtul_p(mem_buf);
+ cpu_synchronize_post_init(ENV_GET_CPU(env));
+ return 8;
+ case S390_VIRT_PP_REGNUM:
+ env->pp = ldtul_p(mem_buf);
+ cpu_synchronize_post_init(ENV_GET_CPU(env));
+ return 8;
+ case S390_VIRT_PFT_REGNUM:
+ env->pfault_token = ldtul_p(mem_buf);
+ cpu_synchronize_post_init(ENV_GET_CPU(env));
+ return 8;
+ case S390_VIRT_PFS_REGNUM:
+ env->pfault_select = ldtul_p(mem_buf);
+ cpu_synchronize_post_init(ENV_GET_CPU(env));
+ return 8;
+ case S390_VIRT_PFC_REGNUM:
+ env->pfault_compare = ldtul_p(mem_buf);
+ cpu_synchronize_post_init(ENV_GET_CPU(env));
+ return 8;
+ default:
+ return 0;
+ }
+}
+#endif
+
+void s390_cpu_gdb_init(CPUState *cs)
+{
+ gdb_register_coprocessor(cs, cpu_read_ac_reg,
+ cpu_write_ac_reg,
+ S390_NUM_AC_REGS, "s390-acr.xml", 0);
+
+ gdb_register_coprocessor(cs, cpu_read_fp_reg,
+ cpu_write_fp_reg,
+ S390_NUM_FP_REGS, "s390-fpr.xml", 0);
+
+ gdb_register_coprocessor(cs, cpu_read_vreg,
+ cpu_write_vreg,
+ S390_NUM_VREGS, "s390-vx.xml", 0);
+
+#ifndef CONFIG_USER_ONLY
+ gdb_register_coprocessor(cs, cpu_read_c_reg,
+ cpu_write_c_reg,
+ S390_NUM_C_REGS, "s390-cr.xml", 0);
+
+ if (kvm_enabled()) {
+ gdb_register_coprocessor(cs, cpu_read_virt_reg,
+ cpu_write_virt_reg,
+ S390_NUM_VIRT_REGS, "s390-virt.xml", 0);
+ }
+#endif
+}
diff --git a/target/s390x/gen-features.c b/target/s390x/gen-features.c
new file mode 100644
index 0000000000..e674738ae3
--- /dev/null
+++ b/target/s390x/gen-features.c
@@ -0,0 +1,592 @@
+/*
+ * S390 feature list generator
+ *
+ * Copyright 2016 IBM Corp.
+ *
+ * Author(s): Michael Mueller <mimu@linux.vnet.ibm.com>
+ * David Hildenbrand <dahi@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ *
+ */
+
+
+#include <inttypes.h>
+#include <stdio.h>
+#include "cpu_features_def.h"
+
+#define ARRAY_SIZE(array) (sizeof(array) / sizeof(array[0]))
+
+/***** BEGIN FEATURE DEFS *****/
+
+#define S390_FEAT_GROUP_PLO \
+ S390_FEAT_PLO_CL, \
+ S390_FEAT_PLO_CLG, \
+ S390_FEAT_PLO_CLGR, \
+ S390_FEAT_PLO_CLX, \
+ S390_FEAT_PLO_CS, \
+ S390_FEAT_PLO_CSG, \
+ S390_FEAT_PLO_CSGR, \
+ S390_FEAT_PLO_CSX, \
+ S390_FEAT_PLO_DCS, \
+ S390_FEAT_PLO_DCSG, \
+ S390_FEAT_PLO_DCSGR, \
+ S390_FEAT_PLO_DCSX, \
+ S390_FEAT_PLO_CSST, \
+ S390_FEAT_PLO_CSSTG, \
+ S390_FEAT_PLO_CSSTGR, \
+ S390_FEAT_PLO_CSSTX, \
+ S390_FEAT_PLO_CSDST, \
+ S390_FEAT_PLO_CSDSTG, \
+ S390_FEAT_PLO_CSDSTGR, \
+ S390_FEAT_PLO_CSDSTX, \
+ S390_FEAT_PLO_CSTST, \
+ S390_FEAT_PLO_CSTSTG, \
+ S390_FEAT_PLO_CSTSTGR, \
+ S390_FEAT_PLO_CSTSTX
+
+#define S390_FEAT_GROUP_TOD_CLOCK_STEERING \
+ S390_FEAT_TOD_CLOCK_STEERING, \
+ S390_FEAT_PTFF_QTO, \
+ S390_FEAT_PTFF_QSI, \
+ S390_FEAT_PTFF_QPT, \
+ S390_FEAT_PTFF_STO
+
+#define S390_FEAT_GROUP_GEN13_PTFF \
+ S390_FEAT_PTFF_QUI, \
+ S390_FEAT_PTFF_QTOU, \
+ S390_FEAT_PTFF_STOU
+
+#define S390_FEAT_GROUP_MSA \
+ S390_FEAT_MSA, \
+ S390_FEAT_KMAC_DEA, \
+ S390_FEAT_KMAC_TDEA_128, \
+ S390_FEAT_KMAC_TDEA_192, \
+ S390_FEAT_KMC_DEA, \
+ S390_FEAT_KMC_TDEA_128, \
+ S390_FEAT_KMC_TDEA_192, \
+ S390_FEAT_KM_DEA, \
+ S390_FEAT_KM_TDEA_128, \
+ S390_FEAT_KM_TDEA_192, \
+ S390_FEAT_KIMD_SHA_1, \
+ S390_FEAT_KLMD_SHA_1
+
+#define S390_FEAT_GROUP_MSA_EXT_1 \
+ S390_FEAT_KMC_AES_128, \
+ S390_FEAT_KM_AES_128, \
+ S390_FEAT_KIMD_SHA_256, \
+ S390_FEAT_KLMD_SHA_256
+
+#define S390_FEAT_GROUP_MSA_EXT_2 \
+ S390_FEAT_KMC_AES_192, \
+ S390_FEAT_KMC_AES_256, \
+ S390_FEAT_KMC_PRNG, \
+ S390_FEAT_KM_AES_192, \
+ S390_FEAT_KM_AES_256, \
+ S390_FEAT_KIMD_SHA_512, \
+ S390_FEAT_KLMD_SHA_512
+
+#define S390_FEAT_GROUP_MSA_EXT_3 \
+ S390_FEAT_MSA_EXT_3, \
+ S390_FEAT_KMAC_EDEA, \
+ S390_FEAT_KMAC_ETDEA_128, \
+ S390_FEAT_KMAC_ETDEA_192, \
+ S390_FEAT_KMC_EAES_128, \
+ S390_FEAT_KMC_EAES_192, \
+ S390_FEAT_KMC_EAES_256, \
+ S390_FEAT_KMC_EDEA, \
+ S390_FEAT_KMC_ETDEA_128, \
+ S390_FEAT_KMC_ETDEA_192, \
+ S390_FEAT_KM_EDEA, \
+ S390_FEAT_KM_ETDEA_128, \
+ S390_FEAT_KM_ETDEA_192, \
+ S390_FEAT_KM_EAES_128, \
+ S390_FEAT_KM_EAES_192, \
+ S390_FEAT_KM_EAES_256, \
+ S390_FEAT_PCKMO_EDEA, \
+ S390_FEAT_PCKMO_ETDEA_128, \
+ S390_FEAT_PCKMO_ETDEA_256, \
+ S390_FEAT_PCKMO_AES_128, \
+ S390_FEAT_PCKMO_AES_192, \
+ S390_FEAT_PCKMO_AES_256
+
+#define S390_FEAT_GROUP_MSA_EXT_4 \
+ S390_FEAT_MSA_EXT_4, \
+ S390_FEAT_KMAC_AES_128, \
+ S390_FEAT_KMAC_AES_192, \
+ S390_FEAT_KMAC_AES_256, \
+ S390_FEAT_KMAC_EAES_128, \
+ S390_FEAT_KMAC_EAES_192, \
+ S390_FEAT_KMAC_EAES_256, \
+ S390_FEAT_KM_XTS_AES_128, \
+ S390_FEAT_KM_XTS_AES_256, \
+ S390_FEAT_KM_XTS_EAES_128, \
+ S390_FEAT_KM_XTS_EAES_256, \
+ S390_FEAT_KIMD_GHASH, \
+ S390_FEAT_KMCTR_DEA, \
+ S390_FEAT_KMCTR_TDEA_128, \
+ S390_FEAT_KMCTR_TDEA_192, \
+ S390_FEAT_KMCTR_EDEA, \
+ S390_FEAT_KMCTR_ETDEA_128, \
+ S390_FEAT_KMCTR_ETDEA_192, \
+ S390_FEAT_KMCTR_AES_128, \
+ S390_FEAT_KMCTR_AES_192, \
+ S390_FEAT_KMCTR_AES_256, \
+ S390_FEAT_KMCTR_EAES_128, \
+ S390_FEAT_KMCTR_EAES_192, \
+ S390_FEAT_KMCTR_EAES_256, \
+ S390_FEAT_KMF_DEA, \
+ S390_FEAT_KMF_TDEA_128, \
+ S390_FEAT_KMF_TDEA_192, \
+ S390_FEAT_KMF_EDEA, \
+ S390_FEAT_KMF_ETDEA_128, \
+ S390_FEAT_KMF_ETDEA_192, \
+ S390_FEAT_KMF_AES_128, \
+ S390_FEAT_KMF_AES_192, \
+ S390_FEAT_KMF_AES_256, \
+ S390_FEAT_KMF_EAES_128, \
+ S390_FEAT_KMF_EAES_192, \
+ S390_FEAT_KMF_EAES_256, \
+ S390_FEAT_KMO_DEA, \
+ S390_FEAT_KMO_TDEA_128, \
+ S390_FEAT_KMO_TDEA_192, \
+ S390_FEAT_KMO_EDEA, \
+ S390_FEAT_KMO_ETDEA_128, \
+ S390_FEAT_KMO_ETDEA_192, \
+ S390_FEAT_KMO_AES_128, \
+ S390_FEAT_KMO_AES_192, \
+ S390_FEAT_KMO_AES_256, \
+ S390_FEAT_KMO_EAES_128, \
+ S390_FEAT_KMO_EAES_192, \
+ S390_FEAT_KMO_EAES_256, \
+ S390_FEAT_PCC_CMAC_DEA, \
+ S390_FEAT_PCC_CMAC_TDEA_128, \
+ S390_FEAT_PCC_CMAC_TDEA_192, \
+ S390_FEAT_PCC_CMAC_ETDEA_128, \
+ S390_FEAT_PCC_CMAC_ETDEA_192, \
+ S390_FEAT_PCC_CMAC_TDEA, \
+ S390_FEAT_PCC_CMAC_AES_128, \
+ S390_FEAT_PCC_CMAC_AES_192, \
+ S390_FEAT_PCC_CMAC_AES_256, \
+ S390_FEAT_PCC_CMAC_EAES_128, \
+ S390_FEAT_PCC_CMAC_EAES_192, \
+ S390_FEAT_PCC_CMAC_EAES_256, \
+ S390_FEAT_PCC_XTS_AES_128, \
+ S390_FEAT_PCC_XTS_AES_256, \
+ S390_FEAT_PCC_XTS_EAES_128, \
+ S390_FEAT_PCC_XTS_EAES_256
+
+#define S390_FEAT_GROUP_MSA_EXT_5 \
+ S390_FEAT_MSA_EXT_5, \
+ S390_FEAT_PPNO_SHA_512_DRNG
+
+/* cpu feature groups */
+static uint16_t group_PLO[] = {
+ S390_FEAT_GROUP_PLO,
+};
+static uint16_t group_TOD_CLOCK_STEERING[] = {
+ S390_FEAT_GROUP_TOD_CLOCK_STEERING,
+};
+static uint16_t group_GEN13_PTFF[] = {
+ S390_FEAT_GROUP_GEN13_PTFF,
+};
+static uint16_t group_MSA[] = {
+ S390_FEAT_GROUP_MSA,
+};
+static uint16_t group_MSA_EXT_1[] = {
+ S390_FEAT_GROUP_MSA_EXT_1,
+};
+static uint16_t group_MSA_EXT_2[] = {
+ S390_FEAT_GROUP_MSA_EXT_2,
+};
+static uint16_t group_MSA_EXT_3[] = {
+ S390_FEAT_GROUP_MSA_EXT_3,
+};
+static uint16_t group_MSA_EXT_4[] = {
+ S390_FEAT_GROUP_MSA_EXT_4,
+};
+static uint16_t group_MSA_EXT_5[] = {
+ S390_FEAT_GROUP_MSA_EXT_5,
+};
+
+/* base features in order of release */
+static uint16_t base_GEN7_GA1[] = {
+ S390_FEAT_GROUP_PLO,
+ S390_FEAT_ESAN3,
+ S390_FEAT_ZARCH,
+};
+#define base_GEN7_GA2 EmptyFeat
+#define base_GEN7_GA3 EmptyFeat
+static uint16_t base_GEN8_GA1[] = {
+ S390_FEAT_DAT_ENH,
+ S390_FEAT_EXTENDED_TRANSLATION_2,
+ S390_FEAT_GROUP_MSA,
+ S390_FEAT_LONG_DISPLACEMENT,
+ S390_FEAT_LONG_DISPLACEMENT_FAST,
+ S390_FEAT_HFP_MADDSUB,
+};
+#define base_GEN8_GA2 EmptyFeat
+#define base_GEN8_GA3 EmptyFeat
+#define base_GEN8_GA4 EmptyFeat
+#define base_GEN8_GA5 EmptyFeat
+static uint16_t base_GEN9_GA1[] = {
+ S390_FEAT_IDTE_SEGMENT,
+ S390_FEAT_ASN_LX_REUSE,
+ S390_FEAT_STFLE,
+ S390_FEAT_SENSE_RUNNING_STATUS,
+ S390_FEAT_EXTENDED_IMMEDIATE,
+ S390_FEAT_EXTENDED_TRANSLATION_3,
+ S390_FEAT_HFP_UNNORMALIZED_EXT,
+ S390_FEAT_ETF2_ENH,
+ S390_FEAT_STORE_CLOCK_FAST,
+ S390_FEAT_GROUP_TOD_CLOCK_STEERING,
+ S390_FEAT_ETF3_ENH,
+ S390_FEAT_DAT_ENH_2,
+};
+#define base_GEN9_GA2 EmptyFeat
+#define base_GEN9_GA3 EmptyFeat
+static uint16_t base_GEN10_GA1[] = {
+ S390_FEAT_CONDITIONAL_SSKE,
+ S390_FEAT_PARSING_ENH,
+ S390_FEAT_MOVE_WITH_OPTIONAL_SPEC,
+ S390_FEAT_EXTRACT_CPU_TIME,
+ S390_FEAT_COMPARE_AND_SWAP_AND_STORE,
+ S390_FEAT_COMPARE_AND_SWAP_AND_STORE_2,
+ S390_FEAT_GENERAL_INSTRUCTIONS_EXT,
+ S390_FEAT_EXECUTE_EXT,
+ S390_FEAT_FLOATING_POINT_SUPPPORT_ENH,
+ S390_FEAT_DFP,
+ S390_FEAT_DFP_FAST,
+ S390_FEAT_PFPO,
+};
+#define base_GEN10_GA2 EmptyFeat
+#define base_GEN10_GA3 EmptyFeat
+static uint16_t base_GEN11_GA1[] = {
+ S390_FEAT_NONQ_KEY_SETTING,
+ S390_FEAT_ENHANCED_MONITOR,
+ S390_FEAT_FLOATING_POINT_EXT,
+ S390_FEAT_SET_PROGRAM_PARAMETERS,
+ S390_FEAT_STFLE_45,
+ S390_FEAT_CMPSC_ENH,
+ S390_FEAT_INTERLOCKED_ACCESS_2,
+};
+#define base_GEN11_GA2 EmptyFeat
+static uint16_t base_GEN12_GA1[] = {
+ S390_FEAT_DFP_ZONED_CONVERSION,
+ S390_FEAT_STFLE_49,
+ S390_FEAT_LOCAL_TLB_CLEARING,
+};
+#define base_GEN12_GA2 EmptyFeat
+static uint16_t base_GEN13_GA1[] = {
+ S390_FEAT_STFLE_53,
+ S390_FEAT_DFP_PACKED_CONVERSION,
+ S390_FEAT_GROUP_GEN13_PTFF,
+};
+#define base_GEN13_GA2 EmptyFeat
+
+/* full features differing from the base, in order of release */
+static uint16_t full_GEN7_GA1[] = {
+ S390_FEAT_SIE_F2,
+ S390_FEAT_SIE_SKEY,
+ S390_FEAT_SIE_GPERE,
+ S390_FEAT_SIE_IB,
+ S390_FEAT_SIE_CEI,
+};
+static uint16_t full_GEN7_GA2[] = {
+ S390_FEAT_EXTENDED_TRANSLATION_2,
+};
+static uint16_t full_GEN7_GA3[] = {
+ S390_FEAT_LONG_DISPLACEMENT,
+ S390_FEAT_SIE_SIIF,
+};
+static uint16_t full_GEN8_GA1[] = {
+ S390_FEAT_SIE_GSLS,
+ S390_FEAT_SIE_64BSCAO,
+};
+#define full_GEN8_GA2 EmptyFeat
+static uint16_t full_GEN8_GA3[] = {
+ S390_FEAT_ASN_LX_REUSE,
+ S390_FEAT_EXTENDED_TRANSLATION_3,
+};
+#define full_GEN8_GA4 EmptyFeat
+#define full_GEN8_GA5 EmptyFeat
+static uint16_t full_GEN9_GA1[] = {
+ S390_FEAT_STORE_HYPERVISOR_INFO,
+ S390_FEAT_GROUP_MSA_EXT_1,
+ S390_FEAT_CMM,
+ S390_FEAT_SIE_CMMA,
+};
+static uint16_t full_GEN9_GA2[] = {
+ S390_FEAT_MOVE_WITH_OPTIONAL_SPEC,
+ S390_FEAT_EXTRACT_CPU_TIME,
+ S390_FEAT_COMPARE_AND_SWAP_AND_STORE,
+ S390_FEAT_FLOATING_POINT_SUPPPORT_ENH,
+ S390_FEAT_DFP,
+};
+static uint16_t full_GEN9_GA3[] = {
+ S390_FEAT_CONDITIONAL_SSKE,
+ S390_FEAT_PFPO,
+};
+static uint16_t full_GEN10_GA1[] = {
+ S390_FEAT_EDAT,
+ S390_FEAT_CONFIGURATION_TOPOLOGY,
+ S390_FEAT_GROUP_MSA_EXT_2,
+ S390_FEAT_ESOP,
+ S390_FEAT_SIE_PFMFI,
+ S390_FEAT_SIE_SIGPIF,
+};
+static uint16_t full_GEN10_GA2[] = {
+ S390_FEAT_SET_PROGRAM_PARAMETERS,
+ S390_FEAT_SIE_IBS,
+};
+static uint16_t full_GEN10_GA3[] = {
+ S390_FEAT_GROUP_MSA_EXT_3,
+};
+static uint16_t full_GEN11_GA1[] = {
+ S390_FEAT_IPTE_RANGE,
+ S390_FEAT_ACCESS_EXCEPTION_FS_INDICATION,
+ S390_FEAT_GROUP_MSA_EXT_4,
+};
+#define full_GEN11_GA2 EmptyFeat
+static uint16_t full_GEN12_GA1[] = {
+ S390_FEAT_CONSTRAINT_TRANSACTIONAL_EXE,
+ S390_FEAT_TRANSACTIONAL_EXE,
+ S390_FEAT_RUNTIME_INSTRUMENTATION,
+ S390_FEAT_EDAT_2,
+};
+static uint16_t full_GEN12_GA2[] = {
+ S390_FEAT_GROUP_MSA_EXT_5,
+};
+static uint16_t full_GEN13_GA1[] = {
+ S390_FEAT_VECTOR,
+};
+#define full_GEN13_GA2 EmptyFeat
+
+/* default features differing from the base, in order of release */
+#define default_GEN7_GA1 EmptyFeat
+#define default_GEN7_GA2 EmptyFeat
+#define default_GEN7_GA3 EmptyFeat
+#define default_GEN8_GA1 EmptyFeat
+#define default_GEN8_GA2 EmptyFeat
+#define default_GEN8_GA3 EmptyFeat
+#define default_GEN8_GA4 EmptyFeat
+#define default_GEN8_GA5 EmptyFeat
+static uint16_t default_GEN9_GA1[] = {
+ S390_FEAT_STORE_HYPERVISOR_INFO,
+ S390_FEAT_GROUP_MSA_EXT_1,
+ S390_FEAT_CMM,
+};
+#define default_GEN9_GA2 EmptyFeat
+#define default_GEN9_GA3 EmptyFeat
+static uint16_t default_GEN10_GA1[] = {
+ S390_FEAT_EDAT,
+ S390_FEAT_GROUP_MSA_EXT_2,
+};
+#define default_GEN10_GA2 EmptyFeat
+#define default_GEN10_GA3 EmptyFeat
+static uint16_t default_GEN11_GA1[] = {
+ S390_FEAT_GROUP_MSA_EXT_3,
+ S390_FEAT_IPTE_RANGE,
+ S390_FEAT_ACCESS_EXCEPTION_FS_INDICATION,
+ S390_FEAT_GROUP_MSA_EXT_4,
+};
+#define default_GEN11_GA2 EmptyFeat
+static uint16_t default_GEN12_GA1[] = {
+ S390_FEAT_CONSTRAINT_TRANSACTIONAL_EXE,
+ S390_FEAT_TRANSACTIONAL_EXE,
+ S390_FEAT_RUNTIME_INSTRUMENTATION,
+ S390_FEAT_EDAT_2,
+};
+#define default_GEN12_GA2 EmptyFeat
+static uint16_t default_GEN13_GA1[] = {
+ S390_FEAT_GROUP_MSA_EXT_5,
+ S390_FEAT_VECTOR,
+};
+#define default_GEN13_GA2 EmptyFeat
+
+/****** END FEATURE DEFS ******/
+
+#define _YEARS "2016"
+#define _NAME_H "TARGET_S390X_GEN_FEATURES_H"
+
+#define CPU_FEAT_INITIALIZER(_name) \
+ { \
+ .name = "S390_FEAT_LIST_" #_name, \
+ .base_bits = \
+ { .data = base_##_name, \
+ .len = ARRAY_SIZE(base_##_name) }, \
+ .default_bits = \
+ { .data = default_##_name, \
+ .len = ARRAY_SIZE(default_##_name) }, \
+ .full_bits = \
+ { .data = full_##_name, \
+ .len = ARRAY_SIZE(full_##_name) }, \
+ }
+
+typedef struct BitSpec {
+ uint16_t *data;
+ uint32_t len;
+} BitSpec;
+
+typedef struct {
+ const char *name;
+ BitSpec base_bits;
+ BitSpec default_bits;
+ BitSpec full_bits;
+} CpuFeatDefSpec;
+
+static uint16_t EmptyFeat[] = {};
+
+/*******************************
+ * processor GA series
+ *******************************/
+static CpuFeatDefSpec CpuFeatDef[] = {
+ CPU_FEAT_INITIALIZER(GEN7_GA1),
+ CPU_FEAT_INITIALIZER(GEN7_GA2),
+ CPU_FEAT_INITIALIZER(GEN7_GA3),
+ CPU_FEAT_INITIALIZER(GEN8_GA1),
+ CPU_FEAT_INITIALIZER(GEN8_GA2),
+ CPU_FEAT_INITIALIZER(GEN8_GA3),
+ CPU_FEAT_INITIALIZER(GEN8_GA4),
+ CPU_FEAT_INITIALIZER(GEN8_GA5),
+ CPU_FEAT_INITIALIZER(GEN9_GA1),
+ CPU_FEAT_INITIALIZER(GEN9_GA2),
+ CPU_FEAT_INITIALIZER(GEN9_GA3),
+ CPU_FEAT_INITIALIZER(GEN10_GA1),
+ CPU_FEAT_INITIALIZER(GEN10_GA2),
+ CPU_FEAT_INITIALIZER(GEN10_GA3),
+ CPU_FEAT_INITIALIZER(GEN11_GA1),
+ CPU_FEAT_INITIALIZER(GEN11_GA2),
+ CPU_FEAT_INITIALIZER(GEN12_GA1),
+ CPU_FEAT_INITIALIZER(GEN12_GA2),
+ CPU_FEAT_INITIALIZER(GEN13_GA1),
+ CPU_FEAT_INITIALIZER(GEN13_GA2),
+};
+
+#define FEAT_GROUP_INITIALIZER(_name) \
+ { \
+ .name = "S390_FEAT_GROUP_LIST_" #_name, \
+ .bits = \
+ { .data = group_##_name, \
+ .len = ARRAY_SIZE(group_##_name) }, \
+ }
+
+typedef struct {
+ const char *name;
+ BitSpec bits;
+} FeatGroupDefSpec;
+
+/*******************************
+ * feature groups
+ *******************************/
+static FeatGroupDefSpec FeatGroupDef[] = {
+ FEAT_GROUP_INITIALIZER(PLO),
+ FEAT_GROUP_INITIALIZER(TOD_CLOCK_STEERING),
+ FEAT_GROUP_INITIALIZER(GEN13_PTFF),
+ FEAT_GROUP_INITIALIZER(MSA),
+ FEAT_GROUP_INITIALIZER(MSA_EXT_1),
+ FEAT_GROUP_INITIALIZER(MSA_EXT_2),
+ FEAT_GROUP_INITIALIZER(MSA_EXT_3),
+ FEAT_GROUP_INITIALIZER(MSA_EXT_4),
+ FEAT_GROUP_INITIALIZER(MSA_EXT_5),
+};
+
+static void set_bits(uint64_t list[], BitSpec bits)
+{
+ uint32_t i;
+
+ for (i = 0; i < bits.len; i++) {
+ list[bits.data[i] / 64] |= 1ULL << (bits.data[i] % 64);
+ }
+}
+
+static void print_feature_defs(void)
+{
+ uint64_t base_feat[S390_FEAT_MAX / 64 + 1] = {};
+ uint64_t default_feat[S390_FEAT_MAX / 64 + 1] = {};
+ uint64_t full_feat[S390_FEAT_MAX / 64 + 1] = {};
+ int i, j;
+
+ printf("\n/* CPU model feature list data */\n");
+
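+ /* Note: the three bitmaps are deliberately not cleared between
+ iterations, so each generation accumulates the features of all
+ earlier releases and only has to list what is new for itself. */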
+ for (i = 0; i < ARRAY_SIZE(CpuFeatDef); i++) {
+ set_bits(base_feat, CpuFeatDef[i].base_bits);
+ /* add the base to the default features */
+ set_bits(default_feat, CpuFeatDef[i].base_bits);
+ set_bits(default_feat, CpuFeatDef[i].default_bits);
+ /* add the base to the full features */
+ set_bits(full_feat, CpuFeatDef[i].base_bits);
+ set_bits(full_feat, CpuFeatDef[i].full_bits);
+
+ printf("#define %s_BASE\t", CpuFeatDef[i].name);
+ for (j = 0; j < ARRAY_SIZE(base_feat); j++) {
+ printf("0x%016"PRIx64"ULL", base_feat[j]);
+ if (j < ARRAY_SIZE(base_feat) - 1) {
+ printf(",");
+ } else {
+ printf("\n");
+ }
+ }
+ printf("#define %s_DEFAULT\t", CpuFeatDef[i].name);
+ for (j = 0; j < ARRAY_SIZE(default_feat); j++) {
+ printf("0x%016"PRIx64"ULL", default_feat[j]);
+ if (j < ARRAY_SIZE(default_feat) - 1) {
+ printf(",");
+ } else {
+ printf("\n");
+ }
+ }
+ printf("#define %s_FULL\t\t", CpuFeatDef[i].name);
+ for (j = 0; j < ARRAY_SIZE(full_feat); j++) {
+ printf("0x%016"PRIx64"ULL", full_feat[j]);
+ if (j < ARRAY_SIZE(full_feat) - 1) {
+ printf(",");
+ } else {
+ printf("\n");
+ }
+ }
+ }
+}
+
+static void print_feature_group_defs(void)
+{
+ int i, j;
+
+ printf("\n/* CPU feature group list data */\n");
+
+ for (i = 0; i < ARRAY_SIZE(FeatGroupDef); i++) {
+ uint64_t feat[S390_FEAT_MAX / 64 + 1] = {};
+
+ set_bits(feat, FeatGroupDef[i].bits);
+ printf("#define %s\t", FeatGroupDef[i].name);
+ for (j = 0; j < ARRAY_SIZE(feat); j++) {
+ printf("0x%016"PRIx64"ULL", feat[j]);
+ if (j < ARRAY_SIZE(feat) - 1) {
+ printf(",");
+ } else {
+ printf("\n");
+ }
+ }
+ }
+}
+
+int main(int argc, char *argv[])
+{
+ printf("/*\n"
+ " * AUTOMATICALLY GENERATED, DO NOT MODIFY HERE, EDIT\n"
+ " * SOURCE FILE \"%s\" INSTEAD.\n"
+ " *\n"
+ " * Copyright %s IBM Corp.\n"
+ " *\n"
+ " * This work is licensed under the terms of the GNU GPL, "
+ "version 2 or (at\n * your option) any later version. See "
+ "the COPYING file in the top-level\n * directory.\n"
+ " */\n\n"
+ "#ifndef %s\n#define %s\n", __FILE__, _YEARS, _NAME_H, _NAME_H);
+ print_feature_defs();
+ print_feature_group_defs();
+ printf("\n#endif\n");
+ return 0;
+}
diff --git a/target/s390x/helper.c b/target/s390x/helper.c
new file mode 100644
index 0000000000..68bd2f9784
--- /dev/null
+++ b/target/s390x/helper.c
@@ -0,0 +1,721 @@
+/*
+ * S/390 helpers
+ *
+ * Copyright (c) 2009 Ulrich Hecht
+ * Copyright (c) 2011 Alexander Graf
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+#include "qemu/timer.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "hw/s390x/ioinst.h"
+#ifndef CONFIG_USER_ONLY
+#include "sysemu/sysemu.h"
+#endif
+
+//#define DEBUG_S390
+//#define DEBUG_S390_STDOUT
+
+#ifdef DEBUG_S390
+#ifdef DEBUG_S390_STDOUT
+#define DPRINTF(fmt, ...) \
+ do { fprintf(stderr, fmt, ## __VA_ARGS__); \
+ if (qemu_log_separate()) qemu_log(fmt, ##__VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+ do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
+#endif
+#else
+#define DPRINTF(fmt, ...) \
+ do { } while (0)
+#endif
+
+
+#ifndef CONFIG_USER_ONLY
+void s390x_tod_timer(void *opaque)
+{
+ S390CPU *cpu = opaque;
+ CPUS390XState *env = &cpu->env;
+
+ env->pending_int |= INTERRUPT_TOD;
+ cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
+}
+
+void s390x_cpu_timer(void *opaque)
+{
+ S390CPU *cpu = opaque;
+ CPUS390XState *env = &cpu->env;
+
+ env->pending_int |= INTERRUPT_CPUTIMER;
+ cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
+}
+#endif
+
+S390CPU *cpu_s390x_create(const char *cpu_model, Error **errp)
+{
+ static bool features_parsed;
+ char *name, *features;
+ const char *typename;
+ ObjectClass *oc;
+ CPUClass *cc;
+
+ name = g_strdup(cpu_model);
+ features = strchr(name, ',');
+ if (features) {
+ features[0] = 0;
+ features++;
+ }
+
+ oc = cpu_class_by_name(TYPE_S390_CPU, name);
+ if (!oc) {
+ error_setg(errp, "Unknown CPU definition \'%s\'", name);
+ g_free(name);
+ return NULL;
+ }
+ typename = object_class_get_name(oc);
+
+ if (!features_parsed) {
+ features_parsed = true;
+ cc = CPU_CLASS(oc);
+ cc->parse_features(typename, features, errp);
+ }
+ g_free(name);
+
+ if (*errp) {
+ return NULL;
+ }
+ return S390_CPU(CPU(object_new(typename)));
+}
+
+S390CPU *s390x_new_cpu(const char *cpu_model, int64_t id, Error **errp)
+{
+ S390CPU *cpu;
+ Error *err = NULL;
+
+ cpu = cpu_s390x_create(cpu_model, &err);
+ if (err != NULL) {
+ goto out;
+ }
+
+ object_property_set_int(OBJECT(cpu), id, "id", &err);
+ if (err != NULL) {
+ goto out;
+ }
+ object_property_set_bool(OBJECT(cpu), true, "realized", &err);
+
+out:
+ if (err) {
+ error_propagate(errp, err);
+ object_unref(OBJECT(cpu));
+ cpu = NULL;
+ }
+ return cpu;
+}
+
+S390CPU *cpu_s390x_init(const char *cpu_model)
+{
+ Error *err = NULL;
+ S390CPU *cpu;
+ /* Used to track the CPU ID for linux-user only */
+ static int64_t next_cpu_id;
+
+ cpu = s390x_new_cpu(cpu_model, next_cpu_id++, &err);
+ if (err) {
+ error_report_err(err);
+ }
+ return cpu;
+}
+
+#if defined(CONFIG_USER_ONLY)
+
+void s390_cpu_do_interrupt(CPUState *cs)
+{
+ cs->exception_index = -1;
+}
+
+int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
+ int rw, int mmu_idx)
+{
+ S390CPU *cpu = S390_CPU(cs);
+
+ cs->exception_index = EXCP_PGM;
+ cpu->env.int_pgm_code = PGM_ADDRESSING;
+ /* On real machines this value is dropped into LowMem. Since this
+ is userland, simply put this someplace that cpu_loop can find it. */
+ cpu->env.__excp_addr = address;
+ return 1;
+}
+
+#else /* !CONFIG_USER_ONLY */
+
+/* Be sure to exit the TB after this call! */
+void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen)
+{
+ CPUState *cs = CPU(s390_env_get_cpu(env));
+
+ cs->exception_index = EXCP_PGM;
+ env->int_pgm_code = code;
+ env->int_pgm_ilen = ilen;
+}
+
+int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
+ int rw, int mmu_idx)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ CPUS390XState *env = &cpu->env;
+ uint64_t asc = cpu_mmu_idx_to_asc(mmu_idx);
+ target_ulong vaddr, raddr;
+ int prot;
+
+ DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
+ __func__, orig_vaddr, rw, mmu_idx);
+
+ orig_vaddr &= TARGET_PAGE_MASK;
+ vaddr = orig_vaddr;
+
+ /* 31-Bit mode */
+ if (!(env->psw.mask & PSW_MASK_64)) {
+ vaddr &= 0x7fffffff;
+ }
+
+ if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
+ /* Translation ended in exception */
+ return 1;
+ }
+
+ /* check for out-of-RAM accesses */
+ if (raddr > ram_size) {
+ DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __func__,
+ (uint64_t)raddr, (uint64_t)ram_size);
+ trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_LATER);
+ return 1;
+ }
+
+ qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
+ __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
+
+ tlb_set_page(cs, orig_vaddr, raddr, prot,
+ mmu_idx, TARGET_PAGE_SIZE);
+
+ return 0;
+}
+
+hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ CPUS390XState *env = &cpu->env;
+ target_ulong raddr;
+ int prot;
+ uint64_t asc = env->psw.mask & PSW_MASK_ASC;
+
+ /* 31-Bit mode */
+ if (!(env->psw.mask & PSW_MASK_64)) {
+ vaddr &= 0x7fffffff;
+ }
+
+ if (mmu_translate(env, vaddr, MMU_INST_FETCH, asc, &raddr, &prot, false)) {
+ return -1;
+ }
+ return raddr;
+}
+
+hwaddr s390_cpu_get_phys_addr_debug(CPUState *cs, vaddr vaddr)
+{
+ hwaddr phys_addr;
+ target_ulong page;
+
+ page = vaddr & TARGET_PAGE_MASK;
+ phys_addr = cpu_get_phys_page_debug(cs, page);
+ phys_addr += (vaddr & ~TARGET_PAGE_MASK);
+
+ return phys_addr;
+}
+
+void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
+{
+ uint64_t old_mask = env->psw.mask;
+
+ env->psw.addr = addr;
+ env->psw.mask = mask;
+ if (tcg_enabled()) {
+ env->cc_op = (mask >> 44) & 3;
+ }
+
+ if ((old_mask ^ mask) & PSW_MASK_PER) {
+ s390_cpu_recompute_watchpoints(CPU(s390_env_get_cpu(env)));
+ }
+
+ if (mask & PSW_MASK_WAIT) {
+ S390CPU *cpu = s390_env_get_cpu(env);
+ if (s390_cpu_halt(cpu) == 0) {
+#ifndef CONFIG_USER_ONLY
+ qemu_system_shutdown_request();
+#endif
+ }
+ }
+}
+
+static uint64_t get_psw_mask(CPUS390XState *env)
+{
+ uint64_t r = env->psw.mask;
+
+ if (tcg_enabled()) {
+ env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
+ env->cc_vr);
+
+ r &= ~PSW_MASK_CC;
+ assert(!(env->cc_op & ~3));
+ r |= (uint64_t)env->cc_op << 44;
+ }
+
+ return r;
+}
+
+static LowCore *cpu_map_lowcore(CPUS390XState *env)
+{
+ S390CPU *cpu = s390_env_get_cpu(env);
+ LowCore *lowcore;
+ hwaddr len = sizeof(LowCore);
+
+ lowcore = cpu_physical_memory_map(env->psa, &len, 1);
+
+ if (len < sizeof(LowCore)) {
+ cpu_abort(CPU(cpu), "Could not map lowcore\n");
+ }
+
+ return lowcore;
+}
+
+static void cpu_unmap_lowcore(LowCore *lowcore)
+{
+ cpu_physical_memory_unmap(lowcore, sizeof(LowCore), 1, sizeof(LowCore));
+}
+
+void do_restart_interrupt(CPUS390XState *env)
+{
+ uint64_t mask, addr;
+ LowCore *lowcore;
+
+ lowcore = cpu_map_lowcore(env);
+
+ lowcore->restart_old_psw.mask = cpu_to_be64(get_psw_mask(env));
+ lowcore->restart_old_psw.addr = cpu_to_be64(env->psw.addr);
+ mask = be64_to_cpu(lowcore->restart_new_psw.mask);
+ addr = be64_to_cpu(lowcore->restart_new_psw.addr);
+
+ cpu_unmap_lowcore(lowcore);
+
+ load_psw(env, mask, addr);
+}
+
+static void do_program_interrupt(CPUS390XState *env)
+{
+ uint64_t mask, addr;
+ LowCore *lowcore;
+ int ilen = env->int_pgm_ilen;
+
+ switch (ilen) {
+ case ILEN_LATER:
+ ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
+ break;
+ case ILEN_LATER_INC:
+ ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
+ env->psw.addr += ilen;
+ break;
+ default:
+ assert(ilen == 2 || ilen == 4 || ilen == 6);
+ }
+
+ qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n",
+ __func__, env->int_pgm_code, ilen);
+
+ lowcore = cpu_map_lowcore(env);
+
+ /* Signal PER events with the exception. */
+ if (env->per_perc_atmid) {
+ env->int_pgm_code |= PGM_PER;
+ lowcore->per_address = cpu_to_be64(env->per_address);
+ lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
+ env->per_perc_atmid = 0;
+ }
+
+ lowcore->pgm_ilen = cpu_to_be16(ilen);
+ lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
+ lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
+ lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
+ mask = be64_to_cpu(lowcore->program_new_psw.mask);
+ addr = be64_to_cpu(lowcore->program_new_psw.addr);
+ lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);
+
+ cpu_unmap_lowcore(lowcore);
+
+ DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
+ env->int_pgm_code, ilen, env->psw.mask,
+ env->psw.addr);
+
+ load_psw(env, mask, addr);
+}
+
+static void do_svc_interrupt(CPUS390XState *env)
+{
+ uint64_t mask, addr;
+ LowCore *lowcore;
+
+ lowcore = cpu_map_lowcore(env);
+
+ lowcore->svc_code = cpu_to_be16(env->int_svc_code);
+ lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
+ lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
+ lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
+ mask = be64_to_cpu(lowcore->svc_new_psw.mask);
+ addr = be64_to_cpu(lowcore->svc_new_psw.addr);
+
+ cpu_unmap_lowcore(lowcore);
+
+ load_psw(env, mask, addr);
+
+ /* When a PER event is pending, the PER exception has to happen
+ immediately after the SERVICE CALL one. */
+ if (env->per_perc_atmid) {
+ env->int_pgm_code = PGM_PER;
+ env->int_pgm_ilen = env->int_svc_ilen;
+ do_program_interrupt(env);
+ }
+}
+
+#define VIRTIO_SUBCODE_64 0x0D00
+
+static void do_ext_interrupt(CPUS390XState *env)
+{
+ S390CPU *cpu = s390_env_get_cpu(env);
+ uint64_t mask, addr;
+ LowCore *lowcore;
+ ExtQueue *q;
+
+ if (!(env->psw.mask & PSW_MASK_EXT)) {
+ cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
+ }
+
+ if (env->ext_index < 0 || env->ext_index >= MAX_EXT_QUEUE) {
+ cpu_abort(CPU(cpu), "Ext queue overrun: %d\n", env->ext_index);
+ }
+
+ q = &env->ext_queue[env->ext_index];
+ lowcore = cpu_map_lowcore(env);
+
+ lowcore->ext_int_code = cpu_to_be16(q->code);
+ lowcore->ext_params = cpu_to_be32(q->param);
+ lowcore->ext_params2 = cpu_to_be64(q->param64);
+ lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
+ lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
+ lowcore->cpu_addr = cpu_to_be16(env->cpu_num | VIRTIO_SUBCODE_64);
+ mask = be64_to_cpu(lowcore->external_new_psw.mask);
+ addr = be64_to_cpu(lowcore->external_new_psw.addr);
+
+ cpu_unmap_lowcore(lowcore);
+
+ env->ext_index--;
+ if (env->ext_index == -1) {
+ env->pending_int &= ~INTERRUPT_EXT;
+ }
+
+ DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
+ env->psw.mask, env->psw.addr);
+
+ load_psw(env, mask, addr);
+}
+
+static void do_io_interrupt(CPUS390XState *env)
+{
+ S390CPU *cpu = s390_env_get_cpu(env);
+ LowCore *lowcore;
+ IOIntQueue *q;
+ uint8_t isc;
+ int disable = 1;
+ int found = 0;
+
+ if (!(env->psw.mask & PSW_MASK_IO)) {
+ cpu_abort(CPU(cpu), "I/O int w/o I/O mask\n");
+ }
+
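+ /* Walk all interruption subclasses: deliver the first pending I/O
+ interrupt whose ISC is enabled in CR6, and only clear the pending
+ indication when no queue has entries left afterwards. */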
+ for (isc = 0; isc < ARRAY_SIZE(env->io_index); isc++) {
+ uint64_t isc_bits;
+
+ if (env->io_index[isc] < 0) {
+ continue;
+ }
+ if (env->io_index[isc] >= MAX_IO_QUEUE) {
+ cpu_abort(CPU(cpu), "I/O queue overrun for isc %d: %d\n",
+ isc, env->io_index[isc]);
+ }
+
+ q = &env->io_queue[env->io_index[isc]][isc];
+ isc_bits = ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q->word));
+ if (!(env->cregs[6] & isc_bits)) {
+ disable = 0;
+ continue;
+ }
+ if (!found) {
+ uint64_t mask, addr;
+
+ found = 1;
+ lowcore = cpu_map_lowcore(env);
+
+ lowcore->subchannel_id = cpu_to_be16(q->id);
+ lowcore->subchannel_nr = cpu_to_be16(q->nr);
+ lowcore->io_int_parm = cpu_to_be32(q->parm);
+ lowcore->io_int_word = cpu_to_be32(q->word);
+ lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
+ lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
+ mask = be64_to_cpu(lowcore->io_new_psw.mask);
+ addr = be64_to_cpu(lowcore->io_new_psw.addr);
+
+ cpu_unmap_lowcore(lowcore);
+
+ env->io_index[isc]--;
+
+ DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
+ env->psw.mask, env->psw.addr);
+ load_psw(env, mask, addr);
+ }
+ if (env->io_index[isc] >= 0) {
+ disable = 0;
+ }
+ continue;
+ }
+
+ if (disable) {
+ env->pending_int &= ~INTERRUPT_IO;
+ }
+
+}
+
+static void do_mchk_interrupt(CPUS390XState *env)
+{
+ S390CPU *cpu = s390_env_get_cpu(env);
+ uint64_t mask, addr;
+ LowCore *lowcore;
+ MchkQueue *q;
+ int i;
+
+ if (!(env->psw.mask & PSW_MASK_MCHECK)) {
+ cpu_abort(CPU(cpu), "Machine check w/o mchk mask\n");
+ }
+
+ if (env->mchk_index < 0 || env->mchk_index >= MAX_MCHK_QUEUE) {
+ cpu_abort(CPU(cpu), "Mchk queue overrun: %d\n", env->mchk_index);
+ }
+
+ q = &env->mchk_queue[env->mchk_index];
+
+ if (q->type != 1) {
+ /* Don't know how to handle this... */
+ cpu_abort(CPU(cpu), "Unknown machine check type %d\n", q->type);
+ }
+ if (!(env->cregs[14] & (1 << 28))) {
+ /* CRW machine checks disabled */
+ return;
+ }
+
+ lowcore = cpu_map_lowcore(env);
+
+ for (i = 0; i < 16; i++) {
+ lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
+ lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
+ lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
+ lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
+ }
+ lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
+ lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
+ lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
+ lowcore->cpu_timer_save_area[0] = cpu_to_be32(env->cputm >> 32);
+ lowcore->cpu_timer_save_area[1] = cpu_to_be32((uint32_t)env->cputm);
+ lowcore->clock_comp_save_area[0] = cpu_to_be32(env->ckc >> 32);
+ lowcore->clock_comp_save_area[1] = cpu_to_be32((uint32_t)env->ckc);
+
+ lowcore->mcck_interruption_code[0] = cpu_to_be32(0x00400f1d);
+ lowcore->mcck_interruption_code[1] = cpu_to_be32(0x40330000);
+ lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
+ lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
+ mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
+ addr = be64_to_cpu(lowcore->mcck_new_psw.addr);
+
+ cpu_unmap_lowcore(lowcore);
+
+ env->mchk_index--;
+ if (env->mchk_index == -1) {
+ env->pending_int &= ~INTERRUPT_MCHK;
+ }
+
+ DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
+ env->psw.mask, env->psw.addr);
+
+ load_psw(env, mask, addr);
+}
+
+void s390_cpu_do_interrupt(CPUState *cs)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ CPUS390XState *env = &cpu->env;
+
+ qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
+ __func__, cs->exception_index, env->psw.addr);
+
+ s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
+ /* handle machine checks */
+ if ((env->psw.mask & PSW_MASK_MCHECK) &&
+ (cs->exception_index == -1)) {
+ if (env->pending_int & INTERRUPT_MCHK) {
+ cs->exception_index = EXCP_MCHK;
+ }
+ }
+ /* handle external interrupts */
+ if ((env->psw.mask & PSW_MASK_EXT) &&
+ cs->exception_index == -1) {
+ if (env->pending_int & INTERRUPT_EXT) {
+ /* code is already in env */
+ cs->exception_index = EXCP_EXT;
+ } else if (env->pending_int & INTERRUPT_TOD) {
+ cpu_inject_ext(cpu, 0x1004, 0, 0);
+ cs->exception_index = EXCP_EXT;
+ env->pending_int &= ~INTERRUPT_EXT;
+ env->pending_int &= ~INTERRUPT_TOD;
+ } else if (env->pending_int & INTERRUPT_CPUTIMER) {
+ cpu_inject_ext(cpu, 0x1005, 0, 0);
+ cs->exception_index = EXCP_EXT;
+ env->pending_int &= ~INTERRUPT_EXT;
+ env->pending_int &= ~INTERRUPT_TOD;
+ }
+ }
+ /* handle I/O interrupts */
+ if ((env->psw.mask & PSW_MASK_IO) &&
+ (cs->exception_index == -1)) {
+ if (env->pending_int & INTERRUPT_IO) {
+ cs->exception_index = EXCP_IO;
+ }
+ }
+
+ switch (cs->exception_index) {
+ case EXCP_PGM:
+ do_program_interrupt(env);
+ break;
+ case EXCP_SVC:
+ do_svc_interrupt(env);
+ break;
+ case EXCP_EXT:
+ do_ext_interrupt(env);
+ break;
+ case EXCP_IO:
+ do_io_interrupt(env);
+ break;
+ case EXCP_MCHK:
+ do_mchk_interrupt(env);
+ break;
+ }
+ cs->exception_index = -1;
+
+ if (!env->pending_int) {
+ cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
+ }
+}
+
+bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ if (interrupt_request & CPU_INTERRUPT_HARD) {
+ S390CPU *cpu = S390_CPU(cs);
+ CPUS390XState *env = &cpu->env;
+
+ if (env->psw.mask & PSW_MASK_EXT) {
+ s390_cpu_do_interrupt(cs);
+ return true;
+ }
+ }
+ return false;
+}
+
+void s390_cpu_recompute_watchpoints(CPUState *cs)
+{
+ const int wp_flags = BP_CPU | BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS;
+ S390CPU *cpu = S390_CPU(cs);
+ CPUS390XState *env = &cpu->env;
+
+ /* We are called when the watchpoints have changed. First
+ remove them all. */
+ cpu_watchpoint_remove_all(cs, BP_CPU);
+
+ /* Return if PER is not enabled */
+ if (!(env->psw.mask & PSW_MASK_PER)) {
+ return;
+ }
+
+ /* Return if storage-alteration event is not enabled. */
+ if (!(env->cregs[9] & PER_CR9_EVENT_STORE)) {
+ return;
+ }
+
+ if (env->cregs[10] == 0 && env->cregs[11] == -1LL) {
+ /* We can't create a watchpoint spanning the whole memory range, so
+ split it in two parts. */
+ cpu_watchpoint_insert(cs, 0, 1ULL << 63, wp_flags, NULL);
+ cpu_watchpoint_insert(cs, 1ULL << 63, 1ULL << 63, wp_flags, NULL);
+ } else if (env->cregs[10] > env->cregs[11]) {
+ /* The address range loops, create two watchpoints. */
+ cpu_watchpoint_insert(cs, env->cregs[10], -env->cregs[10],
+ wp_flags, NULL);
+ cpu_watchpoint_insert(cs, 0, env->cregs[11] + 1, wp_flags, NULL);
+
+ } else {
+ /* Default case, create a single watchpoint. */
+ cpu_watchpoint_insert(cs, env->cregs[10],
+ env->cregs[11] - env->cregs[10] + 1,
+ wp_flags, NULL);
+ }
+}
+
+void s390x_cpu_debug_excp_handler(CPUState *cs)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ CPUS390XState *env = &cpu->env;
+ CPUWatchpoint *wp_hit = cs->watchpoint_hit;
+
+ if (wp_hit && wp_hit->flags & BP_CPU) {
+ /* FIXME: When the storage-alteration-space control bit is set,
+ the exception should only be triggered if the memory access
+ is done using an address space with the storage-alteration-event
+ bit set. We have no way to detect that with the current
+ watchpoint code. */
+ cs->watchpoint_hit = NULL;
+
+ env->per_address = env->psw.addr;
+ env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
+ /* FIXME: We currently have no way to detect the address space used
+ to trigger the watchpoint. For now just assume it is the
+ current default ASC. This turns out to be true except when the
+ MVCP and MVCS instructions are used. */
+ env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;
+
+ /* Remove all watchpoints to re-execute the code. A PER exception
+ will be triggered; it will call load_psw, which will recompute
+ the watchpoints. */
+ cpu_watchpoint_remove_all(cs, BP_CPU);
+ cpu_loop_exit_noexc(cs);
+ }
+}
+#endif /* CONFIG_USER_ONLY */
diff --git a/target/s390x/helper.h b/target/s390x/helper.h
new file mode 100644
index 0000000000..207a6e7d1c
--- /dev/null
+++ b/target/s390x/helper.h
@@ -0,0 +1,133 @@
+DEF_HELPER_2(exception, noreturn, env, i32)
+DEF_HELPER_FLAGS_4(nc, TCG_CALL_NO_WG, i32, env, i32, i64, i64)
+DEF_HELPER_FLAGS_4(oc, TCG_CALL_NO_WG, i32, env, i32, i64, i64)
+DEF_HELPER_FLAGS_4(xc, TCG_CALL_NO_WG, i32, env, i32, i64, i64)
+DEF_HELPER_FLAGS_4(mvc, TCG_CALL_NO_WG, void, env, i32, i64, i64)
+DEF_HELPER_FLAGS_4(clc, TCG_CALL_NO_WG, i32, env, i32, i64, i64)
+DEF_HELPER_3(mvcl, i32, env, i32, i32)
+DEF_HELPER_FLAGS_4(clm, TCG_CALL_NO_WG, i32, env, i32, i32, i64)
+DEF_HELPER_FLAGS_3(divs32, TCG_CALL_NO_WG, s64, env, s64, s64)
+DEF_HELPER_FLAGS_3(divu32, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(divs64, TCG_CALL_NO_WG, s64, env, s64, s64)
+DEF_HELPER_FLAGS_4(divu64, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
+DEF_HELPER_4(srst, i64, env, i64, i64, i64)
+DEF_HELPER_4(clst, i64, env, i64, i64, i64)
+DEF_HELPER_4(mvpg, void, env, i64, i64, i64)
+DEF_HELPER_4(mvst, i64, env, i64, i64, i64)
+DEF_HELPER_5(ex, i32, env, i32, i64, i64, i64)
+DEF_HELPER_FLAGS_4(stam, TCG_CALL_NO_WG, void, env, i32, i64, i32)
+DEF_HELPER_FLAGS_4(lam, TCG_CALL_NO_WG, void, env, i32, i64, i32)
+DEF_HELPER_4(mvcle, i32, env, i32, i64, i32)
+DEF_HELPER_4(clcle, i32, env, i32, i64, i32)
+DEF_HELPER_3(cegb, i64, env, s64, i32)
+DEF_HELPER_3(cdgb, i64, env, s64, i32)
+DEF_HELPER_3(cxgb, i64, env, s64, i32)
+DEF_HELPER_3(celgb, i64, env, i64, i32)
+DEF_HELPER_3(cdlgb, i64, env, i64, i32)
+DEF_HELPER_3(cxlgb, i64, env, i64, i32)
+DEF_HELPER_FLAGS_3(aeb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(adb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_5(axb, TCG_CALL_NO_WG, i64, env, i64, i64, i64, i64)
+DEF_HELPER_FLAGS_3(seb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(sdb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_5(sxb, TCG_CALL_NO_WG, i64, env, i64, i64, i64, i64)
+DEF_HELPER_FLAGS_3(deb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(ddb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_5(dxb, TCG_CALL_NO_WG, i64, env, i64, i64, i64, i64)
+DEF_HELPER_FLAGS_3(meeb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(mdeb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(mdb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_5(mxb, TCG_CALL_NO_WG, i64, env, i64, i64, i64, i64)
+DEF_HELPER_FLAGS_4(mxdb, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_2(ldeb, TCG_CALL_NO_WG, i64, env, i64)
+DEF_HELPER_FLAGS_3(ldxb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_2(lxdb, TCG_CALL_NO_WG, i64, env, i64)
+DEF_HELPER_FLAGS_2(lxeb, TCG_CALL_NO_WG, i64, env, i64)
+DEF_HELPER_FLAGS_2(ledb, TCG_CALL_NO_WG, i64, env, i64)
+DEF_HELPER_FLAGS_3(lexb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(ceb, TCG_CALL_NO_WG_SE, i32, env, i64, i64)
+DEF_HELPER_FLAGS_3(cdb, TCG_CALL_NO_WG_SE, i32, env, i64, i64)
+DEF_HELPER_FLAGS_5(cxb, TCG_CALL_NO_WG_SE, i32, env, i64, i64, i64, i64)
+DEF_HELPER_FLAGS_3(cgeb, TCG_CALL_NO_WG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_3(cgdb, TCG_CALL_NO_WG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_4(cgxb, TCG_CALL_NO_WG, i64, env, i64, i64, i32)
+DEF_HELPER_FLAGS_3(cfeb, TCG_CALL_NO_WG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_3(cfdb, TCG_CALL_NO_WG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_4(cfxb, TCG_CALL_NO_WG, i64, env, i64, i64, i32)
+DEF_HELPER_FLAGS_3(clgeb, TCG_CALL_NO_WG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_3(clgdb, TCG_CALL_NO_WG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_4(clgxb, TCG_CALL_NO_WG, i64, env, i64, i64, i32)
+DEF_HELPER_FLAGS_3(clfeb, TCG_CALL_NO_WG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_3(clfdb, TCG_CALL_NO_WG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_4(clfxb, TCG_CALL_NO_WG, i64, env, i64, i64, i32)
+DEF_HELPER_FLAGS_3(fieb, TCG_CALL_NO_WG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_3(fidb, TCG_CALL_NO_WG, i64, env, i64, i32)
+DEF_HELPER_FLAGS_4(fixb, TCG_CALL_NO_WG, i64, env, i64, i64, i32)
+DEF_HELPER_FLAGS_4(maeb, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(madb, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(mseb, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(msdb, TCG_CALL_NO_WG, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_3(tceb, TCG_CALL_NO_RWG_SE, i32, env, i64, i64)
+DEF_HELPER_FLAGS_3(tcdb, TCG_CALL_NO_RWG_SE, i32, env, i64, i64)
+DEF_HELPER_FLAGS_4(tcxb, TCG_CALL_NO_RWG_SE, i32, env, i64, i64, i64)
+DEF_HELPER_FLAGS_1(clz, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_2(sqeb, TCG_CALL_NO_WG, i64, env, i64)
+DEF_HELPER_FLAGS_2(sqdb, TCG_CALL_NO_WG, i64, env, i64)
+DEF_HELPER_FLAGS_3(sqxb, TCG_CALL_NO_WG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_1(cvd, TCG_CALL_NO_RWG_SE, i64, s32)
+DEF_HELPER_FLAGS_4(unpk, TCG_CALL_NO_WG, void, env, i32, i64, i64)
+DEF_HELPER_FLAGS_4(tr, TCG_CALL_NO_WG, void, env, i32, i64, i64)
+DEF_HELPER_4(tre, i64, env, i64, i64, i64)
+DEF_HELPER_4(trt, i32, env, i32, i64, i64)
+DEF_HELPER_4(cksm, i64, env, i64, i64, i64)
+DEF_HELPER_FLAGS_5(calc_cc, TCG_CALL_NO_RWG_SE, i32, env, i32, i64, i64, i64)
+DEF_HELPER_FLAGS_2(sfpc, TCG_CALL_NO_RWG, void, env, i64)
+DEF_HELPER_FLAGS_2(sfas, TCG_CALL_NO_WG, void, env, i64)
+DEF_HELPER_FLAGS_1(popcnt, TCG_CALL_NO_RWG_SE, i64, i64)
+
+#ifndef CONFIG_USER_ONLY
+DEF_HELPER_3(servc, i32, env, i64, i64)
+DEF_HELPER_4(diag, void, env, i32, i32, i32)
+DEF_HELPER_3(load_psw, noreturn, env, i64, i64)
+DEF_HELPER_FLAGS_2(spx, TCG_CALL_NO_RWG, void, env, i64)
+DEF_HELPER_FLAGS_1(stck, TCG_CALL_NO_RWG_SE, i64, env)
+DEF_HELPER_FLAGS_2(sckc, TCG_CALL_NO_RWG, void, env, i64)
+DEF_HELPER_FLAGS_1(stckc, TCG_CALL_NO_RWG, i64, env)
+DEF_HELPER_FLAGS_2(spt, TCG_CALL_NO_RWG, void, env, i64)
+DEF_HELPER_FLAGS_1(stpt, TCG_CALL_NO_RWG, i64, env)
+DEF_HELPER_4(stsi, i32, env, i64, i64, i64)
+DEF_HELPER_FLAGS_4(lctl, TCG_CALL_NO_WG, void, env, i32, i64, i32)
+DEF_HELPER_FLAGS_4(lctlg, TCG_CALL_NO_WG, void, env, i32, i64, i32)
+DEF_HELPER_FLAGS_4(stctl, TCG_CALL_NO_WG, void, env, i32, i64, i32)
+DEF_HELPER_FLAGS_4(stctg, TCG_CALL_NO_WG, void, env, i32, i64, i32)
+DEF_HELPER_FLAGS_2(tprot, TCG_CALL_NO_RWG, i32, i64, i64)
+DEF_HELPER_FLAGS_2(iske, TCG_CALL_NO_RWG_SE, i64, env, i64)
+DEF_HELPER_FLAGS_3(sske, TCG_CALL_NO_RWG, void, env, i64, i64)
+DEF_HELPER_FLAGS_2(rrbe, TCG_CALL_NO_RWG, i32, env, i64)
+DEF_HELPER_3(csp, i32, env, i32, i64)
+DEF_HELPER_4(mvcs, i32, env, i64, i64, i64)
+DEF_HELPER_4(mvcp, i32, env, i64, i64, i64)
+DEF_HELPER_4(sigp, i32, env, i64, i32, i64)
+DEF_HELPER_FLAGS_2(sacf, TCG_CALL_NO_WG, void, env, i64)
+DEF_HELPER_FLAGS_3(ipte, TCG_CALL_NO_RWG, void, env, i64, i64)
+DEF_HELPER_FLAGS_1(ptlb, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_2(lra, i64, env, i64)
+DEF_HELPER_FLAGS_2(lura, TCG_CALL_NO_WG, i64, env, i64)
+DEF_HELPER_FLAGS_2(lurag, TCG_CALL_NO_WG, i64, env, i64)
+DEF_HELPER_FLAGS_3(stura, TCG_CALL_NO_WG, void, env, i64, i64)
+DEF_HELPER_FLAGS_3(sturg, TCG_CALL_NO_WG, void, env, i64, i64)
+DEF_HELPER_1(per_check_exception, void, env)
+DEF_HELPER_FLAGS_3(per_branch, TCG_CALL_NO_RWG, void, env, i64, i64)
+DEF_HELPER_FLAGS_2(per_ifetch, TCG_CALL_NO_RWG, void, env, i64)
+
+DEF_HELPER_2(xsch, void, env, i64)
+DEF_HELPER_2(csch, void, env, i64)
+DEF_HELPER_2(hsch, void, env, i64)
+DEF_HELPER_3(msch, void, env, i64, i64)
+DEF_HELPER_2(rchp, void, env, i64)
+DEF_HELPER_2(rsch, void, env, i64)
+DEF_HELPER_3(ssch, void, env, i64, i64)
+DEF_HELPER_3(stsch, void, env, i64, i64)
+DEF_HELPER_3(tsch, void, env, i64, i64)
+DEF_HELPER_2(chsc, void, env, i64)
+#endif
diff --git a/target/s390x/insn-data.def b/target/s390x/insn-data.def
new file mode 100644
index 0000000000..075ff597c3
--- /dev/null
+++ b/target/s390x/insn-data.def
@@ -0,0 +1,931 @@
+/*
+ * Arguments to the opcode prototypes
+ *
+ * C(OPC, NAME, FMT, FAC, I1, I2, P, W, OP, CC)
+ * D(OPC, NAME, FMT, FAC, I1, I2, P, W, OP, CC, DATA)
+ *
+ * OPC = (op << 8) | op2 where op is the major, op2 the minor opcode
+ * NAME = name of the opcode, used internally
+ * FMT = format of the opcode (defined in insn-format.def)
+ * FAC = facility the opcode is available in (defined in DisasFacility)
+ * I1 = func in1_xx fills o->in1
+ * I2 = func in2_xx fills o->in2
+ * P = func prep_xx initializes o->*out*
+ * W = func wout_xx writes o->*out* somewhere
+ * OP = func op_xx does the bulk of the operation
+ * CC = func cout_xx defines how cc should get set
+ * DATA = immediate argument to op_xx function
+ *
+ * The helpers get called in order: I1, I2, P, OP, W, CC
+ */
+
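+/*
+ * Illustrative reading of the first entry below:
+ *   C(0x1a00, AR, RR_a, Z, r1, r2, new, r1_32, add, adds32)
+ * AR ("ADD register", major opcode 0x1a) uses the RR_a format, belongs to
+ * the base (Z) facility, fills o->in1 via in1_r1 and o->in2 via in2_r2,
+ * prepares a new output temporary, performs op_add, writes the low 32 bits
+ * of the result back to r1 via wout_r1_32, and sets the condition code
+ * with cout_adds32 (signed 32-bit add).
+ */
+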
+/* ADD */
+ C(0x1a00, AR, RR_a, Z, r1, r2, new, r1_32, add, adds32)
+ C(0xb9f8, ARK, RRF_a, DO, r2, r3, new, r1_32, add, adds32)
+ C(0x5a00, A, RX_a, Z, r1, m2_32s, new, r1_32, add, adds32)
+ C(0xe35a, AY, RXY_a, LD, r1, m2_32s, new, r1_32, add, adds32)
+ C(0xb908, AGR, RRE, Z, r1, r2, r1, 0, add, adds64)
+ C(0xb918, AGFR, RRE, Z, r1, r2_32s, r1, 0, add, adds64)
+ C(0xb9e8, AGRK, RRF_a, DO, r2, r3, r1, 0, add, adds64)
+ C(0xe308, AG, RXY_a, Z, r1, m2_64, r1, 0, add, adds64)
+ C(0xe318, AGF, RXY_a, Z, r1, m2_32s, r1, 0, add, adds64)
+ C(0xb30a, AEBR, RRE, Z, e1, e2, new, e1, aeb, f32)
+ C(0xb31a, ADBR, RRE, Z, f1_o, f2_o, f1, 0, adb, f64)
+ C(0xb34a, AXBR, RRE, Z, 0, x2_o, x1, 0, axb, f128)
+ C(0xed0a, AEB, RXE, Z, e1, m2_32u, new, e1, aeb, f32)
+ C(0xed1a, ADB, RXE, Z, f1_o, m2_64, f1, 0, adb, f64)
+/* ADD HIGH */
+ C(0xb9c8, AHHHR, RRF_a, HW, r2_sr32, r3_sr32, new, r1_32h, add, adds32)
+ C(0xb9d8, AHHLR, RRF_a, HW, r2_sr32, r3, new, r1_32h, add, adds32)
+/* ADD IMMEDIATE */
+ C(0xc209, AFI, RIL_a, EI, r1, i2, new, r1_32, add, adds32)
+ C(0xeb6a, ASI, SIY, GIE, m1_32s, i2, new, m1_32, add, adds32)
+ C(0xecd8, AHIK, RIE_d, DO, r3, i2, new, r1_32, add, adds32)
+ C(0xc208, AGFI, RIL_a, EI, r1, i2, r1, 0, add, adds64)
+ C(0xeb7a, AGSI, SIY, GIE, m1_64, i2, new, m1_64, add, adds64)
+ C(0xecd9, AGHIK, RIE_d, DO, r3, i2, r1, 0, add, adds64)
+/* ADD IMMEDIATE HIGH */
+ C(0xcc08, AIH, RIL_a, HW, r1_sr32, i2, new, r1_32h, add, adds32)
+/* ADD HALFWORD */
+ C(0x4a00, AH, RX_a, Z, r1, m2_16s, new, r1_32, add, adds32)
+ C(0xe37a, AHY, RXY_a, LD, r1, m2_16s, new, r1_32, add, adds32)
+/* ADD HALFWORD IMMEDIATE */
+ C(0xa70a, AHI, RI_a, Z, r1, i2, new, r1_32, add, adds32)
+ C(0xa70b, AGHI, RI_a, Z, r1, i2, r1, 0, add, adds64)
+
+/* ADD LOGICAL */
+ C(0x1e00, ALR, RR_a, Z, r1, r2, new, r1_32, add, addu32)
+ C(0xb9fa, ALRK, RRF_a, DO, r2, r3, new, r1_32, add, addu32)
+ C(0x5e00, AL, RX_a, Z, r1, m2_32u, new, r1_32, add, addu32)
+ C(0xe35e, ALY, RXY_a, LD, r1, m2_32u, new, r1_32, add, addu32)
+ C(0xb90a, ALGR, RRE, Z, r1, r2, r1, 0, add, addu64)
+ C(0xb91a, ALGFR, RRE, Z, r1, r2_32u, r1, 0, add, addu64)
+ C(0xb9ea, ALGRK, RRF_a, DO, r2, r3, r1, 0, add, addu64)
+ C(0xe30a, ALG, RXY_a, Z, r1, m2_64, r1, 0, add, addu64)
+ C(0xe31a, ALGF, RXY_a, Z, r1, m2_32u, r1, 0, add, addu64)
+/* ADD LOGICAL HIGH */
+ C(0xb9ca, ALHHHR, RRF_a, HW, r2_sr32, r3_sr32, new, r1_32h, add, addu32)
+ C(0xb9da, ALHHLR, RRF_a, HW, r2_sr32, r3, new, r1_32h, add, addu32)
+/* ADD LOGICAL IMMEDIATE */
+ C(0xc20b, ALFI, RIL_a, EI, r1, i2_32u, new, r1_32, add, addu32)
+ C(0xc20a, ALGFI, RIL_a, EI, r1, i2_32u, r1, 0, add, addu64)
+/* ADD LOGICAL WITH SIGNED IMMEDIATE */
+ C(0xeb6e, ALSI, SIY, GIE, m1_32u, i2, new, m1_32, add, addu32)
+ C(0xecda, ALHSIK, RIE_d, DO, r3, i2, new, r1_32, add, addu32)
+ C(0xeb7e, ALGSI, SIY, GIE, m1_64, i2, new, m1_64, add, addu64)
+ C(0xecdb, ALGHSIK, RIE_d, DO, r3, i2, r1, 0, add, addu64)
+/* ADD LOGICAL WITH SIGNED IMMEDIATE HIGH */
+ C(0xcc0a, ALSIH, RIL_a, HW, r1_sr32, i2, new, r1_32h, add, addu32)
+ C(0xcc0b, ALSIHN, RIL_a, HW, r1_sr32, i2, new, r1_32h, add, 0)
+/* ADD LOGICAL WITH CARRY */
+ C(0xb998, ALCR, RRE, Z, r1, r2, new, r1_32, addc, addc32)
+ C(0xb988, ALCGR, RRE, Z, r1, r2, r1, 0, addc, addc64)
+ C(0xe398, ALC, RXY_a, Z, r1, m2_32u, new, r1_32, addc, addc32)
+ C(0xe388, ALCG, RXY_a, Z, r1, m2_64, r1, 0, addc, addc64)
+
+/* AND */
+ C(0x1400, NR, RR_a, Z, r1, r2, new, r1_32, and, nz32)
+ C(0xb9f4, NRK, RRF_a, DO, r2, r3, new, r1_32, and, nz32)
+ C(0x5400, N, RX_a, Z, r1, m2_32s, new, r1_32, and, nz32)
+ C(0xe354, NY, RXY_a, LD, r1, m2_32s, new, r1_32, and, nz32)
+ C(0xb980, NGR, RRE, Z, r1, r2, r1, 0, and, nz64)
+ C(0xb9e4, NGRK, RRF_a, DO, r2, r3, r1, 0, and, nz64)
+ C(0xe380, NG, RXY_a, Z, r1, m2_64, r1, 0, and, nz64)
+ C(0xd400, NC, SS_a, Z, la1, a2, 0, 0, nc, 0)
+/* AND IMMEDIATE */
+ D(0xc00a, NIHF, RIL_a, EI, r1_o, i2_32u, r1, 0, andi, 0, 0x2020)
+ D(0xc00b, NILF, RIL_a, EI, r1_o, i2_32u, r1, 0, andi, 0, 0x2000)
+ D(0xa504, NIHH, RI_a, Z, r1_o, i2_16u, r1, 0, andi, 0, 0x1030)
+ D(0xa505, NIHL, RI_a, Z, r1_o, i2_16u, r1, 0, andi, 0, 0x1020)
+ D(0xa506, NILH, RI_a, Z, r1_o, i2_16u, r1, 0, andi, 0, 0x1010)
+ D(0xa507, NILL, RI_a, Z, r1_o, i2_16u, r1, 0, andi, 0, 0x1000)
+ C(0x9400, NI, SI, Z, m1_8u, i2_8u, new, m1_8, and, nz64)
+ C(0xeb54, NIY, SIY, LD, m1_8u, i2_8u, new, m1_8, and, nz64)
+
+/* BRANCH AND SAVE */
+ C(0x0d00, BASR, RR_a, Z, 0, r2_nz, r1, 0, bas, 0)
+ C(0x4d00, BAS, RX_a, Z, 0, a2, r1, 0, bas, 0)
+/* BRANCH RELATIVE AND SAVE */
+ C(0xa705, BRAS, RI_b, Z, 0, 0, r1, 0, basi, 0)
+ C(0xc005, BRASL, RIL_b, Z, 0, 0, r1, 0, basi, 0)
+/* BRANCH ON CONDITION */
+ C(0x0700, BCR, RR_b, Z, 0, r2_nz, 0, 0, bc, 0)
+ C(0x4700, BC, RX_b, Z, 0, a2, 0, 0, bc, 0)
+/* BRANCH RELATIVE ON CONDITION */
+ C(0xa704, BRC, RI_c, Z, 0, 0, 0, 0, bc, 0)
+ C(0xc004, BRCL, RIL_c, Z, 0, 0, 0, 0, bc, 0)
+/* BRANCH ON COUNT */
+ C(0x0600, BCTR, RR_a, Z, 0, r2_nz, 0, 0, bct32, 0)
+ C(0xb946, BCTGR, RRE, Z, 0, r2_nz, 0, 0, bct64, 0)
+ C(0x4600, BCT, RX_a, Z, 0, a2, 0, 0, bct32, 0)
+ C(0xe346, BCTG, RXY_a, Z, 0, a2, 0, 0, bct64, 0)
+/* BRANCH RELATIVE ON COUNT */
+ C(0xa706, BRCT, RI_b, Z, 0, 0, 0, 0, bct32, 0)
+ C(0xa707, BRCTG, RI_b, Z, 0, 0, 0, 0, bct64, 0)
+/* BRANCH RELATIVE ON COUNT HIGH */
+ C(0xcc06, BRCTH, RIL_b, HW, 0, 0, 0, 0, bcth, 0)
+/* BRANCH ON INDEX */
+ D(0x8600, BXH, RS_a, Z, 0, a2, 0, 0, bx32, 0, 0)
+ D(0x8700, BXLE, RS_a, Z, 0, a2, 0, 0, bx32, 0, 1)
+ D(0xeb44, BXHG, RSY_a, Z, 0, a2, 0, 0, bx64, 0, 0)
+ D(0xeb45, BXLEG, RSY_a, Z, 0, a2, 0, 0, bx64, 0, 1)
+/* BRANCH RELATIVE ON INDEX */
+ D(0x8400, BRXH, RSI, Z, 0, 0, 0, 0, bx32, 0, 0)
+ D(0x8500, BRXLE, RSI, Z, 0, 0, 0, 0, bx32, 0, 1)
+ D(0xec44, BRXHG, RIE_e, Z, 0, 0, 0, 0, bx64, 0, 0)
+ D(0xec45, BRXHLE, RIE_e, Z, 0, 0, 0, 0, bx64, 0, 1)
+
+/* CHECKSUM */
+ C(0xb241, CKSM, RRE, Z, r1_o, ra2, new, r1_32, cksm, 0)
+
+/* COPY SIGN */
+ C(0xb372, CPSDR, RRF_b, FPSSH, f3_o, f2_o, f1, 0, cps, 0)
+
+/* COMPARE */
+ C(0x1900, CR, RR_a, Z, r1_o, r2_o, 0, 0, 0, cmps32)
+ C(0x5900, C, RX_a, Z, r1_o, m2_32s, 0, 0, 0, cmps32)
+ C(0xe359, CY, RXY_a, LD, r1_o, m2_32s, 0, 0, 0, cmps32)
+ C(0xb920, CGR, RRE, Z, r1_o, r2_o, 0, 0, 0, cmps64)
+ C(0xb930, CGFR, RRE, Z, r1_o, r2_32s, 0, 0, 0, cmps64)
+ C(0xe320, CG, RXY_a, Z, r1_o, m2_64, 0, 0, 0, cmps64)
+ C(0xe330, CGF, RXY_a, Z, r1_o, m2_32s, 0, 0, 0, cmps64)
+ C(0xb309, CEBR, RRE, Z, e1, e2, 0, 0, ceb, 0)
+ C(0xb319, CDBR, RRE, Z, f1_o, f2_o, 0, 0, cdb, 0)
+ C(0xb349, CXBR, RRE, Z, x1_o, x2_o, 0, 0, cxb, 0)
+ C(0xed09, CEB, RXE, Z, e1, m2_32u, 0, 0, ceb, 0)
+ C(0xed19, CDB, RXE, Z, f1_o, m2_64, 0, 0, cdb, 0)
+/* COMPARE IMMEDIATE */
+ C(0xc20d, CFI, RIL_a, EI, r1, i2, 0, 0, 0, cmps32)
+ C(0xc20c, CGFI, RIL_a, EI, r1, i2, 0, 0, 0, cmps64)
+/* COMPARE RELATIVE LONG */
+ C(0xc60d, CRL, RIL_b, GIE, r1, mri2_32s, 0, 0, 0, cmps32)
+ C(0xc608, CGRL, RIL_b, GIE, r1, mri2_64, 0, 0, 0, cmps64)
+ C(0xc60c, CGFRL, RIL_b, GIE, r1, mri2_32s, 0, 0, 0, cmps64)
+/* COMPARE HALFWORD */
+ C(0x4900, CH, RX_a, Z, r1_o, m2_16s, 0, 0, 0, cmps32)
+ C(0xe379, CHY, RXY_a, LD, r1_o, m2_16s, 0, 0, 0, cmps32)
+ C(0xe334, CGH, RXY_a, GIE, r1_o, m2_16s, 0, 0, 0, cmps64)
+/* COMPARE HALFWORD IMMEDIATE */
+ C(0xa70e, CHI, RI_a, Z, r1_o, i2, 0, 0, 0, cmps32)
+ C(0xa70f, CGHI, RI_a, Z, r1_o, i2, 0, 0, 0, cmps64)
+ C(0xe554, CHHSI, SIL, GIE, m1_16s, i2, 0, 0, 0, cmps64)
+ C(0xe55c, CHSI, SIL, GIE, m1_32s, i2, 0, 0, 0, cmps64)
+ C(0xe558, CGHSI, SIL, GIE, m1_64, i2, 0, 0, 0, cmps64)
+/* COMPARE HALFWORD RELATIVE LONG */
+ C(0xc605, CHRL, RIL_b, GIE, r1_o, mri2_32s, 0, 0, 0, cmps32)
+ C(0xc604, CGHRL, RIL_b, GIE, r1_o, mri2_64, 0, 0, 0, cmps64)
+/* COMPARE HIGH */
+ C(0xb9cd, CHHR, RRE, HW, r1_sr32, r2_sr32, 0, 0, 0, cmps32)
+ C(0xb9dd, CHLR, RRE, HW, r1_sr32, r2_o, 0, 0, 0, cmps32)
+ C(0xe3cd, CHF, RXY_a, HW, r1_sr32, m2_32s, 0, 0, 0, cmps32)
+/* COMPARE IMMEDIATE HIGH */
+ C(0xcc0d, CIH, RIL_a, HW, r1_sr32, i2, 0, 0, 0, cmps32)
+
+/* COMPARE LOGICAL */
+ C(0x1500, CLR, RR_a, Z, r1, r2, 0, 0, 0, cmpu32)
+ C(0x5500, CL, RX_a, Z, r1, m2_32s, 0, 0, 0, cmpu32)
+ C(0xe355, CLY, RXY_a, LD, r1, m2_32s, 0, 0, 0, cmpu32)
+ C(0xb921, CLGR, RRE, Z, r1, r2, 0, 0, 0, cmpu64)
+ C(0xb931, CLGFR, RRE, Z, r1, r2_32u, 0, 0, 0, cmpu64)
+ C(0xe321, CLG, RXY_a, Z, r1, m2_64, 0, 0, 0, cmpu64)
+ C(0xe331, CLGF, RXY_a, Z, r1, m2_32u, 0, 0, 0, cmpu64)
+ C(0xd500, CLC, SS_a, Z, la1, a2, 0, 0, clc, 0)
+/* COMPARE LOGICAL HIGH */
+ C(0xb9cf, CLHHR, RRE, HW, r1_sr32, r2_sr32, 0, 0, 0, cmpu32)
+ C(0xb9df, CLHLR, RRE, HW, r1_sr32, r2_o, 0, 0, 0, cmpu32)
+ C(0xe3cf, CLHF, RXY_a, HW, r1_sr32, m2_32s, 0, 0, 0, cmpu32)
+/* COMPARE LOGICAL IMMEDIATE */
+ C(0xc20f, CLFI, RIL_a, EI, r1, i2, 0, 0, 0, cmpu32)
+ C(0xc20e, CLGFI, RIL_a, EI, r1, i2_32u, 0, 0, 0, cmpu64)
+ C(0x9500, CLI, SI, Z, m1_8u, i2_8u, 0, 0, 0, cmpu64)
+ C(0xeb55, CLIY, SIY, LD, m1_8u, i2_8u, 0, 0, 0, cmpu64)
+ C(0xe555, CLHHSI, SIL, GIE, m1_16u, i2_16u, 0, 0, 0, cmpu64)
+ C(0xe55d, CLFHSI, SIL, GIE, m1_32u, i2_16u, 0, 0, 0, cmpu64)
+ C(0xe559, CLGHSI, SIL, GIE, m1_64, i2_16u, 0, 0, 0, cmpu64)
+/* COMPARE LOGICAL IMMEDIATE HIGH */
+ C(0xcc0f, CLIH, RIL_a, HW, r1_sr32, i2, 0, 0, 0, cmpu32)
+/* COMPARE LOGICAL RELATIVE LONG */
+ C(0xc60f, CLRL, RIL_b, GIE, r1_o, mri2_32u, 0, 0, 0, cmpu32)
+ C(0xc60a, CLGRL, RIL_b, GIE, r1_o, mri2_64, 0, 0, 0, cmpu64)
+ C(0xc60e, CLGFRL, RIL_b, GIE, r1_o, mri2_32u, 0, 0, 0, cmpu64)
+ C(0xc607, CLHRL, RIL_b, GIE, r1_o, mri2_16u, 0, 0, 0, cmpu32)
+ C(0xc606, CLGHRL, RIL_b, GIE, r1_o, mri2_16u, 0, 0, 0, cmpu64)
+/* COMPARE LOGICAL LONG EXTENDED */
+ C(0xa900, CLCLE, RS_a, Z, 0, a2, 0, 0, clcle, 0)
+/* COMPARE LOGICAL CHARACTERS UNDER MASK */
+ C(0xbd00, CLM, RS_b, Z, r1_o, a2, 0, 0, clm, 0)
+ C(0xeb21, CLMY, RSY_b, LD, r1_o, a2, 0, 0, clm, 0)
+ C(0xeb20, CLMH, RSY_b, Z, r1_sr32, a2, 0, 0, clm, 0)
+/* COMPARE LOGICAL STRING */
+ C(0xb25d, CLST, RRE, Z, r1_o, r2_o, 0, 0, clst, 0)
+
+/* COMPARE AND BRANCH */
+ D(0xecf6, CRB, RRS, GIE, r1_32s, r2_32s, 0, 0, cj, 0, 0)
+ D(0xece4, CGRB, RRS, GIE, r1_o, r2_o, 0, 0, cj, 0, 0)
+ D(0xec76, CRJ, RIE_b, GIE, r1_32s, r2_32s, 0, 0, cj, 0, 0)
+ D(0xec64, CGRJ, RIE_b, GIE, r1_o, r2_o, 0, 0, cj, 0, 0)
+ D(0xecfe, CIB, RIS, GIE, r1_32s, i2, 0, 0, cj, 0, 0)
+ D(0xecfc, CGIB, RIS, GIE, r1_o, i2, 0, 0, cj, 0, 0)
+ D(0xec7e, CIJ, RIE_c, GIE, r1_32s, i2, 0, 0, cj, 0, 0)
+ D(0xec7c, CGIJ, RIE_c, GIE, r1_o, i2, 0, 0, cj, 0, 0)
+/* COMPARE LOGICAL AND BRANCH */
+ D(0xecf7, CLRB, RRS, GIE, r1_32u, r2_32u, 0, 0, cj, 0, 1)
+ D(0xece5, CLGRB, RRS, GIE, r1_o, r2_o, 0, 0, cj, 0, 1)
+ D(0xec77, CLRJ, RIE_b, GIE, r1_32u, r2_32u, 0, 0, cj, 0, 1)
+ D(0xec65, CLGRJ, RIE_b, GIE, r1_o, r2_o, 0, 0, cj, 0, 1)
+ D(0xecff, CLIB, RIS, GIE, r1_32u, i2_8u, 0, 0, cj, 0, 1)
+ D(0xecfd, CLGIB, RIS, GIE, r1_o, i2_8u, 0, 0, cj, 0, 1)
+ D(0xec7f, CLIJ, RIE_c, GIE, r1_32u, i2_8u, 0, 0, cj, 0, 1)
+ D(0xec7d, CLGIJ, RIE_c, GIE, r1_o, i2_8u, 0, 0, cj, 0, 1)
+
+/* COMPARE AND SWAP */
+ D(0xba00, CS, RS_a, Z, r3_32u, r1_32u, new, r1_32, cs, 0, 0)
+ D(0xeb14, CSY, RSY_a, LD, r3_32u, r1_32u, new, r1_32, cs, 0, 0)
+ D(0xeb30, CSG, RSY_a, Z, r3_o, r1_o, new, r1, cs, 0, 1)
+/* COMPARE DOUBLE AND SWAP */
+ D(0xbb00, CDS, RS_a, Z, r3_D32, r1_D32, new, r1_D32, cs, 0, 1)
+ D(0xeb31, CDSY, RSY_a, LD, r3_D32, r1_D32, new, r1_D32, cs, 0, 1)
+ C(0xeb3e, CDSG, RSY_a, Z, 0, 0, 0, 0, cdsg, 0)
+
+/* COMPARE AND TRAP */
+ D(0xb972, CRT, RRF_c, GIE, r1_32s, r2_32s, 0, 0, ct, 0, 0)
+ D(0xb960, CGRT, RRF_c, GIE, r1_o, r2_o, 0, 0, ct, 0, 0)
+ D(0xec72, CIT, RIE_a, GIE, r1_32s, i2, 0, 0, ct, 0, 0)
+ D(0xec70, CGIT, RIE_a, GIE, r1_o, i2, 0, 0, ct, 0, 0)
+/* COMPARE LOGICAL AND TRAP */
+ D(0xb973, CLRT, RRF_c, GIE, r1_32u, r2_32u, 0, 0, ct, 0, 1)
+ D(0xb961, CLGRT, RRF_c, GIE, r1_o, r2_o, 0, 0, ct, 0, 1)
+ D(0xeb23, CLT, RSY_b, MIE, r1_32u, m2_32u, 0, 0, ct, 0, 1)
+ D(0xeb2b, CLGT, RSY_b, MIE, r1_o, m2_64, 0, 0, ct, 0, 1)
+ D(0xec73, CLFIT, RIE_a, GIE, r1_32u, i2_32u, 0, 0, ct, 0, 1)
+ D(0xec71, CLGIT, RIE_a, GIE, r1_o, i2_32u, 0, 0, ct, 0, 1)
+
+/* CONVERT TO DECIMAL */
+ C(0x4e00, CVD, RX_a, Z, r1_o, a2, 0, 0, cvd, 0)
+ C(0xe326, CVDY, RXY_a, LD, r1_o, a2, 0, 0, cvd, 0)
+/* CONVERT TO FIXED */
+ C(0xb398, CFEBR, RRF_e, Z, 0, e2, new, r1_32, cfeb, 0)
+ C(0xb399, CFDBR, RRF_e, Z, 0, f2_o, new, r1_32, cfdb, 0)
+ C(0xb39a, CFXBR, RRF_e, Z, 0, x2_o, new, r1_32, cfxb, 0)
+ C(0xb3a8, CGEBR, RRF_e, Z, 0, e2, r1, 0, cgeb, 0)
+ C(0xb3a9, CGDBR, RRF_e, Z, 0, f2_o, r1, 0, cgdb, 0)
+ C(0xb3aa, CGXBR, RRF_e, Z, 0, x2_o, r1, 0, cgxb, 0)
+/* CONVERT FROM FIXED */
+ C(0xb394, CEFBR, RRF_e, Z, 0, r2_32s, new, e1, cegb, 0)
+ C(0xb395, CDFBR, RRF_e, Z, 0, r2_32s, f1, 0, cdgb, 0)
+ C(0xb396, CXFBR, RRF_e, Z, 0, r2_32s, x1, 0, cxgb, 0)
+ C(0xb3a4, CEGBR, RRF_e, Z, 0, r2_o, new, e1, cegb, 0)
+ C(0xb3a5, CDGBR, RRF_e, Z, 0, r2_o, f1, 0, cdgb, 0)
+ C(0xb3a6, CXGBR, RRF_e, Z, 0, r2_o, x1, 0, cxgb, 0)
+/* CONVERT TO LOGICAL */
+ C(0xb39c, CLFEBR, RRF_e, FPE, 0, e2, new, r1_32, clfeb, 0)
+ C(0xb39d, CLFDBR, RRF_e, FPE, 0, f2_o, new, r1_32, clfdb, 0)
+ C(0xb39e, CLFXBR, RRF_e, FPE, 0, x2_o, new, r1_32, clfxb, 0)
+ C(0xb3ac, CLGEBR, RRF_e, FPE, 0, e2, r1, 0, clgeb, 0)
+ C(0xb3ad, CLGDBR, RRF_e, FPE, 0, f2_o, r1, 0, clgdb, 0)
+ C(0xb3ae, CLGXBR, RRF_e, FPE, 0, x2_o, r1, 0, clgxb, 0)
+/* CONVERT FROM LOGICAL */
+ C(0xb390, CELFBR, RRF_e, FPE, 0, r2_32u, new, e1, celgb, 0)
+ C(0xb391, CDLFBR, RRF_e, FPE, 0, r2_32u, f1, 0, cdlgb, 0)
+ C(0xb392, CXLFBR, RRF_e, FPE, 0, r2_32u, x1, 0, cxlgb, 0)
+ C(0xb3a0, CELGBR, RRF_e, FPE, 0, r2_o, new, e1, celgb, 0)
+ C(0xb3a1, CDLGBR, RRF_e, FPE, 0, r2_o, f1, 0, cdlgb, 0)
+ C(0xb3a2, CXLGBR, RRF_e, FPE, 0, r2_o, x1, 0, cxlgb, 0)
+
+/* DIVIDE */
+ C(0x1d00, DR, RR_a, Z, r1_D32, r2_32s, new_P, r1_P32, divs32, 0)
+ C(0x5d00, D, RX_a, Z, r1_D32, m2_32s, new_P, r1_P32, divs32, 0)
+ C(0xb30d, DEBR, RRE, Z, e1, e2, new, e1, deb, 0)
+ C(0xb31d, DDBR, RRE, Z, f1_o, f2_o, f1, 0, ddb, 0)
+ C(0xb34d, DXBR, RRE, Z, 0, x2_o, x1, 0, dxb, 0)
+ C(0xed0d, DEB, RXE, Z, e1, m2_32u, new, e1, deb, 0)
+ C(0xed1d, DDB, RXE, Z, f1_o, m2_64, f1, 0, ddb, 0)
+/* DIVIDE LOGICAL */
+ C(0xb997, DLR, RRE, Z, r1_D32, r2_32u, new_P, r1_P32, divu32, 0)
+ C(0xe397, DL, RXY_a, Z, r1_D32, m2_32u, new_P, r1_P32, divu32, 0)
+ C(0xb987, DLGR, RRE, Z, 0, r2_o, r1_P, 0, divu64, 0)
+ C(0xe387, DLG, RXY_a, Z, 0, m2_64, r1_P, 0, divu64, 0)
+/* DIVIDE SINGLE */
+ C(0xb90d, DSGR, RRE, Z, r1p1, r2, r1_P, 0, divs64, 0)
+ C(0xb91d, DSGFR, RRE, Z, r1p1, r2_32s, r1_P, 0, divs64, 0)
+ C(0xe30d, DSG, RXY_a, Z, r1p1, m2_64, r1_P, 0, divs64, 0)
+ C(0xe31d, DSGF, RXY_a, Z, r1p1, m2_32s, r1_P, 0, divs64, 0)
+
+/* EXCLUSIVE OR */
+ C(0x1700, XR, RR_a, Z, r1, r2, new, r1_32, xor, nz32)
+ C(0xb9f7, XRK, RRF_a, DO, r2, r3, new, r1_32, xor, nz32)
+ C(0x5700, X, RX_a, Z, r1, m2_32s, new, r1_32, xor, nz32)
+ C(0xe357, XY, RXY_a, LD, r1, m2_32s, new, r1_32, xor, nz32)
+ C(0xb982, XGR, RRE, Z, r1, r2, r1, 0, xor, nz64)
+ C(0xb9e7, XGRK, RRF_a, DO, r2, r3, r1, 0, xor, nz64)
+ C(0xe382, XG, RXY_a, Z, r1, m2_64, r1, 0, xor, nz64)
+ C(0xd700, XC, SS_a, Z, 0, 0, 0, 0, xc, 0)
+/* EXCLUSIVE OR IMMEDIATE */
+ D(0xc006, XIHF, RIL_a, EI, r1_o, i2_32u, r1, 0, xori, 0, 0x2020)
+ D(0xc007, XILF, RIL_a, EI, r1_o, i2_32u, r1, 0, xori, 0, 0x2000)
+ C(0x9700, XI, SI, Z, m1_8u, i2_8u, new, m1_8, xor, nz64)
+ C(0xeb57, XIY, SIY, LD, m1_8u, i2_8u, new, m1_8, xor, nz64)
+
+/* EXECUTE */
+ C(0x4400, EX, RX_a, Z, r1_o, a2, 0, 0, ex, 0)
+/* EXECUTE RELATIVE LONG */
+ C(0xc600, EXRL, RIL_b, EE, r1_o, ri2, 0, 0, ex, 0)
+
+/* EXTRACT ACCESS */
+ C(0xb24f, EAR, RRE, Z, 0, 0, new, r1_32, ear, 0)
+/* EXTRACT CPU ATTRIBUTE */
+ C(0xeb4c, ECAG, RSY_a, GIE, 0, a2, r1, 0, ecag, 0)
+/* EXTRACT FPC */
+ C(0xb38c, EFPC, RRE, Z, 0, 0, new, r1_32, efpc, 0)
+/* EXTRACT PSW */
+ C(0xb98d, EPSW, RRE, Z, 0, 0, 0, 0, epsw, 0)
+
+/* FIND LEFTMOST ONE */
+ C(0xb983, FLOGR, RRE, EI, 0, r2_o, r1_P, 0, flogr, 0)
+
+/* INSERT CHARACTER */
+ C(0x4300, IC, RX_a, Z, 0, m2_8u, 0, r1_8, mov2, 0)
+ C(0xe373, ICY, RXY_a, LD, 0, m2_8u, 0, r1_8, mov2, 0)
+/* INSERT CHARACTERS UNDER MASK */
+ D(0xbf00, ICM, RS_b, Z, 0, a2, r1, 0, icm, 0, 0)
+ D(0xeb81, ICMY, RSY_b, LD, 0, a2, r1, 0, icm, 0, 0)
+ D(0xeb80, ICMH, RSY_b, Z, 0, a2, r1, 0, icm, 0, 32)
+/* INSERT IMMEDIATE */
+ D(0xc008, IIHF, RIL_a, EI, r1_o, i2_32u, r1, 0, insi, 0, 0x2020)
+ D(0xc009, IILF, RIL_a, EI, r1_o, i2_32u, r1, 0, insi, 0, 0x2000)
+ D(0xa500, IIHH, RI_a, Z, r1_o, i2_16u, r1, 0, insi, 0, 0x1030)
+ D(0xa501, IIHL, RI_a, Z, r1_o, i2_16u, r1, 0, insi, 0, 0x1020)
+ D(0xa502, IILH, RI_a, Z, r1_o, i2_16u, r1, 0, insi, 0, 0x1010)
+ D(0xa503, IILL, RI_a, Z, r1_o, i2_16u, r1, 0, insi, 0, 0x1000)
+/* INSERT PROGRAM MASK */
+ C(0xb222, IPM, RRE, Z, 0, 0, r1, 0, ipm, 0)
+
+/* LOAD */
+ C(0x1800, LR, RR_a, Z, 0, r2_o, 0, cond_r1r2_32, mov2, 0)
+ C(0x5800, L, RX_a, Z, 0, a2, new, r1_32, ld32s, 0)
+ C(0xe358, LY, RXY_a, LD, 0, a2, new, r1_32, ld32s, 0)
+ C(0xb904, LGR, RRE, Z, 0, r2_o, 0, r1, mov2, 0)
+ C(0xb914, LGFR, RRE, Z, 0, r2_32s, 0, r1, mov2, 0)
+ C(0xe304, LG, RXY_a, Z, 0, a2, r1, 0, ld64, 0)
+ C(0xe314, LGF, RXY_a, Z, 0, a2, r1, 0, ld32s, 0)
+ C(0x2800, LDR, RR_a, Z, 0, f2_o, 0, f1, mov2, 0)
+ C(0x6800, LD, RX_a, Z, 0, m2_64, 0, f1, mov2, 0)
+ C(0xed65, LDY, RXY_a, LD, 0, m2_64, 0, f1, mov2, 0)
+ C(0x3800, LER, RR_a, Z, 0, e2, 0, cond_e1e2, mov2, 0)
+ C(0x7800, LE, RX_a, Z, 0, m2_32u, 0, e1, mov2, 0)
+ C(0xed64, LEY, RXY_a, LD, 0, m2_32u, 0, e1, mov2, 0)
+ C(0xb365, LXR, RRE, Z, 0, x2_o, 0, x1, movx, 0)
+/* LOAD IMMEDIATE */
+ C(0xc001, LGFI, RIL_a, EI, 0, i2, 0, r1, mov2, 0)
+/* LOAD RELATIVE LONG */
+ C(0xc40d, LRL, RIL_b, GIE, 0, ri2, new, r1_32, ld32s, 0)
+ C(0xc408, LGRL, RIL_b, GIE, 0, ri2, r1, 0, ld64, 0)
+ C(0xc40c, LGFRL, RIL_b, GIE, 0, ri2, r1, 0, ld32s, 0)
+/* LOAD ADDRESS */
+ C(0x4100, LA, RX_a, Z, 0, a2, 0, r1, mov2, 0)
+ C(0xe371, LAY, RXY_a, LD, 0, a2, 0, r1, mov2, 0)
+/* LOAD ADDRESS EXTENDED */
+ C(0x5100, LAE, RX_a, Z, 0, a2, 0, r1, mov2e, 0)
+ C(0xe375, LAEY, RXY_a, GIE, 0, a2, 0, r1, mov2e, 0)
+/* LOAD ADDRESS RELATIVE LONG */
+ C(0xc000, LARL, RIL_b, Z, 0, ri2, 0, r1, mov2, 0)
+/* LOAD AND ADD */
+ C(0xebf8, LAA, RSY_a, ILA, r3_32s, m2_32s_atomic, new, m2_32_r1_atomic, add, adds32)
+ C(0xebe8, LAAG, RSY_a, ILA, r3, m2_64_atomic, new, m2_64_r1_atomic, add, adds64)
+/* LOAD AND ADD LOGICAL */
+ C(0xebfa, LAAL, RSY_a, ILA, r3_32s, m2_32s_atomic, new, m2_32_r1_atomic, add, addu32)
+ C(0xebea, LAALG, RSY_a, ILA, r3, m2_64_atomic, new, m2_64_r1_atomic, add, addu64)
+/* LOAD AND AND */
+ C(0xebf4, LAN, RSY_a, ILA, r3_32s, m2_32s_atomic, new, m2_32_r1_atomic, and, nz32)
+ C(0xebe4, LANG, RSY_a, ILA, r3, m2_64_atomic, new, m2_64_r1_atomic, and, nz64)
+/* LOAD AND EXCLUSIVE OR */
+ C(0xebf7, LAX, RSY_a, ILA, r3_32s, m2_32s_atomic, new, m2_32_r1_atomic, xor, nz32)
+ C(0xebe7, LAXG, RSY_a, ILA, r3, m2_64_atomic, new, m2_64_r1_atomic, xor, nz64)
+/* LOAD AND OR */
+ C(0xebf6, LAO, RSY_a, ILA, r3_32s, m2_32s_atomic, new, m2_32_r1_atomic, or, nz32)
+ C(0xebe6, LAOG, RSY_a, ILA, r3, m2_64_atomic, new, m2_64_r1_atomic, or, nz64)
+/* LOAD AND TEST */
+ C(0x1200, LTR, RR_a, Z, 0, r2_o, 0, cond_r1r2_32, mov2, s32)
+ C(0xb902, LTGR, RRE, Z, 0, r2_o, 0, r1, mov2, s64)
+ C(0xb912, LTGFR, RRE, Z, 0, r2_32s, 0, r1, mov2, s64)
+ C(0xe312, LT, RXY_a, EI, 0, a2, new, r1_32, ld32s, s64)
+ C(0xe302, LTG, RXY_a, EI, 0, a2, r1, 0, ld64, s64)
+ C(0xe332, LTGF, RXY_a, GIE, 0, a2, r1, 0, ld32s, s64)
+ C(0xb302, LTEBR, RRE, Z, 0, e2, 0, cond_e1e2, mov2, f32)
+ C(0xb312, LTDBR, RRE, Z, 0, f2_o, 0, f1, mov2, f64)
+ C(0xb342, LTXBR, RRE, Z, 0, x2_o, 0, x1, movx, f128)
+/* LOAD AND TRAP */
+ C(0xe39f, LAT, RXY_a, LAT, 0, m2_32u, r1, 0, lat, 0)
+ C(0xe385, LGAT, RXY_a, LAT, 0, a2, r1, 0, lgat, 0)
+/* LOAD BYTE */
+ C(0xb926, LBR, RRE, EI, 0, r2_8s, 0, r1_32, mov2, 0)
+ C(0xb906, LGBR, RRE, EI, 0, r2_8s, 0, r1, mov2, 0)
+ C(0xe376, LB, RXY_a, LD, 0, a2, new, r1_32, ld8s, 0)
+ C(0xe377, LGB, RXY_a, LD, 0, a2, r1, 0, ld8s, 0)
+/* LOAD BYTE HIGH */
+ C(0xe3c0, LBH, RXY_a, HW, 0, a2, new, r1_32h, ld8s, 0)
+/* LOAD COMPLEMENT */
+ C(0x1300, LCR, RR_a, Z, 0, r2, new, r1_32, neg, neg32)
+ C(0xb903, LCGR, RRE, Z, 0, r2, r1, 0, neg, neg64)
+ C(0xb913, LCGFR, RRE, Z, 0, r2_32s, r1, 0, neg, neg64)
+ C(0xb303, LCEBR, RRE, Z, 0, e2, new, e1, negf32, f32)
+ C(0xb313, LCDBR, RRE, Z, 0, f2_o, f1, 0, negf64, f64)
+ C(0xb343, LCXBR, RRE, Z, 0, x2_o, x1, 0, negf128, f128)
+ C(0xb373, LCDFR, RRE, FPSSH, 0, f2_o, f1, 0, negf64, 0)
+/* LOAD HALFWORD */
+ C(0xb927, LHR, RRE, EI, 0, r2_16s, 0, r1_32, mov2, 0)
+ C(0xb907, LGHR, RRE, EI, 0, r2_16s, 0, r1, mov2, 0)
+ C(0x4800, LH, RX_a, Z, 0, a2, new, r1_32, ld16s, 0)
+ C(0xe378, LHY, RXY_a, LD, 0, a2, new, r1_32, ld16s, 0)
+ C(0xe315, LGH, RXY_a, Z, 0, a2, r1, 0, ld16s, 0)
+/* LOAD HALFWORD HIGH */
+ C(0xe3c4, LHH, RXY_a, HW, 0, a2, new, r1_32h, ld16s, 0)
+/* LOAD HALFWORD IMMEDIATE */
+ C(0xa708, LHI, RI_a, Z, 0, i2, 0, r1_32, mov2, 0)
+ C(0xa709, LGHI, RI_a, Z, 0, i2, 0, r1, mov2, 0)
+/* LOAD HALFWORD RELATIVE LONG */
+ C(0xc405, LHRL, RIL_b, GIE, 0, ri2, new, r1_32, ld16s, 0)
+ C(0xc404, LGHRL, RIL_b, GIE, 0, ri2, r1, 0, ld16s, 0)
+/* LOAD HIGH */
+ C(0xe3ca, LFH, RXY_a, HW, 0, a2, new, r1_32h, ld32u, 0)
+/* LOAD HIGH AND TRAP */
+ C(0xe3c8, LFHAT, RXY_a, LAT, 0, m2_32u, r1, 0, lfhat, 0)
+/* LOAD LOGICAL */
+ C(0xb916, LLGFR, RRE, Z, 0, r2_32u, 0, r1, mov2, 0)
+ C(0xe316, LLGF, RXY_a, Z, 0, a2, r1, 0, ld32u, 0)
+/* LOAD LOGICAL AND TRAP */
+ C(0xe39d, LLGFAT, RXY_a, LAT, 0, a2, r1, 0, llgfat, 0)
+/* LOAD LOGICAL RELATIVE LONG */
+ C(0xc40e, LLGFRL, RIL_b, GIE, 0, ri2, r1, 0, ld32u, 0)
+/* LOAD LOGICAL CHARACTER */
+ C(0xb994, LLCR, RRE, EI, 0, r2_8u, 0, r1_32, mov2, 0)
+ C(0xb984, LLGCR, RRE, EI, 0, r2_8u, 0, r1, mov2, 0)
+ C(0xe394, LLC, RXY_a, EI, 0, a2, new, r1_32, ld8u, 0)
+ C(0xe390, LLGC, RXY_a, Z, 0, a2, r1, 0, ld8u, 0)
+/* LOAD LOGICAL CHARACTER HIGH */
+ C(0xe3c2, LLCH, RXY_a, HW, 0, a2, new, r1_32h, ld8u, 0)
+/* LOAD LOGICAL HALFWORD */
+ C(0xb995, LLHR, RRE, EI, 0, r2_16u, 0, r1_32, mov2, 0)
+ C(0xb985, LLGHR, RRE, EI, 0, r2_16u, 0, r1, mov2, 0)
+ C(0xe395, LLH, RXY_a, EI, 0, a2, new, r1_32, ld16u, 0)
+ C(0xe391, LLGH, RXY_a, Z, 0, a2, r1, 0, ld16u, 0)
+/* LOAD LOGICAL HALFWORD HIGH */
+ C(0xe3c6, LLHH, RXY_a, HW, 0, a2, new, r1_32h, ld16u, 0)
+/* LOAD LOGICAL HALFWORD RELATIVE LONG */
+ C(0xc402, LLHRL, RIL_b, GIE, 0, ri2, new, r1_32, ld16u, 0)
+ C(0xc406, LLGHRL, RIL_b, GIE, 0, ri2, r1, 0, ld16u, 0)
+/* LOAD LOGICAL IMMEDIATE */
+ D(0xc00e, LLIHF, RIL_a, EI, 0, i2_32u_shl, 0, r1, mov2, 0, 32)
+ D(0xc00f, LLILF, RIL_a, EI, 0, i2_32u_shl, 0, r1, mov2, 0, 0)
+ D(0xa50c, LLIHH, RI_a, Z, 0, i2_16u_shl, 0, r1, mov2, 0, 48)
+ D(0xa50d, LLIHL, RI_a, Z, 0, i2_16u_shl, 0, r1, mov2, 0, 32)
+ D(0xa50e, LLILH, RI_a, Z, 0, i2_16u_shl, 0, r1, mov2, 0, 16)
+ D(0xa50f, LLILL, RI_a, Z, 0, i2_16u_shl, 0, r1, mov2, 0, 0)
+/* LOAD LOGICAL THIRTY ONE BITS */
+ C(0xb917, LLGTR, RRE, Z, 0, r2_o, r1, 0, llgt, 0)
+ C(0xe317, LLGT, RXY_a, Z, 0, m2_32u, r1, 0, llgt, 0)
+/* LOAD LOGICAL THIRTY ONE BITS AND TRAP */
+ C(0xe39c, LLGTAT, RXY_a, LAT, 0, m2_32u, r1, 0, llgtat, 0)
+
+/* LOAD FPR FROM GR */
+ C(0xb3c1, LDGR, RRE, FPRGR, 0, r2_o, 0, f1, mov2, 0)
+/* LOAD GR FROM FPR */
+ C(0xb3cd, LGDR, RRE, FPRGR, 0, f2_o, 0, r1, mov2, 0)
+/* LOAD NEGATIVE */
+ C(0x1100, LNR, RR_a, Z, 0, r2_32s, new, r1_32, nabs, nabs32)
+ C(0xb901, LNGR, RRE, Z, 0, r2, r1, 0, nabs, nabs64)
+ C(0xb911, LNGFR, RRE, Z, 0, r2_32s, r1, 0, nabs, nabs64)
+ C(0xb301, LNEBR, RRE, Z, 0, e2, new, e1, nabsf32, f32)
+ C(0xb311, LNDBR, RRE, Z, 0, f2_o, f1, 0, nabsf64, f64)
+ C(0xb341, LNXBR, RRE, Z, 0, x2_o, x1, 0, nabsf128, f128)
+ C(0xb371, LNDFR, RRE, FPSSH, 0, f2_o, f1, 0, nabsf64, 0)
+/* LOAD ON CONDITION */
+ C(0xb9f2, LOCR, RRF_c, LOC, r1, r2, new, r1_32, loc, 0)
+ C(0xb9e2, LOCGR, RRF_c, LOC, r1, r2, r1, 0, loc, 0)
+ C(0xebf2, LOC, RSY_b, LOC, r1, m2_32u, new, r1_32, loc, 0)
+ C(0xebe2, LOCG, RSY_b, LOC, r1, m2_64, r1, 0, loc, 0)
+/* LOAD PAIR DISJOINT TODO */
+/* LOAD POSITIVE */
+ C(0x1000, LPR, RR_a, Z, 0, r2_32s, new, r1_32, abs, abs32)
+ C(0xb900, LPGR, RRE, Z, 0, r2, r1, 0, abs, abs64)
+ C(0xb910, LPGFR, RRE, Z, 0, r2_32s, r1, 0, abs, abs64)
+ C(0xb300, LPEBR, RRE, Z, 0, e2, new, e1, absf32, f32)
+ C(0xb310, LPDBR, RRE, Z, 0, f2_o, f1, 0, absf64, f64)
+ C(0xb340, LPXBR, RRE, Z, 0, x2_o, x1, 0, absf128, f128)
+ C(0xb370, LPDFR, RRE, FPSSH, 0, f2_o, f1, 0, absf64, 0)
+/* LOAD REVERSED */
+ C(0xb91f, LRVR, RRE, Z, 0, r2_32u, new, r1_32, rev32, 0)
+ C(0xb90f, LRVGR, RRE, Z, 0, r2_o, r1, 0, rev64, 0)
+ C(0xe31f, LRVH, RXY_a, Z, 0, m2_16u, new, r1_16, rev16, 0)
+ C(0xe31e, LRV, RXY_a, Z, 0, m2_32u, new, r1_32, rev32, 0)
+ C(0xe30f, LRVG, RXY_a, Z, 0, m2_64, r1, 0, rev64, 0)
+/* LOAD ZERO */
+ C(0xb374, LZER, RRE, Z, 0, 0, 0, e1, zero, 0)
+ C(0xb375, LZDR, RRE, Z, 0, 0, 0, f1, zero, 0)
+ C(0xb376, LZXR, RRE, Z, 0, 0, 0, x1, zero2, 0)
+
+/* LOAD FPC */
+ C(0xb29d, LFPC, S, Z, 0, m2_32u, 0, 0, sfpc, 0)
+/* LOAD FPC AND SIGNAL */
+ C(0xb2bd, LFAS, S, IEEEE_SIM, 0, m2_32u, 0, 0, sfas, 0)
+/* LOAD FP INTEGER */
+ C(0xb357, FIEBR, RRF_e, Z, 0, e2, new, e1, fieb, 0)
+ C(0xb35f, FIDBR, RRF_e, Z, 0, f2_o, f1, 0, fidb, 0)
+ C(0xb347, FIXBR, RRF_e, Z, 0, x2_o, x1, 0, fixb, 0)
+
+/* LOAD LENGTHENED */
+ C(0xb304, LDEBR, RRE, Z, 0, e2, f1, 0, ldeb, 0)
+ C(0xb305, LXDBR, RRE, Z, 0, f2_o, x1, 0, lxdb, 0)
+ C(0xb306, LXEBR, RRE, Z, 0, e2, x1, 0, lxeb, 0)
+ C(0xed04, LDEB, RXE, Z, 0, m2_32u, f1, 0, ldeb, 0)
+ C(0xed05, LXDB, RXE, Z, 0, m2_64, x1, 0, lxdb, 0)
+ C(0xed06, LXEB, RXE, Z, 0, m2_32u, x1, 0, lxeb, 0)
+/* LOAD ROUNDED */
+ C(0xb344, LEDBR, RRE, Z, 0, f2_o, new, e1, ledb, 0)
+ C(0xb345, LDXBR, RRE, Z, 0, x2_o, f1, 0, ldxb, 0)
+ C(0xb346, LEXBR, RRE, Z, 0, x2_o, new, e1, lexb, 0)
+
+/* LOAD MULTIPLE */
+ C(0x9800, LM, RS_a, Z, 0, a2, 0, 0, lm32, 0)
+ C(0xeb98, LMY, RSY_a, LD, 0, a2, 0, 0, lm32, 0)
+ C(0xeb04, LMG, RSY_a, Z, 0, a2, 0, 0, lm64, 0)
+/* LOAD MULTIPLE HIGH */
+ C(0xeb96, LMH, RSY_a, Z, 0, a2, 0, 0, lmh, 0)
+/* LOAD ACCESS MULTIPLE */
+ C(0x9a00, LAM, RS_a, Z, 0, a2, 0, 0, lam, 0)
+ C(0xeb9a, LAMY, RSY_a, LD, 0, a2, 0, 0, lam, 0)
+
+/* MOVE */
+ C(0xd200, MVC, SS_a, Z, la1, a2, 0, 0, mvc, 0)
+ C(0xe544, MVHHI, SIL, GIE, la1, i2, 0, m1_16, mov2, 0)
+ C(0xe54c, MVHI, SIL, GIE, la1, i2, 0, m1_32, mov2, 0)
+ C(0xe548, MVGHI, SIL, GIE, la1, i2, 0, m1_64, mov2, 0)
+ C(0x9200, MVI, SI, Z, la1, i2, 0, m1_8, mov2, 0)
+ C(0xeb52, MVIY, SIY, LD, la1, i2, 0, m1_8, mov2, 0)
+/* MOVE LONG */
+ C(0x0e00, MVCL, RR_a, Z, 0, 0, 0, 0, mvcl, 0)
+/* MOVE LONG EXTENDED */
+ C(0xa800, MVCLE, RS_a, Z, 0, a2, 0, 0, mvcle, 0)
+/* MOVE PAGE */
+ C(0xb254, MVPG, RRE, Z, r1_o, r2_o, 0, 0, mvpg, 0)
+/* MOVE STRING */
+ C(0xb255, MVST, RRE, Z, r1_o, r2_o, 0, 0, mvst, 0)
+
+/* MULTIPLY */
+ C(0x1c00, MR, RR_a, Z, r1p1_32s, r2_32s, new, r1_D32, mul, 0)
+ C(0x5c00, M, RX_a, Z, r1p1_32s, m2_32s, new, r1_D32, mul, 0)
+ C(0xe35c, MFY, RXY_a, GIE, r1p1_32s, m2_32s, new, r1_D32, mul, 0)
+ C(0xb317, MEEBR, RRE, Z, e1, e2, new, e1, meeb, 0)
+ C(0xb31c, MDBR, RRE, Z, f1_o, f2_o, f1, 0, mdb, 0)
+ C(0xb34c, MXBR, RRE, Z, 0, x2_o, x1, 0, mxb, 0)
+ C(0xb30c, MDEBR, RRE, Z, f1_o, e2, f1, 0, mdeb, 0)
+ C(0xb307, MXDBR, RRE, Z, 0, f2_o, x1, 0, mxdb, 0)
+ C(0xed17, MEEB, RXE, Z, e1, m2_32u, new, e1, meeb, 0)
+ C(0xed1c, MDB, RXE, Z, f1_o, m2_64, f1, 0, mdb, 0)
+ C(0xed0c, MDEB, RXE, Z, f1_o, m2_32u, f1, 0, mdeb, 0)
+ C(0xed07, MXDB, RXE, Z, 0, m2_64, x1, 0, mxdb, 0)
+/* MULTIPLY HALFWORD */
+ C(0x4c00, MH, RX_a, Z, r1_o, m2_16s, new, r1_32, mul, 0)
+ C(0xe37c, MHY, RXY_a, GIE, r1_o, m2_16s, new, r1_32, mul, 0)
+/* MULTIPLY HALFWORD IMMEDIATE */
+ C(0xa70c, MHI, RI_a, Z, r1_o, i2, new, r1_32, mul, 0)
+ C(0xa70d, MGHI, RI_a, Z, r1_o, i2, r1, 0, mul, 0)
+/* MULTIPLY LOGICAL */
+ C(0xb996, MLR, RRE, Z, r1p1_32u, r2_32u, new, r1_D32, mul, 0)
+ C(0xe396, ML, RXY_a, Z, r1p1_32u, m2_32u, new, r1_D32, mul, 0)
+ C(0xb986, MLGR, RRE, Z, r1p1, r2_o, r1_P, 0, mul128, 0)
+ C(0xe386, MLG, RXY_a, Z, r1p1, m2_64, r1_P, 0, mul128, 0)
+/* MULTIPLY SINGLE */
+ C(0xb252, MSR, RRE, Z, r1_o, r2_o, new, r1_32, mul, 0)
+ C(0x7100, MS, RX_a, Z, r1_o, m2_32s, new, r1_32, mul, 0)
+ C(0xe351, MSY, RXY_a, LD, r1_o, m2_32s, new, r1_32, mul, 0)
+ C(0xb90c, MSGR, RRE, Z, r1_o, r2_o, r1, 0, mul, 0)
+ C(0xb91c, MSGFR, RRE, Z, r1_o, r2_32s, r1, 0, mul, 0)
+ C(0xe30c, MSG, RXY_a, Z, r1_o, m2_64, r1, 0, mul, 0)
+ C(0xe31c, MSGF, RXY_a, Z, r1_o, m2_32s, r1, 0, mul, 0)
+/* MULTIPLY SINGLE IMMEDIATE */
+ C(0xc201, MSFI, RIL_a, GIE, r1_o, i2, new, r1_32, mul, 0)
+ C(0xc200, MSGFI, RIL_a, GIE, r1_o, i2, r1, 0, mul, 0)
+
+/* MULTIPLY AND ADD */
+ C(0xb30e, MAEBR, RRD, Z, e1, e2, new, e1, maeb, 0)
+ C(0xb31e, MADBR, RRD, Z, f1_o, f2_o, f1, 0, madb, 0)
+ C(0xed0e, MAEB, RXF, Z, e1, m2_32u, new, e1, maeb, 0)
+ C(0xed1e, MADB, RXF, Z, f1_o, m2_64, f1, 0, madb, 0)
+/* MULTIPLY AND SUBTRACT */
+ C(0xb30f, MSEBR, RRD, Z, e1, e2, new, e1, mseb, 0)
+ C(0xb31f, MSDBR, RRD, Z, f1_o, f2_o, f1, 0, msdb, 0)
+ C(0xed0f, MSEB, RXF, Z, e1, m2_32u, new, e1, mseb, 0)
+ C(0xed1f, MSDB, RXF, Z, f1_o, m2_64, f1, 0, msdb, 0)
+
+/* OR */
+ C(0x1600, OR, RR_a, Z, r1, r2, new, r1_32, or, nz32)
+ C(0xb9f6, ORK, RRF_a, DO, r2, r3, new, r1_32, or, nz32)
+ C(0x5600, O, RX_a, Z, r1, m2_32s, new, r1_32, or, nz32)
+ C(0xe356, OY, RXY_a, LD, r1, m2_32s, new, r1_32, or, nz32)
+ C(0xb981, OGR, RRE, Z, r1, r2, r1, 0, or, nz64)
+ C(0xb9e6, OGRK, RRF_a, DO, r2, r3, r1, 0, or, nz64)
+ C(0xe381, OG, RXY_a, Z, r1, m2_64, r1, 0, or, nz64)
+ C(0xd600, OC, SS_a, Z, la1, a2, 0, 0, oc, 0)
+/* OR IMMEDIATE */
+ D(0xc00c, OIHF, RIL_a, EI, r1_o, i2_32u, r1, 0, ori, 0, 0x2020)
+ D(0xc00d, OILF, RIL_a, EI, r1_o, i2_32u, r1, 0, ori, 0, 0x2000)
+ D(0xa508, OIHH, RI_a, Z, r1_o, i2_16u, r1, 0, ori, 0, 0x1030)
+ D(0xa509, OIHL, RI_a, Z, r1_o, i2_16u, r1, 0, ori, 0, 0x1020)
+ D(0xa50a, OILH, RI_a, Z, r1_o, i2_16u, r1, 0, ori, 0, 0x1010)
+ D(0xa50b, OILL, RI_a, Z, r1_o, i2_16u, r1, 0, ori, 0, 0x1000)
+ C(0x9600, OI, SI, Z, m1_8u, i2_8u, new, m1_8, or, nz64)
+ C(0xeb56, OIY, SIY, LD, m1_8u, i2_8u, new, m1_8, or, nz64)
+
+/* PREFETCH */
+ /* Implemented as nops of course. */
+ C(0xe336, PFD, RXY_b, GIE, 0, 0, 0, 0, 0, 0)
+ C(0xc602, PFDRL, RIL_c, GIE, 0, 0, 0, 0, 0, 0)
+
+/* POPULATION COUNT */
+ C(0xb9e1, POPCNT, RRE, PC, 0, r2_o, r1, 0, popcnt, nz64)
+
+/* ROTATE LEFT SINGLE LOGICAL */
+ C(0xeb1d, RLL, RSY_a, Z, r3_o, sh32, new, r1_32, rll32, 0)
+ C(0xeb1c, RLLG, RSY_a, Z, r3_o, sh64, r1, 0, rll64, 0)
+
+/* ROTATE THEN INSERT SELECTED BITS */
+ C(0xec55, RISBG, RIE_f, GIE, 0, r2, r1, 0, risbg, s64)
+ C(0xec59, RISBGN, RIE_f, MIE, 0, r2, r1, 0, risbg, 0)
+ C(0xec5d, RISBHG, RIE_f, HW, 0, r2, r1, 0, risbg, 0)
+ C(0xec51, RISBLG, RIE_f, HW, 0, r2, r1, 0, risbg, 0)
+/* ROTATE THEN <OP> SELECTED BITS */
+ C(0xec54, RNSBG, RIE_f, GIE, 0, r2, r1, 0, rosbg, 0)
+ C(0xec56, ROSBG, RIE_f, GIE, 0, r2, r1, 0, rosbg, 0)
+ C(0xec57, RXSBG, RIE_f, GIE, 0, r2, r1, 0, rosbg, 0)
+
+/* SEARCH STRING */
+ C(0xb25e, SRST, RRE, Z, r1_o, r2_o, 0, 0, srst, 0)
+
+/* SET ACCESS */
+ C(0xb24e, SAR, RRE, Z, 0, r2_o, 0, 0, sar, 0)
+/* SET ADDRESSING MODE */
+ D(0x010c, SAM24, E, Z, 0, 0, 0, 0, sam, 0, 0)
+ D(0x010d, SAM31, E, Z, 0, 0, 0, 0, sam, 0, 1)
+ D(0x010e, SAM64, E, Z, 0, 0, 0, 0, sam, 0, 3)
+/* SET FPC */
+ C(0xb384, SFPC, RRE, Z, 0, r1_o, 0, 0, sfpc, 0)
+/* SET FPC AND SIGNAL */
+ C(0xb385, SFASR, RRE, IEEEE_SIM, 0, r1_o, 0, 0, sfas, 0)
+/* SET BFP ROUNDING MODE */
+ C(0xb299, SRNM, S, Z, 0, 0, 0, 0, srnm, 0)
+ C(0xb2b8, SRNMB, S, FPE, 0, 0, 0, 0, srnm, 0)
+/* SET DFP ROUNDING MODE */
+ C(0xb2b9, SRNMT, S, DFPR, 0, 0, 0, 0, srnm, 0)
+
+/* SHIFT LEFT SINGLE */
+ D(0x8b00, SLA, RS_a, Z, r1, sh32, new, r1_32, sla, 0, 31)
+ D(0xebdd, SLAK, RSY_a, DO, r3, sh32, new, r1_32, sla, 0, 31)
+ D(0xeb0b, SLAG, RSY_a, Z, r3, sh64, r1, 0, sla, 0, 63)
+/* SHIFT LEFT SINGLE LOGICAL */
+ C(0x8900, SLL, RS_a, Z, r1_o, sh32, new, r1_32, sll, 0)
+ C(0xebdf, SLLK, RSY_a, DO, r3_o, sh32, new, r1_32, sll, 0)
+ C(0xeb0d, SLLG, RSY_a, Z, r3_o, sh64, r1, 0, sll, 0)
+/* SHIFT RIGHT SINGLE */
+ C(0x8a00, SRA, RS_a, Z, r1_32s, sh32, new, r1_32, sra, s32)
+ C(0xebdc, SRAK, RSY_a, DO, r3_32s, sh32, new, r1_32, sra, s32)
+ C(0xeb0a, SRAG, RSY_a, Z, r3_o, sh64, r1, 0, sra, s64)
+/* SHIFT RIGHT SINGLE LOGICAL */
+ C(0x8800, SRL, RS_a, Z, r1_32u, sh32, new, r1_32, srl, 0)
+ C(0xebde, SRLK, RSY_a, DO, r3_32u, sh32, new, r1_32, srl, 0)
+ C(0xeb0c, SRLG, RSY_a, Z, r3_o, sh64, r1, 0, srl, 0)
+/* SHIFT LEFT DOUBLE */
+ D(0x8f00, SLDA, RS_a, Z, r1_D32, sh64, new, r1_D32, sla, 0, 31)
+/* SHIFT LEFT DOUBLE LOGICAL */
+ C(0x8d00, SLDL, RS_a, Z, r1_D32, sh64, new, r1_D32, sll, 0)
+/* SHIFT RIGHT DOUBLE */
+ C(0x8e00, SRDA, RS_a, Z, r1_D32, sh64, new, r1_D32, sra, s64)
+/* SHIFT RIGHT DOUBLE LOGICAL */
+ C(0x8c00, SRDL, RS_a, Z, r1_D32, sh64, new, r1_D32, srl, 0)
+
+/* SQUARE ROOT */
+ C(0xb314, SQEBR, RRE, Z, 0, e2, new, e1, sqeb, 0)
+ C(0xb315, SQDBR, RRE, Z, 0, f2_o, f1, 0, sqdb, 0)
+ C(0xb316, SQXBR, RRE, Z, 0, x2_o, x1, 0, sqxb, 0)
+ C(0xed14, SQEB, RXE, Z, 0, m2_32u, new, e1, sqeb, 0)
+ C(0xed15, SQDB, RXE, Z, 0, m2_64, f1, 0, sqdb, 0)
+
+/* STORE */
+ C(0x5000, ST, RX_a, Z, r1_o, a2, 0, 0, st32, 0)
+ C(0xe350, STY, RXY_a, LD, r1_o, a2, 0, 0, st32, 0)
+ C(0xe324, STG, RXY_a, Z, r1_o, a2, 0, 0, st64, 0)
+ C(0x6000, STD, RX_a, Z, f1_o, a2, 0, 0, st64, 0)
+ C(0xed67, STDY, RXY_a, LD, f1_o, a2, 0, 0, st64, 0)
+ C(0x7000, STE, RX_a, Z, e1, a2, 0, 0, st32, 0)
+ C(0xed66, STEY, RXY_a, LD, e1, a2, 0, 0, st32, 0)
+/* STORE RELATIVE LONG */
+ C(0xc40f, STRL, RIL_b, GIE, r1_o, ri2, 0, 0, st32, 0)
+ C(0xc40b, STGRL, RIL_b, GIE, r1_o, ri2, 0, 0, st64, 0)
+/* STORE CHARACTER */
+ C(0x4200, STC, RX_a, Z, r1_o, a2, 0, 0, st8, 0)
+ C(0xe372, STCY, RXY_a, LD, r1_o, a2, 0, 0, st8, 0)
+/* STORE CHARACTER HIGH */
+ C(0xe3c3, STCH, RXY_a, HW, r1_sr32, a2, 0, 0, st8, 0)
+/* STORE CHARACTERS UNDER MASK */
+ D(0xbe00, STCM, RS_b, Z, r1_o, a2, 0, 0, stcm, 0, 0)
+ D(0xeb2d, STCMY, RSY_b, LD, r1_o, a2, 0, 0, stcm, 0, 0)
+ D(0xeb2c, STCMH, RSY_b, Z, r1_o, a2, 0, 0, stcm, 0, 32)
+/* STORE HALFWORD */
+ C(0x4000, STH, RX_a, Z, r1_o, a2, 0, 0, st16, 0)
+ C(0xe370, STHY, RXY_a, LD, r1_o, a2, 0, 0, st16, 0)
+/* STORE HALFWORD HIGH */
+ C(0xe3c7, STHH, RXY_a, HW, r1_sr32, a2, 0, 0, st16, 0)
+/* STORE HALFWORD RELATIVE LONG */
+ C(0xc407, STHRL, RIL_b, GIE, r1_o, ri2, 0, 0, st16, 0)
+/* STORE HIGH */
+ C(0xe3cb, STFH, RXY_a, HW, r1_sr32, a2, 0, 0, st32, 0)
+/* STORE ON CONDITION */
+ D(0xebf3, STOC, RSY_b, LOC, 0, 0, 0, 0, soc, 0, 0)
+ D(0xebe3, STOCG, RSY_b, LOC, 0, 0, 0, 0, soc, 0, 1)
+/* STORE REVERSED */
+ C(0xe33f, STRVH, RXY_a, Z, la2, r1_16u, new, m1_16, rev16, 0)
+ C(0xe33e, STRV, RXY_a, Z, la2, r1_32u, new, m1_32, rev32, 0)
+ C(0xe32f, STRVG, RXY_a, Z, la2, r1_o, new, m1_64, rev64, 0)
+
+/* STORE FPC */
+ C(0xb29c, STFPC, S, Z, 0, a2, new, m2_32, efpc, 0)
+
+/* STORE MULTIPLE */
+ D(0x9000, STM, RS_a, Z, 0, a2, 0, 0, stm, 0, 4)
+ D(0xeb90, STMY, RSY_a, LD, 0, a2, 0, 0, stm, 0, 4)
+ D(0xeb24, STMG, RSY_a, Z, 0, a2, 0, 0, stm, 0, 8)
+/* STORE MULTIPLE HIGH */
+ C(0xeb26, STMH, RSY_a, Z, 0, a2, 0, 0, stmh, 0)
+/* STORE ACCESS MULTIPLE */
+ C(0x9b00, STAM, RS_a, Z, 0, a2, 0, 0, stam, 0)
+ C(0xeb9b, STAMY, RSY_a, LD, 0, a2, 0, 0, stam, 0)
+
+/* SUBTRACT */
+ C(0x1b00, SR, RR_a, Z, r1, r2, new, r1_32, sub, subs32)
+ C(0xb9f9, SRK, RRF_a, DO, r2, r3, new, r1_32, sub, subs32)
+ C(0x5b00, S, RX_a, Z, r1, m2_32s, new, r1_32, sub, subs32)
+ C(0xe35b, SY, RXY_a, LD, r1, m2_32s, new, r1_32, sub, subs32)
+ C(0xb909, SGR, RRE, Z, r1, r2, r1, 0, sub, subs64)
+ C(0xb919, SGFR, RRE, Z, r1, r2_32s, r1, 0, sub, subs64)
+ C(0xb9e9, SGRK, RRF_a, DO, r2, r3, r1, 0, sub, subs64)
+ C(0xe309, SG, RXY_a, Z, r1, m2_64, r1, 0, sub, subs64)
+ C(0xe319, SGF, RXY_a, Z, r1, m2_32s, r1, 0, sub, subs64)
+ C(0xb30b, SEBR, RRE, Z, e1, e2, new, e1, seb, f32)
+ C(0xb31b, SDBR, RRE, Z, f1_o, f2_o, f1, 0, sdb, f64)
+ C(0xb34b, SXBR, RRE, Z, 0, x2_o, x1, 0, sxb, f128)
+ C(0xed0b, SEB, RXE, Z, e1, m2_32u, new, e1, seb, f32)
+ C(0xed1b, SDB, RXE, Z, f1_o, m2_64, f1, 0, sdb, f64)
+/* SUBTRACT HALFWORD */
+ C(0x4b00, SH, RX_a, Z, r1, m2_16s, new, r1_32, sub, subs32)
+ C(0xe37b, SHY, RXY_a, LD, r1, m2_16s, new, r1_32, sub, subs32)
+/* SUBTRACT HIGH */
+ C(0xb9c9, SHHHR, RRF_a, HW, r2_sr32, r3_sr32, new, r1_32h, sub, subs32)
+ C(0xb9d9, SHHLR, RRF_a, HW, r2_sr32, r3, new, r1_32h, sub, subs32)
+/* SUBTRACT LOGICAL */
+ C(0x1f00, SLR, RR_a, Z, r1, r2, new, r1_32, sub, subu32)
+ C(0xb9fb, SLRK, RRF_a, DO, r2, r3, new, r1_32, sub, subu32)
+ C(0x5f00, SL, RX_a, Z, r1, m2_32u, new, r1_32, sub, subu32)
+ C(0xe35f, SLY, RXY_a, LD, r1, m2_32u, new, r1_32, sub, subu32)
+ C(0xb90b, SLGR, RRE, Z, r1, r2, r1, 0, sub, subu64)
+ C(0xb91b, SLGFR, RRE, Z, r1, r2_32u, r1, 0, sub, subu64)
+ C(0xb9eb, SLGRK, RRF_a, DO, r2, r3, r1, 0, sub, subu64)
+ C(0xe30b, SLG, RXY_a, Z, r1, m2_64, r1, 0, sub, subu64)
+ C(0xe31b, SLGF, RXY_a, Z, r1, m2_32u, r1, 0, sub, subu64)
+/* SUBTRACT LOGICAL HIGH */
+ C(0xb9cb, SLHHHR, RRF_a, HW, r2_sr32, r3_sr32, new, r1_32h, sub, subu32)
+ C(0xb9db, SLHHLR, RRF_a, HW, r2_sr32, r3, new, r1_32h, sub, subu32)
+/* SUBTRACT LOGICAL IMMEDIATE */
+ C(0xc205, SLFI, RIL_a, EI, r1, i2_32u, new, r1_32, sub, subu32)
+ C(0xc204, SLGFI, RIL_a, EI, r1, i2_32u, r1, 0, sub, subu64)
+/* SUBTRACT LOGICAL WITH BORROW */
+ C(0xb999, SLBR, RRE, Z, r1, r2, new, r1_32, subb, subb32)
+ C(0xb989, SLBGR, RRE, Z, r1, r2, r1, 0, subb, subb64)
+ C(0xe399, SLB, RXY_a, Z, r1, m2_32u, new, r1_32, subb, subb32)
+ C(0xe389, SLBG, RXY_a, Z, r1, m2_64, r1, 0, subb, subb64)
+
+/* SUPERVISOR CALL */
+ C(0x0a00, SVC, I, Z, 0, 0, 0, 0, svc, 0)
+
+/* TEST DATA CLASS */
+ C(0xed10, TCEB, RXE, Z, e1, a2, 0, 0, tceb, 0)
+ C(0xed11, TCDB, RXE, Z, f1_o, a2, 0, 0, tcdb, 0)
+ C(0xed12, TCXB, RXE, Z, x1_o, a2, 0, 0, tcxb, 0)
+
+/* TEST UNDER MASK */
+ C(0x9100, TM, SI, Z, m1_8u, i2_8u, 0, 0, 0, tm32)
+ C(0xeb51, TMY, SIY, LD, m1_8u, i2_8u, 0, 0, 0, tm32)
+ D(0xa702, TMHH, RI_a, Z, r1_o, i2_16u_shl, 0, 0, 0, tm64, 48)
+ D(0xa703, TMHL, RI_a, Z, r1_o, i2_16u_shl, 0, 0, 0, tm64, 32)
+ D(0xa700, TMLH, RI_a, Z, r1_o, i2_16u_shl, 0, 0, 0, tm64, 16)
+ D(0xa701, TMLL, RI_a, Z, r1_o, i2_16u_shl, 0, 0, 0, tm64, 0)
+
+/* TRANSLATE */
+ C(0xdc00, TR, SS_a, Z, la1, a2, 0, 0, tr, 0)
+/* TRANSLATE AND TEST */
+ C(0xdd00, TRT, SS_a, Z, la1, a2, 0, 0, trt, 0)
+/* TRANSLATE EXTENDED */
+ C(0xb2a5, TRE, RRE, Z, 0, r2, r1_P, 0, tre, 0)
+
+/* UNPACK */
+ /* Really format SS_b, but we pack both lengths into one argument
+ for the helper call, so we might as well leave one 8-bit field. */
+ C(0xf300, UNPK, SS_a, Z, la1, a2, 0, 0, unpk, 0)
+
+#ifndef CONFIG_USER_ONLY
+/* COMPARE AND SWAP AND PURGE */
+ C(0xb250, CSP, RRE, Z, 0, ra2, 0, 0, csp, 0)
+/* DIAGNOSE (KVM hypercall) */
+ C(0x8300, DIAG, RSI, Z, 0, 0, 0, 0, diag, 0)
+/* INSERT STORAGE KEY EXTENDED */
+ C(0xb229, ISKE, RRE, Z, 0, r2_o, new, r1_8, iske, 0)
+/* INVALIDATE PAGE TABLE ENTRY */
+ C(0xb221, IPTE, RRF_a, Z, r1_o, r2_o, 0, 0, ipte, 0)
+/* LOAD CONTROL */
+ C(0xb700, LCTL, RS_a, Z, 0, a2, 0, 0, lctl, 0)
+ C(0xeb2f, LCTLG, RSY_a, Z, 0, a2, 0, 0, lctlg, 0)
+/* LOAD PSW */
+ C(0x8200, LPSW, S, Z, 0, a2, 0, 0, lpsw, 0)
+/* LOAD PSW EXTENDED */
+ C(0xb2b2, LPSWE, S, Z, 0, a2, 0, 0, lpswe, 0)
+/* LOAD REAL ADDRESS */
+ C(0xb100, LRA, RX_a, Z, 0, a2, r1, 0, lra, 0)
+ C(0xe313, LRAY, RXY_a, LD, 0, a2, r1, 0, lra, 0)
+ C(0xe303, LRAG, RXY_a, Z, 0, a2, r1, 0, lra, 0)
+/* LOAD USING REAL ADDRESS */
+ C(0xb24b, LURA, RRE, Z, 0, r2, new, r1_32, lura, 0)
+ C(0xb905, LURAG, RRE, Z, 0, r2, r1, 0, lurag, 0)
+/* MOVE TO PRIMARY */
+ C(0xda00, MVCP, SS_d, Z, la1, a2, 0, 0, mvcp, 0)
+/* MOVE TO SECONDARY */
+ C(0xdb00, MVCS, SS_d, Z, la1, a2, 0, 0, mvcs, 0)
+/* PURGE TLB */
+ C(0xb20d, PTLB, S, Z, 0, 0, 0, 0, ptlb, 0)
+/* RESET REFERENCE BIT EXTENDED */
+ C(0xb22a, RRBE, RRE, Z, 0, r2_o, 0, 0, rrbe, 0)
+/* SERVICE CALL LOGICAL PROCESSOR (PV hypercall) */
+ C(0xb220, SERVC, RRE, Z, r1_o, r2_o, 0, 0, servc, 0)
+/* SET ADDRESS SPACE CONTROL FAST */
+ C(0xb279, SACF, S, Z, 0, a2, 0, 0, sacf, 0)
+/* SET CLOCK */
+ /* ??? Not implemented - is it necessary? */
+ C(0xb204, SCK, S, Z, 0, 0, 0, 0, 0, 0)
+/* SET CLOCK COMPARATOR */
+ C(0xb206, SCKC, S, Z, 0, m2_64, 0, 0, sckc, 0)
+/* SET CPU TIMER */
+ C(0xb208, SPT, S, Z, 0, m2_64, 0, 0, spt, 0)
+/* SET PREFIX */
+ C(0xb210, SPX, S, Z, 0, m2_32u, 0, 0, spx, 0)
+/* SET PSW KEY FROM ADDRESS */
+ C(0xb20a, SPKA, S, Z, 0, a2, 0, 0, spka, 0)
+/* SET STORAGE KEY EXTENDED */
+ C(0xb22b, SSKE, RRF_c, Z, r1_o, r2_o, 0, 0, sske, 0)
+/* SET SYSTEM MASK */
+ C(0x8000, SSM, S, Z, 0, m2_8u, 0, 0, ssm, 0)
+/* SIGNAL PROCESSOR */
+ C(0xae00, SIGP, RS_a, Z, r3_o, a2, 0, 0, sigp, 0)
+/* STORE CLOCK */
+ C(0xb205, STCK, S, Z, la2, 0, new, m1_64, stck, 0)
+ C(0xb27c, STCKF, S, SCF, la2, 0, new, m1_64, stck, 0)
+/* STORE CLOCK EXTENDED */
+ C(0xb278, STCKE, S, Z, 0, a2, 0, 0, stcke, 0)
+/* STORE CLOCK COMPARATOR */
+ C(0xb207, STCKC, S, Z, la2, 0, new, m1_64, stckc, 0)
+/* STORE CONTROL */
+ C(0xb600, STCTL, RS_a, Z, 0, a2, 0, 0, stctl, 0)
+ C(0xeb25, STCTG, RSY_a, Z, 0, a2, 0, 0, stctg, 0)
+/* STORE CPU ADDRESS */
+ C(0xb212, STAP, S, Z, la2, 0, new, m1_16, stap, 0)
+/* STORE CPU ID */
+ C(0xb202, STIDP, S, Z, la2, 0, new, m1_64, stidp, 0)
+/* STORE CPU TIMER */
+ C(0xb209, STPT, S, Z, la2, 0, new, m1_64, stpt, 0)
+/* STORE FACILITY LIST */
+ C(0xb2b1, STFL, S, Z, 0, 0, 0, 0, stfl, 0)
+/* STORE PREFIX */
+ C(0xb211, STPX, S, Z, la2, 0, new, m1_32, stpx, 0)
+/* STORE SYSTEM INFORMATION */
+ C(0xb27d, STSI, S, Z, 0, a2, 0, 0, stsi, 0)
+/* STORE THEN AND SYSTEM MASK */
+ C(0xac00, STNSM, SI, Z, la1, 0, 0, 0, stnosm, 0)
+/* STORE THEN OR SYSTEM MASK */
+ C(0xad00, STOSM, SI, Z, la1, 0, 0, 0, stnosm, 0)
+/* STORE USING REAL ADDRESS */
+ C(0xb246, STURA, RRE, Z, r1_o, r2_o, 0, 0, stura, 0)
+ C(0xb925, STURG, RRE, Z, r1_o, r2_o, 0, 0, sturg, 0)
+/* TEST PROTECTION */
+ C(0xe501, TPROT, SSE, Z, la1, a2, 0, 0, tprot, 0)
+
+/* CCW I/O Instructions */
+ C(0xb276, XSCH, S, Z, 0, 0, 0, 0, xsch, 0)
+ C(0xb230, CSCH, S, Z, 0, 0, 0, 0, csch, 0)
+ C(0xb231, HSCH, S, Z, 0, 0, 0, 0, hsch, 0)
+ C(0xb232, MSCH, S, Z, 0, insn, 0, 0, msch, 0)
+ C(0xb23b, RCHP, S, Z, 0, 0, 0, 0, rchp, 0)
+ C(0xb238, RSCH, S, Z, 0, 0, 0, 0, rsch, 0)
+ C(0xb233, SSCH, S, Z, 0, insn, 0, 0, ssch, 0)
+ C(0xb234, STSCH, S, Z, 0, insn, 0, 0, stsch, 0)
+ C(0xb235, TSCH, S, Z, 0, insn, 0, 0, tsch, 0)
+ /* ??? Not listed in PoO ninth edition, but there's a linux driver that
+ uses it: "A CHSC subchannel is usually present on LPAR only." */
+ C(0xb25f, CHSC, RRE, Z, 0, insn, 0, 0, chsc, 0)
+#endif /* CONFIG_USER_ONLY */
diff --git a/target/s390x/insn-format.def b/target/s390x/insn-format.def
new file mode 100644
index 0000000000..0e898b90bd
--- /dev/null
+++ b/target/s390x/insn-format.def
@@ -0,0 +1,55 @@
+/* Description of s390 insn formats. */
+/* NAME F1, F2... */
+F0(E)
+F1(I, I(1, 8, 8))
+F2(RI_a, R(1, 8), I(2,16,16))
+F2(RI_b, R(1, 8), I(2,16,16))
+F2(RI_c, M(1, 8), I(2,16,16))
+F3(RIE_a, R(1, 8), I(2,16,16), M(3,32))
+F4(RIE_b, R(1, 8), R(2,12), M(3,32), I(4,16,16))
+F4(RIE_c, R(1, 8), I(2,32, 8), M(3,12), I(4,16,16))
+F3(RIE_d, R(1, 8), I(2,16,16), R(3,12))
+F3(RIE_e, R(1, 8), I(2,16,16), R(3,12))
+F5(RIE_f, R(1, 8), R(2,12), I(3,16,8), I(4,24,8), I(5,32,8))
+F2(RIL_a, R(1, 8), I(2,16,32))
+F2(RIL_b, R(1, 8), I(2,16,32))
+F2(RIL_c, M(1, 8), I(2,16,32))
+F4(RIS, R(1, 8), I(2,32, 8), M(3,12), BD(4,16,20))
+/* ??? The PoO does not call out subtypes _a and _b for RR, as it does
+ for e.g. RX. Our checking requires this for e.g. BCR. */
+F2(RR_a, R(1, 8), R(2,12))
+F2(RR_b, M(1, 8), R(2,12))
+F2(RRE, R(1,24), R(2,28))
+F3(RRD, R(1,16), R(2,28), R(3,24))
+F4(RRF_a, R(1,24), R(2,28), R(3,16), M(4,20))
+F4(RRF_b, R(1,24), R(2,28), R(3,16), M(4,20))
+F4(RRF_c, R(1,24), R(2,28), M(3,16), M(4,20))
+F4(RRF_d, R(1,24), R(2,28), M(3,16), M(4,20))
+F4(RRF_e, R(1,24), R(2,28), M(3,16), M(4,20))
+F4(RRS, R(1, 8), R(2,12), M(3,32), BD(4,16,20))
+F3(RS_a, R(1, 8), BD(2,16,20), R(3,12))
+F3(RS_b, R(1, 8), BD(2,16,20), M(3,12))
+F3(RSI, R(1, 8), I(2,16,16), R(3,12))
+F2(RSL, L(1, 8, 4), BD(1,16,20))
+F3(RSY_a, R(1, 8), BDL(2), R(3,12))
+F3(RSY_b, R(1, 8), BDL(2), M(3,12))
+F2(RX_a, R(1, 8), BXD(2))
+F2(RX_b, M(1, 8), BXD(2))
+F2(RXE, R(1, 8), BXD(2))
+F3(RXF, R(1,32), BXD(2), R(3, 8))
+F2(RXY_a, R(1, 8), BXDL(2))
+F2(RXY_b, M(1, 8), BXDL(2))
+F1(S, BD(2,16,20))
+F2(SI, BD(1,16,20), I(2,8,8))
+F2(SIL, BD(1,16,20), I(2,32,16))
+F2(SIY, BDL(1), I(2, 8, 8))
+F3(SS_a, L(1, 8, 8), BD(1,16,20), BD(2,32,36))
+F4(SS_b, L(1, 8, 4), BD(1,16,20), L(2,12,4), BD(2,32,36))
+F4(SS_c, L(1, 8, 4), BD(1,16,20), BD(2,32,36), I(3,12, 4))
+/* ??? Odd man out. The L1 field here is really a register, but the
+ easy way to compress the fields has R1 and B1 overlap. */
+F4(SS_d, L(1, 8, 4), BD(1,16,20), BD(2,32,36), R(3,12))
+F4(SS_e, R(1, 8), BD(2,16,20), R(3,12), BD(4,32,36))
+F3(SS_f, BD(1,16,20), L(2,8,8), BD(2,32,36))
+F2(SSE, BD(1,16,20), BD(2,32,36))
+F3(SSF, BD(1,16,20), BD(2,32,36), R(3,8))
diff --git a/target/s390x/int_helper.c b/target/s390x/int_helper.c
new file mode 100644
index 0000000000..370c94da55
--- /dev/null
+++ b/target/s390x/int_helper.c
@@ -0,0 +1,156 @@
+/*
+ * S/390 integer helper routines
+ *
+ * Copyright (c) 2009 Ulrich Hecht
+ * Copyright (c) 2009 Alexander Graf
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "qemu/host-utils.h"
+#include "exec/helper-proto.h"
+
+/* #define DEBUG_HELPER */
+#ifdef DEBUG_HELPER
+#define HELPER_LOG(x...) qemu_log(x)
+#else
+#define HELPER_LOG(x...)
+#endif
+
+/* 64/32 -> 32 signed division */
+int64_t HELPER(divs32)(CPUS390XState *env, int64_t a, int64_t b64)
+{
+ int32_t ret, b = b64;
+ int64_t q;
+
+ if (b == 0) {
+ runtime_exception(env, PGM_FIXPT_DIVIDE, GETPC());
+ }
+
+ ret = q = a / b;
+ env->retxl = a % b;
+
+ /* Catch non-representable quotient. */
+ if (ret != q) {
+ runtime_exception(env, PGM_FIXPT_DIVIDE, GETPC());
+ }
+
+ return ret;
+}
+
+/* 64/32 -> 32 unsigned division */
+uint64_t HELPER(divu32)(CPUS390XState *env, uint64_t a, uint64_t b64)
+{
+ uint32_t ret, b = b64;
+ uint64_t q;
+
+ if (b == 0) {
+ runtime_exception(env, PGM_FIXPT_DIVIDE, GETPC());
+ }
+
+ ret = q = a / b;
+ env->retxl = a % b;
+
+ /* Catch non-representable quotient. */
+ if (ret != q) {
+ runtime_exception(env, PGM_FIXPT_DIVIDE, GETPC());
+ }
+
+ return ret;
+}
+
+/* 64/64 -> 64 signed division */
+int64_t HELPER(divs64)(CPUS390XState *env, int64_t a, int64_t b)
+{
+ /* Catch divide by zero, and non-representable quotient (MIN / -1). */
+ if (b == 0 || (b == -1 && a == (1ll << 63))) {
+ runtime_exception(env, PGM_FIXPT_DIVIDE, GETPC());
+ }
+ env->retxl = a % b;
+ return a / b;
+}
+
+/* 128 -> 64/64 unsigned division */
+uint64_t HELPER(divu64)(CPUS390XState *env, uint64_t ah, uint64_t al,
+ uint64_t b)
+{
+ uint64_t ret;
+ /* Signal divide by zero. */
+ if (b == 0) {
+ runtime_exception(env, PGM_FIXPT_DIVIDE, GETPC());
+ }
+ if (ah == 0) {
+ /* 64 -> 64/64 case */
+ env->retxl = al % b;
+ ret = al / b;
+ } else {
+ /* ??? Move i386 idivq helper to host-utils. */
+#ifdef CONFIG_INT128
+ __uint128_t a = ((__uint128_t)ah << 64) | al;
+ __uint128_t q = a / b;
+ env->retxl = a % b;
+ ret = q;
+ if (ret != q) {
+ runtime_exception(env, PGM_FIXPT_DIVIDE, GETPC());
+ }
+#else
+ S390CPU *cpu = s390_env_get_cpu(env);
+ /* 32-bit hosts would need special wrapper functionality - just abort if
+ we encounter such a case; it's very unlikely anyway. */
+ cpu_abort(CPU(cpu), "128 -> 64/64 division not implemented\n");
+#endif
+ }
+ return ret;
+}
+
+/* Count leading zeros, used by FIND LEFTMOST ONE. */
+uint64_t HELPER(clz)(uint64_t v)
+{
+ return clz64(v);
+}
+
+uint64_t HELPER(cvd)(int32_t reg)
+{
+ /* positive 0 */
+ uint64_t dec = 0x0c;
+ int64_t bin = reg;
+ int shift;
+
+ if (bin < 0) {
+ bin = -bin;
+ dec = 0x0d;
+ }
+
+ for (shift = 4; (shift < 64) && bin; shift += 4) {
+ dec |= (bin % 10) << shift;
+ bin /= 10;
+ }
+
+ return dec;
+}
+
+uint64_t HELPER(popcnt)(uint64_t r2)
+{
+ uint64_t ret = 0;
+ int i;
+
+ for (i = 0; i < 64; i += 8) {
+ uint64_t t = ctpop32((r2 >> i) & 0xff);
+ ret |= t << i;
+ }
+ return ret;
+}
diff --git a/target/s390x/interrupt.c b/target/s390x/interrupt.c
new file mode 100644
index 0000000000..9edef96795
--- /dev/null
+++ b/target/s390x/interrupt.c
@@ -0,0 +1,120 @@
+/*
+ * QEMU S/390 Interrupt support
+ *
+ * Copyright IBM Corp. 2012, 2014
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at your
+ * option) any later version. See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "sysemu/kvm.h"
+#include "hw/s390x/ioinst.h"
+
+#if !defined(CONFIG_USER_ONLY)
+void cpu_inject_ext(S390CPU *cpu, uint32_t code, uint32_t param,
+ uint64_t param64)
+{
+ CPUS390XState *env = &cpu->env;
+
+ if (env->ext_index == MAX_EXT_QUEUE - 1) {
+ /* ugh - can't queue anymore. Let's drop. */
+ return;
+ }
+
+ env->ext_index++;
+ assert(env->ext_index < MAX_EXT_QUEUE);
+
+ env->ext_queue[env->ext_index].code = code;
+ env->ext_queue[env->ext_index].param = param;
+ env->ext_queue[env->ext_index].param64 = param64;
+
+ env->pending_int |= INTERRUPT_EXT;
+ cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
+}
+
+static void cpu_inject_io(S390CPU *cpu, uint16_t subchannel_id,
+ uint16_t subchannel_number,
+ uint32_t io_int_parm, uint32_t io_int_word)
+{
+ CPUS390XState *env = &cpu->env;
+ int isc = IO_INT_WORD_ISC(io_int_word);
+
+ if (env->io_index[isc] == MAX_IO_QUEUE - 1) {
+ /* ugh - can't queue anymore. Let's drop. */
+ return;
+ }
+
+ env->io_index[isc]++;
+ assert(env->io_index[isc] < MAX_IO_QUEUE);
+
+ env->io_queue[env->io_index[isc]][isc].id = subchannel_id;
+ env->io_queue[env->io_index[isc]][isc].nr = subchannel_number;
+ env->io_queue[env->io_index[isc]][isc].parm = io_int_parm;
+ env->io_queue[env->io_index[isc]][isc].word = io_int_word;
+
+ env->pending_int |= INTERRUPT_IO;
+ cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
+}
+
+static void cpu_inject_crw_mchk(S390CPU *cpu)
+{
+ CPUS390XState *env = &cpu->env;
+
+ if (env->mchk_index == MAX_MCHK_QUEUE - 1) {
+ /* ugh - can't queue anymore. Let's drop. */
+ return;
+ }
+
+ env->mchk_index++;
+ assert(env->mchk_index < MAX_MCHK_QUEUE);
+
+ env->mchk_queue[env->mchk_index].type = 1;
+
+ env->pending_int |= INTERRUPT_MCHK;
+ cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
+}
+
+/*
+ * All of the following interrupts are floating, i.e. not per-vcpu.
+ * We just need a dummy cpustate in order to be able to inject in the
+ * non-kvm case.
+ */
+void s390_sclp_extint(uint32_t parm)
+{
+ if (kvm_enabled()) {
+ kvm_s390_service_interrupt(parm);
+ } else {
+ S390CPU *dummy_cpu = s390_cpu_addr2state(0);
+
+ cpu_inject_ext(dummy_cpu, EXT_SERVICE, parm, 0);
+ }
+}
+
+void s390_io_interrupt(uint16_t subchannel_id, uint16_t subchannel_nr,
+ uint32_t io_int_parm, uint32_t io_int_word)
+{
+ if (kvm_enabled()) {
+ kvm_s390_io_interrupt(subchannel_id, subchannel_nr, io_int_parm,
+ io_int_word);
+ } else {
+ S390CPU *dummy_cpu = s390_cpu_addr2state(0);
+
+ cpu_inject_io(dummy_cpu, subchannel_id, subchannel_nr, io_int_parm,
+ io_int_word);
+ }
+}
+
+void s390_crw_mchk(void)
+{
+ if (kvm_enabled()) {
+ kvm_s390_crw_mchk();
+ } else {
+ S390CPU *dummy_cpu = s390_cpu_addr2state(0);
+
+ cpu_inject_crw_mchk(dummy_cpu);
+ }
+}
+
+#endif
diff --git a/target/s390x/ioinst.c b/target/s390x/ioinst.c
new file mode 100644
index 0000000000..590bfa4f12
--- /dev/null
+++ b/target/s390x/ioinst.c
@@ -0,0 +1,834 @@
+/*
+ * I/O instructions for S/390
+ *
+ * Copyright 2012, 2015 IBM Corp.
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or (at
+ * your option) any later version. See the COPYING file in the top-level
+ * directory.
+ */
+
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "hw/s390x/ioinst.h"
+#include "trace.h"
+#include "hw/s390x/s390-pci-bus.h"
+
+int ioinst_disassemble_sch_ident(uint32_t value, int *m, int *cssid, int *ssid,
+ int *schid)
+{
+ if (!IOINST_SCHID_ONE(value)) {
+ return -EINVAL;
+ }
+ if (!IOINST_SCHID_M(value)) {
+ if (IOINST_SCHID_CSSID(value)) {
+ return -EINVAL;
+ }
+ *cssid = 0;
+ *m = 0;
+ } else {
+ *cssid = IOINST_SCHID_CSSID(value);
+ *m = 1;
+ }
+ *ssid = IOINST_SCHID_SSID(value);
+ *schid = IOINST_SCHID_NR(value);
+ return 0;
+}
+
+void ioinst_handle_xsch(S390CPU *cpu, uint64_t reg1)
+{
+ int cssid, ssid, schid, m;
+ SubchDev *sch;
+ int ret = -ENODEV;
+ int cc;
+
+ if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
+ program_interrupt(&cpu->env, PGM_OPERAND, 2);
+ return;
+ }
+ trace_ioinst_sch_id("xsch", cssid, ssid, schid);
+ sch = css_find_subch(m, cssid, ssid, schid);
+ if (sch && css_subch_visible(sch)) {
+ ret = css_do_xsch(sch);
+ }
+ switch (ret) {
+ case -ENODEV:
+ cc = 3;
+ break;
+ case -EBUSY:
+ cc = 2;
+ break;
+ case 0:
+ cc = 0;
+ break;
+ default:
+ cc = 1;
+ break;
+ }
+ setcc(cpu, cc);
+}
+
+void ioinst_handle_csch(S390CPU *cpu, uint64_t reg1)
+{
+ int cssid, ssid, schid, m;
+ SubchDev *sch;
+ int ret = -ENODEV;
+ int cc;
+
+ if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
+ program_interrupt(&cpu->env, PGM_OPERAND, 2);
+ return;
+ }
+ trace_ioinst_sch_id("csch", cssid, ssid, schid);
+ sch = css_find_subch(m, cssid, ssid, schid);
+ if (sch && css_subch_visible(sch)) {
+ ret = css_do_csch(sch);
+ }
+ if (ret == -ENODEV) {
+ cc = 3;
+ } else {
+ cc = 0;
+ }
+ setcc(cpu, cc);
+}
+
+void ioinst_handle_hsch(S390CPU *cpu, uint64_t reg1)
+{
+ int cssid, ssid, schid, m;
+ SubchDev *sch;
+ int ret = -ENODEV;
+ int cc;
+
+ if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
+ program_interrupt(&cpu->env, PGM_OPERAND, 2);
+ return;
+ }
+ trace_ioinst_sch_id("hsch", cssid, ssid, schid);
+ sch = css_find_subch(m, cssid, ssid, schid);
+ if (sch && css_subch_visible(sch)) {
+ ret = css_do_hsch(sch);
+ }
+ switch (ret) {
+ case -ENODEV:
+ cc = 3;
+ break;
+ case -EBUSY:
+ cc = 2;
+ break;
+ case 0:
+ cc = 0;
+ break;
+ default:
+ cc = 1;
+ break;
+ }
+ setcc(cpu, cc);
+}
+
+static int ioinst_schib_valid(SCHIB *schib)
+{
+ if ((be16_to_cpu(schib->pmcw.flags) & PMCW_FLAGS_MASK_INVALID) ||
+ (be32_to_cpu(schib->pmcw.chars) & PMCW_CHARS_MASK_INVALID)) {
+ return 0;
+ }
+ /* Disallow extended measurements for now. */
+ if (be32_to_cpu(schib->pmcw.chars) & PMCW_CHARS_MASK_XMWME) {
+ return 0;
+ }
+ return 1;
+}
+
+void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
+{
+ int cssid, ssid, schid, m;
+ SubchDev *sch;
+ SCHIB schib;
+ uint64_t addr;
+ int ret = -ENODEV;
+ int cc;
+ CPUS390XState *env = &cpu->env;
+ uint8_t ar;
+
+ addr = decode_basedisp_s(env, ipb, &ar);
+ if (addr & 3) {
+ program_interrupt(env, PGM_SPECIFICATION, 2);
+ return;
+ }
+ if (s390_cpu_virt_mem_read(cpu, addr, ar, &schib, sizeof(schib))) {
+ return;
+ }
+ if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid) ||
+ !ioinst_schib_valid(&schib)) {
+ program_interrupt(env, PGM_OPERAND, 2);
+ return;
+ }
+ trace_ioinst_sch_id("msch", cssid, ssid, schid);
+ sch = css_find_subch(m, cssid, ssid, schid);
+ if (sch && css_subch_visible(sch)) {
+ ret = css_do_msch(sch, &schib);
+ }
+ switch (ret) {
+ case -ENODEV:
+ cc = 3;
+ break;
+ case -EBUSY:
+ cc = 2;
+ break;
+ case 0:
+ cc = 0;
+ break;
+ default:
+ cc = 1;
+ break;
+ }
+ setcc(cpu, cc);
+}
+
+static void copy_orb_from_guest(ORB *dest, const ORB *src)
+{
+ dest->intparm = be32_to_cpu(src->intparm);
+ dest->ctrl0 = be16_to_cpu(src->ctrl0);
+ dest->lpm = src->lpm;
+ dest->ctrl1 = src->ctrl1;
+ dest->cpa = be32_to_cpu(src->cpa);
+}
+
+static int ioinst_orb_valid(ORB *orb)
+{
+ if ((orb->ctrl0 & ORB_CTRL0_MASK_INVALID) ||
+ (orb->ctrl1 & ORB_CTRL1_MASK_INVALID)) {
+ return 0;
+ }
+ if ((orb->cpa & HIGH_ORDER_BIT) != 0) {
+ return 0;
+ }
+ return 1;
+}
+
+void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
+{
+ int cssid, ssid, schid, m;
+ SubchDev *sch;
+ ORB orig_orb, orb;
+ uint64_t addr;
+ int ret = -ENODEV;
+ int cc;
+ CPUS390XState *env = &cpu->env;
+ uint8_t ar;
+
+ addr = decode_basedisp_s(env, ipb, &ar);
+ if (addr & 3) {
+ program_interrupt(env, PGM_SPECIFICATION, 2);
+ return;
+ }
+ if (s390_cpu_virt_mem_read(cpu, addr, ar, &orig_orb, sizeof(orb))) {
+ return;
+ }
+ copy_orb_from_guest(&orb, &orig_orb);
+ if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid) ||
+ !ioinst_orb_valid(&orb)) {
+ program_interrupt(env, PGM_OPERAND, 2);
+ return;
+ }
+ trace_ioinst_sch_id("ssch", cssid, ssid, schid);
+ sch = css_find_subch(m, cssid, ssid, schid);
+ if (sch && css_subch_visible(sch)) {
+ ret = css_do_ssch(sch, &orb);
+ }
+ switch (ret) {
+ case -ENODEV:
+ cc = 3;
+ break;
+ case -EBUSY:
+ cc = 2;
+ break;
+ case 0:
+ cc = 0;
+ break;
+ default:
+ cc = 1;
+ break;
+ }
+ setcc(cpu, cc);
+}
+
+void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb)
+{
+ CRW crw;
+ uint64_t addr;
+ int cc;
+ CPUS390XState *env = &cpu->env;
+ uint8_t ar;
+
+ addr = decode_basedisp_s(env, ipb, &ar);
+ if (addr & 3) {
+ program_interrupt(env, PGM_SPECIFICATION, 2);
+ return;
+ }
+
+ cc = css_do_stcrw(&crw);
+ /* 0 - crw stored, 1 - zeroes stored */
+
+ if (s390_cpu_virt_mem_write(cpu, addr, ar, &crw, sizeof(crw)) == 0) {
+ setcc(cpu, cc);
+ } else if (cc == 0) {
+ /* Write failed: requeue CRW since STCRW is a suppressing instruction */
+ css_undo_stcrw(&crw);
+ }
+}
+
+void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
+{
+ int cssid, ssid, schid, m;
+ SubchDev *sch;
+ uint64_t addr;
+ int cc;
+ SCHIB schib;
+ CPUS390XState *env = &cpu->env;
+ uint8_t ar;
+
+ addr = decode_basedisp_s(env, ipb, &ar);
+ if (addr & 3) {
+ program_interrupt(env, PGM_SPECIFICATION, 2);
+ return;
+ }
+
+ if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
+ /*
+ * As operand exceptions have a lower priority than access exceptions,
+ * we check whether the memory area is writeable (injecting the
+ * access exception if it is not) first.
+ */
+ if (!s390_cpu_virt_mem_check_write(cpu, addr, ar, sizeof(schib))) {
+ program_interrupt(env, PGM_OPERAND, 2);
+ }
+ return;
+ }
+ trace_ioinst_sch_id("stsch", cssid, ssid, schid);
+ sch = css_find_subch(m, cssid, ssid, schid);
+ if (sch) {
+ if (css_subch_visible(sch)) {
+ css_do_stsch(sch, &schib);
+ cc = 0;
+ } else {
+ /* Indicate no more subchannels in this css/ss */
+ cc = 3;
+ }
+ } else {
+ if (css_schid_final(m, cssid, ssid, schid)) {
+ cc = 3; /* No more subchannels in this css/ss */
+ } else {
+ /* Store an empty schib. */
+ memset(&schib, 0, sizeof(schib));
+ cc = 0;
+ }
+ }
+ if (cc != 3) {
+ if (s390_cpu_virt_mem_write(cpu, addr, ar, &schib,
+ sizeof(schib)) != 0) {
+ return;
+ }
+ } else {
+ /* Access exceptions have a higher priority than cc3 */
+ if (s390_cpu_virt_mem_check_write(cpu, addr, ar, sizeof(schib)) != 0) {
+ return;
+ }
+ }
+ setcc(cpu, cc);
+}
+
+int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb)
+{
+ CPUS390XState *env = &cpu->env;
+ int cssid, ssid, schid, m;
+ SubchDev *sch;
+ IRB irb;
+ uint64_t addr;
+ int cc, irb_len;
+ uint8_t ar;
+
+ if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
+ program_interrupt(env, PGM_OPERAND, 2);
+ return -EIO;
+ }
+ trace_ioinst_sch_id("tsch", cssid, ssid, schid);
+ addr = decode_basedisp_s(env, ipb, &ar);
+ if (addr & 3) {
+ program_interrupt(env, PGM_SPECIFICATION, 2);
+ return -EIO;
+ }
+
+ sch = css_find_subch(m, cssid, ssid, schid);
+ if (sch && css_subch_visible(sch)) {
+ cc = css_do_tsch_get_irb(sch, &irb, &irb_len);
+ } else {
+ cc = 3;
+ }
+ /* 0 - status pending, 1 - not status pending, 3 - not operational */
+ if (cc != 3) {
+ if (s390_cpu_virt_mem_write(cpu, addr, ar, &irb, irb_len) != 0) {
+ return -EFAULT;
+ }
+ css_do_tsch_update_subch(sch);
+ } else {
+ irb_len = sizeof(irb) - sizeof(irb.emw);
+ /* Access exceptions have a higher priority than cc3 */
+ if (s390_cpu_virt_mem_check_write(cpu, addr, ar, irb_len) != 0) {
+ return -EFAULT;
+ }
+ }
+
+ setcc(cpu, cc);
+ return 0;
+}
+
+typedef struct ChscReq {
+ uint16_t len;
+ uint16_t command;
+ uint32_t param0;
+ uint32_t param1;
+ uint32_t param2;
+} QEMU_PACKED ChscReq;
+
+typedef struct ChscResp {
+ uint16_t len;
+ uint16_t code;
+ uint32_t param;
+ char data[0];
+} QEMU_PACKED ChscResp;
+
+#define CHSC_MIN_RESP_LEN 0x0008
+
+#define CHSC_SCPD 0x0002
+#define CHSC_SCSC 0x0010
+#define CHSC_SDA 0x0031
+#define CHSC_SEI 0x000e
+
+#define CHSC_SCPD_0_M 0x20000000
+#define CHSC_SCPD_0_C 0x10000000
+#define CHSC_SCPD_0_FMT 0x0f000000
+#define CHSC_SCPD_0_CSSID 0x00ff0000
+#define CHSC_SCPD_0_RFMT 0x00000f00
+#define CHSC_SCPD_0_RES 0xc000f000
+#define CHSC_SCPD_1_RES 0xffffff00
+#define CHSC_SCPD_01_CHPID 0x000000ff
+static void ioinst_handle_chsc_scpd(ChscReq *req, ChscResp *res)
+{
+ uint16_t len = be16_to_cpu(req->len);
+ uint32_t param0 = be32_to_cpu(req->param0);
+ uint32_t param1 = be32_to_cpu(req->param1);
+ uint16_t resp_code;
+ int rfmt;
+ uint16_t cssid;
+ uint8_t f_chpid, l_chpid;
+ int desc_size;
+ int m;
+
+ rfmt = (param0 & CHSC_SCPD_0_RFMT) >> 8;
+ if ((rfmt == 0) || (rfmt == 1)) {
+ rfmt = !!(param0 & CHSC_SCPD_0_C);
+ }
+ if ((len != 0x0010) || (param0 & CHSC_SCPD_0_RES) ||
+ (param1 & CHSC_SCPD_1_RES) || req->param2) {
+ resp_code = 0x0003;
+ goto out_err;
+ }
+ if (param0 & CHSC_SCPD_0_FMT) {
+ resp_code = 0x0007;
+ goto out_err;
+ }
+ cssid = (param0 & CHSC_SCPD_0_CSSID) >> 16;
+ m = param0 & CHSC_SCPD_0_M;
+ if (cssid != 0) {
+ if (!m || !css_present(cssid)) {
+ resp_code = 0x0008;
+ goto out_err;
+ }
+ }
+ f_chpid = param0 & CHSC_SCPD_01_CHPID;
+ l_chpid = param1 & CHSC_SCPD_01_CHPID;
+ if (l_chpid < f_chpid) {
+ resp_code = 0x0003;
+ goto out_err;
+ }
+ /* css_collect_chp_desc() is endian-aware */
+ desc_size = css_collect_chp_desc(m, cssid, f_chpid, l_chpid, rfmt,
+ &res->data);
+ res->code = cpu_to_be16(0x0001);
+ res->len = cpu_to_be16(8 + desc_size);
+ res->param = cpu_to_be32(rfmt);
+ return;
+
+ out_err:
+ res->code = cpu_to_be16(resp_code);
+ res->len = cpu_to_be16(CHSC_MIN_RESP_LEN);
+ res->param = cpu_to_be32(rfmt);
+}
+
+#define CHSC_SCSC_0_M 0x20000000
+#define CHSC_SCSC_0_FMT 0x000f0000
+#define CHSC_SCSC_0_CSSID 0x0000ff00
+#define CHSC_SCSC_0_RES 0xdff000ff
+static void ioinst_handle_chsc_scsc(ChscReq *req, ChscResp *res)
+{
+ uint16_t len = be16_to_cpu(req->len);
+ uint32_t param0 = be32_to_cpu(req->param0);
+ uint8_t cssid;
+ uint16_t resp_code;
+ uint32_t general_chars[510];
+ uint32_t chsc_chars[508];
+
+ if (len != 0x0010) {
+ resp_code = 0x0003;
+ goto out_err;
+ }
+
+ if (param0 & CHSC_SCSC_0_FMT) {
+ resp_code = 0x0007;
+ goto out_err;
+ }
+ cssid = (param0 & CHSC_SCSC_0_CSSID) >> 8;
+ if (cssid != 0) {
+ if (!(param0 & CHSC_SCSC_0_M) || !css_present(cssid)) {
+ resp_code = 0x0008;
+ goto out_err;
+ }
+ }
+ if ((param0 & CHSC_SCSC_0_RES) || req->param1 || req->param2) {
+ resp_code = 0x0003;
+ goto out_err;
+ }
+ res->code = cpu_to_be16(0x0001);
+ res->len = cpu_to_be16(4080);
+ res->param = 0;
+
+ memset(general_chars, 0, sizeof(general_chars));
+ memset(chsc_chars, 0, sizeof(chsc_chars));
+
+ general_chars[0] = cpu_to_be32(0x03000000);
+ general_chars[1] = cpu_to_be32(0x00079000);
+ general_chars[3] = cpu_to_be32(0x00080000);
+
+ chsc_chars[0] = cpu_to_be32(0x40000000);
+ chsc_chars[3] = cpu_to_be32(0x00040000);
+
+ memcpy(res->data, general_chars, sizeof(general_chars));
+ memcpy(res->data + sizeof(general_chars), chsc_chars, sizeof(chsc_chars));
+ return;
+
+ out_err:
+ res->code = cpu_to_be16(resp_code);
+ res->len = cpu_to_be16(CHSC_MIN_RESP_LEN);
+ res->param = 0;
+}
+
+#define CHSC_SDA_0_FMT 0x0f000000
+#define CHSC_SDA_0_OC 0x0000ffff
+#define CHSC_SDA_0_RES 0xf0ff0000
+#define CHSC_SDA_OC_MCSSE 0x0
+#define CHSC_SDA_OC_MSS 0x2
+static void ioinst_handle_chsc_sda(ChscReq *req, ChscResp *res)
+{
+ uint16_t resp_code = 0x0001;
+ uint16_t len = be16_to_cpu(req->len);
+ uint32_t param0 = be32_to_cpu(req->param0);
+ uint16_t oc;
+ int ret;
+
+ if ((len != 0x0400) || (param0 & CHSC_SDA_0_RES)) {
+ resp_code = 0x0003;
+ goto out;
+ }
+
+ if (param0 & CHSC_SDA_0_FMT) {
+ resp_code = 0x0007;
+ goto out;
+ }
+
+ oc = param0 & CHSC_SDA_0_OC;
+ switch (oc) {
+ case CHSC_SDA_OC_MCSSE:
+ ret = css_enable_mcsse();
+ if (ret == -EINVAL) {
+ resp_code = 0x0101;
+ goto out;
+ }
+ break;
+ case CHSC_SDA_OC_MSS:
+ ret = css_enable_mss();
+ if (ret == -EINVAL) {
+ resp_code = 0x0101;
+ goto out;
+ }
+ break;
+ default:
+ resp_code = 0x0003;
+ goto out;
+ }
+
+out:
+ res->code = cpu_to_be16(resp_code);
+ res->len = cpu_to_be16(CHSC_MIN_RESP_LEN);
+ res->param = 0;
+}
+
+static int chsc_sei_nt0_get_event(void *res)
+{
+ /* no events yet */
+ return 1;
+}
+
+static int chsc_sei_nt0_have_event(void)
+{
+ /* no events yet */
+ return 0;
+}
+
+#define CHSC_SEI_NT0 (1ULL << 63)
+#define CHSC_SEI_NT2 (1ULL << 61)
+static void ioinst_handle_chsc_sei(ChscReq *req, ChscResp *res)
+{
+ uint64_t selection_mask = ldq_p(&req->param1);
+ uint8_t *res_flags = (uint8_t *)res->data;
+ int have_event = 0;
+ int have_more = 0;
+
+ /* According to the architecture, nt0 cannot be masked. */
+ have_event = !chsc_sei_nt0_get_event(res);
+ have_more = chsc_sei_nt0_have_event();
+
+ if (selection_mask & CHSC_SEI_NT2) {
+ if (!have_event) {
+ have_event = !chsc_sei_nt2_get_event(res);
+ }
+
+ if (!have_more) {
+ have_more = chsc_sei_nt2_have_event();
+ }
+ }
+
+ if (have_event) {
+ res->code = cpu_to_be16(0x0001);
+ if (have_more) {
+ (*res_flags) |= 0x80;
+ } else {
+ (*res_flags) &= ~0x80;
+ css_clear_sei_pending();
+ }
+ } else {
+ res->code = cpu_to_be16(0x0005);
+ res->len = cpu_to_be16(CHSC_MIN_RESP_LEN);
+ }
+}
+
+static void ioinst_handle_chsc_unimplemented(ChscResp *res)
+{
+ res->len = cpu_to_be16(CHSC_MIN_RESP_LEN);
+ res->code = cpu_to_be16(0x0004);
+ res->param = 0;
+}
+
+void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb)
+{
+ ChscReq *req;
+ ChscResp *res;
+ uint64_t addr;
+ int reg;
+ uint16_t len;
+ uint16_t command;
+ CPUS390XState *env = &cpu->env;
+ uint8_t buf[TARGET_PAGE_SIZE];
+
+ trace_ioinst("chsc");
+ reg = (ipb >> 20) & 0x00f;
+ addr = env->regs[reg];
+ /* Page boundary? */
+ if (addr & 0xfff) {
+ program_interrupt(env, PGM_SPECIFICATION, 2);
+ return;
+ }
+ /*
+ * Reading sizeof(ChscReq) bytes is currently enough for all of our
+ * present CHSC sub-handlers ... if we ever need more, we should take
+ * care of req->len here first.
+ */
+ if (s390_cpu_virt_mem_read(cpu, addr, reg, buf, sizeof(ChscReq))) {
+ return;
+ }
+ req = (ChscReq *)buf;
+ len = be16_to_cpu(req->len);
+ /* Length field valid? */
+ if ((len < 16) || (len > 4088) || (len & 7)) {
+ program_interrupt(env, PGM_OPERAND, 2);
+ return;
+ }
+ memset((char *)req + len, 0, TARGET_PAGE_SIZE - len);
+ res = (void *)((char *)req + len);
+ command = be16_to_cpu(req->command);
+ trace_ioinst_chsc_cmd(command, len);
+ switch (command) {
+ case CHSC_SCSC:
+ ioinst_handle_chsc_scsc(req, res);
+ break;
+ case CHSC_SCPD:
+ ioinst_handle_chsc_scpd(req, res);
+ break;
+ case CHSC_SDA:
+ ioinst_handle_chsc_sda(req, res);
+ break;
+ case CHSC_SEI:
+ ioinst_handle_chsc_sei(req, res);
+ break;
+ default:
+ ioinst_handle_chsc_unimplemented(res);
+ break;
+ }
+
+ if (!s390_cpu_virt_mem_write(cpu, addr + len, reg, res,
+ be16_to_cpu(res->len))) {
+ setcc(cpu, 0); /* Command execution complete */
+ }
+}
+
+int ioinst_handle_tpi(S390CPU *cpu, uint32_t ipb)
+{
+ CPUS390XState *env = &cpu->env;
+ uint64_t addr;
+ int lowcore;
+ IOIntCode int_code;
+ hwaddr len;
+ int ret;
+ uint8_t ar;
+
+ trace_ioinst("tpi");
+ addr = decode_basedisp_s(env, ipb, &ar);
+ if (addr & 3) {
+ program_interrupt(env, PGM_SPECIFICATION, 2);
+ return -EIO;
+ }
+
+ lowcore = addr ? 0 : 1;
+ len = lowcore ? 8 /* two words */ : 12 /* three words */;
+ ret = css_do_tpi(&int_code, lowcore);
+ if (ret == 1) {
+ s390_cpu_virt_mem_write(cpu, lowcore ? 184 : addr, ar, &int_code, len);
+ }
+ return ret;
+}
+
+#define SCHM_REG1_RES(_reg) (_reg & 0x000000000ffffffc)
+#define SCHM_REG1_MBK(_reg) ((_reg & 0x00000000f0000000) >> 28)
+#define SCHM_REG1_UPD(_reg) ((_reg & 0x0000000000000002) >> 1)
+#define SCHM_REG1_DCT(_reg) (_reg & 0x0000000000000001)
+
+void ioinst_handle_schm(S390CPU *cpu, uint64_t reg1, uint64_t reg2,
+ uint32_t ipb)
+{
+ uint8_t mbk;
+ int update;
+ int dct;
+ CPUS390XState *env = &cpu->env;
+
+ trace_ioinst("schm");
+
+ if (SCHM_REG1_RES(reg1)) {
+ program_interrupt(env, PGM_OPERAND, 2);
+ return;
+ }
+
+ mbk = SCHM_REG1_MBK(reg1);
+ update = SCHM_REG1_UPD(reg1);
+ dct = SCHM_REG1_DCT(reg1);
+
+ if (update && (reg2 & 0x000000000000001f)) {
+ program_interrupt(env, PGM_OPERAND, 2);
+ return;
+ }
+
+ css_do_schm(mbk, update, dct, update ? reg2 : 0);
+}
+
+void ioinst_handle_rsch(S390CPU *cpu, uint64_t reg1)
+{
+ int cssid, ssid, schid, m;
+ SubchDev *sch;
+ int ret = -ENODEV;
+ int cc;
+
+ if (ioinst_disassemble_sch_ident(reg1, &m, &cssid, &ssid, &schid)) {
+ program_interrupt(&cpu->env, PGM_OPERAND, 2);
+ return;
+ }
+ trace_ioinst_sch_id("rsch", cssid, ssid, schid);
+ sch = css_find_subch(m, cssid, ssid, schid);
+ if (sch && css_subch_visible(sch)) {
+ ret = css_do_rsch(sch);
+ }
+ switch (ret) {
+ case -ENODEV:
+ cc = 3;
+ break;
+ case -EINVAL:
+ cc = 2;
+ break;
+ case 0:
+ cc = 0;
+ break;
+ default:
+ cc = 1;
+ break;
+ }
+ setcc(cpu, cc);
+}
+
+#define RCHP_REG1_RES(_reg) (_reg & 0x00000000ff00ff00)
+#define RCHP_REG1_CSSID(_reg) ((_reg & 0x0000000000ff0000) >> 16)
+#define RCHP_REG1_CHPID(_reg) (_reg & 0x00000000000000ff)
+void ioinst_handle_rchp(S390CPU *cpu, uint64_t reg1)
+{
+ int cc;
+ uint8_t cssid;
+ uint8_t chpid;
+ int ret;
+ CPUS390XState *env = &cpu->env;
+
+ if (RCHP_REG1_RES(reg1)) {
+ program_interrupt(env, PGM_OPERAND, 2);
+ return;
+ }
+
+ cssid = RCHP_REG1_CSSID(reg1);
+ chpid = RCHP_REG1_CHPID(reg1);
+
+ trace_ioinst_chp_id("rchp", cssid, chpid);
+
+ ret = css_do_rchp(cssid, chpid);
+
+ switch (ret) {
+ case -ENODEV:
+ cc = 3;
+ break;
+ case -EBUSY:
+ cc = 2;
+ break;
+ case 0:
+ cc = 0;
+ break;
+ default:
+ /* Invalid channel subsystem. */
+ program_interrupt(env, PGM_OPERAND, 2);
+ return;
+ }
+ setcc(cpu, cc);
+}
+
+#define SAL_REG1_INVALID(_reg) (_reg & 0x0000000080000000)
+void ioinst_handle_sal(S390CPU *cpu, uint64_t reg1)
+{
+ /* We do not provide address limit checking, so let's suppress it. */
+ if (SAL_REG1_INVALID(reg1) || reg1 & 0x000000000000ffff) {
+ program_interrupt(&cpu->env, PGM_OPERAND, 2);
+ }
+}
diff --git a/target/s390x/kvm.c b/target/s390x/kvm.c
new file mode 100644
index 0000000000..97afe02599
--- /dev/null
+++ b/target/s390x/kvm.c
@@ -0,0 +1,2653 @@
+/*
+ * QEMU S390x KVM implementation
+ *
+ * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
+ * Copyright IBM Corp. 2012
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * Contributions after 2012-10-29 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ *
+ * You should have received a copy of the GNU (Lesser) General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include <sys/ioctl.h>
+
+#include <linux/kvm.h>
+#include <asm/ptrace.h>
+
+#include "qemu-common.h"
+#include "cpu.h"
+#include "qemu/error-report.h"
+#include "qemu/timer.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/kvm.h"
+#include "hw/hw.h"
+#include "sysemu/device_tree.h"
+#include "qapi/qmp/qjson.h"
+#include "exec/gdbstub.h"
+#include "exec/address-spaces.h"
+#include "trace.h"
+#include "qapi-event.h"
+#include "hw/s390x/s390-pci-inst.h"
+#include "hw/s390x/s390-pci-bus.h"
+#include "hw/s390x/ipl.h"
+#include "hw/s390x/ebcdic.h"
+#include "exec/memattrs.h"
+#include "hw/s390x/s390-virtio-ccw.h"
+
+/* #define DEBUG_KVM */
+
+#ifdef DEBUG_KVM
+#define DPRINTF(fmt, ...) \
+ do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+ do { } while (0)
+#endif
+
+#define kvm_vm_check_mem_attr(s, attr) \
+ kvm_vm_check_attr(s, KVM_S390_VM_MEM_CTRL, attr)
+
+#define IPA0_DIAG 0x8300
+#define IPA0_SIGP 0xae00
+#define IPA0_B2 0xb200
+#define IPA0_B9 0xb900
+#define IPA0_EB 0xeb00
+#define IPA0_E3 0xe300
+
+#define PRIV_B2_SCLP_CALL 0x20
+#define PRIV_B2_CSCH 0x30
+#define PRIV_B2_HSCH 0x31
+#define PRIV_B2_MSCH 0x32
+#define PRIV_B2_SSCH 0x33
+#define PRIV_B2_STSCH 0x34
+#define PRIV_B2_TSCH 0x35
+#define PRIV_B2_TPI 0x36
+#define PRIV_B2_SAL 0x37
+#define PRIV_B2_RSCH 0x38
+#define PRIV_B2_STCRW 0x39
+#define PRIV_B2_STCPS 0x3a
+#define PRIV_B2_RCHP 0x3b
+#define PRIV_B2_SCHM 0x3c
+#define PRIV_B2_CHSC 0x5f
+#define PRIV_B2_SIGA 0x74
+#define PRIV_B2_XSCH 0x76
+
+#define PRIV_EB_SQBS 0x8a
+#define PRIV_EB_PCISTB 0xd0
+#define PRIV_EB_SIC 0xd1
+
+#define PRIV_B9_EQBS 0x9c
+#define PRIV_B9_CLP 0xa0
+#define PRIV_B9_PCISTG 0xd0
+#define PRIV_B9_PCILG 0xd2
+#define PRIV_B9_RPCIT 0xd3
+
+#define PRIV_E3_MPCIFC 0xd0
+#define PRIV_E3_STPCIFC 0xd4
+
+#define DIAG_TIMEREVENT 0x288
+#define DIAG_IPL 0x308
+#define DIAG_KVM_HYPERCALL 0x500
+#define DIAG_KVM_BREAKPOINT 0x501
+
+#define ICPT_INSTRUCTION 0x04
+#define ICPT_PROGRAM 0x08
+#define ICPT_EXT_INT 0x14
+#define ICPT_WAITPSW 0x1c
+#define ICPT_SOFT_INTERCEPT 0x24
+#define ICPT_CPU_STOP 0x28
+#define ICPT_OPEREXC 0x2c
+#define ICPT_IO 0x40
+
+#define NR_LOCAL_IRQS 32
+/*
+ * Needs to be big enough to contain max_cpus emergency signals
+ * and in addition NR_LOCAL_IRQS interrupts
+ */
+#define VCPU_IRQ_BUF_SIZE (sizeof(struct kvm_s390_irq) * \
+ (max_cpus + NR_LOCAL_IRQS))
+
+static CPUWatchpoint hw_watchpoint;
+/*
+ * We don't use a list because this structure is also used to transmit the
+ * hardware breakpoints to the kernel.
+ */
+static struct kvm_hw_breakpoint *hw_breakpoints;
+static int nb_hw_breakpoints;
+
+const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
+ KVM_CAP_LAST_INFO
+};
+
+static QemuMutex qemu_sigp_mutex;
+
+static int cap_sync_regs;
+static int cap_async_pf;
+static int cap_mem_op;
+static int cap_s390_irq;
+static int cap_ri;
+
+static void *legacy_s390_alloc(size_t size, uint64_t *align);
+
+static int kvm_s390_query_mem_limit(KVMState *s, uint64_t *memory_limit)
+{
+ struct kvm_device_attr attr = {
+ .group = KVM_S390_VM_MEM_CTRL,
+ .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
+ .addr = (uint64_t) memory_limit,
+ };
+
+ return kvm_vm_ioctl(s, KVM_GET_DEVICE_ATTR, &attr);
+}
+
+int kvm_s390_set_mem_limit(KVMState *s, uint64_t new_limit, uint64_t *hw_limit)
+{
+ int rc;
+
+ struct kvm_device_attr attr = {
+ .group = KVM_S390_VM_MEM_CTRL,
+ .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
+ .addr = (uint64_t) &new_limit,
+ };
+
+ if (!kvm_vm_check_mem_attr(s, KVM_S390_VM_MEM_LIMIT_SIZE)) {
+ return 0;
+ }
+
+ rc = kvm_s390_query_mem_limit(s, hw_limit);
+ if (rc) {
+ return rc;
+ } else if (*hw_limit < new_limit) {
+ return -E2BIG;
+ }
+
+ return kvm_vm_ioctl(s, KVM_SET_DEVICE_ATTR, &attr);
+}
+
+static bool kvm_s390_cmma_available(void)
+{
+ static bool initialized, value;
+
+ if (!initialized) {
+ initialized = true;
+ value = kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_ENABLE_CMMA) &&
+ kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_CLR_CMMA);
+ }
+ return value;
+}
+
+void kvm_s390_cmma_reset(void)
+{
+ int rc;
+ struct kvm_device_attr attr = {
+ .group = KVM_S390_VM_MEM_CTRL,
+ .attr = KVM_S390_VM_MEM_CLR_CMMA,
+ };
+
+ if (!mem_path || !kvm_s390_cmma_available()) {
+ return;
+ }
+
+ rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
+ trace_kvm_clear_cmma(rc);
+}
+
+static void kvm_s390_enable_cmma(void)
+{
+ int rc;
+ struct kvm_device_attr attr = {
+ .group = KVM_S390_VM_MEM_CTRL,
+ .attr = KVM_S390_VM_MEM_ENABLE_CMMA,
+ };
+
+ rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
+ trace_kvm_enable_cmma(rc);
+}
+
+static void kvm_s390_set_attr(uint64_t attr)
+{
+ struct kvm_device_attr attribute = {
+ .group = KVM_S390_VM_CRYPTO,
+ .attr = attr,
+ };
+
+ int ret = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attribute);
+
+ if (ret) {
+ error_report("Failed to set crypto device attribute %lu: %s",
+ attr, strerror(-ret));
+ }
+}
+
+static void kvm_s390_init_aes_kw(void)
+{
+ uint64_t attr = KVM_S390_VM_CRYPTO_DISABLE_AES_KW;
+
+ if (object_property_get_bool(OBJECT(qdev_get_machine()), "aes-key-wrap",
+ NULL)) {
+ attr = KVM_S390_VM_CRYPTO_ENABLE_AES_KW;
+ }
+
+ if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
+ kvm_s390_set_attr(attr);
+ }
+}
+
+static void kvm_s390_init_dea_kw(void)
+{
+ uint64_t attr = KVM_S390_VM_CRYPTO_DISABLE_DEA_KW;
+
+ if (object_property_get_bool(OBJECT(qdev_get_machine()), "dea-key-wrap",
+ NULL)) {
+ attr = KVM_S390_VM_CRYPTO_ENABLE_DEA_KW;
+ }
+
+ if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
+ kvm_s390_set_attr(attr);
+ }
+}
+
+void kvm_s390_crypto_reset(void)
+{
+ if (s390_has_feat(S390_FEAT_MSA_EXT_3)) {
+ kvm_s390_init_aes_kw();
+ kvm_s390_init_dea_kw();
+ }
+}
+
+int kvm_arch_init(MachineState *ms, KVMState *s)
+{
+ cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS);
+ cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF);
+ cap_mem_op = kvm_check_extension(s, KVM_CAP_S390_MEM_OP);
+ cap_s390_irq = kvm_check_extension(s, KVM_CAP_S390_INJECT_IRQ);
+
+ if (!kvm_check_extension(s, KVM_CAP_S390_GMAP)
+ || !kvm_check_extension(s, KVM_CAP_S390_COW)) {
+ phys_mem_set_alloc(legacy_s390_alloc);
+ }
+
+ kvm_vm_enable_cap(s, KVM_CAP_S390_USER_SIGP, 0);
+ kvm_vm_enable_cap(s, KVM_CAP_S390_VECTOR_REGISTERS, 0);
+ kvm_vm_enable_cap(s, KVM_CAP_S390_USER_STSI, 0);
+ if (ri_allowed()) {
+ if (kvm_vm_enable_cap(s, KVM_CAP_S390_RI, 0) == 0) {
+ cap_ri = 1;
+ }
+ }
+
+ qemu_mutex_init(&qemu_sigp_mutex);
+
+ return 0;
+}
+
+unsigned long kvm_arch_vcpu_id(CPUState *cpu)
+{
+ return cpu->cpu_index;
+}
+
+int kvm_arch_init_vcpu(CPUState *cs)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ kvm_s390_set_cpu_state(cpu, cpu->env.cpu_state);
+ cpu->irqstate = g_malloc0(VCPU_IRQ_BUF_SIZE);
+ return 0;
+}
+
+void kvm_s390_reset_vcpu(S390CPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+
+ /* The initial reset call is needed here to reset in-kernel
+ * vcpu data that we can't access directly from QEMU
+ * (i.e. with older kernels which don't support sync_regs/ONE_REG).
+ * Before this ioctl cpu_synchronize_state() is called in common kvm
+ * code (kvm-all) */
+ if (kvm_vcpu_ioctl(cs, KVM_S390_INITIAL_RESET, NULL)) {
+ error_report("Initial CPU reset failed on CPU %i", cs->cpu_index);
+ }
+}
+
+static int can_sync_regs(CPUState *cs, int regs)
+{
+ return cap_sync_regs && (cs->kvm_run->kvm_valid_regs & regs) == regs;
+}
+
+int kvm_arch_put_registers(CPUState *cs, int level)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ CPUS390XState *env = &cpu->env;
+ struct kvm_sregs sregs;
+ struct kvm_regs regs;
+ struct kvm_fpu fpu = {};
+ int r;
+ int i;
+
+ /* always save the PSW and the GPRS */
+ cs->kvm_run->psw_addr = env->psw.addr;
+ cs->kvm_run->psw_mask = env->psw.mask;
+
+ if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
+ for (i = 0; i < 16; i++) {
+ cs->kvm_run->s.regs.gprs[i] = env->regs[i];
+ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS;
+ }
+ } else {
+ for (i = 0; i < 16; i++) {
+ regs.gprs[i] = env->regs[i];
+ }
+ r = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
+ if (r < 0) {
+ return r;
+ }
+ }
+
+ if (can_sync_regs(cs, KVM_SYNC_VRS)) {
+ for (i = 0; i < 32; i++) {
+ cs->kvm_run->s.regs.vrs[i][0] = env->vregs[i][0].ll;
+ cs->kvm_run->s.regs.vrs[i][1] = env->vregs[i][1].ll;
+ }
+ cs->kvm_run->s.regs.fpc = env->fpc;
+ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_VRS;
+ } else if (can_sync_regs(cs, KVM_SYNC_FPRS)) {
+ for (i = 0; i < 16; i++) {
+ cs->kvm_run->s.regs.fprs[i] = get_freg(env, i)->ll;
+ }
+ cs->kvm_run->s.regs.fpc = env->fpc;
+ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_FPRS;
+ } else {
+ /* Floating point */
+ for (i = 0; i < 16; i++) {
+ fpu.fprs[i] = get_freg(env, i)->ll;
+ }
+ fpu.fpc = env->fpc;
+
+ r = kvm_vcpu_ioctl(cs, KVM_SET_FPU, &fpu);
+ if (r < 0) {
+ return r;
+ }
+ }
+
+ /* Do we need to save more than that? */
+ if (level == KVM_PUT_RUNTIME_STATE) {
+ return 0;
+ }
+
+ if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
+ cs->kvm_run->s.regs.cputm = env->cputm;
+ cs->kvm_run->s.regs.ckc = env->ckc;
+ cs->kvm_run->s.regs.todpr = env->todpr;
+ cs->kvm_run->s.regs.gbea = env->gbea;
+ cs->kvm_run->s.regs.pp = env->pp;
+ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ARCH0;
+ } else {
+ /*
+ * These ONE_REGS are not protected by a capability. As they are only
+ * necessary for migration, we just trace a possible error but don't
+ * return an error code.
+ */
+ kvm_set_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
+ kvm_set_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
+ kvm_set_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
+ kvm_set_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
+ kvm_set_one_reg(cs, KVM_REG_S390_PP, &env->pp);
+ }
+
+ if (can_sync_regs(cs, KVM_SYNC_RICCB)) {
+ memcpy(cs->kvm_run->s.regs.riccb, env->riccb, 64);
+ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_RICCB;
+ }
+
+ /* pfault parameters */
+ if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
+ cs->kvm_run->s.regs.pft = env->pfault_token;
+ cs->kvm_run->s.regs.pfs = env->pfault_select;
+ cs->kvm_run->s.regs.pfc = env->pfault_compare;
+ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PFAULT;
+ } else if (cap_async_pf) {
+ r = kvm_set_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
+ if (r < 0) {
+ return r;
+ }
+ r = kvm_set_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
+ if (r < 0) {
+ return r;
+ }
+ r = kvm_set_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
+ if (r < 0) {
+ return r;
+ }
+ }
+
+ /* access registers and control registers */
+ if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
+ for (i = 0; i < 16; i++) {
+ cs->kvm_run->s.regs.acrs[i] = env->aregs[i];
+ cs->kvm_run->s.regs.crs[i] = env->cregs[i];
+ }
+ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ACRS;
+ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_CRS;
+ } else {
+ for (i = 0; i < 16; i++) {
+ sregs.acrs[i] = env->aregs[i];
+ sregs.crs[i] = env->cregs[i];
+ }
+ r = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
+ if (r < 0) {
+ return r;
+ }
+ }
+
+ /* Finally the prefix */
+ if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
+ cs->kvm_run->s.regs.prefix = env->psa;
+ cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
+ } else {
+ /* prefix is only supported via sync regs */
+ }
+ return 0;
+}
+
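+/* Read the CPU state from KVM into CPUS390XState, again preferring the
+ * sync_regs area over the individual GET ioctls. */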
+int kvm_arch_get_registers(CPUState *cs)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ CPUS390XState *env = &cpu->env;
+ struct kvm_sregs sregs;
+ struct kvm_regs regs;
+ struct kvm_fpu fpu;
+ int i, r;
+
+ /* get the PSW */
+ env->psw.addr = cs->kvm_run->psw_addr;
+ env->psw.mask = cs->kvm_run->psw_mask;
+
+ /* the GPRS */
+ if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
+ for (i = 0; i < 16; i++) {
+ env->regs[i] = cs->kvm_run->s.regs.gprs[i];
+ }
+ } else {
+ r = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
+ if (r < 0) {
+ return r;
+ }
+ for (i = 0; i < 16; i++) {
+ env->regs[i] = regs.gprs[i];
+ }
+ }
+
+ /* The ACRS and CRS */
+ if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
+ for (i = 0; i < 16; i++) {
+ env->aregs[i] = cs->kvm_run->s.regs.acrs[i];
+ env->cregs[i] = cs->kvm_run->s.regs.crs[i];
+ }
+ } else {
+ r = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
+ if (r < 0) {
+ return r;
+ }
+ for (i = 0; i < 16; i++) {
+ env->aregs[i] = sregs.acrs[i];
+ env->cregs[i] = sregs.crs[i];
+ }
+ }
+
+ /* Floating point and vector registers */
+ if (can_sync_regs(cs, KVM_SYNC_VRS)) {
+ for (i = 0; i < 32; i++) {
+ env->vregs[i][0].ll = cs->kvm_run->s.regs.vrs[i][0];
+ env->vregs[i][1].ll = cs->kvm_run->s.regs.vrs[i][1];
+ }
+ env->fpc = cs->kvm_run->s.regs.fpc;
+ } else if (can_sync_regs(cs, KVM_SYNC_FPRS)) {
+ for (i = 0; i < 16; i++) {
+ get_freg(env, i)->ll = cs->kvm_run->s.regs.fprs[i];
+ }
+ env->fpc = cs->kvm_run->s.regs.fpc;
+ } else {
+ r = kvm_vcpu_ioctl(cs, KVM_GET_FPU, &fpu);
+ if (r < 0) {
+ return r;
+ }
+ for (i = 0; i < 16; i++) {
+ get_freg(env, i)->ll = fpu.fprs[i];
+ }
+ env->fpc = fpu.fpc;
+ }
+
+ /* The prefix */
+ if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
+ env->psa = cs->kvm_run->s.regs.prefix;
+ }
+
+ if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
+ env->cputm = cs->kvm_run->s.regs.cputm;
+ env->ckc = cs->kvm_run->s.regs.ckc;
+ env->todpr = cs->kvm_run->s.regs.todpr;
+ env->gbea = cs->kvm_run->s.regs.gbea;
+ env->pp = cs->kvm_run->s.regs.pp;
+ } else {
+ /*
+ * These ONE_REGS are not protected by a capability. As they are only
+ * necessary for migration, we just trace a possible error but don't
+ * return an error code.
+ */
+ kvm_get_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
+ kvm_get_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
+ kvm_get_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
+ kvm_get_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
+ kvm_get_one_reg(cs, KVM_REG_S390_PP, &env->pp);
+ }
+
+ if (can_sync_regs(cs, KVM_SYNC_RICCB)) {
+ memcpy(env->riccb, cs->kvm_run->s.regs.riccb, 64);
+ }
+
+ /* pfault parameters */
+ if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
+ env->pfault_token = cs->kvm_run->s.regs.pft;
+ env->pfault_select = cs->kvm_run->s.regs.pfs;
+ env->pfault_compare = cs->kvm_run->s.regs.pfc;
+ } else if (cap_async_pf) {
+ r = kvm_get_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
+ if (r < 0) {
+ return r;
+ }
+ r = kvm_get_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
+ if (r < 0) {
+ return r;
+ }
+ r = kvm_get_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
+ if (r < 0) {
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_low)
+{
+ int r;
+ struct kvm_device_attr attr = {
+ .group = KVM_S390_VM_TOD,
+ .attr = KVM_S390_VM_TOD_LOW,
+ .addr = (uint64_t)tod_low,
+ };
+
+ r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
+ if (r) {
+ return r;
+ }
+
+ attr.attr = KVM_S390_VM_TOD_HIGH;
+ attr.addr = (uint64_t)tod_high;
+ return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
+}
+
+int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_low)
+{
+ int r;
+
+ struct kvm_device_attr attr = {
+ .group = KVM_S390_VM_TOD,
+ .attr = KVM_S390_VM_TOD_LOW,
+ .addr = (uint64_t)tod_low,
+ };
+
+ r = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
+ if (r) {
+ return r;
+ }
+
+ attr.attr = KVM_S390_VM_TOD_HIGH;
+ attr.addr = (uint64_t)tod_high;
+ return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
+}
+
+/**
+ * kvm_s390_mem_op:
+ * @addr: the logical start address in guest memory
+ * @ar: the access register number
+ * @hostbuf: buffer in host memory. NULL = do only checks w/o copying
+ * @len: length that should be transferred
+ * @is_write: true = write, false = read
+ * Returns: 0 on success, non-zero if an exception or error occurred
+ *
+ * Use KVM ioctl to read/write from/to guest memory. An access exception
+ * is injected into the vCPU in case of translation errors.
+ */
+int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf,
+ int len, bool is_write)
+{
+ struct kvm_s390_mem_op mem_op = {
+ .gaddr = addr,
+ .flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION,
+ .size = len,
+ .op = is_write ? KVM_S390_MEMOP_LOGICAL_WRITE
+ : KVM_S390_MEMOP_LOGICAL_READ,
+ .buf = (uint64_t)hostbuf,
+ .ar = ar,
+ };
+ int ret;
+
+ if (!cap_mem_op) {
+ return -ENOSYS;
+ }
+ if (!hostbuf) {
+ mem_op.flags |= KVM_S390_MEMOP_F_CHECK_ONLY;
+ }
+
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_S390_MEM_OP, &mem_op);
+ if (ret < 0) {
+ error_printf("KVM_S390_MEM_OP failed: %s\n", strerror(-ret));
+ }
+ return ret;
+}
+
+/*
+ * Legacy layout for s390:
+ * Older S390 KVM requires the topmost vma of the RAM to be
+ * smaller than a system-defined value, which is at least 256GB.
+ * Larger systems have larger values. We put the guest between
+ * the end of data segment (system break) and this value. We
+ * use 32GB as a base to have enough room for the system break
+ * to grow. We also have to use MAP parameters that avoid
+ * read-only mapping of guest pages.
+ */
+static void *legacy_s390_alloc(size_t size, uint64_t *align)
+{
+ void *mem;
+
+ mem = mmap((void *) 0x800000000ULL, size,
+ PROT_EXEC|PROT_READ|PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
+ return mem == MAP_FAILED ? NULL : mem;
+}
+
+static uint8_t const *sw_bp_inst;
+static uint8_t sw_bp_ilen;
+
+static void determine_sw_breakpoint_instr(void)
+{
+ /* DIAG 501 is used for sw breakpoints with old kernels */
+ static const uint8_t diag_501[] = {0x83, 0x24, 0x05, 0x01};
+ /* Instruction 0x0000 is used for sw breakpoints with recent kernels */
+ static const uint8_t instr_0x0000[] = {0x00, 0x00};
+
+ if (sw_bp_inst) {
+ return;
+ }
+ if (kvm_vm_enable_cap(kvm_state, KVM_CAP_S390_USER_INSTR0, 0)) {
+ sw_bp_inst = diag_501;
+ sw_bp_ilen = sizeof(diag_501);
+ DPRINTF("KVM: will use 4-byte sw breakpoints.\n");
+ } else {
+ sw_bp_inst = instr_0x0000;
+ sw_bp_ilen = sizeof(instr_0x0000);
+ DPRINTF("KVM: will use 2-byte sw breakpoints.\n");
+ }
+}
+
+int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+ determine_sw_breakpoint_instr();
+
+ if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
+ sw_bp_ilen, 0) ||
+ cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)sw_bp_inst, sw_bp_ilen, 1)) {
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+ uint8_t t[MAX_ILEN];
+
+ if (cpu_memory_rw_debug(cs, bp->pc, t, sw_bp_ilen, 0)) {
+ return -EINVAL;
+ } else if (memcmp(t, sw_bp_inst, sw_bp_ilen)) {
+ return -EINVAL;
+ } else if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
+ sw_bp_ilen, 1)) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
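+/* Look up a hardware breakpoint/watchpoint in the global array;
+ * len == -1 acts as a wildcard for the length. */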
+static struct kvm_hw_breakpoint *find_hw_breakpoint(target_ulong addr,
+ int len, int type)
+{
+ int n;
+
+ for (n = 0; n < nb_hw_breakpoints; n++) {
+ if (hw_breakpoints[n].addr == addr && hw_breakpoints[n].type == type &&
+ (hw_breakpoints[n].len == len || len == -1)) {
+ return &hw_breakpoints[n];
+ }
+ }
+
+ return NULL;
+}
+
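+/* Append an entry to the dynamically grown hw_breakpoints array; returns
+ * -EEXIST for duplicates and -ENOMEM if (re)allocation fails. */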
+static int insert_hw_breakpoint(target_ulong addr, int len, int type)
+{
+ int size;
+
+ if (find_hw_breakpoint(addr, len, type)) {
+ return -EEXIST;
+ }
+
+ size = (nb_hw_breakpoints + 1) * sizeof(struct kvm_hw_breakpoint);
+
+ if (!hw_breakpoints) {
+ nb_hw_breakpoints = 0;
+ hw_breakpoints = (struct kvm_hw_breakpoint *)g_try_malloc(size);
+ } else {
+ hw_breakpoints =
+ (struct kvm_hw_breakpoint *)g_try_realloc(hw_breakpoints, size);
+ }
+
+ if (!hw_breakpoints) {
+ nb_hw_breakpoints = 0;
+ return -ENOMEM;
+ }
+
+ hw_breakpoints[nb_hw_breakpoints].addr = addr;
+ hw_breakpoints[nb_hw_breakpoints].len = len;
+ hw_breakpoints[nb_hw_breakpoints].type = type;
+
+ nb_hw_breakpoints++;
+
+ return 0;
+}
+
+int kvm_arch_insert_hw_breakpoint(target_ulong addr,
+ target_ulong len, int type)
+{
+ switch (type) {
+ case GDB_BREAKPOINT_HW:
+ type = KVM_HW_BP;
+ break;
+ case GDB_WATCHPOINT_WRITE:
+ if (len < 1) {
+ return -EINVAL;
+ }
+ type = KVM_HW_WP_WRITE;
+ break;
+ default:
+ return -ENOSYS;
+ }
+ return insert_hw_breakpoint(addr, len, type);
+}
+
+int kvm_arch_remove_hw_breakpoint(target_ulong addr,
+ target_ulong len, int type)
+{
+ int size;
+ struct kvm_hw_breakpoint *bp = find_hw_breakpoint(addr, len, type);
+
+ if (bp == NULL) {
+ return -ENOENT;
+ }
+
+ nb_hw_breakpoints--;
+ if (nb_hw_breakpoints > 0) {
+ /*
+ * To trim the array, move the last element into the slot being
+ * removed, if necessary.
+ */
+ if (bp != &hw_breakpoints[nb_hw_breakpoints]) {
+ *bp = hw_breakpoints[nb_hw_breakpoints];
+ }
+ size = nb_hw_breakpoints * sizeof(struct kvm_hw_breakpoint);
+ hw_breakpoints =
+ (struct kvm_hw_breakpoint *)g_realloc(hw_breakpoints, size);
+ } else {
+ g_free(hw_breakpoints);
+ hw_breakpoints = NULL;
+ }
+
+ return 0;
+}
+
+void kvm_arch_remove_all_hw_breakpoints(void)
+{
+ nb_hw_breakpoints = 0;
+ g_free(hw_breakpoints);
+ hw_breakpoints = NULL;
+}
+
+void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
+{
+ int i;
+
+ if (nb_hw_breakpoints > 0) {
+ dbg->arch.nr_hw_bp = nb_hw_breakpoints;
+ dbg->arch.hw_bp = hw_breakpoints;
+
+ for (i = 0; i < nb_hw_breakpoints; ++i) {
+ hw_breakpoints[i].phys_addr = s390_cpu_get_phys_addr_debug(cpu,
+ hw_breakpoints[i].addr);
+ }
+ dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
+ } else {
+ dbg->arch.nr_hw_bp = 0;
+ dbg->arch.hw_bp = NULL;
+ }
+}
+
+void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
+{
+}
+
+MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
+{
+ return MEMTXATTRS_UNSPECIFIED;
+}
+
+int kvm_arch_process_async_events(CPUState *cs)
+{
+ return cs->halted;
+}
+
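+/* Translate a struct kvm_s390_irq into the legacy struct kvm_s390_interrupt
+ * layout used by the KVM_S390_INTERRUPT ioctl. */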
+static int s390_kvm_irq_to_interrupt(struct kvm_s390_irq *irq,
+ struct kvm_s390_interrupt *interrupt)
+{
+ int r = 0;
+
+ interrupt->type = irq->type;
+ switch (irq->type) {
+ case KVM_S390_INT_VIRTIO:
+ interrupt->parm = irq->u.ext.ext_params;
+ /* fall through */
+ case KVM_S390_INT_PFAULT_INIT:
+ case KVM_S390_INT_PFAULT_DONE:
+ interrupt->parm64 = irq->u.ext.ext_params2;
+ break;
+ case KVM_S390_PROGRAM_INT:
+ interrupt->parm = irq->u.pgm.code;
+ break;
+ case KVM_S390_SIGP_SET_PREFIX:
+ interrupt->parm = irq->u.prefix.address;
+ break;
+ case KVM_S390_INT_SERVICE:
+ interrupt->parm = irq->u.ext.ext_params;
+ break;
+ case KVM_S390_MCHK:
+ interrupt->parm = irq->u.mchk.cr14;
+ interrupt->parm64 = irq->u.mchk.mcic;
+ break;
+ case KVM_S390_INT_EXTERNAL_CALL:
+ interrupt->parm = irq->u.extcall.code;
+ break;
+ case KVM_S390_INT_EMERGENCY:
+ interrupt->parm = irq->u.emerg.code;
+ break;
+ case KVM_S390_SIGP_STOP:
+ case KVM_S390_RESTART:
+ break; /* These types have no parameters */
+ case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+ interrupt->parm = irq->u.io.subchannel_id << 16;
+ interrupt->parm |= irq->u.io.subchannel_nr;
+ interrupt->parm64 = (uint64_t)irq->u.io.io_int_parm << 32;
+ interrupt->parm64 |= irq->u.io.io_int_word;
+ break;
+ default:
+ r = -EINVAL;
+ break;
+ }
+ return r;
+}
+
+static void inject_vcpu_irq_legacy(CPUState *cs, struct kvm_s390_irq *irq)
+{
+ struct kvm_s390_interrupt kvmint = {};
+ int r;
+
+ r = s390_kvm_irq_to_interrupt(irq, &kvmint);
+ if (r < 0) {
+ fprintf(stderr, "%s called with bogus interrupt\n", __func__);
+ exit(1);
+ }
+
+ r = kvm_vcpu_ioctl(cs, KVM_S390_INTERRUPT, &kvmint);
+ if (r < 0) {
+ fprintf(stderr, "KVM failed to inject interrupt\n");
+ exit(1);
+ }
+}
+
+void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq)
+{
+ CPUState *cs = CPU(cpu);
+ int r;
+
+ if (cap_s390_irq) {
+ r = kvm_vcpu_ioctl(cs, KVM_S390_IRQ, irq);
+ if (!r) {
+ return;
+ }
+ error_report("KVM failed to inject interrupt %llx", irq->type);
+ exit(1);
+ }
+
+ inject_vcpu_irq_legacy(cs, irq);
+}
+
+static void __kvm_s390_floating_interrupt(struct kvm_s390_irq *irq)
+{
+ struct kvm_s390_interrupt kvmint = {};
+ int r;
+
+ r = s390_kvm_irq_to_interrupt(irq, &kvmint);
+ if (r < 0) {
+ fprintf(stderr, "%s called with bogus interrupt\n", __func__);
+ exit(1);
+ }
+
+ r = kvm_vm_ioctl(kvm_state, KVM_S390_INTERRUPT, &kvmint);
+ if (r < 0) {
+ fprintf(stderr, "KVM failed to inject interrupt\n");
+ exit(1);
+ }
+}
+
+void kvm_s390_floating_interrupt(struct kvm_s390_irq *irq)
+{
+ static bool use_flic = true;
+ int r;
+
+ if (use_flic) {
+ r = kvm_s390_inject_flic(irq);
+ if (r == -ENOSYS) {
+ use_flic = false;
+ }
+ if (!r) {
+ return;
+ }
+ }
+ __kvm_s390_floating_interrupt(irq);
+}
+
+void kvm_s390_service_interrupt(uint32_t parm)
+{
+ struct kvm_s390_irq irq = {
+ .type = KVM_S390_INT_SERVICE,
+ .u.ext.ext_params = parm,
+ };
+
+ kvm_s390_floating_interrupt(&irq);
+}
+
+static void enter_pgmcheck(S390CPU *cpu, uint16_t code)
+{
+ struct kvm_s390_irq irq = {
+ .type = KVM_S390_PROGRAM_INT,
+ .u.pgm.code = code,
+ };
+
+ kvm_s390_vcpu_interrupt(cpu, &irq);
+}
+
+void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code)
+{
+ struct kvm_s390_irq irq = {
+ .type = KVM_S390_PROGRAM_INT,
+ .u.pgm.code = code,
+ .u.pgm.trans_exc_code = te_code,
+ .u.pgm.exc_access_id = te_code & 3,
+ };
+
+ kvm_s390_vcpu_interrupt(cpu, &irq);
+}
+
+static int kvm_sclp_service_call(S390CPU *cpu, struct kvm_run *run,
+ uint16_t ipbh0)
+{
+ CPUS390XState *env = &cpu->env;
+ uint64_t sccb;
+ uint32_t code;
+ int r = 0;
+
+ cpu_synchronize_state(CPU(cpu));
+ sccb = env->regs[ipbh0 & 0xf];
+ code = env->regs[(ipbh0 & 0xf0) >> 4];
+
+ r = sclp_service_call(env, sccb, code);
+ if (r < 0) {
+ enter_pgmcheck(cpu, -r);
+ } else {
+ setcc(cpu, r);
+ }
+
+ return 0;
+}
+
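+/* Dispatch intercepted privileged 0xb2xx instructions (mostly channel I/O);
+ * unknown sub-opcodes return -1 so that the caller injects an operation
+ * exception. */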
+static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
+{
+ CPUS390XState *env = &cpu->env;
+ int rc = 0;
+ uint16_t ipbh0 = (run->s390_sieic.ipb & 0xffff0000) >> 16;
+
+ cpu_synchronize_state(CPU(cpu));
+
+ switch (ipa1) {
+ case PRIV_B2_XSCH:
+ ioinst_handle_xsch(cpu, env->regs[1]);
+ break;
+ case PRIV_B2_CSCH:
+ ioinst_handle_csch(cpu, env->regs[1]);
+ break;
+ case PRIV_B2_HSCH:
+ ioinst_handle_hsch(cpu, env->regs[1]);
+ break;
+ case PRIV_B2_MSCH:
+ ioinst_handle_msch(cpu, env->regs[1], run->s390_sieic.ipb);
+ break;
+ case PRIV_B2_SSCH:
+ ioinst_handle_ssch(cpu, env->regs[1], run->s390_sieic.ipb);
+ break;
+ case PRIV_B2_STCRW:
+ ioinst_handle_stcrw(cpu, run->s390_sieic.ipb);
+ break;
+ case PRIV_B2_STSCH:
+ ioinst_handle_stsch(cpu, env->regs[1], run->s390_sieic.ipb);
+ break;
+ case PRIV_B2_TSCH:
+ /* We should only get tsch via KVM_EXIT_S390_TSCH. */
+ fprintf(stderr, "Spurious tsch intercept\n");
+ break;
+ case PRIV_B2_CHSC:
+ ioinst_handle_chsc(cpu, run->s390_sieic.ipb);
+ break;
+ case PRIV_B2_TPI:
+ /* This should have been handled by kvm already. */
+ fprintf(stderr, "Spurious tpi intercept\n");
+ break;
+ case PRIV_B2_SCHM:
+ ioinst_handle_schm(cpu, env->regs[1], env->regs[2],
+ run->s390_sieic.ipb);
+ break;
+ case PRIV_B2_RSCH:
+ ioinst_handle_rsch(cpu, env->regs[1]);
+ break;
+ case PRIV_B2_RCHP:
+ ioinst_handle_rchp(cpu, env->regs[1]);
+ break;
+ case PRIV_B2_STCPS:
+ /* We do not provide this instruction, it is suppressed. */
+ break;
+ case PRIV_B2_SAL:
+ ioinst_handle_sal(cpu, env->regs[1]);
+ break;
+ case PRIV_B2_SIGA:
+ /* Not provided, set CC = 3 for subchannel not operational */
+ setcc(cpu, 3);
+ break;
+ case PRIV_B2_SCLP_CALL:
+ rc = kvm_sclp_service_call(cpu, run, ipbh0);
+ break;
+ default:
+ rc = -1;
+ DPRINTF("KVM: unhandled PRIV: 0xb2%x\n", ipa1);
+ break;
+ }
+
+ return rc;
+}
+
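+/* Compute the effective address of an RXY-format instruction from the
+ * intercept's ipa/ipb fields, sign-extending the 20-bit displacement.
+ * *ar is set to the base register number for access-register mode. */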
+static uint64_t get_base_disp_rxy(S390CPU *cpu, struct kvm_run *run,
+ uint8_t *ar)
+{
+ CPUS390XState *env = &cpu->env;
+ uint32_t x2 = (run->s390_sieic.ipa & 0x000f);
+ uint32_t base2 = run->s390_sieic.ipb >> 28;
+ uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
+ ((run->s390_sieic.ipb & 0xff00) << 4);
+
+ if (disp2 & 0x80000) {
+ disp2 += 0xfff00000;
+ }
+ if (ar) {
+ *ar = base2;
+ }
+
+ return (base2 ? env->regs[base2] : 0) +
+ (x2 ? env->regs[x2] : 0) + (long)(int)disp2;
+}
+
+static uint64_t get_base_disp_rsy(S390CPU *cpu, struct kvm_run *run,
+ uint8_t *ar)
+{
+ CPUS390XState *env = &cpu->env;
+ uint32_t base2 = run->s390_sieic.ipb >> 28;
+ uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
+ ((run->s390_sieic.ipb & 0xff00) << 4);
+
+ if (disp2 & 0x80000) {
+ disp2 += 0xfff00000;
+ }
+ if (ar) {
+ *ar = base2;
+ }
+
+ return (base2 ? env->regs[base2] : 0) + (long)(int)disp2;
+}
+
+static int kvm_clp_service_call(S390CPU *cpu, struct kvm_run *run)
+{
+ uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;
+
+ return clp_service_call(cpu, r2);
+}
+
+static int kvm_pcilg_service_call(S390CPU *cpu, struct kvm_run *run)
+{
+ uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
+ uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;
+
+ return pcilg_service_call(cpu, r1, r2);
+}
+
+static int kvm_pcistg_service_call(S390CPU *cpu, struct kvm_run *run)
+{
+ uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
+ uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;
+
+ return pcistg_service_call(cpu, r1, r2);
+}
+
+static int kvm_stpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
+{
+ uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
+ uint64_t fiba;
+ uint8_t ar;
+
+ cpu_synchronize_state(CPU(cpu));
+ fiba = get_base_disp_rxy(cpu, run, &ar);
+
+ return stpcifc_service_call(cpu, r1, fiba, ar);
+}
+
+static int kvm_sic_service_call(S390CPU *cpu, struct kvm_run *run)
+{
+ /* NOOP */
+ return 0;
+}
+
+static int kvm_rpcit_service_call(S390CPU *cpu, struct kvm_run *run)
+{
+ uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
+ uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;
+
+ return rpcit_service_call(cpu, r1, r2);
+}
+
+static int kvm_pcistb_service_call(S390CPU *cpu, struct kvm_run *run)
+{
+ uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
+ uint8_t r3 = run->s390_sieic.ipa & 0x000f;
+ uint64_t gaddr;
+ uint8_t ar;
+
+ cpu_synchronize_state(CPU(cpu));
+ gaddr = get_base_disp_rsy(cpu, run, &ar);
+
+ return pcistb_service_call(cpu, r1, r3, gaddr, ar);
+}
+
+static int kvm_mpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
+{
+ uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
+ uint64_t fiba;
+ uint8_t ar;
+
+ cpu_synchronize_state(CPU(cpu));
+ fiba = get_base_disp_rxy(cpu, run, &ar);
+
+ return mpcifc_service_call(cpu, r1, fiba, ar);
+}
+
+static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
+{
+ int r = 0;
+
+ switch (ipa1) {
+ case PRIV_B9_CLP:
+ r = kvm_clp_service_call(cpu, run);
+ break;
+ case PRIV_B9_PCISTG:
+ r = kvm_pcistg_service_call(cpu, run);
+ break;
+ case PRIV_B9_PCILG:
+ r = kvm_pcilg_service_call(cpu, run);
+ break;
+ case PRIV_B9_RPCIT:
+ r = kvm_rpcit_service_call(cpu, run);
+ break;
+ case PRIV_B9_EQBS:
+ /* just inject exception */
+ r = -1;
+ break;
+ default:
+ r = -1;
+ DPRINTF("KVM: unhandled PRIV: 0xb9%x\n", ipa1);
+ break;
+ }
+
+ return r;
+}
+
+static int handle_eb(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
+{
+ int r = 0;
+
+ switch (ipbl) {
+ case PRIV_EB_PCISTB:
+ r = kvm_pcistb_service_call(cpu, run);
+ break;
+ case PRIV_EB_SIC:
+ r = kvm_sic_service_call(cpu, run);
+ break;
+ case PRIV_EB_SQBS:
+ /* just inject exception */
+ r = -1;
+ break;
+ default:
+ r = -1;
+ DPRINTF("KVM: unhandled PRIV: 0xeb%x\n", ipbl);
+ break;
+ }
+
+ return r;
+}
+
+static int handle_e3(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
+{
+ int r = 0;
+
+ switch (ipbl) {
+ case PRIV_E3_MPCIFC:
+ r = kvm_mpcifc_service_call(cpu, run);
+ break;
+ case PRIV_E3_STPCIFC:
+ r = kvm_stpcifc_service_call(cpu, run);
+ break;
+ default:
+ r = -1;
+ DPRINTF("KVM: unhandled PRIV: 0xe3%x\n", ipbl);
+ break;
+ }
+
+ return r;
+}
+
+static int handle_hypercall(S390CPU *cpu, struct kvm_run *run)
+{
+ CPUS390XState *env = &cpu->env;
+ int ret;
+
+ cpu_synchronize_state(CPU(cpu));
+ ret = s390_virtio_hypercall(env);
+ if (ret == -EINVAL) {
+ enter_pgmcheck(cpu, PGM_SPECIFICATION);
+ return 0;
+ }
+
+ return ret;
+}
+
+static void kvm_handle_diag_288(S390CPU *cpu, struct kvm_run *run)
+{
+ uint64_t r1, r3;
+ int rc;
+
+ cpu_synchronize_state(CPU(cpu));
+ r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
+ r3 = run->s390_sieic.ipa & 0x000f;
+ rc = handle_diag_288(&cpu->env, r1, r3);
+ if (rc) {
+ enter_pgmcheck(cpu, PGM_SPECIFICATION);
+ }
+}
+
+static void kvm_handle_diag_308(S390CPU *cpu, struct kvm_run *run)
+{
+ uint64_t r1, r3;
+
+ cpu_synchronize_state(CPU(cpu));
+ r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
+ r3 = run->s390_sieic.ipa & 0x000f;
+ handle_diag_308(&cpu->env, r1, r3);
+}
+
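+/* The PSW already points past the breakpoint instruction; rewind it by the
+ * breakpoint length and report EXCP_DEBUG if QEMU owns a breakpoint there. */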
+static int handle_sw_breakpoint(S390CPU *cpu, struct kvm_run *run)
+{
+ CPUS390XState *env = &cpu->env;
+ unsigned long pc;
+
+ cpu_synchronize_state(CPU(cpu));
+
+ pc = env->psw.addr - sw_bp_ilen;
+ if (kvm_find_sw_breakpoint(CPU(cpu), pc)) {
+ env->psw.addr = pc;
+ return EXCP_DEBUG;
+ }
+
+ return -ENOENT;
+}
+
+#define DIAG_KVM_CODE_MASK 0x000000000000ffff
+
+static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb)
+{
+ int r = 0;
+ uint16_t func_code;
+
+ /*
+ * For any diagnose call we support, bits 48-63 of the resulting
+ * address specify the function code; the remainder is ignored.
+ */
+ func_code = decode_basedisp_rs(&cpu->env, ipb, NULL) & DIAG_KVM_CODE_MASK;
+ switch (func_code) {
+ case DIAG_TIMEREVENT:
+ kvm_handle_diag_288(cpu, run);
+ break;
+ case DIAG_IPL:
+ kvm_handle_diag_308(cpu, run);
+ break;
+ case DIAG_KVM_HYPERCALL:
+ r = handle_hypercall(cpu, run);
+ break;
+ case DIAG_KVM_BREAKPOINT:
+ r = handle_sw_breakpoint(cpu, run);
+ break;
+ default:
+ DPRINTF("KVM: unknown DIAG: 0x%x\n", func_code);
+ enter_pgmcheck(cpu, PGM_SPECIFICATION);
+ break;
+ }
+
+ return r;
+}
+
+typedef struct SigpInfo {
+ uint64_t param;
+ int cc;
+ uint64_t *status_reg;
+} SigpInfo;
+
+static void set_sigp_status(SigpInfo *si, uint64_t status)
+{
+ *si->status_reg &= 0xffffffff00000000ULL;
+ *si->status_reg |= status;
+ si->cc = SIGP_CC_STATUS_STORED;
+}
+
+static void sigp_start(CPUState *cs, run_on_cpu_data arg)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ SigpInfo *si = arg.host_ptr;
+
+ if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) {
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+ return;
+ }
+
+ s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+static void sigp_stop(CPUState *cs, run_on_cpu_data arg)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ SigpInfo *si = arg.host_ptr;
+ struct kvm_s390_irq irq = {
+ .type = KVM_S390_SIGP_STOP,
+ };
+
+ if (s390_cpu_get_state(cpu) != CPU_STATE_OPERATING) {
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+ return;
+ }
+
+ /* disabled wait - sleeping in user space */
+ if (cs->halted) {
+ s390_cpu_set_state(CPU_STATE_STOPPED, cpu);
+ } else {
+ /* execute the stop function */
+ cpu->env.sigp_order = SIGP_STOP;
+ kvm_s390_vcpu_interrupt(cpu, &irq);
+ }
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+#define ADTL_SAVE_AREA_SIZE 1024
+static int kvm_s390_store_adtl_status(S390CPU *cpu, hwaddr addr)
+{
+ void *mem;
+ hwaddr len = ADTL_SAVE_AREA_SIZE;
+
+ mem = cpu_physical_memory_map(addr, &len, 1);
+ if (!mem) {
+ return -EFAULT;
+ }
+ if (len != ADTL_SAVE_AREA_SIZE) {
+ cpu_physical_memory_unmap(mem, len, 1, 0);
+ return -EFAULT;
+ }
+
+ memcpy(mem, &cpu->env.vregs, 512);
+
+ cpu_physical_memory_unmap(mem, len, 1, len);
+
+ return 0;
+}
+
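+/* SIGP store status: write the architected 512-byte save area (fprs, gprs,
+ * psw, prefix, fpc, todpr, cpu timer, clock comparator, access and control
+ * registers) to guest memory at the given address. */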
+#define KVM_S390_STORE_STATUS_DEF_ADDR offsetof(LowCore, floating_pt_save_area)
+#define SAVE_AREA_SIZE 512
+static int kvm_s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch)
+{
+ static const uint8_t ar_id = 1;
+ uint64_t ckc = cpu->env.ckc >> 8;
+ void *mem;
+ int i;
+ hwaddr len = SAVE_AREA_SIZE;
+
+ mem = cpu_physical_memory_map(addr, &len, 1);
+ if (!mem) {
+ return -EFAULT;
+ }
+ if (len != SAVE_AREA_SIZE) {
+ cpu_physical_memory_unmap(mem, len, 1, 0);
+ return -EFAULT;
+ }
+
+ if (store_arch) {
+ cpu_physical_memory_write(offsetof(LowCore, ar_access_id), &ar_id, 1);
+ }
+ for (i = 0; i < 16; ++i) {
+ *((uint64_t *)mem + i) = get_freg(&cpu->env, i)->ll;
+ }
+ memcpy(mem + 128, &cpu->env.regs, 128);
+ memcpy(mem + 256, &cpu->env.psw, 16);
+ memcpy(mem + 280, &cpu->env.psa, 4);
+ memcpy(mem + 284, &cpu->env.fpc, 4);
+ memcpy(mem + 292, &cpu->env.todpr, 4);
+ memcpy(mem + 296, &cpu->env.cputm, 8);
+ memcpy(mem + 304, &ckc, 8);
+ memcpy(mem + 320, &cpu->env.aregs, 64);
+ memcpy(mem + 384, &cpu->env.cregs, 128);
+
+ cpu_physical_memory_unmap(mem, len, 1, len);
+
+ return 0;
+}
+
+static void sigp_stop_and_store_status(CPUState *cs, run_on_cpu_data arg)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ SigpInfo *si = arg.host_ptr;
+ struct kvm_s390_irq irq = {
+ .type = KVM_S390_SIGP_STOP,
+ };
+
+ /* disabled wait - sleeping in user space */
+ if (s390_cpu_get_state(cpu) == CPU_STATE_OPERATING && cs->halted) {
+ s390_cpu_set_state(CPU_STATE_STOPPED, cpu);
+ }
+
+ switch (s390_cpu_get_state(cpu)) {
+ case CPU_STATE_OPERATING:
+ cpu->env.sigp_order = SIGP_STOP_STORE_STATUS;
+ kvm_s390_vcpu_interrupt(cpu, &irq);
+ /* store will be performed when handling the stop intercept */
+ break;
+ case CPU_STATE_STOPPED:
+ /* already stopped, just store the status */
+ cpu_synchronize_state(cs);
+ kvm_s390_store_status(cpu, KVM_S390_STORE_STATUS_DEF_ADDR, true);
+ break;
+ }
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+static void sigp_store_status_at_address(CPUState *cs, run_on_cpu_data arg)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ SigpInfo *si = arg.host_ptr;
+ uint32_t address = si->param & 0x7ffffe00u;
+
+ /* cpu has to be stopped */
+ if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) {
+ set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
+ return;
+ }
+
+ cpu_synchronize_state(cs);
+
+ if (kvm_s390_store_status(cpu, address, false)) {
+ set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
+ return;
+ }
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+static void sigp_store_adtl_status(CPUState *cs, run_on_cpu_data arg)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ SigpInfo *si = arg.host_ptr;
+
+ if (!s390_has_feat(S390_FEAT_VECTOR)) {
+ set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
+ return;
+ }
+
+ /* cpu has to be stopped */
+ if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) {
+ set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
+ return;
+ }
+
+ /* parameter must be aligned to 1024-byte boundary */
+ if (si->param & 0x3ff) {
+ set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
+ return;
+ }
+
+ cpu_synchronize_state(cs);
+
+ if (kvm_s390_store_adtl_status(cpu, si->param)) {
+ set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
+ return;
+ }
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+static void sigp_restart(CPUState *cs, run_on_cpu_data arg)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ SigpInfo *si = arg.host_ptr;
+ struct kvm_s390_irq irq = {
+ .type = KVM_S390_RESTART,
+ };
+
+ switch (s390_cpu_get_state(cpu)) {
+ case CPU_STATE_STOPPED:
+ /* the restart irq has to be delivered prior to any other pending irq */
+ cpu_synchronize_state(cs);
+ do_restart_interrupt(&cpu->env);
+ s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
+ break;
+ case CPU_STATE_OPERATING:
+ kvm_s390_vcpu_interrupt(cpu, &irq);
+ break;
+ }
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+int kvm_s390_cpu_restart(S390CPU *cpu)
+{
+ SigpInfo si = {};
+
+ run_on_cpu(CPU(cpu), sigp_restart, RUN_ON_CPU_HOST_PTR(&si));
+ DPRINTF("DONE: KVM cpu restart: %p\n", &cpu->env);
+ return 0;
+}
+
+static void sigp_initial_cpu_reset(CPUState *cs, run_on_cpu_data arg)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
+ SigpInfo *si = arg.host_ptr;
+
+ cpu_synchronize_state(cs);
+ scc->initial_cpu_reset(cs);
+ cpu_synchronize_post_reset(cs);
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+static void sigp_cpu_reset(CPUState *cs, run_on_cpu_data arg)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
+ SigpInfo *si = arg.host_ptr;
+
+ cpu_synchronize_state(cs);
+ scc->cpu_reset(cs);
+ cpu_synchronize_post_reset(cs);
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+static void sigp_set_prefix(CPUState *cs, run_on_cpu_data arg)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ SigpInfo *si = arg.host_ptr;
+ uint32_t addr = si->param & 0x7fffe000u;
+
+ cpu_synchronize_state(cs);
+
+ if (!address_space_access_valid(&address_space_memory, addr,
+ sizeof(struct LowCore), false)) {
+ set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
+ return;
+ }
+
+ /* cpu has to be stopped */
+ if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) {
+ set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
+ return;
+ }
+
+ cpu->env.psa = addr;
+ cpu_synchronize_post_init(cs);
+ si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+static int handle_sigp_single_dst(S390CPU *dst_cpu, uint8_t order,
+ uint64_t param, uint64_t *status_reg)
+{
+ SigpInfo si = {
+ .param = param,
+ .status_reg = status_reg,
+ };
+
+ /* cpu available? */
+ if (dst_cpu == NULL) {
+ return SIGP_CC_NOT_OPERATIONAL;
+ }
+
+ /* only resets can break pending orders */
+ if (dst_cpu->env.sigp_order != 0 &&
+ order != SIGP_CPU_RESET &&
+ order != SIGP_INITIAL_CPU_RESET) {
+ return SIGP_CC_BUSY;
+ }
+
+ switch (order) {
+ case SIGP_START:
+ run_on_cpu(CPU(dst_cpu), sigp_start, RUN_ON_CPU_HOST_PTR(&si));
+ break;
+ case SIGP_STOP:
+ run_on_cpu(CPU(dst_cpu), sigp_stop, RUN_ON_CPU_HOST_PTR(&si));
+ break;
+ case SIGP_RESTART:
+ run_on_cpu(CPU(dst_cpu), sigp_restart, RUN_ON_CPU_HOST_PTR(&si));
+ break;
+ case SIGP_STOP_STORE_STATUS:
+ run_on_cpu(CPU(dst_cpu), sigp_stop_and_store_status, RUN_ON_CPU_HOST_PTR(&si));
+ break;
+ case SIGP_STORE_STATUS_ADDR:
+ run_on_cpu(CPU(dst_cpu), sigp_store_status_at_address, RUN_ON_CPU_HOST_PTR(&si));
+ break;
+ case SIGP_STORE_ADTL_STATUS:
+ run_on_cpu(CPU(dst_cpu), sigp_store_adtl_status, RUN_ON_CPU_HOST_PTR(&si));
+ break;
+ case SIGP_SET_PREFIX:
+ run_on_cpu(CPU(dst_cpu), sigp_set_prefix, RUN_ON_CPU_HOST_PTR(&si));
+ break;
+ case SIGP_INITIAL_CPU_RESET:
+ run_on_cpu(CPU(dst_cpu), sigp_initial_cpu_reset, RUN_ON_CPU_HOST_PTR(&si));
+ break;
+ case SIGP_CPU_RESET:
+ run_on_cpu(CPU(dst_cpu), sigp_cpu_reset, RUN_ON_CPU_HOST_PTR(&si));
+ break;
+ default:
+ DPRINTF("KVM: unknown SIGP: 0x%x\n", order);
+ set_sigp_status(&si, SIGP_STAT_INVALID_ORDER);
+ }
+
+ return si.cc;
+}
+
+static int sigp_set_architecture(S390CPU *cpu, uint32_t param,
+ uint64_t *status_reg)
+{
+ CPUState *cur_cs;
+ S390CPU *cur_cpu;
+
+ /* due to the BQL, we are the only active cpu */
+ CPU_FOREACH(cur_cs) {
+ cur_cpu = S390_CPU(cur_cs);
+ if (cur_cpu->env.sigp_order != 0) {
+ return SIGP_CC_BUSY;
+ }
+ cpu_synchronize_state(cur_cs);
+ /* all but the current one have to be stopped */
+ if (cur_cpu != cpu &&
+ s390_cpu_get_state(cur_cpu) != CPU_STATE_STOPPED) {
+ *status_reg &= 0xffffffff00000000ULL;
+ *status_reg |= SIGP_STAT_INCORRECT_STATE;
+ return SIGP_CC_STATUS_STORED;
+ }
+ }
+
+ switch (param & 0xff) {
+ case SIGP_MODE_ESA_S390:
+ /* not supported */
+ return SIGP_CC_NOT_OPERATIONAL;
+ case SIGP_MODE_Z_ARCH_TRANS_ALL_PSW:
+ case SIGP_MODE_Z_ARCH_TRANS_CUR_PSW:
+ CPU_FOREACH(cur_cs) {
+ cur_cpu = S390_CPU(cur_cs);
+ cur_cpu->env.pfault_token = -1UL;
+ }
+ break;
+ default:
+ *status_reg &= 0xffffffff00000000ULL;
+ *status_reg |= SIGP_STAT_INVALID_PARAMETER;
+ return SIGP_CC_STATUS_STORED;
+ }
+
+ return SIGP_CC_ORDER_CODE_ACCEPTED;
+}
+
+#define SIGP_ORDER_MASK 0x000000ff
+
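+/* Decode and dispatch a SIGP order. Concurrent orders are serialized with
+ * qemu_sigp_mutex; if the lock cannot be taken, CC "busy" is returned. */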
+static int handle_sigp(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
+{
+ CPUS390XState *env = &cpu->env;
+ const uint8_t r1 = ipa1 >> 4;
+ const uint8_t r3 = ipa1 & 0x0f;
+ int ret;
+ uint8_t order;
+ uint64_t *status_reg;
+ uint64_t param;
+ S390CPU *dst_cpu = NULL;
+
+ cpu_synchronize_state(CPU(cpu));
+
+ /* get order code */
+ order = decode_basedisp_rs(env, run->s390_sieic.ipb, NULL)
+ & SIGP_ORDER_MASK;
+ status_reg = &env->regs[r1];
+ param = (r1 % 2) ? env->regs[r1] : env->regs[r1 + 1];
+
+ if (qemu_mutex_trylock(&qemu_sigp_mutex)) {
+ ret = SIGP_CC_BUSY;
+ goto out;
+ }
+
+ switch (order) {
+ case SIGP_SET_ARCH:
+ ret = sigp_set_architecture(cpu, param, status_reg);
+ break;
+ default:
+ /* all other sigp orders target a single vcpu */
+ dst_cpu = s390_cpu_addr2state(env->regs[r3]);
+ ret = handle_sigp_single_dst(dst_cpu, order, param, status_reg);
+ }
+ qemu_mutex_unlock(&qemu_sigp_mutex);
+
+out:
+ trace_kvm_sigp_finished(order, CPU(cpu)->cpu_index,
+ dst_cpu ? CPU(dst_cpu)->cpu_index : -1, ret);
+
+ if (ret >= 0) {
+ setcc(cpu, ret);
+ return 0;
+ }
+
+ return ret;
+}
+
+static int handle_instruction(S390CPU *cpu, struct kvm_run *run)
+{
+ unsigned int ipa0 = (run->s390_sieic.ipa & 0xff00);
+ uint8_t ipa1 = run->s390_sieic.ipa & 0x00ff;
+ int r = -1;
+
+ DPRINTF("handle_instruction 0x%x 0x%x\n",
+ run->s390_sieic.ipa, run->s390_sieic.ipb);
+ switch (ipa0) {
+ case IPA0_B2:
+ r = handle_b2(cpu, run, ipa1);
+ break;
+ case IPA0_B9:
+ r = handle_b9(cpu, run, ipa1);
+ break;
+ case IPA0_EB:
+ r = handle_eb(cpu, run, run->s390_sieic.ipb & 0xff);
+ break;
+ case IPA0_E3:
+ r = handle_e3(cpu, run, run->s390_sieic.ipb & 0xff);
+ break;
+ case IPA0_DIAG:
+ r = handle_diag(cpu, run, run->s390_sieic.ipb);
+ break;
+ case IPA0_SIGP:
+ r = handle_sigp(cpu, run, ipa1);
+ break;
+ }
+
+ if (r < 0) {
+ r = 0;
+ enter_pgmcheck(cpu, 0x0001);
+ }
+
+ return r;
+}
+
+static bool is_special_wait_psw(CPUState *cs)
+{
+ /* signal quiesce */
+ return cs->kvm_run->psw_addr == 0xfffUL;
+}
+
+static void unmanageable_intercept(S390CPU *cpu, const char *str, int pswoffset)
+{
+ CPUState *cs = CPU(cpu);
+
+ error_report("Unmanageable %s! CPU%i new PSW: 0x%016lx:%016lx",
+ str, cs->cpu_index, ldq_phys(cs->as, cpu->env.psa + pswoffset),
+ ldq_phys(cs->as, cpu->env.psa + pswoffset + 8));
+ s390_cpu_halt(cpu);
+ qemu_system_guest_panicked();
+}
+
+static int handle_intercept(S390CPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ struct kvm_run *run = cs->kvm_run;
+ int icpt_code = run->s390_sieic.icptcode;
+ int r = 0;
+
+ DPRINTF("intercept: 0x%x (at 0x%lx)\n", icpt_code,
+ (long)cs->kvm_run->psw_addr);
+ switch (icpt_code) {
+ case ICPT_INSTRUCTION:
+ r = handle_instruction(cpu, run);
+ break;
+ case ICPT_PROGRAM:
+ unmanageable_intercept(cpu, "program interrupt",
+ offsetof(LowCore, program_new_psw));
+ r = EXCP_HALTED;
+ break;
+ case ICPT_EXT_INT:
+ unmanageable_intercept(cpu, "external interrupt",
+ offsetof(LowCore, external_new_psw));
+ r = EXCP_HALTED;
+ break;
+ case ICPT_WAITPSW:
+ /* disabled wait, since enabled wait is handled in kernel */
+ cpu_synchronize_state(cs);
+ if (s390_cpu_halt(cpu) == 0) {
+ if (is_special_wait_psw(cs)) {
+ qemu_system_shutdown_request();
+ } else {
+ qemu_system_guest_panicked();
+ }
+ }
+ r = EXCP_HALTED;
+ break;
+ case ICPT_CPU_STOP:
+ if (s390_cpu_set_state(CPU_STATE_STOPPED, cpu) == 0) {
+ qemu_system_shutdown_request();
+ }
+ if (cpu->env.sigp_order == SIGP_STOP_STORE_STATUS) {
+ kvm_s390_store_status(cpu, KVM_S390_STORE_STATUS_DEF_ADDR,
+ true);
+ }
+ cpu->env.sigp_order = 0;
+ r = EXCP_HALTED;
+ break;
+ case ICPT_OPEREXC:
+ /* currently only instruction 0x0000, after it was enabled via the capability */
+ r = handle_sw_breakpoint(cpu, run);
+ if (r == -ENOENT) {
+ enter_pgmcheck(cpu, PGM_OPERATION);
+ r = 0;
+ }
+ break;
+ case ICPT_SOFT_INTERCEPT:
+ fprintf(stderr, "KVM unimplemented icpt SOFT\n");
+ exit(1);
+ break;
+ case ICPT_IO:
+ fprintf(stderr, "KVM unimplemented icpt IO\n");
+ exit(1);
+ break;
+ default:
+ fprintf(stderr, "Unknown intercept code: %d\n", icpt_code);
+ exit(1);
+ break;
+ }
+
+ return r;
+}
+
+static int handle_tsch(S390CPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ struct kvm_run *run = cs->kvm_run;
+ int ret;
+
+ cpu_synchronize_state(cs);
+
+ ret = ioinst_handle_tsch(cpu, cpu->env.regs[1], run->s390_tsch.ipb);
+ if (ret < 0) {
+ /*
+ * Failure.
+ * If an I/O interrupt had been dequeued, we have to reinject it.
+ */
+ if (run->s390_tsch.dequeued) {
+ kvm_s390_io_interrupt(run->s390_tsch.subchannel_id,
+ run->s390_tsch.subchannel_nr,
+ run->s390_tsch.io_int_parm,
+ run->s390_tsch.io_int_word);
+ }
+ ret = 0;
+ }
+ return ret;
+}
+
+static void insert_stsi_3_2_2(S390CPU *cpu, __u64 addr, uint8_t ar)
+{
+ struct sysib_322 sysib;
+ int del;
+
+ if (s390_cpu_virt_mem_read(cpu, addr, ar, &sysib, sizeof(sysib))) {
+ return;
+ }
+ /* Shift the stack of Extended Names to prepare for our own data */
+ memmove(&sysib.ext_names[1], &sysib.ext_names[0],
+ sizeof(sysib.ext_names[0]) * (sysib.count - 1));
+ /* The first virtualization level that doesn't provide Extended Names
+ * delimits the stack, since it is assumed to be incapable of managing
+ * Extended Names for lower levels.
+ */
+ for (del = 1; del < sysib.count; del++) {
+ if (!sysib.vm[del].ext_name_encoding || !sysib.ext_names[del][0]) {
+ break;
+ }
+ }
+ if (del < sysib.count) {
+ memset(sysib.ext_names[del], 0,
+ sizeof(sysib.ext_names[0]) * (sysib.count - del));
+ }
+ /* Insert short machine name in EBCDIC, padded with blanks */
+ if (qemu_name) {
+ memset(sysib.vm[0].name, 0x40, sizeof(sysib.vm[0].name));
+ ebcdic_put(sysib.vm[0].name, qemu_name, MIN(sizeof(sysib.vm[0].name),
+ strlen(qemu_name)));
+ }
+ sysib.vm[0].ext_name_encoding = 2; /* 2 = UTF-8 */
+ memset(sysib.ext_names[0], 0, sizeof(sysib.ext_names[0]));
+ /* If the hypervisor specifies a zero Extended Name in the STSI 3.2.2 SYSIB,
+ * s390 considers it incapable of providing any Extended Name. Therefore,
+ * if no name was specified on the qemu invocation, we go with the same
+ * "KVMguest" default that KVM has filled into the short name field.
+ */
+ if (qemu_name) {
+ strncpy((char *)sysib.ext_names[0], qemu_name,
+ sizeof(sysib.ext_names[0]));
+ } else {
+ strcpy((char *)sysib.ext_names[0], "KVMguest");
+ }
+ /* Insert UUID */
+ memcpy(sysib.vm[0].uuid, &qemu_uuid, sizeof(sysib.vm[0].uuid));
+
+ s390_cpu_virt_mem_write(cpu, addr, ar, &sysib, sizeof(sysib));
+}
+
+static int handle_stsi(S390CPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ struct kvm_run *run = cs->kvm_run;
+
+ switch (run->s390_stsi.fc) {
+ case 3:
+ if (run->s390_stsi.sel1 != 2 || run->s390_stsi.sel2 != 2) {
+ return 0;
+ }
+ /* Only sysib 3.2.2 needs post-handling for now. */
+ insert_stsi_3_2_2(cpu, run->s390_stsi.addr, run->s390_stsi.ar);
+ return 0;
+ default:
+ return 0;
+ }
+}
+
+static int kvm_arch_handle_debug_exit(S390CPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ struct kvm_run *run = cs->kvm_run;
+
+ int ret = 0;
+ struct kvm_debug_exit_arch *arch_info = &run->debug.arch;
+
+ switch (arch_info->type) {
+ case KVM_HW_WP_WRITE:
+ if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
+ cs->watchpoint_hit = &hw_watchpoint;
+ hw_watchpoint.vaddr = arch_info->addr;
+ hw_watchpoint.flags = BP_MEM_WRITE;
+ ret = EXCP_DEBUG;
+ }
+ break;
+ case KVM_HW_BP:
+ if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
+ ret = EXCP_DEBUG;
+ }
+ break;
+ case KVM_SINGLESTEP:
+ if (cs->singlestep_enabled) {
+ ret = EXCP_DEBUG;
+ }
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ return ret;
+}
+
+int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ int ret = 0;
+
+ qemu_mutex_lock_iothread();
+
+ switch (run->exit_reason) {
+ case KVM_EXIT_S390_SIEIC:
+ ret = handle_intercept(cpu);
+ break;
+ case KVM_EXIT_S390_RESET:
+ s390_reipl_request();
+ break;
+ case KVM_EXIT_S390_TSCH:
+ ret = handle_tsch(cpu);
+ break;
+ case KVM_EXIT_S390_STSI:
+ ret = handle_stsi(cpu);
+ break;
+ case KVM_EXIT_DEBUG:
+ ret = kvm_arch_handle_debug_exit(cpu);
+ break;
+ default:
+ fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason);
+ break;
+ }
+ qemu_mutex_unlock_iothread();
+
+ if (ret == 0) {
+ ret = EXCP_INTERRUPT;
+ }
+ return ret;
+}
+
+bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
+{
+ return true;
+}
+
+int kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
+{
+ return 1;
+}
+
+int kvm_arch_on_sigbus(int code, void *addr)
+{
+ return 1;
+}
+
+void kvm_s390_io_interrupt(uint16_t subchannel_id,
+ uint16_t subchannel_nr, uint32_t io_int_parm,
+ uint32_t io_int_word)
+{
+ struct kvm_s390_irq irq = {
+ .u.io.subchannel_id = subchannel_id,
+ .u.io.subchannel_nr = subchannel_nr,
+ .u.io.io_int_parm = io_int_parm,
+ .u.io.io_int_word = io_int_word,
+ };
+
+ if (io_int_word & IO_INT_WORD_AI) {
+ irq.type = KVM_S390_INT_IO(1, 0, 0, 0);
+ } else {
+ irq.type = KVM_S390_INT_IO(0, (subchannel_id & 0xff00) >> 8,
+ (subchannel_id & 0x0006),
+ subchannel_nr);
+ }
+ kvm_s390_floating_interrupt(&irq);
+}
+
+static uint64_t build_channel_report_mcic(void)
+{
+ uint64_t mcic;
+
+ /* subclass: indicate channel report pending */
+ mcic = MCIC_SC_CP |
+ /* subclass modifiers: none */
+ /* storage errors: none */
+ /* validity bits: no damage */
+ MCIC_VB_WP | MCIC_VB_MS | MCIC_VB_PM | MCIC_VB_IA | MCIC_VB_FP |
+ MCIC_VB_GR | MCIC_VB_CR | MCIC_VB_ST | MCIC_VB_AR | MCIC_VB_PR |
+ MCIC_VB_FC | MCIC_VB_CT | MCIC_VB_CC;
+ if (s390_has_feat(S390_FEAT_VECTOR)) {
+ mcic |= MCIC_VB_VR;
+ }
+ return mcic;
+}
+
+void kvm_s390_crw_mchk(void)
+{
+ struct kvm_s390_irq irq = {
+ .type = KVM_S390_MCHK,
+ .u.mchk.cr14 = 1 << 28,
+ .u.mchk.mcic = build_channel_report_mcic(),
+ };
+ kvm_s390_floating_interrupt(&irq);
+}
+
+void kvm_s390_enable_css_support(S390CPU *cpu)
+{
+ int r;
+
+ /* Activate host kernel channel subsystem support. */
+ r = kvm_vcpu_enable_cap(CPU(cpu), KVM_CAP_S390_CSS_SUPPORT, 0);
+ assert(r == 0);
+}
+
+void kvm_arch_init_irq_routing(KVMState *s)
+{
+ /*
+ * Note that while irqchip capabilities generally imply that cpustates
+ * are handled in-kernel, it is not true for s390 (yet); therefore, we
+ * have to override the common code kvm_halt_in_kernel_allowed setting.
+ */
+ if (kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
+ kvm_gsi_routing_allowed = true;
+ kvm_halt_in_kernel_allowed = false;
+ }
+}
+
+int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
+ int vq, bool assign)
+{
+ struct kvm_ioeventfd kick = {
+ .flags = KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY |
+ KVM_IOEVENTFD_FLAG_DATAMATCH,
+ .fd = event_notifier_get_fd(notifier),
+ .datamatch = vq,
+ .addr = sch,
+ .len = 8,
+ };
+ if (!kvm_check_extension(kvm_state, KVM_CAP_IOEVENTFD)) {
+ return -ENOSYS;
+ }
+ if (!assign) {
+ kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
+ }
+ return kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
+}
+
+int kvm_s390_get_memslot_count(KVMState *s)
+{
+ return kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);
+}
+
+int kvm_s390_get_ri(void)
+{
+ return cap_ri;
+}
+
+int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state)
+{
+ struct kvm_mp_state mp_state = {};
+ int ret;
+
+ /* the kvm part might not have been initialized yet */
+ if (CPU(cpu)->kvm_state == NULL) {
+ return 0;
+ }
+
+ switch (cpu_state) {
+ case CPU_STATE_STOPPED:
+ mp_state.mp_state = KVM_MP_STATE_STOPPED;
+ break;
+ case CPU_STATE_CHECK_STOP:
+ mp_state.mp_state = KVM_MP_STATE_CHECK_STOP;
+ break;
+ case CPU_STATE_OPERATING:
+ mp_state.mp_state = KVM_MP_STATE_OPERATING;
+ break;
+ case CPU_STATE_LOAD:
+ mp_state.mp_state = KVM_MP_STATE_LOAD;
+ break;
+ default:
+ error_report("Requested CPU state is not a valid S390 CPU state: %u",
+ cpu_state);
+ exit(1);
+ }
+
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
+ if (ret) {
+ trace_kvm_failed_cpu_state_set(CPU(cpu)->cpu_index, cpu_state,
+ strerror(-ret));
+ }
+
+ return ret;
+}
+
+void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu)
+{
+ struct kvm_s390_irq_state irq_state;
+ CPUState *cs = CPU(cpu);
+ int32_t bytes;
+
+ if (!kvm_check_extension(kvm_state, KVM_CAP_S390_IRQ_STATE)) {
+ return;
+ }
+
+ irq_state.buf = (uint64_t) cpu->irqstate;
+ irq_state.len = VCPU_IRQ_BUF_SIZE;
+
+ bytes = kvm_vcpu_ioctl(cs, KVM_S390_GET_IRQ_STATE, &irq_state);
+ if (bytes < 0) {
+ cpu->irqstate_saved_size = 0;
+ error_report("Migration of interrupt state failed");
+ return;
+ }
+
+ cpu->irqstate_saved_size = bytes;
+}
+
+int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ struct kvm_s390_irq_state irq_state;
+ int r;
+
+ if (cpu->irqstate_saved_size == 0) {
+ return 0;
+ }
+
+ if (!kvm_check_extension(kvm_state, KVM_CAP_S390_IRQ_STATE)) {
+ return -ENOSYS;
+ }
+
+ irq_state.buf = (uint64_t) cpu->irqstate;
+ irq_state.len = cpu->irqstate_saved_size;
+
+ r = kvm_vcpu_ioctl(cs, KVM_S390_SET_IRQ_STATE, &irq_state);
+ if (r) {
+ error_report("Setting interrupt state failed %d", r);
+ }
+ return r;
+}
+
+int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
+ uint64_t address, uint32_t data, PCIDevice *dev)
+{
+ S390PCIBusDevice *pbdev;
+ uint32_t idx = data >> ZPCI_MSI_VEC_BITS;
+ uint32_t vec = data & ZPCI_MSI_VEC_MASK;
+
+ pbdev = s390_pci_find_dev_by_idx(idx);
+ if (!pbdev) {
+ DPRINTF("add_msi_route no dev\n");
+ return -ENODEV;
+ }
+
+ pbdev->routes.adapter.ind_offset = vec;
+
+ route->type = KVM_IRQ_ROUTING_S390_ADAPTER;
+ route->flags = 0;
+ route->u.adapter.summary_addr = pbdev->routes.adapter.summary_addr;
+ route->u.adapter.ind_addr = pbdev->routes.adapter.ind_addr;
+ route->u.adapter.summary_offset = pbdev->routes.adapter.summary_offset;
+ route->u.adapter.ind_offset = pbdev->routes.adapter.ind_offset;
+ route->u.adapter.adapter_id = pbdev->routes.adapter.adapter_id;
+ return 0;
+}
+
+int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
+ int vector, PCIDevice *dev)
+{
+ return 0;
+}
+
+int kvm_arch_release_virq_post(int virq)
+{
+ return 0;
+}
+
+int kvm_arch_msi_data_to_gsi(uint32_t data)
+{
+ abort();
+}
+
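+/* Bit helpers using the MSB-0 (big-endian) bit numbering of the s390
+ * CPU-feature blocks. */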
+static inline int test_bit_inv(long nr, const unsigned long *addr)
+{
+ return test_bit(BE_BIT_NR(nr), addr);
+}
+
+static inline void set_bit_inv(long nr, unsigned long *addr)
+{
+ set_bit(BE_BIT_NR(nr), addr);
+}
+
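+/* Query the machine-wide CPU subfunction blocks (PLO, PTFF, the MSA
+ * functions, ...) via the CPU model device attribute and merge them into
+ * @features. */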
+static int query_cpu_subfunc(S390FeatBitmap features)
+{
+ struct kvm_s390_vm_cpu_subfunc prop;
+ struct kvm_device_attr attr = {
+ .group = KVM_S390_VM_CPU_MODEL,
+ .attr = KVM_S390_VM_CPU_MACHINE_SUBFUNC,
+ .addr = (uint64_t) &prop,
+ };
+ int rc;
+
+ rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
+ if (rc) {
+ return rc;
+ }
+
+ /*
+ * We're going to add all subfunctions now, provided the corresponding
+ * feature that unlocks the query functions is available.
+ */
+ s390_add_from_feat_block(features, S390_FEAT_TYPE_PLO, prop.plo);
+ if (test_bit(S390_FEAT_TOD_CLOCK_STEERING, features)) {
+ s390_add_from_feat_block(features, S390_FEAT_TYPE_PTFF, prop.ptff);
+ }
+ if (test_bit(S390_FEAT_MSA, features)) {
+ s390_add_from_feat_block(features, S390_FEAT_TYPE_KMAC, prop.kmac);
+ s390_add_from_feat_block(features, S390_FEAT_TYPE_KMC, prop.kmc);
+ s390_add_from_feat_block(features, S390_FEAT_TYPE_KM, prop.km);
+ s390_add_from_feat_block(features, S390_FEAT_TYPE_KIMD, prop.kimd);
+ s390_add_from_feat_block(features, S390_FEAT_TYPE_KLMD, prop.klmd);
+ }
+ if (test_bit(S390_FEAT_MSA_EXT_3, features)) {
+ s390_add_from_feat_block(features, S390_FEAT_TYPE_PCKMO, prop.pckmo);
+ }
+ if (test_bit(S390_FEAT_MSA_EXT_4, features)) {
+ s390_add_from_feat_block(features, S390_FEAT_TYPE_KMCTR, prop.kmctr);
+ s390_add_from_feat_block(features, S390_FEAT_TYPE_KMF, prop.kmf);
+ s390_add_from_feat_block(features, S390_FEAT_TYPE_KMO, prop.kmo);
+ s390_add_from_feat_block(features, S390_FEAT_TYPE_PCC, prop.pcc);
+ }
+ if (test_bit(S390_FEAT_MSA_EXT_5, features)) {
+ s390_add_from_feat_block(features, S390_FEAT_TYPE_PPNO, prop.ppno);
+ }
+ return 0;
+}
+
+static int configure_cpu_subfunc(const S390FeatBitmap features)
+{
+ struct kvm_s390_vm_cpu_subfunc prop = {};
+ struct kvm_device_attr attr = {
+ .group = KVM_S390_VM_CPU_MODEL,
+ .attr = KVM_S390_VM_CPU_PROCESSOR_SUBFUNC,
+ .addr = (uint64_t) &prop,
+ };
+
+ if (!kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
+ KVM_S390_VM_CPU_PROCESSOR_SUBFUNC)) {
+ /* hardware support might be missing, IBC will handle most of this */
+ return 0;
+ }
+
+ s390_fill_feat_block(features, S390_FEAT_TYPE_PLO, prop.plo);
+ if (test_bit(S390_FEAT_TOD_CLOCK_STEERING, features)) {
+ s390_fill_feat_block(features, S390_FEAT_TYPE_PTFF, prop.ptff);
+ prop.ptff[0] |= 0x80; /* query is always available */
+ }
+ if (test_bit(S390_FEAT_MSA, features)) {
+ s390_fill_feat_block(features, S390_FEAT_TYPE_KMAC, prop.kmac);
+ prop.kmac[0] |= 0x80; /* query is always available */
+ s390_fill_feat_block(features, S390_FEAT_TYPE_KMC, prop.kmc);
+ prop.kmc[0] |= 0x80; /* query is always available */
+ s390_fill_feat_block(features, S390_FEAT_TYPE_KM, prop.km);
+ prop.km[0] |= 0x80; /* query is always available */
+ s390_fill_feat_block(features, S390_FEAT_TYPE_KIMD, prop.kimd);
+ prop.kimd[0] |= 0x80; /* query is always available */
+ s390_fill_feat_block(features, S390_FEAT_TYPE_KLMD, prop.klmd);
+ prop.klmd[0] |= 0x80; /* query is always available */
+ }
+ if (test_bit(S390_FEAT_MSA_EXT_3, features)) {
+ s390_fill_feat_block(features, S390_FEAT_TYPE_PCKMO, prop.pckmo);
+ prop.pckmo[0] |= 0x80; /* query is always available */
+ }
+ if (test_bit(S390_FEAT_MSA_EXT_4, features)) {
+ s390_fill_feat_block(features, S390_FEAT_TYPE_KMCTR, prop.kmctr);
+ prop.kmctr[0] |= 0x80; /* query is always available */
+ s390_fill_feat_block(features, S390_FEAT_TYPE_KMF, prop.kmf);
+ prop.kmf[0] |= 0x80; /* query is always available */
+ s390_fill_feat_block(features, S390_FEAT_TYPE_KMO, prop.kmo);
+ prop.kmo[0] |= 0x80; /* query is always available */
+ s390_fill_feat_block(features, S390_FEAT_TYPE_PCC, prop.pcc);
+ prop.pcc[0] |= 0x80; /* query is always available */
+ }
+ if (test_bit(S390_FEAT_MSA_EXT_5, features)) {
+ s390_fill_feat_block(features, S390_FEAT_TYPE_PPNO, prop.ppno);
+ prop.ppno[0] |= 0x80; /* query is always available */
+ }
+ return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
+}
+
+static int kvm_to_feat[][2] = {
+ { KVM_S390_VM_CPU_FEAT_ESOP, S390_FEAT_ESOP },
+ { KVM_S390_VM_CPU_FEAT_SIEF2, S390_FEAT_SIE_F2 },
+ { KVM_S390_VM_CPU_FEAT_64BSCAO , S390_FEAT_SIE_64BSCAO },
+ { KVM_S390_VM_CPU_FEAT_SIIF, S390_FEAT_SIE_SIIF },
+ { KVM_S390_VM_CPU_FEAT_GPERE, S390_FEAT_SIE_GPERE },
+ { KVM_S390_VM_CPU_FEAT_GSLS, S390_FEAT_SIE_GSLS },
+ { KVM_S390_VM_CPU_FEAT_IB, S390_FEAT_SIE_IB },
+ { KVM_S390_VM_CPU_FEAT_CEI, S390_FEAT_SIE_CEI },
+ { KVM_S390_VM_CPU_FEAT_IBS, S390_FEAT_SIE_IBS },
+ { KVM_S390_VM_CPU_FEAT_SKEY, S390_FEAT_SIE_SKEY },
+ { KVM_S390_VM_CPU_FEAT_CMMA, S390_FEAT_SIE_CMMA },
+ { KVM_S390_VM_CPU_FEAT_PFMFI, S390_FEAT_SIE_PFMFI},
+ { KVM_S390_VM_CPU_FEAT_SIGPIF, S390_FEAT_SIE_SIGPIF},
+};
+
+static int query_cpu_feat(S390FeatBitmap features)
+{
+ struct kvm_s390_vm_cpu_feat prop;
+ struct kvm_device_attr attr = {
+ .group = KVM_S390_VM_CPU_MODEL,
+ .attr = KVM_S390_VM_CPU_MACHINE_FEAT,
+ .addr = (uint64_t) &prop,
+ };
+ int rc;
+ int i;
+
+ rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
+ if (rc) {
+ return rc;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(kvm_to_feat); i++) {
+ if (test_bit_inv(kvm_to_feat[i][0], (unsigned long *)prop.feat)) {
+ set_bit(kvm_to_feat[i][1], features);
+ }
+ }
+ return 0;
+}
+
+static int configure_cpu_feat(const S390FeatBitmap features)
+{
+ struct kvm_s390_vm_cpu_feat prop = {};
+ struct kvm_device_attr attr = {
+ .group = KVM_S390_VM_CPU_MODEL,
+ .attr = KVM_S390_VM_CPU_PROCESSOR_FEAT,
+ .addr = (uint64_t) &prop,
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(kvm_to_feat); i++) {
+ if (test_bit(kvm_to_feat[i][1], features)) {
+ set_bit_inv(kvm_to_feat[i][0], (unsigned long *)prop.feat);
+ }
+ }
+ return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
+}
+
+bool kvm_s390_cpu_models_supported(void)
+{
+ if (!cpu_model_allowed()) {
+ /* compatibility machines interfere with the cpu model */
+ return false;
+ }
+ return kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
+ KVM_S390_VM_CPU_MACHINE) &&
+ kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
+ KVM_S390_VM_CPU_PROCESSOR) &&
+ kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
+ KVM_S390_VM_CPU_MACHINE_FEAT) &&
+ kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
+ KVM_S390_VM_CPU_PROCESSOR_FEAT) &&
+ kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
+ KVM_S390_VM_CPU_MACHINE_SUBFUNC);
+}
+
+void kvm_s390_get_host_cpu_model(S390CPUModel *model, Error **errp)
+{
+ struct kvm_s390_vm_cpu_machine prop = {};
+ struct kvm_device_attr attr = {
+ .group = KVM_S390_VM_CPU_MODEL,
+ .attr = KVM_S390_VM_CPU_MACHINE,
+ .addr = (uint64_t) &prop,
+ };
+ uint16_t unblocked_ibc = 0, cpu_type = 0;
+ int rc;
+
+ memset(model, 0, sizeof(*model));
+
+ if (!kvm_s390_cpu_models_supported()) {
+ error_setg(errp, "KVM doesn't support CPU models");
+ return;
+ }
+
+ /* query the basic cpu model properties */
+ rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
+ if (rc) {
+ error_setg(errp, "KVM: Error querying host CPU model: %d", rc);
+ return;
+ }
+
+ cpu_type = cpuid_type(prop.cpuid);
+ if (has_ibc(prop.ibc)) {
+ model->lowest_ibc = lowest_ibc(prop.ibc);
+ unblocked_ibc = unblocked_ibc(prop.ibc);
+ }
+ model->cpu_id = cpuid_id(prop.cpuid);
+ model->cpu_ver = 0xff;
+
+ /* get supported cpu features indicated via STFL(E) */
+ s390_add_from_feat_block(model->features, S390_FEAT_TYPE_STFL,
+ (uint8_t *) prop.fac_mask);
+ /* dat-enhancement facility 2 has no bit but was introduced with stfle */
+ if (test_bit(S390_FEAT_STFLE, model->features)) {
+ set_bit(S390_FEAT_DAT_ENH_2, model->features);
+ }
+ /* get supported cpu features indicated e.g. via SCLP */
+ rc = query_cpu_feat(model->features);
+ if (rc) {
+ error_setg(errp, "KVM: Error querying CPU features: %d", rc);
+ return;
+ }
+ /* get supported cpu subfunctions indicated via query / test bit */
+ rc = query_cpu_subfunc(model->features);
+ if (rc) {
+ error_setg(errp, "KVM: Error querying CPU subfunctions: %d", rc);
+ return;
+ }
+
+ /* with cpu model support, CMM is only indicated if really available */
+ if (kvm_s390_cmma_available()) {
+ set_bit(S390_FEAT_CMM, model->features);
+ }
+
+ if (s390_known_cpu_type(cpu_type)) {
+ /* we want the exact model, even if some features are missing */
+ model->def = s390_find_cpu_def(cpu_type, ibc_gen(unblocked_ibc),
+ ibc_ec_ga(unblocked_ibc), NULL);
+ } else {
+ /* model unknown, e.g. too new - search using features */
+ model->def = s390_find_cpu_def(0, ibc_gen(unblocked_ibc),
+ ibc_ec_ga(unblocked_ibc),
+ model->features);
+ }
+ if (!model->def) {
+ error_setg(errp, "KVM: host CPU model could not be identified");
+ return;
+ }
+ /* strip off features that are not part of the maximum model */
+ bitmap_and(model->features, model->features, model->def->full_feat,
+ S390_FEAT_MAX);
+}
+
+void kvm_s390_apply_cpu_model(const S390CPUModel *model, Error **errp)
+{
+ struct kvm_s390_vm_cpu_processor prop = {
+ .fac_list = { 0 },
+ };
+ struct kvm_device_attr attr = {
+ .group = KVM_S390_VM_CPU_MODEL,
+ .attr = KVM_S390_VM_CPU_PROCESSOR,
+ .addr = (uint64_t) &prop,
+ };
+ int rc;
+
+ if (!model) {
+ /* compatibility handling if cpu models are disabled */
+ if (kvm_s390_cmma_available() && !mem_path) {
+ kvm_s390_enable_cmma();
+ }
+ return;
+ }
+ if (!kvm_s390_cpu_models_supported()) {
+ error_setg(errp, "KVM doesn't support CPU models");
+ return;
+ }
+ prop.cpuid = s390_cpuid_from_cpu_model(model);
+ prop.ibc = s390_ibc_from_cpu_model(model);
+ /* configure cpu features indicated via STFL(E) */
+ s390_fill_feat_block(model->features, S390_FEAT_TYPE_STFL,
+ (uint8_t *) prop.fac_list);
+ rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
+ if (rc) {
+ error_setg(errp, "KVM: Error configuring the CPU model: %d", rc);
+ return;
+ }
+ /* configure cpu features indicated e.g. via SCLP */
+ rc = configure_cpu_feat(model->features);
+ if (rc) {
+ error_setg(errp, "KVM: Error configuring CPU features: %d", rc);
+ return;
+ }
+ /* configure cpu subfunctions indicated via query / test bit */
+ rc = configure_cpu_subfunc(model->features);
+ if (rc) {
+ error_setg(errp, "KVM: Error configuring CPU subfunctions: %d", rc);
+ return;
+ }
+ /* enable CMM via CMMA - disable on hugetlbfs */
+ if (test_bit(S390_FEAT_CMM, model->features)) {
+ if (mem_path) {
+ error_report("Warning: CMM will not be enabled because it is not "
+ "compatible to hugetlbfs.");
+ } else {
+ kvm_s390_enable_cmma();
+ }
+ }
+}
diff --git a/target/s390x/machine.c b/target/s390x/machine.c
new file mode 100644
index 0000000000..edc3a4717b
--- /dev/null
+++ b/target/s390x/machine.c
@@ -0,0 +1,193 @@
+/*
+ * S390x machine definitions and functions
+ *
+ * Copyright IBM Corp. 2014
+ *
+ * Authors:
+ * Thomas Huth <thuth@linux.vnet.ibm.com>
+ * Christian Borntraeger <borntraeger@de.ibm.com>
+ * Jason J. Herne <jjherne@us.ibm.com>
+ *
+ * This work is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/hw.h"
+#include "cpu.h"
+#include "sysemu/kvm.h"
+
+static int cpu_post_load(void *opaque, int version_id)
+{
+ S390CPU *cpu = opaque;
+
+ /*
+ * As the cpu state is pushed to kvm via kvm_set_mp_state rather
+ * than via cpu_synchronize_state, we need to update kvm here.
+ */
+ if (kvm_enabled()) {
+ kvm_s390_set_cpu_state(cpu, cpu->env.cpu_state);
+ return kvm_s390_vcpu_interrupt_post_load(cpu);
+ }
+
+ return 0;
+}
+
+static void cpu_pre_save(void *opaque)
+{
+ S390CPU *cpu = opaque;
+
+ if (kvm_enabled()) {
+ kvm_s390_vcpu_interrupt_pre_save(cpu);
+ }
+}
+
+static inline bool fpu_needed(void *opaque)
+{
+ /* This looks odd, but we might want to NOT transfer fprs in the future */
+ return true;
+}
+
+static const VMStateDescription vmstate_fpu = {
+ .name = "cpu/fpu",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = fpu_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.vregs[0][0].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[1][0].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[2][0].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[3][0].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[4][0].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[5][0].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[6][0].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[7][0].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[8][0].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[9][0].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[10][0].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[11][0].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[12][0].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[13][0].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[14][0].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[15][0].ll, S390CPU),
+ VMSTATE_UINT32(env.fpc, S390CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool vregs_needed(void *opaque)
+{
+ return s390_has_feat(S390_FEAT_VECTOR);
+}
+
+static const VMStateDescription vmstate_vregs = {
+ .name = "cpu/vregs",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = vregs_needed,
+ .fields = (VMStateField[]) {
+ /* vregs[0][0] -> vregs[15][0] and fregs are overlays */
+ VMSTATE_UINT64(env.vregs[16][0].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[17][0].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[18][0].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[19][0].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[20][0].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[21][0].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[22][0].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[23][0].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[24][0].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[25][0].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[26][0].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[27][0].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[28][0].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[29][0].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[30][0].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[31][0].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[0][1].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[1][1].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[2][1].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[3][1].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[4][1].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[5][1].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[6][1].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[7][1].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[8][1].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[9][1].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[10][1].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[11][1].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[12][1].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[13][1].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[14][1].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[15][1].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[16][1].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[17][1].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[18][1].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[19][1].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[20][1].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[21][1].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[22][1].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[23][1].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[24][1].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[25][1].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[26][1].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[27][1].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[28][1].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[29][1].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[30][1].ll, S390CPU),
+ VMSTATE_UINT64(env.vregs[31][1].ll, S390CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool riccb_needed(void *opaque)
+{
+ return s390_has_feat(S390_FEAT_RUNTIME_INSTRUMENTATION);
+}
+
+const VMStateDescription vmstate_riccb = {
+ .name = "cpu/riccb",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = riccb_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8_ARRAY(env.riccb, S390CPU, 64),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+const VMStateDescription vmstate_s390_cpu = {
+ .name = "cpu",
+ .post_load = cpu_post_load,
+ .pre_save = cpu_pre_save,
+ .version_id = 4,
+ .minimum_version_id = 3,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64_ARRAY(env.regs, S390CPU, 16),
+ VMSTATE_UINT64(env.psw.mask, S390CPU),
+ VMSTATE_UINT64(env.psw.addr, S390CPU),
+ VMSTATE_UINT64(env.psa, S390CPU),
+ VMSTATE_UINT32(env.todpr, S390CPU),
+ VMSTATE_UINT64(env.pfault_token, S390CPU),
+ VMSTATE_UINT64(env.pfault_compare, S390CPU),
+ VMSTATE_UINT64(env.pfault_select, S390CPU),
+ VMSTATE_UINT64(env.cputm, S390CPU),
+ VMSTATE_UINT64(env.ckc, S390CPU),
+ VMSTATE_UINT64(env.gbea, S390CPU),
+ VMSTATE_UINT64(env.pp, S390CPU),
+ VMSTATE_UINT32_ARRAY(env.aregs, S390CPU, 16),
+ VMSTATE_UINT64_ARRAY(env.cregs, S390CPU, 16),
+ VMSTATE_UINT8(env.cpu_state, S390CPU),
+ VMSTATE_UINT8(env.sigp_order, S390CPU),
+ VMSTATE_UINT32_V(irqstate_saved_size, S390CPU, 4),
+ VMSTATE_VBUFFER_UINT32(irqstate, S390CPU, 4, NULL, 0,
+ irqstate_saved_size),
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_fpu,
+ &vmstate_vregs,
+ &vmstate_riccb,
+ NULL
+ },
+};
diff --git a/target/s390x/mem_helper.c b/target/s390x/mem_helper.c
new file mode 100644
index 0000000000..99bc5e2834
--- /dev/null
+++ b/target/s390x/mem_helper.c
@@ -0,0 +1,1202 @@
+/*
+ * S/390 memory access helper routines
+ *
+ * Copyright (c) 2009 Ulrich Hecht
+ * Copyright (c) 2009 Alexander Graf
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+
+#if !defined(CONFIG_USER_ONLY)
+#include "hw/s390x/storage-keys.h"
+#endif
+
+/*****************************************************************************/
+/* Softmmu support */
+#if !defined(CONFIG_USER_ONLY)
+
+/* Try to fill the TLB and raise an exception on error. If retaddr is
+ NULL, the function was called from C code (i.e. not from generated
+ code or from helper.c). */
+/* XXX: fix it to restore all registers */
+void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ int ret;
+
+ ret = s390_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
+ if (unlikely(ret != 0)) {
+ if (likely(retaddr)) {
+ /* now we have a real cpu fault */
+ cpu_restore_state(cs, retaddr);
+ }
+ cpu_loop_exit(cs);
+ }
+}
+
+#endif
+
+/* #define DEBUG_HELPER */
+#ifdef DEBUG_HELPER
+#define HELPER_LOG(x...) qemu_log(x)
+#else
+#define HELPER_LOG(x...)
+#endif
+
+/* Reduce the length so that addr + len doesn't cross a page boundary. */
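+/* Worked example (assuming the usual 4 KiB TARGET_PAGE_SIZE): for
+ addr = 0x1ff0 and len = 0x30 the access would cross a page boundary,
+ so the length is clipped to -0x1ff0 & 0xfff = 0x10 bytes; the caller's
+ next loop iteration handles the remaining 0x20 bytes on the next page. */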
+static inline uint64_t adj_len_to_page(uint64_t len, uint64_t addr)
+{
+#ifndef CONFIG_USER_ONLY
+ if ((addr & ~TARGET_PAGE_MASK) + len - 1 >= TARGET_PAGE_SIZE) {
+ return -addr & ~TARGET_PAGE_MASK;
+ }
+#endif
+ return len;
+}
+
+static void fast_memset(CPUS390XState *env, uint64_t dest, uint8_t byte,
+ uint32_t l)
+{
+ int mmu_idx = cpu_mmu_index(env, false);
+
+ while (l > 0) {
+ void *p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
+ if (p) {
+ /* Access to the whole page in write mode granted. */
+ int l_adj = adj_len_to_page(l, dest);
+ memset(p, byte, l_adj);
+ dest += l_adj;
+ l -= l_adj;
+ } else {
+ /* We failed to get access to the whole page. The next write
+ access will likely fill the QEMU TLB for the next iteration. */
+ cpu_stb_data(env, dest, byte);
+ dest++;
+ l--;
+ }
+ }
+}
+
+static void fast_memmove(CPUS390XState *env, uint64_t dest, uint64_t src,
+ uint32_t l)
+{
+ int mmu_idx = cpu_mmu_index(env, false);
+
+ while (l > 0) {
+ void *src_p = tlb_vaddr_to_host(env, src, MMU_DATA_LOAD, mmu_idx);
+ void *dest_p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
+ if (src_p && dest_p) {
+ /* Access to both whole pages granted. */
+ int l_adj = adj_len_to_page(l, src);
+ l_adj = adj_len_to_page(l_adj, dest);
+ memmove(dest_p, src_p, l_adj);
+ src += l_adj;
+ dest += l_adj;
+ l -= l_adj;
+ } else {
+ /* We failed to get access to one or both whole pages. The next
+ read or write access will likely fill the QEMU TLB for the
+ next iteration. */
+ cpu_stb_data(env, dest, cpu_ldub_data(env, src));
+ src++;
+ dest++;
+ l--;
+ }
+ }
+}
+
+/* and on array */
+uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest,
+ uint64_t src)
+{
+ int i;
+ unsigned char x;
+ uint32_t cc = 0;
+
+ HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
+ __func__, l, dest, src);
+ for (i = 0; i <= l; i++) {
+ x = cpu_ldub_data(env, dest + i) & cpu_ldub_data(env, src + i);
+ if (x) {
+ cc = 1;
+ }
+ cpu_stb_data(env, dest + i, x);
+ }
+ return cc;
+}
+
+/* xor on array */
+uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest,
+ uint64_t src)
+{
+ int i;
+ unsigned char x;
+ uint32_t cc = 0;
+
+ HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
+ __func__, l, dest, src);
+
+ /* xor with itself is the same as memset(0) */
+ if (src == dest) {
+ fast_memset(env, dest, 0, l + 1);
+ return 0;
+ }
+
+ for (i = 0; i <= l; i++) {
+ x = cpu_ldub_data(env, dest + i) ^ cpu_ldub_data(env, src + i);
+ if (x) {
+ cc = 1;
+ }
+ cpu_stb_data(env, dest + i, x);
+ }
+ return cc;
+}
+
+/* or on array */
+uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest,
+ uint64_t src)
+{
+ int i;
+ unsigned char x;
+ uint32_t cc = 0;
+
+ HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
+ __func__, l, dest, src);
+ for (i = 0; i <= l; i++) {
+ x = cpu_ldub_data(env, dest + i) | cpu_ldub_data(env, src + i);
+ if (x) {
+ cc = 1;
+ }
+ cpu_stb_data(env, dest + i, x);
+ }
+ return cc;
+}
+
+/* memmove */
+void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
+{
+ int i = 0;
+
+ HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
+ __func__, l, dest, src);
+
+ /* mvc with the destination pointing to the byte right after the source
+ is the same as a memset with the first source byte */
+ if (dest == (src + 1)) {
+ fast_memset(env, dest, cpu_ldub_data(env, src), l + 1);
+ return;
+ }
+
+ /* mvc and memmove do not behave the same when areas overlap! */
+ if ((dest < src) || (src + l < dest)) {
+ fast_memmove(env, dest, src, l + 1);
+ return;
+ }
+
+ /* slow version with byte accesses which always work */
+ for (i = 0; i <= l; i++) {
+ cpu_stb_data(env, dest + i, cpu_ldub_data(env, src + i));
+ }
+}
+
+/* compare unsigned byte arrays */
+uint32_t HELPER(clc)(CPUS390XState *env, uint32_t l, uint64_t s1, uint64_t s2)
+{
+ int i;
+ unsigned char x, y;
+ uint32_t cc;
+
+ HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
+ __func__, l, s1, s2);
+ for (i = 0; i <= l; i++) {
+ x = cpu_ldub_data(env, s1 + i);
+ y = cpu_ldub_data(env, s2 + i);
+ HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
+ if (x < y) {
+ cc = 1;
+ goto done;
+ } else if (x > y) {
+ cc = 2;
+ goto done;
+ }
+ }
+ cc = 0;
+ done:
+ HELPER_LOG("\n");
+ return cc;
+}
+
+/* compare logical under mask */
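+/* Worked example: with r1 = 0x12345678 and mask = 0x5 (binary 0101), the
+ bytes 0x34 and 0x78 of r1 are compared against two consecutive bytes
+ read from addr; the first inequality determines the condition code. */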
+uint32_t HELPER(clm)(CPUS390XState *env, uint32_t r1, uint32_t mask,
+ uint64_t addr)
+{
+ uint8_t r, d;
+ uint32_t cc;
+
+ HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __func__, r1,
+ mask, addr);
+ cc = 0;
+ while (mask) {
+ if (mask & 8) {
+ d = cpu_ldub_data(env, addr);
+ r = (r1 & 0xff000000UL) >> 24;
+ HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
+ addr);
+ if (r < d) {
+ cc = 1;
+ break;
+ } else if (r > d) {
+ cc = 2;
+ break;
+ }
+ addr++;
+ }
+ mask = (mask << 1) & 0xf;
+ r1 <<= 8;
+ }
+ HELPER_LOG("\n");
+ return cc;
+}
+
+static inline uint64_t fix_address(CPUS390XState *env, uint64_t a)
+{
+ /* 31-Bit mode */
+ if (!(env->psw.mask & PSW_MASK_64)) {
+ a &= 0x7fffffff;
+ }
+ return a;
+}
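+
+/* In 31-bit mode fix_address() masks addresses down to 31 bits, so e.g.
+ 0xdeadbeef becomes 0x5eadbeef; in 64-bit mode addresses pass through
+ unchanged. */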
+
+static inline uint64_t get_address(CPUS390XState *env, int x2, int b2, int d2)
+{
+ uint64_t r = d2;
+ if (x2) {
+ r += env->regs[x2];
+ }
+ if (b2) {
+ r += env->regs[b2];
+ }
+ return fix_address(env, r);
+}
+
+static inline uint64_t get_address_31fix(CPUS390XState *env, int reg)
+{
+ return fix_address(env, env->regs[reg]);
+}
+
+/* search string (c is byte to search, r2 is string, r1 end of string) */
+uint64_t HELPER(srst)(CPUS390XState *env, uint64_t r0, uint64_t end,
+ uint64_t str)
+{
+ uint32_t len;
+ uint8_t v, c = r0;
+
+ str = fix_address(env, str);
+ end = fix_address(env, end);
+
+ /* Assume for now that R2 is unmodified. */
+ env->retxl = str;
+
+ /* Lest we fail to service interrupts in a timely manner, limit the
+ amount of work we're willing to do. For now, let's cap at 8k. */
+ for (len = 0; len < 0x2000; ++len) {
+ if (str + len == end) {
+ /* Character not found. R1 & R2 are unmodified. */
+ env->cc_op = 2;
+ return end;
+ }
+ v = cpu_ldub_data(env, str + len);
+ if (v == c) {
+ /* Character found. Set R1 to the location; R2 is unmodified. */
+ env->cc_op = 1;
+ return str + len;
+ }
+ }
+
+ /* CPU-determined bytes processed. Advance R2 to next byte to process. */
+ env->retxl = str + len;
+ env->cc_op = 3;
+ return end;
+}
+
+/* unsigned string compare (c is string terminator) */
+uint64_t HELPER(clst)(CPUS390XState *env, uint64_t c, uint64_t s1, uint64_t s2)
+{
+ uint32_t len;
+
+ c = c & 0xff;
+ s1 = fix_address(env, s1);
+ s2 = fix_address(env, s2);
+
+ /* Lest we fail to service interrupts in a timely manner, limit the
+ amount of work we're willing to do. For now, let's cap at 8k. */
+ for (len = 0; len < 0x2000; ++len) {
+ uint8_t v1 = cpu_ldub_data(env, s1 + len);
+ uint8_t v2 = cpu_ldub_data(env, s2 + len);
+ if (v1 == v2) {
+ if (v1 == c) {
+ /* Equal. CC=0, and don't advance the registers. */
+ env->cc_op = 0;
+ env->retxl = s2;
+ return s1;
+ }
+ } else {
+ /* Unequal. CC={1,2}, and advance the registers. Note that
+ the terminator need not be zero, but the string that contains
+ the terminator is by definition "low". */
+ env->cc_op = (v1 == c ? 1 : v2 == c ? 2 : v1 < v2 ? 1 : 2);
+ env->retxl = s2 + len;
+ return s1 + len;
+ }
+ }
+
+ /* CPU-determined bytes equal; advance the registers. */
+ env->cc_op = 3;
+ env->retxl = s2 + len;
+ return s1 + len;
+}
+
+/* move page */
+void HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
+{
+ /* XXX missing r0 handling */
+ env->cc_op = 0;
+ fast_memmove(env, r1, r2, TARGET_PAGE_SIZE);
+}
+
+/* string copy (c is string terminator) */
+uint64_t HELPER(mvst)(CPUS390XState *env, uint64_t c, uint64_t d, uint64_t s)
+{
+ uint32_t len;
+
+ c = c & 0xff;
+ d = fix_address(env, d);
+ s = fix_address(env, s);
+
+ /* Lest we fail to service interrupts in a timely manner, limit the
+ amount of work we're willing to do. For now, let's cap at 8k. */
+ for (len = 0; len < 0x2000; ++len) {
+ uint8_t v = cpu_ldub_data(env, s + len);
+ cpu_stb_data(env, d + len, v);
+ if (v == c) {
+ /* Complete. Set CC=1 and advance R1. */
+ env->cc_op = 1;
+ env->retxl = s;
+ return d + len;
+ }
+ }
+
+ /* Incomplete. Set CC=3 and signal to advance R1 and R2. */
+ env->cc_op = 3;
+ env->retxl = s + len;
+ return d + len;
+}
+
+static uint32_t helper_icm(CPUS390XState *env, uint32_t r1, uint64_t address,
+ uint32_t mask)
+{
+ int pos = 24; /* top of the lower half of r1 */
+ uint64_t rmask = 0xff000000ULL;
+ uint8_t val = 0;
+ int ccd = 0;
+ uint32_t cc = 0;
+
+ while (mask) {
+ if (mask & 8) {
+ env->regs[r1] &= ~rmask;
+ val = cpu_ldub_data(env, address);
+ if ((val & 0x80) && !ccd) {
+ cc = 1;
+ }
+ ccd = 1;
+ if (val && cc == 0) {
+ cc = 2;
+ }
+ env->regs[r1] |= (uint64_t)val << pos;
+ address++;
+ }
+ mask = (mask << 1) & 0xf;
+ pos -= 8;
+ rmask >>= 8;
+ }
+
+ return cc;
+}
+
+/* execute instruction
+ This instruction executes an insn modified with the contents of r1.
+ It does not change the executed instruction in memory and it does not
+ change the program counter; in other words: tricky.
+ It is currently implemented by interpreting the cases it is most
+ commonly used in.
+*/
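+/* For instance, an EXECUTE whose target is MVC (opcode 0xd2, so
+ insn & 0xf0ff == 0xd000 and insn & 0xf00 == 0x200) takes its length
+ field from the low byte of the r1 value (l = v1 & 0xff) and forwards
+ the two base/displacement operands decoded from the remaining
+ instruction bytes to helper_mvc(). */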
+uint32_t HELPER(ex)(CPUS390XState *env, uint32_t cc, uint64_t v1,
+ uint64_t addr, uint64_t ret)
+{
+ S390CPU *cpu = s390_env_get_cpu(env);
+ uint16_t insn = cpu_lduw_code(env, addr);
+
+ HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __func__, v1, addr,
+ insn);
+ if ((insn & 0xf0ff) == 0xd000) {
+ uint32_t l, insn2, b1, b2, d1, d2;
+
+ l = v1 & 0xff;
+ insn2 = cpu_ldl_code(env, addr + 2);
+ b1 = (insn2 >> 28) & 0xf;
+ b2 = (insn2 >> 12) & 0xf;
+ d1 = (insn2 >> 16) & 0xfff;
+ d2 = insn2 & 0xfff;
+ switch (insn & 0xf00) {
+ case 0x200:
+ helper_mvc(env, l, get_address(env, 0, b1, d1),
+ get_address(env, 0, b2, d2));
+ break;
+ case 0x400:
+ cc = helper_nc(env, l, get_address(env, 0, b1, d1),
+ get_address(env, 0, b2, d2));
+ break;
+ case 0x500:
+ cc = helper_clc(env, l, get_address(env, 0, b1, d1),
+ get_address(env, 0, b2, d2));
+ break;
+ case 0x600:
+ cc = helper_oc(env, l, get_address(env, 0, b1, d1),
+ get_address(env, 0, b2, d2));
+ break;
+ case 0x700:
+ cc = helper_xc(env, l, get_address(env, 0, b1, d1),
+ get_address(env, 0, b2, d2));
+ break;
+ case 0xc00:
+ helper_tr(env, l, get_address(env, 0, b1, d1),
+ get_address(env, 0, b2, d2));
+ break;
+ case 0xd00:
+ cc = helper_trt(env, l, get_address(env, 0, b1, d1),
+ get_address(env, 0, b2, d2));
+ break;
+ default:
+ goto abort;
+ }
+ } else if ((insn & 0xff00) == 0x0a00) {
+ /* supervisor call */
+ HELPER_LOG("%s: svc %ld via execute\n", __func__, (insn | v1) & 0xff);
+ env->psw.addr = ret - 4;
+ env->int_svc_code = (insn | v1) & 0xff;
+ env->int_svc_ilen = 4;
+ helper_exception(env, EXCP_SVC);
+ } else if ((insn & 0xff00) == 0xbf00) {
+ uint32_t insn2, r1, r3, b2, d2;
+
+ insn2 = cpu_ldl_code(env, addr + 2);
+ r1 = (insn2 >> 20) & 0xf;
+ r3 = (insn2 >> 16) & 0xf;
+ b2 = (insn2 >> 12) & 0xf;
+ d2 = insn2 & 0xfff;
+ cc = helper_icm(env, r1, get_address(env, 0, b2, d2), r3);
+ } else {
+ abort:
+ cpu_abort(CPU(cpu), "EXECUTE on instruction prefix 0x%x not implemented\n",
+ insn);
+ }
+ return cc;
+}
+
+/* load access registers r1 to r3 from memory at a2 */
+void HELPER(lam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
+{
+ int i;
+
+ for (i = r1;; i = (i + 1) % 16) {
+ env->aregs[i] = cpu_ldl_data(env, a2);
+ a2 += 4;
+
+ if (i == r3) {
+ break;
+ }
+ }
+}
+
+/* store access registers r1 to r3 in memory at a2 */
+void HELPER(stam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
+{
+ int i;
+
+ for (i = r1;; i = (i + 1) % 16) {
+ cpu_stl_data(env, a2, env->aregs[i]);
+ a2 += 4;
+
+ if (i == r3) {
+ break;
+ }
+ }
+}
+
+/* move long */
+uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
+{
+ uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
+ uint64_t dest = get_address_31fix(env, r1);
+ uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
+ uint64_t src = get_address_31fix(env, r2);
+ uint8_t pad = env->regs[r2 + 1] >> 24;
+ uint8_t v;
+ uint32_t cc;
+
+ if (destlen == srclen) {
+ cc = 0;
+ } else if (destlen < srclen) {
+ cc = 1;
+ } else {
+ cc = 2;
+ }
+
+ if (srclen > destlen) {
+ srclen = destlen;
+ }
+
+ for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
+ v = cpu_ldub_data(env, src);
+ cpu_stb_data(env, dest, v);
+ }
+
+ for (; destlen; dest++, destlen--) {
+ cpu_stb_data(env, dest, pad);
+ }
+
+ env->regs[r1 + 1] = destlen;
+ /* can't use srclen here, we trunc'ed it */
+ env->regs[r2 + 1] -= src - env->regs[r2];
+ env->regs[r1] = dest;
+ env->regs[r2] = src;
+
+ return cc;
+}
+
+/* move long extended - another memcopy insn with more bells and whistles */
+uint32_t HELPER(mvcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
+ uint32_t r3)
+{
+ uint64_t destlen = env->regs[r1 + 1];
+ uint64_t dest = env->regs[r1];
+ uint64_t srclen = env->regs[r3 + 1];
+ uint64_t src = env->regs[r3];
+ uint8_t pad = a2 & 0xff;
+ uint8_t v;
+ uint32_t cc;
+
+ if (!(env->psw.mask & PSW_MASK_64)) {
+ destlen = (uint32_t)destlen;
+ srclen = (uint32_t)srclen;
+ dest &= 0x7fffffff;
+ src &= 0x7fffffff;
+ }
+
+ if (destlen == srclen) {
+ cc = 0;
+ } else if (destlen < srclen) {
+ cc = 1;
+ } else {
+ cc = 2;
+ }
+
+ if (srclen > destlen) {
+ srclen = destlen;
+ }
+
+ for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
+ v = cpu_ldub_data(env, src);
+ cpu_stb_data(env, dest, v);
+ }
+
+ for (; destlen; dest++, destlen--) {
+ cpu_stb_data(env, dest, pad);
+ }
+
+ env->regs[r1 + 1] = destlen;
+ /* can't use srclen here, we trunc'ed it */
+ /* FIXME: 31-bit mode! */
+ env->regs[r3 + 1] -= src - env->regs[r3];
+ env->regs[r1] = dest;
+ env->regs[r3] = src;
+
+ return cc;
+}
+
+/* compare logical long extended - memcompare insn with padding */
+uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
+ uint32_t r3)
+{
+ uint64_t destlen = env->regs[r1 + 1];
+ uint64_t dest = get_address_31fix(env, r1);
+ uint64_t srclen = env->regs[r3 + 1];
+ uint64_t src = get_address_31fix(env, r3);
+ uint8_t pad = a2 & 0xff;
+ uint8_t v1 = 0, v2 = 0;
+ uint32_t cc = 0;
+
+ if (!(destlen || srclen)) {
+ return cc;
+ }
+
+ if (srclen > destlen) {
+ srclen = destlen;
+ }
+
+ for (; destlen || srclen; src++, dest++, destlen--, srclen--) {
+ v1 = srclen ? cpu_ldub_data(env, src) : pad;
+ v2 = destlen ? cpu_ldub_data(env, dest) : pad;
+ if (v1 != v2) {
+ cc = (v1 < v2) ? 1 : 2;
+ break;
+ }
+ }
+
+ env->regs[r1 + 1] = destlen;
+ /* can't use srclen here, we trunc'ed it */
+ env->regs[r3 + 1] -= src - env->regs[r3];
+ env->regs[r1] = dest;
+ env->regs[r3] = src;
+
+ return cc;
+}
+
+/* checksum */
+uint64_t HELPER(cksm)(CPUS390XState *env, uint64_t r1,
+ uint64_t src, uint64_t src_len)
+{
+ uint64_t max_len, len;
+ uint64_t cksm = (uint32_t)r1;
+
+ /* Lest we fail to service interrupts in a timely manner, limit the
+ amount of work we're willing to do. For now, let's cap at 8k. */
+ max_len = (src_len > 0x2000 ? 0x2000 : src_len);
+
+ /* Process full words as available. */
+ for (len = 0; len + 4 <= max_len; len += 4, src += 4) {
+ cksm += (uint32_t)cpu_ldl_data(env, src);
+ }
+
+ switch (max_len - len) {
+ case 1:
+ cksm += cpu_ldub_data(env, src) << 24;
+ len += 1;
+ break;
+ case 2:
+ cksm += cpu_lduw_data(env, src) << 16;
+ len += 2;
+ break;
+ case 3:
+ cksm += cpu_lduw_data(env, src) << 16;
+ cksm += cpu_ldub_data(env, src + 2) << 8;
+ len += 3;
+ break;
+ }
+
+ /* Fold the carry from the checksum. Note that we can see carry-out
+ during folding more than once (but probably not more than twice). */
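+ /* For example, an intermediate sum of 0x100000003 folds to 4:
+ the low word 3 plus the carried-out high word 1. */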
+ while (cksm > 0xffffffffull) {
+ cksm = (uint32_t)cksm + (cksm >> 32);
+ }
+
+ /* Indicate whether or not we've processed everything. */
+ env->cc_op = (len == src_len ? 0 : 3);
+
+ /* Return both cksm and processed length. */
+ env->retxl = cksm;
+ return len;
+}
+
+void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest,
+ uint64_t src)
+{
+ int len_dest = len >> 4;
+ int len_src = len & 0xf;
+ uint8_t b;
+ int second_nibble = 0;
+
+ dest += len_dest;
+ src += len_src;
+
+ /* last byte is special, it only flips the nibbles */
+ b = cpu_ldub_data(env, src);
+ cpu_stb_data(env, dest, (b << 4) | (b >> 4));
+ src--;
+ len_src--;
+
+ /* now pad every nibble with 0xf0 */
+
+ while (len_dest > 0) {
+ uint8_t cur_byte = 0;
+
+ if (len_src > 0) {
+ cur_byte = cpu_ldub_data(env, src);
+ }
+
+ len_dest--;
+ dest--;
+
+ /* only advance one nibble at a time */
+ if (second_nibble) {
+ cur_byte >>= 4;
+ len_src--;
+ src--;
+ }
+ second_nibble = !second_nibble;
+
+ /* digit */
+ cur_byte = (cur_byte & 0xf);
+ /* zone bits */
+ cur_byte |= 0xf0;
+
+ cpu_stb_data(env, dest, cur_byte);
+ }
+}
+
+void HELPER(tr)(CPUS390XState *env, uint32_t len, uint64_t array,
+ uint64_t trans)
+{
+ int i;
+
+ for (i = 0; i <= len; i++) {
+ uint8_t byte = cpu_ldub_data(env, array + i);
+ uint8_t new_byte = cpu_ldub_data(env, trans + byte);
+
+ cpu_stb_data(env, array + i, new_byte);
+ }
+}
+
+uint64_t HELPER(tre)(CPUS390XState *env, uint64_t array,
+ uint64_t len, uint64_t trans)
+{
+ uint8_t end = env->regs[0] & 0xff;
+ uint64_t l = len;
+ uint64_t i;
+
+ if (!(env->psw.mask & PSW_MASK_64)) {
+ array &= 0x7fffffff;
+ l = (uint32_t)l;
+ }
+
+ /* Lest we fail to service interrupts in a timely manner, limit the
+ amount of work we're willing to do. For now, let's cap at 8k. */
+ if (l > 0x2000) {
+ l = 0x2000;
+ env->cc_op = 3;
+ } else {
+ env->cc_op = 0;
+ }
+
+ for (i = 0; i < l; i++) {
+ uint8_t byte, new_byte;
+
+ byte = cpu_ldub_data(env, array + i);
+
+ if (byte == end) {
+ env->cc_op = 1;
+ break;
+ }
+
+ new_byte = cpu_ldub_data(env, trans + byte);
+ cpu_stb_data(env, array + i, new_byte);
+ }
+
+ env->retxl = len - i;
+ return array + i;
+}
+
+uint32_t HELPER(trt)(CPUS390XState *env, uint32_t len, uint64_t array,
+ uint64_t trans)
+{
+ uint32_t cc = 0;
+ int i;
+
+ for (i = 0; i <= len; i++) {
+ uint8_t byte = cpu_ldub_data(env, array + i);
+ uint8_t sbyte = cpu_ldub_data(env, trans + byte);
+
+ if (sbyte != 0) {
+ env->regs[1] = array + i;
+ env->regs[2] = (env->regs[2] & ~0xff) | sbyte;
+ cc = (i == len) ? 2 : 1;
+ break;
+ }
+ }
+
+ return cc;
+}
+
+#if !defined(CONFIG_USER_ONLY)
+void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
+{
+ S390CPU *cpu = s390_env_get_cpu(env);
+ bool PERchanged = false;
+ int i;
+ uint64_t src = a2;
+ uint64_t val;
+
+ for (i = r1;; i = (i + 1) % 16) {
+ val = cpu_ldq_data(env, src);
+ if (env->cregs[i] != val && i >= 9 && i <= 11) {
+ PERchanged = true;
+ }
+ env->cregs[i] = val;
+ HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
+ i, src, env->cregs[i]);
+ src += sizeof(uint64_t);
+
+ if (i == r3) {
+ break;
+ }
+ }
+
+ if (PERchanged && env->psw.mask & PSW_MASK_PER) {
+ s390_cpu_recompute_watchpoints(CPU(cpu));
+ }
+
+ tlb_flush(CPU(cpu), 1);
+}
+
+void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
+{
+ S390CPU *cpu = s390_env_get_cpu(env);
+ bool PERchanged = false;
+ int i;
+ uint64_t src = a2;
+ uint32_t val;
+
+ for (i = r1;; i = (i + 1) % 16) {
+ val = cpu_ldl_data(env, src);
+ if ((uint32_t)env->cregs[i] != val && i >= 9 && i <= 11) {
+ PERchanged = true;
+ }
+ env->cregs[i] = (env->cregs[i] & 0xFFFFFFFF00000000ULL) | val;
+ src += sizeof(uint32_t);
+
+ if (i == r3) {
+ break;
+ }
+ }
+
+ if (PERchanged && env->psw.mask & PSW_MASK_PER) {
+ s390_cpu_recompute_watchpoints(CPU(cpu));
+ }
+
+ tlb_flush(CPU(cpu), 1);
+}
+
+void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
+{
+ int i;
+ uint64_t dest = a2;
+
+ for (i = r1;; i = (i + 1) % 16) {
+ cpu_stq_data(env, dest, env->cregs[i]);
+ dest += sizeof(uint64_t);
+
+ if (i == r3) {
+ break;
+ }
+ }
+}
+
+void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
+{
+ int i;
+ uint64_t dest = a2;
+
+ for (i = r1;; i = (i + 1) % 16) {
+ cpu_stl_data(env, dest, env->cregs[i]);
+ dest += sizeof(uint32_t);
+
+ if (i == r3) {
+ break;
+ }
+ }
+}
+
+uint32_t HELPER(tprot)(uint64_t a1, uint64_t a2)
+{
+ /* XXX implement */
+
+ return 0;
+}
+
+/* insert storage key extended */
+uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2)
+{
+ static S390SKeysState *ss;
+ static S390SKeysClass *skeyclass;
+ uint64_t addr = get_address(env, 0, 0, r2);
+ uint8_t key;
+
+ if (addr > ram_size) {
+ return 0;
+ }
+
+ if (unlikely(!ss)) {
+ ss = s390_get_skeys_device();
+ skeyclass = S390_SKEYS_GET_CLASS(ss);
+ }
+
+ if (skeyclass->get_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key)) {
+ return 0;
+ }
+ return key;
+}
+
+/* set storage key extended */
+void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2)
+{
+ static S390SKeysState *ss;
+ static S390SKeysClass *skeyclass;
+ uint64_t addr = get_address(env, 0, 0, r2);
+ uint8_t key;
+
+ if (addr > ram_size) {
+ return;
+ }
+
+ if (unlikely(!ss)) {
+ ss = s390_get_skeys_device();
+ skeyclass = S390_SKEYS_GET_CLASS(ss);
+ }
+
+ key = (uint8_t) r1;
+ skeyclass->set_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key);
+}
+
+/* reset reference bit extended */
+uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2)
+{
+ static S390SKeysState *ss;
+ static S390SKeysClass *skeyclass;
+ uint8_t re, key;
+
+ if (r2 > ram_size) {
+ return 0;
+ }
+
+ if (unlikely(!ss)) {
+ ss = s390_get_skeys_device();
+ skeyclass = S390_SKEYS_GET_CLASS(ss);
+ }
+
+ if (skeyclass->get_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) {
+ return 0;
+ }
+
+ re = key & (SK_R | SK_C);
+ key &= ~SK_R;
+
+ if (skeyclass->set_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) {
+ return 0;
+ }
+
+ /*
+ * cc
+ *
+ * 0 Reference bit zero; change bit zero
+ * 1 Reference bit zero; change bit one
+ * 2 Reference bit one; change bit zero
+ * 3 Reference bit one; change bit one
+ */
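+ /* Assuming the usual storage-key bit layout (SK_R = 0x04, SK_C = 0x02),
+ "re >> 1" maps directly onto this table; e.g. a key with both bits
+ set yields re = 0x06 and cc 3. */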
+
+ return re >> 1;
+}
+
+/* compare and swap and purge */
+uint32_t HELPER(csp)(CPUS390XState *env, uint32_t r1, uint64_t r2)
+{
+ S390CPU *cpu = s390_env_get_cpu(env);
+ uint32_t cc;
+ uint32_t o1 = env->regs[r1];
+ uint64_t a2 = r2 & ~3ULL;
+ uint32_t o2 = cpu_ldl_data(env, a2);
+
+ if (o1 == o2) {
+ cpu_stl_data(env, a2, env->regs[(r1 + 1) & 15]);
+ if (r2 & 0x3) {
+ /* flush TLB / ALB */
+ tlb_flush(CPU(cpu), 1);
+ }
+ cc = 0;
+ } else {
+ env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | o2;
+ cc = 1;
+ }
+
+ return cc;
+}
+
+uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
+{
+ int cc = 0, i;
+
+ HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
+ __func__, l, a1, a2);
+
+ if (l > 256) {
+ /* max 256 */
+ l = 256;
+ cc = 3;
+ }
+
+ /* XXX replace w/ memcpy */
+ for (i = 0; i < l; i++) {
+ cpu_stb_secondary(env, a1 + i, cpu_ldub_primary(env, a2 + i));
+ }
+
+ return cc;
+}
+
+uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
+{
+ int cc = 0, i;
+
+ HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
+ __func__, l, a1, a2);
+
+ if (l > 256) {
+ /* max 256 */
+ l = 256;
+ cc = 3;
+ }
+
+ /* XXX replace w/ memcpy */
+ for (i = 0; i < l; i++) {
+ cpu_stb_primary(env, a1 + i, cpu_ldub_secondary(env, a2 + i));
+ }
+
+ return cc;
+}
+
+/* invalidate pte */
+void HELPER(ipte)(CPUS390XState *env, uint64_t pte_addr, uint64_t vaddr)
+{
+ CPUState *cs = CPU(s390_env_get_cpu(env));
+ uint64_t page = vaddr & TARGET_PAGE_MASK;
+ uint64_t pte = 0;
+
+ /* XXX broadcast to other CPUs */
+
+ /* XXX Linux is nice enough to give us the exact pte address.
+ According to spec we'd have to find it out ourselves */
+ /* XXX Linux is fine with overwriting the pte, the spec requires
+ us to only set the invalid bit */
+ stq_phys(cs->as, pte_addr, pte | _PAGE_INVALID);
+
+ /* XXX we exploit the fact that Linux passes the exact virtual
+ address here - it's not obliged to! */
+ tlb_flush_page(cs, page);
+
+ /* XXX 31-bit hack */
+ if (page & 0x80000000) {
+ tlb_flush_page(cs, page & ~0x80000000);
+ } else {
+ tlb_flush_page(cs, page | 0x80000000);
+ }
+}
+
+/* flush local tlb */
+void HELPER(ptlb)(CPUS390XState *env)
+{
+ S390CPU *cpu = s390_env_get_cpu(env);
+
+ tlb_flush(CPU(cpu), 1);
+}
+
+/* load using real address */
+uint64_t HELPER(lura)(CPUS390XState *env, uint64_t addr)
+{
+ CPUState *cs = CPU(s390_env_get_cpu(env));
+
+ return (uint32_t)ldl_phys(cs->as, get_address(env, 0, 0, addr));
+}
+
+uint64_t HELPER(lurag)(CPUS390XState *env, uint64_t addr)
+{
+ CPUState *cs = CPU(s390_env_get_cpu(env));
+
+ return ldq_phys(cs->as, get_address(env, 0, 0, addr));
+}
+
+/* store using real address */
+void HELPER(stura)(CPUS390XState *env, uint64_t addr, uint64_t v1)
+{
+ CPUState *cs = CPU(s390_env_get_cpu(env));
+
+ stl_phys(cs->as, get_address(env, 0, 0, addr), (uint32_t)v1);
+
+ if ((env->psw.mask & PSW_MASK_PER) &&
+ (env->cregs[9] & PER_CR9_EVENT_STORE) &&
+ (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
+ /* PSW is saved just before calling the helper. */
+ env->per_address = env->psw.addr;
+ env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
+ }
+}
+
+void HELPER(sturg)(CPUS390XState *env, uint64_t addr, uint64_t v1)
+{
+ CPUState *cs = CPU(s390_env_get_cpu(env));
+
+ stq_phys(cs->as, get_address(env, 0, 0, addr), v1);
+
+ if ((env->psw.mask & PSW_MASK_PER) &&
+ (env->cregs[9] & PER_CR9_EVENT_STORE) &&
+ (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
+ /* PSW is saved just before calling the helper. */
+ env->per_address = env->psw.addr;
+ env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
+ }
+}
+
+/* load real address */
+uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
+{
+ CPUState *cs = CPU(s390_env_get_cpu(env));
+ uint32_t cc = 0;
+ int old_exc = cs->exception_index;
+ uint64_t asc = env->psw.mask & PSW_MASK_ASC;
+ uint64_t ret;
+ int flags;
+
+ /* XXX incomplete - has more corner cases */
+ if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
+ program_interrupt(env, PGM_SPECIAL_OP, 2);
+ }
+
+ cs->exception_index = old_exc;
+ if (mmu_translate(env, addr, 0, asc, &ret, &flags, true)) {
+ cc = 3;
+ }
+ if (cs->exception_index == EXCP_PGM) {
+ ret = env->int_pgm_code | 0x80000000;
+ } else {
+ ret |= addr & ~TARGET_PAGE_MASK;
+ }
+ cs->exception_index = old_exc;
+
+ env->cc_op = cc;
+ return ret;
+}
+#endif
diff --git a/target/s390x/misc_helper.c b/target/s390x/misc_helper.c
new file mode 100644
index 0000000000..c9604ea9c7
--- /dev/null
+++ b/target/s390x/misc_helper.c
@@ -0,0 +1,653 @@
+/*
+ * S/390 misc helper routines
+ *
+ * Copyright (c) 2009 Ulrich Hecht
+ * Copyright (c) 2009 Alexander Graf
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/memory.h"
+#include "qemu/host-utils.h"
+#include "exec/helper-proto.h"
+#include "sysemu/kvm.h"
+#include "qemu/timer.h"
+#include "exec/address-spaces.h"
+#ifdef CONFIG_KVM
+#include <linux/kvm.h>
+#endif
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+
+#if !defined(CONFIG_USER_ONLY)
+#include "hw/watchdog/wdt_diag288.h"
+#include "sysemu/cpus.h"
+#include "sysemu/sysemu.h"
+#include "hw/s390x/ebcdic.h"
+#include "hw/s390x/ipl.h"
+#endif
+
+/* #define DEBUG_HELPER */
+#ifdef DEBUG_HELPER
+#define HELPER_LOG(x...) qemu_log(x)
+#else
+#define HELPER_LOG(x...)
+#endif
+
+/* Raise an exception dynamically from a helper function. */
+void QEMU_NORETURN runtime_exception(CPUS390XState *env, int excp,
+ uintptr_t retaddr)
+{
+ CPUState *cs = CPU(s390_env_get_cpu(env));
+ int t;
+
+ cs->exception_index = EXCP_PGM;
+ env->int_pgm_code = excp;
+
+ /* Use the (ultimate) caller's address to find the insn that trapped. */
+ cpu_restore_state(cs, retaddr);
+
+ /* Advance past the insn. */
+ t = cpu_ldub_code(env, env->psw.addr);
+ env->int_pgm_ilen = t = get_ilen(t);
+ env->psw.addr += t;
+
+ cpu_loop_exit(cs);
+}
+
+/* Raise an exception statically from a TB. */
+void HELPER(exception)(CPUS390XState *env, uint32_t excp)
+{
+ CPUState *cs = CPU(s390_env_get_cpu(env));
+
+ HELPER_LOG("%s: exception %d\n", __func__, excp);
+ cs->exception_index = excp;
+ cpu_loop_exit(cs);
+}
+
+#ifndef CONFIG_USER_ONLY
+
+void program_interrupt(CPUS390XState *env, uint32_t code, int ilen)
+{
+ S390CPU *cpu = s390_env_get_cpu(env);
+
+ qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
+ env->psw.addr);
+
+ if (kvm_enabled()) {
+#ifdef CONFIG_KVM
+ struct kvm_s390_irq irq = {
+ .type = KVM_S390_PROGRAM_INT,
+ .u.pgm.code = code,
+ };
+
+ kvm_s390_vcpu_interrupt(cpu, &irq);
+#endif
+ } else {
+ CPUState *cs = CPU(cpu);
+
+ env->int_pgm_code = code;
+ env->int_pgm_ilen = ilen;
+ cs->exception_index = EXCP_PGM;
+ cpu_loop_exit(cs);
+ }
+}
+
+/* SCLP service call */
+uint32_t HELPER(servc)(CPUS390XState *env, uint64_t r1, uint64_t r2)
+{
+ int r = sclp_service_call(env, r1, r2);
+ if (r < 0) {
+ program_interrupt(env, -r, 4);
+ return 0;
+ }
+ return r;
+}
+
+#ifndef CONFIG_USER_ONLY
+static int modified_clear_reset(S390CPU *cpu)
+{
+ S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
+ CPUState *t;
+
+ pause_all_vcpus();
+ cpu_synchronize_all_states();
+ CPU_FOREACH(t) {
+ run_on_cpu(t, s390_do_cpu_full_reset, RUN_ON_CPU_NULL);
+ }
+ s390_cmma_reset();
+ subsystem_reset();
+ s390_crypto_reset();
+ scc->load_normal(CPU(cpu));
+ cpu_synchronize_all_post_reset();
+ resume_all_vcpus();
+ return 0;
+}
+
+static int load_normal_reset(S390CPU *cpu)
+{
+ S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
+ CPUState *t;
+
+ pause_all_vcpus();
+ cpu_synchronize_all_states();
+ CPU_FOREACH(t) {
+ run_on_cpu(t, s390_do_cpu_reset, RUN_ON_CPU_NULL);
+ }
+ s390_cmma_reset();
+ subsystem_reset();
+ scc->initial_cpu_reset(CPU(cpu));
+ scc->load_normal(CPU(cpu));
+ cpu_synchronize_all_post_reset();
+ resume_all_vcpus();
+ return 0;
+}
+
+int handle_diag_288(CPUS390XState *env, uint64_t r1, uint64_t r3)
+{
+ uint64_t func = env->regs[r1];
+ uint64_t timeout = env->regs[r1 + 1];
+ uint64_t action = env->regs[r3];
+ Object *obj;
+ DIAG288State *diag288;
+ DIAG288Class *diag288_class;
+
+ if (r1 % 2 || action != 0) {
+ return -1;
+ }
+
+ /* Timeout must be at least 15 seconds except for timer deletion */
+ if (func != WDT_DIAG288_CANCEL && timeout < 15) {
+ return -1;
+ }
+
+ obj = object_resolve_path_type("", TYPE_WDT_DIAG288, NULL);
+ if (!obj) {
+ return -1;
+ }
+
+ diag288 = DIAG288(obj);
+ diag288_class = DIAG288_GET_CLASS(diag288);
+ return diag288_class->handle_timer(diag288, func, timeout);
+}
+
+#define DIAG_308_RC_OK 0x0001
+#define DIAG_308_RC_NO_CONF 0x0102
+#define DIAG_308_RC_INVALID 0x0402
+
+void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3)
+{
+ uint64_t addr = env->regs[r1];
+ uint64_t subcode = env->regs[r3];
+ IplParameterBlock *iplb;
+
+ if (env->psw.mask & PSW_MASK_PSTATE) {
+ program_interrupt(env, PGM_PRIVILEGED, ILEN_LATER_INC);
+ return;
+ }
+
+ if ((subcode & ~0x0ffffULL) || (subcode > 6)) {
+ program_interrupt(env, PGM_SPECIFICATION, ILEN_LATER_INC);
+ return;
+ }
+
+ switch (subcode) {
+ case 0:
+ modified_clear_reset(s390_env_get_cpu(env));
+ if (tcg_enabled()) {
+ cpu_loop_exit(CPU(s390_env_get_cpu(env)));
+ }
+ break;
+ case 1:
+ load_normal_reset(s390_env_get_cpu(env));
+ if (tcg_enabled()) {
+ cpu_loop_exit(CPU(s390_env_get_cpu(env)));
+ }
+ break;
+ case 3:
+ s390_reipl_request();
+ if (tcg_enabled()) {
+ cpu_loop_exit(CPU(s390_env_get_cpu(env)));
+ }
+ break;
+ case 5:
+ if ((r1 & 1) || (addr & 0x0fffULL)) {
+ program_interrupt(env, PGM_SPECIFICATION, ILEN_LATER_INC);
+ return;
+ }
+ if (!address_space_access_valid(&address_space_memory, addr,
+ sizeof(IplParameterBlock), false)) {
+ program_interrupt(env, PGM_ADDRESSING, ILEN_LATER_INC);
+ return;
+ }
+ iplb = g_malloc0(sizeof(IplParameterBlock));
+ cpu_physical_memory_read(addr, iplb, sizeof(iplb->len));
+ if (!iplb_valid_len(iplb)) {
+ env->regs[r1 + 1] = DIAG_308_RC_INVALID;
+ goto out;
+ }
+
+ cpu_physical_memory_read(addr, iplb, be32_to_cpu(iplb->len));
+
+ if (!iplb_valid_ccw(iplb) && !iplb_valid_fcp(iplb)) {
+ env->regs[r1 + 1] = DIAG_308_RC_INVALID;
+ goto out;
+ }
+
+ s390_ipl_update_diag308(iplb);
+ env->regs[r1 + 1] = DIAG_308_RC_OK;
+out:
+ g_free(iplb);
+ return;
+ case 6:
+ if ((r1 & 1) || (addr & 0x0fffULL)) {
+ program_interrupt(env, PGM_SPECIFICATION, ILEN_LATER_INC);
+ return;
+ }
+ if (!address_space_access_valid(&address_space_memory, addr,
+ sizeof(IplParameterBlock), true)) {
+ program_interrupt(env, PGM_ADDRESSING, ILEN_LATER_INC);
+ return;
+ }
+ iplb = s390_ipl_get_iplb();
+ if (iplb) {
+ cpu_physical_memory_write(addr, iplb, be32_to_cpu(iplb->len));
+ env->regs[r1 + 1] = DIAG_308_RC_OK;
+ } else {
+ env->regs[r1 + 1] = DIAG_308_RC_NO_CONF;
+ }
+ return;
+ default:
+ hw_error("Unhandled diag308 subcode %" PRIx64, subcode);
+ break;
+ }
+}
+#endif
+
+void HELPER(diag)(CPUS390XState *env, uint32_t r1, uint32_t r3, uint32_t num)
+{
+ uint64_t r;
+
+ switch (num) {
+ case 0x500:
+ /* KVM hypercall */
+ r = s390_virtio_hypercall(env);
+ break;
+ case 0x44:
+ /* yield */
+ r = 0;
+ break;
+ case 0x308:
+ /* ipl */
+ handle_diag_308(env, r1, r3);
+ r = 0;
+ break;
+ default:
+ r = -1;
+ break;
+ }
+
+ if (r) {
+ program_interrupt(env, PGM_OPERATION, ILEN_LATER_INC);
+ }
+}
+
+/* Set Prefix */
+void HELPER(spx)(CPUS390XState *env, uint64_t a1)
+{
+ CPUState *cs = CPU(s390_env_get_cpu(env));
+ uint32_t prefix = a1 & 0x7fffe000;
+
+ env->psa = prefix;
+ HELPER_LOG("prefix: %#x\n", prefix);
+ tlb_flush_page(cs, 0);
+ tlb_flush_page(cs, TARGET_PAGE_SIZE);
+}
+
+/* Store Clock */
+uint64_t HELPER(stck)(CPUS390XState *env)
+{
+ uint64_t time;
+
+ time = env->tod_offset +
+ time2tod(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - env->tod_basetime);
+
+ return time;
+}
+
+/* Set Clock Comparator */
+void HELPER(sckc)(CPUS390XState *env, uint64_t time)
+{
+ if (time == -1ULL) {
+ return;
+ }
+
+ env->ckc = time;
+
+ /* difference between origins */
+ time -= env->tod_offset;
+
+ /* nanoseconds */
+ time = tod2time(time);
+
+ timer_mod(env->tod_timer, env->tod_basetime + time);
+}
+
+/* Store Clock Comparator */
+uint64_t HELPER(stckc)(CPUS390XState *env)
+{
+ return env->ckc;
+}
+
+/* Set CPU Timer */
+void HELPER(spt)(CPUS390XState *env, uint64_t time)
+{
+ if (time == -1ULL) {
+ return;
+ }
+
+ /* nanoseconds */
+ time = tod2time(time);
+
+ env->cputm = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + time;
+
+ timer_mod(env->cpu_timer, env->cputm);
+}
+
+/* Store CPU Timer */
+uint64_t HELPER(stpt)(CPUS390XState *env)
+{
+ return time2tod(env->cputm - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
+}
+
+/* Store System Information */
+uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0,
+ uint64_t r0, uint64_t r1)
+{
+ int cc = 0;
+ int sel1, sel2;
+
+ if ((r0 & STSI_LEVEL_MASK) <= STSI_LEVEL_3 &&
+ ((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK))) {
+ /* valid function code, invalid reserved bits */
+ program_interrupt(env, PGM_SPECIFICATION, 2);
+ }
+
+ sel1 = r0 & STSI_R0_SEL1_MASK;
+ sel2 = r1 & STSI_R1_SEL2_MASK;
+
+ /* XXX: spec exception if sysib is not 4k-aligned */
+
+ switch (r0 & STSI_LEVEL_MASK) {
+ case STSI_LEVEL_1:
+ if ((sel1 == 1) && (sel2 == 1)) {
+ /* Basic Machine Configuration */
+ struct sysib_111 sysib;
+
+ memset(&sysib, 0, sizeof(sysib));
+ ebcdic_put(sysib.manuf, "QEMU ", 16);
+ /* same as machine type number in STORE CPU ID */
+ ebcdic_put(sysib.type, "QEMU", 4);
+ /* same as model number in STORE CPU ID */
+ ebcdic_put(sysib.model, "QEMU ", 16);
+ ebcdic_put(sysib.sequence, "QEMU ", 16);
+ ebcdic_put(sysib.plant, "QEMU", 4);
+ cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
+ } else if ((sel1 == 2) && (sel2 == 1)) {
+ /* Basic Machine CPU */
+ struct sysib_121 sysib;
+
+ memset(&sysib, 0, sizeof(sysib));
+ /* XXX make different for different CPUs? */
+ ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
+ ebcdic_put(sysib.plant, "QEMU", 4);
+ stw_p(&sysib.cpu_addr, env->cpu_num);
+ cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
+ } else if ((sel1 == 2) && (sel2 == 2)) {
+ /* Basic Machine CPUs */
+ struct sysib_122 sysib;
+
+ memset(&sysib, 0, sizeof(sysib));
+ stl_p(&sysib.capability, 0x443afc29);
+ /* XXX change when SMP comes */
+ stw_p(&sysib.total_cpus, 1);
+ stw_p(&sysib.active_cpus, 1);
+ stw_p(&sysib.standby_cpus, 0);
+ stw_p(&sysib.reserved_cpus, 0);
+ cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
+ } else {
+ cc = 3;
+ }
+ break;
+ case STSI_LEVEL_2:
+ {
+ if ((sel1 == 2) && (sel2 == 1)) {
+ /* LPAR CPU */
+ struct sysib_221 sysib;
+
+ memset(&sysib, 0, sizeof(sysib));
+ /* XXX make different for different CPUs? */
+ ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
+ ebcdic_put(sysib.plant, "QEMU", 4);
+ stw_p(&sysib.cpu_addr, env->cpu_num);
+ stw_p(&sysib.cpu_id, 0);
+ cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
+ } else if ((sel1 == 2) && (sel2 == 2)) {
+ /* LPAR CPUs */
+ struct sysib_222 sysib;
+
+ memset(&sysib, 0, sizeof(sysib));
+ stw_p(&sysib.lpar_num, 0);
+ sysib.lcpuc = 0;
+ /* XXX change when SMP comes */
+ stw_p(&sysib.total_cpus, 1);
+ stw_p(&sysib.conf_cpus, 1);
+ stw_p(&sysib.standby_cpus, 0);
+ stw_p(&sysib.reserved_cpus, 0);
+ ebcdic_put(sysib.name, "QEMU ", 8);
+ stl_p(&sysib.caf, 1000);
+ stw_p(&sysib.dedicated_cpus, 0);
+ stw_p(&sysib.shared_cpus, 0);
+ cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
+ } else {
+ cc = 3;
+ }
+ break;
+ }
+ case STSI_LEVEL_3:
+ {
+ if ((sel1 == 2) && (sel2 == 2)) {
+ /* VM CPUs */
+ struct sysib_322 sysib;
+
+ memset(&sysib, 0, sizeof(sysib));
+ sysib.count = 1;
+ /* XXX change when SMP comes */
+ stw_p(&sysib.vm[0].total_cpus, 1);
+ stw_p(&sysib.vm[0].conf_cpus, 1);
+ stw_p(&sysib.vm[0].standby_cpus, 0);
+ stw_p(&sysib.vm[0].reserved_cpus, 0);
+ ebcdic_put(sysib.vm[0].name, "KVMguest", 8);
+ stl_p(&sysib.vm[0].caf, 1000);
+ ebcdic_put(sysib.vm[0].cpi, "KVM/Linux ", 16);
+ cpu_physical_memory_write(a0, &sysib, sizeof(sysib));
+ } else {
+ cc = 3;
+ }
+ break;
+ }
+ case STSI_LEVEL_CURRENT:
+ env->regs[0] = STSI_LEVEL_3;
+ break;
+ default:
+ cc = 3;
+ break;
+ }
+
+ return cc;
+}
+
+uint32_t HELPER(sigp)(CPUS390XState *env, uint64_t order_code, uint32_t r1,
+ uint64_t cpu_addr)
+{
+ int cc = SIGP_CC_ORDER_CODE_ACCEPTED;
+
+ HELPER_LOG("%s: %016" PRIx64 " %08x %016" PRIx64 "\n",
+ __func__, order_code, r1, cpu_addr);
+
+ /* Remember: Use "R1 or R1 + 1, whichever is the odd-numbered register"
+ as parameter (input). Status (output) is always R1. */
+
+ switch (order_code) {
+ case SIGP_SET_ARCH:
+ /* switch arch */
+ break;
+ case SIGP_SENSE:
+ /* enumerate CPU status */
+ if (cpu_addr) {
+ /* XXX implement when SMP comes */
+ return 3;
+ }
+ env->regs[r1] &= 0xffffffff00000000ULL;
+ cc = 1;
+ break;
+#if !defined(CONFIG_USER_ONLY)
+ case SIGP_RESTART:
+ qemu_system_reset_request();
+ cpu_loop_exit(CPU(s390_env_get_cpu(env)));
+ break;
+ case SIGP_STOP:
+ qemu_system_shutdown_request();
+ cpu_loop_exit(CPU(s390_env_get_cpu(env)));
+ break;
+#endif
+ default:
+ /* unknown sigp */
+ fprintf(stderr, "XXX unknown sigp: 0x%" PRIx64 "\n", order_code);
+ cc = SIGP_CC_NOT_OPERATIONAL;
+ }
+
+ return cc;
+}
+#endif
+
+#ifndef CONFIG_USER_ONLY
+void HELPER(xsch)(CPUS390XState *env, uint64_t r1)
+{
+ S390CPU *cpu = s390_env_get_cpu(env);
+ ioinst_handle_xsch(cpu, r1);
+}
+
+void HELPER(csch)(CPUS390XState *env, uint64_t r1)
+{
+ S390CPU *cpu = s390_env_get_cpu(env);
+ ioinst_handle_csch(cpu, r1);
+}
+
+void HELPER(hsch)(CPUS390XState *env, uint64_t r1)
+{
+ S390CPU *cpu = s390_env_get_cpu(env);
+ ioinst_handle_hsch(cpu, r1);
+}
+
+void HELPER(msch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
+{
+ S390CPU *cpu = s390_env_get_cpu(env);
+ ioinst_handle_msch(cpu, r1, inst >> 16);
+}
+
+void HELPER(rchp)(CPUS390XState *env, uint64_t r1)
+{
+ S390CPU *cpu = s390_env_get_cpu(env);
+ ioinst_handle_rchp(cpu, r1);
+}
+
+void HELPER(rsch)(CPUS390XState *env, uint64_t r1)
+{
+ S390CPU *cpu = s390_env_get_cpu(env);
+ ioinst_handle_rsch(cpu, r1);
+}
+
+void HELPER(ssch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
+{
+ S390CPU *cpu = s390_env_get_cpu(env);
+ ioinst_handle_ssch(cpu, r1, inst >> 16);
+}
+
+void HELPER(stsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
+{
+ S390CPU *cpu = s390_env_get_cpu(env);
+ ioinst_handle_stsch(cpu, r1, inst >> 16);
+}
+
+void HELPER(tsch)(CPUS390XState *env, uint64_t r1, uint64_t inst)
+{
+ S390CPU *cpu = s390_env_get_cpu(env);
+ ioinst_handle_tsch(cpu, r1, inst >> 16);
+}
+
+void HELPER(chsc)(CPUS390XState *env, uint64_t inst)
+{
+ S390CPU *cpu = s390_env_get_cpu(env);
+ ioinst_handle_chsc(cpu, inst >> 16);
+}
+#endif
+
+#ifndef CONFIG_USER_ONLY
+void HELPER(per_check_exception)(CPUS390XState *env)
+{
+ CPUState *cs = CPU(s390_env_get_cpu(env));
+
+ if (env->per_perc_atmid) {
+ env->int_pgm_code = PGM_PER;
+ env->int_pgm_ilen = get_ilen(cpu_ldub_code(env, env->per_address));
+
+ cs->exception_index = EXCP_PGM;
+ cpu_loop_exit(cs);
+ }
+}
+
+void HELPER(per_branch)(CPUS390XState *env, uint64_t from, uint64_t to)
+{
+ if ((env->cregs[9] & PER_CR9_EVENT_BRANCH)) {
+ if (!(env->cregs[9] & PER_CR9_CONTROL_BRANCH_ADDRESS)
+ || get_per_in_range(env, to)) {
+ env->per_address = from;
+ env->per_perc_atmid = PER_CODE_EVENT_BRANCH | get_per_atmid(env);
+ }
+ }
+}
+
+void HELPER(per_ifetch)(CPUS390XState *env, uint64_t addr)
+{
+ if ((env->cregs[9] & PER_CR9_EVENT_IFETCH) && get_per_in_range(env, addr)) {
+ env->per_address = addr;
+ env->per_perc_atmid = PER_CODE_EVENT_IFETCH | get_per_atmid(env);
+
+ /* If the instruction has to be nullified, trigger the
+ exception immediately. */
+ if (env->cregs[9] & PER_CR9_EVENT_NULLIFICATION) {
+ CPUState *cs = CPU(s390_env_get_cpu(env));
+
+ env->int_pgm_code = PGM_PER;
+ env->int_pgm_ilen = get_ilen(cpu_ldub_code(env, addr));
+
+ cs->exception_index = EXCP_PGM;
+ cpu_loop_exit(cs);
+ }
+ }
+}
+#endif
diff --git a/target/s390x/mmu_helper.c b/target/s390x/mmu_helper.c
new file mode 100644
index 0000000000..b11a02706c
--- /dev/null
+++ b/target/s390x/mmu_helper.c
@@ -0,0 +1,499 @@
+/*
+ * S390x MMU related functions
+ *
+ * Copyright (c) 2011 Alexander Graf
+ * Copyright (c) 2015 Thomas Huth, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/error-report.h"
+#include "exec/address-spaces.h"
+#include "cpu.h"
+#include "sysemu/kvm.h"
+#include "trace.h"
+#include "hw/s390x/storage-keys.h"
+
+/* #define DEBUG_S390 */
+/* #define DEBUG_S390_PTE */
+/* #define DEBUG_S390_STDOUT */
+
+#ifdef DEBUG_S390
+#ifdef DEBUG_S390_STDOUT
+#define DPRINTF(fmt, ...) \
+ do { fprintf(stderr, fmt, ## __VA_ARGS__); \
+ if (qemu_log_separate()) qemu_log(fmt, ##__VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+ do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
+#endif
+#else
+#define DPRINTF(fmt, ...) \
+ do { } while (0)
+#endif
+
+#ifdef DEBUG_S390_PTE
+#define PTE_DPRINTF DPRINTF
+#else
+#define PTE_DPRINTF(fmt, ...) \
+ do { } while (0)
+#endif
+
+/* Fetch/store bits in the translation exception code: */
+#define FS_READ 0x800
+#define FS_WRITE 0x400
+
+static void trigger_access_exception(CPUS390XState *env, uint32_t type,
+ uint32_t ilen, uint64_t tec)
+{
+ S390CPU *cpu = s390_env_get_cpu(env);
+
+ if (kvm_enabled()) {
+ kvm_s390_access_exception(cpu, type, tec);
+ } else {
+ CPUState *cs = CPU(cpu);
+ stq_phys(cs->as, env->psa + offsetof(LowCore, trans_exc_code), tec);
+ trigger_pgm_exception(env, type, ilen);
+ }
+}
+
+static void trigger_prot_fault(CPUS390XState *env, target_ulong vaddr,
+ uint64_t asc, int rw, bool exc)
+{
+ uint64_t tec;
+
+ tec = vaddr | (rw == MMU_DATA_STORE ? FS_WRITE : FS_READ) | 4 | asc >> 46;
+
+ DPRINTF("%s: trans_exc_code=%016" PRIx64 "\n", __func__, tec);
+
+ if (!exc) {
+ return;
+ }
+
+ trigger_access_exception(env, PGM_PROTECTION, ILEN_LATER_INC, tec);
+}
+
+static void trigger_page_fault(CPUS390XState *env, target_ulong vaddr,
+ uint32_t type, uint64_t asc, int rw, bool exc)
+{
+ int ilen = ILEN_LATER;
+ uint64_t tec;
+
+ tec = vaddr | (rw == MMU_DATA_STORE ? FS_WRITE : FS_READ) | asc >> 46;
+
+ DPRINTF("%s: trans_exc_code=%016" PRIx64 "\n", __func__, tec);
+
+ if (!exc) {
+ return;
+ }
+
+ /* Code accesses have an undefined ilc. */
+ if (rw == MMU_INST_FETCH) {
+ ilen = 2;
+ }
+
+ trigger_access_exception(env, type, ilen, tec);
+}
+
+/**
+ * Translate real address to absolute (= physical)
+ * address by taking care of the prefix mapping.
+ */
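+/* For example, with a prefix of 0x10000: real address 0x1000 (in the
+ lowcore) maps to absolute 0x11000, real 0x10500 maps back to absolute
+ 0x500, and every other real address is used unchanged. */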
+static target_ulong mmu_real2abs(CPUS390XState *env, target_ulong raddr)
+{
+ if (raddr < 0x2000) {
+ return raddr + env->psa; /* Map the lowcore. */
+ } else if (raddr >= env->psa && raddr < env->psa + 0x2000) {
+ return raddr - env->psa; /* Map the 0 page. */
+ }
+ return raddr;
+}
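+
+/*
+ * Illustration (informal, not part of the translation itself): with a
+ * prefix (env->psa) of 0x10000, real address 0x1000 becomes absolute
+ * 0x11000, real address 0x10800 becomes absolute 0x800, and every
+ * address outside the two swapped 8KB windows is returned unchanged.
+ */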
+
+/* Decode page table entry (normal 4KB page) */
+static int mmu_translate_pte(CPUS390XState *env, target_ulong vaddr,
+ uint64_t asc, uint64_t pt_entry,
+ target_ulong *raddr, int *flags, int rw, bool exc)
+{
+ if (pt_entry & _PAGE_INVALID) {
+ DPRINTF("%s: PTE=0x%" PRIx64 " invalid\n", __func__, pt_entry);
+ trigger_page_fault(env, vaddr, PGM_PAGE_TRANS, asc, rw, exc);
+ return -1;
+ }
+ if (pt_entry & _PAGE_RES0) {
+ trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw, exc);
+ return -1;
+ }
+ if (pt_entry & _PAGE_RO) {
+ *flags &= ~PAGE_WRITE;
+ }
+
+ *raddr = pt_entry & _ASCE_ORIGIN;
+
+ PTE_DPRINTF("%s: PTE=0x%" PRIx64 "\n", __func__, pt_entry);
+
+ return 0;
+}
+
+#define VADDR_PX 0xff000 /* Page index bits */
+
+/* Decode segment table entry */
+static int mmu_translate_segment(CPUS390XState *env, target_ulong vaddr,
+ uint64_t asc, uint64_t st_entry,
+ target_ulong *raddr, int *flags, int rw,
+ bool exc)
+{
+ CPUState *cs = CPU(s390_env_get_cpu(env));
+ uint64_t origin, offs, pt_entry;
+
+ if (st_entry & _SEGMENT_ENTRY_RO) {
+ *flags &= ~PAGE_WRITE;
+ }
+
+ if ((st_entry & _SEGMENT_ENTRY_FC) && (env->cregs[0] & CR0_EDAT)) {
+ /* Decode EDAT1 segment frame absolute address (1MB page) */
+ *raddr = (st_entry & 0xfffffffffff00000ULL) | (vaddr & 0xfffff);
+ PTE_DPRINTF("%s: SEG=0x%" PRIx64 "\n", __func__, st_entry);
+ return 0;
+ }
+
+ /* Look up 4KB page entry */
+ origin = st_entry & _SEGMENT_ENTRY_ORIGIN;
+ offs = (vaddr & VADDR_PX) >> 9;
+ pt_entry = ldq_phys(cs->as, origin + offs);
+ PTE_DPRINTF("%s: 0x%" PRIx64 " + 0x%" PRIx64 " => 0x%016" PRIx64 "\n",
+ __func__, origin, offs, pt_entry);
+ return mmu_translate_pte(env, vaddr, asc, pt_entry, raddr, flags, rw, exc);
+}
+
+/* Decode region table entries */
+static int mmu_translate_region(CPUS390XState *env, target_ulong vaddr,
+ uint64_t asc, uint64_t entry, int level,
+ target_ulong *raddr, int *flags, int rw,
+ bool exc)
+{
+ CPUState *cs = CPU(s390_env_get_cpu(env));
+ uint64_t origin, offs, new_entry;
+ const int pchks[4] = {
+ PGM_SEGMENT_TRANS, PGM_REG_THIRD_TRANS,
+ PGM_REG_SEC_TRANS, PGM_REG_FIRST_TRANS
+ };
+
+ PTE_DPRINTF("%s: 0x%" PRIx64 "\n", __func__, entry);
+
+ origin = entry & _REGION_ENTRY_ORIGIN;
+ offs = (vaddr >> (17 + 11 * level / 4)) & 0x3ff8;
+
+ new_entry = ldq_phys(cs->as, origin + offs);
+ PTE_DPRINTF("%s: 0x%" PRIx64 " + 0x%" PRIx64 " => 0x%016" PRIx64 "\n",
+ __func__, origin, offs, new_entry);
+
+ if ((new_entry & _REGION_ENTRY_INV) != 0) {
+ DPRINTF("%s: invalid region\n", __func__);
+ trigger_page_fault(env, vaddr, pchks[level / 4], asc, rw, exc);
+ return -1;
+ }
+
+ if ((new_entry & _REGION_ENTRY_TYPE_MASK) != level) {
+ trigger_page_fault(env, vaddr, PGM_TRANS_SPEC, asc, rw, exc);
+ return -1;
+ }
+
+ if (level == _ASCE_TYPE_SEGMENT) {
+ return mmu_translate_segment(env, vaddr, asc, new_entry, raddr, flags,
+ rw, exc);
+ }
+
+ /* Check region table offset and length */
+ offs = (vaddr >> (28 + 11 * (level - 4) / 4)) & 3;
+ if (offs < ((new_entry & _REGION_ENTRY_TF) >> 6)
+ || offs > (new_entry & _REGION_ENTRY_LENGTH)) {
+ DPRINTF("%s: invalid offset or len (%lx)\n", __func__, new_entry);
+ trigger_page_fault(env, vaddr, pchks[level / 4 - 1], asc, rw, exc);
+ return -1;
+ }
+
+ if ((env->cregs[0] & CR0_EDAT) && (new_entry & _REGION_ENTRY_RO)) {
+ *flags &= ~PAGE_WRITE;
+ }
+
+ /* yet another region */
+ return mmu_translate_region(env, vaddr, asc, new_entry, level - 4,
+ raddr, flags, rw, exc);
+}
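+
+/*
+ * Informal sketch of the index arithmetic above: assuming the usual
+ * _ASCE_TYPE_* encodings of 12/8/4/0, "17 + 11 * level / 4" evaluates to
+ * shifts of 50, 39, 28 and 17, so the masked value is the 11-bit region-
+ * first/second/third or segment index, already scaled by 8 bytes per
+ * table entry thanks to the 0x3ff8 mask.
+ */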
+
+static int mmu_translate_asce(CPUS390XState *env, target_ulong vaddr,
+ uint64_t asc, uint64_t asce, target_ulong *raddr,
+ int *flags, int rw, bool exc)
+{
+ int level;
+ int r;
+
+ if (asce & _ASCE_REAL_SPACE) {
+ /* direct mapping */
+ *raddr = vaddr;
+ return 0;
+ }
+
+ level = asce & _ASCE_TYPE_MASK;
+ switch (level) {
+ case _ASCE_TYPE_REGION1:
+ if ((vaddr >> 62) > (asce & _ASCE_TABLE_LENGTH)) {
+ trigger_page_fault(env, vaddr, PGM_REG_FIRST_TRANS, asc, rw, exc);
+ return -1;
+ }
+ break;
+ case _ASCE_TYPE_REGION2:
+ if (vaddr & 0xffe0000000000000ULL) {
+ DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
+ " 0xffe0000000000000ULL\n", __func__, vaddr);
+ trigger_page_fault(env, vaddr, PGM_ASCE_TYPE, asc, rw, exc);
+ return -1;
+ }
+ if ((vaddr >> 51 & 3) > (asce & _ASCE_TABLE_LENGTH)) {
+ trigger_page_fault(env, vaddr, PGM_REG_SEC_TRANS, asc, rw, exc);
+ return -1;
+ }
+ break;
+ case _ASCE_TYPE_REGION3:
+ if (vaddr & 0xfffffc0000000000ULL) {
+ DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
+ " 0xfffffc0000000000ULL\n", __func__, vaddr);
+ trigger_page_fault(env, vaddr, PGM_ASCE_TYPE, asc, rw, exc);
+ return -1;
+ }
+ if ((vaddr >> 40 & 3) > (asce & _ASCE_TABLE_LENGTH)) {
+ trigger_page_fault(env, vaddr, PGM_REG_THIRD_TRANS, asc, rw, exc);
+ return -1;
+ }
+ break;
+ case _ASCE_TYPE_SEGMENT:
+ if (vaddr & 0xffffffff80000000ULL) {
+ DPRINTF("%s: vaddr doesn't fit 0x%16" PRIx64
+ " 0xffffffff80000000ULL\n", __func__, vaddr);
+ trigger_page_fault(env, vaddr, PGM_ASCE_TYPE, asc, rw, exc);
+ return -1;
+ }
+ if ((vaddr >> 29 & 3) > (asce & _ASCE_TABLE_LENGTH)) {
+ trigger_page_fault(env, vaddr, PGM_SEGMENT_TRANS, asc, rw, exc);
+ return -1;
+ }
+ break;
+ }
+
+ r = mmu_translate_region(env, vaddr, asc, asce, level, raddr, flags, rw,
+ exc);
+ if (rw == MMU_DATA_STORE && !(*flags & PAGE_WRITE)) {
+ trigger_prot_fault(env, vaddr, asc, rw, exc);
+ return -1;
+ }
+
+ return r;
+}
+
+/**
+ * Translate a virtual (logical) address into a physical (absolute) address.
+ * @param vaddr the virtual address
+ * @param rw 0 = read, 1 = write, 2 = code fetch
+ * @param asc address space control (one of the PSW_ASC_* modes)
+ * @param raddr the translated address is stored to this pointer
+ * @param flags the PAGE_READ/WRITE/EXEC flags are stored to this pointer
+ * @param exc true = inject a program check if a fault occurred
+ * @return 0 if the translation was successful, -1 if a fault occurred
+ */
+int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
+ target_ulong *raddr, int *flags, bool exc)
+{
+ static S390SKeysState *ss;
+ static S390SKeysClass *skeyclass;
+ int r = -1;
+ uint8_t key;
+
+ if (unlikely(!ss)) {
+ ss = s390_get_skeys_device();
+ skeyclass = S390_SKEYS_GET_CLASS(ss);
+ }
+
+ *flags = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ vaddr &= TARGET_PAGE_MASK;
+
+ if (!(env->psw.mask & PSW_MASK_DAT)) {
+ *raddr = vaddr;
+ r = 0;
+ goto out;
+ }
+
+ switch (asc) {
+ case PSW_ASC_PRIMARY:
+ PTE_DPRINTF("%s: asc=primary\n", __func__);
+ r = mmu_translate_asce(env, vaddr, asc, env->cregs[1], raddr, flags,
+ rw, exc);
+ break;
+ case PSW_ASC_HOME:
+ PTE_DPRINTF("%s: asc=home\n", __func__);
+ r = mmu_translate_asce(env, vaddr, asc, env->cregs[13], raddr, flags,
+ rw, exc);
+ break;
+ case PSW_ASC_SECONDARY:
+ PTE_DPRINTF("%s: asc=secondary\n", __func__);
+ /*
+ * Instruction: Primary
+ * Data: Secondary
+ */
+ if (rw == MMU_INST_FETCH) {
+ r = mmu_translate_asce(env, vaddr, PSW_ASC_PRIMARY, env->cregs[1],
+ raddr, flags, rw, exc);
+ *flags &= ~(PAGE_READ | PAGE_WRITE);
+ } else {
+ r = mmu_translate_asce(env, vaddr, PSW_ASC_SECONDARY, env->cregs[7],
+ raddr, flags, rw, exc);
+ *flags &= ~(PAGE_EXEC);
+ }
+ break;
+ case PSW_ASC_ACCREG:
+ default:
+ hw_error("guest switched to unknown asc mode\n");
+ break;
+ }
+
+ out:
+ /* Convert real address -> absolute address */
+ *raddr = mmu_real2abs(env, *raddr);
+
+ if (r == 0 && *raddr < ram_size) {
+ if (skeyclass->get_skeys(ss, *raddr / TARGET_PAGE_SIZE, 1, &key)) {
+ trace_get_skeys_nonzero(r);
+ return 0;
+ }
+
+ if (*flags & PAGE_READ) {
+ key |= SK_R;
+ }
+
+ if (*flags & PAGE_WRITE) {
+ key |= SK_C;
+ }
+
+ if (skeyclass->set_skeys(ss, *raddr / TARGET_PAGE_SIZE, 1, &key)) {
+ trace_set_skeys_nonzero(r);
+ return 0;
+ }
+ }
+
+ return r;
+}
+
+/**
+ * lowprot_enabled: Check whether low-address protection is enabled
+ */
+static bool lowprot_enabled(const CPUS390XState *env)
+{
+ if (!(env->cregs[0] & CR0_LOWPROT)) {
+ return false;
+ }
+ if (!(env->psw.mask & PSW_MASK_DAT)) {
+ return true;
+ }
+
+ /* Check the private-space control bit */
+ switch (env->psw.mask & PSW_MASK_ASC) {
+ case PSW_ASC_PRIMARY:
+ return !(env->cregs[1] & _ASCE_PRIVATE_SPACE);
+ case PSW_ASC_SECONDARY:
+ return !(env->cregs[7] & _ASCE_PRIVATE_SPACE);
+ case PSW_ASC_HOME:
+ return !(env->cregs[13] & _ASCE_PRIVATE_SPACE);
+ default:
+ /* We don't support access register mode */
+ error_report("unsupported addressing mode");
+ exit(1);
+ }
+}
+
+/**
+ * translate_pages: Translate a set of consecutive logical page addresses
+ * to absolute addresses
+ */
+static int translate_pages(S390CPU *cpu, vaddr addr, int nr_pages,
+ target_ulong *pages, bool is_write)
+{
+ bool lowprot = is_write && lowprot_enabled(&cpu->env);
+ uint64_t asc = cpu->env.psw.mask & PSW_MASK_ASC;
+ CPUS390XState *env = &cpu->env;
+ int ret, i, pflags;
+
+ for (i = 0; i < nr_pages; i++) {
+ /* Low-address protection? */
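+ /* (The architected low-address ranges are 0-511 and 4096-4607.) */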
+ if (lowprot && (addr < 512 || (addr >= 4096 && addr < 4096 + 512))) {
+ trigger_access_exception(env, PGM_PROTECTION, ILEN_LATER_INC, 0);
+ return -EACCES;
+ }
+ ret = mmu_translate(env, addr, is_write, asc, &pages[i], &pflags, true);
+ if (ret) {
+ return ret;
+ }
+ if (!address_space_access_valid(&address_space_memory, pages[i],
+ TARGET_PAGE_SIZE, is_write)) {
+ program_interrupt(env, PGM_ADDRESSING, 0);
+ return -EFAULT;
+ }
+ addr += TARGET_PAGE_SIZE;
+ }
+
+ return 0;
+}
+
+/**
+ * s390_cpu_virt_mem_rw:
+ * @laddr: the logical start address
+ * @ar: the access register number
+ * @hostbuf: buffer in host memory. NULL = do only checks w/o copying
+ * @len: length that should be transferred
+ * @is_write: true = write, false = read
+ * Returns: 0 on success, non-zero if an exception occurred
+ *
+ * Copy from/to guest memory using logical addresses. Note that we inject a
+ * program interrupt in case there is an error while accessing the memory.
+ */
+int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
+ int len, bool is_write)
+{
+ int currlen, nr_pages, i;
+ target_ulong *pages;
+ int ret;
+
+ if (kvm_enabled()) {
+ ret = kvm_s390_mem_op(cpu, laddr, ar, hostbuf, len, is_write);
+ if (ret >= 0) {
+ return ret;
+ }
+ }
+
+ nr_pages = (((laddr & ~TARGET_PAGE_MASK) + len - 1) >> TARGET_PAGE_BITS)
+ + 1;
+ pages = g_malloc(nr_pages * sizeof(*pages));
+
+ ret = translate_pages(cpu, laddr, nr_pages, pages, is_write);
+ if (ret == 0 && hostbuf != NULL) {
+ /* Copy data by stepping through the area page by page */
+ for (i = 0; i < nr_pages; i++) {
+ currlen = MIN(len, TARGET_PAGE_SIZE - (laddr % TARGET_PAGE_SIZE));
+ cpu_physical_memory_rw(pages[i] | (laddr & ~TARGET_PAGE_MASK),
+ hostbuf, currlen, is_write);
+ laddr += currlen;
+ hostbuf += currlen;
+ len -= currlen;
+ }
+ }
+
+ g_free(pages);
+ return ret;
+}
diff --git a/target/s390x/trace-events b/target/s390x/trace-events
new file mode 100644
index 0000000000..1574033e31
--- /dev/null
+++ b/target/s390x/trace-events
@@ -0,0 +1,22 @@
+# See docs/tracing.txt for syntax documentation.
+
+# target/s390x/mmu_helper.c
+get_skeys_nonzero(int rc) "SKEY: Call to get_skeys unexpectedly returned %d"
+set_skeys_nonzero(int rc) "SKEY: Call to set_skeys unexpectedly returned %d"
+
+# target/s390x/ioinst.c
+ioinst(const char *insn) "IOINST: %s"
+ioinst_sch_id(const char *insn, int cssid, int ssid, int schid) "IOINST: %s (%x.%x.%04x)"
+ioinst_chp_id(const char *insn, int cssid, int chpid) "IOINST: %s (%x.%02x)"
+ioinst_chsc_cmd(uint16_t cmd, uint16_t len) "IOINST: chsc command %04x, len %04x"
+
+# target/s390x/kvm.c
+kvm_enable_cmma(int rc) "CMMA: enabling with result code %d"
+kvm_clear_cmma(int rc) "CMMA: clearing with result code %d"
+kvm_failed_cpu_state_set(int cpu_index, uint8_t state, const char *msg) "Warning: Unable to set cpu %d state %" PRIu8 " to KVM: %s"
+kvm_sigp_finished(uint8_t order, int cpu_index, int dst_index, int cc) "SIGP: Finished order %u on cpu %d -> cpu %d with cc=%d"
+
+# target/s390x/cpu.c
+cpu_set_state(int cpu_index, uint8_t state) "setting cpu %d state to %" PRIu8
+cpu_halt(int cpu_index) "halting cpu %d"
+cpu_unhalt(int cpu_index) "unhalting cpu %d"
diff --git a/target/s390x/translate.c b/target/s390x/translate.c
new file mode 100644
index 0000000000..02bc7058fd
--- /dev/null
+++ b/target/s390x/translate.c
@@ -0,0 +1,5452 @@
+/*
+ * S/390 translation
+ *
+ * Copyright (c) 2009 Ulrich Hecht
+ * Copyright (c) 2010 Alexander Graf
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* #define DEBUG_INLINE_BRANCHES */
+#define S390X_DEBUG_DISAS
+/* #define S390X_DEBUG_DISAS_VERBOSE */
+
+#ifdef S390X_DEBUG_DISAS_VERBOSE
+# define LOG_DISAS(...) qemu_log(__VA_ARGS__)
+#else
+# define LOG_DISAS(...) do { } while (0)
+#endif
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "disas/disas.h"
+#include "exec/exec-all.h"
+#include "tcg-op.h"
+#include "qemu/log.h"
+#include "qemu/host-utils.h"
+#include "exec/cpu_ldst.h"
+
+/* global register indexes */
+static TCGv_env cpu_env;
+
+#include "exec/gen-icount.h"
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+
+#include "trace-tcg.h"
+#include "exec/log.h"
+
+
+/* Information that (almost) every instruction needs to manipulate. */
+typedef struct DisasContext DisasContext;
+typedef struct DisasInsn DisasInsn;
+typedef struct DisasFields DisasFields;
+
+struct DisasContext {
+ struct TranslationBlock *tb;
+ const DisasInsn *insn;
+ DisasFields *fields;
+ uint64_t pc, next_pc;
+ enum cc_op cc_op;
+ bool singlestep_enabled;
+};
+
+/* Information carried about a condition to be evaluated. */
+typedef struct {
+ TCGCond cond:8;
+ bool is_64;
+ bool g1;
+ bool g2;
+ union {
+ struct { TCGv_i64 a, b; } s64;
+ struct { TCGv_i32 a, b; } s32;
+ } u;
+} DisasCompare;
+
+#define DISAS_EXCP 4
+
+#ifdef DEBUG_INLINE_BRANCHES
+static uint64_t inline_branch_hit[CC_OP_MAX];
+static uint64_t inline_branch_miss[CC_OP_MAX];
+#endif
+
+static uint64_t pc_to_link_info(DisasContext *s, uint64_t pc)
+{
+ if (!(s->tb->flags & FLAG_MASK_64)) {
+ if (s->tb->flags & FLAG_MASK_32) {
+ return pc | 0x80000000;
+ }
+ }
+ return pc;
+}
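+
+/*
+ * Informal note: in 31-bit mode this is presumably meant to mirror the
+ * architected link information of BAS/BASR, where the addressing-mode bit
+ * is carried in the high bit of the 32-bit address (hence the 0x80000000).
+ */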
+
+void s390_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
+ int flags)
+{
+ S390CPU *cpu = S390_CPU(cs);
+ CPUS390XState *env = &cpu->env;
+ int i;
+
+ if (env->cc_op > 3) {
+ cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %15s\n",
+ env->psw.mask, env->psw.addr, cc_name(env->cc_op));
+ } else {
+ cpu_fprintf(f, "PSW=mask %016" PRIx64 " addr %016" PRIx64 " cc %02x\n",
+ env->psw.mask, env->psw.addr, env->cc_op);
+ }
+
+ for (i = 0; i < 16; i++) {
+ cpu_fprintf(f, "R%02d=%016" PRIx64, i, env->regs[i]);
+ if ((i % 4) == 3) {
+ cpu_fprintf(f, "\n");
+ } else {
+ cpu_fprintf(f, " ");
+ }
+ }
+
+ for (i = 0; i < 16; i++) {
+ cpu_fprintf(f, "F%02d=%016" PRIx64, i, get_freg(env, i)->ll);
+ if ((i % 4) == 3) {
+ cpu_fprintf(f, "\n");
+ } else {
+ cpu_fprintf(f, " ");
+ }
+ }
+
+ for (i = 0; i < 32; i++) {
+ cpu_fprintf(f, "V%02d=%016" PRIx64 "%016" PRIx64, i,
+ env->vregs[i][0].ll, env->vregs[i][1].ll);
+ cpu_fprintf(f, (i % 2) ? "\n" : " ");
+ }
+
+#ifndef CONFIG_USER_ONLY
+ for (i = 0; i < 16; i++) {
+ cpu_fprintf(f, "C%02d=%016" PRIx64, i, env->cregs[i]);
+ if ((i % 4) == 3) {
+ cpu_fprintf(f, "\n");
+ } else {
+ cpu_fprintf(f, " ");
+ }
+ }
+#endif
+
+#ifdef DEBUG_INLINE_BRANCHES
+ for (i = 0; i < CC_OP_MAX; i++) {
+ cpu_fprintf(f, " %15s = %10ld\t%10ld\n", cc_name(i),
+ inline_branch_miss[i], inline_branch_hit[i]);
+ }
+#endif
+
+ cpu_fprintf(f, "\n");
+}
+
+static TCGv_i64 psw_addr;
+static TCGv_i64 psw_mask;
+static TCGv_i64 gbea;
+
+static TCGv_i32 cc_op;
+static TCGv_i64 cc_src;
+static TCGv_i64 cc_dst;
+static TCGv_i64 cc_vr;
+
+static char cpu_reg_names[32][4];
+static TCGv_i64 regs[16];
+static TCGv_i64 fregs[16];
+
+void s390x_translate_init(void)
+{
+ int i;
+
+ cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+ tcg_ctx.tcg_env = cpu_env;
+ psw_addr = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUS390XState, psw.addr),
+ "psw_addr");
+ psw_mask = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUS390XState, psw.mask),
+ "psw_mask");
+ gbea = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUS390XState, gbea),
+ "gbea");
+
+ cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op),
+ "cc_op");
+ cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src),
+ "cc_src");
+ cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst),
+ "cc_dst");
+ cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr),
+ "cc_vr");
+
+ for (i = 0; i < 16; i++) {
+ snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d", i);
+ regs[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUS390XState, regs[i]),
+ cpu_reg_names[i]);
+ }
+
+ for (i = 0; i < 16; i++) {
+ snprintf(cpu_reg_names[i + 16], sizeof(cpu_reg_names[0]), "f%d", i);
+ fregs[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUS390XState, vregs[i][0].d),
+ cpu_reg_names[i + 16]);
+ }
+}
+
+static TCGv_i64 load_reg(int reg)
+{
+ TCGv_i64 r = tcg_temp_new_i64();
+ tcg_gen_mov_i64(r, regs[reg]);
+ return r;
+}
+
+static TCGv_i64 load_freg32_i64(int reg)
+{
+ TCGv_i64 r = tcg_temp_new_i64();
+ tcg_gen_shri_i64(r, fregs[reg], 32);
+ return r;
+}
+
+static void store_reg(int reg, TCGv_i64 v)
+{
+ tcg_gen_mov_i64(regs[reg], v);
+}
+
+static void store_freg(int reg, TCGv_i64 v)
+{
+ tcg_gen_mov_i64(fregs[reg], v);
+}
+
+static void store_reg32_i64(int reg, TCGv_i64 v)
+{
+ /* 32 bit register writes keep the upper half */
+ tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
+}
+
+static void store_reg32h_i64(int reg, TCGv_i64 v)
+{
+ tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
+}
+
+static void store_freg32_i64(int reg, TCGv_i64 v)
+{
+ tcg_gen_deposit_i64(fregs[reg], fregs[reg], v, 32, 32);
+}
+
+static void return_low128(TCGv_i64 dest)
+{
+ tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
+}
+
+static void update_psw_addr(DisasContext *s)
+{
+ /* psw.addr */
+ tcg_gen_movi_i64(psw_addr, s->pc);
+}
+
+static void per_branch(DisasContext *s, bool to_next)
+{
+#ifndef CONFIG_USER_ONLY
+ tcg_gen_movi_i64(gbea, s->pc);
+
+ if (s->tb->flags & FLAG_MASK_PER) {
+ TCGv_i64 next_pc = to_next ? tcg_const_i64(s->next_pc) : psw_addr;
+ gen_helper_per_branch(cpu_env, gbea, next_pc);
+ if (to_next) {
+ tcg_temp_free_i64(next_pc);
+ }
+ }
+#endif
+}
+
+static void per_branch_cond(DisasContext *s, TCGCond cond,
+ TCGv_i64 arg1, TCGv_i64 arg2)
+{
+#ifndef CONFIG_USER_ONLY
+ if (s->tb->flags & FLAG_MASK_PER) {
+ TCGLabel *lab = gen_new_label();
+ tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab);
+
+ tcg_gen_movi_i64(gbea, s->pc);
+ gen_helper_per_branch(cpu_env, gbea, psw_addr);
+
+ gen_set_label(lab);
+ } else {
+ TCGv_i64 pc = tcg_const_i64(s->pc);
+ tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc);
+ tcg_temp_free_i64(pc);
+ }
+#endif
+}
+
+static void per_breaking_event(DisasContext *s)
+{
+ tcg_gen_movi_i64(gbea, s->pc);
+}
+
+static void update_cc_op(DisasContext *s)
+{
+ if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
+ tcg_gen_movi_i32(cc_op, s->cc_op);
+ }
+}
+
+static void potential_page_fault(DisasContext *s)
+{
+ update_psw_addr(s);
+ update_cc_op(s);
+}
+
+static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
+{
+ return (uint64_t)cpu_lduw_code(env, pc);
+}
+
+static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
+{
+ return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
+}
+
+static int get_mem_index(DisasContext *s)
+{
+ switch (s->tb->flags & FLAG_MASK_ASC) {
+ case PSW_ASC_PRIMARY >> 32:
+ return 0;
+ case PSW_ASC_SECONDARY >> 32:
+ return 1;
+ case PSW_ASC_HOME >> 32:
+ return 2;
+ default:
+ tcg_abort();
+ break;
+ }
+}
+
+static void gen_exception(int excp)
+{
+ TCGv_i32 tmp = tcg_const_i32(excp);
+ gen_helper_exception(cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+}
+
+static void gen_program_exception(DisasContext *s, int code)
+{
+ TCGv_i32 tmp;
+
+ /* Remember what pgm exception this was. */
+ tmp = tcg_const_i32(code);
+ tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code));
+ tcg_temp_free_i32(tmp);
+
+ tmp = tcg_const_i32(s->next_pc - s->pc);
+ tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen));
+ tcg_temp_free_i32(tmp);
+
+ /* Advance past instruction. */
+ s->pc = s->next_pc;
+ update_psw_addr(s);
+
+ /* Save off cc. */
+ update_cc_op(s);
+
+ /* Trigger exception. */
+ gen_exception(EXCP_PGM);
+}
+
+static inline void gen_illegal_opcode(DisasContext *s)
+{
+ gen_program_exception(s, PGM_OPERATION);
+}
+
+static inline void gen_trap(DisasContext *s)
+{
+ TCGv_i32 t;
+
+ /* Set DXC to 0xff. */
+ t = tcg_temp_new_i32();
+ tcg_gen_ld_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
+ tcg_gen_ori_i32(t, t, 0xff00);
+ tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, fpc));
+ tcg_temp_free_i32(t);
+
+ gen_program_exception(s, PGM_DATA);
+}
+
+#ifndef CONFIG_USER_ONLY
+static void check_privileged(DisasContext *s)
+{
+ if (s->tb->flags & (PSW_MASK_PSTATE >> 32)) {
+ gen_program_exception(s, PGM_PRIVILEGED);
+ }
+}
+#endif
+
+static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2)
+{
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ bool need_31 = !(s->tb->flags & FLAG_MASK_64);
+
+ /* Note that d2 is limited to 20 bits, signed. If we crop negative
+ displacements early, we create larger immediate addends. */
+
+ /* Note that addi optimizes the imm==0 case. */
+ if (b2 && x2) {
+ tcg_gen_add_i64(tmp, regs[b2], regs[x2]);
+ tcg_gen_addi_i64(tmp, tmp, d2);
+ } else if (b2) {
+ tcg_gen_addi_i64(tmp, regs[b2], d2);
+ } else if (x2) {
+ tcg_gen_addi_i64(tmp, regs[x2], d2);
+ } else {
+ if (need_31) {
+ d2 &= 0x7fffffff;
+ need_31 = false;
+ }
+ tcg_gen_movi_i64(tmp, d2);
+ }
+ if (need_31) {
+ tcg_gen_andi_i64(tmp, tmp, 0x7fffffff);
+ }
+
+ return tmp;
+}
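+
+/*
+ * Worked example (illustrative only): for a D(X2,B2) operand with b2=5,
+ * x2=7 and d2=-8 this emits regs[5] + regs[7] - 8; in 31-bit mode
+ * (FLAG_MASK_64 clear) the result is additionally masked with 0x7fffffff.
+ */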
+
+static inline bool live_cc_data(DisasContext *s)
+{
+ return (s->cc_op != CC_OP_DYNAMIC
+ && s->cc_op != CC_OP_STATIC
+ && s->cc_op > 3);
+}
+
+static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
+{
+ if (live_cc_data(s)) {
+ tcg_gen_discard_i64(cc_src);
+ tcg_gen_discard_i64(cc_dst);
+ tcg_gen_discard_i64(cc_vr);
+ }
+ s->cc_op = CC_OP_CONST0 + val;
+}
+
+static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
+{
+ if (live_cc_data(s)) {
+ tcg_gen_discard_i64(cc_src);
+ tcg_gen_discard_i64(cc_vr);
+ }
+ tcg_gen_mov_i64(cc_dst, dst);
+ s->cc_op = op;
+}
+
+static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
+ TCGv_i64 dst)
+{
+ if (live_cc_data(s)) {
+ tcg_gen_discard_i64(cc_vr);
+ }
+ tcg_gen_mov_i64(cc_src, src);
+ tcg_gen_mov_i64(cc_dst, dst);
+ s->cc_op = op;
+}
+
+static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
+ TCGv_i64 dst, TCGv_i64 vr)
+{
+ tcg_gen_mov_i64(cc_src, src);
+ tcg_gen_mov_i64(cc_dst, dst);
+ tcg_gen_mov_i64(cc_vr, vr);
+ s->cc_op = op;
+}
+
+static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
+{
+ gen_op_update1_cc_i64(s, CC_OP_NZ, val);
+}
+
+static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
+{
+ gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
+}
+
+static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
+{
+ gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
+}
+
+static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
+{
+ gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
+}
+
+/* CC value is in env->cc_op */
+static void set_cc_static(DisasContext *s)
+{
+ if (live_cc_data(s)) {
+ tcg_gen_discard_i64(cc_src);
+ tcg_gen_discard_i64(cc_dst);
+ tcg_gen_discard_i64(cc_vr);
+ }
+ s->cc_op = CC_OP_STATIC;
+}
+
+/* calculates cc into cc_op */
+static void gen_op_calc_cc(DisasContext *s)
+{
+ TCGv_i32 local_cc_op;
+ TCGv_i64 dummy;
+
+ TCGV_UNUSED_I32(local_cc_op);
+ TCGV_UNUSED_I64(dummy);
+ switch (s->cc_op) {
+ default:
+ dummy = tcg_const_i64(0);
+ /* FALLTHRU */
+ case CC_OP_ADD_64:
+ case CC_OP_ADDU_64:
+ case CC_OP_ADDC_64:
+ case CC_OP_SUB_64:
+ case CC_OP_SUBU_64:
+ case CC_OP_SUBB_64:
+ case CC_OP_ADD_32:
+ case CC_OP_ADDU_32:
+ case CC_OP_ADDC_32:
+ case CC_OP_SUB_32:
+ case CC_OP_SUBU_32:
+ case CC_OP_SUBB_32:
+ local_cc_op = tcg_const_i32(s->cc_op);
+ break;
+ case CC_OP_CONST0:
+ case CC_OP_CONST1:
+ case CC_OP_CONST2:
+ case CC_OP_CONST3:
+ case CC_OP_STATIC:
+ case CC_OP_DYNAMIC:
+ break;
+ }
+
+ switch (s->cc_op) {
+ case CC_OP_CONST0:
+ case CC_OP_CONST1:
+ case CC_OP_CONST2:
+ case CC_OP_CONST3:
+ /* s->cc_op is the cc value */
+ tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
+ break;
+ case CC_OP_STATIC:
+ /* env->cc_op already is the cc value */
+ break;
+ case CC_OP_NZ:
+ case CC_OP_ABS_64:
+ case CC_OP_NABS_64:
+ case CC_OP_ABS_32:
+ case CC_OP_NABS_32:
+ case CC_OP_LTGT0_32:
+ case CC_OP_LTGT0_64:
+ case CC_OP_COMP_32:
+ case CC_OP_COMP_64:
+ case CC_OP_NZ_F32:
+ case CC_OP_NZ_F64:
+ case CC_OP_FLOGR:
+ /* 1 argument */
+ gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
+ break;
+ case CC_OP_ICM:
+ case CC_OP_LTGT_32:
+ case CC_OP_LTGT_64:
+ case CC_OP_LTUGTU_32:
+ case CC_OP_LTUGTU_64:
+ case CC_OP_TM_32:
+ case CC_OP_TM_64:
+ case CC_OP_SLA_32:
+ case CC_OP_SLA_64:
+ case CC_OP_NZ_F128:
+ /* 2 arguments */
+ gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
+ break;
+ case CC_OP_ADD_64:
+ case CC_OP_ADDU_64:
+ case CC_OP_ADDC_64:
+ case CC_OP_SUB_64:
+ case CC_OP_SUBU_64:
+ case CC_OP_SUBB_64:
+ case CC_OP_ADD_32:
+ case CC_OP_ADDU_32:
+ case CC_OP_ADDC_32:
+ case CC_OP_SUB_32:
+ case CC_OP_SUBU_32:
+ case CC_OP_SUBB_32:
+ /* 3 arguments */
+ gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
+ break;
+ case CC_OP_DYNAMIC:
+ /* unknown operation - assume 3 arguments and cc_op in env */
+ gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
+ break;
+ default:
+ tcg_abort();
+ }
+
+ if (!TCGV_IS_UNUSED_I32(local_cc_op)) {
+ tcg_temp_free_i32(local_cc_op);
+ }
+ if (!TCGV_IS_UNUSED_I64(dummy)) {
+ tcg_temp_free_i64(dummy);
+ }
+
+ /* We now have cc in cc_op as constant */
+ set_cc_static(s);
+}
+
+static int use_goto_tb(DisasContext *s, uint64_t dest)
+{
+ if (unlikely(s->singlestep_enabled) ||
+ (s->tb->cflags & CF_LAST_IO) ||
+ (s->tb->flags & FLAG_MASK_PER)) {
+ return false;
+ }
+#ifndef CONFIG_USER_ONLY
+ return (dest & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
+ (dest & TARGET_PAGE_MASK) == (s->pc & TARGET_PAGE_MASK);
+#else
+ return true;
+#endif
+}
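+
+/*
+ * Informal note: in the softmmu case direct TB chaining is only allowed
+ * when the destination shares a guest page with the start of the TB or
+ * with the current PC, since the mapping of a different page may change
+ * between executions of the chained blocks.
+ */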
+
+static void account_noninline_branch(DisasContext *s, int cc_op)
+{
+#ifdef DEBUG_INLINE_BRANCHES
+ inline_branch_miss[cc_op]++;
+#endif
+}
+
+static void account_inline_branch(DisasContext *s, int cc_op)
+{
+#ifdef DEBUG_INLINE_BRANCHES
+ inline_branch_hit[cc_op]++;
+#endif
+}
+
+/* Table of mask values to comparison codes, given a comparison as input.
+ For such, CC=3 should not be possible. */
+static const TCGCond ltgt_cond[16] = {
+ TCG_COND_NEVER, TCG_COND_NEVER, /* | | | x */
+ TCG_COND_GT, TCG_COND_GT, /* | | GT | x */
+ TCG_COND_LT, TCG_COND_LT, /* | LT | | x */
+ TCG_COND_NE, TCG_COND_NE, /* | LT | GT | x */
+ TCG_COND_EQ, TCG_COND_EQ, /* EQ | | | x */
+ TCG_COND_GE, TCG_COND_GE, /* EQ | | GT | x */
+ TCG_COND_LE, TCG_COND_LE, /* EQ | LT | | x */
+ TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | LT | GT | x */
+};
+
+/* Table of mask values to comparison codes, given a logic op as input.
+ For such, only CC=0 and CC=1 should be possible. */
+static const TCGCond nz_cond[16] = {
+ TCG_COND_NEVER, TCG_COND_NEVER, /* | | x | x */
+ TCG_COND_NEVER, TCG_COND_NEVER,
+ TCG_COND_NE, TCG_COND_NE, /* | NE | x | x */
+ TCG_COND_NE, TCG_COND_NE,
+ TCG_COND_EQ, TCG_COND_EQ, /* EQ | | x | x */
+ TCG_COND_EQ, TCG_COND_EQ,
+ TCG_COND_ALWAYS, TCG_COND_ALWAYS, /* EQ | NE | x | x */
+ TCG_COND_ALWAYS, TCG_COND_ALWAYS,
+};
+
+/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
+ details required to generate a TCG comparison. */
+static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
+{
+ TCGCond cond;
+ enum cc_op old_cc_op = s->cc_op;
+
+ if (mask == 15 || mask == 0) {
+ c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
+ c->u.s32.a = cc_op;
+ c->u.s32.b = cc_op;
+ c->g1 = c->g2 = true;
+ c->is_64 = false;
+ return;
+ }
+
+ /* Find the TCG condition for the mask + cc op. */
+ switch (old_cc_op) {
+ case CC_OP_LTGT0_32:
+ case CC_OP_LTGT0_64:
+ case CC_OP_LTGT_32:
+ case CC_OP_LTGT_64:
+ cond = ltgt_cond[mask];
+ if (cond == TCG_COND_NEVER) {
+ goto do_dynamic;
+ }
+ account_inline_branch(s, old_cc_op);
+ break;
+
+ case CC_OP_LTUGTU_32:
+ case CC_OP_LTUGTU_64:
+ cond = tcg_unsigned_cond(ltgt_cond[mask]);
+ if (cond == TCG_COND_NEVER) {
+ goto do_dynamic;
+ }
+ account_inline_branch(s, old_cc_op);
+ break;
+
+ case CC_OP_NZ:
+ cond = nz_cond[mask];
+ if (cond == TCG_COND_NEVER) {
+ goto do_dynamic;
+ }
+ account_inline_branch(s, old_cc_op);
+ break;
+
+ case CC_OP_TM_32:
+ case CC_OP_TM_64:
+ switch (mask) {
+ case 8:
+ cond = TCG_COND_EQ;
+ break;
+ case 4 | 2 | 1:
+ cond = TCG_COND_NE;
+ break;
+ default:
+ goto do_dynamic;
+ }
+ account_inline_branch(s, old_cc_op);
+ break;
+
+ case CC_OP_ICM:
+ switch (mask) {
+ case 8:
+ cond = TCG_COND_EQ;
+ break;
+ case 4 | 2 | 1:
+ case 4 | 2:
+ cond = TCG_COND_NE;
+ break;
+ default:
+ goto do_dynamic;
+ }
+ account_inline_branch(s, old_cc_op);
+ break;
+
+ case CC_OP_FLOGR:
+ switch (mask & 0xa) {
+ case 8: /* src == 0 -> no one bit found */
+ cond = TCG_COND_EQ;
+ break;
+ case 2: /* src != 0 -> one bit found */
+ cond = TCG_COND_NE;
+ break;
+ default:
+ goto do_dynamic;
+ }
+ account_inline_branch(s, old_cc_op);
+ break;
+
+ case CC_OP_ADDU_32:
+ case CC_OP_ADDU_64:
+ switch (mask) {
+ case 8 | 2: /* vr == 0 */
+ cond = TCG_COND_EQ;
+ break;
+ case 4 | 1: /* vr != 0 */
+ cond = TCG_COND_NE;
+ break;
+ case 8 | 4: /* no carry -> vr >= src */
+ cond = TCG_COND_GEU;
+ break;
+ case 2 | 1: /* carry -> vr < src */
+ cond = TCG_COND_LTU;
+ break;
+ default:
+ goto do_dynamic;
+ }
+ account_inline_branch(s, old_cc_op);
+ break;
+
+ case CC_OP_SUBU_32:
+ case CC_OP_SUBU_64:
+ /* Note that CC=0 is impossible; treat it as don't-care. */
+ switch (mask & 7) {
+ case 2: /* zero -> op1 == op2 */
+ cond = TCG_COND_EQ;
+ break;
+ case 4 | 1: /* !zero -> op1 != op2 */
+ cond = TCG_COND_NE;
+ break;
+ case 4: /* borrow (!carry) -> op1 < op2 */
+ cond = TCG_COND_LTU;
+ break;
+ case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
+ cond = TCG_COND_GEU;
+ break;
+ default:
+ goto do_dynamic;
+ }
+ account_inline_branch(s, old_cc_op);
+ break;
+
+ default:
+ do_dynamic:
+ /* Calculate cc value. */
+ gen_op_calc_cc(s);
+ /* FALLTHRU */
+
+ case CC_OP_STATIC:
+ /* Jump based on CC. We'll load up the real cond below;
+ the assignment here merely avoids a compiler warning. */
+ account_noninline_branch(s, old_cc_op);
+ old_cc_op = CC_OP_STATIC;
+ cond = TCG_COND_NEVER;
+ break;
+ }
+
+ /* Load up the arguments of the comparison. */
+ c->is_64 = true;
+ c->g1 = c->g2 = false;
+ switch (old_cc_op) {
+ case CC_OP_LTGT0_32:
+ c->is_64 = false;
+ c->u.s32.a = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
+ c->u.s32.b = tcg_const_i32(0);
+ break;
+ case CC_OP_LTGT_32:
+ case CC_OP_LTUGTU_32:
+ case CC_OP_SUBU_32:
+ c->is_64 = false;
+ c->u.s32.a = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
+ c->u.s32.b = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
+ break;
+
+ case CC_OP_LTGT0_64:
+ case CC_OP_NZ:
+ case CC_OP_FLOGR:
+ c->u.s64.a = cc_dst;
+ c->u.s64.b = tcg_const_i64(0);
+ c->g1 = true;
+ break;
+ case CC_OP_LTGT_64:
+ case CC_OP_LTUGTU_64:
+ case CC_OP_SUBU_64:
+ c->u.s64.a = cc_src;
+ c->u.s64.b = cc_dst;
+ c->g1 = c->g2 = true;
+ break;
+
+ case CC_OP_TM_32:
+ case CC_OP_TM_64:
+ case CC_OP_ICM:
+ c->u.s64.a = tcg_temp_new_i64();
+ c->u.s64.b = tcg_const_i64(0);
+ tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
+ break;
+
+ case CC_OP_ADDU_32:
+ c->is_64 = false;
+ c->u.s32.a = tcg_temp_new_i32();
+ c->u.s32.b = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
+ if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
+ tcg_gen_movi_i32(c->u.s32.b, 0);
+ } else {
+ tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
+ }
+ break;
+
+ case CC_OP_ADDU_64:
+ c->u.s64.a = cc_vr;
+ c->g1 = true;
+ if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
+ c->u.s64.b = tcg_const_i64(0);
+ } else {
+ c->u.s64.b = cc_src;
+ c->g2 = true;
+ }
+ break;
+
+ case CC_OP_STATIC:
+ c->is_64 = false;
+ c->u.s32.a = cc_op;
+ c->g1 = true;
+ switch (mask) {
+ case 0x8 | 0x4 | 0x2: /* cc != 3 */
+ cond = TCG_COND_NE;
+ c->u.s32.b = tcg_const_i32(3);
+ break;
+ case 0x8 | 0x4 | 0x1: /* cc != 2 */
+ cond = TCG_COND_NE;
+ c->u.s32.b = tcg_const_i32(2);
+ break;
+ case 0x8 | 0x2 | 0x1: /* cc != 1 */
+ cond = TCG_COND_NE;
+ c->u.s32.b = tcg_const_i32(1);
+ break;
+ case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
+ cond = TCG_COND_EQ;
+ c->g1 = false;
+ c->u.s32.a = tcg_temp_new_i32();
+ c->u.s32.b = tcg_const_i32(0);
+ tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
+ break;
+ case 0x8 | 0x4: /* cc < 2 */
+ cond = TCG_COND_LTU;
+ c->u.s32.b = tcg_const_i32(2);
+ break;
+ case 0x8: /* cc == 0 */
+ cond = TCG_COND_EQ;
+ c->u.s32.b = tcg_const_i32(0);
+ break;
+ case 0x4 | 0x2 | 0x1: /* cc != 0 */
+ cond = TCG_COND_NE;
+ c->u.s32.b = tcg_const_i32(0);
+ break;
+ case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
+ cond = TCG_COND_NE;
+ c->g1 = false;
+ c->u.s32.a = tcg_temp_new_i32();
+ c->u.s32.b = tcg_const_i32(0);
+ tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
+ break;
+ case 0x4: /* cc == 1 */
+ cond = TCG_COND_EQ;
+ c->u.s32.b = tcg_const_i32(1);
+ break;
+ case 0x2 | 0x1: /* cc > 1 */
+ cond = TCG_COND_GTU;
+ c->u.s32.b = tcg_const_i32(1);
+ break;
+ case 0x2: /* cc == 2 */
+ cond = TCG_COND_EQ;
+ c->u.s32.b = tcg_const_i32(2);
+ break;
+ case 0x1: /* cc == 3 */
+ cond = TCG_COND_EQ;
+ c->u.s32.b = tcg_const_i32(3);
+ break;
+ default:
+ /* CC is masked by something else: (8 >> cc) & mask. */
+ cond = TCG_COND_NE;
+ c->g1 = false;
+ c->u.s32.a = tcg_const_i32(8);
+ c->u.s32.b = tcg_const_i32(0);
+ tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
+ tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
+ break;
+ }
+ break;
+
+ default:
+ abort();
+ }
+ c->cond = cond;
+}
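+
+/*
+ * Reminder on the mask convention used above (the standard BRANCH ON
+ * CONDITION encoding): mask bit 8 selects CC 0, bit 4 selects CC 1, bit 2
+ * selects CC 2 and bit 1 selects CC 3; this is also what the generic
+ * "(8 >> cc) & mask" fallback computes.
+ */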
+
+static void free_compare(DisasCompare *c)
+{
+ if (!c->g1) {
+ if (c->is_64) {
+ tcg_temp_free_i64(c->u.s64.a);
+ } else {
+ tcg_temp_free_i32(c->u.s32.a);
+ }
+ }
+ if (!c->g2) {
+ if (c->is_64) {
+ tcg_temp_free_i64(c->u.s64.b);
+ } else {
+ tcg_temp_free_i32(c->u.s32.b);
+ }
+ }
+}
+
+/* ====================================================================== */
+/* Define the insn format enumeration. */
+#define F0(N) FMT_##N,
+#define F1(N, X1) F0(N)
+#define F2(N, X1, X2) F0(N)
+#define F3(N, X1, X2, X3) F0(N)
+#define F4(N, X1, X2, X3, X4) F0(N)
+#define F5(N, X1, X2, X3, X4, X5) F0(N)
+
+typedef enum {
+#include "insn-format.def"
+} DisasFormat;
+
+#undef F0
+#undef F1
+#undef F2
+#undef F3
+#undef F4
+#undef F5
+
+/* Define a structure to hold the decoded fields. We'll store each inside
+ an array indexed by an enum. In order to conserve memory, we'll arrange
+ for fields that do not exist at the same time to overlap, thus the "C"
+ for compact. For checking purposes there is an "O" for original index
+ as well that will be applied to availability bitmaps. */
+
+enum DisasFieldIndexO {
+ FLD_O_r1,
+ FLD_O_r2,
+ FLD_O_r3,
+ FLD_O_m1,
+ FLD_O_m3,
+ FLD_O_m4,
+ FLD_O_b1,
+ FLD_O_b2,
+ FLD_O_b4,
+ FLD_O_d1,
+ FLD_O_d2,
+ FLD_O_d4,
+ FLD_O_x2,
+ FLD_O_l1,
+ FLD_O_l2,
+ FLD_O_i1,
+ FLD_O_i2,
+ FLD_O_i3,
+ FLD_O_i4,
+ FLD_O_i5
+};
+
+enum DisasFieldIndexC {
+ FLD_C_r1 = 0,
+ FLD_C_m1 = 0,
+ FLD_C_b1 = 0,
+ FLD_C_i1 = 0,
+
+ FLD_C_r2 = 1,
+ FLD_C_b2 = 1,
+ FLD_C_i2 = 1,
+
+ FLD_C_r3 = 2,
+ FLD_C_m3 = 2,
+ FLD_C_i3 = 2,
+
+ FLD_C_m4 = 3,
+ FLD_C_b4 = 3,
+ FLD_C_i4 = 3,
+ FLD_C_l1 = 3,
+
+ FLD_C_i5 = 4,
+ FLD_C_d1 = 4,
+
+ FLD_C_d2 = 5,
+
+ FLD_C_d4 = 6,
+ FLD_C_x2 = 6,
+ FLD_C_l2 = 6,
+
+ NUM_C_FIELD = 7
+};
+
+struct DisasFields {
+ uint64_t raw_insn;
+ unsigned op:8;
+ unsigned op2:8;
+ unsigned presentC:16;
+ unsigned int presentO;
+ int c[NUM_C_FIELD];
+};
+
+/* This is the way fields are to be accessed out of DisasFields. */
+#define have_field(S, F) have_field1((S), FLD_O_##F)
+#define get_field(S, F) get_field1((S), FLD_O_##F, FLD_C_##F)
+
+static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c)
+{
+ return (f->presentO >> c) & 1;
+}
+
+static int get_field1(const DisasFields *f, enum DisasFieldIndexO o,
+ enum DisasFieldIndexC c)
+{
+ assert(have_field1(f, o));
+ return f->c[c];
+}
+
+/* Describe the layout of each field in each format. */
+typedef struct DisasField {
+ unsigned int beg:8;
+ unsigned int size:8;
+ unsigned int type:2;
+ unsigned int indexC:6;
+ enum DisasFieldIndexO indexO:8;
+} DisasField;
+
+typedef struct DisasFormatInfo {
+ DisasField op[NUM_C_FIELD];
+} DisasFormatInfo;
+
+#define R(N, B) { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
+#define M(N, B) { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
+#define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
+ { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
+#define BXD(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
+ { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
+ { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
+#define BDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
+ { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
+#define BXDL(N) { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
+ { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
+ { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
+#define I(N, B, S) { B, S, 1, FLD_C_i##N, FLD_O_i##N }
+#define L(N, B, S) { B, S, 0, FLD_C_l##N, FLD_O_l##N }
+
+#define F0(N) { { } },
+#define F1(N, X1) { { X1 } },
+#define F2(N, X1, X2) { { X1, X2 } },
+#define F3(N, X1, X2, X3) { { X1, X2, X3 } },
+#define F4(N, X1, X2, X3, X4) { { X1, X2, X3, X4 } },
+#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
+
+static const DisasFormatInfo format_info[] = {
+#include "insn-format.def"
+};
+
+#undef F0
+#undef F1
+#undef F2
+#undef F3
+#undef F4
+#undef F5
+#undef R
+#undef M
+#undef BD
+#undef BXD
+#undef BDL
+#undef BXDL
+#undef I
+#undef L
+
+/* Generally, we'll extract operands into this structure, operate upon
+ them, and store them back. See the "in1", "in2", "prep", "wout" sets
+ of routines below for more details. */
+typedef struct {
+ bool g_out, g_out2, g_in1, g_in2;
+ TCGv_i64 out, out2, in1, in2;
+ TCGv_i64 addr1;
+} DisasOps;
+
+/* Instructions can place constraints on their operands, raising specification
+ exceptions if they are violated. To make this easy to automate, each "in1",
+ "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
+ of the following, or 0. To make this easy to document, we'll put the
+ SPEC_<name> defines next to <name>. */
+
+#define SPEC_r1_even 1
+#define SPEC_r2_even 2
+#define SPEC_r3_even 4
+#define SPEC_r1_f128 8
+#define SPEC_r2_f128 16
+
+/* Return values from translate_one, indicating the state of the TB. */
+typedef enum {
+ /* Continue the TB. */
+ NO_EXIT,
+ /* We have emitted one or more goto_tb. No fixup required. */
+ EXIT_GOTO_TB,
+ /* We are not using a goto_tb (for whatever reason), but have updated
+ the PC (for whatever reason), so there's no need to do it again on
+ exiting the TB. */
+ EXIT_PC_UPDATED,
+ /* We are exiting the TB, but have neither emitted a goto_tb, nor
+ updated the PC for the next instruction to be executed. */
+ EXIT_PC_STALE,
+ /* We are ending the TB with a noreturn function call, e.g. longjmp.
+ No following code will be executed. */
+ EXIT_NORETURN,
+} ExitStatus;
+
+typedef enum DisasFacility {
+ FAC_Z, /* zarch (default) */
+ FAC_CASS, /* compare and swap and store */
+ FAC_CASS2, /* compare and swap and store 2 */
+ FAC_DFP, /* decimal floating point */
+ FAC_DFPR, /* decimal floating point rounding */
+ FAC_DO, /* distinct operands */
+ FAC_EE, /* execute extensions */
+ FAC_EI, /* extended immediate */
+ FAC_FPE, /* floating point extension */
+ FAC_FPSSH, /* floating point support sign handling */
+ FAC_FPRGR, /* FPR-GR transfer */
+ FAC_GIE, /* general instructions extension */
+ FAC_HFP_MA, /* HFP multiply-and-add/subtract */
+ FAC_HW, /* high-word */
+ FAC_IEEEE_SIM, /* IEEE exception simulation */
+ FAC_MIE, /* miscellaneous-instruction-extensions */
+ FAC_LAT, /* load-and-trap */
+ FAC_LOC, /* load/store on condition */
+ FAC_LD, /* long displacement */
+ FAC_PC, /* population count */
+ FAC_SCF, /* store clock fast */
+ FAC_SFLE, /* store facility list extended */
+ FAC_ILA, /* interlocked access facility 1 */
+} DisasFacility;
+
+struct DisasInsn {
+ unsigned opc:16;
+ DisasFormat fmt:8;
+ DisasFacility fac:8;
+ unsigned spec:8;
+
+ const char *name;
+
+ void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
+ void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
+ void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);
+ void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
+ void (*help_cout)(DisasContext *, DisasOps *);
+ ExitStatus (*help_op)(DisasContext *, DisasOps *);
+
+ uint64_t data;
+};
+
+/* ====================================================================== */
+/* Miscellaneous helpers, used by several operations. */
+
+static void help_l2_shift(DisasContext *s, DisasFields *f,
+ DisasOps *o, int mask)
+{
+ int b2 = get_field(f, b2);
+ int d2 = get_field(f, d2);
+
+ if (b2 == 0) {
+ o->in2 = tcg_const_i64(d2 & mask);
+ } else {
+ o->in2 = get_address(s, 0, b2, d2);
+ tcg_gen_andi_i64(o->in2, o->in2, mask);
+ }
+}
+
+static ExitStatus help_goto_direct(DisasContext *s, uint64_t dest)
+{
+ if (dest == s->next_pc) {
+ per_branch(s, true);
+ return NO_EXIT;
+ }
+ if (use_goto_tb(s, dest)) {
+ update_cc_op(s);
+ per_breaking_event(s);
+ tcg_gen_goto_tb(0);
+ tcg_gen_movi_i64(psw_addr, dest);
+ tcg_gen_exit_tb((uintptr_t)s->tb);
+ return EXIT_GOTO_TB;
+ } else {
+ tcg_gen_movi_i64(psw_addr, dest);
+ per_branch(s, false);
+ return EXIT_PC_UPDATED;
+ }
+}
+
+static ExitStatus help_branch(DisasContext *s, DisasCompare *c,
+ bool is_imm, int imm, TCGv_i64 cdest)
+{
+ ExitStatus ret;
+ uint64_t dest = s->pc + 2 * imm;
+ TCGLabel *lab;
+
+ /* Take care of the special cases first. */
+ if (c->cond == TCG_COND_NEVER) {
+ ret = NO_EXIT;
+ goto egress;
+ }
+ if (is_imm) {
+ if (dest == s->next_pc) {
+ /* Branch to next. */
+ per_branch(s, true);
+ ret = NO_EXIT;
+ goto egress;
+ }
+ if (c->cond == TCG_COND_ALWAYS) {
+ ret = help_goto_direct(s, dest);
+ goto egress;
+ }
+ } else {
+ if (TCGV_IS_UNUSED_I64(cdest)) {
+ /* E.g. bcr %r0 -> no branch. */
+ ret = NO_EXIT;
+ goto egress;
+ }
+ if (c->cond == TCG_COND_ALWAYS) {
+ tcg_gen_mov_i64(psw_addr, cdest);
+ per_branch(s, false);
+ ret = EXIT_PC_UPDATED;
+ goto egress;
+ }
+ }
+
+ if (use_goto_tb(s, s->next_pc)) {
+ if (is_imm && use_goto_tb(s, dest)) {
+ /* Both exits can use goto_tb. */
+ update_cc_op(s);
+
+ lab = gen_new_label();
+ if (c->is_64) {
+ tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
+ } else {
+ tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
+ }
+
+ /* Branch not taken. */
+ tcg_gen_goto_tb(0);
+ tcg_gen_movi_i64(psw_addr, s->next_pc);
+ tcg_gen_exit_tb((uintptr_t)s->tb + 0);
+
+ /* Branch taken. */
+ gen_set_label(lab);
+ per_breaking_event(s);
+ tcg_gen_goto_tb(1);
+ tcg_gen_movi_i64(psw_addr, dest);
+ tcg_gen_exit_tb((uintptr_t)s->tb + 1);
+
+ ret = EXIT_GOTO_TB;
+ } else {
+ /* Fallthru can use goto_tb, but taken branch cannot. */
+ /* Store taken branch destination before the brcond. This
+ avoids having to allocate a new local temp to hold it.
+ We'll overwrite this in the not taken case anyway. */
+ if (!is_imm) {
+ tcg_gen_mov_i64(psw_addr, cdest);
+ }
+
+ lab = gen_new_label();
+ if (c->is_64) {
+ tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
+ } else {
+ tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
+ }
+
+ /* Branch not taken. */
+ update_cc_op(s);
+ tcg_gen_goto_tb(0);
+ tcg_gen_movi_i64(psw_addr, s->next_pc);
+ tcg_gen_exit_tb((uintptr_t)s->tb + 0);
+
+ gen_set_label(lab);
+ if (is_imm) {
+ tcg_gen_movi_i64(psw_addr, dest);
+ }
+ per_breaking_event(s);
+ ret = EXIT_PC_UPDATED;
+ }
+ } else {
+ /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
+ Most commonly we're single-stepping, or some other condition disables
+ all use of goto_tb. Just update the PC and exit. */
+
+ TCGv_i64 next = tcg_const_i64(s->next_pc);
+ if (is_imm) {
+ cdest = tcg_const_i64(dest);
+ }
+
+ if (c->is_64) {
+ tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
+ cdest, next);
+ per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
+ } else {
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 z = tcg_const_i64(0);
+ tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
+ tcg_gen_extu_i32_i64(t1, t0);
+ tcg_temp_free_i32(t0);
+ tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
+ per_branch_cond(s, TCG_COND_NE, t1, z);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(z);
+ }
+
+ if (is_imm) {
+ tcg_temp_free_i64(cdest);
+ }
+ tcg_temp_free_i64(next);
+
+ ret = EXIT_PC_UPDATED;
+ }
+
+ egress:
+ free_compare(c);
+ return ret;
+}
+
+/* ====================================================================== */
+/* The operations. These perform the bulk of the work for any insn,
+ usually after the operands have been loaded and output initialized. */
+
+static ExitStatus op_abs(DisasContext *s, DisasOps *o)
+{
+ TCGv_i64 z, n;
+ z = tcg_const_i64(0);
+ n = tcg_temp_new_i64();
+ tcg_gen_neg_i64(n, o->in2);
+ tcg_gen_movcond_i64(TCG_COND_LT, o->out, o->in2, z, n, o->in2);
+ tcg_temp_free_i64(n);
+ tcg_temp_free_i64(z);
+ return NO_EXIT;
+}
+
+static ExitStatus op_absf32(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
+ return NO_EXIT;
+}
+
+static ExitStatus op_absf64(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
+ return NO_EXIT;
+}
+
+static ExitStatus op_absf128(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
+ tcg_gen_mov_i64(o->out2, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_add(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_add_i64(o->out, o->in1, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_addc(DisasContext *s, DisasOps *o)
+{
+ DisasCompare cmp;
+ TCGv_i64 carry;
+
+ tcg_gen_add_i64(o->out, o->in1, o->in2);
+
+ /* The carry flag is the msb of CC, therefore the branch mask that would
+ create that comparison is 3. Feeding the generated comparison to
+ setcond produces the carry flag that we desire. */
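+ /* (Informal aside: with the usual ADDU cc encoding of 0 = zero/no carry,
+ 1 = not zero/no carry, 2 = zero/carry, 3 = not zero/carry, mask 3
+ selects CC 2 and CC 3, i.e. exactly the carry-out.) */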
+ disas_jcc(s, &cmp, 3);
+ carry = tcg_temp_new_i64();
+ if (cmp.is_64) {
+ tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
+ } else {
+ TCGv_i32 t = tcg_temp_new_i32();
+ tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
+ tcg_gen_extu_i32_i64(carry, t);
+ tcg_temp_free_i32(t);
+ }
+ free_compare(&cmp);
+
+ tcg_gen_add_i64(o->out, o->out, carry);
+ tcg_temp_free_i64(carry);
+ return NO_EXIT;
+}
+
+static ExitStatus op_aeb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_adb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_axb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
+ return_low128(o->out2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_and(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_and_i64(o->out, o->in1, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_andi(DisasContext *s, DisasOps *o)
+{
+ int shift = s->insn->data & 0xff;
+ int size = s->insn->data >> 8;
+ uint64_t mask = ((1ull << size) - 1) << shift;
+
+ assert(!o->g_in2);
+ tcg_gen_shli_i64(o->in2, o->in2, shift);
+ tcg_gen_ori_i64(o->in2, o->in2, ~mask);
+ tcg_gen_and_i64(o->out, o->in1, o->in2);
+
+ /* Produce the CC from only the bits manipulated. */
+ tcg_gen_andi_i64(cc_dst, o->out, mask);
+ set_cc_nz_u64(s, cc_dst);
+ return NO_EXIT;
+}
+
+static ExitStatus op_bas(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
+ if (!TCGV_IS_UNUSED_I64(o->in2)) {
+ tcg_gen_mov_i64(psw_addr, o->in2);
+ per_branch(s, false);
+ return EXIT_PC_UPDATED;
+ } else {
+ return NO_EXIT;
+ }
+}
+
+static ExitStatus op_basi(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_movi_i64(o->out, pc_to_link_info(s, s->next_pc));
+ return help_goto_direct(s, s->pc + 2 * get_field(s->fields, i2));
+}
+
+static ExitStatus op_bc(DisasContext *s, DisasOps *o)
+{
+ int m1 = get_field(s->fields, m1);
+ bool is_imm = have_field(s->fields, i2);
+ int imm = is_imm ? get_field(s->fields, i2) : 0;
+ DisasCompare c;
+
+ disas_jcc(s, &c, m1);
+ return help_branch(s, &c, is_imm, imm, o->in2);
+}
+
+static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s->fields, r1);
+ bool is_imm = have_field(s->fields, i2);
+ int imm = is_imm ? get_field(s->fields, i2) : 0;
+ DisasCompare c;
+ TCGv_i64 t;
+
+ c.cond = TCG_COND_NE;
+ c.is_64 = false;
+ c.g1 = false;
+ c.g2 = false;
+
+ t = tcg_temp_new_i64();
+ tcg_gen_subi_i64(t, regs[r1], 1);
+ store_reg32_i64(r1, t);
+ c.u.s32.a = tcg_temp_new_i32();
+ c.u.s32.b = tcg_const_i32(0);
+ tcg_gen_extrl_i64_i32(c.u.s32.a, t);
+ tcg_temp_free_i64(t);
+
+ return help_branch(s, &c, is_imm, imm, o->in2);
+}
+
+static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s->fields, r1);
+ int imm = get_field(s->fields, i2);
+ DisasCompare c;
+ TCGv_i64 t;
+
+ c.cond = TCG_COND_NE;
+ c.is_64 = false;
+ c.g1 = false;
+ c.g2 = false;
+
+ t = tcg_temp_new_i64();
+ tcg_gen_shri_i64(t, regs[r1], 32);
+ tcg_gen_subi_i64(t, t, 1);
+ store_reg32h_i64(r1, t);
+ c.u.s32.a = tcg_temp_new_i32();
+ c.u.s32.b = tcg_const_i32(0);
+ tcg_gen_extrl_i64_i32(c.u.s32.a, t);
+ tcg_temp_free_i64(t);
+
+ return help_branch(s, &c, 1, imm, o->in2);
+}
+
+static ExitStatus op_bct64(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s->fields, r1);
+ bool is_imm = have_field(s->fields, i2);
+ int imm = is_imm ? get_field(s->fields, i2) : 0;
+ DisasCompare c;
+
+ c.cond = TCG_COND_NE;
+ c.is_64 = true;
+ c.g1 = true;
+ c.g2 = false;
+
+ tcg_gen_subi_i64(regs[r1], regs[r1], 1);
+ c.u.s64.a = regs[r1];
+ c.u.s64.b = tcg_const_i64(0);
+
+ return help_branch(s, &c, is_imm, imm, o->in2);
+}
+
+static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s->fields, r1);
+ int r3 = get_field(s->fields, r3);
+ bool is_imm = have_field(s->fields, i2);
+ int imm = is_imm ? get_field(s->fields, i2) : 0;
+ DisasCompare c;
+ TCGv_i64 t;
+
+ c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
+ c.is_64 = false;
+ c.g1 = false;
+ c.g2 = false;
+
+ t = tcg_temp_new_i64();
+ tcg_gen_add_i64(t, regs[r1], regs[r3]);
+ c.u.s32.a = tcg_temp_new_i32();
+ c.u.s32.b = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(c.u.s32.a, t);
+ tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
+ store_reg32_i64(r1, t);
+ tcg_temp_free_i64(t);
+
+ return help_branch(s, &c, is_imm, imm, o->in2);
+}
+
+static ExitStatus op_bx64(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s->fields, r1);
+ int r3 = get_field(s->fields, r3);
+ bool is_imm = have_field(s->fields, i2);
+ int imm = is_imm ? get_field(s->fields, i2) : 0;
+ DisasCompare c;
+
+ c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
+ c.is_64 = true;
+
+ if (r1 == (r3 | 1)) {
+ c.u.s64.b = load_reg(r3 | 1);
+ c.g2 = false;
+ } else {
+ c.u.s64.b = regs[r3 | 1];
+ c.g2 = true;
+ }
+
+ tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
+ c.u.s64.a = regs[r1];
+ c.g1 = true;
+
+ return help_branch(s, &c, is_imm, imm, o->in2);
+}
+
+static ExitStatus op_cj(DisasContext *s, DisasOps *o)
+{
+ int imm, m3 = get_field(s->fields, m3);
+ bool is_imm;
+ DisasCompare c;
+
+ c.cond = ltgt_cond[m3];
+ if (s->insn->data) {
+ c.cond = tcg_unsigned_cond(c.cond);
+ }
+ c.is_64 = c.g1 = c.g2 = true;
+ c.u.s64.a = o->in1;
+ c.u.s64.b = o->in2;
+
+ is_imm = have_field(s->fields, i4);
+ if (is_imm) {
+ imm = get_field(s->fields, i4);
+ } else {
+ imm = 0;
+ o->out = get_address(s, 0, get_field(s->fields, b4),
+ get_field(s->fields, d4));
+ }
+
+ return help_branch(s, &c, is_imm, imm, o->out);
+}
+
+static ExitStatus op_ceb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_cdb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_cxb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_cfeb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+ gen_helper_cfeb(o->out, cpu_env, o->in2, m3);
+ tcg_temp_free_i32(m3);
+ gen_set_cc_nz_f32(s, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_cfdb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+ gen_helper_cfdb(o->out, cpu_env, o->in2, m3);
+ tcg_temp_free_i32(m3);
+ gen_set_cc_nz_f64(s, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_cfxb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+ gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m3);
+ tcg_temp_free_i32(m3);
+ gen_set_cc_nz_f128(s, o->in1, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_cgeb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+ gen_helper_cgeb(o->out, cpu_env, o->in2, m3);
+ tcg_temp_free_i32(m3);
+ gen_set_cc_nz_f32(s, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_cgdb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+ gen_helper_cgdb(o->out, cpu_env, o->in2, m3);
+ tcg_temp_free_i32(m3);
+ gen_set_cc_nz_f64(s, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_cgxb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+ gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m3);
+ tcg_temp_free_i32(m3);
+ gen_set_cc_nz_f128(s, o->in1, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_clfeb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+ gen_helper_clfeb(o->out, cpu_env, o->in2, m3);
+ tcg_temp_free_i32(m3);
+ gen_set_cc_nz_f32(s, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_clfdb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+ gen_helper_clfdb(o->out, cpu_env, o->in2, m3);
+ tcg_temp_free_i32(m3);
+ gen_set_cc_nz_f64(s, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_clfxb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+ gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m3);
+ tcg_temp_free_i32(m3);
+ gen_set_cc_nz_f128(s, o->in1, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_clgeb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+ gen_helper_clgeb(o->out, cpu_env, o->in2, m3);
+ tcg_temp_free_i32(m3);
+ gen_set_cc_nz_f32(s, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_clgdb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+ gen_helper_clgdb(o->out, cpu_env, o->in2, m3);
+ tcg_temp_free_i32(m3);
+ gen_set_cc_nz_f64(s, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_clgxb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+ gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m3);
+ tcg_temp_free_i32(m3);
+ gen_set_cc_nz_f128(s, o->in1, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_cegb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+ gen_helper_cegb(o->out, cpu_env, o->in2, m3);
+ tcg_temp_free_i32(m3);
+ return NO_EXIT;
+}
+
+static ExitStatus op_cdgb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+ gen_helper_cdgb(o->out, cpu_env, o->in2, m3);
+ tcg_temp_free_i32(m3);
+ return NO_EXIT;
+}
+
+static ExitStatus op_cxgb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+ gen_helper_cxgb(o->out, cpu_env, o->in2, m3);
+ tcg_temp_free_i32(m3);
+ return_low128(o->out2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_celgb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+ gen_helper_celgb(o->out, cpu_env, o->in2, m3);
+ tcg_temp_free_i32(m3);
+ return NO_EXIT;
+}
+
+static ExitStatus op_cdlgb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+ gen_helper_cdlgb(o->out, cpu_env, o->in2, m3);
+ tcg_temp_free_i32(m3);
+ return NO_EXIT;
+}
+
+static ExitStatus op_cxlgb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+ gen_helper_cxlgb(o->out, cpu_env, o->in2, m3);
+ tcg_temp_free_i32(m3);
+ return_low128(o->out2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_cksm(DisasContext *s, DisasOps *o)
+{
+ int r2 = get_field(s->fields, r2);
+ TCGv_i64 len = tcg_temp_new_i64();
+
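+ /* CHECKSUM: the helper folds the string addressed by R2/R2+1 into R1 (via
+ the low-128 path) and returns the number of bytes processed in LEN, by
+ which R2 is advanced and R2+1 reduced below. */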
+ potential_page_fault(s);
+ gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
+ set_cc_static(s);
+ return_low128(o->out);
+
+ tcg_gen_add_i64(regs[r2], regs[r2], len);
+ tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
+ tcg_temp_free_i64(len);
+
+ return NO_EXIT;
+}
+
+static ExitStatus op_clc(DisasContext *s, DisasOps *o)
+{
+ int l = get_field(s->fields, l1);
+ TCGv_i32 vl;
+
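+ /* COMPARE LOGICAL: operand lengths of 1, 2, 4 or 8 bytes are inlined as a
+ pair of loads; any other length falls back to the helper, which computes
+ the CC itself. */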
+ switch (l + 1) {
+ case 1:
+ tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
+ tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
+ break;
+ case 2:
+ tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
+ tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
+ break;
+ case 4:
+ tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
+ tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
+ break;
+ case 8:
+ tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
+ tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
+ break;
+ default:
+ potential_page_fault(s);
+ vl = tcg_const_i32(l);
+ gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
+ tcg_temp_free_i32(vl);
+ set_cc_static(s);
+ return NO_EXIT;
+ }
+ gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
+ return NO_EXIT;
+}
+
+static ExitStatus op_clcle(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
+ TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
+ potential_page_fault(s);
+ gen_helper_clcle(cc_op, cpu_env, r1, o->in2, r3);
+ tcg_temp_free_i32(r1);
+ tcg_temp_free_i32(r3);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_clm(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(t1, o->in1);
+ potential_page_fault(s);
+ gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
+ set_cc_static(s);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(m3);
+ return NO_EXIT;
+}
+
+static ExitStatus op_clst(DisasContext *s, DisasOps *o)
+{
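+ /* COMPARE LOGICAL STRING: general register 0 supplies the ending character;
+ the helper sets the CC and returns the two updated operand addresses, one
+ directly and one via the low-128 path. */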
+ potential_page_fault(s);
+ gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
+ set_cc_static(s);
+ return_low128(o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_cps(DisasContext *s, DisasOps *o)
+{
+ TCGv_i64 t = tcg_temp_new_i64();
+ tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
+ tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
+ tcg_gen_or_i64(o->out, o->out, t);
+ tcg_temp_free_i64(t);
+ return NO_EXIT;
+}
+
+static ExitStatus op_cs(DisasContext *s, DisasOps *o)
+{
+ /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
+ int d2 = get_field(s->fields, d2);
+ int b2 = get_field(s->fields, b2);
+ int is_64 = s->insn->data;
+ TCGv_i64 addr, mem, cc, z;
+
+ /* Note that in1 = R3 (new value) and
+ in2 = (zero-extended) R1 (expected value). */
+
+ /* Load the memory into the (temporary) output. While the PoO only talks
+ about moving the memory to R1 on inequality, doing so on equality as well
+ means that R1 equals the memory contents in every case. */
+ addr = get_address(s, 0, b2, d2);
+ if (is_64) {
+ tcg_gen_qemu_ld64(o->out, addr, get_mem_index(s));
+ } else {
+ tcg_gen_qemu_ld32u(o->out, addr, get_mem_index(s));
+ }
+
+ /* Are the memory and expected values (un)equal? Note that this setcond
+ produces the output CC value, thus the NE sense of the test. */
+ cc = tcg_temp_new_i64();
+ tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
+
+ /* If the memory and expected values are equal (CC==0), copy R3 to MEM.
+ Recall that we are allowed to unconditionally issue the store (and
+ thus any possible write trap), so (re-)store the original contents
+ of MEM in case of inequality. */
+ z = tcg_const_i64(0);
+ mem = tcg_temp_new_i64();
+ tcg_gen_movcond_i64(TCG_COND_EQ, mem, cc, z, o->in1, o->out);
+ if (is_64) {
+ tcg_gen_qemu_st64(mem, addr, get_mem_index(s));
+ } else {
+ tcg_gen_qemu_st32(mem, addr, get_mem_index(s));
+ }
+ tcg_temp_free_i64(z);
+ tcg_temp_free_i64(mem);
+ tcg_temp_free_i64(addr);
+
+ /* Store CC back to cc_op. Wait until after the store so that any
+ exception gets the old cc_op value. */
+ tcg_gen_extrl_i64_i32(cc_op, cc);
+ tcg_temp_free_i64(cc);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
+{
+ /* FIXME: needs an atomic solution for CONFIG_USER_ONLY. */
+ int r1 = get_field(s->fields, r1);
+ int r3 = get_field(s->fields, r3);
+ int d2 = get_field(s->fields, d2);
+ int b2 = get_field(s->fields, b2);
+ TCGv_i64 addrh, addrl, memh, meml, outh, outl, cc, z;
+
+ /* Note that R1:R1+1 = expected value and R3:R3+1 = new value. */
+
+ addrh = get_address(s, 0, b2, d2);
+ addrl = get_address(s, 0, b2, d2 + 8);
+ outh = tcg_temp_new_i64();
+ outl = tcg_temp_new_i64();
+
+ tcg_gen_qemu_ld64(outh, addrh, get_mem_index(s));
+ tcg_gen_qemu_ld64(outl, addrl, get_mem_index(s));
+
+ /* Fold the double-word compare with arithmetic. */
+ cc = tcg_temp_new_i64();
+ z = tcg_temp_new_i64();
+ tcg_gen_xor_i64(cc, outh, regs[r1]);
+ tcg_gen_xor_i64(z, outl, regs[r1 + 1]);
+ tcg_gen_or_i64(cc, cc, z);
+ tcg_gen_movi_i64(z, 0);
+ tcg_gen_setcond_i64(TCG_COND_NE, cc, cc, z);
+
+ memh = tcg_temp_new_i64();
+ meml = tcg_temp_new_i64();
+ tcg_gen_movcond_i64(TCG_COND_EQ, memh, cc, z, regs[r3], outh);
+ tcg_gen_movcond_i64(TCG_COND_EQ, meml, cc, z, regs[r3 + 1], outl);
+ tcg_temp_free_i64(z);
+
+ tcg_gen_qemu_st64(memh, addrh, get_mem_index(s));
+ tcg_gen_qemu_st64(meml, addrl, get_mem_index(s));
+ tcg_temp_free_i64(memh);
+ tcg_temp_free_i64(meml);
+ tcg_temp_free_i64(addrh);
+ tcg_temp_free_i64(addrl);
+
+ /* Save back state now that we've passed all exceptions. */
+ tcg_gen_mov_i64(regs[r1], outh);
+ tcg_gen_mov_i64(regs[r1 + 1], outl);
+ tcg_gen_extrl_i64_i32(cc_op, cc);
+ tcg_temp_free_i64(outh);
+ tcg_temp_free_i64(outl);
+ tcg_temp_free_i64(cc);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+#ifndef CONFIG_USER_ONLY
+static ExitStatus op_csp(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
+ check_privileged(s);
+ gen_helper_csp(cc_op, cpu_env, r1, o->in2);
+ tcg_temp_free_i32(r1);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+#endif
+
+static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
+{
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(t2, o->in1);
+ gen_helper_cvd(t1, t2);
+ tcg_temp_free_i32(t2);
+ tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
+ tcg_temp_free_i64(t1);
+ return NO_EXIT;
+}
+
+static ExitStatus op_ct(DisasContext *s, DisasOps *o)
+{
+ int m3 = get_field(s->fields, m3);
+ TCGLabel *lab = gen_new_label();
+ TCGCond c;
+
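+ /* COMPARE AND TRAP: branch past the trap when the inverted condition holds,
+ i.e. trap exactly when the comparison selected by M3 is satisfied. */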
+ c = tcg_invert_cond(ltgt_cond[m3]);
+ if (s->insn->data) {
+ c = tcg_unsigned_cond(c);
+ }
+ tcg_gen_brcond_i64(c, o->in1, o->in2, lab);
+
+ /* Trap. */
+ gen_trap(s);
+
+ gen_set_label(lab);
+ return NO_EXIT;
+}
+
+#ifndef CONFIG_USER_ONLY
+static ExitStatus op_diag(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
+ TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
+ TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2));
+
+ check_privileged(s);
+ update_psw_addr(s);
+ gen_op_calc_cc(s);
+
+ gen_helper_diag(cpu_env, r1, r3, func_code);
+
+ tcg_temp_free_i32(func_code);
+ tcg_temp_free_i32(r3);
+ tcg_temp_free_i32(r1);
+ return NO_EXIT;
+}
+#endif
+
+static ExitStatus op_divs32(DisasContext *s, DisasOps *o)
+{
+ gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
+ return_low128(o->out);
+ return NO_EXIT;
+}
+
+static ExitStatus op_divu32(DisasContext *s, DisasOps *o)
+{
+ gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
+ return_low128(o->out);
+ return NO_EXIT;
+}
+
+static ExitStatus op_divs64(DisasContext *s, DisasOps *o)
+{
+ gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
+ return_low128(o->out);
+ return NO_EXIT;
+}
+
+static ExitStatus op_divu64(DisasContext *s, DisasOps *o)
+{
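+ /* DIVIDE LOGICAL (64-bit): the 128-bit dividend is passed split across OUT
+ (high half) and OUT2 (low half); the two results come back through the
+ helper return value and the low-128 path. */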
+ gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
+ return_low128(o->out);
+ return NO_EXIT;
+}
+
+static ExitStatus op_deb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_ddb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_dxb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
+ return_low128(o->out2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_ear(DisasContext *s, DisasOps *o)
+{
+ int r2 = get_field(s->fields, r2);
+ tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
+ return NO_EXIT;
+}
+
+static ExitStatus op_ecag(DisasContext *s, DisasOps *o)
+{
+ /* No cache information provided. */
+ tcg_gen_movi_i64(o->out, -1);
+ return NO_EXIT;
+}
+
+static ExitStatus op_efpc(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
+ return NO_EXIT;
+}
+
+static ExitStatus op_epsw(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s->fields, r1);
+ int r2 = get_field(s->fields, r2);
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ /* Note the "subsequently" in the PoO, which implies a defined result
+ if r1 == r2. Thus we cannot defer these writes to an output hook. */
+ tcg_gen_shri_i64(t, psw_mask, 32);
+ store_reg32_i64(r1, t);
+ if (r2 != 0) {
+ store_reg32_i64(r2, psw_mask);
+ }
+
+ tcg_temp_free_i64(t);
+ return NO_EXIT;
+}
+
+static ExitStatus op_ex(DisasContext *s, DisasOps *o)
+{
+ /* ??? Perhaps a better way to implement EXECUTE is to set a bit in
+ tb->flags, (ab)use the tb->cs_base field as the address of
+ the template in memory, and grab 8 bits of tb->flags/cflags for
+ the contents of the register. We would then recognize all this
+ in gen_intermediate_code_internal, generating code for exactly
+ one instruction. This new TB then gets executed normally.
+
+ On the other hand, this seems to be mostly used for modifying
+ MVC inside of memcpy, which needs a helper call anyway. So
+ perhaps this doesn't bear thinking about any further. */
+
+ TCGv_i64 tmp;
+
+ update_psw_addr(s);
+ gen_op_calc_cc(s);
+
+ tmp = tcg_const_i64(s->next_pc);
+ gen_helper_ex(cc_op, cpu_env, cc_op, o->in1, o->in2, tmp);
+ tcg_temp_free_i64(tmp);
+
+ return NO_EXIT;
+}
+
+static ExitStatus op_fieb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+ gen_helper_fieb(o->out, cpu_env, o->in2, m3);
+ tcg_temp_free_i32(m3);
+ return NO_EXIT;
+}
+
+static ExitStatus op_fidb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+ gen_helper_fidb(o->out, cpu_env, o->in2, m3);
+ tcg_temp_free_i32(m3);
+ return NO_EXIT;
+}
+
+static ExitStatus op_fixb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
+ gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m3);
+ return_low128(o->out2);
+ tcg_temp_free_i32(m3);
+ return NO_EXIT;
+}
+
+static ExitStatus op_flogr(DisasContext *s, DisasOps *o)
+{
+ /* We'll use the original input for cc computation, since we get to
+ compare that against 0, which ought to be better than comparing
+ the real output against 64. It also lets cc_dst be a convenient
+ temporary during our computation. */
+ gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);
+
+ /* R1 = IN ? CLZ(IN) : 64. */
+ gen_helper_clz(o->out, o->in2);
+
+ /* R1+1 = IN & ~(found bit). Note that we may attempt to shift this
+ value by 64, which is undefined. But since the shift is 64 iff the
+ input is zero, we still get the correct result after and'ing. */
+ tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
+ tcg_gen_shr_i64(o->out2, o->out2, o->out);
+ tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_icm(DisasContext *s, DisasOps *o)
+{
+ int m3 = get_field(s->fields, m3);
+ int pos, len, base = s->insn->data;
+ TCGv_i64 tmp = tcg_temp_new_i64();
+ uint64_t ccm;
+
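+ /* INSERT CHARACTERS UNDER MASK: the bytes selected by M3 are loaded from
+ memory and inserted into the corresponding byte positions of R1; the CC is
+ computed from the inserted bits only, hence the CCM mask. */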
+ switch (m3) {
+ case 0xf:
+ /* Effectively a 32-bit load. */
+ tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s));
+ len = 32;
+ goto one_insert;
+
+ case 0xc:
+ case 0x6:
+ case 0x3:
+ /* Effectively a 16-bit load. */
+ tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s));
+ len = 16;
+ goto one_insert;
+
+ case 0x8:
+ case 0x4:
+ case 0x2:
+ case 0x1:
+ /* Effectively an 8-bit load. */
+ tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
+ len = 8;
+ goto one_insert;
+
+ one_insert:
+ pos = base + ctz32(m3) * 8;
+ tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len);
+ ccm = ((1ull << len) - 1) << pos;
+ break;
+
+ default:
+ /* This is going to be a sequence of loads and inserts. */
+ pos = base + 32 - 8;
+ ccm = 0;
+ while (m3) {
+ if (m3 & 0x8) {
+ tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s));
+ tcg_gen_addi_i64(o->in2, o->in2, 1);
+ tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8);
+ ccm |= 0xff << pos;
+ }
+ m3 = (m3 << 1) & 0xf;
+ pos -= 8;
+ }
+ break;
+ }
+
+ tcg_gen_movi_i64(tmp, ccm);
+ gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
+ tcg_temp_free_i64(tmp);
+ return NO_EXIT;
+}
+
+static ExitStatus op_insi(DisasContext *s, DisasOps *o)
+{
+ int shift = s->insn->data & 0xff;
+ int size = s->insn->data >> 8;
+ tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size);
+ return NO_EXIT;
+}
+
+static ExitStatus op_ipm(DisasContext *s, DisasOps *o)
+{
+ TCGv_i64 t1;
+
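+ /* INSERT PROGRAM MASK: deposit the condition code and program mask from the
+ PSW into bits 32-39 of R1, leaving the remaining bits intact. */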
+ gen_op_calc_cc(s);
+ tcg_gen_andi_i64(o->out, o->out, ~0xff000000ull);
+
+ t1 = tcg_temp_new_i64();
+ tcg_gen_shli_i64(t1, psw_mask, 20);
+ tcg_gen_shri_i64(t1, t1, 36);
+ tcg_gen_or_i64(o->out, o->out, t1);
+
+ tcg_gen_extu_i32_i64(t1, cc_op);
+ tcg_gen_shli_i64(t1, t1, 28);
+ tcg_gen_or_i64(o->out, o->out, t1);
+ tcg_temp_free_i64(t1);
+ return NO_EXIT;
+}
+
+#ifndef CONFIG_USER_ONLY
+static ExitStatus op_ipte(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ gen_helper_ipte(cpu_env, o->in1, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_iske(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ gen_helper_iske(o->out, cpu_env, o->in2);
+ return NO_EXIT;
+}
+#endif
+
+static ExitStatus op_ldeb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_ldeb(o->out, cpu_env, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_ledb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_ledb(o->out, cpu_env, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_ldxb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_lexb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_lexb(o->out, cpu_env, o->in1, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_lxdb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_lxdb(o->out, cpu_env, o->in2);
+ return_low128(o->out2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_lxeb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_lxeb(o->out, cpu_env, o->in2);
+ return_low128(o->out2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_llgt(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
+ return NO_EXIT;
+}
+
+static ExitStatus op_ld8s(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
+ return NO_EXIT;
+}
+
+static ExitStatus op_ld8u(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
+ return NO_EXIT;
+}
+
+static ExitStatus op_ld16s(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
+ return NO_EXIT;
+}
+
+static ExitStatus op_ld16u(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
+ return NO_EXIT;
+}
+
+static ExitStatus op_ld32s(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
+ return NO_EXIT;
+}
+
+static ExitStatus op_ld32u(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
+ return NO_EXIT;
+}
+
+static ExitStatus op_ld64(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
+ return NO_EXIT;
+}
+
+static ExitStatus op_lat(DisasContext *s, DisasOps *o)
+{
+ TCGLabel *lab = gen_new_label();
+ store_reg32_i64(get_field(s->fields, r1), o->in2);
+ /* The value is stored even in case of trap. */
+ tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
+ gen_trap(s);
+ gen_set_label(lab);
+ return NO_EXIT;
+}
+
+static ExitStatus op_lgat(DisasContext *s, DisasOps *o)
+{
+ TCGLabel *lab = gen_new_label();
+ tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
+ /* The value is stored even in case of trap. */
+ tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
+ gen_trap(s);
+ gen_set_label(lab);
+ return NO_EXIT;
+}
+
+static ExitStatus op_lfhat(DisasContext *s, DisasOps *o)
+{
+ TCGLabel *lab = gen_new_label();
+ store_reg32h_i64(get_field(s->fields, r1), o->in2);
+ /* The value is stored even in case of trap. */
+ tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
+ gen_trap(s);
+ gen_set_label(lab);
+ return NO_EXIT;
+}
+
+static ExitStatus op_llgfat(DisasContext *s, DisasOps *o)
+{
+ TCGLabel *lab = gen_new_label();
+ tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
+ /* The value is stored even in case of trap. */
+ tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
+ gen_trap(s);
+ gen_set_label(lab);
+ return NO_EXIT;
+}
+
+static ExitStatus op_llgtat(DisasContext *s, DisasOps *o)
+{
+ TCGLabel *lab = gen_new_label();
+ tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
+ /* The value is stored even in case of trap. */
+ tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
+ gen_trap(s);
+ gen_set_label(lab);
+ return NO_EXIT;
+}
+
+static ExitStatus op_loc(DisasContext *s, DisasOps *o)
+{
+ DisasCompare c;
+
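+ /* LOAD ON CONDITION: if the condition encoded in M3 holds, the output takes
+ the new value (in2); otherwise the old value (in1) is kept. */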
+ disas_jcc(s, &c, get_field(s->fields, m3));
+
+ if (c.is_64) {
+ tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
+ o->in2, o->in1);
+ free_compare(&c);
+ } else {
+ TCGv_i32 t32 = tcg_temp_new_i32();
+ TCGv_i64 t, z;
+
+ tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
+ free_compare(&c);
+
+ t = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(t, t32);
+ tcg_temp_free_i32(t32);
+
+ z = tcg_const_i64(0);
+ tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
+ tcg_temp_free_i64(t);
+ tcg_temp_free_i64(z);
+ }
+
+ return NO_EXIT;
+}
+
+#ifndef CONFIG_USER_ONLY
+static ExitStatus op_lctl(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
+ TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
+ check_privileged(s);
+ potential_page_fault(s);
+ gen_helper_lctl(cpu_env, r1, o->in2, r3);
+ tcg_temp_free_i32(r1);
+ tcg_temp_free_i32(r3);
+ return NO_EXIT;
+}
+
+static ExitStatus op_lctlg(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
+ TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
+ check_privileged(s);
+ potential_page_fault(s);
+ gen_helper_lctlg(cpu_env, r1, o->in2, r3);
+ tcg_temp_free_i32(r1);
+ tcg_temp_free_i32(r3);
+ return NO_EXIT;
+}
+static ExitStatus op_lra(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ potential_page_fault(s);
+ gen_helper_lra(o->out, cpu_env, o->in2);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_lpsw(DisasContext *s, DisasOps *o)
+{
+ TCGv_i64 t1, t2;
+
+ check_privileged(s);
+ per_breaking_event(s);
+
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+ tcg_gen_addi_i64(o->in2, o->in2, 4);
+ tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
+ /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
+ tcg_gen_shli_i64(t1, t1, 32);
+ gen_helper_load_psw(cpu_env, t1, t2);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ return EXIT_NORETURN;
+}
+
+static ExitStatus op_lpswe(DisasContext *s, DisasOps *o)
+{
+ TCGv_i64 t1, t2;
+
+ check_privileged(s);
+ per_breaking_event(s);
+
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
+ tcg_gen_addi_i64(o->in2, o->in2, 8);
+ tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
+ gen_helper_load_psw(cpu_env, t1, t2);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ return EXIT_NORETURN;
+}
+#endif
+
+static ExitStatus op_lam(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
+ TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
+ potential_page_fault(s);
+ gen_helper_lam(cpu_env, r1, o->in2, r3);
+ tcg_temp_free_i32(r1);
+ tcg_temp_free_i32(r3);
+ return NO_EXIT;
+}
+
+static ExitStatus op_lm32(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s->fields, r1);
+ int r3 = get_field(s->fields, r3);
+ TCGv_i64 t1, t2;
+
+ /* Only one register to read. */
+ t1 = tcg_temp_new_i64();
+ if (unlikely(r1 == r3)) {
+ tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+ store_reg32_i64(r1, t1);
+ tcg_temp_free(t1);
+ return NO_EXIT;
+ }
+
+ /* First load the values of the first and last registers to trigger
+ possible page faults. */
+ t2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+ tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
+ tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
+ store_reg32_i64(r1, t1);
+ store_reg32_i64(r3, t2);
+
+ /* Only two registers to read. */
+ if (((r1 + 1) & 15) == r3) {
+ tcg_temp_free(t2);
+ tcg_temp_free(t1);
+ return NO_EXIT;
+ }
+
+ /* Then load the remaining registers. Page fault can't occur. */
+ r3 = (r3 - 1) & 15;
+ tcg_gen_movi_i64(t2, 4);
+ while (r1 != r3) {
+ r1 = (r1 + 1) & 15;
+ tcg_gen_add_i64(o->in2, o->in2, t2);
+ tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+ store_reg32_i64(r1, t1);
+ }
+ tcg_temp_free(t2);
+ tcg_temp_free(t1);
+
+ return NO_EXIT;
+}
+
+static ExitStatus op_lmh(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s->fields, r1);
+ int r3 = get_field(s->fields, r3);
+ TCGv_i64 t1, t2;
+
+ /* Only one register to read. */
+ t1 = tcg_temp_new_i64();
+ if (unlikely(r1 == r3)) {
+ tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+ store_reg32h_i64(r1, t1);
+ tcg_temp_free(t1);
+ return NO_EXIT;
+ }
+
+ /* First load the values of the first and last registers to trigger
+ possible page faults. */
+ t2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+ tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15));
+ tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s));
+ store_reg32h_i64(r1, t1);
+ store_reg32h_i64(r3, t2);
+
+ /* Only two registers to read. */
+ if (((r1 + 1) & 15) == r3) {
+ tcg_temp_free(t2);
+ tcg_temp_free(t1);
+ return NO_EXIT;
+ }
+
+ /* Then load the remaining registers. Page fault can't occur. */
+ r3 = (r3 - 1) & 15;
+ tcg_gen_movi_i64(t2, 4);
+ while (r1 != r3) {
+ r1 = (r1 + 1) & 15;
+ tcg_gen_add_i64(o->in2, o->in2, t2);
+ tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
+ store_reg32h_i64(r1, t1);
+ }
+ tcg_temp_free(t2);
+ tcg_temp_free(t1);
+
+ return NO_EXIT;
+}
+
+static ExitStatus op_lm64(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s->fields, r1);
+ int r3 = get_field(s->fields, r3);
+ TCGv_i64 t1, t2;
+
+ /* Only one register to read. */
+ if (unlikely(r1 == r3)) {
+ tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
+ return NO_EXIT;
+ }
+
+ /* First load the values of the first and last registers to trigger
+ possible page faults. */
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s));
+ tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
+ tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
+ tcg_gen_mov_i64(regs[r1], t1);
+ tcg_temp_free(t2);
+
+ /* Only two registers to read. */
+ if (((r1 + 1) & 15) == r3) {
+ tcg_temp_free(t1);
+ return NO_EXIT;
+ }
+
+ /* Then load the remaining registers. Page fault can't occur. */
+ r3 = (r3 - 1) & 15;
+ tcg_gen_movi_i64(t1, 8);
+ while (r1 != r3) {
+ r1 = (r1 + 1) & 15;
+ tcg_gen_add_i64(o->in2, o->in2, t1);
+ tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
+ }
+ tcg_temp_free(t1);
+
+ return NO_EXIT;
+}
+
+#ifndef CONFIG_USER_ONLY
+static ExitStatus op_lura(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ potential_page_fault(s);
+ gen_helper_lura(o->out, cpu_env, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_lurag(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ potential_page_fault(s);
+ gen_helper_lurag(o->out, cpu_env, o->in2);
+ return NO_EXIT;
+}
+#endif
+
+static ExitStatus op_mov2(DisasContext *s, DisasOps *o)
+{
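+ /* Alias the input as the output instead of copying: take over in2 and its
+ global flag so the generic store/free machinery handles it exactly once. */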
+ o->out = o->in2;
+ o->g_out = o->g_in2;
+ TCGV_UNUSED_I64(o->in2);
+ o->g_in2 = false;
+ return NO_EXIT;
+}
+
+static ExitStatus op_mov2e(DisasContext *s, DisasOps *o)
+{
+ int b2 = get_field(s->fields, b2);
+ TCGv ar1 = tcg_temp_new_i64();
+
+ o->out = o->in2;
+ o->g_out = o->g_in2;
+ TCGV_UNUSED_I64(o->in2);
+ o->g_in2 = false;
+
+ switch (s->tb->flags & FLAG_MASK_ASC) {
+ case PSW_ASC_PRIMARY >> 32:
+ tcg_gen_movi_i64(ar1, 0);
+ break;
+ case PSW_ASC_ACCREG >> 32:
+ tcg_gen_movi_i64(ar1, 1);
+ break;
+ case PSW_ASC_SECONDARY >> 32:
+ if (b2) {
+ tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2]));
+ } else {
+ tcg_gen_movi_i64(ar1, 0);
+ }
+ break;
+ case PSW_ASC_HOME >> 32:
+ tcg_gen_movi_i64(ar1, 2);
+ break;
+ }
+
+ tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
+ tcg_temp_free_i64(ar1);
+
+ return NO_EXIT;
+}
+
+static ExitStatus op_movx(DisasContext *s, DisasOps *o)
+{
+ o->out = o->in1;
+ o->out2 = o->in2;
+ o->g_out = o->g_in1;
+ o->g_out2 = o->g_in2;
+ TCGV_UNUSED_I64(o->in1);
+ TCGV_UNUSED_I64(o->in2);
+ o->g_in1 = o->g_in2 = false;
+ return NO_EXIT;
+}
+
+static ExitStatus op_mvc(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
+ potential_page_fault(s);
+ gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
+ tcg_temp_free_i32(l);
+ return NO_EXIT;
+}
+
+static ExitStatus op_mvcl(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
+ TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
+ potential_page_fault(s);
+ gen_helper_mvcl(cc_op, cpu_env, r1, r2);
+ tcg_temp_free_i32(r1);
+ tcg_temp_free_i32(r2);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_mvcle(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
+ TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
+ potential_page_fault(s);
+ gen_helper_mvcle(cc_op, cpu_env, r1, o->in2, r3);
+ tcg_temp_free_i32(r1);
+ tcg_temp_free_i32(r3);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+#ifndef CONFIG_USER_ONLY
+static ExitStatus op_mvcp(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s->fields, l1);
+ check_privileged(s);
+ potential_page_fault(s);
+ gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_mvcs(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s->fields, l1);
+ check_privileged(s);
+ potential_page_fault(s);
+ gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+#endif
+
+static ExitStatus op_mvpg(DisasContext *s, DisasOps *o)
+{
+ potential_page_fault(s);
+ gen_helper_mvpg(cpu_env, regs[0], o->in1, o->in2);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_mvst(DisasContext *s, DisasOps *o)
+{
+ potential_page_fault(s);
+ gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
+ set_cc_static(s);
+ return_low128(o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_mul(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_mul_i64(o->out, o->in1, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_mul128(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_meeb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_mdeb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_mdb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_mxb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
+ return_low128(o->out2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_mxdb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
+ return_low128(o->out2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_maeb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
+ gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
+ tcg_temp_free_i64(r3);
+ return NO_EXIT;
+}
+
+static ExitStatus op_madb(DisasContext *s, DisasOps *o)
+{
+ int r3 = get_field(s->fields, r3);
+ gen_helper_madb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
+ return NO_EXIT;
+}
+
+static ExitStatus op_mseb(DisasContext *s, DisasOps *o)
+{
+ TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
+ gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
+ tcg_temp_free_i64(r3);
+ return NO_EXIT;
+}
+
+static ExitStatus op_msdb(DisasContext *s, DisasOps *o)
+{
+ int r3 = get_field(s->fields, r3);
+ gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, fregs[r3]);
+ return NO_EXIT;
+}
+
+static ExitStatus op_nabs(DisasContext *s, DisasOps *o)
+{
+ TCGv_i64 z, n;
+ z = tcg_const_i64(0);
+ n = tcg_temp_new_i64();
+ tcg_gen_neg_i64(n, o->in2);
+ tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
+ tcg_temp_free_i64(n);
+ tcg_temp_free_i64(z);
+ return NO_EXIT;
+}
+
+static ExitStatus op_nabsf32(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
+ return NO_EXIT;
+}
+
+static ExitStatus op_nabsf64(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
+ return NO_EXIT;
+}
+
+static ExitStatus op_nabsf128(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
+ tcg_gen_mov_i64(o->out2, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_nc(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
+ potential_page_fault(s);
+ gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
+ tcg_temp_free_i32(l);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_neg(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_neg_i64(o->out, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_negf32(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
+ return NO_EXIT;
+}
+
+static ExitStatus op_negf64(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
+ return NO_EXIT;
+}
+
+static ExitStatus op_negf128(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
+ tcg_gen_mov_i64(o->out2, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_oc(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
+ potential_page_fault(s);
+ gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
+ tcg_temp_free_i32(l);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_or(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_or_i64(o->out, o->in1, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_ori(DisasContext *s, DisasOps *o)
+{
+ int shift = s->insn->data & 0xff;
+ int size = s->insn->data >> 8;
+ uint64_t mask = ((1ull << size) - 1) << shift;
+
+ assert(!o->g_in2);
+ tcg_gen_shli_i64(o->in2, o->in2, shift);
+ tcg_gen_or_i64(o->out, o->in1, o->in2);
+
+ /* Produce the CC from only the bits manipulated. */
+ tcg_gen_andi_i64(cc_dst, o->out, mask);
+ set_cc_nz_u64(s, cc_dst);
+ return NO_EXIT;
+}
+
+static ExitStatus op_popcnt(DisasContext *s, DisasOps *o)
+{
+ gen_helper_popcnt(o->out, o->in2);
+ return NO_EXIT;
+}
+
+#ifndef CONFIG_USER_ONLY
+static ExitStatus op_ptlb(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ gen_helper_ptlb(cpu_env);
+ return NO_EXIT;
+}
+#endif
+
+static ExitStatus op_risbg(DisasContext *s, DisasOps *o)
+{
+ int i3 = get_field(s->fields, i3);
+ int i4 = get_field(s->fields, i4);
+ int i5 = get_field(s->fields, i5);
+ int do_zero = i4 & 0x80;
+ uint64_t mask, imask, pmask;
+ int pos, len, rot;
+
+ /* Adjust the arguments for the specific insn. */
+ switch (s->fields->op2) {
+ case 0x55: /* risbg */
+ i3 &= 63;
+ i4 &= 63;
+ pmask = ~0;
+ break;
+ case 0x5d: /* risbhg */
+ i3 &= 31;
+ i4 &= 31;
+ pmask = 0xffffffff00000000ull;
+ break;
+ case 0x51: /* risblg */
+ i3 &= 31;
+ i4 &= 31;
+ pmask = 0x00000000ffffffffull;
+ break;
+ default:
+ abort();
+ }
+
+ /* MASK is the set of bits to be inserted from R2.
+ Take care for I3/I4 wraparound. */
+ mask = pmask >> i3;
+ if (i3 <= i4) {
+ mask ^= pmask >> i4 >> 1;
+ } else {
+ mask |= ~(pmask >> i4 >> 1);
+ }
+ mask &= pmask;
+
+ /* IMASK is the set of bits to be kept from R1. In the case of the high/low
+ insns, we need to keep the other half of the register. */
+ imask = ~mask | ~pmask;
+ if (do_zero) {
+ if (s->fields->op2 == 0x55) {
+ imask = 0;
+ } else {
+ imask = ~pmask;
+ }
+ }
+
+ /* In some cases we can implement this with deposit, which can be more
+ efficient on some hosts. */
+ if (~mask == imask && i3 <= i4) {
+ if (s->fields->op2 == 0x5d) {
+ i3 += 32, i4 += 32;
+ }
+ /* Note that we rotate the bits to be inserted to the lsb, not to
+ the position as described in the PoO. */
+ len = i4 - i3 + 1;
+ pos = 63 - i4;
+ rot = (i5 - pos) & 63;
+ } else {
+ pos = len = -1;
+ rot = i5 & 63;
+ }
+
+ /* Rotate the input as necessary. */
+ tcg_gen_rotli_i64(o->in2, o->in2, rot);
+
+ /* Insert the selected bits into the output. */
+ if (pos >= 0) {
+ tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
+ } else if (imask == 0) {
+ tcg_gen_andi_i64(o->out, o->in2, mask);
+ } else {
+ tcg_gen_andi_i64(o->in2, o->in2, mask);
+ tcg_gen_andi_i64(o->out, o->out, imask);
+ tcg_gen_or_i64(o->out, o->out, o->in2);
+ }
+ return NO_EXIT;
+}
+
+static ExitStatus op_rosbg(DisasContext *s, DisasOps *o)
+{
+ int i3 = get_field(s->fields, i3);
+ int i4 = get_field(s->fields, i4);
+ int i5 = get_field(s->fields, i5);
+ uint64_t mask;
+
+ /* If this is a test-only form, arrange to discard the result. */
+ if (i3 & 0x80) {
+ o->out = tcg_temp_new_i64();
+ o->g_out = false;
+ }
+
+ i3 &= 63;
+ i4 &= 63;
+ i5 &= 63;
+
+ /* MASK is the set of bits to be operated on from R2.
+ Take care for I3/I4 wraparound. */
+ mask = ~0ull >> i3;
+ if (i3 <= i4) {
+ mask ^= ~0ull >> i4 >> 1;
+ } else {
+ mask |= ~(~0ull >> i4 >> 1);
+ }
+
+ /* Rotate the input as necessary. */
+ tcg_gen_rotli_i64(o->in2, o->in2, i5);
+
+ /* Operate. */
+ switch (s->fields->op2) {
+ case 0x55: /* AND */
+ tcg_gen_ori_i64(o->in2, o->in2, ~mask);
+ tcg_gen_and_i64(o->out, o->out, o->in2);
+ break;
+ case 0x56: /* OR */
+ tcg_gen_andi_i64(o->in2, o->in2, mask);
+ tcg_gen_or_i64(o->out, o->out, o->in2);
+ break;
+ case 0x57: /* XOR */
+ tcg_gen_andi_i64(o->in2, o->in2, mask);
+ tcg_gen_xor_i64(o->out, o->out, o->in2);
+ break;
+ default:
+ abort();
+ }
+
+ /* Set the CC. */
+ tcg_gen_andi_i64(cc_dst, o->out, mask);
+ set_cc_nz_u64(s, cc_dst);
+ return NO_EXIT;
+}
+
+static ExitStatus op_rev16(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_bswap16_i64(o->out, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_rev32(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_bswap32_i64(o->out, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_rev64(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_bswap64_i64(o->out, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 t1 = tcg_temp_new_i32();
+ TCGv_i32 t2 = tcg_temp_new_i32();
+ TCGv_i32 to = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(t1, o->in1);
+ tcg_gen_extrl_i64_i32(t2, o->in2);
+ tcg_gen_rotl_i32(to, t1, t2);
+ tcg_gen_extu_i32_i64(o->out, to);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t2);
+ tcg_temp_free_i32(to);
+ return NO_EXIT;
+}
+
+static ExitStatus op_rll64(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_rotl_i64(o->out, o->in1, o->in2);
+ return NO_EXIT;
+}
+
+#ifndef CONFIG_USER_ONLY
+static ExitStatus op_rrbe(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ gen_helper_rrbe(cc_op, cpu_env, o->in2);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_sacf(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ gen_helper_sacf(cpu_env, o->in2);
+ /* Addressing mode has changed, so end the block. */
+ return EXIT_PC_STALE;
+}
+#endif
+
+static ExitStatus op_sam(DisasContext *s, DisasOps *o)
+{
+ int sam = s->insn->data;
+ TCGv_i64 tsam;
+ uint64_t mask;
+
+ switch (sam) {
+ case 0:
+ mask = 0xffffff;
+ break;
+ case 1:
+ mask = 0x7fffffff;
+ break;
+ default:
+ mask = -1;
+ break;
+ }
+
+ /* Bizarre but true, we check the address of the current insn for the
+ specification exception, not the next to be executed. Thus the PoO
+ documents that Bad Things Happen two bytes before the end. */
+ if (s->pc & ~mask) {
+ gen_program_exception(s, PGM_SPECIFICATION);
+ return EXIT_NORETURN;
+ }
+ s->next_pc &= mask;
+
+ tsam = tcg_const_i64(sam);
+ tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
+ tcg_temp_free_i64(tsam);
+
+ /* Always exit the TB, since we (may have) changed execution mode. */
+ return EXIT_PC_STALE;
+}
+
+static ExitStatus op_sar(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s->fields, r1);
+ tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
+ return NO_EXIT;
+}
+
+static ExitStatus op_seb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_sdb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_sxb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
+ return_low128(o->out2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_sqeb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_sqeb(o->out, cpu_env, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_sqdb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_sqdb(o->out, cpu_env, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_sqxb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2);
+ return_low128(o->out2);
+ return NO_EXIT;
+}
+
+#ifndef CONFIG_USER_ONLY
+static ExitStatus op_servc(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ potential_page_fault(s);
+ gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_sigp(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
+ check_privileged(s);
+ potential_page_fault(s);
+ gen_helper_sigp(cc_op, cpu_env, o->in2, r1, o->in1);
+ tcg_temp_free_i32(r1);
+ return NO_EXIT;
+}
+#endif
+
+static ExitStatus op_soc(DisasContext *s, DisasOps *o)
+{
+ DisasCompare c;
+ TCGv_i64 a;
+ TCGLabel *lab;
+ int r1;
+
+ disas_jcc(s, &c, get_field(s->fields, m3));
+
+ /* We want to store when the condition is fulfilled, so branch
+ out when it is not. */
+ c.cond = tcg_invert_cond(c.cond);
+
+ lab = gen_new_label();
+ if (c.is_64) {
+ tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
+ } else {
+ tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
+ }
+ free_compare(&c);
+
+ r1 = get_field(s->fields, r1);
+ a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
+ if (s->insn->data) {
+ tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
+ } else {
+ tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
+ }
+ tcg_temp_free_i64(a);
+
+ gen_set_label(lab);
+ return NO_EXIT;
+}
+
+static ExitStatus op_sla(DisasContext *s, DisasOps *o)
+{
+ uint64_t sign = 1ull << s->insn->data;
+ enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
+ gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
+ tcg_gen_shl_i64(o->out, o->in1, o->in2);
+ /* The arithmetic left shift is curious in that it does not affect
+ the sign bit. Copy that over from the source unchanged. */
+ tcg_gen_andi_i64(o->out, o->out, ~sign);
+ tcg_gen_andi_i64(o->in1, o->in1, sign);
+ tcg_gen_or_i64(o->out, o->out, o->in1);
+ return NO_EXIT;
+}
+
+static ExitStatus op_sll(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_shl_i64(o->out, o->in1, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_sra(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_sar_i64(o->out, o->in1, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_srl(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_shr_i64(o->out, o->in1, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_sfpc(DisasContext *s, DisasOps *o)
+{
+ gen_helper_sfpc(cpu_env, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_sfas(DisasContext *s, DisasOps *o)
+{
+ gen_helper_sfas(cpu_env, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_srnm(DisasContext *s, DisasOps *o)
+{
+ int b2 = get_field(s->fields, b2);
+ int d2 = get_field(s->fields, d2);
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ int mask, pos, len;
+
+ switch (s->fields->op2) {
+ case 0x99: /* SRNM */
+ pos = 0, len = 2;
+ break;
+ case 0xb8: /* SRNMB */
+ pos = 0, len = 3;
+ break;
+ case 0xb9: /* SRNMT */
+ pos = 4, len = 3;
+ break;
+ default:
+ tcg_abort();
+ }
+ mask = (1 << len) - 1;
+
+ /* Insert the value into the appropriate field of the FPC. */
+ if (b2 == 0) {
+ tcg_gen_movi_i64(t1, d2 & mask);
+ } else {
+ tcg_gen_addi_i64(t1, regs[b2], d2);
+ tcg_gen_andi_i64(t1, t1, mask);
+ }
+ tcg_gen_ld32u_i64(t2, cpu_env, offsetof(CPUS390XState, fpc));
+ tcg_gen_deposit_i64(t2, t2, t1, pos, len);
+ tcg_temp_free_i64(t1);
+
+ /* Then install the new FPC to set the rounding mode in fpu_status. */
+ gen_helper_sfpc(cpu_env, t2);
+ tcg_temp_free_i64(t2);
+ return NO_EXIT;
+}
+
+#ifndef CONFIG_USER_ONLY
+static ExitStatus op_spka(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
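+ /* SET PSW KEY FROM ADDRESS: bits 56-59 of the second-operand address become
+ the PSW key. */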
+ tcg_gen_shri_i64(o->in2, o->in2, 4);
+ tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY - 4, 4);
+ return NO_EXIT;
+}
+
+static ExitStatus op_sske(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ gen_helper_sske(cpu_env, o->in1, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_ssm(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
+ return NO_EXIT;
+}
+
+static ExitStatus op_stap(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ /* ??? Surely cpu address != cpu number. In any case the previous
+ version of this stored more than the required half-word, so it
+ is unlikely this has ever been tested. */
+ tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
+ return NO_EXIT;
+}
+
+static ExitStatus op_stck(DisasContext *s, DisasOps *o)
+{
+ gen_helper_stck(o->out, cpu_env);
+ /* ??? We don't implement clock states. */
+ gen_op_movi_cc(s, 0);
+ return NO_EXIT;
+}
+
+static ExitStatus op_stcke(DisasContext *s, DisasOps *o)
+{
+ TCGv_i64 c1 = tcg_temp_new_i64();
+ TCGv_i64 c2 = tcg_temp_new_i64();
+ gen_helper_stck(c1, cpu_env);
+ /* Shift the 64-bit value into its place as a zero-extended
+ 104-bit value. Note that "bit positions 64-103 are always
+ non-zero so that they compare differently to STCK"; we set
+ the least significant bit to 1. */
+ tcg_gen_shli_i64(c2, c1, 56);
+ tcg_gen_shri_i64(c1, c1, 8);
+ tcg_gen_ori_i64(c2, c2, 0x10000);
+ tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
+ tcg_gen_addi_i64(o->in2, o->in2, 8);
+ tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
+ tcg_temp_free_i64(c1);
+ tcg_temp_free_i64(c2);
+ /* ??? We don't implement clock states. */
+ gen_op_movi_cc(s, 0);
+ return NO_EXIT;
+}
+
+static ExitStatus op_sckc(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ gen_helper_sckc(cpu_env, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_stckc(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ gen_helper_stckc(o->out, cpu_env);
+ return NO_EXIT;
+}
+
+static ExitStatus op_stctg(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
+ TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
+ check_privileged(s);
+ potential_page_fault(s);
+ gen_helper_stctg(cpu_env, r1, o->in2, r3);
+ tcg_temp_free_i32(r1);
+ tcg_temp_free_i32(r3);
+ return NO_EXIT;
+}
+
+static ExitStatus op_stctl(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
+ TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
+ check_privileged(s);
+ potential_page_fault(s);
+ gen_helper_stctl(cpu_env, r1, o->in2, r3);
+ tcg_temp_free_i32(r1);
+ tcg_temp_free_i32(r3);
+ return NO_EXIT;
+}
+
+static ExitStatus op_stidp(DisasContext *s, DisasOps *o)
+{
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
+ check_privileged(s);
+ tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, cpu_num));
+ tcg_gen_ld32u_i64(t1, cpu_env, offsetof(CPUS390XState, machine_type));
+ tcg_gen_deposit_i64(o->out, o->out, t1, 32, 32);
+ tcg_temp_free_i64(t1);
+
+ return NO_EXIT;
+}
+
+static ExitStatus op_spt(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ gen_helper_spt(cpu_env, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_stfl(DisasContext *s, DisasOps *o)
+{
+ TCGv_i64 f, a;
+ /* We really ought to have more complete indication of facilities
+ that we implement. Address this when STFLE is implemented. */
+ check_privileged(s);
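+ /* STFL stores one word of facility bits at real address 200 in the prefix
+ area; the constant below advertises only the two leading facility bits. */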
+ f = tcg_const_i64(0xc0000000);
+ a = tcg_const_i64(200);
+ tcg_gen_qemu_st32(f, a, get_mem_index(s));
+ tcg_temp_free_i64(f);
+ tcg_temp_free_i64(a);
+ return NO_EXIT;
+}
+
+static ExitStatus op_stpt(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ gen_helper_stpt(o->out, cpu_env);
+ return NO_EXIT;
+}
+
+static ExitStatus op_stsi(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ potential_page_fault(s);
+ gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_spx(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ gen_helper_spx(cpu_env, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_xsch(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ potential_page_fault(s);
+ gen_helper_xsch(cpu_env, regs[1]);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_csch(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ potential_page_fault(s);
+ gen_helper_csch(cpu_env, regs[1]);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_hsch(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ potential_page_fault(s);
+ gen_helper_hsch(cpu_env, regs[1]);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_msch(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ potential_page_fault(s);
+ gen_helper_msch(cpu_env, regs[1], o->in2);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_rchp(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ potential_page_fault(s);
+ gen_helper_rchp(cpu_env, regs[1]);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_rsch(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ potential_page_fault(s);
+ gen_helper_rsch(cpu_env, regs[1]);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_ssch(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ potential_page_fault(s);
+ gen_helper_ssch(cpu_env, regs[1], o->in2);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_stsch(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ potential_page_fault(s);
+ gen_helper_stsch(cpu_env, regs[1], o->in2);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_tsch(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ potential_page_fault(s);
+ gen_helper_tsch(cpu_env, regs[1], o->in2);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_chsc(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ potential_page_fault(s);
+ gen_helper_chsc(cpu_env, o->in2);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_stpx(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
+ tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
+ return NO_EXIT;
+}
+
+static ExitStatus op_stnosm(DisasContext *s, DisasOps *o)
+{
+ uint64_t i2 = get_field(s->fields, i2);
+ TCGv_i64 t;
+
+ check_privileged(s);
+
+ /* It is important to do what the instruction name says: STORE THEN.
+ If we let the output hook perform the store and then fault and restart,
+ we would have the wrong SYSTEM MASK in place. */
+ t = tcg_temp_new_i64();
+ tcg_gen_shri_i64(t, psw_mask, 56);
+ tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
+ tcg_temp_free_i64(t);
+
+ if (s->fields->op == 0xac) {
+ tcg_gen_andi_i64(psw_mask, psw_mask,
+ (i2 << 56) | 0x00ffffffffffffffull);
+ } else {
+ tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
+ }
+ return NO_EXIT;
+}
+
+static ExitStatus op_stura(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ potential_page_fault(s);
+ gen_helper_stura(cpu_env, o->in2, o->in1);
+ return NO_EXIT;
+}
+
+static ExitStatus op_sturg(DisasContext *s, DisasOps *o)
+{
+ check_privileged(s);
+ potential_page_fault(s);
+ gen_helper_sturg(cpu_env, o->in2, o->in1);
+ return NO_EXIT;
+}
+#endif
+
+static ExitStatus op_st8(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
+ return NO_EXIT;
+}
+
+static ExitStatus op_st16(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
+ return NO_EXIT;
+}
+
+static ExitStatus op_st32(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
+ return NO_EXIT;
+}
+
+static ExitStatus op_st64(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
+ return NO_EXIT;
+}
+
+static ExitStatus op_stam(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
+ TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
+ potential_page_fault(s);
+ gen_helper_stam(cpu_env, r1, o->in2, r3);
+ tcg_temp_free_i32(r1);
+ tcg_temp_free_i32(r3);
+ return NO_EXIT;
+}
+
+static ExitStatus op_stcm(DisasContext *s, DisasOps *o)
+{
+ int m3 = get_field(s->fields, m3);
+ int pos, base = s->insn->data;
+ TCGv_i64 tmp = tcg_temp_new_i64();
+
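+ /* STORE CHARACTERS UNDER MASK: store the bytes of R1 selected by M3 to
+ successive byte locations; s->insn->data picks the word of R1 (low for
+ STCM, high for STCMH) that supplies them. */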
+ pos = base + ctz32(m3) * 8;
+ switch (m3) {
+ case 0xf:
+ /* Effectively a 32-bit store. */
+ tcg_gen_shri_i64(tmp, o->in1, pos);
+ tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
+ break;
+
+ case 0xc:
+ case 0x6:
+ case 0x3:
+ /* Effectively a 16-bit store. */
+ tcg_gen_shri_i64(tmp, o->in1, pos);
+ tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
+ break;
+
+ case 0x8:
+ case 0x4:
+ case 0x2:
+ case 0x1:
+ /* Effectively an 8-bit store. */
+ tcg_gen_shri_i64(tmp, o->in1, pos);
+ tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
+ break;
+
+ default:
+ /* This is going to be a sequence of shifts and stores. */
+ pos = base + 32 - 8;
+ while (m3) {
+ if (m3 & 0x8) {
+ tcg_gen_shri_i64(tmp, o->in1, pos);
+ tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
+ tcg_gen_addi_i64(o->in2, o->in2, 1);
+ }
+ m3 = (m3 << 1) & 0xf;
+ pos -= 8;
+ }
+ break;
+ }
+ tcg_temp_free_i64(tmp);
+ return NO_EXIT;
+}
+
+static ExitStatus op_stm(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s->fields, r1);
+ int r3 = get_field(s->fields, r3);
+ int size = s->insn->data;
+ TCGv_i64 tsize = tcg_const_i64(size);
+
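+ /* STORE MULTIPLE: store registers R1 through R3, wrapping from 15 back to
+ 0, to consecutive words or doublewords of SIZE bytes each. */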
+ while (1) {
+ if (size == 8) {
+ tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
+ } else {
+ tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
+ }
+ if (r1 == r3) {
+ break;
+ }
+ tcg_gen_add_i64(o->in2, o->in2, tsize);
+ r1 = (r1 + 1) & 15;
+ }
+
+ tcg_temp_free_i64(tsize);
+ return NO_EXIT;
+}
+
+static ExitStatus op_stmh(DisasContext *s, DisasOps *o)
+{
+ int r1 = get_field(s->fields, r1);
+ int r3 = get_field(s->fields, r3);
+ TCGv_i64 t = tcg_temp_new_i64();
+ TCGv_i64 t4 = tcg_const_i64(4);
+ TCGv_i64 t32 = tcg_const_i64(32);
+
+ while (1) {
+ /* Store the high word of the register: shift it down into the low 32 bits
+ first, since the 32-bit store takes the low half of its operand. */
+ tcg_gen_shr_i64(t, regs[r1], t32);
+ tcg_gen_qemu_st32(t, o->in2, get_mem_index(s));
+ if (r1 == r3) {
+ break;
+ }
+ tcg_gen_add_i64(o->in2, o->in2, t4);
+ r1 = (r1 + 1) & 15;
+ }
+
+ tcg_temp_free_i64(t);
+ tcg_temp_free_i64(t4);
+ tcg_temp_free_i64(t32);
+ return NO_EXIT;
+}
+
+static ExitStatus op_srst(DisasContext *s, DisasOps *o)
+{
+ potential_page_fault(s);
+ gen_helper_srst(o->in1, cpu_env, regs[0], o->in1, o->in2);
+ set_cc_static(s);
+ return_low128(o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_sub(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_sub_i64(o->out, o->in1, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_subb(DisasContext *s, DisasOps *o)
+{
+ DisasCompare cmp;
+ TCGv_i64 borrow;
+
+ tcg_gen_sub_i64(o->out, o->in1, o->in2);
+
+ /* The !borrow flag is the msb of CC. Since we want the inverse of
+ that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4. */
+ disas_jcc(s, &cmp, 8 | 4);
+ borrow = tcg_temp_new_i64();
+ if (cmp.is_64) {
+ tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
+ } else {
+ TCGv_i32 t = tcg_temp_new_i32();
+ tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
+ tcg_gen_extu_i32_i64(borrow, t);
+ tcg_temp_free_i32(t);
+ }
+ free_compare(&cmp);
+
+ tcg_gen_sub_i64(o->out, o->out, borrow);
+ tcg_temp_free_i64(borrow);
+ return NO_EXIT;
+}
+
+static ExitStatus op_svc(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 t;
+
+ update_psw_addr(s);
+ update_cc_op(s);
+
+ t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
+ tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
+ tcg_temp_free_i32(t);
+
+ t = tcg_const_i32(s->next_pc - s->pc);
+ tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
+ tcg_temp_free_i32(t);
+
+ gen_exception(EXCP_SVC);
+ return EXIT_NORETURN;
+}
+
+static ExitStatus op_tceb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_tcdb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_tcxb(DisasContext *s, DisasOps *o)
+{
+ gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+#ifndef CONFIG_USER_ONLY
+static ExitStatus op_tprot(DisasContext *s, DisasOps *o)
+{
+ potential_page_fault(s);
+ gen_helper_tprot(cc_op, o->addr1, o->in2);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+#endif
+
+static ExitStatus op_tr(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
+ potential_page_fault(s);
+ gen_helper_tr(cpu_env, l, o->addr1, o->in2);
+ tcg_temp_free_i32(l);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_tre(DisasContext *s, DisasOps *o)
+{
+ potential_page_fault(s);
+ gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
+ return_low128(o->out2);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_trt(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
+ potential_page_fault(s);
+ gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
+ tcg_temp_free_i32(l);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_unpk(DisasContext *s, DisasOps *o)
+{
+ TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
+ potential_page_fault(s);
+ gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
+ tcg_temp_free_i32(l);
+ return NO_EXIT;
+}
+
+static ExitStatus op_xc(DisasContext *s, DisasOps *o)
+{
+ int d1 = get_field(s->fields, d1);
+ int d2 = get_field(s->fields, d2);
+ int b1 = get_field(s->fields, b1);
+ int b2 = get_field(s->fields, b2);
+ int l = get_field(s->fields, l1);
+ TCGv_i32 t32;
+
+ o->addr1 = get_address(s, 0, b1, d1);
+
+ /* If the addresses are identical, this is a store/memset of zero. */
+ if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
+ o->in2 = tcg_const_i64(0);
+
+ l++;
+ while (l >= 8) {
+ tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
+ l -= 8;
+ if (l > 0) {
+ tcg_gen_addi_i64(o->addr1, o->addr1, 8);
+ }
+ }
+ if (l >= 4) {
+ tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
+ l -= 4;
+ if (l > 0) {
+ tcg_gen_addi_i64(o->addr1, o->addr1, 4);
+ }
+ }
+ if (l >= 2) {
+ tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
+ l -= 2;
+ if (l > 0) {
+ tcg_gen_addi_i64(o->addr1, o->addr1, 2);
+ }
+ }
+ if (l) {
+ tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
+ }
+ gen_op_movi_cc(s, 0);
+ return NO_EXIT;
+ }
+
+ /* But in general we'll defer to a helper. */
+ o->in2 = get_address(s, 0, b2, d2);
+ t32 = tcg_const_i32(l);
+ potential_page_fault(s);
+ gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
+ tcg_temp_free_i32(t32);
+ set_cc_static(s);
+ return NO_EXIT;
+}
+
+static ExitStatus op_xor(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_xor_i64(o->out, o->in1, o->in2);
+ return NO_EXIT;
+}
+
+static ExitStatus op_xori(DisasContext *s, DisasOps *o)
+{
+ int shift = s->insn->data & 0xff;
+ int size = s->insn->data >> 8;
+ uint64_t mask = ((1ull << size) - 1) << shift;
+
+ assert(!o->g_in2);
+ tcg_gen_shli_i64(o->in2, o->in2, shift);
+ tcg_gen_xor_i64(o->out, o->in1, o->in2);
+
+ /* Produce the CC from only the bits manipulated. */
+ tcg_gen_andi_i64(cc_dst, o->out, mask);
+ set_cc_nz_u64(s, cc_dst);
+ return NO_EXIT;
+}
+
+static ExitStatus op_zero(DisasContext *s, DisasOps *o)
+{
+ o->out = tcg_const_i64(0);
+ return NO_EXIT;
+}
+
+static ExitStatus op_zero2(DisasContext *s, DisasOps *o)
+{
+ o->out = tcg_const_i64(0);
+ o->out2 = o->out;
+ o->g_out2 = true;
+ return NO_EXIT;
+}
+
+/* ====================================================================== */
+/* The "Cc OUTput" generators. Given the generated output (and in some cases
+ the original inputs), update the various cc data structures in order to
+ be able to compute the new condition code. */
+
+static void cout_abs32(DisasContext *s, DisasOps *o)
+{
+ gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
+}
+
+static void cout_abs64(DisasContext *s, DisasOps *o)
+{
+ gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
+}
+
+static void cout_adds32(DisasContext *s, DisasOps *o)
+{
+ gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
+}
+
+static void cout_adds64(DisasContext *s, DisasOps *o)
+{
+ gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
+}
+
+static void cout_addu32(DisasContext *s, DisasOps *o)
+{
+ gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
+}
+
+static void cout_addu64(DisasContext *s, DisasOps *o)
+{
+ gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
+}
+
+static void cout_addc32(DisasContext *s, DisasOps *o)
+{
+ gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
+}
+
+static void cout_addc64(DisasContext *s, DisasOps *o)
+{
+ gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
+}
+
+static void cout_cmps32(DisasContext *s, DisasOps *o)
+{
+ gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
+}
+
+static void cout_cmps64(DisasContext *s, DisasOps *o)
+{
+ gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
+}
+
+static void cout_cmpu32(DisasContext *s, DisasOps *o)
+{
+ gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
+}
+
+static void cout_cmpu64(DisasContext *s, DisasOps *o)
+{
+ gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
+}
+
+static void cout_f32(DisasContext *s, DisasOps *o)
+{
+ gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
+}
+
+static void cout_f64(DisasContext *s, DisasOps *o)
+{
+ gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
+}
+
+static void cout_f128(DisasContext *s, DisasOps *o)
+{
+ gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
+}
+
+static void cout_nabs32(DisasContext *s, DisasOps *o)
+{
+ gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
+}
+
+static void cout_nabs64(DisasContext *s, DisasOps *o)
+{
+ gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
+}
+
+static void cout_neg32(DisasContext *s, DisasOps *o)
+{
+ gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
+}
+
+static void cout_neg64(DisasContext *s, DisasOps *o)
+{
+ gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
+}
+
+static void cout_nz32(DisasContext *s, DisasOps *o)
+{
+ tcg_gen_ext32u_i64(cc_dst, o->out);
+ gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
+}
+
+static void cout_nz64(DisasContext *s, DisasOps *o)
+{
+ gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
+}
+
+static void cout_s32(DisasContext *s, DisasOps *o)
+{
+ gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
+}
+
+static void cout_s64(DisasContext *s, DisasOps *o)
+{
+ gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
+}
+
+static void cout_subs32(DisasContext *s, DisasOps *o)
+{
+ gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
+}
+
+static void cout_subs64(DisasContext *s, DisasOps *o)
+{
+ gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
+}
+
+static void cout_subu32(DisasContext *s, DisasOps *o)
+{
+ gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
+}
+
+static void cout_subu64(DisasContext *s, DisasOps *o)
+{
+ gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
+}
+
+static void cout_subb32(DisasContext *s, DisasOps *o)
+{
+ gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
+}
+
+static void cout_subb64(DisasContext *s, DisasOps *o)
+{
+ gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
+}
+
+static void cout_tm32(DisasContext *s, DisasOps *o)
+{
+ gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
+}
+
+static void cout_tm64(DisasContext *s, DisasOps *o)
+{
+ gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
+}
+
+/* ====================================================================== */
+/* The "PREParation" generators. These initialize the DisasOps.OUT fields
+ with the TCG register to which we will write. Used in combination with
+ the "wout" generators, in some cases we need a new temporary, and in
+ some cases we can write to a TCG global. */
+
+static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->out = tcg_temp_new_i64();
+}
+#define SPEC_prep_new 0
+
+static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->out = tcg_temp_new_i64();
+ o->out2 = tcg_temp_new_i64();
+}
+#define SPEC_prep_new_P 0
+
+static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->out = regs[get_field(f, r1)];
+ o->g_out = true;
+}
+#define SPEC_prep_r1 0
+
+static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ int r1 = get_field(f, r1);
+ o->out = regs[r1];
+ o->out2 = regs[r1 + 1];
+ o->g_out = o->g_out2 = true;
+}
+#define SPEC_prep_r1_P SPEC_r1_even
+
+static void prep_f1(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->out = fregs[get_field(f, r1)];
+ o->g_out = true;
+}
+#define SPEC_prep_f1 0
+
+static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ int r1 = get_field(f, r1);
+ o->out = fregs[r1];
+ o->out2 = fregs[r1 + 2];
+ o->g_out = o->g_out2 = true;
+}
+#define SPEC_prep_x1 SPEC_r1_f128
+
+/* ====================================================================== */
+/* The "Write OUTput" generators. These generally perform some non-trivial
+ copy of data to TCG globals, or to main memory. The trivial cases are
+ generally handled by having a "prep" generator install the TCG global
+ as the destination of the operation. */
+
+static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ store_reg(get_field(f, r1), o->out);
+}
+#define SPEC_wout_r1 0
+
+static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ int r1 = get_field(f, r1);
+ tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
+}
+#define SPEC_wout_r1_8 0
+
+static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ int r1 = get_field(f, r1);
+ tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
+}
+#define SPEC_wout_r1_16 0
+
+static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ store_reg32_i64(get_field(f, r1), o->out);
+}
+#define SPEC_wout_r1_32 0
+
+static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ store_reg32h_i64(get_field(f, r1), o->out);
+}
+#define SPEC_wout_r1_32h 0
+
+static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ int r1 = get_field(f, r1);
+ store_reg32_i64(r1, o->out);
+ store_reg32_i64(r1 + 1, o->out2);
+}
+#define SPEC_wout_r1_P32 SPEC_r1_even
+
+static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
+{
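+ /* Split the 64-bit value across the even/odd register pair:
+ low half to r1 + 1, high half to r1. */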
+ int r1 = get_field(f, r1);
+ store_reg32_i64(r1 + 1, o->out);
+ tcg_gen_shri_i64(o->out, o->out, 32);
+ store_reg32_i64(r1, o->out);
+}
+#define SPEC_wout_r1_D32 SPEC_r1_even
+
+static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ store_freg32_i64(get_field(f, r1), o->out);
+}
+#define SPEC_wout_e1 0
+
+static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ store_freg(get_field(f, r1), o->out);
+}
+#define SPEC_wout_f1 0
+
+static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ int f1 = get_field(s->fields, r1);
+ store_freg(f1, o->out);
+ store_freg(f1 + 2, o->out2);
+}
+#define SPEC_wout_x1 SPEC_r1_f128
+
+static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ if (get_field(f, r1) != get_field(f, r2)) {
+ store_reg32_i64(get_field(f, r1), o->out);
+ }
+}
+#define SPEC_wout_cond_r1r2_32 0
+
+static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ if (get_field(f, r1) != get_field(f, r2)) {
+ store_freg32_i64(get_field(f, r1), o->out);
+ }
+}
+#define SPEC_wout_cond_e1e2 0
+
+static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
+}
+#define SPEC_wout_m1_8 0
+
+static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
+}
+#define SPEC_wout_m1_16 0
+
+static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
+}
+#define SPEC_wout_m1_32 0
+
+static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
+}
+#define SPEC_wout_m1_64 0
+
+static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
+}
+#define SPEC_wout_m2_32 0
+
+static void wout_m2_32_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ /* XXX release reservation */
+ tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
+ store_reg32_i64(get_field(f, r1), o->in2);
+}
+#define SPEC_wout_m2_32_r1_atomic 0
+
+static void wout_m2_64_r1_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ /* XXX release reservation */
+ tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
+ store_reg(get_field(f, r1), o->in2);
+}
+#define SPEC_wout_m2_64_r1_atomic 0
+
+/* ====================================================================== */
+/* The "INput 1" generators. These load the first operand to an insn. */
+
+static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in1 = load_reg(get_field(f, r1));
+}
+#define SPEC_in1_r1 0
+
+static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in1 = regs[get_field(f, r1)];
+ o->g_in1 = true;
+}
+#define SPEC_in1_r1_o 0
+
+static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in1 = tcg_temp_new_i64();
+ tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
+}
+#define SPEC_in1_r1_32s 0
+
+static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in1 = tcg_temp_new_i64();
+ tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
+}
+#define SPEC_in1_r1_32u 0
+
+static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in1 = tcg_temp_new_i64();
+ tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
+}
+#define SPEC_in1_r1_sr32 0
+
+static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in1 = load_reg(get_field(f, r1) + 1);
+}
+#define SPEC_in1_r1p1 SPEC_r1_even
+
+static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in1 = tcg_temp_new_i64();
+ tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
+}
+#define SPEC_in1_r1p1_32s SPEC_r1_even
+
+static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in1 = tcg_temp_new_i64();
+ tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
+}
+#define SPEC_in1_r1p1_32u SPEC_r1_even
+
+static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ int r1 = get_field(f, r1);
+ o->in1 = tcg_temp_new_i64();
+ tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
+}
+#define SPEC_in1_r1_D32 SPEC_r1_even
+
+static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in1 = load_reg(get_field(f, r2));
+}
+#define SPEC_in1_r2 0
+
+static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in1 = tcg_temp_new_i64();
+ tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
+}
+#define SPEC_in1_r2_sr32 0
+
+static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in1 = load_reg(get_field(f, r3));
+}
+#define SPEC_in1_r3 0
+
+static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in1 = regs[get_field(f, r3)];
+ o->g_in1 = true;
+}
+#define SPEC_in1_r3_o 0
+
+static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in1 = tcg_temp_new_i64();
+ tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
+}
+#define SPEC_in1_r3_32s 0
+
+static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in1 = tcg_temp_new_i64();
+ tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
+}
+#define SPEC_in1_r3_32u 0
+
+static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ int r3 = get_field(f, r3);
+ o->in1 = tcg_temp_new_i64();
+ tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
+}
+#define SPEC_in1_r3_D32 SPEC_r3_even
+
+static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in1 = load_freg32_i64(get_field(f, r1));
+}
+#define SPEC_in1_e1 0
+
+static void in1_f1_o(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in1 = fregs[get_field(f, r1)];
+ o->g_in1 = true;
+}
+#define SPEC_in1_f1_o 0
+
+static void in1_x1_o(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ int r1 = get_field(f, r1);
+ o->out = fregs[r1];
+ o->out2 = fregs[r1 + 2];
+ o->g_out = o->g_out2 = true;
+}
+#define SPEC_in1_x1_o SPEC_r1_f128
+
+static void in1_f3_o(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in1 = fregs[get_field(f, r3)];
+ o->g_in1 = true;
+}
+#define SPEC_in1_f3_o 0
+
+static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
+}
+#define SPEC_in1_la1 0
+
+static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
+ o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
+}
+#define SPEC_in1_la2 0
+
+static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ in1_la1(s, f, o);
+ o->in1 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
+}
+#define SPEC_in1_m1_8u 0
+
+static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ in1_la1(s, f, o);
+ o->in1 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
+}
+#define SPEC_in1_m1_16s 0
+
+static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ in1_la1(s, f, o);
+ o->in1 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
+}
+#define SPEC_in1_m1_16u 0
+
+static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ in1_la1(s, f, o);
+ o->in1 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
+}
+#define SPEC_in1_m1_32s 0
+
+static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ in1_la1(s, f, o);
+ o->in1 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
+}
+#define SPEC_in1_m1_32u 0
+
+static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ in1_la1(s, f, o);
+ o->in1 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
+}
+#define SPEC_in1_m1_64 0
+
+/* ====================================================================== */
+/* The "INput 2" generators. These load the second operand to an insn. */
+
+static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in2 = regs[get_field(f, r1)];
+ o->g_in2 = true;
+}
+#define SPEC_in2_r1_o 0
+
+static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in2 = tcg_temp_new_i64();
+ tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]);
+}
+#define SPEC_in2_r1_16u 0
+
+static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in2 = tcg_temp_new_i64();
+ tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]);
+}
+#define SPEC_in2_r1_32u 0
+
+static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ int r1 = get_field(f, r1);
+ o->in2 = tcg_temp_new_i64();
+ tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]);
+}
+#define SPEC_in2_r1_D32 SPEC_r1_even
+
+static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in2 = load_reg(get_field(f, r2));
+}
+#define SPEC_in2_r2 0
+
+static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in2 = regs[get_field(f, r2)];
+ o->g_in2 = true;
+}
+#define SPEC_in2_r2_o 0
+
+static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ int r2 = get_field(f, r2);
+ if (r2 != 0) {
+ o->in2 = load_reg(r2);
+ }
+}
+#define SPEC_in2_r2_nz 0
+
+static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in2 = tcg_temp_new_i64();
+ tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]);
+}
+#define SPEC_in2_r2_8s 0
+
+static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in2 = tcg_temp_new_i64();
+ tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]);
+}
+#define SPEC_in2_r2_8u 0
+
+static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in2 = tcg_temp_new_i64();
+ tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]);
+}
+#define SPEC_in2_r2_16s 0
+
+static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in2 = tcg_temp_new_i64();
+ tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]);
+}
+#define SPEC_in2_r2_16u 0
+
+static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in2 = load_reg(get_field(f, r3));
+}
+#define SPEC_in2_r3 0
+
+static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in2 = tcg_temp_new_i64();
+ tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32);
+}
+#define SPEC_in2_r3_sr32 0
+
+static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in2 = tcg_temp_new_i64();
+ tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]);
+}
+#define SPEC_in2_r2_32s 0
+
+static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in2 = tcg_temp_new_i64();
+ tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]);
+}
+#define SPEC_in2_r2_32u 0
+
+static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in2 = tcg_temp_new_i64();
+ tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32);
+}
+#define SPEC_in2_r2_sr32 0
+
+static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in2 = load_freg32_i64(get_field(f, r2));
+}
+#define SPEC_in2_e2 0
+
+static void in2_f2_o(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in2 = fregs[get_field(f, r2)];
+ o->g_in2 = true;
+}
+#define SPEC_in2_f2_o 0
+
+static void in2_x2_o(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ int r2 = get_field(f, r2);
+ o->in1 = fregs[r2];
+ o->in2 = fregs[r2 + 2];
+ o->g_in1 = o->g_in2 = true;
+}
+#define SPEC_in2_x2_o SPEC_r2_f128
+
+static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in2 = get_address(s, 0, get_field(f, r2), 0);
+}
+#define SPEC_in2_ra2 0
+
+static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
+ o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
+}
+#define SPEC_in2_a2 0
+
+static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o)
+{
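+ /* Relative-immediate operands are signed halfword offsets from the
+ current insn address, hence the scaling by 2. */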
+ o->in2 = tcg_const_i64(s->pc + (int64_t)get_field(f, i2) * 2);
+}
+#define SPEC_in2_ri2 0
+
+static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ help_l2_shift(s, f, o, 31);
+}
+#define SPEC_in2_sh32 0
+
+static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ help_l2_shift(s, f, o, 63);
+}
+#define SPEC_in2_sh64 0
+
+static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ in2_a2(s, f, o);
+ tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s));
+}
+#define SPEC_in2_m2_8u 0
+
+static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ in2_a2(s, f, o);
+ tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s));
+}
+#define SPEC_in2_m2_16s 0
+
+static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ in2_a2(s, f, o);
+ tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
+}
+#define SPEC_in2_m2_16u 0
+
+static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ in2_a2(s, f, o);
+ tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
+}
+#define SPEC_in2_m2_32s 0
+
+static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ in2_a2(s, f, o);
+ tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
+}
+#define SPEC_in2_m2_32u 0
+
+static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ in2_a2(s, f, o);
+ tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
+}
+#define SPEC_in2_m2_64 0
+
+static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ in2_ri2(s, f, o);
+ tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s));
+}
+#define SPEC_in2_mri2_16u 0
+
+static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ in2_ri2(s, f, o);
+ tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s));
+}
+#define SPEC_in2_mri2_32s 0
+
+static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ in2_ri2(s, f, o);
+ tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
+}
+#define SPEC_in2_mri2_32u 0
+
+static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ in2_ri2(s, f, o);
+ tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
+}
+#define SPEC_in2_mri2_64 0
+
+static void in2_m2_32s_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ /* XXX should reserve the address */
+ in1_la2(s, f, o);
+ o->in2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld32s(o->in2, o->addr1, get_mem_index(s));
+}
+#define SPEC_in2_m2_32s_atomic 0
+
+static void in2_m2_64_atomic(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ /* XXX should reserve the address */
+ in1_la2(s, f, o);
+ o->in2 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld64(o->in2, o->addr1, get_mem_index(s));
+}
+#define SPEC_in2_m2_64_atomic 0
+
+static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in2 = tcg_const_i64(get_field(f, i2));
+}
+#define SPEC_in2_i2 0
+
+static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in2 = tcg_const_i64((uint8_t)get_field(f, i2));
+}
+#define SPEC_in2_i2_8u 0
+
+static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in2 = tcg_const_i64((uint16_t)get_field(f, i2));
+}
+#define SPEC_in2_i2_16u 0
+
+static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in2 = tcg_const_i64((uint32_t)get_field(f, i2));
+}
+#define SPEC_in2_i2_32u 0
+
+static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ uint64_t i2 = (uint16_t)get_field(f, i2);
+ o->in2 = tcg_const_i64(i2 << s->insn->data);
+}
+#define SPEC_in2_i2_16u_shl 0
+
+static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ uint64_t i2 = (uint32_t)get_field(f, i2);
+ o->in2 = tcg_const_i64(i2 << s->insn->data);
+}
+#define SPEC_in2_i2_32u_shl 0
+
+#ifndef CONFIG_USER_ONLY
+static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
+{
+ o->in2 = tcg_const_i64(s->fields->raw_insn);
+}
+#define SPEC_in2_insn 0
+#endif
+
+/* ====================================================================== */
+
+/* Find opc within the table of insns. This is formulated as a switch
+ statement so that (1) we get compile-time notice of cut-paste errors
+ for duplicated opcodes, and (2) the compiler generates the binary
+ search tree, rather than us having to post-process the table. */
+
+#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
+ D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0)
+
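+/* First expansion of insn-data.def: an enum giving each insn a unique
+ insn_<NAME> index. */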
+#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) insn_ ## NM,
+
+enum DisasInsnEnum {
+#include "insn-data.def"
+};
+
+#undef D
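+/* Second expansion: a table of DisasInsn descriptors, indexed by the
+ enum above. */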
+#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) { \
+ .opc = OPC, \
+ .fmt = FMT_##FT, \
+ .fac = FAC_##FC, \
+ .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
+ .name = #NM, \
+ .help_in1 = in1_##I1, \
+ .help_in2 = in2_##I2, \
+ .help_prep = prep_##P, \
+ .help_wout = wout_##W, \
+ .help_cout = cout_##CC, \
+ .help_op = op_##OP, \
+ .data = D \
+ },
+
+/* Allow 0 to be used for NULL in the table below. */
+#define in1_0 NULL
+#define in2_0 NULL
+#define prep_0 NULL
+#define wout_0 NULL
+#define cout_0 NULL
+#define op_0 NULL
+
+#define SPEC_in1_0 0
+#define SPEC_in2_0 0
+#define SPEC_prep_0 0
+#define SPEC_wout_0 0
+
+static const DisasInsn insn_info[] = {
+#include "insn-data.def"
+};
+
+#undef D
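+/* Third expansion: a switch mapping each opcode to its insn_info[] entry. */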
+#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
+ case OPC: return &insn_info[insn_ ## NM];
+
+static const DisasInsn *lookup_opc(uint16_t opc)
+{
+ switch (opc) {
+#include "insn-data.def"
+ default:
+ return NULL;
+ }
+}
+
+#undef D
+#undef C
+
+/* Extract a field from the insn. The INSN should be left-aligned in
+ the uint64_t so that we can more easily utilize the big-bit-endian
+ definitions we extract from the Principles of Operation. */
+
+static void extract_field(DisasFields *o, const DisasField *f, uint64_t insn)
+{
+ uint32_t r, m;
+
+ if (f->size == 0) {
+ return;
+ }
+
+ /* Zero extract the field from the insn. */
+ r = (insn << f->beg) >> (64 - f->size);
+
+ /* Sign-extend, or un-swap the field as necessary. */
+ switch (f->type) {
+ case 0: /* unsigned */
+ break;
+ case 1: /* signed */
+ assert(f->size <= 32);
+ m = 1u << (f->size - 1);
+ r = (r ^ m) - m;
+ break;
+ case 2: /* dl+dh split, signed 20 bit. */
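+ /* The raw extract has DH (8 bits, signed) in the low byte and DL
+ (12 bits) above it; reassemble as DH:DL and sign-extend from DH. */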
+ r = ((int8_t)r << 12) | (r >> 8);
+ break;
+ default:
+ abort();
+ }
+
+ /* Validate that the "compressed" encoding we selected above is valid,
+ i.e. that we haven't made two different original fields overlap. */
+ assert(((o->presentC >> f->indexC) & 1) == 0);
+ o->presentC |= 1 << f->indexC;
+ o->presentO |= 1 << f->indexO;
+
+ o->c[f->indexC] = r;
+}
+
+/* Lookup the insn at the current PC, extracting the operands into O and
+ returning the info struct for the insn. Returns NULL for invalid insn. */
+
+static const DisasInsn *extract_insn(CPUS390XState *env, DisasContext *s,
+ DisasFields *f)
+{
+ uint64_t insn, pc = s->pc;
+ int op, op2, ilen;
+ const DisasInsn *info;
+
+ insn = ld_code2(env, pc);
+ op = (insn >> 8) & 0xff;
+ ilen = get_ilen(op);
+ s->next_pc = s->pc + ilen;
+
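+ /* Left-align the insn in the 64-bit word, as extract_field expects. */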
+ switch (ilen) {
+ case 2:
+ insn = insn << 48;
+ break;
+ case 4:
+ insn = ld_code4(env, pc) << 32;
+ break;
+ case 6:
+ insn = (insn << 48) | (ld_code4(env, pc + 2) << 16);
+ break;
+ default:
+ abort();
+ }
+
+ /* We can't actually determine the insn format until we've looked up
+ the full insn opcode, which we can't do without locating the
+ secondary opcode. Assume by default that OP2 is at bit 40; for
+ those smaller insns that don't actually have a secondary opcode
+ this will correctly result in OP2 = 0. */
+ switch (op) {
+ case 0x01: /* E */
+ case 0x80: /* S */
+ case 0x82: /* S */
+ case 0x93: /* S */
+ case 0xb2: /* S, RRF, RRE */
+ case 0xb3: /* RRE, RRD, RRF */
+ case 0xb9: /* RRE, RRF */
+ case 0xe5: /* SSE, SIL */
+ op2 = (insn << 8) >> 56;
+ break;
+ case 0xa5: /* RI */
+ case 0xa7: /* RI */
+ case 0xc0: /* RIL */
+ case 0xc2: /* RIL */
+ case 0xc4: /* RIL */
+ case 0xc6: /* RIL */
+ case 0xc8: /* SSF */
+ case 0xcc: /* RIL */
+ op2 = (insn << 12) >> 60;
+ break;
+ case 0xd0 ... 0xdf: /* SS */
+ case 0xe1: /* SS */
+ case 0xe2: /* SS */
+ case 0xe8: /* SS */
+ case 0xe9: /* SS */
+ case 0xea: /* SS */
+ case 0xee ... 0xf3: /* SS */
+ case 0xf8 ... 0xfd: /* SS */
+ op2 = 0;
+ break;
+ default:
+ op2 = (insn << 40) >> 56;
+ break;
+ }
+
+ memset(f, 0, sizeof(*f));
+ f->raw_insn = insn;
+ f->op = op;
+ f->op2 = op2;
+
+ /* Lookup the instruction. */
+ info = lookup_opc(op << 8 | op2);
+
+ /* If we found it, extract the operands. */
+ if (info != NULL) {
+ DisasFormat fmt = info->fmt;
+ int i;
+
+ for (i = 0; i < NUM_C_FIELD; ++i) {
+ extract_field(f, &format_info[fmt].op[i], insn);
+ }
+ }
+ return info;
+}
+
+static ExitStatus translate_one(CPUS390XState *env, DisasContext *s)
+{
+ const DisasInsn *insn;
+ ExitStatus ret = NO_EXIT;
+ DisasFields f;
+ DisasOps o;
+
+ /* Search for the insn in the table. */
+ insn = extract_insn(env, s, &f);
+
+ /* Not found means unimplemented/illegal opcode. */
+ if (insn == NULL) {
+ qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n",
+ f.op, f.op2);
+ gen_illegal_opcode(s);
+ return EXIT_NORETURN;
+ }
+
+#ifndef CONFIG_USER_ONLY
+ if (s->tb->flags & FLAG_MASK_PER) {
+ TCGv_i64 addr = tcg_const_i64(s->pc);
+ gen_helper_per_ifetch(cpu_env, addr);
+ tcg_temp_free_i64(addr);
+ }
+#endif
+
+ /* Check for insn specification exceptions. */
+ if (insn->spec) {
+ int spec = insn->spec, excp = 0, r;
+
+ if (spec & SPEC_r1_even) {
+ r = get_field(&f, r1);
+ if (r & 1) {
+ excp = PGM_SPECIFICATION;
+ }
+ }
+ if (spec & SPEC_r2_even) {
+ r = get_field(&f, r2);
+ if (r & 1) {
+ excp = PGM_SPECIFICATION;
+ }
+ }
+ if (spec & SPEC_r3_even) {
+ r = get_field(&f, r3);
+ if (r & 1) {
+ excp = PGM_SPECIFICATION;
+ }
+ }
+ if (spec & SPEC_r1_f128) {
+ r = get_field(&f, r1);
+ if (r > 13) {
+ excp = PGM_SPECIFICATION;
+ }
+ }
+ if (spec & SPEC_r2_f128) {
+ r = get_field(&f, r2);
+ if (r > 13) {
+ excp = PGM_SPECIFICATION;
+ }
+ }
+ if (excp) {
+ gen_program_exception(s, excp);
+ return EXIT_NORETURN;
+ }
+ }
+
+ /* Set up the structures we use to communicate with the helpers. */
+ s->insn = insn;
+ s->fields = &f;
+ o.g_out = o.g_out2 = o.g_in1 = o.g_in2 = false;
+ TCGV_UNUSED_I64(o.out);
+ TCGV_UNUSED_I64(o.out2);
+ TCGV_UNUSED_I64(o.in1);
+ TCGV_UNUSED_I64(o.in2);
+ TCGV_UNUSED_I64(o.addr1);
+
+ /* Implement the instruction. */
+ if (insn->help_in1) {
+ insn->help_in1(s, &f, &o);
+ }
+ if (insn->help_in2) {
+ insn->help_in2(s, &f, &o);
+ }
+ if (insn->help_prep) {
+ insn->help_prep(s, &f, &o);
+ }
+ if (insn->help_op) {
+ ret = insn->help_op(s, &o);
+ }
+ if (insn->help_wout) {
+ insn->help_wout(s, &f, &o);
+ }
+ if (insn->help_cout) {
+ insn->help_cout(s, &o);
+ }
+
+ /* Free any temporaries created by the helpers. */
+ if (!TCGV_IS_UNUSED_I64(o.out) && !o.g_out) {
+ tcg_temp_free_i64(o.out);
+ }
+ if (!TCGV_IS_UNUSED_I64(o.out2) && !o.g_out2) {
+ tcg_temp_free_i64(o.out2);
+ }
+ if (!TCGV_IS_UNUSED_I64(o.in1) && !o.g_in1) {
+ tcg_temp_free_i64(o.in1);
+ }
+ if (!TCGV_IS_UNUSED_I64(o.in2) && !o.g_in2) {
+ tcg_temp_free_i64(o.in2);
+ }
+ if (!TCGV_IS_UNUSED_I64(o.addr1)) {
+ tcg_temp_free_i64(o.addr1);
+ }
+
+#ifndef CONFIG_USER_ONLY
+ if (s->tb->flags & FLAG_MASK_PER) {
+ /* An exception might be triggered; save the PSW if not already done. */
+ if (ret == NO_EXIT || ret == EXIT_PC_STALE) {
+ tcg_gen_movi_i64(psw_addr, s->next_pc);
+ }
+
+ /* Save off cc. */
+ update_cc_op(s);
+
+ /* Call the helper to check for a possible PER exception. */
+ gen_helper_per_check_exception(cpu_env);
+ }
+#endif
+
+ /* Advance to the next instruction. */
+ s->pc = s->next_pc;
+ return ret;
+}
+
+void gen_intermediate_code(CPUS390XState *env, struct TranslationBlock *tb)
+{
+ S390CPU *cpu = s390_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ DisasContext dc;
+ target_ulong pc_start;
+ uint64_t next_page_start;
+ int num_insns, max_insns;
+ ExitStatus status;
+ bool do_debug;
+
+ pc_start = tb->pc;
+
+ /* 31-bit mode */
+ if (!(tb->flags & FLAG_MASK_64)) {
+ pc_start &= 0x7fffffff;
+ }
+
+ dc.tb = tb;
+ dc.pc = pc_start;
+ dc.cc_op = CC_OP_DYNAMIC;
+ do_debug = dc.singlestep_enabled = cs->singlestep_enabled;
+
+ next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
+
+ num_insns = 0;
+ max_insns = tb->cflags & CF_COUNT_MASK;
+ if (max_insns == 0) {
+ max_insns = CF_COUNT_MASK;
+ }
+ if (max_insns > TCG_MAX_INSNS) {
+ max_insns = TCG_MAX_INSNS;
+ }
+
+ gen_tb_start(tb);
+
+ do {
+ tcg_gen_insn_start(dc.pc, dc.cc_op);
+ num_insns++;
+
+ if (unlikely(cpu_breakpoint_test(cs, dc.pc, BP_ANY))) {
+ status = EXIT_PC_STALE;
+ do_debug = true;
+ /* The address covered by the breakpoint must be included in
+ [tb->pc, tb->pc + tb->size) in order for it to be properly
+ cleared -- thus we increment the PC here so that the logic
+ setting tb->size below does the right thing. */
+ dc.pc += 2;
+ break;
+ }
+
+ if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ gen_io_start();
+ }
+
+ status = NO_EXIT;
+ if (status == NO_EXIT) {
+ status = translate_one(env, &dc);
+ }
+
+ /* If we reach a page boundary, are single stepping,
+ or exhaust instruction count, stop generation. */
+ if (status == NO_EXIT
+ && (dc.pc >= next_page_start
+ || tcg_op_buf_full()
+ || num_insns >= max_insns
+ || singlestep
+ || cs->singlestep_enabled)) {
+ status = EXIT_PC_STALE;
+ }
+ } while (status == NO_EXIT);
+
+ if (tb->cflags & CF_LAST_IO) {
+ gen_io_end();
+ }
+
+ switch (status) {
+ case EXIT_GOTO_TB:
+ case EXIT_NORETURN:
+ break;
+ case EXIT_PC_STALE:
+ update_psw_addr(&dc);
+ /* FALLTHRU */
+ case EXIT_PC_UPDATED:
+ /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
+ cc op type is in env */
+ update_cc_op(&dc);
+ /* Exit the TB, either by raising a debug exception or by return. */
+ if (do_debug) {
+ gen_exception(EXCP_DEBUG);
+ } else {
+ tcg_gen_exit_tb(0);
+ }
+ break;
+ default:
+ abort();
+ }
+
+ gen_tb_end(tb, num_insns);
+
+ tb->size = dc.pc - pc_start;
+ tb->icount = num_insns;
+
+#if defined(S390X_DEBUG_DISAS)
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
+ && qemu_log_in_addr_range(pc_start)) {
+ qemu_log_lock();
+ qemu_log("IN: %s\n", lookup_symbol(pc_start));
+ log_target_disas(cs, pc_start, dc.pc - pc_start, 1);
+ qemu_log("\n");
+ qemu_log_unlock();
+ }
+#endif
+}
+
+void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ int cc_op = data[1];
+ env->psw.addr = data[0];
+ if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) {
+ env->cc_op = cc_op;
+ }
+}
diff --git a/target/sh4/Makefile.objs b/target/sh4/Makefile.objs
new file mode 100644
index 0000000000..2c25d96e65
--- /dev/null
+++ b/target/sh4/Makefile.objs
@@ -0,0 +1,3 @@
+obj-y += translate.o op_helper.o helper.o cpu.o
+obj-$(CONFIG_SOFTMMU) += monitor.o
+obj-y += gdbstub.o
diff --git a/target/sh4/README.sh4 b/target/sh4/README.sh4
new file mode 100644
index 0000000000..a192ca7540
--- /dev/null
+++ b/target/sh4/README.sh4
@@ -0,0 +1,150 @@
+qemu target: sh4
+author: Samuel Tardieu <sam@rfc1149.net>
+last modified: Tue Dec 6 07:22:44 CET 2005
+
+The sh4 target is not at all ready yet for integration into qemu. This
+file describes the current state of the implementation.
+
+Most places requiring attention and/or modification can be detected by
+looking for "XXXXX" or "abort()".
+
+The sh4 core is located in target/sh4/*, while the 7750 peripheral
+features (IO ports for example) are located in hw/sh7750.[ch]. The
+main board description is in hw/shix.c, and the NAND flash in
+hw/tc58128.[ch].
+
+All the shortcomings indicated here will eventually be resolved. This
+is a work in progress. Features are added in a semi-random order: if a
+point is blocking progress on booting the Linux kernel for the shix
+board, it is addressed first; if feedback is necessary and no progress
+can be made on blocking points until it is received, a random feature
+is worked on.
+
+Goals
+-----
+
+The primary model being worked on is the soft MMU target to be able to
+emulate the Shix 2.0 board by Alexis Polti, described at
+https://web.archive.org/web/20070917001736/http://perso.enst.fr/~polti/realisations/shix20/
+
+Ultimately, qemu will be coupled with a SystemC or a Verilog
+simulator to simulate the whole board's functionality.
+
+An sh4 user-mode target has also been started but will be worked on
+afterwards. The goal is to automate tests for the GNAT (GNU Ada)
+compiler that I recently ported to the sh4-linux target.
+
+Registers
+---------
+
+16 general purpose registers are available at any time. The first 8
+registers are banked and the non-directly visible ones can be accessed
+by privileged instructions. In qemu, we define 24 general purpose
+registers and the code generation uses either [0-7]+[8-15] or
+[16-23]+[8-15] depending on the MD and RB flags in the sr
+configuration register.
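+
+As a rough illustration only (this is not the actual code generation
+logic; it assumes the alternate bank lives in gregs[16..23] and uses a
+made-up helper name), bank selection amounts to something like:
+
+static inline uint32_t *sh4_visible_bank(CPUSH4State *env)
+{
+    /* Privileged mode (MD) with RB set selects the alternate bank
+       for r0-r7; r8-r15 are never banked. */
+    if ((env->sr & (1u << SR_MD)) && (env->sr & (1u << SR_RB))) {
+        return &env->gregs[16];
+    }
+    return &env->gregs[0];
+}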
+
+Instructions
+------------
+
+Most sh4 instructions have been implemented. The missing ones at this
+time are:
+ - FPU related instructions
+ - LDTLB to load a new MMU entry
+ - SLEEP to put the processor in sleep mode
+
+Most instructions could be optimized a lot. This will be worked on
+after the current model is fully functional unless debugging
+convenience requires that it is done early.
+
+Many instructions have not yet had a chance to be tested. The plan is
+to implement unit and regression testing of those in the future.
+
+MMU
+---
+
+The MMU is implemented in the sh4 core. MMU management has not been
+tested at all yet. In the sh7750, it can be manipulated through memory
+mapped registers and this part has not yet been implemented.
+
+Exceptions
+----------
+
+Exceptions are implemented as described in the sh4 reference manual
+but have not been tested yet. They do not use qemu EXCP_ features
+yet.
+
+IRQ
+---
+
+IRQs are not implemented yet.
+
+Peripheral features
+-------------------
+
+ + Serial ports
+
+Configuration and use of the first serial port (SCI) without
+interrupts is supported. Input has not yet been tested.
+
+Configuration of the second serial port (SCIF) is supported. FIFO
+handling infrastructure has been started but is not completed yet.
+
+ + GPIO ports
+
+GPIO ports have been implemented. A registration function allows
+external modules to register interest in some port changes (see
+hw/tc58128.[ch] for an example) and to be called back. Interrupt
+generation is not yet supported but some infrastructure is in place
+for this purpose. Note that in the current model a peripheral module
+cannot directly simulate a H->L->H input port transition and have an
+interrupt generated on the low level.
+
+ + TC58128 NAND flash
+
+TC58128 NAND flash is partially implemented through GPIO ports. It
+supports reading from flash.
+
+GDB
+---
+
+GDB remote target support has been implemented and lightly tested.
+
+Files
+-----
+
+File names are hardcoded at this time. The bootloader must be stored in
+shix_bios.bin in the current directory. The initial Linux image must
+be stored in shix_linux_nand.bin in the current directory in NAND
+format. Test files can be obtained from
+http://perso.enst.fr/~polti/robot/ as well as the various datasheets I
+use.
+
+The qemu disk parameter on the command line is unused. You can supply any
+existing image and it will be ignored. As the goal is to simulate an
+embedded target, it is not clear how this parameter will be handled in
+the future.
+
+To build an ELF kernel image from the NAND image, 16 bytes have to be
+stripped off the end of every 528 bytes, keeping only 512 of them. The
+following Python code snippet does it:
+
+#! /usr/bin/python
+
+def denand (infd, outfd):
+ while True:
+ d = infd.read (528)
+ if not d: return
+ outfd.write (d[:512])
+
+if __name__ == '__main__':
+ import sys
+ denand (open (sys.argv[1], 'rb'),
+ open (sys.argv[2], 'wb'))
+
+Style issues
+------------
+
+There is currently a mix between my style (space before opening
+parenthesis) and qemu style. This will be resolved before final
+integration is proposed.
diff --git a/target/sh4/cpu-qom.h b/target/sh4/cpu-qom.h
new file mode 100644
index 0000000000..01abb206e4
--- /dev/null
+++ b/target/sh4/cpu-qom.h
@@ -0,0 +1,65 @@
+/*
+ * QEMU SuperH CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+#ifndef QEMU_SUPERH_CPU_QOM_H
+#define QEMU_SUPERH_CPU_QOM_H
+
+#include "qom/cpu.h"
+
+#define TYPE_SUPERH_CPU "superh-cpu"
+
+#define TYPE_SH7750R_CPU "sh7750r-" TYPE_SUPERH_CPU
+#define TYPE_SH7751R_CPU "sh7751r-" TYPE_SUPERH_CPU
+#define TYPE_SH7785_CPU "sh7785-" TYPE_SUPERH_CPU
+
+#define SUPERH_CPU_CLASS(klass) \
+ OBJECT_CLASS_CHECK(SuperHCPUClass, (klass), TYPE_SUPERH_CPU)
+#define SUPERH_CPU(obj) \
+ OBJECT_CHECK(SuperHCPU, (obj), TYPE_SUPERH_CPU)
+#define SUPERH_CPU_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(SuperHCPUClass, (obj), TYPE_SUPERH_CPU)
+
+/**
+ * SuperHCPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ * @name: The name.
+ * @pvr: Processor Version Register
+ * @prr: Processor Revision Register
+ * @cvr: Cache Version Register
+ *
+ * A SuperH CPU model.
+ */
+typedef struct SuperHCPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+ void (*parent_reset)(CPUState *cpu);
+
+ const char *name;
+ uint32_t pvr;
+ uint32_t prr;
+ uint32_t cvr;
+} SuperHCPUClass;
+
+typedef struct SuperHCPU SuperHCPU;
+
+#endif
diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c
new file mode 100644
index 0000000000..a38f6a6ded
--- /dev/null
+++ b/target/sh4/cpu.c
@@ -0,0 +1,332 @@
+/*
+ * QEMU SuperH CPU
+ *
+ * Copyright (c) 2005 Samuel Tardieu
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "qemu-common.h"
+#include "migration/vmstate.h"
+#include "exec/exec-all.h"
+
+
+static void superh_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ SuperHCPU *cpu = SUPERH_CPU(cs);
+
+ cpu->env.pc = value;
+}
+
+static void superh_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
+{
+ SuperHCPU *cpu = SUPERH_CPU(cs);
+
+ cpu->env.pc = tb->pc;
+ cpu->env.flags = tb->flags;
+}
+
+static bool superh_cpu_has_work(CPUState *cs)
+{
+ return cs->interrupt_request & CPU_INTERRUPT_HARD;
+}
+
+/* CPUClass::reset() */
+static void superh_cpu_reset(CPUState *s)
+{
+ SuperHCPU *cpu = SUPERH_CPU(s);
+ SuperHCPUClass *scc = SUPERH_CPU_GET_CLASS(cpu);
+ CPUSH4State *env = &cpu->env;
+
+ scc->parent_reset(s);
+
+ memset(env, 0, offsetof(CPUSH4State, id));
+ tlb_flush(s, 1);
+
+ env->pc = 0xA0000000;
+#if defined(CONFIG_USER_ONLY)
+ env->fpscr = FPSCR_PR; /* value for userspace according to the kernel */
+ set_float_rounding_mode(float_round_nearest_even, &env->fp_status); /* ?! */
+#else
+ env->sr = (1u << SR_MD) | (1u << SR_RB) | (1u << SR_BL) |
+ (1u << SR_I3) | (1u << SR_I2) | (1u << SR_I1) | (1u << SR_I0);
+ env->fpscr = FPSCR_DN | FPSCR_RM_ZERO; /* CPU reset value according to SH4 manual */
+ set_float_rounding_mode(float_round_to_zero, &env->fp_status);
+ set_flush_to_zero(1, &env->fp_status);
+#endif
+ set_default_nan_mode(1, &env->fp_status);
+ set_snan_bit_is_one(1, &env->fp_status);
+}
+
+static void superh_cpu_disas_set_info(CPUState *cpu, disassemble_info *info)
+{
+ info->mach = bfd_mach_sh4;
+ info->print_insn = print_insn_sh;
+}
+
+typedef struct SuperHCPUListState {
+ fprintf_function cpu_fprintf;
+ FILE *file;
+} SuperHCPUListState;
+
+/* Sort alphabetically by type name. */
+static gint superh_cpu_list_compare(gconstpointer a, gconstpointer b)
+{
+ ObjectClass *class_a = (ObjectClass *)a;
+ ObjectClass *class_b = (ObjectClass *)b;
+ const char *name_a, *name_b;
+
+ name_a = object_class_get_name(class_a);
+ name_b = object_class_get_name(class_b);
+ return strcmp(name_a, name_b);
+}
+
+static void superh_cpu_list_entry(gpointer data, gpointer user_data)
+{
+ ObjectClass *oc = data;
+ SuperHCPUClass *scc = SUPERH_CPU_CLASS(oc);
+ SuperHCPUListState *s = user_data;
+
+ (*s->cpu_fprintf)(s->file, "%s\n",
+ scc->name);
+}
+
+void sh4_cpu_list(FILE *f, fprintf_function cpu_fprintf)
+{
+ SuperHCPUListState s = {
+ .cpu_fprintf = cpu_fprintf,
+ .file = f,
+ };
+ GSList *list;
+
+ list = object_class_get_list(TYPE_SUPERH_CPU, false);
+ list = g_slist_sort(list, superh_cpu_list_compare);
+ g_slist_foreach(list, superh_cpu_list_entry, &s);
+ g_slist_free(list);
+}
+
+static gint superh_cpu_name_compare(gconstpointer a, gconstpointer b)
+{
+ const SuperHCPUClass *scc = SUPERH_CPU_CLASS(a);
+ const char *name = b;
+
+ return strcasecmp(scc->name, name);
+}
+
+static ObjectClass *superh_cpu_class_by_name(const char *cpu_model)
+{
+ ObjectClass *oc;
+ GSList *list, *item;
+
+ if (cpu_model == NULL) {
+ return NULL;
+ }
+ if (strcasecmp(cpu_model, "any") == 0) {
+ return object_class_by_name(TYPE_SH7750R_CPU);
+ }
+
+ oc = object_class_by_name(cpu_model);
+ if (oc != NULL && object_class_dynamic_cast(oc, TYPE_SUPERH_CPU) != NULL
+ && !object_class_is_abstract(oc)) {
+ return oc;
+ }
+
+ oc = NULL;
+ list = object_class_get_list(TYPE_SUPERH_CPU, false);
+ item = g_slist_find_custom(list, cpu_model, superh_cpu_name_compare);
+ if (item != NULL) {
+ oc = item->data;
+ }
+ g_slist_free(list);
+ return oc;
+}
+
+SuperHCPU *cpu_sh4_init(const char *cpu_model)
+{
+ return SUPERH_CPU(cpu_generic_init(TYPE_SUPERH_CPU, cpu_model));
+}
+
+static void sh7750r_cpu_initfn(Object *obj)
+{
+ SuperHCPU *cpu = SUPERH_CPU(obj);
+ CPUSH4State *env = &cpu->env;
+
+ env->id = SH_CPU_SH7750R;
+ env->features = SH_FEATURE_BCR3_AND_BCR4;
+}
+
+static void sh7750r_class_init(ObjectClass *oc, void *data)
+{
+ SuperHCPUClass *scc = SUPERH_CPU_CLASS(oc);
+
+ scc->name = "SH7750R";
+ scc->pvr = 0x00050000;
+ scc->prr = 0x00000100;
+ scc->cvr = 0x00110000;
+}
+
+static const TypeInfo sh7750r_type_info = {
+ .name = TYPE_SH7750R_CPU,
+ .parent = TYPE_SUPERH_CPU,
+ .class_init = sh7750r_class_init,
+ .instance_init = sh7750r_cpu_initfn,
+};
+
+static void sh7751r_cpu_initfn(Object *obj)
+{
+ SuperHCPU *cpu = SUPERH_CPU(obj);
+ CPUSH4State *env = &cpu->env;
+
+ env->id = SH_CPU_SH7751R;
+ env->features = SH_FEATURE_BCR3_AND_BCR4;
+}
+
+static void sh7751r_class_init(ObjectClass *oc, void *data)
+{
+ SuperHCPUClass *scc = SUPERH_CPU_CLASS(oc);
+
+ scc->name = "SH7751R";
+ scc->pvr = 0x04050005;
+ scc->prr = 0x00000113;
+ scc->cvr = 0x00110000; /* Neutered caches, should be 0x20480000 */
+}
+
+static const TypeInfo sh7751r_type_info = {
+ .name = TYPE_SH7751R_CPU,
+ .parent = TYPE_SUPERH_CPU,
+ .class_init = sh7751r_class_init,
+ .instance_init = sh7751r_cpu_initfn,
+};
+
+static void sh7785_cpu_initfn(Object *obj)
+{
+ SuperHCPU *cpu = SUPERH_CPU(obj);
+ CPUSH4State *env = &cpu->env;
+
+ env->id = SH_CPU_SH7785;
+ env->features = SH_FEATURE_SH4A;
+}
+
+static void sh7785_class_init(ObjectClass *oc, void *data)
+{
+ SuperHCPUClass *scc = SUPERH_CPU_CLASS(oc);
+
+ scc->name = "SH7785";
+ scc->pvr = 0x10300700;
+ scc->prr = 0x00000200;
+ scc->cvr = 0x71440211;
+}
+
+static const TypeInfo sh7785_type_info = {
+ .name = TYPE_SH7785_CPU,
+ .parent = TYPE_SUPERH_CPU,
+ .class_init = sh7785_class_init,
+ .instance_init = sh7785_cpu_initfn,
+};
+
+static void superh_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ SuperHCPUClass *scc = SUPERH_CPU_GET_CLASS(dev);
+ Error *local_err = NULL;
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ cpu_reset(cs);
+ qemu_init_vcpu(cs);
+
+ scc->parent_realize(dev, errp);
+}
+
+static void superh_cpu_initfn(Object *obj)
+{
+ CPUState *cs = CPU(obj);
+ SuperHCPU *cpu = SUPERH_CPU(obj);
+ CPUSH4State *env = &cpu->env;
+
+ cs->env_ptr = env;
+
+ env->movcal_backup_tail = &(env->movcal_backup);
+
+ if (tcg_enabled()) {
+ sh4_translate_init();
+ }
+}
+
+static const VMStateDescription vmstate_sh_cpu = {
+ .name = "cpu",
+ .unmigratable = 1,
+};
+
+static void superh_cpu_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(oc);
+ SuperHCPUClass *scc = SUPERH_CPU_CLASS(oc);
+
+ scc->parent_realize = dc->realize;
+ dc->realize = superh_cpu_realizefn;
+
+ scc->parent_reset = cc->reset;
+ cc->reset = superh_cpu_reset;
+
+ cc->class_by_name = superh_cpu_class_by_name;
+ cc->has_work = superh_cpu_has_work;
+ cc->do_interrupt = superh_cpu_do_interrupt;
+ cc->cpu_exec_interrupt = superh_cpu_exec_interrupt;
+ cc->dump_state = superh_cpu_dump_state;
+ cc->set_pc = superh_cpu_set_pc;
+ cc->synchronize_from_tb = superh_cpu_synchronize_from_tb;
+ cc->gdb_read_register = superh_cpu_gdb_read_register;
+ cc->gdb_write_register = superh_cpu_gdb_write_register;
+#ifdef CONFIG_USER_ONLY
+ cc->handle_mmu_fault = superh_cpu_handle_mmu_fault;
+#else
+ cc->get_phys_page_debug = superh_cpu_get_phys_page_debug;
+#endif
+ cc->disas_set_info = superh_cpu_disas_set_info;
+
+ cc->gdb_num_core_regs = 59;
+
+ dc->vmsd = &vmstate_sh_cpu;
+}
+
+static const TypeInfo superh_cpu_type_info = {
+ .name = TYPE_SUPERH_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(SuperHCPU),
+ .instance_init = superh_cpu_initfn,
+ .abstract = true,
+ .class_size = sizeof(SuperHCPUClass),
+ .class_init = superh_cpu_class_init,
+};
+
+static void superh_cpu_register_types(void)
+{
+ type_register_static(&superh_cpu_type_info);
+ type_register_static(&sh7750r_type_info);
+ type_register_static(&sh7751r_type_info);
+ type_register_static(&sh7785_type_info);
+}
+
+type_init(superh_cpu_register_types)
diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h
new file mode 100644
index 0000000000..478ab55868
--- /dev/null
+++ b/target/sh4/cpu.h
@@ -0,0 +1,391 @@
+/*
+ * SH4 emulation
+ *
+ * Copyright (c) 2005 Samuel Tardieu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef SH4_CPU_H
+#define SH4_CPU_H
+
+#include "qemu-common.h"
+#include "cpu-qom.h"
+
+#define TARGET_LONG_BITS 32
+
+/* CPU Subtypes */
+#define SH_CPU_SH7750 (1 << 0)
+#define SH_CPU_SH7750S (1 << 1)
+#define SH_CPU_SH7750R (1 << 2)
+#define SH_CPU_SH7751 (1 << 3)
+#define SH_CPU_SH7751R (1 << 4)
+#define SH_CPU_SH7785 (1 << 5)
+#define SH_CPU_SH7750_ALL (SH_CPU_SH7750 | SH_CPU_SH7750S | SH_CPU_SH7750R)
+#define SH_CPU_SH7751_ALL (SH_CPU_SH7751 | SH_CPU_SH7751R)
+
+#define CPUArchState struct CPUSH4State
+
+#include "exec/cpu-defs.h"
+
+#include "fpu/softfloat.h"
+
+#define TARGET_PAGE_BITS 12 /* 4k XXXXX */
+
+#define TARGET_PHYS_ADDR_SPACE_BITS 32
+#define TARGET_VIRT_ADDR_SPACE_BITS 32
+
+#define SR_MD 30
+#define SR_RB 29
+#define SR_BL 28
+#define SR_FD 15
+#define SR_M 9
+#define SR_Q 8
+#define SR_I3 7
+#define SR_I2 6
+#define SR_I1 5
+#define SR_I0 4
+#define SR_S 1
+#define SR_T 0
+
+#define FPSCR_MASK (0x003fffff)
+#define FPSCR_FR (1 << 21)
+#define FPSCR_SZ (1 << 20)
+#define FPSCR_PR (1 << 19)
+#define FPSCR_DN (1 << 18)
+#define FPSCR_CAUSE_MASK (0x3f << 12)
+#define FPSCR_CAUSE_SHIFT (12)
+#define FPSCR_CAUSE_E (1 << 17)
+#define FPSCR_CAUSE_V (1 << 16)
+#define FPSCR_CAUSE_Z (1 << 15)
+#define FPSCR_CAUSE_O (1 << 14)
+#define FPSCR_CAUSE_U (1 << 13)
+#define FPSCR_CAUSE_I (1 << 12)
+#define FPSCR_ENABLE_MASK (0x1f << 7)
+#define FPSCR_ENABLE_SHIFT (7)
+#define FPSCR_ENABLE_V (1 << 11)
+#define FPSCR_ENABLE_Z (1 << 10)
+#define FPSCR_ENABLE_O (1 << 9)
+#define FPSCR_ENABLE_U (1 << 8)
+#define FPSCR_ENABLE_I (1 << 7)
+#define FPSCR_FLAG_MASK (0x1f << 2)
+#define FPSCR_FLAG_SHIFT (2)
+#define FPSCR_FLAG_V (1 << 6)
+#define FPSCR_FLAG_Z (1 << 5)
+#define FPSCR_FLAG_O (1 << 4)
+#define FPSCR_FLAG_U (1 << 3)
+#define FPSCR_FLAG_I (1 << 2)
+#define FPSCR_RM_MASK (0x03 << 0)
+#define FPSCR_RM_NEAREST (0 << 0)
+#define FPSCR_RM_ZERO (1 << 0)
+
+#define DELAY_SLOT (1 << 0)
+#define DELAY_SLOT_CONDITIONAL (1 << 1)
+#define DELAY_SLOT_TRUE (1 << 2)
+#define DELAY_SLOT_CLEARME (1 << 3)
+/* The dynamic value of the DELAY_SLOT_TRUE flag determines whether the jump
+ * after the delay slot should be taken or not. It is calculated from SR_T.
+ *
+ * It is unclear if it is permitted to modify the SR_T flag in a delay slot.
+ * The use of DELAY_SLOT_TRUE flag makes us accept such SR_T modification.
+ */
+
+typedef struct tlb_t {
+ uint32_t vpn; /* virtual page number */
+ uint32_t ppn; /* physical page number */
+ uint32_t size; /* mapped page size in bytes */
+ uint8_t asid; /* address space identifier */
+ uint8_t v:1; /* validity */
+ uint8_t sz:2; /* page size */
+ uint8_t sh:1; /* share status */
+ uint8_t c:1; /* cacheability */
+ uint8_t pr:2; /* protection key */
+ uint8_t d:1; /* dirty */
+ uint8_t wt:1; /* write through */
+ uint8_t sa:3; /* space attribute (PCMCIA) */
+ uint8_t tc:1; /* timing control */
+} tlb_t;
+
+#define UTLB_SIZE 64
+#define ITLB_SIZE 4
+
+#define NB_MMU_MODES 2
+#define TARGET_INSN_START_EXTRA_WORDS 1
+
+enum sh_features {
+ SH_FEATURE_SH4A = 1,
+ SH_FEATURE_BCR3_AND_BCR4 = 2,
+};
+
+typedef struct memory_content {
+ uint32_t address;
+ uint32_t value;
+ struct memory_content *next;
+} memory_content;
+
+typedef struct CPUSH4State {
+ uint32_t flags; /* general execution flags */
+ uint32_t gregs[24]; /* general registers */
+ float32 fregs[32]; /* floating point registers */
+ uint32_t sr; /* status register (with T split out) */
+ uint32_t sr_m; /* M bit of status register */
+ uint32_t sr_q; /* Q bit of status register */
+ uint32_t sr_t; /* T bit of status register */
+ uint32_t ssr; /* saved status register */
+ uint32_t spc; /* saved program counter */
+ uint32_t gbr; /* global base register */
+ uint32_t vbr; /* vector base register */
+ uint32_t sgr; /* saved global register 15 */
+ uint32_t dbr; /* debug base register */
+ uint32_t pc; /* program counter */
+ uint32_t delayed_pc; /* target of delayed jump */
+ uint32_t mach; /* multiply and accumulate high */
+ uint32_t macl; /* multiply and accumulate low */
+ uint32_t pr; /* procedure register */
+ uint32_t fpscr; /* floating point status/control register */
+ uint32_t fpul; /* floating point communication register */
+
+ /* floating point status register */
+ float_status fp_status;
+
+ /* Those belong to the specific unit (SH7750) but are handled here */
+ uint32_t mmucr; /* MMU control register */
+ uint32_t pteh; /* page table entry high register */
+ uint32_t ptel; /* page table entry low register */
+ uint32_t ptea; /* page table entry assistance register */
+ uint32_t ttb; /* translation table base register */
+ uint32_t tea; /* TLB exception address register */
+ uint32_t tra; /* TRAPA exception register */
+ uint32_t expevt; /* exception event register */
+ uint32_t intevt; /* interrupt event register */
+
+ tlb_t itlb[ITLB_SIZE]; /* instruction translation table */
+ tlb_t utlb[UTLB_SIZE]; /* unified translation table */
+
+ uint32_t ldst;
+
+ CPU_COMMON
+
+ /* Fields from here on are preserved over CPU reset. */
+ int id; /* CPU model */
+
+ /* The features that we should emulate. See sh_features above. */
+ uint32_t features;
+
+ void *intc_handle;
+ int in_sleep; /* SR_BL ignored during sleep */
+ memory_content *movcal_backup;
+ memory_content **movcal_backup_tail;
+} CPUSH4State;
+
+/**
+ * SuperHCPU:
+ * @env: #CPUSH4State
+ *
+ * A SuperH CPU.
+ */
+struct SuperHCPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPUSH4State env;
+};
+
+static inline SuperHCPU *sh_env_get_cpu(CPUSH4State *env)
+{
+ return container_of(env, SuperHCPU, env);
+}
+
+#define ENV_GET_CPU(e) CPU(sh_env_get_cpu(e))
+
+#define ENV_OFFSET offsetof(SuperHCPU, env)
+
+void superh_cpu_do_interrupt(CPUState *cpu);
+bool superh_cpu_exec_interrupt(CPUState *cpu, int int_req);
+void superh_cpu_dump_state(CPUState *cpu, FILE *f,
+ fprintf_function cpu_fprintf, int flags);
+hwaddr superh_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+int superh_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
+int superh_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+
+void sh4_translate_init(void);
+SuperHCPU *cpu_sh4_init(const char *cpu_model);
+int cpu_sh4_signal_handler(int host_signum, void *pinfo,
+ void *puc);
+int superh_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
+ int mmu_idx);
+
+void sh4_cpu_list(FILE *f, fprintf_function cpu_fprintf);
+#if !defined(CONFIG_USER_ONLY)
+void cpu_sh4_invalidate_tlb(CPUSH4State *s);
+uint32_t cpu_sh4_read_mmaped_itlb_addr(CPUSH4State *s,
+ hwaddr addr);
+void cpu_sh4_write_mmaped_itlb_addr(CPUSH4State *s, hwaddr addr,
+ uint32_t mem_value);
+uint32_t cpu_sh4_read_mmaped_itlb_data(CPUSH4State *s,
+ hwaddr addr);
+void cpu_sh4_write_mmaped_itlb_data(CPUSH4State *s, hwaddr addr,
+ uint32_t mem_value);
+uint32_t cpu_sh4_read_mmaped_utlb_addr(CPUSH4State *s,
+ hwaddr addr);
+void cpu_sh4_write_mmaped_utlb_addr(CPUSH4State *s, hwaddr addr,
+ uint32_t mem_value);
+uint32_t cpu_sh4_read_mmaped_utlb_data(CPUSH4State *s,
+ hwaddr addr);
+void cpu_sh4_write_mmaped_utlb_data(CPUSH4State *s, hwaddr addr,
+ uint32_t mem_value);
+#endif
+
+int cpu_sh4_is_cached(CPUSH4State * env, target_ulong addr);
+
+void cpu_load_tlb(CPUSH4State * env);
+
+#define cpu_init(cpu_model) CPU(cpu_sh4_init(cpu_model))
+
+#define cpu_signal_handler cpu_sh4_signal_handler
+#define cpu_list sh4_cpu_list
+
+/* MMU modes definitions */
+#define MMU_MODE0_SUFFIX _kernel
+#define MMU_MODE1_SUFFIX _user
+#define MMU_USER_IDX 1
+static inline int cpu_mmu_index (CPUSH4State *env, bool ifetch)
+{
+ return (env->sr & (1u << SR_MD)) == 0 ? 1 : 0;
+}
+
+#include "exec/cpu-all.h"
+
+/* Memory access type */
+enum {
+ /* Privilege */
+ ACCESS_PRIV = 0x01,
+ /* Direction */
+ ACCESS_WRITE = 0x02,
+ /* Type of instruction */
+ ACCESS_CODE = 0x10,
+ ACCESS_INT = 0x20
+};
+
+/* MMU control register */
+#define MMUCR 0x1F000010
+#define MMUCR_AT (1<<0)
+#define MMUCR_TI (1<<2)
+#define MMUCR_SV (1<<8)
+#define MMUCR_URC_BITS (6)
+#define MMUCR_URC_OFFSET (10)
+#define MMUCR_URC_SIZE (1 << MMUCR_URC_BITS)
+#define MMUCR_URC_MASK (((MMUCR_URC_SIZE) - 1) << MMUCR_URC_OFFSET)
+static inline int cpu_mmucr_urc (uint32_t mmucr)
+{
+ return ((mmucr & MMUCR_URC_MASK) >> MMUCR_URC_OFFSET);
+}
+
+/* PTEH : Page Translation Entry High register */
+#define PTEH_ASID_BITS (8)
+#define PTEH_ASID_SIZE (1 << PTEH_ASID_BITS)
+#define PTEH_ASID_MASK (PTEH_ASID_SIZE - 1)
+#define cpu_pteh_asid(pteh) ((pteh) & PTEH_ASID_MASK)
+#define PTEH_VPN_BITS (22)
+#define PTEH_VPN_OFFSET (10)
+#define PTEH_VPN_SIZE (1 << PTEH_VPN_BITS)
+#define PTEH_VPN_MASK (((PTEH_VPN_SIZE) - 1) << PTEH_VPN_OFFSET)
+static inline int cpu_pteh_vpn (uint32_t pteh)
+{
+ return ((pteh & PTEH_VPN_MASK) >> PTEH_VPN_OFFSET);
+}
+
+/* PTEL : Page Translation Entry Low register */
+#define PTEL_V (1 << 8)
+#define cpu_ptel_v(ptel) (((ptel) & PTEL_V) >> 8)
+#define PTEL_C (1 << 3)
+#define cpu_ptel_c(ptel) (((ptel) & PTEL_C) >> 3)
+#define PTEL_D (1 << 2)
+#define cpu_ptel_d(ptel) (((ptel) & PTEL_D) >> 2)
+#define PTEL_SH (1 << 1)
+#define cpu_ptel_sh(ptel)(((ptel) & PTEL_SH) >> 1)
+#define PTEL_WT (1 << 0)
+#define cpu_ptel_wt(ptel) ((ptel) & PTEL_WT)
+
+#define PTEL_SZ_HIGH_OFFSET (7)
+#define PTEL_SZ_HIGH (1 << PTEL_SZ_HIGH_OFFSET)
+#define PTEL_SZ_LOW_OFFSET (4)
+#define PTEL_SZ_LOW (1 << PTEL_SZ_LOW_OFFSET)
+static inline int cpu_ptel_sz (uint32_t ptel)
+{
+ int sz;
+ sz = (ptel & PTEL_SZ_HIGH) >> PTEL_SZ_HIGH_OFFSET;
+ sz <<= 1;
+ sz |= (ptel & PTEL_SZ_LOW) >> PTEL_SZ_LOW_OFFSET;
+ return sz;
+}
+
+#define PTEL_PPN_BITS (19)
+#define PTEL_PPN_OFFSET (10)
+#define PTEL_PPN_SIZE (1 << PTEL_PPN_BITS)
+#define PTEL_PPN_MASK (((PTEL_PPN_SIZE) - 1) << PTEL_PPN_OFFSET)
+static inline int cpu_ptel_ppn (uint32_t ptel)
+{
+ return ((ptel & PTEL_PPN_MASK) >> PTEL_PPN_OFFSET);
+}
+
+#define PTEL_PR_BITS (2)
+#define PTEL_PR_OFFSET (5)
+#define PTEL_PR_SIZE (1 << PTEL_PR_BITS)
+#define PTEL_PR_MASK (((PTEL_PR_SIZE) - 1) << PTEL_PR_OFFSET)
+static inline int cpu_ptel_pr (uint32_t ptel)
+{
+ return ((ptel & PTEL_PR_MASK) >> PTEL_PR_OFFSET);
+}
+
+/* PTEA : Page Translation Entry Assistance register */
+#define PTEA_SA_BITS (3)
+#define PTEA_SA_SIZE (1 << PTEA_SA_BITS)
+#define PTEA_SA_MASK (PTEA_SA_SIZE - 1)
+#define cpu_ptea_sa(ptea) ((ptea) & PTEA_SA_MASK)
+#define PTEA_TC (1 << 3)
+#define cpu_ptea_tc(ptea) (((ptea) & PTEA_TC) >> 3)
+
+#define TB_FLAG_PENDING_MOVCA (1 << 4)
+
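+/* SR is stored with the M, Q and T bits split out into separate fields
+ * (sr_m, sr_q, sr_t) to help TCG; these helpers reassemble and split the
+ * architectural SR value.
+ */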
+static inline target_ulong cpu_read_sr(CPUSH4State *env)
+{
+ return env->sr | (env->sr_m << SR_M) |
+ (env->sr_q << SR_Q) |
+ (env->sr_t << SR_T);
+}
+
+static inline void cpu_write_sr(CPUSH4State *env, target_ulong sr)
+{
+ env->sr_m = (sr >> SR_M) & 1;
+ env->sr_q = (sr >> SR_Q) & 1;
+ env->sr_t = (sr >> SR_T) & 1;
+ env->sr = sr & ~((1u << SR_M) | (1u << SR_Q) | (1u << SR_T));
+}
+
+static inline void cpu_get_tb_cpu_state(CPUSH4State *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *flags)
+{
+ *pc = env->pc;
+ *cs_base = 0;
+ *flags = (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL
+ | DELAY_SLOT_TRUE | DELAY_SLOT_CLEARME)) /* Bits 0- 3 */
+ | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR)) /* Bits 19-21 */
+ | (env->sr & ((1u << SR_MD) | (1u << SR_RB))) /* Bits 29-30 */
+ | (env->sr & (1u << SR_FD)) /* Bit 15 */
+ | (env->movcal_backup ? TB_FLAG_PENDING_MOVCA : 0); /* Bit 4 */
+}
+
+#endif /* SH4_CPU_H */
diff --git a/target/sh4/gdbstub.c b/target/sh4/gdbstub.c
new file mode 100644
index 0000000000..13bea00d7d
--- /dev/null
+++ b/target/sh4/gdbstub.c
@@ -0,0 +1,147 @@
+/*
+ * SuperH gdb server stub
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+
+/* Hint: Use "set architecture sh4" in GDB to see fpu registers */
+/* FIXME: We should use XML for this. */
+
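+/* Register numbers used here: 0-15 r0-r15 (current bank for r0-r7),
+ * 16 pc, 17 pr, 18 gbr, 19 vbr, 20 mach, 21 macl, 22 sr, 23 fpul,
+ * 24 fpscr, 25-40 fr0-fr15 (current bank), 41 ssr, 42 spc,
+ * 43-50 r0-r7 bank 0, 51-58 r0-r7 bank 1.
+ */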
+int superh_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ SuperHCPU *cpu = SUPERH_CPU(cs);
+ CPUSH4State *env = &cpu->env;
+
+ switch (n) {
+ case 0 ... 7:
+ if ((env->sr & (1u << SR_MD)) && (env->sr & (1u << SR_RB))) {
+ return gdb_get_regl(mem_buf, env->gregs[n + 16]);
+ } else {
+ return gdb_get_regl(mem_buf, env->gregs[n]);
+ }
+ case 8 ... 15:
+ return gdb_get_regl(mem_buf, env->gregs[n]);
+ case 16:
+ return gdb_get_regl(mem_buf, env->pc);
+ case 17:
+ return gdb_get_regl(mem_buf, env->pr);
+ case 18:
+ return gdb_get_regl(mem_buf, env->gbr);
+ case 19:
+ return gdb_get_regl(mem_buf, env->vbr);
+ case 20:
+ return gdb_get_regl(mem_buf, env->mach);
+ case 21:
+ return gdb_get_regl(mem_buf, env->macl);
+ case 22:
+ return gdb_get_regl(mem_buf, cpu_read_sr(env));
+ case 23:
+ return gdb_get_regl(mem_buf, env->fpul);
+ case 24:
+ return gdb_get_regl(mem_buf, env->fpscr);
+ case 25 ... 40:
+ if (env->fpscr & FPSCR_FR) {
+ stfl_p(mem_buf, env->fregs[n - 9]);
+ } else {
+ stfl_p(mem_buf, env->fregs[n - 25]);
+ }
+ return 4;
+ case 41:
+ return gdb_get_regl(mem_buf, env->ssr);
+ case 42:
+ return gdb_get_regl(mem_buf, env->spc);
+ case 43 ... 50:
+ return gdb_get_regl(mem_buf, env->gregs[n - 43]);
+ case 51 ... 58:
+ return gdb_get_regl(mem_buf, env->gregs[n - (51 - 16)]);
+ }
+
+ return 0;
+}
+
+int superh_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ SuperHCPU *cpu = SUPERH_CPU(cs);
+ CPUSH4State *env = &cpu->env;
+
+ switch (n) {
+ case 0 ... 7:
+ if ((env->sr & (1u << SR_MD)) && (env->sr & (1u << SR_RB))) {
+ env->gregs[n + 16] = ldl_p(mem_buf);
+ } else {
+ env->gregs[n] = ldl_p(mem_buf);
+ }
+ break;
+ case 8 ... 15:
+ env->gregs[n] = ldl_p(mem_buf);
+ break;
+ case 16:
+ env->pc = ldl_p(mem_buf);
+ break;
+ case 17:
+ env->pr = ldl_p(mem_buf);
+ break;
+ case 18:
+ env->gbr = ldl_p(mem_buf);
+ break;
+ case 19:
+ env->vbr = ldl_p(mem_buf);
+ break;
+ case 20:
+ env->mach = ldl_p(mem_buf);
+ break;
+ case 21:
+ env->macl = ldl_p(mem_buf);
+ break;
+ case 22:
+ cpu_write_sr(env, ldl_p(mem_buf));
+ break;
+ case 23:
+ env->fpul = ldl_p(mem_buf);
+ break;
+ case 24:
+ env->fpscr = ldl_p(mem_buf);
+ break;
+ case 25 ... 40:
+ if (env->fpscr & FPSCR_FR) {
+ env->fregs[n - 9] = ldfl_p(mem_buf);
+ } else {
+ env->fregs[n - 25] = ldfl_p(mem_buf);
+ }
+ break;
+ case 41:
+ env->ssr = ldl_p(mem_buf);
+ break;
+ case 42:
+ env->spc = ldl_p(mem_buf);
+ break;
+ case 43 ... 50:
+ env->gregs[n - 43] = ldl_p(mem_buf);
+ break;
+ case 51 ... 58:
+ env->gregs[n - (51 - 16)] = ldl_p(mem_buf);
+ break;
+ default:
+ return 0;
+ }
+
+ return 4;
+}
diff --git a/target/sh4/helper.c b/target/sh4/helper.c
new file mode 100644
index 0000000000..a33ac697c5
--- /dev/null
+++ b/target/sh4/helper.c
@@ -0,0 +1,872 @@
+/*
+ * SH4 emulation
+ *
+ * Copyright (c) 2005 Samuel Tardieu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/log.h"
+
+#if !defined(CONFIG_USER_ONLY)
+#include "hw/sh4/sh_intc.h"
+#endif
+
+#if defined(CONFIG_USER_ONLY)
+
+void superh_cpu_do_interrupt(CPUState *cs)
+{
+ cs->exception_index = -1;
+}
+
+int superh_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
+ int mmu_idx)
+{
+ SuperHCPU *cpu = SUPERH_CPU(cs);
+ CPUSH4State *env = &cpu->env;
+
+ env->tea = address;
+ cs->exception_index = -1;
+ switch (rw) {
+ case 0:
+ cs->exception_index = 0x0a0;
+ break;
+ case 1:
+ cs->exception_index = 0x0c0;
+ break;
+ case 2:
+ cs->exception_index = 0x0a0;
+ break;
+ }
+ return 1;
+}
+
+int cpu_sh4_is_cached(CPUSH4State * env, target_ulong addr)
+{
+ /* For user mode, only U0 area is cacheable. */
+ return !(addr & 0x80000000);
+}
+
+#else /* !CONFIG_USER_ONLY */
+
+#define MMU_OK 0
+#define MMU_ITLB_MISS (-1)
+#define MMU_ITLB_MULTIPLE (-2)
+#define MMU_ITLB_VIOLATION (-3)
+#define MMU_DTLB_MISS_READ (-4)
+#define MMU_DTLB_MISS_WRITE (-5)
+#define MMU_DTLB_INITIAL_WRITE (-6)
+#define MMU_DTLB_VIOLATION_READ (-7)
+#define MMU_DTLB_VIOLATION_WRITE (-8)
+#define MMU_DTLB_MULTIPLE (-9)
+#define MMU_DTLB_MISS (-10)
+#define MMU_IADDR_ERROR (-11)
+#define MMU_DADDR_ERROR_READ (-12)
+#define MMU_DADDR_ERROR_WRITE (-13)
+
+void superh_cpu_do_interrupt(CPUState *cs)
+{
+ SuperHCPU *cpu = SUPERH_CPU(cs);
+ CPUSH4State *env = &cpu->env;
+ int do_irq = cs->interrupt_request & CPU_INTERRUPT_HARD;
+ int do_exp, irq_vector = cs->exception_index;
+
+ /* prioritize exceptions over interrupts */
+
+ do_exp = cs->exception_index != -1;
+ do_irq = do_irq && (cs->exception_index == -1);
+
+ if (env->sr & (1u << SR_BL)) {
+ if (do_exp && cs->exception_index != 0x1e0) {
+ cs->exception_index = 0x000; /* masked exception -> reset */
+ }
+ if (do_irq && !env->in_sleep) {
+ return; /* masked */
+ }
+ }
+ env->in_sleep = 0;
+
+ if (do_irq) {
+ irq_vector = sh_intc_get_pending_vector(env->intc_handle,
+ (env->sr >> 4) & 0xf);
+ if (irq_vector == -1) {
+ return; /* masked */
+ }
+ }
+
+ if (qemu_loglevel_mask(CPU_LOG_INT)) {
+ const char *expname;
+ switch (cs->exception_index) {
+ case 0x0e0:
+ expname = "addr_error";
+ break;
+ case 0x040:
+ expname = "tlb_miss";
+ break;
+ case 0x0a0:
+ expname = "tlb_violation";
+ break;
+ case 0x180:
+ expname = "illegal_instruction";
+ break;
+ case 0x1a0:
+ expname = "slot_illegal_instruction";
+ break;
+ case 0x800:
+ expname = "fpu_disable";
+ break;
+ case 0x820:
+ expname = "slot_fpu";
+ break;
+ case 0x100:
+ expname = "data_write";
+ break;
+ case 0x060:
+ expname = "dtlb_miss_write";
+ break;
+ case 0x0c0:
+ expname = "dtlb_violation_write";
+ break;
+ case 0x120:
+ expname = "fpu_exception";
+ break;
+ case 0x080:
+ expname = "initial_page_write";
+ break;
+ case 0x160:
+ expname = "trapa";
+ break;
+ default:
+ expname = do_irq ? "interrupt" : "???";
+ break;
+ }
+ qemu_log("exception 0x%03x [%s] raised\n",
+ irq_vector, expname);
+ log_cpu_state(cs, 0);
+ }
+
+ env->ssr = cpu_read_sr(env);
+ env->spc = env->pc;
+ env->sgr = env->gregs[15];
+ env->sr |= (1u << SR_BL) | (1u << SR_MD) | (1u << SR_RB);
+
+ if (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
+ /* Branch instruction should be executed again before delay slot. */
+ env->spc -= 2;
+ /* Clear flags for exception/interrupt routine. */
+ env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL | DELAY_SLOT_TRUE);
+ }
+ if (env->flags & DELAY_SLOT_CLEARME)
+ env->flags = 0;
+
+ if (do_exp) {
+ env->expevt = cs->exception_index;
+ switch (cs->exception_index) {
+ case 0x000:
+ case 0x020:
+ case 0x140:
+ env->sr &= ~(1u << SR_FD);
+ env->sr |= 0xf << 4; /* IMASK */
+ env->pc = 0xa0000000;
+ break;
+ case 0x040:
+ case 0x060:
+ env->pc = env->vbr + 0x400;
+ break;
+ case 0x160:
+ env->spc += 2; /* special case for TRAPA */
+ /* fall through */
+ default:
+ env->pc = env->vbr + 0x100;
+ break;
+ }
+ return;
+ }
+
+ if (do_irq) {
+ env->intevt = irq_vector;
+ env->pc = env->vbr + 0x600;
+ return;
+ }
+}
+
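+/* Update the LRU bits kept in the top byte of MMUCR so that ITLB entry
+ * itlbnb is marked as most recently used.
+ */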
+static void update_itlb_use(CPUSH4State * env, int itlbnb)
+{
+ uint8_t or_mask = 0, and_mask = (uint8_t) - 1;
+
+ switch (itlbnb) {
+ case 0:
+ and_mask = 0x1f;
+ break;
+ case 1:
+ and_mask = 0xe7;
+ or_mask = 0x80;
+ break;
+ case 2:
+ and_mask = 0xfb;
+ or_mask = 0x50;
+ break;
+ case 3:
+ or_mask = 0x2c;
+ break;
+ }
+
+ env->mmucr &= (and_mask << 24) | 0x00ffffff;
+ env->mmucr |= (or_mask << 24);
+}
+
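+/* Choose the ITLB entry to evict, as encoded in the MMUCR LRU bits. */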
+static int itlb_replacement(CPUSH4State * env)
+{
+ SuperHCPU *cpu = sh_env_get_cpu(env);
+
+ if ((env->mmucr & 0xe0000000) == 0xe0000000) {
+ return 0;
+ }
+ if ((env->mmucr & 0x98000000) == 0x18000000) {
+ return 1;
+ }
+ if ((env->mmucr & 0x54000000) == 0x04000000) {
+ return 2;
+ }
+ if ((env->mmucr & 0x2c000000) == 0x00000000) {
+ return 3;
+ }
+ cpu_abort(CPU(cpu), "Unhandled itlb_replacement");
+}
+
+/* Find the corresponding entry in the right TLB
+ Return entry, MMU_DTLB_MISS or MMU_DTLB_MULTIPLE
+*/
+static int find_tlb_entry(CPUSH4State * env, target_ulong address,
+ tlb_t * entries, uint8_t nbtlb, int use_asid)
+{
+ int match = MMU_DTLB_MISS;
+ uint32_t start, end;
+ uint8_t asid;
+ int i;
+
+ asid = env->pteh & 0xff;
+
+ for (i = 0; i < nbtlb; i++) {
+ if (!entries[i].v)
+ continue; /* Invalid entry */
+ if (!entries[i].sh && use_asid && entries[i].asid != asid)
+ continue; /* Bad ASID */
+ start = (entries[i].vpn << 10) & ~(entries[i].size - 1);
+ end = start + entries[i].size - 1;
+ if (address >= start && address <= end) { /* Match */
+ if (match != MMU_DTLB_MISS)
+ return MMU_DTLB_MULTIPLE; /* Multiple match */
+ match = i;
+ }
+ }
+ return match;
+}
+
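+/* URC is the UTLB replacement counter held in MMUCR; it wraps either at
+ * URB (when URB is non-zero) or at the end of the UTLB.
+ */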
+static void increment_urc(CPUSH4State * env)
+{
+ uint8_t urb, urc;
+
+ /* Increment URC */
+ urb = ((env->mmucr) >> 18) & 0x3f;
+ urc = ((env->mmucr) >> 10) & 0x3f;
+ urc++;
+ if ((urb > 0 && urc > urb) || urc > (UTLB_SIZE - 1))
+ urc = 0;
+ env->mmucr = (env->mmucr & 0xffff03ff) | (urc << 10);
+}
+
+/* Copy a utlb entry into the itlb
+ Return entry
+*/
+static int copy_utlb_entry_itlb(CPUSH4State *env, int utlb)
+{
+ int itlb;
+
+ tlb_t * ientry;
+ itlb = itlb_replacement(env);
+ ientry = &env->itlb[itlb];
+ if (ientry->v) {
+ tlb_flush_page(CPU(sh_env_get_cpu(env)), ientry->vpn << 10);
+ }
+ *ientry = env->utlb[utlb];
+ update_itlb_use(env, itlb);
+ return itlb;
+}
+
+/* Find itlb entry
+ Return entry, MMU_ITLB_MISS, MMU_ITLB_MULTIPLE or MMU_DTLB_MULTIPLE
+*/
+static int find_itlb_entry(CPUSH4State * env, target_ulong address,
+ int use_asid)
+{
+ int e;
+
+ e = find_tlb_entry(env, address, env->itlb, ITLB_SIZE, use_asid);
+ if (e == MMU_DTLB_MULTIPLE) {
+ e = MMU_ITLB_MULTIPLE;
+ } else if (e == MMU_DTLB_MISS) {
+ e = MMU_ITLB_MISS;
+ } else if (e >= 0) {
+ update_itlb_use(env, e);
+ }
+ return e;
+}
+
+/* Find utlb entry
+ Return entry, MMU_DTLB_MISS, MMU_DTLB_MULTIPLE */
+static int find_utlb_entry(CPUSH4State * env, target_ulong address, int use_asid)
+{
+ /* per utlb access */
+ increment_urc(env);
+
+ /* Return entry */
+ return find_tlb_entry(env, address, env->utlb, UTLB_SIZE, use_asid);
+}
+
+/* Match address against MMU
+ Return MMU_OK, MMU_DTLB_MISS_READ, MMU_DTLB_MISS_WRITE,
+ MMU_DTLB_INITIAL_WRITE, MMU_DTLB_VIOLATION_READ,
+ MMU_DTLB_VIOLATION_WRITE, MMU_ITLB_MISS,
+ MMU_ITLB_MULTIPLE, MMU_ITLB_VIOLATION,
+ MMU_IADDR_ERROR, MMU_DADDR_ERROR_READ, MMU_DADDR_ERROR_WRITE.
+*/
+static int get_mmu_address(CPUSH4State * env, target_ulong * physical,
+ int *prot, target_ulong address,
+ int rw, int access_type)
+{
+ int use_asid, n;
+ tlb_t *matching = NULL;
+
+ use_asid = !(env->mmucr & MMUCR_SV) || !(env->sr & (1u << SR_MD));
+
+ if (rw == 2) {
+ n = find_itlb_entry(env, address, use_asid);
+ if (n >= 0) {
+ matching = &env->itlb[n];
+ if (!(env->sr & (1u << SR_MD)) && !(matching->pr & 2)) {
+ n = MMU_ITLB_VIOLATION;
+ } else {
+ *prot = PAGE_EXEC;
+ }
+ } else {
+ n = find_utlb_entry(env, address, use_asid);
+ if (n >= 0) {
+ n = copy_utlb_entry_itlb(env, n);
+ matching = &env->itlb[n];
+ if (!(env->sr & (1u << SR_MD)) && !(matching->pr & 2)) {
+ n = MMU_ITLB_VIOLATION;
+ } else {
+ *prot = PAGE_READ | PAGE_EXEC;
+ if ((matching->pr & 1) && matching->d) {
+ *prot |= PAGE_WRITE;
+ }
+ }
+ } else if (n == MMU_DTLB_MULTIPLE) {
+ n = MMU_ITLB_MULTIPLE;
+ } else if (n == MMU_DTLB_MISS) {
+ n = MMU_ITLB_MISS;
+ }
+ }
+ } else {
+ n = find_utlb_entry(env, address, use_asid);
+ if (n >= 0) {
+ matching = &env->utlb[n];
+ if (!(env->sr & (1u << SR_MD)) && !(matching->pr & 2)) {
+ n = (rw == 1) ? MMU_DTLB_VIOLATION_WRITE :
+ MMU_DTLB_VIOLATION_READ;
+ } else if ((rw == 1) && !(matching->pr & 1)) {
+ n = MMU_DTLB_VIOLATION_WRITE;
+ } else if ((rw == 1) && !matching->d) {
+ n = MMU_DTLB_INITIAL_WRITE;
+ } else {
+ *prot = PAGE_READ;
+ if ((matching->pr & 1) && matching->d) {
+ *prot |= PAGE_WRITE;
+ }
+ }
+ } else if (n == MMU_DTLB_MISS) {
+ n = (rw == 1) ? MMU_DTLB_MISS_WRITE :
+ MMU_DTLB_MISS_READ;
+ }
+ }
+ if (n >= 0) {
+ n = MMU_OK;
+ *physical = ((matching->ppn << 10) & ~(matching->size - 1)) |
+ (address & (matching->size - 1));
+ }
+ return n;
+}
+
+static int get_physical_address(CPUSH4State * env, target_ulong * physical,
+ int *prot, target_ulong address,
+ int rw, int access_type)
+{
+ /* P1, P2 and P4 areas do not use translation */
+ if ((address >= 0x80000000 && address < 0xc0000000) ||
+ address >= 0xe0000000) {
+ if (!(env->sr & (1u << SR_MD))
+ && (address < 0xe0000000 || address >= 0xe4000000)) {
+ /* Unauthorized access in user mode (only store queues are available) */
+ fprintf(stderr, "Unauthorized access\n");
+ if (rw == 0)
+ return MMU_DADDR_ERROR_READ;
+ else if (rw == 1)
+ return MMU_DADDR_ERROR_WRITE;
+ else
+ return MMU_IADDR_ERROR;
+ }
+ if (address >= 0x80000000 && address < 0xc0000000) {
+ /* Mask upper 3 bits for P1 and P2 areas */
+ *physical = address & 0x1fffffff;
+ } else {
+ *physical = address;
+ }
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return MMU_OK;
+ }
+
+ /* If MMU is disabled, return the corresponding physical page */
+ if (!(env->mmucr & MMUCR_AT)) {
+ *physical = address & 0x1FFFFFFF;
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return MMU_OK;
+ }
+
+ /* We need to resort to the MMU */
+ return get_mmu_address(env, physical, prot, address, rw, access_type);
+}
+
+int superh_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
+ int mmu_idx)
+{
+ SuperHCPU *cpu = SUPERH_CPU(cs);
+ CPUSH4State *env = &cpu->env;
+ target_ulong physical;
+ int prot, ret, access_type;
+
+ access_type = ACCESS_INT;
+ ret =
+ get_physical_address(env, &physical, &prot, address, rw,
+ access_type);
+
+ if (ret != MMU_OK) {
+ env->tea = address;
+ if (ret != MMU_DTLB_MULTIPLE && ret != MMU_ITLB_MULTIPLE) {
+ env->pteh = (env->pteh & PTEH_ASID_MASK) |
+ (address & PTEH_VPN_MASK);
+ }
+ switch (ret) {
+ case MMU_ITLB_MISS:
+ case MMU_DTLB_MISS_READ:
+ cs->exception_index = 0x040;
+ break;
+ case MMU_DTLB_MULTIPLE:
+ case MMU_ITLB_MULTIPLE:
+ cs->exception_index = 0x140;
+ break;
+ case MMU_ITLB_VIOLATION:
+ cs->exception_index = 0x0a0;
+ break;
+ case MMU_DTLB_MISS_WRITE:
+ cs->exception_index = 0x060;
+ break;
+ case MMU_DTLB_INITIAL_WRITE:
+ cs->exception_index = 0x080;
+ break;
+ case MMU_DTLB_VIOLATION_READ:
+ cs->exception_index = 0x0a0;
+ break;
+ case MMU_DTLB_VIOLATION_WRITE:
+ cs->exception_index = 0x0c0;
+ break;
+ case MMU_IADDR_ERROR:
+ case MMU_DADDR_ERROR_READ:
+ cs->exception_index = 0x0e0;
+ break;
+ case MMU_DADDR_ERROR_WRITE:
+ cs->exception_index = 0x100;
+ break;
+ default:
+ cpu_abort(cs, "Unhandled MMU fault");
+ }
+ return 1;
+ }
+
+ address &= TARGET_PAGE_MASK;
+ physical &= TARGET_PAGE_MASK;
+
+ tlb_set_page(cs, address, physical, prot, mmu_idx, TARGET_PAGE_SIZE);
+ return 0;
+}
+
+hwaddr superh_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+ SuperHCPU *cpu = SUPERH_CPU(cs);
+ target_ulong physical;
+ int prot;
+
+ get_physical_address(&cpu->env, &physical, &prot, addr, 0, 0);
+ return physical;
+}
+
+void cpu_load_tlb(CPUSH4State * env)
+{
+ SuperHCPU *cpu = sh_env_get_cpu(env);
+ int n = cpu_mmucr_urc(env->mmucr);
+ tlb_t * entry = &env->utlb[n];
+
+ if (entry->v) {
+ /* Overwriting valid entry in utlb. */
+ target_ulong address = entry->vpn << 10;
+ tlb_flush_page(CPU(cpu), address);
+ }
+
+ /* Fill the entry from the PTEH, PTEL and PTEA registers. */
+ entry->asid = (uint8_t)cpu_pteh_asid(env->pteh);
+ entry->vpn = cpu_pteh_vpn(env->pteh);
+ entry->v = (uint8_t)cpu_ptel_v(env->ptel);
+ entry->ppn = cpu_ptel_ppn(env->ptel);
+ entry->sz = (uint8_t)cpu_ptel_sz(env->ptel);
+ switch (entry->sz) {
+ case 0: /* 00 */
+ entry->size = 1024; /* 1K */
+ break;
+ case 1: /* 01 */
+ entry->size = 1024 * 4; /* 4K */
+ break;
+ case 2: /* 10 */
+ entry->size = 1024 * 64; /* 64K */
+ break;
+ case 3: /* 11 */
+ entry->size = 1024 * 1024; /* 1M */
+ break;
+ default:
+ cpu_abort(CPU(cpu), "Unhandled load_tlb");
+ break;
+ }
+ entry->sh = (uint8_t)cpu_ptel_sh(env->ptel);
+ entry->c = (uint8_t)cpu_ptel_c(env->ptel);
+ entry->pr = (uint8_t)cpu_ptel_pr(env->ptel);
+ entry->d = (uint8_t)cpu_ptel_d(env->ptel);
+ entry->wt = (uint8_t)cpu_ptel_wt(env->ptel);
+ entry->sa = (uint8_t)cpu_ptea_sa(env->ptea);
+ entry->tc = (uint8_t)cpu_ptea_tc(env->ptea);
+}
+
+void cpu_sh4_invalidate_tlb(CPUSH4State *s)
+{
+ int i;
+
+ /* UTLB */
+ for (i = 0; i < UTLB_SIZE; i++) {
+ tlb_t * entry = &s->utlb[i];
+ entry->v = 0;
+ }
+ /* ITLB */
+ for (i = 0; i < ITLB_SIZE; i++) {
+ tlb_t * entry = &s->itlb[i];
+ entry->v = 0;
+ }
+
+ tlb_flush(CPU(sh_env_get_cpu(s)), 1);
+}
+
+uint32_t cpu_sh4_read_mmaped_itlb_addr(CPUSH4State *s,
+ hwaddr addr)
+{
+ int index = (addr & 0x00000300) >> 8;
+ tlb_t * entry = &s->itlb[index];
+
+ return (entry->vpn << 10) |
+ (entry->v << 8) |
+ (entry->asid);
+}
+
+void cpu_sh4_write_mmaped_itlb_addr(CPUSH4State *s, hwaddr addr,
+ uint32_t mem_value)
+{
+ uint32_t vpn = (mem_value & 0xfffffc00) >> 10;
+ uint8_t v = (uint8_t)((mem_value & 0x00000100) >> 8);
+ uint8_t asid = (uint8_t)(mem_value & 0x000000ff);
+
+ int index = (addr & 0x00000300) >> 8;
+ tlb_t * entry = &s->itlb[index];
+ if (entry->v) {
+ /* Overwriting valid entry in itlb. */
+ target_ulong address = entry->vpn << 10;
+ tlb_flush_page(CPU(sh_env_get_cpu(s)), address);
+ }
+ entry->asid = asid;
+ entry->vpn = vpn;
+ entry->v = v;
+}
+
+uint32_t cpu_sh4_read_mmaped_itlb_data(CPUSH4State *s,
+ hwaddr addr)
+{
+ int array = (addr & 0x00800000) >> 23;
+ int index = (addr & 0x00000300) >> 8;
+ tlb_t * entry = &s->itlb[index];
+
+ if (array == 0) {
+ /* ITLB Data Array 1 */
+ return (entry->ppn << 10) |
+ (entry->v << 8) |
+ (entry->pr << 5) |
+ ((entry->sz & 1) << 6) |
+ ((entry->sz & 2) << 4) |
+ (entry->c << 3) |
+ (entry->sh << 1);
+ } else {
+ /* ITLB Data Array 2 */
+ return (entry->tc << 1) |
+ (entry->sa);
+ }
+}
+
+void cpu_sh4_write_mmaped_itlb_data(CPUSH4State *s, hwaddr addr,
+ uint32_t mem_value)
+{
+ int array = (addr & 0x00800000) >> 23;
+ int index = (addr & 0x00000300) >> 8;
+ tlb_t * entry = &s->itlb[index];
+
+ if (array == 0) {
+ /* ITLB Data Array 1 */
+ if (entry->v) {
+ /* Overwriting valid entry in itlb. */
+ target_ulong address = entry->vpn << 10;
+ tlb_flush_page(CPU(sh_env_get_cpu(s)), address);
+ }
+ entry->ppn = (mem_value & 0x1ffffc00) >> 10;
+ entry->v = (mem_value & 0x00000100) >> 8;
+ entry->sz = (mem_value & 0x00000080) >> 6 |
+ (mem_value & 0x00000010) >> 4;
+ entry->pr = (mem_value & 0x00000040) >> 5;
+ entry->c = (mem_value & 0x00000008) >> 3;
+ entry->sh = (mem_value & 0x00000002) >> 1;
+ } else {
+ /* ITLB Data Array 2 */
+ entry->tc = (mem_value & 0x00000008) >> 3;
+ entry->sa = (mem_value & 0x00000007);
+ }
+}
+
+uint32_t cpu_sh4_read_mmaped_utlb_addr(CPUSH4State *s,
+ hwaddr addr)
+{
+ int index = (addr & 0x00003f00) >> 8;
+ tlb_t * entry = &s->utlb[index];
+
+ increment_urc(s); /* per utlb access */
+
+ return (entry->vpn << 10) |
+ (entry->v << 8) |
+ (entry->asid);
+}
+
+void cpu_sh4_write_mmaped_utlb_addr(CPUSH4State *s, hwaddr addr,
+ uint32_t mem_value)
+{
+ int associate = addr & 0x0000080;
+ uint32_t vpn = (mem_value & 0xfffffc00) >> 10;
+ uint8_t d = (uint8_t)((mem_value & 0x00000200) >> 9);
+ uint8_t v = (uint8_t)((mem_value & 0x00000100) >> 8);
+ uint8_t asid = (uint8_t)(mem_value & 0x000000ff);
+ int use_asid = !(s->mmucr & MMUCR_SV) || !(s->sr & (1u << SR_MD));
+
+ if (associate) {
+ int i;
+ tlb_t * utlb_match_entry = NULL;
+ int needs_tlb_flush = 0;
+
+ /* search UTLB */
+ for (i = 0; i < UTLB_SIZE; i++) {
+ tlb_t * entry = &s->utlb[i];
+ if (!entry->v)
+ continue;
+
+ if (entry->vpn == vpn
+ && (!use_asid || entry->asid == asid || entry->sh)) {
+ if (utlb_match_entry) {
+ CPUState *cs = CPU(sh_env_get_cpu(s));
+
+ /* Multiple TLB Exception */
+ cs->exception_index = 0x140;
+ s->tea = addr;
+ break;
+ }
+ if (entry->v && !v)
+ needs_tlb_flush = 1;
+ entry->v = v;
+ entry->d = d;
+ utlb_match_entry = entry;
+ }
+ increment_urc(s); /* per utlb access */
+ }
+
+ /* search ITLB */
+ for (i = 0; i < ITLB_SIZE; i++) {
+ tlb_t * entry = &s->itlb[i];
+ if (entry->vpn == vpn
+ && (!use_asid || entry->asid == asid || entry->sh)) {
+ if (entry->v && !v)
+ needs_tlb_flush = 1;
+ if (utlb_match_entry)
+ *entry = *utlb_match_entry;
+ else
+ entry->v = v;
+ break;
+ }
+ }
+
+ if (needs_tlb_flush) {
+ tlb_flush_page(CPU(sh_env_get_cpu(s)), vpn << 10);
+ }
+
+ } else {
+ int index = (addr & 0x00003f00) >> 8;
+ tlb_t * entry = &s->utlb[index];
+ if (entry->v) {
+ CPUState *cs = CPU(sh_env_get_cpu(s));
+
+ /* Overwriting valid entry in utlb. */
+ target_ulong address = entry->vpn << 10;
+ tlb_flush_page(cs, address);
+ }
+ entry->asid = asid;
+ entry->vpn = vpn;
+ entry->d = d;
+ entry->v = v;
+ increment_urc(s);
+ }
+}
+
+uint32_t cpu_sh4_read_mmaped_utlb_data(CPUSH4State *s,
+ hwaddr addr)
+{
+ int array = (addr & 0x00800000) >> 23;
+ int index = (addr & 0x00003f00) >> 8;
+ tlb_t * entry = &s->utlb[index];
+
+ increment_urc(s); /* per utlb access */
+
+ if (array == 0) {
+ /* UTLB Data Array 1 */
+ return (entry->ppn << 10) |
+ (entry->v << 8) |
+ (entry->pr << 5) |
+ ((entry->sz & 1) << 6) |
+ ((entry->sz & 2) << 4) |
+ (entry->c << 3) |
+ (entry->d << 2) |
+ (entry->sh << 1) |
+ (entry->wt);
+ } else {
+ /* UTLB Data Array 2 */
+ return (entry->tc << 1) |
+ (entry->sa);
+ }
+}
+
+void cpu_sh4_write_mmaped_utlb_data(CPUSH4State *s, hwaddr addr,
+ uint32_t mem_value)
+{
+ int array = (addr & 0x00800000) >> 23;
+ int index = (addr & 0x00003f00) >> 8;
+ tlb_t * entry = &s->utlb[index];
+
+ increment_urc(s); /* per utlb access */
+
+ if (array == 0) {
+ /* UTLB Data Array 1 */
+ if (entry->v) {
+ /* Overwriting valid entry in utlb. */
+ target_ulong address = entry->vpn << 10;
+ tlb_flush_page(CPU(sh_env_get_cpu(s)), address);
+ }
+ entry->ppn = (mem_value & 0x1ffffc00) >> 10;
+ entry->v = (mem_value & 0x00000100) >> 8;
+ entry->sz = (mem_value & 0x00000080) >> 6 |
+ (mem_value & 0x00000010) >> 4;
+ entry->pr = (mem_value & 0x00000060) >> 5;
+ entry->c = (mem_value & 0x00000008) >> 3;
+ entry->d = (mem_value & 0x00000004) >> 2;
+ entry->sh = (mem_value & 0x00000002) >> 1;
+ entry->wt = (mem_value & 0x00000001);
+ } else {
+ /* UTLB Data Array 2 */
+ entry->tc = (mem_value & 0x00000008) >> 3;
+ entry->sa = (mem_value & 0x00000007);
+ }
+}
+
+int cpu_sh4_is_cached(CPUSH4State * env, target_ulong addr)
+{
+ int n;
+ int use_asid = !(env->mmucr & MMUCR_SV) || !(env->sr & (1u << SR_MD));
+
+ /* check area */
+ if (env->sr & (1u << SR_MD)) {
+ /* For privileged mode, P2 and P4 area is not cacheable. */
+ if ((0xA0000000 <= addr && addr < 0xC0000000) || 0xE0000000 <= addr)
+ return 0;
+ } else {
+ /* For user mode, only U0 area is cacheable. */
+ if (0x80000000 <= addr)
+ return 0;
+ }
+
+ /*
+ * TODO : Evaluate CCR and check if the cache is on or off.
+ * Currently CCR is not in CPUSH4State but in SH7750State.
+ * Once ccr has been moved into CPUSH4State, the code would look
+ * as follows.
+ */
+#if 0
+ /* check if operand cache is enabled or not. */
+ if (!(env->ccr & 1))
+ return 0;
+#endif
+
+ /* if MMU is off, no check for TLB. */
+ if (env->mmucr & MMUCR_AT)
+ return 1;
+
+ /* check TLB */
+ n = find_tlb_entry(env, addr, env->itlb, ITLB_SIZE, use_asid);
+ if (n >= 0)
+ return env->itlb[n].c;
+
+ n = find_tlb_entry(env, addr, env->utlb, UTLB_SIZE, use_asid);
+ if (n >= 0)
+ return env->utlb[n].c;
+
+ return 0;
+}
+
+#endif
+
+bool superh_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ if (interrupt_request & CPU_INTERRUPT_HARD) {
+ superh_cpu_do_interrupt(cs);
+ return true;
+ }
+ return false;
+}
diff --git a/target/sh4/helper.h b/target/sh4/helper.h
new file mode 100644
index 0000000000..dce859caea
--- /dev/null
+++ b/target/sh4/helper.h
@@ -0,0 +1,45 @@
+DEF_HELPER_1(ldtlb, void, env)
+DEF_HELPER_1(raise_illegal_instruction, noreturn, env)
+DEF_HELPER_1(raise_slot_illegal_instruction, noreturn, env)
+DEF_HELPER_1(raise_fpu_disable, noreturn, env)
+DEF_HELPER_1(raise_slot_fpu_disable, noreturn, env)
+DEF_HELPER_1(debug, noreturn, env)
+DEF_HELPER_1(sleep, noreturn, env)
+DEF_HELPER_2(trapa, noreturn, env, i32)
+
+DEF_HELPER_3(movcal, void, env, i32, i32)
+DEF_HELPER_1(discard_movcal_backup, void, env)
+DEF_HELPER_2(ocbi, void, env, i32)
+
+DEF_HELPER_3(macl, void, env, i32, i32)
+DEF_HELPER_3(macw, void, env, i32, i32)
+
+DEF_HELPER_2(ld_fpscr, void, env, i32)
+
+DEF_HELPER_FLAGS_1(fabs_FT, TCG_CALL_NO_RWG_SE, f32, f32)
+DEF_HELPER_FLAGS_1(fabs_DT, TCG_CALL_NO_RWG_SE, f64, f64)
+DEF_HELPER_FLAGS_3(fadd_FT, TCG_CALL_NO_WG, f32, env, f32, f32)
+DEF_HELPER_FLAGS_3(fadd_DT, TCG_CALL_NO_WG, f64, env, f64, f64)
+DEF_HELPER_FLAGS_2(fcnvsd_FT_DT, TCG_CALL_NO_WG, f64, env, f32)
+DEF_HELPER_FLAGS_2(fcnvds_DT_FT, TCG_CALL_NO_WG, f32, env, f64)
+
+DEF_HELPER_3(fcmp_eq_FT, void, env, f32, f32)
+DEF_HELPER_3(fcmp_eq_DT, void, env, f64, f64)
+DEF_HELPER_3(fcmp_gt_FT, void, env, f32, f32)
+DEF_HELPER_3(fcmp_gt_DT, void, env, f64, f64)
+DEF_HELPER_FLAGS_3(fdiv_FT, TCG_CALL_NO_WG, f32, env, f32, f32)
+DEF_HELPER_FLAGS_3(fdiv_DT, TCG_CALL_NO_WG, f64, env, f64, f64)
+DEF_HELPER_FLAGS_2(float_FT, TCG_CALL_NO_WG, f32, env, i32)
+DEF_HELPER_FLAGS_2(float_DT, TCG_CALL_NO_WG, f64, env, i32)
+DEF_HELPER_FLAGS_4(fmac_FT, TCG_CALL_NO_WG, f32, env, f32, f32, f32)
+DEF_HELPER_FLAGS_3(fmul_FT, TCG_CALL_NO_WG, f32, env, f32, f32)
+DEF_HELPER_FLAGS_3(fmul_DT, TCG_CALL_NO_WG, f64, env, f64, f64)
+DEF_HELPER_FLAGS_1(fneg_T, TCG_CALL_NO_RWG_SE, f32, f32)
+DEF_HELPER_FLAGS_3(fsub_FT, TCG_CALL_NO_WG, f32, env, f32, f32)
+DEF_HELPER_FLAGS_3(fsub_DT, TCG_CALL_NO_WG, f64, env, f64, f64)
+DEF_HELPER_FLAGS_2(fsqrt_FT, TCG_CALL_NO_WG, f32, env, f32)
+DEF_HELPER_FLAGS_2(fsqrt_DT, TCG_CALL_NO_WG, f64, env, f64)
+DEF_HELPER_FLAGS_2(ftrc_FT, TCG_CALL_NO_WG, i32, env, f32)
+DEF_HELPER_FLAGS_2(ftrc_DT, TCG_CALL_NO_WG, i32, env, f64)
+DEF_HELPER_3(fipr, void, env, i32, i32)
+DEF_HELPER_2(ftrv, void, env, i32)
diff --git a/target/sh4/monitor.c b/target/sh4/monitor.c
new file mode 100644
index 0000000000..426e5d4914
--- /dev/null
+++ b/target/sh4/monitor.c
@@ -0,0 +1,53 @@
+/*
+ * QEMU monitor
+ *
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "monitor/monitor.h"
+#include "monitor/hmp-target.h"
+#include "hmp.h"
+
+static void print_tlb(Monitor *mon, int idx, tlb_t *tlb)
+{
+ monitor_printf(mon, " tlb%i:\t"
+ "asid=%hhu vpn=%x\tppn=%x\tsz=%hhu size=%u\t"
+ "v=%hhu shared=%hhu cached=%hhu prot=%hhu "
+ "dirty=%hhu writethrough=%hhu\n",
+ idx,
+ tlb->asid, tlb->vpn, tlb->ppn, tlb->sz, tlb->size,
+ tlb->v, tlb->sh, tlb->c, tlb->pr,
+ tlb->d, tlb->wt);
+}
+
+void hmp_info_tlb(Monitor *mon, const QDict *qdict)
+{
+ CPUArchState *env = mon_get_cpu_env();
+ int i;
+
+ monitor_printf (mon, "ITLB:\n");
+ for (i = 0 ; i < ITLB_SIZE ; i++)
+ print_tlb (mon, i, &env->itlb[i]);
+ monitor_printf (mon, "UTLB:\n");
+ for (i = 0 ; i < UTLB_SIZE ; i++)
+ print_tlb (mon, i, &env->utlb[i]);
+}
diff --git a/target/sh4/op_helper.c b/target/sh4/op_helper.c
new file mode 100644
index 0000000000..684d3f3758
--- /dev/null
+++ b/target/sh4/op_helper.c
@@ -0,0 +1,498 @@
+/*
+ * SH4 emulation
+ *
+ * Copyright (c) 2005 Samuel Tardieu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+
+#ifndef CONFIG_USER_ONLY
+
+void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ int ret;
+
+ ret = superh_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
+ if (ret) {
+ /* now we have a real cpu fault */
+ if (retaddr) {
+ cpu_restore_state(cs, retaddr);
+ }
+ cpu_loop_exit(cs);
+ }
+}
+
+#endif
+
+void helper_ldtlb(CPUSH4State *env)
+{
+#ifdef CONFIG_USER_ONLY
+ SuperHCPU *cpu = sh_env_get_cpu(env);
+
+ /* XXXXX */
+ cpu_abort(CPU(cpu), "Unhandled ldtlb");
+#else
+ cpu_load_tlb(env);
+#endif
+}
+
+static inline void QEMU_NORETURN raise_exception(CPUSH4State *env, int index,
+ uintptr_t retaddr)
+{
+ CPUState *cs = CPU(sh_env_get_cpu(env));
+
+ cs->exception_index = index;
+ if (retaddr) {
+ cpu_restore_state(cs, retaddr);
+ }
+ cpu_loop_exit(cs);
+}
+
+void helper_raise_illegal_instruction(CPUSH4State *env)
+{
+ raise_exception(env, 0x180, 0);
+}
+
+void helper_raise_slot_illegal_instruction(CPUSH4State *env)
+{
+ raise_exception(env, 0x1a0, 0);
+}
+
+void helper_raise_fpu_disable(CPUSH4State *env)
+{
+ raise_exception(env, 0x800, 0);
+}
+
+void helper_raise_slot_fpu_disable(CPUSH4State *env)
+{
+ raise_exception(env, 0x820, 0);
+}
+
+void helper_debug(CPUSH4State *env)
+{
+ raise_exception(env, EXCP_DEBUG, 0);
+}
+
+void helper_sleep(CPUSH4State *env)
+{
+ CPUState *cs = CPU(sh_env_get_cpu(env));
+
+ cs->halted = 1;
+ env->in_sleep = 1;
+ raise_exception(env, EXCP_HLT, 0);
+}
+
+void helper_trapa(CPUSH4State *env, uint32_t tra)
+{
+ env->tra = tra << 2;
+ raise_exception(env, 0x160, 0);
+}
+
+void helper_movcal(CPUSH4State *env, uint32_t address, uint32_t value)
+{
+ if (cpu_sh4_is_cached (env, address))
+ {
+ memory_content *r = g_new(memory_content, 1);
+
+ r->address = address;
+ r->value = value;
+ r->next = NULL;
+
+ *(env->movcal_backup_tail) = r;
+ env->movcal_backup_tail = &(r->next);
+ }
+}
+
+void helper_discard_movcal_backup(CPUSH4State *env)
+{
+ memory_content *current = env->movcal_backup;
+
+ while(current)
+ {
+ memory_content *next = current->next;
+ g_free(current);
+ env->movcal_backup = current = next;
+ if (current == NULL)
+ env->movcal_backup_tail = &(env->movcal_backup);
+ }
+}
+
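+/* OCBI invalidates an operand cache block: if a movca.l to the same 32-byte
+ * cache line is pending, restore the previous memory content and drop the
+ * backup entry.
+ */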
+void helper_ocbi(CPUSH4State *env, uint32_t address)
+{
+ memory_content **current = &(env->movcal_backup);
+ while (*current)
+ {
+ uint32_t a = (*current)->address;
+ if ((a & ~0x1F) == (address & ~0x1F))
+ {
+ memory_content *next = (*current)->next;
+ cpu_stl_data(env, a, (*current)->value);
+
+ if (next == NULL)
+ {
+ env->movcal_backup_tail = current;
+ }
+
+ g_free(*current);
+ *current = next;
+ break;
+ }
+ }
+}
+
+void helper_macl(CPUSH4State *env, uint32_t arg0, uint32_t arg1)
+{
+ int64_t res;
+
+ res = ((uint64_t) env->mach << 32) | env->macl;
+ res += (int64_t) (int32_t) arg0 *(int64_t) (int32_t) arg1;
+ env->mach = (res >> 32) & 0xffffffff;
+ env->macl = res & 0xffffffff;
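+ /* When SR_S is set, the accumulated result is limited to 48 bits. */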
+ if (env->sr & (1u << SR_S)) {
+ if (res < 0)
+ env->mach |= 0xffff0000;
+ else
+ env->mach &= 0x00007fff;
+ }
+}
+
+void helper_macw(CPUSH4State *env, uint32_t arg0, uint32_t arg1)
+{
+ int64_t res;
+
+ res = ((uint64_t) env->mach << 32) | env->macl;
+ res += (int64_t) (int16_t) arg0 *(int64_t) (int16_t) arg1;
+ env->mach = (res >> 32) & 0xffffffff;
+ env->macl = res & 0xffffffff;
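+ /* When SR_S is set, saturate the result to 32 bits, flagging overflow in MACH. */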
+ if (env->sr & (1u << SR_S)) {
+ if (res < -0x80000000) {
+ env->mach = 1;
+ env->macl = 0x80000000;
+ } else if (res > 0x000000007fffffff) {
+ env->mach = 1;
+ env->macl = 0x7fffffff;
+ }
+ }
+}
+
+void helper_ld_fpscr(CPUSH4State *env, uint32_t val)
+{
+ env->fpscr = val & FPSCR_MASK;
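+ /* SH-4 implements only two rounding modes: nearest-even and to-zero. */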
+ if ((val & FPSCR_RM_MASK) == FPSCR_RM_ZERO) {
+ set_float_rounding_mode(float_round_to_zero, &env->fp_status);
+ } else {
+ set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
+ }
+ set_flush_to_zero((val & FPSCR_DN) != 0, &env->fp_status);
+}
+
+static void update_fpscr(CPUSH4State *env, uintptr_t retaddr)
+{
+ int xcpt, cause, enable;
+
+ xcpt = get_float_exception_flags(&env->fp_status);
+
+ /* Clear the flag entries */
+ env->fpscr &= ~FPSCR_FLAG_MASK;
+
+ if (unlikely(xcpt)) {
+ if (xcpt & float_flag_invalid) {
+ env->fpscr |= FPSCR_FLAG_V;
+ }
+ if (xcpt & float_flag_divbyzero) {
+ env->fpscr |= FPSCR_FLAG_Z;
+ }
+ if (xcpt & float_flag_overflow) {
+ env->fpscr |= FPSCR_FLAG_O;
+ }
+ if (xcpt & float_flag_underflow) {
+ env->fpscr |= FPSCR_FLAG_U;
+ }
+ if (xcpt & float_flag_inexact) {
+ env->fpscr |= FPSCR_FLAG_I;
+ }
+
+ /* Accumulate in cause entries */
+ env->fpscr |= (env->fpscr & FPSCR_FLAG_MASK)
+ << (FPSCR_CAUSE_SHIFT - FPSCR_FLAG_SHIFT);
+
+ /* Generate an exception if enabled */
+ cause = (env->fpscr & FPSCR_CAUSE_MASK) >> FPSCR_CAUSE_SHIFT;
+ enable = (env->fpscr & FPSCR_ENABLE_MASK) >> FPSCR_ENABLE_SHIFT;
+ if (cause & enable) {
+ raise_exception(env, 0x120, retaddr);
+ }
+ }
+}
+
+float32 helper_fabs_FT(float32 t0)
+{
+ return float32_abs(t0);
+}
+
+float64 helper_fabs_DT(float64 t0)
+{
+ return float64_abs(t0);
+}
+
+float32 helper_fadd_FT(CPUSH4State *env, float32 t0, float32 t1)
+{
+ set_float_exception_flags(0, &env->fp_status);
+ t0 = float32_add(t0, t1, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return t0;
+}
+
+float64 helper_fadd_DT(CPUSH4State *env, float64 t0, float64 t1)
+{
+ set_float_exception_flags(0, &env->fp_status);
+ t0 = float64_add(t0, t1, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return t0;
+}
+
+void helper_fcmp_eq_FT(CPUSH4State *env, float32 t0, float32 t1)
+{
+ int relation;
+
+ set_float_exception_flags(0, &env->fp_status);
+ relation = float32_compare(t0, t1, &env->fp_status);
+ if (unlikely(relation == float_relation_unordered)) {
+ update_fpscr(env, GETPC());
+ } else {
+ env->sr_t = (relation == float_relation_equal);
+ }
+}
+
+void helper_fcmp_eq_DT(CPUSH4State *env, float64 t0, float64 t1)
+{
+ int relation;
+
+ set_float_exception_flags(0, &env->fp_status);
+ relation = float64_compare(t0, t1, &env->fp_status);
+ if (unlikely(relation == float_relation_unordered)) {
+ update_fpscr(env, GETPC());
+ } else {
+ env->sr_t = (relation == float_relation_equal);
+ }
+}
+
+void helper_fcmp_gt_FT(CPUSH4State *env, float32 t0, float32 t1)
+{
+ int relation;
+
+ set_float_exception_flags(0, &env->fp_status);
+ relation = float32_compare(t0, t1, &env->fp_status);
+ if (unlikely(relation == float_relation_unordered)) {
+ update_fpscr(env, GETPC());
+ } else {
+ env->sr_t = (relation == float_relation_greater);
+ }
+}
+
+void helper_fcmp_gt_DT(CPUSH4State *env, float64 t0, float64 t1)
+{
+ int relation;
+
+ set_float_exception_flags(0, &env->fp_status);
+ relation = float64_compare(t0, t1, &env->fp_status);
+ if (unlikely(relation == float_relation_unordered)) {
+ update_fpscr(env, GETPC());
+ } else {
+ env->sr_t = (relation == float_relation_greater);
+ }
+}
+
+float64 helper_fcnvsd_FT_DT(CPUSH4State *env, float32 t0)
+{
+ float64 ret;
+ set_float_exception_flags(0, &env->fp_status);
+ ret = float32_to_float64(t0, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return ret;
+}
+
+float32 helper_fcnvds_DT_FT(CPUSH4State *env, float64 t0)
+{
+ float32 ret;
+ set_float_exception_flags(0, &env->fp_status);
+ ret = float64_to_float32(t0, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return ret;
+}
+
+float32 helper_fdiv_FT(CPUSH4State *env, float32 t0, float32 t1)
+{
+ set_float_exception_flags(0, &env->fp_status);
+ t0 = float32_div(t0, t1, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return t0;
+}
+
+float64 helper_fdiv_DT(CPUSH4State *env, float64 t0, float64 t1)
+{
+ set_float_exception_flags(0, &env->fp_status);
+ t0 = float64_div(t0, t1, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return t0;
+}
+
+float32 helper_float_FT(CPUSH4State *env, uint32_t t0)
+{
+ float32 ret;
+ set_float_exception_flags(0, &env->fp_status);
+ ret = int32_to_float32(t0, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return ret;
+}
+
+float64 helper_float_DT(CPUSH4State *env, uint32_t t0)
+{
+ float64 ret;
+ set_float_exception_flags(0, &env->fp_status);
+ ret = int32_to_float64(t0, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return ret;
+}
+
+float32 helper_fmac_FT(CPUSH4State *env, float32 t0, float32 t1, float32 t2)
+{
+ set_float_exception_flags(0, &env->fp_status);
+ t0 = float32_muladd(t0, t1, t2, 0, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return t0;
+}
+
+float32 helper_fmul_FT(CPUSH4State *env, float32 t0, float32 t1)
+{
+ set_float_exception_flags(0, &env->fp_status);
+ t0 = float32_mul(t0, t1, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return t0;
+}
+
+float64 helper_fmul_DT(CPUSH4State *env, float64 t0, float64 t1)
+{
+ set_float_exception_flags(0, &env->fp_status);
+ t0 = float64_mul(t0, t1, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return t0;
+}
+
+float32 helper_fneg_T(float32 t0)
+{
+ return float32_chs(t0);
+}
+
+float32 helper_fsqrt_FT(CPUSH4State *env, float32 t0)
+{
+ set_float_exception_flags(0, &env->fp_status);
+ t0 = float32_sqrt(t0, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return t0;
+}
+
+float64 helper_fsqrt_DT(CPUSH4State *env, float64 t0)
+{
+ set_float_exception_flags(0, &env->fp_status);
+ t0 = float64_sqrt(t0, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return t0;
+}
+
+float32 helper_fsub_FT(CPUSH4State *env, float32 t0, float32 t1)
+{
+ set_float_exception_flags(0, &env->fp_status);
+ t0 = float32_sub(t0, t1, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return t0;
+}
+
+float64 helper_fsub_DT(CPUSH4State *env, float64 t0, float64 t1)
+{
+ set_float_exception_flags(0, &env->fp_status);
+ t0 = float64_sub(t0, t1, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return t0;
+}
+
+uint32_t helper_ftrc_FT(CPUSH4State *env, float32 t0)
+{
+ uint32_t ret;
+ set_float_exception_flags(0, &env->fp_status);
+ ret = float32_to_int32_round_to_zero(t0, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return ret;
+}
+
+uint32_t helper_ftrc_DT(CPUSH4State *env, float64 t0)
+{
+ uint32_t ret;
+ set_float_exception_flags(0, &env->fp_status);
+ ret = float64_to_int32_round_to_zero(t0, &env->fp_status);
+ update_fpscr(env, GETPC());
+ return ret;
+}
+
+void helper_fipr(CPUSH4State *env, uint32_t m, uint32_t n)
+{
+ int bank, i;
+ float32 r, p;
+
+ bank = (env->fpscr & FPSCR_FR) ? 16 : 0;
+ r = float32_zero;
+ set_float_exception_flags(0, &env->fp_status);
+
+ for (i = 0 ; i < 4 ; i++) {
+ p = float32_mul(env->fregs[bank + m + i],
+ env->fregs[bank + n + i],
+ &env->fp_status);
+ r = float32_add(r, p, &env->fp_status);
+ }
+ update_fpscr(env, GETPC());
+
+ env->fregs[bank + n + 3] = r;
+}
+
+void helper_ftrv(CPUSH4State *env, uint32_t n)
+{
+ int bank_matrix, bank_vector;
+ int i, j;
+ float32 r[4];
+ float32 p;
+
+ bank_matrix = (env->fpscr & FPSCR_FR) ? 0 : 16;
+ bank_vector = (env->fpscr & FPSCR_FR) ? 16 : 0;
+ set_float_exception_flags(0, &env->fp_status);
+ for (i = 0 ; i < 4 ; i++) {
+ r[i] = float32_zero;
+ for (j = 0 ; j < 4 ; j++) {
+ p = float32_mul(env->fregs[bank_matrix + 4 * j + i],
+ env->fregs[bank_vector + j],
+ &env->fp_status);
+ r[i] = float32_add(r[i], p, &env->fp_status);
+ }
+ }
+ update_fpscr(env, GETPC());
+
+ for (i = 0 ; i < 4 ; i++) {
+ env->fregs[bank_vector + i] = r[i];
+ }
+}
diff --git a/target/sh4/translate.c b/target/sh4/translate.c
new file mode 100644
index 0000000000..c89a14733f
--- /dev/null
+++ b/target/sh4/translate.c
@@ -0,0 +1,1944 @@
+/*
+ * SH4 translation
+ *
+ * Copyright (c) 2005 Samuel Tardieu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DEBUG_DISAS
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "disas/disas.h"
+#include "exec/exec-all.h"
+#include "tcg-op.h"
+#include "exec/cpu_ldst.h"
+
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+
+#include "trace-tcg.h"
+#include "exec/log.h"
+
+
+typedef struct DisasContext {
+ struct TranslationBlock *tb;
+ target_ulong pc;
+ uint16_t opcode;
+ uint32_t flags;
+ int bstate;
+ int memidx;
+ uint32_t delayed_pc;
+ int singlestep_enabled;
+ uint32_t features;
+ int has_movcal;
+} DisasContext;
+
+#if defined(CONFIG_USER_ONLY)
+#define IS_USER(ctx) 1
+#else
+#define IS_USER(ctx) (!(ctx->flags & (1u << SR_MD)))
+#endif
+
+enum {
+ BS_NONE = 0, /* We go out of the TB without reaching a branch or an
+ * exception condition
+ */
+ BS_STOP = 1, /* We want to stop translation for any reason */
+ BS_BRANCH = 2, /* We reached a branch condition */
+ BS_EXCP = 3, /* We reached an exception condition */
+};
+
+/* global register indexes */
+static TCGv_env cpu_env;
+static TCGv cpu_gregs[24];
+static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t;
+static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
+static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
+static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
+static TCGv cpu_fregs[32];
+
+/* internal register indexes */
+static TCGv cpu_flags, cpu_delayed_pc;
+
+#include "exec/gen-icount.h"
+
+void sh4_translate_init(void)
+{
+ int i;
+ static int done_init = 0;
+ static const char * const gregnames[24] = {
+ "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
+ "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
+ "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
+ "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
+ "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
+ };
+ static const char * const fregnames[32] = {
+ "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
+ "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
+ "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
+ "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
+ "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
+ "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
+ "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
+ "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
+ };
+
+ if (done_init)
+ return;
+
+ cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+ tcg_ctx.tcg_env = cpu_env;
+
+ for (i = 0; i < 24; i++)
+ cpu_gregs[i] = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, gregs[i]),
+ gregnames[i]);
+
+ cpu_pc = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, pc), "PC");
+ cpu_sr = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, sr), "SR");
+ cpu_sr_m = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, sr_m), "SR_M");
+ cpu_sr_q = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, sr_q), "SR_Q");
+ cpu_sr_t = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, sr_t), "SR_T");
+ cpu_ssr = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, ssr), "SSR");
+ cpu_spc = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, spc), "SPC");
+ cpu_gbr = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, gbr), "GBR");
+ cpu_vbr = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, vbr), "VBR");
+ cpu_sgr = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, sgr), "SGR");
+ cpu_dbr = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, dbr), "DBR");
+ cpu_mach = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, mach), "MACH");
+ cpu_macl = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, macl), "MACL");
+ cpu_pr = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, pr), "PR");
+ cpu_fpscr = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, fpscr), "FPSCR");
+ cpu_fpul = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, fpul), "FPUL");
+
+ cpu_flags = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, flags), "_flags_");
+ cpu_delayed_pc = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, delayed_pc),
+ "_delayed_pc_");
+ cpu_ldst = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, ldst), "_ldst_");
+
+ for (i = 0; i < 32; i++)
+ cpu_fregs[i] = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUSH4State, fregs[i]),
+ fregnames[i]);
+
+ done_init = 1;
+}
+
+void superh_cpu_dump_state(CPUState *cs, FILE *f,
+ fprintf_function cpu_fprintf, int flags)
+{
+ SuperHCPU *cpu = SUPERH_CPU(cs);
+ CPUSH4State *env = &cpu->env;
+ int i;
+ cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
+ env->pc, cpu_read_sr(env), env->pr, env->fpscr);
+ cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
+ env->spc, env->ssr, env->gbr, env->vbr);
+ cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
+ env->sgr, env->dbr, env->delayed_pc, env->fpul);
+ for (i = 0; i < 24; i += 4) {
+ cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
+ i, env->gregs[i], i + 1, env->gregs[i + 1],
+ i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
+ }
+ if (env->flags & DELAY_SLOT) {
+ cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
+ env->delayed_pc);
+ } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
+ cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
+ env->delayed_pc);
+ }
+}
+
+static void gen_read_sr(TCGv dst)
+{
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_shli_i32(t0, cpu_sr_q, SR_Q);
+ tcg_gen_or_i32(dst, dst, t0);
+ tcg_gen_shli_i32(t0, cpu_sr_m, SR_M);
+ tcg_gen_or_i32(dst, dst, t0);
+ tcg_gen_shli_i32(t0, cpu_sr_t, SR_T);
+ tcg_gen_or_i32(dst, cpu_sr, t0);
+ tcg_temp_free_i32(t0);
+}
+
+static void gen_write_sr(TCGv src)
+{
+ tcg_gen_andi_i32(cpu_sr, src,
+ ~((1u << SR_Q) | (1u << SR_M) | (1u << SR_T)));
+ tcg_gen_shri_i32(cpu_sr_q, src, SR_Q);
+ tcg_gen_andi_i32(cpu_sr_q, cpu_sr_q, 1);
+ tcg_gen_shri_i32(cpu_sr_m, src, SR_M);
+ tcg_gen_andi_i32(cpu_sr_m, cpu_sr_m, 1);
+ tcg_gen_shri_i32(cpu_sr_t, src, SR_T);
+ tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
+}
+
+static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
+{
+ if (unlikely(ctx->singlestep_enabled)) {
+ return false;
+ }
+
+#ifndef CONFIG_USER_ONLY
+ return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
+#else
+ return true;
+#endif
+}
+
+static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
+{
+ if (use_goto_tb(ctx, dest)) {
+ /* Use a direct jump if in same page and singlestep not enabled */
+ tcg_gen_goto_tb(n);
+ tcg_gen_movi_i32(cpu_pc, dest);
+ tcg_gen_exit_tb((uintptr_t)ctx->tb + n);
+ } else {
+ tcg_gen_movi_i32(cpu_pc, dest);
+ if (ctx->singlestep_enabled)
+ gen_helper_debug(cpu_env);
+ tcg_gen_exit_tb(0);
+ }
+}
+
+static void gen_jump(DisasContext * ctx)
+{
+ if (ctx->delayed_pc == (uint32_t) - 1) {
+ /* Target is not statically known; it necessarily comes from a
+ delayed jump, since immediate jumps are conditional jumps */
+ tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
+ if (ctx->singlestep_enabled)
+ gen_helper_debug(cpu_env);
+ tcg_gen_exit_tb(0);
+ } else {
+ gen_goto_tb(ctx, 0, ctx->delayed_pc);
+ }
+}
+
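+/* Record the delay slot branch target and set DELAY_SLOT_TRUE when SR_T
+ * matches the required condition t.
+ */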
+static inline void gen_branch_slot(uint32_t delayed_pc, int t)
+{
+ TCGLabel *label = gen_new_label();
+ tcg_gen_movi_i32(cpu_delayed_pc, delayed_pc);
+ tcg_gen_brcondi_i32(t ? TCG_COND_EQ : TCG_COND_NE, cpu_sr_t, 0, label);
+ tcg_gen_ori_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
+ gen_set_label(label);
+}
+
+/* Immediate conditional jump (bt or bf) */
+static void gen_conditional_jump(DisasContext * ctx,
+ target_ulong ift, target_ulong ifnott)
+{
+ TCGLabel *l1 = gen_new_label();
+ tcg_gen_brcondi_i32(TCG_COND_NE, cpu_sr_t, 0, l1);
+ gen_goto_tb(ctx, 0, ifnott);
+ gen_set_label(l1);
+ gen_goto_tb(ctx, 1, ift);
+}
+
+/* Delayed conditional jump (bt or bf) */
+static void gen_delayed_conditional_jump(DisasContext * ctx)
+{
+ TCGLabel *l1;
+ TCGv ds;
+
+ l1 = gen_new_label();
+ ds = tcg_temp_new();
+ tcg_gen_andi_i32(ds, cpu_flags, DELAY_SLOT_TRUE);
+ tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
+ gen_goto_tb(ctx, 1, ctx->pc + 2);
+ gen_set_label(l1);
+ tcg_gen_andi_i32(cpu_flags, cpu_flags, ~DELAY_SLOT_TRUE);
+ gen_jump(ctx);
+}
+
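+/* Write flags back to cpu_flags, preserving only the dynamically
+ computed DELAY_SLOT_TRUE bit of the current value. */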
+static inline void gen_store_flags(uint32_t flags)
+{
+ tcg_gen_andi_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
+ tcg_gen_ori_i32(cpu_flags, cpu_flags, flags);
+}
+
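+/* A double-precision value DRn is kept in the pair FRn:FRn+1, with the
+ even register FRn holding the high 32 bits. */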
+static inline void gen_load_fpr64(TCGv_i64 t, int reg)
+{
+ tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
+}
+
+static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(tmp, t);
+ tcg_gen_mov_i32(cpu_fregs[reg + 1], tmp);
+ tcg_gen_shri_i64(t, t, 32);
+ tcg_gen_extrl_i64_i32(tmp, t);
+ tcg_gen_mov_i32(cpu_fregs[reg], tmp);
+ tcg_temp_free_i32(tmp);
+}
+
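+/* Opcode field extraction: Bhi_lo yields bits hi..lo of the 16-bit
+ opcode; the "s" variants are sign-extended. */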
+#define B3_0 (ctx->opcode & 0xf)
+#define B6_4 ((ctx->opcode >> 4) & 0x7)
+#define B7_4 ((ctx->opcode >> 4) & 0xf)
+#define B7_0 (ctx->opcode & 0xff)
+#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
+#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
+ (ctx->opcode & 0xfff))
+#define B11_8 ((ctx->opcode >> 8) & 0xf)
+#define B15_12 ((ctx->opcode >> 12) & 0xf)
+
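+/* R0..R7 are banked: when SR.MD and SR.RB are both set, REG() maps them
+ to gregs[16..23], while ALTREG() always selects the inactive bank
+ (used by the ldc/stc Rm_BANK instructions). */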
+#define REG(x) ((x) < 8 && (ctx->flags & (1u << SR_MD))\
+ && (ctx->flags & (1u << SR_RB))\
+ ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))
+
+#define ALTREG(x) ((x) < 8 && (!(ctx->flags & (1u << SR_MD))\
+ || !(ctx->flags & (1u << SR_RB)))\
+ ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))
+
+#define FREG(x) (ctx->flags & FPSCR_FR ? (x) ^ 0x10 : (x))
+#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
+#define XREG(x) (ctx->flags & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
+#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
+
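+/* The CHECK_* macros raise the appropriate exception and abort the
+ translation of the current instruction when it appears in an illegal
+ context: inside a delay slot, in user mode, or with the FPU disabled. */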
+#define CHECK_NOT_DELAY_SLOT \
+ if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) \
+ { \
+ tcg_gen_movi_i32(cpu_pc, ctx->pc); \
+ gen_helper_raise_slot_illegal_instruction(cpu_env); \
+ ctx->bstate = BS_BRANCH; \
+ return; \
+ }
+
+#define CHECK_PRIVILEGED \
+ if (IS_USER(ctx)) { \
+ tcg_gen_movi_i32(cpu_pc, ctx->pc); \
+ if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
+ gen_helper_raise_slot_illegal_instruction(cpu_env); \
+ } else { \
+ gen_helper_raise_illegal_instruction(cpu_env); \
+ } \
+ ctx->bstate = BS_BRANCH; \
+ return; \
+ }
+
+#define CHECK_FPU_ENABLED \
+ if (ctx->flags & (1u << SR_FD)) { \
+ tcg_gen_movi_i32(cpu_pc, ctx->pc); \
+ if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
+ gen_helper_raise_slot_fpu_disable(cpu_env); \
+ } else { \
+ gen_helper_raise_fpu_disable(cpu_env); \
+ } \
+ ctx->bstate = BS_BRANCH; \
+ return; \
+ }
+
+static void _decode_opc(DisasContext * ctx)
+{
+ /* This code tries to make movca.l emulation sufficiently
+ accurate for Linux purposes. This instruction writes
+ memory, and prior to that, always allocates a cache line.
+ It is used in two contexts:
+ - in memcpy, where data is copied in blocks, the first write
+ to a block uses movca.l for performance.
+ - in arch/sh/mm/cache-sh4.c, the movca.l + ocbi combination is used
+ to flush the cache. Here, the data written by movca.l is never
+ supposed to reach memory, and the data written is just bogus.
+
+ To simulate this, when we emulate movca.l we store the value to memory,
+ but we also remember the previous content. If we then see ocbi, we check
+ whether a movca.l for that address was done previously. If so, the write
+ should not have hit the memory, so we restore the previous content.
+ When we see an instruction that is neither movca.l
+ nor ocbi, the previous content is discarded.
+
+ To optimize, we only try to flush stores at the start of a
+ TB, or if we already saw movca.l in this TB and did not flush stores
+ yet. */
+ if (ctx->has_movcal)
+ {
+ int opcode = ctx->opcode & 0xf0ff;
+ if (opcode != 0x0093 /* ocbi */
+ && opcode != 0x00c3 /* movca.l */)
+ {
+ gen_helper_discard_movcal_backup(cpu_env);
+ ctx->has_movcal = 0;
+ }
+ }
+
+#if 0
+ fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
+#endif
+
+ switch (ctx->opcode) {
+ case 0x0019: /* div0u */
+ tcg_gen_movi_i32(cpu_sr_m, 0);
+ tcg_gen_movi_i32(cpu_sr_q, 0);
+ tcg_gen_movi_i32(cpu_sr_t, 0);
+ return;
+ case 0x000b: /* rts */
+ CHECK_NOT_DELAY_SLOT
+ tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
+ ctx->flags |= DELAY_SLOT;
+ ctx->delayed_pc = (uint32_t) - 1;
+ return;
+ case 0x0028: /* clrmac */
+ tcg_gen_movi_i32(cpu_mach, 0);
+ tcg_gen_movi_i32(cpu_macl, 0);
+ return;
+ case 0x0048: /* clrs */
+ tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_S));
+ return;
+ case 0x0008: /* clrt */
+ tcg_gen_movi_i32(cpu_sr_t, 0);
+ return;
+ case 0x0038: /* ldtlb */
+ CHECK_PRIVILEGED
+ gen_helper_ldtlb(cpu_env);
+ return;
+ case 0x002b: /* rte */
+ CHECK_PRIVILEGED
+ CHECK_NOT_DELAY_SLOT
+ gen_write_sr(cpu_ssr);
+ tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
+ ctx->flags |= DELAY_SLOT;
+ ctx->delayed_pc = (uint32_t) - 1;
+ return;
+ case 0x0058: /* sets */
+ tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
+ return;
+ case 0x0018: /* sett */
+ tcg_gen_movi_i32(cpu_sr_t, 1);
+ return;
+ case 0xfbfd: /* frchg */
+ tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
+ ctx->bstate = BS_STOP;
+ return;
+ case 0xf3fd: /* fschg */
+ tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
+ ctx->bstate = BS_STOP;
+ return;
+ case 0x0009: /* nop */
+ return;
+ case 0x001b: /* sleep */
+ CHECK_PRIVILEGED
+ tcg_gen_movi_i32(cpu_pc, ctx->pc + 2);
+ gen_helper_sleep(cpu_env);
+ return;
+ }
+
+ switch (ctx->opcode & 0xf000) {
+ case 0x1000: /* mov.l Rm,@(disp,Rn) */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
+ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0x5000: /* mov.l @(disp,Rm),Rn */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
+ tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0xe000: /* mov #imm,Rn */
+ tcg_gen_movi_i32(REG(B11_8), B7_0s);
+ return;
+ case 0x9000: /* mov.w @(disp,PC),Rn */
+ {
+ TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2);
+ tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0xd000: /* mov.l @(disp,PC),Rn */
+ {
+ TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3);
+ tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0x7000: /* add #imm,Rn */
+ tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
+ return;
+ case 0xa000: /* bra disp */
+ CHECK_NOT_DELAY_SLOT
+ ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
+ tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
+ ctx->flags |= DELAY_SLOT;
+ return;
+ case 0xb000: /* bsr disp */
+ CHECK_NOT_DELAY_SLOT
+ tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
+ ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
+ tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
+ ctx->flags |= DELAY_SLOT;
+ return;
+ }
+
+ switch (ctx->opcode & 0xf00f) {
+ case 0x6003: /* mov Rm,Rn */
+ tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
+ return;
+ case 0x2000: /* mov.b Rm,@Rn */
+ tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
+ return;
+ case 0x2001: /* mov.w Rm,@Rn */
+ tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUW);
+ return;
+ case 0x2002: /* mov.l Rm,@Rn */
+ tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
+ return;
+ case 0x6000: /* mov.b @Rm,Rn */
+ tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
+ return;
+ case 0x6001: /* mov.w @Rm,Rn */
+ tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
+ return;
+ case 0x6002: /* mov.l @Rm,Rn */
+ tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
+ return;
+ case 0x2004: /* mov.b Rm,@-Rn */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_subi_i32(addr, REG(B11_8), 1);
+ /* might cause re-execution */
+ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
+ tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0x2005: /* mov.w Rm,@-Rn */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_subi_i32(addr, REG(B11_8), 2);
+ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
+ tcg_gen_mov_i32(REG(B11_8), addr);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0x2006: /* mov.l Rm,@-Rn */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_subi_i32(addr, REG(B11_8), 4);
+ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
+ tcg_gen_mov_i32(REG(B11_8), addr);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0x6004: /* mov.b @Rm+,Rn */
+ tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
+ if ( B11_8 != B7_4 )
+ tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
+ return;
+ case 0x6005: /* mov.w @Rm+,Rn */
+ tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
+ if ( B11_8 != B7_4 )
+ tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
+ return;
+ case 0x6006: /* mov.l @Rm+,Rn */
+ tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
+ if ( B11_8 != B7_4 )
+ tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
+ return;
+ case 0x0004: /* mov.b Rm,@(R0,Rn) */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_add_i32(addr, REG(B11_8), REG(0));
+ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0x0005: /* mov.w Rm,@(R0,Rn) */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_add_i32(addr, REG(B11_8), REG(0));
+ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0x0006: /* mov.l Rm,@(R0,Rn) */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_add_i32(addr, REG(B11_8), REG(0));
+ tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0x000c: /* mov.b @(R0,Rm),Rn */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_add_i32(addr, REG(B7_4), REG(0));
+ tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0x000d: /* mov.w @(R0,Rm),Rn */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_add_i32(addr, REG(B7_4), REG(0));
+ tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0x000e: /* mov.l @(R0,Rm),Rn */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_add_i32(addr, REG(B7_4), REG(0));
+ tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0x6008: /* swap.b Rm,Rn */
+ {
+ TCGv low = tcg_temp_new();
+ tcg_gen_ext16u_i32(low, REG(B7_4));
+ tcg_gen_bswap16_i32(low, low);
+ tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16);
+ tcg_temp_free(low);
+ }
+ return;
+ case 0x6009: /* swap.w Rm,Rn */
+ tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
+ return;
+ case 0x200d: /* xtrct Rm,Rn */
+ {
+ TCGv high, low;
+ high = tcg_temp_new();
+ tcg_gen_shli_i32(high, REG(B7_4), 16);
+ low = tcg_temp_new();
+ tcg_gen_shri_i32(low, REG(B11_8), 16);
+ tcg_gen_or_i32(REG(B11_8), high, low);
+ tcg_temp_free(low);
+ tcg_temp_free(high);
+ }
+ return;
+ case 0x300c: /* add Rm,Rn */
+ tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
+ return;
+ case 0x300e: /* addc Rm,Rn */
+ {
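+ /* addc: Rn = Rn + Rm + T, with T receiving the carry out. Two add2
+ steps track the 32-bit carry without needing a 64-bit temporary. */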
+ TCGv t0, t1;
+ t0 = tcg_const_tl(0);
+ t1 = tcg_temp_new();
+ tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
+ tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
+ REG(B11_8), t0, t1, cpu_sr_t);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ }
+ return;
+ case 0x300f: /* addv Rm,Rn */
+ {
+ TCGv t0, t1, t2;
+ t0 = tcg_temp_new();
+ tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
+ t1 = tcg_temp_new();
+ tcg_gen_xor_i32(t1, t0, REG(B11_8));
+ t2 = tcg_temp_new();
+ tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
+ tcg_gen_andc_i32(cpu_sr_t, t1, t2);
+ tcg_temp_free(t2);
+ tcg_gen_shri_i32(cpu_sr_t, cpu_sr_t, 31);
+ tcg_temp_free(t1);
+ tcg_gen_mov_i32(REG(B11_8), t0);
+ tcg_temp_free(t0);
+ }
+ return;
+ case 0x2009: /* and Rm,Rn */
+ tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
+ return;
+ case 0x3000: /* cmp/eq Rm,Rn */
+ tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), REG(B7_4));
+ return;
+ case 0x3003: /* cmp/ge Rm,Rn */
+ tcg_gen_setcond_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), REG(B7_4));
+ return;
+ case 0x3007: /* cmp/gt Rm,Rn */
+ tcg_gen_setcond_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), REG(B7_4));
+ return;
+ case 0x3006: /* cmp/hi Rm,Rn */
+ tcg_gen_setcond_i32(TCG_COND_GTU, cpu_sr_t, REG(B11_8), REG(B7_4));
+ return;
+ case 0x3002: /* cmp/hs Rm,Rn */
+ tcg_gen_setcond_i32(TCG_COND_GEU, cpu_sr_t, REG(B11_8), REG(B7_4));
+ return;
+ case 0x200c: /* cmp/str Rm,Rn */
+ {
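+ /* Set T if any byte of Rm equals the corresponding byte of Rn.
+ With cmp2 = Rm ^ Rn, (cmp2 - 0x01010101) & ~cmp2 & 0x80808080 is
+ non-zero iff some byte of cmp2 is zero, i.e. some byte pair matches. */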
+ TCGv cmp1 = tcg_temp_new();
+ TCGv cmp2 = tcg_temp_new();
+ tcg_gen_xor_i32(cmp2, REG(B7_4), REG(B11_8));
+ tcg_gen_subi_i32(cmp1, cmp2, 0x01010101);
+ tcg_gen_andc_i32(cmp1, cmp1, cmp2);
+ tcg_gen_andi_i32(cmp1, cmp1, 0x80808080);
+ tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_t, cmp1, 0);
+ tcg_temp_free(cmp2);
+ tcg_temp_free(cmp1);
+ }
+ return;
+ case 0x2007: /* div0s Rm,Rn */
+ tcg_gen_shri_i32(cpu_sr_q, REG(B11_8), 31); /* SR_Q */
+ tcg_gen_shri_i32(cpu_sr_m, REG(B7_4), 31); /* SR_M */
+ tcg_gen_xor_i32(cpu_sr_t, cpu_sr_q, cpu_sr_m); /* SR_T */
+ return;
+ case 0x3004: /* div1 Rm,Rn */
+ {
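+ /* One step of the SH non-restoring division: used after div0u/div0s
+ and repeated once per quotient bit. */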
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGv zero = tcg_const_i32(0);
+
+ /* shift left arg1, saving the bit being pushed out and inserting
+ T on the right */
+ tcg_gen_shri_i32(t0, REG(B11_8), 31);
+ tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
+ tcg_gen_or_i32(REG(B11_8), REG(B11_8), cpu_sr_t);
+
+ /* Add or subtract arg0 from arg1 depending if Q == M. To avoid
+ using 64-bit temps, we compute arg0's high part from q ^ m, so
+ that it is 0x00000000 when adding the value or 0xffffffff when
+ subtracting it. */
+ tcg_gen_xor_i32(t1, cpu_sr_q, cpu_sr_m);
+ tcg_gen_subi_i32(t1, t1, 1);
+ tcg_gen_neg_i32(t2, REG(B7_4));
+ tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, zero, REG(B7_4), t2);
+ tcg_gen_add2_i32(REG(B11_8), t1, REG(B11_8), zero, t2, t1);
+
+ /* compute T and Q depending on carry */
+ tcg_gen_andi_i32(t1, t1, 1);
+ tcg_gen_xor_i32(t1, t1, t0);
+ tcg_gen_xori_i32(cpu_sr_t, t1, 1);
+ tcg_gen_xor_i32(cpu_sr_q, cpu_sr_m, t1);
+
+ tcg_temp_free(zero);
+ tcg_temp_free(t2);
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+ }
+ return;
+ case 0x300d: /* dmuls.l Rm,Rn */
+ tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
+ return;
+ case 0x3005: /* dmulu.l Rm,Rn */
+ tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
+ return;
+ case 0x600e: /* exts.b Rm,Rn */
+ tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
+ return;
+ case 0x600f: /* exts.w Rm,Rn */
+ tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
+ return;
+ case 0x600c: /* extu.b Rm,Rn */
+ tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
+ return;
+ case 0x600d: /* extu.w Rm,Rn */
+ tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
+ return;
+ case 0x000f: /* mac.l @Rm+,@Rn+ */
+ {
+ TCGv arg0, arg1;
+ arg0 = tcg_temp_new();
+ tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
+ arg1 = tcg_temp_new();
+ tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
+ gen_helper_macl(cpu_env, arg0, arg1);
+ tcg_temp_free(arg1);
+ tcg_temp_free(arg0);
+ tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
+ tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
+ }
+ return;
+ case 0x400f: /* mac.w @Rm+,@Rn+ */
+ {
+ TCGv arg0, arg1;
+ arg0 = tcg_temp_new();
+ tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
+ arg1 = tcg_temp_new();
+ tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
+ gen_helper_macw(cpu_env, arg0, arg1);
+ tcg_temp_free(arg1);
+ tcg_temp_free(arg0);
+ tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
+ tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
+ }
+ return;
+ case 0x0007: /* mul.l Rm,Rn */
+ tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
+ return;
+ case 0x200f: /* muls.w Rm,Rn */
+ {
+ TCGv arg0, arg1;
+ arg0 = tcg_temp_new();
+ tcg_gen_ext16s_i32(arg0, REG(B7_4));
+ arg1 = tcg_temp_new();
+ tcg_gen_ext16s_i32(arg1, REG(B11_8));
+ tcg_gen_mul_i32(cpu_macl, arg0, arg1);
+ tcg_temp_free(arg1);
+ tcg_temp_free(arg0);
+ }
+ return;
+ case 0x200e: /* mulu.w Rm,Rn */
+ {
+ TCGv arg0, arg1;
+ arg0 = tcg_temp_new();
+ tcg_gen_ext16u_i32(arg0, REG(B7_4));
+ arg1 = tcg_temp_new();
+ tcg_gen_ext16u_i32(arg1, REG(B11_8));
+ tcg_gen_mul_i32(cpu_macl, arg0, arg1);
+ tcg_temp_free(arg1);
+ tcg_temp_free(arg0);
+ }
+ return;
+ case 0x600b: /* neg Rm,Rn */
+ tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
+ return;
+ case 0x600a: /* negc Rm,Rn */
+ {
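+ /* negc: Rn = 0 - Rm - T, with T receiving the borrow. */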
+ TCGv t0 = tcg_const_i32(0);
+ tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
+ REG(B7_4), t0, cpu_sr_t, t0);
+ tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
+ t0, t0, REG(B11_8), cpu_sr_t);
+ tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
+ tcg_temp_free(t0);
+ }
+ return;
+ case 0x6007: /* not Rm,Rn */
+ tcg_gen_not_i32(REG(B11_8), REG(B7_4));
+ return;
+ case 0x200b: /* or Rm,Rn */
+ tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
+ return;
+ case 0x400c: /* shad Rm,Rn */
+ {
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+
+ tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
+
+ /* positive case: shift to the left */
+ tcg_gen_shl_i32(t1, REG(B11_8), t0);
+
+ /* negative case: shift to the right in two steps to
+ correctly handle the -32 case */
+ tcg_gen_xori_i32(t0, t0, 0x1f);
+ tcg_gen_sar_i32(t2, REG(B11_8), t0);
+ tcg_gen_sari_i32(t2, t2, 1);
+
+ /* select between the two cases */
+ tcg_gen_movi_i32(t0, 0);
+ tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ }
+ return;
+ case 0x400d: /* shld Rm,Rn */
+ {
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+
+ tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
+
+ /* positive case: shift to the left */
+ tcg_gen_shl_i32(t1, REG(B11_8), t0);
+
+ /* negative case: shift to the right in two steps to
+ correctly handle the -32 case */
+ tcg_gen_xori_i32(t0, t0, 0x1f);
+ tcg_gen_shr_i32(t2, REG(B11_8), t0);
+ tcg_gen_shri_i32(t2, t2, 1);
+
+ /* select between the two cases */
+ tcg_gen_movi_i32(t0, 0);
+ tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ }
+ return;
+ case 0x3008: /* sub Rm,Rn */
+ tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
+ return;
+ case 0x300a: /* subc Rm,Rn */
+ {
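+ /* subc: Rn = Rn - Rm - T, with T receiving the borrow. */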
+ TCGv t0, t1;
+ t0 = tcg_const_tl(0);
+ t1 = tcg_temp_new();
+ tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
+ tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
+ REG(B11_8), t0, t1, cpu_sr_t);
+ tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+ }
+ return;
+ case 0x300b: /* subv Rm,Rn */
+ {
+ TCGv t0, t1, t2;
+ t0 = tcg_temp_new();
+ tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
+ t1 = tcg_temp_new();
+ tcg_gen_xor_i32(t1, t0, REG(B7_4));
+ t2 = tcg_temp_new();
+ tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
+ tcg_gen_and_i32(t1, t1, t2);
+ tcg_temp_free(t2);
+ tcg_gen_shri_i32(cpu_sr_t, t1, 31);
+ tcg_temp_free(t1);
+ tcg_gen_mov_i32(REG(B11_8), t0);
+ tcg_temp_free(t0);
+ }
+ return;
+ case 0x2008: /* tst Rm,Rn */
+ {
+ TCGv val = tcg_temp_new();
+ tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
+ tcg_temp_free(val);
+ }
+ return;
+ case 0x200a: /* xor Rm,Rn */
+ tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
+ return;
+ case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
+ CHECK_FPU_ENABLED
+ if (ctx->flags & FPSCR_SZ) {
+ TCGv_i64 fp = tcg_temp_new_i64();
+ gen_load_fpr64(fp, XREG(B7_4));
+ gen_store_fpr64(fp, XREG(B11_8));
+ tcg_temp_free_i64(fp);
+ } else {
+ tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
+ }
+ return;
+ case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
+ CHECK_FPU_ENABLED
+ if (ctx->flags & FPSCR_SZ) {
+ TCGv addr_hi = tcg_temp_new();
+ int fr = XREG(B7_4);
+ tcg_gen_addi_i32(addr_hi, REG(B11_8), 4);
+ tcg_gen_qemu_st_i32(cpu_fregs[fr], REG(B11_8),
+ ctx->memidx, MO_TEUL);
+ tcg_gen_qemu_st_i32(cpu_fregs[fr+1], addr_hi,
+ ctx->memidx, MO_TEUL);
+ tcg_temp_free(addr_hi);
+ } else {
+ tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], REG(B11_8),
+ ctx->memidx, MO_TEUL);
+ }
+ return;
+ case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
+ CHECK_FPU_ENABLED
+ if (ctx->flags & FPSCR_SZ) {
+ TCGv addr_hi = tcg_temp_new();
+ int fr = XREG(B11_8);
+ tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
+ tcg_gen_qemu_ld_i32(cpu_fregs[fr], REG(B7_4), ctx->memidx, MO_TEUL);
+ tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr_hi, ctx->memidx, MO_TEUL);
+ tcg_temp_free(addr_hi);
+ } else {
+ tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], REG(B7_4),
+ ctx->memidx, MO_TEUL);
+ }
+ return;
+ case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
+ CHECK_FPU_ENABLED
+ if (ctx->flags & FPSCR_SZ) {
+ TCGv addr_hi = tcg_temp_new();
+ int fr = XREG(B11_8);
+ tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
+ tcg_gen_qemu_ld_i32(cpu_fregs[fr], REG(B7_4), ctx->memidx, MO_TEUL);
+ tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr_hi, ctx->memidx, MO_TEUL);
+ tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
+ tcg_temp_free(addr_hi);
+ } else {
+ tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], REG(B7_4),
+ ctx->memidx, MO_TEUL);
+ tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
+ }
+ return;
+ case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
+ CHECK_FPU_ENABLED
+ TCGv addr = tcg_temp_new_i32();
+ tcg_gen_subi_i32(addr, REG(B11_8), 4);
+ if (ctx->flags & FPSCR_SZ) {
+ int fr = XREG(B7_4);
+ tcg_gen_qemu_st_i32(cpu_fregs[fr+1], addr, ctx->memidx, MO_TEUL);
+ tcg_gen_subi_i32(addr, addr, 4);
+ tcg_gen_qemu_st_i32(cpu_fregs[fr], addr, ctx->memidx, MO_TEUL);
+ } else {
+ tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], addr,
+ ctx->memidx, MO_TEUL);
+ }
+ tcg_gen_mov_i32(REG(B11_8), addr);
+ tcg_temp_free(addr);
+ return;
+ case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rn - FPSCR: Nothing */
+ CHECK_FPU_ENABLED
+ {
+ TCGv addr = tcg_temp_new_i32();
+ tcg_gen_add_i32(addr, REG(B7_4), REG(0));
+ if (ctx->flags & FPSCR_SZ) {
+ int fr = XREG(B11_8);
+ tcg_gen_qemu_ld_i32(cpu_fregs[fr], addr,
+ ctx->memidx, MO_TEUL);
+ tcg_gen_addi_i32(addr, addr, 4);
+ tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr,
+ ctx->memidx, MO_TEUL);
+ } else {
+ tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], addr,
+ ctx->memidx, MO_TEUL);
+ }
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0xf007: /* fmov {F,D,X}Rm,@(R0,Rn) - FPSCR: Nothing */
+ CHECK_FPU_ENABLED
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_add_i32(addr, REG(B11_8), REG(0));
+ if (ctx->flags & FPSCR_SZ) {
+ int fr = XREG(B7_4);
+ tcg_gen_qemu_st_i32(cpu_fregs[fr], addr,
+ ctx->memidx, MO_TEUL);
+ tcg_gen_addi_i32(addr, addr, 4);
+ tcg_gen_qemu_st_i32(cpu_fregs[fr+1], addr,
+ ctx->memidx, MO_TEUL);
+ } else {
+ tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], addr,
+ ctx->memidx, MO_TEUL);
+ }
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
+ case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
+ case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
+ case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
+ case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
+ case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
+ {
+ CHECK_FPU_ENABLED
+ if (ctx->flags & FPSCR_PR) {
+ TCGv_i64 fp0, fp1;
+
+ if (ctx->opcode & 0x0110)
+ break; /* illegal instruction */
+ fp0 = tcg_temp_new_i64();
+ fp1 = tcg_temp_new_i64();
+ gen_load_fpr64(fp0, DREG(B11_8));
+ gen_load_fpr64(fp1, DREG(B7_4));
+ switch (ctx->opcode & 0xf00f) {
+ case 0xf000: /* fadd Rm,Rn */
+ gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
+ break;
+ case 0xf001: /* fsub Rm,Rn */
+ gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
+ break;
+ case 0xf002: /* fmul Rm,Rn */
+ gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
+ break;
+ case 0xf003: /* fdiv Rm,Rn */
+ gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
+ break;
+ case 0xf004: /* fcmp/eq Rm,Rn */
+ gen_helper_fcmp_eq_DT(cpu_env, fp0, fp1);
+ return;
+ case 0xf005: /* fcmp/gt Rm,Rn */
+ gen_helper_fcmp_gt_DT(cpu_env, fp0, fp1);
+ return;
+ }
+ gen_store_fpr64(fp0, DREG(B11_8));
+ tcg_temp_free_i64(fp0);
+ tcg_temp_free_i64(fp1);
+ } else {
+ switch (ctx->opcode & 0xf00f) {
+ case 0xf000: /* fadd Rm,Rn */
+ gen_helper_fadd_FT(cpu_fregs[FREG(B11_8)], cpu_env,
+ cpu_fregs[FREG(B11_8)],
+ cpu_fregs[FREG(B7_4)]);
+ break;
+ case 0xf001: /* fsub Rm,Rn */
+ gen_helper_fsub_FT(cpu_fregs[FREG(B11_8)], cpu_env,
+ cpu_fregs[FREG(B11_8)],
+ cpu_fregs[FREG(B7_4)]);
+ break;
+ case 0xf002: /* fmul Rm,Rn */
+ gen_helper_fmul_FT(cpu_fregs[FREG(B11_8)], cpu_env,
+ cpu_fregs[FREG(B11_8)],
+ cpu_fregs[FREG(B7_4)]);
+ break;
+ case 0xf003: /* fdiv Rm,Rn */
+ gen_helper_fdiv_FT(cpu_fregs[FREG(B11_8)], cpu_env,
+ cpu_fregs[FREG(B11_8)],
+ cpu_fregs[FREG(B7_4)]);
+ break;
+ case 0xf004: /* fcmp/eq Rm,Rn */
+ gen_helper_fcmp_eq_FT(cpu_env, cpu_fregs[FREG(B11_8)],
+ cpu_fregs[FREG(B7_4)]);
+ return;
+ case 0xf005: /* fcmp/gt Rm,Rn */
+ gen_helper_fcmp_gt_FT(cpu_env, cpu_fregs[FREG(B11_8)],
+ cpu_fregs[FREG(B7_4)]);
+ return;
+ }
+ }
+ }
+ return;
+ case 0xf00e: /* fmac FR0,RM,Rn */
+ {
+ CHECK_FPU_ENABLED
+ if (ctx->flags & FPSCR_PR) {
+ break; /* illegal instruction */
+ } else {
+ gen_helper_fmac_FT(cpu_fregs[FREG(B11_8)], cpu_env,
+ cpu_fregs[FREG(0)], cpu_fregs[FREG(B7_4)],
+ cpu_fregs[FREG(B11_8)]);
+ return;
+ }
+ }
+ }
+
+ switch (ctx->opcode & 0xff00) {
+ case 0xc900: /* and #imm,R0 */
+ tcg_gen_andi_i32(REG(0), REG(0), B7_0);
+ return;
+ case 0xcd00: /* and.b #imm,@(R0,GBR) */
+ {
+ TCGv addr, val;
+ addr = tcg_temp_new();
+ tcg_gen_add_i32(addr, REG(0), cpu_gbr);
+ val = tcg_temp_new();
+ tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
+ tcg_gen_andi_i32(val, val, B7_0);
+ tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
+ tcg_temp_free(val);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0x8b00: /* bf label */
+ CHECK_NOT_DELAY_SLOT
+ gen_conditional_jump(ctx, ctx->pc + 2,
+ ctx->pc + 4 + B7_0s * 2);
+ ctx->bstate = BS_BRANCH;
+ return;
+ case 0x8f00: /* bf/s label */
+ CHECK_NOT_DELAY_SLOT
+ gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 0);
+ ctx->flags |= DELAY_SLOT_CONDITIONAL;
+ return;
+ case 0x8900: /* bt label */
+ CHECK_NOT_DELAY_SLOT
+ gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2,
+ ctx->pc + 2);
+ ctx->bstate = BS_BRANCH;
+ return;
+ case 0x8d00: /* bt/s label */
+ CHECK_NOT_DELAY_SLOT
+ gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 1);
+ ctx->flags |= DELAY_SLOT_CONDITIONAL;
+ return;
+ case 0x8800: /* cmp/eq #imm,R0 */
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s);
+ return;
+ case 0xc400: /* mov.b @(disp,GBR),R0 */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
+ tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0xc500: /* mov.w @(disp,GBR),R0 */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
+ tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0xc600: /* mov.l @(disp,GBR),R0 */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
+ tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0xc000: /* mov.b R0,@(disp,GBR) */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
+ tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0xc100: /* mov.w R0,@(disp,GBR) */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
+ tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0xc200: /* mov.l R0,@(disp,GBR) */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
+ tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0x8000: /* mov.b R0,@(disp,Rn) */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
+ tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0x8100: /* mov.w R0,@(disp,Rn) */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
+ tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0x8400: /* mov.b @(disp,Rn),R0 */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
+ tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0x8500: /* mov.w @(disp,Rn),R0 */
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
+ tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0xc700: /* mova @(disp,PC),R0 */
+ tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
+ return;
+ case 0xcb00: /* or #imm,R0 */
+ tcg_gen_ori_i32(REG(0), REG(0), B7_0);
+ return;
+ case 0xcf00: /* or.b #imm,@(R0,GBR) */
+ {
+ TCGv addr, val;
+ addr = tcg_temp_new();
+ tcg_gen_add_i32(addr, REG(0), cpu_gbr);
+ val = tcg_temp_new();
+ tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
+ tcg_gen_ori_i32(val, val, B7_0);
+ tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
+ tcg_temp_free(val);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0xc300: /* trapa #imm */
+ {
+ TCGv imm;
+ CHECK_NOT_DELAY_SLOT
+ tcg_gen_movi_i32(cpu_pc, ctx->pc);
+ imm = tcg_const_i32(B7_0);
+ gen_helper_trapa(cpu_env, imm);
+ tcg_temp_free(imm);
+ ctx->bstate = BS_BRANCH;
+ }
+ return;
+ case 0xc800: /* tst #imm,R0 */
+ {
+ TCGv val = tcg_temp_new();
+ tcg_gen_andi_i32(val, REG(0), B7_0);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
+ tcg_temp_free(val);
+ }
+ return;
+ case 0xcc00: /* tst.b #imm,@(R0,GBR) */
+ {
+ TCGv val = tcg_temp_new();
+ tcg_gen_add_i32(val, REG(0), cpu_gbr);
+ tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
+ tcg_gen_andi_i32(val, val, B7_0);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
+ tcg_temp_free(val);
+ }
+ return;
+ case 0xca00: /* xor #imm,R0 */
+ tcg_gen_xori_i32(REG(0), REG(0), B7_0);
+ return;
+ case 0xce00: /* xor.b #imm,@(R0,GBR) */
+ {
+ TCGv addr, val;
+ addr = tcg_temp_new();
+ tcg_gen_add_i32(addr, REG(0), cpu_gbr);
+ val = tcg_temp_new();
+ tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
+ tcg_gen_xori_i32(val, val, B7_0);
+ tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
+ tcg_temp_free(val);
+ tcg_temp_free(addr);
+ }
+ return;
+ }
+
+ switch (ctx->opcode & 0xf08f) {
+ case 0x408e: /* ldc Rm,Rn_BANK */
+ CHECK_PRIVILEGED
+ tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
+ return;
+ case 0x4087: /* ldc.l @Rm+,Rn_BANK */
+ CHECK_PRIVILEGED
+ tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx, MO_TESL);
+ tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
+ return;
+ case 0x0082: /* stc Rm_BANK,Rn */
+ CHECK_PRIVILEGED
+ tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
+ return;
+ case 0x4083: /* stc.l Rm_BANK,@-Rn */
+ CHECK_PRIVILEGED
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_subi_i32(addr, REG(B11_8), 4);
+ tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx, MO_TEUL);
+ tcg_gen_mov_i32(REG(B11_8), addr);
+ tcg_temp_free(addr);
+ }
+ return;
+ }
+
+ switch (ctx->opcode & 0xf0ff) {
+ case 0x0023: /* braf Rn */
+ CHECK_NOT_DELAY_SLOT
+ tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4);
+ ctx->flags |= DELAY_SLOT;
+ ctx->delayed_pc = (uint32_t) - 1;
+ return;
+ case 0x0003: /* bsrf Rn */
+ CHECK_NOT_DELAY_SLOT
+ tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
+ tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
+ ctx->flags |= DELAY_SLOT;
+ ctx->delayed_pc = (uint32_t) - 1;
+ return;
+ case 0x4015: /* cmp/pl Rn */
+ tcg_gen_setcondi_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), 0);
+ return;
+ case 0x4011: /* cmp/pz Rn */
+ tcg_gen_setcondi_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), 0);
+ return;
+ case 0x4010: /* dt Rn */
+ tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), 0);
+ return;
+ case 0x402b: /* jmp @Rn */
+ CHECK_NOT_DELAY_SLOT
+ tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
+ ctx->flags |= DELAY_SLOT;
+ ctx->delayed_pc = (uint32_t) - 1;
+ return;
+ case 0x400b: /* jsr @Rn */
+ CHECK_NOT_DELAY_SLOT
+ tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
+ tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
+ ctx->flags |= DELAY_SLOT;
+ ctx->delayed_pc = (uint32_t) - 1;
+ return;
+ case 0x400e: /* ldc Rm,SR */
+ CHECK_PRIVILEGED
+ {
+ TCGv val = tcg_temp_new();
+ tcg_gen_andi_i32(val, REG(B11_8), 0x700083f3);
+ gen_write_sr(val);
+ tcg_temp_free(val);
+ ctx->bstate = BS_STOP;
+ }
+ return;
+ case 0x4007: /* ldc.l @Rm+,SR */
+ CHECK_PRIVILEGED
+ {
+ TCGv val = tcg_temp_new();
+ tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TESL);
+ tcg_gen_andi_i32(val, val, 0x700083f3);
+ gen_write_sr(val);
+ tcg_temp_free(val);
+ tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
+ ctx->bstate = BS_STOP;
+ }
+ return;
+ case 0x0002: /* stc SR,Rn */
+ CHECK_PRIVILEGED
+ gen_read_sr(REG(B11_8));
+ return;
+ case 0x4003: /* stc SR,@-Rn */
+ CHECK_PRIVILEGED
+ {
+ TCGv addr = tcg_temp_new();
+ TCGv val = tcg_temp_new();
+ tcg_gen_subi_i32(addr, REG(B11_8), 4);
+ gen_read_sr(val);
+ tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
+ tcg_gen_mov_i32(REG(B11_8), addr);
+ tcg_temp_free(val);
+ tcg_temp_free(addr);
+ }
+ return;
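+/* LD/ST/LDST expand to the cases that move a control or system register
+ to or from a general register (ldc/lds, stc/sts) together with the
+ post-increment load and pre-decrement store forms; prechk expands to
+ the required privilege or FPU checks. */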
+#define LD(reg,ldnum,ldpnum,prechk) \
+ case ldnum: \
+ prechk \
+ tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
+ return; \
+ case ldpnum: \
+ prechk \
+ tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
+ tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
+ return;
+#define ST(reg,stnum,stpnum,prechk) \
+ case stnum: \
+ prechk \
+ tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
+ return; \
+ case stpnum: \
+ prechk \
+ { \
+ TCGv addr = tcg_temp_new(); \
+ tcg_gen_subi_i32(addr, REG(B11_8), 4); \
+ tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL); \
+ tcg_gen_mov_i32(REG(B11_8), addr); \
+ tcg_temp_free(addr); \
+ } \
+ return;
+#define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
+ LD(reg,ldnum,ldpnum,prechk) \
+ ST(reg,stnum,stpnum,prechk)
+ LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
+ LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
+ LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
+ LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
+ ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
+ LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED if (!(ctx->features & SH_FEATURE_SH4A)) break;)
+ LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
+ LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
+ LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
+ LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
+ LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
+ case 0x406a: /* lds Rm,FPSCR */
+ CHECK_FPU_ENABLED
+ gen_helper_ld_fpscr(cpu_env, REG(B11_8));
+ ctx->bstate = BS_STOP;
+ return;
+ case 0x4066: /* lds.l @Rm+,FPSCR */
+ CHECK_FPU_ENABLED
+ {
+ TCGv addr = tcg_temp_new();
+ tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx, MO_TESL);
+ tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
+ gen_helper_ld_fpscr(cpu_env, addr);
+ tcg_temp_free(addr);
+ ctx->bstate = BS_STOP;
+ }
+ return;
+ case 0x006a: /* sts FPSCR,Rn */
+ CHECK_FPU_ENABLED
+ tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
+ return;
+ case 0x4062: /* sts FPSCR,@-Rn */
+ CHECK_FPU_ENABLED
+ {
+ TCGv addr, val;
+ val = tcg_temp_new();
+ tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
+ addr = tcg_temp_new();
+ tcg_gen_subi_i32(addr, REG(B11_8), 4);
+ tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
+ tcg_gen_mov_i32(REG(B11_8), addr);
+ tcg_temp_free(addr);
+ tcg_temp_free(val);
+ }
+ return;
+ case 0x00c3: /* movca.l R0,@Rn */
+ {
+ TCGv val = tcg_temp_new();
+ tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TEUL);
+ gen_helper_movcal(cpu_env, REG(B11_8), val);
+ tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
+ tcg_temp_free(val);
+ }
+ ctx->has_movcal = 1;
+ return;
+ case 0x40a9:
+ /* MOVUA.L @Rm,R0 (Rm) -> R0
+ Load non-boundary-aligned data */
+ tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
+ return;
+ case 0x40e9:
+ /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
+ Load non-boundary-aligned data */
+ tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
+ tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
+ return;
+ case 0x0029: /* movt Rn */
+ tcg_gen_mov_i32(REG(B11_8), cpu_sr_t);
+ return;
+ case 0x0073:
+ /* MOVCO.L
+ LDST -> T
+ If (T == 1) R0 -> (Rn)
+ 0 -> LDST
+ */
+ if (ctx->features & SH_FEATURE_SH4A) {
+ TCGLabel *label = gen_new_label();
+ tcg_gen_mov_i32(cpu_sr_t, cpu_ldst);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
+ tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
+ gen_set_label(label);
+ tcg_gen_movi_i32(cpu_ldst, 0);
+ return;
+ } else
+ break;
+ case 0x0063:
+ /* MOVLI.L @Rm,R0
+ 1 -> LDST
+ (Rm) -> R0
+ When interrupt/exception
+ occurred 0 -> LDST
+ */
+ if (ctx->features & SH_FEATURE_SH4A) {
+ tcg_gen_movi_i32(cpu_ldst, 0);
+ tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
+ tcg_gen_movi_i32(cpu_ldst, 1);
+ return;
+ } else
+ break;
+ case 0x0093: /* ocbi @Rn */
+ {
+ gen_helper_ocbi(cpu_env, REG(B11_8));
+ }
+ return;
+ case 0x00a3: /* ocbp @Rn */
+ case 0x00b3: /* ocbwb @Rn */
+ /* These instructions are supposed to do nothing in case of
+ a cache miss. Given that we only partially emulate caches
+ it is safe to simply ignore them. */
+ return;
+ case 0x0083: /* pref @Rn */
+ return;
+ case 0x00d3: /* prefi @Rn */
+ if (ctx->features & SH_FEATURE_SH4A)
+ return;
+ else
+ break;
+ case 0x00e3: /* icbi @Rn */
+ if (ctx->features & SH_FEATURE_SH4A)
+ return;
+ else
+ break;
+ case 0x00ab: /* synco */
+ if (ctx->features & SH_FEATURE_SH4A)
+ return;
+ else
+ break;
+ case 0x4024: /* rotcl Rn */
+ {
+ TCGv tmp = tcg_temp_new();
+ tcg_gen_mov_i32(tmp, cpu_sr_t);
+ tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
+ tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
+ tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
+ tcg_temp_free(tmp);
+ }
+ return;
+ case 0x4025: /* rotcr Rn */
+ {
+ TCGv tmp = tcg_temp_new();
+ tcg_gen_shli_i32(tmp, cpu_sr_t, 31);
+ tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
+ tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
+ tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
+ tcg_temp_free(tmp);
+ }
+ return;
+ case 0x4004: /* rotl Rn */
+ tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
+ tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
+ return;
+ case 0x4005: /* rotr Rn */
+ tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
+ tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
+ return;
+ case 0x4000: /* shll Rn */
+ case 0x4020: /* shal Rn */
+ tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
+ tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
+ return;
+ case 0x4021: /* shar Rn */
+ tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
+ tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
+ return;
+ case 0x4001: /* shlr Rn */
+ tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
+ tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
+ return;
+ case 0x4008: /* shll2 Rn */
+ tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
+ return;
+ case 0x4018: /* shll8 Rn */
+ tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
+ return;
+ case 0x4028: /* shll16 Rn */
+ tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
+ return;
+ case 0x4009: /* shlr2 Rn */
+ tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
+ return;
+ case 0x4019: /* shlr8 Rn */
+ tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
+ return;
+ case 0x4029: /* shlr16 Rn */
+ tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
+ return;
+ case 0x401b: /* tas.b @Rn */
+ {
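+ /* tas.b: load the byte at @Rn, set T if it was zero, then store it
+ back with bit 7 set. The read-modify-write is emitted here as a
+ separate load and store. */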
+ TCGv addr, val;
+ addr = tcg_temp_local_new();
+ tcg_gen_mov_i32(addr, REG(B11_8));
+ val = tcg_temp_local_new();
+ tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
+ tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
+ tcg_gen_ori_i32(val, val, 0x80);
+ tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
+ tcg_temp_free(val);
+ tcg_temp_free(addr);
+ }
+ return;
+ case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
+ CHECK_FPU_ENABLED
+ tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fpul);
+ return;
+ case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
+ CHECK_FPU_ENABLED
+ tcg_gen_mov_i32(cpu_fpul, cpu_fregs[FREG(B11_8)]);
+ return;
+ case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
+ CHECK_FPU_ENABLED
+ if (ctx->flags & FPSCR_PR) {
+ TCGv_i64 fp;
+ if (ctx->opcode & 0x0100)
+ break; /* illegal instruction */
+ fp = tcg_temp_new_i64();
+ gen_helper_float_DT(fp, cpu_env, cpu_fpul);
+ gen_store_fpr64(fp, DREG(B11_8));
+ tcg_temp_free_i64(fp);
+ } else {
+ gen_helper_float_FT(cpu_fregs[FREG(B11_8)], cpu_env, cpu_fpul);
+ }
+ return;
+ case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
+ CHECK_FPU_ENABLED
+ if (ctx->flags & FPSCR_PR) {
+ TCGv_i64 fp;
+ if (ctx->opcode & 0x0100)
+ break; /* illegal instruction */
+ fp = tcg_temp_new_i64();
+ gen_load_fpr64(fp, DREG(B11_8));
+ gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
+ tcg_temp_free_i64(fp);
+ } else {
+ gen_helper_ftrc_FT(cpu_fpul, cpu_env, cpu_fregs[FREG(B11_8)]);
+ }
+ return;
+ case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
+ CHECK_FPU_ENABLED
+ {
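+ /* Negating the high single-precision word flips the sign of both FRn
+ and, in double-precision mode, DRn, so no FPSCR_PR check is needed. */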
+ gen_helper_fneg_T(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
+ }
+ return;
+ case 0xf05d: /* fabs FRn/DRn */
+ CHECK_FPU_ENABLED
+ if (ctx->flags & FPSCR_PR) {
+ if (ctx->opcode & 0x0100)
+ break; /* illegal instruction */
+ TCGv_i64 fp = tcg_temp_new_i64();
+ gen_load_fpr64(fp, DREG(B11_8));
+ gen_helper_fabs_DT(fp, fp);
+ gen_store_fpr64(fp, DREG(B11_8));
+ tcg_temp_free_i64(fp);
+ } else {
+ gen_helper_fabs_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
+ }
+ return;
+ case 0xf06d: /* fsqrt FRn */
+ CHECK_FPU_ENABLED
+ if (ctx->flags & FPSCR_PR) {
+ if (ctx->opcode & 0x0100)
+ break; /* illegal instruction */
+ TCGv_i64 fp = tcg_temp_new_i64();
+ gen_load_fpr64(fp, DREG(B11_8));
+ gen_helper_fsqrt_DT(fp, cpu_env, fp);
+ gen_store_fpr64(fp, DREG(B11_8));
+ tcg_temp_free_i64(fp);
+ } else {
+ gen_helper_fsqrt_FT(cpu_fregs[FREG(B11_8)], cpu_env,
+ cpu_fregs[FREG(B11_8)]);
+ }
+ return;
+ case 0xf07d: /* fsrra FRn */
+ CHECK_FPU_ENABLED
+ break;
+ case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
+ CHECK_FPU_ENABLED
+ if (!(ctx->flags & FPSCR_PR)) {
+ tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0);
+ }
+ return;
+ case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
+ CHECK_FPU_ENABLED
+ if (!(ctx->flags & FPSCR_PR)) {
+ tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0x3f800000);
+ }
+ return;
+ case 0xf0ad: /* fcnvsd FPUL,DRn */
+ CHECK_FPU_ENABLED
+ {
+ TCGv_i64 fp = tcg_temp_new_i64();
+ gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
+ gen_store_fpr64(fp, DREG(B11_8));
+ tcg_temp_free_i64(fp);
+ }
+ return;
+ case 0xf0bd: /* fcnvds DRn,FPUL */
+ CHECK_FPU_ENABLED
+ {
+ TCGv_i64 fp = tcg_temp_new_i64();
+ gen_load_fpr64(fp, DREG(B11_8));
+ gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
+ tcg_temp_free_i64(fp);
+ }
+ return;
+ case 0xf0ed: /* fipr FVm,FVn */
+ CHECK_FPU_ENABLED
+ if ((ctx->flags & FPSCR_PR) == 0) {
+ TCGv m, n;
+ m = tcg_const_i32((ctx->opcode >> 8) & 3);
+ n = tcg_const_i32((ctx->opcode >> 10) & 3);
+ gen_helper_fipr(cpu_env, m, n);
+ tcg_temp_free(m);
+ tcg_temp_free(n);
+ return;
+ }
+ break;
+ case 0xf0fd: /* ftrv XMTRX,FVn */
+ CHECK_FPU_ENABLED
+ if ((ctx->opcode & 0x0300) == 0x0100 &&
+ (ctx->flags & FPSCR_PR) == 0) {
+ TCGv n;
+ n = tcg_const_i32((ctx->opcode >> 10) & 3);
+ gen_helper_ftrv(cpu_env, n);
+ tcg_temp_free(n);
+ return;
+ }
+ break;
+ }
+#if 0
+ fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
+ ctx->opcode, ctx->pc);
+ fflush(stderr);
+#endif
+ tcg_gen_movi_i32(cpu_pc, ctx->pc);
+ if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
+ gen_helper_raise_slot_illegal_instruction(cpu_env);
+ } else {
+ gen_helper_raise_illegal_instruction(cpu_env);
+ }
+ ctx->bstate = BS_BRANCH;
+}
+
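+/* Translate one instruction; if the previous instruction opened a delay
+ slot, emit the pending (possibly conditional) branch once the slot
+ instruction has been translated. */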
+static void decode_opc(DisasContext * ctx)
+{
+ uint32_t old_flags = ctx->flags;
+
+ _decode_opc(ctx);
+
+ if (old_flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
+ if (ctx->flags & DELAY_SLOT_CLEARME) {
+ gen_store_flags(0);
+ } else {
+ /* go out of the delay slot */
+ uint32_t new_flags = ctx->flags;
+ new_flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
+ gen_store_flags(new_flags);
+ }
+ ctx->flags = 0;
+ ctx->bstate = BS_BRANCH;
+ if (old_flags & DELAY_SLOT_CONDITIONAL) {
+ gen_delayed_conditional_jump(ctx);
+ } else if (old_flags & DELAY_SLOT) {
+ gen_jump(ctx);
+ }
+
+ }
+
+ /* go into a delay slot */
+ if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))
+ gen_store_flags(ctx->flags);
+}
+
+void gen_intermediate_code(CPUSH4State * env, struct TranslationBlock *tb)
+{
+ SuperHCPU *cpu = sh_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ DisasContext ctx;
+ target_ulong pc_start;
+ int num_insns;
+ int max_insns;
+
+ pc_start = tb->pc;
+ ctx.pc = pc_start;
+ ctx.flags = (uint32_t)tb->flags;
+ ctx.bstate = BS_NONE;
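+ /* MMU index 0 is used for privileged (SR.MD = 1) accesses, 1 for user. */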
+ ctx.memidx = (ctx.flags & (1u << SR_MD)) == 0 ? 1 : 0;
+ /* We don't know if the delayed pc came from a dynamic or static branch,
+ so assume it is a dynamic branch. */
+ ctx.delayed_pc = -1; /* use delayed pc from env pointer */
+ ctx.tb = tb;
+ ctx.singlestep_enabled = cs->singlestep_enabled;
+ ctx.features = env->features;
+ ctx.has_movcal = (ctx.flags & TB_FLAG_PENDING_MOVCA);
+
+ num_insns = 0;
+ max_insns = tb->cflags & CF_COUNT_MASK;
+ if (max_insns == 0) {
+ max_insns = CF_COUNT_MASK;
+ }
+ if (max_insns > TCG_MAX_INSNS) {
+ max_insns = TCG_MAX_INSNS;
+ }
+
+ gen_tb_start(tb);
+ while (ctx.bstate == BS_NONE && !tcg_op_buf_full()) {
+ tcg_gen_insn_start(ctx.pc, ctx.flags);
+ num_insns++;
+
+ if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
+ /* We have hit a breakpoint - make sure PC is up-to-date */
+ tcg_gen_movi_i32(cpu_pc, ctx.pc);
+ gen_helper_debug(cpu_env);
+ ctx.bstate = BS_BRANCH;
+ /* The address covered by the breakpoint must be included in
+ [tb->pc, tb->pc + tb->size) in order for it to be
+ properly cleared -- thus we increment the PC here so that
+ the logic setting tb->size below does the right thing. */
+ ctx.pc += 2;
+ break;
+ }
+
+ if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ gen_io_start();
+ }
+
+ ctx.opcode = cpu_lduw_code(env, ctx.pc);
+ decode_opc(&ctx);
+ ctx.pc += 2;
+ if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
+ break;
+ if (cs->singlestep_enabled) {
+ break;
+ }
+ if (num_insns >= max_insns)
+ break;
+ if (singlestep)
+ break;
+ }
+ if (tb->cflags & CF_LAST_IO)
+ gen_io_end();
+ if (cs->singlestep_enabled) {
+ tcg_gen_movi_i32(cpu_pc, ctx.pc);
+ gen_helper_debug(cpu_env);
+ } else {
+ switch (ctx.bstate) {
+ case BS_STOP:
+ /* gen_op_interrupt_restart(); */
+ /* fall through */
+ case BS_NONE:
+ if (ctx.flags) {
+ gen_store_flags(ctx.flags | DELAY_SLOT_CLEARME);
+ }
+ gen_goto_tb(&ctx, 0, ctx.pc);
+ break;
+ case BS_EXCP:
+ /* gen_op_interrupt_restart(); */
+ tcg_gen_exit_tb(0);
+ break;
+ case BS_BRANCH:
+ default:
+ break;
+ }
+ }
+
+ gen_tb_end(tb, num_insns);
+
+ tb->size = ctx.pc - pc_start;
+ tb->icount = num_insns;
+
+#ifdef DEBUG_DISAS
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
+ && qemu_log_in_addr_range(pc_start)) {
+ qemu_log_lock();
+ qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
+ log_target_disas(cs, pc_start, ctx.pc - pc_start, 0);
+ qemu_log("\n");
+ qemu_log_unlock();
+ }
+#endif
+}
+
+void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ env->pc = data[0];
+ env->flags = data[1];
+}
diff --git a/target/sparc/Makefile.objs b/target/sparc/Makefile.objs
new file mode 100644
index 0000000000..ec905698c5
--- /dev/null
+++ b/target/sparc/Makefile.objs
@@ -0,0 +1,7 @@
+obj-$(CONFIG_SOFTMMU) += machine.o monitor.o
+obj-y += translate.o helper.o cpu.o
+obj-y += fop_helper.o cc_helper.o win_helper.o mmu_helper.o ldst_helper.o
+obj-$(TARGET_SPARC) += int32_helper.o
+obj-$(TARGET_SPARC64) += int64_helper.o
+obj-$(TARGET_SPARC64) += vis_helper.o
+obj-y += gdbstub.o
diff --git a/target/sparc/TODO b/target/sparc/TODO
new file mode 100644
index 0000000000..b8c727e858
--- /dev/null
+++ b/target/sparc/TODO
@@ -0,0 +1,88 @@
+TODO-list:
+
+CPU common:
+- Unimplemented features/bugs:
+ - Delay slot handling may fail sometimes (branch end of page, delay
+ slot next page)
+ - Atomic instructions
+ - CPU features should match real CPUs (also ASI selection)
+- Optimizations/improvements:
+ - Condition code/branch handling like x86, also for FPU?
+ - Remove remaining explicit alignment checks
+ - Global register for regwptr, so that windowed registers can be
+ accessed directly
+ - Improve Sparc32plus addressing
+ - NPC/PC static optimisations (use JUMP_TB when possible)? (Is this
+ obsolete?)
+ - Synthetic instructions
+ - MMU model dependent on CPU model
+ - Select ASI helper at translation time (on V9 only if known)
+ - KQemu/KVM support for VM only
+ - Hardware breakpoint/watchpoint support
+ - Cache emulation mode
+ - Reverse-endian pages
+ - Faster FPU emulation
+ - Busy loop detection
+
+Sparc32 CPUs:
+- Unimplemented features/bugs:
+ - Sun4/Sun4c MMUs
+ - Some V8 ASIs
+
+Sparc64 CPUs:
+- Unimplemented features/bugs:
+ - Interrupt handling
+ - Secondary address space, other MMU functions
+ - Many V9/UA2005/UA2007 ASIs
+ - Rest of V9 instructions, missing VIS instructions
+ - IG/MG/AG vs. UA2007 globals
+ - Full hypervisor support
+ - SMP/CMT
+ - Sun4v CPUs
+
+Sun4:
+- To be added
+
+Sun4c:
+- A lot of unimplemented features
+- Maybe split from Sun4m
+
+Sun4m:
+- Unimplemented features/bugs:
+ - Hardware devices do not match real boards
+ - Floppy does not work
+ - CS4231: merge with cs4231a, add DMA
+ - Add cg6, bwtwo
+ - Arbitrary resolution support
+ - PCI for MicroSparc-IIe
+ - JavaStation machines
+ - SBus slot probing, FCode ROM support
+ - SMP probing support
+ - Interrupt routing does not match real HW
+ - SuSE 7.3 keyboard sometimes unresponsive
+ - Gentoo 2004.1 SMP does not work
+ - SS600MP ledma -> lebuffer
+ - Type 5 keyboard
+ - Less fixed hardware choices
+ - DBRI audio (Am7930)
+ - BPP parallel
+ - Diagnostic switch
+ - ESP PIO mode
+
+Sun4d:
+- A lot of unimplemented features:
+ - SBI
+ - IO-unit
+- Maybe split from Sun4m
+
+Sun4u:
+- Unimplemented features/bugs:
+ - Interrupt controller
+ - PCI/IOMMU support (Simba, JIO, Tomatillo, Psycho, Schizo, Safari...)
+ - SMP
+ - Happy Meal Ethernet, flash, I2C, GPIO
+ - A lot of real machine types
+
+Sun4v:
+- A lot of unimplemented features
+ - A lot of real machine types
diff --git a/target/sparc/asi.h b/target/sparc/asi.h
new file mode 100644
index 0000000000..c9a1849600
--- /dev/null
+++ b/target/sparc/asi.h
@@ -0,0 +1,311 @@
+#ifndef _SPARC_ASI_H
+#define _SPARC_ASI_H
+
+/* asi.h: Address Space Identifier values for the sparc.
+ *
+ * Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu)
+ *
+ * Pioneer work for sun4m: Paul Hatchman (paul@sfe.com.au)
+ * Joint edition for sun4c+sun4m: Pete A. Zaitcev <zaitcev@ipmce.su>
+ */
+
+/* The first batch are for the sun4c. */
+
+#define ASI_NULL1 0x00
+#define ASI_NULL2 0x01
+
+/* sun4c and sun4 control registers and mmu/vac ops */
+#define ASI_CONTROL 0x02
+#define ASI_SEGMAP 0x03
+#define ASI_PTE 0x04
+#define ASI_HWFLUSHSEG 0x05
+#define ASI_HWFLUSHPAGE 0x06
+#define ASI_REGMAP 0x06
+#define ASI_HWFLUSHCONTEXT 0x07
+
+#define ASI_USERTXT 0x08
+#define ASI_KERNELTXT 0x09
+#define ASI_USERDATA 0x0a
+#define ASI_KERNELDATA 0x0b
+
+/* VAC Cache flushing on sun4c and sun4 */
+#define ASI_FLUSHSEG 0x0c
+#define ASI_FLUSHPG 0x0d
+#define ASI_FLUSHCTX 0x0e
+
+/* SPARCstation-5: only 6 bits are decoded. */
+/* wo = Write Only, rw = Read Write; */
+/* ss = Single Size, as = All Sizes; */
+#define ASI_M_RES00 0x00 /* Don't touch... */
+#define ASI_M_UNA01 0x01 /* Same here... */
+#define ASI_M_MXCC 0x02 /* Access to TI VIKING MXCC registers */
+#define ASI_M_FLUSH_PROBE 0x03 /* Reference MMU Flush/Probe; rw, ss */
+#define ASI_M_MMUREGS 0x04 /* MMU Registers; rw, ss */
+#define ASI_M_TLBDIAG 0x05 /* MMU TLB only Diagnostics */
+#define ASI_M_DIAGS 0x06 /* Reference MMU Diagnostics */
+#define ASI_M_IODIAG 0x07 /* MMU I/O TLB only Diagnostics */
+#define ASI_M_USERTXT 0x08 /* Same as ASI_USERTXT; rw, as */
+#define ASI_M_KERNELTXT 0x09 /* Same as ASI_KERNELTXT; rw, as */
+#define ASI_M_USERDATA 0x0A /* Same as ASI_USERDATA; rw, as */
+#define ASI_M_KERNELDATA 0x0B /* Same as ASI_KERNELDATA; rw, as */
+#define ASI_M_TXTC_TAG 0x0C /* Instruction Cache Tag; rw, ss */
+#define ASI_M_TXTC_DATA 0x0D /* Instruction Cache Data; rw, ss */
+#define ASI_M_DATAC_TAG 0x0E /* Data Cache Tag; rw, ss */
+#define ASI_M_DATAC_DATA 0x0F /* Data Cache Data; rw, ss */
+
+/* The following cache flushing ASIs work only with the 'sta'
+ * instruction. Results are unpredictable for 'swap' and 'ldstuba',
+ * so don't do it.
+ */
+
+/* These ASI flushes affect external caches too. */
+#define ASI_M_FLUSH_PAGE 0x10 /* Flush I&D Cache Line (page); wo, ss */
+#define ASI_M_FLUSH_SEG 0x11 /* Flush I&D Cache Line (seg); wo, ss */
+#define ASI_M_FLUSH_REGION 0x12 /* Flush I&D Cache Line (region); wo, ss */
+#define ASI_M_FLUSH_CTX 0x13 /* Flush I&D Cache Line (context); wo, ss */
+#define ASI_M_FLUSH_USER 0x14 /* Flush I&D Cache Line (user); wo, ss */
+
+/* Block-copy operations are available only on certain V8 cpus. */
+#define ASI_M_BCOPY 0x17 /* Block copy */
+
+/* These affect only the ICACHE and are Ross HyperSparc and TurboSparc specific. */
+#define ASI_M_IFLUSH_PAGE 0x18 /* Flush I Cache Line (page); wo, ss */
+#define ASI_M_IFLUSH_SEG 0x19 /* Flush I Cache Line (seg); wo, ss */
+#define ASI_M_IFLUSH_REGION 0x1A /* Flush I Cache Line (region); wo, ss */
+#define ASI_M_IFLUSH_CTX 0x1B /* Flush I Cache Line (context); wo, ss */
+#define ASI_M_IFLUSH_USER 0x1C /* Flush I Cache Line (user); wo, ss */
+
+/* Block-fill operations are available on certain V8 cpus */
+#define ASI_M_BFILL 0x1F
+
+/* This allows direct access to main memory, actually 0x20 to 0x2f are
+ * the available ASI's for physical ram pass-through, but I don't have
+ * any idea what the other ones do....
+ */
+
+#define ASI_M_BYPASS 0x20 /* Reference MMU bypass; rw, as */
+#define ASI_M_FBMEM 0x29 /* Graphics card frame buffer access */
+#define ASI_M_VMEUS 0x2A /* VME user 16-bit access */
+#define ASI_M_VMEPS 0x2B /* VME priv 16-bit access */
+#define ASI_M_VMEUT 0x2C /* VME user 32-bit access */
+#define ASI_M_VMEPT 0x2D /* VME priv 32-bit access */
+#define ASI_M_SBUS 0x2E /* Direct SBus access */
+#define ASI_M_CTL 0x2F /* Control Space (ECC and MXCC are here) */
+
+
+/* This is ROSS HyperSparc only. */
+#define ASI_M_FLUSH_IWHOLE 0x31 /* Flush entire ICACHE; wo, ss */
+
+/* Tsunami/Viking/TurboSparc i/d cache flash clear. */
+#define ASI_M_IC_FLCLEAR 0x36
+#define ASI_M_DC_FLCLEAR 0x37
+
+#define ASI_M_DCDR 0x39 /* Data Cache Diagnostics Register rw, ss */
+
+#define ASI_M_VIKING_TMP1 0x40 /* Emulation temporary 1 on Viking */
+/* only available on SuperSparc I */
+/* #define ASI_M_VIKING_TMP2 0x41 */ /* Emulation temporary 2 on Viking */
+
+#define ASI_M_ACTION 0x4c /* Breakpoint Action Register (GNU/Viking) */
+
+/* LEON ASI */
+#define ASI_LEON_NOCACHE 0x01
+
+#define ASI_LEON_DCACHE_MISS 0x01
+
+#define ASI_LEON_CACHEREGS 0x02
+#define ASI_LEON_IFLUSH 0x10
+#define ASI_LEON_DFLUSH 0x11
+
+#define ASI_LEON_MMUFLUSH 0x18
+#define ASI_LEON_MMUREGS 0x19
+#define ASI_LEON_BYPASS 0x1c
+#define ASI_LEON_FLUSH_PAGE 0x10
+
+/* V9 Architecture mandatory ASIs. */
+#define ASI_N 0x04 /* Nucleus */
+#define ASI_NL 0x0c /* Nucleus, little endian */
+#define ASI_AIUP 0x10 /* Primary, user */
+#define ASI_AIUS 0x11 /* Secondary, user */
+#define ASI_AIUPL 0x18 /* Primary, user, little endian */
+#define ASI_AIUSL 0x19 /* Secondary, user, little endian */
+#define ASI_P 0x80 /* Primary, implicit */
+#define ASI_S 0x81 /* Secondary, implicit */
+#define ASI_PNF 0x82 /* Primary, no fault */
+#define ASI_SNF 0x83 /* Secondary, no fault */
+#define ASI_PL 0x88 /* Primary, implicit, l-endian */
+#define ASI_SL 0x89 /* Secondary, implicit, l-endian */
+#define ASI_PNFL 0x8a /* Primary, no fault, l-endian */
+#define ASI_SNFL 0x8b /* Secondary, no fault, l-endian */
+
+/* SpitFire and later extended ASIs. The "(III)" marker designates
+ * UltraSparc-III and later specific ASIs. The "(CMT)" marker designates
+ * Chip Multi Threading specific ASIs. "(NG)" designates Niagara specific
+ * ASIs, "(4V)" designates SUN4V specific ASIs. "(NG4)" designates SPARC-T4
+ * and later ASIs.
+ */
+#define ASI_REAL 0x14 /* Real address, cachable */
+#define ASI_PHYS_USE_EC 0x14 /* PADDR, E-cachable */
+#define ASI_REAL_IO 0x15 /* Real address, non-cachable */
+#define ASI_PHYS_BYPASS_EC_E 0x15 /* PADDR, E-bit */
+#define ASI_BLK_AIUP_4V 0x16 /* (4V) Prim, user, block ld/st */
+#define ASI_BLK_AIUS_4V 0x17 /* (4V) Sec, user, block ld/st */
+#define ASI_REAL_L 0x1c /* Real address, cachable, LE */
+#define ASI_PHYS_USE_EC_L 0x1c /* PADDR, E-cachable, little endian*/
+#define ASI_REAL_IO_L 0x1d /* Real address, non-cachable, LE */
+#define ASI_PHYS_BYPASS_EC_E_L 0x1d /* PADDR, E-bit, little endian */
+#define ASI_BLK_AIUP_L_4V 0x1e /* (4V) Prim, user, block, l-endian*/
+#define ASI_BLK_AIUS_L_4V 0x1f /* (4V) Sec, user, block, l-endian */
+#define ASI_SCRATCHPAD 0x20 /* (4V) Scratch Pad Registers */
+#define ASI_MMU 0x21 /* (4V) MMU Context Registers */
+#define ASI_TWINX_AIUP 0x22 /* twin load, primary user */
+#define ASI_TWINX_AIUS 0x23 /* twin load, secondary user */
+#define ASI_BLK_INIT_QUAD_LDD_AIUS 0x23 /* (NG) init-store, twin load,
+ * secondary, user
+ */
+#define ASI_NUCLEUS_QUAD_LDD 0x24 /* Cachable, qword load */
+#define ASI_QUEUE 0x25 /* (4V) Interrupt Queue Registers */
+#define ASI_TWINX_REAL 0x26 /* twin load, real, cachable */
+#define ASI_QUAD_LDD_PHYS_4V 0x26 /* (4V) Physical, qword load */
+#define ASI_TWINX_N 0x27 /* twin load, nucleus */
+#define ASI_TWINX_AIUP_L 0x2a /* twin load, primary user, LE */
+#define ASI_TWINX_AIUS_L 0x2b /* twin load, secondary user, LE */
+#define ASI_NUCLEUS_QUAD_LDD_L 0x2c /* Cachable, qword load, l-endian */
+#define ASI_TWINX_REAL_L 0x2e /* twin load, real, cachable, LE */
+#define ASI_QUAD_LDD_PHYS_L_4V 0x2e /* (4V) Phys, qword load, l-endian */
+#define ASI_TWINX_NL 0x2f /* twin load, nucleus, LE */
+#define ASI_PCACHE_DATA_STATUS 0x30 /* (III) PCache data stat RAM diag */
+#define ASI_PCACHE_DATA 0x31 /* (III) PCache data RAM diag */
+#define ASI_PCACHE_TAG 0x32 /* (III) PCache tag RAM diag */
+#define ASI_PCACHE_SNOOP_TAG 0x33 /* (III) PCache snoop tag RAM diag */
+#define ASI_QUAD_LDD_PHYS 0x34 /* (III+) PADDR, qword load */
+#define ASI_WCACHE_VALID_BITS 0x38 /* (III) WCache Valid Bits diag */
+#define ASI_WCACHE_DATA 0x39 /* (III) WCache data RAM diag */
+#define ASI_WCACHE_TAG 0x3a /* (III) WCache tag RAM diag */
+#define ASI_WCACHE_SNOOP_TAG 0x3b /* (III) WCache snoop tag RAM diag */
+#define ASI_QUAD_LDD_PHYS_L 0x3c /* (III+) PADDR, qw-load, l-endian */
+#define ASI_SRAM_FAST_INIT 0x40 /* (III+) Fast SRAM init */
+#define ASI_CORE_AVAILABLE 0x41 /* (CMT) LP Available */
+#define ASI_CORE_ENABLE_STAT 0x41 /* (CMT) LP Enable Status */
+#define ASI_CORE_ENABLE 0x41 /* (CMT) LP Enable RW */
+#define ASI_XIR_STEERING 0x41 /* (CMT) XIR Steering RW */
+#define ASI_CORE_RUNNING_RW 0x41 /* (CMT) LP Running RW */
+#define ASI_CORE_RUNNING_W1S 0x41 /* (CMT) LP Running Write-One Set */
+#define ASI_CORE_RUNNING_W1C 0x41 /* (CMT) LP Running Write-One Clr */
+#define ASI_CORE_RUNNING_STAT 0x41 /* (CMT) LP Running Status */
+#define ASI_CMT_ERROR_STEERING 0x41 /* (CMT) Error Steering RW */
+#define ASI_DCACHE_INVALIDATE 0x42 /* (III) DCache Invalidate diag */
+#define ASI_DCACHE_UTAG 0x43 /* (III) DCache uTag diag */
+#define ASI_DCACHE_SNOOP_TAG 0x44 /* (III) DCache snoop tag RAM diag */
+#define ASI_LSU_CONTROL 0x45 /* Load-store control unit */
+#define ASI_DCU_CONTROL_REG 0x45 /* (III) DCache Unit Control reg */
+#define ASI_DCACHE_DATA 0x46 /* DCache data-ram diag access */
+#define ASI_DCACHE_TAG 0x47 /* Dcache tag/valid ram diag access*/
+#define ASI_INTR_DISPATCH_STAT 0x48 /* IRQ vector dispatch status */
+#define ASI_INTR_RECEIVE 0x49 /* IRQ vector receive status */
+#define ASI_UPA_CONFIG 0x4a /* UPA config space */
+#define ASI_JBUS_CONFIG 0x4a /* (IIIi) JBUS Config Register */
+#define ASI_SAFARI_CONFIG 0x4a /* (III) Safari Config Register */
+#define ASI_SAFARI_ADDRESS 0x4a /* (III) Safari Address Register */
+#define ASI_ESTATE_ERROR_EN 0x4b /* E-cache error enable space */
+#define ASI_AFSR 0x4c /* Async fault status register */
+#define ASI_AFAR 0x4d /* Async fault address register */
+#define ASI_EC_TAG_DATA 0x4e /* E-cache tag/valid ram diag acc */
+#define ASI_IMMU 0x50 /* Insn-MMU main register space */
+#define ASI_IMMU_TSB_8KB_PTR 0x51 /* Insn-MMU 8KB TSB pointer reg */
+#define ASI_IMMU_TSB_64KB_PTR 0x52 /* Insn-MMU 64KB TSB pointer reg */
+#define ASI_ITLB_DATA_IN 0x54 /* Insn-MMU TLB data in reg */
+#define ASI_ITLB_DATA_ACCESS 0x55 /* Insn-MMU TLB data access reg */
+#define ASI_ITLB_TAG_READ 0x56 /* Insn-MMU TLB tag read reg */
+#define ASI_IMMU_DEMAP 0x57 /* Insn-MMU TLB demap */
+#define ASI_DMMU 0x58 /* Data-MMU main register space */
+#define ASI_DMMU_TSB_8KB_PTR 0x59 /* Data-MMU 8KB TSB pointer reg */
+#define ASI_DMMU_TSB_64KB_PTR 0x5a /* Data-MMU 64KB TSB pointer reg */
+#define ASI_DMMU_TSB_DIRECT_PTR 0x5b /* Data-MMU TSB direct pointer reg */
+#define ASI_DTLB_DATA_IN 0x5c /* Data-MMU TLB data in reg */
+#define ASI_DTLB_DATA_ACCESS 0x5d /* Data-MMU TLB data access reg */
+#define ASI_DTLB_TAG_READ 0x5e /* Data-MMU TLB tag read reg */
+#define ASI_DMMU_DEMAP 0x5f /* Data-MMU TLB demap */
+#define ASI_IIU_INST_TRAP 0x60 /* (III) Instruction Breakpoint */
+#define ASI_INTR_ID 0x63 /* (CMT) Interrupt ID register */
+#define ASI_CORE_ID 0x63 /* (CMT) LP ID register */
+#define ASI_CESR_ID 0x63 /* (CMT) CESR ID register */
+#define ASI_IC_INSTR 0x66 /* Insn cache instruction ram diag */
+#define ASI_IC_TAG 0x67 /* Insn cache tag/valid ram diag */
+#define ASI_IC_STAG 0x68 /* (III) Insn cache snoop tag ram */
+#define ASI_IC_PRE_DECODE 0x6e /* Insn cache pre-decode ram diag */
+#define ASI_IC_NEXT_FIELD 0x6f /* Insn cache next-field ram diag */
+#define ASI_BRPRED_ARRAY 0x6f /* (III) Branch Prediction RAM diag*/
+#define ASI_BLK_AIUP 0x70 /* Primary, user, block load/store */
+#define ASI_BLK_AIUS 0x71 /* Secondary, user, block ld/st */
+#define ASI_MCU_CTRL_REG 0x72 /* (III) Memory controller regs */
+#define ASI_EC_DATA 0x74 /* (III) E-cache data staging reg */
+#define ASI_EC_CTRL 0x75 /* (III) E-cache control reg */
+#define ASI_EC_W 0x76 /* E-cache diag write access */
+#define ASI_UDB_ERROR_W 0x77 /* External UDB error regs W */
+#define ASI_UDB_CONTROL_W 0x77 /* External UDB control regs W */
+#define ASI_INTR_W 0x77 /* IRQ vector dispatch write */
+#define ASI_INTR_DATAN_W 0x77 /* (III) Out irq vector data reg N */
+#define ASI_INTR_DISPATCH_W 0x77 /* (III) Interrupt vector dispatch */
+#define ASI_BLK_AIUPL 0x78 /* Primary, user, little, blk ld/st*/
+#define ASI_BLK_AIUSL 0x79 /* Secondary, user, little, blk ld/st*/
+#define ASI_EC_R 0x7e /* E-cache diag read access */
+#define ASI_UDBH_ERROR_R 0x7f /* External UDB error regs rd hi */
+#define ASI_UDBL_ERROR_R 0x7f /* External UDB error regs rd low */
+#define ASI_UDBH_CONTROL_R 0x7f /* External UDB control regs rd hi */
+#define ASI_UDBL_CONTROL_R 0x7f /* External UDB control regs rd low*/
+#define ASI_INTR_R 0x7f /* IRQ vector dispatch read */
+#define ASI_INTR_DATAN_R 0x7f /* (III) In irq vector data reg N */
+#define ASI_PIC 0xb0 /* (NG4) PIC registers */
+#define ASI_PST8_P 0xc0 /* Primary, 8 8-bit, partial */
+#define ASI_PST8_S 0xc1 /* Secondary, 8 8-bit, partial */
+#define ASI_PST16_P 0xc2 /* Primary, 4 16-bit, partial */
+#define ASI_PST16_S 0xc3 /* Secondary, 4 16-bit, partial */
+#define ASI_PST32_P 0xc4 /* Primary, 2 32-bit, partial */
+#define ASI_PST32_S 0xc5 /* Secondary, 2 32-bit, partial */
+#define ASI_PST8_PL 0xc8 /* Primary, 8 8-bit, partial, L */
+#define ASI_PST8_SL 0xc9 /* Secondary, 8 8-bit, partial, L */
+#define ASI_PST16_PL 0xca /* Primary, 4 16-bit, partial, L */
+#define ASI_PST16_SL 0xcb /* Secondary, 4 16-bit, partial, L */
+#define ASI_PST32_PL 0xcc /* Primary, 2 32-bit, partial, L */
+#define ASI_PST32_SL 0xcd /* Secondary, 2 32-bit, partial, L */
+#define ASI_FL8_P 0xd0 /* Primary, 1 8-bit, fpu ld/st */
+#define ASI_FL8_S 0xd1 /* Secondary, 1 8-bit, fpu ld/st */
+#define ASI_FL16_P 0xd2 /* Primary, 1 16-bit, fpu ld/st */
+#define ASI_FL16_S 0xd3 /* Secondary, 1 16-bit, fpu ld/st */
+#define ASI_FL8_PL 0xd8 /* Primary, 1 8-bit, fpu ld/st, L */
+#define ASI_FL8_SL 0xd9 /* Secondary, 1 8-bit, fpu ld/st, L*/
+#define ASI_FL16_PL 0xda /* Primary, 1 16-bit, fpu ld/st, L */
+#define ASI_FL16_SL 0xdb /* Secondary, 1 16-bit, fpu ld/st,L*/
+#define ASI_BLK_COMMIT_P 0xe0 /* Primary, blk store commit */
+#define ASI_BLK_COMMIT_S 0xe1 /* Secondary, blk store commit */
+#define ASI_TWINX_P 0xe2 /* twin load, primary implicit */
+#define ASI_BLK_INIT_QUAD_LDD_P 0xe2 /* (NG) init-store, twin load,
+ * primary, implicit */
+#define ASI_TWINX_S 0xe3 /* twin load, secondary implicit */
+#define ASI_BLK_INIT_QUAD_LDD_S 0xe3 /* (NG) init-store, twin load,
+ * secondary, implicit */
+#define ASI_TWINX_PL 0xea /* twin load, primary implicit, LE */
+#define ASI_TWINX_SL 0xeb /* twin load, secondary implicit, LE */
+#define ASI_BLK_P 0xf0 /* Primary, blk ld/st */
+#define ASI_BLK_S 0xf1 /* Secondary, blk ld/st */
+#define ASI_ST_BLKINIT_MRU_P 0xf2 /* (NG4) init-store, twin load,
+ * Most-Recently-Used, primary,
+ * implicit
+ */
+#define ASI_ST_BLKINIT_MRU_S 0xf2 /* (NG4) init-store, twin load,
+ * Most-Recently-Used, secondary,
+ * implicit
+ */
+#define ASI_BLK_PL 0xf8 /* Primary, blk ld/st, little */
+#define ASI_BLK_SL 0xf9 /* Secondary, blk ld/st, little */
+#define ASI_ST_BLKINIT_MRU_PL 0xfa /* (NG4) init-store, twin load,
+ * Most-Recently-Used, primary,
+ * implicit, little-endian
+ */
+#define ASI_ST_BLKINIT_MRU_SL 0xfb /* (NG4) init-store, twin load,
+ * Most-Recently-Used, secondary,
+ * implicit, little-endian
+ */
+
+#endif /* _SPARC_ASI_H */
diff --git a/target/sparc/cc_helper.c b/target/sparc/cc_helper.c
new file mode 100644
index 0000000000..a410a0b98f
--- /dev/null
+++ b/target/sparc/cc_helper.c
@@ -0,0 +1,471 @@
+/*
+ * Helpers for lazy condition code handling
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+
+static uint32_t compute_all_flags(CPUSPARCState *env)
+{
+ return env->psr & PSR_ICC;
+}
+
+static uint32_t compute_C_flags(CPUSPARCState *env)
+{
+ return env->psr & PSR_CARRY;
+}
+
+static inline uint32_t get_NZ_icc(int32_t dst)
+{
+ uint32_t ret = 0;
+
+ if (dst == 0) {
+ ret = PSR_ZERO;
+ } else if (dst < 0) {
+ ret = PSR_NEG;
+ }
+ return ret;
+}
+
+#ifdef TARGET_SPARC64
+static uint32_t compute_all_flags_xcc(CPUSPARCState *env)
+{
+ return env->xcc & PSR_ICC;
+}
+
+static uint32_t compute_C_flags_xcc(CPUSPARCState *env)
+{
+ return env->xcc & PSR_CARRY;
+}
+
+static inline uint32_t get_NZ_xcc(target_long dst)
+{
+ uint32_t ret = 0;
+
+ if (!dst) {
+ ret = PSR_ZERO;
+ } else if (dst < 0) {
+ ret = PSR_NEG;
+ }
+ return ret;
+}
+#endif
+
+static inline uint32_t get_V_div_icc(target_ulong src2)
+{
+ uint32_t ret = 0;
+
+ if (src2 != 0) {
+ ret = PSR_OVF;
+ }
+ return ret;
+}
+
+static uint32_t compute_all_div(CPUSPARCState *env)
+{
+ uint32_t ret;
+
+ ret = get_NZ_icc(CC_DST);
+ ret |= get_V_div_icc(CC_SRC2);
+ return ret;
+}
+
+static uint32_t compute_C_div(CPUSPARCState *env)
+{
+ return 0;
+}
+
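+/* For a 32-bit addition dst = src1 + src2 (mod 2^32), a carry out of
+ * bit 31 occurred iff the truncated result is smaller than an operand,
+ * hence the "dst < src1" test below.
+ */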
+static inline uint32_t get_C_add_icc(uint32_t dst, uint32_t src1)
+{
+ uint32_t ret = 0;
+
+ if (dst < src1) {
+ ret = PSR_CARRY;
+ }
+ return ret;
+}
+
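+/* ADDX also adds a carry-in, so "dst < src1" is not a reliable carry test.
+ * Instead recover the carry out of bit 31 directly from the operand and
+ * result bits with the full-adder identity
+ * carry = (src1 & src2) | (~dst & (src1 | src2)).
+ */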
+static inline uint32_t get_C_addx_icc(uint32_t dst, uint32_t src1,
+ uint32_t src2)
+{
+ uint32_t ret = 0;
+
+ if (((src1 & src2) | (~dst & (src1 | src2))) & (1U << 31)) {
+ ret = PSR_CARRY;
+ }
+ return ret;
+}
+
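+/* Signed overflow on addition: both operands have the same sign and the
+ * result sign differs, i.e. ((src1 ^ src2 ^ -1) & (src1 ^ dst)) has its
+ * top bit set.
+ */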
+static inline uint32_t get_V_add_icc(uint32_t dst, uint32_t src1,
+ uint32_t src2)
+{
+ uint32_t ret = 0;
+
+ if (((src1 ^ src2 ^ -1) & (src1 ^ dst)) & (1U << 31)) {
+ ret = PSR_OVF;
+ }
+ return ret;
+}
+
+#ifdef TARGET_SPARC64
+static inline uint32_t get_C_add_xcc(target_ulong dst, target_ulong src1)
+{
+ uint32_t ret = 0;
+
+ if (dst < src1) {
+ ret = PSR_CARRY;
+ }
+ return ret;
+}
+
+static inline uint32_t get_C_addx_xcc(target_ulong dst, target_ulong src1,
+ target_ulong src2)
+{
+ uint32_t ret = 0;
+
+ if (((src1 & src2) | (~dst & (src1 | src2))) & (1ULL << 63)) {
+ ret = PSR_CARRY;
+ }
+ return ret;
+}
+
+static inline uint32_t get_V_add_xcc(target_ulong dst, target_ulong src1,
+ target_ulong src2)
+{
+ uint32_t ret = 0;
+
+ if (((src1 ^ src2 ^ -1) & (src1 ^ dst)) & (1ULL << 63)) {
+ ret = PSR_OVF;
+ }
+ return ret;
+}
+
+static uint32_t compute_all_add_xcc(CPUSPARCState *env)
+{
+ uint32_t ret;
+
+ ret = get_NZ_xcc(CC_DST);
+ ret |= get_C_add_xcc(CC_DST, CC_SRC);
+ ret |= get_V_add_xcc(CC_DST, CC_SRC, CC_SRC2);
+ return ret;
+}
+
+static uint32_t compute_C_add_xcc(CPUSPARCState *env)
+{
+ return get_C_add_xcc(CC_DST, CC_SRC);
+}
+#endif
+
+static uint32_t compute_all_add(CPUSPARCState *env)
+{
+ uint32_t ret;
+
+ ret = get_NZ_icc(CC_DST);
+ ret |= get_C_add_icc(CC_DST, CC_SRC);
+ ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
+ return ret;
+}
+
+static uint32_t compute_C_add(CPUSPARCState *env)
+{
+ return get_C_add_icc(CC_DST, CC_SRC);
+}
+
+#ifdef TARGET_SPARC64
+static uint32_t compute_all_addx_xcc(CPUSPARCState *env)
+{
+ uint32_t ret;
+
+ ret = get_NZ_xcc(CC_DST);
+ ret |= get_C_addx_xcc(CC_DST, CC_SRC, CC_SRC2);
+ ret |= get_V_add_xcc(CC_DST, CC_SRC, CC_SRC2);
+ return ret;
+}
+
+static uint32_t compute_C_addx_xcc(CPUSPARCState *env)
+{
+ return get_C_addx_xcc(CC_DST, CC_SRC, CC_SRC2);
+}
+#endif
+
+static uint32_t compute_all_addx(CPUSPARCState *env)
+{
+ uint32_t ret;
+
+ ret = get_NZ_icc(CC_DST);
+ ret |= get_C_addx_icc(CC_DST, CC_SRC, CC_SRC2);
+ ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
+ return ret;
+}
+
+static uint32_t compute_C_addx(CPUSPARCState *env)
+{
+ return get_C_addx_icc(CC_DST, CC_SRC, CC_SRC2);
+}
+
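+/* Tagged add/subtract (TADDcc/TSUBcc) additionally set V when either
+ * operand carries a nonzero tag, i.e. either of its two low bits is set.
+ */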
+static inline uint32_t get_V_tag_icc(target_ulong src1, target_ulong src2)
+{
+ uint32_t ret = 0;
+
+ if ((src1 | src2) & 0x3) {
+ ret = PSR_OVF;
+ }
+ return ret;
+}
+
+static uint32_t compute_all_tadd(CPUSPARCState *env)
+{
+ uint32_t ret;
+
+ ret = get_NZ_icc(CC_DST);
+ ret |= get_C_add_icc(CC_DST, CC_SRC);
+ ret |= get_V_add_icc(CC_DST, CC_SRC, CC_SRC2);
+ ret |= get_V_tag_icc(CC_SRC, CC_SRC2);
+ return ret;
+}
+
+static uint32_t compute_all_taddtv(CPUSPARCState *env)
+{
+ uint32_t ret;
+
+ ret = get_NZ_icc(CC_DST);
+ ret |= get_C_add_icc(CC_DST, CC_SRC);
+ return ret;
+}
+
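+/* For a subtraction dst = src1 - src2, the SPARC carry flag records a
+ * borrow, which occurred iff src1 < src2 as unsigned values.
+ */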
+static inline uint32_t get_C_sub_icc(uint32_t src1, uint32_t src2)
+{
+ uint32_t ret = 0;
+
+ if (src1 < src2) {
+ ret = PSR_CARRY;
+ }
+ return ret;
+}
+
+static inline uint32_t get_C_subx_icc(uint32_t dst, uint32_t src1,
+ uint32_t src2)
+{
+ uint32_t ret = 0;
+
+ if (((~src1 & src2) | (dst & (~src1 | src2))) & (1U << 31)) {
+ ret = PSR_CARRY;
+ }
+ return ret;
+}
+
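+/* Signed overflow on subtraction: the operands have different signs and
+ * the result sign differs from src1, i.e. ((src1 ^ src2) & (src1 ^ dst))
+ * has its top bit set.
+ */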
+static inline uint32_t get_V_sub_icc(uint32_t dst, uint32_t src1,
+ uint32_t src2)
+{
+ uint32_t ret = 0;
+
+ if (((src1 ^ src2) & (src1 ^ dst)) & (1U << 31)) {
+ ret = PSR_OVF;
+ }
+ return ret;
+}
+
+#ifdef TARGET_SPARC64
+static inline uint32_t get_C_sub_xcc(target_ulong src1, target_ulong src2)
+{
+ uint32_t ret = 0;
+
+ if (src1 < src2) {
+ ret = PSR_CARRY;
+ }
+ return ret;
+}
+
+static inline uint32_t get_C_subx_xcc(target_ulong dst, target_ulong src1,
+ target_ulong src2)
+{
+ uint32_t ret = 0;
+
+ if (((~src1 & src2) | (dst & (~src1 | src2))) & (1ULL << 63)) {
+ ret = PSR_CARRY;
+ }
+ return ret;
+}
+
+static inline uint32_t get_V_sub_xcc(target_ulong dst, target_ulong src1,
+ target_ulong src2)
+{
+ uint32_t ret = 0;
+
+ if (((src1 ^ src2) & (src1 ^ dst)) & (1ULL << 63)) {
+ ret = PSR_OVF;
+ }
+ return ret;
+}
+
+static uint32_t compute_all_sub_xcc(CPUSPARCState *env)
+{
+ uint32_t ret;
+
+ ret = get_NZ_xcc(CC_DST);
+ ret |= get_C_sub_xcc(CC_SRC, CC_SRC2);
+ ret |= get_V_sub_xcc(CC_DST, CC_SRC, CC_SRC2);
+ return ret;
+}
+
+static uint32_t compute_C_sub_xcc(CPUSPARCState *env)
+{
+ return get_C_sub_xcc(CC_SRC, CC_SRC2);
+}
+#endif
+
+static uint32_t compute_all_sub(CPUSPARCState *env)
+{
+ uint32_t ret;
+
+ ret = get_NZ_icc(CC_DST);
+ ret |= get_C_sub_icc(CC_SRC, CC_SRC2);
+ ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
+ return ret;
+}
+
+static uint32_t compute_C_sub(CPUSPARCState *env)
+{
+ return get_C_sub_icc(CC_SRC, CC_SRC2);
+}
+
+#ifdef TARGET_SPARC64
+static uint32_t compute_all_subx_xcc(CPUSPARCState *env)
+{
+ uint32_t ret;
+
+ ret = get_NZ_xcc(CC_DST);
+ ret |= get_C_subx_xcc(CC_DST, CC_SRC, CC_SRC2);
+ ret |= get_V_sub_xcc(CC_DST, CC_SRC, CC_SRC2);
+ return ret;
+}
+
+static uint32_t compute_C_subx_xcc(CPUSPARCState *env)
+{
+ return get_C_subx_xcc(CC_DST, CC_SRC, CC_SRC2);
+}
+#endif
+
+static uint32_t compute_all_subx(CPUSPARCState *env)
+{
+ uint32_t ret;
+
+ ret = get_NZ_icc(CC_DST);
+ ret |= get_C_subx_icc(CC_DST, CC_SRC, CC_SRC2);
+ ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
+ return ret;
+}
+
+static uint32_t compute_C_subx(CPUSPARCState *env)
+{
+ return get_C_subx_icc(CC_DST, CC_SRC, CC_SRC2);
+}
+
+static uint32_t compute_all_tsub(CPUSPARCState *env)
+{
+ uint32_t ret;
+
+ ret = get_NZ_icc(CC_DST);
+ ret |= get_C_sub_icc(CC_SRC, CC_SRC2);
+ ret |= get_V_sub_icc(CC_DST, CC_SRC, CC_SRC2);
+ ret |= get_V_tag_icc(CC_SRC, CC_SRC2);
+ return ret;
+}
+
+static uint32_t compute_all_tsubtv(CPUSPARCState *env)
+{
+ uint32_t ret;
+
+ ret = get_NZ_icc(CC_DST);
+ ret |= get_C_sub_icc(CC_SRC, CC_SRC2);
+ return ret;
+}
+
+static uint32_t compute_all_logic(CPUSPARCState *env)
+{
+ return get_NZ_icc(CC_DST);
+}
+
+static uint32_t compute_C_logic(CPUSPARCState *env)
+{
+ return 0;
+}
+
+#ifdef TARGET_SPARC64
+static uint32_t compute_all_logic_xcc(CPUSPARCState *env)
+{
+ return get_NZ_xcc(CC_DST);
+}
+#endif
+
+typedef struct CCTable {
+ uint32_t (*compute_all)(CPUSPARCState *env); /* return all the flags */
+ uint32_t (*compute_c)(CPUSPARCState *env); /* return the C flag */
+} CCTable;
+
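+/* One entry per CC_OP value: cc_op records which operation last set the
+ * condition codes, and these tables recompute the flag bits on demand
+ * from cc_dst/cc_src/cc_src2.
+ */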
+static const CCTable icc_table[CC_OP_NB] = {
+ /* CC_OP_DYNAMIC should never happen */
+ [CC_OP_FLAGS] = { compute_all_flags, compute_C_flags },
+ [CC_OP_DIV] = { compute_all_div, compute_C_div },
+ [CC_OP_ADD] = { compute_all_add, compute_C_add },
+ [CC_OP_ADDX] = { compute_all_addx, compute_C_addx },
+ [CC_OP_TADD] = { compute_all_tadd, compute_C_add },
+ [CC_OP_TADDTV] = { compute_all_taddtv, compute_C_add },
+ [CC_OP_SUB] = { compute_all_sub, compute_C_sub },
+ [CC_OP_SUBX] = { compute_all_subx, compute_C_subx },
+ [CC_OP_TSUB] = { compute_all_tsub, compute_C_sub },
+ [CC_OP_TSUBTV] = { compute_all_tsubtv, compute_C_sub },
+ [CC_OP_LOGIC] = { compute_all_logic, compute_C_logic },
+};
+
+#ifdef TARGET_SPARC64
+static const CCTable xcc_table[CC_OP_NB] = {
+ /* CC_OP_DYNAMIC should never happen */
+ [CC_OP_FLAGS] = { compute_all_flags_xcc, compute_C_flags_xcc },
+ [CC_OP_DIV] = { compute_all_logic_xcc, compute_C_logic },
+ [CC_OP_ADD] = { compute_all_add_xcc, compute_C_add_xcc },
+ [CC_OP_ADDX] = { compute_all_addx_xcc, compute_C_addx_xcc },
+ [CC_OP_TADD] = { compute_all_add_xcc, compute_C_add_xcc },
+ [CC_OP_TADDTV] = { compute_all_add_xcc, compute_C_add_xcc },
+ [CC_OP_SUB] = { compute_all_sub_xcc, compute_C_sub_xcc },
+ [CC_OP_SUBX] = { compute_all_subx_xcc, compute_C_subx_xcc },
+ [CC_OP_TSUB] = { compute_all_sub_xcc, compute_C_sub_xcc },
+ [CC_OP_TSUBTV] = { compute_all_sub_xcc, compute_C_sub_xcc },
+ [CC_OP_LOGIC] = { compute_all_logic_xcc, compute_C_logic },
+};
+#endif
+
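+/* Materialize the lazily tracked condition codes: fold the flags of the
+ * pending operation (selected by CC_OP) into env->psr (and env->xcc on
+ * sparc64), then mark them as up to date with CC_OP_FLAGS.
+ */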
+void helper_compute_psr(CPUSPARCState *env)
+{
+ uint32_t new_psr;
+
+ new_psr = icc_table[CC_OP].compute_all(env);
+ env->psr = new_psr;
+#ifdef TARGET_SPARC64
+ new_psr = xcc_table[CC_OP].compute_all(env);
+ env->xcc = new_psr;
+#endif
+ CC_OP = CC_OP_FLAGS;
+}
+
+uint32_t helper_compute_C_icc(CPUSPARCState *env)
+{
+ return icc_table[CC_OP].compute_c(env) >> PSR_CARRY_SHIFT;
+}
diff --git a/target/sparc/cpu-qom.h b/target/sparc/cpu-qom.h
new file mode 100644
index 0000000000..f63af728ee
--- /dev/null
+++ b/target/sparc/cpu-qom.h
@@ -0,0 +1,56 @@
+/*
+ * QEMU SPARC CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+#ifndef QEMU_SPARC_CPU_QOM_H
+#define QEMU_SPARC_CPU_QOM_H
+
+#include "qom/cpu.h"
+
+#ifdef TARGET_SPARC64
+#define TYPE_SPARC_CPU "sparc64-cpu"
+#else
+#define TYPE_SPARC_CPU "sparc-cpu"
+#endif
+
+#define SPARC_CPU_CLASS(klass) \
+ OBJECT_CLASS_CHECK(SPARCCPUClass, (klass), TYPE_SPARC_CPU)
+#define SPARC_CPU(obj) \
+ OBJECT_CHECK(SPARCCPU, (obj), TYPE_SPARC_CPU)
+#define SPARC_CPU_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(SPARCCPUClass, (obj), TYPE_SPARC_CPU)
+
+/**
+ * SPARCCPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ *
+ * A SPARC CPU model.
+ */
+typedef struct SPARCCPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+ void (*parent_reset)(CPUState *cpu);
+} SPARCCPUClass;
+
+typedef struct SPARCCPU SPARCCPU;
+
+#endif
diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c
new file mode 100644
index 0000000000..4e07b92fbd
--- /dev/null
+++ b/target/sparc/cpu.c
@@ -0,0 +1,895 @@
+/*
+ * Sparc CPU init helpers
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "qemu/error-report.h"
+#include "exec/exec-all.h"
+
+//#define DEBUG_FEATURES
+
+static int cpu_sparc_find_by_name(sparc_def_t *cpu_def, const char *cpu_model);
+
+/* CPUClass::reset() */
+static void sparc_cpu_reset(CPUState *s)
+{
+ SPARCCPU *cpu = SPARC_CPU(s);
+ SPARCCPUClass *scc = SPARC_CPU_GET_CLASS(cpu);
+ CPUSPARCState *env = &cpu->env;
+
+ scc->parent_reset(s);
+
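+ /* Clear only the fields up to (but not including) env->version; the
+ * members from version onwards are preserved across CPU reset (see the
+ * CPUSPARCState layout in cpu.h).
+ */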
+ memset(env, 0, offsetof(CPUSPARCState, version));
+ tlb_flush(s, 1);
+ env->cwp = 0;
+#ifndef TARGET_SPARC64
+ env->wim = 1;
+#endif
+ env->regwptr = env->regbase + (env->cwp * 16);
+ CC_OP = CC_OP_FLAGS;
+#if defined(CONFIG_USER_ONLY)
+#ifdef TARGET_SPARC64
+ env->cleanwin = env->nwindows - 2;
+ env->cansave = env->nwindows - 2;
+ env->pstate = PS_RMO | PS_PEF | PS_IE;
+ env->asi = 0x82; /* Primary no-fault */
+#endif
+#else
+#if !defined(TARGET_SPARC64)
+ env->psret = 0;
+ env->psrs = 1;
+ env->psrps = 1;
+#endif
+#ifdef TARGET_SPARC64
+ env->pstate = PS_PRIV|PS_RED|PS_PEF|PS_AG;
+ env->hpstate = cpu_has_hypervisor(env) ? HS_PRIV : 0;
+ env->tl = env->maxtl;
+ cpu_tsptr(env)->tt = TT_POWER_ON_RESET;
+ env->lsu = 0;
+#else
+ env->mmuregs[0] &= ~(MMU_E | MMU_NF);
+ env->mmuregs[0] |= env->def->mmu_bm;
+#endif
+ env->pc = 0;
+ env->npc = env->pc + 4;
+#endif
+ env->cache_control = 0;
+}
+
+static bool sparc_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ if (interrupt_request & CPU_INTERRUPT_HARD) {
+ SPARCCPU *cpu = SPARC_CPU(cs);
+ CPUSPARCState *env = &cpu->env;
+
+ if (cpu_interrupts_enabled(env) && env->interrupt_index > 0) {
+ int pil = env->interrupt_index & 0xf;
+ int type = env->interrupt_index & 0xf0;
+
+ if (type != TT_EXTINT || cpu_pil_allowed(env, pil)) {
+ cs->exception_index = env->interrupt_index;
+ sparc_cpu_do_interrupt(cs);
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+static void cpu_sparc_disas_set_info(CPUState *cpu, disassemble_info *info)
+{
+ info->print_insn = print_insn_sparc;
+#ifdef TARGET_SPARC64
+ info->mach = bfd_mach_sparc_v9b;
+#endif
+}
+
+static void sparc_cpu_parse_features(CPUState *cs, char *features,
+ Error **errp);
+
+static int cpu_sparc_register(SPARCCPU *cpu, const char *cpu_model)
+{
+ CPUSPARCState *env = &cpu->env;
+ char *s = g_strdup(cpu_model);
+ char *featurestr, *name = strtok(s, ",");
+ sparc_def_t def1, *def = &def1;
+ Error *err = NULL;
+
+ if (cpu_sparc_find_by_name(def, name) < 0) {
+ g_free(s);
+ return -1;
+ }
+
+ env->def = g_memdup(def, sizeof(*def));
+
+ featurestr = strtok(NULL, ",");
+ sparc_cpu_parse_features(CPU(cpu), featurestr, &err);
+ g_free(s);
+ if (err) {
+ error_report_err(err);
+ return -1;
+ }
+
+ env->version = def->iu_version;
+ env->fsr = def->fpu_version;
+ env->nwindows = def->nwindows;
+#if !defined(TARGET_SPARC64)
+ env->mmuregs[0] |= def->mmu_version;
+ cpu_sparc_set_id(env, 0);
+ env->mxccregs[7] |= def->mxcc_version;
+#else
+ env->mmu_version = def->mmu_version;
+ env->maxtl = def->maxtl;
+ env->version |= def->maxtl << 8;
+ env->version |= def->nwindows - 1;
+#endif
+ return 0;
+}
+
+SPARCCPU *cpu_sparc_init(const char *cpu_model)
+{
+ SPARCCPU *cpu;
+
+ cpu = SPARC_CPU(object_new(TYPE_SPARC_CPU));
+
+ if (cpu_sparc_register(cpu, cpu_model) < 0) {
+ object_unref(OBJECT(cpu));
+ return NULL;
+ }
+
+ object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
+
+ return cpu;
+}
+
+void cpu_sparc_set_id(CPUSPARCState *env, unsigned int cpu)
+{
+#if !defined(TARGET_SPARC64)
+ env->mxccregs[7] = ((cpu + 8) & 0xf) << 24;
+#endif
+}
+
+static const sparc_def_t sparc_defs[] = {
+#ifdef TARGET_SPARC64
+ {
+ .name = "Fujitsu Sparc64",
+ .iu_version = ((0x04ULL << 48) | (0x02ULL << 32) | (0ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_12,
+ .nwindows = 4,
+ .maxtl = 4,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "Fujitsu Sparc64 III",
+ .iu_version = ((0x04ULL << 48) | (0x03ULL << 32) | (0ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_12,
+ .nwindows = 5,
+ .maxtl = 4,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "Fujitsu Sparc64 IV",
+ .iu_version = ((0x04ULL << 48) | (0x04ULL << 32) | (0ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_12,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "Fujitsu Sparc64 V",
+ .iu_version = ((0x04ULL << 48) | (0x05ULL << 32) | (0x51ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_12,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "TI UltraSparc I",
+ .iu_version = ((0x17ULL << 48) | (0x10ULL << 32) | (0x40ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_12,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "TI UltraSparc II",
+ .iu_version = ((0x17ULL << 48) | (0x11ULL << 32) | (0x20ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_12,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "TI UltraSparc IIi",
+ .iu_version = ((0x17ULL << 48) | (0x12ULL << 32) | (0x91ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_12,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "TI UltraSparc IIe",
+ .iu_version = ((0x17ULL << 48) | (0x13ULL << 32) | (0x14ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_12,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "Sun UltraSparc III",
+ .iu_version = ((0x3eULL << 48) | (0x14ULL << 32) | (0x34ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_12,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "Sun UltraSparc III Cu",
+ .iu_version = ((0x3eULL << 48) | (0x15ULL << 32) | (0x41ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_3,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "Sun UltraSparc IIIi",
+ .iu_version = ((0x3eULL << 48) | (0x16ULL << 32) | (0x34ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_12,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "Sun UltraSparc IV",
+ .iu_version = ((0x3eULL << 48) | (0x18ULL << 32) | (0x31ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_4,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "Sun UltraSparc IV+",
+ .iu_version = ((0x3eULL << 48) | (0x19ULL << 32) | (0x22ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_12,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES | CPU_FEATURE_CMT,
+ },
+ {
+ .name = "Sun UltraSparc IIIi+",
+ .iu_version = ((0x3eULL << 48) | (0x22ULL << 32) | (0ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_3,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "Sun UltraSparc T1",
+ /* defined in sparc_ifu_fdp.v and ctu.h */
+ .iu_version = ((0x3eULL << 48) | (0x23ULL << 32) | (0x02ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_sun4v,
+ .nwindows = 8,
+ .maxtl = 6,
+ .features = CPU_DEFAULT_FEATURES | CPU_FEATURE_HYPV | CPU_FEATURE_CMT
+ | CPU_FEATURE_GL,
+ },
+ {
+ .name = "Sun UltraSparc T2",
+ /* defined in tlu_asi_ctl.v and n2_revid_cust.v */
+ .iu_version = ((0x3eULL << 48) | (0x24ULL << 32) | (0x02ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_sun4v,
+ .nwindows = 8,
+ .maxtl = 6,
+ .features = CPU_DEFAULT_FEATURES | CPU_FEATURE_HYPV | CPU_FEATURE_CMT
+ | CPU_FEATURE_GL,
+ },
+ {
+ .name = "NEC UltraSparc I",
+ .iu_version = ((0x22ULL << 48) | (0x10ULL << 32) | (0x40ULL << 24)),
+ .fpu_version = 0x00000000,
+ .mmu_version = mmu_us_12,
+ .nwindows = 8,
+ .maxtl = 5,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+#else
+ {
+ .name = "Fujitsu MB86904",
+ .iu_version = 0x04 << 24, /* Impl 0, ver 4 */
+ .fpu_version = 4 << 17, /* FPU version 4 (Meiko) */
+ .mmu_version = 0x04 << 24, /* Impl 0, ver 4 */
+ .mmu_bm = 0x00004000,
+ .mmu_ctpr_mask = 0x00ffffc0,
+ .mmu_cxr_mask = 0x000000ff,
+ .mmu_sfsr_mask = 0x00016fff,
+ .mmu_trcr_mask = 0x00ffffff,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "Fujitsu MB86907",
+ .iu_version = 0x05 << 24, /* Impl 0, ver 5 */
+ .fpu_version = 4 << 17, /* FPU version 4 (Meiko) */
+ .mmu_version = 0x05 << 24, /* Impl 0, ver 5 */
+ .mmu_bm = 0x00004000,
+ .mmu_ctpr_mask = 0xffffffc0,
+ .mmu_cxr_mask = 0x000000ff,
+ .mmu_sfsr_mask = 0x00016fff,
+ .mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "TI MicroSparc I",
+ .iu_version = 0x41000000,
+ .fpu_version = 4 << 17,
+ .mmu_version = 0x41000000,
+ .mmu_bm = 0x00004000,
+ .mmu_ctpr_mask = 0x007ffff0,
+ .mmu_cxr_mask = 0x0000003f,
+ .mmu_sfsr_mask = 0x00016fff,
+ .mmu_trcr_mask = 0x0000003f,
+ .nwindows = 7,
+ .features = CPU_FEATURE_FLOAT | CPU_FEATURE_SWAP | CPU_FEATURE_MUL |
+ CPU_FEATURE_DIV | CPU_FEATURE_FLUSH | CPU_FEATURE_FSQRT |
+ CPU_FEATURE_FMUL,
+ },
+ {
+ .name = "TI MicroSparc II",
+ .iu_version = 0x42000000,
+ .fpu_version = 4 << 17,
+ .mmu_version = 0x02000000,
+ .mmu_bm = 0x00004000,
+ .mmu_ctpr_mask = 0x00ffffc0,
+ .mmu_cxr_mask = 0x000000ff,
+ .mmu_sfsr_mask = 0x00016fff,
+ .mmu_trcr_mask = 0x00ffffff,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "TI MicroSparc IIep",
+ .iu_version = 0x42000000,
+ .fpu_version = 4 << 17,
+ .mmu_version = 0x04000000,
+ .mmu_bm = 0x00004000,
+ .mmu_ctpr_mask = 0x00ffffc0,
+ .mmu_cxr_mask = 0x000000ff,
+ .mmu_sfsr_mask = 0x00016bff,
+ .mmu_trcr_mask = 0x00ffffff,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "TI SuperSparc 40", /* STP1020NPGA */
+ .iu_version = 0x41000000, /* SuperSPARC 2.x */
+ .fpu_version = 0 << 17,
+ .mmu_version = 0x00000800, /* SuperSPARC 2.x, no MXCC */
+ .mmu_bm = 0x00002000,
+ .mmu_ctpr_mask = 0xffffffc0,
+ .mmu_cxr_mask = 0x0000ffff,
+ .mmu_sfsr_mask = 0xffffffff,
+ .mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "TI SuperSparc 50", /* STP1020PGA */
+ .iu_version = 0x40000000, /* SuperSPARC 3.x */
+ .fpu_version = 0 << 17,
+ .mmu_version = 0x01000800, /* SuperSPARC 3.x, no MXCC */
+ .mmu_bm = 0x00002000,
+ .mmu_ctpr_mask = 0xffffffc0,
+ .mmu_cxr_mask = 0x0000ffff,
+ .mmu_sfsr_mask = 0xffffffff,
+ .mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "TI SuperSparc 51",
+ .iu_version = 0x40000000, /* SuperSPARC 3.x */
+ .fpu_version = 0 << 17,
+ .mmu_version = 0x01000000, /* SuperSPARC 3.x, MXCC */
+ .mmu_bm = 0x00002000,
+ .mmu_ctpr_mask = 0xffffffc0,
+ .mmu_cxr_mask = 0x0000ffff,
+ .mmu_sfsr_mask = 0xffffffff,
+ .mmu_trcr_mask = 0xffffffff,
+ .mxcc_version = 0x00000104,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "TI SuperSparc 60", /* STP1020APGA */
+ .iu_version = 0x40000000, /* SuperSPARC 3.x */
+ .fpu_version = 0 << 17,
+ .mmu_version = 0x01000800, /* SuperSPARC 3.x, no MXCC */
+ .mmu_bm = 0x00002000,
+ .mmu_ctpr_mask = 0xffffffc0,
+ .mmu_cxr_mask = 0x0000ffff,
+ .mmu_sfsr_mask = 0xffffffff,
+ .mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "TI SuperSparc 61",
+ .iu_version = 0x44000000, /* SuperSPARC 3.x */
+ .fpu_version = 0 << 17,
+ .mmu_version = 0x01000000, /* SuperSPARC 3.x, MXCC */
+ .mmu_bm = 0x00002000,
+ .mmu_ctpr_mask = 0xffffffc0,
+ .mmu_cxr_mask = 0x0000ffff,
+ .mmu_sfsr_mask = 0xffffffff,
+ .mmu_trcr_mask = 0xffffffff,
+ .mxcc_version = 0x00000104,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "TI SuperSparc II",
+ .iu_version = 0x40000000, /* SuperSPARC II 1.x */
+ .fpu_version = 0 << 17,
+ .mmu_version = 0x08000000, /* SuperSPARC II 1.x, MXCC */
+ .mmu_bm = 0x00002000,
+ .mmu_ctpr_mask = 0xffffffc0,
+ .mmu_cxr_mask = 0x0000ffff,
+ .mmu_sfsr_mask = 0xffffffff,
+ .mmu_trcr_mask = 0xffffffff,
+ .mxcc_version = 0x00000104,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES,
+ },
+ {
+ .name = "LEON2",
+ .iu_version = 0xf2000000,
+ .fpu_version = 4 << 17, /* FPU version 4 (Meiko) */
+ .mmu_version = 0xf2000000,
+ .mmu_bm = 0x00004000,
+ .mmu_ctpr_mask = 0x007ffff0,
+ .mmu_cxr_mask = 0x0000003f,
+ .mmu_sfsr_mask = 0xffffffff,
+ .mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES | CPU_FEATURE_TA0_SHUTDOWN,
+ },
+ {
+ .name = "LEON3",
+ .iu_version = 0xf3000000,
+ .fpu_version = 4 << 17, /* FPU version 4 (Meiko) */
+ .mmu_version = 0xf3000000,
+ .mmu_bm = 0x00000000,
+ .mmu_ctpr_mask = 0xfffffffc,
+ .mmu_cxr_mask = 0x000000ff,
+ .mmu_sfsr_mask = 0xffffffff,
+ .mmu_trcr_mask = 0xffffffff,
+ .nwindows = 8,
+ .features = CPU_DEFAULT_FEATURES | CPU_FEATURE_TA0_SHUTDOWN |
+ CPU_FEATURE_ASR17 | CPU_FEATURE_CACHE_CTRL | CPU_FEATURE_POWERDOWN |
+ CPU_FEATURE_CASA,
+ },
+#endif
+};
+
+static const char * const feature_name[] = {
+ "float",
+ "float128",
+ "swap",
+ "mul",
+ "div",
+ "flush",
+ "fsqrt",
+ "fmul",
+ "vis1",
+ "vis2",
+ "fsmuld",
+ "hypv",
+ "cmt",
+ "gl",
+};
+
+static void print_features(FILE *f, fprintf_function cpu_fprintf,
+ uint32_t features, const char *prefix)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(feature_name); i++) {
+ if (feature_name[i] && (features & (1 << i))) {
+ if (prefix) {
+ (*cpu_fprintf)(f, "%s", prefix);
+ }
+ (*cpu_fprintf)(f, "%s ", feature_name[i]);
+ }
+ }
+}
+
+static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(feature_name); i++) {
+ if (feature_name[i] && !strcmp(flagname, feature_name[i])) {
+ *features |= 1 << i;
+ return;
+ }
+ }
+ error_report("CPU feature %s not found", flagname);
+}
+
+static int cpu_sparc_find_by_name(sparc_def_t *cpu_def, const char *name)
+{
+ unsigned int i;
+ const sparc_def_t *def = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(sparc_defs); i++) {
+ if (strcasecmp(name, sparc_defs[i].name) == 0) {
+ def = &sparc_defs[i];
+ }
+ }
+ if (!def) {
+ return -1;
+ }
+ memcpy(cpu_def, def, sizeof(*def));
+ return 0;
+}
+
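+/* Parse the feature portion of a "model,feature,..." CPU string. Each
+ * element is either +name or -name for the boolean features listed in
+ * feature_name[], or key=value for the numerical properties iu_version,
+ * fpu_version, mmu_version and nwindows. An illustrative (hypothetical)
+ * model string would be "TI SuperSparc II,nwindows=8,-fmul".
+ */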
+static void sparc_cpu_parse_features(CPUState *cs, char *features,
+ Error **errp)
+{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+ sparc_def_t *cpu_def = cpu->env.def;
+ char *featurestr;
+ uint32_t plus_features = 0;
+ uint32_t minus_features = 0;
+ uint64_t iu_version;
+ uint32_t fpu_version, mmu_version, nwindows;
+
+ featurestr = features ? strtok(features, ",") : NULL;
+ while (featurestr) {
+ char *val;
+
+ if (featurestr[0] == '+') {
+ add_flagname_to_bitmaps(featurestr + 1, &plus_features);
+ } else if (featurestr[0] == '-') {
+ add_flagname_to_bitmaps(featurestr + 1, &minus_features);
+ } else if ((val = strchr(featurestr, '='))) {
+ *val = 0; val++;
+ if (!strcmp(featurestr, "iu_version")) {
+ char *err;
+
+ iu_version = strtoll(val, &err, 0);
+ if (!*val || *err) {
+ error_setg(errp, "bad numerical value %s", val);
+ return;
+ }
+ cpu_def->iu_version = iu_version;
+#ifdef DEBUG_FEATURES
+ fprintf(stderr, "iu_version %" PRIx64 "\n", iu_version);
+#endif
+ } else if (!strcmp(featurestr, "fpu_version")) {
+ char *err;
+
+ fpu_version = strtol(val, &err, 0);
+ if (!*val || *err) {
+ error_setg(errp, "bad numerical value %s", val);
+ return;
+ }
+ cpu_def->fpu_version = fpu_version;
+#ifdef DEBUG_FEATURES
+ fprintf(stderr, "fpu_version %x\n", fpu_version);
+#endif
+ } else if (!strcmp(featurestr, "mmu_version")) {
+ char *err;
+
+ mmu_version = strtol(val, &err, 0);
+ if (!*val || *err) {
+ error_setg(errp, "bad numerical value %s", val);
+ return;
+ }
+ cpu_def->mmu_version = mmu_version;
+#ifdef DEBUG_FEATURES
+ fprintf(stderr, "mmu_version %x\n", mmu_version);
+#endif
+ } else if (!strcmp(featurestr, "nwindows")) {
+ char *err;
+
+ nwindows = strtol(val, &err, 0);
+ if (!*val || *err || nwindows > MAX_NWINDOWS ||
+ nwindows < MIN_NWINDOWS) {
+ error_setg(errp, "bad numerical value %s", val);
+ return;
+ }
+ cpu_def->nwindows = nwindows;
+#ifdef DEBUG_FEATURES
+ fprintf(stderr, "nwindows %d\n", nwindows);
+#endif
+ } else {
+ error_setg(errp, "unrecognized feature %s", featurestr);
+ return;
+ }
+ } else {
+ error_setg(errp, "feature string `%s' not in format "
+ "(+feature|-feature|feature=xyz)", featurestr);
+ return;
+ }
+ featurestr = strtok(NULL, ",");
+ }
+ cpu_def->features |= plus_features;
+ cpu_def->features &= ~minus_features;
+#ifdef DEBUG_FEATURES
+ print_features(stderr, fprintf, cpu_def->features, NULL);
+#endif
+}
+
+void sparc_cpu_list(FILE *f, fprintf_function cpu_fprintf)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(sparc_defs); i++) {
+ (*cpu_fprintf)(f, "Sparc %16s IU " TARGET_FMT_lx
+ " FPU %08x MMU %08x NWINS %d ",
+ sparc_defs[i].name,
+ sparc_defs[i].iu_version,
+ sparc_defs[i].fpu_version,
+ sparc_defs[i].mmu_version,
+ sparc_defs[i].nwindows);
+ print_features(f, cpu_fprintf, CPU_DEFAULT_FEATURES &
+ ~sparc_defs[i].features, "-");
+ print_features(f, cpu_fprintf, ~CPU_DEFAULT_FEATURES &
+ sparc_defs[i].features, "+");
+ (*cpu_fprintf)(f, "\n");
+ }
+ (*cpu_fprintf)(f, "Default CPU feature flags (use '-' to remove): ");
+ print_features(f, cpu_fprintf, CPU_DEFAULT_FEATURES, NULL);
+ (*cpu_fprintf)(f, "\n");
+ (*cpu_fprintf)(f, "Available CPU feature flags (use '+' to add): ");
+ print_features(f, cpu_fprintf, ~CPU_DEFAULT_FEATURES, NULL);
+ (*cpu_fprintf)(f, "\n");
+ (*cpu_fprintf)(f, "Numerical features (use '=' to set): iu_version "
+ "fpu_version mmu_version nwindows\n");
+}
+
+static void cpu_print_cc(FILE *f, fprintf_function cpu_fprintf,
+ uint32_t cc)
+{
+ cpu_fprintf(f, "%c%c%c%c", cc & PSR_NEG ? 'N' : '-',
+ cc & PSR_ZERO ? 'Z' : '-', cc & PSR_OVF ? 'V' : '-',
+ cc & PSR_CARRY ? 'C' : '-');
+}
+
+#ifdef TARGET_SPARC64
+#define REGS_PER_LINE 4
+#else
+#define REGS_PER_LINE 8
+#endif
+
+void sparc_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
+ int flags)
+{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+ CPUSPARCState *env = &cpu->env;
+ int i, x;
+
+ cpu_fprintf(f, "pc: " TARGET_FMT_lx " npc: " TARGET_FMT_lx "\n", env->pc,
+ env->npc);
+
+ for (i = 0; i < 8; i++) {
+ if (i % REGS_PER_LINE == 0) {
+ cpu_fprintf(f, "%%g%d-%d:", i, i + REGS_PER_LINE - 1);
+ }
+ cpu_fprintf(f, " " TARGET_FMT_lx, env->gregs[i]);
+ if (i % REGS_PER_LINE == REGS_PER_LINE - 1) {
+ cpu_fprintf(f, "\n");
+ }
+ }
+ for (x = 0; x < 3; x++) {
+ for (i = 0; i < 8; i++) {
+ if (i % REGS_PER_LINE == 0) {
+ cpu_fprintf(f, "%%%c%d-%d: ",
+ x == 0 ? 'o' : (x == 1 ? 'l' : 'i'),
+ i, i + REGS_PER_LINE - 1);
+ }
+ cpu_fprintf(f, TARGET_FMT_lx " ", env->regwptr[i + x * 8]);
+ if (i % REGS_PER_LINE == REGS_PER_LINE - 1) {
+ cpu_fprintf(f, "\n");
+ }
+ }
+ }
+
+ for (i = 0; i < TARGET_DPREGS; i++) {
+ if ((i & 3) == 0) {
+ cpu_fprintf(f, "%%f%02d: ", i * 2);
+ }
+ cpu_fprintf(f, " %016" PRIx64, env->fpr[i].ll);
+ if ((i & 3) == 3) {
+ cpu_fprintf(f, "\n");
+ }
+ }
+#ifdef TARGET_SPARC64
+ cpu_fprintf(f, "pstate: %08x ccr: %02x (icc: ", env->pstate,
+ (unsigned)cpu_get_ccr(env));
+ cpu_print_cc(f, cpu_fprintf, cpu_get_ccr(env) << PSR_CARRY_SHIFT);
+ cpu_fprintf(f, " xcc: ");
+ cpu_print_cc(f, cpu_fprintf, cpu_get_ccr(env) << (PSR_CARRY_SHIFT - 4));
+ cpu_fprintf(f, ") asi: %02x tl: %d pil: %x\n", env->asi, env->tl,
+ env->psrpil);
+ cpu_fprintf(f, "cansave: %d canrestore: %d otherwin: %d wstate: %d "
+ "cleanwin: %d cwp: %d\n",
+ env->cansave, env->canrestore, env->otherwin, env->wstate,
+ env->cleanwin, env->nwindows - 1 - env->cwp);
+ cpu_fprintf(f, "fsr: " TARGET_FMT_lx " y: " TARGET_FMT_lx " fprs: "
+ TARGET_FMT_lx "\n", env->fsr, env->y, env->fprs);
+#else
+ cpu_fprintf(f, "psr: %08x (icc: ", cpu_get_psr(env));
+ cpu_print_cc(f, cpu_fprintf, cpu_get_psr(env));
+ cpu_fprintf(f, " SPE: %c%c%c) wim: %08x\n", env->psrs ? 'S' : '-',
+ env->psrps ? 'P' : '-', env->psret ? 'E' : '-',
+ env->wim);
+ cpu_fprintf(f, "fsr: " TARGET_FMT_lx " y: " TARGET_FMT_lx "\n",
+ env->fsr, env->y);
+#endif
+ cpu_fprintf(f, "\n");
+}
+
+static void sparc_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+
+ cpu->env.pc = value;
+ cpu->env.npc = value + 4;
+}
+
+static void sparc_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
+{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+
+ cpu->env.pc = tb->pc;
+ cpu->env.npc = tb->cs_base;
+}
+
+static bool sparc_cpu_has_work(CPUState *cs)
+{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+ CPUSPARCState *env = &cpu->env;
+
+ return (cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+ cpu_interrupts_enabled(env);
+}
+
+static void sparc_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ SPARCCPUClass *scc = SPARC_CPU_GET_CLASS(dev);
+ Error *local_err = NULL;
+#if defined(CONFIG_USER_ONLY)
+ SPARCCPU *cpu = SPARC_CPU(dev);
+ CPUSPARCState *env = &cpu->env;
+
+ if ((env->def->features & CPU_FEATURE_FLOAT)) {
+ env->def->features |= CPU_FEATURE_FLOAT128;
+ }
+#endif
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ qemu_init_vcpu(cs);
+
+ scc->parent_realize(dev, errp);
+}
+
+static void sparc_cpu_initfn(Object *obj)
+{
+ CPUState *cs = CPU(obj);
+ SPARCCPU *cpu = SPARC_CPU(obj);
+ CPUSPARCState *env = &cpu->env;
+
+ cs->env_ptr = env;
+
+ if (tcg_enabled()) {
+ gen_intermediate_code_init(env);
+ }
+}
+
+static void sparc_cpu_uninitfn(Object *obj)
+{
+ SPARCCPU *cpu = SPARC_CPU(obj);
+ CPUSPARCState *env = &cpu->env;
+
+ g_free(env->def);
+}
+
+static void sparc_cpu_class_init(ObjectClass *oc, void *data)
+{
+ SPARCCPUClass *scc = SPARC_CPU_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(oc);
+ DeviceClass *dc = DEVICE_CLASS(oc);
+
+ scc->parent_realize = dc->realize;
+ dc->realize = sparc_cpu_realizefn;
+
+ scc->parent_reset = cc->reset;
+ cc->reset = sparc_cpu_reset;
+
+ cc->has_work = sparc_cpu_has_work;
+ cc->do_interrupt = sparc_cpu_do_interrupt;
+ cc->cpu_exec_interrupt = sparc_cpu_exec_interrupt;
+ cc->dump_state = sparc_cpu_dump_state;
+#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
+ cc->memory_rw_debug = sparc_cpu_memory_rw_debug;
+#endif
+ cc->set_pc = sparc_cpu_set_pc;
+ cc->synchronize_from_tb = sparc_cpu_synchronize_from_tb;
+ cc->gdb_read_register = sparc_cpu_gdb_read_register;
+ cc->gdb_write_register = sparc_cpu_gdb_write_register;
+#ifdef CONFIG_USER_ONLY
+ cc->handle_mmu_fault = sparc_cpu_handle_mmu_fault;
+#else
+ cc->do_unassigned_access = sparc_cpu_unassigned_access;
+ cc->do_unaligned_access = sparc_cpu_do_unaligned_access;
+ cc->get_phys_page_debug = sparc_cpu_get_phys_page_debug;
+ cc->vmsd = &vmstate_sparc_cpu;
+#endif
+ cc->disas_set_info = cpu_sparc_disas_set_info;
+
+#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
+ cc->gdb_num_core_regs = 86;
+#else
+ cc->gdb_num_core_regs = 72;
+#endif
+}
+
+static const TypeInfo sparc_cpu_type_info = {
+ .name = TYPE_SPARC_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(SPARCCPU),
+ .instance_init = sparc_cpu_initfn,
+ .instance_finalize = sparc_cpu_uninitfn,
+ .abstract = false,
+ .class_size = sizeof(SPARCCPUClass),
+ .class_init = sparc_cpu_class_init,
+};
+
+static void sparc_cpu_register_types(void)
+{
+ type_register_static(&sparc_cpu_type_info);
+}
+
+type_init(sparc_cpu_register_types)
diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h
new file mode 100644
index 0000000000..5fb0ed1aad
--- /dev/null
+++ b/target/sparc/cpu.h
@@ -0,0 +1,779 @@
+#ifndef SPARC_CPU_H
+#define SPARC_CPU_H
+
+#include "qemu-common.h"
+#include "qemu/bswap.h"
+#include "cpu-qom.h"
+
+#define ALIGNED_ONLY
+
+#if !defined(TARGET_SPARC64)
+#define TARGET_LONG_BITS 32
+#define TARGET_DPREGS 16
+#define TARGET_PAGE_BITS 12 /* 4k */
+#define TARGET_PHYS_ADDR_SPACE_BITS 36
+#define TARGET_VIRT_ADDR_SPACE_BITS 32
+#else
+#define TARGET_LONG_BITS 64
+#define TARGET_DPREGS 32
+#define TARGET_PAGE_BITS 13 /* 8k */
+#define TARGET_PHYS_ADDR_SPACE_BITS 41
+# ifdef TARGET_ABI32
+# define TARGET_VIRT_ADDR_SPACE_BITS 32
+# else
+# define TARGET_VIRT_ADDR_SPACE_BITS 44
+# endif
+#endif
+
+#define CPUArchState struct CPUSPARCState
+
+#include "exec/cpu-defs.h"
+
+#include "fpu/softfloat.h"
+
+/*#define EXCP_INTERRUPT 0x100*/
+
+/* trap definitions */
+#ifndef TARGET_SPARC64
+#define TT_TFAULT 0x01
+#define TT_ILL_INSN 0x02
+#define TT_PRIV_INSN 0x03
+#define TT_NFPU_INSN 0x04
+#define TT_WIN_OVF 0x05
+#define TT_WIN_UNF 0x06
+#define TT_UNALIGNED 0x07
+#define TT_FP_EXCP 0x08
+#define TT_DFAULT 0x09
+#define TT_TOVF 0x0a
+#define TT_EXTINT 0x10
+#define TT_CODE_ACCESS 0x21
+#define TT_UNIMP_FLUSH 0x25
+#define TT_DATA_ACCESS 0x29
+#define TT_DIV_ZERO 0x2a
+#define TT_NCP_INSN 0x24
+#define TT_TRAP 0x80
+#else
+#define TT_POWER_ON_RESET 0x01
+#define TT_TFAULT 0x08
+#define TT_CODE_ACCESS 0x0a
+#define TT_ILL_INSN 0x10
+#define TT_UNIMP_FLUSH TT_ILL_INSN
+#define TT_PRIV_INSN 0x11
+#define TT_NFPU_INSN 0x20
+#define TT_FP_EXCP 0x21
+#define TT_TOVF 0x23
+#define TT_CLRWIN 0x24
+#define TT_DIV_ZERO 0x28
+#define TT_DFAULT 0x30
+#define TT_DATA_ACCESS 0x32
+#define TT_UNALIGNED 0x34
+#define TT_PRIV_ACT 0x37
+#define TT_EXTINT 0x40
+#define TT_IVEC 0x60
+#define TT_TMISS 0x64
+#define TT_DMISS 0x68
+#define TT_DPROT 0x6c
+#define TT_SPILL 0x80
+#define TT_FILL 0xc0
+#define TT_WOTHER (1 << 5)
+#define TT_TRAP 0x100
+#endif
+
+#define PSR_NEG_SHIFT 23
+#define PSR_NEG (1 << PSR_NEG_SHIFT)
+#define PSR_ZERO_SHIFT 22
+#define PSR_ZERO (1 << PSR_ZERO_SHIFT)
+#define PSR_OVF_SHIFT 21
+#define PSR_OVF (1 << PSR_OVF_SHIFT)
+#define PSR_CARRY_SHIFT 20
+#define PSR_CARRY (1 << PSR_CARRY_SHIFT)
+#define PSR_ICC (PSR_NEG|PSR_ZERO|PSR_OVF|PSR_CARRY)
+#if !defined(TARGET_SPARC64)
+#define PSR_EF (1<<12)
+#define PSR_PIL 0xf00
+#define PSR_S (1<<7)
+#define PSR_PS (1<<6)
+#define PSR_ET (1<<5)
+#define PSR_CWP 0x1f
+#endif
+
+#define CC_SRC (env->cc_src)
+#define CC_SRC2 (env->cc_src2)
+#define CC_DST (env->cc_dst)
+#define CC_OP (env->cc_op)
+
+/* Even though lazy evaluation of CPU condition codes tends to be less
+ * important on RISC systems where condition codes are only updated
+ * when explicitly requested, the SPARC target still uses it because on
+ * SPARC64 each flag-setting operation has to update both the 32-bit (icc)
+ * and the 64-bit (xcc) condition codes.
+ */
+enum {
+ CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
+ CC_OP_FLAGS, /* all cc are back in status register */
+ CC_OP_DIV, /* modify N, Z and V, C = 0 */
+ CC_OP_ADD, /* modify all flags, CC_DST = res, CC_SRC = src1 */
+ CC_OP_ADDX, /* modify all flags, CC_DST = res, CC_SRC = src1 */
+ CC_OP_TADD, /* modify all flags, CC_DST = res, CC_SRC = src1 */
+ CC_OP_TADDTV, /* modify all flags except V, CC_DST = res, CC_SRC = src1 */
+ CC_OP_SUB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
+ CC_OP_SUBX, /* modify all flags, CC_DST = res, CC_SRC = src1 */
+ CC_OP_TSUB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
+ CC_OP_TSUBTV, /* modify all flags except V, CC_DST = res, CC_SRC = src1 */
+ CC_OP_LOGIC, /* modify N and Z, C = V = 0, CC_DST = res */
+ CC_OP_NB,
+};
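+
+/* Example of the lazy scheme: an ADDcc leaves its operands in cc_src and
+ * cc_src2, its result in cc_dst, and sets cc_op to CC_OP_ADD; the PSR/CCR
+ * flag bits are only computed on demand by the tables in cc_helper.c.
+ */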
+
+/* Trap base register */
+#define TBR_BASE_MASK 0xfffff000
+
+#if defined(TARGET_SPARC64)
+#define PS_TCT (1<<12) /* UA2007, impl.dep. trap on control transfer */
+#define PS_IG (1<<11) /* v9, zero on UA2007 */
+#define PS_MG (1<<10) /* v9, zero on UA2007 */
+#define PS_CLE (1<<9) /* UA2007 */
+#define PS_TLE (1<<8) /* UA2007 */
+#define PS_RMO (1<<7)
+#define PS_RED (1<<5) /* v9, zero on UA2007 */
+#define PS_PEF (1<<4) /* enable fpu */
+#define PS_AM (1<<3) /* address mask */
+#define PS_PRIV (1<<2)
+#define PS_IE (1<<1)
+#define PS_AG (1<<0) /* v9, zero on UA2007 */
+
+#define FPRS_FEF (1<<2)
+
+#define HS_PRIV (1<<2)
+#endif
+
+/* Fcc */
+#define FSR_RD1 (1ULL << 31)
+#define FSR_RD0 (1ULL << 30)
+#define FSR_RD_MASK (FSR_RD1 | FSR_RD0)
+#define FSR_RD_NEAREST 0
+#define FSR_RD_ZERO FSR_RD0
+#define FSR_RD_POS FSR_RD1
+#define FSR_RD_NEG (FSR_RD1 | FSR_RD0)
+
+#define FSR_NVM (1ULL << 27)
+#define FSR_OFM (1ULL << 26)
+#define FSR_UFM (1ULL << 25)
+#define FSR_DZM (1ULL << 24)
+#define FSR_NXM (1ULL << 23)
+#define FSR_TEM_MASK (FSR_NVM | FSR_OFM | FSR_UFM | FSR_DZM | FSR_NXM)
+
+#define FSR_NVA (1ULL << 9)
+#define FSR_OFA (1ULL << 8)
+#define FSR_UFA (1ULL << 7)
+#define FSR_DZA (1ULL << 6)
+#define FSR_NXA (1ULL << 5)
+#define FSR_AEXC_MASK (FSR_NVA | FSR_OFA | FSR_UFA | FSR_DZA | FSR_NXA)
+
+#define FSR_NVC (1ULL << 4)
+#define FSR_OFC (1ULL << 3)
+#define FSR_UFC (1ULL << 2)
+#define FSR_DZC (1ULL << 1)
+#define FSR_NXC (1ULL << 0)
+#define FSR_CEXC_MASK (FSR_NVC | FSR_OFC | FSR_UFC | FSR_DZC | FSR_NXC)
+
+#define FSR_FTT2 (1ULL << 16)
+#define FSR_FTT1 (1ULL << 15)
+#define FSR_FTT0 (1ULL << 14)
+//gcc warns about constant overflow for ~FSR_FTT_MASK
+//#define FSR_FTT_MASK (FSR_FTT2 | FSR_FTT1 | FSR_FTT0)
+#ifdef TARGET_SPARC64
+#define FSR_FTT_NMASK 0xfffffffffffe3fffULL
+#define FSR_FTT_CEXC_NMASK 0xfffffffffffe3fe0ULL
+#define FSR_LDFSR_OLDMASK 0x0000003f000fc000ULL
+#define FSR_LDXFSR_MASK 0x0000003fcfc00fffULL
+#define FSR_LDXFSR_OLDMASK 0x00000000000fc000ULL
+#else
+#define FSR_FTT_NMASK 0xfffe3fffULL
+#define FSR_FTT_CEXC_NMASK 0xfffe3fe0ULL
+#define FSR_LDFSR_OLDMASK 0x000fc000ULL
+#endif
+#define FSR_LDFSR_MASK 0xcfc00fffULL
+#define FSR_FTT_IEEE_EXCP (1ULL << 14)
+#define FSR_FTT_UNIMPFPOP (3ULL << 14)
+#define FSR_FTT_SEQ_ERROR (4ULL << 14)
+#define FSR_FTT_INVAL_FPR (6ULL << 14)
+
+#define FSR_FCC1_SHIFT 11
+#define FSR_FCC1 (1ULL << FSR_FCC1_SHIFT)
+#define FSR_FCC0_SHIFT 10
+#define FSR_FCC0 (1ULL << FSR_FCC0_SHIFT)
+
+/* MMU */
+#define MMU_E (1<<0)
+#define MMU_NF (1<<1)
+
+#define PTE_ENTRYTYPE_MASK 3
+#define PTE_ACCESS_MASK 0x1c
+#define PTE_ACCESS_SHIFT 2
+#define PTE_PPN_SHIFT 7
+#define PTE_ADDR_MASK 0xffffff00
+
+#define PG_ACCESSED_BIT 5
+#define PG_MODIFIED_BIT 6
+#define PG_CACHE_BIT 7
+
+#define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT)
+#define PG_MODIFIED_MASK (1 << PG_MODIFIED_BIT)
+#define PG_CACHE_MASK (1 << PG_CACHE_BIT)
+
+/* 3 <= NWINDOWS <= 32. */
+#define MIN_NWINDOWS 3
+#define MAX_NWINDOWS 32
+
+#if !defined(TARGET_SPARC64)
+#define NB_MMU_MODES 3
+#else
+#define NB_MMU_MODES 7
+typedef struct trap_state {
+ uint64_t tpc;
+ uint64_t tnpc;
+ uint64_t tstate;
+ uint32_t tt;
+} trap_state;
+#endif
+#define TARGET_INSN_START_EXTRA_WORDS 1
+
+typedef struct sparc_def_t {
+ const char *name;
+ target_ulong iu_version;
+ uint32_t fpu_version;
+ uint32_t mmu_version;
+ uint32_t mmu_bm;
+ uint32_t mmu_ctpr_mask;
+ uint32_t mmu_cxr_mask;
+ uint32_t mmu_sfsr_mask;
+ uint32_t mmu_trcr_mask;
+ uint32_t mxcc_version;
+ uint32_t features;
+ uint32_t nwindows;
+ uint32_t maxtl;
+} sparc_def_t;
+
+#define CPU_FEATURE_FLOAT (1 << 0)
+#define CPU_FEATURE_FLOAT128 (1 << 1)
+#define CPU_FEATURE_SWAP (1 << 2)
+#define CPU_FEATURE_MUL (1 << 3)
+#define CPU_FEATURE_DIV (1 << 4)
+#define CPU_FEATURE_FLUSH (1 << 5)
+#define CPU_FEATURE_FSQRT (1 << 6)
+#define CPU_FEATURE_FMUL (1 << 7)
+#define CPU_FEATURE_VIS1 (1 << 8)
+#define CPU_FEATURE_VIS2 (1 << 9)
+#define CPU_FEATURE_FSMULD (1 << 10)
+#define CPU_FEATURE_HYPV (1 << 11)
+#define CPU_FEATURE_CMT (1 << 12)
+#define CPU_FEATURE_GL (1 << 13)
+#define CPU_FEATURE_TA0_SHUTDOWN (1 << 14) /* Shutdown on "ta 0x0" */
+#define CPU_FEATURE_ASR17 (1 << 15)
+#define CPU_FEATURE_CACHE_CTRL (1 << 16)
+#define CPU_FEATURE_POWERDOWN (1 << 17)
+#define CPU_FEATURE_CASA (1 << 18)
+
+#ifndef TARGET_SPARC64
+#define CPU_DEFAULT_FEATURES (CPU_FEATURE_FLOAT | CPU_FEATURE_SWAP | \
+ CPU_FEATURE_MUL | CPU_FEATURE_DIV | \
+ CPU_FEATURE_FLUSH | CPU_FEATURE_FSQRT | \
+ CPU_FEATURE_FMUL | CPU_FEATURE_FSMULD)
+#else
+#define CPU_DEFAULT_FEATURES (CPU_FEATURE_FLOAT | CPU_FEATURE_SWAP | \
+ CPU_FEATURE_MUL | CPU_FEATURE_DIV | \
+ CPU_FEATURE_FLUSH | CPU_FEATURE_FSQRT | \
+ CPU_FEATURE_FMUL | CPU_FEATURE_VIS1 | \
+ CPU_FEATURE_VIS2 | CPU_FEATURE_FSMULD | \
+ CPU_FEATURE_CASA)
+enum {
+ mmu_us_12, // Ultrasparc < III (64 entry TLB)
+ mmu_us_3, // Ultrasparc III (512 entry TLB)
+ mmu_us_4, // Ultrasparc IV (several TLBs, 32 and 256MB pages)
+ mmu_sun4v, // T1, T2
+};
+#endif
+
+#define TTE_VALID_BIT (1ULL << 63)
+#define TTE_NFO_BIT (1ULL << 60)
+#define TTE_USED_BIT (1ULL << 41)
+#define TTE_LOCKED_BIT (1ULL << 6)
+#define TTE_SIDEEFFECT_BIT (1ULL << 3)
+#define TTE_PRIV_BIT (1ULL << 2)
+#define TTE_W_OK_BIT (1ULL << 1)
+#define TTE_GLOBAL_BIT (1ULL << 0)
+
+#define TTE_IS_VALID(tte) ((tte) & TTE_VALID_BIT)
+#define TTE_IS_NFO(tte) ((tte) & TTE_NFO_BIT)
+#define TTE_IS_USED(tte) ((tte) & TTE_USED_BIT)
+#define TTE_IS_LOCKED(tte) ((tte) & TTE_LOCKED_BIT)
+#define TTE_IS_SIDEEFFECT(tte) ((tte) & TTE_SIDEEFFECT_BIT)
+#define TTE_IS_PRIV(tte) ((tte) & TTE_PRIV_BIT)
+#define TTE_IS_W_OK(tte) ((tte) & TTE_W_OK_BIT)
+#define TTE_IS_GLOBAL(tte) ((tte) & TTE_GLOBAL_BIT)
+
+#define TTE_SET_USED(tte) ((tte) |= TTE_USED_BIT)
+#define TTE_SET_UNUSED(tte) ((tte) &= ~TTE_USED_BIT)
+
+#define TTE_PGSIZE(tte) (((tte) >> 61) & 3ULL)
+#define TTE_PA(tte) ((tte) & 0x1ffffffe000ULL)
+
+#define SFSR_NF_BIT (1ULL << 24) /* JPS1 NoFault */
+#define SFSR_TM_BIT (1ULL << 15) /* JPS1 TLB Miss */
+#define SFSR_FT_VA_IMMU_BIT (1ULL << 13) /* USIIi VA out of range (IMMU) */
+#define SFSR_FT_VA_DMMU_BIT (1ULL << 12) /* USIIi VA out of range (DMMU) */
+#define SFSR_FT_NFO_BIT (1ULL << 11) /* NFO page access */
+#define SFSR_FT_ILL_BIT (1ULL << 10) /* illegal LDA/STA ASI */
+#define SFSR_FT_ATOMIC_BIT (1ULL << 9) /* atomic op on noncacheable area */
+#define SFSR_FT_NF_E_BIT (1ULL << 8) /* NF access on side effect area */
+#define SFSR_FT_PRIV_BIT (1ULL << 7) /* privilege violation */
+#define SFSR_PR_BIT (1ULL << 3) /* privilege mode */
+#define SFSR_WRITE_BIT (1ULL << 2) /* write access mode */
+#define SFSR_OW_BIT (1ULL << 1) /* status overwritten */
+#define SFSR_VALID_BIT (1ULL << 0) /* status valid */
+
+#define SFSR_ASI_SHIFT 16 /* 23:16 ASI value */
+#define SFSR_ASI_MASK (0xffULL << SFSR_ASI_SHIFT)
+#define SFSR_CT_PRIMARY (0ULL << 4) /* 5:4 context type */
+#define SFSR_CT_SECONDARY (1ULL << 4)
+#define SFSR_CT_NUCLEUS (2ULL << 4)
+#define SFSR_CT_NOTRANS (3ULL << 4)
+#define SFSR_CT_MASK (3ULL << 4)
+
+/* Leon3 cache control */
+
+/* Cache control: emulate the behavior of cache control registers but without
+ any effect on the emulated caches themselves */
+
+#define CACHE_STATE_MASK 0x3
+#define CACHE_DISABLED 0x0
+#define CACHE_FROZEN 0x1
+#define CACHE_ENABLED 0x3
+
+/* Cache Control register fields */
+
+#define CACHE_CTRL_IF (1 << 4) /* Instruction Cache Freeze on Interrupt */
+#define CACHE_CTRL_DF (1 << 5) /* Data Cache Freeze on Interrupt */
+#define CACHE_CTRL_DP (1 << 14) /* Data cache flush pending */
+#define CACHE_CTRL_IP (1 << 15) /* Instruction cache flush pending */
+#define CACHE_CTRL_IB (1 << 16) /* Instruction burst fetch */
+#define CACHE_CTRL_FI (1 << 21) /* Flush Instruction cache (Write only) */
+#define CACHE_CTRL_FD (1 << 22) /* Flush Data cache (Write only) */
+#define CACHE_CTRL_DS (1 << 23) /* Data cache snoop enable */
+
+typedef struct SparcTLBEntry {
+ uint64_t tag;
+ uint64_t tte;
+} SparcTLBEntry;
+
+struct CPUTimer
+{
+ const char *name;
+ uint32_t frequency;
+ uint32_t disabled;
+ uint64_t disabled_mask;
+ uint32_t npt;
+ uint64_t npt_mask;
+ int64_t clock_offset;
+ QEMUTimer *qtimer;
+};
+
+typedef struct CPUTimer CPUTimer;
+
+typedef struct CPUSPARCState CPUSPARCState;
+
+struct CPUSPARCState {
+ target_ulong gregs[8]; /* general registers */
+ target_ulong *regwptr; /* pointer to current register window */
+ target_ulong pc; /* program counter */
+ target_ulong npc; /* next program counter */
+ target_ulong y; /* multiply/divide register */
+
+ /* emulator internal flags handling */
+ target_ulong cc_src, cc_src2;
+ target_ulong cc_dst;
+ uint32_t cc_op;
+
+ target_ulong cond; /* conditional branch result (XXX: save it in a
+ temporary register when possible) */
+
+ uint32_t psr; /* processor state register */
+ target_ulong fsr; /* FPU state register */
+ CPU_DoubleU fpr[TARGET_DPREGS]; /* floating point registers */
+ uint32_t cwp; /* index of current register window (extracted
+ from PSR) */
+#if !defined(TARGET_SPARC64) || defined(TARGET_ABI32)
+ uint32_t wim; /* window invalid mask */
+#endif
+ target_ulong tbr; /* trap base register */
+#if !defined(TARGET_SPARC64)
+ int psrs; /* supervisor mode (extracted from PSR) */
+ int psrps; /* previous supervisor mode */
+ int psret; /* enable traps */
+#endif
+ uint32_t psrpil; /* interrupt blocking level */
+ uint32_t pil_in; /* incoming interrupt level bitmap */
+#if !defined(TARGET_SPARC64)
+ int psref; /* enable fpu */
+#endif
+ int interrupt_index;
+ /* NOTE: we allow 8 more registers to handle wrapping */
+ target_ulong regbase[MAX_NWINDOWS * 16 + 8];
+
+ CPU_COMMON
+
+ /* Fields from here on are preserved across CPU reset. */
+ target_ulong version;
+ uint32_t nwindows;
+
+ /* MMU regs */
+#if defined(TARGET_SPARC64)
+ uint64_t lsu;
+#define DMMU_E 0x8
+#define IMMU_E 0x4
+ //typedef struct SparcMMU
+ union {
+ uint64_t immuregs[16];
+ struct {
+ uint64_t tsb_tag_target;
+ uint64_t unused_mmu_primary_context; // use DMMU
+ uint64_t unused_mmu_secondary_context; // use DMMU
+ uint64_t sfsr;
+ uint64_t sfar;
+ uint64_t tsb;
+ uint64_t tag_access;
+ } immu;
+ };
+ union {
+ uint64_t dmmuregs[16];
+ struct {
+ uint64_t tsb_tag_target;
+ uint64_t mmu_primary_context;
+ uint64_t mmu_secondary_context;
+ uint64_t sfsr;
+ uint64_t sfar;
+ uint64_t tsb;
+ uint64_t tag_access;
+ } dmmu;
+ };
+ SparcTLBEntry itlb[64];
+ SparcTLBEntry dtlb[64];
+ uint32_t mmu_version;
+#else
+ uint32_t mmuregs[32];
+ uint64_t mxccdata[4];
+ uint64_t mxccregs[8];
+ uint32_t mmubpctrv, mmubpctrc, mmubpctrs;
+ uint64_t mmubpaction;
+ uint64_t mmubpregs[4];
+ uint64_t prom_addr;
+#endif
+ /* temporary float registers */
+ float128 qt0, qt1;
+ float_status fp_status;
+#if defined(TARGET_SPARC64)
+#define MAXTL_MAX 8
+#define MAXTL_MASK (MAXTL_MAX - 1)
+ trap_state ts[MAXTL_MAX];
+ uint32_t xcc; /* Extended integer condition codes */
+ uint32_t asi;
+ uint32_t pstate;
+ uint32_t tl;
+ uint32_t maxtl;
+ uint32_t cansave, canrestore, otherwin, wstate, cleanwin;
+ uint64_t agregs[8]; /* alternate general registers */
+ uint64_t bgregs[8]; /* backup for normal global registers */
+ uint64_t igregs[8]; /* interrupt general registers */
+ uint64_t mgregs[8]; /* mmu general registers */
+ uint64_t fprs;
+ uint64_t tick_cmpr, stick_cmpr;
+ CPUTimer *tick, *stick;
+#define TICK_NPT_MASK 0x8000000000000000ULL
+#define TICK_INT_DIS 0x8000000000000000ULL
+ uint64_t gsr;
+ uint32_t gl; // UA2005
+ /* UA 2005 hyperprivileged registers */
+ uint64_t hpstate, htstate[MAXTL_MAX], hintp, htba, hver, hstick_cmpr, ssr;
+ CPUTimer *hstick; // UA 2005
+ /* Interrupt vector registers */
+ uint64_t ivec_status;
+ uint64_t ivec_data[3];
+ uint32_t softint;
+#define SOFTINT_TIMER 1
+#define SOFTINT_STIMER (1 << 16)
+#define SOFTINT_INTRMASK (0xFFFE)
+#define SOFTINT_REG_MASK (SOFTINT_STIMER|SOFTINT_INTRMASK|SOFTINT_TIMER)
+#endif
+ sparc_def_t *def;
+
+ void *irq_manager;
+ void (*qemu_irq_ack)(CPUSPARCState *env, void *irq_manager, int intno);
+
+ /* Leon3 cache control */
+ uint32_t cache_control;
+};
+
+/**
+ * SPARCCPU:
+ * @env: #CPUSPARCState
+ *
+ * A SPARC CPU.
+ */
+struct SPARCCPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPUSPARCState env;
+};
+
+static inline SPARCCPU *sparc_env_get_cpu(CPUSPARCState *env)
+{
+ return container_of(env, SPARCCPU, env);
+}
+
+#define ENV_GET_CPU(e) CPU(sparc_env_get_cpu(e))
+
+#define ENV_OFFSET offsetof(SPARCCPU, env)
+
+#ifndef CONFIG_USER_ONLY
+extern const struct VMStateDescription vmstate_sparc_cpu;
+#endif
+
+void sparc_cpu_do_interrupt(CPUState *cpu);
+void sparc_cpu_dump_state(CPUState *cpu, FILE *f,
+ fprintf_function cpu_fprintf, int flags);
+hwaddr sparc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+int sparc_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
+int sparc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
+ MMUAccessType access_type,
+ int mmu_idx,
+ uintptr_t retaddr);
+void cpu_raise_exception_ra(CPUSPARCState *, int, uintptr_t) QEMU_NORETURN;
+
+#ifndef NO_CPU_IO_DEFS
+/* cpu_init.c */
+SPARCCPU *cpu_sparc_init(const char *cpu_model);
+void cpu_sparc_set_id(CPUSPARCState *env, unsigned int cpu);
+void sparc_cpu_list(FILE *f, fprintf_function cpu_fprintf);
+/* mmu_helper.c */
+int sparc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
+ int mmu_idx);
+target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev);
+void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUSPARCState *env);
+
+#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
+int sparc_cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
+ uint8_t *buf, int len, bool is_write);
+#endif
+
+
+/* translate.c */
+void gen_intermediate_code_init(CPUSPARCState *env);
+
+/* cpu-exec.c */
+
+/* win_helper.c */
+target_ulong cpu_get_psr(CPUSPARCState *env1);
+void cpu_put_psr(CPUSPARCState *env1, target_ulong val);
+void cpu_put_psr_raw(CPUSPARCState *env1, target_ulong val);
+#ifdef TARGET_SPARC64
+target_ulong cpu_get_ccr(CPUSPARCState *env1);
+void cpu_put_ccr(CPUSPARCState *env1, target_ulong val);
+target_ulong cpu_get_cwp64(CPUSPARCState *env1);
+void cpu_put_cwp64(CPUSPARCState *env1, int cwp);
+void cpu_change_pstate(CPUSPARCState *env1, uint32_t new_pstate);
+#endif
+int cpu_cwp_inc(CPUSPARCState *env1, int cwp);
+int cpu_cwp_dec(CPUSPARCState *env1, int cwp);
+void cpu_set_cwp(CPUSPARCState *env1, int new_cwp);
+
+/* int_helper.c */
+void leon3_irq_manager(CPUSPARCState *env, void *irq_manager, int intno);
+
+/* sun4m.c, sun4u.c */
+void cpu_check_irqs(CPUSPARCState *env);
+
+/* leon3.c */
+void leon3_irq_ack(void *irq_manager, int intno);
+
+#if defined (TARGET_SPARC64)
+
+static inline int compare_masked(uint64_t x, uint64_t y, uint64_t mask)
+{
+ return (x & mask) == (y & mask);
+}
+
+#define MMU_CONTEXT_BITS 13
+#define MMU_CONTEXT_MASK ((1 << MMU_CONTEXT_BITS) - 1)
+
+static inline int tlb_compare_context(const SparcTLBEntry *tlb,
+ uint64_t context)
+{
+ return compare_masked(context, tlb->tag, MMU_CONTEXT_MASK);
+}
+
+#endif
+#endif
+
+/* cpu-exec.c */
+#if !defined(CONFIG_USER_ONLY)
+void sparc_cpu_unassigned_access(CPUState *cpu, hwaddr addr,
+ bool is_write, bool is_exec, int is_asi,
+ unsigned size);
+#if defined(TARGET_SPARC64)
+hwaddr cpu_get_phys_page_nofault(CPUSPARCState *env, target_ulong addr,
+ int mmu_idx);
+#endif
+#endif
+int cpu_sparc_signal_handler(int host_signum, void *pinfo, void *puc);
+
+#ifndef NO_CPU_IO_DEFS
+#define cpu_init(cpu_model) CPU(cpu_sparc_init(cpu_model))
+#endif
+
+#define cpu_signal_handler cpu_sparc_signal_handler
+#define cpu_list sparc_cpu_list
+
+/* MMU modes definitions */
+#if defined (TARGET_SPARC64)
+#define MMU_USER_IDX 0
+#define MMU_USER_SECONDARY_IDX 1
+#define MMU_KERNEL_IDX 2
+#define MMU_KERNEL_SECONDARY_IDX 3
+#define MMU_NUCLEUS_IDX 4
+#define MMU_HYPV_IDX 5
+#define MMU_PHYS_IDX 6
+#else
+#define MMU_USER_IDX 0
+#define MMU_KERNEL_IDX 1
+#define MMU_PHYS_IDX 2
+#endif
+
+#if defined (TARGET_SPARC64)
+static inline int cpu_has_hypervisor(CPUSPARCState *env1)
+{
+ return env1->def->features & CPU_FEATURE_HYPV;
+}
+
+static inline int cpu_hypervisor_mode(CPUSPARCState *env1)
+{
+ return cpu_has_hypervisor(env1) && (env1->hpstate & HS_PRIV);
+}
+
+static inline int cpu_supervisor_mode(CPUSPARCState *env1)
+{
+ return env1->pstate & PS_PRIV;
+}
+#endif
+
+static inline int cpu_mmu_index(CPUSPARCState *env, bool ifetch)
+{
+#if defined(CONFIG_USER_ONLY)
+ return MMU_USER_IDX;
+#elif !defined(TARGET_SPARC64)
+ if ((env->mmuregs[0] & MMU_E) == 0) { /* MMU disabled */
+ return MMU_PHYS_IDX;
+ } else {
+ return env->psrs;
+ }
+#else
+ /* IMMU or DMMU disabled. */
+ if (ifetch
+ ? (env->lsu & IMMU_E) == 0 || (env->pstate & PS_RED) != 0
+ : (env->lsu & DMMU_E) == 0) {
+ return MMU_PHYS_IDX;
+ } else if (env->tl > 0) {
+ return MMU_NUCLEUS_IDX;
+ } else if (cpu_hypervisor_mode(env)) {
+ return MMU_HYPV_IDX;
+ } else if (cpu_supervisor_mode(env)) {
+ return MMU_KERNEL_IDX;
+ } else {
+ return MMU_USER_IDX;
+ }
+#endif
+}
+
+static inline int cpu_interrupts_enabled(CPUSPARCState *env1)
+{
+#if !defined (TARGET_SPARC64)
+ if (env1->psret != 0)
+ return 1;
+#else
+ if (env1->pstate & PS_IE)
+ return 1;
+#endif
+
+ return 0;
+}
+
+static inline int cpu_pil_allowed(CPUSPARCState *env1, int pil)
+{
+#if !defined(TARGET_SPARC64)
+ /* level 15 is non-maskable on sparc v8 */
+ return pil == 15 || pil > env1->psrpil;
+#else
+ return pil > env1->psrpil;
+#endif
+}
+
+#include "exec/cpu-all.h"
+
+#ifdef TARGET_SPARC64
+/* sun4u.c */
+void cpu_tick_set_count(CPUTimer *timer, uint64_t count);
+uint64_t cpu_tick_get_count(CPUTimer *timer);
+void cpu_tick_set_limit(CPUTimer *timer, uint64_t limit);
+trap_state* cpu_tsptr(CPUSPARCState* env);
+#endif
+
+#define TB_FLAG_MMU_MASK 7
+#define TB_FLAG_FPU_ENABLED (1 << 4)
+#define TB_FLAG_AM_ENABLED (1 << 5)
+#define TB_FLAG_ASI_SHIFT 24
+
+static inline void cpu_get_tb_cpu_state(CPUSPARCState *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *pflags)
+{
+ uint32_t flags;
+ *pc = env->pc;
+ *cs_base = env->npc;
+ flags = cpu_mmu_index(env, false);
+#ifdef TARGET_SPARC64
+ if (env->pstate & PS_AM) {
+ flags |= TB_FLAG_AM_ENABLED;
+ }
+ if ((env->def->features & CPU_FEATURE_FLOAT)
+ && (env->pstate & PS_PEF)
+ && (env->fprs & FPRS_FEF)) {
+ flags |= TB_FLAG_FPU_ENABLED;
+ }
+ flags |= env->asi << TB_FLAG_ASI_SHIFT;
+#else
+ if ((env->def->features & CPU_FEATURE_FLOAT) && env->psref) {
+ flags |= TB_FLAG_FPU_ENABLED;
+ }
+#endif
+ *pflags = flags;
+}
+
+static inline bool tb_fpu_enabled(int tb_flags)
+{
+#if defined(CONFIG_USER_ONLY)
+ return true;
+#else
+ return tb_flags & TB_FLAG_FPU_ENABLED;
+#endif
+}
+
+static inline bool tb_am_enabled(int tb_flags)
+{
+#ifndef TARGET_SPARC64
+ return false;
+#else
+ return tb_flags & TB_FLAG_AM_ENABLED;
+#endif
+}
+
+#endif
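
The TB flags word assembled by cpu_get_tb_cpu_state() above packs the MMU index into bits 2:0, the FPU and AM enables into bits 4 and 5, and (on sparc64) the current ASI into bits 31:24. A minimal standalone sketch of decoding such a word, assuming only the TB_FLAG_* layout shown in the header (decode_tb_flags is illustrative, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Illustrative decode of the TB flags word built by cpu_get_tb_cpu_state().
 * Field positions mirror TB_FLAG_MMU_MASK, TB_FLAG_FPU_ENABLED,
 * TB_FLAG_AM_ENABLED and TB_FLAG_ASI_SHIFT from the header above. */
static void decode_tb_flags(uint32_t flags)
{
    unsigned mmu_idx = flags & 7;            /* TB_FLAG_MMU_MASK    */
    int fpu_enabled  = (flags >> 4) & 1;     /* TB_FLAG_FPU_ENABLED */
    int am_enabled   = (flags >> 5) & 1;     /* TB_FLAG_AM_ENABLED  */
    unsigned asi     = (flags >> 24) & 0xff; /* TB_FLAG_ASI_SHIFT   */

    printf("mmu_idx=%u fpu=%d am=%d asi=0x%02x\n",
           mmu_idx, fpu_enabled, am_enabled, asi);
}

int main(void)
{
    /* e.g. kernel MMU index, FPU on, ASI 0x80 */
    decode_tb_flags(2 | (1 << 4) | (0x80u << 24));
    return 0;
}
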
diff --git a/target/sparc/fop_helper.c b/target/sparc/fop_helper.c
new file mode 100644
index 0000000000..c7fb176e4c
--- /dev/null
+++ b/target/sparc/fop_helper.c
@@ -0,0 +1,400 @@
+/*
+ * FPU op helpers
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+
+#define QT0 (env->qt0)
+#define QT1 (env->qt1)
+
+static target_ulong do_check_ieee_exceptions(CPUSPARCState *env, uintptr_t ra)
+{
+ target_ulong status = get_float_exception_flags(&env->fp_status);
+ target_ulong fsr = env->fsr;
+
+ if (unlikely(status)) {
+ /* Keep exception flags clear for next time. */
+ set_float_exception_flags(0, &env->fp_status);
+
+ /* Copy IEEE 754 flags into FSR */
+ if (status & float_flag_invalid) {
+ fsr |= FSR_NVC;
+ }
+ if (status & float_flag_overflow) {
+ fsr |= FSR_OFC;
+ }
+ if (status & float_flag_underflow) {
+ fsr |= FSR_UFC;
+ }
+ if (status & float_flag_divbyzero) {
+ fsr |= FSR_DZC;
+ }
+ if (status & float_flag_inexact) {
+ fsr |= FSR_NXC;
+ }
+
+ if ((fsr & FSR_CEXC_MASK) & ((fsr & FSR_TEM_MASK) >> 23)) {
+ CPUState *cs = CPU(sparc_env_get_cpu(env));
+
+ /* Unmasked exception, generate a trap. Note that while
+ the helper is marked as NO_WG, we can get away with
+ writing to cpu state along the exception path, since
+ TCG generated code will never see the write. */
+ env->fsr = fsr | FSR_FTT_IEEE_EXCP;
+ cs->exception_index = TT_FP_EXCP;
+ cpu_loop_exit_restore(cs, ra);
+ } else {
+ /* Accumulate exceptions */
+ fsr |= (fsr & FSR_CEXC_MASK) << 5;
+ }
+ }
+
+ return fsr;
+}
+
+target_ulong helper_check_ieee_exceptions(CPUSPARCState *env)
+{
+ return do_check_ieee_exceptions(env, GETPC());
+}
+
+#define F_HELPER(name, p) void helper_f##name##p(CPUSPARCState *env)
+
+#define F_BINOP(name) \
+ float32 helper_f ## name ## s (CPUSPARCState *env, float32 src1, \
+ float32 src2) \
+ { \
+ return float32_ ## name (src1, src2, &env->fp_status); \
+ } \
+ float64 helper_f ## name ## d (CPUSPARCState * env, float64 src1,\
+ float64 src2) \
+ { \
+ return float64_ ## name (src1, src2, &env->fp_status); \
+ } \
+ F_HELPER(name, q) \
+ { \
+ QT0 = float128_ ## name (QT0, QT1, &env->fp_status); \
+ }
+
+F_BINOP(add);
+F_BINOP(sub);
+F_BINOP(mul);
+F_BINOP(div);
+#undef F_BINOP
+
+float64 helper_fsmuld(CPUSPARCState *env, float32 src1, float32 src2)
+{
+ return float64_mul(float32_to_float64(src1, &env->fp_status),
+ float32_to_float64(src2, &env->fp_status),
+ &env->fp_status);
+}
+
+void helper_fdmulq(CPUSPARCState *env, float64 src1, float64 src2)
+{
+ QT0 = float128_mul(float64_to_float128(src1, &env->fp_status),
+ float64_to_float128(src2, &env->fp_status),
+ &env->fp_status);
+}
+
+float32 helper_fnegs(float32 src)
+{
+ return float32_chs(src);
+}
+
+#ifdef TARGET_SPARC64
+float64 helper_fnegd(float64 src)
+{
+ return float64_chs(src);
+}
+
+F_HELPER(neg, q)
+{
+ QT0 = float128_chs(QT1);
+}
+#endif
+
+/* Integer to float conversion. */
+float32 helper_fitos(CPUSPARCState *env, int32_t src)
+{
+ return int32_to_float32(src, &env->fp_status);
+}
+
+float64 helper_fitod(CPUSPARCState *env, int32_t src)
+{
+ return int32_to_float64(src, &env->fp_status);
+}
+
+void helper_fitoq(CPUSPARCState *env, int32_t src)
+{
+ QT0 = int32_to_float128(src, &env->fp_status);
+}
+
+#ifdef TARGET_SPARC64
+float32 helper_fxtos(CPUSPARCState *env, int64_t src)
+{
+ return int64_to_float32(src, &env->fp_status);
+}
+
+float64 helper_fxtod(CPUSPARCState *env, int64_t src)
+{
+ return int64_to_float64(src, &env->fp_status);
+}
+
+void helper_fxtoq(CPUSPARCState *env, int64_t src)
+{
+ QT0 = int64_to_float128(src, &env->fp_status);
+}
+#endif
+#undef F_HELPER
+
+/* floating point conversion */
+float32 helper_fdtos(CPUSPARCState *env, float64 src)
+{
+ return float64_to_float32(src, &env->fp_status);
+}
+
+float64 helper_fstod(CPUSPARCState *env, float32 src)
+{
+ return float32_to_float64(src, &env->fp_status);
+}
+
+float32 helper_fqtos(CPUSPARCState *env)
+{
+ return float128_to_float32(QT1, &env->fp_status);
+}
+
+void helper_fstoq(CPUSPARCState *env, float32 src)
+{
+ QT0 = float32_to_float128(src, &env->fp_status);
+}
+
+float64 helper_fqtod(CPUSPARCState *env)
+{
+ return float128_to_float64(QT1, &env->fp_status);
+}
+
+void helper_fdtoq(CPUSPARCState *env, float64 src)
+{
+ QT0 = float64_to_float128(src, &env->fp_status);
+}
+
+/* Float to integer conversion. */
+int32_t helper_fstoi(CPUSPARCState *env, float32 src)
+{
+ return float32_to_int32_round_to_zero(src, &env->fp_status);
+}
+
+int32_t helper_fdtoi(CPUSPARCState *env, float64 src)
+{
+ return float64_to_int32_round_to_zero(src, &env->fp_status);
+}
+
+int32_t helper_fqtoi(CPUSPARCState *env)
+{
+ return float128_to_int32_round_to_zero(QT1, &env->fp_status);
+}
+
+#ifdef TARGET_SPARC64
+int64_t helper_fstox(CPUSPARCState *env, float32 src)
+{
+ return float32_to_int64_round_to_zero(src, &env->fp_status);
+}
+
+int64_t helper_fdtox(CPUSPARCState *env, float64 src)
+{
+ return float64_to_int64_round_to_zero(src, &env->fp_status);
+}
+
+int64_t helper_fqtox(CPUSPARCState *env)
+{
+ return float128_to_int64_round_to_zero(QT1, &env->fp_status);
+}
+#endif
+
+float32 helper_fabss(float32 src)
+{
+ return float32_abs(src);
+}
+
+#ifdef TARGET_SPARC64
+float64 helper_fabsd(float64 src)
+{
+ return float64_abs(src);
+}
+
+void helper_fabsq(CPUSPARCState *env)
+{
+ QT0 = float128_abs(QT1);
+}
+#endif
+
+float32 helper_fsqrts(CPUSPARCState *env, float32 src)
+{
+ return float32_sqrt(src, &env->fp_status);
+}
+
+float64 helper_fsqrtd(CPUSPARCState *env, float64 src)
+{
+ return float64_sqrt(src, &env->fp_status);
+}
+
+void helper_fsqrtq(CPUSPARCState *env)
+{
+ QT0 = float128_sqrt(QT1, &env->fp_status);
+}
+
+#define GEN_FCMP(name, size, reg1, reg2, FS, E) \
+ target_ulong glue(helper_, name) (CPUSPARCState *env) \
+ { \
+ int ret; \
+ target_ulong fsr; \
+ if (E) { \
+ ret = glue(size, _compare)(reg1, reg2, &env->fp_status); \
+ } else { \
+ ret = glue(size, _compare_quiet)(reg1, reg2, \
+ &env->fp_status); \
+ } \
+ fsr = do_check_ieee_exceptions(env, GETPC()); \
+ switch (ret) { \
+ case float_relation_unordered: \
+ fsr |= (FSR_FCC1 | FSR_FCC0) << FS; \
+ fsr |= FSR_NVA; \
+ break; \
+ case float_relation_less: \
+ fsr &= ~(FSR_FCC1) << FS; \
+ fsr |= FSR_FCC0 << FS; \
+ break; \
+ case float_relation_greater: \
+ fsr &= ~(FSR_FCC0) << FS; \
+ fsr |= FSR_FCC1 << FS; \
+ break; \
+ default: \
+ fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS); \
+ break; \
+ } \
+ return fsr; \
+ }
+#define GEN_FCMP_T(name, size, FS, E) \
+ target_ulong glue(helper_, name)(CPUSPARCState *env, size src1, size src2)\
+ { \
+ int ret; \
+ target_ulong fsr; \
+ if (E) { \
+ ret = glue(size, _compare)(src1, src2, &env->fp_status); \
+ } else { \
+ ret = glue(size, _compare_quiet)(src1, src2, \
+ &env->fp_status); \
+ } \
+ fsr = do_check_ieee_exceptions(env, GETPC()); \
+ switch (ret) { \
+ case float_relation_unordered: \
+ fsr |= (FSR_FCC1 | FSR_FCC0) << FS; \
+ break; \
+ case float_relation_less: \
+ fsr &= ~(FSR_FCC1 << FS); \
+ fsr |= FSR_FCC0 << FS; \
+ break; \
+ case float_relation_greater: \
+ fsr &= ~(FSR_FCC0 << FS); \
+ fsr |= FSR_FCC1 << FS; \
+ break; \
+ default: \
+ fsr &= ~((FSR_FCC1 | FSR_FCC0) << FS); \
+ break; \
+ } \
+ return fsr; \
+ }
+
+GEN_FCMP_T(fcmps, float32, 0, 0);
+GEN_FCMP_T(fcmpd, float64, 0, 0);
+
+GEN_FCMP_T(fcmpes, float32, 0, 1);
+GEN_FCMP_T(fcmped, float64, 0, 1);
+
+GEN_FCMP(fcmpq, float128, QT0, QT1, 0, 0);
+GEN_FCMP(fcmpeq, float128, QT0, QT1, 0, 1);
+
+#ifdef TARGET_SPARC64
+GEN_FCMP_T(fcmps_fcc1, float32, 22, 0);
+GEN_FCMP_T(fcmpd_fcc1, float64, 22, 0);
+GEN_FCMP(fcmpq_fcc1, float128, QT0, QT1, 22, 0);
+
+GEN_FCMP_T(fcmps_fcc2, float32, 24, 0);
+GEN_FCMP_T(fcmpd_fcc2, float64, 24, 0);
+GEN_FCMP(fcmpq_fcc2, float128, QT0, QT1, 24, 0);
+
+GEN_FCMP_T(fcmps_fcc3, float32, 26, 0);
+GEN_FCMP_T(fcmpd_fcc3, float64, 26, 0);
+GEN_FCMP(fcmpq_fcc3, float128, QT0, QT1, 26, 0);
+
+GEN_FCMP_T(fcmpes_fcc1, float32, 22, 1);
+GEN_FCMP_T(fcmped_fcc1, float64, 22, 1);
+GEN_FCMP(fcmpeq_fcc1, float128, QT0, QT1, 22, 1);
+
+GEN_FCMP_T(fcmpes_fcc2, float32, 24, 1);
+GEN_FCMP_T(fcmped_fcc2, float64, 24, 1);
+GEN_FCMP(fcmpeq_fcc2, float128, QT0, QT1, 24, 1);
+
+GEN_FCMP_T(fcmpes_fcc3, float32, 26, 1);
+GEN_FCMP_T(fcmped_fcc3, float64, 26, 1);
+GEN_FCMP(fcmpeq_fcc3, float128, QT0, QT1, 26, 1);
+#endif
+#undef GEN_FCMP_T
+#undef GEN_FCMP
+
+static void set_fsr(CPUSPARCState *env, target_ulong fsr)
+{
+ int rnd_mode;
+
+ switch (fsr & FSR_RD_MASK) {
+ case FSR_RD_NEAREST:
+ rnd_mode = float_round_nearest_even;
+ break;
+ default:
+ case FSR_RD_ZERO:
+ rnd_mode = float_round_to_zero;
+ break;
+ case FSR_RD_POS:
+ rnd_mode = float_round_up;
+ break;
+ case FSR_RD_NEG:
+ rnd_mode = float_round_down;
+ break;
+ }
+ set_float_rounding_mode(rnd_mode, &env->fp_status);
+}
+
+target_ulong helper_ldfsr(CPUSPARCState *env, target_ulong old_fsr,
+ uint32_t new_fsr)
+{
+ old_fsr = (new_fsr & FSR_LDFSR_MASK) | (old_fsr & FSR_LDFSR_OLDMASK);
+ set_fsr(env, old_fsr);
+ return old_fsr;
+}
+
+#ifdef TARGET_SPARC64
+target_ulong helper_ldxfsr(CPUSPARCState *env, target_ulong old_fsr,
+ uint64_t new_fsr)
+{
+ old_fsr = (new_fsr & FSR_LDXFSR_MASK) | (old_fsr & FSR_LDXFSR_OLDMASK);
+ set_fsr(env, old_fsr);
+ return old_fsr;
+}
+#endif
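
do_check_ieee_exceptions() above copies the softfloat exception flags into the FSR current-exception field, traps if any of those bits is also set in the trap-enable mask, and otherwise folds them into the accrued-exception field. A minimal sketch of that cexc/aexc bookkeeping, assuming the standard SPARC V9 FSR bit positions rather than the FSR_* macros from cpu.h (fold_or_trap is an illustrative name):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define CEXC_MASK  0x1fu          /* current exception bits, FSR[4:0]  */
#define AEXC_SHIFT 5              /* accrued exception bits, FSR[9:5]  */
#define TEM_SHIFT  23             /* trap enable mask,      FSR[27:23] */

/* Returns true when an unmasked IEEE exception should trap; otherwise
 * folds the current exceptions into the accrued field, as the helper does. */
static bool fold_or_trap(uint64_t *fsr)
{
    uint64_t cexc = *fsr & CEXC_MASK;
    uint64_t tem  = (*fsr >> TEM_SHIFT) & CEXC_MASK;

    if (cexc & tem) {
        return true;              /* would deliver TT_FP_EXCP */
    }
    *fsr |= cexc << AEXC_SHIFT;   /* accumulate into aexc */
    return false;
}

int main(void)
{
    uint64_t fsr = 0x01;          /* inexact set, no traps enabled */
    int trapped = fold_or_trap(&fsr);

    printf("trap=%d fsr=0x%llx\n", trapped, (unsigned long long)fsr);
    return 0;
}
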
diff --git a/target/sparc/gdbstub.c b/target/sparc/gdbstub.c
new file mode 100644
index 0000000000..ffc2baa2e7
--- /dev/null
+++ b/target/sparc/gdbstub.c
@@ -0,0 +1,209 @@
+/*
+ * SPARC gdb server stub
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+
+#ifdef TARGET_ABI32
+#define gdb_get_rega(buf, val) gdb_get_reg32(buf, val)
+#else
+#define gdb_get_rega(buf, val) gdb_get_regl(buf, val)
+#endif
+
+int sparc_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+ CPUSPARCState *env = &cpu->env;
+
+ if (n < 8) {
+ /* g0..g7 */
+ return gdb_get_rega(mem_buf, env->gregs[n]);
+ }
+ if (n < 32) {
+ /* register window */
+ return gdb_get_rega(mem_buf, env->regwptr[n - 8]);
+ }
+#if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
+ if (n < 64) {
+ /* fprs */
+ if (n & 1) {
+ return gdb_get_reg32(mem_buf, env->fpr[(n - 32) / 2].l.lower);
+ } else {
+ return gdb_get_reg32(mem_buf, env->fpr[(n - 32) / 2].l.upper);
+ }
+ }
+ /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
+ switch (n) {
+ case 64:
+ return gdb_get_rega(mem_buf, env->y);
+ case 65:
+ return gdb_get_rega(mem_buf, cpu_get_psr(env));
+ case 66:
+ return gdb_get_rega(mem_buf, env->wim);
+ case 67:
+ return gdb_get_rega(mem_buf, env->tbr);
+ case 68:
+ return gdb_get_rega(mem_buf, env->pc);
+ case 69:
+ return gdb_get_rega(mem_buf, env->npc);
+ case 70:
+ return gdb_get_rega(mem_buf, env->fsr);
+ case 71:
+ return gdb_get_rega(mem_buf, 0); /* csr */
+ default:
+ return gdb_get_rega(mem_buf, 0);
+ }
+#else
+ if (n < 64) {
+ /* f0-f31 */
+ if (n & 1) {
+ return gdb_get_reg32(mem_buf, env->fpr[(n - 32) / 2].l.lower);
+ } else {
+ return gdb_get_reg32(mem_buf, env->fpr[(n - 32) / 2].l.upper);
+ }
+ }
+ if (n < 80) {
+ /* f32-f62 (double width, even numbers only) */
+ return gdb_get_reg64(mem_buf, env->fpr[(n - 32) / 2].ll);
+ }
+ switch (n) {
+ case 80:
+ return gdb_get_regl(mem_buf, env->pc);
+ case 81:
+ return gdb_get_regl(mem_buf, env->npc);
+ case 82:
+ return gdb_get_regl(mem_buf, (cpu_get_ccr(env) << 32) |
+ ((env->asi & 0xff) << 24) |
+ ((env->pstate & 0xfff) << 8) |
+ cpu_get_cwp64(env));
+ case 83:
+ return gdb_get_regl(mem_buf, env->fsr);
+ case 84:
+ return gdb_get_regl(mem_buf, env->fprs);
+ case 85:
+ return gdb_get_regl(mem_buf, env->y);
+ }
+#endif
+ return 0;
+}
+
+int sparc_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+ CPUSPARCState *env = &cpu->env;
+#if defined(TARGET_ABI32)
+ abi_ulong tmp;
+
+ tmp = ldl_p(mem_buf);
+#else
+ target_ulong tmp;
+
+ tmp = ldtul_p(mem_buf);
+#endif
+
+ if (n < 8) {
+ /* g0..g7 */
+ env->gregs[n] = tmp;
+ } else if (n < 32) {
+ /* register window */
+ env->regwptr[n - 8] = tmp;
+ }
+#if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
+ else if (n < 64) {
+ /* fprs */
+ /* f0-f31 */
+ if (n & 1) {
+ env->fpr[(n - 32) / 2].l.lower = tmp;
+ } else {
+ env->fpr[(n - 32) / 2].l.upper = tmp;
+ }
+ } else {
+ /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
+ switch (n) {
+ case 64:
+ env->y = tmp;
+ break;
+ case 65:
+ cpu_put_psr(env, tmp);
+ break;
+ case 66:
+ env->wim = tmp;
+ break;
+ case 67:
+ env->tbr = tmp;
+ break;
+ case 68:
+ env->pc = tmp;
+ break;
+ case 69:
+ env->npc = tmp;
+ break;
+ case 70:
+ env->fsr = tmp;
+ break;
+ default:
+ return 0;
+ }
+ }
+ return 4;
+#else
+ else if (n < 64) {
+ /* f0-f31 */
+ tmp = ldl_p(mem_buf);
+ if (n & 1) {
+ env->fpr[(n - 32) / 2].l.lower = tmp;
+ } else {
+ env->fpr[(n - 32) / 2].l.upper = tmp;
+ }
+ return 4;
+ } else if (n < 80) {
+ /* f32-f62 (double width, even numbers only) */
+ env->fpr[(n - 32) / 2].ll = tmp;
+ } else {
+ switch (n) {
+ case 80:
+ env->pc = tmp;
+ break;
+ case 81:
+ env->npc = tmp;
+ break;
+ case 82:
+ cpu_put_ccr(env, tmp >> 32);
+ env->asi = (tmp >> 24) & 0xff;
+ env->pstate = (tmp >> 8) & 0xfff;
+ cpu_put_cwp64(env, tmp & 0xff);
+ break;
+ case 83:
+ env->fsr = tmp;
+ break;
+ case 84:
+ env->fprs = tmp;
+ break;
+ case 85:
+ env->y = tmp;
+ break;
+ default:
+ return 0;
+ }
+ }
+ return 8;
+#endif
+}
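
The read/write handlers above follow the conventional sparc32 gdb register numbering: 0-7 globals, 8-31 the current window through regwptr, 32-63 the 32-bit floating-point registers, then Y, PSR, WIM, TBR, PC, NPC, FSR and a zero CSR. A small illustrative mapping of that numbering (gdb_reg_name is a made-up helper, not part of the stub):

#include <stdio.h>

/* Illustrative map of the sparc32 gdb register numbering served above. */
static const char *gdb_reg_name(int n)
{
    static const char *special[] = {
        "y", "psr", "wim", "tbr", "pc", "npc", "fsr", "csr"
    };
    static char buf[8];

    if (n < 8) {
        snprintf(buf, sizeof(buf), "g%d", n);        /* globals         */
    } else if (n < 16) {
        snprintf(buf, sizeof(buf), "o%d", n - 8);    /* outs (window)   */
    } else if (n < 24) {
        snprintf(buf, sizeof(buf), "l%d", n - 16);   /* locals (window) */
    } else if (n < 32) {
        snprintf(buf, sizeof(buf), "i%d", n - 24);   /* ins (window)    */
    } else if (n < 64) {
        snprintf(buf, sizeof(buf), "f%d", n - 32);   /* fp registers    */
    } else if (n < 72) {
        return special[n - 64];
    } else {
        return "?";
    }
    return buf;
}

int main(void)
{
    printf("%s ", gdb_reg_name(14));   /* o6, the stack pointer */
    printf("%s ", gdb_reg_name(40));   /* f8 */
    printf("%s\n", gdb_reg_name(68));  /* pc */
    return 0;
}
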
diff --git a/target/sparc/helper.c b/target/sparc/helper.c
new file mode 100644
index 0000000000..359b0b15ed
--- /dev/null
+++ b/target/sparc/helper.c
@@ -0,0 +1,257 @@
+/*
+ * Misc Sparc helpers
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "qemu/host-utils.h"
+#include "exec/helper-proto.h"
+#include "sysemu/sysemu.h"
+
+void cpu_raise_exception_ra(CPUSPARCState *env, int tt, uintptr_t ra)
+{
+ CPUState *cs = CPU(sparc_env_get_cpu(env));
+
+ cs->exception_index = tt;
+ cpu_loop_exit_restore(cs, ra);
+}
+
+void helper_raise_exception(CPUSPARCState *env, int tt)
+{
+ CPUState *cs = CPU(sparc_env_get_cpu(env));
+
+ cs->exception_index = tt;
+ cpu_loop_exit(cs);
+}
+
+void helper_debug(CPUSPARCState *env)
+{
+ CPUState *cs = CPU(sparc_env_get_cpu(env));
+
+ cs->exception_index = EXCP_DEBUG;
+ cpu_loop_exit(cs);
+}
+
+#ifdef TARGET_SPARC64
+target_ulong helper_popc(target_ulong val)
+{
+ return ctpop64(val);
+}
+
+void helper_tick_set_count(void *opaque, uint64_t count)
+{
+#if !defined(CONFIG_USER_ONLY)
+ cpu_tick_set_count(opaque, count);
+#endif
+}
+
+uint64_t helper_tick_get_count(CPUSPARCState *env, void *opaque, int mem_idx)
+{
+#if !defined(CONFIG_USER_ONLY)
+ CPUTimer *timer = opaque;
+
+ if (timer->npt && mem_idx < MMU_KERNEL_IDX) {
+ cpu_raise_exception_ra(env, TT_PRIV_INSN, GETPC());
+ }
+
+ return cpu_tick_get_count(timer);
+#else
+ return 0;
+#endif
+}
+
+void helper_tick_set_limit(void *opaque, uint64_t limit)
+{
+#if !defined(CONFIG_USER_ONLY)
+ cpu_tick_set_limit(opaque, limit);
+#endif
+}
+#endif
+
+static target_ulong do_udiv(CPUSPARCState *env, target_ulong a,
+ target_ulong b, int cc, uintptr_t ra)
+{
+ int overflow = 0;
+ uint64_t x0;
+ uint32_t x1;
+
+ x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
+ x1 = (b & 0xffffffff);
+
+ if (x1 == 0) {
+ cpu_raise_exception_ra(env, TT_DIV_ZERO, ra);
+ }
+
+ x0 = x0 / x1;
+ if (x0 > UINT32_MAX) {
+ x0 = UINT32_MAX;
+ overflow = 1;
+ }
+
+ if (cc) {
+ env->cc_dst = x0;
+ env->cc_src2 = overflow;
+ env->cc_op = CC_OP_DIV;
+ }
+ return x0;
+}
+
+target_ulong helper_udiv(CPUSPARCState *env, target_ulong a, target_ulong b)
+{
+ return do_udiv(env, a, b, 0, GETPC());
+}
+
+target_ulong helper_udiv_cc(CPUSPARCState *env, target_ulong a, target_ulong b)
+{
+ return do_udiv(env, a, b, 1, GETPC());
+}
+
+static target_ulong do_sdiv(CPUSPARCState *env, target_ulong a,
+ target_ulong b, int cc, uintptr_t ra)
+{
+ int overflow = 0;
+ int64_t x0;
+ int32_t x1;
+
+ x0 = (a & 0xffffffff) | ((int64_t) (env->y) << 32);
+ x1 = (b & 0xffffffff);
+
+ if (x1 == 0) {
+ cpu_raise_exception_ra(env, TT_DIV_ZERO, ra);
+ } else if (x1 == -1 && x0 == INT64_MIN) {
+ x0 = INT32_MAX;
+ overflow = 1;
+ } else {
+ x0 = x0 / x1;
+ if ((int32_t) x0 != x0) {
+ x0 = x0 < 0 ? INT32_MIN : INT32_MAX;
+ overflow = 1;
+ }
+ }
+
+ if (cc) {
+ env->cc_dst = x0;
+ env->cc_src2 = overflow;
+ env->cc_op = CC_OP_DIV;
+ }
+ return x0;
+}
+
+target_ulong helper_sdiv(CPUSPARCState *env, target_ulong a, target_ulong b)
+{
+ return do_sdiv(env, a, b, 0, GETPC());
+}
+
+target_ulong helper_sdiv_cc(CPUSPARCState *env, target_ulong a, target_ulong b)
+{
+ return do_sdiv(env, a, b, 1, GETPC());
+}
+
+#ifdef TARGET_SPARC64
+int64_t helper_sdivx(CPUSPARCState *env, int64_t a, int64_t b)
+{
+ if (b == 0) {
+ /* Raise divide by zero trap. */
+ cpu_raise_exception_ra(env, TT_DIV_ZERO, GETPC());
+ } else if (b == -1) {
+ /* Avoid overflow trap with i386 divide insn. */
+ return -a;
+ } else {
+ return a / b;
+ }
+}
+
+uint64_t helper_udivx(CPUSPARCState *env, uint64_t a, uint64_t b)
+{
+ if (b == 0) {
+ /* Raise divide by zero trap. */
+ cpu_raise_exception_ra(env, TT_DIV_ZERO, GETPC());
+ }
+ return a / b;
+}
+#endif
+
+target_ulong helper_taddcctv(CPUSPARCState *env, target_ulong src1,
+ target_ulong src2)
+{
+ target_ulong dst;
+
+ /* Tag overflow occurs if either input has bits 0 or 1 set. */
+ if ((src1 | src2) & 3) {
+ goto tag_overflow;
+ }
+
+ dst = src1 + src2;
+
+ /* Tag overflow occurs if the addition overflows. */
+ if (~(src1 ^ src2) & (src1 ^ dst) & (1u << 31)) {
+ goto tag_overflow;
+ }
+
+ /* Only modify the CC after any exceptions have been generated. */
+ env->cc_op = CC_OP_TADDTV;
+ env->cc_src = src1;
+ env->cc_src2 = src2;
+ env->cc_dst = dst;
+ return dst;
+
+ tag_overflow:
+ cpu_raise_exception_ra(env, TT_TOVF, GETPC());
+}
+
+target_ulong helper_tsubcctv(CPUSPARCState *env, target_ulong src1,
+ target_ulong src2)
+{
+ target_ulong dst;
+
+ /* Tag overflow occurs if either input has bits 0 or 1 set. */
+ if ((src1 | src2) & 3) {
+ goto tag_overflow;
+ }
+
+ dst = src1 - src2;
+
+ /* Tag overflow occurs if the subtraction overflows. */
+ if ((src1 ^ src2) & (src1 ^ dst) & (1u << 31)) {
+ goto tag_overflow;
+ }
+
+ /* Only modify the CC after any exceptions have been generated. */
+ env->cc_op = CC_OP_TSUBTV;
+ env->cc_src = src1;
+ env->cc_src2 = src2;
+ env->cc_dst = dst;
+ return dst;
+
+ tag_overflow:
+ cpu_raise_exception_ra(env, TT_TOVF, GETPC());
+}
+
+#ifndef TARGET_SPARC64
+void helper_power_down(CPUSPARCState *env)
+{
+ CPUState *cs = CPU(sparc_env_get_cpu(env));
+
+ cs->halted = 1;
+ cs->exception_index = EXCP_HLT;
+ env->pc = env->npc;
+ env->npc = env->pc + 4;
+ cpu_loop_exit(cs);
+}
+#endif
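
helper_taddcctv() and helper_tsubcctv() above raise TT_TOVF when either operand carries tag bits or the 32-bit signed arithmetic overflows, and only update the condition codes once no trap can occur. The overflow test alone, lifted into a standalone sketch (tadd_overflows is an illustrative name):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Mirrors the overflow conditions in helper_taddcctv(): either operand has
 * its tag bits (1:0) set, or the 32-bit signed addition overflows. */
static bool tadd_overflows(uint32_t a, uint32_t b)
{
    uint32_t sum = a + b;

    if ((a | b) & 3) {
        return true;                                 /* tag bits set   */
    }
    return ((~(a ^ b) & (a ^ sum)) >> 31) != 0;      /* signed overflow */
}

int main(void)
{
    printf("%d %d %d\n",
           tadd_overflows(4, 8),            /* 0: clean tagged add */
           tadd_overflows(4, 9),            /* 1: tag bits set     */
           tadd_overflows(0x7ffffffcu, 4)); /* 1: signed overflow  */
    return 0;
}
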
diff --git a/target/sparc/helper.h b/target/sparc/helper.h
new file mode 100644
index 0000000000..0cf1bfb73a
--- /dev/null
+++ b/target/sparc/helper.h
@@ -0,0 +1,168 @@
+#ifndef TARGET_SPARC64
+DEF_HELPER_1(rett, void, env)
+DEF_HELPER_2(wrpsr, void, env, tl)
+DEF_HELPER_1(rdpsr, tl, env)
+DEF_HELPER_1(power_down, void, env)
+#else
+DEF_HELPER_FLAGS_2(wrpil, TCG_CALL_NO_RWG, void, env, tl)
+DEF_HELPER_2(wrpstate, void, env, tl)
+DEF_HELPER_1(done, void, env)
+DEF_HELPER_1(retry, void, env)
+DEF_HELPER_FLAGS_1(flushw, TCG_CALL_NO_WG, void, env)
+DEF_HELPER_FLAGS_1(saved, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_FLAGS_1(restored, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_1(rdccr, tl, env)
+DEF_HELPER_2(wrccr, void, env, tl)
+DEF_HELPER_1(rdcwp, tl, env)
+DEF_HELPER_2(wrcwp, void, env, tl)
+DEF_HELPER_FLAGS_2(array8, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_1(popc, TCG_CALL_NO_RWG_SE, tl, tl)
+DEF_HELPER_FLAGS_2(set_softint, TCG_CALL_NO_RWG, void, env, i64)
+DEF_HELPER_FLAGS_2(clear_softint, TCG_CALL_NO_RWG, void, env, i64)
+DEF_HELPER_FLAGS_2(write_softint, TCG_CALL_NO_RWG, void, env, i64)
+DEF_HELPER_FLAGS_2(tick_set_count, TCG_CALL_NO_RWG, void, ptr, i64)
+DEF_HELPER_FLAGS_3(tick_get_count, TCG_CALL_NO_WG, i64, env, ptr, int)
+DEF_HELPER_FLAGS_2(tick_set_limit, TCG_CALL_NO_RWG, void, ptr, i64)
+#endif
+DEF_HELPER_FLAGS_3(check_align, TCG_CALL_NO_WG, void, env, tl, i32)
+DEF_HELPER_1(debug, void, env)
+DEF_HELPER_1(save, void, env)
+DEF_HELPER_1(restore, void, env)
+DEF_HELPER_3(udiv, tl, env, tl, tl)
+DEF_HELPER_3(udiv_cc, tl, env, tl, tl)
+DEF_HELPER_3(sdiv, tl, env, tl, tl)
+DEF_HELPER_3(sdiv_cc, tl, env, tl, tl)
+DEF_HELPER_3(taddcctv, tl, env, tl, tl)
+DEF_HELPER_3(tsubcctv, tl, env, tl, tl)
+#ifdef TARGET_SPARC64
+DEF_HELPER_FLAGS_3(sdivx, TCG_CALL_NO_WG, s64, env, s64, s64)
+DEF_HELPER_FLAGS_3(udivx, TCG_CALL_NO_WG, i64, env, i64, i64)
+#endif
+#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
+DEF_HELPER_FLAGS_4(ld_asi, TCG_CALL_NO_WG, i64, env, tl, int, i32)
+DEF_HELPER_FLAGS_5(st_asi, TCG_CALL_NO_WG, void, env, tl, i64, int, i32)
+#endif
+DEF_HELPER_FLAGS_1(check_ieee_exceptions, TCG_CALL_NO_WG, tl, env)
+DEF_HELPER_FLAGS_3(ldfsr, TCG_CALL_NO_RWG, tl, env, tl, i32)
+DEF_HELPER_FLAGS_1(fabss, TCG_CALL_NO_RWG_SE, f32, f32)
+DEF_HELPER_FLAGS_2(fsqrts, TCG_CALL_NO_RWG, f32, env, f32)
+DEF_HELPER_FLAGS_2(fsqrtd, TCG_CALL_NO_RWG, f64, env, f64)
+DEF_HELPER_FLAGS_3(fcmps, TCG_CALL_NO_WG, tl, env, f32, f32)
+DEF_HELPER_FLAGS_3(fcmpd, TCG_CALL_NO_WG, tl, env, f64, f64)
+DEF_HELPER_FLAGS_3(fcmpes, TCG_CALL_NO_WG, tl, env, f32, f32)
+DEF_HELPER_FLAGS_3(fcmped, TCG_CALL_NO_WG, tl, env, f64, f64)
+DEF_HELPER_FLAGS_1(fsqrtq, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_FLAGS_1(fcmpq, TCG_CALL_NO_WG, tl, env)
+DEF_HELPER_FLAGS_1(fcmpeq, TCG_CALL_NO_WG, tl, env)
+#ifdef TARGET_SPARC64
+DEF_HELPER_FLAGS_3(ldxfsr, TCG_CALL_NO_RWG, tl, env, tl, i64)
+DEF_HELPER_FLAGS_1(fabsd, TCG_CALL_NO_RWG_SE, f64, f64)
+DEF_HELPER_FLAGS_3(fcmps_fcc1, TCG_CALL_NO_WG, tl, env, f32, f32)
+DEF_HELPER_FLAGS_3(fcmps_fcc2, TCG_CALL_NO_WG, tl, env, f32, f32)
+DEF_HELPER_FLAGS_3(fcmps_fcc3, TCG_CALL_NO_WG, tl, env, f32, f32)
+DEF_HELPER_FLAGS_3(fcmpd_fcc1, TCG_CALL_NO_WG, tl, env, f64, f64)
+DEF_HELPER_FLAGS_3(fcmpd_fcc2, TCG_CALL_NO_WG, tl, env, f64, f64)
+DEF_HELPER_FLAGS_3(fcmpd_fcc3, TCG_CALL_NO_WG, tl, env, f64, f64)
+DEF_HELPER_FLAGS_3(fcmpes_fcc1, TCG_CALL_NO_WG, tl, env, f32, f32)
+DEF_HELPER_FLAGS_3(fcmpes_fcc2, TCG_CALL_NO_WG, tl, env, f32, f32)
+DEF_HELPER_FLAGS_3(fcmpes_fcc3, TCG_CALL_NO_WG, tl, env, f32, f32)
+DEF_HELPER_FLAGS_3(fcmped_fcc1, TCG_CALL_NO_WG, tl, env, f64, f64)
+DEF_HELPER_FLAGS_3(fcmped_fcc2, TCG_CALL_NO_WG, tl, env, f64, f64)
+DEF_HELPER_FLAGS_3(fcmped_fcc3, TCG_CALL_NO_WG, tl, env, f64, f64)
+DEF_HELPER_FLAGS_1(fabsq, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_FLAGS_1(fcmpq_fcc1, TCG_CALL_NO_WG, tl, env)
+DEF_HELPER_FLAGS_1(fcmpq_fcc2, TCG_CALL_NO_WG, tl, env)
+DEF_HELPER_FLAGS_1(fcmpq_fcc3, TCG_CALL_NO_WG, tl, env)
+DEF_HELPER_FLAGS_1(fcmpeq_fcc1, TCG_CALL_NO_WG, tl, env)
+DEF_HELPER_FLAGS_1(fcmpeq_fcc2, TCG_CALL_NO_WG, tl, env)
+DEF_HELPER_FLAGS_1(fcmpeq_fcc3, TCG_CALL_NO_WG, tl, env)
+#endif
+DEF_HELPER_2(raise_exception, noreturn, env, int)
+#define F_HELPER_0_1(name) \
+ DEF_HELPER_FLAGS_1(f ## name, TCG_CALL_NO_RWG, void, env)
+
+DEF_HELPER_FLAGS_3(faddd, TCG_CALL_NO_RWG, f64, env, f64, f64)
+DEF_HELPER_FLAGS_3(fsubd, TCG_CALL_NO_RWG, f64, env, f64, f64)
+DEF_HELPER_FLAGS_3(fmuld, TCG_CALL_NO_RWG, f64, env, f64, f64)
+DEF_HELPER_FLAGS_3(fdivd, TCG_CALL_NO_RWG, f64, env, f64, f64)
+F_HELPER_0_1(addq)
+F_HELPER_0_1(subq)
+F_HELPER_0_1(mulq)
+F_HELPER_0_1(divq)
+
+DEF_HELPER_FLAGS_3(fadds, TCG_CALL_NO_RWG, f32, env, f32, f32)
+DEF_HELPER_FLAGS_3(fsubs, TCG_CALL_NO_RWG, f32, env, f32, f32)
+DEF_HELPER_FLAGS_3(fmuls, TCG_CALL_NO_RWG, f32, env, f32, f32)
+DEF_HELPER_FLAGS_3(fdivs, TCG_CALL_NO_RWG, f32, env, f32, f32)
+
+DEF_HELPER_FLAGS_3(fsmuld, TCG_CALL_NO_RWG, f64, env, f32, f32)
+DEF_HELPER_FLAGS_3(fdmulq, TCG_CALL_NO_RWG, void, env, f64, f64)
+
+DEF_HELPER_FLAGS_1(fnegs, TCG_CALL_NO_RWG_SE, f32, f32)
+DEF_HELPER_FLAGS_2(fitod, TCG_CALL_NO_RWG_SE, f64, env, s32)
+DEF_HELPER_FLAGS_2(fitoq, TCG_CALL_NO_RWG, void, env, s32)
+
+DEF_HELPER_FLAGS_2(fitos, TCG_CALL_NO_RWG, f32, env, s32)
+
+#ifdef TARGET_SPARC64
+DEF_HELPER_FLAGS_1(fnegd, TCG_CALL_NO_RWG_SE, f64, f64)
+DEF_HELPER_FLAGS_1(fnegq, TCG_CALL_NO_RWG, void, env)
+DEF_HELPER_FLAGS_2(fxtos, TCG_CALL_NO_RWG, f32, env, s64)
+DEF_HELPER_FLAGS_2(fxtod, TCG_CALL_NO_RWG, f64, env, s64)
+DEF_HELPER_FLAGS_2(fxtoq, TCG_CALL_NO_RWG, void, env, s64)
+#endif
+DEF_HELPER_FLAGS_2(fdtos, TCG_CALL_NO_RWG, f32, env, f64)
+DEF_HELPER_FLAGS_2(fstod, TCG_CALL_NO_RWG, f64, env, f32)
+DEF_HELPER_FLAGS_1(fqtos, TCG_CALL_NO_RWG, f32, env)
+DEF_HELPER_FLAGS_2(fstoq, TCG_CALL_NO_RWG, void, env, f32)
+DEF_HELPER_FLAGS_1(fqtod, TCG_CALL_NO_RWG, f64, env)
+DEF_HELPER_FLAGS_2(fdtoq, TCG_CALL_NO_RWG, void, env, f64)
+DEF_HELPER_FLAGS_2(fstoi, TCG_CALL_NO_RWG, s32, env, f32)
+DEF_HELPER_FLAGS_2(fdtoi, TCG_CALL_NO_RWG, s32, env, f64)
+DEF_HELPER_FLAGS_1(fqtoi, TCG_CALL_NO_RWG, s32, env)
+#ifdef TARGET_SPARC64
+DEF_HELPER_FLAGS_2(fstox, TCG_CALL_NO_RWG, s64, env, f32)
+DEF_HELPER_FLAGS_2(fdtox, TCG_CALL_NO_RWG, s64, env, f64)
+DEF_HELPER_FLAGS_1(fqtox, TCG_CALL_NO_RWG, s64, env)
+
+DEF_HELPER_FLAGS_2(fpmerge, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(fmul8x16, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(fmul8x16al, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(fmul8x16au, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(fmul8sux16, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(fmul8ulx16, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(fmuld8sux16, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(fmuld8ulx16, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(fexpand, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_3(pdist, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64)
+DEF_HELPER_FLAGS_2(fpack16, TCG_CALL_NO_RWG_SE, i32, i64, i64)
+DEF_HELPER_FLAGS_3(fpack32, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64)
+DEF_HELPER_FLAGS_2(fpackfix, TCG_CALL_NO_RWG_SE, i32, i64, i64)
+DEF_HELPER_FLAGS_3(bshuffle, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64)
+#define VIS_HELPER(name) \
+ DEF_HELPER_FLAGS_2(f ## name ## 16, TCG_CALL_NO_RWG_SE, \
+ i64, i64, i64) \
+ DEF_HELPER_FLAGS_2(f ## name ## 16s, TCG_CALL_NO_RWG_SE, \
+ i32, i32, i32) \
+ DEF_HELPER_FLAGS_2(f ## name ## 32, TCG_CALL_NO_RWG_SE, \
+ i64, i64, i64) \
+ DEF_HELPER_FLAGS_2(f ## name ## 32s, TCG_CALL_NO_RWG_SE, \
+ i32, i32, i32)
+
+VIS_HELPER(padd)
+VIS_HELPER(psub)
+#define VIS_CMPHELPER(name) \
+ DEF_HELPER_FLAGS_2(f##name##16, TCG_CALL_NO_RWG_SE, \
+ i64, i64, i64) \
+ DEF_HELPER_FLAGS_2(f##name##32, TCG_CALL_NO_RWG_SE, \
+ i64, i64, i64)
+VIS_CMPHELPER(cmpgt)
+VIS_CMPHELPER(cmpeq)
+VIS_CMPHELPER(cmple)
+VIS_CMPHELPER(cmpne)
+#endif
+#undef F_HELPER_0_1
+#undef VIS_HELPER
+#undef VIS_CMPHELPER
+DEF_HELPER_1(compute_psr, void, env)
+DEF_HELPER_FLAGS_1(compute_C_icc, TCG_CALL_NO_WG_SE, i32, env)
diff --git a/target/sparc/int32_helper.c b/target/sparc/int32_helper.c
new file mode 100644
index 0000000000..09afe136e5
--- /dev/null
+++ b/target/sparc/int32_helper.c
@@ -0,0 +1,175 @@
+/*
+ * Sparc32 interrupt helpers
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "trace.h"
+#include "sysemu/sysemu.h"
+#include "exec/log.h"
+
+#define DEBUG_PCALL
+
+#ifdef DEBUG_PCALL
+static const char * const excp_names[0x80] = {
+ [TT_TFAULT] = "Instruction Access Fault",
+ [TT_ILL_INSN] = "Illegal Instruction",
+ [TT_PRIV_INSN] = "Privileged Instruction",
+ [TT_NFPU_INSN] = "FPU Disabled",
+ [TT_WIN_OVF] = "Window Overflow",
+ [TT_WIN_UNF] = "Window Underflow",
+ [TT_UNALIGNED] = "Unaligned Memory Access",
+ [TT_FP_EXCP] = "FPU Exception",
+ [TT_DFAULT] = "Data Access Fault",
+ [TT_TOVF] = "Tag Overflow",
+ [TT_EXTINT | 0x1] = "External Interrupt 1",
+ [TT_EXTINT | 0x2] = "External Interrupt 2",
+ [TT_EXTINT | 0x3] = "External Interrupt 3",
+ [TT_EXTINT | 0x4] = "External Interrupt 4",
+ [TT_EXTINT | 0x5] = "External Interrupt 5",
+ [TT_EXTINT | 0x6] = "External Interrupt 6",
+ [TT_EXTINT | 0x7] = "External Interrupt 7",
+ [TT_EXTINT | 0x8] = "External Interrupt 8",
+ [TT_EXTINT | 0x9] = "External Interrupt 9",
+ [TT_EXTINT | 0xa] = "External Interrupt 10",
+ [TT_EXTINT | 0xb] = "External Interrupt 11",
+ [TT_EXTINT | 0xc] = "External Interrupt 12",
+ [TT_EXTINT | 0xd] = "External Interrupt 13",
+ [TT_EXTINT | 0xe] = "External Interrupt 14",
+ [TT_EXTINT | 0xf] = "External Interrupt 15",
+ [TT_TOVF] = "Tag Overflow",
+ [TT_CODE_ACCESS] = "Instruction Access Error",
+ [TT_DATA_ACCESS] = "Data Access Error",
+ [TT_DIV_ZERO] = "Division By Zero",
+ [TT_NCP_INSN] = "Coprocessor Disabled",
+};
+#endif
+
+void sparc_cpu_do_interrupt(CPUState *cs)
+{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+ CPUSPARCState *env = &cpu->env;
+ int cwp, intno = cs->exception_index;
+
+ /* Compute PSR before exposing state. */
+ if (env->cc_op != CC_OP_FLAGS) {
+ cpu_get_psr(env);
+ }
+
+#ifdef DEBUG_PCALL
+ if (qemu_loglevel_mask(CPU_LOG_INT)) {
+ static int count;
+ const char *name;
+
+ if (intno < 0 || intno >= 0x100) {
+ name = "Unknown";
+ } else if (intno >= 0x80) {
+ name = "Trap Instruction";
+ } else {
+ name = excp_names[intno];
+ if (!name) {
+ name = "Unknown";
+ }
+ }
+
+ qemu_log("%6d: %s (v=%02x)\n", count, name, intno);
+ log_cpu_state(cs, 0);
+#if 0
+ {
+ int i;
+ uint8_t *ptr;
+
+ qemu_log(" code=");
+ ptr = (uint8_t *)env->pc;
+ for (i = 0; i < 16; i++) {
+ qemu_log(" %02x", ldub(ptr + i));
+ }
+ qemu_log("\n");
+ }
+#endif
+ count++;
+ }
+#endif
+#if !defined(CONFIG_USER_ONLY)
+ if (env->psret == 0) {
+ if (cs->exception_index == 0x80 &&
+ env->def->features & CPU_FEATURE_TA0_SHUTDOWN) {
+ qemu_system_shutdown_request();
+ } else {
+ cpu_abort(cs, "Trap 0x%02x while interrupts disabled, Error state",
+ cs->exception_index);
+ }
+ return;
+ }
+#endif
+ env->psret = 0;
+ cwp = cpu_cwp_dec(env, env->cwp - 1);
+ cpu_set_cwp(env, cwp);
+ env->regwptr[9] = env->pc;
+ env->regwptr[10] = env->npc;
+ env->psrps = env->psrs;
+ env->psrs = 1;
+ env->tbr = (env->tbr & TBR_BASE_MASK) | (intno << 4);
+ env->pc = env->tbr;
+ env->npc = env->pc + 4;
+ cs->exception_index = -1;
+
+#if !defined(CONFIG_USER_ONLY)
+ /* IRQ acknowledgment */
+ if ((intno & ~15) == TT_EXTINT && env->qemu_irq_ack != NULL) {
+ env->qemu_irq_ack(env, env->irq_manager, intno);
+ }
+#endif
+}
+
+#if !defined(CONFIG_USER_ONLY)
+static void leon3_cache_control_int(CPUSPARCState *env)
+{
+ uint32_t state = 0;
+
+ if (env->cache_control & CACHE_CTRL_IF) {
+ /* Instruction cache state */
+ state = env->cache_control & CACHE_STATE_MASK;
+ if (state == CACHE_ENABLED) {
+ state = CACHE_FROZEN;
+ trace_int_helper_icache_freeze();
+ }
+
+ env->cache_control &= ~CACHE_STATE_MASK;
+ env->cache_control |= state;
+ }
+
+ if (env->cache_control & CACHE_CTRL_DF) {
+ /* Data cache state */
+ state = (env->cache_control >> 2) & CACHE_STATE_MASK;
+ if (state == CACHE_ENABLED) {
+ state = CACHE_FROZEN;
+ trace_int_helper_dcache_freeze();
+ }
+
+ env->cache_control &= ~(CACHE_STATE_MASK << 2);
+ env->cache_control |= (state << 2);
+ }
+}
+
+void leon3_irq_manager(CPUSPARCState *env, void *irq_manager, int intno)
+{
+ leon3_irq_ack(irq_manager, intno);
+ leon3_cache_control_int(env);
+}
+#endif
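
On sparc32 the interrupt path above enters the trap handler at the trap base address with the trap type folded into TBR bits 11:4. A worked standalone example of that address computation, assuming the usual 0xfffff000 base-address mask (trap_vector is an illustrative name):

#include <stdint.h>
#include <stdio.h>

/* Sparc32 trap entry: trap base address in TBR[31:12], trap type in [11:4]. */
static uint32_t trap_vector(uint32_t tbr, unsigned intno)
{
    return (tbr & 0xfffff000u) | ((intno & 0xff) << 4);
}

int main(void)
{
    /* e.g. trap base 0xf0000000, external interrupt 14 (tt = 0x1e) */
    printf("0x%08x\n", (unsigned)trap_vector(0xf0000000u, 0x1e)); /* 0xf00001e0 */
    return 0;
}
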
diff --git a/target/sparc/int64_helper.c b/target/sparc/int64_helper.c
new file mode 100644
index 0000000000..29360fa5fe
--- /dev/null
+++ b/target/sparc/int64_helper.c
@@ -0,0 +1,205 @@
+/*
+ * Sparc64 interrupt helpers
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/log.h"
+#include "trace.h"
+
+#define DEBUG_PCALL
+
+#ifdef DEBUG_PCALL
+static const char * const excp_names[0x80] = {
+ [TT_TFAULT] = "Instruction Access Fault",
+ [TT_TMISS] = "Instruction Access MMU Miss",
+ [TT_CODE_ACCESS] = "Instruction Access Error",
+ [TT_ILL_INSN] = "Illegal Instruction",
+ [TT_PRIV_INSN] = "Privileged Instruction",
+ [TT_NFPU_INSN] = "FPU Disabled",
+ [TT_FP_EXCP] = "FPU Exception",
+ [TT_TOVF] = "Tag Overflow",
+ [TT_CLRWIN] = "Clean Windows",
+ [TT_DIV_ZERO] = "Division By Zero",
+ [TT_DFAULT] = "Data Access Fault",
+ [TT_DMISS] = "Data Access MMU Miss",
+ [TT_DATA_ACCESS] = "Data Access Error",
+ [TT_DPROT] = "Data Protection Error",
+ [TT_UNALIGNED] = "Unaligned Memory Access",
+ [TT_PRIV_ACT] = "Privileged Action",
+ [TT_EXTINT | 0x1] = "External Interrupt 1",
+ [TT_EXTINT | 0x2] = "External Interrupt 2",
+ [TT_EXTINT | 0x3] = "External Interrupt 3",
+ [TT_EXTINT | 0x4] = "External Interrupt 4",
+ [TT_EXTINT | 0x5] = "External Interrupt 5",
+ [TT_EXTINT | 0x6] = "External Interrupt 6",
+ [TT_EXTINT | 0x7] = "External Interrupt 7",
+ [TT_EXTINT | 0x8] = "External Interrupt 8",
+ [TT_EXTINT | 0x9] = "External Interrupt 9",
+ [TT_EXTINT | 0xa] = "External Interrupt 10",
+ [TT_EXTINT | 0xb] = "External Interrupt 11",
+ [TT_EXTINT | 0xc] = "External Interrupt 12",
+ [TT_EXTINT | 0xd] = "External Interrupt 13",
+ [TT_EXTINT | 0xe] = "External Interrupt 14",
+ [TT_EXTINT | 0xf] = "External Interrupt 15",
+};
+#endif
+
+void sparc_cpu_do_interrupt(CPUState *cs)
+{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+ CPUSPARCState *env = &cpu->env;
+ int intno = cs->exception_index;
+ trap_state *tsptr;
+
+ /* Compute PSR before exposing state. */
+ if (env->cc_op != CC_OP_FLAGS) {
+ cpu_get_psr(env);
+ }
+
+#ifdef DEBUG_PCALL
+ if (qemu_loglevel_mask(CPU_LOG_INT)) {
+ static int count;
+ const char *name;
+
+ if (intno < 0 || intno >= 0x180) {
+ name = "Unknown";
+ } else if (intno >= 0x100) {
+ name = "Trap Instruction";
+ } else if (intno >= 0xc0) {
+ name = "Window Fill";
+ } else if (intno >= 0x80) {
+ name = "Window Spill";
+ } else {
+ name = excp_names[intno];
+ if (!name) {
+ name = "Unknown";
+ }
+ }
+
+ qemu_log("%6d: %s (v=%04x)\n", count, name, intno);
+ log_cpu_state(cs, 0);
+#if 0
+ {
+ int i;
+ uint8_t *ptr;
+
+ qemu_log(" code=");
+ ptr = (uint8_t *)env->pc;
+ for (i = 0; i < 16; i++) {
+ qemu_log(" %02x", ldub(ptr + i));
+ }
+ qemu_log("\n");
+ }
+#endif
+ count++;
+ }
+#endif
+#if !defined(CONFIG_USER_ONLY)
+ if (env->tl >= env->maxtl) {
+ cpu_abort(cs, "Trap 0x%04x while trap level (%d) >= MAXTL (%d),"
+ " Error state", cs->exception_index, env->tl, env->maxtl);
+ return;
+ }
+#endif
+ if (env->tl < env->maxtl - 1) {
+ env->tl++;
+ } else {
+ env->pstate |= PS_RED;
+ if (env->tl < env->maxtl) {
+ env->tl++;
+ }
+ }
+ tsptr = cpu_tsptr(env);
+
+ tsptr->tstate = (cpu_get_ccr(env) << 32) |
+ ((env->asi & 0xff) << 24) | ((env->pstate & 0xf3f) << 8) |
+ cpu_get_cwp64(env);
+ tsptr->tpc = env->pc;
+ tsptr->tnpc = env->npc;
+ tsptr->tt = intno;
+
+ switch (intno) {
+ case TT_IVEC:
+ cpu_change_pstate(env, PS_PEF | PS_PRIV | PS_IG);
+ break;
+ case TT_TFAULT:
+ case TT_DFAULT:
+ case TT_TMISS ... TT_TMISS + 3:
+ case TT_DMISS ... TT_DMISS + 3:
+ case TT_DPROT ... TT_DPROT + 3:
+ cpu_change_pstate(env, PS_PEF | PS_PRIV | PS_MG);
+ break;
+ default:
+ cpu_change_pstate(env, PS_PEF | PS_PRIV | PS_AG);
+ break;
+ }
+
+ if (intno == TT_CLRWIN) {
+ cpu_set_cwp(env, cpu_cwp_dec(env, env->cwp - 1));
+ } else if ((intno & 0x1c0) == TT_SPILL) {
+ cpu_set_cwp(env, cpu_cwp_dec(env, env->cwp - env->cansave - 2));
+ } else if ((intno & 0x1c0) == TT_FILL) {
+ cpu_set_cwp(env, cpu_cwp_inc(env, env->cwp + 1));
+ }
+ env->pc = env->tbr & ~0x7fffULL;
+ env->pc |= ((env->tl > 1) ? 1 << 14 : 0) | (intno << 5);
+ env->npc = env->pc + 4;
+ cs->exception_index = -1;
+}
+
+trap_state *cpu_tsptr(CPUSPARCState* env)
+{
+ return &env->ts[env->tl & MAXTL_MASK];
+}
+
+static bool do_modify_softint(CPUSPARCState *env, uint32_t value)
+{
+ if (env->softint != value) {
+ env->softint = value;
+#if !defined(CONFIG_USER_ONLY)
+ if (cpu_interrupts_enabled(env)) {
+ cpu_check_irqs(env);
+ }
+#endif
+ return true;
+ }
+ return false;
+}
+
+void helper_set_softint(CPUSPARCState *env, uint64_t value)
+{
+ if (do_modify_softint(env, env->softint | (uint32_t)value)) {
+ trace_int_helper_set_softint(env->softint);
+ }
+}
+
+void helper_clear_softint(CPUSPARCState *env, uint64_t value)
+{
+ if (do_modify_softint(env, env->softint & (uint32_t)~value)) {
+ trace_int_helper_clear_softint(env->softint);
+ }
+}
+
+void helper_write_softint(CPUSPARCState *env, uint64_t value)
+{
+ if (do_modify_softint(env, (uint32_t)value)) {
+ trace_int_helper_write_softint(env->softint);
+ }
+}
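
The sparc64 path above saves the pre-trap state by packing CCR, ASI, PSTATE and CWP into the TSTATE entry of the current trap level. A standalone sketch of that packing with the same field positions (pack_tstate is an illustrative name):

#include <stdint.h>
#include <stdio.h>

/* Packs a SPARC V9 TSTATE word the way sparc_cpu_do_interrupt() does above:
 * CCR in bits 39:32, ASI in 31:24, PSTATE (masked) in 19:8, CWP in 4:0. */
static uint64_t pack_tstate(uint8_t ccr, uint8_t asi,
                            uint32_t pstate, uint8_t cwp)
{
    return ((uint64_t)ccr << 32) |
           ((uint64_t)asi << 24) |
           ((uint64_t)(pstate & 0xf3f) << 8) |
           (cwp & 0x1f);
}

int main(void)
{
    /* e.g. ccr=0x44, asi=0x80, pstate=0x16 (PEF|PRIV|IE), cwp=3 */
    printf("0x%016llx\n",
           (unsigned long long)pack_tstate(0x44, 0x80, 0x16, 3));
    return 0;
}
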
diff --git a/target/sparc/ldst_helper.c b/target/sparc/ldst_helper.c
new file mode 100644
index 0000000000..de7d53ae20
--- /dev/null
+++ b/target/sparc/ldst_helper.c
@@ -0,0 +1,1709 @@
+/*
+ * Helpers for loads and stores
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "tcg.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "asi.h"
+
+//#define DEBUG_MMU
+//#define DEBUG_MXCC
+//#define DEBUG_UNALIGNED
+//#define DEBUG_UNASSIGNED
+//#define DEBUG_ASI
+//#define DEBUG_CACHE_CONTROL
+
+#ifdef DEBUG_MMU
+#define DPRINTF_MMU(fmt, ...) \
+ do { printf("MMU: " fmt , ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF_MMU(fmt, ...) do {} while (0)
+#endif
+
+#ifdef DEBUG_MXCC
+#define DPRINTF_MXCC(fmt, ...) \
+ do { printf("MXCC: " fmt , ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF_MXCC(fmt, ...) do {} while (0)
+#endif
+
+#ifdef DEBUG_ASI
+#define DPRINTF_ASI(fmt, ...) \
+ do { printf("ASI: " fmt , ## __VA_ARGS__); } while (0)
+#endif
+
+#ifdef DEBUG_CACHE_CONTROL
+#define DPRINTF_CACHE_CONTROL(fmt, ...) \
+ do { printf("CACHE_CONTROL: " fmt , ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF_CACHE_CONTROL(fmt, ...) do {} while (0)
+#endif
+
+#ifdef TARGET_SPARC64
+#ifndef TARGET_ABI32
+#define AM_CHECK(env1) ((env1)->pstate & PS_AM)
+#else
+#define AM_CHECK(env1) (1)
+#endif
+#endif
+
+#define QT0 (env->qt0)
+#define QT1 (env->qt1)
+
+#if defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
+/* Calculates TSB pointer value for fault page size 8k or 64k */
+static uint64_t ultrasparc_tsb_pointer(uint64_t tsb_register,
+ uint64_t tag_access_register,
+ int page_size)
+{
+ uint64_t tsb_base = tsb_register & ~0x1fffULL;
+ int tsb_split = (tsb_register & 0x1000ULL) ? 1 : 0;
+ int tsb_size = tsb_register & 0xf;
+
+ /* discard lower 13 bits which hold tag access context */
+ uint64_t tag_access_va = tag_access_register & ~0x1fffULL;
+
+ /* now reorder bits */
+ uint64_t tsb_base_mask = ~0x1fffULL;
+ uint64_t va = tag_access_va;
+
+ /* move va bits to correct position */
+ if (page_size == 8*1024) {
+ va >>= 9;
+ } else if (page_size == 64*1024) {
+ va >>= 12;
+ }
+
+ if (tsb_size) {
+ tsb_base_mask <<= tsb_size;
+ }
+
+ /* calculate tsb_base mask and adjust va if split is in use */
+ if (tsb_split) {
+ if (page_size == 8*1024) {
+ va &= ~(1ULL << (13 + tsb_size));
+ } else if (page_size == 64*1024) {
+ va |= (1ULL << (13 + tsb_size));
+ }
+ tsb_base_mask <<= 1;
+ }
+
+ return ((tsb_base & tsb_base_mask) | (va & ~tsb_base_mask)) & ~0xfULL;
+}
+
+/* Calculates tag target register value by reordering bits
+ in tag access register */
+static uint64_t ultrasparc_tag_target(uint64_t tag_access_register)
+{
+ return ((tag_access_register & 0x1fff) << 48) | (tag_access_register >> 22);
+}
+
+static void replace_tlb_entry(SparcTLBEntry *tlb,
+ uint64_t tlb_tag, uint64_t tlb_tte,
+ CPUSPARCState *env1)
+{
+ target_ulong mask, size, va, offset;
+
+ /* flush page range if translation is valid */
+ if (TTE_IS_VALID(tlb->tte)) {
+ CPUState *cs = CPU(sparc_env_get_cpu(env1));
+
+ mask = 0xffffffffffffe000ULL;
+ mask <<= 3 * ((tlb->tte >> 61) & 3);
+ size = ~mask + 1;
+
+ va = tlb->tag & mask;
+
+ for (offset = 0; offset < size; offset += TARGET_PAGE_SIZE) {
+ tlb_flush_page(cs, va + offset);
+ }
+ }
+
+ tlb->tag = tlb_tag;
+ tlb->tte = tlb_tte;
+}
+
+static void demap_tlb(SparcTLBEntry *tlb, target_ulong demap_addr,
+ const char *strmmu, CPUSPARCState *env1)
+{
+ unsigned int i;
+ target_ulong mask;
+ uint64_t context;
+
+ int is_demap_context = (demap_addr >> 6) & 1;
+
+ /* demap context */
+ switch ((demap_addr >> 4) & 3) {
+ case 0: /* primary */
+ context = env1->dmmu.mmu_primary_context;
+ break;
+ case 1: /* secondary */
+ context = env1->dmmu.mmu_secondary_context;
+ break;
+ case 2: /* nucleus */
+ context = 0;
+ break;
+ case 3: /* reserved */
+ default:
+ return;
+ }
+
+ for (i = 0; i < 64; i++) {
+ if (TTE_IS_VALID(tlb[i].tte)) {
+
+ if (is_demap_context) {
+ /* will remove non-global entries matching context value */
+ if (TTE_IS_GLOBAL(tlb[i].tte) ||
+ !tlb_compare_context(&tlb[i], context)) {
+ continue;
+ }
+ } else {
+ /* demap page
+ will remove any entry matching VA */
+ mask = 0xffffffffffffe000ULL;
+ mask <<= 3 * ((tlb[i].tte >> 61) & 3);
+
+ if (!compare_masked(demap_addr, tlb[i].tag, mask)) {
+ continue;
+ }
+
+ /* entry should be global or matching context value */
+ if (!TTE_IS_GLOBAL(tlb[i].tte) &&
+ !tlb_compare_context(&tlb[i], context)) {
+ continue;
+ }
+ }
+
+ replace_tlb_entry(&tlb[i], 0, 0, env1);
+#ifdef DEBUG_MMU
+ DPRINTF_MMU("%s demap invalidated entry [%02u]\n", strmmu, i);
+ dump_mmu(stdout, fprintf, env1);
+#endif
+ }
+ }
+}
+
+static void replace_tlb_1bit_lru(SparcTLBEntry *tlb,
+ uint64_t tlb_tag, uint64_t tlb_tte,
+ const char *strmmu, CPUSPARCState *env1)
+{
+ unsigned int i, replace_used;
+
+ /* Try replacing invalid entry */
+ for (i = 0; i < 64; i++) {
+ if (!TTE_IS_VALID(tlb[i].tte)) {
+ replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
+#ifdef DEBUG_MMU
+ DPRINTF_MMU("%s lru replaced invalid entry [%i]\n", strmmu, i);
+ dump_mmu(stdout, fprintf, env1);
+#endif
+ return;
+ }
+ }
+
+ /* All entries are valid, try replacing unlocked entry */
+
+ for (replace_used = 0; replace_used < 2; ++replace_used) {
+
+ /* Used entries are not replaced on first pass */
+
+ for (i = 0; i < 64; i++) {
+ if (!TTE_IS_LOCKED(tlb[i].tte) && !TTE_IS_USED(tlb[i].tte)) {
+
+ replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
+#ifdef DEBUG_MMU
+ DPRINTF_MMU("%s lru replaced unlocked %s entry [%i]\n",
+ strmmu, (replace_used ? "used" : "unused"), i);
+ dump_mmu(stdout, fprintf, env1);
+#endif
+ return;
+ }
+ }
+
+ /* Now reset used bit and search for unused entries again */
+
+ for (i = 0; i < 64; i++) {
+ TTE_SET_UNUSED(tlb[i].tte);
+ }
+ }
+
+#ifdef DEBUG_MMU
+ DPRINTF_MMU("%s lru replacement failed: no entries available\n", strmmu);
+#endif
+ /* error state? */
+}
+
+#endif
+
+#ifdef TARGET_SPARC64
+/* Returns true if an access using this ASI has its address translated by
+ the MMU; otherwise the access goes to the raw physical address */
+/* TODO: check sparc32 bits */
+static inline int is_translating_asi(int asi)
+{
+ /* UltraSPARC IIi translating ASIs
+ - note this list is defined by the CPU implementation
+ */
+ switch (asi) {
+ case 0x04 ... 0x11:
+ case 0x16 ... 0x19:
+ case 0x1E ... 0x1F:
+ case 0x24 ... 0x2C:
+ case 0x70 ... 0x73:
+ case 0x78 ... 0x79:
+ case 0x80 ... 0xFF:
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+static inline target_ulong address_mask(CPUSPARCState *env1, target_ulong addr)
+{
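+ /* With PSTATE.AM set, only the low 32 address bits are used. */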
+ if (AM_CHECK(env1)) {
+ addr &= 0xffffffffULL;
+ }
+ return addr;
+}
+
+static inline target_ulong asi_address_mask(CPUSPARCState *env,
+ int asi, target_ulong addr)
+{
+ if (is_translating_asi(asi)) {
+ addr = address_mask(env, addr);
+ }
+ return addr;
+}
+#endif
+
+static void do_check_align(CPUSPARCState *env, target_ulong addr,
+ uint32_t align, uintptr_t ra)
+{
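+ /* 'align' is a mask of the low address bits that must be zero. */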
+ if (addr & align) {
+#ifdef DEBUG_UNALIGNED
+ printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
+ "\n", addr, env->pc);
+#endif
+ cpu_raise_exception_ra(env, TT_UNALIGNED, ra);
+ }
+}
+
+void helper_check_align(CPUSPARCState *env, target_ulong addr, uint32_t align)
+{
+ do_check_align(env, addr, align, GETPC());
+}
+
+#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \
+ defined(DEBUG_MXCC)
+static void dump_mxcc(CPUSPARCState *env)
+{
+ printf("mxccdata: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
+ "\n",
+ env->mxccdata[0], env->mxccdata[1],
+ env->mxccdata[2], env->mxccdata[3]);
+ printf("mxccregs: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
+ "\n"
+ " %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
+ "\n",
+ env->mxccregs[0], env->mxccregs[1],
+ env->mxccregs[2], env->mxccregs[3],
+ env->mxccregs[4], env->mxccregs[5],
+ env->mxccregs[6], env->mxccregs[7]);
+}
+#endif
+
+#if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \
+ && defined(DEBUG_ASI)
+static void dump_asi(const char *txt, target_ulong addr, int asi, int size,
+ uint64_t r1)
+{
+ switch (size) {
+ case 1:
+ DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt,
+ addr, asi, r1 & 0xff);
+ break;
+ case 2:
+ DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt,
+ addr, asi, r1 & 0xffff);
+ break;
+ case 4:
+ DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt,
+ addr, asi, r1 & 0xffffffff);
+ break;
+ case 8:
+ DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt,
+ addr, asi, r1);
+ break;
+ }
+}
+#endif
+
+#ifndef TARGET_SPARC64
+#ifndef CONFIG_USER_ONLY
+
+
+/* Leon3 cache control */
+
+static void leon3_cache_control_st(CPUSPARCState *env, target_ulong addr,
+ uint64_t val, int size)
+{
+ DPRINTF_CACHE_CONTROL("st addr:%08x, val:%" PRIx64 ", size:%d\n",
+ addr, val, size);
+
+ if (size != 4) {
+ DPRINTF_CACHE_CONTROL("32bits only\n");
+ return;
+ }
+
+ switch (addr) {
+ case 0x00: /* Cache control */
+
+ /* These values must always be read as zeros */
+ val &= ~CACHE_CTRL_FD;
+ val &= ~CACHE_CTRL_FI;
+ val &= ~CACHE_CTRL_IB;
+ val &= ~CACHE_CTRL_IP;
+ val &= ~CACHE_CTRL_DP;
+
+ env->cache_control = val;
+ break;
+ case 0x04: /* Instruction cache configuration */
+ case 0x08: /* Data cache configuration */
+ /* Read Only */
+ break;
+ default:
+ DPRINTF_CACHE_CONTROL("write unknown register %08x\n", addr);
+ break;
+ };
+}
+
+static uint64_t leon3_cache_control_ld(CPUSPARCState *env, target_ulong addr,
+ int size)
+{
+ uint64_t ret = 0;
+
+ if (size != 4) {
+ DPRINTF_CACHE_CONTROL("32bits only\n");
+ return 0;
+ }
+
+ switch (addr) {
+ case 0x00: /* Cache control */
+ ret = env->cache_control;
+ break;
+
+ /* Configuration registers are read-only and always keep their
+ predefined values */
+
+ case 0x04: /* Instruction cache configuration */
+ ret = 0x10220000;
+ break;
+ case 0x08: /* Data cache configuration */
+ ret = 0x18220000;
+ break;
+ default:
+ DPRINTF_CACHE_CONTROL("read unknown register %08x\n", addr);
+ break;
+ };
+ DPRINTF_CACHE_CONTROL("ld addr:%08x, ret:0x%" PRIx64 ", size:%d\n",
+ addr, ret, size);
+ return ret;
+}
+
+uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
+ int asi, uint32_t memop)
+{
+ int size = 1 << (memop & MO_SIZE);
+ int sign = memop & MO_SIGN;
+ CPUState *cs = CPU(sparc_env_get_cpu(env));
+ uint64_t ret = 0;
+#if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
+ uint32_t last_addr = addr;
+#endif
+
+ do_check_align(env, addr, size - 1, GETPC());
+ switch (asi) {
+ case ASI_M_MXCC: /* SuperSparc MXCC registers, or... */
+ /* case ASI_LEON_CACHEREGS: Leon3 cache control */
+ switch (addr) {
+ case 0x00: /* Leon3 Cache Control */
+ case 0x08: /* Leon3 Instruction Cache config */
+ case 0x0C: /* Leon3 Data Cache config */
+ if (env->def->features & CPU_FEATURE_CACHE_CTRL) {
+ ret = leon3_cache_control_ld(env, addr, size);
+ }
+ break;
+ case 0x01c00a00: /* MXCC control register */
+ if (size == 8) {
+ ret = env->mxccregs[3];
+ } else {
+ qemu_log_mask(LOG_UNIMP,
+ "%08x: unimplemented access size: %d\n", addr,
+ size);
+ }
+ break;
+ case 0x01c00a04: /* MXCC control register */
+ if (size == 4) {
+ ret = env->mxccregs[3];
+ } else {
+ qemu_log_mask(LOG_UNIMP,
+ "%08x: unimplemented access size: %d\n", addr,
+ size);
+ }
+ break;
+ case 0x01c00c00: /* Module reset register */
+ if (size == 8) {
+ ret = env->mxccregs[5];
+ /* should we do something here? */
+ } else {
+ qemu_log_mask(LOG_UNIMP,
+ "%08x: unimplemented access size: %d\n", addr,
+ size);
+ }
+ break;
+ case 0x01c00f00: /* MBus port address register */
+ if (size == 8) {
+ ret = env->mxccregs[7];
+ } else {
+ qemu_log_mask(LOG_UNIMP,
+ "%08x: unimplemented access size: %d\n", addr,
+ size);
+ }
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP,
+ "%08x: unimplemented address, size: %d\n", addr,
+ size);
+ break;
+ }
+ DPRINTF_MXCC("asi = %d, size = %d, sign = %d, "
+ "addr = %08x -> ret = %" PRIx64 ","
+ "addr = %08x\n", asi, size, sign, last_addr, ret, addr);
+#ifdef DEBUG_MXCC
+ dump_mxcc(env);
+#endif
+ break;
+ case ASI_M_FLUSH_PROBE: /* SuperSparc MMU probe */
+ case ASI_LEON_MMUFLUSH: /* LEON3 MMU probe */
+ {
+ int mmulev;
+
+ mmulev = (addr >> 8) & 15;
+ if (mmulev > 4) {
+ ret = 0;
+ } else {
+ ret = mmu_probe(env, addr, mmulev);
+ }
+ DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n",
+ addr, mmulev, ret);
+ }
+ break;
+ case ASI_M_MMUREGS: /* SuperSparc MMU regs */
+ case ASI_LEON_MMUREGS: /* LEON3 MMU regs */
+ {
+ int reg = (addr >> 8) & 0x1f;
+
+ ret = env->mmuregs[reg];
+ if (reg == 3) { /* Fault status cleared on read */
+ env->mmuregs[3] = 0;
+ } else if (reg == 0x13) { /* Fault status read */
+ ret = env->mmuregs[3];
+ } else if (reg == 0x14) { /* Fault address read */
+ ret = env->mmuregs[4];
+ }
+ DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret);
+ }
+ break;
+ case ASI_M_TLBDIAG: /* Turbosparc ITLB Diagnostic */
+ case ASI_M_DIAGS: /* Turbosparc DTLB Diagnostic */
+ case ASI_M_IODIAG: /* Turbosparc IOTLB Diagnostic */
+ break;
+ case ASI_KERNELTXT: /* Supervisor code access */
+ switch (size) {
+ case 1:
+ ret = cpu_ldub_code(env, addr);
+ break;
+ case 2:
+ ret = cpu_lduw_code(env, addr);
+ break;
+ default:
+ case 4:
+ ret = cpu_ldl_code(env, addr);
+ break;
+ case 8:
+ ret = cpu_ldq_code(env, addr);
+ break;
+ }
+ break;
+ case ASI_M_TXTC_TAG: /* SparcStation 5 I-cache tag */
+ case ASI_M_TXTC_DATA: /* SparcStation 5 I-cache data */
+ case ASI_M_DATAC_TAG: /* SparcStation 5 D-cache tag */
+ case ASI_M_DATAC_DATA: /* SparcStation 5 D-cache data */
+ break;
+ case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
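+ /* The low four ASI bits supply physical address bits 35:32. */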
+ switch (size) {
+ case 1:
+ ret = ldub_phys(cs->as, (hwaddr)addr
+ | ((hwaddr)(asi & 0xf) << 32));
+ break;
+ case 2:
+ ret = lduw_phys(cs->as, (hwaddr)addr
+ | ((hwaddr)(asi & 0xf) << 32));
+ break;
+ default:
+ case 4:
+ ret = ldl_phys(cs->as, (hwaddr)addr
+ | ((hwaddr)(asi & 0xf) << 32));
+ break;
+ case 8:
+ ret = ldq_phys(cs->as, (hwaddr)addr
+ | ((hwaddr)(asi & 0xf) << 32));
+ break;
+ }
+ break;
+ case 0x30: /* Turbosparc secondary cache diagnostic */
+ case 0x31: /* Turbosparc RAM snoop */
+ case 0x32: /* Turbosparc page table descriptor diagnostic */
+ case 0x39: /* data cache diagnostic register */
+ ret = 0;
+ break;
+ case 0x38: /* SuperSPARC MMU Breakpoint Control Registers */
+ {
+ int reg = (addr >> 8) & 3;
+
+ switch (reg) {
+ case 0: /* Breakpoint Value (Addr) */
+ ret = env->mmubpregs[reg];
+ break;
+ case 1: /* Breakpoint Mask */
+ ret = env->mmubpregs[reg];
+ break;
+ case 2: /* Breakpoint Control */
+ ret = env->mmubpregs[reg];
+ break;
+ case 3: /* Breakpoint Status */
+ ret = env->mmubpregs[reg];
+ env->mmubpregs[reg] = 0ULL;
+ break;
+ }
+ DPRINTF_MMU("read breakpoint reg[%d] 0x%016" PRIx64 "\n", reg,
+ ret);
+ }
+ break;
+ case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */
+ ret = env->mmubpctrv;
+ break;
+ case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */
+ ret = env->mmubpctrc;
+ break;
+ case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */
+ ret = env->mmubpctrs;
+ break;
+ case 0x4c: /* SuperSPARC MMU Breakpoint Action */
+ ret = env->mmubpaction;
+ break;
+ case ASI_USERTXT: /* User code access, XXX */
+ default:
+ cpu_unassigned_access(cs, addr, false, false, asi, size);
+ ret = 0;
+ break;
+
+ case ASI_USERDATA: /* User data access */
+ case ASI_KERNELDATA: /* Supervisor data access */
+ case ASI_P: /* Implicit primary context data access (v9 only?) */
+ case ASI_M_BYPASS: /* MMU passthrough */
+ case ASI_LEON_BYPASS: /* LEON MMU passthrough */
+ /* These are always handled inline. */
+ g_assert_not_reached();
+ }
+ if (sign) {
+ switch (size) {
+ case 1:
+ ret = (int8_t) ret;
+ break;
+ case 2:
+ ret = (int16_t) ret;
+ break;
+ case 4:
+ ret = (int32_t) ret;
+ break;
+ default:
+ break;
+ }
+ }
+#ifdef DEBUG_ASI
+ dump_asi("read ", last_addr, asi, size, ret);
+#endif
+ return ret;
+}
+
+void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val,
+ int asi, uint32_t memop)
+{
+ int size = 1 << (memop & MO_SIZE);
+ SPARCCPU *cpu = sparc_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
+ do_check_align(env, addr, size - 1, GETPC());
+ switch (asi) {
+ case ASI_M_MXCC: /* SuperSparc MXCC registers, or... */
+ /* case ASI_LEON_CACHEREGS: Leon3 cache control */
+ switch (addr) {
+ case 0x00: /* Leon3 Cache Control */
+ case 0x08: /* Leon3 Instruction Cache config */
+ case 0x0C: /* Leon3 Data Cache config */
+ if (env->def->features & CPU_FEATURE_CACHE_CTRL) {
+ leon3_cache_control_st(env, addr, val, size);
+ }
+ break;
+
+ case 0x01c00000: /* MXCC stream data register 0 */
+ if (size == 8) {
+ env->mxccdata[0] = val;
+ } else {
+ qemu_log_mask(LOG_UNIMP,
+ "%08x: unimplemented access size: %d\n", addr,
+ size);
+ }
+ break;
+ case 0x01c00008: /* MXCC stream data register 1 */
+ if (size == 8) {
+ env->mxccdata[1] = val;
+ } else {
+ qemu_log_mask(LOG_UNIMP,
+ "%08x: unimplemented access size: %d\n", addr,
+ size);
+ }
+ break;
+ case 0x01c00010: /* MXCC stream data register 2 */
+ if (size == 8) {
+ env->mxccdata[2] = val;
+ } else {
+ qemu_log_mask(LOG_UNIMP,
+ "%08x: unimplemented access size: %d\n", addr,
+ size);
+ }
+ break;
+ case 0x01c00018: /* MXCC stream data register 3 */
+ if (size == 8) {
+ env->mxccdata[3] = val;
+ } else {
+ qemu_log_mask(LOG_UNIMP,
+ "%08x: unimplemented access size: %d\n", addr,
+ size);
+ }
+ break;
+ case 0x01c00100: /* MXCC stream source */
+ if (size == 8) {
+ env->mxccregs[0] = val;
+ } else {
+ qemu_log_mask(LOG_UNIMP,
+ "%08x: unimplemented access size: %d\n", addr,
+ size);
+ }
+ env->mxccdata[0] = ldq_phys(cs->as,
+ (env->mxccregs[0] & 0xffffffffULL) +
+ 0);
+ env->mxccdata[1] = ldq_phys(cs->as,
+ (env->mxccregs[0] & 0xffffffffULL) +
+ 8);
+ env->mxccdata[2] = ldq_phys(cs->as,
+ (env->mxccregs[0] & 0xffffffffULL) +
+ 16);
+ env->mxccdata[3] = ldq_phys(cs->as,
+ (env->mxccregs[0] & 0xffffffffULL) +
+ 24);
+ break;
+ case 0x01c00200: /* MXCC stream destination */
+ if (size == 8) {
+ env->mxccregs[1] = val;
+ } else {
+ qemu_log_mask(LOG_UNIMP,
+ "%08x: unimplemented access size: %d\n", addr,
+ size);
+ }
+ stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 0,
+ env->mxccdata[0]);
+ stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 8,
+ env->mxccdata[1]);
+ stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 16,
+ env->mxccdata[2]);
+ stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 24,
+ env->mxccdata[3]);
+ break;
+ case 0x01c00a00: /* MXCC control register */
+ if (size == 8) {
+ env->mxccregs[3] = val;
+ } else {
+ qemu_log_mask(LOG_UNIMP,
+ "%08x: unimplemented access size: %d\n", addr,
+ size);
+ }
+ break;
+ case 0x01c00a04: /* MXCC control register */
+ if (size == 4) {
+ env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL)
+ | val;
+ } else {
+ qemu_log_mask(LOG_UNIMP,
+ "%08x: unimplemented access size: %d\n", addr,
+ size);
+ }
+ break;
+ case 0x01c00e00: /* MXCC error register */
+ /* writing a 1 bit clears the error */
+ if (size == 8) {
+ env->mxccregs[6] &= ~val;
+ } else {
+ qemu_log_mask(LOG_UNIMP,
+ "%08x: unimplemented access size: %d\n", addr,
+ size);
+ }
+ break;
+ case 0x01c00f00: /* MBus port address register */
+ if (size == 8) {
+ env->mxccregs[7] = val;
+ } else {
+ qemu_log_mask(LOG_UNIMP,
+ "%08x: unimplemented access size: %d\n", addr,
+ size);
+ }
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP,
+ "%08x: unimplemented address, size: %d\n", addr,
+ size);
+ break;
+ }
+ DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %" PRIx64 "\n",
+ asi, size, addr, val);
+#ifdef DEBUG_MXCC
+ dump_mxcc(env);
+#endif
+ break;
+ case ASI_M_FLUSH_PROBE: /* SuperSparc MMU flush */
+ case ASI_LEON_MMUFLUSH: /* LEON3 MMU flush */
+ {
+ int mmulev;
+
+ mmulev = (addr >> 8) & 15;
+ DPRINTF_MMU("mmu flush level %d\n", mmulev);
+ switch (mmulev) {
+ case 0: /* flush page */
+ tlb_flush_page(CPU(cpu), addr & 0xfffff000);
+ break;
+ case 1: /* flush segment (256k) */
+ case 2: /* flush region (16M) */
+ case 3: /* flush context (4G) */
+ case 4: /* flush entire */
+ tlb_flush(CPU(cpu), 1);
+ break;
+ default:
+ break;
+ }
+#ifdef DEBUG_MMU
+ dump_mmu(stdout, fprintf, env);
+#endif
+ }
+ break;
+ case ASI_M_MMUREGS: /* write MMU regs */
+ case ASI_LEON_MMUREGS: /* LEON3 write MMU regs */
+ {
+ int reg = (addr >> 8) & 0x1f;
+ uint32_t oldreg;
+
+ oldreg = env->mmuregs[reg];
+ switch (reg) {
+ case 0: /* Control Register */
+ env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) |
+ (val & 0x00ffffff);
+ /* Mappings generated during no-fault mode
+ are invalid in normal mode. */
+ if ((oldreg ^ env->mmuregs[reg])
+ & (MMU_NF | env->def->mmu_bm)) {
+ tlb_flush(CPU(cpu), 1);
+ }
+ break;
+ case 1: /* Context Table Pointer Register */
+ env->mmuregs[reg] = val & env->def->mmu_ctpr_mask;
+ break;
+ case 2: /* Context Register */
+ env->mmuregs[reg] = val & env->def->mmu_cxr_mask;
+ if (oldreg != env->mmuregs[reg]) {
+ /* we flush when the MMU context changes because
+ QEMU has no MMU context support */
+ tlb_flush(CPU(cpu), 1);
+ }
+ break;
+ case 3: /* Synchronous Fault Status Register with Clear */
+ case 4: /* Synchronous Fault Address Register */
+ break;
+ case 0x10: /* TLB Replacement Control Register */
+ env->mmuregs[reg] = val & env->def->mmu_trcr_mask;
+ break;
+ case 0x13: /* Synchronous Fault Status Register with Read
+ and Clear */
+ env->mmuregs[3] = val & env->def->mmu_sfsr_mask;
+ break;
+ case 0x14: /* Synchronous Fault Address Register */
+ env->mmuregs[4] = val;
+ break;
+ default:
+ env->mmuregs[reg] = val;
+ break;
+ }
+ if (oldreg != env->mmuregs[reg]) {
+ DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n",
+ reg, oldreg, env->mmuregs[reg]);
+ }
+#ifdef DEBUG_MMU
+ dump_mmu(stdout, fprintf, env);
+#endif
+ }
+ break;
+ case ASI_M_TLBDIAG: /* Turbosparc ITLB Diagnostic */
+ case ASI_M_DIAGS: /* Turbosparc DTLB Diagnostic */
+ case ASI_M_IODIAG: /* Turbosparc IOTLB Diagnostic */
+ break;
+ case ASI_M_TXTC_TAG: /* I-cache tag */
+ case ASI_M_TXTC_DATA: /* I-cache data */
+ case ASI_M_DATAC_TAG: /* D-cache tag */
+ case ASI_M_DATAC_DATA: /* D-cache data */
+ case ASI_M_FLUSH_PAGE: /* I/D-cache flush page */
+ case ASI_M_FLUSH_SEG: /* I/D-cache flush segment */
+ case ASI_M_FLUSH_REGION: /* I/D-cache flush region */
+ case ASI_M_FLUSH_CTX: /* I/D-cache flush context */
+ case ASI_M_FLUSH_USER: /* I/D-cache flush user */
+ break;
+ case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
+ {
+ switch (size) {
+ case 1:
+ stb_phys(cs->as, (hwaddr)addr
+ | ((hwaddr)(asi & 0xf) << 32), val);
+ break;
+ case 2:
+ stw_phys(cs->as, (hwaddr)addr
+ | ((hwaddr)(asi & 0xf) << 32), val);
+ break;
+ case 4:
+ default:
+ stl_phys(cs->as, (hwaddr)addr
+ | ((hwaddr)(asi & 0xf) << 32), val);
+ break;
+ case 8:
+ stq_phys(cs->as, (hwaddr)addr
+ | ((hwaddr)(asi & 0xf) << 32), val);
+ break;
+ }
+ }
+ break;
+ case 0x30: /* store buffer tags or Turbosparc secondary cache diagnostic */
+ case 0x31: /* store buffer data, Ross RT620 I-cache flush or
+ Turbosparc snoop RAM */
+ case 0x32: /* store buffer control or Turbosparc page table
+ descriptor diagnostic */
+ case 0x36: /* I-cache flash clear */
+ case 0x37: /* D-cache flash clear */
+ break;
+ case 0x38: /* SuperSPARC MMU Breakpoint Control Registers*/
+ {
+ int reg = (addr >> 8) & 3;
+
+ switch (reg) {
+ case 0: /* Breakpoint Value (Addr) */
+ env->mmubpregs[reg] = (val & 0xfffffffffULL);
+ break;
+ case 1: /* Breakpoint Mask */
+ env->mmubpregs[reg] = (val & 0xfffffffffULL);
+ break;
+ case 2: /* Breakpoint Control */
+ env->mmubpregs[reg] = (val & 0x7fULL);
+ break;
+ case 3: /* Breakpoint Status */
+ env->mmubpregs[reg] = (val & 0xfULL);
+ break;
+ }
+ DPRINTF_MMU("write breakpoint reg[%d] 0x%016x\n", reg,
+ env->mmuregs[reg]);
+ }
+ break;
+ case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */
+ env->mmubpctrv = val & 0xffffffff;
+ break;
+ case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */
+ env->mmubpctrc = val & 0x3;
+ break;
+ case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */
+ env->mmubpctrs = val & 0x3;
+ break;
+ case 0x4c: /* SuperSPARC MMU Breakpoint Action */
+ env->mmubpaction = val & 0x1fff;
+ break;
+ case ASI_USERTXT: /* User code access, XXX */
+ case ASI_KERNELTXT: /* Supervisor code access, XXX */
+ default:
+ cpu_unassigned_access(CPU(sparc_env_get_cpu(env)),
+ addr, true, false, asi, size);
+ break;
+
+ case ASI_USERDATA: /* User data access */
+ case ASI_KERNELDATA: /* Supervisor data access */
+ case ASI_P:
+ case ASI_M_BYPASS: /* MMU passthrough */
+ case ASI_LEON_BYPASS: /* LEON MMU passthrough */
+ case ASI_M_BCOPY: /* Block copy, sta access */
+ case ASI_M_BFILL: /* Block fill, stda access */
+ /* These are always handled inline. */
+ g_assert_not_reached();
+ }
+#ifdef DEBUG_ASI
+ dump_asi("write", addr, asi, size, val);
+#endif
+}
+
+#endif /* CONFIG_USER_ONLY */
+#else /* TARGET_SPARC64 */
+
+#ifdef CONFIG_USER_ONLY
+uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
+ int asi, uint32_t memop)
+{
+ int size = 1 << (memop & MO_SIZE);
+ int sign = memop & MO_SIGN;
+ uint64_t ret = 0;
+
+ if (asi < 0x80) {
+ cpu_raise_exception_ra(env, TT_PRIV_ACT, GETPC());
+ }
+ do_check_align(env, addr, size - 1, GETPC());
+ addr = asi_address_mask(env, asi, addr);
+
+ switch (asi) {
+ case ASI_PNF: /* Primary no-fault */
+ case ASI_PNFL: /* Primary no-fault LE */
+ case ASI_SNF: /* Secondary no-fault */
+ case ASI_SNFL: /* Secondary no-fault LE */
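+ /* No-fault loads return zero instead of faulting when the page
+ is not readable. */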
+ if (page_check_range(addr, size, PAGE_READ) == -1) {
+ ret = 0;
+ break;
+ }
+ switch (size) {
+ case 1:
+ ret = cpu_ldub_data(env, addr);
+ break;
+ case 2:
+ ret = cpu_lduw_data(env, addr);
+ break;
+ case 4:
+ ret = cpu_ldl_data(env, addr);
+ break;
+ case 8:
+ ret = cpu_ldq_data(env, addr);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+
+ case ASI_P: /* Primary */
+ case ASI_PL: /* Primary LE */
+ case ASI_S: /* Secondary */
+ case ASI_SL: /* Secondary LE */
+ /* These are always handled inline. */
+ g_assert_not_reached();
+
+ default:
+ cpu_raise_exception_ra(env, TT_DATA_ACCESS, GETPC());
+ }
+
+ /* Convert from little endian */
+ switch (asi) {
+ case ASI_PNFL: /* Primary no-fault LE */
+ case ASI_SNFL: /* Secondary no-fault LE */
+ switch (size) {
+ case 2:
+ ret = bswap16(ret);
+ break;
+ case 4:
+ ret = bswap32(ret);
+ break;
+ case 8:
+ ret = bswap64(ret);
+ break;
+ }
+ }
+
+ /* Convert to signed number */
+ if (sign) {
+ switch (size) {
+ case 1:
+ ret = (int8_t) ret;
+ break;
+ case 2:
+ ret = (int16_t) ret;
+ break;
+ case 4:
+ ret = (int32_t) ret;
+ break;
+ }
+ }
+#ifdef DEBUG_ASI
+ dump_asi("read", addr, asi, size, ret);
+#endif
+ return ret;
+}
+
+void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
+ int asi, uint32_t memop)
+{
+ int size = 1 << (memop & MO_SIZE);
+#ifdef DEBUG_ASI
+ dump_asi("write", addr, asi, size, val);
+#endif
+ if (asi < 0x80) {
+ cpu_raise_exception_ra(env, TT_PRIV_ACT, GETPC());
+ }
+ do_check_align(env, addr, size - 1, GETPC());
+
+ switch (asi) {
+ case ASI_P: /* Primary */
+ case ASI_PL: /* Primary LE */
+ case ASI_S: /* Secondary */
+ case ASI_SL: /* Secondary LE */
+ /* These are always handled inline. */
+ g_assert_not_reached();
+
+ case ASI_PNF: /* Primary no-fault, RO */
+ case ASI_SNF: /* Secondary no-fault, RO */
+ case ASI_PNFL: /* Primary no-fault LE, RO */
+ case ASI_SNFL: /* Secondary no-fault LE, RO */
+ default:
+ cpu_raise_exception_ra(env, TT_DATA_ACCESS, GETPC());
+ }
+}
+
+#else /* CONFIG_USER_ONLY */
+
+uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
+ int asi, uint32_t memop)
+{
+ int size = 1 << (memop & MO_SIZE);
+ int sign = memop & MO_SIGN;
+ CPUState *cs = CPU(sparc_env_get_cpu(env));
+ uint64_t ret = 0;
+#if defined(DEBUG_ASI)
+ target_ulong last_addr = addr;
+#endif
+
+ asi &= 0xff;
+
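+ /* ASIs below 0x80 are restricted to privileged code; with a hypervisor,
+ ASIs 0x30-0x7f additionally require hyperprivileged state. */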
+ if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
+ || (cpu_has_hypervisor(env)
+ && asi >= 0x30 && asi < 0x80
+ && !(env->hpstate & HS_PRIV))) {
+ cpu_raise_exception_ra(env, TT_PRIV_ACT, GETPC());
+ }
+
+ do_check_align(env, addr, size - 1, GETPC());
+ addr = asi_address_mask(env, asi, addr);
+
+ switch (asi) {
+ case ASI_PNF:
+ case ASI_PNFL:
+ case ASI_SNF:
+ case ASI_SNFL:
+ {
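+ /* Bit 0 of these ASIs selects the secondary context,
+ bit 3 the little-endian variant. */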
+ TCGMemOpIdx oi;
+ int idx = (env->pstate & PS_PRIV
+ ? (asi & 1 ? MMU_KERNEL_SECONDARY_IDX : MMU_KERNEL_IDX)
+ : (asi & 1 ? MMU_USER_SECONDARY_IDX : MMU_USER_IDX));
+
+ if (cpu_get_phys_page_nofault(env, addr, idx) == -1ULL) {
+#ifdef DEBUG_ASI
+ dump_asi("read ", last_addr, asi, size, ret);
+#endif
+ /* exception_index is set in get_physical_address_data. */
+ cpu_raise_exception_ra(env, cs->exception_index, GETPC());
+ }
+ oi = make_memop_idx(memop, idx);
+ switch (size) {
+ case 1:
+ ret = helper_ret_ldub_mmu(env, addr, oi, GETPC());
+ break;
+ case 2:
+ if (asi & 8) {
+ ret = helper_le_lduw_mmu(env, addr, oi, GETPC());
+ } else {
+ ret = helper_be_lduw_mmu(env, addr, oi, GETPC());
+ }
+ break;
+ case 4:
+ if (asi & 8) {
+ ret = helper_le_ldul_mmu(env, addr, oi, GETPC());
+ } else {
+ ret = helper_be_ldul_mmu(env, addr, oi, GETPC());
+ }
+ break;
+ case 8:
+ if (asi & 8) {
+ ret = helper_le_ldq_mmu(env, addr, oi, GETPC());
+ } else {
+ ret = helper_be_ldq_mmu(env, addr, oi, GETPC());
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+ break;
+
+ case ASI_AIUP: /* As if user primary */
+ case ASI_AIUS: /* As if user secondary */
+ case ASI_AIUPL: /* As if user primary LE */
+ case ASI_AIUSL: /* As if user secondary LE */
+ case ASI_P: /* Primary */
+ case ASI_S: /* Secondary */
+ case ASI_PL: /* Primary LE */
+ case ASI_SL: /* Secondary LE */
+ case ASI_REAL: /* Bypass */
+ case ASI_REAL_IO: /* Bypass, non-cacheable */
+ case ASI_REAL_L: /* Bypass LE */
+ case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
+ case ASI_N: /* Nucleus */
+ case ASI_NL: /* Nucleus Little Endian (LE) */
+ case ASI_NUCLEUS_QUAD_LDD: /* Nucleus quad LDD 128 bit atomic */
+ case ASI_NUCLEUS_QUAD_LDD_L: /* Nucleus quad LDD 128 bit atomic LE */
+ case ASI_TWINX_AIUP: /* As if user primary, twinx */
+ case ASI_TWINX_AIUS: /* As if user secondary, twinx */
+ case ASI_TWINX_REAL: /* Real address, twinx */
+ case ASI_TWINX_AIUP_L: /* As if user primary, twinx, LE */
+ case ASI_TWINX_AIUS_L: /* As if user secondary, twinx, LE */
+ case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
+ case ASI_TWINX_N: /* Nucleus, twinx */
+ case ASI_TWINX_NL: /* Nucleus, twinx, LE */
+ /* ??? From the UA2011 document; overlaps BLK_INIT_QUAD_LDD_* */
+ case ASI_TWINX_P: /* Primary, twinx */
+ case ASI_TWINX_PL: /* Primary, twinx, LE */
+ case ASI_TWINX_S: /* Secondary, twinx */
+ case ASI_TWINX_SL: /* Secondary, twinx, LE */
+ /* These are always handled inline. */
+ g_assert_not_reached();
+
+ case ASI_UPA_CONFIG: /* UPA config */
+ /* XXX */
+ break;
+ case ASI_LSU_CONTROL: /* LSU */
+ ret = env->lsu;
+ break;
+ case ASI_IMMU: /* I-MMU regs */
+ {
+ int reg = (addr >> 3) & 0xf;
+
+ if (reg == 0) {
+ /* I-TSB Tag Target register */
+ ret = ultrasparc_tag_target(env->immu.tag_access);
+ } else {
+ ret = env->immuregs[reg];
+ }
+
+ break;
+ }
+ case ASI_IMMU_TSB_8KB_PTR: /* I-MMU 8k TSB pointer */
+ {
+ /* env->immuregs[5] holds I-MMU TSB register value
+ env->immuregs[6] holds I-MMU Tag Access register value */
+ ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
+ 8*1024);
+ break;
+ }
+ case ASI_IMMU_TSB_64KB_PTR: /* I-MMU 64k TSB pointer */
+ {
+ /* env->immuregs[5] holds I-MMU TSB register value
+ env->immuregs[6] holds I-MMU Tag Access register value */
+ ret = ultrasparc_tsb_pointer(env->immu.tsb, env->immu.tag_access,
+ 64*1024);
+ break;
+ }
+ case ASI_ITLB_DATA_ACCESS: /* I-MMU data access */
+ {
+ int reg = (addr >> 3) & 0x3f;
+
+ ret = env->itlb[reg].tte;
+ break;
+ }
+ case ASI_ITLB_TAG_READ: /* I-MMU tag read */
+ {
+ int reg = (addr >> 3) & 0x3f;
+
+ ret = env->itlb[reg].tag;
+ break;
+ }
+ case ASI_DMMU: /* D-MMU regs */
+ {
+ int reg = (addr >> 3) & 0xf;
+
+ if (reg == 0) {
+ /* D-TSB Tag Target register */
+ ret = ultrasparc_tag_target(env->dmmu.tag_access);
+ } else {
+ ret = env->dmmuregs[reg];
+ }
+ break;
+ }
+ case ASI_DMMU_TSB_8KB_PTR: /* D-MMU 8k TSB pointer */
+ {
+ /* env->dmmuregs[5] holds D-MMU TSB register value
+ env->dmmuregs[6] holds D-MMU Tag Access register value */
+ ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
+ 8*1024);
+ break;
+ }
+ case ASI_DMMU_TSB_64KB_PTR: /* D-MMU 64k TSB pointer */
+ {
+ /* env->dmmuregs[5] holds D-MMU TSB register value
+ env->dmmuregs[6] holds D-MMU Tag Access register value */
+ ret = ultrasparc_tsb_pointer(env->dmmu.tsb, env->dmmu.tag_access,
+ 64*1024);
+ break;
+ }
+ case ASI_DTLB_DATA_ACCESS: /* D-MMU data access */
+ {
+ int reg = (addr >> 3) & 0x3f;
+
+ ret = env->dtlb[reg].tte;
+ break;
+ }
+ case ASI_DTLB_TAG_READ: /* D-MMU tag read */
+ {
+ int reg = (addr >> 3) & 0x3f;
+
+ ret = env->dtlb[reg].tag;
+ break;
+ }
+ case ASI_INTR_DISPATCH_STAT: /* Interrupt dispatch, RO */
+ break;
+ case ASI_INTR_RECEIVE: /* Interrupt data receive */
+ ret = env->ivec_status;
+ break;
+ case ASI_INTR_R: /* Incoming interrupt vector, RO */
+ {
+ int reg = (addr >> 4) & 0x3;
+ if (reg < 3) {
+ ret = env->ivec_data[reg];
+ }
+ break;
+ }
+ case ASI_DCACHE_DATA: /* D-cache data */
+ case ASI_DCACHE_TAG: /* D-cache tag access */
+ case ASI_ESTATE_ERROR_EN: /* E-cache error enable */
+ case ASI_AFSR: /* E-cache asynchronous fault status */
+ case ASI_AFAR: /* E-cache asynchronous fault address */
+ case ASI_EC_TAG_DATA: /* E-cache tag data */
+ case ASI_IC_INSTR: /* I-cache instruction access */
+ case ASI_IC_TAG: /* I-cache tag access */
+ case ASI_IC_PRE_DECODE: /* I-cache predecode */
+ case ASI_IC_NEXT_FIELD: /* I-cache LRU etc. */
+ case ASI_EC_W: /* E-cache tag */
+ case ASI_EC_R: /* E-cache tag */
+ break;
+ case ASI_DMMU_TSB_DIRECT_PTR: /* D-MMU data pointer */
+ case ASI_ITLB_DATA_IN: /* I-MMU data in, WO */
+ case ASI_IMMU_DEMAP: /* I-MMU demap, WO */
+ case ASI_DTLB_DATA_IN: /* D-MMU data in, WO */
+ case ASI_DMMU_DEMAP: /* D-MMU demap, WO */
+ case ASI_INTR_W: /* Interrupt vector, WO */
+ default:
+ cpu_unassigned_access(cs, addr, false, false, 1, size);
+ ret = 0;
+ break;
+ }
+
+ /* Convert to signed number */
+ if (sign) {
+ switch (size) {
+ case 1:
+ ret = (int8_t) ret;
+ break;
+ case 2:
+ ret = (int16_t) ret;
+ break;
+ case 4:
+ ret = (int32_t) ret;
+ break;
+ default:
+ break;
+ }
+ }
+#ifdef DEBUG_ASI
+ dump_asi("read ", last_addr, asi, size, ret);
+#endif
+ return ret;
+}
+
+void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
+ int asi, uint32_t memop)
+{
+ int size = 1 << (memop & MO_SIZE);
+ SPARCCPU *cpu = sparc_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
+#ifdef DEBUG_ASI
+ dump_asi("write", addr, asi, size, val);
+#endif
+
+ asi &= 0xff;
+
+ if ((asi < 0x80 && (env->pstate & PS_PRIV) == 0)
+ || (cpu_has_hypervisor(env)
+ && asi >= 0x30 && asi < 0x80
+ && !(env->hpstate & HS_PRIV))) {
+ cpu_raise_exception_ra(env, TT_PRIV_ACT, GETPC());
+ }
+
+ do_check_align(env, addr, size - 1, GETPC());
+ addr = asi_address_mask(env, asi, addr);
+
+ switch (asi) {
+ case ASI_AIUP: /* As if user primary */
+ case ASI_AIUS: /* As if user secondary */
+ case ASI_AIUPL: /* As if user primary LE */
+ case ASI_AIUSL: /* As if user secondary LE */
+ case ASI_P: /* Primary */
+ case ASI_S: /* Secondary */
+ case ASI_PL: /* Primary LE */
+ case ASI_SL: /* Secondary LE */
+ case ASI_REAL: /* Bypass */
+ case ASI_REAL_IO: /* Bypass, non-cacheable */
+ case ASI_REAL_L: /* Bypass LE */
+ case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
+ case ASI_N: /* Nucleus */
+ case ASI_NL: /* Nucleus Little Endian (LE) */
+ case ASI_NUCLEUS_QUAD_LDD: /* Nucleus quad LDD 128 bit atomic */
+ case ASI_NUCLEUS_QUAD_LDD_L: /* Nucleus quad LDD 128 bit atomic LE */
+ case ASI_TWINX_AIUP: /* As if user primary, twinx */
+ case ASI_TWINX_AIUS: /* As if user secondary, twinx */
+ case ASI_TWINX_REAL: /* Real address, twinx */
+ case ASI_TWINX_AIUP_L: /* As if user primary, twinx, LE */
+ case ASI_TWINX_AIUS_L: /* As if user secondary, twinx, LE */
+ case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
+ case ASI_TWINX_N: /* Nucleus, twinx */
+ case ASI_TWINX_NL: /* Nucleus, twinx, LE */
+ /* ??? From the UA2011 document; overlaps BLK_INIT_QUAD_LDD_* */
+ case ASI_TWINX_P: /* Primary, twinx */
+ case ASI_TWINX_PL: /* Primary, twinx, LE */
+ case ASI_TWINX_S: /* Secondary, twinx */
+ case ASI_TWINX_SL: /* Secondary, twinx, LE */
+ /* These are always handled inline. */
+ g_assert_not_reached();
+
+ case ASI_UPA_CONFIG: /* UPA config */
+ /* XXX */
+ return;
+ case ASI_LSU_CONTROL: /* LSU */
+ env->lsu = val & (DMMU_E | IMMU_E);
+ return;
+ case ASI_IMMU: /* I-MMU regs */
+ {
+ int reg = (addr >> 3) & 0xf;
+ uint64_t oldreg;
+
+ oldreg = env->immuregs[reg];
+ switch (reg) {
+ case 0: /* RO */
+ return;
+ case 1: /* Not in I-MMU */
+ case 2:
+ return;
+ case 3: /* SFSR */
+ if ((val & 1) == 0) {
+ val = 0; /* Clear SFSR */
+ }
+ env->immu.sfsr = val;
+ break;
+ case 4: /* RO */
+ return;
+ case 5: /* TSB access */
+ DPRINTF_MMU("immu TSB write: 0x%016" PRIx64 " -> 0x%016"
+ PRIx64 "\n", env->immu.tsb, val);
+ env->immu.tsb = val;
+ break;
+ case 6: /* Tag access */
+ env->immu.tag_access = val;
+ break;
+ case 7:
+ case 8:
+ return;
+ default:
+ break;
+ }
+
+ if (oldreg != env->immuregs[reg]) {
+ DPRINTF_MMU("immu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
+ PRIx64 "\n", reg, oldreg, env->immuregs[reg]);
+ }
+#ifdef DEBUG_MMU
+ dump_mmu(stdout, fprintf, env);
+#endif
+ return;
+ }
+ case ASI_ITLB_DATA_IN: /* I-MMU data in */
+ replace_tlb_1bit_lru(env->itlb, env->immu.tag_access, val, "immu", env);
+ return;
+ case ASI_ITLB_DATA_ACCESS: /* I-MMU data access */
+ {
+ /* TODO: auto demap */
+
+ unsigned int i = (addr >> 3) & 0x3f;
+
+ replace_tlb_entry(&env->itlb[i], env->immu.tag_access, val, env);
+
+#ifdef DEBUG_MMU
+ DPRINTF_MMU("immu data access replaced entry [%i]\n", i);
+ dump_mmu(stdout, fprintf, env);
+#endif
+ return;
+ }
+ case ASI_IMMU_DEMAP: /* I-MMU demap */
+ demap_tlb(env->itlb, addr, "immu", env);
+ return;
+ case ASI_DMMU: /* D-MMU regs */
+ {
+ int reg = (addr >> 3) & 0xf;
+ uint64_t oldreg;
+
+ oldreg = env->dmmuregs[reg];
+ switch (reg) {
+ case 0: /* RO */
+ case 4:
+ return;
+ case 3: /* SFSR */
+ if ((val & 1) == 0) {
+ val = 0; /* Clear SFSR, Fault address */
+ env->dmmu.sfar = 0;
+ }
+ env->dmmu.sfsr = val;
+ break;
+ case 1: /* Primary context */
+ env->dmmu.mmu_primary_context = val;
+ /* can be optimized to only flush MMU_USER_IDX
+ and MMU_KERNEL_IDX entries */
+ tlb_flush(CPU(cpu), 1);
+ break;
+ case 2: /* Secondary context */
+ env->dmmu.mmu_secondary_context = val;
+ /* can be optimized to only flush MMU_USER_SECONDARY_IDX
+ and MMU_KERNEL_SECONDARY_IDX entries */
+ tlb_flush(CPU(cpu), 1);
+ break;
+ case 5: /* TSB access */
+ DPRINTF_MMU("dmmu TSB write: 0x%016" PRIx64 " -> 0x%016"
+ PRIx64 "\n", env->dmmu.tsb, val);
+ env->dmmu.tsb = val;
+ break;
+ case 6: /* Tag access */
+ env->dmmu.tag_access = val;
+ break;
+ case 7: /* Virtual Watchpoint */
+ case 8: /* Physical Watchpoint */
+ default:
+ env->dmmuregs[reg] = val;
+ break;
+ }
+
+ if (oldreg != env->dmmuregs[reg]) {
+ DPRINTF_MMU("dmmu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
+ PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]);
+ }
+#ifdef DEBUG_MMU
+ dump_mmu(stdout, fprintf, env);
+#endif
+ return;
+ }
+ case ASI_DTLB_DATA_IN: /* D-MMU data in */
+ replace_tlb_1bit_lru(env->dtlb, env->dmmu.tag_access, val, "dmmu", env);
+ return;
+ case ASI_DTLB_DATA_ACCESS: /* D-MMU data access */
+ {
+ unsigned int i = (addr >> 3) & 0x3f;
+
+ replace_tlb_entry(&env->dtlb[i], env->dmmu.tag_access, val, env);
+
+#ifdef DEBUG_MMU
+ DPRINTF_MMU("dmmu data access replaced entry [%i]\n", i);
+ dump_mmu(stdout, fprintf, env);
+#endif
+ return;
+ }
+ case ASI_DMMU_DEMAP: /* D-MMU demap */
+ demap_tlb(env->dtlb, addr, "dmmu", env);
+ return;
+ case ASI_INTR_RECEIVE: /* Interrupt data receive */
+ env->ivec_status = val & 0x20;
+ return;
+ case ASI_DCACHE_DATA: /* D-cache data */
+ case ASI_DCACHE_TAG: /* D-cache tag access */
+ case ASI_ESTATE_ERROR_EN: /* E-cache error enable */
+ case ASI_AFSR: /* E-cache asynchronous fault status */
+ case ASI_AFAR: /* E-cache asynchronous fault address */
+ case ASI_EC_TAG_DATA: /* E-cache tag data */
+ case ASI_IC_INSTR: /* I-cache instruction access */
+ case ASI_IC_TAG: /* I-cache tag access */
+ case ASI_IC_PRE_DECODE: /* I-cache predecode */
+ case ASI_IC_NEXT_FIELD: /* I-cache LRU etc. */
+ case ASI_EC_W: /* E-cache tag */
+ case ASI_EC_R: /* E-cache tag */
+ return;
+ case ASI_IMMU_TSB_8KB_PTR: /* I-MMU 8k TSB pointer, RO */
+ case ASI_IMMU_TSB_64KB_PTR: /* I-MMU 64k TSB pointer, RO */
+ case ASI_ITLB_TAG_READ: /* I-MMU tag read, RO */
+ case ASI_DMMU_TSB_8KB_PTR: /* D-MMU 8k TSB pointer, RO */
+ case ASI_DMMU_TSB_64KB_PTR: /* D-MMU 64k TSB pointer, RO */
+ case ASI_DMMU_TSB_DIRECT_PTR: /* D-MMU data pointer, RO */
+ case ASI_DTLB_TAG_READ: /* D-MMU tag read, RO */
+ case ASI_INTR_DISPATCH_STAT: /* Interrupt dispatch, RO */
+ case ASI_INTR_R: /* Incoming interrupt vector, RO */
+ case ASI_PNF: /* Primary no-fault, RO */
+ case ASI_SNF: /* Secondary no-fault, RO */
+ case ASI_PNFL: /* Primary no-fault LE, RO */
+ case ASI_SNFL: /* Secondary no-fault LE, RO */
+ default:
+ cpu_unassigned_access(cs, addr, true, false, 1, size);
+ return;
+ }
+}
+#endif /* CONFIG_USER_ONLY */
+#endif /* TARGET_SPARC64 */
+
+#if !defined(CONFIG_USER_ONLY)
+#ifndef TARGET_SPARC64
+void sparc_cpu_unassigned_access(CPUState *cs, hwaddr addr,
+ bool is_write, bool is_exec, int is_asi,
+ unsigned size)
+{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+ CPUSPARCState *env = &cpu->env;
+ int fault_type;
+
+#ifdef DEBUG_UNASSIGNED
+ if (is_asi) {
+ printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
+ " asi 0x%02x from " TARGET_FMT_lx "\n",
+ is_exec ? "exec" : is_write ? "write" : "read", size,
+ size == 1 ? "" : "s", addr, is_asi, env->pc);
+ } else {
+ printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
+ " from " TARGET_FMT_lx "\n",
+ is_exec ? "exec" : is_write ? "write" : "read", size,
+ size == 1 ? "" : "s", addr, env->pc);
+ }
+#endif
+ /* Don't overwrite translation and access faults */
+ fault_type = (env->mmuregs[3] & 0x1c) >> 2;
+ if ((fault_type > 4) || (fault_type == 0)) {
+ env->mmuregs[3] = 0; /* Fault status register */
+ if (is_asi) {
+ env->mmuregs[3] |= 1 << 16;
+ }
+ if (env->psrs) {
+ env->mmuregs[3] |= 1 << 5;
+ }
+ if (is_exec) {
+ env->mmuregs[3] |= 1 << 6;
+ }
+ if (is_write) {
+ env->mmuregs[3] |= 1 << 7;
+ }
+ env->mmuregs[3] |= (5 << 2) | 2;
+ /* SuperSPARC will never place instruction fault addresses in the FAR */
+ if (!is_exec) {
+ env->mmuregs[4] = addr; /* Fault address register */
+ }
+ }
+ /* overflow (same type fault was not read before another fault) */
+ if (fault_type == ((env->mmuregs[3] & 0x1c) >> 2)) {
+ env->mmuregs[3] |= 1;
+ }
+
+ if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) {
+ int tt = is_exec ? TT_CODE_ACCESS : TT_DATA_ACCESS;
+ cpu_raise_exception_ra(env, tt, GETPC());
+ }
+
+ /* flush neverland mappings created during no-fault mode,
+ so that subsequent MMU faults report proper fault types */
+ if (env->mmuregs[0] & MMU_NF) {
+ tlb_flush(cs, 1);
+ }
+}
+#else
+void sparc_cpu_unassigned_access(CPUState *cs, hwaddr addr,
+ bool is_write, bool is_exec, int is_asi,
+ unsigned size)
+{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+ CPUSPARCState *env = &cpu->env;
+ int tt = is_exec ? TT_CODE_ACCESS : TT_DATA_ACCESS;
+
+#ifdef DEBUG_UNASSIGNED
+ printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx
+ "\n", addr, env->pc);
+#endif
+
+ cpu_raise_exception_ra(env, tt, GETPC());
+}
+#endif
+#endif
+
+#if !defined(CONFIG_USER_ONLY)
+void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+ MMUAccessType access_type,
+ int mmu_idx,
+ uintptr_t retaddr)
+{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+ CPUSPARCState *env = &cpu->env;
+
+#ifdef DEBUG_UNALIGNED
+ printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
+ "\n", addr, env->pc);
+#endif
+ cpu_raise_exception_ra(env, TT_UNALIGNED, retaddr);
+}
+
+/* Try to fill the TLB and raise an exception on error. If retaddr is
+ NULL, it means that the function was called from C code (i.e. not
+ from generated code or from helper.c) */
+/* XXX: fix it to restore all registers */
+void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ int ret;
+
+ ret = sparc_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
+ if (ret) {
+ cpu_loop_exit_restore(cs, retaddr);
+ }
+}
+#endif
diff --git a/target/sparc/machine.c b/target/sparc/machine.c
new file mode 100644
index 0000000000..aea6397861
--- /dev/null
+++ b/target/sparc/machine.c
@@ -0,0 +1,194 @@
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "hw/hw.h"
+#include "hw/boards.h"
+#include "qemu/timer.h"
+
+#include "migration/cpu.h"
+
+#ifdef TARGET_SPARC64
+static const VMStateDescription vmstate_cpu_timer = {
+ .name = "cpu_timer",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(frequency, CPUTimer),
+ VMSTATE_UINT32(disabled, CPUTimer),
+ VMSTATE_UINT64(disabled_mask, CPUTimer),
+ VMSTATE_UINT32(npt, CPUTimer),
+ VMSTATE_UINT64(npt_mask, CPUTimer),
+ VMSTATE_INT64(clock_offset, CPUTimer),
+ VMSTATE_TIMER_PTR(qtimer, CPUTimer),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+#define VMSTATE_CPU_TIMER(_f, _s) \
+ VMSTATE_STRUCT_POINTER(_f, _s, vmstate_cpu_timer, CPUTimer)
+
+static const VMStateDescription vmstate_trap_state = {
+ .name = "trap_state",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(tpc, trap_state),
+ VMSTATE_UINT64(tnpc, trap_state),
+ VMSTATE_UINT64(tstate, trap_state),
+ VMSTATE_UINT32(tt, trap_state),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription vmstate_tlb_entry = {
+ .name = "tlb_entry",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(tag, SparcTLBEntry),
+ VMSTATE_UINT64(tte, SparcTLBEntry),
+ VMSTATE_END_OF_LIST()
+ }
+};
+#endif
+
+static int get_psr(QEMUFile *f, void *opaque, size_t size)
+{
+ SPARCCPU *cpu = opaque;
+ CPUSPARCState *env = &cpu->env;
+ uint32_t val = qemu_get_be32(f);
+
+ /* needed to ensure that the wrapping registers are correctly updated */
+ env->cwp = 0;
+ cpu_put_psr_raw(env, val);
+
+ return 0;
+}
+
+static void put_psr(QEMUFile *f, void *opaque, size_t size)
+{
+ SPARCCPU *cpu = opaque;
+ CPUSPARCState *env = &cpu->env;
+ uint32_t val;
+
+ val = cpu_get_psr(env);
+
+ qemu_put_be32(f, val);
+}
+
+static const VMStateInfo vmstate_psr = {
+ .name = "psr",
+ .get = get_psr,
+ .put = put_psr,
+};
+
+static void cpu_pre_save(void *opaque)
+{
+ SPARCCPU *cpu = opaque;
+ CPUSPARCState *env = &cpu->env;
+
+ /* if env->cwp == env->nwindows - 1, this will set the ins of the last
+ * window as the outs of the first window
+ */
+ cpu_set_cwp(env, env->cwp);
+}
+
+/* 32-bit SPARC retains migration compatibility with older versions
+ * of QEMU; 64-bit SPARC has had a migration break since then, so the
+ * versions are different.
+ */
+#ifndef TARGET_SPARC64
+#define SPARC_VMSTATE_VER 7
+#else
+#define SPARC_VMSTATE_VER 9
+#endif
+
+const VMStateDescription vmstate_sparc_cpu = {
+ .name = "cpu",
+ .version_id = SPARC_VMSTATE_VER,
+ .minimum_version_id = SPARC_VMSTATE_VER,
+ .minimum_version_id_old = SPARC_VMSTATE_VER,
+ .pre_save = cpu_pre_save,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINTTL_ARRAY(env.gregs, SPARCCPU, 8),
+ VMSTATE_UINT32(env.nwindows, SPARCCPU),
+ VMSTATE_VARRAY_MULTIPLY(env.regbase, SPARCCPU, env.nwindows, 16,
+ vmstate_info_uinttl, target_ulong),
+ VMSTATE_CPUDOUBLE_ARRAY(env.fpr, SPARCCPU, TARGET_DPREGS),
+ VMSTATE_UINTTL(env.pc, SPARCCPU),
+ VMSTATE_UINTTL(env.npc, SPARCCPU),
+ VMSTATE_UINTTL(env.y, SPARCCPU),
+ {
+
+ .name = "psr",
+ .version_id = 0,
+ .size = sizeof(uint32_t),
+ .info = &vmstate_psr,
+ .flags = VMS_SINGLE,
+ .offset = 0,
+ },
+ VMSTATE_UINTTL(env.fsr, SPARCCPU),
+ VMSTATE_UINTTL(env.tbr, SPARCCPU),
+ VMSTATE_INT32(env.interrupt_index, SPARCCPU),
+ VMSTATE_UINT32(env.pil_in, SPARCCPU),
+#ifndef TARGET_SPARC64
+ /* MMU */
+ VMSTATE_UINT32(env.wim, SPARCCPU),
+ VMSTATE_UINT32_ARRAY(env.mmuregs, SPARCCPU, 32),
+ VMSTATE_UINT64_ARRAY(env.mxccdata, SPARCCPU, 4),
+ VMSTATE_UINT64_ARRAY(env.mxccregs, SPARCCPU, 8),
+ VMSTATE_UINT32(env.mmubpctrv, SPARCCPU),
+ VMSTATE_UINT32(env.mmubpctrc, SPARCCPU),
+ VMSTATE_UINT32(env.mmubpctrs, SPARCCPU),
+ VMSTATE_UINT64(env.mmubpaction, SPARCCPU),
+ VMSTATE_UINT64_ARRAY(env.mmubpregs, SPARCCPU, 4),
+#else
+ VMSTATE_UINT64(env.lsu, SPARCCPU),
+ VMSTATE_UINT64_ARRAY(env.immuregs, SPARCCPU, 16),
+ VMSTATE_UINT64_ARRAY(env.dmmuregs, SPARCCPU, 16),
+ VMSTATE_STRUCT_ARRAY(env.itlb, SPARCCPU, 64, 0,
+ vmstate_tlb_entry, SparcTLBEntry),
+ VMSTATE_STRUCT_ARRAY(env.dtlb, SPARCCPU, 64, 0,
+ vmstate_tlb_entry, SparcTLBEntry),
+ VMSTATE_UINT32(env.mmu_version, SPARCCPU),
+ VMSTATE_STRUCT_ARRAY(env.ts, SPARCCPU, MAXTL_MAX, 0,
+ vmstate_trap_state, trap_state),
+ VMSTATE_UINT32(env.xcc, SPARCCPU),
+ VMSTATE_UINT32(env.asi, SPARCCPU),
+ VMSTATE_UINT32(env.pstate, SPARCCPU),
+ VMSTATE_UINT32(env.tl, SPARCCPU),
+ VMSTATE_UINT32(env.cansave, SPARCCPU),
+ VMSTATE_UINT32(env.canrestore, SPARCCPU),
+ VMSTATE_UINT32(env.otherwin, SPARCCPU),
+ VMSTATE_UINT32(env.wstate, SPARCCPU),
+ VMSTATE_UINT32(env.cleanwin, SPARCCPU),
+ VMSTATE_UINT64_ARRAY(env.agregs, SPARCCPU, 8),
+ VMSTATE_UINT64_ARRAY(env.bgregs, SPARCCPU, 8),
+ VMSTATE_UINT64_ARRAY(env.igregs, SPARCCPU, 8),
+ VMSTATE_UINT64_ARRAY(env.mgregs, SPARCCPU, 8),
+ VMSTATE_UINT64(env.fprs, SPARCCPU),
+ VMSTATE_UINT64(env.tick_cmpr, SPARCCPU),
+ VMSTATE_UINT64(env.stick_cmpr, SPARCCPU),
+ VMSTATE_CPU_TIMER(env.tick, SPARCCPU),
+ VMSTATE_CPU_TIMER(env.stick, SPARCCPU),
+ VMSTATE_UINT64(env.gsr, SPARCCPU),
+ VMSTATE_UINT32(env.gl, SPARCCPU),
+ VMSTATE_UINT64(env.hpstate, SPARCCPU),
+ VMSTATE_UINT64_ARRAY(env.htstate, SPARCCPU, MAXTL_MAX),
+ VMSTATE_UINT64(env.hintp, SPARCCPU),
+ VMSTATE_UINT64(env.htba, SPARCCPU),
+ VMSTATE_UINT64(env.hver, SPARCCPU),
+ VMSTATE_UINT64(env.hstick_cmpr, SPARCCPU),
+ VMSTATE_UINT64(env.ssr, SPARCCPU),
+ VMSTATE_CPU_TIMER(env.hstick, SPARCCPU),
+ /* On SPARC32 env.psrpil and env.cwp are migrated as part of the PSR */
+ VMSTATE_UINT32(env.psrpil, SPARCCPU),
+ VMSTATE_UINT32(env.cwp, SPARCCPU),
+#endif
+ VMSTATE_END_OF_LIST()
+ },
+};
diff --git a/target/sparc/mmu_helper.c b/target/sparc/mmu_helper.c
new file mode 100644
index 0000000000..044e88c4c5
--- /dev/null
+++ b/target/sparc/mmu_helper.c
@@ -0,0 +1,880 @@
+/*
+ * Sparc MMU helpers
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "trace.h"
+#include "exec/address-spaces.h"
+
+/* Sparc MMU emulation */
+
+#if defined(CONFIG_USER_ONLY)
+
+int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
+ int mmu_idx)
+{
+ if (rw & 2) {
+ cs->exception_index = TT_TFAULT;
+ } else {
+ cs->exception_index = TT_DFAULT;
+ }
+ return 1;
+}
+
+#else
+
+#ifndef TARGET_SPARC64
+/*
+ * Sparc V8 Reference MMU (SRMMU)
+ */
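+/* access_table[access_index][PTE access permissions] is zero when the
+ access is allowed, otherwise an SRMMU fault type already shifted into
+ the SFSR FT field (bits 4:2): 8 = protection error, 12 = privilege
+ violation. */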
+static const int access_table[8][8] = {
+ { 0, 0, 0, 0, 8, 0, 12, 12 },
+ { 0, 0, 0, 0, 8, 0, 0, 0 },
+ { 8, 8, 0, 0, 0, 8, 12, 12 },
+ { 8, 8, 0, 0, 0, 8, 0, 0 },
+ { 8, 0, 8, 0, 8, 8, 12, 12 },
+ { 8, 0, 8, 0, 8, 0, 8, 0 },
+ { 8, 8, 8, 0, 8, 8, 12, 12 },
+ { 8, 8, 8, 0, 8, 8, 8, 0 }
+};
+
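+/* perm_table[is_user][PTE access permissions] gives the QEMU page
+ protection flags granted by each SRMMU ACC encoding. */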
+static const int perm_table[2][8] = {
+ {
+ PAGE_READ,
+ PAGE_READ | PAGE_WRITE,
+ PAGE_READ | PAGE_EXEC,
+ PAGE_READ | PAGE_WRITE | PAGE_EXEC,
+ PAGE_EXEC,
+ PAGE_READ | PAGE_WRITE,
+ PAGE_READ | PAGE_EXEC,
+ PAGE_READ | PAGE_WRITE | PAGE_EXEC
+ },
+ {
+ PAGE_READ,
+ PAGE_READ | PAGE_WRITE,
+ PAGE_READ | PAGE_EXEC,
+ PAGE_READ | PAGE_WRITE | PAGE_EXEC,
+ PAGE_EXEC,
+ PAGE_READ,
+ 0,
+ 0,
+ }
+};
+
+static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
+ int *prot, int *access_index,
+ target_ulong address, int rw, int mmu_idx,
+ target_ulong *page_size)
+{
+ int access_perms = 0;
+ hwaddr pde_ptr;
+ uint32_t pde;
+ int error_code = 0, is_dirty, is_user;
+ unsigned long page_offset;
+ CPUState *cs = CPU(sparc_env_get_cpu(env));
+
+ is_user = mmu_idx == MMU_USER_IDX;
+
+ if (mmu_idx == MMU_PHYS_IDX) {
+ *page_size = TARGET_PAGE_SIZE;
+ /* Boot mode: instruction fetches are taken from PROM */
+ if (rw == 2 && (env->mmuregs[0] & env->def->mmu_bm)) {
+ *physical = env->prom_addr | (address & 0x7ffffULL);
+ *prot = PAGE_READ | PAGE_EXEC;
+ return 0;
+ }
+ *physical = address;
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return 0;
+ }
+
+ *access_index = ((rw & 1) << 2) | (rw & 2) | (is_user ? 0 : 1);
+ *physical = 0xffffffffffff0000ULL;
+
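+ /* On failure the return value encodes the SRMMU fault: table walk level
+ in bits 9:8, fault type in bits 4:2 (1 = invalid address error,
+ 4 = translation error). */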
+ /* SPARC reference MMU table walk: Context table->L1->L2->PTE */
+ /* Context base + context number */
+ pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2);
+ pde = ldl_phys(cs->as, pde_ptr);
+
+ /* Ctx pde */
+ switch (pde & PTE_ENTRYTYPE_MASK) {
+ default:
+ case 0: /* Invalid */
+ return 1 << 2;
+ case 2: /* L0 PTE, maybe should not happen? */
+ case 3: /* Reserved */
+ return 4 << 2;
+ case 1: /* L0 PDE */
+ pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
+ pde = ldl_phys(cs->as, pde_ptr);
+
+ switch (pde & PTE_ENTRYTYPE_MASK) {
+ default:
+ case 0: /* Invalid */
+ return (1 << 8) | (1 << 2);
+ case 3: /* Reserved */
+ return (1 << 8) | (4 << 2);
+ case 1: /* L1 PDE */
+ pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
+ pde = ldl_phys(cs->as, pde_ptr);
+
+ switch (pde & PTE_ENTRYTYPE_MASK) {
+ default:
+ case 0: /* Invalid */
+ return (2 << 8) | (1 << 2);
+ case 3: /* Reserved */
+ return (2 << 8) | (4 << 2);
+ case 1: /* L2 PDE */
+ pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
+ pde = ldl_phys(cs->as, pde_ptr);
+
+ switch (pde & PTE_ENTRYTYPE_MASK) {
+ default:
+ case 0: /* Invalid */
+ return (3 << 8) | (1 << 2);
+ case 1: /* PDE, should not happen */
+ case 3: /* Reserved */
+ return (3 << 8) | (4 << 2);
+ case 2: /* L3 PTE */
+ page_offset = 0;
+ }
+ *page_size = TARGET_PAGE_SIZE;
+ break;
+ case 2: /* L2 PTE */
+ page_offset = address & 0x3f000;
+ *page_size = 0x40000;
+ }
+ break;
+ case 2: /* L1 PTE */
+ page_offset = address & 0xfff000;
+ *page_size = 0x1000000;
+ }
+ }
+
+ /* check access */
+ access_perms = (pde & PTE_ACCESS_MASK) >> PTE_ACCESS_SHIFT;
+ error_code = access_table[*access_index][access_perms];
+ if (error_code && !((env->mmuregs[0] & MMU_NF) && is_user)) {
+ return error_code;
+ }
+
+ /* update page modified and dirty bits */
+ is_dirty = (rw & 1) && !(pde & PG_MODIFIED_MASK);
+ if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
+ pde |= PG_ACCESSED_MASK;
+ if (is_dirty) {
+ pde |= PG_MODIFIED_MASK;
+ }
+ stl_phys_notdirty(cs->as, pde_ptr, pde);
+ }
+
+ /* the page can be put in the TLB */
+ *prot = perm_table[is_user][access_perms];
+ if (!(pde & PG_MODIFIED_MASK)) {
+ /* only set write access if already dirty... otherwise wait
+ for dirty access */
+ *prot &= ~PAGE_WRITE;
+ }
+
+ /* Even if large ptes, we map only one 4KB page in the cache to
+ avoid filling it too fast */
+ *physical = ((hwaddr)(pde & PTE_ADDR_MASK) << 4) + page_offset;
+ return error_code;
+}
+
+/* Perform address translation */
+int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
+ int mmu_idx)
+{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+ CPUSPARCState *env = &cpu->env;
+ hwaddr paddr;
+ target_ulong vaddr;
+ target_ulong page_size;
+ int error_code = 0, prot, access_index;
+
+ address &= TARGET_PAGE_MASK;
+ error_code = get_physical_address(env, &paddr, &prot, &access_index,
+ address, rw, mmu_idx, &page_size);
+ vaddr = address;
+ if (error_code == 0) {
+ qemu_log_mask(CPU_LOG_MMU,
+ "Translate at %" VADDR_PRIx " -> " TARGET_FMT_plx ", vaddr "
+ TARGET_FMT_lx "\n", address, paddr, vaddr);
+ tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size);
+ return 0;
+ }
+
+ if (env->mmuregs[3]) { /* Fault status register */
+ env->mmuregs[3] = 1; /* overflow (not read before another fault) */
+ }
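+ /* SFSR: access type in bits 7:5; error_code already carries the walk
+ level (bits 9:8) and fault type (bits 4:2); bit 1 = fault address valid */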
+ env->mmuregs[3] |= (access_index << 5) | error_code | 2;
+ env->mmuregs[4] = address; /* Fault address register */
+
+ if ((env->mmuregs[0] & MMU_NF) || env->psret == 0) {
+ /* No fault mode: if a mapping is available, just override
+ permissions. If no mapping is available, redirect accesses to
+ neverland. Fake/overridden mappings will be flushed when
+ switching to normal mode. */
+ prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, TARGET_PAGE_SIZE);
+ return 0;
+ } else {
+ if (rw & 2) {
+ cs->exception_index = TT_TFAULT;
+ } else {
+ cs->exception_index = TT_DFAULT;
+ }
+ return 1;
+ }
+}
+
+target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev)
+{
+ CPUState *cs = CPU(sparc_env_get_cpu(env));
+ hwaddr pde_ptr;
+ uint32_t pde;
+
+ /* Context base + context number */
+ pde_ptr = (hwaddr)(env->mmuregs[1] << 4) +
+ (env->mmuregs[2] << 2);
+ pde = ldl_phys(cs->as, pde_ptr);
+
+ switch (pde & PTE_ENTRYTYPE_MASK) {
+ default:
+ case 0: /* Invalid */
+ case 2: /* PTE, maybe should not happen? */
+ case 3: /* Reserved */
+ return 0;
+ case 1: /* L1 PDE */
+ if (mmulev == 3) {
+ return pde;
+ }
+ pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
+ pde = ldl_phys(cs->as, pde_ptr);
+
+ switch (pde & PTE_ENTRYTYPE_MASK) {
+ default:
+ case 0: /* Invalid */
+ case 3: /* Reserved */
+ return 0;
+ case 2: /* L1 PTE */
+ return pde;
+ case 1: /* L2 PDE */
+ if (mmulev == 2) {
+ return pde;
+ }
+ pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
+ pde = ldl_phys(cs->as, pde_ptr);
+
+ switch (pde & PTE_ENTRYTYPE_MASK) {
+ default:
+ case 0: /* Invalid */
+ case 3: /* Reserved */
+ return 0;
+ case 2: /* L2 PTE */
+ return pde;
+ case 1: /* L3 PDE */
+ if (mmulev == 1) {
+ return pde;
+ }
+ pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
+ pde = ldl_phys(cs->as, pde_ptr);
+
+ switch (pde & PTE_ENTRYTYPE_MASK) {
+ default:
+ case 0: /* Invalid */
+ case 1: /* PDE, should not happen */
+ case 3: /* Reserved */
+ return 0;
+ case 2: /* L3 PTE */
+ return pde;
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUSPARCState *env)
+{
+ CPUState *cs = CPU(sparc_env_get_cpu(env));
+ target_ulong va, va1, va2;
+ unsigned int n, m, o;
+ hwaddr pde_ptr, pa;
+ uint32_t pde;
+
+ pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2);
+ pde = ldl_phys(cs->as, pde_ptr);
+ (*cpu_fprintf)(f, "Root ptr: " TARGET_FMT_plx ", ctx: %d\n",
+ (hwaddr)env->mmuregs[1] << 4, env->mmuregs[2]);
+ for (n = 0, va = 0; n < 256; n++, va += 16 * 1024 * 1024) {
+ pde = mmu_probe(env, va, 2);
+ if (pde) {
+ pa = cpu_get_phys_page_debug(cs, va);
+ (*cpu_fprintf)(f, "VA: " TARGET_FMT_lx ", PA: " TARGET_FMT_plx
+ " PDE: " TARGET_FMT_lx "\n", va, pa, pde);
+ for (m = 0, va1 = va; m < 64; m++, va1 += 256 * 1024) {
+ pde = mmu_probe(env, va1, 1);
+ if (pde) {
+ pa = cpu_get_phys_page_debug(cs, va1);
+ (*cpu_fprintf)(f, " VA: " TARGET_FMT_lx ", PA: "
+ TARGET_FMT_plx " PDE: " TARGET_FMT_lx "\n",
+ va1, pa, pde);
+ for (o = 0, va2 = va1; o < 64; o++, va2 += 4 * 1024) {
+ pde = mmu_probe(env, va2, 0);
+ if (pde) {
+ pa = cpu_get_phys_page_debug(cs, va2);
+ (*cpu_fprintf)(f, " VA: " TARGET_FMT_lx ", PA: "
+ TARGET_FMT_plx " PTE: "
+ TARGET_FMT_lx "\n",
+ va2, pa, pde);
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+/* GDB expects all register windows to be flushed to RAM. This function handles
+ * reads (and only reads) in stack frames as if the windows were flushed. We
+ * assume that the SPARC ABI is followed.
+ */
+int sparc_cpu_memory_rw_debug(CPUState *cs, vaddr address,
+ uint8_t *buf, int len, bool is_write)
+{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+ CPUSPARCState *env = &cpu->env;
+ target_ulong addr = address;
+ int i;
+ int len1;
+ int cwp = env->cwp;
+
+ if (!is_write) {
+ for (i = 0; i < env->nwindows; i++) {
+ int off;
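+ /* Register 22 within the window is %i6, the frame pointer. */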
+ target_ulong fp = env->regbase[cwp * 16 + 22];
+
+ /* Assume fp == 0 means end of frame. */
+ if (fp == 0) {
+ break;
+ }
+
+ cwp = cpu_cwp_inc(env, cwp + 1);
+
+ /* Invalid window ? */
+ if (env->wim & (1 << cwp)) {
+ break;
+ }
+
+ /* According to the ABI, the stack is growing downward. */
+ if (addr + len < fp) {
+ break;
+ }
+
+ /* Not in this frame. */
+ if (addr > fp + 64) {
+ continue;
+ }
+
+ /* Handle access before this window. */
+ if (addr < fp) {
+ len1 = fp - addr;
+ if (cpu_memory_rw_debug(cs, addr, buf, len1, is_write) != 0) {
+ return -1;
+ }
+ addr += len1;
+ len -= len1;
+ buf += len1;
+ }
+
+ /* Access byte per byte to registers. Not very efficient but speed
+ * is not critical.
+ */
+ off = addr - fp;
+ len1 = 64 - off;
+
+ if (len1 > len) {
+ len1 = len;
+ }
+
+ for (; len1; len1--) {
+ int reg = cwp * 16 + 8 + (off >> 2);
+ union {
+ uint32_t v;
+ uint8_t c[4];
+ } u;
+ u.v = cpu_to_be32(env->regbase[reg]);
+ *buf++ = u.c[off & 3];
+ addr++;
+ len--;
+ off++;
+ }
+
+ if (len == 0) {
+ return 0;
+ }
+ }
+ }
+ return cpu_memory_rw_debug(cs, addr, buf, len, is_write);
+}
+
+#else /* !TARGET_SPARC64 */
+
+/* 41 bit physical address space */
+static inline hwaddr ultrasparc_truncate_physical(uint64_t x)
+{
+ return x & 0x1ffffffffffULL;
+}
+
+/*
+ * UltraSparc IIi I/DMMUs
+ */
+
+/* Returns true if the TTE tag is valid and matches the virtual address
+ value in the given context; requires the virtual address mask value
+ calculated from the TTE entry size */
+static inline int ultrasparc_tag_match(SparcTLBEntry *tlb,
+ uint64_t address, uint64_t context,
+ hwaddr *physical)
+{
+ uint64_t mask;
+
+ switch (TTE_PGSIZE(tlb->tte)) {
+ default:
+ case 0x0: /* 8k */
+ mask = 0xffffffffffffe000ULL;
+ break;
+ case 0x1: /* 64k */
+ mask = 0xffffffffffff0000ULL;
+ break;
+ case 0x2: /* 512k */
+ mask = 0xfffffffffff80000ULL;
+ break;
+ case 0x3: /* 4M */
+ mask = 0xffffffffffc00000ULL;
+ break;
+ }
+
+ /* valid, context match, virtual address match? */
+ if (TTE_IS_VALID(tlb->tte) &&
+ (TTE_IS_GLOBAL(tlb->tte) || tlb_compare_context(tlb, context))
+ && compare_masked(address, tlb->tag, mask)) {
+ /* decode physical address */
+ *physical = ((tlb->tte & mask) | (address & ~mask)) & 0x1ffffffe000ULL;
+ return 1;
+ }
+
+ return 0;
+}
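+
+/* Worked example for the tag match above (hypothetical values): for a
+ * 512k page the mask is 0xfffffffffff80000ULL, so compare_masked() only
+ * requires tlb->tag and the virtual address to agree in bits 63..19;
+ * any access within the 512 KB region covered by the entry hits the same
+ * TTE. The physical address is then (tte & mask) | (address & ~mask),
+ * truncated to the chip's 41-bit physical address space.
+ */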
+
+static int get_physical_address_data(CPUSPARCState *env,
+ hwaddr *physical, int *prot,
+ target_ulong address, int rw, int mmu_idx)
+{
+ CPUState *cs = CPU(sparc_env_get_cpu(env));
+ unsigned int i;
+ uint64_t context;
+ uint64_t sfsr = 0;
+ bool is_user = false;
+
+ switch (mmu_idx) {
+ case MMU_PHYS_IDX:
+ g_assert_not_reached();
+ case MMU_USER_IDX:
+ is_user = true;
+ /* fallthru */
+ case MMU_KERNEL_IDX:
+ context = env->dmmu.mmu_primary_context & 0x1fff;
+ sfsr |= SFSR_CT_PRIMARY;
+ break;
+ case MMU_USER_SECONDARY_IDX:
+ is_user = true;
+ /* fallthru */
+ case MMU_KERNEL_SECONDARY_IDX:
+ context = env->dmmu.mmu_secondary_context & 0x1fff;
+ sfsr |= SFSR_CT_SECONDARY;
+ break;
+ case MMU_NUCLEUS_IDX:
+ sfsr |= SFSR_CT_NUCLEUS;
+ /* FALLTHRU */
+ default:
+ context = 0;
+ break;
+ }
+
+ if (rw == 1) {
+ sfsr |= SFSR_WRITE_BIT;
+ } else if (rw == 4) {
+ sfsr |= SFSR_NF_BIT;
+ }
+
+ for (i = 0; i < 64; i++) {
+ /* ctx match, vaddr match, valid? */
+ if (ultrasparc_tag_match(&env->dtlb[i], address, context, physical)) {
+ int do_fault = 0;
+
+ /* access ok? */
+ /* multiple bits in SFSR.FT may be set on TT_DFAULT */
+ if (TTE_IS_PRIV(env->dtlb[i].tte) && is_user) {
+ do_fault = 1;
+ sfsr |= SFSR_FT_PRIV_BIT; /* privilege violation */
+ trace_mmu_helper_dfault(address, context, mmu_idx, env->tl);
+ }
+ if (rw == 4) {
+ if (TTE_IS_SIDEEFFECT(env->dtlb[i].tte)) {
+ do_fault = 1;
+ sfsr |= SFSR_FT_NF_E_BIT;
+ }
+ } else {
+ if (TTE_IS_NFO(env->dtlb[i].tte)) {
+ do_fault = 1;
+ sfsr |= SFSR_FT_NFO_BIT;
+ }
+ }
+
+ if (do_fault) {
+ /* faults above are reported with TT_DFAULT. */
+ cs->exception_index = TT_DFAULT;
+ } else if (!TTE_IS_W_OK(env->dtlb[i].tte) && (rw == 1)) {
+ do_fault = 1;
+ cs->exception_index = TT_DPROT;
+
+ trace_mmu_helper_dprot(address, context, mmu_idx, env->tl);
+ }
+
+ if (!do_fault) {
+ *prot = PAGE_READ;
+ if (TTE_IS_W_OK(env->dtlb[i].tte)) {
+ *prot |= PAGE_WRITE;
+ }
+
+ TTE_SET_USED(env->dtlb[i].tte);
+
+ return 0;
+ }
+
+ if (env->dmmu.sfsr & SFSR_VALID_BIT) { /* Fault status register */
+ sfsr |= SFSR_OW_BIT; /* overflow (not read before
+ another fault) */
+ }
+
+ if (env->pstate & PS_PRIV) {
+ sfsr |= SFSR_PR_BIT;
+ }
+
+ /* FIXME: ASI field in SFSR must be set */
+ env->dmmu.sfsr = sfsr | SFSR_VALID_BIT;
+
+ env->dmmu.sfar = address; /* Fault address register */
+
+ env->dmmu.tag_access = (address & ~0x1fffULL) | context;
+
+ return 1;
+ }
+ }
+
+ trace_mmu_helper_dmiss(address, context);
+
+ /*
+ * On MMU misses:
+ * - UltraSPARC IIi: SFSR and SFAR unmodified
+ * - JPS1: SFAR updated and some fields of SFSR updated
+ */
+ env->dmmu.tag_access = (address & ~0x1fffULL) | context;
+ cs->exception_index = TT_DMISS;
+ return 1;
+}
+
+static int get_physical_address_code(CPUSPARCState *env,
+ hwaddr *physical, int *prot,
+ target_ulong address, int mmu_idx)
+{
+ CPUState *cs = CPU(sparc_env_get_cpu(env));
+ unsigned int i;
+ uint64_t context;
+ bool is_user = false;
+
+ switch (mmu_idx) {
+ case MMU_PHYS_IDX:
+ case MMU_USER_SECONDARY_IDX:
+ case MMU_KERNEL_SECONDARY_IDX:
+ g_assert_not_reached();
+ case MMU_USER_IDX:
+ is_user = true;
+ /* fallthru */
+ case MMU_KERNEL_IDX:
+ context = env->dmmu.mmu_primary_context & 0x1fff;
+ break;
+ default:
+ context = 0;
+ break;
+ }
+
+ if (env->tl == 0) {
+ /* PRIMARY context */
+ context = env->dmmu.mmu_primary_context & 0x1fff;
+ } else {
+ /* NUCLEUS context */
+ context = 0;
+ }
+
+ for (i = 0; i < 64; i++) {
+ /* ctx match, vaddr match, valid? */
+ if (ultrasparc_tag_match(&env->itlb[i],
+ address, context, physical)) {
+ /* access ok? */
+ if (TTE_IS_PRIV(env->itlb[i].tte) && is_user) {
+ /* Fault status register */
+ if (env->immu.sfsr & SFSR_VALID_BIT) {
+ env->immu.sfsr = SFSR_OW_BIT; /* overflow (not read before
+ another fault) */
+ } else {
+ env->immu.sfsr = 0;
+ }
+ if (env->pstate & PS_PRIV) {
+ env->immu.sfsr |= SFSR_PR_BIT;
+ }
+ if (env->tl > 0) {
+ env->immu.sfsr |= SFSR_CT_NUCLEUS;
+ }
+
+ /* FIXME: ASI field in SFSR must be set */
+ env->immu.sfsr |= SFSR_FT_PRIV_BIT | SFSR_VALID_BIT;
+ cs->exception_index = TT_TFAULT;
+
+ env->immu.tag_access = (address & ~0x1fffULL) | context;
+
+ trace_mmu_helper_tfault(address, context);
+
+ return 1;
+ }
+ *prot = PAGE_EXEC;
+ TTE_SET_USED(env->itlb[i].tte);
+ return 0;
+ }
+ }
+
+ trace_mmu_helper_tmiss(address, context);
+
+ /* The context register lives in the DMMU (dmmuregs[1]) and is used for the IMMU as well */
+ env->immu.tag_access = (address & ~0x1fffULL) | context;
+ cs->exception_index = TT_TMISS;
+ return 1;
+}
+
+static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
+ int *prot, int *access_index,
+ target_ulong address, int rw, int mmu_idx,
+ target_ulong *page_size)
+{
+ /* ??? We treat everything as a small page, then explicitly flush
+ everything when an entry is evicted. */
+ *page_size = TARGET_PAGE_SIZE;
+
+ /* safety net to catch wrong softmmu index use from dynamic code */
+ if (env->tl > 0 && mmu_idx != MMU_NUCLEUS_IDX) {
+ if (rw == 2) {
+ trace_mmu_helper_get_phys_addr_code(env->tl, mmu_idx,
+ env->dmmu.mmu_primary_context,
+ env->dmmu.mmu_secondary_context,
+ address);
+ } else {
+ trace_mmu_helper_get_phys_addr_data(env->tl, mmu_idx,
+ env->dmmu.mmu_primary_context,
+ env->dmmu.mmu_secondary_context,
+ address);
+ }
+ }
+
+ if (mmu_idx == MMU_PHYS_IDX) {
+ *physical = ultrasparc_truncate_physical(address);
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return 0;
+ }
+
+ if (rw == 2) {
+ return get_physical_address_code(env, physical, prot, address,
+ mmu_idx);
+ } else {
+ return get_physical_address_data(env, physical, prot, address, rw,
+ mmu_idx);
+ }
+}
+
+/* Perform address translation */
+int sparc_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
+ int mmu_idx)
+{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+ CPUSPARCState *env = &cpu->env;
+ target_ulong vaddr;
+ hwaddr paddr;
+ target_ulong page_size;
+ int error_code = 0, prot, access_index;
+
+ address &= TARGET_PAGE_MASK;
+ error_code = get_physical_address(env, &paddr, &prot, &access_index,
+ address, rw, mmu_idx, &page_size);
+ if (error_code == 0) {
+ vaddr = address;
+
+ trace_mmu_helper_mmu_fault(address, paddr, mmu_idx, env->tl,
+ env->dmmu.mmu_primary_context,
+ env->dmmu.mmu_secondary_context);
+
+ tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size);
+ return 0;
+ }
+ /* XXX */
+ return 1;
+}
+
+void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUSPARCState *env)
+{
+ unsigned int i;
+ const char *mask;
+
+ (*cpu_fprintf)(f, "MMU contexts: Primary: %" PRId64 ", Secondary: %"
+ PRId64 "\n",
+ env->dmmu.mmu_primary_context,
+ env->dmmu.mmu_secondary_context);
+ if ((env->lsu & DMMU_E) == 0) {
+ (*cpu_fprintf)(f, "DMMU disabled\n");
+ } else {
+ (*cpu_fprintf)(f, "DMMU dump\n");
+ for (i = 0; i < 64; i++) {
+ switch (TTE_PGSIZE(env->dtlb[i].tte)) {
+ default:
+ case 0x0:
+ mask = " 8k";
+ break;
+ case 0x1:
+ mask = " 64k";
+ break;
+ case 0x2:
+ mask = "512k";
+ break;
+ case 0x3:
+ mask = " 4M";
+ break;
+ }
+ if (TTE_IS_VALID(env->dtlb[i].tte)) {
+ (*cpu_fprintf)(f, "[%02u] VA: %" PRIx64 ", PA: %llx"
+ ", %s, %s, %s, %s, ctx %" PRId64 " %s\n",
+ i,
+ env->dtlb[i].tag & (uint64_t)~0x1fffULL,
+ TTE_PA(env->dtlb[i].tte),
+ mask,
+ TTE_IS_PRIV(env->dtlb[i].tte) ? "priv" : "user",
+ TTE_IS_W_OK(env->dtlb[i].tte) ? "RW" : "RO",
+ TTE_IS_LOCKED(env->dtlb[i].tte) ?
+ "locked" : "unlocked",
+ env->dtlb[i].tag & (uint64_t)0x1fffULL,
+ TTE_IS_GLOBAL(env->dtlb[i].tte) ?
+ "global" : "local");
+ }
+ }
+ }
+ if ((env->lsu & IMMU_E) == 0) {
+ (*cpu_fprintf)(f, "IMMU disabled\n");
+ } else {
+ (*cpu_fprintf)(f, "IMMU dump\n");
+ for (i = 0; i < 64; i++) {
+ switch (TTE_PGSIZE(env->itlb[i].tte)) {
+ default:
+ case 0x0:
+ mask = " 8k";
+ break;
+ case 0x1:
+ mask = " 64k";
+ break;
+ case 0x2:
+ mask = "512k";
+ break;
+ case 0x3:
+ mask = " 4M";
+ break;
+ }
+ if (TTE_IS_VALID(env->itlb[i].tte)) {
+ (*cpu_fprintf)(f, "[%02u] VA: %" PRIx64 ", PA: %llx"
+ ", %s, %s, %s, ctx %" PRId64 " %s\n",
+ i,
+ env->itlb[i].tag & (uint64_t)~0x1fffULL,
+ TTE_PA(env->itlb[i].tte),
+ mask,
+ TTE_IS_PRIV(env->itlb[i].tte) ? "priv" : "user",
+ TTE_IS_LOCKED(env->itlb[i].tte) ?
+ "locked" : "unlocked",
+ env->itlb[i].tag & (uint64_t)0x1fffULL,
+ TTE_IS_GLOBAL(env->itlb[i].tte) ?
+ "global" : "local");
+ }
+ }
+ }
+}
+
+#endif /* TARGET_SPARC64 */
+
+static int cpu_sparc_get_phys_page(CPUSPARCState *env, hwaddr *phys,
+ target_ulong addr, int rw, int mmu_idx)
+{
+ target_ulong page_size;
+ int prot, access_index;
+
+ return get_physical_address(env, phys, &prot, &access_index, addr, rw,
+ mmu_idx, &page_size);
+}
+
+#if defined(TARGET_SPARC64)
+hwaddr cpu_get_phys_page_nofault(CPUSPARCState *env, target_ulong addr,
+ int mmu_idx)
+{
+ hwaddr phys_addr;
+
+ if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 4, mmu_idx) != 0) {
+ return -1;
+ }
+ return phys_addr;
+}
+#endif
+
+hwaddr sparc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+ SPARCCPU *cpu = SPARC_CPU(cs);
+ CPUSPARCState *env = &cpu->env;
+ hwaddr phys_addr;
+ int mmu_idx = cpu_mmu_index(env, false);
+ MemoryRegionSection section;
+
+ if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 2, mmu_idx) != 0) {
+ if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 0, mmu_idx) != 0) {
+ return -1;
+ }
+ }
+ section = memory_region_find(get_system_memory(), phys_addr, 1);
+ memory_region_unref(section.mr);
+ if (!int128_nz(section.size)) {
+ return -1;
+ }
+ return phys_addr;
+}
+#endif
diff --git a/target/sparc/monitor.c b/target/sparc/monitor.c
new file mode 100644
index 0000000000..7cc1b0f87f
--- /dev/null
+++ b/target/sparc/monitor.c
@@ -0,0 +1,159 @@
+/*
+ * QEMU monitor
+ *
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "monitor/monitor.h"
+#include "monitor/hmp-target.h"
+#include "hmp.h"
+
+
+void hmp_info_tlb(Monitor *mon, const QDict *qdict)
+{
+ CPUArchState *env1 = mon_get_cpu_env();
+
+ dump_mmu((FILE*)mon, (fprintf_function)monitor_printf, env1);
+}
+
+#ifndef TARGET_SPARC64
+static target_long monitor_get_psr (const struct MonitorDef *md, int val)
+{
+ CPUArchState *env = mon_get_cpu_env();
+
+ return cpu_get_psr(env);
+}
+#endif
+
+static target_long monitor_get_reg(const struct MonitorDef *md, int val)
+{
+ CPUArchState *env = mon_get_cpu_env();
+ return env->regwptr[val];
+}
+
+const MonitorDef monitor_defs[] = {
+ { "g0", offsetof(CPUSPARCState, gregs[0]) },
+ { "g1", offsetof(CPUSPARCState, gregs[1]) },
+ { "g2", offsetof(CPUSPARCState, gregs[2]) },
+ { "g3", offsetof(CPUSPARCState, gregs[3]) },
+ { "g4", offsetof(CPUSPARCState, gregs[4]) },
+ { "g5", offsetof(CPUSPARCState, gregs[5]) },
+ { "g6", offsetof(CPUSPARCState, gregs[6]) },
+ { "g7", offsetof(CPUSPARCState, gregs[7]) },
+ { "o0", 0, monitor_get_reg },
+ { "o1", 1, monitor_get_reg },
+ { "o2", 2, monitor_get_reg },
+ { "o3", 3, monitor_get_reg },
+ { "o4", 4, monitor_get_reg },
+ { "o5", 5, monitor_get_reg },
+ { "o6", 6, monitor_get_reg },
+ { "o7", 7, monitor_get_reg },
+ { "l0", 8, monitor_get_reg },
+ { "l1", 9, monitor_get_reg },
+ { "l2", 10, monitor_get_reg },
+ { "l3", 11, monitor_get_reg },
+ { "l4", 12, monitor_get_reg },
+ { "l5", 13, monitor_get_reg },
+ { "l6", 14, monitor_get_reg },
+ { "l7", 15, monitor_get_reg },
+ { "i0", 16, monitor_get_reg },
+ { "i1", 17, monitor_get_reg },
+ { "i2", 18, monitor_get_reg },
+ { "i3", 19, monitor_get_reg },
+ { "i4", 20, monitor_get_reg },
+ { "i5", 21, monitor_get_reg },
+ { "i6", 22, monitor_get_reg },
+ { "i7", 23, monitor_get_reg },
+ { "pc", offsetof(CPUSPARCState, pc) },
+ { "npc", offsetof(CPUSPARCState, npc) },
+ { "y", offsetof(CPUSPARCState, y) },
+#ifndef TARGET_SPARC64
+ { "psr", 0, &monitor_get_psr, },
+ { "wim", offsetof(CPUSPARCState, wim) },
+#endif
+ { "tbr", offsetof(CPUSPARCState, tbr) },
+ { "fsr", offsetof(CPUSPARCState, fsr) },
+ { "f0", offsetof(CPUSPARCState, fpr[0].l.upper) },
+ { "f1", offsetof(CPUSPARCState, fpr[0].l.lower) },
+ { "f2", offsetof(CPUSPARCState, fpr[1].l.upper) },
+ { "f3", offsetof(CPUSPARCState, fpr[1].l.lower) },
+ { "f4", offsetof(CPUSPARCState, fpr[2].l.upper) },
+ { "f5", offsetof(CPUSPARCState, fpr[2].l.lower) },
+ { "f6", offsetof(CPUSPARCState, fpr[3].l.upper) },
+ { "f7", offsetof(CPUSPARCState, fpr[3].l.lower) },
+ { "f8", offsetof(CPUSPARCState, fpr[4].l.upper) },
+ { "f9", offsetof(CPUSPARCState, fpr[4].l.lower) },
+ { "f10", offsetof(CPUSPARCState, fpr[5].l.upper) },
+ { "f11", offsetof(CPUSPARCState, fpr[5].l.lower) },
+ { "f12", offsetof(CPUSPARCState, fpr[6].l.upper) },
+ { "f13", offsetof(CPUSPARCState, fpr[6].l.lower) },
+ { "f14", offsetof(CPUSPARCState, fpr[7].l.upper) },
+ { "f15", offsetof(CPUSPARCState, fpr[7].l.lower) },
+ { "f16", offsetof(CPUSPARCState, fpr[8].l.upper) },
+ { "f17", offsetof(CPUSPARCState, fpr[8].l.lower) },
+ { "f18", offsetof(CPUSPARCState, fpr[9].l.upper) },
+ { "f19", offsetof(CPUSPARCState, fpr[9].l.lower) },
+ { "f20", offsetof(CPUSPARCState, fpr[10].l.upper) },
+ { "f21", offsetof(CPUSPARCState, fpr[10].l.lower) },
+ { "f22", offsetof(CPUSPARCState, fpr[11].l.upper) },
+ { "f23", offsetof(CPUSPARCState, fpr[11].l.lower) },
+ { "f24", offsetof(CPUSPARCState, fpr[12].l.upper) },
+ { "f25", offsetof(CPUSPARCState, fpr[12].l.lower) },
+ { "f26", offsetof(CPUSPARCState, fpr[13].l.upper) },
+ { "f27", offsetof(CPUSPARCState, fpr[13].l.lower) },
+ { "f28", offsetof(CPUSPARCState, fpr[14].l.upper) },
+ { "f29", offsetof(CPUSPARCState, fpr[14].l.lower) },
+ { "f30", offsetof(CPUSPARCState, fpr[15].l.upper) },
+ { "f31", offsetof(CPUSPARCState, fpr[15].l.lower) },
+#ifdef TARGET_SPARC64
+ { "f32", offsetof(CPUSPARCState, fpr[16]) },
+ { "f34", offsetof(CPUSPARCState, fpr[17]) },
+ { "f36", offsetof(CPUSPARCState, fpr[18]) },
+ { "f38", offsetof(CPUSPARCState, fpr[19]) },
+ { "f40", offsetof(CPUSPARCState, fpr[20]) },
+ { "f42", offsetof(CPUSPARCState, fpr[21]) },
+ { "f44", offsetof(CPUSPARCState, fpr[22]) },
+ { "f46", offsetof(CPUSPARCState, fpr[23]) },
+ { "f48", offsetof(CPUSPARCState, fpr[24]) },
+ { "f50", offsetof(CPUSPARCState, fpr[25]) },
+ { "f52", offsetof(CPUSPARCState, fpr[26]) },
+ { "f54", offsetof(CPUSPARCState, fpr[27]) },
+ { "f56", offsetof(CPUSPARCState, fpr[28]) },
+ { "f58", offsetof(CPUSPARCState, fpr[29]) },
+ { "f60", offsetof(CPUSPARCState, fpr[30]) },
+ { "f62", offsetof(CPUSPARCState, fpr[31]) },
+ { "asi", offsetof(CPUSPARCState, asi) },
+ { "pstate", offsetof(CPUSPARCState, pstate) },
+ { "cansave", offsetof(CPUSPARCState, cansave) },
+ { "canrestore", offsetof(CPUSPARCState, canrestore) },
+ { "otherwin", offsetof(CPUSPARCState, otherwin) },
+ { "wstate", offsetof(CPUSPARCState, wstate) },
+ { "cleanwin", offsetof(CPUSPARCState, cleanwin) },
+ { "fprs", offsetof(CPUSPARCState, fprs) },
+#endif
+ { NULL },
+};
+
+const MonitorDef *target_monitor_defs(void)
+{
+ return monitor_defs;
+}
diff --git a/target/sparc/trace-events b/target/sparc/trace-events
new file mode 100644
index 0000000000..8df178a347
--- /dev/null
+++ b/target/sparc/trace-events
@@ -0,0 +1,28 @@
+# See docs/tracing.txt for syntax documentation.
+
+# target/sparc/mmu_helper.c
+mmu_helper_dfault(uint64_t address, uint64_t context, int mmu_idx, uint32_t tl) "DFAULT at %"PRIx64" context %"PRIx64" mmu_idx=%d tl=%d"
+mmu_helper_dprot(uint64_t address, uint64_t context, int mmu_idx, uint32_t tl) "DPROT at %"PRIx64" context %"PRIx64" mmu_idx=%d tl=%d"
+mmu_helper_dmiss(uint64_t address, uint64_t context) "DMISS at %"PRIx64" context %"PRIx64
+mmu_helper_tfault(uint64_t address, uint64_t context) "TFAULT at %"PRIx64" context %"PRIx64
+mmu_helper_tmiss(uint64_t address, uint64_t context) "TMISS at %"PRIx64" context %"PRIx64
+mmu_helper_get_phys_addr_code(uint32_t tl, int mmu_idx, uint64_t prim_context, uint64_t sec_context, uint64_t address) "tl=%d mmu_idx=%d primary context=%"PRIx64" secondary context=%"PRIx64" address=%"PRIx64
+mmu_helper_get_phys_addr_data(uint32_t tl, int mmu_idx, uint64_t prim_context, uint64_t sec_context, uint64_t address) "tl=%d mmu_idx=%d primary context=%"PRIx64" secondary context=%"PRIx64" address=%"PRIx64
+mmu_helper_mmu_fault(uint64_t address, uint64_t paddr, int mmu_idx, uint32_t tl, uint64_t prim_context, uint64_t sec_context) "Translate at %"PRIx64" -> %"PRIx64", mmu_idx=%d tl=%d primary context=%"PRIx64" secondary context=%"PRIx64
+
+# target/sparc/int64_helper.c
+int_helper_set_softint(uint32_t softint) "new %08x"
+int_helper_clear_softint(uint32_t softint) "new %08x"
+int_helper_write_softint(uint32_t softint) "new %08x"
+
+# target/sparc/int32_helper.c
+int_helper_icache_freeze(void) "Instruction cache: freeze"
+int_helper_dcache_freeze(void) "Data cache: freeze"
+
+# target/sparc/win_helper.c
+win_helper_gregset_error(uint32_t pstate) "ERROR in get_gregset: active pstate bits=%x"
+win_helper_switch_pstate(uint32_t pstate_regs, uint32_t new_pstate_regs) "change_pstate: switching regs old=%x new=%x"
+win_helper_no_switch_pstate(uint32_t new_pstate_regs) "change_pstate: regs new=%x (unchanged)"
+win_helper_wrpil(uint32_t psrpil, uint32_t new_pil) "old=%x new=%x"
+win_helper_done(uint32_t tl) "tl=%d"
+win_helper_retry(uint32_t tl) "tl=%d"
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
new file mode 100644
index 0000000000..2205f89837
--- /dev/null
+++ b/target/sparc/translate.c
@@ -0,0 +1,5924 @@
+/*
+ SPARC translation
+
+ Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
+ Copyright (C) 2003-2005 Fabrice Bellard
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "disas/disas.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "tcg-op.h"
+#include "exec/cpu_ldst.h"
+
+#include "exec/helper-gen.h"
+
+#include "trace-tcg.h"
+#include "exec/log.h"
+#include "asi.h"
+
+
+#define DEBUG_DISAS
+
+#define DYNAMIC_PC 1 /* dynamic pc value */
+#define JUMP_PC 2 /* dynamic pc value which can take only the two values
+ stored in jump_pc[0]/jump_pc[1], selected by cpu_cond */
+
+/* global register indexes */
+static TCGv_env cpu_env;
+static TCGv_ptr cpu_regwptr;
+static TCGv cpu_cc_src, cpu_cc_src2, cpu_cc_dst;
+static TCGv_i32 cpu_cc_op;
+static TCGv_i32 cpu_psr;
+static TCGv cpu_fsr, cpu_pc, cpu_npc;
+static TCGv cpu_regs[32];
+static TCGv cpu_y;
+#ifndef CONFIG_USER_ONLY
+static TCGv cpu_tbr;
+#endif
+static TCGv cpu_cond;
+#ifdef TARGET_SPARC64
+static TCGv_i32 cpu_xcc, cpu_fprs;
+static TCGv cpu_gsr;
+static TCGv cpu_tick_cmpr, cpu_stick_cmpr, cpu_hstick_cmpr;
+static TCGv cpu_hintp, cpu_htba, cpu_hver, cpu_ssr, cpu_ver;
+#else
+static TCGv cpu_wim;
+#endif
+/* Floating point registers */
+static TCGv_i64 cpu_fpr[TARGET_DPREGS];
+
+#include "exec/gen-icount.h"
+
+typedef struct DisasContext {
+ target_ulong pc; /* current Program Counter: integer or DYNAMIC_PC */
+ target_ulong npc; /* next PC: integer or DYNAMIC_PC or JUMP_PC */
+ target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
+ int is_br;
+ int mem_idx;
+ int fpu_enabled;
+ int address_mask_32bit;
+ int singlestep;
+ uint32_t cc_op; /* current CC operation */
+ struct TranslationBlock *tb;
+ sparc_def_t *def;
+ TCGv_i32 t32[3];
+ TCGv ttl[5];
+ int n_t32;
+ int n_ttl;
+#ifdef TARGET_SPARC64
+ int fprs_dirty;
+ int asi;
+#endif
+} DisasContext;
+
+typedef struct {
+ TCGCond cond;
+ bool is_bool;
+ bool g1, g2;
+ TCGv c1, c2;
+} DisasCompare;
+
+// This macro uses non-native bit order: field positions are counted from
+// the MSB, so position 0 is bit 31 (see the example below)
+#define GET_FIELD(X, FROM, TO) \
+ ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
+
+// This macro uses the bit order of the SPARC manuals, i.e. bit 0 is 2^0 (the LSB)
+#define GET_FIELD_SP(X, FROM, TO) \
+ GET_FIELD(X, 31 - (TO), 31 - (FROM))
+
+#define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
+#define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
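+
+// Example (for illustration): GET_FIELD(insn, 3, 6) extracts instruction
+// bits <28:25> (the branch condition field used by do_branch()), and
+// GET_FIELD_SP(insn, 25, 28) selects exactly the same bits using the
+// manual's LSB-first numbering, since it expands to GET_FIELD(insn, 3, 6).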
+
+#ifdef TARGET_SPARC64
+#define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
+#define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
+#else
+#define DFPREG(r) (r & 0x1e)
+#define QFPREG(r) (r & 0x1c)
+#endif
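+
+// Example (for illustration): on sparc64 the double-precision register
+// number is rebuilt from the 5-bit field by moving bit 0 up to bit 5, so
+// an encoded field of 3 names %f34 (DFPREG(3) = (1 << 5) | 2 = 34); on
+// sparc32 the odd bit is simply masked off.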
+
+#define UA2005_HTRAP_MASK 0xff
+#define V8_TRAP_MASK 0x7f
+
+static int sign_extend(int x, int len)
+{
+ len = 32 - len;
+ return (x << len) >> len;
+}
+
+#define IS_IMM (insn & (1<<13))
+
+static inline TCGv_i32 get_temp_i32(DisasContext *dc)
+{
+ TCGv_i32 t;
+ assert(dc->n_t32 < ARRAY_SIZE(dc->t32));
+ dc->t32[dc->n_t32++] = t = tcg_temp_new_i32();
+ return t;
+}
+
+static inline TCGv get_temp_tl(DisasContext *dc)
+{
+ TCGv t;
+ assert(dc->n_ttl < ARRAY_SIZE(dc->ttl));
+ dc->ttl[dc->n_ttl++] = t = tcg_temp_new();
+ return t;
+}
+
+static inline void gen_update_fprs_dirty(DisasContext *dc, int rd)
+{
+#if defined(TARGET_SPARC64)
+ int bit = (rd < 32) ? 1 : 2;
+ /* If we know we've already set this bit within the TB,
+ we can avoid setting it again. */
+ if (!(dc->fprs_dirty & bit)) {
+ dc->fprs_dirty |= bit;
+ tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
+ }
+#endif
+}
+
+/* floating point registers moves */
+static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
+{
+#if TCG_TARGET_REG_BITS == 32
+ if (src & 1) {
+ return TCGV_LOW(cpu_fpr[src / 2]);
+ } else {
+ return TCGV_HIGH(cpu_fpr[src / 2]);
+ }
+#else
+ if (src & 1) {
+ return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr[src / 2]));
+ } else {
+ TCGv_i32 ret = get_temp_i32(dc);
+ TCGv_i64 t = tcg_temp_new_i64();
+
+ tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
+ tcg_gen_extrl_i64_i32(ret, t);
+ tcg_temp_free_i64(t);
+
+ return ret;
+ }
+#endif
+}
+
+static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
+{
+#if TCG_TARGET_REG_BITS == 32
+ if (dst & 1) {
+ tcg_gen_mov_i32(TCGV_LOW(cpu_fpr[dst / 2]), v);
+ } else {
+ tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr[dst / 2]), v);
+ }
+#else
+ TCGv_i64 t = MAKE_TCGV_I64(GET_TCGV_I32(v));
+ tcg_gen_deposit_i64(cpu_fpr[dst / 2], cpu_fpr[dst / 2], t,
+ (dst & 1 ? 0 : 32), 32);
+#endif
+ gen_update_fprs_dirty(dc, dst);
+}
+
+static TCGv_i32 gen_dest_fpr_F(DisasContext *dc)
+{
+ return get_temp_i32(dc);
+}
+
+static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
+{
+ src = DFPREG(src);
+ return cpu_fpr[src / 2];
+}
+
+static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
+{
+ dst = DFPREG(dst);
+ tcg_gen_mov_i64(cpu_fpr[dst / 2], v);
+ gen_update_fprs_dirty(dc, dst);
+}
+
+static TCGv_i64 gen_dest_fpr_D(DisasContext *dc, unsigned int dst)
+{
+ return cpu_fpr[DFPREG(dst) / 2];
+}
+
+static void gen_op_load_fpr_QT0(unsigned int src)
+{
+ tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
+ offsetof(CPU_QuadU, ll.upper));
+ tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
+ offsetof(CPU_QuadU, ll.lower));
+}
+
+static void gen_op_load_fpr_QT1(unsigned int src)
+{
+ tcg_gen_st_i64(cpu_fpr[src / 2], cpu_env, offsetof(CPUSPARCState, qt1) +
+ offsetof(CPU_QuadU, ll.upper));
+ tcg_gen_st_i64(cpu_fpr[src/2 + 1], cpu_env, offsetof(CPUSPARCState, qt1) +
+ offsetof(CPU_QuadU, ll.lower));
+}
+
+static void gen_op_store_QT0_fpr(unsigned int dst)
+{
+ tcg_gen_ld_i64(cpu_fpr[dst / 2], cpu_env, offsetof(CPUSPARCState, qt0) +
+ offsetof(CPU_QuadU, ll.upper));
+ tcg_gen_ld_i64(cpu_fpr[dst/2 + 1], cpu_env, offsetof(CPUSPARCState, qt0) +
+ offsetof(CPU_QuadU, ll.lower));
+}
+
+static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst,
+ TCGv_i64 v1, TCGv_i64 v2)
+{
+ dst = QFPREG(dst);
+
+ tcg_gen_mov_i64(cpu_fpr[dst / 2], v1);
+ tcg_gen_mov_i64(cpu_fpr[dst / 2 + 1], v2);
+ gen_update_fprs_dirty(dc, dst);
+}
+
+#ifdef TARGET_SPARC64
+static TCGv_i64 gen_load_fpr_Q0(DisasContext *dc, unsigned int src)
+{
+ src = QFPREG(src);
+ return cpu_fpr[src / 2];
+}
+
+static TCGv_i64 gen_load_fpr_Q1(DisasContext *dc, unsigned int src)
+{
+ src = QFPREG(src);
+ return cpu_fpr[src / 2 + 1];
+}
+
+static void gen_move_Q(DisasContext *dc, unsigned int rd, unsigned int rs)
+{
+ rd = QFPREG(rd);
+ rs = QFPREG(rs);
+
+ tcg_gen_mov_i64(cpu_fpr[rd / 2], cpu_fpr[rs / 2]);
+ tcg_gen_mov_i64(cpu_fpr[rd / 2 + 1], cpu_fpr[rs / 2 + 1]);
+ gen_update_fprs_dirty(dc, rd);
+}
+#endif
+
+/* moves */
+#ifdef CONFIG_USER_ONLY
+#define supervisor(dc) 0
+#ifdef TARGET_SPARC64
+#define hypervisor(dc) 0
+#endif
+#else
+#define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
+#ifdef TARGET_SPARC64
+#define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
+#else
+#endif
+#endif
+
+#ifdef TARGET_SPARC64
+#ifndef TARGET_ABI32
+#define AM_CHECK(dc) ((dc)->address_mask_32bit)
+#else
+#define AM_CHECK(dc) (1)
+#endif
+#endif
+
+static inline void gen_address_mask(DisasContext *dc, TCGv addr)
+{
+#ifdef TARGET_SPARC64
+ if (AM_CHECK(dc))
+ tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
+#endif
+}
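+
+/* For illustration: when the 32-bit address mask is in effect (PSTATE.AM
+ * on sparc64), a computed address such as 0xfffff800fffffff0 is truncated
+ * to 0xfffffff0 before the memory access; on pre-v9 targets this is a
+ * no-op. */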
+
+static inline TCGv gen_load_gpr(DisasContext *dc, int reg)
+{
+ if (reg > 0) {
+ assert(reg < 32);
+ return cpu_regs[reg];
+ } else {
+ TCGv t = get_temp_tl(dc);
+ tcg_gen_movi_tl(t, 0);
+ return t;
+ }
+}
+
+static inline void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
+{
+ if (reg > 0) {
+ assert(reg < 32);
+ tcg_gen_mov_tl(cpu_regs[reg], v);
+ }
+}
+
+static inline TCGv gen_dest_gpr(DisasContext *dc, int reg)
+{
+ if (reg > 0) {
+ assert(reg < 32);
+ return cpu_regs[reg];
+ } else {
+ return get_temp_tl(dc);
+ }
+}
+
+static inline bool use_goto_tb(DisasContext *s, target_ulong pc,
+ target_ulong npc)
+{
+ if (unlikely(s->singlestep)) {
+ return false;
+ }
+
+#ifndef CONFIG_USER_ONLY
+ return (pc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) &&
+ (npc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK);
+#else
+ return true;
+#endif
+}
+
+static inline void gen_goto_tb(DisasContext *s, int tb_num,
+ target_ulong pc, target_ulong npc)
+{
+ if (use_goto_tb(s, pc, npc)) {
+ /* jump to same page: we can use a direct jump */
+ tcg_gen_goto_tb(tb_num);
+ tcg_gen_movi_tl(cpu_pc, pc);
+ tcg_gen_movi_tl(cpu_npc, npc);
+ tcg_gen_exit_tb((uintptr_t)s->tb + tb_num);
+ } else {
+ /* jump to another page: currently not optimized */
+ tcg_gen_movi_tl(cpu_pc, pc);
+ tcg_gen_movi_tl(cpu_npc, npc);
+ tcg_gen_exit_tb(0);
+ }
+}
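+
+/* For illustration (assuming a 4 KB target page): a conditional branch
+ * inside a TB starting at 0x1000 whose taken/not-taken targets are 0x1040
+ * and 0x1044 stays on the same page, so both exits can be chained with
+ * goto_tb; a target such as 0x5004 lies on another page and falls back to
+ * the unchained exit_tb(0) path. */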
+
+// XXX suboptimal
+static inline void gen_mov_reg_N(TCGv reg, TCGv_i32 src)
+{
+ tcg_gen_extu_i32_tl(reg, src);
+ tcg_gen_shri_tl(reg, reg, PSR_NEG_SHIFT);
+ tcg_gen_andi_tl(reg, reg, 0x1);
+}
+
+static inline void gen_mov_reg_Z(TCGv reg, TCGv_i32 src)
+{
+ tcg_gen_extu_i32_tl(reg, src);
+ tcg_gen_shri_tl(reg, reg, PSR_ZERO_SHIFT);
+ tcg_gen_andi_tl(reg, reg, 0x1);
+}
+
+static inline void gen_mov_reg_V(TCGv reg, TCGv_i32 src)
+{
+ tcg_gen_extu_i32_tl(reg, src);
+ tcg_gen_shri_tl(reg, reg, PSR_OVF_SHIFT);
+ tcg_gen_andi_tl(reg, reg, 0x1);
+}
+
+static inline void gen_mov_reg_C(TCGv reg, TCGv_i32 src)
+{
+ tcg_gen_extu_i32_tl(reg, src);
+ tcg_gen_shri_tl(reg, reg, PSR_CARRY_SHIFT);
+ tcg_gen_andi_tl(reg, reg, 0x1);
+}
+
+static inline void gen_op_add_cc(TCGv dst, TCGv src1, TCGv src2)
+{
+ tcg_gen_mov_tl(cpu_cc_src, src1);
+ tcg_gen_mov_tl(cpu_cc_src2, src2);
+ tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
+ tcg_gen_mov_tl(dst, cpu_cc_dst);
+}
+
+static TCGv_i32 gen_add32_carry32(void)
+{
+ TCGv_i32 carry_32, cc_src1_32, cc_src2_32;
+
+ /* Carry is computed from a previous add: (dst < src) */
+#if TARGET_LONG_BITS == 64
+ cc_src1_32 = tcg_temp_new_i32();
+ cc_src2_32 = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
+ tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
+#else
+ cc_src1_32 = cpu_cc_dst;
+ cc_src2_32 = cpu_cc_src;
+#endif
+
+ carry_32 = tcg_temp_new_i32();
+ tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);
+
+#if TARGET_LONG_BITS == 64
+ tcg_temp_free_i32(cc_src1_32);
+ tcg_temp_free_i32(cc_src2_32);
+#endif
+
+ return carry_32;
+}
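+
+/* Example (for illustration, hypothetical values): if the preceding addcc
+ * computed 0xffffffff + 0x00000002, cpu_cc_dst holds 0x00000001 and
+ * cpu_cc_src holds 0xffffffff, so the unsigned compare above yields
+ * carry = 1, exactly the icc.C bit that addition produced. */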
+
+static TCGv_i32 gen_sub32_carry32(void)
+{
+ TCGv_i32 carry_32, cc_src1_32, cc_src2_32;
+
+ /* Carry is computed from a previous borrow: (src1 < src2) */
+#if TARGET_LONG_BITS == 64
+ cc_src1_32 = tcg_temp_new_i32();
+ cc_src2_32 = tcg_temp_new_i32();
+ tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
+ tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
+#else
+ cc_src1_32 = cpu_cc_src;
+ cc_src2_32 = cpu_cc_src2;
+#endif
+
+ carry_32 = tcg_temp_new_i32();
+ tcg_gen_setcond_i32(TCG_COND_LTU, carry_32, cc_src1_32, cc_src2_32);
+
+#if TARGET_LONG_BITS == 64
+ tcg_temp_free_i32(cc_src1_32);
+ tcg_temp_free_i32(cc_src2_32);
+#endif
+
+ return carry_32;
+}
+
+static void gen_op_addx_int(DisasContext *dc, TCGv dst, TCGv src1,
+ TCGv src2, int update_cc)
+{
+ TCGv_i32 carry_32;
+ TCGv carry;
+
+ switch (dc->cc_op) {
+ case CC_OP_DIV:
+ case CC_OP_LOGIC:
+ /* Carry is known to be zero. Fall back to plain ADD. */
+ if (update_cc) {
+ gen_op_add_cc(dst, src1, src2);
+ } else {
+ tcg_gen_add_tl(dst, src1, src2);
+ }
+ return;
+
+ case CC_OP_ADD:
+ case CC_OP_TADD:
+ case CC_OP_TADDTV:
+ if (TARGET_LONG_BITS == 32) {
+ /* We can re-use the host's hardware carry generation by using
+ an ADD2 opcode. We discard the low part of the output.
+ Ideally we'd combine this operation with the add that
+ generated the carry in the first place. */
+ carry = tcg_temp_new();
+ tcg_gen_add2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
+ tcg_temp_free(carry);
+ goto add_done;
+ }
+ carry_32 = gen_add32_carry32();
+ break;
+
+ case CC_OP_SUB:
+ case CC_OP_TSUB:
+ case CC_OP_TSUBTV:
+ carry_32 = gen_sub32_carry32();
+ break;
+
+ default:
+ /* We need external help to produce the carry. */
+ carry_32 = tcg_temp_new_i32();
+ gen_helper_compute_C_icc(carry_32, cpu_env);
+ break;
+ }
+
+#if TARGET_LONG_BITS == 64
+ carry = tcg_temp_new();
+ tcg_gen_extu_i32_i64(carry, carry_32);
+#else
+ carry = carry_32;
+#endif
+
+ tcg_gen_add_tl(dst, src1, src2);
+ tcg_gen_add_tl(dst, dst, carry);
+
+ tcg_temp_free_i32(carry_32);
+#if TARGET_LONG_BITS == 64
+ tcg_temp_free(carry);
+#endif
+
+ add_done:
+ if (update_cc) {
+ tcg_gen_mov_tl(cpu_cc_src, src1);
+ tcg_gen_mov_tl(cpu_cc_src2, src2);
+ tcg_gen_mov_tl(cpu_cc_dst, dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADDX);
+ dc->cc_op = CC_OP_ADDX;
+ }
+}
+
+static inline void gen_op_sub_cc(TCGv dst, TCGv src1, TCGv src2)
+{
+ tcg_gen_mov_tl(cpu_cc_src, src1);
+ tcg_gen_mov_tl(cpu_cc_src2, src2);
+ tcg_gen_sub_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
+ tcg_gen_mov_tl(dst, cpu_cc_dst);
+}
+
+static void gen_op_subx_int(DisasContext *dc, TCGv dst, TCGv src1,
+ TCGv src2, int update_cc)
+{
+ TCGv_i32 carry_32;
+ TCGv carry;
+
+ switch (dc->cc_op) {
+ case CC_OP_DIV:
+ case CC_OP_LOGIC:
+ /* Carry is known to be zero. Fall back to plain SUB. */
+ if (update_cc) {
+ gen_op_sub_cc(dst, src1, src2);
+ } else {
+ tcg_gen_sub_tl(dst, src1, src2);
+ }
+ return;
+
+ case CC_OP_ADD:
+ case CC_OP_TADD:
+ case CC_OP_TADDTV:
+ carry_32 = gen_add32_carry32();
+ break;
+
+ case CC_OP_SUB:
+ case CC_OP_TSUB:
+ case CC_OP_TSUBTV:
+ if (TARGET_LONG_BITS == 32) {
+ /* We can re-use the host's hardware carry generation by using
+ a SUB2 opcode. We discard the low part of the output.
+ Ideally we'd combine this operation with the add that
+ generated the carry in the first place. */
+ carry = tcg_temp_new();
+ tcg_gen_sub2_tl(carry, dst, cpu_cc_src, src1, cpu_cc_src2, src2);
+ tcg_temp_free(carry);
+ goto sub_done;
+ }
+ carry_32 = gen_sub32_carry32();
+ break;
+
+ default:
+ /* We need external help to produce the carry. */
+ carry_32 = tcg_temp_new_i32();
+ gen_helper_compute_C_icc(carry_32, cpu_env);
+ break;
+ }
+
+#if TARGET_LONG_BITS == 64
+ carry = tcg_temp_new();
+ tcg_gen_extu_i32_i64(carry, carry_32);
+#else
+ carry = carry_32;
+#endif
+
+ tcg_gen_sub_tl(dst, src1, src2);
+ tcg_gen_sub_tl(dst, dst, carry);
+
+ tcg_temp_free_i32(carry_32);
+#if TARGET_LONG_BITS == 64
+ tcg_temp_free(carry);
+#endif
+
+ sub_done:
+ if (update_cc) {
+ tcg_gen_mov_tl(cpu_cc_src, src1);
+ tcg_gen_mov_tl(cpu_cc_src2, src2);
+ tcg_gen_mov_tl(cpu_cc_dst, dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUBX);
+ dc->cc_op = CC_OP_SUBX;
+ }
+}
+
+static inline void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
+{
+ TCGv r_temp, zero, t0;
+
+ r_temp = tcg_temp_new();
+ t0 = tcg_temp_new();
+
+ /* old op:
+ if (!(env->y & 1))
+ T1 = 0;
+ */
+ zero = tcg_const_tl(0);
+ tcg_gen_andi_tl(cpu_cc_src, src1, 0xffffffff);
+ tcg_gen_andi_tl(r_temp, cpu_y, 0x1);
+ tcg_gen_andi_tl(cpu_cc_src2, src2, 0xffffffff);
+ tcg_gen_movcond_tl(TCG_COND_EQ, cpu_cc_src2, r_temp, zero,
+ zero, cpu_cc_src2);
+ tcg_temp_free(zero);
+
+ // b2 = T0 & 1;
+ // env->y = (b2 << 31) | (env->y >> 1);
+ tcg_gen_andi_tl(r_temp, cpu_cc_src, 0x1);
+ tcg_gen_shli_tl(r_temp, r_temp, 31);
+ tcg_gen_shri_tl(t0, cpu_y, 1);
+ tcg_gen_andi_tl(t0, t0, 0x7fffffff);
+ tcg_gen_or_tl(t0, t0, r_temp);
+ tcg_gen_andi_tl(cpu_y, t0, 0xffffffff);
+
+ // b1 = N ^ V;
+ gen_mov_reg_N(t0, cpu_psr);
+ gen_mov_reg_V(r_temp, cpu_psr);
+ tcg_gen_xor_tl(t0, t0, r_temp);
+ tcg_temp_free(r_temp);
+
+ // T0 = (b1 << 31) | (T0 >> 1);
+ // src1 = T0;
+ tcg_gen_shli_tl(t0, t0, 31);
+ tcg_gen_shri_tl(cpu_cc_src, cpu_cc_src, 1);
+ tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
+ tcg_temp_free(t0);
+
+ tcg_gen_add_tl(cpu_cc_dst, cpu_cc_src, cpu_cc_src2);
+
+ tcg_gen_mov_tl(dst, cpu_cc_dst);
+}
+
+static inline void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
+{
+#if TARGET_LONG_BITS == 32
+ if (sign_ext) {
+ tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
+ } else {
+ tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
+ }
+#else
+ TCGv t0 = tcg_temp_new_i64();
+ TCGv t1 = tcg_temp_new_i64();
+
+ if (sign_ext) {
+ tcg_gen_ext32s_i64(t0, src1);
+ tcg_gen_ext32s_i64(t1, src2);
+ } else {
+ tcg_gen_ext32u_i64(t0, src1);
+ tcg_gen_ext32u_i64(t1, src2);
+ }
+
+ tcg_gen_mul_i64(dst, t0, t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(t1);
+
+ tcg_gen_shri_i64(cpu_y, dst, 32);
+#endif
+}
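+
+/* Example (for illustration): umul of 0x80000000 by 0x00000004 gives the
+ * 64-bit product 0x200000000; %y receives the high 32 bits (2) on both
+ * paths above, while rd gets the low word via mulu2 on 32-bit targets and
+ * the full 64-bit product on sparc64. */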
+
+static inline void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
+{
+ /* zero-extend truncated operands before multiplication */
+ gen_op_multiply(dst, src1, src2, 0);
+}
+
+static inline void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
+{
+ /* sign-extend truncated operands before multiplication */
+ gen_op_multiply(dst, src1, src2, 1);
+}
+
+// 1
+static inline void gen_op_eval_ba(TCGv dst)
+{
+ tcg_gen_movi_tl(dst, 1);
+}
+
+// Z
+static inline void gen_op_eval_be(TCGv dst, TCGv_i32 src)
+{
+ gen_mov_reg_Z(dst, src);
+}
+
+// Z | (N ^ V)
+static inline void gen_op_eval_ble(TCGv dst, TCGv_i32 src)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_mov_reg_N(t0, src);
+ gen_mov_reg_V(dst, src);
+ tcg_gen_xor_tl(dst, dst, t0);
+ gen_mov_reg_Z(t0, src);
+ tcg_gen_or_tl(dst, dst, t0);
+ tcg_temp_free(t0);
+}
+
+// N ^ V
+static inline void gen_op_eval_bl(TCGv dst, TCGv_i32 src)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_mov_reg_V(t0, src);
+ gen_mov_reg_N(dst, src);
+ tcg_gen_xor_tl(dst, dst, t0);
+ tcg_temp_free(t0);
+}
+
+// C | Z
+static inline void gen_op_eval_bleu(TCGv dst, TCGv_i32 src)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_mov_reg_Z(t0, src);
+ gen_mov_reg_C(dst, src);
+ tcg_gen_or_tl(dst, dst, t0);
+ tcg_temp_free(t0);
+}
+
+// C
+static inline void gen_op_eval_bcs(TCGv dst, TCGv_i32 src)
+{
+ gen_mov_reg_C(dst, src);
+}
+
+// V
+static inline void gen_op_eval_bvs(TCGv dst, TCGv_i32 src)
+{
+ gen_mov_reg_V(dst, src);
+}
+
+// 0
+static inline void gen_op_eval_bn(TCGv dst)
+{
+ tcg_gen_movi_tl(dst, 0);
+}
+
+// N
+static inline void gen_op_eval_bneg(TCGv dst, TCGv_i32 src)
+{
+ gen_mov_reg_N(dst, src);
+}
+
+// !Z
+static inline void gen_op_eval_bne(TCGv dst, TCGv_i32 src)
+{
+ gen_mov_reg_Z(dst, src);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+// !(Z | (N ^ V))
+static inline void gen_op_eval_bg(TCGv dst, TCGv_i32 src)
+{
+ gen_op_eval_ble(dst, src);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+// !(N ^ V)
+static inline void gen_op_eval_bge(TCGv dst, TCGv_i32 src)
+{
+ gen_op_eval_bl(dst, src);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+// !(C | Z)
+static inline void gen_op_eval_bgu(TCGv dst, TCGv_i32 src)
+{
+ gen_op_eval_bleu(dst, src);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+// !C
+static inline void gen_op_eval_bcc(TCGv dst, TCGv_i32 src)
+{
+ gen_mov_reg_C(dst, src);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+// !N
+static inline void gen_op_eval_bpos(TCGv dst, TCGv_i32 src)
+{
+ gen_mov_reg_N(dst, src);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+// !V
+static inline void gen_op_eval_bvc(TCGv dst, TCGv_i32 src)
+{
+ gen_mov_reg_V(dst, src);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+/*
+ FSR bit field FCC1 | FCC0 encodes the result of an FP compare:
+ 0 equal
+ 1 less than
+ 2 greater than
+ 3 unordered
+*/
+static inline void gen_mov_reg_FCC0(TCGv reg, TCGv src,
+ unsigned int fcc_offset)
+{
+ tcg_gen_shri_tl(reg, src, FSR_FCC0_SHIFT + fcc_offset);
+ tcg_gen_andi_tl(reg, reg, 0x1);
+}
+
+static inline void gen_mov_reg_FCC1(TCGv reg, TCGv src,
+ unsigned int fcc_offset)
+{
+ tcg_gen_shri_tl(reg, src, FSR_FCC1_SHIFT + fcc_offset);
+ tcg_gen_andi_tl(reg, reg, 0x1);
+}
+
+// !0: FCC0 | FCC1
+static inline void gen_op_eval_fbne(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_mov_reg_FCC0(dst, src, fcc_offset);
+ gen_mov_reg_FCC1(t0, src, fcc_offset);
+ tcg_gen_or_tl(dst, dst, t0);
+ tcg_temp_free(t0);
+}
+
+// 1 or 2: FCC0 ^ FCC1
+static inline void gen_op_eval_fblg(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_mov_reg_FCC0(dst, src, fcc_offset);
+ gen_mov_reg_FCC1(t0, src, fcc_offset);
+ tcg_gen_xor_tl(dst, dst, t0);
+ tcg_temp_free(t0);
+}
+
+// 1 or 3: FCC0
+static inline void gen_op_eval_fbul(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ gen_mov_reg_FCC0(dst, src, fcc_offset);
+}
+
+// 1: FCC0 & !FCC1
+static inline void gen_op_eval_fbl(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_mov_reg_FCC0(dst, src, fcc_offset);
+ gen_mov_reg_FCC1(t0, src, fcc_offset);
+ tcg_gen_andc_tl(dst, dst, t0);
+ tcg_temp_free(t0);
+}
+
+// 2 or 3: FCC1
+static inline void gen_op_eval_fbug(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ gen_mov_reg_FCC1(dst, src, fcc_offset);
+}
+
+// 2: !FCC0 & FCC1
+static inline void gen_op_eval_fbg(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_mov_reg_FCC0(dst, src, fcc_offset);
+ gen_mov_reg_FCC1(t0, src, fcc_offset);
+ tcg_gen_andc_tl(dst, t0, dst);
+ tcg_temp_free(t0);
+}
+
+// 3: FCC0 & FCC1
+static inline void gen_op_eval_fbu(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_mov_reg_FCC0(dst, src, fcc_offset);
+ gen_mov_reg_FCC1(t0, src, fcc_offset);
+ tcg_gen_and_tl(dst, dst, t0);
+ tcg_temp_free(t0);
+}
+
+// 0: !(FCC0 | FCC1)
+static inline void gen_op_eval_fbe(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_mov_reg_FCC0(dst, src, fcc_offset);
+ gen_mov_reg_FCC1(t0, src, fcc_offset);
+ tcg_gen_or_tl(dst, dst, t0);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+ tcg_temp_free(t0);
+}
+
+// 0 or 3: !(FCC0 ^ FCC1)
+static inline void gen_op_eval_fbue(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_mov_reg_FCC0(dst, src, fcc_offset);
+ gen_mov_reg_FCC1(t0, src, fcc_offset);
+ tcg_gen_xor_tl(dst, dst, t0);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+ tcg_temp_free(t0);
+}
+
+// 0 or 2: !FCC0
+static inline void gen_op_eval_fbge(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ gen_mov_reg_FCC0(dst, src, fcc_offset);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+// !1: !(FCC0 & !FCC1)
+static inline void gen_op_eval_fbuge(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_mov_reg_FCC0(dst, src, fcc_offset);
+ gen_mov_reg_FCC1(t0, src, fcc_offset);
+ tcg_gen_andc_tl(dst, dst, t0);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+ tcg_temp_free(t0);
+}
+
+// 0 or 1: !FCC1
+static inline void gen_op_eval_fble(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ gen_mov_reg_FCC1(dst, src, fcc_offset);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+}
+
+// !2: !(!FCC0 & FCC1)
+static inline void gen_op_eval_fbule(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_mov_reg_FCC0(dst, src, fcc_offset);
+ gen_mov_reg_FCC1(t0, src, fcc_offset);
+ tcg_gen_andc_tl(dst, t0, dst);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+ tcg_temp_free(t0);
+}
+
+// !3: !(FCC0 & FCC1)
+static inline void gen_op_eval_fbo(TCGv dst, TCGv src,
+ unsigned int fcc_offset)
+{
+ TCGv t0 = tcg_temp_new();
+ gen_mov_reg_FCC0(dst, src, fcc_offset);
+ gen_mov_reg_FCC1(t0, src, fcc_offset);
+ tcg_gen_and_tl(dst, dst, t0);
+ tcg_gen_xori_tl(dst, dst, 0x1);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_branch2(DisasContext *dc, target_ulong pc1,
+ target_ulong pc2, TCGv r_cond)
+{
+ TCGLabel *l1 = gen_new_label();
+
+ tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);
+
+ gen_goto_tb(dc, 0, pc1, pc1 + 4);
+
+ gen_set_label(l1);
+ gen_goto_tb(dc, 1, pc2, pc2 + 4);
+}
+
+static void gen_branch_a(DisasContext *dc, target_ulong pc1)
+{
+ TCGLabel *l1 = gen_new_label();
+ target_ulong npc = dc->npc;
+
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cond, 0, l1);
+
+ gen_goto_tb(dc, 0, npc, pc1);
+
+ gen_set_label(l1);
+ gen_goto_tb(dc, 1, npc + 4, npc + 8);
+
+ dc->is_br = 1;
+}
+
+static void gen_branch_n(DisasContext *dc, target_ulong pc1)
+{
+ target_ulong npc = dc->npc;
+
+ if (likely(npc != DYNAMIC_PC)) {
+ dc->pc = npc;
+ dc->jump_pc[0] = pc1;
+ dc->jump_pc[1] = npc + 4;
+ dc->npc = JUMP_PC;
+ } else {
+ TCGv t, z;
+
+ tcg_gen_mov_tl(cpu_pc, cpu_npc);
+
+ tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
+ t = tcg_const_tl(pc1);
+ z = tcg_const_tl(0);
+ tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, z, t, cpu_npc);
+ tcg_temp_free(t);
+ tcg_temp_free(z);
+
+ dc->pc = DYNAMIC_PC;
+ }
+}
+
+static inline void gen_generic_branch(DisasContext *dc)
+{
+ TCGv npc0 = tcg_const_tl(dc->jump_pc[0]);
+ TCGv npc1 = tcg_const_tl(dc->jump_pc[1]);
+ TCGv zero = tcg_const_tl(0);
+
+ tcg_gen_movcond_tl(TCG_COND_NE, cpu_npc, cpu_cond, zero, npc0, npc1);
+
+ tcg_temp_free(npc0);
+ tcg_temp_free(npc1);
+ tcg_temp_free(zero);
+}
+
+/* Call this function before reusing the condition register (cpu_cond), as it
+ may still hold the condition of a pending delayed branch (npc == JUMP_PC) */
+static inline void flush_cond(DisasContext *dc)
+{
+ if (dc->npc == JUMP_PC) {
+ gen_generic_branch(dc);
+ dc->npc = DYNAMIC_PC;
+ }
+}
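+
+/* For illustration: after gen_branch_n() has translated a conditional
+ * branch, dc->npc is JUMP_PC and cpu_cond still holds the branch
+ * condition; flushing materializes npc as a movcond selecting between
+ * jump_pc[0] and jump_pc[1], after which cpu_cond is free to be reused. */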
+
+static inline void save_npc(DisasContext *dc)
+{
+ if (dc->npc == JUMP_PC) {
+ gen_generic_branch(dc);
+ dc->npc = DYNAMIC_PC;
+ } else if (dc->npc != DYNAMIC_PC) {
+ tcg_gen_movi_tl(cpu_npc, dc->npc);
+ }
+}
+
+static inline void update_psr(DisasContext *dc)
+{
+ if (dc->cc_op != CC_OP_FLAGS) {
+ dc->cc_op = CC_OP_FLAGS;
+ gen_helper_compute_psr(cpu_env);
+ }
+}
+
+static inline void save_state(DisasContext *dc)
+{
+ tcg_gen_movi_tl(cpu_pc, dc->pc);
+ save_npc(dc);
+}
+
+static void gen_exception(DisasContext *dc, int which)
+{
+ TCGv_i32 t;
+
+ save_state(dc);
+ t = tcg_const_i32(which);
+ gen_helper_raise_exception(cpu_env, t);
+ tcg_temp_free_i32(t);
+ dc->is_br = 1;
+}
+
+static void gen_check_align(TCGv addr, int mask)
+{
+ TCGv_i32 r_mask = tcg_const_i32(mask);
+ gen_helper_check_align(cpu_env, addr, r_mask);
+ tcg_temp_free_i32(r_mask);
+}
+
+static inline void gen_mov_pc_npc(DisasContext *dc)
+{
+ if (dc->npc == JUMP_PC) {
+ gen_generic_branch(dc);
+ tcg_gen_mov_tl(cpu_pc, cpu_npc);
+ dc->pc = DYNAMIC_PC;
+ } else if (dc->npc == DYNAMIC_PC) {
+ tcg_gen_mov_tl(cpu_pc, cpu_npc);
+ dc->pc = DYNAMIC_PC;
+ } else {
+ dc->pc = dc->npc;
+ }
+}
+
+static inline void gen_op_next_insn(void)
+{
+ tcg_gen_mov_tl(cpu_pc, cpu_npc);
+ tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
+}
+
+static void free_compare(DisasCompare *cmp)
+{
+ if (!cmp->g1) {
+ tcg_temp_free(cmp->c1);
+ }
+ if (!cmp->g2) {
+ tcg_temp_free(cmp->c2);
+ }
+}
+
+static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
+ DisasContext *dc)
+{
+ static int subcc_cond[16] = {
+ TCG_COND_NEVER,
+ TCG_COND_EQ,
+ TCG_COND_LE,
+ TCG_COND_LT,
+ TCG_COND_LEU,
+ TCG_COND_LTU,
+ -1, /* neg */
+ -1, /* overflow */
+ TCG_COND_ALWAYS,
+ TCG_COND_NE,
+ TCG_COND_GT,
+ TCG_COND_GE,
+ TCG_COND_GTU,
+ TCG_COND_GEU,
+ -1, /* pos */
+ -1, /* no overflow */
+ };
+
+ static int logic_cond[16] = {
+ TCG_COND_NEVER,
+ TCG_COND_EQ, /* eq: Z */
+ TCG_COND_LE, /* le: Z | (N ^ V) -> Z | N */
+ TCG_COND_LT, /* lt: N ^ V -> N */
+ TCG_COND_EQ, /* leu: C | Z -> Z */
+ TCG_COND_NEVER, /* ltu: C -> 0 */
+ TCG_COND_LT, /* neg: N */
+ TCG_COND_NEVER, /* vs: V -> 0 */
+ TCG_COND_ALWAYS,
+ TCG_COND_NE, /* ne: !Z */
+ TCG_COND_GT, /* gt: !(Z | (N ^ V)) -> !(Z | N) */
+ TCG_COND_GE, /* ge: !(N ^ V) -> !N */
+ TCG_COND_NE, /* gtu: !(C | Z) -> !Z */
+ TCG_COND_ALWAYS, /* geu: !C -> 1 */
+ TCG_COND_GE, /* pos: !N */
+ TCG_COND_ALWAYS, /* vc: !V -> 1 */
+ };
+
+ TCGv_i32 r_src;
+ TCGv r_dst;
+
+#ifdef TARGET_SPARC64
+ if (xcc) {
+ r_src = cpu_xcc;
+ } else {
+ r_src = cpu_psr;
+ }
+#else
+ r_src = cpu_psr;
+#endif
+
+ switch (dc->cc_op) {
+ case CC_OP_LOGIC:
+ cmp->cond = logic_cond[cond];
+ do_compare_dst_0:
+ cmp->is_bool = false;
+ cmp->g2 = false;
+ cmp->c2 = tcg_const_tl(0);
+#ifdef TARGET_SPARC64
+ if (!xcc) {
+ cmp->g1 = false;
+ cmp->c1 = tcg_temp_new();
+ tcg_gen_ext32s_tl(cmp->c1, cpu_cc_dst);
+ break;
+ }
+#endif
+ cmp->g1 = true;
+ cmp->c1 = cpu_cc_dst;
+ break;
+
+ case CC_OP_SUB:
+ switch (cond) {
+ case 6: /* neg */
+ case 14: /* pos */
+ cmp->cond = (cond == 6 ? TCG_COND_LT : TCG_COND_GE);
+ goto do_compare_dst_0;
+
+ case 7: /* overflow */
+ case 15: /* !overflow */
+ goto do_dynamic;
+
+ default:
+ cmp->cond = subcc_cond[cond];
+ cmp->is_bool = false;
+#ifdef TARGET_SPARC64
+ if (!xcc) {
+ /* Note that sign-extension works for unsigned compares as
+ long as both operands are sign-extended. */
+ cmp->g1 = cmp->g2 = false;
+ cmp->c1 = tcg_temp_new();
+ cmp->c2 = tcg_temp_new();
+ tcg_gen_ext32s_tl(cmp->c1, cpu_cc_src);
+ tcg_gen_ext32s_tl(cmp->c2, cpu_cc_src2);
+ break;
+ }
+#endif
+ cmp->g1 = cmp->g2 = true;
+ cmp->c1 = cpu_cc_src;
+ cmp->c2 = cpu_cc_src2;
+ break;
+ }
+ break;
+
+ default:
+ do_dynamic:
+ gen_helper_compute_psr(cpu_env);
+ dc->cc_op = CC_OP_FLAGS;
+ /* FALLTHRU */
+
+ case CC_OP_FLAGS:
+ /* We're going to generate a boolean result. */
+ cmp->cond = TCG_COND_NE;
+ cmp->is_bool = true;
+ cmp->g1 = cmp->g2 = false;
+ cmp->c1 = r_dst = tcg_temp_new();
+ cmp->c2 = tcg_const_tl(0);
+
+ switch (cond) {
+ case 0x0:
+ gen_op_eval_bn(r_dst);
+ break;
+ case 0x1:
+ gen_op_eval_be(r_dst, r_src);
+ break;
+ case 0x2:
+ gen_op_eval_ble(r_dst, r_src);
+ break;
+ case 0x3:
+ gen_op_eval_bl(r_dst, r_src);
+ break;
+ case 0x4:
+ gen_op_eval_bleu(r_dst, r_src);
+ break;
+ case 0x5:
+ gen_op_eval_bcs(r_dst, r_src);
+ break;
+ case 0x6:
+ gen_op_eval_bneg(r_dst, r_src);
+ break;
+ case 0x7:
+ gen_op_eval_bvs(r_dst, r_src);
+ break;
+ case 0x8:
+ gen_op_eval_ba(r_dst);
+ break;
+ case 0x9:
+ gen_op_eval_bne(r_dst, r_src);
+ break;
+ case 0xa:
+ gen_op_eval_bg(r_dst, r_src);
+ break;
+ case 0xb:
+ gen_op_eval_bge(r_dst, r_src);
+ break;
+ case 0xc:
+ gen_op_eval_bgu(r_dst, r_src);
+ break;
+ case 0xd:
+ gen_op_eval_bcc(r_dst, r_src);
+ break;
+ case 0xe:
+ gen_op_eval_bpos(r_dst, r_src);
+ break;
+ case 0xf:
+ gen_op_eval_bvc(r_dst, r_src);
+ break;
+ }
+ break;
+ }
+}
+
+static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
+{
+ unsigned int offset;
+ TCGv r_dst;
+
+ /* For now we still generate a straight boolean result. */
+ cmp->cond = TCG_COND_NE;
+ cmp->is_bool = true;
+ cmp->g1 = cmp->g2 = false;
+ cmp->c1 = r_dst = tcg_temp_new();
+ cmp->c2 = tcg_const_tl(0);
+
+ switch (cc) {
+ default:
+ case 0x0:
+ offset = 0;
+ break;
+ case 0x1:
+ offset = 32 - 10;
+ break;
+ case 0x2:
+ offset = 34 - 10;
+ break;
+ case 0x3:
+ offset = 36 - 10;
+ break;
+ }
+
+ switch (cond) {
+ case 0x0:
+ gen_op_eval_bn(r_dst);
+ break;
+ case 0x1:
+ gen_op_eval_fbne(r_dst, cpu_fsr, offset);
+ break;
+ case 0x2:
+ gen_op_eval_fblg(r_dst, cpu_fsr, offset);
+ break;
+ case 0x3:
+ gen_op_eval_fbul(r_dst, cpu_fsr, offset);
+ break;
+ case 0x4:
+ gen_op_eval_fbl(r_dst, cpu_fsr, offset);
+ break;
+ case 0x5:
+ gen_op_eval_fbug(r_dst, cpu_fsr, offset);
+ break;
+ case 0x6:
+ gen_op_eval_fbg(r_dst, cpu_fsr, offset);
+ break;
+ case 0x7:
+ gen_op_eval_fbu(r_dst, cpu_fsr, offset);
+ break;
+ case 0x8:
+ gen_op_eval_ba(r_dst);
+ break;
+ case 0x9:
+ gen_op_eval_fbe(r_dst, cpu_fsr, offset);
+ break;
+ case 0xa:
+ gen_op_eval_fbue(r_dst, cpu_fsr, offset);
+ break;
+ case 0xb:
+ gen_op_eval_fbge(r_dst, cpu_fsr, offset);
+ break;
+ case 0xc:
+ gen_op_eval_fbuge(r_dst, cpu_fsr, offset);
+ break;
+ case 0xd:
+ gen_op_eval_fble(r_dst, cpu_fsr, offset);
+ break;
+ case 0xe:
+ gen_op_eval_fbule(r_dst, cpu_fsr, offset);
+ break;
+ case 0xf:
+ gen_op_eval_fbo(r_dst, cpu_fsr, offset);
+ break;
+ }
+}
+
+static void gen_cond(TCGv r_dst, unsigned int cc, unsigned int cond,
+ DisasContext *dc)
+{
+ DisasCompare cmp;
+ gen_compare(&cmp, cc, cond, dc);
+
+ /* The interface is to return a boolean in r_dst. */
+ if (cmp.is_bool) {
+ tcg_gen_mov_tl(r_dst, cmp.c1);
+ } else {
+ tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
+ }
+
+ free_compare(&cmp);
+}
+
+static void gen_fcond(TCGv r_dst, unsigned int cc, unsigned int cond)
+{
+ DisasCompare cmp;
+ gen_fcompare(&cmp, cc, cond);
+
+ /* The interface is to return a boolean in r_dst. */
+ if (cmp.is_bool) {
+ tcg_gen_mov_tl(r_dst, cmp.c1);
+ } else {
+ tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
+ }
+
+ free_compare(&cmp);
+}
+
+#ifdef TARGET_SPARC64
+// Inverted logic
+static const int gen_tcg_cond_reg[8] = {
+ -1,
+ TCG_COND_NE,
+ TCG_COND_GT,
+ TCG_COND_GE,
+ -1,
+ TCG_COND_EQ,
+ TCG_COND_LE,
+ TCG_COND_LT,
+};
+
+static void gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
+{
+ cmp->cond = tcg_invert_cond(gen_tcg_cond_reg[cond]);
+ cmp->is_bool = false;
+ cmp->g1 = true;
+ cmp->g2 = false;
+ cmp->c1 = r_src;
+ cmp->c2 = tcg_const_tl(0);
+}
+
+static inline void gen_cond_reg(TCGv r_dst, int cond, TCGv r_src)
+{
+ DisasCompare cmp;
+ gen_compare_reg(&cmp, cond, r_src);
+
+ /* The interface is to return a boolean in r_dst. */
+ tcg_gen_setcond_tl(cmp.cond, r_dst, cmp.c1, cmp.c2);
+
+ free_compare(&cmp);
+}
+#endif
+
+static void do_branch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
+{
+ unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
+ target_ulong target = dc->pc + offset;
+
+#ifdef TARGET_SPARC64
+ if (unlikely(AM_CHECK(dc))) {
+ target &= 0xffffffffULL;
+ }
+#endif
+ if (cond == 0x0) {
+ /* unconditional not taken */
+ if (a) {
+ dc->pc = dc->npc + 4;
+ dc->npc = dc->pc + 4;
+ } else {
+ dc->pc = dc->npc;
+ dc->npc = dc->pc + 4;
+ }
+ } else if (cond == 0x8) {
+ /* unconditional taken */
+ if (a) {
+ dc->pc = target;
+ dc->npc = dc->pc + 4;
+ } else {
+ dc->pc = dc->npc;
+ dc->npc = target;
+ tcg_gen_mov_tl(cpu_pc, cpu_npc);
+ }
+ } else {
+ flush_cond(dc);
+ gen_cond(cpu_cond, cc, cond, dc);
+ if (a) {
+ gen_branch_a(dc, target);
+ } else {
+ gen_branch_n(dc, target);
+ }
+ }
+}
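+
+/* Example (for illustration): for "bn,a" (cond 0 with the annul bit set)
+ * the delay slot is annulled, so the code above skips it by setting pc to
+ * npc + 4; for a plain "ba" (cond 8, no annul) the target simply becomes
+ * the new npc and the delay-slot instruction at the old npc still runs. */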
+
+static void do_fbranch(DisasContext *dc, int32_t offset, uint32_t insn, int cc)
+{
+ unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
+ target_ulong target = dc->pc + offset;
+
+#ifdef TARGET_SPARC64
+ if (unlikely(AM_CHECK(dc))) {
+ target &= 0xffffffffULL;
+ }
+#endif
+ if (cond == 0x0) {
+ /* unconditional not taken */
+ if (a) {
+ dc->pc = dc->npc + 4;
+ dc->npc = dc->pc + 4;
+ } else {
+ dc->pc = dc->npc;
+ dc->npc = dc->pc + 4;
+ }
+ } else if (cond == 0x8) {
+ /* unconditional taken */
+ if (a) {
+ dc->pc = target;
+ dc->npc = dc->pc + 4;
+ } else {
+ dc->pc = dc->npc;
+ dc->npc = target;
+ tcg_gen_mov_tl(cpu_pc, cpu_npc);
+ }
+ } else {
+ flush_cond(dc);
+ gen_fcond(cpu_cond, cc, cond);
+ if (a) {
+ gen_branch_a(dc, target);
+ } else {
+ gen_branch_n(dc, target);
+ }
+ }
+}
+
+#ifdef TARGET_SPARC64
+static void do_branch_reg(DisasContext *dc, int32_t offset, uint32_t insn,
+ TCGv r_reg)
+{
+ unsigned int cond = GET_FIELD_SP(insn, 25, 27), a = (insn & (1 << 29));
+ target_ulong target = dc->pc + offset;
+
+ if (unlikely(AM_CHECK(dc))) {
+ target &= 0xffffffffULL;
+ }
+ flush_cond(dc);
+ gen_cond_reg(cpu_cond, cond, r_reg);
+ if (a) {
+ gen_branch_a(dc, target);
+ } else {
+ gen_branch_n(dc, target);
+ }
+}
+
+static inline void gen_op_fcmps(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
+{
+ switch (fccno) {
+ case 0:
+ gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ break;
+ case 1:
+ gen_helper_fcmps_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ break;
+ case 2:
+ gen_helper_fcmps_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ break;
+ case 3:
+ gen_helper_fcmps_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ break;
+ }
+}
+
+static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
+{
+ switch (fccno) {
+ case 0:
+ gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ break;
+ case 1:
+ gen_helper_fcmpd_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ break;
+ case 2:
+ gen_helper_fcmpd_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ break;
+ case 3:
+ gen_helper_fcmpd_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ break;
+ }
+}
+
+static inline void gen_op_fcmpq(int fccno)
+{
+ switch (fccno) {
+ case 0:
+ gen_helper_fcmpq(cpu_fsr, cpu_env);
+ break;
+ case 1:
+ gen_helper_fcmpq_fcc1(cpu_fsr, cpu_env);
+ break;
+ case 2:
+ gen_helper_fcmpq_fcc2(cpu_fsr, cpu_env);
+ break;
+ case 3:
+ gen_helper_fcmpq_fcc3(cpu_fsr, cpu_env);
+ break;
+ }
+}
+
+static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
+{
+ switch (fccno) {
+ case 0:
+ gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ break;
+ case 1:
+ gen_helper_fcmpes_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ break;
+ case 2:
+ gen_helper_fcmpes_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ break;
+ case 3:
+ gen_helper_fcmpes_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ break;
+ }
+}
+
+static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
+{
+ switch (fccno) {
+ case 0:
+ gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ break;
+ case 1:
+ gen_helper_fcmped_fcc1(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ break;
+ case 2:
+ gen_helper_fcmped_fcc2(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ break;
+ case 3:
+ gen_helper_fcmped_fcc3(cpu_fsr, cpu_env, r_rs1, r_rs2);
+ break;
+ }
+}
+
+static inline void gen_op_fcmpeq(int fccno)
+{
+ switch (fccno) {
+ case 0:
+ gen_helper_fcmpeq(cpu_fsr, cpu_env);
+ break;
+ case 1:
+ gen_helper_fcmpeq_fcc1(cpu_fsr, cpu_env);
+ break;
+ case 2:
+ gen_helper_fcmpeq_fcc2(cpu_fsr, cpu_env);
+ break;
+ case 3:
+ gen_helper_fcmpeq_fcc3(cpu_fsr, cpu_env);
+ break;
+ }
+}
+
+#else
+
+static inline void gen_op_fcmps(int fccno, TCGv r_rs1, TCGv r_rs2)
+{
+ gen_helper_fcmps(cpu_fsr, cpu_env, r_rs1, r_rs2);
+}
+
+static inline void gen_op_fcmpd(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
+{
+ gen_helper_fcmpd(cpu_fsr, cpu_env, r_rs1, r_rs2);
+}
+
+static inline void gen_op_fcmpq(int fccno)
+{
+ gen_helper_fcmpq(cpu_fsr, cpu_env);
+}
+
+static inline void gen_op_fcmpes(int fccno, TCGv r_rs1, TCGv r_rs2)
+{
+ gen_helper_fcmpes(cpu_fsr, cpu_env, r_rs1, r_rs2);
+}
+
+static inline void gen_op_fcmped(int fccno, TCGv_i64 r_rs1, TCGv_i64 r_rs2)
+{
+ gen_helper_fcmped(cpu_fsr, cpu_env, r_rs1, r_rs2);
+}
+
+static inline void gen_op_fcmpeq(int fccno)
+{
+ gen_helper_fcmpeq(cpu_fsr, cpu_env);
+}
+#endif
+
+static void gen_op_fpexception_im(DisasContext *dc, int fsr_flags)
+{
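+    /* Replace the FSR.ftt field with FSR_FLAGS and raise a
+       TT_FP_EXCP trap.  */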
+ tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_NMASK);
+ tcg_gen_ori_tl(cpu_fsr, cpu_fsr, fsr_flags);
+ gen_exception(dc, TT_FP_EXCP);
+}
+
+static int gen_trap_ifnofpu(DisasContext *dc)
+{
+#if !defined(CONFIG_USER_ONLY)
+ if (!dc->fpu_enabled) {
+ gen_exception(dc, TT_NFPU_INSN);
+ return 1;
+ }
+#endif
+ return 0;
+}
+
+static inline void gen_op_clear_ieee_excp_and_FTT(void)
+{
+ tcg_gen_andi_tl(cpu_fsr, cpu_fsr, FSR_FTT_CEXC_NMASK);
+}
+
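+/* Templates for FPop translation.  The gen_fop_* helpers load the source
+   fp register(s), invoke the helper, check for IEEE exceptions and store
+   the result; the gen_ne_fop_* variants omit the exception check for
+   operations that cannot raise one.  F, D and Q denote single, double
+   and quad precision operands.  */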
+static inline void gen_fop_FF(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32))
+{
+ TCGv_i32 dst, src;
+
+ src = gen_load_fpr_F(dc, rs);
+ dst = gen_dest_fpr_F(dc);
+
+ gen(dst, cpu_env, src);
+ gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+
+ gen_store_fpr_F(dc, rd, dst);
+}
+
+static inline void gen_ne_fop_FF(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_i32, TCGv_i32))
+{
+ TCGv_i32 dst, src;
+
+ src = gen_load_fpr_F(dc, rs);
+ dst = gen_dest_fpr_F(dc);
+
+ gen(dst, src);
+
+ gen_store_fpr_F(dc, rd, dst);
+}
+
+static inline void gen_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
+ void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32))
+{
+ TCGv_i32 dst, src1, src2;
+
+ src1 = gen_load_fpr_F(dc, rs1);
+ src2 = gen_load_fpr_F(dc, rs2);
+ dst = gen_dest_fpr_F(dc);
+
+ gen(dst, cpu_env, src1, src2);
+ gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+
+ gen_store_fpr_F(dc, rd, dst);
+}
+
+#ifdef TARGET_SPARC64
+static inline void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
+ void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
+{
+ TCGv_i32 dst, src1, src2;
+
+ src1 = gen_load_fpr_F(dc, rs1);
+ src2 = gen_load_fpr_F(dc, rs2);
+ dst = gen_dest_fpr_F(dc);
+
+ gen(dst, src1, src2);
+
+ gen_store_fpr_F(dc, rd, dst);
+}
+#endif
+
+static inline void gen_fop_DD(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64))
+{
+ TCGv_i64 dst, src;
+
+ src = gen_load_fpr_D(dc, rs);
+ dst = gen_dest_fpr_D(dc, rd);
+
+ gen(dst, cpu_env, src);
+ gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+
+ gen_store_fpr_D(dc, rd, dst);
+}
+
+#ifdef TARGET_SPARC64
+static inline void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_i64, TCGv_i64))
+{
+ TCGv_i64 dst, src;
+
+ src = gen_load_fpr_D(dc, rs);
+ dst = gen_dest_fpr_D(dc, rd);
+
+ gen(dst, src);
+
+ gen_store_fpr_D(dc, rd, dst);
+}
+#endif
+
+static inline void gen_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
+ void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64))
+{
+ TCGv_i64 dst, src1, src2;
+
+ src1 = gen_load_fpr_D(dc, rs1);
+ src2 = gen_load_fpr_D(dc, rs2);
+ dst = gen_dest_fpr_D(dc, rd);
+
+ gen(dst, cpu_env, src1, src2);
+ gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+
+ gen_store_fpr_D(dc, rd, dst);
+}
+
+#ifdef TARGET_SPARC64
+static inline void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
+ void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
+{
+ TCGv_i64 dst, src1, src2;
+
+ src1 = gen_load_fpr_D(dc, rs1);
+ src2 = gen_load_fpr_D(dc, rs2);
+ dst = gen_dest_fpr_D(dc, rd);
+
+ gen(dst, src1, src2);
+
+ gen_store_fpr_D(dc, rd, dst);
+}
+
+static inline void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
+ void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
+{
+ TCGv_i64 dst, src1, src2;
+
+ src1 = gen_load_fpr_D(dc, rs1);
+ src2 = gen_load_fpr_D(dc, rs2);
+ dst = gen_dest_fpr_D(dc, rd);
+
+ gen(dst, cpu_gsr, src1, src2);
+
+ gen_store_fpr_D(dc, rd, dst);
+}
+
+static inline void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
+ void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
+{
+ TCGv_i64 dst, src0, src1, src2;
+
+ src1 = gen_load_fpr_D(dc, rs1);
+ src2 = gen_load_fpr_D(dc, rs2);
+ src0 = gen_load_fpr_D(dc, rd);
+ dst = gen_dest_fpr_D(dc, rd);
+
+ gen(dst, src0, src1, src2);
+
+ gen_store_fpr_D(dc, rd, dst);
+}
+#endif
+
+static inline void gen_fop_QQ(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_ptr))
+{
+ gen_op_load_fpr_QT1(QFPREG(rs));
+
+ gen(cpu_env);
+ gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+
+ gen_op_store_QT0_fpr(QFPREG(rd));
+ gen_update_fprs_dirty(dc, QFPREG(rd));
+}
+
+#ifdef TARGET_SPARC64
+static inline void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_ptr))
+{
+ gen_op_load_fpr_QT1(QFPREG(rs));
+
+ gen(cpu_env);
+
+ gen_op_store_QT0_fpr(QFPREG(rd));
+ gen_update_fprs_dirty(dc, QFPREG(rd));
+}
+#endif
+
+static inline void gen_fop_QQQ(DisasContext *dc, int rd, int rs1, int rs2,
+ void (*gen)(TCGv_ptr))
+{
+ gen_op_load_fpr_QT0(QFPREG(rs1));
+ gen_op_load_fpr_QT1(QFPREG(rs2));
+
+ gen(cpu_env);
+ gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+
+ gen_op_store_QT0_fpr(QFPREG(rd));
+ gen_update_fprs_dirty(dc, QFPREG(rd));
+}
+
+static inline void gen_fop_DFF(DisasContext *dc, int rd, int rs1, int rs2,
+ void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32, TCGv_i32))
+{
+ TCGv_i64 dst;
+ TCGv_i32 src1, src2;
+
+ src1 = gen_load_fpr_F(dc, rs1);
+ src2 = gen_load_fpr_F(dc, rs2);
+ dst = gen_dest_fpr_D(dc, rd);
+
+ gen(dst, cpu_env, src1, src2);
+ gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+
+ gen_store_fpr_D(dc, rd, dst);
+}
+
+static inline void gen_fop_QDD(DisasContext *dc, int rd, int rs1, int rs2,
+ void (*gen)(TCGv_ptr, TCGv_i64, TCGv_i64))
+{
+ TCGv_i64 src1, src2;
+
+ src1 = gen_load_fpr_D(dc, rs1);
+ src2 = gen_load_fpr_D(dc, rs2);
+
+ gen(cpu_env, src1, src2);
+ gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+
+ gen_op_store_QT0_fpr(QFPREG(rd));
+ gen_update_fprs_dirty(dc, QFPREG(rd));
+}
+
+#ifdef TARGET_SPARC64
+static inline void gen_fop_DF(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
+{
+ TCGv_i64 dst;
+ TCGv_i32 src;
+
+ src = gen_load_fpr_F(dc, rs);
+ dst = gen_dest_fpr_D(dc, rd);
+
+ gen(dst, cpu_env, src);
+ gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+
+ gen_store_fpr_D(dc, rd, dst);
+}
+#endif
+
+static inline void gen_ne_fop_DF(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
+{
+ TCGv_i64 dst;
+ TCGv_i32 src;
+
+ src = gen_load_fpr_F(dc, rs);
+ dst = gen_dest_fpr_D(dc, rd);
+
+ gen(dst, cpu_env, src);
+
+ gen_store_fpr_D(dc, rd, dst);
+}
+
+static inline void gen_fop_FD(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_i32, TCGv_ptr, TCGv_i64))
+{
+ TCGv_i32 dst;
+ TCGv_i64 src;
+
+ src = gen_load_fpr_D(dc, rs);
+ dst = gen_dest_fpr_F(dc);
+
+ gen(dst, cpu_env, src);
+ gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+
+ gen_store_fpr_F(dc, rd, dst);
+}
+
+static inline void gen_fop_FQ(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_i32, TCGv_ptr))
+{
+ TCGv_i32 dst;
+
+ gen_op_load_fpr_QT1(QFPREG(rs));
+ dst = gen_dest_fpr_F(dc);
+
+ gen(dst, cpu_env);
+ gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+
+ gen_store_fpr_F(dc, rd, dst);
+}
+
+static inline void gen_fop_DQ(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_i64, TCGv_ptr))
+{
+ TCGv_i64 dst;
+
+ gen_op_load_fpr_QT1(QFPREG(rs));
+ dst = gen_dest_fpr_D(dc, rd);
+
+ gen(dst, cpu_env);
+ gen_helper_check_ieee_exceptions(cpu_fsr, cpu_env);
+
+ gen_store_fpr_D(dc, rd, dst);
+}
+
+static inline void gen_ne_fop_QF(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_ptr, TCGv_i32))
+{
+ TCGv_i32 src;
+
+ src = gen_load_fpr_F(dc, rs);
+
+ gen(cpu_env, src);
+
+ gen_op_store_QT0_fpr(QFPREG(rd));
+ gen_update_fprs_dirty(dc, QFPREG(rd));
+}
+
+static inline void gen_ne_fop_QD(DisasContext *dc, int rd, int rs,
+ void (*gen)(TCGv_ptr, TCGv_i64))
+{
+ TCGv_i64 src;
+
+ src = gen_load_fpr_D(dc, rs);
+
+ gen(cpu_env, src);
+
+ gen_op_store_QT0_fpr(QFPREG(rd));
+ gen_update_fprs_dirty(dc, QFPREG(rd));
+}
+
+static void gen_swap(DisasContext *dc, TCGv dst, TCGv src,
+ TCGv addr, int mmu_idx, TCGMemOp memop)
+{
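+    /* SWAP: atomically exchange the memory word at ADDR with SRC,
+       returning the previous memory contents in DST.  */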
+ gen_address_mask(dc, addr);
+ tcg_gen_atomic_xchg_tl(dst, addr, src, mmu_idx, memop);
+}
+
+static void gen_ldstub(DisasContext *dc, TCGv dst, TCGv addr, int mmu_idx)
+{
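+    /* LDSTUB: atomically replace the addressed byte with 0xff and
+       return its previous value in DST.  */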
+ TCGv m1 = tcg_const_tl(0xff);
+ gen_address_mask(dc, addr);
+ tcg_gen_atomic_xchg_tl(dst, addr, m1, mmu_idx, MO_UB);
+ tcg_temp_free(m1);
+}
+
+/* asi moves */
+#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
+typedef enum {
+ GET_ASI_HELPER,
+ GET_ASI_EXCP,
+ GET_ASI_DIRECT,
+ GET_ASI_DTWINX,
+ GET_ASI_BLOCK,
+ GET_ASI_SHORT,
+ GET_ASI_BCOPY,
+ GET_ASI_BFILL,
+} ASIType;
+
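+/* Decoded ASI: how the access is to be implemented (TYPE), the numeric
+   ASI value, the MMU index to use, and the memory operation, including
+   any byte swap implied by a little-endian ASI.  */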
+typedef struct {
+ ASIType type;
+ int asi;
+ int mem_idx;
+ TCGMemOp memop;
+} DisasASI;
+
+static DisasASI get_asi(DisasContext *dc, int insn, TCGMemOp memop)
+{
+ int asi = GET_FIELD(insn, 19, 26);
+ ASIType type = GET_ASI_HELPER;
+ int mem_idx = dc->mem_idx;
+
+#ifndef TARGET_SPARC64
+ /* Before v9, all asis are immediate and privileged. */
+ if (IS_IMM) {
+ gen_exception(dc, TT_ILL_INSN);
+ type = GET_ASI_EXCP;
+ } else if (supervisor(dc)
+ /* Note that LEON accepts ASI_USERDATA in user mode, for
+ use with CASA. Also note that previous versions of
+ QEMU allowed (and old versions of gcc emitted) ASI_P
+ for LEON, which is incorrect. */
+ || (asi == ASI_USERDATA
+ && (dc->def->features & CPU_FEATURE_CASA))) {
+ switch (asi) {
+ case ASI_USERDATA: /* User data access */
+ mem_idx = MMU_USER_IDX;
+ type = GET_ASI_DIRECT;
+ break;
+ case ASI_KERNELDATA: /* Supervisor data access */
+ mem_idx = MMU_KERNEL_IDX;
+ type = GET_ASI_DIRECT;
+ break;
+ case ASI_M_BYPASS: /* MMU passthrough */
+ case ASI_LEON_BYPASS: /* LEON MMU passthrough */
+ mem_idx = MMU_PHYS_IDX;
+ type = GET_ASI_DIRECT;
+ break;
+ case ASI_M_BCOPY: /* Block copy, sta access */
+ mem_idx = MMU_KERNEL_IDX;
+ type = GET_ASI_BCOPY;
+ break;
+ case ASI_M_BFILL: /* Block fill, stda access */
+ mem_idx = MMU_KERNEL_IDX;
+ type = GET_ASI_BFILL;
+ break;
+ }
+ } else {
+ gen_exception(dc, TT_PRIV_INSN);
+ type = GET_ASI_EXCP;
+ }
+#else
+ if (IS_IMM) {
+ asi = dc->asi;
+ }
+ /* With v9, all asis below 0x80 are privileged. */
+ /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
+ down that bit into DisasContext. For the moment that's ok,
+       since the direct implementations below don't have any ASIs
+ in the restricted [0x30, 0x7f] range, and the check will be
+ done properly in the helper. */
+ if (!supervisor(dc) && asi < 0x80) {
+ gen_exception(dc, TT_PRIV_ACT);
+ type = GET_ASI_EXCP;
+ } else {
+ switch (asi) {
+ case ASI_REAL: /* Bypass */
+ case ASI_REAL_IO: /* Bypass, non-cacheable */
+ case ASI_REAL_L: /* Bypass LE */
+ case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
+ case ASI_TWINX_REAL: /* Real address, twinx */
+ case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
+ case ASI_QUAD_LDD_PHYS:
+ case ASI_QUAD_LDD_PHYS_L:
+ mem_idx = MMU_PHYS_IDX;
+ break;
+ case ASI_N: /* Nucleus */
+ case ASI_NL: /* Nucleus LE */
+ case ASI_TWINX_N:
+ case ASI_TWINX_NL:
+ case ASI_NUCLEUS_QUAD_LDD:
+ case ASI_NUCLEUS_QUAD_LDD_L:
+ mem_idx = MMU_NUCLEUS_IDX;
+ break;
+ case ASI_AIUP: /* As if user primary */
+ case ASI_AIUPL: /* As if user primary LE */
+ case ASI_TWINX_AIUP:
+ case ASI_TWINX_AIUP_L:
+ case ASI_BLK_AIUP_4V:
+ case ASI_BLK_AIUP_L_4V:
+ case ASI_BLK_AIUP:
+ case ASI_BLK_AIUPL:
+ mem_idx = MMU_USER_IDX;
+ break;
+ case ASI_AIUS: /* As if user secondary */
+ case ASI_AIUSL: /* As if user secondary LE */
+ case ASI_TWINX_AIUS:
+ case ASI_TWINX_AIUS_L:
+ case ASI_BLK_AIUS_4V:
+ case ASI_BLK_AIUS_L_4V:
+ case ASI_BLK_AIUS:
+ case ASI_BLK_AIUSL:
+ mem_idx = MMU_USER_SECONDARY_IDX;
+ break;
+ case ASI_S: /* Secondary */
+ case ASI_SL: /* Secondary LE */
+ case ASI_TWINX_S:
+ case ASI_TWINX_SL:
+ case ASI_BLK_COMMIT_S:
+ case ASI_BLK_S:
+ case ASI_BLK_SL:
+ case ASI_FL8_S:
+ case ASI_FL8_SL:
+ case ASI_FL16_S:
+ case ASI_FL16_SL:
+ if (mem_idx == MMU_USER_IDX) {
+ mem_idx = MMU_USER_SECONDARY_IDX;
+ } else if (mem_idx == MMU_KERNEL_IDX) {
+ mem_idx = MMU_KERNEL_SECONDARY_IDX;
+ }
+ break;
+ case ASI_P: /* Primary */
+ case ASI_PL: /* Primary LE */
+ case ASI_TWINX_P:
+ case ASI_TWINX_PL:
+ case ASI_BLK_COMMIT_P:
+ case ASI_BLK_P:
+ case ASI_BLK_PL:
+ case ASI_FL8_P:
+ case ASI_FL8_PL:
+ case ASI_FL16_P:
+ case ASI_FL16_PL:
+ break;
+ }
+ switch (asi) {
+ case ASI_REAL:
+ case ASI_REAL_IO:
+ case ASI_REAL_L:
+ case ASI_REAL_IO_L:
+ case ASI_N:
+ case ASI_NL:
+ case ASI_AIUP:
+ case ASI_AIUPL:
+ case ASI_AIUS:
+ case ASI_AIUSL:
+ case ASI_S:
+ case ASI_SL:
+ case ASI_P:
+ case ASI_PL:
+ type = GET_ASI_DIRECT;
+ break;
+ case ASI_TWINX_REAL:
+ case ASI_TWINX_REAL_L:
+ case ASI_TWINX_N:
+ case ASI_TWINX_NL:
+ case ASI_TWINX_AIUP:
+ case ASI_TWINX_AIUP_L:
+ case ASI_TWINX_AIUS:
+ case ASI_TWINX_AIUS_L:
+ case ASI_TWINX_P:
+ case ASI_TWINX_PL:
+ case ASI_TWINX_S:
+ case ASI_TWINX_SL:
+ case ASI_QUAD_LDD_PHYS:
+ case ASI_QUAD_LDD_PHYS_L:
+ case ASI_NUCLEUS_QUAD_LDD:
+ case ASI_NUCLEUS_QUAD_LDD_L:
+ type = GET_ASI_DTWINX;
+ break;
+ case ASI_BLK_COMMIT_P:
+ case ASI_BLK_COMMIT_S:
+ case ASI_BLK_AIUP_4V:
+ case ASI_BLK_AIUP_L_4V:
+ case ASI_BLK_AIUP:
+ case ASI_BLK_AIUPL:
+ case ASI_BLK_AIUS_4V:
+ case ASI_BLK_AIUS_L_4V:
+ case ASI_BLK_AIUS:
+ case ASI_BLK_AIUSL:
+ case ASI_BLK_S:
+ case ASI_BLK_SL:
+ case ASI_BLK_P:
+ case ASI_BLK_PL:
+ type = GET_ASI_BLOCK;
+ break;
+ case ASI_FL8_S:
+ case ASI_FL8_SL:
+ case ASI_FL8_P:
+ case ASI_FL8_PL:
+ memop = MO_UB;
+ type = GET_ASI_SHORT;
+ break;
+ case ASI_FL16_S:
+ case ASI_FL16_SL:
+ case ASI_FL16_P:
+ case ASI_FL16_PL:
+ memop = MO_TEUW;
+ type = GET_ASI_SHORT;
+ break;
+ }
+ /* The little-endian asis all have bit 3 set. */
+ if (asi & 8) {
+ memop ^= MO_BSWAP;
+ }
+ }
+#endif
+
+ return (DisasASI){ type, asi, mem_idx, memop };
+}
+
+static void gen_ld_asi(DisasContext *dc, TCGv dst, TCGv addr,
+ int insn, TCGMemOp memop)
+{
+ DisasASI da = get_asi(dc, insn, memop);
+
+ switch (da.type) {
+ case GET_ASI_EXCP:
+ break;
+ case GET_ASI_DTWINX: /* Reserved for ldda. */
+ gen_exception(dc, TT_ILL_INSN);
+ break;
+ case GET_ASI_DIRECT:
+ gen_address_mask(dc, addr);
+ tcg_gen_qemu_ld_tl(dst, addr, da.mem_idx, da.memop);
+ break;
+ default:
+ {
+ TCGv_i32 r_asi = tcg_const_i32(da.asi);
+ TCGv_i32 r_mop = tcg_const_i32(memop);
+
+ save_state(dc);
+#ifdef TARGET_SPARC64
+ gen_helper_ld_asi(dst, cpu_env, addr, r_asi, r_mop);
+#else
+ {
+ TCGv_i64 t64 = tcg_temp_new_i64();
+ gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
+ tcg_gen_trunc_i64_tl(dst, t64);
+ tcg_temp_free_i64(t64);
+ }
+#endif
+ tcg_temp_free_i32(r_mop);
+ tcg_temp_free_i32(r_asi);
+ }
+ break;
+ }
+}
+
+static void gen_st_asi(DisasContext *dc, TCGv src, TCGv addr,
+ int insn, TCGMemOp memop)
+{
+ DisasASI da = get_asi(dc, insn, memop);
+
+ switch (da.type) {
+ case GET_ASI_EXCP:
+ break;
+ case GET_ASI_DTWINX: /* Reserved for stda. */
+ gen_exception(dc, TT_ILL_INSN);
+ break;
+ case GET_ASI_DIRECT:
+ gen_address_mask(dc, addr);
+ tcg_gen_qemu_st_tl(src, addr, da.mem_idx, da.memop);
+ break;
+#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
+ case GET_ASI_BCOPY:
+ /* Copy 32 bytes from the address in SRC to ADDR. */
+ /* ??? The original qemu code suggests 4-byte alignment, dropping
+ the low bits, but the only place I can see this used is in the
+ Linux kernel with 32 byte alignment, which would make more sense
+ as a cacheline-style operation. */
+ {
+ TCGv saddr = tcg_temp_new();
+ TCGv daddr = tcg_temp_new();
+ TCGv four = tcg_const_tl(4);
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ int i;
+
+ tcg_gen_andi_tl(saddr, src, -4);
+ tcg_gen_andi_tl(daddr, addr, -4);
+ for (i = 0; i < 32; i += 4) {
+ /* Since the loads and stores are paired, allow the
+ copy to happen in the host endianness. */
+ tcg_gen_qemu_ld_i32(tmp, saddr, da.mem_idx, MO_UL);
+ tcg_gen_qemu_st_i32(tmp, daddr, da.mem_idx, MO_UL);
+ tcg_gen_add_tl(saddr, saddr, four);
+ tcg_gen_add_tl(daddr, daddr, four);
+ }
+
+ tcg_temp_free(saddr);
+ tcg_temp_free(daddr);
+ tcg_temp_free(four);
+ tcg_temp_free_i32(tmp);
+ }
+ break;
+#endif
+ default:
+ {
+ TCGv_i32 r_asi = tcg_const_i32(da.asi);
+ TCGv_i32 r_mop = tcg_const_i32(memop & MO_SIZE);
+
+ save_state(dc);
+#ifdef TARGET_SPARC64
+ gen_helper_st_asi(cpu_env, addr, src, r_asi, r_mop);
+#else
+ {
+ TCGv_i64 t64 = tcg_temp_new_i64();
+ tcg_gen_extu_tl_i64(t64, src);
+ gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
+ tcg_temp_free_i64(t64);
+ }
+#endif
+ tcg_temp_free_i32(r_mop);
+ tcg_temp_free_i32(r_asi);
+
+ /* A write to a TLB register may alter page maps. End the TB. */
+ dc->npc = DYNAMIC_PC;
+ }
+ break;
+ }
+}
+
+static void gen_swap_asi(DisasContext *dc, TCGv dst, TCGv src,
+ TCGv addr, int insn)
+{
+ DisasASI da = get_asi(dc, insn, MO_TEUL);
+
+ switch (da.type) {
+ case GET_ASI_EXCP:
+ break;
+ case GET_ASI_DIRECT:
+ gen_swap(dc, dst, src, addr, da.mem_idx, da.memop);
+ break;
+ default:
+ /* ??? Should be DAE_invalid_asi. */
+ gen_exception(dc, TT_DATA_ACCESS);
+ break;
+ }
+}
+
+static void gen_cas_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
+ int insn, int rd)
+{
+ DisasASI da = get_asi(dc, insn, MO_TEUL);
+ TCGv oldv;
+
+ switch (da.type) {
+ case GET_ASI_EXCP:
+ return;
+ case GET_ASI_DIRECT:
+ oldv = tcg_temp_new();
+ tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
+ da.mem_idx, da.memop);
+ gen_store_gpr(dc, rd, oldv);
+ tcg_temp_free(oldv);
+ break;
+ default:
+ /* ??? Should be DAE_invalid_asi. */
+ gen_exception(dc, TT_DATA_ACCESS);
+ break;
+ }
+}
+
+static void gen_ldstub_asi(DisasContext *dc, TCGv dst, TCGv addr, int insn)
+{
+ DisasASI da = get_asi(dc, insn, MO_UB);
+
+ switch (da.type) {
+ case GET_ASI_EXCP:
+ break;
+ case GET_ASI_DIRECT:
+ gen_ldstub(dc, dst, addr, da.mem_idx);
+ break;
+ default:
+ /* ??? Should be DAE_invalid_asi. */
+ gen_exception(dc, TT_DATA_ACCESS);
+ break;
+ }
+}
+#endif
+
+#ifdef TARGET_SPARC64
+static void gen_ldf_asi(DisasContext *dc, TCGv addr,
+ int insn, int size, int rd)
+{
+ DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEQ));
+ TCGv_i32 d32;
+ TCGv_i64 d64;
+
+ switch (da.type) {
+ case GET_ASI_EXCP:
+ break;
+
+ case GET_ASI_DIRECT:
+ gen_address_mask(dc, addr);
+ switch (size) {
+ case 4:
+ d32 = gen_dest_fpr_F(dc);
+ tcg_gen_qemu_ld_i32(d32, addr, da.mem_idx, da.memop);
+ gen_store_fpr_F(dc, rd, d32);
+ break;
+ case 8:
+ tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
+ da.memop | MO_ALIGN_4);
+ break;
+ case 16:
+ d64 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld_i64(d64, addr, da.mem_idx, da.memop | MO_ALIGN_4);
+ tcg_gen_addi_tl(addr, addr, 8);
+ tcg_gen_qemu_ld_i64(cpu_fpr[rd/2+1], addr, da.mem_idx,
+ da.memop | MO_ALIGN_4);
+ tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
+ tcg_temp_free_i64(d64);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+
+ case GET_ASI_BLOCK:
+ /* Valid for lddfa on aligned registers only. */
+ if (size == 8 && (rd & 7) == 0) {
+ TCGMemOp memop;
+ TCGv eight;
+ int i;
+
+ gen_address_mask(dc, addr);
+
+ /* The first operation checks required alignment. */
+ memop = da.memop | MO_ALIGN_64;
+ eight = tcg_const_tl(8);
+ for (i = 0; ; ++i) {
+ tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr,
+ da.mem_idx, memop);
+ if (i == 7) {
+ break;
+ }
+ tcg_gen_add_tl(addr, addr, eight);
+ memop = da.memop;
+ }
+ tcg_temp_free(eight);
+ } else {
+ gen_exception(dc, TT_ILL_INSN);
+ }
+ break;
+
+ case GET_ASI_SHORT:
+ /* Valid for lddfa only. */
+ if (size == 8) {
+ gen_address_mask(dc, addr);
+ tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
+ } else {
+ gen_exception(dc, TT_ILL_INSN);
+ }
+ break;
+
+ default:
+ {
+ TCGv_i32 r_asi = tcg_const_i32(da.asi);
+ TCGv_i32 r_mop = tcg_const_i32(da.memop);
+
+ save_state(dc);
+ /* According to the table in the UA2011 manual, the only
+ other asis that are valid for ldfa/lddfa/ldqfa are
+ the NO_FAULT asis. We still need a helper for these,
+ but we can just use the integer asi helper for them. */
+ switch (size) {
+ case 4:
+ d64 = tcg_temp_new_i64();
+ gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_mop);
+ d32 = gen_dest_fpr_F(dc);
+ tcg_gen_extrl_i64_i32(d32, d64);
+ tcg_temp_free_i64(d64);
+ gen_store_fpr_F(dc, rd, d32);
+ break;
+ case 8:
+ gen_helper_ld_asi(cpu_fpr[rd / 2], cpu_env, addr, r_asi, r_mop);
+ break;
+ case 16:
+ d64 = tcg_temp_new_i64();
+ gen_helper_ld_asi(d64, cpu_env, addr, r_asi, r_mop);
+ tcg_gen_addi_tl(addr, addr, 8);
+ gen_helper_ld_asi(cpu_fpr[rd/2+1], cpu_env, addr, r_asi, r_mop);
+ tcg_gen_mov_i64(cpu_fpr[rd / 2], d64);
+ tcg_temp_free_i64(d64);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ tcg_temp_free_i32(r_mop);
+ tcg_temp_free_i32(r_asi);
+ }
+ break;
+ }
+}
+
+static void gen_stf_asi(DisasContext *dc, TCGv addr,
+ int insn, int size, int rd)
+{
+ DisasASI da = get_asi(dc, insn, (size == 4 ? MO_TEUL : MO_TEQ));
+ TCGv_i32 d32;
+
+ switch (da.type) {
+ case GET_ASI_EXCP:
+ break;
+
+ case GET_ASI_DIRECT:
+ gen_address_mask(dc, addr);
+ switch (size) {
+ case 4:
+ d32 = gen_load_fpr_F(dc, rd);
+ tcg_gen_qemu_st_i32(d32, addr, da.mem_idx, da.memop);
+ break;
+ case 8:
+ tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
+ da.memop | MO_ALIGN_4);
+ break;
+ case 16:
+ /* Only 4-byte alignment required. However, it is legal for the
+ cpu to signal the alignment fault, and the OS trap handler is
+ required to fix it up. Requiring 16-byte alignment here avoids
+ having to probe the second page before performing the first
+ write. */
+ tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx,
+ da.memop | MO_ALIGN_16);
+ tcg_gen_addi_tl(addr, addr, 8);
+ tcg_gen_qemu_st_i64(cpu_fpr[rd/2+1], addr, da.mem_idx, da.memop);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ break;
+
+ case GET_ASI_BLOCK:
+ /* Valid for stdfa on aligned registers only. */
+ if (size == 8 && (rd & 7) == 0) {
+ TCGMemOp memop;
+ TCGv eight;
+ int i;
+
+ gen_address_mask(dc, addr);
+
+ /* The first operation checks required alignment. */
+ memop = da.memop | MO_ALIGN_64;
+ eight = tcg_const_tl(8);
+ for (i = 0; ; ++i) {
+ tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr,
+ da.mem_idx, memop);
+ if (i == 7) {
+ break;
+ }
+ tcg_gen_add_tl(addr, addr, eight);
+ memop = da.memop;
+ }
+ tcg_temp_free(eight);
+ } else {
+ gen_exception(dc, TT_ILL_INSN);
+ }
+ break;
+
+ case GET_ASI_SHORT:
+ /* Valid for stdfa only. */
+ if (size == 8) {
+ gen_address_mask(dc, addr);
+ tcg_gen_qemu_st_i64(cpu_fpr[rd / 2], addr, da.mem_idx, da.memop);
+ } else {
+ gen_exception(dc, TT_ILL_INSN);
+ }
+ break;
+
+ default:
+ /* According to the table in the UA2011 manual, the only
+           other asis that are valid for stfa/stdfa/stqfa are
+ the PST* asis, which aren't currently handled. */
+ gen_exception(dc, TT_ILL_INSN);
+ break;
+ }
+}
+
+static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
+{
+ DisasASI da = get_asi(dc, insn, MO_TEQ);
+ TCGv_i64 hi = gen_dest_gpr(dc, rd);
+ TCGv_i64 lo = gen_dest_gpr(dc, rd + 1);
+
+ switch (da.type) {
+ case GET_ASI_EXCP:
+ return;
+
+ case GET_ASI_DTWINX:
+ gen_address_mask(dc, addr);
+ tcg_gen_qemu_ld_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
+ tcg_gen_addi_tl(addr, addr, 8);
+ tcg_gen_qemu_ld_i64(lo, addr, da.mem_idx, da.memop);
+ break;
+
+ case GET_ASI_DIRECT:
+ {
+ TCGv_i64 tmp = tcg_temp_new_i64();
+
+ gen_address_mask(dc, addr);
+ tcg_gen_qemu_ld_i64(tmp, addr, da.mem_idx, da.memop);
+
+ /* Note that LE ldda acts as if each 32-bit register
+ result is byte swapped. Having just performed one
+               64-bit bswap, we now need to swap the writebacks. */
+ if ((da.memop & MO_BSWAP) == MO_TE) {
+ tcg_gen_extr32_i64(lo, hi, tmp);
+ } else {
+ tcg_gen_extr32_i64(hi, lo, tmp);
+ }
+ tcg_temp_free_i64(tmp);
+ }
+ break;
+
+ default:
+ /* ??? In theory we've handled all of the ASIs that are valid
+ for ldda, and this should raise DAE_invalid_asi. However,
+ real hardware allows others. This can be seen with e.g.
+ FreeBSD 10.3 wrt ASI_IC_TAG. */
+ {
+ TCGv_i32 r_asi = tcg_const_i32(da.asi);
+ TCGv_i32 r_mop = tcg_const_i32(da.memop);
+ TCGv_i64 tmp = tcg_temp_new_i64();
+
+ save_state(dc);
+ gen_helper_ld_asi(tmp, cpu_env, addr, r_asi, r_mop);
+ tcg_temp_free_i32(r_asi);
+ tcg_temp_free_i32(r_mop);
+
+ /* See above. */
+ if ((da.memop & MO_BSWAP) == MO_TE) {
+ tcg_gen_extr32_i64(lo, hi, tmp);
+ } else {
+ tcg_gen_extr32_i64(hi, lo, tmp);
+ }
+ tcg_temp_free_i64(tmp);
+ }
+ break;
+ }
+
+ gen_store_gpr(dc, rd, hi);
+ gen_store_gpr(dc, rd + 1, lo);
+}
+
+static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
+ int insn, int rd)
+{
+ DisasASI da = get_asi(dc, insn, MO_TEQ);
+ TCGv lo = gen_load_gpr(dc, rd + 1);
+
+ switch (da.type) {
+ case GET_ASI_EXCP:
+ break;
+
+ case GET_ASI_DTWINX:
+ gen_address_mask(dc, addr);
+ tcg_gen_qemu_st_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
+ tcg_gen_addi_tl(addr, addr, 8);
+ tcg_gen_qemu_st_i64(lo, addr, da.mem_idx, da.memop);
+ break;
+
+ case GET_ASI_DIRECT:
+ {
+ TCGv_i64 t64 = tcg_temp_new_i64();
+
+ /* Note that LE stda acts as if each 32-bit register result is
+ byte swapped. We will perform one 64-bit LE store, so now
+ we must swap the order of the construction. */
+ if ((da.memop & MO_BSWAP) == MO_TE) {
+ tcg_gen_concat32_i64(t64, lo, hi);
+ } else {
+ tcg_gen_concat32_i64(t64, hi, lo);
+ }
+ gen_address_mask(dc, addr);
+ tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
+ tcg_temp_free_i64(t64);
+ }
+ break;
+
+ default:
+ /* ??? In theory we've handled all of the ASIs that are valid
+ for stda, and this should raise DAE_invalid_asi. */
+ {
+ TCGv_i32 r_asi = tcg_const_i32(da.asi);
+ TCGv_i32 r_mop = tcg_const_i32(da.memop);
+ TCGv_i64 t64 = tcg_temp_new_i64();
+
+ /* See above. */
+ if ((da.memop & MO_BSWAP) == MO_TE) {
+ tcg_gen_concat32_i64(t64, lo, hi);
+ } else {
+ tcg_gen_concat32_i64(t64, hi, lo);
+ }
+
+ save_state(dc);
+ gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
+ tcg_temp_free_i32(r_mop);
+ tcg_temp_free_i32(r_asi);
+ tcg_temp_free_i64(t64);
+ }
+ break;
+ }
+}
+
+static void gen_casx_asi(DisasContext *dc, TCGv addr, TCGv cmpv,
+ int insn, int rd)
+{
+ DisasASI da = get_asi(dc, insn, MO_TEQ);
+ TCGv oldv;
+
+ switch (da.type) {
+ case GET_ASI_EXCP:
+ return;
+ case GET_ASI_DIRECT:
+ oldv = tcg_temp_new();
+ tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, gen_load_gpr(dc, rd),
+ da.mem_idx, da.memop);
+ gen_store_gpr(dc, rd, oldv);
+ tcg_temp_free(oldv);
+ break;
+ default:
+ /* ??? Should be DAE_invalid_asi. */
+ gen_exception(dc, TT_DATA_ACCESS);
+ break;
+ }
+}
+
+#elif !defined(CONFIG_USER_ONLY)
+static void gen_ldda_asi(DisasContext *dc, TCGv addr, int insn, int rd)
+{
+ /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
+ whereby "rd + 1" elicits "error: array subscript is above array".
+ Since we have already asserted that rd is even, the semantics
+ are unchanged. */
+ TCGv lo = gen_dest_gpr(dc, rd | 1);
+ TCGv hi = gen_dest_gpr(dc, rd);
+ TCGv_i64 t64 = tcg_temp_new_i64();
+ DisasASI da = get_asi(dc, insn, MO_TEQ);
+
+ switch (da.type) {
+ case GET_ASI_EXCP:
+ tcg_temp_free_i64(t64);
+ return;
+ case GET_ASI_DIRECT:
+ gen_address_mask(dc, addr);
+ tcg_gen_qemu_ld_i64(t64, addr, da.mem_idx, da.memop);
+ break;
+ default:
+ {
+ TCGv_i32 r_asi = tcg_const_i32(da.asi);
+ TCGv_i32 r_mop = tcg_const_i32(MO_Q);
+
+ save_state(dc);
+ gen_helper_ld_asi(t64, cpu_env, addr, r_asi, r_mop);
+ tcg_temp_free_i32(r_mop);
+ tcg_temp_free_i32(r_asi);
+ }
+ break;
+ }
+
+ tcg_gen_extr_i64_i32(lo, hi, t64);
+ tcg_temp_free_i64(t64);
+ gen_store_gpr(dc, rd | 1, lo);
+ gen_store_gpr(dc, rd, hi);
+}
+
+static void gen_stda_asi(DisasContext *dc, TCGv hi, TCGv addr,
+ int insn, int rd)
+{
+ DisasASI da = get_asi(dc, insn, MO_TEQ);
+ TCGv lo = gen_load_gpr(dc, rd + 1);
+ TCGv_i64 t64 = tcg_temp_new_i64();
+
+ tcg_gen_concat_tl_i64(t64, lo, hi);
+
+ switch (da.type) {
+ case GET_ASI_EXCP:
+ break;
+ case GET_ASI_DIRECT:
+ gen_address_mask(dc, addr);
+ tcg_gen_qemu_st_i64(t64, addr, da.mem_idx, da.memop);
+ break;
+ case GET_ASI_BFILL:
+ /* Store 32 bytes of T64 to ADDR. */
+ /* ??? The original qemu code suggests 8-byte alignment, dropping
+ the low bits, but the only place I can see this used is in the
+ Linux kernel with 32 byte alignment, which would make more sense
+ as a cacheline-style operation. */
+ {
+ TCGv d_addr = tcg_temp_new();
+ TCGv eight = tcg_const_tl(8);
+ int i;
+
+ tcg_gen_andi_tl(d_addr, addr, -8);
+ for (i = 0; i < 32; i += 8) {
+ tcg_gen_qemu_st_i64(t64, d_addr, da.mem_idx, da.memop);
+ tcg_gen_add_tl(d_addr, d_addr, eight);
+ }
+
+ tcg_temp_free(d_addr);
+ tcg_temp_free(eight);
+ }
+ break;
+ default:
+ {
+ TCGv_i32 r_asi = tcg_const_i32(da.asi);
+ TCGv_i32 r_mop = tcg_const_i32(MO_Q);
+
+ save_state(dc);
+ gen_helper_st_asi(cpu_env, addr, t64, r_asi, r_mop);
+ tcg_temp_free_i32(r_mop);
+ tcg_temp_free_i32(r_asi);
+ }
+ break;
+ }
+
+ tcg_temp_free_i64(t64);
+}
+#endif
+
+static TCGv get_src1(DisasContext *dc, unsigned int insn)
+{
+ unsigned int rs1 = GET_FIELD(insn, 13, 17);
+ return gen_load_gpr(dc, rs1);
+}
+
+static TCGv get_src2(DisasContext *dc, unsigned int insn)
+{
+ if (IS_IMM) { /* immediate */
+ target_long simm = GET_FIELDs(insn, 19, 31);
+ TCGv t = get_temp_tl(dc);
+ tcg_gen_movi_tl(t, simm);
+ return t;
+ } else { /* register */
+ unsigned int rs2 = GET_FIELD(insn, 27, 31);
+ return gen_load_gpr(dc, rs2);
+ }
+}
+
+#ifdef TARGET_SPARC64
+static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
+{
+ TCGv_i32 c32, zero, dst, s1, s2;
+
+ /* We have two choices here: extend the 32 bit data and use movcond_i64,
+ or fold the comparison down to 32 bits and use movcond_i32. Choose
+       the latter. */
+ c32 = tcg_temp_new_i32();
+ if (cmp->is_bool) {
+ tcg_gen_extrl_i64_i32(c32, cmp->c1);
+ } else {
+ TCGv_i64 c64 = tcg_temp_new_i64();
+ tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
+ tcg_gen_extrl_i64_i32(c32, c64);
+ tcg_temp_free_i64(c64);
+ }
+
+ s1 = gen_load_fpr_F(dc, rs);
+ s2 = gen_load_fpr_F(dc, rd);
+ dst = gen_dest_fpr_F(dc);
+ zero = tcg_const_i32(0);
+
+ tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);
+
+ tcg_temp_free_i32(c32);
+ tcg_temp_free_i32(zero);
+ gen_store_fpr_F(dc, rd, dst);
+}
+
+static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
+{
+ TCGv_i64 dst = gen_dest_fpr_D(dc, rd);
+ tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, cmp->c2,
+ gen_load_fpr_D(dc, rs),
+ gen_load_fpr_D(dc, rd));
+ gen_store_fpr_D(dc, rd, dst);
+}
+
+static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
+{
+ int qd = QFPREG(rd);
+ int qs = QFPREG(rs);
+
+ tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2], cmp->c1, cmp->c2,
+ cpu_fpr[qs / 2], cpu_fpr[qd / 2]);
+ tcg_gen_movcond_i64(cmp->cond, cpu_fpr[qd / 2 + 1], cmp->c1, cmp->c2,
+ cpu_fpr[qs / 2 + 1], cpu_fpr[qd / 2 + 1]);
+
+ gen_update_fprs_dirty(dc, qd);
+}
+
+#ifndef CONFIG_USER_ONLY
+static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr, TCGv_env cpu_env)
+{
+ TCGv_i32 r_tl = tcg_temp_new_i32();
+
+ /* load env->tl into r_tl */
+ tcg_gen_ld_i32(r_tl, cpu_env, offsetof(CPUSPARCState, tl));
+
+    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be a power of 2 */
+ tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);
+
+ /* calculate offset to current trap state from env->ts, reuse r_tl */
+ tcg_gen_muli_i32(r_tl, r_tl, sizeof (trap_state));
+ tcg_gen_addi_ptr(r_tsptr, cpu_env, offsetof(CPUSPARCState, ts));
+
+ /* tsptr = env->ts[env->tl & MAXTL_MASK] */
+ {
+ TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
+ tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
+ tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
+ tcg_temp_free_ptr(r_tl_tmp);
+ }
+
+ tcg_temp_free_i32(r_tl);
+}
+#endif
+
+static void gen_edge(DisasContext *dc, TCGv dst, TCGv s1, TCGv s2,
+ int width, bool cc, bool left)
+{
+ TCGv lo1, lo2, t1, t2;
+ uint64_t amask, tabl, tabr;
+ int shift, imask, omask;
+
+ if (cc) {
+ tcg_gen_mov_tl(cpu_cc_src, s1);
+ tcg_gen_mov_tl(cpu_cc_src2, s2);
+ tcg_gen_sub_tl(cpu_cc_dst, s1, s2);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
+ dc->cc_op = CC_OP_SUB;
+ }
+
+ /* Theory of operation: there are two tables, left and right (not to
+ be confused with the left and right versions of the opcode). These
+ are indexed by the low 3 bits of the inputs. To make things "easy",
+ these tables are loaded into two constants, TABL and TABR below.
+ The operation index = (input & imask) << shift calculates the index
+ into the constant, while val = (table >> index) & omask calculates
+ the value we're looking for. */
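+    /* For example, with width 8 and left false, (s1 & 7) == 3 gives
+       index 3 << 3 = 24 and (0x0103070f1f3f7fff >> 24) & 0xff = 0x1f,
+       selecting bytes 3..7 of the aligned 8-byte group.  */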
+ switch (width) {
+ case 8:
+ imask = 0x7;
+ shift = 3;
+ omask = 0xff;
+ if (left) {
+ tabl = 0x80c0e0f0f8fcfeffULL;
+ tabr = 0xff7f3f1f0f070301ULL;
+ } else {
+ tabl = 0x0103070f1f3f7fffULL;
+ tabr = 0xfffefcf8f0e0c080ULL;
+ }
+ break;
+ case 16:
+ imask = 0x6;
+ shift = 1;
+ omask = 0xf;
+ if (left) {
+ tabl = 0x8cef;
+ tabr = 0xf731;
+ } else {
+ tabl = 0x137f;
+ tabr = 0xfec8;
+ }
+ break;
+ case 32:
+ imask = 0x4;
+ shift = 0;
+ omask = 0x3;
+ if (left) {
+ tabl = (2 << 2) | 3;
+ tabr = (3 << 2) | 1;
+ } else {
+ tabl = (1 << 2) | 3;
+ tabr = (3 << 2) | 2;
+ }
+ break;
+ default:
+ abort();
+ }
+
+ lo1 = tcg_temp_new();
+ lo2 = tcg_temp_new();
+ tcg_gen_andi_tl(lo1, s1, imask);
+ tcg_gen_andi_tl(lo2, s2, imask);
+ tcg_gen_shli_tl(lo1, lo1, shift);
+ tcg_gen_shli_tl(lo2, lo2, shift);
+
+ t1 = tcg_const_tl(tabl);
+ t2 = tcg_const_tl(tabr);
+ tcg_gen_shr_tl(lo1, t1, lo1);
+ tcg_gen_shr_tl(lo2, t2, lo2);
+ tcg_gen_andi_tl(dst, lo1, omask);
+ tcg_gen_andi_tl(lo2, lo2, omask);
+
+ amask = -8;
+ if (AM_CHECK(dc)) {
+ amask &= 0xffffffffULL;
+ }
+ tcg_gen_andi_tl(s1, s1, amask);
+ tcg_gen_andi_tl(s2, s2, amask);
+
+ /* We want to compute
+ dst = (s1 == s2 ? lo1 : lo1 & lo2).
+ We've already done dst = lo1, so this reduces to
+ dst &= (s1 == s2 ? -1 : lo2)
+ Which we perform by
+ lo2 |= -(s1 == s2)
+ dst &= lo2
+ */
+ tcg_gen_setcond_tl(TCG_COND_EQ, t1, s1, s2);
+ tcg_gen_neg_tl(t1, t1);
+ tcg_gen_or_tl(lo2, lo2, t1);
+ tcg_gen_and_tl(dst, dst, lo2);
+
+ tcg_temp_free(lo1);
+ tcg_temp_free(lo2);
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+}
+
+static void gen_alignaddr(TCGv dst, TCGv s1, TCGv s2, bool left)
+{
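+    /* VIS alignaddr{,l}: DST = (S1 + S2) & ~7, with the low three bits
+       of the sum (negated for the "little" variant) deposited into
+       GSR.align for use by faligndata below.  */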
+ TCGv tmp = tcg_temp_new();
+
+ tcg_gen_add_tl(tmp, s1, s2);
+ tcg_gen_andi_tl(dst, tmp, -8);
+ if (left) {
+ tcg_gen_neg_tl(tmp, tmp);
+ }
+ tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
+
+ tcg_temp_free(tmp);
+}
+
+static void gen_faligndata(TCGv dst, TCGv gsr, TCGv s1, TCGv s2)
+{
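+    /* VIS faligndata: extract the 8 bytes at byte offset GSR.align from
+       the 16-byte value S1:S2, i.e. compute
+       (s1 << (8 * align)) | (s2 >> (64 - 8 * align)).  */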
+ TCGv t1, t2, shift;
+
+ t1 = tcg_temp_new();
+ t2 = tcg_temp_new();
+ shift = tcg_temp_new();
+
+ tcg_gen_andi_tl(shift, gsr, 7);
+ tcg_gen_shli_tl(shift, shift, 3);
+ tcg_gen_shl_tl(t1, s1, shift);
+
+ /* A shift of 64 does not produce 0 in TCG. Divide this into a
+ shift of (up to 63) followed by a constant shift of 1. */
+ tcg_gen_xori_tl(shift, shift, 63);
+ tcg_gen_shr_tl(t2, s2, shift);
+ tcg_gen_shri_tl(t2, t2, 1);
+
+ tcg_gen_or_tl(dst, t1, t2);
+
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ tcg_temp_free(shift);
+}
+#endif
+
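+/* Abort translation of the current instruction if the CPU model lacks
+   FEATURE, branching to the common illegal_insn / nfpu_insn exits.  */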
+#define CHECK_IU_FEATURE(dc, FEATURE) \
+ if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
+ goto illegal_insn;
+#define CHECK_FPU_FEATURE(dc, FEATURE) \
+ if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
+ goto nfpu_insn;
+
+/* before an instruction, dc->pc must be static */
+static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
+{
+ unsigned int opc, rs1, rs2, rd;
+ TCGv cpu_src1, cpu_src2;
+ TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
+ TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
+ target_long simm;
+
+ opc = GET_FIELD(insn, 0, 1);
+ rd = GET_FIELD(insn, 2, 6);
+
+ switch (opc) {
+ case 0: /* branches/sethi */
+ {
+ unsigned int xop = GET_FIELD(insn, 7, 9);
+ int32_t target;
+ switch (xop) {
+#ifdef TARGET_SPARC64
+ case 0x1: /* V9 BPcc */
+ {
+ int cc;
+
+ target = GET_FIELD_SP(insn, 0, 18);
+ target = sign_extend(target, 19);
+ target <<= 2;
+ cc = GET_FIELD_SP(insn, 20, 21);
+ if (cc == 0)
+ do_branch(dc, target, insn, 0);
+ else if (cc == 2)
+ do_branch(dc, target, insn, 1);
+ else
+ goto illegal_insn;
+ goto jmp_insn;
+ }
+ case 0x3: /* V9 BPr */
+ {
+ target = GET_FIELD_SP(insn, 0, 13) |
+ (GET_FIELD_SP(insn, 20, 21) << 14);
+ target = sign_extend(target, 16);
+ target <<= 2;
+ cpu_src1 = get_src1(dc, insn);
+ do_branch_reg(dc, target, insn, cpu_src1);
+ goto jmp_insn;
+ }
+ case 0x5: /* V9 FBPcc */
+ {
+ int cc = GET_FIELD_SP(insn, 20, 21);
+ if (gen_trap_ifnofpu(dc)) {
+ goto jmp_insn;
+ }
+ target = GET_FIELD_SP(insn, 0, 18);
+ target = sign_extend(target, 19);
+ target <<= 2;
+ do_fbranch(dc, target, insn, cc);
+ goto jmp_insn;
+ }
+#else
+ case 0x7: /* CBN+x */
+ {
+ goto ncp_insn;
+ }
+#endif
+ case 0x2: /* BN+x */
+ {
+ target = GET_FIELD(insn, 10, 31);
+ target = sign_extend(target, 22);
+ target <<= 2;
+ do_branch(dc, target, insn, 0);
+ goto jmp_insn;
+ }
+ case 0x6: /* FBN+x */
+ {
+ if (gen_trap_ifnofpu(dc)) {
+ goto jmp_insn;
+ }
+ target = GET_FIELD(insn, 10, 31);
+ target = sign_extend(target, 22);
+ target <<= 2;
+ do_fbranch(dc, target, insn, 0);
+ goto jmp_insn;
+ }
+ case 0x4: /* SETHI */
+ /* Special-case %g0 because that's the canonical nop. */
+ if (rd) {
+ uint32_t value = GET_FIELD(insn, 10, 31);
+ TCGv t = gen_dest_gpr(dc, rd);
+ tcg_gen_movi_tl(t, value << 10);
+ gen_store_gpr(dc, rd, t);
+ }
+ break;
+ case 0x0: /* UNIMPL */
+ default:
+ goto illegal_insn;
+ }
+ break;
+ }
+ break;
+ case 1: /*CALL*/
+ {
+ target_long target = GET_FIELDs(insn, 2, 31) << 2;
+ TCGv o7 = gen_dest_gpr(dc, 15);
+
+ tcg_gen_movi_tl(o7, dc->pc);
+ gen_store_gpr(dc, 15, o7);
+ target += dc->pc;
+ gen_mov_pc_npc(dc);
+#ifdef TARGET_SPARC64
+ if (unlikely(AM_CHECK(dc))) {
+ target &= 0xffffffffULL;
+ }
+#endif
+ dc->npc = target;
+ }
+ goto jmp_insn;
+ case 2: /* FPU & Logical Operations */
+ {
+ unsigned int xop = GET_FIELD(insn, 7, 12);
+ TCGv cpu_dst = get_temp_tl(dc);
+ TCGv cpu_tmp0;
+
+ if (xop == 0x3a) { /* generate trap */
+ int cond = GET_FIELD(insn, 3, 6);
+ TCGv_i32 trap;
+ TCGLabel *l1 = NULL;
+ int mask;
+
+ if (cond == 0) {
+ /* Trap never. */
+ break;
+ }
+
+ save_state(dc);
+
+ if (cond != 8) {
+ /* Conditional trap. */
+ DisasCompare cmp;
+#ifdef TARGET_SPARC64
+ /* V9 icc/xcc */
+ int cc = GET_FIELD_SP(insn, 11, 12);
+ if (cc == 0) {
+ gen_compare(&cmp, 0, cond, dc);
+ } else if (cc == 2) {
+ gen_compare(&cmp, 1, cond, dc);
+ } else {
+ goto illegal_insn;
+ }
+#else
+ gen_compare(&cmp, 0, cond, dc);
+#endif
+ l1 = gen_new_label();
+ tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
+ cmp.c1, cmp.c2, l1);
+ free_compare(&cmp);
+ }
+
+ mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
+ ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
+
+ /* Don't use the normal temporaries, as they may well have
+ gone out of scope with the branch above. While we're
+ doing that we might as well pre-truncate to 32-bit. */
+ trap = tcg_temp_new_i32();
+
+ rs1 = GET_FIELD_SP(insn, 14, 18);
+ if (IS_IMM) {
+ rs2 = GET_FIELD_SP(insn, 0, 6);
+ if (rs1 == 0) {
+ tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
+ /* Signal that the trap value is fully constant. */
+ mask = 0;
+ } else {
+ TCGv t1 = gen_load_gpr(dc, rs1);
+ tcg_gen_trunc_tl_i32(trap, t1);
+ tcg_gen_addi_i32(trap, trap, rs2);
+ }
+ } else {
+ TCGv t1, t2;
+ rs2 = GET_FIELD_SP(insn, 0, 4);
+ t1 = gen_load_gpr(dc, rs1);
+ t2 = gen_load_gpr(dc, rs2);
+ tcg_gen_add_tl(t1, t1, t2);
+ tcg_gen_trunc_tl_i32(trap, t1);
+ }
+ if (mask != 0) {
+ tcg_gen_andi_i32(trap, trap, mask);
+ tcg_gen_addi_i32(trap, trap, TT_TRAP);
+ }
+
+ gen_helper_raise_exception(cpu_env, trap);
+ tcg_temp_free_i32(trap);
+
+ if (cond == 8) {
+ /* An unconditional trap ends the TB. */
+ dc->is_br = 1;
+ goto jmp_insn;
+ } else {
+ /* A conditional trap falls through to the next insn. */
+ gen_set_label(l1);
+ break;
+ }
+ } else if (xop == 0x28) {
+ rs1 = GET_FIELD(insn, 13, 17);
+ switch(rs1) {
+ case 0: /* rdy */
+#ifndef TARGET_SPARC64
+ case 0x01 ... 0x0e: /* undefined in the SPARCv8
+ manual, rdy on the microSPARC
+ II */
+ case 0x0f: /* stbar in the SPARCv8 manual,
+ rdy on the microSPARC II */
+ case 0x10 ... 0x1f: /* implementation-dependent in the
+ SPARCv8 manual, rdy on the
+ microSPARC II */
+ /* Read Asr17 */
+ if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
+ TCGv t = gen_dest_gpr(dc, rd);
+ /* Read Asr17 for a Leon3 monoprocessor */
+ tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
+ gen_store_gpr(dc, rd, t);
+ break;
+ }
+#endif
+ gen_store_gpr(dc, rd, cpu_y);
+ break;
+#ifdef TARGET_SPARC64
+ case 0x2: /* V9 rdccr */
+ update_psr(dc);
+ gen_helper_rdccr(cpu_dst, cpu_env);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x3: /* V9 rdasi */
+ tcg_gen_movi_tl(cpu_dst, dc->asi);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x4: /* V9 rdtick */
+ {
+ TCGv_ptr r_tickptr;
+ TCGv_i32 r_const;
+
+ r_tickptr = tcg_temp_new_ptr();
+ r_const = tcg_const_i32(dc->mem_idx);
+ tcg_gen_ld_ptr(r_tickptr, cpu_env,
+ offsetof(CPUSPARCState, tick));
+ gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
+ r_const);
+ tcg_temp_free_ptr(r_tickptr);
+ tcg_temp_free_i32(r_const);
+ gen_store_gpr(dc, rd, cpu_dst);
+ }
+ break;
+ case 0x5: /* V9 rdpc */
+ {
+ TCGv t = gen_dest_gpr(dc, rd);
+ if (unlikely(AM_CHECK(dc))) {
+ tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
+ } else {
+ tcg_gen_movi_tl(t, dc->pc);
+ }
+ gen_store_gpr(dc, rd, t);
+ }
+ break;
+ case 0x6: /* V9 rdfprs */
+ tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0xf: /* V9 membar */
+ break; /* no effect */
+ case 0x13: /* Graphics Status */
+ if (gen_trap_ifnofpu(dc)) {
+ goto jmp_insn;
+ }
+ gen_store_gpr(dc, rd, cpu_gsr);
+ break;
+ case 0x16: /* Softint */
+ tcg_gen_ld32s_tl(cpu_dst, cpu_env,
+ offsetof(CPUSPARCState, softint));
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x17: /* Tick compare */
+ gen_store_gpr(dc, rd, cpu_tick_cmpr);
+ break;
+ case 0x18: /* System tick */
+ {
+ TCGv_ptr r_tickptr;
+ TCGv_i32 r_const;
+
+ r_tickptr = tcg_temp_new_ptr();
+ r_const = tcg_const_i32(dc->mem_idx);
+ tcg_gen_ld_ptr(r_tickptr, cpu_env,
+ offsetof(CPUSPARCState, stick));
+ gen_helper_tick_get_count(cpu_dst, cpu_env, r_tickptr,
+ r_const);
+ tcg_temp_free_ptr(r_tickptr);
+ tcg_temp_free_i32(r_const);
+ gen_store_gpr(dc, rd, cpu_dst);
+ }
+ break;
+ case 0x19: /* System tick compare */
+ gen_store_gpr(dc, rd, cpu_stick_cmpr);
+ break;
+ case 0x10: /* Performance Control */
+ case 0x11: /* Performance Instrumentation Counter */
+ case 0x12: /* Dispatch Control */
+ case 0x14: /* Softint set, WO */
+ case 0x15: /* Softint clear, WO */
+#endif
+ default:
+ goto illegal_insn;
+ }
+#if !defined(CONFIG_USER_ONLY)
+ } else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
+#ifndef TARGET_SPARC64
+ if (!supervisor(dc)) {
+ goto priv_insn;
+ }
+ update_psr(dc);
+ gen_helper_rdpsr(cpu_dst, cpu_env);
+#else
+ CHECK_IU_FEATURE(dc, HYPV);
+ if (!hypervisor(dc))
+ goto priv_insn;
+ rs1 = GET_FIELD(insn, 13, 17);
+ switch (rs1) {
+ case 0: // hpstate
+ // gen_op_rdhpstate();
+ break;
+ case 1: // htstate
+ // gen_op_rdhtstate();
+ break;
+ case 3: // hintp
+ tcg_gen_mov_tl(cpu_dst, cpu_hintp);
+ break;
+ case 5: // htba
+ tcg_gen_mov_tl(cpu_dst, cpu_htba);
+ break;
+ case 6: // hver
+ tcg_gen_mov_tl(cpu_dst, cpu_hver);
+ break;
+ case 31: // hstick_cmpr
+ tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
+ break;
+ default:
+ goto illegal_insn;
+ }
+#endif
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ } else if (xop == 0x2a) { /* rdwim / V9 rdpr */
+ if (!supervisor(dc)) {
+ goto priv_insn;
+ }
+ cpu_tmp0 = get_temp_tl(dc);
+#ifdef TARGET_SPARC64
+ rs1 = GET_FIELD(insn, 13, 17);
+ switch (rs1) {
+ case 0: // tpc
+ {
+ TCGv_ptr r_tsptr;
+
+ r_tsptr = tcg_temp_new_ptr();
+ gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+ tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
+ offsetof(trap_state, tpc));
+ tcg_temp_free_ptr(r_tsptr);
+ }
+ break;
+ case 1: // tnpc
+ {
+ TCGv_ptr r_tsptr;
+
+ r_tsptr = tcg_temp_new_ptr();
+ gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+ tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
+ offsetof(trap_state, tnpc));
+ tcg_temp_free_ptr(r_tsptr);
+ }
+ break;
+ case 2: // tstate
+ {
+ TCGv_ptr r_tsptr;
+
+ r_tsptr = tcg_temp_new_ptr();
+ gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+ tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
+ offsetof(trap_state, tstate));
+ tcg_temp_free_ptr(r_tsptr);
+ }
+ break;
+ case 3: // tt
+ {
+ TCGv_ptr r_tsptr = tcg_temp_new_ptr();
+
+ gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+ tcg_gen_ld32s_tl(cpu_tmp0, r_tsptr,
+ offsetof(trap_state, tt));
+ tcg_temp_free_ptr(r_tsptr);
+ }
+ break;
+ case 4: // tick
+ {
+ TCGv_ptr r_tickptr;
+ TCGv_i32 r_const;
+
+ r_tickptr = tcg_temp_new_ptr();
+ r_const = tcg_const_i32(dc->mem_idx);
+ tcg_gen_ld_ptr(r_tickptr, cpu_env,
+ offsetof(CPUSPARCState, tick));
+ gen_helper_tick_get_count(cpu_tmp0, cpu_env,
+ r_tickptr, r_const);
+ tcg_temp_free_ptr(r_tickptr);
+ tcg_temp_free_i32(r_const);
+ }
+ break;
+ case 5: // tba
+ tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
+ break;
+ case 6: // pstate
+ tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
+ offsetof(CPUSPARCState, pstate));
+ break;
+ case 7: // tl
+ tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
+ offsetof(CPUSPARCState, tl));
+ break;
+ case 8: // pil
+ tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
+ offsetof(CPUSPARCState, psrpil));
+ break;
+ case 9: // cwp
+ gen_helper_rdcwp(cpu_tmp0, cpu_env);
+ break;
+ case 10: // cansave
+ tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
+ offsetof(CPUSPARCState, cansave));
+ break;
+ case 11: // canrestore
+ tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
+ offsetof(CPUSPARCState, canrestore));
+ break;
+ case 12: // cleanwin
+ tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
+ offsetof(CPUSPARCState, cleanwin));
+ break;
+ case 13: // otherwin
+ tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
+ offsetof(CPUSPARCState, otherwin));
+ break;
+ case 14: // wstate
+ tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
+ offsetof(CPUSPARCState, wstate));
+ break;
+ case 16: // UA2005 gl
+ CHECK_IU_FEATURE(dc, GL);
+ tcg_gen_ld32s_tl(cpu_tmp0, cpu_env,
+ offsetof(CPUSPARCState, gl));
+ break;
+ case 26: // UA2005 strand status
+ CHECK_IU_FEATURE(dc, HYPV);
+ if (!hypervisor(dc))
+ goto priv_insn;
+ tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
+ break;
+ case 31: // ver
+ tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
+ break;
+ case 15: // fq
+ default:
+ goto illegal_insn;
+ }
+#else
+ tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
+#endif
+ gen_store_gpr(dc, rd, cpu_tmp0);
+ break;
+ } else if (xop == 0x2b) { /* rdtbr / V9 flushw */
+#ifdef TARGET_SPARC64
+ gen_helper_flushw(cpu_env);
+#else
+ if (!supervisor(dc))
+ goto priv_insn;
+ gen_store_gpr(dc, rd, cpu_tbr);
+#endif
+ break;
+#endif
+ } else if (xop == 0x34) { /* FPU Operations */
+ if (gen_trap_ifnofpu(dc)) {
+ goto jmp_insn;
+ }
+ gen_op_clear_ieee_excp_and_FTT();
+ rs1 = GET_FIELD(insn, 13, 17);
+ rs2 = GET_FIELD(insn, 27, 31);
+ xop = GET_FIELD(insn, 18, 26);
+
+ switch (xop) {
+ case 0x1: /* fmovs */
+ cpu_src1_32 = gen_load_fpr_F(dc, rs2);
+ gen_store_fpr_F(dc, rd, cpu_src1_32);
+ break;
+ case 0x5: /* fnegs */
+ gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
+ break;
+ case 0x9: /* fabss */
+ gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
+ break;
+ case 0x29: /* fsqrts */
+ CHECK_FPU_FEATURE(dc, FSQRT);
+ gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
+ break;
+ case 0x2a: /* fsqrtd */
+ CHECK_FPU_FEATURE(dc, FSQRT);
+ gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
+ break;
+ case 0x2b: /* fsqrtq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
+ break;
+ case 0x41: /* fadds */
+ gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
+ break;
+ case 0x42: /* faddd */
+ gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
+ break;
+ case 0x43: /* faddq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
+ break;
+ case 0x45: /* fsubs */
+ gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
+ break;
+ case 0x46: /* fsubd */
+ gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
+ break;
+ case 0x47: /* fsubq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
+ break;
+ case 0x49: /* fmuls */
+ CHECK_FPU_FEATURE(dc, FMUL);
+ gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
+ break;
+ case 0x4a: /* fmuld */
+ CHECK_FPU_FEATURE(dc, FMUL);
+ gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
+ break;
+ case 0x4b: /* fmulq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ CHECK_FPU_FEATURE(dc, FMUL);
+ gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
+ break;
+ case 0x4d: /* fdivs */
+ gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
+ break;
+ case 0x4e: /* fdivd */
+ gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
+ break;
+ case 0x4f: /* fdivq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
+ break;
+ case 0x69: /* fsmuld */
+ CHECK_FPU_FEATURE(dc, FSMULD);
+ gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
+ break;
+ case 0x6e: /* fdmulq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
+ break;
+ case 0xc4: /* fitos */
+ gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
+ break;
+ case 0xc6: /* fdtos */
+ gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
+ break;
+ case 0xc7: /* fqtos */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
+ break;
+ case 0xc8: /* fitod */
+ gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
+ break;
+ case 0xc9: /* fstod */
+ gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
+ break;
+ case 0xcb: /* fqtod */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
+ break;
+ case 0xcc: /* fitoq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
+ break;
+ case 0xcd: /* fstoq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
+ break;
+ case 0xce: /* fdtoq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
+ break;
+ case 0xd1: /* fstoi */
+ gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
+ break;
+ case 0xd2: /* fdtoi */
+ gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
+ break;
+ case 0xd3: /* fqtoi */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
+ break;
+#ifdef TARGET_SPARC64
+ case 0x2: /* V9 fmovd */
+ cpu_src1_64 = gen_load_fpr_D(dc, rs2);
+ gen_store_fpr_D(dc, rd, cpu_src1_64);
+ break;
+ case 0x3: /* V9 fmovq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_move_Q(dc, rd, rs2);
+ break;
+ case 0x6: /* V9 fnegd */
+ gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
+ break;
+ case 0x7: /* V9 fnegq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
+ break;
+ case 0xa: /* V9 fabsd */
+ gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
+ break;
+ case 0xb: /* V9 fabsq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
+ break;
+ case 0x81: /* V9 fstox */
+ gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
+ break;
+ case 0x82: /* V9 fdtox */
+ gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
+ break;
+ case 0x83: /* V9 fqtox */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
+ break;
+ case 0x84: /* V9 fxtos */
+ gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
+ break;
+ case 0x88: /* V9 fxtod */
+ gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
+ break;
+ case 0x8c: /* V9 fxtoq */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
+ break;
+#endif
+ default:
+ goto illegal_insn;
+ }
+ } else if (xop == 0x35) { /* FPU Operations */
+#ifdef TARGET_SPARC64
+ int cond;
+#endif
+ if (gen_trap_ifnofpu(dc)) {
+ goto jmp_insn;
+ }
+ gen_op_clear_ieee_excp_and_FTT();
+ rs1 = GET_FIELD(insn, 13, 17);
+ rs2 = GET_FIELD(insn, 27, 31);
+ xop = GET_FIELD(insn, 18, 26);
+
+#ifdef TARGET_SPARC64
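+/* FMOVR: copy fp register rs2 (of size SZ) to rd when the contents of
+   integer register rs1 satisfy the register condition encoded in the
+   instruction.  */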
+#define FMOVR(sz) \
+ do { \
+ DisasCompare cmp; \
+ cond = GET_FIELD_SP(insn, 10, 12); \
+ cpu_src1 = get_src1(dc, insn); \
+ gen_compare_reg(&cmp, cond, cpu_src1); \
+ gen_fmov##sz(dc, &cmp, rd, rs2); \
+ free_compare(&cmp); \
+ } while (0)
+
+ if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
+ FMOVR(s);
+ break;
+ } else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
+ FMOVR(d);
+ break;
+ } else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ FMOVR(q);
+ break;
+ }
+#undef FMOVR
+#endif
+ switch (xop) {
+#ifdef TARGET_SPARC64
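+/* FMOVCC: copy fp register rs2 (of size SZ) to rd when floating-point
+   condition code %fcc<fcc> satisfies the condition encoded in the
+   instruction.  */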
+#define FMOVCC(fcc, sz) \
+ do { \
+ DisasCompare cmp; \
+ cond = GET_FIELD_SP(insn, 14, 17); \
+ gen_fcompare(&cmp, fcc, cond); \
+ gen_fmov##sz(dc, &cmp, rd, rs2); \
+ free_compare(&cmp); \
+ } while (0)
+
+ case 0x001: /* V9 fmovscc %fcc0 */
+ FMOVCC(0, s);
+ break;
+ case 0x002: /* V9 fmovdcc %fcc0 */
+ FMOVCC(0, d);
+ break;
+ case 0x003: /* V9 fmovqcc %fcc0 */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ FMOVCC(0, q);
+ break;
+ case 0x041: /* V9 fmovscc %fcc1 */
+ FMOVCC(1, s);
+ break;
+ case 0x042: /* V9 fmovdcc %fcc1 */
+ FMOVCC(1, d);
+ break;
+ case 0x043: /* V9 fmovqcc %fcc1 */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ FMOVCC(1, q);
+ break;
+ case 0x081: /* V9 fmovscc %fcc2 */
+ FMOVCC(2, s);
+ break;
+ case 0x082: /* V9 fmovdcc %fcc2 */
+ FMOVCC(2, d);
+ break;
+ case 0x083: /* V9 fmovqcc %fcc2 */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ FMOVCC(2, q);
+ break;
+ case 0x0c1: /* V9 fmovscc %fcc3 */
+ FMOVCC(3, s);
+ break;
+ case 0x0c2: /* V9 fmovdcc %fcc3 */
+ FMOVCC(3, d);
+ break;
+ case 0x0c3: /* V9 fmovqcc %fcc3 */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ FMOVCC(3, q);
+ break;
+#undef FMOVCC
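+/* FMOVCC (integer flavour): copy fp register rs2 (of size SZ) to rd when
+   the integer condition codes (%icc for XCC == 0, %xcc for XCC == 1)
+   satisfy the condition encoded in the instruction.  */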
+#define FMOVCC(xcc, sz) \
+ do { \
+ DisasCompare cmp; \
+ cond = GET_FIELD_SP(insn, 14, 17); \
+ gen_compare(&cmp, xcc, cond, dc); \
+ gen_fmov##sz(dc, &cmp, rd, rs2); \
+ free_compare(&cmp); \
+ } while (0)
+
+ case 0x101: /* V9 fmovscc %icc */
+ FMOVCC(0, s);
+ break;
+ case 0x102: /* V9 fmovdcc %icc */
+ FMOVCC(0, d);
+ break;
+ case 0x103: /* V9 fmovqcc %icc */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ FMOVCC(0, q);
+ break;
+ case 0x181: /* V9 fmovscc %xcc */
+ FMOVCC(1, s);
+ break;
+ case 0x182: /* V9 fmovdcc %xcc */
+ FMOVCC(1, d);
+ break;
+ case 0x183: /* V9 fmovqcc %xcc */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ FMOVCC(1, q);
+ break;
+#undef FMOVCC
+#endif
+ case 0x51: /* fcmps, V9 %fcc */
+ cpu_src1_32 = gen_load_fpr_F(dc, rs1);
+ cpu_src2_32 = gen_load_fpr_F(dc, rs2);
+ gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
+ break;
+ case 0x52: /* fcmpd, V9 %fcc */
+ cpu_src1_64 = gen_load_fpr_D(dc, rs1);
+ cpu_src2_64 = gen_load_fpr_D(dc, rs2);
+ gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
+ break;
+ case 0x53: /* fcmpq, V9 %fcc */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_op_load_fpr_QT0(QFPREG(rs1));
+ gen_op_load_fpr_QT1(QFPREG(rs2));
+ gen_op_fcmpq(rd & 3);
+ break;
+ case 0x55: /* fcmpes, V9 %fcc */
+ cpu_src1_32 = gen_load_fpr_F(dc, rs1);
+ cpu_src2_32 = gen_load_fpr_F(dc, rs2);
+ gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
+ break;
+ case 0x56: /* fcmped, V9 %fcc */
+ cpu_src1_64 = gen_load_fpr_D(dc, rs1);
+ cpu_src2_64 = gen_load_fpr_D(dc, rs2);
+ gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
+ break;
+ case 0x57: /* fcmpeq, V9 %fcc */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_op_load_fpr_QT0(QFPREG(rs1));
+ gen_op_load_fpr_QT1(QFPREG(rs2));
+ gen_op_fcmpeq(rd & 3);
+ break;
+ default:
+ goto illegal_insn;
+ }
+ } else if (xop == 0x2) {
+ TCGv dst = gen_dest_gpr(dc, rd);
+ rs1 = GET_FIELD(insn, 13, 17);
+ if (rs1 == 0) {
+ /* clr/mov shortcut : or %g0, x, y -> mov x, y */
+ if (IS_IMM) { /* immediate */
+ simm = GET_FIELDs(insn, 19, 31);
+ tcg_gen_movi_tl(dst, simm);
+ gen_store_gpr(dc, rd, dst);
+ } else { /* register */
+ rs2 = GET_FIELD(insn, 27, 31);
+ if (rs2 == 0) {
+ tcg_gen_movi_tl(dst, 0);
+ gen_store_gpr(dc, rd, dst);
+ } else {
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_store_gpr(dc, rd, cpu_src2);
+ }
+ }
+ } else {
+ cpu_src1 = get_src1(dc, insn);
+ if (IS_IMM) { /* immediate */
+ simm = GET_FIELDs(insn, 19, 31);
+ tcg_gen_ori_tl(dst, cpu_src1, simm);
+ gen_store_gpr(dc, rd, dst);
+ } else { /* register */
+ rs2 = GET_FIELD(insn, 27, 31);
+ if (rs2 == 0) {
+ /* mov shortcut: or x, %g0, y -> mov x, y */
+ gen_store_gpr(dc, rd, cpu_src1);
+ } else {
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
+ gen_store_gpr(dc, rd, dst);
+ }
+ }
+ }
+#ifdef TARGET_SPARC64
+ } else if (xop == 0x25) { /* sll, V9 sllx */
+ cpu_src1 = get_src1(dc, insn);
+ if (IS_IMM) { /* immediate */
+ simm = GET_FIELDs(insn, 20, 31);
+ if (insn & (1 << 12)) {
+ tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
+ } else {
+ tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
+ }
+ } else { /* register */
+ rs2 = GET_FIELD(insn, 27, 31);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ cpu_tmp0 = get_temp_tl(dc);
+ if (insn & (1 << 12)) {
+ tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
+ } else {
+ tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
+ }
+ tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
+ }
+ gen_store_gpr(dc, rd, cpu_dst);
+ } else if (xop == 0x26) { /* srl, V9 srlx */
+ cpu_src1 = get_src1(dc, insn);
+ if (IS_IMM) { /* immediate */
+ simm = GET_FIELDs(insn, 20, 31);
+ if (insn & (1 << 12)) {
+ tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
+ } else {
+ tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
+ tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
+ }
+ } else { /* register */
+ rs2 = GET_FIELD(insn, 27, 31);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ cpu_tmp0 = get_temp_tl(dc);
+ if (insn & (1 << 12)) {
+ tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
+ tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
+ } else {
+ tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
+ tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
+ tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
+ }
+ }
+ gen_store_gpr(dc, rd, cpu_dst);
+ } else if (xop == 0x27) { /* sra, V9 srax */
+ cpu_src1 = get_src1(dc, insn);
+ if (IS_IMM) { /* immediate */
+ simm = GET_FIELDs(insn, 20, 31);
+ if (insn & (1 << 12)) {
+ tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
+ } else {
+ tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
+ tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
+ }
+ } else { /* register */
+ rs2 = GET_FIELD(insn, 27, 31);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ cpu_tmp0 = get_temp_tl(dc);
+ if (insn & (1 << 12)) {
+ tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
+ tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
+ } else {
+ tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
+ tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
+ tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
+ }
+ }
+ gen_store_gpr(dc, rd, cpu_dst);
+#endif
+ } else if (xop < 0x36) {
+ if (xop < 0x20) {
+ cpu_src1 = get_src1(dc, insn);
+ cpu_src2 = get_src2(dc, insn);
+ switch (xop & ~0x10) {
+ case 0x0: /* add */
+ if (xop & 0x10) {
+ gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
+ dc->cc_op = CC_OP_ADD;
+ } else {
+ tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
+ }
+ break;
+ case 0x1: /* and */
+ tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
+ if (xop & 0x10) {
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
+ dc->cc_op = CC_OP_LOGIC;
+ }
+ break;
+ case 0x2: /* or */
+ tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
+ if (xop & 0x10) {
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
+ dc->cc_op = CC_OP_LOGIC;
+ }
+ break;
+ case 0x3: /* xor */
+ tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
+ if (xop & 0x10) {
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
+ dc->cc_op = CC_OP_LOGIC;
+ }
+ break;
+ case 0x4: /* sub */
+ if (xop & 0x10) {
+ gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
+ dc->cc_op = CC_OP_SUB;
+ } else {
+ tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
+ }
+ break;
+ case 0x5: /* andn */
+ tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
+ if (xop & 0x10) {
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
+ dc->cc_op = CC_OP_LOGIC;
+ }
+ break;
+ case 0x6: /* orn */
+ tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
+ if (xop & 0x10) {
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
+ dc->cc_op = CC_OP_LOGIC;
+ }
+ break;
+ case 0x7: /* xorn */
+ tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
+ if (xop & 0x10) {
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
+ dc->cc_op = CC_OP_LOGIC;
+ }
+ break;
+ case 0x8: /* addx, V9 addc */
+ gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
+ (xop & 0x10));
+ break;
+#ifdef TARGET_SPARC64
+ case 0x9: /* V9 mulx */
+ tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
+ break;
+#endif
+ case 0xa: /* umul */
+ CHECK_IU_FEATURE(dc, MUL);
+ gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
+ if (xop & 0x10) {
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
+ dc->cc_op = CC_OP_LOGIC;
+ }
+ break;
+ case 0xb: /* smul */
+ CHECK_IU_FEATURE(dc, MUL);
+ gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
+ if (xop & 0x10) {
+ tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
+ dc->cc_op = CC_OP_LOGIC;
+ }
+ break;
+ case 0xc: /* subx, V9 subc */
+ gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
+ (xop & 0x10));
+ break;
+#ifdef TARGET_SPARC64
+ case 0xd: /* V9 udivx */
+ gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
+ break;
+#endif
+ case 0xe: /* udiv */
+ CHECK_IU_FEATURE(dc, DIV);
+ if (xop & 0x10) {
+ gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
+ cpu_src2);
+ dc->cc_op = CC_OP_DIV;
+ } else {
+ gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
+ cpu_src2);
+ }
+ break;
+ case 0xf: /* sdiv */
+ CHECK_IU_FEATURE(dc, DIV);
+ if (xop & 0x10) {
+ gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
+ cpu_src2);
+ dc->cc_op = CC_OP_DIV;
+ } else {
+ gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
+ cpu_src2);
+ }
+ break;
+ default:
+ goto illegal_insn;
+ }
+ gen_store_gpr(dc, rd, cpu_dst);
+ } else {
+ cpu_src1 = get_src1(dc, insn);
+ cpu_src2 = get_src2(dc, insn);
+ switch (xop) {
+ case 0x20: /* taddcc */
+ gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
+ gen_store_gpr(dc, rd, cpu_dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
+ dc->cc_op = CC_OP_TADD;
+ break;
+ case 0x21: /* tsubcc */
+ gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
+ gen_store_gpr(dc, rd, cpu_dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
+ dc->cc_op = CC_OP_TSUB;
+ break;
+ case 0x22: /* taddcctv */
+ gen_helper_taddcctv(cpu_dst, cpu_env,
+ cpu_src1, cpu_src2);
+ gen_store_gpr(dc, rd, cpu_dst);
+ dc->cc_op = CC_OP_TADDTV;
+ break;
+ case 0x23: /* tsubcctv */
+ gen_helper_tsubcctv(cpu_dst, cpu_env,
+ cpu_src1, cpu_src2);
+ gen_store_gpr(dc, rd, cpu_dst);
+ dc->cc_op = CC_OP_TSUBTV;
+ break;
+ case 0x24: /* mulscc */
+ update_psr(dc);
+ gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
+ gen_store_gpr(dc, rd, cpu_dst);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
+ dc->cc_op = CC_OP_ADD;
+ break;
+#ifndef TARGET_SPARC64
+ case 0x25: /* sll */
+ if (IS_IMM) { /* immediate */
+ simm = GET_FIELDs(insn, 20, 31);
+ tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
+ } else { /* register */
+ cpu_tmp0 = get_temp_tl(dc);
+ tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
+ tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
+ }
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x26: /* srl */
+ if (IS_IMM) { /* immediate */
+ simm = GET_FIELDs(insn, 20, 31);
+ tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
+ } else { /* register */
+ cpu_tmp0 = get_temp_tl(dc);
+ tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
+ tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
+ }
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x27: /* sra */
+ if (IS_IMM) { /* immediate */
+ simm = GET_FIELDs(insn, 20, 31);
+ tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
+ } else { /* register */
+ cpu_tmp0 = get_temp_tl(dc);
+ tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
+ tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
+ }
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+#endif
+ case 0x30:
+ {
+ cpu_tmp0 = get_temp_tl(dc);
+ switch(rd) {
+ case 0: /* wry */
+ tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
+ tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
+ break;
+#ifndef TARGET_SPARC64
+                    case 0x01 ... 0x0f: /* undefined in the SPARCv8 manual,
+                                           nop on the microSPARC II */
+                    case 0x10 ... 0x1f: /* implementation-dependent in the
+                                           SPARCv8 manual, nop on the
+                                           microSPARC II */
+ if ((rd == 0x13) && (dc->def->features &
+ CPU_FEATURE_POWERDOWN)) {
+ /* LEON3 power-down */
+ save_state(dc);
+ gen_helper_power_down(cpu_env);
+ }
+ break;
+#else
+ case 0x2: /* V9 wrccr */
+ tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
+ gen_helper_wrccr(cpu_env, cpu_tmp0);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
+ dc->cc_op = CC_OP_FLAGS;
+ break;
+ case 0x3: /* V9 wrasi */
+ tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
+ tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xff);
+ tcg_gen_st32_tl(cpu_tmp0, cpu_env,
+ offsetof(CPUSPARCState, asi));
+ /* End TB to notice changed ASI. */
+ save_state(dc);
+ gen_op_next_insn();
+ tcg_gen_exit_tb(0);
+ dc->is_br = 1;
+ break;
+ case 0x6: /* V9 wrfprs */
+ tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
+ tcg_gen_trunc_tl_i32(cpu_fprs, cpu_tmp0);
+ dc->fprs_dirty = 0;
+ save_state(dc);
+ gen_op_next_insn();
+ tcg_gen_exit_tb(0);
+ dc->is_br = 1;
+ break;
+ case 0xf: /* V9 sir, nop if user */
+#if !defined(CONFIG_USER_ONLY)
+ if (supervisor(dc)) {
+ ; // XXX
+ }
+#endif
+ break;
+ case 0x13: /* Graphics Status */
+ if (gen_trap_ifnofpu(dc)) {
+ goto jmp_insn;
+ }
+ tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
+ break;
+ case 0x14: /* Softint set */
+ if (!supervisor(dc))
+ goto illegal_insn;
+ tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
+ gen_helper_set_softint(cpu_env, cpu_tmp0);
+ break;
+ case 0x15: /* Softint clear */
+ if (!supervisor(dc))
+ goto illegal_insn;
+ tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
+ gen_helper_clear_softint(cpu_env, cpu_tmp0);
+ break;
+ case 0x16: /* Softint write */
+ if (!supervisor(dc))
+ goto illegal_insn;
+ tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
+ gen_helper_write_softint(cpu_env, cpu_tmp0);
+ break;
+ case 0x17: /* Tick compare */
+#if !defined(CONFIG_USER_ONLY)
+ if (!supervisor(dc))
+ goto illegal_insn;
+#endif
+ {
+ TCGv_ptr r_tickptr;
+
+ tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
+ cpu_src2);
+ r_tickptr = tcg_temp_new_ptr();
+ tcg_gen_ld_ptr(r_tickptr, cpu_env,
+ offsetof(CPUSPARCState, tick));
+ gen_helper_tick_set_limit(r_tickptr,
+ cpu_tick_cmpr);
+ tcg_temp_free_ptr(r_tickptr);
+ }
+ break;
+ case 0x18: /* System tick */
+#if !defined(CONFIG_USER_ONLY)
+ if (!supervisor(dc))
+ goto illegal_insn;
+#endif
+ {
+ TCGv_ptr r_tickptr;
+
+ tcg_gen_xor_tl(cpu_tmp0, cpu_src1,
+ cpu_src2);
+ r_tickptr = tcg_temp_new_ptr();
+ tcg_gen_ld_ptr(r_tickptr, cpu_env,
+ offsetof(CPUSPARCState, stick));
+ gen_helper_tick_set_count(r_tickptr,
+ cpu_tmp0);
+ tcg_temp_free_ptr(r_tickptr);
+ }
+ break;
+ case 0x19: /* System tick compare */
+#if !defined(CONFIG_USER_ONLY)
+ if (!supervisor(dc))
+ goto illegal_insn;
+#endif
+ {
+ TCGv_ptr r_tickptr;
+
+ tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
+ cpu_src2);
+ r_tickptr = tcg_temp_new_ptr();
+ tcg_gen_ld_ptr(r_tickptr, cpu_env,
+ offsetof(CPUSPARCState, stick));
+ gen_helper_tick_set_limit(r_tickptr,
+ cpu_stick_cmpr);
+ tcg_temp_free_ptr(r_tickptr);
+ }
+ break;
+
+ case 0x10: /* Performance Control */
+ case 0x11: /* Performance Instrumentation
+ Counter */
+ case 0x12: /* Dispatch Control */
+#endif
+ default:
+ goto illegal_insn;
+ }
+ }
+ break;
+#if !defined(CONFIG_USER_ONLY)
+ case 0x31: /* wrpsr, V9 saved, restored */
+ {
+ if (!supervisor(dc))
+ goto priv_insn;
+#ifdef TARGET_SPARC64
+ switch (rd) {
+ case 0:
+ gen_helper_saved(cpu_env);
+ break;
+ case 1:
+ gen_helper_restored(cpu_env);
+ break;
+ case 2: /* UA2005 allclean */
+ case 3: /* UA2005 otherw */
+ case 4: /* UA2005 normalw */
+ case 5: /* UA2005 invalw */
+ // XXX
+ default:
+ goto illegal_insn;
+ }
+#else
+ cpu_tmp0 = get_temp_tl(dc);
+ tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
+ gen_helper_wrpsr(cpu_env, cpu_tmp0);
+ tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
+ dc->cc_op = CC_OP_FLAGS;
+ save_state(dc);
+ gen_op_next_insn();
+ tcg_gen_exit_tb(0);
+ dc->is_br = 1;
+#endif
+ }
+ break;
+ case 0x32: /* wrwim, V9 wrpr */
+ {
+ if (!supervisor(dc))
+ goto priv_insn;
+ cpu_tmp0 = get_temp_tl(dc);
+ tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
+#ifdef TARGET_SPARC64
+ switch (rd) {
+ case 0: // tpc
+ {
+ TCGv_ptr r_tsptr;
+
+ r_tsptr = tcg_temp_new_ptr();
+ gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+ tcg_gen_st_tl(cpu_tmp0, r_tsptr,
+ offsetof(trap_state, tpc));
+ tcg_temp_free_ptr(r_tsptr);
+ }
+ break;
+ case 1: // tnpc
+ {
+ TCGv_ptr r_tsptr;
+
+ r_tsptr = tcg_temp_new_ptr();
+ gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+ tcg_gen_st_tl(cpu_tmp0, r_tsptr,
+ offsetof(trap_state, tnpc));
+ tcg_temp_free_ptr(r_tsptr);
+ }
+ break;
+ case 2: // tstate
+ {
+ TCGv_ptr r_tsptr;
+
+ r_tsptr = tcg_temp_new_ptr();
+ gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+ tcg_gen_st_tl(cpu_tmp0, r_tsptr,
+ offsetof(trap_state,
+ tstate));
+ tcg_temp_free_ptr(r_tsptr);
+ }
+ break;
+ case 3: // tt
+ {
+ TCGv_ptr r_tsptr;
+
+ r_tsptr = tcg_temp_new_ptr();
+ gen_load_trap_state_at_tl(r_tsptr, cpu_env);
+ tcg_gen_st32_tl(cpu_tmp0, r_tsptr,
+ offsetof(trap_state, tt));
+ tcg_temp_free_ptr(r_tsptr);
+ }
+ break;
+ case 4: // tick
+ {
+ TCGv_ptr r_tickptr;
+
+ r_tickptr = tcg_temp_new_ptr();
+ tcg_gen_ld_ptr(r_tickptr, cpu_env,
+ offsetof(CPUSPARCState, tick));
+ gen_helper_tick_set_count(r_tickptr,
+ cpu_tmp0);
+ tcg_temp_free_ptr(r_tickptr);
+ }
+ break;
+ case 5: // tba
+ tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
+ break;
+ case 6: // pstate
+ save_state(dc);
+ gen_helper_wrpstate(cpu_env, cpu_tmp0);
+ dc->npc = DYNAMIC_PC;
+ break;
+ case 7: // tl
+ save_state(dc);
+ tcg_gen_st32_tl(cpu_tmp0, cpu_env,
+ offsetof(CPUSPARCState, tl));
+ dc->npc = DYNAMIC_PC;
+ break;
+ case 8: // pil
+ gen_helper_wrpil(cpu_env, cpu_tmp0);
+ break;
+ case 9: // cwp
+ gen_helper_wrcwp(cpu_env, cpu_tmp0);
+ break;
+ case 10: // cansave
+ tcg_gen_st32_tl(cpu_tmp0, cpu_env,
+ offsetof(CPUSPARCState,
+ cansave));
+ break;
+ case 11: // canrestore
+ tcg_gen_st32_tl(cpu_tmp0, cpu_env,
+ offsetof(CPUSPARCState,
+ canrestore));
+ break;
+ case 12: // cleanwin
+ tcg_gen_st32_tl(cpu_tmp0, cpu_env,
+ offsetof(CPUSPARCState,
+ cleanwin));
+ break;
+ case 13: // otherwin
+ tcg_gen_st32_tl(cpu_tmp0, cpu_env,
+ offsetof(CPUSPARCState,
+ otherwin));
+ break;
+ case 14: // wstate
+ tcg_gen_st32_tl(cpu_tmp0, cpu_env,
+ offsetof(CPUSPARCState,
+ wstate));
+ break;
+ case 16: // UA2005 gl
+ CHECK_IU_FEATURE(dc, GL);
+ tcg_gen_st32_tl(cpu_tmp0, cpu_env,
+ offsetof(CPUSPARCState, gl));
+ break;
+ case 26: // UA2005 strand status
+ CHECK_IU_FEATURE(dc, HYPV);
+ if (!hypervisor(dc))
+ goto priv_insn;
+ tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
+ break;
+ default:
+ goto illegal_insn;
+ }
+#else
+ tcg_gen_trunc_tl_i32(cpu_wim, cpu_tmp0);
+ if (dc->def->nwindows != 32) {
+ tcg_gen_andi_tl(cpu_wim, cpu_wim,
+ (1 << dc->def->nwindows) - 1);
+ }
+#endif
+ }
+ break;
+ case 0x33: /* wrtbr, UA2005 wrhpr */
+ {
+#ifndef TARGET_SPARC64
+ if (!supervisor(dc))
+ goto priv_insn;
+ tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
+#else
+ CHECK_IU_FEATURE(dc, HYPV);
+ if (!hypervisor(dc))
+ goto priv_insn;
+ cpu_tmp0 = get_temp_tl(dc);
+ tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
+ switch (rd) {
+ case 0: // hpstate
+ // XXX gen_op_wrhpstate();
+ save_state(dc);
+ gen_op_next_insn();
+ tcg_gen_exit_tb(0);
+ dc->is_br = 1;
+ break;
+ case 1: // htstate
+ // XXX gen_op_wrhtstate();
+ break;
+ case 3: // hintp
+ tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
+ break;
+ case 5: // htba
+ tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
+ break;
+ case 31: // hstick_cmpr
+ {
+ TCGv_ptr r_tickptr;
+
+ tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
+ r_tickptr = tcg_temp_new_ptr();
+ tcg_gen_ld_ptr(r_tickptr, cpu_env,
+ offsetof(CPUSPARCState, hstick));
+ gen_helper_tick_set_limit(r_tickptr,
+ cpu_hstick_cmpr);
+ tcg_temp_free_ptr(r_tickptr);
+ }
+ break;
+ case 6: // hver readonly
+ default:
+ goto illegal_insn;
+ }
+#endif
+ }
+ break;
+#endif
+#ifdef TARGET_SPARC64
+ case 0x2c: /* V9 movcc */
+ {
+ int cc = GET_FIELD_SP(insn, 11, 12);
+ int cond = GET_FIELD_SP(insn, 14, 17);
+ DisasCompare cmp;
+ TCGv dst;
+
+ if (insn & (1 << 18)) {
+ if (cc == 0) {
+ gen_compare(&cmp, 0, cond, dc);
+ } else if (cc == 2) {
+ gen_compare(&cmp, 1, cond, dc);
+ } else {
+ goto illegal_insn;
+ }
+ } else {
+ gen_fcompare(&cmp, cc, cond);
+ }
+
+ /* The get_src2 above loaded the normal 13-bit
+ immediate field, not the 11-bit field we have
+ in movcc. But it did handle the reg case. */
+ if (IS_IMM) {
+ simm = GET_FIELD_SPs(insn, 0, 10);
+ tcg_gen_movi_tl(cpu_src2, simm);
+ }
+
+ dst = gen_load_gpr(dc, rd);
+ tcg_gen_movcond_tl(cmp.cond, dst,
+ cmp.c1, cmp.c2,
+ cpu_src2, dst);
+ free_compare(&cmp);
+ gen_store_gpr(dc, rd, dst);
+ break;
+ }
+ case 0x2d: /* V9 sdivx */
+ gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x2e: /* V9 popc */
+ gen_helper_popc(cpu_dst, cpu_src2);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x2f: /* V9 movr */
+ {
+ int cond = GET_FIELD_SP(insn, 10, 12);
+ DisasCompare cmp;
+ TCGv dst;
+
+ gen_compare_reg(&cmp, cond, cpu_src1);
+
+ /* The get_src2 above loaded the normal 13-bit
+ immediate field, not the 10-bit field we have
+ in movr. But it did handle the reg case. */
+ if (IS_IMM) {
+ simm = GET_FIELD_SPs(insn, 0, 9);
+ tcg_gen_movi_tl(cpu_src2, simm);
+ }
+
+ dst = gen_load_gpr(dc, rd);
+ tcg_gen_movcond_tl(cmp.cond, dst,
+ cmp.c1, cmp.c2,
+ cpu_src2, dst);
+ free_compare(&cmp);
+ gen_store_gpr(dc, rd, dst);
+ break;
+ }
+#endif
+ default:
+ goto illegal_insn;
+ }
+ }
+ } else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
+#ifdef TARGET_SPARC64
+ int opf = GET_FIELD_SP(insn, 5, 13);
+ rs1 = GET_FIELD(insn, 13, 17);
+ rs2 = GET_FIELD(insn, 27, 31);
+ if (gen_trap_ifnofpu(dc)) {
+ goto jmp_insn;
+ }
+
+ switch (opf) {
+ case 0x000: /* VIS I edge8cc */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1 = gen_load_gpr(dc, rs1);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x001: /* VIS II edge8n */
+ CHECK_FPU_FEATURE(dc, VIS2);
+ cpu_src1 = gen_load_gpr(dc, rs1);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x002: /* VIS I edge8lcc */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1 = gen_load_gpr(dc, rs1);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x003: /* VIS II edge8ln */
+ CHECK_FPU_FEATURE(dc, VIS2);
+ cpu_src1 = gen_load_gpr(dc, rs1);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x004: /* VIS I edge16cc */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1 = gen_load_gpr(dc, rs1);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x005: /* VIS II edge16n */
+ CHECK_FPU_FEATURE(dc, VIS2);
+ cpu_src1 = gen_load_gpr(dc, rs1);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x006: /* VIS I edge16lcc */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1 = gen_load_gpr(dc, rs1);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x007: /* VIS II edge16ln */
+ CHECK_FPU_FEATURE(dc, VIS2);
+ cpu_src1 = gen_load_gpr(dc, rs1);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x008: /* VIS I edge32cc */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1 = gen_load_gpr(dc, rs1);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x009: /* VIS II edge32n */
+ CHECK_FPU_FEATURE(dc, VIS2);
+ cpu_src1 = gen_load_gpr(dc, rs1);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x00a: /* VIS I edge32lcc */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1 = gen_load_gpr(dc, rs1);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x00b: /* VIS II edge32ln */
+ CHECK_FPU_FEATURE(dc, VIS2);
+ cpu_src1 = gen_load_gpr(dc, rs1);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x010: /* VIS I array8 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1 = gen_load_gpr(dc, rs1);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x012: /* VIS I array16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1 = gen_load_gpr(dc, rs1);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
+ tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x014: /* VIS I array32 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1 = gen_load_gpr(dc, rs1);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
+ tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x018: /* VIS I alignaddr */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1 = gen_load_gpr(dc, rs1);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x01a: /* VIS I alignaddrl */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1 = gen_load_gpr(dc, rs1);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x019: /* VIS II bmask */
+ CHECK_FPU_FEATURE(dc, VIS2);
+ cpu_src1 = gen_load_gpr(dc, rs1);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
+ tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x020: /* VIS I fcmple16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1_64 = gen_load_fpr_D(dc, rs1);
+ cpu_src2_64 = gen_load_fpr_D(dc, rs2);
+ gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x022: /* VIS I fcmpne16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1_64 = gen_load_fpr_D(dc, rs1);
+ cpu_src2_64 = gen_load_fpr_D(dc, rs2);
+ gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x024: /* VIS I fcmple32 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1_64 = gen_load_fpr_D(dc, rs1);
+ cpu_src2_64 = gen_load_fpr_D(dc, rs2);
+ gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x026: /* VIS I fcmpne32 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1_64 = gen_load_fpr_D(dc, rs1);
+ cpu_src2_64 = gen_load_fpr_D(dc, rs2);
+ gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x028: /* VIS I fcmpgt16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1_64 = gen_load_fpr_D(dc, rs1);
+ cpu_src2_64 = gen_load_fpr_D(dc, rs2);
+ gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x02a: /* VIS I fcmpeq16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1_64 = gen_load_fpr_D(dc, rs1);
+ cpu_src2_64 = gen_load_fpr_D(dc, rs2);
+ gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x02c: /* VIS I fcmpgt32 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1_64 = gen_load_fpr_D(dc, rs1);
+ cpu_src2_64 = gen_load_fpr_D(dc, rs2);
+ gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x02e: /* VIS I fcmpeq32 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1_64 = gen_load_fpr_D(dc, rs1);
+ cpu_src2_64 = gen_load_fpr_D(dc, rs2);
+ gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
+ gen_store_gpr(dc, rd, cpu_dst);
+ break;
+ case 0x031: /* VIS I fmul8x16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
+ break;
+ case 0x033: /* VIS I fmul8x16au */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
+ break;
+ case 0x035: /* VIS I fmul8x16al */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
+ break;
+ case 0x036: /* VIS I fmul8sux16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
+ break;
+ case 0x037: /* VIS I fmul8ulx16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
+ break;
+ case 0x038: /* VIS I fmuld8sux16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
+ break;
+ case 0x039: /* VIS I fmuld8ulx16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
+ break;
+ case 0x03a: /* VIS I fpack32 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
+ break;
+ case 0x03b: /* VIS I fpack16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1_64 = gen_load_fpr_D(dc, rs2);
+ cpu_dst_32 = gen_dest_fpr_F(dc);
+ gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
+ gen_store_fpr_F(dc, rd, cpu_dst_32);
+ break;
+ case 0x03d: /* VIS I fpackfix */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1_64 = gen_load_fpr_D(dc, rs2);
+ cpu_dst_32 = gen_dest_fpr_F(dc);
+ gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
+ gen_store_fpr_F(dc, rd, cpu_dst_32);
+ break;
+ case 0x03e: /* VIS I pdist */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
+ break;
+ case 0x048: /* VIS I faligndata */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
+ break;
+ case 0x04b: /* VIS I fpmerge */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
+ break;
+ case 0x04c: /* VIS II bshuffle */
+ CHECK_FPU_FEATURE(dc, VIS2);
+ gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
+ break;
+ case 0x04d: /* VIS I fexpand */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
+ break;
+ case 0x050: /* VIS I fpadd16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
+ break;
+ case 0x051: /* VIS I fpadd16s */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
+ break;
+ case 0x052: /* VIS I fpadd32 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
+ break;
+ case 0x053: /* VIS I fpadd32s */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
+ break;
+ case 0x054: /* VIS I fpsub16 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
+ break;
+ case 0x055: /* VIS I fpsub16s */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
+ break;
+ case 0x056: /* VIS I fpsub32 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
+ break;
+ case 0x057: /* VIS I fpsub32s */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
+ break;
+ case 0x060: /* VIS I fzero */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_dst_64 = gen_dest_fpr_D(dc, rd);
+ tcg_gen_movi_i64(cpu_dst_64, 0);
+ gen_store_fpr_D(dc, rd, cpu_dst_64);
+ break;
+ case 0x061: /* VIS I fzeros */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_dst_32 = gen_dest_fpr_F(dc);
+ tcg_gen_movi_i32(cpu_dst_32, 0);
+ gen_store_fpr_F(dc, rd, cpu_dst_32);
+ break;
+ case 0x062: /* VIS I fnor */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
+ break;
+ case 0x063: /* VIS I fnors */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
+ break;
+ case 0x064: /* VIS I fandnot2 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
+ break;
+ case 0x065: /* VIS I fandnot2s */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
+ break;
+ case 0x066: /* VIS I fnot2 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
+ break;
+ case 0x067: /* VIS I fnot2s */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
+ break;
+ case 0x068: /* VIS I fandnot1 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
+ break;
+ case 0x069: /* VIS I fandnot1s */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
+ break;
+ case 0x06a: /* VIS I fnot1 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
+ break;
+ case 0x06b: /* VIS I fnot1s */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
+ break;
+ case 0x06c: /* VIS I fxor */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
+ break;
+ case 0x06d: /* VIS I fxors */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
+ break;
+ case 0x06e: /* VIS I fnand */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
+ break;
+ case 0x06f: /* VIS I fnands */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
+ break;
+ case 0x070: /* VIS I fand */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
+ break;
+ case 0x071: /* VIS I fands */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
+ break;
+ case 0x072: /* VIS I fxnor */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
+ break;
+ case 0x073: /* VIS I fxnors */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
+ break;
+ case 0x074: /* VIS I fsrc1 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1_64 = gen_load_fpr_D(dc, rs1);
+ gen_store_fpr_D(dc, rd, cpu_src1_64);
+ break;
+ case 0x075: /* VIS I fsrc1s */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1_32 = gen_load_fpr_F(dc, rs1);
+ gen_store_fpr_F(dc, rd, cpu_src1_32);
+ break;
+ case 0x076: /* VIS I fornot2 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
+ break;
+ case 0x077: /* VIS I fornot2s */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
+ break;
+ case 0x078: /* VIS I fsrc2 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1_64 = gen_load_fpr_D(dc, rs2);
+ gen_store_fpr_D(dc, rd, cpu_src1_64);
+ break;
+ case 0x079: /* VIS I fsrc2s */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_src1_32 = gen_load_fpr_F(dc, rs2);
+ gen_store_fpr_F(dc, rd, cpu_src1_32);
+ break;
+ case 0x07a: /* VIS I fornot1 */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
+ break;
+ case 0x07b: /* VIS I fornot1s */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
+ break;
+ case 0x07c: /* VIS I for */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
+ break;
+ case 0x07d: /* VIS I fors */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
+ break;
+ case 0x07e: /* VIS I fone */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_dst_64 = gen_dest_fpr_D(dc, rd);
+ tcg_gen_movi_i64(cpu_dst_64, -1);
+ gen_store_fpr_D(dc, rd, cpu_dst_64);
+ break;
+ case 0x07f: /* VIS I fones */
+ CHECK_FPU_FEATURE(dc, VIS1);
+ cpu_dst_32 = gen_dest_fpr_F(dc);
+ tcg_gen_movi_i32(cpu_dst_32, -1);
+ gen_store_fpr_F(dc, rd, cpu_dst_32);
+ break;
+ case 0x080: /* VIS I shutdown */
+ case 0x081: /* VIS II siam */
+ // XXX
+ goto illegal_insn;
+ default:
+ goto illegal_insn;
+ }
+#else
+ goto ncp_insn;
+#endif
+ } else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
+#ifdef TARGET_SPARC64
+ goto illegal_insn;
+#else
+ goto ncp_insn;
+#endif
+#ifdef TARGET_SPARC64
+ } else if (xop == 0x39) { /* V9 return */
+ save_state(dc);
+ cpu_src1 = get_src1(dc, insn);
+ cpu_tmp0 = get_temp_tl(dc);
+ if (IS_IMM) { /* immediate */
+ simm = GET_FIELDs(insn, 19, 31);
+ tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
+ } else { /* register */
+ rs2 = GET_FIELD(insn, 27, 31);
+ if (rs2) {
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
+ } else {
+ tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
+ }
+ }
+ gen_helper_restore(cpu_env);
+ gen_mov_pc_npc(dc);
+ gen_check_align(cpu_tmp0, 3);
+ tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
+ dc->npc = DYNAMIC_PC;
+ goto jmp_insn;
+#endif
+ } else {
+ cpu_src1 = get_src1(dc, insn);
+ cpu_tmp0 = get_temp_tl(dc);
+ if (IS_IMM) { /* immediate */
+ simm = GET_FIELDs(insn, 19, 31);
+ tcg_gen_addi_tl(cpu_tmp0, cpu_src1, simm);
+ } else { /* register */
+ rs2 = GET_FIELD(insn, 27, 31);
+ if (rs2) {
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ tcg_gen_add_tl(cpu_tmp0, cpu_src1, cpu_src2);
+ } else {
+ tcg_gen_mov_tl(cpu_tmp0, cpu_src1);
+ }
+ }
+ switch (xop) {
+ case 0x38: /* jmpl */
+ {
+ TCGv t = gen_dest_gpr(dc, rd);
+ tcg_gen_movi_tl(t, dc->pc);
+ gen_store_gpr(dc, rd, t);
+
+ gen_mov_pc_npc(dc);
+ gen_check_align(cpu_tmp0, 3);
+ gen_address_mask(dc, cpu_tmp0);
+ tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
+ dc->npc = DYNAMIC_PC;
+ }
+ goto jmp_insn;
+#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
+ case 0x39: /* rett, V9 return */
+ {
+ if (!supervisor(dc))
+ goto priv_insn;
+ gen_mov_pc_npc(dc);
+ gen_check_align(cpu_tmp0, 3);
+ tcg_gen_mov_tl(cpu_npc, cpu_tmp0);
+ dc->npc = DYNAMIC_PC;
+ gen_helper_rett(cpu_env);
+ }
+ goto jmp_insn;
+#endif
+ case 0x3b: /* flush */
+ if (!((dc)->def->features & CPU_FEATURE_FLUSH))
+ goto unimp_flush;
+ /* nop */
+ break;
+ case 0x3c: /* save */
+ gen_helper_save(cpu_env);
+ gen_store_gpr(dc, rd, cpu_tmp0);
+ break;
+ case 0x3d: /* restore */
+ gen_helper_restore(cpu_env);
+ gen_store_gpr(dc, rd, cpu_tmp0);
+ break;
+#if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
+ case 0x3e: /* V9 done/retry */
+ {
+ switch (rd) {
+ case 0:
+ if (!supervisor(dc))
+ goto priv_insn;
+ dc->npc = DYNAMIC_PC;
+ dc->pc = DYNAMIC_PC;
+ gen_helper_done(cpu_env);
+ goto jmp_insn;
+ case 1:
+ if (!supervisor(dc))
+ goto priv_insn;
+ dc->npc = DYNAMIC_PC;
+ dc->pc = DYNAMIC_PC;
+ gen_helper_retry(cpu_env);
+ goto jmp_insn;
+ default:
+ goto illegal_insn;
+ }
+ }
+ break;
+#endif
+ default:
+ goto illegal_insn;
+ }
+ }
+ break;
+ }
+ break;
+ case 3: /* load/store instructions */
+ {
+ unsigned int xop = GET_FIELD(insn, 7, 12);
+ /* ??? gen_address_mask prevents us from using a source
+ register directly. Always generate a temporary. */
+ TCGv cpu_addr = get_temp_tl(dc);
+
+ tcg_gen_mov_tl(cpu_addr, get_src1(dc, insn));
+ if (xop == 0x3c || xop == 0x3e) {
+ /* V9 casa/casxa : no offset */
+ } else if (IS_IMM) { /* immediate */
+ simm = GET_FIELDs(insn, 19, 31);
+ if (simm != 0) {
+ tcg_gen_addi_tl(cpu_addr, cpu_addr, simm);
+ }
+ } else { /* register */
+ rs2 = GET_FIELD(insn, 27, 31);
+ if (rs2 != 0) {
+ tcg_gen_add_tl(cpu_addr, cpu_addr, gen_load_gpr(dc, rs2));
+ }
+ }
+ if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
+ (xop > 0x17 && xop <= 0x1d ) ||
+ (xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
+ TCGv cpu_val = gen_dest_gpr(dc, rd);
+
+ switch (xop) {
+ case 0x0: /* ld, V9 lduw, load unsigned word */
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
+ break;
+ case 0x1: /* ldub, load unsigned byte */
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
+ break;
+ case 0x2: /* lduh, load unsigned halfword */
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
+ break;
+ case 0x3: /* ldd, load double word */
+ if (rd & 1)
+ goto illegal_insn;
+ else {
+ TCGv_i64 t64;
+
+ gen_address_mask(dc, cpu_addr);
+ t64 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld64(t64, cpu_addr, dc->mem_idx);
+ tcg_gen_trunc_i64_tl(cpu_val, t64);
+ tcg_gen_ext32u_tl(cpu_val, cpu_val);
+ gen_store_gpr(dc, rd + 1, cpu_val);
+ tcg_gen_shri_i64(t64, t64, 32);
+ tcg_gen_trunc_i64_tl(cpu_val, t64);
+ tcg_temp_free_i64(t64);
+ tcg_gen_ext32u_tl(cpu_val, cpu_val);
+ }
+ break;
+ case 0x9: /* ldsb, load signed byte */
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
+ break;
+ case 0xa: /* ldsh, load signed halfword */
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
+ break;
+ case 0xd: /* ldstub */
+ gen_ldstub(dc, cpu_val, cpu_addr, dc->mem_idx);
+ break;
+ case 0x0f:
+                /* swap, swap register with memory, atomically */
+ CHECK_IU_FEATURE(dc, SWAP);
+ cpu_src1 = gen_load_gpr(dc, rd);
+ gen_swap(dc, cpu_val, cpu_src1, cpu_addr,
+ dc->mem_idx, MO_TEUL);
+ break;
+#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
+ case 0x10: /* lda, V9 lduwa, load word alternate */
+ gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
+ break;
+ case 0x11: /* lduba, load unsigned byte alternate */
+ gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
+ break;
+ case 0x12: /* lduha, load unsigned halfword alternate */
+ gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
+ break;
+ case 0x13: /* ldda, load double word alternate */
+ if (rd & 1) {
+ goto illegal_insn;
+ }
+ gen_ldda_asi(dc, cpu_addr, insn, rd);
+ goto skip_move;
+ case 0x19: /* ldsba, load signed byte alternate */
+ gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_SB);
+ break;
+ case 0x1a: /* ldsha, load signed halfword alternate */
+ gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESW);
+ break;
+            case 0x1d: /* ldstuba -- XXX: should be atomic */
+ gen_ldstub_asi(dc, cpu_val, cpu_addr, insn);
+ break;
+            case 0x1f: /* swapa, swap reg with alt. memory, atomically */
+ CHECK_IU_FEATURE(dc, SWAP);
+ cpu_src1 = gen_load_gpr(dc, rd);
+ gen_swap_asi(dc, cpu_val, cpu_src1, cpu_addr, insn);
+ break;
+
+#ifndef TARGET_SPARC64
+ case 0x30: /* ldc */
+ case 0x31: /* ldcsr */
+ case 0x33: /* lddc */
+ goto ncp_insn;
+#endif
+#endif
+#ifdef TARGET_SPARC64
+ case 0x08: /* V9 ldsw */
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
+ break;
+ case 0x0b: /* V9 ldx */
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
+ break;
+ case 0x18: /* V9 ldswa */
+ gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TESL);
+ break;
+ case 0x1b: /* V9 ldxa */
+ gen_ld_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ);
+ break;
+ case 0x2d: /* V9 prefetch, no effect */
+ goto skip_move;
+ case 0x30: /* V9 ldfa */
+ if (gen_trap_ifnofpu(dc)) {
+ goto jmp_insn;
+ }
+ gen_ldf_asi(dc, cpu_addr, insn, 4, rd);
+ gen_update_fprs_dirty(dc, rd);
+ goto skip_move;
+ case 0x33: /* V9 lddfa */
+ if (gen_trap_ifnofpu(dc)) {
+ goto jmp_insn;
+ }
+ gen_ldf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
+ gen_update_fprs_dirty(dc, DFPREG(rd));
+ goto skip_move;
+ case 0x3d: /* V9 prefetcha, no effect */
+ goto skip_move;
+ case 0x32: /* V9 ldqfa */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ if (gen_trap_ifnofpu(dc)) {
+ goto jmp_insn;
+ }
+ gen_ldf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
+ gen_update_fprs_dirty(dc, QFPREG(rd));
+ goto skip_move;
+#endif
+ default:
+ goto illegal_insn;
+ }
+ gen_store_gpr(dc, rd, cpu_val);
+#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
+ skip_move: ;
+#endif
+ } else if (xop >= 0x20 && xop < 0x24) {
+ if (gen_trap_ifnofpu(dc)) {
+ goto jmp_insn;
+ }
+ switch (xop) {
+ case 0x20: /* ldf, load fpreg */
+ gen_address_mask(dc, cpu_addr);
+ cpu_dst_32 = gen_dest_fpr_F(dc);
+ tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
+ dc->mem_idx, MO_TEUL);
+ gen_store_fpr_F(dc, rd, cpu_dst_32);
+ break;
+ case 0x21: /* ldfsr, V9 ldxfsr */
+#ifdef TARGET_SPARC64
+ gen_address_mask(dc, cpu_addr);
+ if (rd == 1) {
+ TCGv_i64 t64 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld_i64(t64, cpu_addr,
+ dc->mem_idx, MO_TEQ);
+ gen_helper_ldxfsr(cpu_fsr, cpu_env, cpu_fsr, t64);
+ tcg_temp_free_i64(t64);
+ break;
+ }
+#endif
+ cpu_dst_32 = get_temp_i32(dc);
+ tcg_gen_qemu_ld_i32(cpu_dst_32, cpu_addr,
+ dc->mem_idx, MO_TEUL);
+ gen_helper_ldfsr(cpu_fsr, cpu_env, cpu_fsr, cpu_dst_32);
+ break;
+ case 0x22: /* ldqf, load quad fpreg */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_address_mask(dc, cpu_addr);
+ cpu_src1_64 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
+ MO_TEQ | MO_ALIGN_4);
+ tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
+ cpu_src2_64 = tcg_temp_new_i64();
+ tcg_gen_qemu_ld_i64(cpu_src2_64, cpu_addr, dc->mem_idx,
+ MO_TEQ | MO_ALIGN_4);
+ gen_store_fpr_Q(dc, rd, cpu_src1_64, cpu_src2_64);
+ tcg_temp_free_i64(cpu_src1_64);
+ tcg_temp_free_i64(cpu_src2_64);
+ break;
+ case 0x23: /* lddf, load double fpreg */
+ gen_address_mask(dc, cpu_addr);
+ cpu_dst_64 = gen_dest_fpr_D(dc, rd);
+ tcg_gen_qemu_ld_i64(cpu_dst_64, cpu_addr, dc->mem_idx,
+ MO_TEQ | MO_ALIGN_4);
+ gen_store_fpr_D(dc, rd, cpu_dst_64);
+ break;
+ default:
+ goto illegal_insn;
+ }
+ } else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
+ xop == 0xe || xop == 0x1e) {
+ TCGv cpu_val = gen_load_gpr(dc, rd);
+
+ switch (xop) {
+ case 0x4: /* st, store word */
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
+ break;
+ case 0x5: /* stb, store byte */
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
+ break;
+ case 0x6: /* sth, store halfword */
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
+ break;
+ case 0x7: /* std, store double word */
+ if (rd & 1)
+ goto illegal_insn;
+ else {
+ TCGv_i64 t64;
+ TCGv lo;
+
+ gen_address_mask(dc, cpu_addr);
+ lo = gen_load_gpr(dc, rd + 1);
+ t64 = tcg_temp_new_i64();
+ tcg_gen_concat_tl_i64(t64, lo, cpu_val);
+ tcg_gen_qemu_st64(t64, cpu_addr, dc->mem_idx);
+ tcg_temp_free_i64(t64);
+ }
+ break;
+#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
+ case 0x14: /* sta, V9 stwa, store word alternate */
+ gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUL);
+ break;
+ case 0x15: /* stba, store byte alternate */
+ gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_UB);
+ break;
+ case 0x16: /* stha, store halfword alternate */
+ gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEUW);
+ break;
+ case 0x17: /* stda, store double word alternate */
+ if (rd & 1) {
+ goto illegal_insn;
+ }
+ gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
+ break;
+#endif
+#ifdef TARGET_SPARC64
+ case 0x0e: /* V9 stx */
+ gen_address_mask(dc, cpu_addr);
+ tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
+ break;
+ case 0x1e: /* V9 stxa */
+ gen_st_asi(dc, cpu_val, cpu_addr, insn, MO_TEQ);
+ break;
+#endif
+ default:
+ goto illegal_insn;
+ }
+ } else if (xop > 0x23 && xop < 0x28) {
+ if (gen_trap_ifnofpu(dc)) {
+ goto jmp_insn;
+ }
+ switch (xop) {
+ case 0x24: /* stf, store fpreg */
+ gen_address_mask(dc, cpu_addr);
+ cpu_src1_32 = gen_load_fpr_F(dc, rd);
+ tcg_gen_qemu_st_i32(cpu_src1_32, cpu_addr,
+ dc->mem_idx, MO_TEUL);
+ break;
+ case 0x25: /* stfsr, V9 stxfsr */
+ {
+#ifdef TARGET_SPARC64
+ gen_address_mask(dc, cpu_addr);
+ if (rd == 1) {
+ tcg_gen_qemu_st64(cpu_fsr, cpu_addr, dc->mem_idx);
+ break;
+ }
+#endif
+ tcg_gen_qemu_st32(cpu_fsr, cpu_addr, dc->mem_idx);
+ }
+ break;
+ case 0x26:
+#ifdef TARGET_SPARC64
+ /* V9 stqf, store quad fpreg */
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ gen_address_mask(dc, cpu_addr);
+ /* ??? While stqf only requires 4-byte alignment, it is
+ legal for the cpu to signal the unaligned exception.
+ The OS trap handler is then required to fix it up.
+ For qemu, this avoids having to probe the second page
+ before performing the first write. */
+ cpu_src1_64 = gen_load_fpr_Q0(dc, rd);
+ tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr,
+ dc->mem_idx, MO_TEQ | MO_ALIGN_16);
+ tcg_gen_addi_tl(cpu_addr, cpu_addr, 8);
+ cpu_src2_64 = gen_load_fpr_Q1(dc, rd);
+                    tcg_gen_qemu_st_i64(cpu_src2_64, cpu_addr,
+                                        dc->mem_idx, MO_TEQ);
+ break;
+#else /* !TARGET_SPARC64 */
+ /* stdfq, store floating point queue */
+#if defined(CONFIG_USER_ONLY)
+ goto illegal_insn;
+#else
+ if (!supervisor(dc))
+ goto priv_insn;
+ if (gen_trap_ifnofpu(dc)) {
+ goto jmp_insn;
+ }
+ goto nfq_insn;
+#endif
+#endif
+ case 0x27: /* stdf, store double fpreg */
+ gen_address_mask(dc, cpu_addr);
+ cpu_src1_64 = gen_load_fpr_D(dc, rd);
+ tcg_gen_qemu_st_i64(cpu_src1_64, cpu_addr, dc->mem_idx,
+ MO_TEQ | MO_ALIGN_4);
+ break;
+ default:
+ goto illegal_insn;
+ }
+ } else if (xop > 0x33 && xop < 0x3f) {
+ switch (xop) {
+#ifdef TARGET_SPARC64
+ case 0x34: /* V9 stfa */
+ if (gen_trap_ifnofpu(dc)) {
+ goto jmp_insn;
+ }
+ gen_stf_asi(dc, cpu_addr, insn, 4, rd);
+ break;
+ case 0x36: /* V9 stqfa */
+ {
+ CHECK_FPU_FEATURE(dc, FLOAT128);
+ if (gen_trap_ifnofpu(dc)) {
+ goto jmp_insn;
+ }
+ gen_stf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
+ }
+ break;
+ case 0x37: /* V9 stdfa */
+ if (gen_trap_ifnofpu(dc)) {
+ goto jmp_insn;
+ }
+ gen_stf_asi(dc, cpu_addr, insn, 8, DFPREG(rd));
+ break;
+ case 0x3e: /* V9 casxa */
+ rs2 = GET_FIELD(insn, 27, 31);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
+ break;
+#else
+ case 0x34: /* stc */
+ case 0x35: /* stcsr */
+ case 0x36: /* stdcq */
+ case 0x37: /* stdc */
+ goto ncp_insn;
+#endif
+#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
+ case 0x3c: /* V9 or LEON3 casa */
+#ifndef TARGET_SPARC64
+ CHECK_IU_FEATURE(dc, CASA);
+#endif
+ rs2 = GET_FIELD(insn, 27, 31);
+ cpu_src2 = gen_load_gpr(dc, rs2);
+ gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
+ break;
+#endif
+ default:
+ goto illegal_insn;
+ }
+ } else {
+ goto illegal_insn;
+ }
+ }
+ break;
+ }
+    /* default case for non-jump instructions */
+ if (dc->npc == DYNAMIC_PC) {
+ dc->pc = DYNAMIC_PC;
+ gen_op_next_insn();
+ } else if (dc->npc == JUMP_PC) {
+ /* we can do a static jump */
+ gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
+ dc->is_br = 1;
+ } else {
+ dc->pc = dc->npc;
+ dc->npc = dc->npc + 4;
+ }
+ jmp_insn:
+ goto egress;
+ illegal_insn:
+ gen_exception(dc, TT_ILL_INSN);
+ goto egress;
+ unimp_flush:
+ gen_exception(dc, TT_UNIMP_FLUSH);
+ goto egress;
+#if !defined(CONFIG_USER_ONLY)
+ priv_insn:
+ gen_exception(dc, TT_PRIV_INSN);
+ goto egress;
+#endif
+ nfpu_insn:
+ gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
+ goto egress;
+#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
+ nfq_insn:
+ gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
+ goto egress;
+#endif
+#ifndef TARGET_SPARC64
+ ncp_insn:
+ gen_exception(dc, TT_NCP_INSN);
+ goto egress;
+#endif
+ egress:
+ if (dc->n_t32 != 0) {
+ int i;
+ for (i = dc->n_t32 - 1; i >= 0; --i) {
+ tcg_temp_free_i32(dc->t32[i]);
+ }
+ dc->n_t32 = 0;
+ }
+ if (dc->n_ttl != 0) {
+ int i;
+ for (i = dc->n_ttl - 1; i >= 0; --i) {
+ tcg_temp_free(dc->ttl[i]);
+ }
+ dc->n_ttl = 0;
+ }
+}
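+
+/*
+ * Illustrative sketch only (hypothetical helper, not used above): for the
+ * common case where both pc and npc are static, the "default case for
+ * non-jump instructions" above advances the delay-slot PC pair exactly
+ * like this, just on TCG state instead of plain integers.
+ */
+static inline void advance_pc_pair_example(target_ulong *pc, target_ulong *npc)
+{
+    /* The insn at *pc has been translated; the delay-slot insn comes next. */
+    *pc = *npc;
+    *npc += 4;
+}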
+
+void gen_intermediate_code(CPUSPARCState *env, TranslationBlock *tb)
+{
+ SPARCCPU *cpu = sparc_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ target_ulong pc_start, last_pc;
+ DisasContext dc1, *dc = &dc1;
+ int num_insns;
+ int max_insns;
+ unsigned int insn;
+
+ memset(dc, 0, sizeof(DisasContext));
+ dc->tb = tb;
+ pc_start = tb->pc;
+ dc->pc = pc_start;
+ last_pc = dc->pc;
+ dc->npc = (target_ulong) tb->cs_base;
+ dc->cc_op = CC_OP_DYNAMIC;
+ dc->mem_idx = tb->flags & TB_FLAG_MMU_MASK;
+ dc->def = env->def;
+ dc->fpu_enabled = tb_fpu_enabled(tb->flags);
+ dc->address_mask_32bit = tb_am_enabled(tb->flags);
+ dc->singlestep = (cs->singlestep_enabled || singlestep);
+#ifdef TARGET_SPARC64
+ dc->fprs_dirty = 0;
+ dc->asi = (tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
+#endif
+
+ num_insns = 0;
+ max_insns = tb->cflags & CF_COUNT_MASK;
+ if (max_insns == 0) {
+ max_insns = CF_COUNT_MASK;
+ }
+ if (max_insns > TCG_MAX_INSNS) {
+ max_insns = TCG_MAX_INSNS;
+ }
+
+ gen_tb_start(tb);
+ do {
+ if (dc->npc & JUMP_PC) {
+ assert(dc->jump_pc[1] == dc->pc + 4);
+ tcg_gen_insn_start(dc->pc, dc->jump_pc[0] | JUMP_PC);
+ } else {
+ tcg_gen_insn_start(dc->pc, dc->npc);
+ }
+ num_insns++;
+ last_pc = dc->pc;
+
+ if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
+ if (dc->pc != pc_start) {
+ save_state(dc);
+ }
+ gen_helper_debug(cpu_env);
+ tcg_gen_exit_tb(0);
+ dc->is_br = 1;
+ goto exit_gen_loop;
+ }
+
+ if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ gen_io_start();
+ }
+
+ insn = cpu_ldl_code(env, dc->pc);
+
+ disas_sparc_insn(dc, insn);
+
+ if (dc->is_br)
+ break;
+ /* if the next PC is different, we abort now */
+ if (dc->pc != (last_pc + 4))
+ break;
+ /* if we reach a page boundary, we stop generation so that the
+ PC of a TT_TFAULT exception is always in the right page */
+ if ((dc->pc & (TARGET_PAGE_SIZE - 1)) == 0)
+ break;
+        /* if in single-step mode, we generate only one instruction and
+           generate an exception */
+ if (dc->singlestep) {
+ break;
+ }
+ } while (!tcg_op_buf_full() &&
+ (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32) &&
+ num_insns < max_insns);
+
+ exit_gen_loop:
+ if (tb->cflags & CF_LAST_IO) {
+ gen_io_end();
+ }
+ if (!dc->is_br) {
+ if (dc->pc != DYNAMIC_PC &&
+ (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
+ /* static PC and NPC: we can use direct chaining */
+ gen_goto_tb(dc, 0, dc->pc, dc->npc);
+ } else {
+ if (dc->pc != DYNAMIC_PC) {
+ tcg_gen_movi_tl(cpu_pc, dc->pc);
+ }
+ save_npc(dc);
+ tcg_gen_exit_tb(0);
+ }
+ }
+ gen_tb_end(tb, num_insns);
+
+ tb->size = last_pc + 4 - pc_start;
+ tb->icount = num_insns;
+
+#ifdef DEBUG_DISAS
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
+ && qemu_log_in_addr_range(pc_start)) {
+ qemu_log_lock();
+ qemu_log("--------------\n");
+ qemu_log("IN: %s\n", lookup_symbol(pc_start));
+ log_target_disas(cs, pc_start, last_pc + 4 - pc_start, 0);
+ qemu_log("\n");
+ qemu_log_unlock();
+ }
+#endif
+}
+
+void gen_intermediate_code_init(CPUSPARCState *env)
+{
+ static int inited;
+ static const char gregnames[32][4] = {
+ "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
+ "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
+ "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
+ "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
+ };
+ static const char fregnames[32][4] = {
+ "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
+ "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
+ "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
+ "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
+ };
+
+ static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
+#ifdef TARGET_SPARC64
+ { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
+ { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
+#else
+ { &cpu_wim, offsetof(CPUSPARCState, wim), "wim" },
+#endif
+ { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
+ { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
+ };
+
+ static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
+#ifdef TARGET_SPARC64
+ { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
+ { &cpu_tick_cmpr, offsetof(CPUSPARCState, tick_cmpr), "tick_cmpr" },
+ { &cpu_stick_cmpr, offsetof(CPUSPARCState, stick_cmpr), "stick_cmpr" },
+ { &cpu_hstick_cmpr, offsetof(CPUSPARCState, hstick_cmpr),
+ "hstick_cmpr" },
+ { &cpu_hintp, offsetof(CPUSPARCState, hintp), "hintp" },
+ { &cpu_htba, offsetof(CPUSPARCState, htba), "htba" },
+ { &cpu_hver, offsetof(CPUSPARCState, hver), "hver" },
+ { &cpu_ssr, offsetof(CPUSPARCState, ssr), "ssr" },
+ { &cpu_ver, offsetof(CPUSPARCState, version), "ver" },
+#endif
+ { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
+ { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
+ { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
+ { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
+ { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
+ { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
+ { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
+ { &cpu_y, offsetof(CPUSPARCState, y), "y" },
+#ifndef CONFIG_USER_ONLY
+ { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
+#endif
+ };
+
+ unsigned int i;
+
+ /* init various static tables */
+ if (inited) {
+ return;
+ }
+ inited = 1;
+
+ cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+ tcg_ctx.tcg_env = cpu_env;
+
+ cpu_regwptr = tcg_global_mem_new_ptr(cpu_env,
+ offsetof(CPUSPARCState, regwptr),
+ "regwptr");
+
+ for (i = 0; i < ARRAY_SIZE(r32); ++i) {
+ *r32[i].ptr = tcg_global_mem_new_i32(cpu_env, r32[i].off, r32[i].name);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
+ *rtl[i].ptr = tcg_global_mem_new(cpu_env, rtl[i].off, rtl[i].name);
+ }
+
+ TCGV_UNUSED(cpu_regs[0]);
+ for (i = 1; i < 8; ++i) {
+ cpu_regs[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUSPARCState, gregs[i]),
+ gregnames[i]);
+ }
+
+ for (i = 8; i < 32; ++i) {
+ cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
+ (i - 8) * sizeof(target_ulong),
+ gregnames[i]);
+ }
+
+ for (i = 0; i < TARGET_DPREGS; i++) {
+ cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUSPARCState, fpr[i]),
+ fregnames[i]);
+ }
+}
+
+void restore_state_to_opc(CPUSPARCState *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ target_ulong pc = data[0];
+ target_ulong npc = data[1];
+
+ env->pc = pc;
+ if (npc == DYNAMIC_PC) {
+ /* dynamic NPC: already stored */
+ } else if (npc & JUMP_PC) {
+ /* jump PC: use 'cond' and the jump targets of the translation */
+ if (env->cond) {
+ env->npc = npc & ~3;
+ } else {
+ env->npc = pc + 4;
+ }
+ } else {
+ env->npc = npc;
+ }
+}
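+
+/*
+ * Illustrative sketch only (hypothetical helper, not used above), assuming
+ * the encoding used by restore_state_to_opc: a saved npc slot holds either
+ * a real address, DYNAMIC_PC, or a conditional-branch target with the
+ * JUMP_PC bit set in its low bits.
+ */
+static inline target_ulong resolve_saved_npc_example(target_ulong pc,
+                                                     target_ulong saved_npc,
+                                                     bool cond_was_true)
+{
+    if (saved_npc & JUMP_PC) {
+        /* Delay-slot conditional branch: pick taken target or fall-through. */
+        return cond_was_true ? (saved_npc & ~3) : pc + 4;
+    }
+    /* DYNAMIC_PC is handled by the caller (npc already stored); otherwise
+       the saved value is the real npc. */
+    return saved_npc;
+}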
diff --git a/target/sparc/vis_helper.c b/target/sparc/vis_helper.c
new file mode 100644
index 0000000000..8a9b763d0b
--- /dev/null
+++ b/target/sparc/vis_helper.c
@@ -0,0 +1,490 @@
+/*
+ * VIS op helpers
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+
+/* This macro uses non-native bit order */
+#define GET_FIELD(X, FROM, TO) \
+ ((X) >> (63 - (TO)) & ((1ULL << ((TO) - (FROM) + 1)) - 1))
+
+/* This macro uses the order in the manuals, i.e. bit 0 is 2^0 */
+#define GET_FIELD_SP(X, FROM, TO) \
+ GET_FIELD(X, 63 - (TO), 63 - (FROM))
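+
+/*
+ * Illustrative example only (hypothetical, with a made-up value): in the
+ * manual's numbering used by GET_FIELD_SP, bit 0 is the least significant
+ * bit, so GET_FIELD_SP(x, 11, 12) is simply (x >> 11) & 3.
+ */
+static inline uint64_t get_field_sp_example(void)
+{
+    uint64_t x = 0x1800;            /* bits 11 and 12 set */
+    return GET_FIELD_SP(x, 11, 12); /* == 3 */
+}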
+
+target_ulong helper_array8(target_ulong pixel_addr, target_ulong cubesize)
+{
+ return (GET_FIELD_SP(pixel_addr, 60, 63) << (17 + 2 * cubesize)) |
+ (GET_FIELD_SP(pixel_addr, 39, 39 + cubesize - 1) << (17 + cubesize)) |
+ (GET_FIELD_SP(pixel_addr, 17 + cubesize - 1, 17) << 17) |
+ (GET_FIELD_SP(pixel_addr, 56, 59) << 13) |
+ (GET_FIELD_SP(pixel_addr, 35, 38) << 9) |
+ (GET_FIELD_SP(pixel_addr, 13, 16) << 5) |
+ (((pixel_addr >> 55) & 1) << 4) |
+ (GET_FIELD_SP(pixel_addr, 33, 34) << 2) |
+ GET_FIELD_SP(pixel_addr, 11, 12);
+}
+
+#ifdef HOST_WORDS_BIGENDIAN
+#define VIS_B64(n) b[7 - (n)]
+#define VIS_W64(n) w[3 - (n)]
+#define VIS_SW64(n) sw[3 - (n)]
+#define VIS_L64(n) l[1 - (n)]
+#define VIS_B32(n) b[3 - (n)]
+#define VIS_W32(n) w[1 - (n)]
+#else
+#define VIS_B64(n) b[n]
+#define VIS_W64(n) w[n]
+#define VIS_SW64(n) sw[n]
+#define VIS_L64(n) l[n]
+#define VIS_B32(n) b[n]
+#define VIS_W32(n) w[n]
+#endif
+
+typedef union {
+ uint8_t b[8];
+ uint16_t w[4];
+ int16_t sw[4];
+ uint32_t l[2];
+ uint64_t ll;
+ float64 d;
+} VIS64;
+
+typedef union {
+ uint8_t b[4];
+ uint16_t w[2];
+ uint32_t l;
+ float32 f;
+} VIS32;
+
+uint64_t helper_fpmerge(uint64_t src1, uint64_t src2)
+{
+ VIS64 s, d;
+
+ s.ll = src1;
+ d.ll = src2;
+
+ /* Reverse calculation order to handle overlap */
+ d.VIS_B64(7) = s.VIS_B64(3);
+ d.VIS_B64(6) = d.VIS_B64(3);
+ d.VIS_B64(5) = s.VIS_B64(2);
+ d.VIS_B64(4) = d.VIS_B64(2);
+ d.VIS_B64(3) = s.VIS_B64(1);
+ d.VIS_B64(2) = d.VIS_B64(1);
+ d.VIS_B64(1) = s.VIS_B64(0);
+ /* d.VIS_B64(0) = d.VIS_B64(0); */
+
+ return d.ll;
+}
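+
+/*
+ * Illustrative example only (hypothetical, with made-up operands): fpmerge
+ * interleaves the four low bytes of each operand, src2 bytes in the even
+ * positions and src1 bytes in the odd positions.
+ */
+static inline uint64_t fpmerge_example(void)
+{
+    /* bytes low-to-high: aa 11 bb 22 cc 33 dd 44 -> 0x44dd33cc22bb11aa */
+    return helper_fpmerge(0x44332211, 0xddccbbaa);
+}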
+
+uint64_t helper_fmul8x16(uint64_t src1, uint64_t src2)
+{
+ VIS64 s, d;
+ uint32_t tmp;
+
+ s.ll = src1;
+ d.ll = src2;
+
+#define PMUL(r) \
+ tmp = (int32_t)d.VIS_SW64(r) * (int32_t)s.VIS_B64(r); \
+ if ((tmp & 0xff) > 0x7f) { \
+ tmp += 0x100; \
+ } \
+ d.VIS_W64(r) = tmp >> 8;
+
+ PMUL(0);
+ PMUL(1);
+ PMUL(2);
+ PMUL(3);
+#undef PMUL
+
+ return d.ll;
+}
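+
+/*
+ * Illustrative example only (hypothetical values): each PMUL step above
+ * multiplies an unsigned 8-bit pixel from src1 by a signed 16-bit scale
+ * factor from src2 and rounds the 8.8 fixed-point product to the nearest
+ * 16-bit result.
+ */
+static inline int16_t fmul8x16_elem_example(void)
+{
+    uint8_t pixel = 0x40;                           /* 64 */
+    int16_t scale = 0x0203;                         /* 515 */
+    int32_t tmp = (int32_t)scale * (int32_t)pixel;  /* 32960 == 0x80c0 */
+    if ((tmp & 0xff) > 0x7f) {                      /* low byte 0xc0: round up */
+        tmp += 0x100;
+    }
+    return tmp >> 8;                                /* 0x81 == 129 */
+}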
+
+uint64_t helper_fmul8x16al(uint64_t src1, uint64_t src2)
+{
+ VIS64 s, d;
+ uint32_t tmp;
+
+ s.ll = src1;
+ d.ll = src2;
+
+#define PMUL(r) \
+ tmp = (int32_t)d.VIS_SW64(1) * (int32_t)s.VIS_B64(r); \
+ if ((tmp & 0xff) > 0x7f) { \
+ tmp += 0x100; \
+ } \
+ d.VIS_W64(r) = tmp >> 8;
+
+ PMUL(0);
+ PMUL(1);
+ PMUL(2);
+ PMUL(3);
+#undef PMUL
+
+ return d.ll;
+}
+
+uint64_t helper_fmul8x16au(uint64_t src1, uint64_t src2)
+{
+ VIS64 s, d;
+ uint32_t tmp;
+
+ s.ll = src1;
+ d.ll = src2;
+
+#define PMUL(r) \
+ tmp = (int32_t)d.VIS_SW64(0) * (int32_t)s.VIS_B64(r); \
+ if ((tmp & 0xff) > 0x7f) { \
+ tmp += 0x100; \
+ } \
+ d.VIS_W64(r) = tmp >> 8;
+
+ PMUL(0);
+ PMUL(1);
+ PMUL(2);
+ PMUL(3);
+#undef PMUL
+
+ return d.ll;
+}
+
+uint64_t helper_fmul8sux16(uint64_t src1, uint64_t src2)
+{
+ VIS64 s, d;
+ uint32_t tmp;
+
+ s.ll = src1;
+ d.ll = src2;
+
+#define PMUL(r) \
+ tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \
+ if ((tmp & 0xff) > 0x7f) { \
+ tmp += 0x100; \
+ } \
+ d.VIS_W64(r) = tmp >> 8;
+
+ PMUL(0);
+ PMUL(1);
+ PMUL(2);
+ PMUL(3);
+#undef PMUL
+
+ return d.ll;
+}
+
+uint64_t helper_fmul8ulx16(uint64_t src1, uint64_t src2)
+{
+ VIS64 s, d;
+ uint32_t tmp;
+
+ s.ll = src1;
+ d.ll = src2;
+
+#define PMUL(r) \
+ tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \
+ if ((tmp & 0xff) > 0x7f) { \
+ tmp += 0x100; \
+ } \
+ d.VIS_W64(r) = tmp >> 8;
+
+ PMUL(0);
+ PMUL(1);
+ PMUL(2);
+ PMUL(3);
+#undef PMUL
+
+ return d.ll;
+}
+
+uint64_t helper_fmuld8sux16(uint64_t src1, uint64_t src2)
+{
+ VIS64 s, d;
+ uint32_t tmp;
+
+ s.ll = src1;
+ d.ll = src2;
+
+#define PMUL(r) \
+ tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \
+ if ((tmp & 0xff) > 0x7f) { \
+ tmp += 0x100; \
+ } \
+ d.VIS_L64(r) = tmp;
+
+ /* Reverse calculation order to handle overlap */
+ PMUL(1);
+ PMUL(0);
+#undef PMUL
+
+ return d.ll;
+}
+
+uint64_t helper_fmuld8ulx16(uint64_t src1, uint64_t src2)
+{
+ VIS64 s, d;
+ uint32_t tmp;
+
+ s.ll = src1;
+ d.ll = src2;
+
+#define PMUL(r) \
+ tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \
+ if ((tmp & 0xff) > 0x7f) { \
+ tmp += 0x100; \
+ } \
+ d.VIS_L64(r) = tmp;
+
+ /* Reverse calculation order to handle overlap */
+ PMUL(1);
+ PMUL(0);
+#undef PMUL
+
+ return d.ll;
+}
+
+uint64_t helper_fexpand(uint64_t src1, uint64_t src2)
+{
+ VIS32 s;
+ VIS64 d;
+
+ s.l = (uint32_t)src1;
+ d.ll = src2;
+ d.VIS_W64(0) = s.VIS_B32(0) << 4;
+ d.VIS_W64(1) = s.VIS_B32(1) << 4;
+ d.VIS_W64(2) = s.VIS_B32(2) << 4;
+ d.VIS_W64(3) = s.VIS_B32(3) << 4;
+
+ return d.ll;
+}
+
+#define VIS_HELPER(name, F) \
+ uint64_t name##16(uint64_t src1, uint64_t src2) \
+ { \
+ VIS64 s, d; \
+ \
+ s.ll = src1; \
+ d.ll = src2; \
+ \
+ d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0)); \
+ d.VIS_W64(1) = F(d.VIS_W64(1), s.VIS_W64(1)); \
+ d.VIS_W64(2) = F(d.VIS_W64(2), s.VIS_W64(2)); \
+ d.VIS_W64(3) = F(d.VIS_W64(3), s.VIS_W64(3)); \
+ \
+ return d.ll; \
+ } \
+ \
+ uint32_t name##16s(uint32_t src1, uint32_t src2) \
+ { \
+ VIS32 s, d; \
+ \
+ s.l = src1; \
+ d.l = src2; \
+ \
+ d.VIS_W32(0) = F(d.VIS_W32(0), s.VIS_W32(0)); \
+ d.VIS_W32(1) = F(d.VIS_W32(1), s.VIS_W32(1)); \
+ \
+ return d.l; \
+ } \
+ \
+ uint64_t name##32(uint64_t src1, uint64_t src2) \
+ { \
+ VIS64 s, d; \
+ \
+ s.ll = src1; \
+ d.ll = src2; \
+ \
+ d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0)); \
+ d.VIS_L64(1) = F(d.VIS_L64(1), s.VIS_L64(1)); \
+ \
+ return d.ll; \
+ } \
+ \
+ uint32_t name##32s(uint32_t src1, uint32_t src2) \
+ { \
+ VIS32 s, d; \
+ \
+ s.l = src1; \
+ d.l = src2; \
+ \
+ d.l = F(d.l, s.l); \
+ \
+ return d.l; \
+ }
+
+#define FADD(a, b) ((a) + (b))
+#define FSUB(a, b) ((a) - (b))
+VIS_HELPER(helper_fpadd, FADD)
+VIS_HELPER(helper_fpsub, FSUB)
+
+#define VIS_CMPHELPER(name, F) \
+ uint64_t name##16(uint64_t src1, uint64_t src2) \
+ { \
+ VIS64 s, d; \
+ \
+ s.ll = src1; \
+ d.ll = src2; \
+ \
+ d.VIS_W64(0) = F(s.VIS_W64(0), d.VIS_W64(0)) ? 1 : 0; \
+ d.VIS_W64(0) |= F(s.VIS_W64(1), d.VIS_W64(1)) ? 2 : 0; \
+ d.VIS_W64(0) |= F(s.VIS_W64(2), d.VIS_W64(2)) ? 4 : 0; \
+ d.VIS_W64(0) |= F(s.VIS_W64(3), d.VIS_W64(3)) ? 8 : 0; \
+ d.VIS_W64(1) = d.VIS_W64(2) = d.VIS_W64(3) = 0; \
+ \
+ return d.ll; \
+ } \
+ \
+ uint64_t name##32(uint64_t src1, uint64_t src2) \
+ { \
+ VIS64 s, d; \
+ \
+ s.ll = src1; \
+ d.ll = src2; \
+ \
+ d.VIS_L64(0) = F(s.VIS_L64(0), d.VIS_L64(0)) ? 1 : 0; \
+ d.VIS_L64(0) |= F(s.VIS_L64(1), d.VIS_L64(1)) ? 2 : 0; \
+ d.VIS_L64(1) = 0; \
+ \
+ return d.ll; \
+ }
+
+#define FCMPGT(a, b) ((a) > (b))
+#define FCMPEQ(a, b) ((a) == (b))
+#define FCMPLE(a, b) ((a) <= (b))
+#define FCMPNE(a, b) ((a) != (b))
+
+VIS_CMPHELPER(helper_fcmpgt, FCMPGT)
+VIS_CMPHELPER(helper_fcmpeq, FCMPEQ)
+VIS_CMPHELPER(helper_fcmple, FCMPLE)
+VIS_CMPHELPER(helper_fcmpne, FCMPNE)
+
+uint64_t helper_pdist(uint64_t sum, uint64_t src1, uint64_t src2)
+{
+ int i;
+ for (i = 0; i < 8; i++) {
+ int s1, s2;
+
+ s1 = (src1 >> (56 - (i * 8))) & 0xff;
+ s2 = (src2 >> (56 - (i * 8))) & 0xff;
+
+ /* Absolute value of difference. */
+ s1 -= s2;
+ if (s1 < 0) {
+ s1 = -s1;
+ }
+
+ sum += s1;
+ }
+
+ return sum;
+}
+
+uint32_t helper_fpack16(uint64_t gsr, uint64_t rs2)
+{
+ int scale = (gsr >> 3) & 0xf;
+ uint32_t ret = 0;
+ int byte;
+
+ for (byte = 0; byte < 4; byte++) {
+ uint32_t val;
+ int16_t src = rs2 >> (byte * 16);
+ int32_t scaled = src << scale;
+ int32_t from_fixed = scaled >> 7;
+
+ val = (from_fixed < 0 ? 0 :
+ from_fixed > 255 ? 255 : from_fixed);
+
+ ret |= val << (8 * byte);
+ }
+
+ return ret;
+}
+
+uint64_t helper_fpack32(uint64_t gsr, uint64_t rs1, uint64_t rs2)
+{
+ int scale = (gsr >> 3) & 0x1f;
+ uint64_t ret = 0;
+ int word;
+
+ ret = (rs1 << 8) & ~(0x000000ff000000ffULL);
+ for (word = 0; word < 2; word++) {
+ uint64_t val;
+ int32_t src = rs2 >> (word * 32);
+ int64_t scaled = (int64_t)src << scale;
+ int64_t from_fixed = scaled >> 23;
+
+ val = (from_fixed < 0 ? 0 :
+ (from_fixed > 255) ? 255 : from_fixed);
+
+ ret |= val << (32 * word);
+ }
+
+ return ret;
+}
+
+uint32_t helper_fpackfix(uint64_t gsr, uint64_t rs2)
+{
+ int scale = (gsr >> 3) & 0x1f;
+ uint32_t ret = 0;
+ int word;
+
+ for (word = 0; word < 2; word++) {
+ uint32_t val;
+ int32_t src = rs2 >> (word * 32);
+ int64_t scaled = (int64_t)src << scale;
+ int64_t from_fixed = scaled >> 16;
+
+ val = (from_fixed < -32768 ? -32768 :
+ from_fixed > 32767 ? 32767 : from_fixed);
+
+ ret |= (val & 0xffff) << (word * 16);
+ }
+
+ return ret;
+}
+
+uint64_t helper_bshuffle(uint64_t gsr, uint64_t src1, uint64_t src2)
+{
+ union {
+ uint64_t ll[2];
+ uint8_t b[16];
+ } s;
+ VIS64 r;
+ uint32_t i, mask, host;
+
+ /* Set up S such that we can index across all of the bytes. */
+#ifdef HOST_WORDS_BIGENDIAN
+ s.ll[0] = src1;
+ s.ll[1] = src2;
+ host = 0;
+#else
+ s.ll[1] = src1;
+ s.ll[0] = src2;
+ host = 15;
+#endif
+ mask = gsr >> 32;
+
+ for (i = 0; i < 8; ++i) {
+ unsigned e = (mask >> (28 - i*4)) & 0xf;
+ r.VIS_B64(i) = s.b[e ^ host];
+ }
+
+ return r.ll;
+}
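For reference, the following is a minimal stand-alone sketch (not part of the patch) of the 16-bit partitioned semantics that the VIS_HELPER and VIS_CMPHELPER expansions above produce for helper_fpadd16 and helper_fcmpgt16. The names fpadd16_model and fcmpgt16_model are hypothetical, and the QEMU union/endianness plumbing is dropped so the sketch compiles on its own:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical model of the fpadd16 semantics generated by VIS_HELPER above:
 * each 16-bit lane of the two 64-bit operands is added independently, with
 * wraparound and no carry between lanes. */
static uint64_t fpadd16_model(uint64_t src1, uint64_t src2)
{
    uint64_t res = 0;
    int lane;

    for (lane = 0; lane < 4; lane++) {
        uint16_t a = src1 >> (lane * 16);
        uint16_t b = src2 >> (lane * 16);

        res |= (uint64_t)(uint16_t)(a + b) << (lane * 16);
    }
    return res;
}

/* Hypothetical model of the fcmpgt16 semantics generated by VIS_CMPHELPER
 * above: one result bit per 16-bit lane (lane 0 = bits 15:0), packed into
 * the low bits of the 64-bit result. */
static uint64_t fcmpgt16_model(uint64_t src1, uint64_t src2)
{
    uint64_t res = 0;
    int lane;

    for (lane = 0; lane < 4; lane++) {
        int16_t a = src1 >> (lane * 16);
        int16_t b = src2 >> (lane * 16);

        if (a > b) {
            res |= 1ULL << lane;
        }
    }
    return res;
}

int main(void)
{
    /* Each lane adds independently; 0x7fff + 0x0001 wraps to 0x8000
     * within its own lane. */
    printf("%016" PRIx64 "\n", fpadd16_model(0x7fff000100020003ULL,
                                             0x0001000100010001ULL));
    printf("%" PRIx64 "\n", fcmpgt16_model(0x0002000000020000ULL,
                                           0x0001000100010001ULL));
    return 0;
}

The VIS_B64/VIS_W64 union indexing in the helpers exists only so the per-lane view is the same on big- and little-endian hosts; the lane semantics seen through the 64-bit value match the plain loop above.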
diff --git a/target/sparc/win_helper.c b/target/sparc/win_helper.c
new file mode 100644
index 0000000000..2d5b5469a9
--- /dev/null
+++ b/target/sparc/win_helper.c
@@ -0,0 +1,400 @@
+/*
+ * Helpers for CWP and PSTATE handling
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "trace.h"
+
+static inline void memcpy32(target_ulong *dst, const target_ulong *src)
+{
+ dst[0] = src[0];
+ dst[1] = src[1];
+ dst[2] = src[2];
+ dst[3] = src[3];
+ dst[4] = src[4];
+ dst[5] = src[5];
+ dst[6] = src[6];
+ dst[7] = src[7];
+}
+
+void cpu_set_cwp(CPUSPARCState *env, int new_cwp)
+{
+ /* put the modified wrap registers at their proper location */
+ if (env->cwp == env->nwindows - 1) {
+ memcpy32(env->regbase, env->regbase + env->nwindows * 16);
+ }
+ env->cwp = new_cwp;
+
+ /* put the wrap registers at their temporary location */
+ if (new_cwp == env->nwindows - 1) {
+ memcpy32(env->regbase + env->nwindows * 16, env->regbase);
+ }
+ env->regwptr = env->regbase + (new_cwp * 16);
+}
+
+target_ulong cpu_get_psr(CPUSPARCState *env)
+{
+ helper_compute_psr(env);
+
+#if !defined(TARGET_SPARC64)
+ return env->version | (env->psr & PSR_ICC) |
+ (env->psref ? PSR_EF : 0) |
+ (env->psrpil << 8) |
+ (env->psrs ? PSR_S : 0) |
+ (env->psrps ? PSR_PS : 0) |
+ (env->psret ? PSR_ET : 0) | env->cwp;
+#else
+ return env->psr & PSR_ICC;
+#endif
+}
+
+void cpu_put_psr_raw(CPUSPARCState *env, target_ulong val)
+{
+ env->psr = val & PSR_ICC;
+#if !defined(TARGET_SPARC64)
+ env->psref = (val & PSR_EF) ? 1 : 0;
+ env->psrpil = (val & PSR_PIL) >> 8;
+ env->psrs = (val & PSR_S) ? 1 : 0;
+ env->psrps = (val & PSR_PS) ? 1 : 0;
+ env->psret = (val & PSR_ET) ? 1 : 0;
+#endif
+ env->cc_op = CC_OP_FLAGS;
+#if !defined(TARGET_SPARC64)
+ cpu_set_cwp(env, val & PSR_CWP);
+#endif
+}
+
+void cpu_put_psr(CPUSPARCState *env, target_ulong val)
+{
+ cpu_put_psr_raw(env, val);
+#if ((!defined(TARGET_SPARC64)) && !defined(CONFIG_USER_ONLY))
+ cpu_check_irqs(env);
+#endif
+}
+
+int cpu_cwp_inc(CPUSPARCState *env, int cwp)
+{
+ if (unlikely(cwp >= env->nwindows)) {
+ cwp -= env->nwindows;
+ }
+ return cwp;
+}
+
+int cpu_cwp_dec(CPUSPARCState *env, int cwp)
+{
+ if (unlikely(cwp < 0)) {
+ cwp += env->nwindows;
+ }
+ return cwp;
+}
+
+#ifndef TARGET_SPARC64
+void helper_rett(CPUSPARCState *env)
+{
+ unsigned int cwp;
+
+ if (env->psret == 1) {
+ cpu_raise_exception_ra(env, TT_ILL_INSN, GETPC());
+ }
+
+ env->psret = 1;
+ cwp = cpu_cwp_inc(env, env->cwp + 1);
+ if (env->wim & (1 << cwp)) {
+ cpu_raise_exception_ra(env, TT_WIN_UNF, GETPC());
+ }
+ cpu_set_cwp(env, cwp);
+ env->psrs = env->psrps;
+}
+
+/* XXX: use another pointer for %iN registers to avoid slow wrapping
+   handling? */
+void helper_save(CPUSPARCState *env)
+{
+ uint32_t cwp;
+
+ cwp = cpu_cwp_dec(env, env->cwp - 1);
+ if (env->wim & (1 << cwp)) {
+ cpu_raise_exception_ra(env, TT_WIN_OVF, GETPC());
+ }
+ cpu_set_cwp(env, cwp);
+}
+
+void helper_restore(CPUSPARCState *env)
+{
+ uint32_t cwp;
+
+ cwp = cpu_cwp_inc(env, env->cwp + 1);
+ if (env->wim & (1 << cwp)) {
+ cpu_raise_exception_ra(env, TT_WIN_UNF, GETPC());
+ }
+ cpu_set_cwp(env, cwp);
+}
+
+void helper_wrpsr(CPUSPARCState *env, target_ulong new_psr)
+{
+ if ((new_psr & PSR_CWP) >= env->nwindows) {
+ cpu_raise_exception_ra(env, TT_ILL_INSN, GETPC());
+ } else {
+ cpu_put_psr(env, new_psr);
+ }
+}
+
+target_ulong helper_rdpsr(CPUSPARCState *env)
+{
+ return cpu_get_psr(env);
+}
+
+#else
+/* XXX: use another pointer for %iN registers to avoid slow wrapping
+   handling? */
+void helper_save(CPUSPARCState *env)
+{
+ uint32_t cwp;
+
+ cwp = cpu_cwp_dec(env, env->cwp - 1);
+ if (env->cansave == 0) {
+ int tt = TT_SPILL | (env->otherwin != 0
+ ? (TT_WOTHER | ((env->wstate & 0x38) >> 1))
+ : ((env->wstate & 0x7) << 2));
+ cpu_raise_exception_ra(env, tt, GETPC());
+ } else {
+ if (env->cleanwin - env->canrestore == 0) {
+ /* XXX Clean windows without trap */
+ cpu_raise_exception_ra(env, TT_CLRWIN, GETPC());
+ } else {
+ env->cansave--;
+ env->canrestore++;
+ cpu_set_cwp(env, cwp);
+ }
+ }
+}
+
+void helper_restore(CPUSPARCState *env)
+{
+ uint32_t cwp;
+
+ cwp = cpu_cwp_inc(env, env->cwp + 1);
+ if (env->canrestore == 0) {
+ int tt = TT_FILL | (env->otherwin != 0
+ ? (TT_WOTHER | ((env->wstate & 0x38) >> 1))
+ : ((env->wstate & 0x7) << 2));
+ cpu_raise_exception_ra(env, tt, GETPC());
+ } else {
+ env->cansave++;
+ env->canrestore--;
+ cpu_set_cwp(env, cwp);
+ }
+}
+
+void helper_flushw(CPUSPARCState *env)
+{
+ if (env->cansave != env->nwindows - 2) {
+ int tt = TT_SPILL | (env->otherwin != 0
+ ? (TT_WOTHER | ((env->wstate & 0x38) >> 1))
+ : ((env->wstate & 0x7) << 2));
+ cpu_raise_exception_ra(env, tt, GETPC());
+ }
+}
+
+void helper_saved(CPUSPARCState *env)
+{
+ env->cansave++;
+ if (env->otherwin == 0) {
+ env->canrestore--;
+ } else {
+ env->otherwin--;
+ }
+}
+
+void helper_restored(CPUSPARCState *env)
+{
+ env->canrestore++;
+ if (env->cleanwin < env->nwindows - 1) {
+ env->cleanwin++;
+ }
+ if (env->otherwin == 0) {
+ env->cansave--;
+ } else {
+ env->otherwin--;
+ }
+}
+
+target_ulong cpu_get_ccr(CPUSPARCState *env)
+{
+ target_ulong psr;
+
+ psr = cpu_get_psr(env);
+
+ return ((env->xcc >> 20) << 4) | ((psr & PSR_ICC) >> 20);
+}
+
+void cpu_put_ccr(CPUSPARCState *env, target_ulong val)
+{
+ env->xcc = (val >> 4) << 20;
+ env->psr = (val & 0xf) << 20;
+ CC_OP = CC_OP_FLAGS;
+}
+
+target_ulong cpu_get_cwp64(CPUSPARCState *env)
+{
+ return env->nwindows - 1 - env->cwp;
+}
+
+void cpu_put_cwp64(CPUSPARCState *env, int cwp)
+{
+ if (unlikely(cwp >= env->nwindows || cwp < 0)) {
+ cwp %= env->nwindows;
+ }
+ cpu_set_cwp(env, env->nwindows - 1 - cwp);
+}
+
+target_ulong helper_rdccr(CPUSPARCState *env)
+{
+ return cpu_get_ccr(env);
+}
+
+void helper_wrccr(CPUSPARCState *env, target_ulong new_ccr)
+{
+ cpu_put_ccr(env, new_ccr);
+}
+
+/* CWP handling is reversed in V9, but we still use the V8 register
+ order. */
+target_ulong helper_rdcwp(CPUSPARCState *env)
+{
+ return cpu_get_cwp64(env);
+}
+
+void helper_wrcwp(CPUSPARCState *env, target_ulong new_cwp)
+{
+ cpu_put_cwp64(env, new_cwp);
+}
+
+static inline uint64_t *get_gregset(CPUSPARCState *env, uint32_t pstate)
+{
+ switch (pstate) {
+ default:
+ trace_win_helper_gregset_error(pstate);
+ /* pass through to normal set of global registers */
+ case 0:
+ return env->bgregs;
+ case PS_AG:
+ return env->agregs;
+ case PS_MG:
+ return env->mgregs;
+ case PS_IG:
+ return env->igregs;
+ }
+}
+
+void cpu_change_pstate(CPUSPARCState *env, uint32_t new_pstate)
+{
+ uint32_t pstate_regs, new_pstate_regs;
+ uint64_t *src, *dst;
+
+ if (env->def->features & CPU_FEATURE_GL) {
+ /* PS_AG is not implemented in this case */
+ new_pstate &= ~PS_AG;
+ }
+
+ pstate_regs = env->pstate & 0xc01;
+ new_pstate_regs = new_pstate & 0xc01;
+
+ if (new_pstate_regs != pstate_regs) {
+ trace_win_helper_switch_pstate(pstate_regs, new_pstate_regs);
+
+ /* Switch global register bank */
+ src = get_gregset(env, new_pstate_regs);
+ dst = get_gregset(env, pstate_regs);
+ memcpy32(dst, env->gregs);
+ memcpy32(env->gregs, src);
+ } else {
+ trace_win_helper_no_switch_pstate(new_pstate_regs);
+ }
+ env->pstate = new_pstate;
+}
+
+void helper_wrpstate(CPUSPARCState *env, target_ulong new_state)
+{
+ cpu_change_pstate(env, new_state & 0xf3f);
+
+#if !defined(CONFIG_USER_ONLY)
+ if (cpu_interrupts_enabled(env)) {
+ cpu_check_irqs(env);
+ }
+#endif
+}
+
+void helper_wrpil(CPUSPARCState *env, target_ulong new_pil)
+{
+#if !defined(CONFIG_USER_ONLY)
+ trace_win_helper_wrpil(env->psrpil, (uint32_t)new_pil);
+
+ env->psrpil = new_pil;
+
+ if (cpu_interrupts_enabled(env)) {
+ cpu_check_irqs(env);
+ }
+#endif
+}
+
+void helper_done(CPUSPARCState *env)
+{
+ trap_state *tsptr = cpu_tsptr(env);
+
+ env->pc = tsptr->tnpc;
+ env->npc = tsptr->tnpc + 4;
+ cpu_put_ccr(env, tsptr->tstate >> 32);
+ env->asi = (tsptr->tstate >> 24) & 0xff;
+ cpu_change_pstate(env, (tsptr->tstate >> 8) & 0xf3f);
+ cpu_put_cwp64(env, tsptr->tstate & 0xff);
+ env->tl--;
+
+ trace_win_helper_done(env->tl);
+
+#if !defined(CONFIG_USER_ONLY)
+ if (cpu_interrupts_enabled(env)) {
+ cpu_check_irqs(env);
+ }
+#endif
+}
+
+void helper_retry(CPUSPARCState *env)
+{
+ trap_state *tsptr = cpu_tsptr(env);
+
+ env->pc = tsptr->tpc;
+ env->npc = tsptr->tnpc;
+ cpu_put_ccr(env, tsptr->tstate >> 32);
+ env->asi = (tsptr->tstate >> 24) & 0xff;
+ cpu_change_pstate(env, (tsptr->tstate >> 8) & 0xf3f);
+ cpu_put_cwp64(env, tsptr->tstate & 0xff);
+ env->tl--;
+
+ trace_win_helper_retry(env->tl);
+
+#if !defined(CONFIG_USER_ONLY)
+ if (cpu_interrupts_enabled(env)) {
+ cpu_check_irqs(env);
+ }
+#endif
+}
+#endif
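As a side note, the SAVE/RESTORE window arithmetic above reduces to index arithmetic modulo nwindows plus a check against the window invalid mask. Below is a minimal stand-alone model (not part of the patch) of the V8 helper_save path; save_model and cwp_dec_model are hypothetical names, while cwp, wim and nwindows mirror the CPUSPARCState fields used above:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical model of the V8 SAVE path: CWP is decremented modulo
 * nwindows, and landing on a window whose bit is set in WIM corresponds
 * to the TT_WIN_OVF trap in helper_save. */
struct win_model {
    int nwindows;   /* number of register windows, e.g. 8 */
    int cwp;        /* current window pointer */
    uint32_t wim;   /* window invalid mask */
};

static int cwp_dec_model(const struct win_model *w, int cwp)
{
    return cwp < 0 ? cwp + w->nwindows : cwp;
}

static bool save_model(struct win_model *w)
{
    int cwp = cwp_dec_model(w, w->cwp - 1);

    if (w->wim & (1u << cwp)) {
        return false;           /* helper_save would raise TT_WIN_OVF */
    }
    w->cwp = cwp;
    return true;
}

int main(void)
{
    struct win_model w = { .nwindows = 8, .cwp = 0, .wim = 1u << 7 };

    /* Decrementing from window 0 wraps to window 7, which is marked
     * invalid here, so this SAVE would overflow (prints 0). */
    printf("save ok: %d\n", save_model(&w));
    return 0;
}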
diff --git a/target/tilegx/Makefile.objs b/target/tilegx/Makefile.objs
new file mode 100644
index 0000000000..0db778f407
--- /dev/null
+++ b/target/tilegx/Makefile.objs
@@ -0,0 +1 @@
+obj-y += cpu.o translate.o helper.o simd_helper.o
diff --git a/target/tilegx/cpu.c b/target/tilegx/cpu.c
new file mode 100644
index 0000000000..454793f94a
--- /dev/null
+++ b/target/tilegx/cpu.c
@@ -0,0 +1,187 @@
+/*
+ * QEMU TILE-Gx CPU
+ *
+ * Copyright (c) 2015 Chen Gang
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "qemu-common.h"
+#include "hw/qdev-properties.h"
+#include "migration/vmstate.h"
+#include "linux-user/syscall_defs.h"
+#include "exec/exec-all.h"
+
+static void tilegx_cpu_dump_state(CPUState *cs, FILE *f,
+ fprintf_function cpu_fprintf, int flags)
+{
+ static const char * const reg_names[TILEGX_R_COUNT] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+ "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
+ "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
+ "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
+ "r48", "r49", "r50", "r51", "bp", "tp", "sp", "lr"
+ };
+
+ TileGXCPU *cpu = TILEGX_CPU(cs);
+ CPUTLGState *env = &cpu->env;
+ int i;
+
+ for (i = 0; i < TILEGX_R_COUNT; i++) {
+ cpu_fprintf(f, "%-4s" TARGET_FMT_lx "%s",
+ reg_names[i], env->regs[i],
+ (i % 4) == 3 ? "\n" : " ");
+ }
+ cpu_fprintf(f, "PC " TARGET_FMT_lx " CEX " TARGET_FMT_lx "\n\n",
+ env->pc, env->spregs[TILEGX_SPR_CMPEXCH]);
+}
+
+TileGXCPU *cpu_tilegx_init(const char *cpu_model)
+{
+ TileGXCPU *cpu;
+
+ cpu = TILEGX_CPU(object_new(TYPE_TILEGX_CPU));
+
+ object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
+
+ return cpu;
+}
+
+static void tilegx_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ TileGXCPU *cpu = TILEGX_CPU(cs);
+
+ cpu->env.pc = value;
+}
+
+static bool tilegx_cpu_has_work(CPUState *cs)
+{
+ return true;
+}
+
+static void tilegx_cpu_reset(CPUState *s)
+{
+ TileGXCPU *cpu = TILEGX_CPU(s);
+ TileGXCPUClass *tcc = TILEGX_CPU_GET_CLASS(cpu);
+ CPUTLGState *env = &cpu->env;
+
+ tcc->parent_reset(s);
+
+ memset(env, 0, sizeof(CPUTLGState));
+ tlb_flush(s, 1);
+}
+
+static void tilegx_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ TileGXCPUClass *tcc = TILEGX_CPU_GET_CLASS(dev);
+ Error *local_err = NULL;
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ cpu_reset(cs);
+ qemu_init_vcpu(cs);
+
+ tcc->parent_realize(dev, errp);
+}
+
+static void tilegx_cpu_initfn(Object *obj)
+{
+ CPUState *cs = CPU(obj);
+ TileGXCPU *cpu = TILEGX_CPU(obj);
+ CPUTLGState *env = &cpu->env;
+ static bool tcg_initialized;
+
+ cs->env_ptr = env;
+
+ if (tcg_enabled() && !tcg_initialized) {
+ tcg_initialized = true;
+ tilegx_tcg_init();
+ }
+}
+
+static void tilegx_cpu_do_interrupt(CPUState *cs)
+{
+ cs->exception_index = -1;
+}
+
+static int tilegx_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
+ int mmu_idx)
+{
+ TileGXCPU *cpu = TILEGX_CPU(cs);
+
+ /* The sigcode field will be filled in by do_signal in main.c. */
+ cs->exception_index = TILEGX_EXCP_SIGNAL;
+ cpu->env.excaddr = address;
+ cpu->env.signo = TARGET_SIGSEGV;
+ cpu->env.sigcode = 0;
+
+ return 1;
+}
+
+static bool tilegx_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ if (interrupt_request & CPU_INTERRUPT_HARD) {
+ tilegx_cpu_do_interrupt(cs);
+ return true;
+ }
+ return false;
+}
+
+static void tilegx_cpu_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(oc);
+ TileGXCPUClass *tcc = TILEGX_CPU_CLASS(oc);
+
+ tcc->parent_realize = dc->realize;
+ dc->realize = tilegx_cpu_realizefn;
+
+ tcc->parent_reset = cc->reset;
+ cc->reset = tilegx_cpu_reset;
+
+ cc->has_work = tilegx_cpu_has_work;
+ cc->do_interrupt = tilegx_cpu_do_interrupt;
+ cc->cpu_exec_interrupt = tilegx_cpu_exec_interrupt;
+ cc->dump_state = tilegx_cpu_dump_state;
+ cc->set_pc = tilegx_cpu_set_pc;
+ cc->handle_mmu_fault = tilegx_cpu_handle_mmu_fault;
+ cc->gdb_num_core_regs = 0;
+}
+
+static const TypeInfo tilegx_cpu_type_info = {
+ .name = TYPE_TILEGX_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(TileGXCPU),
+ .instance_init = tilegx_cpu_initfn,
+ .class_size = sizeof(TileGXCPUClass),
+ .class_init = tilegx_cpu_class_init,
+};
+
+static void tilegx_cpu_register_types(void)
+{
+ type_register_static(&tilegx_cpu_type_info);
+}
+
+type_init(tilegx_cpu_register_types)
diff --git a/target/tilegx/cpu.h b/target/tilegx/cpu.h
new file mode 100644
index 0000000000..1735427233
--- /dev/null
+++ b/target/tilegx/cpu.h
@@ -0,0 +1,178 @@
+/*
+ * TILE-Gx virtual CPU header
+ *
+ * Copyright (c) 2015 Chen Gang
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TILEGX_CPU_H
+#define TILEGX_CPU_H
+
+#include "qemu-common.h"
+
+#define TARGET_LONG_BITS 64
+
+#define CPUArchState struct CPUTLGState
+
+#include "exec/cpu-defs.h"
+
+
+/* TILE-Gx common register aliases */
+#define TILEGX_R_RE 0 /* Register 0, function/syscall return value */
+#define TILEGX_R_ERR 1 /* Register 1, syscall errno flag */
+#define TILEGX_R_NR 10 /* Register 10, syscall number */
+#define TILEGX_R_BP 52 /* Register 52, optional frame pointer */
+#define TILEGX_R_TP 53 /* TP register, thread-local storage data */
+#define TILEGX_R_SP 54 /* SP register, stack pointer */
+#define TILEGX_R_LR 55 /* LR register, holds the return address (it is not the pc) */
+#define TILEGX_R_COUNT 56 /* Only the first 56 registers are generally usable */
+#define TILEGX_R_SN 56 /* SN register, obsolete; behaves like the zero register */
+#define TILEGX_R_IDN0 57 /* IDN0 register, access causes an IDN_ACCESS exception */
+#define TILEGX_R_IDN1 58 /* IDN1 register, access causes an IDN_ACCESS exception */
+#define TILEGX_R_UDN0 59 /* UDN0 register, access causes a UDN_ACCESS exception */
+#define TILEGX_R_UDN1 60 /* UDN1 register, access causes a UDN_ACCESS exception */
+#define TILEGX_R_UDN2 61 /* UDN2 register, access causes a UDN_ACCESS exception */
+#define TILEGX_R_UDN3 62 /* UDN3 register, access causes a UDN_ACCESS exception */
+#define TILEGX_R_ZERO 63 /* Zero register, always reads as zero */
+#define TILEGX_R_NOREG 255 /* Invalid register value */
+
+/* TILE-Gx special-purpose registers, used by external code */
+enum {
+ TILEGX_SPR_CMPEXCH = 0,
+ TILEGX_SPR_CRITICAL_SEC = 1,
+ TILEGX_SPR_SIM_CONTROL = 2,
+ TILEGX_SPR_EX_CONTEXT_0_0 = 3,
+ TILEGX_SPR_EX_CONTEXT_0_1 = 4,
+ TILEGX_SPR_COUNT
+};
+
+/* Exception numbers */
+typedef enum {
+ TILEGX_EXCP_NONE = 0,
+ TILEGX_EXCP_SYSCALL = 1,
+ TILEGX_EXCP_SIGNAL = 2,
+ TILEGX_EXCP_OPCODE_UNKNOWN = 0x101,
+ TILEGX_EXCP_OPCODE_UNIMPLEMENTED = 0x102,
+ TILEGX_EXCP_OPCODE_CMPEXCH = 0x103,
+ TILEGX_EXCP_OPCODE_CMPEXCH4 = 0x104,
+ TILEGX_EXCP_OPCODE_EXCH = 0x105,
+ TILEGX_EXCP_OPCODE_EXCH4 = 0x106,
+ TILEGX_EXCP_OPCODE_FETCHADD = 0x107,
+ TILEGX_EXCP_OPCODE_FETCHADD4 = 0x108,
+ TILEGX_EXCP_OPCODE_FETCHADDGEZ = 0x109,
+ TILEGX_EXCP_OPCODE_FETCHADDGEZ4 = 0x10a,
+ TILEGX_EXCP_OPCODE_FETCHAND = 0x10b,
+ TILEGX_EXCP_OPCODE_FETCHAND4 = 0x10c,
+ TILEGX_EXCP_OPCODE_FETCHOR = 0x10d,
+ TILEGX_EXCP_OPCODE_FETCHOR4 = 0x10e,
+ TILEGX_EXCP_REG_IDN_ACCESS = 0x181,
+ TILEGX_EXCP_REG_UDN_ACCESS = 0x182,
+ TILEGX_EXCP_UNALIGNMENT = 0x201,
+ TILEGX_EXCP_DBUG_BREAK = 0x301
+} TileExcp;
+
+typedef struct CPUTLGState {
+ uint64_t regs[TILEGX_R_COUNT]; /* General registers, used by external code */
+ uint64_t spregs[TILEGX_SPR_COUNT]; /* Special-purpose registers, used by external code */
+ uint64_t pc; /* Current pc */
+
+#if defined(CONFIG_USER_ONLY)
+ uint64_t excaddr; /* exception address */
+ uint64_t atomic_srca; /* Arguments to atomic "exceptions" */
+ uint64_t atomic_srcb;
+ uint32_t atomic_dstr;
+ uint32_t signo; /* Signal number */
+ uint32_t sigcode; /* Signal code */
+#endif
+
+ CPU_COMMON
+} CPUTLGState;
+
+#include "qom/cpu.h"
+
+#define TYPE_TILEGX_CPU "tilegx-cpu"
+
+#define TILEGX_CPU_CLASS(klass) \
+ OBJECT_CLASS_CHECK(TileGXCPUClass, (klass), TYPE_TILEGX_CPU)
+#define TILEGX_CPU(obj) \
+ OBJECT_CHECK(TileGXCPU, (obj), TYPE_TILEGX_CPU)
+#define TILEGX_CPU_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(TileGXCPUClass, (obj), TYPE_TILEGX_CPU)
+
+/**
+ * TileGXCPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ *
+ * A TILE-Gx CPU model.
+ */
+typedef struct TileGXCPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+ void (*parent_reset)(CPUState *cpu);
+} TileGXCPUClass;
+
+/**
+ * TileGXCPU:
+ * @env: #CPUTLGState
+ *
+ * A TILE-Gx CPU.
+ */
+typedef struct TileGXCPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPUTLGState env;
+} TileGXCPU;
+
+static inline TileGXCPU *tilegx_env_get_cpu(CPUTLGState *env)
+{
+ return container_of(env, TileGXCPU, env);
+}
+
+#define ENV_GET_CPU(e) CPU(tilegx_env_get_cpu(e))
+
+#define ENV_OFFSET offsetof(TileGXCPU, env)
+
+/* TILE-Gx memory attributes */
+#define TARGET_PAGE_BITS 16 /* TILE-Gx uses 64KB page size */
+#define TARGET_PHYS_ADDR_SPACE_BITS 42
+#define TARGET_VIRT_ADDR_SPACE_BITS 64
+#define MMU_USER_IDX 0 /* Current memory operation is in user mode */
+
+#include "exec/cpu-all.h"
+
+void tilegx_tcg_init(void);
+int cpu_tilegx_signal_handler(int host_signum, void *pinfo, void *puc);
+
+TileGXCPU *cpu_tilegx_init(const char *cpu_model);
+
+#define cpu_init(cpu_model) CPU(cpu_tilegx_init(cpu_model))
+
+#define cpu_signal_handler cpu_tilegx_signal_handler
+
+static inline void cpu_get_tb_cpu_state(CPUTLGState *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *flags)
+{
+ *pc = env->pc;
+ *cs_base = 0;
+ *flags = 0;
+}
+
+#endif
diff --git a/target/tilegx/helper.c b/target/tilegx/helper.c
new file mode 100644
index 0000000000..b4fba9cc21
--- /dev/null
+++ b/target/tilegx/helper.c
@@ -0,0 +1,163 @@
+/*
+ * QEMU TILE-Gx helpers
+ *
+ * Copyright (c) 2015 Chen Gang
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "qemu-common.h"
+#include "exec/helper-proto.h"
+#include <zlib.h> /* For crc32 */
+#include "syscall_defs.h"
+
+void helper_exception(CPUTLGState *env, uint32_t excp)
+{
+ CPUState *cs = CPU(tilegx_env_get_cpu(env));
+
+ cs->exception_index = excp;
+ cpu_loop_exit(cs);
+}
+
+void helper_ext01_ics(CPUTLGState *env)
+{
+ uint64_t val = env->spregs[TILEGX_SPR_EX_CONTEXT_0_1];
+
+ switch (val) {
+ case 0:
+ case 1:
+ env->spregs[TILEGX_SPR_CRITICAL_SEC] = val;
+ break;
+ default:
+#if defined(CONFIG_USER_ONLY)
+ env->signo = TARGET_SIGILL;
+ env->sigcode = TARGET_ILL_ILLOPC;
+ helper_exception(env, TILEGX_EXCP_SIGNAL);
+#else
+ helper_exception(env, TILEGX_EXCP_OPCODE_UNIMPLEMENTED);
+#endif
+ break;
+ }
+}
+
+uint64_t helper_cntlz(uint64_t arg)
+{
+ return clz64(arg);
+}
+
+uint64_t helper_cnttz(uint64_t arg)
+{
+ return ctz64(arg);
+}
+
+uint64_t helper_pcnt(uint64_t arg)
+{
+ return ctpop64(arg);
+}
+
+uint64_t helper_revbits(uint64_t arg)
+{
+ return revbit64(arg);
+}
+
+/*
+ * Functional Description
+ * uint64_t a = rf[SrcA];
+ * uint64_t b = rf[SrcB];
+ * uint64_t d = rf[Dest];
+ * uint64_t output = 0;
+ * unsigned int counter;
+ * for (counter = 0; counter < (WORD_SIZE / BYTE_SIZE); counter++)
+ * {
+ * int sel = getByte (b, counter) & 0xf;
+ * uint8_t byte = (sel < 8) ? getByte (d, sel) : getByte (a, (sel - 8));
+ * output = setByte (output, counter, byte);
+ * }
+ * rf[Dest] = output;
+ */
+uint64_t helper_shufflebytes(uint64_t dest, uint64_t srca, uint64_t srcb)
+{
+ uint64_t vdst = 0;
+ int count;
+
+ for (count = 0; count < 64; count += 8) {
+ uint64_t sel = srcb >> count;
+ uint64_t src = (sel & 8) ? srca : dest;
+ vdst |= extract64(src, (sel & 7) * 8, 8) << count;
+ }
+
+ return vdst;
+}
+
+uint64_t helper_crc32_8(uint64_t accum, uint64_t input)
+{
+ uint8_t buf = input;
+
+ /* zlib crc32 converts the accumulator and output to one's complement. */
+ return crc32(accum ^ 0xffffffff, &buf, 1) ^ 0xffffffff;
+}
+
+uint64_t helper_crc32_32(uint64_t accum, uint64_t input)
+{
+ uint8_t buf[4];
+
+ stl_le_p(buf, input);
+
+ /* zlib crc32 converts the accumulator and output to one's complement. */
+ return crc32(accum ^ 0xffffffff, buf, 4) ^ 0xffffffff;
+}
+
+uint64_t helper_cmula(uint64_t srcd, uint64_t srca, uint64_t srcb)
+{
+ uint32_t reala = (int16_t)srca;
+ uint32_t imaga = (int16_t)(srca >> 16);
+ uint32_t realb = (int16_t)srcb;
+ uint32_t imagb = (int16_t)(srcb >> 16);
+ uint32_t reald = srcd;
+ uint32_t imagd = srcd >> 32;
+ uint32_t realr = reala * realb - imaga * imagb + reald;
+ uint32_t imagr = reala * imagb + imaga * realb + imagd;
+
+ return deposit64(realr, 32, 32, imagr);
+}
+
+uint64_t helper_cmulaf(uint64_t srcd, uint64_t srca, uint64_t srcb)
+{
+ uint32_t reala = (int16_t)srca;
+ uint32_t imaga = (int16_t)(srca >> 16);
+ uint32_t realb = (int16_t)srcb;
+ uint32_t imagb = (int16_t)(srcb >> 16);
+ uint32_t reald = (int16_t)srcd;
+ uint32_t imagd = (int16_t)(srcd >> 16);
+ int32_t realr = reala * realb - imaga * imagb;
+ int32_t imagr = reala * imagb + imaga * realb;
+
+ return deposit32((realr >> 15) + reald, 16, 16, (imagr >> 15) + imagd);
+}
+
+uint64_t helper_cmul2(uint64_t srca, uint64_t srcb, int shift, int round)
+{
+ uint32_t reala = (int16_t)srca;
+ uint32_t imaga = (int16_t)(srca >> 16);
+ uint32_t realb = (int16_t)srcb;
+ uint32_t imagb = (int16_t)(srcb >> 16);
+ int32_t realr = reala * realb - imaga * imagb + round;
+ int32_t imagr = reala * imagb + imaga * realb + round;
+
+ return deposit32(realr >> shift, 16, 16, imagr >> shift);
+}
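For illustration (not part of the patch), here is a stand-alone model of the shufflebytes semantics spelled out in the functional-description comment above; shufflebytes_model is a hypothetical name, and the QEMU extract64 call is replaced by plain shifts and masks:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical model of shufflebytes: byte i of the result is chosen by the
 * low nibble of byte i of srcb, selecting from dest (sel < 8) or srca
 * (sel >= 8), exactly as in the functional description above. */
static uint64_t shufflebytes_model(uint64_t dest, uint64_t srca, uint64_t srcb)
{
    uint64_t out = 0;
    int i;

    for (i = 0; i < 8; i++) {
        unsigned sel = (srcb >> (i * 8)) & 0xf;
        uint64_t src = (sel & 8) ? srca : dest;
        uint64_t byte = (src >> ((sel & 7) * 8)) & 0xff;

        out |= byte << (i * 8);
    }
    return out;
}

int main(void)
{
    uint64_t dest = 0x1122334455667788ULL;
    uint64_t srca = 0xaabbccddeeff0011ULL; /* unused here: all selectors < 8 */
    /* Selector nibbles 7..0 pick dest's bytes in reverse order. */
    uint64_t selectors = 0x0001020304050607ULL;

    printf("%016" PRIx64 "\n", shufflebytes_model(dest, srca, selectors));
    return 0;
}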
diff --git a/target/tilegx/helper.h b/target/tilegx/helper.h
new file mode 100644
index 0000000000..9281d0f428
--- /dev/null
+++ b/target/tilegx/helper.h
@@ -0,0 +1,26 @@
+DEF_HELPER_2(exception, noreturn, env, i32)
+DEF_HELPER_1(ext01_ics, void, env)
+DEF_HELPER_FLAGS_1(cntlz, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(cnttz, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(pcnt, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_1(revbits, TCG_CALL_NO_RWG_SE, i64, i64)
+DEF_HELPER_FLAGS_3(shufflebytes, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64)
+DEF_HELPER_FLAGS_2(crc32_8, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(crc32_32, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_3(cmula, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64)
+DEF_HELPER_FLAGS_3(cmulaf, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64)
+DEF_HELPER_FLAGS_4(cmul2, TCG_CALL_NO_RWG_SE, i64, i64, i64, int, int)
+
+DEF_HELPER_FLAGS_2(v1int_h, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(v1int_l, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(v2int_h, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(v2int_l, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+
+DEF_HELPER_FLAGS_2(v1multu, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(v2mults, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(v1shl, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(v1shru, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(v1shrs, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(v2shl, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(v2shru, TCG_CALL_NO_RWG_SE, i64, i64, i64)
+DEF_HELPER_FLAGS_2(v2shrs, TCG_CALL_NO_RWG_SE, i64, i64, i64)
diff --git a/target/tilegx/opcode_tilegx.h b/target/tilegx/opcode_tilegx.h
new file mode 100644
index 0000000000..55376be4cf
--- /dev/null
+++ b/target/tilegx/opcode_tilegx.h
@@ -0,0 +1,1406 @@
+/* TILE-Gx opcode information.
+ *
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ *
+ *
+ *
+ *
+ *
+ */
+
+#ifndef OPCODE_TILEGX_H
+#define OPCODE_TILEGX_H
+
+#ifndef __ASSEMBLER__
+
+typedef uint64_t tilegx_bundle_bits;
+
+/* These are the bits that determine if a bundle is in the X encoding. */
+#define TILEGX_BUNDLE_MODE_MASK ((tilegx_bundle_bits)3 << 62)
+
+enum
+{
+ /* Maximum number of instructions in a bundle (2 for X, 3 for Y). */
+ TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE = 3,
+
+ /* How many different pipeline encodings are there? X0, X1, Y0, Y1, Y2. */
+ TILEGX_NUM_PIPELINE_ENCODINGS = 5,
+
+ /* Log base 2 of TILEGX_BUNDLE_SIZE_IN_BYTES. */
+ TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES = 3,
+
+ /* Instructions take this many bytes. */
+ TILEGX_BUNDLE_SIZE_IN_BYTES = 1 << TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES,
+
+ /* Log base 2 of TILEGX_BUNDLE_ALIGNMENT_IN_BYTES. */
+ TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES = 3,
+
+ /* Bundles should be aligned modulo this number of bytes. */
+ TILEGX_BUNDLE_ALIGNMENT_IN_BYTES =
+ (1 << TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES),
+
+ /* Number of registers (some are magic, such as network I/O). */
+ TILEGX_NUM_REGISTERS = 64,
+};
+
+/* Make a few "tile_" variables to simplify common code between
+ architectures. */
+
+typedef tilegx_bundle_bits tile_bundle_bits;
+#define TILE_BUNDLE_SIZE_IN_BYTES TILEGX_BUNDLE_SIZE_IN_BYTES
+#define TILE_BUNDLE_ALIGNMENT_IN_BYTES TILEGX_BUNDLE_ALIGNMENT_IN_BYTES
+#define TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES \
+ TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES
+#define TILE_BPT_BUNDLE TILEGX_BPT_BUNDLE
+
+/* 64-bit pattern for a { bpt ; nop } bundle. */
+#define TILEGX_BPT_BUNDLE 0x286a44ae51485000ULL
+
+static inline unsigned int
+get_BFEnd_X0(tilegx_bundle_bits num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((n >> 12)) & 0x3f);
+}
+
+static inline unsigned int
+get_BFOpcodeExtension_X0(tilegx_bundle_bits num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((n >> 24)) & 0xf);
+}
+
+static inline unsigned int
+get_BFStart_X0(tilegx_bundle_bits num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((n >> 18)) & 0x3f);
+}
+
+static inline unsigned int
+get_BrOff_X1(tilegx_bundle_bits n)
+{
+ return (((unsigned int)(n >> 31)) & 0x0000003f) |
+ (((unsigned int)(n >> 37)) & 0x0001ffc0);
+}
+
+static inline unsigned int
+get_BrType_X1(tilegx_bundle_bits n)
+{
+ return (((unsigned int)(n >> 54)) & 0x1f);
+}
+
+static inline unsigned int
+get_Dest_Imm8_X1(tilegx_bundle_bits n)
+{
+ return (((unsigned int)(n >> 31)) & 0x0000003f) |
+ (((unsigned int)(n >> 43)) & 0x000000c0);
+}
+
+static inline unsigned int
+get_Dest_X0(tilegx_bundle_bits num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((n >> 0)) & 0x3f);
+}
+
+static inline unsigned int
+get_Dest_X1(tilegx_bundle_bits n)
+{
+ return (((unsigned int)(n >> 31)) & 0x3f);
+}
+
+static inline unsigned int
+get_Dest_Y0(tilegx_bundle_bits num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((n >> 0)) & 0x3f);
+}
+
+static inline unsigned int
+get_Dest_Y1(tilegx_bundle_bits n)
+{
+ return (((unsigned int)(n >> 31)) & 0x3f);
+}
+
+static inline unsigned int
+get_Imm16_X0(tilegx_bundle_bits num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((n >> 12)) & 0xffff);
+}
+
+static inline unsigned int
+get_Imm16_X1(tilegx_bundle_bits n)
+{
+ return (((unsigned int)(n >> 43)) & 0xffff);
+}
+
+static inline unsigned int
+get_Imm8OpcodeExtension_X0(tilegx_bundle_bits num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((n >> 20)) & 0xff);
+}
+
+static inline unsigned int
+get_Imm8OpcodeExtension_X1(tilegx_bundle_bits n)
+{
+ return (((unsigned int)(n >> 51)) & 0xff);
+}
+
+static inline unsigned int
+get_Imm8_X0(tilegx_bundle_bits num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((n >> 12)) & 0xff);
+}
+
+static inline unsigned int
+get_Imm8_X1(tilegx_bundle_bits n)
+{
+ return (((unsigned int)(n >> 43)) & 0xff);
+}
+
+static inline unsigned int
+get_Imm8_Y0(tilegx_bundle_bits num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((n >> 12)) & 0xff);
+}
+
+static inline unsigned int
+get_Imm8_Y1(tilegx_bundle_bits n)
+{
+ return (((unsigned int)(n >> 43)) & 0xff);
+}
+
+static inline unsigned int
+get_JumpOff_X1(tilegx_bundle_bits n)
+{
+ return (((unsigned int)(n >> 31)) & 0x7ffffff);
+}
+
+static inline unsigned int
+get_JumpOpcodeExtension_X1(tilegx_bundle_bits n)
+{
+ return (((unsigned int)(n >> 58)) & 0x1);
+}
+
+static inline unsigned int
+get_MF_Imm14_X1(tilegx_bundle_bits n)
+{
+ return (((unsigned int)(n >> 37)) & 0x3fff);
+}
+
+static inline unsigned int
+get_MT_Imm14_X1(tilegx_bundle_bits n)
+{
+ return (((unsigned int)(n >> 31)) & 0x0000003f) |
+ (((unsigned int)(n >> 37)) & 0x00003fc0);
+}
+
+static inline unsigned int
+get_Mode(tilegx_bundle_bits n)
+{
+ return (((unsigned int)(n >> 62)) & 0x3);
+}
+
+static inline unsigned int
+get_Opcode_X0(tilegx_bundle_bits num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((n >> 28)) & 0x7);
+}
+
+static inline unsigned int
+get_Opcode_X1(tilegx_bundle_bits n)
+{
+ return (((unsigned int)(n >> 59)) & 0x7);
+}
+
+static inline unsigned int
+get_Opcode_Y0(tilegx_bundle_bits num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((n >> 27)) & 0xf);
+}
+
+static inline unsigned int
+get_Opcode_Y1(tilegx_bundle_bits n)
+{
+ return (((unsigned int)(n >> 58)) & 0xf);
+}
+
+static inline unsigned int
+get_Opcode_Y2(tilegx_bundle_bits n)
+{
+ return (((n >> 26)) & 0x00000001) |
+ (((unsigned int)(n >> 56)) & 0x00000002);
+}
+
+static inline unsigned int
+get_RRROpcodeExtension_X0(tilegx_bundle_bits num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((n >> 18)) & 0x3ff);
+}
+
+static inline unsigned int
+get_RRROpcodeExtension_X1(tilegx_bundle_bits n)
+{
+ return (((unsigned int)(n >> 49)) & 0x3ff);
+}
+
+static inline unsigned int
+get_RRROpcodeExtension_Y0(tilegx_bundle_bits num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((n >> 18)) & 0x3);
+}
+
+static inline unsigned int
+get_RRROpcodeExtension_Y1(tilegx_bundle_bits n)
+{
+ return (((unsigned int)(n >> 49)) & 0x3);
+}
+
+static inline unsigned int
+get_ShAmt_X0(tilegx_bundle_bits num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((n >> 12)) & 0x3f);
+}
+
+static inline unsigned int
+get_ShAmt_X1(tilegx_bundle_bits n)
+{
+ return (((unsigned int)(n >> 43)) & 0x3f);
+}
+
+static inline unsigned int
+get_ShAmt_Y0(tilegx_bundle_bits num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((n >> 12)) & 0x3f);
+}
+
+static inline unsigned int
+get_ShAmt_Y1(tilegx_bundle_bits n)
+{
+ return (((unsigned int)(n >> 43)) & 0x3f);
+}
+
+static inline unsigned int
+get_ShiftOpcodeExtension_X0(tilegx_bundle_bits num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((n >> 18)) & 0x3ff);
+}
+
+static inline unsigned int
+get_ShiftOpcodeExtension_X1(tilegx_bundle_bits n)
+{
+ return (((unsigned int)(n >> 49)) & 0x3ff);
+}
+
+static inline unsigned int
+get_ShiftOpcodeExtension_Y0(tilegx_bundle_bits num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((n >> 18)) & 0x3);
+}
+
+static inline unsigned int
+get_ShiftOpcodeExtension_Y1(tilegx_bundle_bits n)
+{
+ return (((unsigned int)(n >> 49)) & 0x3);
+}
+
+static inline unsigned int
+get_SrcA_X0(tilegx_bundle_bits num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((n >> 6)) & 0x3f);
+}
+
+static inline unsigned int
+get_SrcA_X1(tilegx_bundle_bits n)
+{
+ return (((unsigned int)(n >> 37)) & 0x3f);
+}
+
+static inline unsigned int
+get_SrcA_Y0(tilegx_bundle_bits num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((n >> 6)) & 0x3f);
+}
+
+static inline unsigned int
+get_SrcA_Y1(tilegx_bundle_bits n)
+{
+ return (((unsigned int)(n >> 37)) & 0x3f);
+}
+
+static inline unsigned int
+get_SrcA_Y2(tilegx_bundle_bits num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((n >> 20)) & 0x3f);
+}
+
+static inline unsigned int
+get_SrcBDest_Y2(tilegx_bundle_bits n)
+{
+ return (((unsigned int)(n >> 51)) & 0x3f);
+}
+
+static inline unsigned int
+get_SrcB_X0(tilegx_bundle_bits num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((n >> 12)) & 0x3f);
+}
+
+static inline unsigned int
+get_SrcB_X1(tilegx_bundle_bits n)
+{
+ return (((unsigned int)(n >> 43)) & 0x3f);
+}
+
+static inline unsigned int
+get_SrcB_Y0(tilegx_bundle_bits num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((n >> 12)) & 0x3f);
+}
+
+static inline unsigned int
+get_SrcB_Y1(tilegx_bundle_bits n)
+{
+ return (((unsigned int)(n >> 43)) & 0x3f);
+}
+
+static inline unsigned int
+get_UnaryOpcodeExtension_X0(tilegx_bundle_bits num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((n >> 12)) & 0x3f);
+}
+
+static inline unsigned int
+get_UnaryOpcodeExtension_X1(tilegx_bundle_bits n)
+{
+ return (((unsigned int)(n >> 43)) & 0x3f);
+}
+
+static inline unsigned int
+get_UnaryOpcodeExtension_Y0(tilegx_bundle_bits num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((n >> 12)) & 0x3f);
+}
+
+static inline unsigned int
+get_UnaryOpcodeExtension_Y1(tilegx_bundle_bits n)
+{
+ return (((unsigned int)(n >> 43)) & 0x3f);
+}
+
+
+static inline int
+sign_extend(int n, int num_bits)
+{
+ int shift = (int)(sizeof(int) * 8 - num_bits);
+ return (n << shift) >> shift;
+}
+
+
+
+static inline tilegx_bundle_bits
+create_BFEnd_X0(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return ((n & 0x3f) << 12);
+}
+
+static inline tilegx_bundle_bits
+create_BFOpcodeExtension_X0(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return ((n & 0xf) << 24);
+}
+
+static inline tilegx_bundle_bits
+create_BFStart_X0(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return ((n & 0x3f) << 18);
+}
+
+static inline tilegx_bundle_bits
+create_BrOff_X1(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((tilegx_bundle_bits)(n & 0x0000003f)) << 31) |
+ (((tilegx_bundle_bits)(n & 0x0001ffc0)) << 37);
+}
+
+static inline tilegx_bundle_bits
+create_BrType_X1(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((tilegx_bundle_bits)(n & 0x1f)) << 54);
+}
+
+static inline tilegx_bundle_bits
+create_Dest_Imm8_X1(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((tilegx_bundle_bits)(n & 0x0000003f)) << 31) |
+ (((tilegx_bundle_bits)(n & 0x000000c0)) << 43);
+}
+
+static inline tilegx_bundle_bits
+create_Dest_X0(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return ((n & 0x3f) << 0);
+}
+
+static inline tilegx_bundle_bits
+create_Dest_X1(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((tilegx_bundle_bits)(n & 0x3f)) << 31);
+}
+
+static inline tilegx_bundle_bits
+create_Dest_Y0(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return ((n & 0x3f) << 0);
+}
+
+static inline tilegx_bundle_bits
+create_Dest_Y1(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((tilegx_bundle_bits)(n & 0x3f)) << 31);
+}
+
+static inline tilegx_bundle_bits
+create_Imm16_X0(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return ((n & 0xffff) << 12);
+}
+
+static inline tilegx_bundle_bits
+create_Imm16_X1(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((tilegx_bundle_bits)(n & 0xffff)) << 43);
+}
+
+static inline tilegx_bundle_bits
+create_Imm8OpcodeExtension_X0(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return ((n & 0xff) << 20);
+}
+
+static inline tilegx_bundle_bits
+create_Imm8OpcodeExtension_X1(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((tilegx_bundle_bits)(n & 0xff)) << 51);
+}
+
+static inline tilegx_bundle_bits
+create_Imm8_X0(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return ((n & 0xff) << 12);
+}
+
+static inline tilegx_bundle_bits
+create_Imm8_X1(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((tilegx_bundle_bits)(n & 0xff)) << 43);
+}
+
+static inline tilegx_bundle_bits
+create_Imm8_Y0(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return ((n & 0xff) << 12);
+}
+
+static inline tilegx_bundle_bits
+create_Imm8_Y1(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((tilegx_bundle_bits)(n & 0xff)) << 43);
+}
+
+static inline tilegx_bundle_bits
+create_JumpOff_X1(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((tilegx_bundle_bits)(n & 0x7ffffff)) << 31);
+}
+
+static inline tilegx_bundle_bits
+create_JumpOpcodeExtension_X1(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((tilegx_bundle_bits)(n & 0x1)) << 58);
+}
+
+static inline tilegx_bundle_bits
+create_MF_Imm14_X1(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((tilegx_bundle_bits)(n & 0x3fff)) << 37);
+}
+
+static inline tilegx_bundle_bits
+create_MT_Imm14_X1(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((tilegx_bundle_bits)(n & 0x0000003f)) << 31) |
+ (((tilegx_bundle_bits)(n & 0x00003fc0)) << 37);
+}
+
+static inline tilegx_bundle_bits
+create_Mode(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((tilegx_bundle_bits)(n & 0x3)) << 62);
+}
+
+static inline tilegx_bundle_bits
+create_Opcode_X0(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return ((n & 0x7) << 28);
+}
+
+static inline tilegx_bundle_bits
+create_Opcode_X1(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((tilegx_bundle_bits)(n & 0x7)) << 59);
+}
+
+static inline tilegx_bundle_bits
+create_Opcode_Y0(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return ((n & 0xf) << 27);
+}
+
+static inline tilegx_bundle_bits
+create_Opcode_Y1(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((tilegx_bundle_bits)(n & 0xf)) << 58);
+}
+
+static inline tilegx_bundle_bits
+create_Opcode_Y2(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return ((n & 0x00000001) << 26) |
+ (((tilegx_bundle_bits)(n & 0x00000002)) << 56);
+}
+
+static inline tilegx_bundle_bits
+create_RRROpcodeExtension_X0(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return ((n & 0x3ff) << 18);
+}
+
+static inline tilegx_bundle_bits
+create_RRROpcodeExtension_X1(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((tilegx_bundle_bits)(n & 0x3ff)) << 49);
+}
+
+static inline tilegx_bundle_bits
+create_RRROpcodeExtension_Y0(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return ((n & 0x3) << 18);
+}
+
+static inline tilegx_bundle_bits
+create_RRROpcodeExtension_Y1(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((tilegx_bundle_bits)(n & 0x3)) << 49);
+}
+
+static inline tilegx_bundle_bits
+create_ShAmt_X0(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return ((n & 0x3f) << 12);
+}
+
+static inline tilegx_bundle_bits
+create_ShAmt_X1(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
+}
+
+static inline tilegx_bundle_bits
+create_ShAmt_Y0(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return ((n & 0x3f) << 12);
+}
+
+static inline tilegx_bundle_bits
+create_ShAmt_Y1(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
+}
+
+static inline tilegx_bundle_bits
+create_ShiftOpcodeExtension_X0(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return ((n & 0x3ff) << 18);
+}
+
+static inline tilegx_bundle_bits
+create_ShiftOpcodeExtension_X1(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((tilegx_bundle_bits)(n & 0x3ff)) << 49);
+}
+
+static inline tilegx_bundle_bits
+create_ShiftOpcodeExtension_Y0(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return ((n & 0x3) << 18);
+}
+
+static inline tilegx_bundle_bits
+create_ShiftOpcodeExtension_Y1(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((tilegx_bundle_bits)(n & 0x3)) << 49);
+}
+
+static inline tilegx_bundle_bits
+create_SrcA_X0(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return ((n & 0x3f) << 6);
+}
+
+static inline tilegx_bundle_bits
+create_SrcA_X1(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((tilegx_bundle_bits)(n & 0x3f)) << 37);
+}
+
+static inline tilegx_bundle_bits
+create_SrcA_Y0(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return ((n & 0x3f) << 6);
+}
+
+static inline tilegx_bundle_bits
+create_SrcA_Y1(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((tilegx_bundle_bits)(n & 0x3f)) << 37);
+}
+
+static inline tilegx_bundle_bits
+create_SrcA_Y2(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return ((n & 0x3f) << 20);
+}
+
+static inline tilegx_bundle_bits
+create_SrcBDest_Y2(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((tilegx_bundle_bits)(n & 0x3f)) << 51);
+}
+
+static inline tilegx_bundle_bits
+create_SrcB_X0(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return ((n & 0x3f) << 12);
+}
+
+static inline tilegx_bundle_bits
+create_SrcB_X1(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
+}
+
+static inline tilegx_bundle_bits
+create_SrcB_Y0(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return ((n & 0x3f) << 12);
+}
+
+static inline tilegx_bundle_bits
+create_SrcB_Y1(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
+}
+
+static inline tilegx_bundle_bits
+create_UnaryOpcodeExtension_X0(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return ((n & 0x3f) << 12);
+}
+
+static inline tilegx_bundle_bits
+create_UnaryOpcodeExtension_X1(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
+}
+
+static inline tilegx_bundle_bits
+create_UnaryOpcodeExtension_Y0(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return ((n & 0x3f) << 12);
+}
+
+static inline tilegx_bundle_bits
+create_UnaryOpcodeExtension_Y1(int num)
+{
+ const unsigned int n = (unsigned int)num;
+ return (((tilegx_bundle_bits)(n & 0x3f)) << 43);
+}
+
+
+enum
+{
+ ADDI_IMM8_OPCODE_X0 = 1,
+ ADDI_IMM8_OPCODE_X1 = 1,
+ ADDI_OPCODE_Y0 = 0,
+ ADDI_OPCODE_Y1 = 1,
+ ADDLI_OPCODE_X0 = 1,
+ ADDLI_OPCODE_X1 = 0,
+ ADDXI_IMM8_OPCODE_X0 = 2,
+ ADDXI_IMM8_OPCODE_X1 = 2,
+ ADDXI_OPCODE_Y0 = 1,
+ ADDXI_OPCODE_Y1 = 2,
+ ADDXLI_OPCODE_X0 = 2,
+ ADDXLI_OPCODE_X1 = 1,
+ ADDXSC_RRR_0_OPCODE_X0 = 1,
+ ADDXSC_RRR_0_OPCODE_X1 = 1,
+ ADDX_RRR_0_OPCODE_X0 = 2,
+ ADDX_RRR_0_OPCODE_X1 = 2,
+ ADDX_RRR_0_OPCODE_Y0 = 0,
+ ADDX_RRR_0_OPCODE_Y1 = 0,
+ ADD_RRR_0_OPCODE_X0 = 3,
+ ADD_RRR_0_OPCODE_X1 = 3,
+ ADD_RRR_0_OPCODE_Y0 = 1,
+ ADD_RRR_0_OPCODE_Y1 = 1,
+ ANDI_IMM8_OPCODE_X0 = 3,
+ ANDI_IMM8_OPCODE_X1 = 3,
+ ANDI_OPCODE_Y0 = 2,
+ ANDI_OPCODE_Y1 = 3,
+ AND_RRR_0_OPCODE_X0 = 4,
+ AND_RRR_0_OPCODE_X1 = 4,
+ AND_RRR_5_OPCODE_Y0 = 0,
+ AND_RRR_5_OPCODE_Y1 = 0,
+ BEQZT_BRANCH_OPCODE_X1 = 16,
+ BEQZ_BRANCH_OPCODE_X1 = 17,
+ BFEXTS_BF_OPCODE_X0 = 4,
+ BFEXTU_BF_OPCODE_X0 = 5,
+ BFINS_BF_OPCODE_X0 = 6,
+ BF_OPCODE_X0 = 3,
+ BGEZT_BRANCH_OPCODE_X1 = 18,
+ BGEZ_BRANCH_OPCODE_X1 = 19,
+ BGTZT_BRANCH_OPCODE_X1 = 20,
+ BGTZ_BRANCH_OPCODE_X1 = 21,
+ BLBCT_BRANCH_OPCODE_X1 = 22,
+ BLBC_BRANCH_OPCODE_X1 = 23,
+ BLBST_BRANCH_OPCODE_X1 = 24,
+ BLBS_BRANCH_OPCODE_X1 = 25,
+ BLEZT_BRANCH_OPCODE_X1 = 26,
+ BLEZ_BRANCH_OPCODE_X1 = 27,
+ BLTZT_BRANCH_OPCODE_X1 = 28,
+ BLTZ_BRANCH_OPCODE_X1 = 29,
+ BNEZT_BRANCH_OPCODE_X1 = 30,
+ BNEZ_BRANCH_OPCODE_X1 = 31,
+ BRANCH_OPCODE_X1 = 2,
+ CMOVEQZ_RRR_0_OPCODE_X0 = 5,
+ CMOVEQZ_RRR_4_OPCODE_Y0 = 0,
+ CMOVNEZ_RRR_0_OPCODE_X0 = 6,
+ CMOVNEZ_RRR_4_OPCODE_Y0 = 1,
+ CMPEQI_IMM8_OPCODE_X0 = 4,
+ CMPEQI_IMM8_OPCODE_X1 = 4,
+ CMPEQI_OPCODE_Y0 = 3,
+ CMPEQI_OPCODE_Y1 = 4,
+ CMPEQ_RRR_0_OPCODE_X0 = 7,
+ CMPEQ_RRR_0_OPCODE_X1 = 5,
+ CMPEQ_RRR_3_OPCODE_Y0 = 0,
+ CMPEQ_RRR_3_OPCODE_Y1 = 2,
+ CMPEXCH4_RRR_0_OPCODE_X1 = 6,
+ CMPEXCH_RRR_0_OPCODE_X1 = 7,
+ CMPLES_RRR_0_OPCODE_X0 = 8,
+ CMPLES_RRR_0_OPCODE_X1 = 8,
+ CMPLES_RRR_2_OPCODE_Y0 = 0,
+ CMPLES_RRR_2_OPCODE_Y1 = 0,
+ CMPLEU_RRR_0_OPCODE_X0 = 9,
+ CMPLEU_RRR_0_OPCODE_X1 = 9,
+ CMPLEU_RRR_2_OPCODE_Y0 = 1,
+ CMPLEU_RRR_2_OPCODE_Y1 = 1,
+ CMPLTSI_IMM8_OPCODE_X0 = 5,
+ CMPLTSI_IMM8_OPCODE_X1 = 5,
+ CMPLTSI_OPCODE_Y0 = 4,
+ CMPLTSI_OPCODE_Y1 = 5,
+ CMPLTS_RRR_0_OPCODE_X0 = 10,
+ CMPLTS_RRR_0_OPCODE_X1 = 10,
+ CMPLTS_RRR_2_OPCODE_Y0 = 2,
+ CMPLTS_RRR_2_OPCODE_Y1 = 2,
+ CMPLTUI_IMM8_OPCODE_X0 = 6,
+ CMPLTUI_IMM8_OPCODE_X1 = 6,
+ CMPLTU_RRR_0_OPCODE_X0 = 11,
+ CMPLTU_RRR_0_OPCODE_X1 = 11,
+ CMPLTU_RRR_2_OPCODE_Y0 = 3,
+ CMPLTU_RRR_2_OPCODE_Y1 = 3,
+ CMPNE_RRR_0_OPCODE_X0 = 12,
+ CMPNE_RRR_0_OPCODE_X1 = 12,
+ CMPNE_RRR_3_OPCODE_Y0 = 1,
+ CMPNE_RRR_3_OPCODE_Y1 = 3,
+ CMULAF_RRR_0_OPCODE_X0 = 13,
+ CMULA_RRR_0_OPCODE_X0 = 14,
+ CMULFR_RRR_0_OPCODE_X0 = 15,
+ CMULF_RRR_0_OPCODE_X0 = 16,
+ CMULHR_RRR_0_OPCODE_X0 = 17,
+ CMULH_RRR_0_OPCODE_X0 = 18,
+ CMUL_RRR_0_OPCODE_X0 = 19,
+ CNTLZ_UNARY_OPCODE_X0 = 1,
+ CNTLZ_UNARY_OPCODE_Y0 = 1,
+ CNTTZ_UNARY_OPCODE_X0 = 2,
+ CNTTZ_UNARY_OPCODE_Y0 = 2,
+ CRC32_32_RRR_0_OPCODE_X0 = 20,
+ CRC32_8_RRR_0_OPCODE_X0 = 21,
+ DBLALIGN2_RRR_0_OPCODE_X0 = 22,
+ DBLALIGN2_RRR_0_OPCODE_X1 = 13,
+ DBLALIGN4_RRR_0_OPCODE_X0 = 23,
+ DBLALIGN4_RRR_0_OPCODE_X1 = 14,
+ DBLALIGN6_RRR_0_OPCODE_X0 = 24,
+ DBLALIGN6_RRR_0_OPCODE_X1 = 15,
+ DBLALIGN_RRR_0_OPCODE_X0 = 25,
+ DRAIN_UNARY_OPCODE_X1 = 1,
+ DTLBPR_UNARY_OPCODE_X1 = 2,
+ EXCH4_RRR_0_OPCODE_X1 = 16,
+ EXCH_RRR_0_OPCODE_X1 = 17,
+ FDOUBLE_ADDSUB_RRR_0_OPCODE_X0 = 26,
+ FDOUBLE_ADD_FLAGS_RRR_0_OPCODE_X0 = 27,
+ FDOUBLE_MUL_FLAGS_RRR_0_OPCODE_X0 = 28,
+ FDOUBLE_PACK1_RRR_0_OPCODE_X0 = 29,
+ FDOUBLE_PACK2_RRR_0_OPCODE_X0 = 30,
+ FDOUBLE_SUB_FLAGS_RRR_0_OPCODE_X0 = 31,
+ FDOUBLE_UNPACK_MAX_RRR_0_OPCODE_X0 = 32,
+ FDOUBLE_UNPACK_MIN_RRR_0_OPCODE_X0 = 33,
+ FETCHADD4_RRR_0_OPCODE_X1 = 18,
+ FETCHADDGEZ4_RRR_0_OPCODE_X1 = 19,
+ FETCHADDGEZ_RRR_0_OPCODE_X1 = 20,
+ FETCHADD_RRR_0_OPCODE_X1 = 21,
+ FETCHAND4_RRR_0_OPCODE_X1 = 22,
+ FETCHAND_RRR_0_OPCODE_X1 = 23,
+ FETCHOR4_RRR_0_OPCODE_X1 = 24,
+ FETCHOR_RRR_0_OPCODE_X1 = 25,
+ FINV_UNARY_OPCODE_X1 = 3,
+ FLUSHWB_UNARY_OPCODE_X1 = 4,
+ FLUSH_UNARY_OPCODE_X1 = 5,
+ FNOP_UNARY_OPCODE_X0 = 3,
+ FNOP_UNARY_OPCODE_X1 = 6,
+ FNOP_UNARY_OPCODE_Y0 = 3,
+ FNOP_UNARY_OPCODE_Y1 = 8,
+ FSINGLE_ADD1_RRR_0_OPCODE_X0 = 34,
+ FSINGLE_ADDSUB2_RRR_0_OPCODE_X0 = 35,
+ FSINGLE_MUL1_RRR_0_OPCODE_X0 = 36,
+ FSINGLE_MUL2_RRR_0_OPCODE_X0 = 37,
+ FSINGLE_PACK1_UNARY_OPCODE_X0 = 4,
+ FSINGLE_PACK1_UNARY_OPCODE_Y0 = 4,
+ FSINGLE_PACK2_RRR_0_OPCODE_X0 = 38,
+ FSINGLE_SUB1_RRR_0_OPCODE_X0 = 39,
+ ICOH_UNARY_OPCODE_X1 = 7,
+ ILL_UNARY_OPCODE_X1 = 8,
+ ILL_UNARY_OPCODE_Y1 = 9,
+ IMM8_OPCODE_X0 = 4,
+ IMM8_OPCODE_X1 = 3,
+ INV_UNARY_OPCODE_X1 = 9,
+ IRET_UNARY_OPCODE_X1 = 10,
+ JALRP_UNARY_OPCODE_X1 = 11,
+ JALRP_UNARY_OPCODE_Y1 = 10,
+ JALR_UNARY_OPCODE_X1 = 12,
+ JALR_UNARY_OPCODE_Y1 = 11,
+ JAL_JUMP_OPCODE_X1 = 0,
+ JRP_UNARY_OPCODE_X1 = 13,
+ JRP_UNARY_OPCODE_Y1 = 12,
+ JR_UNARY_OPCODE_X1 = 14,
+ JR_UNARY_OPCODE_Y1 = 13,
+ JUMP_OPCODE_X1 = 4,
+ J_JUMP_OPCODE_X1 = 1,
+ LD1S_ADD_IMM8_OPCODE_X1 = 7,
+ LD1S_OPCODE_Y2 = 0,
+ LD1S_UNARY_OPCODE_X1 = 15,
+ LD1U_ADD_IMM8_OPCODE_X1 = 8,
+ LD1U_OPCODE_Y2 = 1,
+ LD1U_UNARY_OPCODE_X1 = 16,
+ LD2S_ADD_IMM8_OPCODE_X1 = 9,
+ LD2S_OPCODE_Y2 = 2,
+ LD2S_UNARY_OPCODE_X1 = 17,
+ LD2U_ADD_IMM8_OPCODE_X1 = 10,
+ LD2U_OPCODE_Y2 = 3,
+ LD2U_UNARY_OPCODE_X1 = 18,
+ LD4S_ADD_IMM8_OPCODE_X1 = 11,
+ LD4S_OPCODE_Y2 = 1,
+ LD4S_UNARY_OPCODE_X1 = 19,
+ LD4U_ADD_IMM8_OPCODE_X1 = 12,
+ LD4U_OPCODE_Y2 = 2,
+ LD4U_UNARY_OPCODE_X1 = 20,
+ LDNA_UNARY_OPCODE_X1 = 21,
+ LDNT1S_ADD_IMM8_OPCODE_X1 = 13,
+ LDNT1S_UNARY_OPCODE_X1 = 22,
+ LDNT1U_ADD_IMM8_OPCODE_X1 = 14,
+ LDNT1U_UNARY_OPCODE_X1 = 23,
+ LDNT2S_ADD_IMM8_OPCODE_X1 = 15,
+ LDNT2S_UNARY_OPCODE_X1 = 24,
+ LDNT2U_ADD_IMM8_OPCODE_X1 = 16,
+ LDNT2U_UNARY_OPCODE_X1 = 25,
+ LDNT4S_ADD_IMM8_OPCODE_X1 = 17,
+ LDNT4S_UNARY_OPCODE_X1 = 26,
+ LDNT4U_ADD_IMM8_OPCODE_X1 = 18,
+ LDNT4U_UNARY_OPCODE_X1 = 27,
+ LDNT_ADD_IMM8_OPCODE_X1 = 19,
+ LDNT_UNARY_OPCODE_X1 = 28,
+ LD_ADD_IMM8_OPCODE_X1 = 20,
+ LD_OPCODE_Y2 = 3,
+ LD_UNARY_OPCODE_X1 = 29,
+ LNK_UNARY_OPCODE_X1 = 30,
+ LNK_UNARY_OPCODE_Y1 = 14,
+ LDNA_ADD_IMM8_OPCODE_X1 = 21,
+ MFSPR_IMM8_OPCODE_X1 = 22,
+ MF_UNARY_OPCODE_X1 = 31,
+ MM_BF_OPCODE_X0 = 7,
+ MNZ_RRR_0_OPCODE_X0 = 40,
+ MNZ_RRR_0_OPCODE_X1 = 26,
+ MNZ_RRR_4_OPCODE_Y0 = 2,
+ MNZ_RRR_4_OPCODE_Y1 = 2,
+ MODE_OPCODE_YA2 = 1,
+ MODE_OPCODE_YB2 = 2,
+ MODE_OPCODE_YC2 = 3,
+ MTSPR_IMM8_OPCODE_X1 = 23,
+ MULAX_RRR_0_OPCODE_X0 = 41,
+ MULAX_RRR_3_OPCODE_Y0 = 2,
+ MULA_HS_HS_RRR_0_OPCODE_X0 = 42,
+ MULA_HS_HS_RRR_9_OPCODE_Y0 = 0,
+ MULA_HS_HU_RRR_0_OPCODE_X0 = 43,
+ MULA_HS_LS_RRR_0_OPCODE_X0 = 44,
+ MULA_HS_LU_RRR_0_OPCODE_X0 = 45,
+ MULA_HU_HU_RRR_0_OPCODE_X0 = 46,
+ MULA_HU_HU_RRR_9_OPCODE_Y0 = 1,
+ MULA_HU_LS_RRR_0_OPCODE_X0 = 47,
+ MULA_HU_LU_RRR_0_OPCODE_X0 = 48,
+ MULA_LS_LS_RRR_0_OPCODE_X0 = 49,
+ MULA_LS_LS_RRR_9_OPCODE_Y0 = 2,
+ MULA_LS_LU_RRR_0_OPCODE_X0 = 50,
+ MULA_LU_LU_RRR_0_OPCODE_X0 = 51,
+ MULA_LU_LU_RRR_9_OPCODE_Y0 = 3,
+ MULX_RRR_0_OPCODE_X0 = 52,
+ MULX_RRR_3_OPCODE_Y0 = 3,
+ MUL_HS_HS_RRR_0_OPCODE_X0 = 53,
+ MUL_HS_HS_RRR_8_OPCODE_Y0 = 0,
+ MUL_HS_HU_RRR_0_OPCODE_X0 = 54,
+ MUL_HS_LS_RRR_0_OPCODE_X0 = 55,
+ MUL_HS_LU_RRR_0_OPCODE_X0 = 56,
+ MUL_HU_HU_RRR_0_OPCODE_X0 = 57,
+ MUL_HU_HU_RRR_8_OPCODE_Y0 = 1,
+ MUL_HU_LS_RRR_0_OPCODE_X0 = 58,
+ MUL_HU_LU_RRR_0_OPCODE_X0 = 59,
+ MUL_LS_LS_RRR_0_OPCODE_X0 = 60,
+ MUL_LS_LS_RRR_8_OPCODE_Y0 = 2,
+ MUL_LS_LU_RRR_0_OPCODE_X0 = 61,
+ MUL_LU_LU_RRR_0_OPCODE_X0 = 62,
+ MUL_LU_LU_RRR_8_OPCODE_Y0 = 3,
+ MZ_RRR_0_OPCODE_X0 = 63,
+ MZ_RRR_0_OPCODE_X1 = 27,
+ MZ_RRR_4_OPCODE_Y0 = 3,
+ MZ_RRR_4_OPCODE_Y1 = 3,
+ NAP_UNARY_OPCODE_X1 = 32,
+ NOP_UNARY_OPCODE_X0 = 5,
+ NOP_UNARY_OPCODE_X1 = 33,
+ NOP_UNARY_OPCODE_Y0 = 5,
+ NOP_UNARY_OPCODE_Y1 = 15,
+ NOR_RRR_0_OPCODE_X0 = 64,
+ NOR_RRR_0_OPCODE_X1 = 28,
+ NOR_RRR_5_OPCODE_Y0 = 1,
+ NOR_RRR_5_OPCODE_Y1 = 1,
+ ORI_IMM8_OPCODE_X0 = 7,
+ ORI_IMM8_OPCODE_X1 = 24,
+ OR_RRR_0_OPCODE_X0 = 65,
+ OR_RRR_0_OPCODE_X1 = 29,
+ OR_RRR_5_OPCODE_Y0 = 2,
+ OR_RRR_5_OPCODE_Y1 = 2,
+ PCNT_UNARY_OPCODE_X0 = 6,
+ PCNT_UNARY_OPCODE_Y0 = 6,
+ REVBITS_UNARY_OPCODE_X0 = 7,
+ REVBITS_UNARY_OPCODE_Y0 = 7,
+ REVBYTES_UNARY_OPCODE_X0 = 8,
+ REVBYTES_UNARY_OPCODE_Y0 = 8,
+ ROTLI_SHIFT_OPCODE_X0 = 1,
+ ROTLI_SHIFT_OPCODE_X1 = 1,
+ ROTLI_SHIFT_OPCODE_Y0 = 0,
+ ROTLI_SHIFT_OPCODE_Y1 = 0,
+ ROTL_RRR_0_OPCODE_X0 = 66,
+ ROTL_RRR_0_OPCODE_X1 = 30,
+ ROTL_RRR_6_OPCODE_Y0 = 0,
+ ROTL_RRR_6_OPCODE_Y1 = 0,
+ RRR_0_OPCODE_X0 = 5,
+ RRR_0_OPCODE_X1 = 5,
+ RRR_0_OPCODE_Y0 = 5,
+ RRR_0_OPCODE_Y1 = 6,
+ RRR_1_OPCODE_Y0 = 6,
+ RRR_1_OPCODE_Y1 = 7,
+ RRR_2_OPCODE_Y0 = 7,
+ RRR_2_OPCODE_Y1 = 8,
+ RRR_3_OPCODE_Y0 = 8,
+ RRR_3_OPCODE_Y1 = 9,
+ RRR_4_OPCODE_Y0 = 9,
+ RRR_4_OPCODE_Y1 = 10,
+ RRR_5_OPCODE_Y0 = 10,
+ RRR_5_OPCODE_Y1 = 11,
+ RRR_6_OPCODE_Y0 = 11,
+ RRR_6_OPCODE_Y1 = 12,
+ RRR_7_OPCODE_Y0 = 12,
+ RRR_7_OPCODE_Y1 = 13,
+ RRR_8_OPCODE_Y0 = 13,
+ RRR_9_OPCODE_Y0 = 14,
+ SHIFT_OPCODE_X0 = 6,
+ SHIFT_OPCODE_X1 = 6,
+ SHIFT_OPCODE_Y0 = 15,
+ SHIFT_OPCODE_Y1 = 14,
+ SHL16INSLI_OPCODE_X0 = 7,
+ SHL16INSLI_OPCODE_X1 = 7,
+ SHL1ADDX_RRR_0_OPCODE_X0 = 67,
+ SHL1ADDX_RRR_0_OPCODE_X1 = 31,
+ SHL1ADDX_RRR_7_OPCODE_Y0 = 1,
+ SHL1ADDX_RRR_7_OPCODE_Y1 = 1,
+ SHL1ADD_RRR_0_OPCODE_X0 = 68,
+ SHL1ADD_RRR_0_OPCODE_X1 = 32,
+ SHL1ADD_RRR_1_OPCODE_Y0 = 0,
+ SHL1ADD_RRR_1_OPCODE_Y1 = 0,
+ SHL2ADDX_RRR_0_OPCODE_X0 = 69,
+ SHL2ADDX_RRR_0_OPCODE_X1 = 33,
+ SHL2ADDX_RRR_7_OPCODE_Y0 = 2,
+ SHL2ADDX_RRR_7_OPCODE_Y1 = 2,
+ SHL2ADD_RRR_0_OPCODE_X0 = 70,
+ SHL2ADD_RRR_0_OPCODE_X1 = 34,
+ SHL2ADD_RRR_1_OPCODE_Y0 = 1,
+ SHL2ADD_RRR_1_OPCODE_Y1 = 1,
+ SHL3ADDX_RRR_0_OPCODE_X0 = 71,
+ SHL3ADDX_RRR_0_OPCODE_X1 = 35,
+ SHL3ADDX_RRR_7_OPCODE_Y0 = 3,
+ SHL3ADDX_RRR_7_OPCODE_Y1 = 3,
+ SHL3ADD_RRR_0_OPCODE_X0 = 72,
+ SHL3ADD_RRR_0_OPCODE_X1 = 36,
+ SHL3ADD_RRR_1_OPCODE_Y0 = 2,
+ SHL3ADD_RRR_1_OPCODE_Y1 = 2,
+ SHLI_SHIFT_OPCODE_X0 = 2,
+ SHLI_SHIFT_OPCODE_X1 = 2,
+ SHLI_SHIFT_OPCODE_Y0 = 1,
+ SHLI_SHIFT_OPCODE_Y1 = 1,
+ SHLXI_SHIFT_OPCODE_X0 = 3,
+ SHLXI_SHIFT_OPCODE_X1 = 3,
+ SHLX_RRR_0_OPCODE_X0 = 73,
+ SHLX_RRR_0_OPCODE_X1 = 37,
+ SHL_RRR_0_OPCODE_X0 = 74,
+ SHL_RRR_0_OPCODE_X1 = 38,
+ SHL_RRR_6_OPCODE_Y0 = 1,
+ SHL_RRR_6_OPCODE_Y1 = 1,
+ SHRSI_SHIFT_OPCODE_X0 = 4,
+ SHRSI_SHIFT_OPCODE_X1 = 4,
+ SHRSI_SHIFT_OPCODE_Y0 = 2,
+ SHRSI_SHIFT_OPCODE_Y1 = 2,
+ SHRS_RRR_0_OPCODE_X0 = 75,
+ SHRS_RRR_0_OPCODE_X1 = 39,
+ SHRS_RRR_6_OPCODE_Y0 = 2,
+ SHRS_RRR_6_OPCODE_Y1 = 2,
+ SHRUI_SHIFT_OPCODE_X0 = 5,
+ SHRUI_SHIFT_OPCODE_X1 = 5,
+ SHRUI_SHIFT_OPCODE_Y0 = 3,
+ SHRUI_SHIFT_OPCODE_Y1 = 3,
+ SHRUXI_SHIFT_OPCODE_X0 = 6,
+ SHRUXI_SHIFT_OPCODE_X1 = 6,
+ SHRUX_RRR_0_OPCODE_X0 = 76,
+ SHRUX_RRR_0_OPCODE_X1 = 40,
+ SHRU_RRR_0_OPCODE_X0 = 77,
+ SHRU_RRR_0_OPCODE_X1 = 41,
+ SHRU_RRR_6_OPCODE_Y0 = 3,
+ SHRU_RRR_6_OPCODE_Y1 = 3,
+ SHUFFLEBYTES_RRR_0_OPCODE_X0 = 78,
+ ST1_ADD_IMM8_OPCODE_X1 = 25,
+ ST1_OPCODE_Y2 = 0,
+ ST1_RRR_0_OPCODE_X1 = 42,
+ ST2_ADD_IMM8_OPCODE_X1 = 26,
+ ST2_OPCODE_Y2 = 1,
+ ST2_RRR_0_OPCODE_X1 = 43,
+ ST4_ADD_IMM8_OPCODE_X1 = 27,
+ ST4_OPCODE_Y2 = 2,
+ ST4_RRR_0_OPCODE_X1 = 44,
+ STNT1_ADD_IMM8_OPCODE_X1 = 28,
+ STNT1_RRR_0_OPCODE_X1 = 45,
+ STNT2_ADD_IMM8_OPCODE_X1 = 29,
+ STNT2_RRR_0_OPCODE_X1 = 46,
+ STNT4_ADD_IMM8_OPCODE_X1 = 30,
+ STNT4_RRR_0_OPCODE_X1 = 47,
+ STNT_ADD_IMM8_OPCODE_X1 = 31,
+ STNT_RRR_0_OPCODE_X1 = 48,
+ ST_ADD_IMM8_OPCODE_X1 = 32,
+ ST_OPCODE_Y2 = 3,
+ ST_RRR_0_OPCODE_X1 = 49,
+ SUBXSC_RRR_0_OPCODE_X0 = 79,
+ SUBXSC_RRR_0_OPCODE_X1 = 50,
+ SUBX_RRR_0_OPCODE_X0 = 80,
+ SUBX_RRR_0_OPCODE_X1 = 51,
+ SUBX_RRR_0_OPCODE_Y0 = 2,
+ SUBX_RRR_0_OPCODE_Y1 = 2,
+ SUB_RRR_0_OPCODE_X0 = 81,
+ SUB_RRR_0_OPCODE_X1 = 52,
+ SUB_RRR_0_OPCODE_Y0 = 3,
+ SUB_RRR_0_OPCODE_Y1 = 3,
+ SWINT0_UNARY_OPCODE_X1 = 34,
+ SWINT1_UNARY_OPCODE_X1 = 35,
+ SWINT2_UNARY_OPCODE_X1 = 36,
+ SWINT3_UNARY_OPCODE_X1 = 37,
+ TBLIDXB0_UNARY_OPCODE_X0 = 9,
+ TBLIDXB0_UNARY_OPCODE_Y0 = 9,
+ TBLIDXB1_UNARY_OPCODE_X0 = 10,
+ TBLIDXB1_UNARY_OPCODE_Y0 = 10,
+ TBLIDXB2_UNARY_OPCODE_X0 = 11,
+ TBLIDXB2_UNARY_OPCODE_Y0 = 11,
+ TBLIDXB3_UNARY_OPCODE_X0 = 12,
+ TBLIDXB3_UNARY_OPCODE_Y0 = 12,
+ UNARY_RRR_0_OPCODE_X0 = 82,
+ UNARY_RRR_0_OPCODE_X1 = 53,
+ UNARY_RRR_1_OPCODE_Y0 = 3,
+ UNARY_RRR_1_OPCODE_Y1 = 3,
+ V1ADDI_IMM8_OPCODE_X0 = 8,
+ V1ADDI_IMM8_OPCODE_X1 = 33,
+ V1ADDUC_RRR_0_OPCODE_X0 = 83,
+ V1ADDUC_RRR_0_OPCODE_X1 = 54,
+ V1ADD_RRR_0_OPCODE_X0 = 84,
+ V1ADD_RRR_0_OPCODE_X1 = 55,
+ V1ADIFFU_RRR_0_OPCODE_X0 = 85,
+ V1AVGU_RRR_0_OPCODE_X0 = 86,
+ V1CMPEQI_IMM8_OPCODE_X0 = 9,
+ V1CMPEQI_IMM8_OPCODE_X1 = 34,
+ V1CMPEQ_RRR_0_OPCODE_X0 = 87,
+ V1CMPEQ_RRR_0_OPCODE_X1 = 56,
+ V1CMPLES_RRR_0_OPCODE_X0 = 88,
+ V1CMPLES_RRR_0_OPCODE_X1 = 57,
+ V1CMPLEU_RRR_0_OPCODE_X0 = 89,
+ V1CMPLEU_RRR_0_OPCODE_X1 = 58,
+ V1CMPLTSI_IMM8_OPCODE_X0 = 10,
+ V1CMPLTSI_IMM8_OPCODE_X1 = 35,
+ V1CMPLTS_RRR_0_OPCODE_X0 = 90,
+ V1CMPLTS_RRR_0_OPCODE_X1 = 59,
+ V1CMPLTUI_IMM8_OPCODE_X0 = 11,
+ V1CMPLTUI_IMM8_OPCODE_X1 = 36,
+ V1CMPLTU_RRR_0_OPCODE_X0 = 91,
+ V1CMPLTU_RRR_0_OPCODE_X1 = 60,
+ V1CMPNE_RRR_0_OPCODE_X0 = 92,
+ V1CMPNE_RRR_0_OPCODE_X1 = 61,
+ V1DDOTPUA_RRR_0_OPCODE_X0 = 161,
+ V1DDOTPUSA_RRR_0_OPCODE_X0 = 93,
+ V1DDOTPUS_RRR_0_OPCODE_X0 = 94,
+ V1DDOTPU_RRR_0_OPCODE_X0 = 162,
+ V1DOTPA_RRR_0_OPCODE_X0 = 95,
+ V1DOTPUA_RRR_0_OPCODE_X0 = 163,
+ V1DOTPUSA_RRR_0_OPCODE_X0 = 96,
+ V1DOTPUS_RRR_0_OPCODE_X0 = 97,
+ V1DOTPU_RRR_0_OPCODE_X0 = 164,
+ V1DOTP_RRR_0_OPCODE_X0 = 98,
+ V1INT_H_RRR_0_OPCODE_X0 = 99,
+ V1INT_H_RRR_0_OPCODE_X1 = 62,
+ V1INT_L_RRR_0_OPCODE_X0 = 100,
+ V1INT_L_RRR_0_OPCODE_X1 = 63,
+ V1MAXUI_IMM8_OPCODE_X0 = 12,
+ V1MAXUI_IMM8_OPCODE_X1 = 37,
+ V1MAXU_RRR_0_OPCODE_X0 = 101,
+ V1MAXU_RRR_0_OPCODE_X1 = 64,
+ V1MINUI_IMM8_OPCODE_X0 = 13,
+ V1MINUI_IMM8_OPCODE_X1 = 38,
+ V1MINU_RRR_0_OPCODE_X0 = 102,
+ V1MINU_RRR_0_OPCODE_X1 = 65,
+ V1MNZ_RRR_0_OPCODE_X0 = 103,
+ V1MNZ_RRR_0_OPCODE_X1 = 66,
+ V1MULTU_RRR_0_OPCODE_X0 = 104,
+ V1MULUS_RRR_0_OPCODE_X0 = 105,
+ V1MULU_RRR_0_OPCODE_X0 = 106,
+ V1MZ_RRR_0_OPCODE_X0 = 107,
+ V1MZ_RRR_0_OPCODE_X1 = 67,
+ V1SADAU_RRR_0_OPCODE_X0 = 108,
+ V1SADU_RRR_0_OPCODE_X0 = 109,
+ V1SHLI_SHIFT_OPCODE_X0 = 7,
+ V1SHLI_SHIFT_OPCODE_X1 = 7,
+ V1SHL_RRR_0_OPCODE_X0 = 110,
+ V1SHL_RRR_0_OPCODE_X1 = 68,
+ V1SHRSI_SHIFT_OPCODE_X0 = 8,
+ V1SHRSI_SHIFT_OPCODE_X1 = 8,
+ V1SHRS_RRR_0_OPCODE_X0 = 111,
+ V1SHRS_RRR_0_OPCODE_X1 = 69,
+ V1SHRUI_SHIFT_OPCODE_X0 = 9,
+ V1SHRUI_SHIFT_OPCODE_X1 = 9,
+ V1SHRU_RRR_0_OPCODE_X0 = 112,
+ V1SHRU_RRR_0_OPCODE_X1 = 70,
+ V1SUBUC_RRR_0_OPCODE_X0 = 113,
+ V1SUBUC_RRR_0_OPCODE_X1 = 71,
+ V1SUB_RRR_0_OPCODE_X0 = 114,
+ V1SUB_RRR_0_OPCODE_X1 = 72,
+ V2ADDI_IMM8_OPCODE_X0 = 14,
+ V2ADDI_IMM8_OPCODE_X1 = 39,
+ V2ADDSC_RRR_0_OPCODE_X0 = 115,
+ V2ADDSC_RRR_0_OPCODE_X1 = 73,
+ V2ADD_RRR_0_OPCODE_X0 = 116,
+ V2ADD_RRR_0_OPCODE_X1 = 74,
+ V2ADIFFS_RRR_0_OPCODE_X0 = 117,
+ V2AVGS_RRR_0_OPCODE_X0 = 118,
+ V2CMPEQI_IMM8_OPCODE_X0 = 15,
+ V2CMPEQI_IMM8_OPCODE_X1 = 40,
+ V2CMPEQ_RRR_0_OPCODE_X0 = 119,
+ V2CMPEQ_RRR_0_OPCODE_X1 = 75,
+ V2CMPLES_RRR_0_OPCODE_X0 = 120,
+ V2CMPLES_RRR_0_OPCODE_X1 = 76,
+ V2CMPLEU_RRR_0_OPCODE_X0 = 121,
+ V2CMPLEU_RRR_0_OPCODE_X1 = 77,
+ V2CMPLTSI_IMM8_OPCODE_X0 = 16,
+ V2CMPLTSI_IMM8_OPCODE_X1 = 41,
+ V2CMPLTS_RRR_0_OPCODE_X0 = 122,
+ V2CMPLTS_RRR_0_OPCODE_X1 = 78,
+ V2CMPLTUI_IMM8_OPCODE_X0 = 17,
+ V2CMPLTUI_IMM8_OPCODE_X1 = 42,
+ V2CMPLTU_RRR_0_OPCODE_X0 = 123,
+ V2CMPLTU_RRR_0_OPCODE_X1 = 79,
+ V2CMPNE_RRR_0_OPCODE_X0 = 124,
+ V2CMPNE_RRR_0_OPCODE_X1 = 80,
+ V2DOTPA_RRR_0_OPCODE_X0 = 125,
+ V2DOTP_RRR_0_OPCODE_X0 = 126,
+ V2INT_H_RRR_0_OPCODE_X0 = 127,
+ V2INT_H_RRR_0_OPCODE_X1 = 81,
+ V2INT_L_RRR_0_OPCODE_X0 = 128,
+ V2INT_L_RRR_0_OPCODE_X1 = 82,
+ V2MAXSI_IMM8_OPCODE_X0 = 18,
+ V2MAXSI_IMM8_OPCODE_X1 = 43,
+ V2MAXS_RRR_0_OPCODE_X0 = 129,
+ V2MAXS_RRR_0_OPCODE_X1 = 83,
+ V2MINSI_IMM8_OPCODE_X0 = 19,
+ V2MINSI_IMM8_OPCODE_X1 = 44,
+ V2MINS_RRR_0_OPCODE_X0 = 130,
+ V2MINS_RRR_0_OPCODE_X1 = 84,
+ V2MNZ_RRR_0_OPCODE_X0 = 131,
+ V2MNZ_RRR_0_OPCODE_X1 = 85,
+ V2MULFSC_RRR_0_OPCODE_X0 = 132,
+ V2MULS_RRR_0_OPCODE_X0 = 133,
+ V2MULTS_RRR_0_OPCODE_X0 = 134,
+ V2MZ_RRR_0_OPCODE_X0 = 135,
+ V2MZ_RRR_0_OPCODE_X1 = 86,
+ V2PACKH_RRR_0_OPCODE_X0 = 136,
+ V2PACKH_RRR_0_OPCODE_X1 = 87,
+ V2PACKL_RRR_0_OPCODE_X0 = 137,
+ V2PACKL_RRR_0_OPCODE_X1 = 88,
+ V2PACKUC_RRR_0_OPCODE_X0 = 138,
+ V2PACKUC_RRR_0_OPCODE_X1 = 89,
+ V2SADAS_RRR_0_OPCODE_X0 = 139,
+ V2SADAU_RRR_0_OPCODE_X0 = 140,
+ V2SADS_RRR_0_OPCODE_X0 = 141,
+ V2SADU_RRR_0_OPCODE_X0 = 142,
+ V2SHLI_SHIFT_OPCODE_X0 = 10,
+ V2SHLI_SHIFT_OPCODE_X1 = 10,
+ V2SHLSC_RRR_0_OPCODE_X0 = 143,
+ V2SHLSC_RRR_0_OPCODE_X1 = 90,
+ V2SHL_RRR_0_OPCODE_X0 = 144,
+ V2SHL_RRR_0_OPCODE_X1 = 91,
+ V2SHRSI_SHIFT_OPCODE_X0 = 11,
+ V2SHRSI_SHIFT_OPCODE_X1 = 11,
+ V2SHRS_RRR_0_OPCODE_X0 = 145,
+ V2SHRS_RRR_0_OPCODE_X1 = 92,
+ V2SHRUI_SHIFT_OPCODE_X0 = 12,
+ V2SHRUI_SHIFT_OPCODE_X1 = 12,
+ V2SHRU_RRR_0_OPCODE_X0 = 146,
+ V2SHRU_RRR_0_OPCODE_X1 = 93,
+ V2SUBSC_RRR_0_OPCODE_X0 = 147,
+ V2SUBSC_RRR_0_OPCODE_X1 = 94,
+ V2SUB_RRR_0_OPCODE_X0 = 148,
+ V2SUB_RRR_0_OPCODE_X1 = 95,
+ V4ADDSC_RRR_0_OPCODE_X0 = 149,
+ V4ADDSC_RRR_0_OPCODE_X1 = 96,
+ V4ADD_RRR_0_OPCODE_X0 = 150,
+ V4ADD_RRR_0_OPCODE_X1 = 97,
+ V4INT_H_RRR_0_OPCODE_X0 = 151,
+ V4INT_H_RRR_0_OPCODE_X1 = 98,
+ V4INT_L_RRR_0_OPCODE_X0 = 152,
+ V4INT_L_RRR_0_OPCODE_X1 = 99,
+ V4PACKSC_RRR_0_OPCODE_X0 = 153,
+ V4PACKSC_RRR_0_OPCODE_X1 = 100,
+ V4SHLSC_RRR_0_OPCODE_X0 = 154,
+ V4SHLSC_RRR_0_OPCODE_X1 = 101,
+ V4SHL_RRR_0_OPCODE_X0 = 155,
+ V4SHL_RRR_0_OPCODE_X1 = 102,
+ V4SHRS_RRR_0_OPCODE_X0 = 156,
+ V4SHRS_RRR_0_OPCODE_X1 = 103,
+ V4SHRU_RRR_0_OPCODE_X0 = 157,
+ V4SHRU_RRR_0_OPCODE_X1 = 104,
+ V4SUBSC_RRR_0_OPCODE_X0 = 158,
+ V4SUBSC_RRR_0_OPCODE_X1 = 105,
+ V4SUB_RRR_0_OPCODE_X0 = 159,
+ V4SUB_RRR_0_OPCODE_X1 = 106,
+ WH64_UNARY_OPCODE_X1 = 38,
+ XORI_IMM8_OPCODE_X0 = 20,
+ XORI_IMM8_OPCODE_X1 = 45,
+ XOR_RRR_0_OPCODE_X0 = 160,
+ XOR_RRR_0_OPCODE_X1 = 107,
+ XOR_RRR_5_OPCODE_Y0 = 3,
+ XOR_RRR_5_OPCODE_Y1 = 3
+};
+
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* OPCODE_TILEGX_H */
diff --git a/target/tilegx/simd_helper.c b/target/tilegx/simd_helper.c
new file mode 100644
index 0000000000..2d40ddb63e
--- /dev/null
+++ b/target/tilegx/simd_helper.c
@@ -0,0 +1,166 @@
+/*
+ * QEMU TILE-Gx helpers
+ *
+ * Copyright (c) 2015 Chen Gang
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "qemu-common.h"
+#include "exec/helper-proto.h"
+
+
+/* Broadcast a value to all elements of a vector. */
+#define V1(X) (((X) & 0xff) * 0x0101010101010101ull)
+#define V2(X) (((X) & 0xffff) * 0x0001000100010001ull)
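+/* E.g. V1(0x80) == 0x8080808080808080ull and V2(0x7fff) == 0x7fff7fff7fff7fffull
+ (illustrative expansions of the broadcast macros above). */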
+
+
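+/* Lane-wise unsigned byte multiply; each product is truncated to the low 8 bits of its lane. */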
+uint64_t helper_v1multu(uint64_t a, uint64_t b)
+{
+ uint64_t r = 0;
+ int i;
+
+ for (i = 0; i < 64; i += 8) {
+ unsigned ae = extract64(a, i, 8);
+ unsigned be = extract64(b, i, 8);
+ r = deposit64(r, i, 8, ae * be);
+ }
+ return r;
+}
+
+uint64_t helper_v2mults(uint64_t a, uint64_t b)
+{
+ uint64_t r = 0;
+ int i;
+
+ /* While the instruction talks about signed inputs, with a
+ truncated result the sign of the inputs doesn't matter. */
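+ /* E.g. 0xffff * 0xffff == 0xfffe0001; its low 16 bits, 0x0001, match (-1) * (-1) == 1. */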
+ for (i = 0; i < 64; i += 16) {
+ unsigned ae = extract64(a, i, 16);
+ unsigned be = extract64(b, i, 16);
+ r = deposit64(r, i, 16, ae * be);
+ }
+ return r;
+}
+
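+/* For the lane shifts below, the mask clears the bits that would otherwise cross a lane
+ boundary; e.g. for b == 3, V1(0xff >> 3) == V1(0x1f) keeps only the low five bits of
+ every byte before the left shift (illustrative). */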
+uint64_t helper_v1shl(uint64_t a, uint64_t b)
+{
+ uint64_t m;
+
+ b &= 7;
+ m = V1(0xff >> b);
+ return (a & m) << b;
+}
+
+uint64_t helper_v2shl(uint64_t a, uint64_t b)
+{
+ uint64_t m;
+
+ b &= 15;
+ m = V2(0xffff >> b);
+ return (a & m) << b;
+}
+
+uint64_t helper_v1shru(uint64_t a, uint64_t b)
+{
+ uint64_t m;
+
+ b &= 7;
+ m = V1(0xff << b);
+ return (a & m) >> b;
+}
+
+uint64_t helper_v2shru(uint64_t a, uint64_t b)
+{
+ uint64_t m;
+
+ b &= 15;
+ m = V2(0xffff << b);
+ return (a & m) >> b;
+}
+
+uint64_t helper_v1shrs(uint64_t a, uint64_t b)
+{
+ uint64_t r = 0;
+ int i;
+
+ b &= 7;
+ for (i = 0; i < 64; i += 8) {
+ r = deposit64(r, i, 8, sextract64(a, i + b, 8 - b));
+ }
+ return r;
+}
+
+uint64_t helper_v2shrs(uint64_t a, uint64_t b)
+{
+ uint64_t r = 0;
+ int i;
+
+ b &= 15;
+ for (i = 0; i < 64; i += 16) {
+ r = deposit64(r, i, 16, sextract64(a, i + b, 16 - b));
+ }
+ return r;
+}
+
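+/* Byte interleave: v1int_h merges the high 32-bit halves of a and b, v1int_l the low
+ halves, with bytes of a landing in the odd result positions and bytes of b in the even. */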
+uint64_t helper_v1int_h(uint64_t a, uint64_t b)
+{
+ uint64_t r = 0;
+ int i;
+
+ for (i = 0; i < 32; i += 8) {
+ r = deposit64(r, 2 * i + 8, 8, extract64(a, i + 32, 8));
+ r = deposit64(r, 2 * i, 8, extract64(b, i + 32, 8));
+ }
+ return r;
+}
+
+uint64_t helper_v1int_l(uint64_t a, uint64_t b)
+{
+ uint64_t r = 0;
+ int i;
+
+ for (i = 0; i < 32; i += 8) {
+ r = deposit64(r, 2 * i + 8, 8, extract64(a, i, 8));
+ r = deposit64(r, 2 * i, 8, extract64(b, i, 8));
+ }
+ return r;
+}
+
+uint64_t helper_v2int_h(uint64_t a, uint64_t b)
+{
+ uint64_t r = 0;
+ int i;
+
+ for (i = 0; i < 32; i += 16) {
+ r = deposit64(r, 2 * i + 16, 16, extract64(a, i + 32, 16));
+ r = deposit64(r, 2 * i, 16, extract64(b, i + 32, 16));
+ }
+ return r;
+}
+
+uint64_t helper_v2int_l(uint64_t a, uint64_t b)
+{
+ uint64_t r = 0;
+ int i;
+
+ for (i = 0; i < 32; i += 16) {
+ r = deposit64(r, 2 * i + 16, 16, extract64(a, i, 16));
+ r = deposit64(r, 2 * i, 16, extract64(b, i, 16));
+ }
+ return r;
+}
diff --git a/target/tilegx/spr_def_64.h b/target/tilegx/spr_def_64.h
new file mode 100644
index 0000000000..67a6c1751e
--- /dev/null
+++ b/target/tilegx/spr_def_64.h
@@ -0,0 +1,216 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __DOXYGEN__
+
+#ifndef __ARCH_SPR_DEF_64_H__
+#define __ARCH_SPR_DEF_64_H__
+
+#define SPR_AUX_PERF_COUNT_0 0x2105
+#define SPR_AUX_PERF_COUNT_1 0x2106
+#define SPR_AUX_PERF_COUNT_CTL 0x2107
+#define SPR_AUX_PERF_COUNT_STS 0x2108
+#define SPR_CMPEXCH_VALUE 0x2780
+#define SPR_CYCLE 0x2781
+#define SPR_DONE 0x2705
+#define SPR_DSTREAM_PF 0x2706
+#define SPR_EVENT_BEGIN 0x2782
+#define SPR_EVENT_END 0x2783
+#define SPR_EX_CONTEXT_0_0 0x2580
+#define SPR_EX_CONTEXT_0_1 0x2581
+#define SPR_EX_CONTEXT_0_1__PL_SHIFT 0
+#define SPR_EX_CONTEXT_0_1__PL_RMASK 0x3
+#define SPR_EX_CONTEXT_0_1__PL_MASK 0x3
+#define SPR_EX_CONTEXT_0_1__ICS_SHIFT 2
+#define SPR_EX_CONTEXT_0_1__ICS_RMASK 0x1
+#define SPR_EX_CONTEXT_0_1__ICS_MASK 0x4
+#define SPR_EX_CONTEXT_1_0 0x2480
+#define SPR_EX_CONTEXT_1_1 0x2481
+#define SPR_EX_CONTEXT_1_1__PL_SHIFT 0
+#define SPR_EX_CONTEXT_1_1__PL_RMASK 0x3
+#define SPR_EX_CONTEXT_1_1__PL_MASK 0x3
+#define SPR_EX_CONTEXT_1_1__ICS_SHIFT 2
+#define SPR_EX_CONTEXT_1_1__ICS_RMASK 0x1
+#define SPR_EX_CONTEXT_1_1__ICS_MASK 0x4
+#define SPR_EX_CONTEXT_2_0 0x2380
+#define SPR_EX_CONTEXT_2_1 0x2381
+#define SPR_EX_CONTEXT_2_1__PL_SHIFT 0
+#define SPR_EX_CONTEXT_2_1__PL_RMASK 0x3
+#define SPR_EX_CONTEXT_2_1__PL_MASK 0x3
+#define SPR_EX_CONTEXT_2_1__ICS_SHIFT 2
+#define SPR_EX_CONTEXT_2_1__ICS_RMASK 0x1
+#define SPR_EX_CONTEXT_2_1__ICS_MASK 0x4
+#define SPR_FAIL 0x2707
+#define SPR_IDN_AVAIL_EN 0x1a05
+#define SPR_IDN_DATA_AVAIL 0x0a80
+#define SPR_IDN_DEADLOCK_TIMEOUT 0x1806
+#define SPR_IDN_DEMUX_COUNT_0 0x0a05
+#define SPR_IDN_DEMUX_COUNT_1 0x0a06
+#define SPR_IDN_DIRECTION_PROTECT 0x1405
+#define SPR_IDN_PENDING 0x0a08
+#define SPR_ILL_TRANS_REASON__I_STREAM_VA_RMASK 0x1
+#define SPR_INTCTRL_0_STATUS 0x2505
+#define SPR_INTCTRL_1_STATUS 0x2405
+#define SPR_INTCTRL_2_STATUS 0x2305
+#define SPR_INTERRUPT_CRITICAL_SECTION 0x2708
+#define SPR_INTERRUPT_MASK_0 0x2506
+#define SPR_INTERRUPT_MASK_1 0x2406
+#define SPR_INTERRUPT_MASK_2 0x2306
+#define SPR_INTERRUPT_MASK_RESET_0 0x2507
+#define SPR_INTERRUPT_MASK_RESET_1 0x2407
+#define SPR_INTERRUPT_MASK_RESET_2 0x2307
+#define SPR_INTERRUPT_MASK_SET_0 0x2508
+#define SPR_INTERRUPT_MASK_SET_1 0x2408
+#define SPR_INTERRUPT_MASK_SET_2 0x2308
+#define SPR_INTERRUPT_VECTOR_BASE_0 0x2509
+#define SPR_INTERRUPT_VECTOR_BASE_1 0x2409
+#define SPR_INTERRUPT_VECTOR_BASE_2 0x2309
+#define SPR_INTERRUPT_VECTOR_BASE_3 0x2209
+#define SPR_IPI_EVENT_0 0x1f05
+#define SPR_IPI_EVENT_1 0x1e05
+#define SPR_IPI_EVENT_2 0x1d05
+#define SPR_IPI_EVENT_RESET_0 0x1f06
+#define SPR_IPI_EVENT_RESET_1 0x1e06
+#define SPR_IPI_EVENT_RESET_2 0x1d06
+#define SPR_IPI_EVENT_SET_0 0x1f07
+#define SPR_IPI_EVENT_SET_1 0x1e07
+#define SPR_IPI_EVENT_SET_2 0x1d07
+#define SPR_IPI_MASK_0 0x1f08
+#define SPR_IPI_MASK_1 0x1e08
+#define SPR_IPI_MASK_2 0x1d08
+#define SPR_IPI_MASK_RESET_0 0x1f09
+#define SPR_IPI_MASK_RESET_1 0x1e09
+#define SPR_IPI_MASK_RESET_2 0x1d09
+#define SPR_IPI_MASK_SET_0 0x1f0a
+#define SPR_IPI_MASK_SET_1 0x1e0a
+#define SPR_IPI_MASK_SET_2 0x1d0a
+#define SPR_MPL_AUX_PERF_COUNT_SET_0 0x2100
+#define SPR_MPL_AUX_PERF_COUNT_SET_1 0x2101
+#define SPR_MPL_AUX_PERF_COUNT_SET_2 0x2102
+#define SPR_MPL_AUX_TILE_TIMER_SET_0 0x1700
+#define SPR_MPL_AUX_TILE_TIMER_SET_1 0x1701
+#define SPR_MPL_AUX_TILE_TIMER_SET_2 0x1702
+#define SPR_MPL_IDN_ACCESS_SET_0 0x0a00
+#define SPR_MPL_IDN_ACCESS_SET_1 0x0a01
+#define SPR_MPL_IDN_ACCESS_SET_2 0x0a02
+#define SPR_MPL_IDN_AVAIL_SET_0 0x1a00
+#define SPR_MPL_IDN_AVAIL_SET_1 0x1a01
+#define SPR_MPL_IDN_AVAIL_SET_2 0x1a02
+#define SPR_MPL_IDN_COMPLETE_SET_0 0x0500
+#define SPR_MPL_IDN_COMPLETE_SET_1 0x0501
+#define SPR_MPL_IDN_COMPLETE_SET_2 0x0502
+#define SPR_MPL_IDN_FIREWALL_SET_0 0x1400
+#define SPR_MPL_IDN_FIREWALL_SET_1 0x1401
+#define SPR_MPL_IDN_FIREWALL_SET_2 0x1402
+#define SPR_MPL_IDN_TIMER_SET_0 0x1800
+#define SPR_MPL_IDN_TIMER_SET_1 0x1801
+#define SPR_MPL_IDN_TIMER_SET_2 0x1802
+#define SPR_MPL_INTCTRL_0_SET_0 0x2500
+#define SPR_MPL_INTCTRL_0_SET_1 0x2501
+#define SPR_MPL_INTCTRL_0_SET_2 0x2502
+#define SPR_MPL_INTCTRL_1_SET_0 0x2400
+#define SPR_MPL_INTCTRL_1_SET_1 0x2401
+#define SPR_MPL_INTCTRL_1_SET_2 0x2402
+#define SPR_MPL_INTCTRL_2_SET_0 0x2300
+#define SPR_MPL_INTCTRL_2_SET_1 0x2301
+#define SPR_MPL_INTCTRL_2_SET_2 0x2302
+#define SPR_MPL_IPI_0 0x1f04
+#define SPR_MPL_IPI_0_SET_0 0x1f00
+#define SPR_MPL_IPI_0_SET_1 0x1f01
+#define SPR_MPL_IPI_0_SET_2 0x1f02
+#define SPR_MPL_IPI_1 0x1e04
+#define SPR_MPL_IPI_1_SET_0 0x1e00
+#define SPR_MPL_IPI_1_SET_1 0x1e01
+#define SPR_MPL_IPI_1_SET_2 0x1e02
+#define SPR_MPL_IPI_2 0x1d04
+#define SPR_MPL_IPI_2_SET_0 0x1d00
+#define SPR_MPL_IPI_2_SET_1 0x1d01
+#define SPR_MPL_IPI_2_SET_2 0x1d02
+#define SPR_MPL_PERF_COUNT_SET_0 0x2000
+#define SPR_MPL_PERF_COUNT_SET_1 0x2001
+#define SPR_MPL_PERF_COUNT_SET_2 0x2002
+#define SPR_MPL_UDN_ACCESS_SET_0 0x0b00
+#define SPR_MPL_UDN_ACCESS_SET_1 0x0b01
+#define SPR_MPL_UDN_ACCESS_SET_2 0x0b02
+#define SPR_MPL_UDN_AVAIL_SET_0 0x1b00
+#define SPR_MPL_UDN_AVAIL_SET_1 0x1b01
+#define SPR_MPL_UDN_AVAIL_SET_2 0x1b02
+#define SPR_MPL_UDN_COMPLETE_SET_0 0x0600
+#define SPR_MPL_UDN_COMPLETE_SET_1 0x0601
+#define SPR_MPL_UDN_COMPLETE_SET_2 0x0602
+#define SPR_MPL_UDN_FIREWALL_SET_0 0x1500
+#define SPR_MPL_UDN_FIREWALL_SET_1 0x1501
+#define SPR_MPL_UDN_FIREWALL_SET_2 0x1502
+#define SPR_MPL_UDN_TIMER_SET_0 0x1900
+#define SPR_MPL_UDN_TIMER_SET_1 0x1901
+#define SPR_MPL_UDN_TIMER_SET_2 0x1902
+#define SPR_MPL_WORLD_ACCESS_SET_0 0x2700
+#define SPR_MPL_WORLD_ACCESS_SET_1 0x2701
+#define SPR_MPL_WORLD_ACCESS_SET_2 0x2702
+#define SPR_PASS 0x2709
+#define SPR_PERF_COUNT_0 0x2005
+#define SPR_PERF_COUNT_1 0x2006
+#define SPR_PERF_COUNT_CTL 0x2007
+#define SPR_PERF_COUNT_DN_CTL 0x2008
+#define SPR_PERF_COUNT_STS 0x2009
+#define SPR_PROC_STATUS 0x2784
+#define SPR_SIM_CONTROL 0x2785
+#define SPR_SINGLE_STEP_CONTROL_0 0x0405
+#define SPR_SINGLE_STEP_CONTROL_0__CANCELED_MASK 0x1
+#define SPR_SINGLE_STEP_CONTROL_0__INHIBIT_MASK 0x2
+#define SPR_SINGLE_STEP_CONTROL_1 0x0305
+#define SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK 0x1
+#define SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK 0x2
+#define SPR_SINGLE_STEP_CONTROL_2 0x0205
+#define SPR_SINGLE_STEP_CONTROL_2__CANCELED_MASK 0x1
+#define SPR_SINGLE_STEP_CONTROL_2__INHIBIT_MASK 0x2
+#define SPR_SINGLE_STEP_EN_0_0 0x250a
+#define SPR_SINGLE_STEP_EN_0_1 0x240a
+#define SPR_SINGLE_STEP_EN_0_2 0x230a
+#define SPR_SINGLE_STEP_EN_1_0 0x250b
+#define SPR_SINGLE_STEP_EN_1_1 0x240b
+#define SPR_SINGLE_STEP_EN_1_2 0x230b
+#define SPR_SINGLE_STEP_EN_2_0 0x250c
+#define SPR_SINGLE_STEP_EN_2_1 0x240c
+#define SPR_SINGLE_STEP_EN_2_2 0x230c
+#define SPR_SYSTEM_SAVE_0_0 0x2582
+#define SPR_SYSTEM_SAVE_0_1 0x2583
+#define SPR_SYSTEM_SAVE_0_2 0x2584
+#define SPR_SYSTEM_SAVE_0_3 0x2585
+#define SPR_SYSTEM_SAVE_1_0 0x2482
+#define SPR_SYSTEM_SAVE_1_1 0x2483
+#define SPR_SYSTEM_SAVE_1_2 0x2484
+#define SPR_SYSTEM_SAVE_1_3 0x2485
+#define SPR_SYSTEM_SAVE_2_0 0x2382
+#define SPR_SYSTEM_SAVE_2_1 0x2383
+#define SPR_SYSTEM_SAVE_2_2 0x2384
+#define SPR_SYSTEM_SAVE_2_3 0x2385
+#define SPR_TILE_COORD 0x270b
+#define SPR_TILE_RTF_HWM 0x270c
+#define SPR_TILE_TIMER_CONTROL 0x1605
+#define SPR_UDN_AVAIL_EN 0x1b05
+#define SPR_UDN_DATA_AVAIL 0x0b80
+#define SPR_UDN_DEADLOCK_TIMEOUT 0x1906
+#define SPR_UDN_DEMUX_COUNT_0 0x0b05
+#define SPR_UDN_DEMUX_COUNT_1 0x0b06
+#define SPR_UDN_DEMUX_COUNT_2 0x0b07
+#define SPR_UDN_DEMUX_COUNT_3 0x0b08
+#define SPR_UDN_DIRECTION_PROTECT 0x1505
+#define SPR_UDN_PENDING 0x0b0a
+#define SPR_WATCH_MASK 0x200a
+#define SPR_WATCH_VAL 0x200b
+
+#endif /* !defined(__ARCH_SPR_DEF_64_H__) */
+
+#endif /* !defined(__DOXYGEN__) */
diff --git a/target/tilegx/translate.c b/target/tilegx/translate.c
new file mode 100644
index 0000000000..9c734eeba3
--- /dev/null
+++ b/target/tilegx/translate.c
@@ -0,0 +1,2457 @@
+/*
+ * QEMU TILE-Gx CPU
+ *
+ * Copyright (c) 2015 Chen Gang
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/lgpl-2.1.html>
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "qemu/log.h"
+#include "exec/log.h"
+#include "disas/disas.h"
+#include "exec/exec-all.h"
+#include "tcg-op.h"
+#include "exec/cpu_ldst.h"
+#include "linux-user/syscall_defs.h"
+
+#include "opcode_tilegx.h"
+#include "spr_def_64.h"
+
+#define FMT64X "%016" PRIx64
+
+static TCGv_env cpu_env;
+static TCGv cpu_pc;
+static TCGv cpu_regs[TILEGX_R_COUNT];
+
+static const char * const reg_names[64] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+ "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
+ "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
+ "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
+ "r48", "r49", "r50", "r51", "bp", "tp", "sp", "lr",
+ "sn", "idn0", "idn1", "udn0", "udn1", "udn2", "udn3", "zero"
+};
+
+/* Modified registers are cached in temporaries until the end of the bundle. */
+typedef struct {
+ unsigned reg;
+ TCGv val;
+} DisasContextTemp;
+
+#define MAX_WRITEBACK 4
+
+/* This is the state at translation time. */
+typedef struct {
+ uint64_t pc; /* Current pc */
+
+ TCGv zero; /* For zero register */
+
+ DisasContextTemp wb[MAX_WRITEBACK];
+ int num_wb;
+ int mmuidx;
+ bool exit_tb;
+ TileExcp atomic_excp;
+
+ struct {
+ TCGCond cond; /* branch condition */
+ TCGv dest; /* branch destination */
+ TCGv val1; /* value to be compared against zero, for cond */
+ } jmp; /* Pending jump state; at most one per TB */
+} DisasContext;
+
+#include "exec/gen-icount.h"
+
+/* Differentiate the various pipe encodings. */
+#define TY_X0 0
+#define TY_X1 1
+#define TY_Y0 2
+#define TY_Y1 3
+
+/* Remerge the base opcode and extension fields for switching.
+ The X opcode fields are 3 bits; Y0/Y1 opcode fields are 4 bits;
+ Y2 opcode field is 2 bits. */
+#define OE(OP, EXT, XY) (TY_##XY + OP * 4 + EXT * 64)
+
+/* Similar, but for Y2 only. */
+#define OEY2(OP, MODE) (OP + MODE * 4)
+
+/* Similar, but make sure opcode names match up. */
+#define OE_RR_X0(E) OE(RRR_0_OPCODE_X0, E##_UNARY_OPCODE_X0, X0)
+#define OE_RR_X1(E) OE(RRR_0_OPCODE_X1, E##_UNARY_OPCODE_X1, X1)
+#define OE_RR_Y0(E) OE(RRR_1_OPCODE_Y0, E##_UNARY_OPCODE_Y0, Y0)
+#define OE_RR_Y1(E) OE(RRR_1_OPCODE_Y1, E##_UNARY_OPCODE_Y1, Y1)
+#define OE_RRR(E,N,XY) OE(RRR_##N##_OPCODE_##XY, E##_RRR_##N##_OPCODE_##XY, XY)
+#define OE_IM(E,XY) OE(IMM8_OPCODE_##XY, E##_IMM8_OPCODE_##XY, XY)
+#define OE_SH(E,XY) OE(SHIFT_OPCODE_##XY, E##_SHIFT_OPCODE_##XY, XY)
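+/* For example, OE_RRR(CMPNE, 0, X0) evaluates to
+ TY_X0 + RRR_0_OPCODE_X0 * 4 + CMPNE_RRR_0_OPCODE_X0 * 64 == 0 + 20 + 768 == 788
+ (illustrative). */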
+
+#define V1_IMM(X) (((X) & 0xff) * 0x0101010101010101ull)
+#define V2_IMM(X) (((X) & 0xffff) * 0x0001000100010001ull)
+
+
+static void gen_exception(DisasContext *dc, TileExcp num)
+{
+ TCGv_i32 tmp;
+
+ tcg_gen_movi_tl(cpu_pc, dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
+
+ tmp = tcg_const_i32(num);
+ gen_helper_exception(cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+ dc->exit_tb = true;
+}
+
+static bool check_gr(DisasContext *dc, uint8_t reg)
+{
+ if (likely(reg < TILEGX_R_COUNT)) {
+ return true;
+ }
+
+ switch (reg) {
+ case TILEGX_R_SN:
+ case TILEGX_R_ZERO:
+ break;
+ case TILEGX_R_IDN0:
+ case TILEGX_R_IDN1:
+ gen_exception(dc, TILEGX_EXCP_REG_IDN_ACCESS);
+ break;
+ case TILEGX_R_UDN0:
+ case TILEGX_R_UDN1:
+ case TILEGX_R_UDN2:
+ case TILEGX_R_UDN3:
+ gen_exception(dc, TILEGX_EXCP_REG_UDN_ACCESS);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ return false;
+}
+
+static TCGv load_zero(DisasContext *dc)
+{
+ if (TCGV_IS_UNUSED_I64(dc->zero)) {
+ dc->zero = tcg_const_i64(0);
+ }
+ return dc->zero;
+}
+
+static TCGv load_gr(DisasContext *dc, unsigned reg)
+{
+ if (check_gr(dc, reg)) {
+ return cpu_regs[reg];
+ }
+ return load_zero(dc);
+}
+
+static TCGv dest_gr(DisasContext *dc, unsigned reg)
+{
+ int n;
+
+ /* Skip the result, mark the exception if necessary, and continue */
+ check_gr(dc, reg);
+
+ n = dc->num_wb++;
+ dc->wb[n].reg = reg;
+ return dc->wb[n].val = tcg_temp_new_i64();
+}
+
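+/* Saturating 32-bit arithmetic: operate on sign-extended values in 64 bits, then clamp
+ the result to the int32 range, so e.g. 0x7fffffff + 1 yields 0x7fffffff instead of
+ wrapping (illustrative). */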
+static void gen_saturate_op(TCGv tdest, TCGv tsrca, TCGv tsrcb,
+ void (*operate)(TCGv, TCGv, TCGv))
+{
+ TCGv t0 = tcg_temp_new();
+
+ tcg_gen_ext32s_tl(tdest, tsrca);
+ tcg_gen_ext32s_tl(t0, tsrcb);
+ operate(tdest, tdest, t0);
+
+ tcg_gen_movi_tl(t0, 0x7fffffff);
+ tcg_gen_movcond_tl(TCG_COND_GT, tdest, tdest, t0, t0, tdest);
+ tcg_gen_movi_tl(t0, -0x80000000LL);
+ tcg_gen_movcond_tl(TCG_COND_LT, tdest, tdest, t0, t0, tdest);
+
+ tcg_temp_free(t0);
+}
+
+static void gen_atomic_excp(DisasContext *dc, unsigned dest, TCGv tdest,
+ TCGv tsrca, TCGv tsrcb, TileExcp excp)
+{
+#ifdef CONFIG_USER_ONLY
+ TCGv_i32 t;
+
+ tcg_gen_st_tl(tsrca, cpu_env, offsetof(CPUTLGState, atomic_srca));
+ tcg_gen_st_tl(tsrcb, cpu_env, offsetof(CPUTLGState, atomic_srcb));
+ t = tcg_const_i32(dest);
+ tcg_gen_st_i32(t, cpu_env, offsetof(CPUTLGState, atomic_dstr));
+ tcg_temp_free_i32(t);
+
+ /* We're going to write the real result in the exception. But in
+ the meantime we've already created a writeback register, and
+ we don't want that to remain uninitialized. */
+ tcg_gen_movi_tl(tdest, 0);
+
+ /* Note that we need to delay issuing the exception that implements
+ the atomic operation until after writing back the results of the
+ instruction occupying the X0 pipe. */
+ dc->atomic_excp = excp;
+#else
+ gen_exception(dc, TILEGX_EXCP_OPCODE_UNIMPLEMENTED);
+#endif
+}
+
+/* Shift the 128-bit value TSRCA:TSRCD right by the number of bytes
+ specified by the bottom 3 bits of TSRCB, and set TDEST to the
+ low 64 bits of the resulting value. */
+static void gen_dblalign(TCGv tdest, TCGv tsrcd, TCGv tsrca, TCGv tsrcb)
+{
+ TCGv t0 = tcg_temp_new();
+
+ tcg_gen_andi_tl(t0, tsrcb, 7);
+ tcg_gen_shli_tl(t0, t0, 3);
+ tcg_gen_shr_tl(tdest, tsrcd, t0);
+
+ /* We want to do "t0 = tsrca << (64 - t0)". Two's complement
+ arithmetic on a 6-bit field tells us that 64 - t0 is equal
+ to (t0 ^ 63) + 1. So we can do the shift in two parts,
+ neither of which will be an invalid shift by 64. */
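+ /* Illustrative check: for t0 == 8, (8 ^ 63) + 1 == 55 + 1 == 56 == 64 - 8. */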
+ tcg_gen_xori_tl(t0, t0, 63);
+ tcg_gen_shl_tl(t0, tsrca, t0);
+ tcg_gen_shli_tl(t0, t0, 1);
+ tcg_gen_or_tl(tdest, tdest, t0);
+
+ tcg_temp_free(t0);
+}
+
+/* Similarly, except that the 128-bit value is TSRCA:TSRCB, and the
+ right shift is an immediate. */
+static void gen_dblaligni(TCGv tdest, TCGv tsrca, TCGv tsrcb, int shr)
+{
+ TCGv t0 = tcg_temp_new();
+
+ tcg_gen_shri_tl(t0, tsrcb, shr);
+ tcg_gen_shli_tl(tdest, tsrca, 64 - shr);
+ tcg_gen_or_tl(tdest, tdest, t0);
+
+ tcg_temp_free(t0);
+}
+
+typedef enum {
+ LU, LS, HU, HS
+} MulHalf;
+
+static void gen_ext_half(TCGv d, TCGv s, MulHalf h)
+{
+ switch (h) {
+ case LU:
+ tcg_gen_ext32u_tl(d, s);
+ break;
+ case LS:
+ tcg_gen_ext32s_tl(d, s);
+ break;
+ case HU:
+ tcg_gen_shri_tl(d, s, 32);
+ break;
+ case HS:
+ tcg_gen_sari_tl(d, s, 32);
+ break;
+ }
+}
+
+static void gen_mul_half(TCGv tdest, TCGv tsrca, TCGv tsrcb,
+ MulHalf ha, MulHalf hb)
+{
+ TCGv t = tcg_temp_new();
+ gen_ext_half(t, tsrca, ha);
+ gen_ext_half(tdest, tsrcb, hb);
+ tcg_gen_mul_tl(tdest, tdest, t);
+ tcg_temp_free(t);
+}
+
+static void gen_cmul2(TCGv tdest, TCGv tsrca, TCGv tsrcb, int sh, int rd)
+{
+ TCGv_i32 tsh = tcg_const_i32(sh);
+ TCGv_i32 trd = tcg_const_i32(rd);
+ gen_helper_cmul2(tdest, tsrca, tsrcb, tsh, trd);
+ tcg_temp_free_i32(tsh);
+ tcg_temp_free_i32(trd);
+}
+
+static TileExcp gen_st_opcode(DisasContext *dc, unsigned dest, unsigned srca,
+ unsigned srcb, TCGMemOp memop, const char *name)
+{
+ if (dest) {
+ return TILEGX_EXCP_OPCODE_UNKNOWN;
+ }
+
+ tcg_gen_qemu_st_tl(load_gr(dc, srcb), load_gr(dc, srca),
+ dc->mmuidx, memop);
+
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s", name,
+ reg_names[srca], reg_names[srcb]);
+ return TILEGX_EXCP_NONE;
+}
+
+static TileExcp gen_st_add_opcode(DisasContext *dc, unsigned srca, unsigned srcb,
+ int imm, TCGMemOp memop, const char *name)
+{
+ TCGv tsrca = load_gr(dc, srca);
+ TCGv tsrcb = load_gr(dc, srcb);
+
+ tcg_gen_qemu_st_tl(tsrcb, tsrca, dc->mmuidx, memop);
+ tcg_gen_addi_tl(dest_gr(dc, srca), tsrca, imm);
+
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s, %d", name,
+ reg_names[srca], reg_names[srcb], imm);
+ return TILEGX_EXCP_NONE;
+}
+
+/* Equality comparison with zero can be done quickly and efficiently. */
+static void gen_v1cmpeq0(TCGv v)
+{
+ TCGv m = tcg_const_tl(V1_IMM(0x7f));
+ TCGv c = tcg_temp_new();
+
+ /* ~(((v & m) + m) | m | v). Sets the msb for each byte == 0. */
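+ /* Illustrative byte: v == 0x00 gives ~(0x7f | 0x7f | 0x00) == 0x80 (msb set),
+ while v == 0x01 gives ~(0x80 | 0x7f | 0x01) == 0x00 (msb clear). */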
+ tcg_gen_and_tl(c, v, m);
+ tcg_gen_add_tl(c, c, m);
+ tcg_gen_or_tl(c, c, m);
+ tcg_gen_nor_tl(c, c, v);
+ tcg_temp_free(m);
+
+ /* Shift the msb down to form the lsb boolean result. */
+ tcg_gen_shri_tl(v, c, 7);
+ tcg_temp_free(c);
+}
+
+static void gen_v1cmpne0(TCGv v)
+{
+ TCGv m = tcg_const_tl(V1_IMM(0x7f));
+ TCGv c = tcg_temp_new();
+
+ /* (((v & m) + m) | v) & ~m. Sets the msb for each byte != 0. */
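+ /* Illustrative byte: v == 0x01 gives (0x80 | 0x01) & 0x80 == 0x80 (msb set),
+ while v == 0x00 gives (0x7f | 0x00) & 0x80 == 0x00 (msb clear). */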
+ tcg_gen_and_tl(c, v, m);
+ tcg_gen_add_tl(c, c, m);
+ tcg_gen_or_tl(c, c, v);
+ tcg_gen_andc_tl(c, c, m);
+ tcg_temp_free(m);
+
+ /* Shift the msb down to form the lsb boolean result. */
+ tcg_gen_shri_tl(v, c, 7);
+ tcg_temp_free(c);
+}
+
+/* Vector addition can be performed via arithmetic plus masking. It is
+ efficient this way only for 4 or more elements. */
+static void gen_v12add(TCGv tdest, TCGv tsrca, TCGv tsrcb, uint64_t sign)
+{
+ TCGv tmask = tcg_const_tl(~sign);
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+
+ /* ((a & ~sign) + (b & ~sign)) ^ ((a ^ b) & sign). */
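+ /* Illustrative byte lane (sign == 0x80): a == 0x7f, b == 0x01 gives low sum 0x80
+ and xor part 0x00, i.e. a lane result of 0x80 with no carry into the next lane. */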
+ tcg_gen_and_tl(t0, tsrca, tmask);
+ tcg_gen_and_tl(t1, tsrcb, tmask);
+ tcg_gen_add_tl(tdest, t0, t1);
+ tcg_gen_xor_tl(t0, tsrca, tsrcb);
+ tcg_gen_andc_tl(t0, t0, tmask);
+ tcg_gen_xor_tl(tdest, tdest, t0);
+
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(tmask);
+}
+
+/* Similarly for vector subtraction. */
+static void gen_v12sub(TCGv tdest, TCGv tsrca, TCGv tsrcb, uint64_t sign)
+{
+ TCGv tsign = tcg_const_tl(sign);
+ TCGv t0 = tcg_temp_new();
+ TCGv t1 = tcg_temp_new();
+
+ /* ((a | sign) - (b & ~sign)) ^ ((a ^ ~b) & sign). */
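+ /* Illustrative byte lane (sign == 0x80): a == 0x00, b == 0x01 gives 0x80 - 0x01 == 0x7f
+ and xor part 0x80, i.e. a lane result of 0xff with no borrow from the next lane. */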
+ tcg_gen_or_tl(t0, tsrca, tsign);
+ tcg_gen_andc_tl(t1, tsrcb, tsign);
+ tcg_gen_sub_tl(tdest, t0, t1);
+ tcg_gen_eqv_tl(t0, tsrca, tsrcb);
+ tcg_gen_and_tl(t0, t0, tsign);
+ tcg_gen_xor_tl(tdest, tdest, t0);
+
+ tcg_temp_free(t1);
+ tcg_temp_free(t0);
+ tcg_temp_free(tsign);
+}
+
+static void gen_v4sh(TCGv d64, TCGv a64, TCGv b64,
+ void (*generate)(TCGv_i32, TCGv_i32, TCGv_i32))
+{
+ TCGv_i32 al = tcg_temp_new_i32();
+ TCGv_i32 ah = tcg_temp_new_i32();
+ TCGv_i32 bl = tcg_temp_new_i32();
+
+ tcg_gen_extr_i64_i32(al, ah, a64);
+ tcg_gen_extrl_i64_i32(bl, b64);
+ tcg_gen_andi_i32(bl, bl, 31);
+ generate(al, al, bl);
+ generate(ah, ah, bl);
+ tcg_gen_concat_i32_i64(d64, al, ah);
+
+ tcg_temp_free_i32(al);
+ tcg_temp_free_i32(ah);
+ tcg_temp_free_i32(bl);
+}
+
+static void gen_v4op(TCGv d64, TCGv a64, TCGv b64,
+ void (*generate)(TCGv_i32, TCGv_i32, TCGv_i32))
+{
+ TCGv_i32 al = tcg_temp_new_i32();
+ TCGv_i32 ah = tcg_temp_new_i32();
+ TCGv_i32 bl = tcg_temp_new_i32();
+ TCGv_i32 bh = tcg_temp_new_i32();
+
+ tcg_gen_extr_i64_i32(al, ah, a64);
+ tcg_gen_extr_i64_i32(bl, bh, b64);
+ generate(al, al, bl);
+ generate(ah, ah, bh);
+ tcg_gen_concat_i32_i64(d64, al, ah);
+
+ tcg_temp_free_i32(al);
+ tcg_temp_free_i32(ah);
+ tcg_temp_free_i32(bl);
+ tcg_temp_free_i32(bh);
+}
+
+static TileExcp gen_signal(DisasContext *dc, int signo, int sigcode,
+ const char *mnemonic)
+{
+ TCGv_i32 t0 = tcg_const_i32(signo);
+ TCGv_i32 t1 = tcg_const_i32(sigcode);
+
+ tcg_gen_st_i32(t0, cpu_env, offsetof(CPUTLGState, signo));
+ tcg_gen_st_i32(t1, cpu_env, offsetof(CPUTLGState, sigcode));
+
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t0);
+
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s", mnemonic);
+ return TILEGX_EXCP_SIGNAL;
+}
+
+static bool parse_from_addli(uint64_t bundle, int *signo, int *sigcode)
+{
+ int imm;
+
+ if ((get_Opcode_X0(bundle) != ADDLI_OPCODE_X0)
+ || (get_Dest_X0(bundle) != TILEGX_R_ZERO)
+ || (get_SrcA_X0(bundle) != TILEGX_R_ZERO)) {
+ return false;
+ }
+
+ imm = get_Imm16_X0(bundle);
+ *signo = imm & 0x3f;
+ *sigcode = (imm >> 6) & 0xf;
+
+ /* ??? The linux kernel validates both signo and the sigcode vs the
+ known max for each signal. Don't bother here. */
+ return true;
+}
+
+static TileExcp gen_specill(DisasContext *dc, unsigned dest, unsigned srca,
+ uint64_t bundle)
+{
+ const char *mnemonic;
+ int signo;
+ int sigcode;
+
+ if (dest == 0x1c && srca == 0x25) {
+ signo = TARGET_SIGTRAP;
+ sigcode = TARGET_TRAP_BRKPT;
+ mnemonic = "bpt";
+ } else if (dest == 0x1d && srca == 0x25
+ && parse_from_addli(bundle, &signo, &sigcode)) {
+ mnemonic = "raise";
+ } else {
+ signo = TARGET_SIGILL;
+ sigcode = TARGET_ILL_ILLOPC;
+ mnemonic = "ill";
+ }
+
+ return gen_signal(dc, signo, sigcode, mnemonic);
+}
+
+static TileExcp gen_rr_opcode(DisasContext *dc, unsigned opext,
+ unsigned dest, unsigned srca, uint64_t bundle)
+{
+ TCGv tdest, tsrca;
+ const char *mnemonic;
+ TCGMemOp memop;
+ TileExcp ret = TILEGX_EXCP_NONE;
+ bool prefetch_nofault = false;
+
+ /* Eliminate instructions with no output before doing anything else. */
+ switch (opext) {
+ case OE_RR_Y0(NOP):
+ case OE_RR_Y1(NOP):
+ case OE_RR_X0(NOP):
+ case OE_RR_X1(NOP):
+ mnemonic = "nop";
+ goto done0;
+ case OE_RR_Y0(FNOP):
+ case OE_RR_Y1(FNOP):
+ case OE_RR_X0(FNOP):
+ case OE_RR_X1(FNOP):
+ mnemonic = "fnop";
+ goto done0;
+ case OE_RR_X1(DRAIN):
+ mnemonic = "drain";
+ goto done0;
+ case OE_RR_X1(FLUSHWB):
+ mnemonic = "flushwb";
+ goto done0;
+ case OE_RR_X1(ILL):
+ return gen_specill(dc, dest, srca, bundle);
+ case OE_RR_Y1(ILL):
+ return gen_signal(dc, TARGET_SIGILL, TARGET_ILL_ILLOPC, "ill");
+ case OE_RR_X1(MF):
+ mnemonic = "mf";
+ goto done0;
+ case OE_RR_X1(NAP):
+ /* ??? This should yield, especially in system mode. */
+ mnemonic = "nap";
+ goto done0;
+ case OE_RR_X1(IRET):
+ gen_helper_ext01_ics(cpu_env);
+ dc->jmp.cond = TCG_COND_ALWAYS;
+ dc->jmp.dest = tcg_temp_new();
+ tcg_gen_ld_tl(dc->jmp.dest, cpu_env,
+ offsetof(CPUTLGState, spregs[TILEGX_SPR_EX_CONTEXT_0_0]));
+ tcg_gen_andi_tl(dc->jmp.dest, dc->jmp.dest, ~7);
+ mnemonic = "iret";
+ goto done0;
+ case OE_RR_X1(SWINT0):
+ case OE_RR_X1(SWINT2):
+ case OE_RR_X1(SWINT3):
+ return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+ case OE_RR_X1(SWINT1):
+ ret = TILEGX_EXCP_SYSCALL;
+ mnemonic = "swint1";
+ done0:
+ if (srca || dest) {
+ return TILEGX_EXCP_OPCODE_UNKNOWN;
+ }
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s", mnemonic);
+ return ret;
+
+ case OE_RR_X1(DTLBPR):
+ return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+ case OE_RR_X1(FINV):
+ mnemonic = "finv";
+ goto done1;
+ case OE_RR_X1(FLUSH):
+ mnemonic = "flush";
+ goto done1;
+ case OE_RR_X1(ICOH):
+ mnemonic = "icoh";
+ goto done1;
+ case OE_RR_X1(INV):
+ mnemonic = "inv";
+ goto done1;
+ case OE_RR_X1(WH64):
+ mnemonic = "wh64";
+ goto done1;
+ case OE_RR_X1(JRP):
+ case OE_RR_Y1(JRP):
+ mnemonic = "jrp";
+ goto do_jr;
+ case OE_RR_X1(JR):
+ case OE_RR_Y1(JR):
+ mnemonic = "jr";
+ goto do_jr;
+ case OE_RR_X1(JALRP):
+ case OE_RR_Y1(JALRP):
+ mnemonic = "jalrp";
+ goto do_jalr;
+ case OE_RR_X1(JALR):
+ case OE_RR_Y1(JALR):
+ mnemonic = "jalr";
+ do_jalr:
+ tcg_gen_movi_tl(dest_gr(dc, TILEGX_R_LR),
+ dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
+ do_jr:
+ dc->jmp.cond = TCG_COND_ALWAYS;
+ dc->jmp.dest = tcg_temp_new();
+ tcg_gen_andi_tl(dc->jmp.dest, load_gr(dc, srca), ~7);
+ done1:
+ if (dest) {
+ return TILEGX_EXCP_OPCODE_UNKNOWN;
+ }
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s", mnemonic, reg_names[srca]);
+ return ret;
+ }
+
+ tdest = dest_gr(dc, dest);
+ tsrca = load_gr(dc, srca);
+
+ switch (opext) {
+ case OE_RR_X0(CNTLZ):
+ case OE_RR_Y0(CNTLZ):
+ gen_helper_cntlz(tdest, tsrca);
+ mnemonic = "cntlz";
+ break;
+ case OE_RR_X0(CNTTZ):
+ case OE_RR_Y0(CNTTZ):
+ gen_helper_cnttz(tdest, tsrca);
+ mnemonic = "cnttz";
+ break;
+ case OE_RR_X0(FSINGLE_PACK1):
+ case OE_RR_Y0(FSINGLE_PACK1):
+ return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+ case OE_RR_X1(LD1S):
+ memop = MO_SB;
+ mnemonic = "ld1s"; /* prefetch_l1_fault */
+ goto do_load;
+ case OE_RR_X1(LD1U):
+ memop = MO_UB;
+ mnemonic = "ld1u"; /* prefetch, prefetch_l1 */
+ prefetch_nofault = (dest == TILEGX_R_ZERO);
+ goto do_load;
+ case OE_RR_X1(LD2S):
+ memop = MO_TESW;
+ mnemonic = "ld2s"; /* prefetch_l2_fault */
+ goto do_load;
+ case OE_RR_X1(LD2U):
+ memop = MO_TEUW;
+ mnemonic = "ld2u"; /* prefetch_l2 */
+ prefetch_nofault = (dest == TILEGX_R_ZERO);
+ goto do_load;
+ case OE_RR_X1(LD4S):
+ memop = MO_TESL;
+ mnemonic = "ld4s"; /* prefetch_l3_fault */
+ goto do_load;
+ case OE_RR_X1(LD4U):
+ memop = MO_TEUL;
+ mnemonic = "ld4u"; /* prefetch_l3 */
+ prefetch_nofault = (dest == TILEGX_R_ZERO);
+ goto do_load;
+ case OE_RR_X1(LDNT1S):
+ memop = MO_SB;
+ mnemonic = "ldnt1s";
+ goto do_load;
+ case OE_RR_X1(LDNT1U):
+ memop = MO_UB;
+ mnemonic = "ldnt1u";
+ goto do_load;
+ case OE_RR_X1(LDNT2S):
+ memop = MO_TESW;
+ mnemonic = "ldnt2s";
+ goto do_load;
+ case OE_RR_X1(LDNT2U):
+ memop = MO_TEUW;
+ mnemonic = "ldnt2u";
+ goto do_load;
+ case OE_RR_X1(LDNT4S):
+ memop = MO_TESL;
+ mnemonic = "ldnt4s";
+ goto do_load;
+ case OE_RR_X1(LDNT4U):
+ memop = MO_TEUL;
+ mnemonic = "ldnt4u";
+ goto do_load;
+ case OE_RR_X1(LDNT):
+ memop = MO_TEQ;
+ mnemonic = "ldnt";
+ goto do_load;
+ case OE_RR_X1(LD):
+ memop = MO_TEQ;
+ mnemonic = "ld";
+ do_load:
+ if (!prefetch_nofault) {
+ tcg_gen_qemu_ld_tl(tdest, tsrca, dc->mmuidx, memop);
+ }
+ break;
+ case OE_RR_X1(LDNA):
+ tcg_gen_andi_tl(tdest, tsrca, ~7);
+ tcg_gen_qemu_ld_tl(tdest, tdest, dc->mmuidx, MO_TEQ);
+ mnemonic = "ldna";
+ break;
+ case OE_RR_X1(LNK):
+ case OE_RR_Y1(LNK):
+ if (srca) {
+ return TILEGX_EXCP_OPCODE_UNKNOWN;
+ }
+ tcg_gen_movi_tl(tdest, dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
+ mnemonic = "lnk";
+ break;
+ case OE_RR_X0(PCNT):
+ case OE_RR_Y0(PCNT):
+ gen_helper_pcnt(tdest, tsrca);
+ mnemonic = "pcnt";
+ break;
+ case OE_RR_X0(REVBITS):
+ case OE_RR_Y0(REVBITS):
+ gen_helper_revbits(tdest, tsrca);
+ mnemonic = "revbits";
+ break;
+ case OE_RR_X0(REVBYTES):
+ case OE_RR_Y0(REVBYTES):
+ tcg_gen_bswap64_tl(tdest, tsrca);
+ mnemonic = "revbytes";
+ break;
+ case OE_RR_X0(TBLIDXB0):
+ case OE_RR_Y0(TBLIDXB0):
+ tcg_gen_deposit_tl(tdest, load_gr(dc, dest), tsrca, 2, 8);
+ mnemonic = "tblidxb0";
+ break;
+ case OE_RR_X0(TBLIDXB1):
+ case OE_RR_Y0(TBLIDXB1):
+ tcg_gen_shri_tl(tdest, tsrca, 8);
+ tcg_gen_deposit_tl(tdest, load_gr(dc, dest), tdest, 2, 8);
+ mnemonic = "tblidxb1";
+ break;
+ case OE_RR_X0(TBLIDXB2):
+ case OE_RR_Y0(TBLIDXB2):
+ tcg_gen_shri_tl(tdest, tsrca, 16);
+ tcg_gen_deposit_tl(tdest, load_gr(dc, dest), tdest, 2, 8);
+ mnemonic = "tblidxb2";
+ break;
+ case OE_RR_X0(TBLIDXB3):
+ case OE_RR_Y0(TBLIDXB3):
+ tcg_gen_shri_tl(tdest, tsrca, 24);
+ tcg_gen_deposit_tl(tdest, load_gr(dc, dest), tdest, 2, 8);
+ mnemonic = "tblidxb3";
+ break;
+ default:
+ return TILEGX_EXCP_OPCODE_UNKNOWN;
+ }
+
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s", mnemonic,
+ reg_names[dest], reg_names[srca]);
+ return ret;
+}
+
+static TileExcp gen_rrr_opcode(DisasContext *dc, unsigned opext,
+ unsigned dest, unsigned srca, unsigned srcb)
+{
+ TCGv tdest = dest_gr(dc, dest);
+ TCGv tsrca = load_gr(dc, srca);
+ TCGv tsrcb = load_gr(dc, srcb);
+ TCGv t0;
+ const char *mnemonic;
+
+ switch (opext) {
+ case OE_RRR(ADDXSC, 0, X0):
+ case OE_RRR(ADDXSC, 0, X1):
+ gen_saturate_op(tdest, tsrca, tsrcb, tcg_gen_add_tl);
+ mnemonic = "addxsc";
+ break;
+ case OE_RRR(ADDX, 0, X0):
+ case OE_RRR(ADDX, 0, X1):
+ case OE_RRR(ADDX, 0, Y0):
+ case OE_RRR(ADDX, 0, Y1):
+ tcg_gen_add_tl(tdest, tsrca, tsrcb);
+ tcg_gen_ext32s_tl(tdest, tdest);
+ mnemonic = "addx";
+ break;
+ case OE_RRR(ADD, 0, X0):
+ case OE_RRR(ADD, 0, X1):
+ case OE_RRR(ADD, 0, Y0):
+ case OE_RRR(ADD, 0, Y1):
+ tcg_gen_add_tl(tdest, tsrca, tsrcb);
+ mnemonic = "add";
+ break;
+ case OE_RRR(AND, 0, X0):
+ case OE_RRR(AND, 0, X1):
+ case OE_RRR(AND, 5, Y0):
+ case OE_RRR(AND, 5, Y1):
+ tcg_gen_and_tl(tdest, tsrca, tsrcb);
+ mnemonic = "and";
+ break;
+ case OE_RRR(CMOVEQZ, 0, X0):
+ case OE_RRR(CMOVEQZ, 4, Y0):
+ tcg_gen_movcond_tl(TCG_COND_EQ, tdest, tsrca, load_zero(dc),
+ tsrcb, load_gr(dc, dest));
+ mnemonic = "cmoveqz";
+ break;
+ case OE_RRR(CMOVNEZ, 0, X0):
+ case OE_RRR(CMOVNEZ, 4, Y0):
+ tcg_gen_movcond_tl(TCG_COND_NE, tdest, tsrca, load_zero(dc),
+ tsrcb, load_gr(dc, dest));
+ mnemonic = "cmovnez";
+ break;
+ case OE_RRR(CMPEQ, 0, X0):
+ case OE_RRR(CMPEQ, 0, X1):
+ case OE_RRR(CMPEQ, 3, Y0):
+ case OE_RRR(CMPEQ, 3, Y1):
+ tcg_gen_setcond_tl(TCG_COND_EQ, tdest, tsrca, tsrcb);
+ mnemonic = "cmpeq";
+ break;
+ case OE_RRR(CMPEXCH4, 0, X1):
+ gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
+ TILEGX_EXCP_OPCODE_CMPEXCH4);
+ mnemonic = "cmpexch4";
+ break;
+ case OE_RRR(CMPEXCH, 0, X1):
+ gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
+ TILEGX_EXCP_OPCODE_CMPEXCH);
+ mnemonic = "cmpexch";
+ break;
+ case OE_RRR(CMPLES, 0, X0):
+ case OE_RRR(CMPLES, 0, X1):
+ case OE_RRR(CMPLES, 2, Y0):
+ case OE_RRR(CMPLES, 2, Y1):
+ tcg_gen_setcond_tl(TCG_COND_LE, tdest, tsrca, tsrcb);
+ mnemonic = "cmples";
+ break;
+ case OE_RRR(CMPLEU, 0, X0):
+ case OE_RRR(CMPLEU, 0, X1):
+ case OE_RRR(CMPLEU, 2, Y0):
+ case OE_RRR(CMPLEU, 2, Y1):
+ tcg_gen_setcond_tl(TCG_COND_LEU, tdest, tsrca, tsrcb);
+ mnemonic = "cmpleu";
+ break;
+ case OE_RRR(CMPLTS, 0, X0):
+ case OE_RRR(CMPLTS, 0, X1):
+ case OE_RRR(CMPLTS, 2, Y0):
+ case OE_RRR(CMPLTS, 2, Y1):
+ tcg_gen_setcond_tl(TCG_COND_LT, tdest, tsrca, tsrcb);
+ mnemonic = "cmplts";
+ break;
+ case OE_RRR(CMPLTU, 0, X0):
+ case OE_RRR(CMPLTU, 0, X1):
+ case OE_RRR(CMPLTU, 2, Y0):
+ case OE_RRR(CMPLTU, 2, Y1):
+ tcg_gen_setcond_tl(TCG_COND_LTU, tdest, tsrca, tsrcb);
+ mnemonic = "cmpltu";
+ break;
+ case OE_RRR(CMPNE, 0, X0):
+ case OE_RRR(CMPNE, 0, X1):
+ case OE_RRR(CMPNE, 3, Y0):
+ case OE_RRR(CMPNE, 3, Y1):
+ tcg_gen_setcond_tl(TCG_COND_NE, tdest, tsrca, tsrcb);
+ mnemonic = "cmpne";
+ break;
+ case OE_RRR(CMULAF, 0, X0):
+ gen_helper_cmulaf(tdest, load_gr(dc, dest), tsrca, tsrcb);
+ mnemonic = "cmulaf";
+ break;
+ case OE_RRR(CMULA, 0, X0):
+ gen_helper_cmula(tdest, load_gr(dc, dest), tsrca, tsrcb);
+ mnemonic = "cmula";
+ break;
+ case OE_RRR(CMULFR, 0, X0):
+ gen_cmul2(tdest, tsrca, tsrcb, 15, 1 << 14);
+ mnemonic = "cmulfr";
+ break;
+ case OE_RRR(CMULF, 0, X0):
+ gen_cmul2(tdest, tsrca, tsrcb, 15, 0);
+ mnemonic = "cmulf";
+ break;
+ case OE_RRR(CMULHR, 0, X0):
+ gen_cmul2(tdest, tsrca, tsrcb, 16, 1 << 15);
+ mnemonic = "cmulhr";
+ break;
+ case OE_RRR(CMULH, 0, X0):
+ gen_cmul2(tdest, tsrca, tsrcb, 16, 0);
+ mnemonic = "cmulh";
+ break;
+ case OE_RRR(CMUL, 0, X0):
+ gen_helper_cmula(tdest, load_zero(dc), tsrca, tsrcb);
+ mnemonic = "cmul";
+ break;
+ case OE_RRR(CRC32_32, 0, X0):
+ gen_helper_crc32_32(tdest, tsrca, tsrcb);
+ mnemonic = "crc32_32";
+ break;
+ case OE_RRR(CRC32_8, 0, X0):
+ gen_helper_crc32_8(tdest, tsrca, tsrcb);
+ mnemonic = "crc32_8";
+ break;
+ case OE_RRR(DBLALIGN2, 0, X0):
+ case OE_RRR(DBLALIGN2, 0, X1):
+ gen_dblaligni(tdest, tsrca, tsrcb, 16);
+ mnemonic = "dblalign2";
+ break;
+ case OE_RRR(DBLALIGN4, 0, X0):
+ case OE_RRR(DBLALIGN4, 0, X1):
+ gen_dblaligni(tdest, tsrca, tsrcb, 32);
+ mnemonic = "dblalign4";
+ break;
+ case OE_RRR(DBLALIGN6, 0, X0):
+ case OE_RRR(DBLALIGN6, 0, X1):
+ gen_dblaligni(tdest, tsrca, tsrcb, 48);
+ mnemonic = "dblalign6";
+ break;
+ case OE_RRR(DBLALIGN, 0, X0):
+ gen_dblalign(tdest, load_gr(dc, dest), tsrca, tsrcb);
+ mnemonic = "dblalign";
+ break;
+ case OE_RRR(EXCH4, 0, X1):
+ gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
+ TILEGX_EXCP_OPCODE_EXCH4);
+ mnemonic = "exch4";
+ break;
+ case OE_RRR(EXCH, 0, X1):
+ gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
+ TILEGX_EXCP_OPCODE_EXCH);
+ mnemonic = "exch";
+ break;
+ case OE_RRR(FDOUBLE_ADDSUB, 0, X0):
+ case OE_RRR(FDOUBLE_ADD_FLAGS, 0, X0):
+ case OE_RRR(FDOUBLE_MUL_FLAGS, 0, X0):
+ case OE_RRR(FDOUBLE_PACK1, 0, X0):
+ case OE_RRR(FDOUBLE_PACK2, 0, X0):
+ case OE_RRR(FDOUBLE_SUB_FLAGS, 0, X0):
+ case OE_RRR(FDOUBLE_UNPACK_MAX, 0, X0):
+ case OE_RRR(FDOUBLE_UNPACK_MIN, 0, X0):
+ return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+ case OE_RRR(FETCHADD4, 0, X1):
+ gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
+ TILEGX_EXCP_OPCODE_FETCHADD4);
+ mnemonic = "fetchadd4";
+ break;
+ case OE_RRR(FETCHADDGEZ4, 0, X1):
+ gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
+ TILEGX_EXCP_OPCODE_FETCHADDGEZ4);
+ mnemonic = "fetchaddgez4";
+ break;
+ case OE_RRR(FETCHADDGEZ, 0, X1):
+ gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
+ TILEGX_EXCP_OPCODE_FETCHADDGEZ);
+ mnemonic = "fetchaddgez";
+ break;
+ case OE_RRR(FETCHADD, 0, X1):
+ gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
+ TILEGX_EXCP_OPCODE_FETCHADD);
+ mnemonic = "fetchadd";
+ break;
+ case OE_RRR(FETCHAND4, 0, X1):
+ gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
+ TILEGX_EXCP_OPCODE_FETCHAND4);
+ mnemonic = "fetchand4";
+ break;
+ case OE_RRR(FETCHAND, 0, X1):
+ gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
+ TILEGX_EXCP_OPCODE_FETCHAND);
+ mnemonic = "fetchand";
+ break;
+ case OE_RRR(FETCHOR4, 0, X1):
+ gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
+ TILEGX_EXCP_OPCODE_FETCHOR4);
+ mnemonic = "fetchor4";
+ break;
+ case OE_RRR(FETCHOR, 0, X1):
+ gen_atomic_excp(dc, dest, tdest, tsrca, tsrcb,
+ TILEGX_EXCP_OPCODE_FETCHOR);
+ mnemonic = "fetchor";
+ break;
+ case OE_RRR(FSINGLE_ADD1, 0, X0):
+ case OE_RRR(FSINGLE_ADDSUB2, 0, X0):
+ case OE_RRR(FSINGLE_MUL1, 0, X0):
+ case OE_RRR(FSINGLE_MUL2, 0, X0):
+ case OE_RRR(FSINGLE_PACK2, 0, X0):
+ case OE_RRR(FSINGLE_SUB1, 0, X0):
+ return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+ case OE_RRR(MNZ, 0, X0):
+ case OE_RRR(MNZ, 0, X1):
+ case OE_RRR(MNZ, 4, Y0):
+ case OE_RRR(MNZ, 4, Y1):
+ t0 = load_zero(dc);
+ tcg_gen_movcond_tl(TCG_COND_NE, tdest, tsrca, t0, tsrcb, t0);
+ mnemonic = "mnz";
+ break;
+ case OE_RRR(MULAX, 0, X0):
+ case OE_RRR(MULAX, 3, Y0):
+ tcg_gen_mul_tl(tdest, tsrca, tsrcb);
+ tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
+ tcg_gen_ext32s_tl(tdest, tdest);
+ mnemonic = "mulax";
+ break;
+ case OE_RRR(MULA_HS_HS, 0, X0):
+ case OE_RRR(MULA_HS_HS, 9, Y0):
+ gen_mul_half(tdest, tsrca, tsrcb, HS, HS);
+ tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
+ mnemonic = "mula_hs_hs";
+ break;
+ case OE_RRR(MULA_HS_HU, 0, X0):
+ gen_mul_half(tdest, tsrca, tsrcb, HS, HU);
+ tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
+ mnemonic = "mula_hs_hu";
+ break;
+ case OE_RRR(MULA_HS_LS, 0, X0):
+ gen_mul_half(tdest, tsrca, tsrcb, HS, LS);
+ tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
+ mnemonic = "mula_hs_ls";
+ break;
+ case OE_RRR(MULA_HS_LU, 0, X0):
+ gen_mul_half(tdest, tsrca, tsrcb, HS, LU);
+ tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
+ mnemonic = "mula_hs_lu";
+ break;
+ case OE_RRR(MULA_HU_HU, 0, X0):
+ case OE_RRR(MULA_HU_HU, 9, Y0):
+ gen_mul_half(tdest, tsrca, tsrcb, HU, HU);
+ tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
+ mnemonic = "mula_hu_hu";
+ break;
+ case OE_RRR(MULA_HU_LS, 0, X0):
+ gen_mul_half(tdest, tsrca, tsrcb, HU, LS);
+ tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
+ mnemonic = "mula_hu_ls";
+ break;
+ case OE_RRR(MULA_HU_LU, 0, X0):
+ gen_mul_half(tdest, tsrca, tsrcb, HU, LU);
+ tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
+ mnemonic = "mula_hu_lu";
+ break;
+ case OE_RRR(MULA_LS_LS, 0, X0):
+ case OE_RRR(MULA_LS_LS, 9, Y0):
+ gen_mul_half(tdest, tsrca, tsrcb, LS, LS);
+ tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
+ mnemonic = "mula_ls_ls";
+ break;
+ case OE_RRR(MULA_LS_LU, 0, X0):
+ gen_mul_half(tdest, tsrca, tsrcb, LS, LU);
+ tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
+ mnemonic = "mula_ls_lu";
+ break;
+ case OE_RRR(MULA_LU_LU, 0, X0):
+ case OE_RRR(MULA_LU_LU, 9, Y0):
+ gen_mul_half(tdest, tsrca, tsrcb, LU, LU);
+ tcg_gen_add_tl(tdest, tdest, load_gr(dc, dest));
+ mnemonic = "mula_lu_lu";
+ break;
+ case OE_RRR(MULX, 0, X0):
+ case OE_RRR(MULX, 3, Y0):
+ tcg_gen_mul_tl(tdest, tsrca, tsrcb);
+ tcg_gen_ext32s_tl(tdest, tdest);
+ mnemonic = "mulx";
+ break;
+ case OE_RRR(MUL_HS_HS, 0, X0):
+ case OE_RRR(MUL_HS_HS, 8, Y0):
+ gen_mul_half(tdest, tsrca, tsrcb, HS, HS);
+ mnemonic = "mul_hs_hs";
+ break;
+ case OE_RRR(MUL_HS_HU, 0, X0):
+ gen_mul_half(tdest, tsrca, tsrcb, HS, HU);
+ mnemonic = "mul_hs_hu";
+ break;
+ case OE_RRR(MUL_HS_LS, 0, X0):
+ gen_mul_half(tdest, tsrca, tsrcb, HS, LS);
+ mnemonic = "mul_hs_ls";
+ break;
+ case OE_RRR(MUL_HS_LU, 0, X0):
+ gen_mul_half(tdest, tsrca, tsrcb, HS, LU);
+ mnemonic = "mul_hs_lu";
+ break;
+ case OE_RRR(MUL_HU_HU, 0, X0):
+ case OE_RRR(MUL_HU_HU, 8, Y0):
+ gen_mul_half(tdest, tsrca, tsrcb, HU, HU);
+ mnemonic = "mul_hu_hu";
+ break;
+ case OE_RRR(MUL_HU_LS, 0, X0):
+ gen_mul_half(tdest, tsrca, tsrcb, HU, LS);
+ mnemonic = "mul_hu_ls";
+ break;
+ case OE_RRR(MUL_HU_LU, 0, X0):
+ gen_mul_half(tdest, tsrca, tsrcb, HU, LU);
+ mnemonic = "mul_hu_lu";
+ break;
+ case OE_RRR(MUL_LS_LS, 0, X0):
+ case OE_RRR(MUL_LS_LS, 8, Y0):
+ gen_mul_half(tdest, tsrca, tsrcb, LS, LS);
+ mnemonic = "mul_ls_ls";
+ break;
+ case OE_RRR(MUL_LS_LU, 0, X0):
+ gen_mul_half(tdest, tsrca, tsrcb, LS, LU);
+ mnemonic = "mul_ls_lu";
+ break;
+ case OE_RRR(MUL_LU_LU, 0, X0):
+ case OE_RRR(MUL_LU_LU, 8, Y0):
+ gen_mul_half(tdest, tsrca, tsrcb, LU, LU);
+ mnemonic = "mul_lu_lu";
+ break;
+ case OE_RRR(MZ, 0, X0):
+ case OE_RRR(MZ, 0, X1):
+ case OE_RRR(MZ, 4, Y0):
+ case OE_RRR(MZ, 4, Y1):
+ t0 = load_zero(dc);
+ tcg_gen_movcond_tl(TCG_COND_EQ, tdest, tsrca, t0, tsrcb, t0);
+ mnemonic = "mz";
+ break;
+ case OE_RRR(NOR, 0, X0):
+ case OE_RRR(NOR, 0, X1):
+ case OE_RRR(NOR, 5, Y0):
+ case OE_RRR(NOR, 5, Y1):
+ tcg_gen_nor_tl(tdest, tsrca, tsrcb);
+ mnemonic = "nor";
+ break;
+ case OE_RRR(OR, 0, X0):
+ case OE_RRR(OR, 0, X1):
+ case OE_RRR(OR, 5, Y0):
+ case OE_RRR(OR, 5, Y1):
+ tcg_gen_or_tl(tdest, tsrca, tsrcb);
+ mnemonic = "or";
+ break;
+ case OE_RRR(ROTL, 0, X0):
+ case OE_RRR(ROTL, 0, X1):
+ case OE_RRR(ROTL, 6, Y0):
+ case OE_RRR(ROTL, 6, Y1):
+ tcg_gen_andi_tl(tdest, tsrcb, 63);
+ tcg_gen_rotl_tl(tdest, tsrca, tdest);
+ mnemonic = "rotl";
+ break;
+ case OE_RRR(SHL1ADDX, 0, X0):
+ case OE_RRR(SHL1ADDX, 0, X1):
+ case OE_RRR(SHL1ADDX, 7, Y0):
+ case OE_RRR(SHL1ADDX, 7, Y1):
+ tcg_gen_shli_tl(tdest, tsrca, 1);
+ tcg_gen_add_tl(tdest, tdest, tsrcb);
+ tcg_gen_ext32s_tl(tdest, tdest);
+ mnemonic = "shl1addx";
+ break;
+ case OE_RRR(SHL1ADD, 0, X0):
+ case OE_RRR(SHL1ADD, 0, X1):
+ case OE_RRR(SHL1ADD, 1, Y0):
+ case OE_RRR(SHL1ADD, 1, Y1):
+ tcg_gen_shli_tl(tdest, tsrca, 1);
+ tcg_gen_add_tl(tdest, tdest, tsrcb);
+ mnemonic = "shl1add";
+ break;
+ case OE_RRR(SHL2ADDX, 0, X0):
+ case OE_RRR(SHL2ADDX, 0, X1):
+ case OE_RRR(SHL2ADDX, 7, Y0):
+ case OE_RRR(SHL2ADDX, 7, Y1):
+ tcg_gen_shli_tl(tdest, tsrca, 2);
+ tcg_gen_add_tl(tdest, tdest, tsrcb);
+ tcg_gen_ext32s_tl(tdest, tdest);
+ mnemonic = "shl2addx";
+ break;
+ case OE_RRR(SHL2ADD, 0, X0):
+ case OE_RRR(SHL2ADD, 0, X1):
+ case OE_RRR(SHL2ADD, 1, Y0):
+ case OE_RRR(SHL2ADD, 1, Y1):
+ tcg_gen_shli_tl(tdest, tsrca, 2);
+ tcg_gen_add_tl(tdest, tdest, tsrcb);
+ mnemonic = "shl2add";
+ break;
+ case OE_RRR(SHL3ADDX, 0, X0):
+ case OE_RRR(SHL3ADDX, 0, X1):
+ case OE_RRR(SHL3ADDX, 7, Y0):
+ case OE_RRR(SHL3ADDX, 7, Y1):
+ tcg_gen_shli_tl(tdest, tsrca, 3);
+ tcg_gen_add_tl(tdest, tdest, tsrcb);
+ tcg_gen_ext32s_tl(tdest, tdest);
+ mnemonic = "shl3addx";
+ break;
+ case OE_RRR(SHL3ADD, 0, X0):
+ case OE_RRR(SHL3ADD, 0, X1):
+ case OE_RRR(SHL3ADD, 1, Y0):
+ case OE_RRR(SHL3ADD, 1, Y1):
+ tcg_gen_shli_tl(tdest, tsrca, 3);
+ tcg_gen_add_tl(tdest, tdest, tsrcb);
+ mnemonic = "shl3add";
+ break;
+ case OE_RRR(SHLX, 0, X0):
+ case OE_RRR(SHLX, 0, X1):
+ tcg_gen_andi_tl(tdest, tsrcb, 31);
+ tcg_gen_shl_tl(tdest, tsrca, tdest);
+ tcg_gen_ext32s_tl(tdest, tdest);
+ mnemonic = "shlx";
+ break;
+ case OE_RRR(SHL, 0, X0):
+ case OE_RRR(SHL, 0, X1):
+ case OE_RRR(SHL, 6, Y0):
+ case OE_RRR(SHL, 6, Y1):
+ tcg_gen_andi_tl(tdest, tsrcb, 63);
+ tcg_gen_shl_tl(tdest, tsrca, tdest);
+ mnemonic = "shl";
+ break;
+ case OE_RRR(SHRS, 0, X0):
+ case OE_RRR(SHRS, 0, X1):
+ case OE_RRR(SHRS, 6, Y0):
+ case OE_RRR(SHRS, 6, Y1):
+ tcg_gen_andi_tl(tdest, tsrcb, 63);
+ tcg_gen_sar_tl(tdest, tsrca, tdest);
+ mnemonic = "shrs";
+ break;
+ case OE_RRR(SHRUX, 0, X0):
+ case OE_RRR(SHRUX, 0, X1):
+ t0 = tcg_temp_new();
+ tcg_gen_andi_tl(t0, tsrcb, 31);
+ tcg_gen_ext32u_tl(tdest, tsrca);
+ tcg_gen_shr_tl(tdest, tdest, t0);
+ tcg_gen_ext32s_tl(tdest, tdest);
+ tcg_temp_free(t0);
+ mnemonic = "shrux";
+ break;
+ case OE_RRR(SHRU, 0, X0):
+ case OE_RRR(SHRU, 0, X1):
+ case OE_RRR(SHRU, 6, Y0):
+ case OE_RRR(SHRU, 6, Y1):
+ tcg_gen_andi_tl(tdest, tsrcb, 63);
+ tcg_gen_shr_tl(tdest, tsrca, tdest);
+ mnemonic = "shru";
+ break;
+ case OE_RRR(SHUFFLEBYTES, 0, X0):
+        gen_helper_shufflebytes(tdest, load_gr(dc, dest), tsrca, tsrcb);
+ mnemonic = "shufflebytes";
+ break;
+ case OE_RRR(SUBXSC, 0, X0):
+ case OE_RRR(SUBXSC, 0, X1):
+ gen_saturate_op(tdest, tsrca, tsrcb, tcg_gen_sub_tl);
+ mnemonic = "subxsc";
+ break;
+ case OE_RRR(SUBX, 0, X0):
+ case OE_RRR(SUBX, 0, X1):
+ case OE_RRR(SUBX, 0, Y0):
+ case OE_RRR(SUBX, 0, Y1):
+ tcg_gen_sub_tl(tdest, tsrca, tsrcb);
+ tcg_gen_ext32s_tl(tdest, tdest);
+ mnemonic = "subx";
+ break;
+ case OE_RRR(SUB, 0, X0):
+ case OE_RRR(SUB, 0, X1):
+ case OE_RRR(SUB, 0, Y0):
+ case OE_RRR(SUB, 0, Y1):
+ tcg_gen_sub_tl(tdest, tsrca, tsrcb);
+ mnemonic = "sub";
+ break;
+ case OE_RRR(V1ADDUC, 0, X0):
+ case OE_RRR(V1ADDUC, 0, X1):
+ return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+ case OE_RRR(V1ADD, 0, X0):
+ case OE_RRR(V1ADD, 0, X1):
+ gen_v12add(tdest, tsrca, tsrcb, V1_IMM(0x80));
+ mnemonic = "v1add";
+ break;
+ case OE_RRR(V1ADIFFU, 0, X0):
+ case OE_RRR(V1AVGU, 0, X0):
+ return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+ case OE_RRR(V1CMPEQ, 0, X0):
+ case OE_RRR(V1CMPEQ, 0, X1):
+ tcg_gen_xor_tl(tdest, tsrca, tsrcb);
+ gen_v1cmpeq0(tdest);
+ mnemonic = "v1cmpeq";
+ break;
+ case OE_RRR(V1CMPLES, 0, X0):
+ case OE_RRR(V1CMPLES, 0, X1):
+ case OE_RRR(V1CMPLEU, 0, X0):
+ case OE_RRR(V1CMPLEU, 0, X1):
+ case OE_RRR(V1CMPLTS, 0, X0):
+ case OE_RRR(V1CMPLTS, 0, X1):
+ case OE_RRR(V1CMPLTU, 0, X0):
+ case OE_RRR(V1CMPLTU, 0, X1):
+ return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+ case OE_RRR(V1CMPNE, 0, X0):
+ case OE_RRR(V1CMPNE, 0, X1):
+ tcg_gen_xor_tl(tdest, tsrca, tsrcb);
+ gen_v1cmpne0(tdest);
+ mnemonic = "v1cmpne";
+ break;
+ case OE_RRR(V1DDOTPUA, 0, X0):
+ case OE_RRR(V1DDOTPUSA, 0, X0):
+ case OE_RRR(V1DDOTPUS, 0, X0):
+ case OE_RRR(V1DDOTPU, 0, X0):
+ case OE_RRR(V1DOTPA, 0, X0):
+ case OE_RRR(V1DOTPUA, 0, X0):
+ case OE_RRR(V1DOTPUSA, 0, X0):
+ case OE_RRR(V1DOTPUS, 0, X0):
+ case OE_RRR(V1DOTPU, 0, X0):
+ case OE_RRR(V1DOTP, 0, X0):
+ return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+ case OE_RRR(V1INT_H, 0, X0):
+ case OE_RRR(V1INT_H, 0, X1):
+ gen_helper_v1int_h(tdest, tsrca, tsrcb);
+ mnemonic = "v1int_h";
+ break;
+ case OE_RRR(V1INT_L, 0, X0):
+ case OE_RRR(V1INT_L, 0, X1):
+ gen_helper_v1int_l(tdest, tsrca, tsrcb);
+ mnemonic = "v1int_l";
+ break;
+ case OE_RRR(V1MAXU, 0, X0):
+ case OE_RRR(V1MAXU, 0, X1):
+ case OE_RRR(V1MINU, 0, X0):
+ case OE_RRR(V1MINU, 0, X1):
+ case OE_RRR(V1MNZ, 0, X0):
+ case OE_RRR(V1MNZ, 0, X1):
+ return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+ case OE_RRR(V1MULTU, 0, X0):
+ gen_helper_v1multu(tdest, tsrca, tsrcb);
+ mnemonic = "v1multu";
+ break;
+ case OE_RRR(V1MULUS, 0, X0):
+ case OE_RRR(V1MULU, 0, X0):
+ case OE_RRR(V1MZ, 0, X0):
+ case OE_RRR(V1MZ, 0, X1):
+ case OE_RRR(V1SADAU, 0, X0):
+ case OE_RRR(V1SADU, 0, X0):
+ return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+ case OE_RRR(V1SHL, 0, X0):
+ case OE_RRR(V1SHL, 0, X1):
+ gen_helper_v1shl(tdest, tsrca, tsrcb);
+ mnemonic = "v1shl";
+ break;
+ case OE_RRR(V1SHRS, 0, X0):
+ case OE_RRR(V1SHRS, 0, X1):
+ gen_helper_v1shrs(tdest, tsrca, tsrcb);
+ mnemonic = "v1shrs";
+ break;
+ case OE_RRR(V1SHRU, 0, X0):
+ case OE_RRR(V1SHRU, 0, X1):
+ gen_helper_v1shru(tdest, tsrca, tsrcb);
+ mnemonic = "v1shru";
+ break;
+ case OE_RRR(V1SUBUC, 0, X0):
+ case OE_RRR(V1SUBUC, 0, X1):
+ return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+ case OE_RRR(V1SUB, 0, X0):
+ case OE_RRR(V1SUB, 0, X1):
+ gen_v12sub(tdest, tsrca, tsrcb, V1_IMM(0x80));
+ mnemonic = "v1sub";
+ break;
+ case OE_RRR(V2ADDSC, 0, X0):
+ case OE_RRR(V2ADDSC, 0, X1):
+ return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+ case OE_RRR(V2ADD, 0, X0):
+ case OE_RRR(V2ADD, 0, X1):
+ gen_v12add(tdest, tsrca, tsrcb, V2_IMM(0x8000));
+ mnemonic = "v2add";
+ break;
+ case OE_RRR(V2ADIFFS, 0, X0):
+ case OE_RRR(V2AVGS, 0, X0):
+ case OE_RRR(V2CMPEQ, 0, X0):
+ case OE_RRR(V2CMPEQ, 0, X1):
+ case OE_RRR(V2CMPLES, 0, X0):
+ case OE_RRR(V2CMPLES, 0, X1):
+ case OE_RRR(V2CMPLEU, 0, X0):
+ case OE_RRR(V2CMPLEU, 0, X1):
+ case OE_RRR(V2CMPLTS, 0, X0):
+ case OE_RRR(V2CMPLTS, 0, X1):
+ case OE_RRR(V2CMPLTU, 0, X0):
+ case OE_RRR(V2CMPLTU, 0, X1):
+ case OE_RRR(V2CMPNE, 0, X0):
+ case OE_RRR(V2CMPNE, 0, X1):
+ case OE_RRR(V2DOTPA, 0, X0):
+ case OE_RRR(V2DOTP, 0, X0):
+ return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+ case OE_RRR(V2INT_H, 0, X0):
+ case OE_RRR(V2INT_H, 0, X1):
+ gen_helper_v2int_h(tdest, tsrca, tsrcb);
+ mnemonic = "v2int_h";
+ break;
+ case OE_RRR(V2INT_L, 0, X0):
+ case OE_RRR(V2INT_L, 0, X1):
+ gen_helper_v2int_l(tdest, tsrca, tsrcb);
+ mnemonic = "v2int_l";
+ break;
+ case OE_RRR(V2MAXS, 0, X0):
+ case OE_RRR(V2MAXS, 0, X1):
+ case OE_RRR(V2MINS, 0, X0):
+ case OE_RRR(V2MINS, 0, X1):
+ case OE_RRR(V2MNZ, 0, X0):
+ case OE_RRR(V2MNZ, 0, X1):
+ case OE_RRR(V2MULFSC, 0, X0):
+ case OE_RRR(V2MULS, 0, X0):
+ return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+ case OE_RRR(V2MULTS, 0, X0):
+ gen_helper_v2mults(tdest, tsrca, tsrcb);
+ mnemonic = "v2mults";
+ break;
+ case OE_RRR(V2MZ, 0, X0):
+ case OE_RRR(V2MZ, 0, X1):
+ case OE_RRR(V2PACKH, 0, X0):
+ case OE_RRR(V2PACKH, 0, X1):
+ case OE_RRR(V2PACKL, 0, X0):
+ case OE_RRR(V2PACKL, 0, X1):
+ case OE_RRR(V2PACKUC, 0, X0):
+ case OE_RRR(V2PACKUC, 0, X1):
+ case OE_RRR(V2SADAS, 0, X0):
+ case OE_RRR(V2SADAU, 0, X0):
+ case OE_RRR(V2SADS, 0, X0):
+ case OE_RRR(V2SADU, 0, X0):
+ case OE_RRR(V2SHLSC, 0, X0):
+ case OE_RRR(V2SHLSC, 0, X1):
+ return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+ case OE_RRR(V2SHL, 0, X0):
+ case OE_RRR(V2SHL, 0, X1):
+ gen_helper_v2shl(tdest, tsrca, tsrcb);
+ mnemonic = "v2shl";
+ break;
+ case OE_RRR(V2SHRS, 0, X0):
+ case OE_RRR(V2SHRS, 0, X1):
+ gen_helper_v2shrs(tdest, tsrca, tsrcb);
+ mnemonic = "v2shrs";
+ break;
+ case OE_RRR(V2SHRU, 0, X0):
+ case OE_RRR(V2SHRU, 0, X1):
+ gen_helper_v2shru(tdest, tsrca, tsrcb);
+ mnemonic = "v2shru";
+ break;
+ case OE_RRR(V2SUBSC, 0, X0):
+ case OE_RRR(V2SUBSC, 0, X1):
+ return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+ case OE_RRR(V2SUB, 0, X0):
+ case OE_RRR(V2SUB, 0, X1):
+ gen_v12sub(tdest, tsrca, tsrcb, V2_IMM(0x8000));
+ mnemonic = "v2sub";
+ break;
+ case OE_RRR(V4ADDSC, 0, X0):
+ case OE_RRR(V4ADDSC, 0, X1):
+ return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+ case OE_RRR(V4ADD, 0, X0):
+ case OE_RRR(V4ADD, 0, X1):
+ gen_v4op(tdest, tsrca, tsrcb, tcg_gen_add_i32);
+ mnemonic = "v4add";
+ break;
+ case OE_RRR(V4INT_H, 0, X0):
+ case OE_RRR(V4INT_H, 0, X1):
+ tcg_gen_shri_tl(tdest, tsrcb, 32);
+ tcg_gen_deposit_tl(tdest, tsrca, tdest, 0, 32);
+ mnemonic = "v4int_h";
+ break;
+ case OE_RRR(V4INT_L, 0, X0):
+ case OE_RRR(V4INT_L, 0, X1):
+ tcg_gen_deposit_tl(tdest, tsrcb, tsrca, 32, 32);
+ mnemonic = "v4int_l";
+ break;
+ case OE_RRR(V4PACKSC, 0, X0):
+ case OE_RRR(V4PACKSC, 0, X1):
+ case OE_RRR(V4SHLSC, 0, X0):
+ case OE_RRR(V4SHLSC, 0, X1):
+ return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+ case OE_RRR(V4SHL, 0, X0):
+ case OE_RRR(V4SHL, 0, X1):
+ gen_v4sh(tdest, tsrca, tsrcb, tcg_gen_shl_i32);
+ mnemonic = "v4shl";
+ break;
+ case OE_RRR(V4SHRS, 0, X0):
+ case OE_RRR(V4SHRS, 0, X1):
+ gen_v4sh(tdest, tsrca, tsrcb, tcg_gen_sar_i32);
+ mnemonic = "v4shrs";
+ break;
+ case OE_RRR(V4SHRU, 0, X0):
+ case OE_RRR(V4SHRU, 0, X1):
+ gen_v4sh(tdest, tsrca, tsrcb, tcg_gen_shr_i32);
+ mnemonic = "v4shru";
+ break;
+ case OE_RRR(V4SUBSC, 0, X0):
+ case OE_RRR(V4SUBSC, 0, X1):
+ return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+ case OE_RRR(V4SUB, 0, X0):
+ case OE_RRR(V4SUB, 0, X1):
+ gen_v4op(tdest, tsrca, tsrcb, tcg_gen_sub_i32);
+        mnemonic = "v4sub";
+ break;
+ case OE_RRR(XOR, 0, X0):
+ case OE_RRR(XOR, 0, X1):
+ case OE_RRR(XOR, 5, Y0):
+ case OE_RRR(XOR, 5, Y1):
+ tcg_gen_xor_tl(tdest, tsrca, tsrcb);
+ mnemonic = "xor";
+ break;
+ default:
+ return TILEGX_EXCP_OPCODE_UNKNOWN;
+ }
+
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s, %s", mnemonic,
+ reg_names[dest], reg_names[srca], reg_names[srcb]);
+ return TILEGX_EXCP_NONE;
+}
+
+static TileExcp gen_rri_opcode(DisasContext *dc, unsigned opext,
+ unsigned dest, unsigned srca, int imm)
+{
+ TCGv tdest = dest_gr(dc, dest);
+ TCGv tsrca = load_gr(dc, srca);
+ bool prefetch_nofault = false;
+ const char *mnemonic;
+ TCGMemOp memop;
+ int i2, i3;
+ TCGv t0;
+
+ switch (opext) {
+ case OE(ADDI_OPCODE_Y0, 0, Y0):
+ case OE(ADDI_OPCODE_Y1, 0, Y1):
+ case OE_IM(ADDI, X0):
+ case OE_IM(ADDI, X1):
+ tcg_gen_addi_tl(tdest, tsrca, imm);
+ mnemonic = "addi";
+ break;
+ case OE(ADDXI_OPCODE_Y0, 0, Y0):
+ case OE(ADDXI_OPCODE_Y1, 0, Y1):
+ case OE_IM(ADDXI, X0):
+ case OE_IM(ADDXI, X1):
+ tcg_gen_addi_tl(tdest, tsrca, imm);
+ tcg_gen_ext32s_tl(tdest, tdest);
+ mnemonic = "addxi";
+ break;
+ case OE(ANDI_OPCODE_Y0, 0, Y0):
+ case OE(ANDI_OPCODE_Y1, 0, Y1):
+ case OE_IM(ANDI, X0):
+ case OE_IM(ANDI, X1):
+ tcg_gen_andi_tl(tdest, tsrca, imm);
+ mnemonic = "andi";
+ break;
+ case OE(CMPEQI_OPCODE_Y0, 0, Y0):
+ case OE(CMPEQI_OPCODE_Y1, 0, Y1):
+ case OE_IM(CMPEQI, X0):
+ case OE_IM(CMPEQI, X1):
+ tcg_gen_setcondi_tl(TCG_COND_EQ, tdest, tsrca, imm);
+ mnemonic = "cmpeqi";
+ break;
+ case OE(CMPLTSI_OPCODE_Y0, 0, Y0):
+ case OE(CMPLTSI_OPCODE_Y1, 0, Y1):
+ case OE_IM(CMPLTSI, X0):
+ case OE_IM(CMPLTSI, X1):
+ tcg_gen_setcondi_tl(TCG_COND_LT, tdest, tsrca, imm);
+ mnemonic = "cmpltsi";
+ break;
+ case OE_IM(CMPLTUI, X0):
+ case OE_IM(CMPLTUI, X1):
+ tcg_gen_setcondi_tl(TCG_COND_LTU, tdest, tsrca, imm);
+ mnemonic = "cmpltui";
+ break;
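+    /* ld1u_add, ld2u_add and ld4u_add with the zero register as destination
+       encode prefetch hints; for those forms the memory access itself is
+       skipped and only the address update below is performed. */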
+ case OE_IM(LD1S_ADD, X1):
+ memop = MO_SB;
+ mnemonic = "ld1s_add"; /* prefetch_add_l1_fault */
+ goto do_load_add;
+ case OE_IM(LD1U_ADD, X1):
+ memop = MO_UB;
+ mnemonic = "ld1u_add"; /* prefetch_add_l1 */
+ prefetch_nofault = (dest == TILEGX_R_ZERO);
+ goto do_load_add;
+ case OE_IM(LD2S_ADD, X1):
+ memop = MO_TESW;
+ mnemonic = "ld2s_add"; /* prefetch_add_l2_fault */
+ goto do_load_add;
+ case OE_IM(LD2U_ADD, X1):
+ memop = MO_TEUW;
+ mnemonic = "ld2u_add"; /* prefetch_add_l2 */
+ prefetch_nofault = (dest == TILEGX_R_ZERO);
+ goto do_load_add;
+ case OE_IM(LD4S_ADD, X1):
+ memop = MO_TESL;
+ mnemonic = "ld4s_add"; /* prefetch_add_l3_fault */
+ goto do_load_add;
+ case OE_IM(LD4U_ADD, X1):
+ memop = MO_TEUL;
+ mnemonic = "ld4u_add"; /* prefetch_add_l3 */
+ prefetch_nofault = (dest == TILEGX_R_ZERO);
+ goto do_load_add;
+ case OE_IM(LDNT1S_ADD, X1):
+ memop = MO_SB;
+ mnemonic = "ldnt1s_add";
+ goto do_load_add;
+ case OE_IM(LDNT1U_ADD, X1):
+ memop = MO_UB;
+ mnemonic = "ldnt1u_add";
+ goto do_load_add;
+ case OE_IM(LDNT2S_ADD, X1):
+ memop = MO_TESW;
+ mnemonic = "ldnt2s_add";
+ goto do_load_add;
+ case OE_IM(LDNT2U_ADD, X1):
+ memop = MO_TEUW;
+ mnemonic = "ldnt2u_add";
+ goto do_load_add;
+ case OE_IM(LDNT4S_ADD, X1):
+ memop = MO_TESL;
+ mnemonic = "ldnt4s_add";
+ goto do_load_add;
+ case OE_IM(LDNT4U_ADD, X1):
+ memop = MO_TEUL;
+ mnemonic = "ldnt4u_add";
+ goto do_load_add;
+ case OE_IM(LDNT_ADD, X1):
+ memop = MO_TEQ;
+ mnemonic = "ldnt_add";
+ goto do_load_add;
+ case OE_IM(LD_ADD, X1):
+ memop = MO_TEQ;
+ mnemonic = "ld_add";
+ do_load_add:
+ if (!prefetch_nofault) {
+ tcg_gen_qemu_ld_tl(tdest, tsrca, dc->mmuidx, memop);
+ }
+ tcg_gen_addi_tl(dest_gr(dc, srca), tsrca, imm);
+ break;
+ case OE_IM(LDNA_ADD, X1):
+ tcg_gen_andi_tl(tdest, tsrca, ~7);
+ tcg_gen_qemu_ld_tl(tdest, tdest, dc->mmuidx, MO_TEQ);
+ tcg_gen_addi_tl(dest_gr(dc, srca), tsrca, imm);
+ mnemonic = "ldna_add";
+ break;
+ case OE_IM(ORI, X0):
+ case OE_IM(ORI, X1):
+ tcg_gen_ori_tl(tdest, tsrca, imm);
+ mnemonic = "ori";
+ break;
+ case OE_IM(V1ADDI, X0):
+ case OE_IM(V1ADDI, X1):
+ t0 = tcg_const_tl(V1_IMM(imm));
+ gen_v12add(tdest, tsrca, t0, V1_IMM(0x80));
+ tcg_temp_free(t0);
+ mnemonic = "v1addi";
+ break;
+ case OE_IM(V1CMPEQI, X0):
+ case OE_IM(V1CMPEQI, X1):
+ tcg_gen_xori_tl(tdest, tsrca, V1_IMM(imm));
+ gen_v1cmpeq0(tdest);
+ mnemonic = "v1cmpeqi";
+ break;
+ case OE_IM(V1CMPLTSI, X0):
+ case OE_IM(V1CMPLTSI, X1):
+ case OE_IM(V1CMPLTUI, X0):
+ case OE_IM(V1CMPLTUI, X1):
+ case OE_IM(V1MAXUI, X0):
+ case OE_IM(V1MAXUI, X1):
+ case OE_IM(V1MINUI, X0):
+ case OE_IM(V1MINUI, X1):
+ return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+ case OE_IM(V2ADDI, X0):
+ case OE_IM(V2ADDI, X1):
+ t0 = tcg_const_tl(V2_IMM(imm));
+ gen_v12add(tdest, tsrca, t0, V2_IMM(0x8000));
+ tcg_temp_free(t0);
+ mnemonic = "v2addi";
+ break;
+ case OE_IM(V2CMPEQI, X0):
+ case OE_IM(V2CMPEQI, X1):
+ case OE_IM(V2CMPLTSI, X0):
+ case OE_IM(V2CMPLTSI, X1):
+ case OE_IM(V2CMPLTUI, X0):
+ case OE_IM(V2CMPLTUI, X1):
+ case OE_IM(V2MAXSI, X0):
+ case OE_IM(V2MAXSI, X1):
+ case OE_IM(V2MINSI, X0):
+ case OE_IM(V2MINSI, X1):
+ return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+ case OE_IM(XORI, X0):
+ case OE_IM(XORI, X1):
+ tcg_gen_xori_tl(tdest, tsrca, imm);
+ mnemonic = "xori";
+ break;
+
+ case OE_SH(ROTLI, X0):
+ case OE_SH(ROTLI, X1):
+ case OE_SH(ROTLI, Y0):
+ case OE_SH(ROTLI, Y1):
+ tcg_gen_rotli_tl(tdest, tsrca, imm);
+ mnemonic = "rotli";
+ break;
+ case OE_SH(SHLI, X0):
+ case OE_SH(SHLI, X1):
+ case OE_SH(SHLI, Y0):
+ case OE_SH(SHLI, Y1):
+ tcg_gen_shli_tl(tdest, tsrca, imm);
+ mnemonic = "shli";
+ break;
+ case OE_SH(SHLXI, X0):
+ case OE_SH(SHLXI, X1):
+ tcg_gen_shli_tl(tdest, tsrca, imm & 31);
+ tcg_gen_ext32s_tl(tdest, tdest);
+ mnemonic = "shlxi";
+ break;
+ case OE_SH(SHRSI, X0):
+ case OE_SH(SHRSI, X1):
+ case OE_SH(SHRSI, Y0):
+ case OE_SH(SHRSI, Y1):
+ tcg_gen_sari_tl(tdest, tsrca, imm);
+ mnemonic = "shrsi";
+ break;
+ case OE_SH(SHRUI, X0):
+ case OE_SH(SHRUI, X1):
+ case OE_SH(SHRUI, Y0):
+ case OE_SH(SHRUI, Y1):
+ tcg_gen_shri_tl(tdest, tsrca, imm);
+ mnemonic = "shrui";
+ break;
+ case OE_SH(SHRUXI, X0):
+ case OE_SH(SHRUXI, X1):
+ if ((imm & 31) == 0) {
+ tcg_gen_ext32s_tl(tdest, tsrca);
+ } else {
+ tcg_gen_ext32u_tl(tdest, tsrca);
+ tcg_gen_shri_tl(tdest, tdest, imm & 31);
+ }
+        mnemonic = "shruxi";
+ break;
+ case OE_SH(V1SHLI, X0):
+ case OE_SH(V1SHLI, X1):
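+        /* Clear the bits that would cross into the next byte lane, then a
+           single 64-bit shift handles all eight lanes at once. */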
+ i2 = imm & 7;
+ i3 = 0xff >> i2;
+ tcg_gen_andi_tl(tdest, tsrca, V1_IMM(i3));
+ tcg_gen_shli_tl(tdest, tdest, i2);
+ mnemonic = "v1shli";
+ break;
+ case OE_SH(V1SHRSI, X0):
+ case OE_SH(V1SHRSI, X1):
+ t0 = tcg_const_tl(imm & 7);
+ gen_helper_v1shrs(tdest, tsrca, t0);
+ tcg_temp_free(t0);
+ mnemonic = "v1shrsi";
+ break;
+ case OE_SH(V1SHRUI, X0):
+ case OE_SH(V1SHRUI, X1):
+ i2 = imm & 7;
+ i3 = (0xff << i2) & 0xff;
+ tcg_gen_andi_tl(tdest, tsrca, V1_IMM(i3));
+ tcg_gen_shri_tl(tdest, tdest, i2);
+ mnemonic = "v1shrui";
+ break;
+ case OE_SH(V2SHLI, X0):
+ case OE_SH(V2SHLI, X1):
+ i2 = imm & 15;
+ i3 = 0xffff >> i2;
+ tcg_gen_andi_tl(tdest, tsrca, V2_IMM(i3));
+ tcg_gen_shli_tl(tdest, tdest, i2);
+ mnemonic = "v2shli";
+ break;
+ case OE_SH(V2SHRSI, X0):
+ case OE_SH(V2SHRSI, X1):
+ t0 = tcg_const_tl(imm & 15);
+ gen_helper_v2shrs(tdest, tsrca, t0);
+ tcg_temp_free(t0);
+ mnemonic = "v2shrsi";
+ break;
+ case OE_SH(V2SHRUI, X0):
+ case OE_SH(V2SHRUI, X1):
+ i2 = imm & 15;
+ i3 = (0xffff << i2) & 0xffff;
+ tcg_gen_andi_tl(tdest, tsrca, V2_IMM(i3));
+ tcg_gen_shri_tl(tdest, tdest, i2);
+ mnemonic = "v2shrui";
+ break;
+
+ case OE(ADDLI_OPCODE_X0, 0, X0):
+ case OE(ADDLI_OPCODE_X1, 0, X1):
+ tcg_gen_addi_tl(tdest, tsrca, imm);
+ mnemonic = "addli";
+ break;
+ case OE(ADDXLI_OPCODE_X0, 0, X0):
+ case OE(ADDXLI_OPCODE_X1, 0, X1):
+ tcg_gen_addi_tl(tdest, tsrca, imm);
+ tcg_gen_ext32s_tl(tdest, tdest);
+ mnemonic = "addxli";
+ break;
+ case OE(SHL16INSLI_OPCODE_X0, 0, X0):
+ case OE(SHL16INSLI_OPCODE_X1, 0, X1):
+ tcg_gen_shli_tl(tdest, tsrca, 16);
+ tcg_gen_ori_tl(tdest, tdest, imm & 0xffff);
+ mnemonic = "shl16insli";
+ break;
+
+ default:
+ return TILEGX_EXCP_OPCODE_UNKNOWN;
+ }
+
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s, %d", mnemonic,
+ reg_names[dest], reg_names[srca], imm);
+ return TILEGX_EXCP_NONE;
+}
+
+static TileExcp gen_bf_opcode_x0(DisasContext *dc, unsigned ext,
+ unsigned dest, unsigned srca,
+ unsigned bfs, unsigned bfe)
+{
+ TCGv tdest = dest_gr(dc, dest);
+ TCGv tsrca = load_gr(dc, srca);
+ TCGv tsrcd;
+ int len;
+ const char *mnemonic;
+
+ /* The bitfield is either between E and S inclusive,
+ or up from S and down from E inclusive. */
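+    /* For example, bfs = 60 and bfe = 3 selects bits 60..63 and 0..3,
+       so len = (64 - 60) + (3 + 1) = 8. */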
+ if (bfs <= bfe) {
+ len = bfe - bfs + 1;
+ } else {
+ len = (64 - bfs) + (bfe + 1);
+ }
+
+ switch (ext) {
+ case BFEXTU_BF_OPCODE_X0:
+ if (bfs == 0 && bfe == 7) {
+ tcg_gen_ext8u_tl(tdest, tsrca);
+ } else if (bfs == 0 && bfe == 15) {
+ tcg_gen_ext16u_tl(tdest, tsrca);
+ } else if (bfs == 0 && bfe == 31) {
+ tcg_gen_ext32u_tl(tdest, tsrca);
+ } else {
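+            /* General case: move the end of the field up to bit 63 (rotating
+               when the field wraps), then shift right to zero-extend it. */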
+ int rol = 63 - bfe;
+ if (bfs <= bfe) {
+ tcg_gen_shli_tl(tdest, tsrca, rol);
+ } else {
+ tcg_gen_rotli_tl(tdest, tsrca, rol);
+ }
+ tcg_gen_shri_tl(tdest, tdest, (bfs + rol) & 63);
+ }
+ mnemonic = "bfextu";
+ break;
+
+ case BFEXTS_BF_OPCODE_X0:
+ if (bfs == 0 && bfe == 7) {
+ tcg_gen_ext8s_tl(tdest, tsrca);
+ } else if (bfs == 0 && bfe == 15) {
+ tcg_gen_ext16s_tl(tdest, tsrca);
+ } else if (bfs == 0 && bfe == 31) {
+ tcg_gen_ext32s_tl(tdest, tsrca);
+ } else {
+ int rol = 63 - bfe;
+ if (bfs <= bfe) {
+ tcg_gen_shli_tl(tdest, tsrca, rol);
+ } else {
+ tcg_gen_rotli_tl(tdest, tsrca, rol);
+ }
+ tcg_gen_sari_tl(tdest, tdest, (bfs + rol) & 63);
+ }
+ mnemonic = "bfexts";
+ break;
+
+ case BFINS_BF_OPCODE_X0:
+ tsrcd = load_gr(dc, dest);
+ if (bfs <= bfe) {
+ tcg_gen_deposit_tl(tdest, tsrcd, tsrca, bfs, len);
+ } else {
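+            /* Wrapping insert: rotate the old value so the field starts at
+               bit 0, deposit there, then rotate back. */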
+ tcg_gen_rotri_tl(tdest, tsrcd, bfs);
+ tcg_gen_deposit_tl(tdest, tdest, tsrca, 0, len);
+ tcg_gen_rotli_tl(tdest, tdest, bfs);
+ }
+ mnemonic = "bfins";
+ break;
+
+ case MM_BF_OPCODE_X0:
+ tsrcd = load_gr(dc, dest);
+ if (bfs == 0) {
+ tcg_gen_deposit_tl(tdest, tsrca, tsrcd, 0, len);
+ } else {
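+            /* Keep the destination's bits inside the field and take the
+               bits outside it from the source, using an explicit mask. */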
+ uint64_t mask = len == 64 ? -1 : rol64((1ULL << len) - 1, bfs);
+ TCGv tmp = tcg_const_tl(mask);
+
+ tcg_gen_and_tl(tdest, tsrcd, tmp);
+ tcg_gen_andc_tl(tmp, tsrca, tmp);
+ tcg_gen_or_tl(tdest, tdest, tmp);
+ tcg_temp_free(tmp);
+ }
+ mnemonic = "mm";
+ break;
+
+ default:
+ return TILEGX_EXCP_OPCODE_UNKNOWN;
+ }
+
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s, %u, %u", mnemonic,
+ reg_names[dest], reg_names[srca], bfs, bfe);
+ return TILEGX_EXCP_NONE;
+}
+
+static TileExcp gen_branch_opcode_x1(DisasContext *dc, unsigned ext,
+ unsigned srca, int off)
+{
+ target_ulong tgt = dc->pc + off * TILEGX_BUNDLE_SIZE_IN_BYTES;
+ const char *mnemonic;
+
+ dc->jmp.dest = tcg_const_tl(tgt);
+ dc->jmp.val1 = tcg_temp_new();
+ tcg_gen_mov_tl(dc->jmp.val1, load_gr(dc, srca));
+
+ /* Note that the "predict taken" opcodes have bit 0 clear.
+ Therefore, fold the two cases together by setting bit 0. */
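+    /* For example, beqzt (predict taken) is handled by the beqz case. */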
+ switch (ext | 1) {
+ case BEQZ_BRANCH_OPCODE_X1:
+ dc->jmp.cond = TCG_COND_EQ;
+ mnemonic = "beqz";
+ break;
+ case BNEZ_BRANCH_OPCODE_X1:
+ dc->jmp.cond = TCG_COND_NE;
+ mnemonic = "bnez";
+ break;
+ case BGEZ_BRANCH_OPCODE_X1:
+ dc->jmp.cond = TCG_COND_GE;
+ mnemonic = "bgez";
+ break;
+ case BGTZ_BRANCH_OPCODE_X1:
+ dc->jmp.cond = TCG_COND_GT;
+ mnemonic = "bgtz";
+ break;
+ case BLEZ_BRANCH_OPCODE_X1:
+ dc->jmp.cond = TCG_COND_LE;
+ mnemonic = "blez";
+ break;
+ case BLTZ_BRANCH_OPCODE_X1:
+ dc->jmp.cond = TCG_COND_LT;
+ mnemonic = "bltz";
+ break;
+ case BLBC_BRANCH_OPCODE_X1:
+ dc->jmp.cond = TCG_COND_EQ;
+ tcg_gen_andi_tl(dc->jmp.val1, dc->jmp.val1, 1);
+ mnemonic = "blbc";
+ break;
+ case BLBS_BRANCH_OPCODE_X1:
+ dc->jmp.cond = TCG_COND_NE;
+ tcg_gen_andi_tl(dc->jmp.val1, dc->jmp.val1, 1);
+ mnemonic = "blbs";
+ break;
+ default:
+ return TILEGX_EXCP_OPCODE_UNKNOWN;
+ }
+
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
+ qemu_log("%s%s %s, " TARGET_FMT_lx " <%s>",
+ mnemonic, ext & 1 ? "" : "t",
+ reg_names[srca], tgt, lookup_symbol(tgt));
+ }
+ return TILEGX_EXCP_NONE;
+}
+
+static TileExcp gen_jump_opcode_x1(DisasContext *dc, unsigned ext, int off)
+{
+ target_ulong tgt = dc->pc + off * TILEGX_BUNDLE_SIZE_IN_BYTES;
+ const char *mnemonic = "j";
+
+ /* The extension field is 1 bit, therefore we only have JAL and J. */
+ if (ext == JAL_JUMP_OPCODE_X1) {
+ tcg_gen_movi_tl(dest_gr(dc, TILEGX_R_LR),
+ dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
+ mnemonic = "jal";
+ }
+ dc->jmp.cond = TCG_COND_ALWAYS;
+ dc->jmp.dest = tcg_const_tl(tgt);
+
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
+ qemu_log("%s " TARGET_FMT_lx " <%s>",
+ mnemonic, tgt, lookup_symbol(tgt));
+ }
+ return TILEGX_EXCP_NONE;
+}
+
+typedef struct {
+ const char *name;
+ intptr_t offset;
+ void (*get)(TCGv, TCGv_ptr);
+ void (*put)(TCGv_ptr, TCGv);
+} TileSPR;
+
+static const TileSPR *find_spr(unsigned spr)
+{
+ /* Allow the compiler to construct the binary search tree. */
+#define D(N, O, G, P) \
+ case SPR_##N: { static const TileSPR x = { #N, O, G, P }; return &x; }
+
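+    /* A NULL get/put hook (the trailing zeros below) means the SPR is read
+       and written with a plain load/store at "offset". */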
+ switch (spr) {
+ D(CMPEXCH_VALUE,
+ offsetof(CPUTLGState, spregs[TILEGX_SPR_CMPEXCH]), 0, 0)
+ D(INTERRUPT_CRITICAL_SECTION,
+ offsetof(CPUTLGState, spregs[TILEGX_SPR_CRITICAL_SEC]), 0, 0)
+ D(SIM_CONTROL,
+ offsetof(CPUTLGState, spregs[TILEGX_SPR_SIM_CONTROL]), 0, 0)
+ D(EX_CONTEXT_0_0,
+ offsetof(CPUTLGState, spregs[TILEGX_SPR_EX_CONTEXT_0_0]), 0, 0)
+ D(EX_CONTEXT_0_1,
+ offsetof(CPUTLGState, spregs[TILEGX_SPR_EX_CONTEXT_0_1]), 0, 0)
+ }
+
+#undef D
+
+ qemu_log_mask(LOG_UNIMP, "UNIMP SPR %u\n", spr);
+ return NULL;
+}
+
+static TileExcp gen_mtspr_x1(DisasContext *dc, unsigned spr, unsigned srca)
+{
+ const TileSPR *def = find_spr(spr);
+ TCGv tsrca;
+
+ if (def == NULL) {
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "mtspr spr[%u], %s", spr, reg_names[srca]);
+ return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+ }
+
+ tsrca = load_gr(dc, srca);
+ if (def->put) {
+ def->put(cpu_env, tsrca);
+ } else {
+ tcg_gen_st_tl(tsrca, cpu_env, def->offset);
+ }
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "mtspr %s, %s", def->name, reg_names[srca]);
+ return TILEGX_EXCP_NONE;
+}
+
+static TileExcp gen_mfspr_x1(DisasContext *dc, unsigned dest, unsigned spr)
+{
+ const TileSPR *def = find_spr(spr);
+ TCGv tdest;
+
+ if (def == NULL) {
+        qemu_log_mask(CPU_LOG_TB_IN_ASM, "mfspr %s, spr[%u]", reg_names[dest], spr);
+ return TILEGX_EXCP_OPCODE_UNIMPLEMENTED;
+ }
+
+ tdest = dest_gr(dc, dest);
+ if (def->get) {
+ def->get(tdest, cpu_env);
+ } else {
+ tcg_gen_ld_tl(tdest, cpu_env, def->offset);
+ }
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "mfspr %s, %s", reg_names[dest], def->name);
+ return TILEGX_EXCP_NONE;
+}
+
+static TileExcp decode_y0(DisasContext *dc, tilegx_bundle_bits bundle)
+{
+ unsigned opc = get_Opcode_Y0(bundle);
+ unsigned ext = get_RRROpcodeExtension_Y0(bundle);
+ unsigned dest = get_Dest_Y0(bundle);
+ unsigned srca = get_SrcA_Y0(bundle);
+ unsigned srcb;
+ int imm;
+
+ switch (opc) {
+ case RRR_1_OPCODE_Y0:
+ if (ext == UNARY_RRR_1_OPCODE_Y0) {
+ ext = get_UnaryOpcodeExtension_Y0(bundle);
+ return gen_rr_opcode(dc, OE(opc, ext, Y0), dest, srca, bundle);
+ }
+ /* fallthru */
+ case RRR_0_OPCODE_Y0:
+ case RRR_2_OPCODE_Y0:
+ case RRR_3_OPCODE_Y0:
+ case RRR_4_OPCODE_Y0:
+ case RRR_5_OPCODE_Y0:
+ case RRR_6_OPCODE_Y0:
+ case RRR_7_OPCODE_Y0:
+ case RRR_8_OPCODE_Y0:
+ case RRR_9_OPCODE_Y0:
+ srcb = get_SrcB_Y0(bundle);
+ return gen_rrr_opcode(dc, OE(opc, ext, Y0), dest, srca, srcb);
+
+ case SHIFT_OPCODE_Y0:
+ ext = get_ShiftOpcodeExtension_Y0(bundle);
+ imm = get_ShAmt_Y0(bundle);
+ return gen_rri_opcode(dc, OE(opc, ext, Y0), dest, srca, imm);
+
+ case ADDI_OPCODE_Y0:
+ case ADDXI_OPCODE_Y0:
+ case ANDI_OPCODE_Y0:
+ case CMPEQI_OPCODE_Y0:
+ case CMPLTSI_OPCODE_Y0:
+ imm = (int8_t)get_Imm8_Y0(bundle);
+ return gen_rri_opcode(dc, OE(opc, 0, Y0), dest, srca, imm);
+
+ default:
+ return TILEGX_EXCP_OPCODE_UNKNOWN;
+ }
+}
+
+static TileExcp decode_y1(DisasContext *dc, tilegx_bundle_bits bundle)
+{
+ unsigned opc = get_Opcode_Y1(bundle);
+ unsigned ext = get_RRROpcodeExtension_Y1(bundle);
+ unsigned dest = get_Dest_Y1(bundle);
+ unsigned srca = get_SrcA_Y1(bundle);
+ unsigned srcb;
+ int imm;
+
+ switch (get_Opcode_Y1(bundle)) {
+ case RRR_1_OPCODE_Y1:
+        if (ext == UNARY_RRR_1_OPCODE_Y1) {
+ ext = get_UnaryOpcodeExtension_Y1(bundle);
+ return gen_rr_opcode(dc, OE(opc, ext, Y1), dest, srca, bundle);
+ }
+ /* fallthru */
+ case RRR_0_OPCODE_Y1:
+ case RRR_2_OPCODE_Y1:
+ case RRR_3_OPCODE_Y1:
+ case RRR_4_OPCODE_Y1:
+ case RRR_5_OPCODE_Y1:
+ case RRR_6_OPCODE_Y1:
+ case RRR_7_OPCODE_Y1:
+ srcb = get_SrcB_Y1(bundle);
+ return gen_rrr_opcode(dc, OE(opc, ext, Y1), dest, srca, srcb);
+
+ case SHIFT_OPCODE_Y1:
+ ext = get_ShiftOpcodeExtension_Y1(bundle);
+ imm = get_ShAmt_Y1(bundle);
+ return gen_rri_opcode(dc, OE(opc, ext, Y1), dest, srca, imm);
+
+ case ADDI_OPCODE_Y1:
+ case ADDXI_OPCODE_Y1:
+ case ANDI_OPCODE_Y1:
+ case CMPEQI_OPCODE_Y1:
+ case CMPLTSI_OPCODE_Y1:
+ imm = (int8_t)get_Imm8_Y1(bundle);
+ return gen_rri_opcode(dc, OE(opc, 0, Y1), dest, srca, imm);
+
+ default:
+ return TILEGX_EXCP_OPCODE_UNKNOWN;
+ }
+}
+
+static TileExcp decode_y2(DisasContext *dc, tilegx_bundle_bits bundle)
+{
+ unsigned mode = get_Mode(bundle);
+ unsigned opc = get_Opcode_Y2(bundle);
+ unsigned srca = get_SrcA_Y2(bundle);
+ unsigned srcbdest = get_SrcBDest_Y2(bundle);
+ const char *mnemonic;
+ TCGMemOp memop;
+ bool prefetch_nofault = false;
+
+ switch (OEY2(opc, mode)) {
+ case OEY2(LD1S_OPCODE_Y2, MODE_OPCODE_YA2):
+ memop = MO_SB;
+ mnemonic = "ld1s"; /* prefetch_l1_fault */
+ goto do_load;
+ case OEY2(LD1U_OPCODE_Y2, MODE_OPCODE_YA2):
+ memop = MO_UB;
+ mnemonic = "ld1u"; /* prefetch, prefetch_l1 */
+ prefetch_nofault = (srcbdest == TILEGX_R_ZERO);
+ goto do_load;
+ case OEY2(LD2S_OPCODE_Y2, MODE_OPCODE_YA2):
+ memop = MO_TESW;
+ mnemonic = "ld2s"; /* prefetch_l2_fault */
+ goto do_load;
+ case OEY2(LD2U_OPCODE_Y2, MODE_OPCODE_YA2):
+ memop = MO_TEUW;
+ mnemonic = "ld2u"; /* prefetch_l2 */
+ prefetch_nofault = (srcbdest == TILEGX_R_ZERO);
+ goto do_load;
+ case OEY2(LD4S_OPCODE_Y2, MODE_OPCODE_YB2):
+ memop = MO_TESL;
+ mnemonic = "ld4s"; /* prefetch_l3_fault */
+ goto do_load;
+ case OEY2(LD4U_OPCODE_Y2, MODE_OPCODE_YB2):
+ memop = MO_TEUL;
+ mnemonic = "ld4u"; /* prefetch_l3 */
+ prefetch_nofault = (srcbdest == TILEGX_R_ZERO);
+ goto do_load;
+ case OEY2(LD_OPCODE_Y2, MODE_OPCODE_YB2):
+ memop = MO_TEQ;
+ mnemonic = "ld";
+ do_load:
+ if (!prefetch_nofault) {
+ tcg_gen_qemu_ld_tl(dest_gr(dc, srcbdest), load_gr(dc, srca),
+ dc->mmuidx, memop);
+ }
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "%s %s, %s", mnemonic,
+ reg_names[srcbdest], reg_names[srca]);
+ return TILEGX_EXCP_NONE;
+
+ case OEY2(ST1_OPCODE_Y2, MODE_OPCODE_YC2):
+ return gen_st_opcode(dc, 0, srca, srcbdest, MO_UB, "st1");
+ case OEY2(ST2_OPCODE_Y2, MODE_OPCODE_YC2):
+ return gen_st_opcode(dc, 0, srca, srcbdest, MO_TEUW, "st2");
+ case OEY2(ST4_OPCODE_Y2, MODE_OPCODE_YC2):
+ return gen_st_opcode(dc, 0, srca, srcbdest, MO_TEUL, "st4");
+ case OEY2(ST_OPCODE_Y2, MODE_OPCODE_YC2):
+ return gen_st_opcode(dc, 0, srca, srcbdest, MO_TEQ, "st");
+
+ default:
+ return TILEGX_EXCP_OPCODE_UNKNOWN;
+ }
+}
+
+static TileExcp decode_x0(DisasContext *dc, tilegx_bundle_bits bundle)
+{
+ unsigned opc = get_Opcode_X0(bundle);
+ unsigned dest = get_Dest_X0(bundle);
+ unsigned srca = get_SrcA_X0(bundle);
+ unsigned ext, srcb, bfs, bfe;
+ int imm;
+
+ switch (opc) {
+ case RRR_0_OPCODE_X0:
+ ext = get_RRROpcodeExtension_X0(bundle);
+ if (ext == UNARY_RRR_0_OPCODE_X0) {
+ ext = get_UnaryOpcodeExtension_X0(bundle);
+ return gen_rr_opcode(dc, OE(opc, ext, X0), dest, srca, bundle);
+ }
+ srcb = get_SrcB_X0(bundle);
+ return gen_rrr_opcode(dc, OE(opc, ext, X0), dest, srca, srcb);
+
+ case SHIFT_OPCODE_X0:
+ ext = get_ShiftOpcodeExtension_X0(bundle);
+ imm = get_ShAmt_X0(bundle);
+ return gen_rri_opcode(dc, OE(opc, ext, X0), dest, srca, imm);
+
+ case IMM8_OPCODE_X0:
+ ext = get_Imm8OpcodeExtension_X0(bundle);
+ imm = (int8_t)get_Imm8_X0(bundle);
+ return gen_rri_opcode(dc, OE(opc, ext, X0), dest, srca, imm);
+
+ case BF_OPCODE_X0:
+ ext = get_BFOpcodeExtension_X0(bundle);
+ bfs = get_BFStart_X0(bundle);
+ bfe = get_BFEnd_X0(bundle);
+ return gen_bf_opcode_x0(dc, ext, dest, srca, bfs, bfe);
+
+ case ADDLI_OPCODE_X0:
+ case SHL16INSLI_OPCODE_X0:
+ case ADDXLI_OPCODE_X0:
+ imm = (int16_t)get_Imm16_X0(bundle);
+ return gen_rri_opcode(dc, OE(opc, 0, X0), dest, srca, imm);
+
+ default:
+ return TILEGX_EXCP_OPCODE_UNKNOWN;
+ }
+}
+
+static TileExcp decode_x1(DisasContext *dc, tilegx_bundle_bits bundle)
+{
+ unsigned opc = get_Opcode_X1(bundle);
+ unsigned dest = get_Dest_X1(bundle);
+ unsigned srca = get_SrcA_X1(bundle);
+ unsigned ext, srcb;
+ int imm;
+
+ switch (opc) {
+ case RRR_0_OPCODE_X1:
+ ext = get_RRROpcodeExtension_X1(bundle);
+ srcb = get_SrcB_X1(bundle);
+ switch (ext) {
+ case UNARY_RRR_0_OPCODE_X1:
+ ext = get_UnaryOpcodeExtension_X1(bundle);
+ return gen_rr_opcode(dc, OE(opc, ext, X1), dest, srca, bundle);
+ case ST1_RRR_0_OPCODE_X1:
+ return gen_st_opcode(dc, dest, srca, srcb, MO_UB, "st1");
+ case ST2_RRR_0_OPCODE_X1:
+ return gen_st_opcode(dc, dest, srca, srcb, MO_TEUW, "st2");
+ case ST4_RRR_0_OPCODE_X1:
+ return gen_st_opcode(dc, dest, srca, srcb, MO_TEUL, "st4");
+ case STNT1_RRR_0_OPCODE_X1:
+ return gen_st_opcode(dc, dest, srca, srcb, MO_UB, "stnt1");
+ case STNT2_RRR_0_OPCODE_X1:
+ return gen_st_opcode(dc, dest, srca, srcb, MO_TEUW, "stnt2");
+ case STNT4_RRR_0_OPCODE_X1:
+ return gen_st_opcode(dc, dest, srca, srcb, MO_TEUL, "stnt4");
+ case STNT_RRR_0_OPCODE_X1:
+ return gen_st_opcode(dc, dest, srca, srcb, MO_TEQ, "stnt");
+ case ST_RRR_0_OPCODE_X1:
+ return gen_st_opcode(dc, dest, srca, srcb, MO_TEQ, "st");
+ }
+ return gen_rrr_opcode(dc, OE(opc, ext, X1), dest, srca, srcb);
+
+ case SHIFT_OPCODE_X1:
+ ext = get_ShiftOpcodeExtension_X1(bundle);
+ imm = get_ShAmt_X1(bundle);
+ return gen_rri_opcode(dc, OE(opc, ext, X1), dest, srca, imm);
+
+ case IMM8_OPCODE_X1:
+ ext = get_Imm8OpcodeExtension_X1(bundle);
+ imm = (int8_t)get_Dest_Imm8_X1(bundle);
+ srcb = get_SrcB_X1(bundle);
+ switch (ext) {
+ case ST1_ADD_IMM8_OPCODE_X1:
+ return gen_st_add_opcode(dc, srca, srcb, imm, MO_UB, "st1_add");
+ case ST2_ADD_IMM8_OPCODE_X1:
+ return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEUW, "st2_add");
+ case ST4_ADD_IMM8_OPCODE_X1:
+ return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEUL, "st4_add");
+ case STNT1_ADD_IMM8_OPCODE_X1:
+ return gen_st_add_opcode(dc, srca, srcb, imm, MO_UB, "stnt1_add");
+ case STNT2_ADD_IMM8_OPCODE_X1:
+ return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEUW, "stnt2_add");
+ case STNT4_ADD_IMM8_OPCODE_X1:
+ return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEUL, "stnt4_add");
+ case STNT_ADD_IMM8_OPCODE_X1:
+ return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEQ, "stnt_add");
+ case ST_ADD_IMM8_OPCODE_X1:
+ return gen_st_add_opcode(dc, srca, srcb, imm, MO_TEQ, "st_add");
+ case MFSPR_IMM8_OPCODE_X1:
+ return gen_mfspr_x1(dc, dest, get_MF_Imm14_X1(bundle));
+ case MTSPR_IMM8_OPCODE_X1:
+ return gen_mtspr_x1(dc, get_MT_Imm14_X1(bundle), srca);
+ }
+ imm = (int8_t)get_Imm8_X1(bundle);
+ return gen_rri_opcode(dc, OE(opc, ext, X1), dest, srca, imm);
+
+ case BRANCH_OPCODE_X1:
+ ext = get_BrType_X1(bundle);
+ imm = sextract32(get_BrOff_X1(bundle), 0, 17);
+ return gen_branch_opcode_x1(dc, ext, srca, imm);
+
+ case JUMP_OPCODE_X1:
+ ext = get_JumpOpcodeExtension_X1(bundle);
+ imm = sextract32(get_JumpOff_X1(bundle), 0, 27);
+ return gen_jump_opcode_x1(dc, ext, imm);
+
+ case ADDLI_OPCODE_X1:
+ case SHL16INSLI_OPCODE_X1:
+ case ADDXLI_OPCODE_X1:
+ imm = (int16_t)get_Imm16_X1(bundle);
+ return gen_rri_opcode(dc, OE(opc, 0, X1), dest, srca, imm);
+
+ default:
+ return TILEGX_EXCP_OPCODE_UNKNOWN;
+ }
+}
+
+static void notice_excp(DisasContext *dc, uint64_t bundle,
+ const char *type, TileExcp excp)
+{
+ if (likely(excp == TILEGX_EXCP_NONE)) {
+ return;
+ }
+ gen_exception(dc, excp);
+ switch (excp) {
+ case TILEGX_EXCP_OPCODE_UNIMPLEMENTED:
+ qemu_log_mask(LOG_UNIMP, "UNIMP %s, [" FMT64X "]\n", type, bundle);
+ break;
+ case TILEGX_EXCP_OPCODE_UNKNOWN:
+ qemu_log_mask(LOG_UNIMP, "UNKNOWN %s, [" FMT64X "]\n", type, bundle);
+ break;
+ default:
+ break;
+ }
+}
+
+static void translate_one_bundle(DisasContext *dc, uint64_t bundle)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dc->wb); i++) {
+ DisasContextTemp *wb = &dc->wb[i];
+ wb->reg = TILEGX_R_NOREG;
+ TCGV_UNUSED_I64(wb->val);
+ }
+ dc->num_wb = 0;
+
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, " %" PRIx64 ": { ", dc->pc);
+ if (get_Mode(bundle)) {
+ notice_excp(dc, bundle, "y0", decode_y0(dc, bundle));
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, " ; ");
+ notice_excp(dc, bundle, "y1", decode_y1(dc, bundle));
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, " ; ");
+ notice_excp(dc, bundle, "y2", decode_y2(dc, bundle));
+ } else {
+ notice_excp(dc, bundle, "x0", decode_x0(dc, bundle));
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, " ; ");
+ notice_excp(dc, bundle, "x1", decode_x1(dc, bundle));
+ }
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, " }\n");
+
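+    /* Commit the queued register writebacks only after the whole bundle has
+       been translated, so the pipes read the pre-bundle register values. */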
+ for (i = dc->num_wb - 1; i >= 0; --i) {
+ DisasContextTemp *wb = &dc->wb[i];
+ if (wb->reg < TILEGX_R_COUNT) {
+ tcg_gen_mov_i64(cpu_regs[wb->reg], wb->val);
+ }
+ tcg_temp_free_i64(wb->val);
+ }
+
+ if (dc->jmp.cond != TCG_COND_NEVER) {
+ if (dc->jmp.cond == TCG_COND_ALWAYS) {
+ tcg_gen_mov_i64(cpu_pc, dc->jmp.dest);
+ } else {
+ TCGv next = tcg_const_i64(dc->pc + TILEGX_BUNDLE_SIZE_IN_BYTES);
+ tcg_gen_movcond_i64(dc->jmp.cond, cpu_pc,
+ dc->jmp.val1, load_zero(dc),
+ dc->jmp.dest, next);
+ tcg_temp_free_i64(dc->jmp.val1);
+ tcg_temp_free_i64(next);
+ }
+ tcg_temp_free_i64(dc->jmp.dest);
+ tcg_gen_exit_tb(0);
+ dc->exit_tb = true;
+ } else if (dc->atomic_excp != TILEGX_EXCP_NONE) {
+ gen_exception(dc, dc->atomic_excp);
+ }
+}
+
+void gen_intermediate_code(CPUTLGState *env, struct TranslationBlock *tb)
+{
+ TileGXCPU *cpu = tilegx_env_get_cpu(env);
+ DisasContext ctx;
+ DisasContext *dc = &ctx;
+ CPUState *cs = CPU(cpu);
+ uint64_t pc_start = tb->pc;
+ uint64_t next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
+ int num_insns = 0;
+ int max_insns = tb->cflags & CF_COUNT_MASK;
+
+ dc->pc = pc_start;
+ dc->mmuidx = 0;
+ dc->exit_tb = false;
+ dc->atomic_excp = TILEGX_EXCP_NONE;
+ dc->jmp.cond = TCG_COND_NEVER;
+ TCGV_UNUSED_I64(dc->jmp.dest);
+ TCGV_UNUSED_I64(dc->jmp.val1);
+ TCGV_UNUSED_I64(dc->zero);
+
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
+ qemu_log_lock();
+ qemu_log("IN: %s\n", lookup_symbol(pc_start));
+ }
+ if (!max_insns) {
+ max_insns = CF_COUNT_MASK;
+ }
+ if (cs->singlestep_enabled || singlestep) {
+ max_insns = 1;
+ }
+ if (max_insns > TCG_MAX_INSNS) {
+ max_insns = TCG_MAX_INSNS;
+ }
+ gen_tb_start(tb);
+
+ while (1) {
+ tcg_gen_insn_start(dc->pc);
+ num_insns++;
+
+ translate_one_bundle(dc, cpu_ldq_data(env, dc->pc));
+
+ if (dc->exit_tb) {
+ /* PC updated and EXIT_TB/GOTO_TB/exception emitted. */
+ break;
+ }
+ dc->pc += TILEGX_BUNDLE_SIZE_IN_BYTES;
+ if (num_insns >= max_insns
+ || dc->pc >= next_page_start
+ || tcg_op_buf_full()) {
+ /* Ending the TB due to TB size or page boundary. Set PC. */
+ tcg_gen_movi_tl(cpu_pc, dc->pc);
+ tcg_gen_exit_tb(0);
+ break;
+ }
+ }
+
+ gen_tb_end(tb, num_insns);
+ tb->size = dc->pc - pc_start;
+ tb->icount = num_insns;
+
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
+ qemu_log("\n");
+ qemu_log_unlock();
+ }
+}
+
+void restore_state_to_opc(CPUTLGState *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ env->pc = data[0];
+}
+
+void tilegx_tcg_init(void)
+{
+ int i;
+
+ cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+ tcg_ctx.tcg_env = cpu_env;
+ cpu_pc = tcg_global_mem_new_i64(cpu_env, offsetof(CPUTLGState, pc), "pc");
+ for (i = 0; i < TILEGX_R_COUNT; i++) {
+ cpu_regs[i] = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUTLGState, regs[i]),
+ reg_names[i]);
+ }
+}
diff --git a/target/tricore/Makefile.objs b/target/tricore/Makefile.objs
new file mode 100644
index 0000000000..7a05670718
--- /dev/null
+++ b/target/tricore/Makefile.objs
@@ -0,0 +1 @@
+obj-y += translate.o helper.o cpu.o op_helper.o fpu_helper.o
diff --git a/target/tricore/cpu-qom.h b/target/tricore/cpu-qom.h
new file mode 100644
index 0000000000..6a69756126
--- /dev/null
+++ b/target/tricore/cpu-qom.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef QEMU_TRICORE_CPU_QOM_H
+#define QEMU_TRICORE_CPU_QOM_H
+
+#include "qom/cpu.h"
+
+
+#define TYPE_TRICORE_CPU "tricore-cpu"
+
+#define TRICORE_CPU_CLASS(klass) \
+ OBJECT_CLASS_CHECK(TriCoreCPUClass, (klass), TYPE_TRICORE_CPU)
+#define TRICORE_CPU(obj) \
+ OBJECT_CHECK(TriCoreCPU, (obj), TYPE_TRICORE_CPU)
+#define TRICORE_CPU_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(TriCoreCPUClass, (obj), TYPE_TRICORE_CPU)
+
+typedef struct TriCoreCPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+ void (*parent_reset)(CPUState *cpu);
+} TriCoreCPUClass;
+
+typedef struct TriCoreCPU TriCoreCPU;
+
+#endif /* QEMU_TRICORE_CPU_QOM_H */
diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c
new file mode 100644
index 0000000000..785b76bd3a
--- /dev/null
+++ b/target/tricore/cpu.c
@@ -0,0 +1,220 @@
+/*
+ * TriCore emulation for qemu: CPU model and initialization.
+ *
+ * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "qemu-common.h"
+#include "exec/exec-all.h"
+
+static inline void set_feature(CPUTriCoreState *env, int feature)
+{
+ env->features |= 1ULL << feature;
+}
+
+static void tricore_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ TriCoreCPU *cpu = TRICORE_CPU(cs);
+ CPUTriCoreState *env = &cpu->env;
+
+ env->PC = value & ~(target_ulong)1;
+}
+
+static void tricore_cpu_synchronize_from_tb(CPUState *cs,
+ TranslationBlock *tb)
+{
+ TriCoreCPU *cpu = TRICORE_CPU(cs);
+ CPUTriCoreState *env = &cpu->env;
+
+ env->PC = tb->pc;
+}
+
+static void tricore_cpu_reset(CPUState *s)
+{
+ TriCoreCPU *cpu = TRICORE_CPU(s);
+ TriCoreCPUClass *tcc = TRICORE_CPU_GET_CLASS(cpu);
+ CPUTriCoreState *env = &cpu->env;
+
+ tcc->parent_reset(s);
+
+ tlb_flush(s, 1);
+
+ cpu_state_reset(env);
+}
+
+static bool tricore_cpu_has_work(CPUState *cs)
+{
+ return true;
+}
+
+static void tricore_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ TriCoreCPU *cpu = TRICORE_CPU(dev);
+ TriCoreCPUClass *tcc = TRICORE_CPU_GET_CLASS(dev);
+ CPUTriCoreState *env = &cpu->env;
+ Error *local_err = NULL;
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ /* Some features automatically imply others */
+ if (tricore_feature(env, TRICORE_FEATURE_161)) {
+ set_feature(env, TRICORE_FEATURE_16);
+ }
+
+ if (tricore_feature(env, TRICORE_FEATURE_16)) {
+ set_feature(env, TRICORE_FEATURE_131);
+ }
+ if (tricore_feature(env, TRICORE_FEATURE_131)) {
+ set_feature(env, TRICORE_FEATURE_13);
+ }
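+    /* A tc27x core, for instance, starts out with only TRICORE_FEATURE_161
+       and ends up with 16, 131 and 13 set as well. */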
+ cpu_reset(cs);
+ qemu_init_vcpu(cs);
+
+ tcc->parent_realize(dev, errp);
+}
+
+
+static void tricore_cpu_initfn(Object *obj)
+{
+ CPUState *cs = CPU(obj);
+ TriCoreCPU *cpu = TRICORE_CPU(obj);
+ CPUTriCoreState *env = &cpu->env;
+
+ cs->env_ptr = env;
+
+ if (tcg_enabled()) {
+ tricore_tcg_init();
+ }
+}
+
+static ObjectClass *tricore_cpu_class_by_name(const char *cpu_model)
+{
+ ObjectClass *oc;
+ char *typename;
+
+ if (!cpu_model) {
+ return NULL;
+ }
+
+ typename = g_strdup_printf("%s-" TYPE_TRICORE_CPU, cpu_model);
+ oc = object_class_by_name(typename);
+ g_free(typename);
+ if (!oc || !object_class_dynamic_cast(oc, TYPE_TRICORE_CPU) ||
+ object_class_is_abstract(oc)) {
+ return NULL;
+ }
+ return oc;
+}
+
+static void tc1796_initfn(Object *obj)
+{
+ TriCoreCPU *cpu = TRICORE_CPU(obj);
+
+ set_feature(&cpu->env, TRICORE_FEATURE_13);
+}
+
+static void tc1797_initfn(Object *obj)
+{
+ TriCoreCPU *cpu = TRICORE_CPU(obj);
+
+ set_feature(&cpu->env, TRICORE_FEATURE_131);
+}
+
+static void tc27x_initfn(Object *obj)
+{
+ TriCoreCPU *cpu = TRICORE_CPU(obj);
+
+ set_feature(&cpu->env, TRICORE_FEATURE_161);
+}
+
+typedef struct TriCoreCPUInfo {
+ const char *name;
+ void (*initfn)(Object *obj);
+ void (*class_init)(ObjectClass *oc, void *data);
+} TriCoreCPUInfo;
+
+static const TriCoreCPUInfo tricore_cpus[] = {
+ { .name = "tc1796", .initfn = tc1796_initfn },
+ { .name = "tc1797", .initfn = tc1797_initfn },
+ { .name = "tc27x", .initfn = tc27x_initfn },
+ { .name = NULL }
+};
+
+static void tricore_cpu_class_init(ObjectClass *c, void *data)
+{
+ TriCoreCPUClass *mcc = TRICORE_CPU_CLASS(c);
+ CPUClass *cc = CPU_CLASS(c);
+ DeviceClass *dc = DEVICE_CLASS(c);
+
+ mcc->parent_realize = dc->realize;
+ dc->realize = tricore_cpu_realizefn;
+
+ mcc->parent_reset = cc->reset;
+ cc->reset = tricore_cpu_reset;
+ cc->class_by_name = tricore_cpu_class_by_name;
+ cc->has_work = tricore_cpu_has_work;
+
+ cc->dump_state = tricore_cpu_dump_state;
+ cc->set_pc = tricore_cpu_set_pc;
+ cc->synchronize_from_tb = tricore_cpu_synchronize_from_tb;
+}
+
+static void cpu_register(const TriCoreCPUInfo *info)
+{
+ TypeInfo type_info = {
+ .parent = TYPE_TRICORE_CPU,
+ .instance_size = sizeof(TriCoreCPU),
+ .instance_init = info->initfn,
+ .class_size = sizeof(TriCoreCPUClass),
+ .class_init = info->class_init,
+ };
+
+ type_info.name = g_strdup_printf("%s-" TYPE_TRICORE_CPU, info->name);
+ type_register(&type_info);
+ g_free((void *)type_info.name);
+}
+
+static const TypeInfo tricore_cpu_type_info = {
+ .name = TYPE_TRICORE_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(TriCoreCPU),
+ .instance_init = tricore_cpu_initfn,
+ .abstract = true,
+ .class_size = sizeof(TriCoreCPUClass),
+ .class_init = tricore_cpu_class_init,
+};
+
+static void tricore_cpu_register_types(void)
+{
+ const TriCoreCPUInfo *info = tricore_cpus;
+
+ type_register_static(&tricore_cpu_type_info);
+
+ while (info->name) {
+ cpu_register(info);
+ info++;
+ }
+}
+
+type_init(tricore_cpu_register_types)
diff --git a/target/tricore/cpu.h b/target/tricore/cpu.h
new file mode 100644
index 0000000000..a3493a123c
--- /dev/null
+++ b/target/tricore/cpu.h
@@ -0,0 +1,424 @@
+/*
+ * TriCore emulation for qemu: main CPU struct.
+ *
+ * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TRICORE_CPU_H
+#define TRICORE_CPU_H
+
+#include "tricore-defs.h"
+#include "qemu-common.h"
+#include "cpu-qom.h"
+#include "exec/cpu-defs.h"
+#include "fpu/softfloat.h"
+
+#define CPUArchState struct CPUTriCoreState
+
+struct CPUTriCoreState;
+
+struct tricore_boot_info;
+
+#define NB_MMU_MODES 3
+
+typedef struct tricore_def_t tricore_def_t;
+
+typedef struct CPUTriCoreState CPUTriCoreState;
+struct CPUTriCoreState {
+ /* GPR Register */
+ uint32_t gpr_a[16];
+ uint32_t gpr_d[16];
+ /* CSFR Register */
+ uint32_t PCXI;
+    /* Frequently accessed PSW_USB bits are stored separately for efficiency.
+       This contains all the other bits.  Use psw_{read,write} to access
+       the whole PSW. */
+ uint32_t PSW;
+
+    /* PSW flag cache for faster execution. */
+    uint32_t PSW_USB_C;
+    uint32_t PSW_USB_V;   /* The flag is set only if bit 31 is set. */
+    uint32_t PSW_USB_SV;  /* The flag is set only if bit 31 is set. */
+    uint32_t PSW_USB_AV;  /* The flag is set only if bit 31 is set. */
+    uint32_t PSW_USB_SAV; /* The flag is set only if bit 31 is set. */
+
+ uint32_t PC;
+ uint32_t SYSCON;
+ uint32_t CPU_ID;
+ uint32_t BIV;
+ uint32_t BTV;
+ uint32_t ISP;
+ uint32_t ICR;
+ uint32_t FCX;
+ uint32_t LCX;
+ uint32_t COMPAT;
+
+ /* Mem Protection Register */
+ uint32_t DPR0_0L;
+ uint32_t DPR0_0U;
+ uint32_t DPR0_1L;
+ uint32_t DPR0_1U;
+ uint32_t DPR0_2L;
+ uint32_t DPR0_2U;
+ uint32_t DPR0_3L;
+ uint32_t DPR0_3U;
+
+ uint32_t DPR1_0L;
+ uint32_t DPR1_0U;
+ uint32_t DPR1_1L;
+ uint32_t DPR1_1U;
+ uint32_t DPR1_2L;
+ uint32_t DPR1_2U;
+ uint32_t DPR1_3L;
+ uint32_t DPR1_3U;
+
+ uint32_t DPR2_0L;
+ uint32_t DPR2_0U;
+ uint32_t DPR2_1L;
+ uint32_t DPR2_1U;
+ uint32_t DPR2_2L;
+ uint32_t DPR2_2U;
+ uint32_t DPR2_3L;
+ uint32_t DPR2_3U;
+
+ uint32_t DPR3_0L;
+ uint32_t DPR3_0U;
+ uint32_t DPR3_1L;
+ uint32_t DPR3_1U;
+ uint32_t DPR3_2L;
+ uint32_t DPR3_2U;
+ uint32_t DPR3_3L;
+ uint32_t DPR3_3U;
+
+ uint32_t CPR0_0L;
+ uint32_t CPR0_0U;
+ uint32_t CPR0_1L;
+ uint32_t CPR0_1U;
+ uint32_t CPR0_2L;
+ uint32_t CPR0_2U;
+ uint32_t CPR0_3L;
+ uint32_t CPR0_3U;
+
+ uint32_t CPR1_0L;
+ uint32_t CPR1_0U;
+ uint32_t CPR1_1L;
+ uint32_t CPR1_1U;
+ uint32_t CPR1_2L;
+ uint32_t CPR1_2U;
+ uint32_t CPR1_3L;
+ uint32_t CPR1_3U;
+
+ uint32_t CPR2_0L;
+ uint32_t CPR2_0U;
+ uint32_t CPR2_1L;
+ uint32_t CPR2_1U;
+ uint32_t CPR2_2L;
+ uint32_t CPR2_2U;
+ uint32_t CPR2_3L;
+ uint32_t CPR2_3U;
+
+ uint32_t CPR3_0L;
+ uint32_t CPR3_0U;
+ uint32_t CPR3_1L;
+ uint32_t CPR3_1U;
+ uint32_t CPR3_2L;
+ uint32_t CPR3_2U;
+ uint32_t CPR3_3L;
+ uint32_t CPR3_3U;
+
+ uint32_t DPM0;
+ uint32_t DPM1;
+ uint32_t DPM2;
+ uint32_t DPM3;
+
+ uint32_t CPM0;
+ uint32_t CPM1;
+ uint32_t CPM2;
+ uint32_t CPM3;
+
+ /* Memory Management Registers */
+ uint32_t MMU_CON;
+ uint32_t MMU_ASI;
+ uint32_t MMU_TVA;
+ uint32_t MMU_TPA;
+ uint32_t MMU_TPX;
+ uint32_t MMU_TFA;
+ /* {1.3.1 only */
+ uint32_t BMACON;
+ uint32_t SMACON;
+ uint32_t DIEAR;
+ uint32_t DIETR;
+ uint32_t CCDIER;
+ uint32_t MIECON;
+ uint32_t PIEAR;
+ uint32_t PIETR;
+ uint32_t CCPIER;
+ /*} */
+ /* Debug Registers */
+ uint32_t DBGSR;
+ uint32_t EXEVT;
+ uint32_t CREVT;
+ uint32_t SWEVT;
+ uint32_t TR0EVT;
+ uint32_t TR1EVT;
+ uint32_t DMS;
+ uint32_t DCX;
+ uint32_t DBGTCR;
+ uint32_t CCTRL;
+ uint32_t CCNT;
+ uint32_t ICNT;
+ uint32_t M1CNT;
+ uint32_t M2CNT;
+ uint32_t M3CNT;
+ /* Floating Point Registers */
+ float_status fp_status;
+ /* QEMU */
+ int error_code;
+ uint32_t hflags; /* CPU State */
+
+ CPU_COMMON
+
+ /* Internal CPU feature flags. */
+ uint64_t features;
+
+ const tricore_def_t *cpu_model;
+ void *irq[8];
+ struct QEMUTimer *timer; /* Internal timer */
+};
+
+/**
+ * TriCoreCPU:
+ * @env: #CPUTriCoreState
+ *
+ * A TriCore CPU.
+ */
+struct TriCoreCPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPUTriCoreState env;
+};
+
+static inline TriCoreCPU *tricore_env_get_cpu(CPUTriCoreState *env)
+{
+ return TRICORE_CPU(container_of(env, TriCoreCPU, env));
+}
+
+#define ENV_GET_CPU(e) CPU(tricore_env_get_cpu(e))
+
+#define ENV_OFFSET offsetof(TriCoreCPU, env)
+
+hwaddr tricore_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+void tricore_cpu_dump_state(CPUState *cpu, FILE *f,
+ fprintf_function cpu_fprintf, int flags);
+
+
+#define MASK_PCXI_PCPN 0xff000000
+#define MASK_PCXI_PIE 0x00800000
+#define MASK_PCXI_UL 0x00400000
+#define MASK_PCXI_PCXS 0x000f0000
+#define MASK_PCXI_PCXO 0x0000ffff
+
+#define MASK_PSW_USB 0xff000000
+#define MASK_USB_C 0x80000000
+#define MASK_USB_V 0x40000000
+#define MASK_USB_SV 0x20000000
+#define MASK_USB_AV 0x10000000
+#define MASK_USB_SAV 0x08000000
+#define MASK_PSW_PRS 0x00003000
+#define MASK_PSW_IO 0x00000c00
+#define MASK_PSW_IS 0x00000200
+#define MASK_PSW_GW 0x00000100
+#define MASK_PSW_CDE 0x00000080
+#define MASK_PSW_CDC 0x0000007f
+#define MASK_PSW_FPU_RM 0x3000000
+
+#define MASK_SYSCON_PRO_TEN 0x2
+#define MASK_SYSCON_FCD_SF 0x1
+
+#define MASK_CPUID_MOD 0xffff0000
+#define MASK_CPUID_MOD_32B 0x0000ff00
+#define MASK_CPUID_REV 0x000000ff
+
+#define MASK_ICR_PIPN 0x00ff0000
+#define MASK_ICR_IE 0x00000100
+#define MASK_ICR_CCPN 0x000000ff
+
+#define MASK_FCX_FCXS 0x000f0000
+#define MASK_FCX_FCXO 0x0000ffff
+
+#define MASK_LCX_LCXS 0x000f0000
+#define MASK_LCX_LCX0 0x0000ffff
+
+#define MASK_DBGSR_DE 0x1
+#define MASK_DBGSR_HALT 0x6
+#define MASK_DBGSR_SUSP 0x10
+#define MASK_DBGSR_PREVSUSP 0x20
+#define MASK_DBGSR_PEVT 0x40
+#define MASK_DBGSR_EVTSRC 0x1f00
+
+#define TRICORE_HFLAG_KUU 0x3
+#define TRICORE_HFLAG_UM0 0x00002 /* user mode-0 flag */
+#define TRICORE_HFLAG_UM1 0x00001 /* user mode-1 flag */
+#define TRICORE_HFLAG_SM 0x00000 /* kernel mode flag */
+
+enum tricore_features {
+ TRICORE_FEATURE_13,
+ TRICORE_FEATURE_131,
+ TRICORE_FEATURE_16,
+ TRICORE_FEATURE_161,
+};
+
+static inline int tricore_feature(CPUTriCoreState *env, int feature)
+{
+ return (env->features & (1ULL << feature)) != 0;
+}
+
+/* TriCore Traps Classes*/
+enum {
+ TRAPC_NONE = -1,
+ TRAPC_MMU = 0,
+ TRAPC_PROT = 1,
+ TRAPC_INSN_ERR = 2,
+ TRAPC_CTX_MNG = 3,
+ TRAPC_SYSBUS = 4,
+ TRAPC_ASSERT = 5,
+ TRAPC_SYSCALL = 6,
+ TRAPC_NMI = 7,
+ TRAPC_IRQ = 8
+};
+
+/* Class 0 TIN */
+enum {
+ TIN0_VAF = 0,
+ TIN0_VAP = 1,
+};
+
+/* Class 1 TIN */
+enum {
+ TIN1_PRIV = 1,
+ TIN1_MPR = 2,
+ TIN1_MPW = 3,
+ TIN1_MPX = 4,
+ TIN1_MPP = 5,
+ TIN1_MPN = 6,
+ TIN1_GRWP = 7,
+};
+
+/* Class 2 TIN */
+enum {
+ TIN2_IOPC = 1,
+ TIN2_UOPC = 2,
+ TIN2_OPD = 3,
+ TIN2_ALN = 4,
+ TIN2_MEM = 5,
+};
+
+/* Class 3 TIN */
+enum {
+ TIN3_FCD = 1,
+ TIN3_CDO = 2,
+ TIN3_CDU = 3,
+ TIN3_FCU = 4,
+ TIN3_CSU = 5,
+ TIN3_CTYP = 6,
+ TIN3_NEST = 7,
+};
+
+/* Class 4 TIN */
+enum {
+ TIN4_PSE = 1,
+ TIN4_DSE = 2,
+ TIN4_DAE = 3,
+ TIN4_CAE = 4,
+ TIN4_PIE = 5,
+ TIN4_DIE = 6,
+};
+
+/* Class 5 TIN */
+enum {
+ TIN5_OVF = 1,
+ TIN5_SOVF = 1,
+};
+
+/* Class 6 TIN
+ *
+ * Is always TIN6_SYS
+ */
+
+/* Class 7 TIN */
+enum {
+ TIN7_NMI = 0,
+};
+
+uint32_t psw_read(CPUTriCoreState *env);
+void psw_write(CPUTriCoreState *env, uint32_t val);
+
+void fpu_set_state(CPUTriCoreState *env);
+
+#define MMU_USER_IDX 2
+
+void tricore_cpu_list(FILE *f, fprintf_function cpu_fprintf);
+
+#define cpu_signal_handler cpu_tricore_signal_handler
+#define cpu_list tricore_cpu_list
+
+static inline int cpu_mmu_index(CPUTriCoreState *env, bool ifetch)
+{
+ return 0;
+}
+
+
+
+#include "exec/cpu-all.h"
+
+enum {
+ /* 1 bit to define user level / supervisor access */
+ ACCESS_USER = 0x00,
+ ACCESS_SUPER = 0x01,
+ /* 1 bit to indicate direction */
+ ACCESS_STORE = 0x02,
+ /* Type of instruction that generated the access */
+ ACCESS_CODE = 0x10, /* Code fetch access */
+ ACCESS_INT = 0x20, /* Integer load/store access */
+ ACCESS_FLOAT = 0x30, /* floating point load/store access */
+};
+
+void cpu_state_reset(CPUTriCoreState *s);
+void tricore_tcg_init(void);
+int cpu_tricore_signal_handler(int host_signum, void *pinfo, void *puc);
+
+static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *flags)
+{
+ *pc = env->PC;
+ *cs_base = 0;
+ *flags = 0;
+}
+
+TriCoreCPU *cpu_tricore_init(const char *cpu_model);
+
+#define cpu_init(cpu_model) CPU(cpu_tricore_init(cpu_model))
+
+
+/* helpers.c */
+int cpu_tricore_handle_mmu_fault(CPUState *cpu, target_ulong address,
+ int rw, int mmu_idx);
+#define cpu_handle_mmu_fault cpu_tricore_handle_mmu_fault
+
+#endif /* TRICORE_CPU_H */
diff --git a/target/tricore/csfr.def b/target/tricore/csfr.def
new file mode 100644
index 0000000000..05c45dd628
--- /dev/null
+++ b/target/tricore/csfr.def
@@ -0,0 +1,124 @@
+/* A(ll) access permitted
+ R(ead only) access
+ E(nd init protected) access
+
+ A|R|E(offset, register, feature introducing reg)
+
+ NOTE: PSW is handled as a special case in gen_mtcr/mfcr */
+
+A(0xfe00, PCXI, TRICORE_FEATURE_13)
+A(0xfe08, PC, TRICORE_FEATURE_13)
+A(0xfe14, SYSCON, TRICORE_FEATURE_13)
+R(0xfe18, CPU_ID, TRICORE_FEATURE_13)
+E(0xfe20, BIV, TRICORE_FEATURE_13)
+E(0xfe24, BTV, TRICORE_FEATURE_13)
+E(0xfe28, ISP, TRICORE_FEATURE_13)
+A(0xfe2c, ICR, TRICORE_FEATURE_13)
+A(0xfe38, FCX, TRICORE_FEATURE_13)
+A(0xfe3c, LCX, TRICORE_FEATURE_13)
+E(0x9400, COMPAT, TRICORE_FEATURE_131)
+/* memory protection register */
+A(0xC000, DPR0_0L, TRICORE_FEATURE_13)
+A(0xC004, DPR0_0U, TRICORE_FEATURE_13)
+A(0xC008, DPR0_1L, TRICORE_FEATURE_13)
+A(0xC00C, DPR0_1U, TRICORE_FEATURE_13)
+A(0xC010, DPR0_2L, TRICORE_FEATURE_13)
+A(0xC014, DPR0_2U, TRICORE_FEATURE_13)
+A(0xC018, DPR0_3L, TRICORE_FEATURE_13)
+A(0xC01C, DPR0_3U, TRICORE_FEATURE_13)
+A(0xC400, DPR1_0L, TRICORE_FEATURE_13)
+A(0xC404, DPR1_0U, TRICORE_FEATURE_13)
+A(0xC408, DPR1_1L, TRICORE_FEATURE_13)
+A(0xC40C, DPR1_1U, TRICORE_FEATURE_13)
+A(0xC410, DPR1_2L, TRICORE_FEATURE_13)
+A(0xC414, DPR1_2U, TRICORE_FEATURE_13)
+A(0xC418, DPR1_3L, TRICORE_FEATURE_13)
+A(0xC41C, DPR1_3U, TRICORE_FEATURE_13)
+A(0xC800, DPR2_0L, TRICORE_FEATURE_13)
+A(0xC804, DPR2_0U, TRICORE_FEATURE_13)
+A(0xC808, DPR2_1L, TRICORE_FEATURE_13)
+A(0xC80C, DPR2_1U, TRICORE_FEATURE_13)
+A(0xC810, DPR2_2L, TRICORE_FEATURE_13)
+A(0xC814, DPR2_2U, TRICORE_FEATURE_13)
+A(0xC818, DPR2_3L, TRICORE_FEATURE_13)
+A(0xC81C, DPR2_3U, TRICORE_FEATURE_13)
+A(0xCC00, DPR3_0L, TRICORE_FEATURE_13)
+A(0xCC04, DPR3_0U, TRICORE_FEATURE_13)
+A(0xCC08, DPR3_1L, TRICORE_FEATURE_13)
+A(0xCC0C, DPR3_1U, TRICORE_FEATURE_13)
+A(0xCC10, DPR3_2L, TRICORE_FEATURE_13)
+A(0xCC14, DPR3_2U, TRICORE_FEATURE_13)
+A(0xCC18, DPR3_3L, TRICORE_FEATURE_13)
+A(0xCC1C, DPR3_3U, TRICORE_FEATURE_13)
+A(0xD000, CPR0_0L, TRICORE_FEATURE_13)
+A(0xD004, CPR0_0U, TRICORE_FEATURE_13)
+A(0xD008, CPR0_1L, TRICORE_FEATURE_13)
+A(0xD00C, CPR0_1U, TRICORE_FEATURE_13)
+A(0xD010, CPR0_2L, TRICORE_FEATURE_13)
+A(0xD014, CPR0_2U, TRICORE_FEATURE_13)
+A(0xD018, CPR0_3L, TRICORE_FEATURE_13)
+A(0xD01C, CPR0_3U, TRICORE_FEATURE_13)
+A(0xD400, CPR1_0L, TRICORE_FEATURE_13)
+A(0xD404, CPR1_0U, TRICORE_FEATURE_13)
+A(0xD408, CPR1_1L, TRICORE_FEATURE_13)
+A(0xD40C, CPR1_1U, TRICORE_FEATURE_13)
+A(0xD410, CPR1_2L, TRICORE_FEATURE_13)
+A(0xD414, CPR1_2U, TRICORE_FEATURE_13)
+A(0xD418, CPR1_3L, TRICORE_FEATURE_13)
+A(0xD41C, CPR1_3U, TRICORE_FEATURE_13)
+A(0xD800, CPR2_0L, TRICORE_FEATURE_13)
+A(0xD804, CPR2_0U, TRICORE_FEATURE_13)
+A(0xD808, CPR2_1L, TRICORE_FEATURE_13)
+A(0xD80C, CPR2_1U, TRICORE_FEATURE_13)
+A(0xD810, CPR2_2L, TRICORE_FEATURE_13)
+A(0xD814, CPR2_2U, TRICORE_FEATURE_13)
+A(0xD818, CPR2_3L, TRICORE_FEATURE_13)
+A(0xD81C, CPR2_3U, TRICORE_FEATURE_13)
+A(0xDC00, CPR3_0L, TRICORE_FEATURE_13)
+A(0xDC04, CPR3_0U, TRICORE_FEATURE_13)
+A(0xDC08, CPR3_1L, TRICORE_FEATURE_13)
+A(0xDC0C, CPR3_1U, TRICORE_FEATURE_13)
+A(0xDC10, CPR3_2L, TRICORE_FEATURE_13)
+A(0xDC14, CPR3_2U, TRICORE_FEATURE_13)
+A(0xDC18, CPR3_3L, TRICORE_FEATURE_13)
+A(0xDC1C, CPR3_3U, TRICORE_FEATURE_13)
+A(0xE000, DPM0, TRICORE_FEATURE_13)
+A(0xE080, DPM1, TRICORE_FEATURE_13)
+A(0xE100, DPM2, TRICORE_FEATURE_13)
+A(0xE180, DPM3, TRICORE_FEATURE_13)
+A(0xE200, CPM0, TRICORE_FEATURE_13)
+A(0xE280, CPM1, TRICORE_FEATURE_13)
+A(0xE300, CPM2, TRICORE_FEATURE_13)
+A(0xE380, CPM3, TRICORE_FEATURE_13)
+/* memory management registers */
+A(0x8000, MMU_CON, TRICORE_FEATURE_13)
+A(0x8004, MMU_ASI, TRICORE_FEATURE_13)
+A(0x800C, MMU_TVA, TRICORE_FEATURE_13)
+A(0x8010, MMU_TPA, TRICORE_FEATURE_13)
+A(0x8014, MMU_TPX, TRICORE_FEATURE_13)
+A(0x8018, MMU_TFA, TRICORE_FEATURE_13)
+E(0x9004, BMACON, TRICORE_FEATURE_131)
+E(0x900C, SMACON, TRICORE_FEATURE_131)
+A(0x9020, DIEAR, TRICORE_FEATURE_131)
+A(0x9024, DIETR, TRICORE_FEATURE_131)
+A(0x9028, CCDIER, TRICORE_FEATURE_131)
+E(0x9044, MIECON, TRICORE_FEATURE_131)
+A(0x9210, PIEAR, TRICORE_FEATURE_131)
+A(0x9214, PIETR, TRICORE_FEATURE_131)
+A(0x9218, CCPIER, TRICORE_FEATURE_131)
+/* debug registers */
+A(0xFD00, DBGSR, TRICORE_FEATURE_13)
+A(0xFD08, EXEVT, TRICORE_FEATURE_13)
+A(0xFD0C, CREVT, TRICORE_FEATURE_13)
+A(0xFD10, SWEVT, TRICORE_FEATURE_13)
+A(0xFD20, TR0EVT, TRICORE_FEATURE_13)
+A(0xFD24, TR1EVT, TRICORE_FEATURE_13)
+A(0xFD40, DMS, TRICORE_FEATURE_13)
+A(0xFD44, DCX, TRICORE_FEATURE_13)
+A(0xFD48, DBGTCR, TRICORE_FEATURE_131)
+A(0xFC00, CCTRL, TRICORE_FEATURE_131)
+A(0xFC04, CCNT, TRICORE_FEATURE_131)
+A(0xFC08, ICNT, TRICORE_FEATURE_131)
+A(0xFC0C, M1CNT, TRICORE_FEATURE_131)
+A(0xFC10, M2CNT, TRICORE_FEATURE_131)
+A(0xFC14, M3CNT, TRICORE_FEATURE_131)
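The table above is an X-macro list: each entry pairs a CSFR offset with a register name and the first core feature level that provides it (the E() entries appear to be the ENDINIT-protected registers). A minimal sketch of how such a .def file is typically consumed follows; the macro bodies are an assumption for illustration, not necessarily the exact definitions used by cpu.h or translate.c:

    /* Illustrative consumer (assumed): declare one 32-bit field per CSFR. */
    #define A(ADDR, NAME, FEATURE) uint32_t NAME;
    #define E(ADDR, NAME, FEATURE) A(ADDR, NAME, FEATURE)
    #include "csfr.def"
    #undef A
    #undef E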
diff --git a/target/tricore/fpu_helper.c b/target/tricore/fpu_helper.c
new file mode 100644
index 0000000000..98fe9472b1
--- /dev/null
+++ b/target/tricore/fpu_helper.c
@@ -0,0 +1,217 @@
+/*
+ * TriCore emulation for qemu: fpu helper.
+ *
+ * Copyright (c) 2016 Bastian Koppelmann University of Paderborn
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+
+#define ADD_NAN 0x7cf00001
+#define DIV_NAN 0x7fc00008
+#define MUL_NAN 0x7fc00002
+#define FPU_FS PSW_USB_C
+#define FPU_FI PSW_USB_V
+#define FPU_FV PSW_USB_SV
+#define FPU_FZ PSW_USB_AV
+#define FPU_FU PSW_USB_SAV
+
+/* we don't care about input_denormal */
+static inline uint8_t f_get_excp_flags(CPUTriCoreState *env)
+{
+ return get_float_exception_flags(&env->fp_status)
+ & (float_flag_invalid
+ | float_flag_overflow
+ | float_flag_underflow
+ | float_flag_output_denormal
+ | float_flag_divbyzero
+ | float_flag_inexact);
+}
+
+static inline bool f_is_denormal(float32 arg)
+{
+ return float32_is_zero_or_denormal(arg) && !float32_is_zero(arg);
+}
+
+static void f_update_psw_flags(CPUTriCoreState *env, uint8_t flags)
+{
+ uint8_t some_excp = 0;
+ set_float_exception_flags(0, &env->fp_status);
+
+ if (flags & float_flag_invalid) {
+ env->FPU_FI = 1 << 31;
+ some_excp = 1;
+ }
+
+ if (flags & float_flag_overflow) {
+ env->FPU_FV = 1 << 31;
+ some_excp = 1;
+ }
+
+ if (flags & float_flag_underflow || flags & float_flag_output_denormal) {
+ env->FPU_FU = 1 << 31;
+ some_excp = 1;
+ }
+
+ if (flags & float_flag_divbyzero) {
+ env->FPU_FZ = 1 << 31;
+ some_excp = 1;
+ }
+
+ if (flags & float_flag_inexact || flags & float_flag_output_denormal) {
+ env->PSW |= 1 << 26;
+ some_excp = 1;
+ }
+
+ env->FPU_FS = some_excp;
+}
+
+#define FADD_SUB(op) \
+uint32_t helper_f##op(CPUTriCoreState *env, uint32_t r1, uint32_t r2) \
+{ \
+ float32 arg1 = make_float32(r1); \
+ float32 arg2 = make_float32(r2); \
+ uint32_t flags; \
+ float32 f_result; \
+ \
+ f_result = float32_##op(arg2, arg1, &env->fp_status); \
+ flags = f_get_excp_flags(env); \
+ if (flags) { \
+ /* If the output is a NaN, but the inputs aren't, \
+ we return a unique value. */ \
+ if ((flags & float_flag_invalid) \
+ && !float32_is_any_nan(arg1) \
+ && !float32_is_any_nan(arg2)) { \
+ f_result = ADD_NAN; \
+ } \
+ f_update_psw_flags(env, flags); \
+ } else { \
+ env->FPU_FS = 0; \
+ } \
+ return (uint32_t)f_result; \
+}
+FADD_SUB(add)
+FADD_SUB(sub)
+
+uint32_t helper_fmul(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
+{
+ uint32_t flags;
+ float32 arg1 = make_float32(r1);
+ float32 arg2 = make_float32(r2);
+ float32 f_result;
+
+ f_result = float32_mul(arg1, arg2, &env->fp_status);
+
+ flags = f_get_excp_flags(env);
+ if (flags) {
+ /* If the output is a NaN, but the inputs aren't,
+ we return a unique value. */
+ if ((flags & float_flag_invalid)
+ && !float32_is_any_nan(arg1)
+ && !float32_is_any_nan(arg2)) {
+ f_result = MUL_NAN;
+ }
+ f_update_psw_flags(env, flags);
+ } else {
+ env->FPU_FS = 0;
+ }
+ return (uint32_t)f_result;
+
+}
+
+uint32_t helper_fdiv(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
+{
+ uint32_t flags;
+ float32 arg1 = make_float32(r1);
+ float32 arg2 = make_float32(r2);
+ float32 f_result;
+
+ f_result = float32_div(arg1, arg2 , &env->fp_status);
+
+ flags = f_get_excp_flags(env);
+ if (flags) {
+ /* If the output is a NaN, but the inputs aren't,
+ we return a unique value. */
+ if ((flags & float_flag_invalid)
+ && !float32_is_any_nan(arg1)
+ && !float32_is_any_nan(arg2)) {
+ f_result = DIV_NAN;
+ }
+ f_update_psw_flags(env, flags);
+ } else {
+ env->FPU_FS = 0;
+ }
+
+ return (uint32_t)f_result;
+}
+
+uint32_t helper_fcmp(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
+{
+ uint32_t result, flags;
+ float32 arg1 = make_float32(r1);
+ float32 arg2 = make_float32(r2);
+
+ set_flush_inputs_to_zero(0, &env->fp_status);
+
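+    /* float32_compare_quiet() returns float_relation_less (-1), _equal (0),
+       _greater (1) or _unordered (2); the shift below maps these onto result
+       bits 0..3. Input flushing is disabled above so that denormal operands
+       are still visible for bits 4 and 5. */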
+ result = 1 << (float32_compare_quiet(arg1, arg2, &env->fp_status) + 1);
+ result |= f_is_denormal(arg1) << 4;
+ result |= f_is_denormal(arg2) << 5;
+
+ flags = f_get_excp_flags(env);
+ if (flags) {
+ f_update_psw_flags(env, flags);
+ } else {
+ env->FPU_FS = 0;
+ }
+
+ set_flush_inputs_to_zero(1, &env->fp_status);
+ return result;
+}
+
+uint32_t helper_ftoi(CPUTriCoreState *env, uint32_t arg)
+{
+ float32 f_arg = make_float32(arg);
+ int32_t result, flags;
+
+ result = float32_to_int32(f_arg, &env->fp_status);
+
+ flags = f_get_excp_flags(env);
+ if (flags) {
+ if (float32_is_any_nan(f_arg)) {
+ result = 0;
+ }
+ f_update_psw_flags(env, flags);
+ } else {
+ env->FPU_FS = 0;
+ }
+ return (uint32_t)result;
+}
+
+uint32_t helper_itof(CPUTriCoreState *env, uint32_t arg)
+{
+ float32 f_result;
+ uint32_t flags;
+ f_result = int32_to_float32(arg, &env->fp_status);
+
+ flags = f_get_excp_flags(env);
+ if (flags) {
+ f_update_psw_flags(env, flags);
+ } else {
+ env->FPU_FS = 0;
+ }
+ return (uint32_t)f_result;
+}
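helper_fcmp() above packs its answer into six result bits, and every helper folds the IEEE exception flags into the five FPU status bits aliased onto the PSW USB fields (FS/FI/FV/FZ/FU), with FS acting as the "some exception occurred" summary. A hedged sketch of decoding the comparison result; decode_fcmp() is purely illustrative and not part of this patch:

    /* Illustrative only: interpret the value returned by helper_fcmp(). */
    static const char *decode_fcmp(uint32_t res)
    {
        /* bits 4 and 5 additionally flag a denormal first/second operand */
        if (res & (1u << 3)) {
            return "unordered";   /* at least one operand was a NaN */
        }
        if (res & (1u << 0)) {
            return "less";
        }
        if (res & (1u << 1)) {
            return "equal";
        }
        return "greater";         /* bit 2 */
    }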
diff --git a/target/tricore/helper.c b/target/tricore/helper.c
new file mode 100644
index 0000000000..3118905eca
--- /dev/null
+++ b/target/tricore/helper.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "exec/exec-all.h"
+
+enum {
+ TLBRET_DIRTY = -4,
+ TLBRET_INVALID = -3,
+ TLBRET_NOMATCH = -2,
+ TLBRET_BADADDR = -1,
+ TLBRET_MATCH = 0
+};
+
+#if defined(CONFIG_SOFTMMU)
+static int get_physical_address(CPUTriCoreState *env, hwaddr *physical,
+ int *prot, target_ulong address,
+ int rw, int access_type)
+{
+ int ret = TLBRET_MATCH;
+
+ *physical = address & 0xFFFFFFFF;
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+
+ return ret;
+}
+#endif
+
+/* TODO: Add exception support */
+static void raise_mmu_exception(CPUTriCoreState *env, target_ulong address,
+ int rw, int tlb_error)
+{
+}
+
+int cpu_tricore_handle_mmu_fault(CPUState *cs, target_ulong address,
+ int rw, int mmu_idx)
+{
+ TriCoreCPU *cpu = TRICORE_CPU(cs);
+ CPUTriCoreState *env = &cpu->env;
+ hwaddr physical;
+ int prot;
+ int access_type;
+ int ret = 0;
+
+ rw &= 1;
+ access_type = ACCESS_INT;
+ ret = get_physical_address(env, &physical, &prot,
+ address, rw, access_type);
+    qemu_log_mask(CPU_LOG_MMU, "%s address=" TARGET_FMT_lx " ret %d physical "
+                  TARGET_FMT_plx " prot %d\n",
+                  __func__, address, ret, physical, prot);
+
+ if (ret == TLBRET_MATCH) {
+ tlb_set_page(cs, address & TARGET_PAGE_MASK,
+ physical & TARGET_PAGE_MASK, prot | PAGE_EXEC,
+ mmu_idx, TARGET_PAGE_SIZE);
+ ret = 0;
+ } else if (ret < 0) {
+ raise_mmu_exception(env, address, rw, ret);
+ ret = 1;
+ }
+
+ return ret;
+}
+
+TriCoreCPU *cpu_tricore_init(const char *cpu_model)
+{
+ return TRICORE_CPU(cpu_generic_init(TYPE_TRICORE_CPU, cpu_model));
+}
+
+static void tricore_cpu_list_entry(gpointer data, gpointer user_data)
+{
+ ObjectClass *oc = data;
+ CPUListState *s = user_data;
+ const char *typename;
+ char *name;
+
+ typename = object_class_get_name(oc);
+ name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_TRICORE_CPU));
+ (*s->cpu_fprintf)(s->file, " %s\n",
+ name);
+ g_free(name);
+}
+
+void tricore_cpu_list(FILE *f, fprintf_function cpu_fprintf)
+{
+ CPUListState s = {
+ .file = f,
+ .cpu_fprintf = cpu_fprintf,
+ };
+ GSList *list;
+
+ list = object_class_get_list(TYPE_TRICORE_CPU, false);
+ (*cpu_fprintf)(f, "Available CPUs:\n");
+ g_slist_foreach(list, tricore_cpu_list_entry, &s);
+ g_slist_free(list);
+}
+
+void fpu_set_state(CPUTriCoreState *env)
+{
+ set_float_rounding_mode(env->PSW & MASK_PSW_FPU_RM, &env->fp_status);
+ set_flush_inputs_to_zero(1, &env->fp_status);
+ set_flush_to_zero(1, &env->fp_status);
+ set_default_nan_mode(1, &env->fp_status);
+}
+
+uint32_t psw_read(CPUTriCoreState *env)
+{
+ /* clear all USB bits */
+ env->PSW &= 0x6ffffff;
+ /* now set them from the cache */
+ env->PSW |= ((env->PSW_USB_C != 0) << 31);
+ env->PSW |= ((env->PSW_USB_V & (1 << 31)) >> 1);
+ env->PSW |= ((env->PSW_USB_SV & (1 << 31)) >> 2);
+ env->PSW |= ((env->PSW_USB_AV & (1 << 31)) >> 3);
+ env->PSW |= ((env->PSW_USB_SAV & (1 << 31)) >> 4);
+
+ return env->PSW;
+}
+
+void psw_write(CPUTriCoreState *env, uint32_t val)
+{
+ env->PSW_USB_C = (val & MASK_USB_C);
+ env->PSW_USB_V = (val & MASK_USB_V) << 1;
+ env->PSW_USB_SV = (val & MASK_USB_SV) << 2;
+ env->PSW_USB_AV = (val & MASK_USB_AV) << 3;
+ env->PSW_USB_SAV = (val & MASK_USB_SAV) << 4;
+ env->PSW = val;
+
+ fpu_set_state(env);
+}
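psw_read() and psw_write() above keep the five user status bits cached in separate fields, each holding its live value in bit 31, and fold them back into PSW bits 31..27 on read (C, V, SV, AV, SAV in that order). A minimal sketch of the resulting layout, assuming the MASK_USB_* constants select exactly those bits:

    /* Illustrative only: where the cached flags land in the architectural PSW. */
    uint32_t psw = psw_read(env);
    bool c   = psw & (1u << 31);   /* PSW_USB_C   */
    bool v   = psw & (1u << 30);   /* PSW_USB_V   */
    bool sv  = psw & (1u << 29);   /* PSW_USB_SV  */
    bool av  = psw & (1u << 28);   /* PSW_USB_AV  */
    bool sav = psw & (1u << 27);   /* PSW_USB_SAV */
    psw_write(env, psw);           /* scatters the same bits back and
                                      re-derives the FPU rounding state */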
diff --git a/target/tricore/helper.h b/target/tricore/helper.h
new file mode 100644
index 0000000000..9333e161ab
--- /dev/null
+++ b/target/tricore/helper.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* Arithmetic */
+DEF_HELPER_3(add_ssov, i32, env, i32, i32)
+DEF_HELPER_3(add64_ssov, i64, env, i64, i64)
+DEF_HELPER_3(add_suov, i32, env, i32, i32)
+DEF_HELPER_3(add_h_ssov, i32, env, i32, i32)
+DEF_HELPER_3(add_h_suov, i32, env, i32, i32)
+DEF_HELPER_4(addr_h_ssov, i32, env, i64, i32, i32)
+DEF_HELPER_4(addsur_h_ssov, i32, env, i64, i32, i32)
+DEF_HELPER_3(sub_ssov, i32, env, i32, i32)
+DEF_HELPER_3(sub64_ssov, i64, env, i64, i64)
+DEF_HELPER_3(sub_suov, i32, env, i32, i32)
+DEF_HELPER_3(sub_h_ssov, i32, env, i32, i32)
+DEF_HELPER_3(sub_h_suov, i32, env, i32, i32)
+DEF_HELPER_4(subr_h_ssov, i32, env, i64, i32, i32)
+DEF_HELPER_4(subadr_h_ssov, i32, env, i64, i32, i32)
+DEF_HELPER_3(mul_ssov, i32, env, i32, i32)
+DEF_HELPER_3(mul_suov, i32, env, i32, i32)
+DEF_HELPER_3(sha_ssov, i32, env, i32, i32)
+DEF_HELPER_3(absdif_ssov, i32, env, i32, i32)
+DEF_HELPER_4(madd32_ssov, i32, env, i32, i32, i32)
+DEF_HELPER_4(madd32_suov, i32, env, i32, i32, i32)
+DEF_HELPER_4(madd64_ssov, i64, env, i32, i64, i32)
+DEF_HELPER_5(madd64_q_ssov, i64, env, i64, i32, i32, i32)
+DEF_HELPER_3(madd32_q_add_ssov, i32, env, i64, i64)
+DEF_HELPER_5(maddr_q_ssov, i32, env, i32, i32, i32, i32)
+DEF_HELPER_4(madd64_suov, i64, env, i32, i64, i32)
+DEF_HELPER_4(msub32_ssov, i32, env, i32, i32, i32)
+DEF_HELPER_4(msub32_suov, i32, env, i32, i32, i32)
+DEF_HELPER_4(msub64_ssov, i64, env, i32, i64, i32)
+DEF_HELPER_5(msub64_q_ssov, i64, env, i64, i32, i32, i32)
+DEF_HELPER_3(msub32_q_sub_ssov, i32, env, i64, i64)
+DEF_HELPER_5(msubr_q_ssov, i32, env, i32, i32, i32, i32)
+DEF_HELPER_4(msub64_suov, i64, env, i32, i64, i32)
+DEF_HELPER_3(absdif_h_ssov, i32, env, i32, i32)
+DEF_HELPER_2(abs_ssov, i32, env, i32)
+DEF_HELPER_2(abs_h_ssov, i32, env, i32)
+/* hword/byte arithmetic */
+DEF_HELPER_2(abs_b, i32, env, i32)
+DEF_HELPER_2(abs_h, i32, env, i32)
+DEF_HELPER_3(absdif_b, i32, env, i32, i32)
+DEF_HELPER_3(absdif_h, i32, env, i32, i32)
+DEF_HELPER_4(addr_h, i32, env, i64, i32, i32)
+DEF_HELPER_4(addsur_h, i32, env, i64, i32, i32)
+DEF_HELPER_5(maddr_q, i32, env, i32, i32, i32, i32)
+DEF_HELPER_3(add_b, i32, env, i32, i32)
+DEF_HELPER_3(add_h, i32, env, i32, i32)
+DEF_HELPER_3(sub_b, i32, env, i32, i32)
+DEF_HELPER_3(sub_h, i32, env, i32, i32)
+DEF_HELPER_4(subr_h, i32, env, i64, i32, i32)
+DEF_HELPER_4(subadr_h, i32, env, i64, i32, i32)
+DEF_HELPER_5(msubr_q, i32, env, i32, i32, i32, i32)
+DEF_HELPER_FLAGS_2(eq_b, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(eq_h, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(eqany_b, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(eqany_h, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(lt_b, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(lt_bu, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(lt_h, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(lt_hu, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(max_b, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(max_bu, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(max_h, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(max_hu, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(ixmax, TCG_CALL_NO_RWG_SE, i64, i64, i32)
+DEF_HELPER_FLAGS_2(ixmax_u, TCG_CALL_NO_RWG_SE, i64, i64, i32)
+DEF_HELPER_FLAGS_2(min_b, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(min_bu, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(min_h, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(min_hu, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(ixmin, TCG_CALL_NO_RWG_SE, i64, i64, i32)
+DEF_HELPER_FLAGS_2(ixmin_u, TCG_CALL_NO_RWG_SE, i64, i64, i32)
+/* count leading ... */
+DEF_HELPER_FLAGS_1(clo, TCG_CALL_NO_RWG_SE, i32, i32)
+DEF_HELPER_FLAGS_1(clo_h, TCG_CALL_NO_RWG_SE, i32, i32)
+DEF_HELPER_FLAGS_1(clz, TCG_CALL_NO_RWG_SE, i32, i32)
+DEF_HELPER_FLAGS_1(clz_h, TCG_CALL_NO_RWG_SE, i32, i32)
+DEF_HELPER_FLAGS_1(cls, TCG_CALL_NO_RWG_SE, i32, i32)
+DEF_HELPER_FLAGS_1(cls_h, TCG_CALL_NO_RWG_SE, i32, i32)
+/* sh */
+DEF_HELPER_FLAGS_2(sh, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_2(sh_h, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_3(sha, i32, env, i32, i32)
+DEF_HELPER_2(sha_h, i32, i32, i32)
+/* merge/split/parity */
+DEF_HELPER_FLAGS_2(bmerge, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+DEF_HELPER_FLAGS_1(bsplit, TCG_CALL_NO_RWG_SE, i64, i32)
+DEF_HELPER_FLAGS_1(parity, TCG_CALL_NO_RWG_SE, i32, i32)
+/* float */
+DEF_HELPER_FLAGS_4(pack, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32, i32)
+DEF_HELPER_1(unpack, i64, i32)
+DEF_HELPER_3(fadd, i32, env, i32, i32)
+DEF_HELPER_3(fsub, i32, env, i32, i32)
+DEF_HELPER_3(fmul, i32, env, i32, i32)
+DEF_HELPER_3(fdiv, i32, env, i32, i32)
+DEF_HELPER_3(fcmp, i32, env, i32, i32)
+DEF_HELPER_2(ftoi, i32, env, i32)
+DEF_HELPER_2(itof, i32, env, i32)
+/* dvinit */
+DEF_HELPER_3(dvinit_b_13, i64, env, i32, i32)
+DEF_HELPER_3(dvinit_b_131, i64, env, i32, i32)
+DEF_HELPER_3(dvinit_h_13, i64, env, i32, i32)
+DEF_HELPER_3(dvinit_h_131, i64, env, i32, i32)
+DEF_HELPER_FLAGS_2(dvadj, TCG_CALL_NO_RWG_SE, i64, i64, i32)
+DEF_HELPER_FLAGS_2(dvstep, TCG_CALL_NO_RWG_SE, i64, i64, i32)
+DEF_HELPER_FLAGS_2(dvstep_u, TCG_CALL_NO_RWG_SE, i64, i64, i32)
+DEF_HELPER_3(divide, i64, env, i32, i32)
+DEF_HELPER_3(divide_u, i64, env, i32, i32)
+/* mulh */
+DEF_HELPER_FLAGS_5(mul_h, TCG_CALL_NO_RWG_SE, i64, i32, i32, i32, i32, i32)
+DEF_HELPER_FLAGS_5(mulm_h, TCG_CALL_NO_RWG_SE, i64, i32, i32, i32, i32, i32)
+DEF_HELPER_FLAGS_5(mulr_h, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32, i32, i32)
+/* crc32 */
+DEF_HELPER_FLAGS_2(crc32, TCG_CALL_NO_RWG_SE, i32, i32, i32)
+/* CSA */
+DEF_HELPER_2(call, void, env, i32)
+DEF_HELPER_1(ret, void, env)
+DEF_HELPER_2(bisr, void, env, i32)
+DEF_HELPER_1(rfe, void, env)
+DEF_HELPER_1(rfm, void, env)
+DEF_HELPER_2(ldlcx, void, env, i32)
+DEF_HELPER_2(lducx, void, env, i32)
+DEF_HELPER_2(stlcx, void, env, i32)
+DEF_HELPER_2(stucx, void, env, i32)
+DEF_HELPER_1(svlcx, void, env)
+DEF_HELPER_1(svucx, void, env)
+DEF_HELPER_1(rslcx, void, env)
+/* Address mode helper */
+DEF_HELPER_1(br_update, i32, i32)
+DEF_HELPER_2(circ_update, i32, i32, i32)
+/* PSW cache helper */
+DEF_HELPER_2(psw_write, void, env, i32)
+DEF_HELPER_1(psw_read, i32, env)
+/* Exceptions */
+DEF_HELPER_3(raise_exception_sync, noreturn, env, i32, i32)
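Each DEF_HELPER_*() line serves double duty: QEMU's helper machinery generates the C prototype (helper_&lt;name&gt;) from it and a matching gen_helper_&lt;name&gt;() call emitter for the translator. As an illustration, the first arithmetic entry pairs with a definition of roughly this shape in op_helper.c (parameter names are the implementer's choice; on TriCore target_ulong is 32 bits wide, so i32 and target_ulong line up):

    /* DEF_HELPER_3(add_ssov, i32, env, i32, i32) corresponds to: */
    uint32_t helper_add_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2);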
diff --git a/target/tricore/op_helper.c b/target/tricore/op_helper.c
new file mode 100644
index 0000000000..ac02e0a36b
--- /dev/null
+++ b/target/tricore/op_helper.c
@@ -0,0 +1,2842 @@
+/*
+ * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "qemu/host-utils.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include <zlib.h> /* for crc32 */
+
+
+/* Exception helpers */
+
+static void QEMU_NORETURN
+raise_exception_sync_internal(CPUTriCoreState *env, uint32_t class, int tin,
+ uintptr_t pc, uint32_t fcd_pc)
+{
+ CPUState *cs = CPU(tricore_env_get_cpu(env));
+ /* in case we come from a helper-call we need to restore the PC */
+ if (pc) {
+ cpu_restore_state(cs, pc);
+ }
+
+    /* The trap identification number (TIN) is loaded into d[15] */
+ env->gpr_d[15] = tin;
+
+ if (class == TRAPC_CTX_MNG && tin == TIN3_FCU) {
+        /* the upper context cannot be saved if the context list is empty */
+ } else {
+ helper_svucx(env);
+ }
+
+ /* The return address in a[11] is updated */
+ if (class == TRAPC_CTX_MNG && tin == TIN3_FCD) {
+ env->SYSCON |= MASK_SYSCON_FCD_SF;
+        /* when we run out of CSAs after saving a context, an FCD trap is
+           taken and the return address is the start of the trap handler
+           which used the last CSA */
+ env->gpr_a[11] = fcd_pc;
+ } else if (class == TRAPC_SYSCALL) {
+ env->gpr_a[11] = env->PC + 4;
+ } else {
+ env->gpr_a[11] = env->PC;
+ }
+ /* The stack pointer in A[10] is set to the Interrupt Stack Pointer (ISP)
+ when the processor was not previously using the interrupt stack
+ (in case of PSW.IS = 0). The stack pointer bit is set for using the
+ interrupt stack: PSW.IS = 1. */
+ if ((env->PSW & MASK_PSW_IS) == 0) {
+ env->gpr_a[10] = env->ISP;
+ }
+ env->PSW |= MASK_PSW_IS;
+    /* The I/O mode is set to Supervisor mode, which means all permissions
+       are enabled: PSW.IO = 10B. */
+ env->PSW |= (2 << 10);
+
+    /* The current Protection Register Set is set to 0: PSW.PRS = 00B. */
+ env->PSW &= ~MASK_PSW_PRS;
+
+    /* The Call Depth Counter (CDC) is cleared, and the call depth limit is
+       set to 64: PSW.CDC = 0000000B. */
+ env->PSW &= ~MASK_PSW_CDC;
+
+ /* Call Depth Counter is enabled, PSW.CDE = 1. */
+ env->PSW |= MASK_PSW_CDE;
+
+ /* Write permission to global registers A[0], A[1], A[8], A[9] is
+ disabled: PSW.GW = 0. */
+ env->PSW &= ~MASK_PSW_GW;
+
+    /* The interrupt system is globally disabled: ICR.IE = 0. The 'old'
+       ICR.IE and ICR.CCPN are saved. */
+
+ /* PCXI.PIE = ICR.IE */
+ env->PCXI = ((env->PCXI & ~MASK_PCXI_PIE) +
+ ((env->ICR & MASK_ICR_IE) << 15));
+ /* PCXI.PCPN = ICR.CCPN */
+ env->PCXI = (env->PCXI & 0xffffff) +
+ ((env->ICR & MASK_ICR_CCPN) << 24);
+ /* Update PC using the trap vector table */
+ env->PC = env->BTV | (class << 5);
+
+ cpu_loop_exit(cs);
+}
+
+void helper_raise_exception_sync(CPUTriCoreState *env, uint32_t class,
+ uint32_t tin)
+{
+ raise_exception_sync_internal(env, class, tin, 0, 0);
+}
+
+static void raise_exception_sync_helper(CPUTriCoreState *env, uint32_t class,
+ uint32_t tin, uintptr_t pc)
+{
+ raise_exception_sync_internal(env, class, tin, pc, 0);
+}
+
+/* Addressing mode helper */
+
+static uint16_t reverse16(uint16_t val)
+{
+ uint8_t high = (uint8_t)(val >> 8);
+ uint8_t low = (uint8_t)(val & 0xff);
+
+ uint16_t rh, rl;
+
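+    /* Classic bit-twiddling trick: the multiply spreads the byte's bits out,
+       the mask picks them up in reversed order and the modulus by 1023 folds
+       them back together, yielding the bit-reversed byte. */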
+ rl = (uint16_t)((high * 0x0202020202ULL & 0x010884422010ULL) % 1023);
+ rh = (uint16_t)((low * 0x0202020202ULL & 0x010884422010ULL) % 1023);
+
+ return (rh << 8) | rl;
+}
+
+uint32_t helper_br_update(uint32_t reg)
+{
+ uint32_t index = reg & 0xffff;
+ uint32_t incr = reg >> 16;
+ uint32_t new_index = reverse16(reverse16(index) + reverse16(incr));
+ return reg - index + new_index;
+}
+
+uint32_t helper_circ_update(uint32_t reg, uint32_t off)
+{
+ uint32_t index = reg & 0xffff;
+ uint32_t length = reg >> 16;
+ int32_t new_index = index + off;
+ if (new_index < 0) {
+ new_index += length;
+ } else {
+ new_index %= length;
+ }
+ return reg - index + new_index;
+}
+
+static uint32_t ssov32(CPUTriCoreState *env, int64_t arg)
+{
+ uint32_t ret;
+ int64_t max_pos = INT32_MAX;
+ int64_t max_neg = INT32_MIN;
+ if (arg > max_pos) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ ret = (target_ulong)max_pos;
+ } else {
+ if (arg < max_neg) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ ret = (target_ulong)max_neg;
+ } else {
+ env->PSW_USB_V = 0;
+ ret = (target_ulong)arg;
+ }
+ }
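+    /* advance overflow: bit 31 of (arg ^ 2 * arg) is set iff bits 31 and 30
+       of the (unsaturated) result differ */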
+ env->PSW_USB_AV = arg ^ arg * 2u;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+ return ret;
+}
+
+static uint32_t suov32_pos(CPUTriCoreState *env, uint64_t arg)
+{
+ uint32_t ret;
+ uint64_t max_pos = UINT32_MAX;
+ if (arg > max_pos) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ ret = (target_ulong)max_pos;
+ } else {
+ env->PSW_USB_V = 0;
+ ret = (target_ulong)arg;
+ }
+ env->PSW_USB_AV = arg ^ arg * 2u;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+ return ret;
+}
+
+static uint32_t suov32_neg(CPUTriCoreState *env, int64_t arg)
+{
+ uint32_t ret;
+
+ if (arg < 0) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ ret = 0;
+ } else {
+ env->PSW_USB_V = 0;
+ ret = (target_ulong)arg;
+ }
+ env->PSW_USB_AV = arg ^ arg * 2u;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+ return ret;
+}
+
+static uint32_t ssov16(CPUTriCoreState *env, int32_t hw0, int32_t hw1)
+{
+ int32_t max_pos = INT16_MAX;
+ int32_t max_neg = INT16_MIN;
+ int32_t av0, av1;
+
+ env->PSW_USB_V = 0;
+ av0 = hw0 ^ hw0 * 2u;
+ if (hw0 > max_pos) {
+ env->PSW_USB_V = (1 << 31);
+ hw0 = max_pos;
+ } else if (hw0 < max_neg) {
+ env->PSW_USB_V = (1 << 31);
+ hw0 = max_neg;
+ }
+
+ av1 = hw1 ^ hw1 * 2u;
+ if (hw1 > max_pos) {
+ env->PSW_USB_V = (1 << 31);
+ hw1 = max_pos;
+ } else if (hw1 < max_neg) {
+ env->PSW_USB_V = (1 << 31);
+ hw1 = max_neg;
+ }
+
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ env->PSW_USB_AV = (av0 | av1) << 16;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+ return (hw0 & 0xffff) | (hw1 << 16);
+}
+
+static uint32_t suov16(CPUTriCoreState *env, int32_t hw0, int32_t hw1)
+{
+ int32_t max_pos = UINT16_MAX;
+ int32_t av0, av1;
+
+ env->PSW_USB_V = 0;
+ av0 = hw0 ^ hw0 * 2u;
+ if (hw0 > max_pos) {
+ env->PSW_USB_V = (1 << 31);
+ hw0 = max_pos;
+ } else if (hw0 < 0) {
+ env->PSW_USB_V = (1 << 31);
+ hw0 = 0;
+ }
+
+ av1 = hw1 ^ hw1 * 2u;
+ if (hw1 > max_pos) {
+ env->PSW_USB_V = (1 << 31);
+ hw1 = max_pos;
+ } else if (hw1 < 0) {
+ env->PSW_USB_V = (1 << 31);
+ hw1 = 0;
+ }
+
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ env->PSW_USB_AV = (av0 | av1) << 16;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+ return (hw0 & 0xffff) | (hw1 << 16);
+}
+
+target_ulong helper_add_ssov(CPUTriCoreState *env, target_ulong r1,
+ target_ulong r2)
+{
+ int64_t t1 = sextract64(r1, 0, 32);
+ int64_t t2 = sextract64(r2, 0, 32);
+ int64_t result = t1 + t2;
+ return ssov32(env, result);
+}
+
+uint64_t helper_add64_ssov(CPUTriCoreState *env, uint64_t r1, uint64_t r2)
+{
+ uint64_t result;
+ int64_t ovf;
+
+ result = r1 + r2;
+ ovf = (result ^ r1) & ~(r1 ^ r2);
+ env->PSW_USB_AV = (result ^ result * 2u) >> 32;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+ if (ovf < 0) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ /* ext_ret > MAX_INT */
+ if ((int64_t)r1 >= 0) {
+ result = INT64_MAX;
+ /* ext_ret < MIN_INT */
+ } else {
+ result = INT64_MIN;
+ }
+ } else {
+ env->PSW_USB_V = 0;
+ }
+ return result;
+}
+
+target_ulong helper_add_h_ssov(CPUTriCoreState *env, target_ulong r1,
+ target_ulong r2)
+{
+ int32_t ret_hw0, ret_hw1;
+
+ ret_hw0 = sextract32(r1, 0, 16) + sextract32(r2, 0, 16);
+ ret_hw1 = sextract32(r1, 16, 16) + sextract32(r2, 16, 16);
+ return ssov16(env, ret_hw0, ret_hw1);
+}
+
+uint32_t helper_addr_h_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l,
+ uint32_t r2_h)
+{
+ int64_t mul_res0 = sextract64(r1, 0, 32);
+ int64_t mul_res1 = sextract64(r1, 32, 32);
+ int64_t r2_low = sextract64(r2_l, 0, 32);
+ int64_t r2_high = sextract64(r2_h, 0, 32);
+ int64_t result0, result1;
+ uint32_t ovf0, ovf1;
+ uint32_t avf0, avf1;
+
+ ovf0 = ovf1 = 0;
+
+ result0 = r2_low + mul_res0 + 0x8000;
+ result1 = r2_high + mul_res1 + 0x8000;
+
+ avf0 = result0 * 2u;
+ avf0 = result0 ^ avf0;
+ avf1 = result1 * 2u;
+ avf1 = result1 ^ avf1;
+
+ if (result0 > INT32_MAX) {
+ ovf0 = (1 << 31);
+ result0 = INT32_MAX;
+ } else if (result0 < INT32_MIN) {
+ ovf0 = (1 << 31);
+ result0 = INT32_MIN;
+ }
+
+ if (result1 > INT32_MAX) {
+ ovf1 = (1 << 31);
+ result1 = INT32_MAX;
+ } else if (result1 < INT32_MIN) {
+ ovf1 = (1 << 31);
+ result1 = INT32_MIN;
+ }
+
+ env->PSW_USB_V = ovf0 | ovf1;
+ env->PSW_USB_SV |= env->PSW_USB_V;
+
+ env->PSW_USB_AV = avf0 | avf1;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL);
+}
+
+uint32_t helper_addsur_h_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l,
+ uint32_t r2_h)
+{
+ int64_t mul_res0 = sextract64(r1, 0, 32);
+ int64_t mul_res1 = sextract64(r1, 32, 32);
+ int64_t r2_low = sextract64(r2_l, 0, 32);
+ int64_t r2_high = sextract64(r2_h, 0, 32);
+ int64_t result0, result1;
+ uint32_t ovf0, ovf1;
+ uint32_t avf0, avf1;
+
+ ovf0 = ovf1 = 0;
+
+ result0 = r2_low - mul_res0 + 0x8000;
+ result1 = r2_high + mul_res1 + 0x8000;
+
+ avf0 = result0 * 2u;
+ avf0 = result0 ^ avf0;
+ avf1 = result1 * 2u;
+ avf1 = result1 ^ avf1;
+
+ if (result0 > INT32_MAX) {
+ ovf0 = (1 << 31);
+ result0 = INT32_MAX;
+ } else if (result0 < INT32_MIN) {
+ ovf0 = (1 << 31);
+ result0 = INT32_MIN;
+ }
+
+ if (result1 > INT32_MAX) {
+ ovf1 = (1 << 31);
+ result1 = INT32_MAX;
+ } else if (result1 < INT32_MIN) {
+ ovf1 = (1 << 31);
+ result1 = INT32_MIN;
+ }
+
+ env->PSW_USB_V = ovf0 | ovf1;
+ env->PSW_USB_SV |= env->PSW_USB_V;
+
+ env->PSW_USB_AV = avf0 | avf1;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL);
+}
+
+
+target_ulong helper_add_suov(CPUTriCoreState *env, target_ulong r1,
+ target_ulong r2)
+{
+ int64_t t1 = extract64(r1, 0, 32);
+ int64_t t2 = extract64(r2, 0, 32);
+ int64_t result = t1 + t2;
+ return suov32_pos(env, result);
+}
+
+target_ulong helper_add_h_suov(CPUTriCoreState *env, target_ulong r1,
+ target_ulong r2)
+{
+ int32_t ret_hw0, ret_hw1;
+
+ ret_hw0 = extract32(r1, 0, 16) + extract32(r2, 0, 16);
+ ret_hw1 = extract32(r1, 16, 16) + extract32(r2, 16, 16);
+ return suov16(env, ret_hw0, ret_hw1);
+}
+
+target_ulong helper_sub_ssov(CPUTriCoreState *env, target_ulong r1,
+ target_ulong r2)
+{
+ int64_t t1 = sextract64(r1, 0, 32);
+ int64_t t2 = sextract64(r2, 0, 32);
+ int64_t result = t1 - t2;
+ return ssov32(env, result);
+}
+
+uint64_t helper_sub64_ssov(CPUTriCoreState *env, uint64_t r1, uint64_t r2)
+{
+ uint64_t result;
+ int64_t ovf;
+
+ result = r1 - r2;
+ ovf = (result ^ r1) & (r1 ^ r2);
+ env->PSW_USB_AV = (result ^ result * 2u) >> 32;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+ if (ovf < 0) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ /* ext_ret > MAX_INT */
+ if ((int64_t)r1 >= 0) {
+ result = INT64_MAX;
+ /* ext_ret < MIN_INT */
+ } else {
+ result = INT64_MIN;
+ }
+ } else {
+ env->PSW_USB_V = 0;
+ }
+ return result;
+}
+
+target_ulong helper_sub_h_ssov(CPUTriCoreState *env, target_ulong r1,
+ target_ulong r2)
+{
+ int32_t ret_hw0, ret_hw1;
+
+ ret_hw0 = sextract32(r1, 0, 16) - sextract32(r2, 0, 16);
+ ret_hw1 = sextract32(r1, 16, 16) - sextract32(r2, 16, 16);
+ return ssov16(env, ret_hw0, ret_hw1);
+}
+
+uint32_t helper_subr_h_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l,
+ uint32_t r2_h)
+{
+ int64_t mul_res0 = sextract64(r1, 0, 32);
+ int64_t mul_res1 = sextract64(r1, 32, 32);
+ int64_t r2_low = sextract64(r2_l, 0, 32);
+ int64_t r2_high = sextract64(r2_h, 0, 32);
+ int64_t result0, result1;
+ uint32_t ovf0, ovf1;
+ uint32_t avf0, avf1;
+
+ ovf0 = ovf1 = 0;
+
+ result0 = r2_low - mul_res0 + 0x8000;
+ result1 = r2_high - mul_res1 + 0x8000;
+
+ avf0 = result0 * 2u;
+ avf0 = result0 ^ avf0;
+ avf1 = result1 * 2u;
+ avf1 = result1 ^ avf1;
+
+ if (result0 > INT32_MAX) {
+ ovf0 = (1 << 31);
+ result0 = INT32_MAX;
+ } else if (result0 < INT32_MIN) {
+ ovf0 = (1 << 31);
+ result0 = INT32_MIN;
+ }
+
+ if (result1 > INT32_MAX) {
+ ovf1 = (1 << 31);
+ result1 = INT32_MAX;
+ } else if (result1 < INT32_MIN) {
+ ovf1 = (1 << 31);
+ result1 = INT32_MIN;
+ }
+
+ env->PSW_USB_V = ovf0 | ovf1;
+ env->PSW_USB_SV |= env->PSW_USB_V;
+
+ env->PSW_USB_AV = avf0 | avf1;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL);
+}
+
+uint32_t helper_subadr_h_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l,
+ uint32_t r2_h)
+{
+ int64_t mul_res0 = sextract64(r1, 0, 32);
+ int64_t mul_res1 = sextract64(r1, 32, 32);
+ int64_t r2_low = sextract64(r2_l, 0, 32);
+ int64_t r2_high = sextract64(r2_h, 0, 32);
+ int64_t result0, result1;
+ uint32_t ovf0, ovf1;
+ uint32_t avf0, avf1;
+
+ ovf0 = ovf1 = 0;
+
+ result0 = r2_low + mul_res0 + 0x8000;
+ result1 = r2_high - mul_res1 + 0x8000;
+
+ avf0 = result0 * 2u;
+ avf0 = result0 ^ avf0;
+ avf1 = result1 * 2u;
+ avf1 = result1 ^ avf1;
+
+ if (result0 > INT32_MAX) {
+ ovf0 = (1 << 31);
+ result0 = INT32_MAX;
+ } else if (result0 < INT32_MIN) {
+ ovf0 = (1 << 31);
+ result0 = INT32_MIN;
+ }
+
+ if (result1 > INT32_MAX) {
+ ovf1 = (1 << 31);
+ result1 = INT32_MAX;
+ } else if (result1 < INT32_MIN) {
+ ovf1 = (1 << 31);
+ result1 = INT32_MIN;
+ }
+
+ env->PSW_USB_V = ovf0 | ovf1;
+ env->PSW_USB_SV |= env->PSW_USB_V;
+
+ env->PSW_USB_AV = avf0 | avf1;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL);
+}
+
+target_ulong helper_sub_suov(CPUTriCoreState *env, target_ulong r1,
+ target_ulong r2)
+{
+ int64_t t1 = extract64(r1, 0, 32);
+ int64_t t2 = extract64(r2, 0, 32);
+ int64_t result = t1 - t2;
+ return suov32_neg(env, result);
+}
+
+target_ulong helper_sub_h_suov(CPUTriCoreState *env, target_ulong r1,
+ target_ulong r2)
+{
+ int32_t ret_hw0, ret_hw1;
+
+ ret_hw0 = extract32(r1, 0, 16) - extract32(r2, 0, 16);
+ ret_hw1 = extract32(r1, 16, 16) - extract32(r2, 16, 16);
+ return suov16(env, ret_hw0, ret_hw1);
+}
+
+target_ulong helper_mul_ssov(CPUTriCoreState *env, target_ulong r1,
+ target_ulong r2)
+{
+ int64_t t1 = sextract64(r1, 0, 32);
+ int64_t t2 = sextract64(r2, 0, 32);
+ int64_t result = t1 * t2;
+ return ssov32(env, result);
+}
+
+target_ulong helper_mul_suov(CPUTriCoreState *env, target_ulong r1,
+ target_ulong r2)
+{
+ int64_t t1 = extract64(r1, 0, 32);
+ int64_t t2 = extract64(r2, 0, 32);
+ int64_t result = t1 * t2;
+
+ return suov32_pos(env, result);
+}
+
+target_ulong helper_sha_ssov(CPUTriCoreState *env, target_ulong r1,
+ target_ulong r2)
+{
+ int64_t t1 = sextract64(r1, 0, 32);
+ int32_t t2 = sextract64(r2, 0, 6);
+ int64_t result;
+ if (t2 == 0) {
+ result = t1;
+ } else if (t2 > 0) {
+ result = t1 << t2;
+ } else {
+ result = t1 >> -t2;
+ }
+ return ssov32(env, result);
+}
+
+uint32_t helper_abs_ssov(CPUTriCoreState *env, target_ulong r1)
+{
+ target_ulong result;
+ result = ((int32_t)r1 >= 0) ? r1 : (0 - r1);
+ return ssov32(env, result);
+}
+
+uint32_t helper_abs_h_ssov(CPUTriCoreState *env, target_ulong r1)
+{
+ int32_t ret_h0, ret_h1;
+
+ ret_h0 = sextract32(r1, 0, 16);
+ ret_h0 = (ret_h0 >= 0) ? ret_h0 : (0 - ret_h0);
+
+ ret_h1 = sextract32(r1, 16, 16);
+ ret_h1 = (ret_h1 >= 0) ? ret_h1 : (0 - ret_h1);
+
+ return ssov16(env, ret_h0, ret_h1);
+}
+
+target_ulong helper_absdif_ssov(CPUTriCoreState *env, target_ulong r1,
+ target_ulong r2)
+{
+ int64_t t1 = sextract64(r1, 0, 32);
+ int64_t t2 = sextract64(r2, 0, 32);
+ int64_t result;
+
+ if (t1 > t2) {
+ result = t1 - t2;
+ } else {
+ result = t2 - t1;
+ }
+ return ssov32(env, result);
+}
+
+uint32_t helper_absdif_h_ssov(CPUTriCoreState *env, target_ulong r1,
+ target_ulong r2)
+{
+ int32_t t1, t2;
+ int32_t ret_h0, ret_h1;
+
+ t1 = sextract32(r1, 0, 16);
+ t2 = sextract32(r2, 0, 16);
+ if (t1 > t2) {
+ ret_h0 = t1 - t2;
+ } else {
+ ret_h0 = t2 - t1;
+ }
+
+ t1 = sextract32(r1, 16, 16);
+ t2 = sextract32(r2, 16, 16);
+ if (t1 > t2) {
+ ret_h1 = t1 - t2;
+ } else {
+ ret_h1 = t2 - t1;
+ }
+
+ return ssov16(env, ret_h0, ret_h1);
+}
+
+target_ulong helper_madd32_ssov(CPUTriCoreState *env, target_ulong r1,
+ target_ulong r2, target_ulong r3)
+{
+ int64_t t1 = sextract64(r1, 0, 32);
+ int64_t t2 = sextract64(r2, 0, 32);
+ int64_t t3 = sextract64(r3, 0, 32);
+ int64_t result;
+
+ result = t2 + (t1 * t3);
+ return ssov32(env, result);
+}
+
+target_ulong helper_madd32_suov(CPUTriCoreState *env, target_ulong r1,
+ target_ulong r2, target_ulong r3)
+{
+ uint64_t t1 = extract64(r1, 0, 32);
+ uint64_t t2 = extract64(r2, 0, 32);
+ uint64_t t3 = extract64(r3, 0, 32);
+ int64_t result;
+
+ result = t2 + (t1 * t3);
+ return suov32_pos(env, result);
+}
+
+uint64_t helper_madd64_ssov(CPUTriCoreState *env, target_ulong r1,
+ uint64_t r2, target_ulong r3)
+{
+ uint64_t ret, ovf;
+ int64_t t1 = sextract64(r1, 0, 32);
+ int64_t t3 = sextract64(r3, 0, 32);
+ int64_t mul;
+
+ mul = t1 * t3;
+ ret = mul + r2;
+ ovf = (ret ^ mul) & ~(mul ^ r2);
+
+ t1 = ret >> 32;
+ env->PSW_USB_AV = t1 ^ t1 * 2u;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ if ((int64_t)ovf < 0) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ /* ext_ret > MAX_INT */
+ if (mul >= 0) {
+ ret = INT64_MAX;
+ /* ext_ret < MIN_INT */
+ } else {
+ ret = INT64_MIN;
+ }
+ } else {
+ env->PSW_USB_V = 0;
+ }
+
+ return ret;
+}
+
+uint32_t
+helper_madd32_q_add_ssov(CPUTriCoreState *env, uint64_t r1, uint64_t r2)
+{
+ int64_t result;
+
+ result = (r1 + r2);
+
+ env->PSW_USB_AV = (result ^ result * 2u);
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+    /* we do the saturation by hand, since we produce an overflow on the host
+       if the mul before was ((0x80000000 * 0x80000000) << 1). If this is the
+       case, we flip the saturated value. */
+ if (r2 == 0x8000000000000000LL) {
+ if (result > 0x7fffffffLL) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ result = INT32_MIN;
+ } else if (result < -0x80000000LL) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ result = INT32_MAX;
+ } else {
+ env->PSW_USB_V = 0;
+ }
+ } else {
+ if (result > 0x7fffffffLL) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ result = INT32_MAX;
+ } else if (result < -0x80000000LL) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ result = INT32_MIN;
+ } else {
+ env->PSW_USB_V = 0;
+ }
+ }
+ return (uint32_t)result;
+}
+
+uint64_t helper_madd64_q_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2,
+ uint32_t r3, uint32_t n)
+{
+ int64_t t1 = (int64_t)r1;
+ int64_t t2 = sextract64(r2, 0, 32);
+ int64_t t3 = sextract64(r3, 0, 32);
+ int64_t result, mul;
+ int64_t ovf;
+
+ mul = (t2 * t3) << n;
+ result = mul + t1;
+
+ env->PSW_USB_AV = (result ^ result * 2u) >> 32;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ ovf = (result ^ mul) & ~(mul ^ t1);
+    /* we do the saturation by hand, since we produce an overflow on the host
+       if the mul was ((0x80000000 * 0x80000000) << 1). If this is the
+       case, we flip the saturated value. */
+ if ((r2 == 0x80000000) && (r3 == 0x80000000) && (n == 1)) {
+ if (ovf >= 0) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ /* ext_ret > MAX_INT */
+ if (mul < 0) {
+ result = INT64_MAX;
+ /* ext_ret < MIN_INT */
+ } else {
+ result = INT64_MIN;
+ }
+ } else {
+ env->PSW_USB_V = 0;
+ }
+ } else {
+ if (ovf < 0) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ /* ext_ret > MAX_INT */
+ if (mul >= 0) {
+ result = INT64_MAX;
+ /* ext_ret < MIN_INT */
+ } else {
+ result = INT64_MIN;
+ }
+ } else {
+ env->PSW_USB_V = 0;
+ }
+ }
+ return (uint64_t)result;
+}
+
+uint32_t helper_maddr_q_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2,
+ uint32_t r3, uint32_t n)
+{
+ int64_t t1 = sextract64(r1, 0, 32);
+ int64_t t2 = sextract64(r2, 0, 32);
+ int64_t t3 = sextract64(r3, 0, 32);
+ int64_t mul, ret;
+
+ if ((t2 == -0x8000ll) && (t3 == -0x8000ll) && (n == 1)) {
+ mul = 0x7fffffff;
+ } else {
+ mul = (t2 * t3) << n;
+ }
+
+ ret = t1 + mul + 0x8000;
+
+ env->PSW_USB_AV = ret ^ ret * 2u;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ if (ret > 0x7fffffffll) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ ret = INT32_MAX;
+ } else if (ret < -0x80000000ll) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ ret = INT32_MIN;
+ } else {
+ env->PSW_USB_V = 0;
+ }
+ return ret & 0xffff0000ll;
+}
+
+uint64_t helper_madd64_suov(CPUTriCoreState *env, target_ulong r1,
+ uint64_t r2, target_ulong r3)
+{
+ uint64_t ret, mul;
+ uint64_t t1 = extract64(r1, 0, 32);
+ uint64_t t3 = extract64(r3, 0, 32);
+
+ mul = t1 * t3;
+ ret = mul + r2;
+
+ t1 = ret >> 32;
+ env->PSW_USB_AV = t1 ^ t1 * 2u;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ if (ret < r2) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ /* saturate */
+ ret = UINT64_MAX;
+ } else {
+ env->PSW_USB_V = 0;
+ }
+ return ret;
+}
+
+target_ulong helper_msub32_ssov(CPUTriCoreState *env, target_ulong r1,
+ target_ulong r2, target_ulong r3)
+{
+ int64_t t1 = sextract64(r1, 0, 32);
+ int64_t t2 = sextract64(r2, 0, 32);
+ int64_t t3 = sextract64(r3, 0, 32);
+ int64_t result;
+
+ result = t2 - (t1 * t3);
+ return ssov32(env, result);
+}
+
+target_ulong helper_msub32_suov(CPUTriCoreState *env, target_ulong r1,
+ target_ulong r2, target_ulong r3)
+{
+ uint64_t t1 = extract64(r1, 0, 32);
+ uint64_t t2 = extract64(r2, 0, 32);
+ uint64_t t3 = extract64(r3, 0, 32);
+ uint64_t result;
+ uint64_t mul;
+
+ mul = (t1 * t3);
+ result = t2 - mul;
+
+ env->PSW_USB_AV = result ^ result * 2u;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+ /* we calculate ovf by hand here, because the multiplication can overflow on
+ the host, which would give false results if we compare to less than
+ zero */
+ if (mul > t2) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ result = 0;
+ } else {
+ env->PSW_USB_V = 0;
+ }
+ return result;
+}
+
+uint64_t helper_msub64_ssov(CPUTriCoreState *env, target_ulong r1,
+ uint64_t r2, target_ulong r3)
+{
+ uint64_t ret, ovf;
+ int64_t t1 = sextract64(r1, 0, 32);
+ int64_t t3 = sextract64(r3, 0, 32);
+ int64_t mul;
+
+ mul = t1 * t3;
+ ret = r2 - mul;
+ ovf = (ret ^ r2) & (mul ^ r2);
+
+ t1 = ret >> 32;
+ env->PSW_USB_AV = t1 ^ t1 * 2u;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ if ((int64_t)ovf < 0) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ /* ext_ret > MAX_INT */
+ if (mul < 0) {
+ ret = INT64_MAX;
+ /* ext_ret < MIN_INT */
+ } else {
+ ret = INT64_MIN;
+ }
+ } else {
+ env->PSW_USB_V = 0;
+ }
+ return ret;
+}
+
+uint64_t helper_msub64_suov(CPUTriCoreState *env, target_ulong r1,
+ uint64_t r2, target_ulong r3)
+{
+ uint64_t ret, mul;
+ uint64_t t1 = extract64(r1, 0, 32);
+ uint64_t t3 = extract64(r3, 0, 32);
+
+ mul = t1 * t3;
+ ret = r2 - mul;
+
+ t1 = ret >> 32;
+ env->PSW_USB_AV = t1 ^ t1 * 2u;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ if (ret > r2) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ /* saturate */
+ ret = 0;
+ } else {
+ env->PSW_USB_V = 0;
+ }
+ return ret;
+}
+
+uint32_t
+helper_msub32_q_sub_ssov(CPUTriCoreState *env, uint64_t r1, uint64_t r2)
+{
+ int64_t result;
+ int64_t t1 = (int64_t)r1;
+ int64_t t2 = (int64_t)r2;
+
+ result = t1 - t2;
+
+ env->PSW_USB_AV = (result ^ result * 2u);
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+    /* we do the saturation by hand, since we produce an overflow on the host
+       if the mul before was ((0x80000000 * 0x80000000) << 1). If this is the
+       case, we flip the saturated value. */
+ if (r2 == 0x8000000000000000LL) {
+ if (result > 0x7fffffffLL) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ result = INT32_MIN;
+ } else if (result < -0x80000000LL) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ result = INT32_MAX;
+ } else {
+ env->PSW_USB_V = 0;
+ }
+ } else {
+ if (result > 0x7fffffffLL) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ result = INT32_MAX;
+ } else if (result < -0x80000000LL) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ result = INT32_MIN;
+ } else {
+ env->PSW_USB_V = 0;
+ }
+ }
+ return (uint32_t)result;
+}
+
+uint64_t helper_msub64_q_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2,
+ uint32_t r3, uint32_t n)
+{
+ int64_t t1 = (int64_t)r1;
+ int64_t t2 = sextract64(r2, 0, 32);
+ int64_t t3 = sextract64(r3, 0, 32);
+ int64_t result, mul;
+ int64_t ovf;
+
+ mul = (t2 * t3) << n;
+ result = t1 - mul;
+
+ env->PSW_USB_AV = (result ^ result * 2u) >> 32;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ ovf = (result ^ t1) & (t1 ^ mul);
+    /* we do the saturation by hand, since we produce an overflow on the host
+       if the mul before was ((0x80000000 * 0x80000000) << 1). If this is the
+       case, we flip the saturated value. */
+ if (mul == 0x8000000000000000LL) {
+ if (ovf >= 0) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ /* ext_ret > MAX_INT */
+ if (mul >= 0) {
+ result = INT64_MAX;
+ /* ext_ret < MIN_INT */
+ } else {
+ result = INT64_MIN;
+ }
+ } else {
+ env->PSW_USB_V = 0;
+ }
+ } else {
+ if (ovf < 0) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV = (1 << 31);
+ /* ext_ret > MAX_INT */
+ if (mul < 0) {
+ result = INT64_MAX;
+ /* ext_ret < MIN_INT */
+ } else {
+ result = INT64_MIN;
+ }
+ } else {
+ env->PSW_USB_V = 0;
+ }
+ }
+
+ return (uint64_t)result;
+}
+
+uint32_t helper_msubr_q_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2,
+ uint32_t r3, uint32_t n)
+{
+ int64_t t1 = sextract64(r1, 0, 32);
+ int64_t t2 = sextract64(r2, 0, 32);
+ int64_t t3 = sextract64(r3, 0, 32);
+ int64_t mul, ret;
+
+ if ((t2 == -0x8000ll) && (t3 == -0x8000ll) && (n == 1)) {
+ mul = 0x7fffffff;
+ } else {
+ mul = (t2 * t3) << n;
+ }
+
+ ret = t1 - mul + 0x8000;
+
+ env->PSW_USB_AV = ret ^ ret * 2u;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ if (ret > 0x7fffffffll) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ ret = INT32_MAX;
+ } else if (ret < -0x80000000ll) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ ret = INT32_MIN;
+ } else {
+ env->PSW_USB_V = 0;
+ }
+ return ret & 0xffff0000ll;
+}
+
+uint32_t helper_abs_b(CPUTriCoreState *env, target_ulong arg)
+{
+ int32_t b, i;
+ int32_t ovf = 0;
+ int32_t avf = 0;
+ int32_t ret = 0;
+
+ for (i = 0; i < 4; i++) {
+ b = sextract32(arg, i * 8, 8);
+ b = (b >= 0) ? b : (0 - b);
+ ovf |= (b > 0x7F) || (b < -0x80);
+ avf |= b ^ b * 2u;
+ ret |= (b & 0xff) << (i * 8);
+ }
+
+ env->PSW_USB_V = ovf << 31;
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ env->PSW_USB_AV = avf << 24;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ return ret;
+}
+
+uint32_t helper_abs_h(CPUTriCoreState *env, target_ulong arg)
+{
+ int32_t h, i;
+ int32_t ovf = 0;
+ int32_t avf = 0;
+ int32_t ret = 0;
+
+ for (i = 0; i < 2; i++) {
+ h = sextract32(arg, i * 16, 16);
+ h = (h >= 0) ? h : (0 - h);
+ ovf |= (h > 0x7FFF) || (h < -0x8000);
+ avf |= h ^ h * 2u;
+ ret |= (h & 0xffff) << (i * 16);
+ }
+
+ env->PSW_USB_V = ovf << 31;
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ env->PSW_USB_AV = avf << 16;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ return ret;
+}
+
+uint32_t helper_absdif_b(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
+{
+ int32_t b, i;
+ int32_t extr_r2;
+ int32_t ovf = 0;
+ int32_t avf = 0;
+ int32_t ret = 0;
+
+ for (i = 0; i < 4; i++) {
+ extr_r2 = sextract32(r2, i * 8, 8);
+ b = sextract32(r1, i * 8, 8);
+ b = (b > extr_r2) ? (b - extr_r2) : (extr_r2 - b);
+ ovf |= (b > 0x7F) || (b < -0x80);
+ avf |= b ^ b * 2u;
+ ret |= (b & 0xff) << (i * 8);
+ }
+
+ env->PSW_USB_V = ovf << 31;
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ env->PSW_USB_AV = avf << 24;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+ return ret;
+}
+
+uint32_t helper_absdif_h(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
+{
+ int32_t h, i;
+ int32_t extr_r2;
+ int32_t ovf = 0;
+ int32_t avf = 0;
+ int32_t ret = 0;
+
+ for (i = 0; i < 2; i++) {
+ extr_r2 = sextract32(r2, i * 16, 16);
+ h = sextract32(r1, i * 16, 16);
+ h = (h > extr_r2) ? (h - extr_r2) : (extr_r2 - h);
+ ovf |= (h > 0x7FFF) || (h < -0x8000);
+ avf |= h ^ h * 2u;
+ ret |= (h & 0xffff) << (i * 16);
+ }
+
+ env->PSW_USB_V = ovf << 31;
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ env->PSW_USB_AV = avf << 16;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ return ret;
+}
+
+uint32_t helper_addr_h(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l,
+ uint32_t r2_h)
+{
+ int64_t mul_res0 = sextract64(r1, 0, 32);
+ int64_t mul_res1 = sextract64(r1, 32, 32);
+ int64_t r2_low = sextract64(r2_l, 0, 32);
+ int64_t r2_high = sextract64(r2_h, 0, 32);
+ int64_t result0, result1;
+ uint32_t ovf0, ovf1;
+ uint32_t avf0, avf1;
+
+ ovf0 = ovf1 = 0;
+
+ result0 = r2_low + mul_res0 + 0x8000;
+ result1 = r2_high + mul_res1 + 0x8000;
+
+ if ((result0 > INT32_MAX) || (result0 < INT32_MIN)) {
+ ovf0 = (1 << 31);
+ }
+
+ if ((result1 > INT32_MAX) || (result1 < INT32_MIN)) {
+ ovf1 = (1 << 31);
+ }
+
+ env->PSW_USB_V = ovf0 | ovf1;
+ env->PSW_USB_SV |= env->PSW_USB_V;
+
+ avf0 = result0 * 2u;
+ avf0 = result0 ^ avf0;
+ avf1 = result1 * 2u;
+ avf1 = result1 ^ avf1;
+
+ env->PSW_USB_AV = avf0 | avf1;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL);
+}
+
+uint32_t helper_addsur_h(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l,
+ uint32_t r2_h)
+{
+ int64_t mul_res0 = sextract64(r1, 0, 32);
+ int64_t mul_res1 = sextract64(r1, 32, 32);
+ int64_t r2_low = sextract64(r2_l, 0, 32);
+ int64_t r2_high = sextract64(r2_h, 0, 32);
+ int64_t result0, result1;
+ uint32_t ovf0, ovf1;
+ uint32_t avf0, avf1;
+
+ ovf0 = ovf1 = 0;
+
+ result0 = r2_low - mul_res0 + 0x8000;
+ result1 = r2_high + mul_res1 + 0x8000;
+
+ if ((result0 > INT32_MAX) || (result0 < INT32_MIN)) {
+ ovf0 = (1 << 31);
+ }
+
+ if ((result1 > INT32_MAX) || (result1 < INT32_MIN)) {
+ ovf1 = (1 << 31);
+ }
+
+ env->PSW_USB_V = ovf0 | ovf1;
+ env->PSW_USB_SV |= env->PSW_USB_V;
+
+ avf0 = result0 * 2u;
+ avf0 = result0 ^ avf0;
+ avf1 = result1 * 2u;
+ avf1 = result1 ^ avf1;
+
+ env->PSW_USB_AV = avf0 | avf1;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL);
+}
+
+uint32_t helper_maddr_q(CPUTriCoreState *env, uint32_t r1, uint32_t r2,
+ uint32_t r3, uint32_t n)
+{
+ int64_t t1 = sextract64(r1, 0, 32);
+ int64_t t2 = sextract64(r2, 0, 32);
+ int64_t t3 = sextract64(r3, 0, 32);
+ int64_t mul, ret;
+
+ if ((t2 == -0x8000ll) && (t3 == -0x8000ll) && (n == 1)) {
+ mul = 0x7fffffff;
+ } else {
+ mul = (t2 * t3) << n;
+ }
+
+ ret = t1 + mul + 0x8000;
+
+ if ((ret > 0x7fffffffll) || (ret < -0x80000000ll)) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ } else {
+ env->PSW_USB_V = 0;
+ }
+ env->PSW_USB_AV = ret ^ ret * 2u;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ return ret & 0xffff0000ll;
+}
+
+uint32_t helper_add_b(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
+{
+ int32_t b, i;
+ int32_t extr_r1, extr_r2;
+ int32_t ovf = 0;
+ int32_t avf = 0;
+ uint32_t ret = 0;
+
+ for (i = 0; i < 4; i++) {
+ extr_r1 = sextract32(r1, i * 8, 8);
+ extr_r2 = sextract32(r2, i * 8, 8);
+
+ b = extr_r1 + extr_r2;
+ ovf |= ((b > 0x7f) || (b < -0x80));
+ avf |= b ^ b * 2u;
+ ret |= ((b & 0xff) << (i*8));
+ }
+
+ env->PSW_USB_V = (ovf << 31);
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ env->PSW_USB_AV = avf << 24;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ return ret;
+}
+
+uint32_t helper_add_h(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
+{
+ int32_t h, i;
+ int32_t extr_r1, extr_r2;
+ int32_t ovf = 0;
+ int32_t avf = 0;
+ int32_t ret = 0;
+
+ for (i = 0; i < 2; i++) {
+ extr_r1 = sextract32(r1, i * 16, 16);
+ extr_r2 = sextract32(r2, i * 16, 16);
+ h = extr_r1 + extr_r2;
+ ovf |= ((h > 0x7fff) || (h < -0x8000));
+ avf |= h ^ h * 2u;
+ ret |= (h & 0xffff) << (i * 16);
+ }
+
+ env->PSW_USB_V = (ovf << 31);
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ env->PSW_USB_AV = (avf << 16);
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ return ret;
+}
+
+uint32_t helper_subr_h(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l,
+ uint32_t r2_h)
+{
+ int64_t mul_res0 = sextract64(r1, 0, 32);
+ int64_t mul_res1 = sextract64(r1, 32, 32);
+ int64_t r2_low = sextract64(r2_l, 0, 32);
+ int64_t r2_high = sextract64(r2_h, 0, 32);
+ int64_t result0, result1;
+ uint32_t ovf0, ovf1;
+ uint32_t avf0, avf1;
+
+ ovf0 = ovf1 = 0;
+
+ result0 = r2_low - mul_res0 + 0x8000;
+ result1 = r2_high - mul_res1 + 0x8000;
+
+ if ((result0 > INT32_MAX) || (result0 < INT32_MIN)) {
+ ovf0 = (1 << 31);
+ }
+
+ if ((result1 > INT32_MAX) || (result1 < INT32_MIN)) {
+ ovf1 = (1 << 31);
+ }
+
+ env->PSW_USB_V = ovf0 | ovf1;
+ env->PSW_USB_SV |= env->PSW_USB_V;
+
+ avf0 = result0 * 2u;
+ avf0 = result0 ^ avf0;
+ avf1 = result1 * 2u;
+ avf1 = result1 ^ avf1;
+
+ env->PSW_USB_AV = avf0 | avf1;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL);
+}
+
+uint32_t helper_subadr_h(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l,
+ uint32_t r2_h)
+{
+ int64_t mul_res0 = sextract64(r1, 0, 32);
+ int64_t mul_res1 = sextract64(r1, 32, 32);
+ int64_t r2_low = sextract64(r2_l, 0, 32);
+ int64_t r2_high = sextract64(r2_h, 0, 32);
+ int64_t result0, result1;
+ uint32_t ovf0, ovf1;
+ uint32_t avf0, avf1;
+
+ ovf0 = ovf1 = 0;
+
+ result0 = r2_low + mul_res0 + 0x8000;
+ result1 = r2_high - mul_res1 + 0x8000;
+
+ if ((result0 > INT32_MAX) || (result0 < INT32_MIN)) {
+ ovf0 = (1 << 31);
+ }
+
+ if ((result1 > INT32_MAX) || (result1 < INT32_MIN)) {
+ ovf1 = (1 << 31);
+ }
+
+ env->PSW_USB_V = ovf0 | ovf1;
+ env->PSW_USB_SV |= env->PSW_USB_V;
+
+ avf0 = result0 * 2u;
+ avf0 = result0 ^ avf0;
+ avf1 = result1 * 2u;
+ avf1 = result1 ^ avf1;
+
+ env->PSW_USB_AV = avf0 | avf1;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL);
+}
+
+uint32_t helper_msubr_q(CPUTriCoreState *env, uint32_t r1, uint32_t r2,
+ uint32_t r3, uint32_t n)
+{
+ int64_t t1 = sextract64(r1, 0, 32);
+ int64_t t2 = sextract64(r2, 0, 32);
+ int64_t t3 = sextract64(r3, 0, 32);
+ int64_t mul, ret;
+
+ if ((t2 == -0x8000ll) && (t3 == -0x8000ll) && (n == 1)) {
+ mul = 0x7fffffff;
+ } else {
+ mul = (t2 * t3) << n;
+ }
+
+ ret = t1 - mul + 0x8000;
+
+ if ((ret > 0x7fffffffll) || (ret < -0x80000000ll)) {
+ env->PSW_USB_V = (1 << 31);
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ } else {
+ env->PSW_USB_V = 0;
+ }
+ env->PSW_USB_AV = ret ^ ret * 2u;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ return ret & 0xffff0000ll;
+}
+
+uint32_t helper_sub_b(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
+{
+ int32_t b, i;
+ int32_t extr_r1, extr_r2;
+ int32_t ovf = 0;
+ int32_t avf = 0;
+ uint32_t ret = 0;
+
+ for (i = 0; i < 4; i++) {
+ extr_r1 = sextract32(r1, i * 8, 8);
+ extr_r2 = sextract32(r2, i * 8, 8);
+
+ b = extr_r1 - extr_r2;
+ ovf |= ((b > 0x7f) || (b < -0x80));
+ avf |= b ^ b * 2u;
+ ret |= ((b & 0xff) << (i*8));
+ }
+
+ env->PSW_USB_V = (ovf << 31);
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ env->PSW_USB_AV = avf << 24;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ return ret;
+}
+
+uint32_t helper_sub_h(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
+{
+ int32_t h, i;
+ int32_t extr_r1, extr_r2;
+ int32_t ovf = 0;
+ int32_t avf = 0;
+ int32_t ret = 0;
+
+ for (i = 0; i < 2; i++) {
+ extr_r1 = sextract32(r1, i * 16, 16);
+ extr_r2 = sextract32(r2, i * 16, 16);
+ h = extr_r1 - extr_r2;
+ ovf |= ((h > 0x7fff) || (h < -0x8000));
+ avf |= h ^ h * 2u;
+ ret |= (h & 0xffff) << (i * 16);
+ }
+
+ env->PSW_USB_V = (ovf << 31);
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ env->PSW_USB_AV = avf << 16;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ return ret;
+}
+
+uint32_t helper_eq_b(target_ulong r1, target_ulong r2)
+{
+ int32_t ret;
+ int32_t i, msk;
+
+ ret = 0;
+ msk = 0xff;
+ for (i = 0; i < 4; i++) {
+ if ((r1 & msk) == (r2 & msk)) {
+ ret |= msk;
+ }
+ msk = msk << 8;
+ }
+
+ return ret;
+}
+
+uint32_t helper_eq_h(target_ulong r1, target_ulong r2)
+{
+ int32_t ret = 0;
+
+ if ((r1 & 0xffff) == (r2 & 0xffff)) {
+ ret = 0xffff;
+ }
+
+ if ((r1 & 0xffff0000) == (r2 & 0xffff0000)) {
+ ret |= 0xffff0000;
+ }
+
+ return ret;
+}
+
+uint32_t helper_eqany_b(target_ulong r1, target_ulong r2)
+{
+ int32_t i;
+ uint32_t ret = 0;
+
+ for (i = 0; i < 4; i++) {
+ ret |= (sextract32(r1, i * 8, 8) == sextract32(r2, i * 8, 8));
+ }
+
+ return ret;
+}
+
+uint32_t helper_eqany_h(target_ulong r1, target_ulong r2)
+{
+ uint32_t ret;
+
+ ret = (sextract32(r1, 0, 16) == sextract32(r2, 0, 16));
+ ret |= (sextract32(r1, 16, 16) == sextract32(r2, 16, 16));
+
+ return ret;
+}
+
+uint32_t helper_lt_b(target_ulong r1, target_ulong r2)
+{
+ int32_t i;
+ uint32_t ret = 0;
+
+ for (i = 0; i < 4; i++) {
+ if (sextract32(r1, i * 8, 8) < sextract32(r2, i * 8, 8)) {
+ ret |= (0xff << (i * 8));
+ }
+ }
+
+ return ret;
+}
+
+uint32_t helper_lt_bu(target_ulong r1, target_ulong r2)
+{
+ int32_t i;
+ uint32_t ret = 0;
+
+ for (i = 0; i < 4; i++) {
+ if (extract32(r1, i * 8, 8) < extract32(r2, i * 8, 8)) {
+ ret |= (0xff << (i * 8));
+ }
+ }
+
+ return ret;
+}
+
+uint32_t helper_lt_h(target_ulong r1, target_ulong r2)
+{
+ uint32_t ret = 0;
+
+ if (sextract32(r1, 0, 16) < sextract32(r2, 0, 16)) {
+ ret |= 0xffff;
+ }
+
+ if (sextract32(r1, 16, 16) < sextract32(r2, 16, 16)) {
+ ret |= 0xffff0000;
+ }
+
+ return ret;
+}
+
+uint32_t helper_lt_hu(target_ulong r1, target_ulong r2)
+{
+ uint32_t ret = 0;
+
+ if (extract32(r1, 0, 16) < extract32(r2, 0, 16)) {
+ ret |= 0xffff;
+ }
+
+ if (extract32(r1, 16, 16) < extract32(r2, 16, 16)) {
+ ret |= 0xffff0000;
+ }
+
+ return ret;
+}
+
+#define EXTREMA_H_B(name, op) \
+uint32_t helper_##name ##_b(target_ulong r1, target_ulong r2) \
+{ \
+ int32_t i, extr_r1, extr_r2; \
+ uint32_t ret = 0; \
+ \
+ for (i = 0; i < 4; i++) { \
+ extr_r1 = sextract32(r1, i * 8, 8); \
+ extr_r2 = sextract32(r2, i * 8, 8); \
+ extr_r1 = (extr_r1 op extr_r2) ? extr_r1 : extr_r2; \
+ ret |= (extr_r1 & 0xff) << (i * 8); \
+ } \
+ return ret; \
+} \
+ \
+uint32_t helper_##name ##_bu(target_ulong r1, target_ulong r2)\
+{ \
+ int32_t i; \
+ uint32_t extr_r1, extr_r2; \
+ uint32_t ret = 0; \
+ \
+ for (i = 0; i < 4; i++) { \
+ extr_r1 = extract32(r1, i * 8, 8); \
+ extr_r2 = extract32(r2, i * 8, 8); \
+ extr_r1 = (extr_r1 op extr_r2) ? extr_r1 : extr_r2; \
+ ret |= (extr_r1 & 0xff) << (i * 8); \
+ } \
+ return ret; \
+} \
+ \
+uint32_t helper_##name ##_h(target_ulong r1, target_ulong r2) \
+{ \
+ int32_t extr_r1, extr_r2; \
+ uint32_t ret = 0; \
+ \
+ extr_r1 = sextract32(r1, 0, 16); \
+ extr_r2 = sextract32(r2, 0, 16); \
+ ret = (extr_r1 op extr_r2) ? extr_r1 : extr_r2; \
+ ret = ret & 0xffff; \
+ \
+ extr_r1 = sextract32(r1, 16, 16); \
+ extr_r2 = sextract32(r2, 16, 16); \
+ extr_r1 = (extr_r1 op extr_r2) ? extr_r1 : extr_r2; \
+ ret |= extr_r1 << 16; \
+ \
+ return ret; \
+} \
+ \
+uint32_t helper_##name ##_hu(target_ulong r1, target_ulong r2)\
+{ \
+ uint32_t extr_r1, extr_r2; \
+ uint32_t ret = 0; \
+ \
+ extr_r1 = extract32(r1, 0, 16); \
+ extr_r2 = extract32(r2, 0, 16); \
+ ret = (extr_r1 op extr_r2) ? extr_r1 : extr_r2; \
+ ret = ret & 0xffff; \
+ \
+ extr_r1 = extract32(r1, 16, 16); \
+ extr_r2 = extract32(r2, 16, 16); \
+ extr_r1 = (extr_r1 op extr_r2) ? extr_r1 : extr_r2; \
+ ret |= extr_r1 << (16); \
+ \
+ return ret; \
+} \
+ \
+uint64_t helper_ix##name(uint64_t r1, uint32_t r2) \
+{ \
+ int64_t r2l, r2h, r1hl; \
+ uint64_t ret = 0; \
+ \
+ ret = ((r1 + 2) & 0xffff); \
+ r2l = sextract64(r2, 0, 16); \
+ r2h = sextract64(r2, 16, 16); \
+ r1hl = sextract64(r1, 32, 16); \
+ \
+ if ((r2l op ## = r2h) && (r2l op r1hl)) { \
+ ret |= (r2l & 0xffff) << 32; \
+ ret |= extract64(r1, 0, 16) << 16; \
+ } else if ((r2h op r2l) && (r2h op r1hl)) { \
+ ret |= extract64(r2, 16, 16) << 32; \
+ ret |= extract64(r1 + 1, 0, 16) << 16; \
+ } else { \
+ ret |= r1 & 0xffffffff0000ull; \
+ } \
+ return ret; \
+} \
+ \
+uint64_t helper_ix##name ##_u(uint64_t r1, uint32_t r2) \
+{ \
+ int64_t r2l, r2h, r1hl; \
+ uint64_t ret = 0; \
+ \
+ ret = ((r1 + 2) & 0xffff); \
+ r2l = extract64(r2, 0, 16); \
+ r2h = extract64(r2, 16, 16); \
+ r1hl = extract64(r1, 32, 16); \
+ \
+ if ((r2l op ## = r2h) && (r2l op r1hl)) { \
+ ret |= (r2l & 0xffff) << 32; \
+ ret |= extract64(r1, 0, 16) << 16; \
+ } else if ((r2h op r2l) && (r2h op r1hl)) { \
+ ret |= extract64(r2, 16, 16) << 32; \
+ ret |= extract64(r1 + 1, 0, 16) << 16; \
+ } else { \
+ ret |= r1 & 0xffffffff0000ull; \
+ } \
+ return ret; \
+}
+
+EXTREMA_H_B(max, >)
+EXTREMA_H_B(min, <)
+
+#undef EXTREMA_H_B
+
+uint32_t helper_clo(target_ulong r1)
+{
+ return clo32(r1);
+}
+
+uint32_t helper_clo_h(target_ulong r1)
+{
+ uint32_t ret_hw0 = extract32(r1, 0, 16);
+ uint32_t ret_hw1 = extract32(r1, 16, 16);
+
+ ret_hw0 = clo32(ret_hw0 << 16);
+ ret_hw1 = clo32(ret_hw1 << 16);
+
+ if (ret_hw0 > 16) {
+ ret_hw0 = 16;
+ }
+ if (ret_hw1 > 16) {
+ ret_hw1 = 16;
+ }
+
+ return ret_hw0 | (ret_hw1 << 16);
+}
+
+uint32_t helper_clz(target_ulong r1)
+{
+ return clz32(r1);
+}
+
+uint32_t helper_clz_h(target_ulong r1)
+{
+ uint32_t ret_hw0 = extract32(r1, 0, 16);
+ uint32_t ret_hw1 = extract32(r1, 16, 16);
+
+ ret_hw0 = clz32(ret_hw0 << 16);
+ ret_hw1 = clz32(ret_hw1 << 16);
+
+ if (ret_hw0 > 16) {
+ ret_hw0 = 16;
+ }
+ if (ret_hw1 > 16) {
+ ret_hw1 = 16;
+ }
+
+ return ret_hw0 | (ret_hw1 << 16);
+}
+
+uint32_t helper_cls(target_ulong r1)
+{
+ return clrsb32(r1);
+}
+
+uint32_t helper_cls_h(target_ulong r1)
+{
+ uint32_t ret_hw0 = extract32(r1, 0, 16);
+ uint32_t ret_hw1 = extract32(r1, 16, 16);
+
+ ret_hw0 = clrsb32(ret_hw0 << 16);
+ ret_hw1 = clrsb32(ret_hw1 << 16);
+
+ if (ret_hw0 > 15) {
+ ret_hw0 = 15;
+ }
+ if (ret_hw1 > 15) {
+ ret_hw1 = 15;
+ }
+
+ return ret_hw0 | (ret_hw1 << 16);
+}
+
+uint32_t helper_sh(target_ulong r1, target_ulong r2)
+{
+ int32_t shift_count = sextract32(r2, 0, 6);
+
+ if (shift_count == -32) {
+ return 0;
+ } else if (shift_count < 0) {
+ return r1 >> -shift_count;
+ } else {
+ return r1 << shift_count;
+ }
+}
+
+uint32_t helper_sh_h(target_ulong r1, target_ulong r2)
+{
+ int32_t ret_hw0, ret_hw1;
+ int32_t shift_count;
+
+ shift_count = sextract32(r2, 0, 5);
+
+ if (shift_count == -16) {
+ return 0;
+ } else if (shift_count < 0) {
+ ret_hw0 = extract32(r1, 0, 16) >> -shift_count;
+ ret_hw1 = extract32(r1, 16, 16) >> -shift_count;
+ return (ret_hw0 & 0xffff) | (ret_hw1 << 16);
+ } else {
+ ret_hw0 = extract32(r1, 0, 16) << shift_count;
+ ret_hw1 = extract32(r1, 16, 16) << shift_count;
+ return (ret_hw0 & 0xffff) | (ret_hw1 << 16);
+ }
+}
+
+uint32_t helper_sha(CPUTriCoreState *env, target_ulong r1, target_ulong r2)
+{
+ int32_t shift_count;
+ int64_t result, t1;
+ uint32_t ret;
+
+ shift_count = sextract32(r2, 0, 6);
+ t1 = sextract32(r1, 0, 32);
+
+ if (shift_count == 0) {
+ env->PSW_USB_C = env->PSW_USB_V = 0;
+ ret = r1;
+ } else if (shift_count == -32) {
+ env->PSW_USB_C = r1;
+ env->PSW_USB_V = 0;
+ ret = t1 >> 31;
+ } else if (shift_count > 0) {
+ result = t1 << shift_count;
+ /* calc carry */
+ env->PSW_USB_C = ((result & 0xffffffff00000000ULL) != 0);
+ /* calc v */
+ env->PSW_USB_V = (((result > 0x7fffffffLL) ||
+ (result < -0x80000000LL)) << 31);
+ /* calc sv */
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ ret = (uint32_t)result;
+ } else {
+ env->PSW_USB_V = 0;
+ env->PSW_USB_C = (r1 & ((1 << -shift_count) - 1));
+ ret = t1 >> -shift_count;
+ }
+
+ env->PSW_USB_AV = ret ^ ret * 2u;
+ env->PSW_USB_SAV |= env->PSW_USB_AV;
+
+ return ret;
+}
+
+uint32_t helper_sha_h(target_ulong r1, target_ulong r2)
+{
+ int32_t shift_count;
+ int32_t ret_hw0, ret_hw1;
+
+ shift_count = sextract32(r2, 0, 5);
+
+ if (shift_count == 0) {
+ return r1;
+ } else if (shift_count < 0) {
+ ret_hw0 = sextract32(r1, 0, 16) >> -shift_count;
+ ret_hw1 = sextract32(r1, 16, 16) >> -shift_count;
+ return (ret_hw0 & 0xffff) | (ret_hw1 << 16);
+ } else {
+ ret_hw0 = sextract32(r1, 0, 16) << shift_count;
+ ret_hw1 = sextract32(r1, 16, 16) << shift_count;
+ return (ret_hw0 & 0xffff) | (ret_hw1 << 16);
+ }
+}
+
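+/* Interleave the low halfwords of r1 and r2: result bit (2 * i + 1) comes
+ from r1 bit i and result bit (2 * i) from r2 bit i, for i = 0..15. */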
+uint32_t helper_bmerge(target_ulong r1, target_ulong r2)
+{
+ uint32_t i, ret;
+
+ ret = 0;
+ for (i = 0; i < 16; i++) {
+ ret |= (r1 & 1) << (2 * i + 1);
+ ret |= (r2 & 1) << (2 * i);
+ r1 = r1 >> 1;
+ r2 = r2 >> 1;
+ }
+ return ret;
+}
+
+uint64_t helper_bsplit(uint32_t r1)
+{
+ int32_t i;
+ uint64_t ret;
+
+ ret = 0;
+ for (i = 0; i < 32; i = i + 2) {
+ /* even */
+ ret |= (r1 & 1) << (i/2);
+ r1 = r1 >> 1;
+ /* odd */
+ ret |= (uint64_t)(r1 & 1) << (i/2 + 32);
+ r1 = r1 >> 1;
+ }
+ return ret;
+}
+
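+/* Per-byte parity: bits 0, 8, 16 and 24 of the result hold the XOR of the
+ eight bits of the corresponding byte of r1; all other result bits are 0. */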
+uint32_t helper_parity(target_ulong r1)
+{
+ uint32_t ret;
+ uint32_t nOnes, i;
+
+ ret = 0;
+ nOnes = 0;
+ for (i = 0; i < 8; i++) {
+ ret ^= (r1 & 1);
+ r1 = r1 >> 1;
+ }
+ /* second byte */
+ nOnes = 0;
+ for (i = 0; i < 8; i++) {
+ nOnes ^= (r1 & 1);
+ r1 = r1 >> 1;
+ }
+ ret |= nOnes << 8;
+ /* third byte */
+ nOnes = 0;
+ for (i = 0; i < 8; i++) {
+ nOnes ^= (r1 & 1);
+ r1 = r1 >> 1;
+ }
+ ret |= nOnes << 16;
+ /* fourth byte */
+ nOnes = 0;
+ for (i = 0; i < 8; i++) {
+ nOnes ^= (r1 & 1);
+ r1 = r1 >> 1;
+ }
+ ret |= nOnes << 24;
+
+ return ret;
+}
+
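+/* Reassemble an IEEE-754 single precision value: the exponent comes from
+ r1_high, the fraction from r1_low and the sign from r2. Mantissa bit 7 is
+ the round bit; the result is rounded up when it is set and bit 8, bits 6:0
+ or the carry input is also non-zero. */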
+uint32_t helper_pack(uint32_t carry, uint32_t r1_low, uint32_t r1_high,
+ target_ulong r2)
+{
+ uint32_t ret;
+ int32_t fp_exp, fp_frac, temp_exp, fp_exp_frac;
+ int32_t int_exp = r1_high;
+ int32_t int_mant = r1_low;
+ uint32_t flag_rnd = (int_mant & (1 << 7)) && (
+ (int_mant & (1 << 8)) ||
+ (int_mant & 0x7f) ||
+ (carry != 0));
+ if (((int_mant & (1<<31)) == 0) && (int_exp == 255)) {
+ fp_exp = 255;
+ fp_frac = extract32(int_mant, 8, 23);
+ } else if ((int_mant & (1<<31)) && (int_exp >= 127)) {
+ fp_exp = 255;
+ fp_frac = 0;
+ } else if ((int_mant & (1<<31)) && (int_exp <= -128)) {
+ fp_exp = 0;
+ fp_frac = 0;
+ } else if (int_mant == 0) {
+ fp_exp = 0;
+ fp_frac = 0;
+ } else {
+ if (((int_mant & (1 << 31)) == 0)) {
+ temp_exp = 0;
+ } else {
+ temp_exp = int_exp + 128;
+ }
+ fp_exp_frac = (((temp_exp & 0xff) << 23) |
+ extract32(int_mant, 8, 23))
+ + flag_rnd;
+ fp_exp = extract32(fp_exp_frac, 23, 8);
+ fp_frac = extract32(fp_exp_frac, 0, 23);
+ }
+ ret = r2 & (1 << 31);
+ ret = ret + (fp_exp << 23);
+ ret = ret + (fp_frac & 0x7fffff);
+
+ return ret;
+}
+
+uint64_t helper_unpack(target_ulong arg1)
+{
+ int32_t fp_exp = extract32(arg1, 23, 8);
+ int32_t fp_frac = extract32(arg1, 0, 23);
+ uint64_t ret;
+ int32_t int_exp, int_mant;
+
+ if (fp_exp == 255) {
+ int_exp = 255;
+ int_mant = (fp_frac << 7);
+ } else if ((fp_exp == 0) && (fp_frac == 0)) {
+ int_exp = -127;
+ int_mant = 0;
+ } else if ((fp_exp == 0) && (fp_frac != 0)) {
+ int_exp = -126;
+ int_mant = (fp_frac << 7);
+ } else {
+ int_exp = fp_exp - 127;
+ int_mant = (fp_frac << 7);
+ int_mant |= (1 << 30);
+ }
+ ret = int_exp;
+ ret = ret << 32;
+ ret |= int_mant;
+
+ return ret;
+}
+
+uint64_t helper_dvinit_b_13(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
+{
+ uint64_t ret;
+ int32_t abs_sig_dividend, abs_divisor;
+
+ ret = sextract32(r1, 0, 32);
+ ret = ret << 24;
+ if (!((r1 & 0x80000000) == (r2 & 0x80000000))) {
+ ret |= 0xffffff;
+ }
+
+ abs_sig_dividend = abs((int32_t)r1) >> 8;
+ abs_divisor = abs((int32_t)r2);
+ /* calc overflow
+ ovf if (a/b >= 255) <=> (a/255 >= b) */
+ env->PSW_USB_V = (abs_sig_dividend >= abs_divisor);
+ env->PSW_USB_V = env->PSW_USB_V << 31;
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ env->PSW_USB_AV = 0;
+
+ return ret;
+}
+
+uint64_t helper_dvinit_b_131(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
+{
+ uint64_t ret = sextract32(r1, 0, 32);
+
+ ret = ret << 24;
+ if (!((r1 & 0x80000000) == (r2 & 0x80000000))) {
+ ret |= 0xffffff;
+ }
+ /* calc overflow */
+ env->PSW_USB_V = ((r2 == 0) || ((r2 == 0xffffffff) && (r1 == 0xffffff80)));
+ env->PSW_USB_V = env->PSW_USB_V << 31;
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ env->PSW_USB_AV = 0;
+
+ return ret;
+}
+
+uint64_t helper_dvinit_h_13(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
+{
+ uint64_t ret;
+ int32_t abs_sig_dividend, abs_divisor;
+
+ ret = sextract32(r1, 0, 32);
+ ret = ret << 16;
+ if (!((r1 & 0x80000000) == (r2 & 0x80000000))) {
+ ret |= 0xffff;
+ }
+
+ abs_sig_dividend = abs((int32_t)r1) >> 16;
+ abs_divisor = abs((int32_t)r2);
+ /* calc overflow
+ ovf if (a/b >= 0xffff) <=> (a/0xffff >= b) */
+ env->PSW_USB_V = (abs_sig_dividend >= abs_divisor);
+ env->PSW_USB_V = env->PSW_USB_V << 31;
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ env->PSW_USB_AV = 0;
+
+ return ret;
+}
+
+uint64_t helper_dvinit_h_131(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
+{
+ uint64_t ret = sextract32(r1, 0, 32);
+
+ ret = ret << 16;
+ if (!((r1 & 0x80000000) == (r2 & 0x80000000))) {
+ ret |= 0xffff;
+ }
+ /* calc overflow */
+ env->PSW_USB_V = ((r2 == 0) || ((r2 == 0xffffffff) && (r1 == 0xffff8000)));
+ env->PSW_USB_V = env->PSW_USB_V << 31;
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ env->PSW_USB_AV = 0;
+
+ return ret;
+}
+
+uint64_t helper_dvadj(uint64_t r1, uint32_t r2)
+{
+ int32_t x_sign = (r1 >> 63);
+ int32_t q_sign = x_sign ^ (r2 >> 31);
+ int32_t eq_pos = x_sign & ((r1 >> 32) == r2);
+ int32_t eq_neg = x_sign & ((r1 >> 32) == -r2);
+ uint32_t quotient;
+ uint64_t remainder;
+
+ if ((q_sign & ~eq_neg) | eq_pos) {
+ quotient = (r1 + 1) & 0xffffffff;
+ } else {
+ quotient = r1 & 0xffffffff;
+ }
+
+ if (eq_pos | eq_neg) {
+ remainder = 0;
+ } else {
+ remainder = (r1 & 0xffffffff00000000ull);
+ }
+ return remainder | quotient;
+}
+
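+/* One DVSTEP invocation performs 8 shift-and-subtract division iterations;
+ the 64-bit operand packs the partial remainder in its upper half and the
+ partial quotient/dividend in its lower half. */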
+uint64_t helper_dvstep(uint64_t r1, uint32_t r2)
+{
+ int32_t dividend_sign = extract64(r1, 63, 1);
+ int32_t divisor_sign = extract32(r2, 31, 1);
+ int32_t quotient_sign = (dividend_sign != divisor_sign);
+ int32_t addend, dividend_quotient, remainder;
+ int32_t i, temp;
+
+ if (quotient_sign) {
+ addend = r2;
+ } else {
+ addend = -r2;
+ }
+ dividend_quotient = (int32_t)r1;
+ remainder = (int32_t)(r1 >> 32);
+
+ for (i = 0; i < 8; i++) {
+ remainder = (remainder << 1) | extract32(dividend_quotient, 31, 1);
+ dividend_quotient <<= 1;
+ temp = remainder + addend;
+ if ((temp < 0) == dividend_sign) {
+ remainder = temp;
+ }
+ if (((temp < 0) == dividend_sign)) {
+ dividend_quotient = dividend_quotient | !quotient_sign;
+ } else {
+ dividend_quotient = dividend_quotient | quotient_sign;
+ }
+ }
+ return ((uint64_t)remainder << 32) | (uint32_t)dividend_quotient;
+}
+
+uint64_t helper_dvstep_u(uint64_t r1, uint32_t r2)
+{
+ int32_t dividend_quotient = extract64(r1, 0, 32);
+ int64_t remainder = extract64(r1, 32, 32);
+ int32_t i;
+ int64_t temp;
+ for (i = 0; i < 8; i++) {
+ remainder = (remainder << 1) | extract32(dividend_quotient, 31, 1);
+ dividend_quotient <<= 1;
+ temp = (remainder & 0xffffffff) - r2;
+ if (temp >= 0) {
+ remainder = temp;
+ }
+ dividend_quotient = dividend_quotient | !(temp < 0);
+ }
+ return ((uint64_t)remainder << 32) | (uint32_t)dividend_quotient;
+}
+
+uint64_t helper_divide(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
+{
+ int32_t quotient, remainder;
+ int32_t dividend = (int32_t)r1;
+ int32_t divisor = (int32_t)r2;
+
+ if (divisor == 0) {
+ if (dividend >= 0) {
+ quotient = 0x7fffffff;
+ remainder = 0;
+ } else {
+ quotient = 0x80000000;
+ remainder = 0;
+ }
+ env->PSW_USB_V = (1 << 31);
+ } else if ((divisor == 0xffffffff) && (dividend == 0x80000000)) {
+ quotient = 0x7fffffff;
+ remainder = 0;
+ env->PSW_USB_V = (1 << 31);
+ } else {
+ remainder = dividend % divisor;
+ quotient = (dividend - remainder)/divisor;
+ env->PSW_USB_V = 0;
+ }
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ env->PSW_USB_AV = 0;
+ return ((uint64_t)remainder << 32) | (uint32_t)quotient;
+}
+
+uint64_t helper_divide_u(CPUTriCoreState *env, uint32_t r1, uint32_t r2)
+{
+ uint32_t quotient, remainder;
+ uint32_t dividend = r1;
+ uint32_t divisor = r2;
+
+ if (divisor == 0) {
+ quotient = 0xffffffff;
+ remainder = 0;
+ env->PSW_USB_V = (1 << 31);
+ } else {
+ remainder = dividend % divisor;
+ quotient = (dividend - remainder)/divisor;
+ env->PSW_USB_V = 0;
+ }
+ env->PSW_USB_SV |= env->PSW_USB_V;
+ env->PSW_USB_AV = 0;
+ return ((uint64_t)remainder << 32) | quotient;
+}
+
+uint64_t helper_mul_h(uint32_t arg00, uint32_t arg01,
+ uint32_t arg10, uint32_t arg11, uint32_t n)
+{
+ uint32_t result0, result1;
+
+ int32_t sc1 = ((arg00 & 0xffff) == 0x8000) &&
+ ((arg10 & 0xffff) == 0x8000) && (n == 1);
+ int32_t sc0 = ((arg01 & 0xffff) == 0x8000) &&
+ ((arg11 & 0xffff) == 0x8000) && (n == 1);
+ if (sc1) {
+ result1 = 0x7fffffff;
+ } else {
+ result1 = (((uint32_t)(arg00 * arg10)) << n);
+ }
+ if (sc0) {
+ result0 = 0x7fffffff;
+ } else {
+ result0 = (((uint32_t)(arg01 * arg11)) << n);
+ }
+ return (((uint64_t)result1 << 32)) | result0;
+}
+
+uint64_t helper_mulm_h(uint32_t arg00, uint32_t arg01,
+ uint32_t arg10, uint32_t arg11, uint32_t n)
+{
+ uint64_t ret;
+ int64_t result0, result1;
+
+ int32_t sc1 = ((arg00 & 0xffff) == 0x8000) &&
+ ((arg10 & 0xffff) == 0x8000) && (n == 1);
+ int32_t sc0 = ((arg01 & 0xffff) == 0x8000) &&
+ ((arg11 & 0xffff) == 0x8000) && (n == 1);
+
+ if (sc1) {
+ result1 = 0x7fffffff;
+ } else {
+ result1 = (((int32_t)arg00 * (int32_t)arg10) << n);
+ }
+ if (sc0) {
+ result0 = 0x7fffffff;
+ } else {
+ result0 = (((int32_t)arg01 * (int32_t)arg11) << n);
+ }
+ ret = (result1 + result0);
+ ret = ret << 16;
+ return ret;
+}
+uint32_t helper_mulr_h(uint32_t arg00, uint32_t arg01,
+ uint32_t arg10, uint32_t arg11, uint32_t n)
+{
+ uint32_t result0, result1;
+
+ int32_t sc1 = ((arg00 & 0xffff) == 0x8000) &&
+ ((arg10 & 0xffff) == 0x8000) && (n == 1);
+ int32_t sc0 = ((arg01 & 0xffff) == 0x8000) &&
+ ((arg11 & 0xffff) == 0x8000) && (n == 1);
+
+ if (sc1) {
+ result1 = 0x7fffffff;
+ } else {
+ result1 = ((arg00 * arg10) << n) + 0x8000;
+ }
+ if (sc0) {
+ result0 = 0x7fffffff;
+ } else {
+ result0 = ((arg01 * arg11) << n) + 0x8000;
+ }
+ return (result1 & 0xffff0000) | (result0 >> 16);
+}
+
+uint32_t helper_crc32(uint32_t arg0, uint32_t arg1)
+{
+ uint8_t buf[4];
+ stl_be_p(buf, arg0);
+
+ return crc32(arg1, buf, 4);
+}
+
+/* context save area (CSA) related helpers */
+
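+/* PSW.CDC is a 7 bit field: a run of leading ones selects the width of the
+ call depth counter and the bits below the first zero hold the count itself.
+ Worked example of the mask computation used below: CDC = 7'b1000011 has one
+ leading one, so mask = (1 << 6) - 1 = 0x3f and COUNT = 3; CDC = 7'b1111111
+ disables call depth counting. */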
+static int cdc_increment(target_ulong *psw)
+{
+ if ((*psw & MASK_PSW_CDC) == 0x7f) {
+ return 0;
+ }
+
+ (*psw)++;
+ /* check for overflow */
+ int lo = clo32((*psw & MASK_PSW_CDC) << (32 - 7));
+ int mask = (1u << (7 - lo)) - 1;
+ int count = *psw & mask;
+ if (count == 0) {
+ (*psw)--;
+ return 1;
+ }
+ return 0;
+}
+
+static int cdc_decrement(target_ulong *psw)
+{
+ if ((*psw & MASK_PSW_CDC) == 0x7f) {
+ return 0;
+ }
+ /* check for underflow */
+ int lo = clo32((*psw & MASK_PSW_CDC) << (32 - 7));
+ int mask = (1u << (7 - lo)) - 1;
+ int count = *psw & mask;
+ if (count == 0) {
+ return 1;
+ }
+ (*psw)--;
+ return 0;
+}
+
+static bool cdc_zero(target_ulong *psw)
+{
+ int cdc = *psw & MASK_PSW_CDC;
+ /* Returns TRUE if PSW.CDC.COUNT == 0 or if PSW.CDC ==
+ 7'b1111111, otherwise returns FALSE. */
+ if (cdc == 0x7f) {
+ return true;
+ }
+ /* find CDC.COUNT */
+ int lo = clo32((*psw & MASK_PSW_CDC) << (32 - 7));
+ int mask = (1u << (7 - lo)) - 1;
+ int count = *psw & mask;
+ return count == 0;
+}
+
+static void save_context_upper(CPUTriCoreState *env, int ea)
+{
+ cpu_stl_data(env, ea, env->PCXI);
+ cpu_stl_data(env, ea+4, psw_read(env));
+ cpu_stl_data(env, ea+8, env->gpr_a[10]);
+ cpu_stl_data(env, ea+12, env->gpr_a[11]);
+ cpu_stl_data(env, ea+16, env->gpr_d[8]);
+ cpu_stl_data(env, ea+20, env->gpr_d[9]);
+ cpu_stl_data(env, ea+24, env->gpr_d[10]);
+ cpu_stl_data(env, ea+28, env->gpr_d[11]);
+ cpu_stl_data(env, ea+32, env->gpr_a[12]);
+ cpu_stl_data(env, ea+36, env->gpr_a[13]);
+ cpu_stl_data(env, ea+40, env->gpr_a[14]);
+ cpu_stl_data(env, ea+44, env->gpr_a[15]);
+ cpu_stl_data(env, ea+48, env->gpr_d[12]);
+ cpu_stl_data(env, ea+52, env->gpr_d[13]);
+ cpu_stl_data(env, ea+56, env->gpr_d[14]);
+ cpu_stl_data(env, ea+60, env->gpr_d[15]);
+}
+
+static void save_context_lower(CPUTriCoreState *env, int ea)
+{
+ cpu_stl_data(env, ea, env->PCXI);
+ cpu_stl_data(env, ea+4, env->gpr_a[11]);
+ cpu_stl_data(env, ea+8, env->gpr_a[2]);
+ cpu_stl_data(env, ea+12, env->gpr_a[3]);
+ cpu_stl_data(env, ea+16, env->gpr_d[0]);
+ cpu_stl_data(env, ea+20, env->gpr_d[1]);
+ cpu_stl_data(env, ea+24, env->gpr_d[2]);
+ cpu_stl_data(env, ea+28, env->gpr_d[3]);
+ cpu_stl_data(env, ea+32, env->gpr_a[4]);
+ cpu_stl_data(env, ea+36, env->gpr_a[5]);
+ cpu_stl_data(env, ea+40, env->gpr_a[6]);
+ cpu_stl_data(env, ea+44, env->gpr_a[7]);
+ cpu_stl_data(env, ea+48, env->gpr_d[4]);
+ cpu_stl_data(env, ea+52, env->gpr_d[5]);
+ cpu_stl_data(env, ea+56, env->gpr_d[6]);
+ cpu_stl_data(env, ea+60, env->gpr_d[7]);
+}
+
+static void restore_context_upper(CPUTriCoreState *env, int ea,
+ target_ulong *new_PCXI, target_ulong *new_PSW)
+{
+ *new_PCXI = cpu_ldl_data(env, ea);
+ *new_PSW = cpu_ldl_data(env, ea+4);
+ env->gpr_a[10] = cpu_ldl_data(env, ea+8);
+ env->gpr_a[11] = cpu_ldl_data(env, ea+12);
+ env->gpr_d[8] = cpu_ldl_data(env, ea+16);
+ env->gpr_d[9] = cpu_ldl_data(env, ea+20);
+ env->gpr_d[10] = cpu_ldl_data(env, ea+24);
+ env->gpr_d[11] = cpu_ldl_data(env, ea+28);
+ env->gpr_a[12] = cpu_ldl_data(env, ea+32);
+ env->gpr_a[13] = cpu_ldl_data(env, ea+36);
+ env->gpr_a[14] = cpu_ldl_data(env, ea+40);
+ env->gpr_a[15] = cpu_ldl_data(env, ea+44);
+ env->gpr_d[12] = cpu_ldl_data(env, ea+48);
+ env->gpr_d[13] = cpu_ldl_data(env, ea+52);
+ env->gpr_d[14] = cpu_ldl_data(env, ea+56);
+ env->gpr_d[15] = cpu_ldl_data(env, ea+60);
+}
+
+static void restore_context_lower(CPUTriCoreState *env, int ea,
+ target_ulong *ra, target_ulong *pcxi)
+{
+ *pcxi = cpu_ldl_data(env, ea);
+ *ra = cpu_ldl_data(env, ea+4);
+ env->gpr_a[2] = cpu_ldl_data(env, ea+8);
+ env->gpr_a[3] = cpu_ldl_data(env, ea+12);
+ env->gpr_d[0] = cpu_ldl_data(env, ea+16);
+ env->gpr_d[1] = cpu_ldl_data(env, ea+20);
+ env->gpr_d[2] = cpu_ldl_data(env, ea+24);
+ env->gpr_d[3] = cpu_ldl_data(env, ea+28);
+ env->gpr_a[4] = cpu_ldl_data(env, ea+32);
+ env->gpr_a[5] = cpu_ldl_data(env, ea+36);
+ env->gpr_a[6] = cpu_ldl_data(env, ea+40);
+ env->gpr_a[7] = cpu_ldl_data(env, ea+44);
+ env->gpr_d[4] = cpu_ldl_data(env, ea+48);
+ env->gpr_d[5] = cpu_ldl_data(env, ea+52);
+ env->gpr_d[6] = cpu_ldl_data(env, ea+56);
+ env->gpr_d[7] = cpu_ldl_data(env, ea+60);
+}
+
+void helper_call(CPUTriCoreState *env, uint32_t next_pc)
+{
+ target_ulong tmp_FCX;
+ target_ulong ea;
+ target_ulong new_FCX;
+ target_ulong psw;
+
+ psw = psw_read(env);
+ /* if (FCX == 0) trap(FCU); */
+ if (env->FCX == 0) {
+ /* FCU trap */
+ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_FCU, GETPC());
+ }
+ /* if (PSW.CDE) then if (cdc_increment()) then trap(CDO); */
+ if (psw & MASK_PSW_CDE) {
+ if (cdc_increment(&psw)) {
+ /* CDO trap */
+ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_CDO, GETPC());
+ }
+ }
+ /* PSW.CDE = 1;*/
+ psw |= MASK_PSW_CDE;
+ /* tmp_FCX = FCX; */
+ tmp_FCX = env->FCX;
+ /* EA = {FCX.FCXS, 6'b0, FCX.FCXO, 6'b0}; */
+ ea = ((env->FCX & MASK_FCX_FCXS) << 12) +
+ ((env->FCX & MASK_FCX_FCXO) << 6);
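+ /* FCXS occupies FCX bits 19:16 and FCXO bits 15:0, so this yields the byte
+ address (FCXS << 28) | (FCXO << 6); each context save area entry is
+ 64 bytes (16 words). */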
+ /* new_FCX = M(EA, word); */
+ new_FCX = cpu_ldl_data(env, ea);
+ /* M(EA, 16 * word) = {PCXI, PSW, A[10], A[11], D[8], D[9], D[10], D[11],
+ A[12], A[13], A[14], A[15], D[12], D[13], D[14],
+ D[15]}; */
+ save_context_upper(env, ea);
+
+ /* PCXI.PCPN = ICR.CCPN; */
+ env->PCXI = (env->PCXI & 0xffffff) +
+ ((env->ICR & MASK_ICR_CCPN) << 24);
+ /* PCXI.PIE = ICR.IE; */
+ env->PCXI = ((env->PCXI & ~MASK_PCXI_PIE) +
+ ((env->ICR & MASK_ICR_IE) << 15));
+ /* PCXI.UL = 1; */
+ env->PCXI |= MASK_PCXI_UL;
+
+ /* PCXI[19: 0] = FCX[19: 0]; */
+ env->PCXI = (env->PCXI & 0xfff00000) + (env->FCX & 0xfffff);
+ /* FCX[19: 0] = new_FCX[19: 0]; */
+ env->FCX = (env->FCX & 0xfff00000) + (new_FCX & 0xfffff);
+ /* A[11] = next_pc[31: 0]; */
+ env->gpr_a[11] = next_pc;
+
+ /* if (tmp_FCX == LCX) trap(FCD);*/
+ if (tmp_FCX == env->LCX) {
+ /* FCD trap */
+ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_FCD, GETPC());
+ }
+ psw_write(env, psw);
+}
+
+void helper_ret(CPUTriCoreState *env)
+{
+ target_ulong ea;
+ target_ulong new_PCXI;
+ target_ulong new_PSW, psw;
+
+ psw = psw_read(env);
+ /* if (PSW.CDE) then if (cdc_decrement()) then trap(CDU);*/
+ if (psw & MASK_PSW_CDE) {
+ if (cdc_decrement(&psw)) {
+ /* CDU trap */
+ psw_write(env, psw);
+ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_CDU, GETPC());
+ }
+ }
+ /* if (PCXI[19: 0] == 0) then trap(CSU); */
+ if ((env->PCXI & 0xfffff) == 0) {
+ /* CSU trap */
+ psw_write(env, psw);
+ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_CSU, GETPC());
+ }
+ /* if (PCXI.UL == 0) then trap(CTYP); */
+ if ((env->PCXI & MASK_PCXI_UL) == 0) {
+ /* CTYP trap */
+ cdc_increment(&psw); /* restore to the start of helper */
+ psw_write(env, psw);
+ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_CTYP, GETPC());
+ }
+ /* PC = {A11[31: 1], 1'b0}; */
+ env->PC = env->gpr_a[11] & 0xfffffffe;
+
+ /* EA = {PCXI.PCXS, 6'b0, PCXI.PCXO, 6'b0}; */
+ ea = ((env->PCXI & MASK_PCXI_PCXS) << 12) +
+ ((env->PCXI & MASK_PCXI_PCXO) << 6);
+ /* {new_PCXI, new_PSW, A[10], A[11], D[8], D[9], D[10], D[11], A[12],
+ A[13], A[14], A[15], D[12], D[13], D[14], D[15]} = M(EA, 16 * word); */
+ restore_context_upper(env, ea, &new_PCXI, &new_PSW);
+ /* M(EA, word) = FCX; */
+ cpu_stl_data(env, ea, env->FCX);
+ /* FCX[19: 0] = PCXI[19: 0]; */
+ env->FCX = (env->FCX & 0xfff00000) + (env->PCXI & 0x000fffff);
+ /* PCXI = new_PCXI; */
+ env->PCXI = new_PCXI;
+
+ if (tricore_feature(env, TRICORE_FEATURE_13)) {
+ /* PSW = new_PSW */
+ psw_write(env, new_PSW);
+ } else {
+ /* PSW = {new_PSW[31:26], PSW[25:24], new_PSW[23:0]}; */
+ psw_write(env, (new_PSW & ~(0x3000000)) + (psw & (0x3000000)));
+ }
+}
+
+void helper_bisr(CPUTriCoreState *env, uint32_t const9)
+{
+ target_ulong tmp_FCX;
+ target_ulong ea;
+ target_ulong new_FCX;
+
+ if (env->FCX == 0) {
+ /* FCU trap */
+ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_FCU, GETPC());
+ }
+
+ tmp_FCX = env->FCX;
+ ea = ((env->FCX & 0xf0000) << 12) + ((env->FCX & 0xffff) << 6);
+
+ /* new_FCX = M(EA, word); */
+ new_FCX = cpu_ldl_data(env, ea);
+ /* M(EA, 16 * word) = {PCXI, A[11], A[2], A[3], D[0], D[1], D[2], D[3], A[4]
+ , A[5], A[6], A[7], D[4], D[5], D[6], D[7]}; */
+ save_context_lower(env, ea);
+
+ /* PCXI.PCPN = ICR.CCPN */
+ env->PCXI = (env->PCXI & 0xffffff) +
+ ((env->ICR & MASK_ICR_CCPN) << 24);
+ /* PCXI.PIE = ICR.IE */
+ env->PCXI = ((env->PCXI & ~MASK_PCXI_PIE) +
+ ((env->ICR & MASK_ICR_IE) << 15));
+ /* PCXI.UL = 0 */
+ env->PCXI &= ~(MASK_PCXI_UL);
+ /* PCXI[19: 0] = FCX[19: 0] */
+ env->PCXI = (env->PCXI & 0xfff00000) + (env->FCX & 0xfffff);
+ /* FCX[19: 0] = new_FCX[19: 0] */
+ env->FCX = (env->FCX & 0xfff00000) + (new_FCX & 0xfffff);
+ /* ICR.IE = 1 */
+ env->ICR |= MASK_ICR_IE;
+
+ env->ICR |= const9; /* ICR.CCPN = const9[7: 0];*/
+
+ if (tmp_FCX == env->LCX) {
+ /* FCD trap */
+ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_FCD, GETPC());
+ }
+}
+
+void helper_rfe(CPUTriCoreState *env)
+{
+ target_ulong ea;
+ target_ulong new_PCXI;
+ target_ulong new_PSW;
+ /* if (PCXI[19: 0] == 0) then trap(CSU); */
+ if ((env->PCXI & 0xfffff) == 0) {
+ /* raise csu trap */
+ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_CSU, GETPC());
+ }
+ /* if (PCXI.UL == 0) then trap(CTYP); */
+ if ((env->PCXI & MASK_PCXI_UL) == 0) {
+ /* raise CTYP trap */
+ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_CTYP, GETPC());
+ }
+ /* if (!cdc_zero() AND PSW.CDE) then trap(NEST); */
+ if (!cdc_zero(&(env->PSW)) && (env->PSW & MASK_PSW_CDE)) {
+ /* raise NEST trap */
+ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_NEST, GETPC());
+ }
+ env->PC = env->gpr_a[11] & ~0x1;
+ /* ICR.IE = PCXI.PIE; */
+ env->ICR = (env->ICR & ~MASK_ICR_IE) + ((env->PCXI & MASK_PCXI_PIE) >> 15);
+ /* ICR.CCPN = PCXI.PCPN; */
+ env->ICR = (env->ICR & ~MASK_ICR_CCPN) +
+ ((env->PCXI & MASK_PCXI_PCPN) >> 24);
+ /*EA = {PCXI.PCXS, 6'b0, PCXI.PCXO, 6'b0};*/
+ ea = ((env->PCXI & MASK_PCXI_PCXS) << 12) +
+ ((env->PCXI & MASK_PCXI_PCXO) << 6);
+ /*{new_PCXI, PSW, A[10], A[11], D[8], D[9], D[10], D[11], A[12],
+ A[13], A[14], A[15], D[12], D[13], D[14], D[15]} = M(EA, 16 * word); */
+ restore_context_upper(env, ea, &new_PCXI, &new_PSW);
+ /* M(EA, word) = FCX;*/
+ cpu_stl_data(env, ea, env->FCX);
+ /* FCX[19: 0] = PCXI[19: 0]; */
+ env->FCX = (env->FCX & 0xfff00000) + (env->PCXI & 0x000fffff);
+ /* PCXI = new_PCXI; */
+ env->PCXI = new_PCXI;
+ /* write psw */
+ psw_write(env, new_PSW);
+}
+
+void helper_rfm(CPUTriCoreState *env)
+{
+ env->PC = (env->gpr_a[11] & ~0x1);
+ /* ICR.IE = PCXI.PIE; */
+ env->ICR = (env->ICR & ~MASK_ICR_IE) |
+ ((env->PCXI & MASK_PCXI_PIE) >> 15);
+ /* ICR.CCPN = PCXI.PCPN; */
+ env->ICR = (env->ICR & ~MASK_ICR_CCPN) |
+ ((env->PCXI & MASK_PCXI_PCPN) >> 24);
+ /* {PCXI, PSW, A[10], A[11]} = M(DCX, 4 * word); */
+ env->PCXI = cpu_ldl_data(env, env->DCX);
+ psw_write(env, cpu_ldl_data(env, env->DCX+4));
+ env->gpr_a[10] = cpu_ldl_data(env, env->DCX+8);
+ env->gpr_a[11] = cpu_ldl_data(env, env->DCX+12);
+
+ if (tricore_feature(env, TRICORE_FEATURE_131)) {
+ env->DBGTCR = 0;
+ }
+}
+
+void helper_ldlcx(CPUTriCoreState *env, uint32_t ea)
+{
+ uint32_t dummy;
+ /* insn doesn't load PCXI and RA */
+ restore_context_lower(env, ea, &dummy, &dummy);
+}
+
+void helper_lducx(CPUTriCoreState *env, uint32_t ea)
+{
+ uint32_t dummy;
+ /* insn doesn't load PCXI and PSW */
+ restore_context_upper(env, ea, &dummy, &dummy);
+}
+
+void helper_stlcx(CPUTriCoreState *env, uint32_t ea)
+{
+ save_context_lower(env, ea);
+}
+
+void helper_stucx(CPUTriCoreState *env, uint32_t ea)
+{
+ save_context_upper(env, ea);
+}
+
+void helper_svlcx(CPUTriCoreState *env)
+{
+ target_ulong tmp_FCX;
+ target_ulong ea;
+ target_ulong new_FCX;
+
+ if (env->FCX == 0) {
+ /* FCU trap */
+ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_FCU, GETPC());
+ }
+ /* tmp_FCX = FCX; */
+ tmp_FCX = env->FCX;
+ /* EA = {FCX.FCXS, 6'b0, FCX.FCXO, 6'b0}; */
+ ea = ((env->FCX & MASK_FCX_FCXS) << 12) +
+ ((env->FCX & MASK_FCX_FCXO) << 6);
+ /* new_FCX = M(EA, word); */
+ new_FCX = cpu_ldl_data(env, ea);
+ /* M(EA, 16 * word) = {PCXI, A[11], A[2], A[3], D[0], D[1], D[2], D[3],
+ A[4], A[5], A[6], A[7], D[4], D[5], D[6], D[7]}; */
+ save_context_lower(env, ea);
+
+ /* PCXI.PCPN = ICR.CCPN; */
+ env->PCXI = (env->PCXI & 0xffffff) +
+ ((env->ICR & MASK_ICR_CCPN) << 24);
+ /* PCXI.PIE = ICR.IE; */
+ env->PCXI = ((env->PCXI & ~MASK_PCXI_PIE) +
+ ((env->ICR & MASK_ICR_IE) << 15));
+ /* PCXI.UL = 0; */
+ env->PCXI &= ~MASK_PCXI_UL;
+
+ /* PCXI[19: 0] = FCX[19: 0]; */
+ env->PCXI = (env->PCXI & 0xfff00000) + (env->FCX & 0xfffff);
+ /* FCX[19: 0] = new_FCX[19: 0]; */
+ env->FCX = (env->FCX & 0xfff00000) + (new_FCX & 0xfffff);
+
+ /* if (tmp_FCX == LCX) trap(FCD);*/
+ if (tmp_FCX == env->LCX) {
+ /* FCD trap */
+ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_FCD, GETPC());
+ }
+}
+
+void helper_svucx(CPUTriCoreState *env)
+{
+ target_ulong tmp_FCX;
+ target_ulong ea;
+ target_ulong new_FCX;
+
+ if (env->FCX == 0) {
+ /* FCU trap */
+ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_FCU, GETPC());
+ }
+ /* tmp_FCX = FCX; */
+ tmp_FCX = env->FCX;
+ /* EA = {FCX.FCXS, 6'b0, FCX.FCXO, 6'b0}; */
+ ea = ((env->FCX & MASK_FCX_FCXS) << 12) +
+ ((env->FCX & MASK_FCX_FCXO) << 6);
+ /* new_FCX = M(EA, word); */
+ new_FCX = cpu_ldl_data(env, ea);
+ /* M(EA, 16 * word) = {PCXI, PSW, A[10], A[11], D[8], D[9], D[10], D[11],
+ A[12], A[13], A[14], A[15], D[12], D[13], D[14],
+ D[15]}; */
+ save_context_upper(env, ea);
+
+ /* PCXI.PCPN = ICR.CCPN; */
+ env->PCXI = (env->PCXI & 0xffffff) +
+ ((env->ICR & MASK_ICR_CCPN) << 24);
+ /* PCXI.PIE = ICR.IE; */
+ env->PCXI = ((env->PCXI & ~MASK_PCXI_PIE) +
+ ((env->ICR & MASK_ICR_IE) << 15));
+ /* PCXI.UL = 1; */
+ env->PCXI |= MASK_PCXI_UL;
+
+ /* PCXI[19: 0] = FCX[19: 0]; */
+ env->PCXI = (env->PCXI & 0xfff00000) + (env->FCX & 0xfffff);
+ /* FCX[19: 0] = new_FCX[19: 0]; */
+ env->FCX = (env->FCX & 0xfff00000) + (new_FCX & 0xfffff);
+
+ /* if (tmp_FCX == LCX) trap(FCD);*/
+ if (tmp_FCX == env->LCX) {
+ /* FCD trap */
+ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_FCD, GETPC());
+ }
+}
+
+void helper_rslcx(CPUTriCoreState *env)
+{
+ target_ulong ea;
+ target_ulong new_PCXI;
+ /* if (PCXI[19: 0] == 0) then trap(CSU); */
+ if ((env->PCXI & 0xfffff) == 0) {
+ /* CSU trap */
+ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_CSU, GETPC());
+ }
+ /* if (PCXI.UL == 1) then trap(CTYP); */
+ if ((env->PCXI & MASK_PCXI_UL) != 0) {
+ /* CTYP trap */
+ raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_CTYP, GETPC());
+ }
+ /* EA = {PCXI.PCXS, 6'b0, PCXI.PCXO, 6'b0}; */
+ ea = ((env->PCXI & MASK_PCXI_PCXS) << 12) +
+ ((env->PCXI & MASK_PCXI_PCXO) << 6);
+ /* {new_PCXI, A[11], A[2], A[3], D[0], D[1], D[2], D[3], A[4],
+ A[5], A[6], A[7], D[4], D[5], D[6], D[7]} = M(EA, 16 * word); */
+ restore_context_lower(env, ea, &env->gpr_a[11], &new_PCXI);
+ /* M(EA, word) = FCX; */
+ cpu_stl_data(env, ea, env->FCX);
+ /* FCX[19: 0] = PCXI[19: 0]; */
+ env->FCX = (env->FCX & 0xfff00000) + (env->PCXI & 0x000fffff);
+ /* PCXI = new_PCXI; */
+ env->PCXI = new_PCXI;
+}
+
+void helper_psw_write(CPUTriCoreState *env, uint32_t arg)
+{
+ psw_write(env, arg);
+}
+
+uint32_t helper_psw_read(CPUTriCoreState *env)
+{
+ return psw_read(env);
+}
+
+
+static inline void QEMU_NORETURN do_raise_exception_err(CPUTriCoreState *env,
+ uint32_t exception,
+ int error_code,
+ uintptr_t pc)
+{
+ CPUState *cs = CPU(tricore_env_get_cpu(env));
+ cs->exception_index = exception;
+ env->error_code = error_code;
+
+ if (pc) {
+ /* now we have a real cpu fault */
+ cpu_restore_state(cs, pc);
+ }
+
+ cpu_loop_exit(cs);
+}
+
+void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ int ret;
+ ret = cpu_tricore_handle_mmu_fault(cs, addr, access_type, mmu_idx);
+ if (ret) {
+ TriCoreCPU *cpu = TRICORE_CPU(cs);
+ CPUTriCoreState *env = &cpu->env;
+ do_raise_exception_err(env, cs->exception_index,
+ env->error_code, retaddr);
+ }
+}
diff --git a/target/tricore/translate.c b/target/tricore/translate.c
new file mode 100644
index 0000000000..36f734a662
--- /dev/null
+++ b/target/tricore/translate.c
@@ -0,0 +1,8869 @@
+/*
+ * TriCore emulation for qemu: main translation routines.
+ *
+ * Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "disas/disas.h"
+#include "exec/exec-all.h"
+#include "tcg-op.h"
+#include "exec/cpu_ldst.h"
+
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+
+#include "tricore-opcodes.h"
+#include "exec/log.h"
+
+/*
+ * TCG registers
+ */
+static TCGv cpu_PC;
+static TCGv cpu_PCXI;
+static TCGv cpu_PSW;
+static TCGv cpu_ICR;
+/* GPR registers */
+static TCGv cpu_gpr_a[16];
+static TCGv cpu_gpr_d[16];
+/* PSW Flag cache */
+static TCGv cpu_PSW_C;
+static TCGv cpu_PSW_V;
+static TCGv cpu_PSW_SV;
+static TCGv cpu_PSW_AV;
+static TCGv cpu_PSW_SAV;
+/* CPU env */
+static TCGv_env cpu_env;
+
+#include "exec/gen-icount.h"
+
+static const char *regnames_a[] = {
+ "a0" , "a1" , "a2" , "a3" , "a4" , "a5" ,
+ "a6" , "a7" , "a8" , "a9" , "sp" , "a11" ,
+ "a12" , "a13" , "a14" , "a15",
+ };
+
+static const char *regnames_d[] = {
+ "d0" , "d1" , "d2" , "d3" , "d4" , "d5" ,
+ "d6" , "d7" , "d8" , "d9" , "d10" , "d11" ,
+ "d12" , "d13" , "d14" , "d15",
+ };
+
+typedef struct DisasContext {
+ struct TranslationBlock *tb;
+ target_ulong pc, saved_pc, next_pc;
+ uint32_t opcode;
+ int singlestep_enabled;
+ /* Routine used to access memory */
+ int mem_idx;
+ uint32_t hflags, saved_hflags;
+ int bstate;
+} DisasContext;
+
+enum {
+ BS_NONE = 0,
+ BS_STOP = 1,
+ BS_BRANCH = 2,
+ BS_EXCP = 3,
+};
+
+enum {
+ MODE_LL = 0,
+ MODE_LU = 1,
+ MODE_UL = 2,
+ MODE_UU = 3,
+};
+
+void tricore_cpu_dump_state(CPUState *cs, FILE *f,
+ fprintf_function cpu_fprintf, int flags)
+{
+ TriCoreCPU *cpu = TRICORE_CPU(cs);
+ CPUTriCoreState *env = &cpu->env;
+ uint32_t psw;
+ int i;
+
+ psw = psw_read(env);
+
+ cpu_fprintf(f, "PC: " TARGET_FMT_lx, env->PC);
+ cpu_fprintf(f, " PSW: " TARGET_FMT_lx, psw);
+ cpu_fprintf(f, " ICR: " TARGET_FMT_lx, env->ICR);
+ cpu_fprintf(f, "\nPCXI: " TARGET_FMT_lx, env->PCXI);
+ cpu_fprintf(f, " FCX: " TARGET_FMT_lx, env->FCX);
+ cpu_fprintf(f, " LCX: " TARGET_FMT_lx, env->LCX);
+
+ for (i = 0; i < 16; ++i) {
+ if ((i & 3) == 0) {
+ cpu_fprintf(f, "\nGPR A%02d:", i);
+ }
+ cpu_fprintf(f, " " TARGET_FMT_lx, env->gpr_a[i]);
+ }
+ for (i = 0; i < 16; ++i) {
+ if ((i & 3) == 0) {
+ cpu_fprintf(f, "\nGPR D%02d:", i);
+ }
+ cpu_fprintf(f, " " TARGET_FMT_lx, env->gpr_d[i]);
+ }
+ cpu_fprintf(f, "\n");
+}
+
+/*
+ * Functions to generate micro-ops
+ */
+
+/* Macros for generating helpers */
+
+#define gen_helper_1arg(name, arg) do { \
+ TCGv_i32 helper_tmp = tcg_const_i32(arg); \
+ gen_helper_##name(cpu_env, helper_tmp); \
+ tcg_temp_free_i32(helper_tmp); \
+ } while (0)
+
+#define GEN_HELPER_LL(name, ret, arg0, arg1, n) do { \
+ TCGv arg00 = tcg_temp_new(); \
+ TCGv arg01 = tcg_temp_new(); \
+ TCGv arg11 = tcg_temp_new(); \
+ tcg_gen_sari_tl(arg00, arg0, 16); \
+ tcg_gen_ext16s_tl(arg01, arg0); \
+ tcg_gen_ext16s_tl(arg11, arg1); \
+ gen_helper_##name(ret, arg00, arg01, arg11, arg11, n); \
+ tcg_temp_free(arg00); \
+ tcg_temp_free(arg01); \
+ tcg_temp_free(arg11); \
+} while (0)
+
+#define GEN_HELPER_LU(name, ret, arg0, arg1, n) do { \
+ TCGv arg00 = tcg_temp_new(); \
+ TCGv arg01 = tcg_temp_new(); \
+ TCGv arg10 = tcg_temp_new(); \
+ TCGv arg11 = tcg_temp_new(); \
+ tcg_gen_sari_tl(arg00, arg0, 16); \
+ tcg_gen_ext16s_tl(arg01, arg0); \
+ tcg_gen_sari_tl(arg11, arg1, 16); \
+ tcg_gen_ext16s_tl(arg10, arg1); \
+ gen_helper_##name(ret, arg00, arg01, arg10, arg11, n); \
+ tcg_temp_free(arg00); \
+ tcg_temp_free(arg01); \
+ tcg_temp_free(arg10); \
+ tcg_temp_free(arg11); \
+} while (0)
+
+#define GEN_HELPER_UL(name, ret, arg0, arg1, n) do { \
+ TCGv arg00 = tcg_temp_new(); \
+ TCGv arg01 = tcg_temp_new(); \
+ TCGv arg10 = tcg_temp_new(); \
+ TCGv arg11 = tcg_temp_new(); \
+ tcg_gen_sari_tl(arg00, arg0, 16); \
+ tcg_gen_ext16s_tl(arg01, arg0); \
+ tcg_gen_sari_tl(arg10, arg1, 16); \
+ tcg_gen_ext16s_tl(arg11, arg1); \
+ gen_helper_##name(ret, arg00, arg01, arg10, arg11, n); \
+ tcg_temp_free(arg00); \
+ tcg_temp_free(arg01); \
+ tcg_temp_free(arg10); \
+ tcg_temp_free(arg11); \
+} while (0)
+
+#define GEN_HELPER_UU(name, ret, arg0, arg1, n) do { \
+ TCGv arg00 = tcg_temp_new(); \
+ TCGv arg01 = tcg_temp_new(); \
+ TCGv arg11 = tcg_temp_new(); \
+ tcg_gen_sari_tl(arg01, arg0, 16); \
+ tcg_gen_ext16s_tl(arg00, arg0); \
+ tcg_gen_sari_tl(arg11, arg1, 16); \
+ gen_helper_##name(ret, arg00, arg01, arg11, arg11, n); \
+ tcg_temp_free(arg00); \
+ tcg_temp_free(arg01); \
+ tcg_temp_free(arg11); \
+} while (0)
+
+#define GEN_HELPER_RRR(name, rl, rh, al1, ah1, arg2) do { \
+ TCGv_i64 ret = tcg_temp_new_i64(); \
+ TCGv_i64 arg1 = tcg_temp_new_i64(); \
+ \
+ tcg_gen_concat_i32_i64(arg1, al1, ah1); \
+ gen_helper_##name(ret, arg1, arg2); \
+ tcg_gen_extr_i64_i32(rl, rh, ret); \
+ \
+ tcg_temp_free_i64(ret); \
+ tcg_temp_free_i64(arg1); \
+} while (0)
+
+#define GEN_HELPER_RR(name, rl, rh, arg1, arg2) do { \
+ TCGv_i64 ret = tcg_temp_new_i64(); \
+ \
+ gen_helper_##name(ret, cpu_env, arg1, arg2); \
+ tcg_gen_extr_i64_i32(rl, rh, ret); \
+ \
+ tcg_temp_free_i64(ret); \
+} while (0)
+
+#define EA_ABS_FORMAT(con) (((con & 0x3C000) << 14) + (con & 0x3FFF))
+#define EA_B_ABSOLUT(con) (((offset & 0xf00000) << 8) | \
+ ((offset & 0x0fffff) << 1))
+
+/* For instructions that use two 32-bit registers as one 64-bit register, the
+ first register number needs to be even. Otherwise we trap. */
+static inline void generate_trap(DisasContext *ctx, int class, int tin);
+#define CHECK_REG_PAIR(reg) do { \
+ if (reg & 0x1) { \
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_OPD); \
+ } \
+} while (0)
+
+/* Functions for load/save to/from memory */
+
+static inline void gen_offset_ld(DisasContext *ctx, TCGv r1, TCGv r2,
+ int16_t con, TCGMemOp mop)
+{
+ TCGv temp = tcg_temp_new();
+ tcg_gen_addi_tl(temp, r2, con);
+ tcg_gen_qemu_ld_tl(r1, temp, ctx->mem_idx, mop);
+ tcg_temp_free(temp);
+}
+
+static inline void gen_offset_st(DisasContext *ctx, TCGv r1, TCGv r2,
+ int16_t con, TCGMemOp mop)
+{
+ TCGv temp = tcg_temp_new();
+ tcg_gen_addi_tl(temp, r2, con);
+ tcg_gen_qemu_st_tl(r1, temp, ctx->mem_idx, mop);
+ tcg_temp_free(temp);
+}
+
+static void gen_st_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx)
+{
+ TCGv_i64 temp = tcg_temp_new_i64();
+
+ tcg_gen_concat_i32_i64(temp, rl, rh);
+ tcg_gen_qemu_st_i64(temp, address, ctx->mem_idx, MO_LEQ);
+
+ tcg_temp_free_i64(temp);
+}
+
+static void gen_offset_st_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con,
+ DisasContext *ctx)
+{
+ TCGv temp = tcg_temp_new();
+ tcg_gen_addi_tl(temp, base, con);
+ gen_st_2regs_64(rh, rl, temp, ctx);
+ tcg_temp_free(temp);
+}
+
+static void gen_ld_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx)
+{
+ TCGv_i64 temp = tcg_temp_new_i64();
+
+ tcg_gen_qemu_ld_i64(temp, address, ctx->mem_idx, MO_LEQ);
+ /* write back to two 32 bit regs */
+ tcg_gen_extr_i64_i32(rl, rh, temp);
+
+ tcg_temp_free_i64(temp);
+}
+
+static void gen_offset_ld_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con,
+ DisasContext *ctx)
+{
+ TCGv temp = tcg_temp_new();
+ tcg_gen_addi_tl(temp, base, con);
+ gen_ld_2regs_64(rh, rl, temp, ctx);
+ tcg_temp_free(temp);
+}
+
+static void gen_st_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off,
+ TCGMemOp mop)
+{
+ TCGv temp = tcg_temp_new();
+ tcg_gen_addi_tl(temp, r2, off);
+ tcg_gen_qemu_st_tl(r1, temp, ctx->mem_idx, mop);
+ tcg_gen_mov_tl(r2, temp);
+ tcg_temp_free(temp);
+}
+
+static void gen_ld_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off,
+ TCGMemOp mop)
+{
+ TCGv temp = tcg_temp_new();
+ tcg_gen_addi_tl(temp, r2, off);
+ tcg_gen_qemu_ld_tl(r1, temp, ctx->mem_idx, mop);
+ tcg_gen_mov_tl(r2, temp);
+ tcg_temp_free(temp);
+}
+
+/* M(EA, word) = (M(EA, word) & ~E[a][63:32]) | (E[a][31:0] & E[a][63:32]); */
+static void gen_ldmst(DisasContext *ctx, int ereg, TCGv ea)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+
+ CHECK_REG_PAIR(ereg);
+ /* temp = (M(EA, word) */
+ tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
+ /* temp = temp & ~E[a][63:32]) */
+ tcg_gen_andc_tl(temp, temp, cpu_gpr_d[ereg+1]);
+ /* temp2 = (E[a][31:0] & E[a][63:32]); */
+ tcg_gen_and_tl(temp2, cpu_gpr_d[ereg], cpu_gpr_d[ereg+1]);
+ /* temp = temp | temp2; */
+ tcg_gen_or_tl(temp, temp, temp2);
+ /* M(EA, word) = temp; */
+ tcg_gen_qemu_st_tl(temp, ea, ctx->mem_idx, MO_LEUL);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
+/* tmp = M(EA, word);
+ M(EA, word) = D[a];
+ D[a] = tmp[31:0];*/
+static void gen_swap(DisasContext *ctx, int reg, TCGv ea)
+{
+ TCGv temp = tcg_temp_new();
+
+ tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
+ tcg_gen_qemu_st_tl(cpu_gpr_d[reg], ea, ctx->mem_idx, MO_LEUL);
+ tcg_gen_mov_tl(cpu_gpr_d[reg], temp);
+
+ tcg_temp_free(temp);
+}
+
+static void gen_cmpswap(DisasContext *ctx, int reg, TCGv ea)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+ tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
+ tcg_gen_movcond_tl(TCG_COND_EQ, temp2, cpu_gpr_d[reg+1], temp,
+ cpu_gpr_d[reg], temp);
+ tcg_gen_qemu_st_tl(temp2, ea, ctx->mem_idx, MO_LEUL);
+ tcg_gen_mov_tl(cpu_gpr_d[reg], temp);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
+static void gen_swapmsk(DisasContext *ctx, int reg, TCGv ea)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+ TCGv temp3 = tcg_temp_new();
+
+ tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL);
+ tcg_gen_and_tl(temp2, cpu_gpr_d[reg], cpu_gpr_d[reg+1]);
+ tcg_gen_andc_tl(temp3, temp, cpu_gpr_d[reg+1]);
+ tcg_gen_or_tl(temp2, temp2, temp3);
+ tcg_gen_qemu_st_tl(temp2, ea, ctx->mem_idx, MO_LEUL);
+ tcg_gen_mov_tl(cpu_gpr_d[reg], temp);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free(temp3);
+}
+
+
+/* We generate loads and stores to the core special function registers (CSFRs)
+ through the functions gen_mfcr and gen_mtcr. To handle access permissions, we
+ use the three macros R, A and E, which allow read-only, all, and endinit
+ protected access. These macros also specify in which ISA version the CSFR
+ was introduced. */
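+/* A csfr.def entry is simply one of these macros instantiated with the CSFR
+ address, the CPUTriCoreState field and the feature gate, e.g. (illustrative):
+ A(0xfe00, PCXI, TRICORE_FEATURE_13)
+ which expands to a case label in the switches below. */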
+#define R(ADDRESS, REG, FEATURE) \
+ case ADDRESS: \
+ if (tricore_feature(env, FEATURE)) { \
+ tcg_gen_ld_tl(ret, cpu_env, offsetof(CPUTriCoreState, REG)); \
+ } \
+ break;
+#define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
+#define E(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE)
+static inline void gen_mfcr(CPUTriCoreState *env, TCGv ret, int32_t offset)
+{
+ /* since we're caching PSW make this a special case */
+ if (offset == 0xfe04) {
+ gen_helper_psw_read(ret, cpu_env);
+ } else {
+ switch (offset) {
+#include "csfr.def"
+ }
+ }
+}
+#undef R
+#undef A
+#undef E
+
+#define R(ADDRESS, REG, FEATURE) /* don't generate writes to a read-only reg,
+ since no exception occurs */
+#define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE) \
+ case ADDRESS: \
+ if (tricore_feature(env, FEATURE)) { \
+ tcg_gen_st_tl(r1, cpu_env, offsetof(CPUTriCoreState, REG)); \
+ } \
+ break;
+/* Endinit protected registers
+ TODO: Since the endinit bit lives in a register of a not-yet-implemented
+ watchdog device, we handle endinit protected registers like
+ all-access registers for now. */
+#define E(ADDRESS, REG, FEATURE) A(ADDRESS, REG, FEATURE)
+static inline void gen_mtcr(CPUTriCoreState *env, DisasContext *ctx, TCGv r1,
+ int32_t offset)
+{
+ if ((ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_SM) {
+ /* since we're caching PSW make this a special case */
+ if (offset == 0xfe04) {
+ gen_helper_psw_write(cpu_env, r1);
+ } else {
+ switch (offset) {
+#include "csfr.def"
+ }
+ }
+ } else {
+ /* generate privilege trap */
+ }
+}
+
+/* Functions for arithmetic instructions */
+
+static inline void gen_add_d(TCGv ret, TCGv r1, TCGv r2)
+{
+ TCGv t0 = tcg_temp_new_i32();
+ TCGv result = tcg_temp_new_i32();
+ /* Addition and set V/SV bits */
+ tcg_gen_add_tl(result, r1, r2);
+ /* calc V bit */
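+ /* signed overflow happened iff both operands have the same sign and the
+ result's sign differs: bit 31 of (result ^ r1) & ~(r1 ^ r2) */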
+ tcg_gen_xor_tl(cpu_PSW_V, result, r1);
+ tcg_gen_xor_tl(t0, r1, r2);
+ tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
+ /* Calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV/SAV bits */
+ tcg_gen_add_tl(cpu_PSW_AV, result, result);
+ tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
+ /* calc SAV */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ /* write back result */
+ tcg_gen_mov_tl(ret, result);
+
+ tcg_temp_free(result);
+ tcg_temp_free(t0);
+}
+
+static inline void
+gen_add64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 result = tcg_temp_new_i64();
+
+ tcg_gen_add_i64(result, r1, r2);
+ /* calc v bit */
+ tcg_gen_xor_i64(t1, result, r1);
+ tcg_gen_xor_i64(t0, r1, r2);
+ tcg_gen_andc_i64(t1, t1, t0);
+ tcg_gen_extrh_i64_i32(cpu_PSW_V, t1);
+ /* calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* calc AV/SAV bits */
+ tcg_gen_extrh_i64_i32(temp, result);
+ tcg_gen_add_tl(cpu_PSW_AV, temp, temp);
+ tcg_gen_xor_tl(cpu_PSW_AV, temp, cpu_PSW_AV);
+ /* calc SAV */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ /* write back result */
+ tcg_gen_mov_i64(ret, result);
+
+ tcg_temp_free(temp);
+ tcg_temp_free_i64(result);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+}
+
+static inline void
+gen_addsub64_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+ TCGv r3, void(*op1)(TCGv, TCGv, TCGv),
+ void(*op2)(TCGv, TCGv, TCGv))
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+ TCGv temp3 = tcg_temp_new();
+ TCGv temp4 = tcg_temp_new();
+
+ (*op1)(temp, r1_low, r2);
+ /* calc V0 bit */
+ tcg_gen_xor_tl(temp2, temp, r1_low);
+ tcg_gen_xor_tl(temp3, r1_low, r2);
+ if (op1 == tcg_gen_add_tl) {
+ tcg_gen_andc_tl(temp2, temp2, temp3);
+ } else {
+ tcg_gen_and_tl(temp2, temp2, temp3);
+ }
+
+ (*op2)(temp3, r1_high, r3);
+ /* calc V1 bit */
+ tcg_gen_xor_tl(cpu_PSW_V, temp3, r1_high);
+ tcg_gen_xor_tl(temp4, r1_high, r3);
+ if (op2 == tcg_gen_add_tl) {
+ tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, temp4);
+ } else {
+ tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp4);
+ }
+ /* combine V0/V1 bits */
+ tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp2);
+ /* calc sv bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* write result */
+ tcg_gen_mov_tl(ret_low, temp);
+ tcg_gen_mov_tl(ret_high, temp3);
+ /* calc AV bit */
+ tcg_gen_add_tl(temp, ret_low, ret_low);
+ tcg_gen_xor_tl(temp, temp, ret_low);
+ tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
+ tcg_gen_xor_tl(cpu_PSW_AV, cpu_PSW_AV, ret_high);
+ tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp);
+ /* calc SAV bit */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free(temp3);
+ tcg_temp_free(temp4);
+}
+
+/* ret = r2 + (r1 * r3); */
+static inline void gen_madd32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
+{
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+
+ tcg_gen_ext_i32_i64(t1, r1);
+ tcg_gen_ext_i32_i64(t2, r2);
+ tcg_gen_ext_i32_i64(t3, r3);
+
+ tcg_gen_mul_i64(t1, t1, t3);
+ tcg_gen_add_i64(t1, t2, t1);
+
+ tcg_gen_extrl_i64_i32(ret, t1);
+ /* calc V
+ t1 > 0x7fffffff */
+ tcg_gen_setcondi_i64(TCG_COND_GT, t3, t1, 0x7fffffffLL);
+ /* t1 < -0x80000000 */
+ tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL);
+ tcg_gen_or_i64(t2, t2, t3);
+ tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
+ tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ /* Calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV/SAV bits */
+ tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
+ tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
+ /* calc SAV */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t3);
+}
+
+static inline void gen_maddi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_madd32_d(ret, r1, r2, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void
+gen_madd64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+ TCGv r3)
+{
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ TCGv t4 = tcg_temp_new();
+
+ tcg_gen_muls2_tl(t1, t2, r1, r3);
+ /* only the add can overflow */
+ tcg_gen_add2_tl(t3, t4, r2_low, r2_high, t1, t2);
+ /* calc V bit */
+ tcg_gen_xor_tl(cpu_PSW_V, t4, r2_high);
+ tcg_gen_xor_tl(t1, r2_high, t2);
+ tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t1);
+ /* Calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV/SAV bits */
+ tcg_gen_add_tl(cpu_PSW_AV, t4, t4);
+ tcg_gen_xor_tl(cpu_PSW_AV, t4, cpu_PSW_AV);
+ /* calc SAV */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ /* write back the result */
+ tcg_gen_mov_tl(ret_low, t3);
+ tcg_gen_mov_tl(ret_high, t4);
+
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ tcg_temp_free(t3);
+ tcg_temp_free(t4);
+}
+
+static inline void
+gen_maddu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+ TCGv r3)
+{
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+
+ tcg_gen_extu_i32_i64(t1, r1);
+ tcg_gen_concat_i32_i64(t2, r2_low, r2_high);
+ tcg_gen_extu_i32_i64(t3, r3);
+
+ tcg_gen_mul_i64(t1, t1, t3);
+ tcg_gen_add_i64(t2, t2, t1);
+ /* write back result */
+ tcg_gen_extr_i64_i32(ret_low, ret_high, t2);
+ /* only the add overflows, if t2 < t1
+ calc V bit */
+ tcg_gen_setcond_i64(TCG_COND_LTU, t2, t2, t1);
+ tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
+ tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ /* Calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV/SAV bits */
+ tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
+ tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
+ /* calc SAV */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t3);
+}
+
+static inline void
+gen_maddi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+ int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_madd64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void
+gen_maddui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+ int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_maddu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void
+gen_madd_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+ TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv temp2 = tcg_temp_new();
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_extr_i64_i32(temp, temp2, temp64);
+ gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2,
+ tcg_gen_add_tl, tcg_gen_add_tl);
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_maddsu_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+ TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv temp2 = tcg_temp_new();
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_extr_i64_i32(temp, temp2, temp64);
+ gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2,
+ tcg_gen_sub_tl, tcg_gen_add_tl);
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_maddsum_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+ TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ TCGv_i64 temp64_2 = tcg_temp_new_i64();
+ TCGv_i64 temp64_3 = tcg_temp_new_i64();
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_concat_i32_i64(temp64_3, r1_low, r1_high);
+ tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */
+ tcg_gen_ext32s_i64(temp64, temp64); /* low */
+ tcg_gen_sub_i64(temp64, temp64_2, temp64);
+ tcg_gen_shli_i64(temp64, temp64, 16);
+
+ gen_add64_d(temp64_2, temp64_3, temp64);
+ /* write back result */
+ tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_2);
+
+ tcg_temp_free(temp);
+ tcg_temp_free_i64(temp64);
+ tcg_temp_free_i64(temp64_2);
+ tcg_temp_free_i64(temp64_3);
+}
+
+static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2);
+
+static inline void
+gen_madds_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+ TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv temp2 = tcg_temp_new();
+ TCGv temp3 = tcg_temp_new();
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_extr_i64_i32(temp, temp2, temp64);
+ gen_adds(ret_low, r1_low, temp);
+ tcg_gen_mov_tl(temp, cpu_PSW_V);
+ tcg_gen_mov_tl(temp3, cpu_PSW_AV);
+ gen_adds(ret_high, r1_high, temp2);
+ /* combine v bits */
+ tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
+ /* combine av bits */
+ tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free(temp3);
+ tcg_temp_free_i64(temp64);
+
+}
+
+static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2);
+
+static inline void
+gen_maddsus_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+ TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv temp2 = tcg_temp_new();
+ TCGv temp3 = tcg_temp_new();
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_extr_i64_i32(temp, temp2, temp64);
+ gen_subs(ret_low, r1_low, temp);
+ tcg_gen_mov_tl(temp, cpu_PSW_V);
+ tcg_gen_mov_tl(temp3, cpu_PSW_AV);
+ gen_adds(ret_high, r1_high, temp2);
+ /* combine v bits */
+ tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
+ /* combine av bits */
+ tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free(temp3);
+ tcg_temp_free_i64(temp64);
+
+}
+
+static inline void
+gen_maddsums_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+ TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ TCGv_i64 temp64_2 = tcg_temp_new_i64();
+
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */
+ tcg_gen_ext32s_i64(temp64, temp64); /* low */
+ tcg_gen_sub_i64(temp64, temp64_2, temp64);
+ tcg_gen_shli_i64(temp64, temp64, 16);
+ tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
+
+ gen_helper_add64_ssov(temp64, cpu_env, temp64_2, temp64);
+ tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
+
+ tcg_temp_free(temp);
+ tcg_temp_free_i64(temp64);
+ tcg_temp_free_i64(temp64_2);
+}
+
+
+static inline void
+gen_maddm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+ TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ TCGv_i64 temp64_2 = tcg_temp_new_i64();
+ TCGv_i64 temp64_3 = tcg_temp_new_i64();
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
+ gen_add64_d(temp64_3, temp64_2, temp64);
+ /* write back result */
+ tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_3);
+
+ tcg_temp_free(temp);
+ tcg_temp_free_i64(temp64);
+ tcg_temp_free_i64(temp64_2);
+ tcg_temp_free_i64(temp64_3);
+}
+
+static inline void
+gen_maddms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+ TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ TCGv_i64 temp64_2 = tcg_temp_new_i64();
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
+ gen_helper_add64_ssov(temp64, cpu_env, temp64_2, temp64);
+ tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
+
+ tcg_temp_free(temp);
+ tcg_temp_free_i64(temp64);
+ tcg_temp_free_i64(temp64_2);
+}
+
+static inline void
+gen_maddr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n,
+ uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ gen_helper_addr_h(ret, cpu_env, temp64, r1_low, r1_high);
+
+ tcg_temp_free(temp);
+ tcg_temp_free_i64(temp64);
+}
+
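+/* 32-bit rounded variant: r1 is split into its halfwords (low half shifted
+   into bits [31:16], high half masked in place) and handed to the 64-bit
+   helper above as an accumulator pair. */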
+static inline void
+gen_maddr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+
+ tcg_gen_andi_tl(temp2, r1, 0xffff0000);
+ tcg_gen_shli_tl(temp, r1, 16);
+ gen_maddr64_h(ret, temp, temp2, r2, r3, n, mode);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
+static inline void
+gen_maddsur32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv temp2 = tcg_temp_new();
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_andi_tl(temp2, r1, 0xffff0000);
+ tcg_gen_shli_tl(temp, r1, 16);
+ gen_helper_addsur_h(ret, cpu_env, temp64, temp, temp2);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free_i64(temp64);
+}
+
+
+static inline void
+gen_maddr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3,
+ uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ gen_helper_addr_h_ssov(ret, cpu_env, temp64, r1_low, r1_high);
+
+ tcg_temp_free(temp);
+ tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_maddr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+
+ tcg_gen_andi_tl(temp2, r1, 0xffff0000);
+ tcg_gen_shli_tl(temp, r1, 16);
+ gen_maddr64s_h(ret, temp, temp2, r2, r3, n, mode);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
+static inline void
+gen_maddsur32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv temp2 = tcg_temp_new();
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_andi_tl(temp2, r1, 0xffff0000);
+ tcg_gen_shli_tl(temp, r1, 16);
+ gen_helper_addsur_h_ssov(ret, cpu_env, temp64, temp, temp2);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_maddr_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
+{
+ TCGv temp = tcg_const_i32(n);
+ gen_helper_maddr_q(ret, cpu_env, r1, r2, r3, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void
+gen_maddrs_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
+{
+ TCGv temp = tcg_const_i32(n);
+ gen_helper_maddr_q_ssov(ret, cpu_env, r1, r2, r3, temp);
+ tcg_temp_free(temp);
+}
+
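+/* Q-format 32-bit multiply-add: the 64-bit product is shifted left by n,
+   arithmetically shifted right by up_shift, added to arg1, and V is derived
+   from a 32-bit range check on the 64-bit sum. */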
+static inline void
+gen_madd32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
+ uint32_t up_shift, CPUTriCoreState *env)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+ TCGv temp3 = tcg_temp_new();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+
+ tcg_gen_ext_i32_i64(t2, arg2);
+ tcg_gen_ext_i32_i64(t3, arg3);
+
+ tcg_gen_mul_i64(t2, t2, t3);
+ tcg_gen_shli_i64(t2, t2, n);
+
+ tcg_gen_ext_i32_i64(t1, arg1);
+ tcg_gen_sari_i64(t2, t2, up_shift);
+
+ tcg_gen_add_i64(t3, t1, t2);
+ tcg_gen_extrl_i64_i32(temp3, t3);
+ /* calc v bit */
+ tcg_gen_setcondi_i64(TCG_COND_GT, t1, t3, 0x7fffffffLL);
+ tcg_gen_setcondi_i64(TCG_COND_LT, t2, t3, -0x80000000LL);
+ tcg_gen_or_i64(t1, t1, t2);
+ tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
+ tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+    /* We produce an overflow on the host if the multiplication before was
+       (0x80000000 * 0x80000000) << 1. If this is the case, we negate
+       the ovf. */
+ if (n == 1) {
+ tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000);
+ tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3);
+ tcg_gen_and_tl(temp, temp, temp2);
+ tcg_gen_shli_tl(temp, temp, 31);
+ /* negate v bit, if special condition */
+ tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp);
+ }
+ /* Calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV/SAV bits */
+ tcg_gen_add_tl(cpu_PSW_AV, temp3, temp3);
+ tcg_gen_xor_tl(cpu_PSW_AV, temp3, cpu_PSW_AV);
+ /* calc SAV */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ /* write back result */
+ tcg_gen_mov_tl(ret, temp3);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free(temp3);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t3);
+}
+
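+/* 16x16 -> 32 Q-format multiply-add: for n == 1 the product is doubled and
+   the 0x8000 * 0x8000 special case (which would give 0x80000000) is
+   corrected by subtracting one before the add. */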
+static inline void
+gen_m16add32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+ if (n == 0) {
+ tcg_gen_mul_tl(temp, arg2, arg3);
+ } else { /* n is expected to be 1 */
+ tcg_gen_mul_tl(temp, arg2, arg3);
+ tcg_gen_shli_tl(temp, temp, 1);
+ /* catch special case r1 = r2 = 0x8000 */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
+ tcg_gen_sub_tl(temp, temp, temp2);
+ }
+ gen_add_d(ret, arg1, temp);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
+static inline void
+gen_m16adds32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+ if (n == 0) {
+ tcg_gen_mul_tl(temp, arg2, arg3);
+ } else { /* n is expected to be 1 */
+ tcg_gen_mul_tl(temp, arg2, arg3);
+ tcg_gen_shli_tl(temp, temp, 1);
+ /* catch special case r1 = r2 = 0x8000 */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
+ tcg_gen_sub_tl(temp, temp, temp2);
+ }
+ gen_adds(ret, arg1, temp);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
+static inline void
+gen_m16add64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
+ TCGv arg3, uint32_t n)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+
+ if (n == 0) {
+ tcg_gen_mul_tl(temp, arg2, arg3);
+ } else { /* n is expected to be 1 */
+ tcg_gen_mul_tl(temp, arg2, arg3);
+ tcg_gen_shli_tl(temp, temp, 1);
+ /* catch special case r1 = r2 = 0x8000 */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
+ tcg_gen_sub_tl(temp, temp, temp2);
+ }
+ tcg_gen_ext_i32_i64(t2, temp);
+ tcg_gen_shli_i64(t2, t2, 16);
+ tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
+ gen_add64_d(t3, t1, t2);
+ /* write back result */
+ tcg_gen_extr_i64_i32(rl, rh, t3);
+
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t3);
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
+static inline void
+gen_m16adds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
+ TCGv arg3, uint32_t n)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+
+ if (n == 0) {
+ tcg_gen_mul_tl(temp, arg2, arg3);
+ } else { /* n is expected to be 1 */
+ tcg_gen_mul_tl(temp, arg2, arg3);
+ tcg_gen_shli_tl(temp, temp, 1);
+ /* catch special case r1 = r2 = 0x8000 */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
+ tcg_gen_sub_tl(temp, temp, temp2);
+ }
+ tcg_gen_ext_i32_i64(t2, temp);
+ tcg_gen_shli_i64(t2, t2, 16);
+ tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
+
+ gen_helper_add64_ssov(t1, cpu_env, t1, t2);
+ tcg_gen_extr_i64_i32(rl, rh, t1);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+}
+
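+/* Q-format 64-bit multiply-add: the (optionally doubled) product is added to
+   the 64-bit accumulator; V is the signed-add overflow of the 64-bit sum,
+   with the 0x80000000 * 0x80000000 case corrected as above. */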
+static inline void
+gen_madd64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
+ TCGv arg3, uint32_t n, CPUTriCoreState *env)
+{
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+ TCGv_i64 t4 = tcg_temp_new_i64();
+ TCGv temp, temp2;
+
+ tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
+ tcg_gen_ext_i32_i64(t2, arg2);
+ tcg_gen_ext_i32_i64(t3, arg3);
+
+ tcg_gen_mul_i64(t2, t2, t3);
+ if (n != 0) {
+ tcg_gen_shli_i64(t2, t2, 1);
+ }
+ tcg_gen_add_i64(t4, t1, t2);
+ /* calc v bit */
+ tcg_gen_xor_i64(t3, t4, t1);
+ tcg_gen_xor_i64(t2, t1, t2);
+ tcg_gen_andc_i64(t3, t3, t2);
+ tcg_gen_extrh_i64_i32(cpu_PSW_V, t3);
+    /* We produce an overflow on the host if the multiplication before was
+       (0x80000000 * 0x80000000) << 1. If this is the case, we negate
+       the ovf. */
+ if (n == 1) {
+ temp = tcg_temp_new();
+ temp2 = tcg_temp_new();
+ tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000);
+ tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3);
+ tcg_gen_and_tl(temp, temp, temp2);
+ tcg_gen_shli_tl(temp, temp, 31);
+ /* negate v bit, if special condition */
+ tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ }
+ /* write back result */
+ tcg_gen_extr_i64_i32(rl, rh, t4);
+ /* Calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV/SAV bits */
+ tcg_gen_add_tl(cpu_PSW_AV, rh, rh);
+ tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV);
+ /* calc SAV */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t3);
+ tcg_temp_free_i64(t4);
+}
+
+static inline void
+gen_madds32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
+ uint32_t up_shift)
+{
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+
+ tcg_gen_ext_i32_i64(t1, arg1);
+ tcg_gen_ext_i32_i64(t2, arg2);
+ tcg_gen_ext_i32_i64(t3, arg3);
+
+ tcg_gen_mul_i64(t2, t2, t3);
+ tcg_gen_sari_i64(t2, t2, up_shift - n);
+
+ gen_helper_madd32_q_add_ssov(ret, cpu_env, t1, t2);
+
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t3);
+}
+
+static inline void
+gen_madds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
+ TCGv arg3, uint32_t n)
+{
+ TCGv_i64 r1 = tcg_temp_new_i64();
+ TCGv temp = tcg_const_i32(n);
+
+ tcg_gen_concat_i32_i64(r1, arg1_low, arg1_high);
+ gen_helper_madd64_q_ssov(r1, cpu_env, r1, arg2, arg3, temp);
+ tcg_gen_extr_i64_i32(rl, rh, r1);
+
+ tcg_temp_free_i64(r1);
+ tcg_temp_free(temp);
+}
+/* ret = r2 - (r1 * r3); */
+static inline void gen_msub32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
+{
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+
+ tcg_gen_ext_i32_i64(t1, r1);
+ tcg_gen_ext_i32_i64(t2, r2);
+ tcg_gen_ext_i32_i64(t3, r3);
+
+ tcg_gen_mul_i64(t1, t1, t3);
+ tcg_gen_sub_i64(t1, t2, t1);
+
+ tcg_gen_extrl_i64_i32(ret, t1);
+    /* calc V bit: result > 0x7fffffff */
+ tcg_gen_setcondi_i64(TCG_COND_GT, t3, t1, 0x7fffffffLL);
+ /* result < -0x80000000 */
+ tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL);
+ tcg_gen_or_i64(t2, t2, t3);
+ tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
+ tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+
+ /* Calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV/SAV bits */
+ tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
+ tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
+ /* calc SAV */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t3);
+}
+
+static inline void gen_msubi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_msub32_d(ret, r1, r2, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void
+gen_msub64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+ TCGv r3)
+{
+ TCGv t1 = tcg_temp_new();
+ TCGv t2 = tcg_temp_new();
+ TCGv t3 = tcg_temp_new();
+ TCGv t4 = tcg_temp_new();
+
+ tcg_gen_muls2_tl(t1, t2, r1, r3);
+ /* only the sub can overflow */
+ tcg_gen_sub2_tl(t3, t4, r2_low, r2_high, t1, t2);
+ /* calc V bit */
+ tcg_gen_xor_tl(cpu_PSW_V, t4, r2_high);
+ tcg_gen_xor_tl(t1, r2_high, t2);
+ tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, t1);
+ /* Calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV/SAV bits */
+ tcg_gen_add_tl(cpu_PSW_AV, t4, t4);
+ tcg_gen_xor_tl(cpu_PSW_AV, t4, cpu_PSW_AV);
+ /* calc SAV */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ /* write back the result */
+ tcg_gen_mov_tl(ret_low, t3);
+ tcg_gen_mov_tl(ret_high, t4);
+
+ tcg_temp_free(t1);
+ tcg_temp_free(t2);
+ tcg_temp_free(t3);
+ tcg_temp_free(t4);
+}
+
+static inline void
+gen_msubi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+ int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_msub64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void
+gen_msubu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+ TCGv r3)
+{
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+
+ tcg_gen_extu_i32_i64(t1, r1);
+ tcg_gen_concat_i32_i64(t2, r2_low, r2_high);
+ tcg_gen_extu_i32_i64(t3, r3);
+
+ tcg_gen_mul_i64(t1, t1, t3);
+ tcg_gen_sub_i64(t3, t2, t1);
+ tcg_gen_extr_i64_i32(ret_low, ret_high, t3);
+    /* calc V bit; only the sub can overflow, namely when t1 > t2 */
+ tcg_gen_setcond_i64(TCG_COND_GTU, t1, t1, t2);
+ tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
+ tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ /* Calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV/SAV bits */
+ tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
+ tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
+ /* calc SAV */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t3);
+}
+
+static inline void
+gen_msubui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+ int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_msubu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void gen_addi_d(TCGv ret, TCGv r1, target_ulong r2)
+{
+ TCGv temp = tcg_const_i32(r2);
+ gen_add_d(ret, r1, temp);
+ tcg_temp_free(temp);
+}
+/* calculate the carry bit too */
+static inline void gen_add_CC(TCGv ret, TCGv r1, TCGv r2)
+{
+ TCGv t0 = tcg_temp_new_i32();
+ TCGv result = tcg_temp_new_i32();
+
+ tcg_gen_movi_tl(t0, 0);
+ /* Addition and set C/V/SV bits */
+ tcg_gen_add2_i32(result, cpu_PSW_C, r1, t0, r2, t0);
+ /* calc V bit */
+ tcg_gen_xor_tl(cpu_PSW_V, result, r1);
+ tcg_gen_xor_tl(t0, r1, r2);
+ tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
+ /* Calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV/SAV bits */
+ tcg_gen_add_tl(cpu_PSW_AV, result, result);
+ tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
+ /* calc SAV */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ /* write back result */
+ tcg_gen_mov_tl(ret, result);
+
+ tcg_temp_free(result);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_addi_CC(TCGv ret, TCGv r1, int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_add_CC(ret, r1, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void gen_addc_CC(TCGv ret, TCGv r1, TCGv r2)
+{
+ TCGv carry = tcg_temp_new_i32();
+ TCGv t0 = tcg_temp_new_i32();
+ TCGv result = tcg_temp_new_i32();
+
+ tcg_gen_movi_tl(t0, 0);
+ tcg_gen_setcondi_tl(TCG_COND_NE, carry, cpu_PSW_C, 0);
+ /* Addition, carry and set C/V/SV bits */
+ tcg_gen_add2_i32(result, cpu_PSW_C, r1, t0, carry, t0);
+ tcg_gen_add2_i32(result, cpu_PSW_C, result, cpu_PSW_C, r2, t0);
+ /* calc V bit */
+ tcg_gen_xor_tl(cpu_PSW_V, result, r1);
+ tcg_gen_xor_tl(t0, r1, r2);
+ tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0);
+ /* Calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV/SAV bits */
+ tcg_gen_add_tl(cpu_PSW_AV, result, result);
+ tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
+ /* calc SAV */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ /* write back result */
+ tcg_gen_mov_tl(ret, result);
+
+ tcg_temp_free(result);
+ tcg_temp_free(t0);
+ tcg_temp_free(carry);
+}
+
+static inline void gen_addci_CC(TCGv ret, TCGv r1, int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_addc_CC(ret, r1, temp);
+ tcg_temp_free(temp);
+}
+
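+/* Conditional add: if (r4 cond 0), r3 = r1 + r2 and the V/AV flags are
+   updated, otherwise r3 keeps r1 and the flags stay unchanged; the sticky
+   SV/SAV bits are only ORed in when the condition holds (via the mask). */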
+static inline void gen_cond_add(TCGCond cond, TCGv r1, TCGv r2, TCGv r3,
+ TCGv r4)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+ TCGv result = tcg_temp_new();
+ TCGv mask = tcg_temp_new();
+ TCGv t0 = tcg_const_i32(0);
+
+ /* create mask for sticky bits */
+ tcg_gen_setcond_tl(cond, mask, r4, t0);
+ tcg_gen_shli_tl(mask, mask, 31);
+
+ tcg_gen_add_tl(result, r1, r2);
+ /* Calc PSW_V */
+ tcg_gen_xor_tl(temp, result, r1);
+ tcg_gen_xor_tl(temp2, r1, r2);
+ tcg_gen_andc_tl(temp, temp, temp2);
+ tcg_gen_movcond_tl(cond, cpu_PSW_V, r4, t0, temp, cpu_PSW_V);
+ /* Set PSW_SV */
+ tcg_gen_and_tl(temp, temp, mask);
+ tcg_gen_or_tl(cpu_PSW_SV, temp, cpu_PSW_SV);
+ /* calc AV bit */
+ tcg_gen_add_tl(temp, result, result);
+ tcg_gen_xor_tl(temp, temp, result);
+ tcg_gen_movcond_tl(cond, cpu_PSW_AV, r4, t0, temp, cpu_PSW_AV);
+ /* calc SAV bit */
+ tcg_gen_and_tl(temp, temp, mask);
+ tcg_gen_or_tl(cpu_PSW_SAV, temp, cpu_PSW_SAV);
+ /* write back result */
+ tcg_gen_movcond_tl(cond, r3, r4, t0, result, r1);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free(result);
+ tcg_temp_free(mask);
+}
+
+static inline void gen_condi_add(TCGCond cond, TCGv r1, int32_t r2,
+ TCGv r3, TCGv r4)
+{
+ TCGv temp = tcg_const_i32(r2);
+ gen_cond_add(cond, r1, temp, r3, r4);
+ tcg_temp_free(temp);
+}
+
+static inline void gen_sub_d(TCGv ret, TCGv r1, TCGv r2)
+{
+ TCGv temp = tcg_temp_new_i32();
+ TCGv result = tcg_temp_new_i32();
+
+ tcg_gen_sub_tl(result, r1, r2);
+ /* calc V bit */
+ tcg_gen_xor_tl(cpu_PSW_V, result, r1);
+ tcg_gen_xor_tl(temp, r1, r2);
+ tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
+ /* calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV bit */
+ tcg_gen_add_tl(cpu_PSW_AV, result, result);
+ tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
+ /* calc SAV bit */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ /* write back result */
+ tcg_gen_mov_tl(ret, result);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(result);
+}
+
+static inline void
+gen_sub64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 result = tcg_temp_new_i64();
+
+ tcg_gen_sub_i64(result, r1, r2);
+ /* calc v bit */
+ tcg_gen_xor_i64(t1, result, r1);
+ tcg_gen_xor_i64(t0, r1, r2);
+ tcg_gen_and_i64(t1, t1, t0);
+ tcg_gen_extrh_i64_i32(cpu_PSW_V, t1);
+ /* calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* calc AV/SAV bits */
+ tcg_gen_extrh_i64_i32(temp, result);
+ tcg_gen_add_tl(cpu_PSW_AV, temp, temp);
+ tcg_gen_xor_tl(cpu_PSW_AV, temp, cpu_PSW_AV);
+ /* calc SAV */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ /* write back result */
+ tcg_gen_mov_i64(ret, result);
+
+ tcg_temp_free(temp);
+ tcg_temp_free_i64(result);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+}
+
+static inline void gen_sub_CC(TCGv ret, TCGv r1, TCGv r2)
+{
+ TCGv result = tcg_temp_new();
+ TCGv temp = tcg_temp_new();
+
+ tcg_gen_sub_tl(result, r1, r2);
+ /* calc C bit */
+ tcg_gen_setcond_tl(TCG_COND_GEU, cpu_PSW_C, r1, r2);
+ /* calc V bit */
+ tcg_gen_xor_tl(cpu_PSW_V, result, r1);
+ tcg_gen_xor_tl(temp, r1, r2);
+ tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
+ /* calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV bit */
+ tcg_gen_add_tl(cpu_PSW_AV, result, result);
+ tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
+ /* calc SAV bit */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ /* write back result */
+ tcg_gen_mov_tl(ret, result);
+
+ tcg_temp_free(result);
+ tcg_temp_free(temp);
+}
+
+static inline void gen_subc_CC(TCGv ret, TCGv r1, TCGv r2)
+{
+ TCGv temp = tcg_temp_new();
+ tcg_gen_not_tl(temp, r2);
+ gen_addc_CC(ret, r1, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void gen_cond_sub(TCGCond cond, TCGv r1, TCGv r2, TCGv r3,
+ TCGv r4)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+ TCGv result = tcg_temp_new();
+ TCGv mask = tcg_temp_new();
+ TCGv t0 = tcg_const_i32(0);
+
+ /* create mask for sticky bits */
+ tcg_gen_setcond_tl(cond, mask, r4, t0);
+ tcg_gen_shli_tl(mask, mask, 31);
+
+ tcg_gen_sub_tl(result, r1, r2);
+ /* Calc PSW_V */
+ tcg_gen_xor_tl(temp, result, r1);
+ tcg_gen_xor_tl(temp2, r1, r2);
+ tcg_gen_and_tl(temp, temp, temp2);
+ tcg_gen_movcond_tl(cond, cpu_PSW_V, r4, t0, temp, cpu_PSW_V);
+ /* Set PSW_SV */
+ tcg_gen_and_tl(temp, temp, mask);
+ tcg_gen_or_tl(cpu_PSW_SV, temp, cpu_PSW_SV);
+ /* calc AV bit */
+ tcg_gen_add_tl(temp, result, result);
+ tcg_gen_xor_tl(temp, temp, result);
+ tcg_gen_movcond_tl(cond, cpu_PSW_AV, r4, t0, temp, cpu_PSW_AV);
+ /* calc SAV bit */
+ tcg_gen_and_tl(temp, temp, mask);
+ tcg_gen_or_tl(cpu_PSW_SAV, temp, cpu_PSW_SAV);
+ /* write back result */
+ tcg_gen_movcond_tl(cond, r3, r4, t0, result, r1);
+
+ tcg_temp_free(t0);
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free(result);
+ tcg_temp_free(mask);
+}
+
+static inline void
+gen_msub_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+ TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv temp2 = tcg_temp_new();
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_extr_i64_i32(temp, temp2, temp64);
+ gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2,
+ tcg_gen_sub_tl, tcg_gen_sub_tl);
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_msubs_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+ TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv temp2 = tcg_temp_new();
+ TCGv temp3 = tcg_temp_new();
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_extr_i64_i32(temp, temp2, temp64);
+ gen_subs(ret_low, r1_low, temp);
+ tcg_gen_mov_tl(temp, cpu_PSW_V);
+ tcg_gen_mov_tl(temp3, cpu_PSW_AV);
+ gen_subs(ret_high, r1_high, temp2);
+ /* combine v bits */
+ tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
+ /* combine av bits */
+ tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free(temp3);
+ tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_msubm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+ TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ TCGv_i64 temp64_2 = tcg_temp_new_i64();
+ TCGv_i64 temp64_3 = tcg_temp_new_i64();
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
+ gen_sub64_d(temp64_3, temp64_2, temp64);
+ /* write back result */
+ tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_3);
+
+ tcg_temp_free(temp);
+ tcg_temp_free_i64(temp64);
+ tcg_temp_free_i64(temp64_2);
+ tcg_temp_free_i64(temp64_3);
+}
+
+static inline void
+gen_msubms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+ TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ TCGv_i64 temp64_2 = tcg_temp_new_i64();
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mulm_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mulm_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mulm_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mulm_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
+ gen_helper_sub64_ssov(temp64, cpu_env, temp64_2, temp64);
+ tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
+
+ tcg_temp_free(temp);
+ tcg_temp_free_i64(temp64);
+ tcg_temp_free_i64(temp64_2);
+}
+
+static inline void
+gen_msubr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n,
+ uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ gen_helper_subr_h(ret, cpu_env, temp64, r1_low, r1_high);
+
+ tcg_temp_free(temp);
+ tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_msubr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+
+ tcg_gen_andi_tl(temp2, r1, 0xffff0000);
+ tcg_gen_shli_tl(temp, r1, 16);
+ gen_msubr64_h(ret, temp, temp2, r2, r3, n, mode);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
+static inline void
+gen_msubr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3,
+ uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ gen_helper_subr_h_ssov(ret, cpu_env, temp64, r1_low, r1_high);
+
+ tcg_temp_free(temp);
+ tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_msubr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+
+ tcg_gen_andi_tl(temp2, r1, 0xffff0000);
+ tcg_gen_shli_tl(temp, r1, 16);
+ gen_msubr64s_h(ret, temp, temp2, r2, r3, n, mode);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
+static inline void
+gen_msubr_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
+{
+ TCGv temp = tcg_const_i32(n);
+ gen_helper_msubr_q(ret, cpu_env, r1, r2, r3, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void
+gen_msubrs_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n)
+{
+ TCGv temp = tcg_const_i32(n);
+ gen_helper_msubr_q_ssov(ret, cpu_env, r1, r2, r3, temp);
+ tcg_temp_free(temp);
+}
+
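+/* Q-format 32-bit multiply-subtract: like gen_madd32_q, but the product is
+   rounded up whenever fraction bits are shifted out (any non-zero bits below
+   up_shift - n add one) before being subtracted from arg1. */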
+static inline void
+gen_msub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
+ uint32_t up_shift, CPUTriCoreState *env)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+ TCGv temp3 = tcg_temp_new();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+ TCGv_i64 t4 = tcg_temp_new_i64();
+
+ tcg_gen_ext_i32_i64(t2, arg2);
+ tcg_gen_ext_i32_i64(t3, arg3);
+
+ tcg_gen_mul_i64(t2, t2, t3);
+
+ tcg_gen_ext_i32_i64(t1, arg1);
+ /* if we shift part of the fraction out, we need to round up */
+ tcg_gen_andi_i64(t4, t2, (1ll << (up_shift - n)) - 1);
+ tcg_gen_setcondi_i64(TCG_COND_NE, t4, t4, 0);
+ tcg_gen_sari_i64(t2, t2, up_shift - n);
+ tcg_gen_add_i64(t2, t2, t4);
+
+ tcg_gen_sub_i64(t3, t1, t2);
+ tcg_gen_extrl_i64_i32(temp3, t3);
+ /* calc v bit */
+ tcg_gen_setcondi_i64(TCG_COND_GT, t1, t3, 0x7fffffffLL);
+ tcg_gen_setcondi_i64(TCG_COND_LT, t2, t3, -0x80000000LL);
+ tcg_gen_or_i64(t1, t1, t2);
+ tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
+ tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ /* Calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV/SAV bits */
+ tcg_gen_add_tl(cpu_PSW_AV, temp3, temp3);
+ tcg_gen_xor_tl(cpu_PSW_AV, temp3, cpu_PSW_AV);
+ /* calc SAV */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ /* write back result */
+ tcg_gen_mov_tl(ret, temp3);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free(temp3);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t3);
+ tcg_temp_free_i64(t4);
+}
+
+static inline void
+gen_m16sub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+ if (n == 0) {
+ tcg_gen_mul_tl(temp, arg2, arg3);
+ } else { /* n is expected to be 1 */
+ tcg_gen_mul_tl(temp, arg2, arg3);
+ tcg_gen_shli_tl(temp, temp, 1);
+ /* catch special case r1 = r2 = 0x8000 */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
+ tcg_gen_sub_tl(temp, temp, temp2);
+ }
+ gen_sub_d(ret, arg1, temp);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
+static inline void
+gen_m16subs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+ if (n == 0) {
+ tcg_gen_mul_tl(temp, arg2, arg3);
+ } else { /* n is expected to be 1 */
+ tcg_gen_mul_tl(temp, arg2, arg3);
+ tcg_gen_shli_tl(temp, temp, 1);
+ /* catch special case r1 = r2 = 0x8000 */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
+ tcg_gen_sub_tl(temp, temp, temp2);
+ }
+ gen_subs(ret, arg1, temp);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
+static inline void
+gen_m16sub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
+ TCGv arg3, uint32_t n)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+
+ if (n == 0) {
+ tcg_gen_mul_tl(temp, arg2, arg3);
+ } else { /* n is expected to be 1 */
+ tcg_gen_mul_tl(temp, arg2, arg3);
+ tcg_gen_shli_tl(temp, temp, 1);
+ /* catch special case r1 = r2 = 0x8000 */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
+ tcg_gen_sub_tl(temp, temp, temp2);
+ }
+ tcg_gen_ext_i32_i64(t2, temp);
+ tcg_gen_shli_i64(t2, t2, 16);
+ tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
+ gen_sub64_d(t3, t1, t2);
+ /* write back result */
+ tcg_gen_extr_i64_i32(rl, rh, t3);
+
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t3);
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
+static inline void
+gen_m16subs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
+ TCGv arg3, uint32_t n)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+
+ if (n == 0) {
+ tcg_gen_mul_tl(temp, arg2, arg3);
+ } else { /* n is expected to be 1 */
+ tcg_gen_mul_tl(temp, arg2, arg3);
+ tcg_gen_shli_tl(temp, temp, 1);
+ /* catch special case r1 = r2 = 0x8000 */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000);
+ tcg_gen_sub_tl(temp, temp, temp2);
+ }
+ tcg_gen_ext_i32_i64(t2, temp);
+ tcg_gen_shli_i64(t2, t2, 16);
+ tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
+
+ gen_helper_sub64_ssov(t1, cpu_env, t1, t2);
+ tcg_gen_extr_i64_i32(rl, rh, t1);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+}
+
+static inline void
+gen_msub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
+ TCGv arg3, uint32_t n, CPUTriCoreState *env)
+{
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+ TCGv_i64 t4 = tcg_temp_new_i64();
+ TCGv temp, temp2;
+
+ tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high);
+ tcg_gen_ext_i32_i64(t2, arg2);
+ tcg_gen_ext_i32_i64(t3, arg3);
+
+ tcg_gen_mul_i64(t2, t2, t3);
+ if (n != 0) {
+ tcg_gen_shli_i64(t2, t2, 1);
+ }
+ tcg_gen_sub_i64(t4, t1, t2);
+ /* calc v bit */
+ tcg_gen_xor_i64(t3, t4, t1);
+ tcg_gen_xor_i64(t2, t1, t2);
+ tcg_gen_and_i64(t3, t3, t2);
+ tcg_gen_extrh_i64_i32(cpu_PSW_V, t3);
+    /* We produce an overflow on the host if the multiplication before was
+       (0x80000000 * 0x80000000) << 1. If this is the case, we negate
+       the ovf. */
+ if (n == 1) {
+ temp = tcg_temp_new();
+ temp2 = tcg_temp_new();
+ tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000);
+ tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3);
+ tcg_gen_and_tl(temp, temp, temp2);
+ tcg_gen_shli_tl(temp, temp, 31);
+ /* negate v bit, if special condition */
+ tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ }
+ /* write back result */
+ tcg_gen_extr_i64_i32(rl, rh, t4);
+ /* Calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV/SAV bits */
+ tcg_gen_add_tl(cpu_PSW_AV, rh, rh);
+ tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV);
+ /* calc SAV */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t3);
+ tcg_temp_free_i64(t4);
+}
+
+static inline void
+gen_msubs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
+ uint32_t up_shift)
+{
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i64 t3 = tcg_temp_new_i64();
+ TCGv_i64 t4 = tcg_temp_new_i64();
+
+ tcg_gen_ext_i32_i64(t1, arg1);
+ tcg_gen_ext_i32_i64(t2, arg2);
+ tcg_gen_ext_i32_i64(t3, arg3);
+
+ tcg_gen_mul_i64(t2, t2, t3);
+ /* if we shift part of the fraction out, we need to round up */
+ tcg_gen_andi_i64(t4, t2, (1ll << (up_shift - n)) - 1);
+ tcg_gen_setcondi_i64(TCG_COND_NE, t4, t4, 0);
+ tcg_gen_sari_i64(t3, t2, up_shift - n);
+ tcg_gen_add_i64(t3, t3, t4);
+
+ gen_helper_msub32_q_sub_ssov(ret, cpu_env, t1, t3);
+
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ tcg_temp_free_i64(t3);
+ tcg_temp_free_i64(t4);
+}
+
+static inline void
+gen_msubs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2,
+ TCGv arg3, uint32_t n)
+{
+ TCGv_i64 r1 = tcg_temp_new_i64();
+ TCGv temp = tcg_const_i32(n);
+
+ tcg_gen_concat_i32_i64(r1, arg1_low, arg1_high);
+ gen_helper_msub64_q_ssov(r1, cpu_env, r1, arg2, arg3, temp);
+ tcg_gen_extr_i64_i32(rl, rh, r1);
+
+ tcg_temp_free_i64(r1);
+ tcg_temp_free(temp);
+}
+
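+/* Packed multiply-sub/add: one accumulator lane has the product added and
+   the other has it subtracted, via gen_addsub64_h with add/sub as the two
+   per-lane operations. */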
+static inline void
+gen_msubad_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+ TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv temp2 = tcg_temp_new();
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_extr_i64_i32(temp, temp2, temp64);
+ gen_addsub64_h(ret_low, ret_high, r1_low, r1_high, temp, temp2,
+ tcg_gen_add_tl, tcg_gen_sub_tl);
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_msubadm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+ TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ TCGv_i64 temp64_2 = tcg_temp_new_i64();
+ TCGv_i64 temp64_3 = tcg_temp_new_i64();
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_concat_i32_i64(temp64_3, r1_low, r1_high);
+ tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */
+ tcg_gen_ext32s_i64(temp64, temp64); /* low */
+ tcg_gen_sub_i64(temp64, temp64_2, temp64);
+ tcg_gen_shli_i64(temp64, temp64, 16);
+
+ gen_sub64_d(temp64_2, temp64_3, temp64);
+ /* write back result */
+ tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_2);
+
+ tcg_temp_free(temp);
+ tcg_temp_free_i64(temp64);
+ tcg_temp_free_i64(temp64_2);
+ tcg_temp_free_i64(temp64_3);
+}
+
+static inline void
+gen_msubadr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv temp2 = tcg_temp_new();
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_andi_tl(temp2, r1, 0xffff0000);
+ tcg_gen_shli_tl(temp, r1, 16);
+ gen_helper_subadr_h(ret, cpu_env, temp64, temp, temp2);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_msubads_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+ TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv temp2 = tcg_temp_new();
+ TCGv temp3 = tcg_temp_new();
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_extr_i64_i32(temp, temp2, temp64);
+ gen_adds(ret_low, r1_low, temp);
+ tcg_gen_mov_tl(temp, cpu_PSW_V);
+ tcg_gen_mov_tl(temp3, cpu_PSW_AV);
+ gen_subs(ret_high, r1_high, temp2);
+ /* combine v bits */
+ tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp);
+ /* combine av bits */
+ tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free(temp3);
+ tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_msubadms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2,
+ TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ TCGv_i64 temp64_2 = tcg_temp_new_i64();
+
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_sari_i64(temp64_2, temp64, 32); /* high */
+ tcg_gen_ext32s_i64(temp64, temp64); /* low */
+ tcg_gen_sub_i64(temp64, temp64_2, temp64);
+ tcg_gen_shli_i64(temp64, temp64, 16);
+ tcg_gen_concat_i32_i64(temp64_2, r1_low, r1_high);
+
+ gen_helper_sub64_ssov(temp64, cpu_env, temp64_2, temp64);
+ tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
+
+ tcg_temp_free(temp);
+ tcg_temp_free_i64(temp64);
+ tcg_temp_free_i64(temp64_2);
+}
+
+static inline void
+gen_msubadr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode)
+{
+ TCGv temp = tcg_const_i32(n);
+ TCGv temp2 = tcg_temp_new();
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ switch (mode) {
+ case MODE_LL:
+ GEN_HELPER_LL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_LU:
+ GEN_HELPER_LU(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UL:
+ GEN_HELPER_UL(mul_h, temp64, r2, r3, temp);
+ break;
+ case MODE_UU:
+ GEN_HELPER_UU(mul_h, temp64, r2, r3, temp);
+ break;
+ }
+ tcg_gen_andi_tl(temp2, r1, 0xffff0000);
+ tcg_gen_shli_tl(temp, r1, 16);
+ gen_helper_subadr_h_ssov(ret, cpu_env, temp64, temp, temp2);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free_i64(temp64);
+}
+
+static inline void gen_abs(TCGv ret, TCGv r1)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv t0 = tcg_const_i32(0);
+
+ tcg_gen_neg_tl(temp, r1);
+ tcg_gen_movcond_tl(TCG_COND_GE, ret, r1, t0, r1, temp);
+    /* overflow can only happen if r1 = 0x80000000 */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, r1, 0x80000000);
+ tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ /* calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV bit */
+ tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
+ tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
+ /* calc SAV bit */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(t0);
+}
+
+static inline void gen_absdif(TCGv ret, TCGv r1, TCGv r2)
+{
+ TCGv temp = tcg_temp_new_i32();
+ TCGv result = tcg_temp_new_i32();
+
+ tcg_gen_sub_tl(result, r1, r2);
+ tcg_gen_sub_tl(temp, r2, r1);
+ tcg_gen_movcond_tl(TCG_COND_GT, result, r1, r2, result, temp);
+
+ /* calc V bit */
+ tcg_gen_xor_tl(cpu_PSW_V, result, r1);
+ tcg_gen_xor_tl(temp, result, r2);
+ tcg_gen_movcond_tl(TCG_COND_GT, cpu_PSW_V, r1, r2, cpu_PSW_V, temp);
+ tcg_gen_xor_tl(temp, r1, r2);
+ tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp);
+ /* calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV bit */
+ tcg_gen_add_tl(cpu_PSW_AV, result, result);
+ tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV);
+ /* calc SAV bit */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ /* write back result */
+ tcg_gen_mov_tl(ret, result);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(result);
+}
+
+static inline void gen_absdifi(TCGv ret, TCGv r1, int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_absdif(ret, r1, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void gen_absdifsi(TCGv ret, TCGv r1, int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_helper_absdif_ssov(ret, cpu_env, r1, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void gen_mul_i32s(TCGv ret, TCGv r1, TCGv r2)
+{
+ TCGv high = tcg_temp_new();
+ TCGv low = tcg_temp_new();
+
+ tcg_gen_muls2_tl(low, high, r1, r2);
+ tcg_gen_mov_tl(ret, low);
+ /* calc V bit */
+ tcg_gen_sari_tl(low, low, 31);
+ tcg_gen_setcond_tl(TCG_COND_NE, cpu_PSW_V, high, low);
+ tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ /* calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV bit */
+ tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
+ tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
+ /* calc SAV bit */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+
+ tcg_temp_free(high);
+ tcg_temp_free(low);
+}
+
+static inline void gen_muli_i32s(TCGv ret, TCGv r1, int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_mul_i32s(ret, r1, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void gen_mul_i64s(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2)
+{
+ tcg_gen_muls2_tl(ret_low, ret_high, r1, r2);
+ /* clear V bit */
+ tcg_gen_movi_tl(cpu_PSW_V, 0);
+ /* calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV bit */
+ tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
+ tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
+ /* calc SAV bit */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+}
+
+static inline void gen_muli_i64s(TCGv ret_low, TCGv ret_high, TCGv r1,
+ int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_mul_i64s(ret_low, ret_high, r1, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void gen_mul_i64u(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2)
+{
+ tcg_gen_mulu2_tl(ret_low, ret_high, r1, r2);
+ /* clear V bit */
+ tcg_gen_movi_tl(cpu_PSW_V, 0);
+ /* calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* Calc AV bit */
+ tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high);
+ tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV);
+ /* calc SAV bit */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+}
+
+static inline void gen_muli_i64u(TCGv ret_low, TCGv ret_high, TCGv r1,
+ int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_mul_i64u(ret_low, ret_high, r1, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void gen_mulsi_i32(TCGv ret, TCGv r1, int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_helper_mul_ssov(ret, cpu_env, r1, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void gen_mulsui_i32(TCGv ret, TCGv r1, int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_helper_mul_suov(ret, cpu_env, r1, temp);
+ tcg_temp_free(temp);
+}
+/* gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); */
+static inline void gen_maddsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_helper_madd32_ssov(ret, cpu_env, r1, r2, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void gen_maddsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_helper_madd32_suov(ret, cpu_env, r1, r2, temp);
+ tcg_temp_free(temp);
+}
+
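+/* Q-format multiply returning rl/rh: the signed 64-bit product is scaled
+   according to n and up_shift; for n == 1 the product is doubled and the
+   only overflow case (0x80000000 * 0x80000000) sets the V bit. */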
+static void
+gen_mul_q(TCGv rl, TCGv rh, TCGv arg1, TCGv arg2, uint32_t n, uint32_t up_shift)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv_i64 temp_64 = tcg_temp_new_i64();
+ TCGv_i64 temp2_64 = tcg_temp_new_i64();
+
+ if (n == 0) {
+ if (up_shift == 32) {
+ tcg_gen_muls2_tl(rh, rl, arg1, arg2);
+ } else if (up_shift == 16) {
+ tcg_gen_ext_i32_i64(temp_64, arg1);
+ tcg_gen_ext_i32_i64(temp2_64, arg2);
+
+ tcg_gen_mul_i64(temp_64, temp_64, temp2_64);
+ tcg_gen_shri_i64(temp_64, temp_64, up_shift);
+ tcg_gen_extr_i64_i32(rl, rh, temp_64);
+ } else {
+ tcg_gen_muls2_tl(rl, rh, arg1, arg2);
+ }
+ /* reset v bit */
+ tcg_gen_movi_tl(cpu_PSW_V, 0);
+ } else { /* n is expected to be 1 */
+ tcg_gen_ext_i32_i64(temp_64, arg1);
+ tcg_gen_ext_i32_i64(temp2_64, arg2);
+
+ tcg_gen_mul_i64(temp_64, temp_64, temp2_64);
+
+ if (up_shift == 0) {
+ tcg_gen_shli_i64(temp_64, temp_64, 1);
+ } else {
+ tcg_gen_shri_i64(temp_64, temp_64, up_shift - 1);
+ }
+ tcg_gen_extr_i64_i32(rl, rh, temp_64);
+ /* overflow only occurs if r1 = r2 = 0x8000 */
+ if (up_shift == 0) {/* result is 64 bit */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, rh,
+ 0x80000000);
+ } else { /* result is 32 bit */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, rl,
+ 0x80000000);
+ }
+ tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ /* calc sv overflow bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ }
+ /* calc av overflow bit */
+ if (up_shift == 0) {
+ tcg_gen_add_tl(cpu_PSW_AV, rh, rh);
+ tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV);
+ } else {
+ tcg_gen_add_tl(cpu_PSW_AV, rl, rl);
+ tcg_gen_xor_tl(cpu_PSW_AV, rl, cpu_PSW_AV);
+ }
+ /* calc sav overflow bit */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_temp_free(temp);
+ tcg_temp_free_i64(temp_64);
+ tcg_temp_free_i64(temp2_64);
+}
+
+static void
+gen_mul_q_16(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n)
+{
+ TCGv temp = tcg_temp_new();
+ if (n == 0) {
+ tcg_gen_mul_tl(ret, arg1, arg2);
+ } else { /* n is expected to be 1 */
+ tcg_gen_mul_tl(ret, arg1, arg2);
+ tcg_gen_shli_tl(ret, ret, 1);
+ /* catch special case r1 = r2 = 0x8000 */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, temp, ret, 0x80000000);
+ tcg_gen_sub_tl(ret, ret, temp);
+ }
+ /* reset v bit */
+ tcg_gen_movi_tl(cpu_PSW_V, 0);
+ /* calc av overflow bit */
+ tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
+ tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
+ /* calc sav overflow bit */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+
+ tcg_temp_free(temp);
+}
+
+static void gen_mulr_q(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n)
+{
+ TCGv temp = tcg_temp_new();
+ if (n == 0) {
+ tcg_gen_mul_tl(ret, arg1, arg2);
+ tcg_gen_addi_tl(ret, ret, 0x8000);
+ } else {
+ tcg_gen_mul_tl(ret, arg1, arg2);
+ tcg_gen_shli_tl(ret, ret, 1);
+ tcg_gen_addi_tl(ret, ret, 0x8000);
+ /* catch special case r1 = r2 = 0x8000 */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, temp, ret, 0x80008000);
+ tcg_gen_muli_tl(temp, temp, 0x8001);
+ tcg_gen_sub_tl(ret, ret, temp);
+ }
+ /* reset v bit */
+ tcg_gen_movi_tl(cpu_PSW_V, 0);
+ /* calc av overflow bit */
+ tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
+ tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
+ /* calc sav overflow bit */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ /* cut halfword off */
+ tcg_gen_andi_tl(ret, ret, 0xffff0000);
+
+ tcg_temp_free(temp);
+}
+
+static inline void
+gen_madds_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+ TCGv r3)
+{
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
+ gen_helper_madd64_ssov(temp64, cpu_env, r1, temp64, r3);
+ tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
+ tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_maddsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+ int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_madds_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void
+gen_maddsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+ TCGv r3)
+{
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
+ gen_helper_madd64_suov(temp64, cpu_env, r1, temp64, r3);
+ tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
+ tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_maddsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+ int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_maddsu_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void gen_msubsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_helper_msub32_ssov(ret, cpu_env, r1, r2, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void gen_msubsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_helper_msub32_suov(ret, cpu_env, r1, r2, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void
+gen_msubs_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+ TCGv r3)
+{
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
+ gen_helper_msub64_ssov(temp64, cpu_env, r1, temp64, r3);
+ tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
+ tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_msubsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+ int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_msubs_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void
+gen_msubsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+ TCGv r3)
+{
+ TCGv_i64 temp64 = tcg_temp_new_i64();
+ tcg_gen_concat_i32_i64(temp64, r2_low, r2_high);
+ gen_helper_msub64_suov(temp64, cpu_env, r1, temp64, r3);
+ tcg_gen_extr_i64_i32(ret_low, ret_high, temp64);
+ tcg_temp_free_i64(temp64);
+}
+
+static inline void
+gen_msubsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
+ int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_msubsu_64(ret_low, ret_high, r1, r2_low, r2_high, temp);
+ tcg_temp_free(temp);
+}
+
+static void gen_saturate(TCGv ret, TCGv arg, int32_t up, int32_t low)
+{
+ TCGv sat_neg = tcg_const_i32(low);
+ TCGv temp = tcg_const_i32(up);
+
+ /* sat_neg = (arg < low ) ? low : arg; */
+ tcg_gen_movcond_tl(TCG_COND_LT, sat_neg, arg, sat_neg, sat_neg, arg);
+
+ /* ret = (sat_neg > up ) ? up : sat_neg; */
+ tcg_gen_movcond_tl(TCG_COND_GT, ret, sat_neg, temp, temp, sat_neg);
+
+ tcg_temp_free(sat_neg);
+ tcg_temp_free(temp);
+}
+
+static void gen_saturate_u(TCGv ret, TCGv arg, int32_t up)
+{
+ TCGv temp = tcg_const_i32(up);
+    /* ret = (arg > up) ? up : arg; */
+ tcg_gen_movcond_tl(TCG_COND_GTU, ret, arg, temp, temp, arg);
+ tcg_temp_free(temp);
+}
+
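+/* Logical shift by a signed immediate count: positive counts shift left,
+   negative counts shift right, and -32 clears the result. */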
+static void gen_shi(TCGv ret, TCGv r1, int32_t shift_count)
+{
+ if (shift_count == -32) {
+ tcg_gen_movi_tl(ret, 0);
+ } else if (shift_count >= 0) {
+ tcg_gen_shli_tl(ret, r1, shift_count);
+ } else {
+ tcg_gen_shri_tl(ret, r1, -shift_count);
+ }
+}
+
+static void gen_sh_hi(TCGv ret, TCGv r1, int32_t shiftcount)
+{
+ TCGv temp_low, temp_high;
+
+ if (shiftcount == -16) {
+ tcg_gen_movi_tl(ret, 0);
+ } else {
+ temp_high = tcg_temp_new();
+ temp_low = tcg_temp_new();
+
+ tcg_gen_andi_tl(temp_low, r1, 0xffff);
+ tcg_gen_andi_tl(temp_high, r1, 0xffff0000);
+ gen_shi(temp_low, temp_low, shiftcount);
+ gen_shi(ret, temp_high, shiftcount);
+ tcg_gen_deposit_tl(ret, ret, temp_low, 0, 16);
+
+ tcg_temp_free(temp_low);
+ tcg_temp_free(temp_high);
+ }
+}
+
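+/* Arithmetic shift by a signed immediate count: PSW.C collects the bits
+   shifted out and, for left shifts, PSW.V/SV are set from a range check on
+   the operand. */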
+static void gen_shaci(TCGv ret, TCGv r1, int32_t shift_count)
+{
+ uint32_t msk, msk_start;
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+ TCGv t_0 = tcg_const_i32(0);
+
+ if (shift_count == 0) {
+ /* Clear PSW.C and PSW.V */
+ tcg_gen_movi_tl(cpu_PSW_C, 0);
+ tcg_gen_mov_tl(cpu_PSW_V, cpu_PSW_C);
+ tcg_gen_mov_tl(ret, r1);
+ } else if (shift_count == -32) {
+ /* set PSW.C */
+ tcg_gen_mov_tl(cpu_PSW_C, r1);
+ /* fill ret completely with sign bit */
+ tcg_gen_sari_tl(ret, r1, 31);
+ /* clear PSW.V */
+ tcg_gen_movi_tl(cpu_PSW_V, 0);
+ } else if (shift_count > 0) {
+ TCGv t_max = tcg_const_i32(0x7FFFFFFF >> shift_count);
+ TCGv t_min = tcg_const_i32(((int32_t) -0x80000000) >> shift_count);
+
+ /* calc carry */
+ msk_start = 32 - shift_count;
+ msk = ((1 << shift_count) - 1) << msk_start;
+ tcg_gen_andi_tl(cpu_PSW_C, r1, msk);
+ /* calc v/sv bits */
+ tcg_gen_setcond_tl(TCG_COND_GT, temp, r1, t_max);
+ tcg_gen_setcond_tl(TCG_COND_LT, temp2, r1, t_min);
+ tcg_gen_or_tl(cpu_PSW_V, temp, temp2);
+ tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ /* calc sv */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_V, cpu_PSW_SV);
+ /* do shift */
+ tcg_gen_shli_tl(ret, r1, shift_count);
+
+ tcg_temp_free(t_max);
+ tcg_temp_free(t_min);
+ } else {
+ /* clear PSW.V */
+ tcg_gen_movi_tl(cpu_PSW_V, 0);
+ /* calc carry */
+ msk = (1 << -shift_count) - 1;
+ tcg_gen_andi_tl(cpu_PSW_C, r1, msk);
+ /* do shift */
+ tcg_gen_sari_tl(ret, r1, -shift_count);
+ }
+ /* calc av overflow bit */
+ tcg_gen_add_tl(cpu_PSW_AV, ret, ret);
+ tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV);
+ /* calc sav overflow bit */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free(t_0);
+}
+
+static void gen_shas(TCGv ret, TCGv r1, TCGv r2)
+{
+ gen_helper_sha_ssov(ret, cpu_env, r1, r2);
+}
+
+static void gen_shasi(TCGv ret, TCGv r1, int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_shas(ret, r1, temp);
+ tcg_temp_free(temp);
+}
+
+static void gen_sha_hi(TCGv ret, TCGv r1, int32_t shift_count)
+{
+ TCGv low, high;
+
+ if (shift_count == 0) {
+ tcg_gen_mov_tl(ret, r1);
+ } else if (shift_count > 0) {
+ low = tcg_temp_new();
+ high = tcg_temp_new();
+
+ tcg_gen_andi_tl(high, r1, 0xffff0000);
+ tcg_gen_shli_tl(low, r1, shift_count);
+ tcg_gen_shli_tl(ret, high, shift_count);
+ tcg_gen_deposit_tl(ret, ret, low, 0, 16);
+
+ tcg_temp_free(low);
+ tcg_temp_free(high);
+ } else {
+ low = tcg_temp_new();
+ high = tcg_temp_new();
+
+ tcg_gen_ext16s_tl(low, r1);
+ tcg_gen_sari_tl(low, low, -shift_count);
+ tcg_gen_sari_tl(ret, r1, -shift_count);
+ tcg_gen_deposit_tl(ret, ret, low, 0, 16);
+
+ tcg_temp_free(low);
+ tcg_temp_free(high);
+ }
+
+}
+
+/* ret = {ret[30:0], (r1 cond r2)}; */
+static void gen_sh_cond(int cond, TCGv ret, TCGv r1, TCGv r2)
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+
+ tcg_gen_shli_tl(temp, ret, 1);
+ tcg_gen_setcond_tl(cond, temp2, r1, r2);
+ tcg_gen_or_tl(ret, temp, temp2);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
+static void gen_sh_condi(int cond, TCGv ret, TCGv r1, int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_sh_cond(cond, ret, r1, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2)
+{
+ gen_helper_add_ssov(ret, cpu_env, r1, r2);
+}
+
+static inline void gen_addsi(TCGv ret, TCGv r1, int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_helper_add_ssov(ret, cpu_env, r1, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void gen_addsui(TCGv ret, TCGv r1, int32_t con)
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_helper_add_suov(ret, cpu_env, r1, temp);
+ tcg_temp_free(temp);
+}
+
+static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2)
+{
+ gen_helper_sub_ssov(ret, cpu_env, r1, r2);
+}
+
+static inline void gen_subsu(TCGv ret, TCGv r1, TCGv r2)
+{
+ gen_helper_sub_suov(ret, cpu_env, r1, r2);
+}
+
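+/* ret[0] = ret[0] op2 (r1[pos1] op1 r2[pos2]); bits 31:1 of ret are unchanged */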
+static inline void gen_bit_2op(TCGv ret, TCGv r1, TCGv r2,
+ int pos1, int pos2,
+ void(*op1)(TCGv, TCGv, TCGv),
+ void(*op2)(TCGv, TCGv, TCGv))
+{
+ TCGv temp1, temp2;
+
+ temp1 = tcg_temp_new();
+ temp2 = tcg_temp_new();
+
+ tcg_gen_shri_tl(temp2, r2, pos2);
+ tcg_gen_shri_tl(temp1, r1, pos1);
+
+ (*op1)(temp1, temp1, temp2);
+ (*op2)(temp1, ret, temp1);
+
+ tcg_gen_deposit_tl(ret, ret, temp1, 0, 1);
+
+ tcg_temp_free(temp1);
+ tcg_temp_free(temp2);
+}
+
+/* ret = r1[pos1] op1 r2[pos2]; */
+static inline void gen_bit_1op(TCGv ret, TCGv r1, TCGv r2,
+ int pos1, int pos2,
+ void(*op1)(TCGv, TCGv, TCGv))
+{
+ TCGv temp1, temp2;
+
+ temp1 = tcg_temp_new();
+ temp2 = tcg_temp_new();
+
+ tcg_gen_shri_tl(temp2, r2, pos2);
+ tcg_gen_shri_tl(temp1, r1, pos1);
+
+ (*op1)(ret, temp1, temp2);
+
+ tcg_gen_andi_tl(ret, ret, 0x1);
+
+ tcg_temp_free(temp1);
+ tcg_temp_free(temp2);
+}
+
+static inline void gen_accumulating_cond(int cond, TCGv ret, TCGv r1, TCGv r2,
+ void(*op)(TCGv, TCGv, TCGv))
+{
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+ /* temp = (arg1 cond arg2) */
+ tcg_gen_setcond_tl(cond, temp, r1, r2);
+ /* temp2 = ret[0] */
+ tcg_gen_andi_tl(temp2, ret, 0x1);
+ /* temp = temp op temp2 */
+ (*op)(temp, temp, temp2);
+ /* ret = {ret[31:1], temp} */
+ tcg_gen_deposit_tl(ret, ret, temp, 0, 1);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
+static inline void
+gen_accumulating_condi(int cond, TCGv ret, TCGv r1, int32_t con,
+ void(*op)(TCGv, TCGv, TCGv))
+{
+ TCGv temp = tcg_const_i32(con);
+ gen_accumulating_cond(cond, ret, r1, temp, op);
+ tcg_temp_free(temp);
+}
+
+/* ret = (r1 cond r2) ? 0xFFFFFFFF : 0x00000000; */
+static inline void gen_cond_w(TCGCond cond, TCGv ret, TCGv r1, TCGv r2)
+{
+ tcg_gen_setcond_tl(cond, ret, r1, r2);
+ tcg_gen_neg_tl(ret, ret);
+}
+
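+/* ret = 1 if any byte of r1 equals the corresponding byte of con, else 0 */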
+static inline void gen_eqany_bi(TCGv ret, TCGv r1, int32_t con)
+{
+ TCGv b0 = tcg_temp_new();
+ TCGv b1 = tcg_temp_new();
+ TCGv b2 = tcg_temp_new();
+ TCGv b3 = tcg_temp_new();
+
+ /* byte 0 */
+ tcg_gen_andi_tl(b0, r1, 0xff);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, b0, b0, con & 0xff);
+
+ /* byte 1 */
+ tcg_gen_andi_tl(b1, r1, 0xff00);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, b1, b1, con & 0xff00);
+
+ /* byte 2 */
+ tcg_gen_andi_tl(b2, r1, 0xff0000);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, b2, b2, con & 0xff0000);
+
+ /* byte 3 */
+ tcg_gen_andi_tl(b3, r1, 0xff000000);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, b3, b3, con & 0xff000000);
+
+ /* combine them */
+ tcg_gen_or_tl(ret, b0, b1);
+ tcg_gen_or_tl(ret, ret, b2);
+ tcg_gen_or_tl(ret, ret, b3);
+
+ tcg_temp_free(b0);
+ tcg_temp_free(b1);
+ tcg_temp_free(b2);
+ tcg_temp_free(b3);
+}
+
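+/* ret = 1 if either halfword of r1 equals the corresponding halfword of con, else 0 */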
+static inline void gen_eqany_hi(TCGv ret, TCGv r1, int32_t con)
+{
+ TCGv h0 = tcg_temp_new();
+ TCGv h1 = tcg_temp_new();
+
+ /* halfword 0 */
+ tcg_gen_andi_tl(h0, r1, 0xffff);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, h0, h0, con & 0xffff);
+
+ /* halfword 1 */
+ tcg_gen_andi_tl(h1, r1, 0xffff0000);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, h1, h1, con & 0xffff0000);
+
+ /* combine them */
+ tcg_gen_or_tl(ret, h0, h1);
+
+ tcg_temp_free(h0);
+ tcg_temp_free(h1);
+}
+
+/* mask = ((1 << width) - 1) << pos;
+ ret = (r1 & ~mask) | ((r2 << pos) & mask); */
+static inline void gen_insert(TCGv ret, TCGv r1, TCGv r2, TCGv width, TCGv pos)
+{
+ TCGv mask = tcg_temp_new();
+ TCGv temp = tcg_temp_new();
+ TCGv temp2 = tcg_temp_new();
+
+ tcg_gen_movi_tl(mask, 1);
+ tcg_gen_shl_tl(mask, mask, width);
+ tcg_gen_subi_tl(mask, mask, 1);
+ tcg_gen_shl_tl(mask, mask, pos);
+
+ tcg_gen_shl_tl(temp, r2, pos);
+ tcg_gen_and_tl(temp, temp, mask);
+ tcg_gen_andc_tl(temp2, r1, mask);
+ tcg_gen_or_tl(ret, temp, temp2);
+
+ tcg_temp_free(mask);
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
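+/* the bsplit helper returns both result words packed in a 64-bit value */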
+static inline void gen_bsplit(TCGv rl, TCGv rh, TCGv r1)
+{
+ TCGv_i64 temp = tcg_temp_new_i64();
+
+ gen_helper_bsplit(temp, r1);
+ tcg_gen_extr_i64_i32(rl, rh, temp);
+
+ tcg_temp_free_i64(temp);
+}
+
+static inline void gen_unpack(TCGv rl, TCGv rh, TCGv r1)
+{
+ TCGv_i64 temp = tcg_temp_new_i64();
+
+ gen_helper_unpack(temp, r1);
+ tcg_gen_extr_i64_i32(rl, rh, temp);
+
+ tcg_temp_free_i64(temp);
+}
+
+static inline void
+gen_dvinit_b(CPUTriCoreState *env, TCGv rl, TCGv rh, TCGv r1, TCGv r2)
+{
+ TCGv_i64 ret = tcg_temp_new_i64();
+
+ if (!tricore_feature(env, TRICORE_FEATURE_131)) {
+ gen_helper_dvinit_b_13(ret, cpu_env, r1, r2);
+ } else {
+ gen_helper_dvinit_b_131(ret, cpu_env, r1, r2);
+ }
+ tcg_gen_extr_i64_i32(rl, rh, ret);
+
+ tcg_temp_free_i64(ret);
+}
+
+static inline void
+gen_dvinit_h(CPUTriCoreState *env, TCGv rl, TCGv rh, TCGv r1, TCGv r2)
+{
+ TCGv_i64 ret = tcg_temp_new_i64();
+
+ if (!tricore_feature(env, TRICORE_FEATURE_131)) {
+ gen_helper_dvinit_h_13(ret, cpu_env, r1, r2);
+ } else {
+ gen_helper_dvinit_h_131(ret, cpu_env, r1, r2);
+ }
+ tcg_gen_extr_i64_i32(rl, rh, ret);
+
+ tcg_temp_free_i64(ret);
+}
+
+static void gen_calc_usb_mul_h(TCGv arg_low, TCGv arg_high)
+{
+ TCGv temp = tcg_temp_new();
+ /* calc AV bit */
+ tcg_gen_add_tl(temp, arg_low, arg_low);
+ tcg_gen_xor_tl(temp, temp, arg_low);
+ tcg_gen_add_tl(cpu_PSW_AV, arg_high, arg_high);
+ tcg_gen_xor_tl(cpu_PSW_AV, cpu_PSW_AV, arg_high);
+ tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp);
+ /* calc SAV bit */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_gen_movi_tl(cpu_PSW_V, 0);
+ tcg_temp_free(temp);
+}
+
+static void gen_calc_usb_mulr_h(TCGv arg)
+{
+ TCGv temp = tcg_temp_new();
+ /* calc AV bit */
+ tcg_gen_add_tl(temp, arg, arg);
+ tcg_gen_xor_tl(temp, temp, arg);
+ tcg_gen_shli_tl(cpu_PSW_AV, temp, 16);
+ tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp);
+ /* calc SAV bit */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ /* clear V bit */
+ tcg_gen_movi_tl(cpu_PSW_V, 0);
+ tcg_temp_free(temp);
+}
+
+/* helpers for generating program flow micro-ops */
+
+static inline void gen_save_pc(target_ulong pc)
+{
+ tcg_gen_movi_tl(cpu_PC, pc);
+}
+
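+/* chain directly to the destination TB only if it is on the same guest page
+ and we are not single-stepping */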
+static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
+{
+ if (unlikely(ctx->singlestep_enabled)) {
+ return false;
+ }
+
+#ifndef CONFIG_USER_ONLY
+ return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
+#else
+ return true;
+#endif
+}
+
+static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
+{
+ if (use_goto_tb(ctx, dest)) {
+ tcg_gen_goto_tb(n);
+ gen_save_pc(dest);
+ tcg_gen_exit_tb((uintptr_t)ctx->tb + n);
+ } else {
+ gen_save_pc(dest);
+ if (ctx->singlestep_enabled) {
+ /* raise exception debug */
+ }
+ tcg_gen_exit_tb(0);
+ }
+}
+
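+/* save the current PC and raise a synchronous trap of the given class and TIN */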
+static void generate_trap(DisasContext *ctx, int class, int tin)
+{
+ TCGv_i32 classtemp = tcg_const_i32(class);
+ TCGv_i32 tintemp = tcg_const_i32(tin);
+
+ gen_save_pc(ctx->pc);
+ gen_helper_raise_exception_sync(cpu_env, classtemp, tintemp);
+ ctx->bstate = BS_EXCP;
+
+ tcg_temp_free(classtemp);
+ tcg_temp_free(tintemp);
+}
+
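+/* if (r1 cond r2) branch to pc + address * 2, otherwise continue at next_pc */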
+static inline void gen_branch_cond(DisasContext *ctx, TCGCond cond, TCGv r1,
+ TCGv r2, int16_t address)
+{
+ TCGLabel *jumpLabel = gen_new_label();
+ tcg_gen_brcond_tl(cond, r1, r2, jumpLabel);
+
+ gen_goto_tb(ctx, 1, ctx->next_pc);
+
+ gen_set_label(jumpLabel);
+ gen_goto_tb(ctx, 0, ctx->pc + address * 2);
+}
+
+static inline void gen_branch_condi(DisasContext *ctx, TCGCond cond, TCGv r1,
+ int r2, int16_t address)
+{
+ TCGv temp = tcg_const_i32(r2);
+ gen_branch_cond(ctx, cond, r1, temp, address);
+ tcg_temp_free(temp);
+}
+
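+/* decrement A[r1] and branch back to pc + offset unless the decremented
+ value is -1, i.e. A[r1] was 0 before the decrement */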
+static void gen_loop(DisasContext *ctx, int r1, int32_t offset)
+{
+ TCGLabel *l1 = gen_new_label();
+
+ tcg_gen_subi_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], 1);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr_a[r1], -1, l1);
+ gen_goto_tb(ctx, 1, ctx->pc + offset);
+ gen_set_label(l1);
+ gen_goto_tb(ctx, 0, ctx->next_pc);
+}
+
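+/* FCALL: push A[11] onto the stack (A[10] -= 4) and set A[11] to the address
+ of the next instruction */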
+static void gen_fcall_save_ctx(DisasContext *ctx)
+{
+ TCGv temp = tcg_temp_new();
+
+ tcg_gen_addi_tl(temp, cpu_gpr_a[10], -4);
+ tcg_gen_qemu_st_tl(cpu_gpr_a[11], temp, ctx->mem_idx, MO_LESL);
+ tcg_gen_movi_tl(cpu_gpr_a[11], ctx->next_pc);
+ tcg_gen_mov_tl(cpu_gpr_a[10], temp);
+
+ tcg_temp_free(temp);
+}
+
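+/* FRET: jump to A[11] with bit 0 cleared, then restore A[11] from the stack
+ and pop it (A[10] += 4) */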
+static void gen_fret(DisasContext *ctx)
+{
+ TCGv temp = tcg_temp_new();
+
+ tcg_gen_andi_tl(temp, cpu_gpr_a[11], ~0x1);
+ tcg_gen_qemu_ld_tl(cpu_gpr_a[11], cpu_gpr_a[10], ctx->mem_idx, MO_LESL);
+ tcg_gen_addi_tl(cpu_gpr_a[10], cpu_gpr_a[10], 4);
+ tcg_gen_mov_tl(cpu_PC, temp);
+ tcg_gen_exit_tb(0);
+ ctx->bstate = BS_BRANCH;
+
+ tcg_temp_free(temp);
+}
+
+static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1,
+ int r2, int32_t constant, int32_t offset)
+{
+ TCGv temp, temp2;
+ int n;
+
+ switch (opc) {
+/* SB-format jumps */
+ case OPC1_16_SB_J:
+ case OPC1_32_B_J:
+ gen_goto_tb(ctx, 0, ctx->pc + offset * 2);
+ break;
+ case OPC1_32_B_CALL:
+ case OPC1_16_SB_CALL:
+ gen_helper_1arg(call, ctx->next_pc);
+ gen_goto_tb(ctx, 0, ctx->pc + offset * 2);
+ break;
+ case OPC1_16_SB_JZ:
+ gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_d[15], 0, offset);
+ break;
+ case OPC1_16_SB_JNZ:
+ gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_d[15], 0, offset);
+ break;
+/* SBC-format jumps */
+ case OPC1_16_SBC_JEQ:
+ gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_d[15], constant, offset);
+ break;
+ case OPC1_16_SBC_JNE:
+ gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_d[15], constant, offset);
+ break;
+/* SBRN-format jumps */
+ case OPC1_16_SBRN_JZ_T:
+ temp = tcg_temp_new();
+ tcg_gen_andi_tl(temp, cpu_gpr_d[15], 0x1u << constant);
+ gen_branch_condi(ctx, TCG_COND_EQ, temp, 0, offset);
+ tcg_temp_free(temp);
+ break;
+ case OPC1_16_SBRN_JNZ_T:
+ temp = tcg_temp_new();
+ tcg_gen_andi_tl(temp, cpu_gpr_d[15], 0x1u << constant);
+ gen_branch_condi(ctx, TCG_COND_NE, temp, 0, offset);
+ tcg_temp_free(temp);
+ break;
+/* SBR-format jumps */
+ case OPC1_16_SBR_JEQ:
+ gen_branch_cond(ctx, TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15],
+ offset);
+ break;
+ case OPC1_16_SBR_JNE:
+ gen_branch_cond(ctx, TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15],
+ offset);
+ break;
+ case OPC1_16_SBR_JNZ:
+ gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_d[r1], 0, offset);
+ break;
+ case OPC1_16_SBR_JNZ_A:
+ gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_a[r1], 0, offset);
+ break;
+ case OPC1_16_SBR_JGEZ:
+ gen_branch_condi(ctx, TCG_COND_GE, cpu_gpr_d[r1], 0, offset);
+ break;
+ case OPC1_16_SBR_JGTZ:
+ gen_branch_condi(ctx, TCG_COND_GT, cpu_gpr_d[r1], 0, offset);
+ break;
+ case OPC1_16_SBR_JLEZ:
+ gen_branch_condi(ctx, TCG_COND_LE, cpu_gpr_d[r1], 0, offset);
+ break;
+ case OPC1_16_SBR_JLTZ:
+ gen_branch_condi(ctx, TCG_COND_LT, cpu_gpr_d[r1], 0, offset);
+ break;
+ case OPC1_16_SBR_JZ:
+ gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_d[r1], 0, offset);
+ break;
+ case OPC1_16_SBR_JZ_A:
+ gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_a[r1], 0, offset);
+ break;
+ case OPC1_16_SBR_LOOP:
+ gen_loop(ctx, r1, offset * 2 - 32);
+ break;
+/* SR-format jumps */
+ case OPC1_16_SR_JI:
+ tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], 0xfffffffe);
+ tcg_gen_exit_tb(0);
+ break;
+ case OPC2_32_SYS_RET:
+ case OPC2_16_SR_RET:
+ gen_helper_ret(cpu_env);
+ tcg_gen_exit_tb(0);
+ break;
+/* B-format */
+ case OPC1_32_B_CALLA:
+ gen_helper_1arg(call, ctx->next_pc);
+ gen_goto_tb(ctx, 0, EA_B_ABSOLUT(offset));
+ break;
+ case OPC1_32_B_FCALL:
+ gen_fcall_save_ctx(ctx);
+ gen_goto_tb(ctx, 0, ctx->pc + offset * 2);
+ break;
+ case OPC1_32_B_FCALLA:
+ gen_fcall_save_ctx(ctx);
+ gen_goto_tb(ctx, 0, EA_B_ABSOLUT(offset));
+ break;
+ case OPC1_32_B_JLA:
+ tcg_gen_movi_tl(cpu_gpr_a[11], ctx->next_pc);
+ /* fall through */
+ case OPC1_32_B_JA:
+ gen_goto_tb(ctx, 0, EA_B_ABSOLUT(offset));
+ break;
+ case OPC1_32_B_JL:
+ tcg_gen_movi_tl(cpu_gpr_a[11], ctx->next_pc);
+ gen_goto_tb(ctx, 0, ctx->pc + offset * 2);
+ break;
+/* BOL format */
+ case OPCM_32_BRC_EQ_NEQ:
+ if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRC_JEQ) {
+ gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_d[r1], constant, offset);
+ } else {
+ gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_d[r1], constant, offset);
+ }
+ break;
+ case OPCM_32_BRC_GE:
+ if (MASK_OP_BRC_OP2(ctx->opcode) == OP2_32_BRC_JGE) {
+ gen_branch_condi(ctx, TCG_COND_GE, cpu_gpr_d[r1], constant, offset);
+ } else {
+ constant = MASK_OP_BRC_CONST4(ctx->opcode);
+ gen_branch_condi(ctx, TCG_COND_GEU, cpu_gpr_d[r1], constant,
+ offset);
+ }
+ break;
+ case OPCM_32_BRC_JLT:
+ if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRC_JLT) {
+ gen_branch_condi(ctx, TCG_COND_LT, cpu_gpr_d[r1], constant, offset);
+ } else {
+ constant = MASK_OP_BRC_CONST4(ctx->opcode);
+ gen_branch_condi(ctx, TCG_COND_LTU, cpu_gpr_d[r1], constant,
+ offset);
+ }
+ break;
+ case OPCM_32_BRC_JNE:
+ temp = tcg_temp_new();
+ if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRC_JNED) {
+ tcg_gen_mov_tl(temp, cpu_gpr_d[r1]);
+ /* subi is unconditional */
+ tcg_gen_subi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1);
+ gen_branch_condi(ctx, TCG_COND_NE, temp, constant, offset);
+ } else {
+ tcg_gen_mov_tl(temp, cpu_gpr_d[r1]);
+ /* addi is unconditional */
+ tcg_gen_addi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1);
+ gen_branch_condi(ctx, TCG_COND_NE, temp, constant, offset);
+ }
+ tcg_temp_free(temp);
+ break;
+/* BRN format */
+ case OPCM_32_BRN_JTT:
+ n = MASK_OP_BRN_N(ctx->opcode);
+
+ temp = tcg_temp_new();
+ tcg_gen_andi_tl(temp, cpu_gpr_d[r1], (1 << n));
+
+ if (MASK_OP_BRN_OP2(ctx->opcode) == OPC2_32_BRN_JNZ_T) {
+ gen_branch_condi(ctx, TCG_COND_NE, temp, 0, offset);
+ } else {
+ gen_branch_condi(ctx, TCG_COND_EQ, temp, 0, offset);
+ }
+ tcg_temp_free(temp);
+ break;
+/* BRR Format */
+ case OPCM_32_BRR_EQ_NEQ:
+ if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JEQ) {
+ gen_branch_cond(ctx, TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[r2],
+ offset);
+ } else {
+ gen_branch_cond(ctx, TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[r2],
+ offset);
+ }
+ break;
+ case OPCM_32_BRR_ADDR_EQ_NEQ:
+ if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JEQ_A) {
+ gen_branch_cond(ctx, TCG_COND_EQ, cpu_gpr_a[r1], cpu_gpr_a[r2],
+ offset);
+ } else {
+ gen_branch_cond(ctx, TCG_COND_NE, cpu_gpr_a[r1], cpu_gpr_a[r2],
+ offset);
+ }
+ break;
+ case OPCM_32_BRR_GE:
+ if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JGE) {
+ gen_branch_cond(ctx, TCG_COND_GE, cpu_gpr_d[r1], cpu_gpr_d[r2],
+ offset);
+ } else {
+ gen_branch_cond(ctx, TCG_COND_GEU, cpu_gpr_d[r1], cpu_gpr_d[r2],
+ offset);
+ }
+ break;
+ case OPCM_32_BRR_JLT:
+ if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JLT) {
+ gen_branch_cond(ctx, TCG_COND_LT, cpu_gpr_d[r1], cpu_gpr_d[r2],
+ offset);
+ } else {
+ gen_branch_cond(ctx, TCG_COND_LTU, cpu_gpr_d[r1], cpu_gpr_d[r2],
+ offset);
+ }
+ break;
+ case OPCM_32_BRR_LOOP:
+ if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_LOOP) {
+ gen_loop(ctx, r2, offset * 2);
+ } else {
+ /* OPC2_32_BRR_LOOPU */
+ gen_goto_tb(ctx, 0, ctx->pc + offset * 2);
+ }
+ break;
+ case OPCM_32_BRR_JNE:
+ temp = tcg_temp_new();
+ temp2 = tcg_temp_new();
+ if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRR_JNED) {
+ tcg_gen_mov_tl(temp, cpu_gpr_d[r1]);
+ /* also save r2, in case of r1 == r2, so r2 is not decremented */
+ tcg_gen_mov_tl(temp2, cpu_gpr_d[r2]);
+ /* subi is unconditional */
+ tcg_gen_subi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1);
+ gen_branch_cond(ctx, TCG_COND_NE, temp, temp2, offset);
+ } else {
+ tcg_gen_mov_tl(temp, cpu_gpr_d[r1]);
+ /* also save r2, in case of r1 == r2, so r2 is not incremented */
+ tcg_gen_mov_tl(temp2, cpu_gpr_d[r2]);
+ /* addi is unconditional */
+ tcg_gen_addi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1);
+ gen_branch_cond(ctx, TCG_COND_NE, temp, temp2, offset);
+ }
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ break;
+ case OPCM_32_BRR_JNZ:
+ if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JNZ_A) {
+ gen_branch_condi(ctx, TCG_COND_NE, cpu_gpr_a[r1], 0, offset);
+ } else {
+ gen_branch_condi(ctx, TCG_COND_EQ, cpu_gpr_a[r1], 0, offset);
+ }
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ ctx->bstate = BS_BRANCH;
+}
+
+/*
+ * Functions for decoding instructions
+ */
+
+static void decode_src_opc(CPUTriCoreState *env, DisasContext *ctx, int op1)
+{
+ int r1;
+ int32_t const4;
+ TCGv temp, temp2;
+
+ r1 = MASK_OP_SRC_S1D(ctx->opcode);
+ const4 = MASK_OP_SRC_CONST4_SEXT(ctx->opcode);
+
+ switch (op1) {
+ case OPC1_16_SRC_ADD:
+ gen_addi_d(cpu_gpr_d[r1], cpu_gpr_d[r1], const4);
+ break;
+ case OPC1_16_SRC_ADD_A15:
+ gen_addi_d(cpu_gpr_d[r1], cpu_gpr_d[15], const4);
+ break;
+ case OPC1_16_SRC_ADD_15A:
+ gen_addi_d(cpu_gpr_d[15], cpu_gpr_d[r1], const4);
+ break;
+ case OPC1_16_SRC_ADD_A:
+ tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], const4);
+ break;
+ case OPC1_16_SRC_CADD:
+ gen_condi_add(TCG_COND_NE, cpu_gpr_d[r1], const4, cpu_gpr_d[r1],
+ cpu_gpr_d[15]);
+ break;
+ case OPC1_16_SRC_CADDN:
+ gen_condi_add(TCG_COND_EQ, cpu_gpr_d[r1], const4, cpu_gpr_d[r1],
+ cpu_gpr_d[15]);
+ break;
+ case OPC1_16_SRC_CMOV:
+ temp = tcg_const_tl(0);
+ temp2 = tcg_const_tl(const4);
+ tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
+ temp2, cpu_gpr_d[r1]);
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ break;
+ case OPC1_16_SRC_CMOVN:
+ temp = tcg_const_tl(0);
+ temp2 = tcg_const_tl(const4);
+ tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
+ temp2, cpu_gpr_d[r1]);
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ break;
+ case OPC1_16_SRC_EQ:
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr_d[15], cpu_gpr_d[r1],
+ const4);
+ break;
+ case OPC1_16_SRC_LT:
+ tcg_gen_setcondi_tl(TCG_COND_LT, cpu_gpr_d[15], cpu_gpr_d[r1],
+ const4);
+ break;
+ case OPC1_16_SRC_MOV:
+ tcg_gen_movi_tl(cpu_gpr_d[r1], const4);
+ break;
+ case OPC1_16_SRC_MOV_A:
+ const4 = MASK_OP_SRC_CONST4(ctx->opcode);
+ tcg_gen_movi_tl(cpu_gpr_a[r1], const4);
+ break;
+ case OPC1_16_SRC_MOV_E:
+ if (tricore_feature(env, TRICORE_FEATURE_16)) {
+ tcg_gen_movi_tl(cpu_gpr_d[r1], const4);
+ tcg_gen_sari_tl(cpu_gpr_d[r1+1], cpu_gpr_d[r1], 31);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
+ case OPC1_16_SRC_SH:
+ gen_shi(cpu_gpr_d[r1], cpu_gpr_d[r1], const4);
+ break;
+ case OPC1_16_SRC_SHA:
+ gen_shaci(cpu_gpr_d[r1], cpu_gpr_d[r1], const4);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_srr_opc(DisasContext *ctx, int op1)
+{
+ int r1, r2;
+ TCGv temp;
+
+ r1 = MASK_OP_SRR_S1D(ctx->opcode);
+ r2 = MASK_OP_SRR_S2(ctx->opcode);
+
+ switch (op1) {
+ case OPC1_16_SRR_ADD:
+ gen_add_d(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC1_16_SRR_ADD_A15:
+ gen_add_d(cpu_gpr_d[r1], cpu_gpr_d[15], cpu_gpr_d[r2]);
+ break;
+ case OPC1_16_SRR_ADD_15A:
+ gen_add_d(cpu_gpr_d[15], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC1_16_SRR_ADD_A:
+ tcg_gen_add_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], cpu_gpr_a[r2]);
+ break;
+ case OPC1_16_SRR_ADDS:
+ gen_adds(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC1_16_SRR_AND:
+ tcg_gen_and_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC1_16_SRR_CMOV:
+ temp = tcg_const_tl(0);
+ tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
+ cpu_gpr_d[r2], cpu_gpr_d[r1]);
+ tcg_temp_free(temp);
+ break;
+ case OPC1_16_SRR_CMOVN:
+ temp = tcg_const_tl(0);
+ tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp,
+ cpu_gpr_d[r2], cpu_gpr_d[r1]);
+ tcg_temp_free(temp);
+ break;
+ case OPC1_16_SRR_EQ:
+ tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr_d[15], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC1_16_SRR_LT:
+ tcg_gen_setcond_tl(TCG_COND_LT, cpu_gpr_d[15], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC1_16_SRR_MOV:
+ tcg_gen_mov_tl(cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC1_16_SRR_MOV_A:
+ tcg_gen_mov_tl(cpu_gpr_a[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC1_16_SRR_MOV_AA:
+ tcg_gen_mov_tl(cpu_gpr_a[r1], cpu_gpr_a[r2]);
+ break;
+ case OPC1_16_SRR_MOV_D:
+ tcg_gen_mov_tl(cpu_gpr_d[r1], cpu_gpr_a[r2]);
+ break;
+ case OPC1_16_SRR_MUL:
+ gen_mul_i32s(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC1_16_SRR_OR:
+ tcg_gen_or_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC1_16_SRR_SUB:
+ gen_sub_d(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC1_16_SRR_SUB_A15B:
+ gen_sub_d(cpu_gpr_d[r1], cpu_gpr_d[15], cpu_gpr_d[r2]);
+ break;
+ case OPC1_16_SRR_SUB_15AB:
+ gen_sub_d(cpu_gpr_d[15], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC1_16_SRR_SUBS:
+ gen_subs(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC1_16_SRR_XOR:
+ tcg_gen_xor_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_ssr_opc(DisasContext *ctx, int op1)
+{
+ int r1, r2;
+
+ r1 = MASK_OP_SSR_S1(ctx->opcode);
+ r2 = MASK_OP_SSR_S2(ctx->opcode);
+
+ switch (op1) {
+ case OPC1_16_SSR_ST_A:
+ tcg_gen_qemu_st_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
+ break;
+ case OPC1_16_SSR_ST_A_POSTINC:
+ tcg_gen_qemu_st_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
+ break;
+ case OPC1_16_SSR_ST_B:
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
+ break;
+ case OPC1_16_SSR_ST_B_POSTINC:
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 1);
+ break;
+ case OPC1_16_SSR_ST_H:
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW);
+ break;
+ case OPC1_16_SSR_ST_H_POSTINC:
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 2);
+ break;
+ case OPC1_16_SSR_ST_W:
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
+ break;
+ case OPC1_16_SSR_ST_W_POSTINC:
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_sc_opc(DisasContext *ctx, int op1)
+{
+ int32_t const16;
+
+ const16 = MASK_OP_SC_CONST8(ctx->opcode);
+
+ switch (op1) {
+ case OPC1_16_SC_AND:
+ tcg_gen_andi_tl(cpu_gpr_d[15], cpu_gpr_d[15], const16);
+ break;
+ case OPC1_16_SC_BISR:
+ gen_helper_1arg(bisr, const16 & 0xff);
+ break;
+ case OPC1_16_SC_LD_A:
+ gen_offset_ld(ctx, cpu_gpr_a[15], cpu_gpr_a[10], const16 * 4, MO_LESL);
+ break;
+ case OPC1_16_SC_LD_W:
+ gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[10], const16 * 4, MO_LESL);
+ break;
+ case OPC1_16_SC_MOV:
+ tcg_gen_movi_tl(cpu_gpr_d[15], const16);
+ break;
+ case OPC1_16_SC_OR:
+ tcg_gen_ori_tl(cpu_gpr_d[15], cpu_gpr_d[15], const16);
+ break;
+ case OPC1_16_SC_ST_A:
+ gen_offset_st(ctx, cpu_gpr_a[15], cpu_gpr_a[10], const16 * 4, MO_LESL);
+ break;
+ case OPC1_16_SC_ST_W:
+ gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[10], const16 * 4, MO_LESL);
+ break;
+ case OPC1_16_SC_SUB_A:
+ tcg_gen_subi_tl(cpu_gpr_a[10], cpu_gpr_a[10], const16);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_slr_opc(DisasContext *ctx, int op1)
+{
+ int r1, r2;
+
+ r1 = MASK_OP_SLR_D(ctx->opcode);
+ r2 = MASK_OP_SLR_S2(ctx->opcode);
+
+ switch (op1) {
+/* SLR-format */
+ case OPC1_16_SLR_LD_A:
+ tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
+ break;
+ case OPC1_16_SLR_LD_A_POSTINC:
+ tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
+ break;
+ case OPC1_16_SLR_LD_BU:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
+ break;
+ case OPC1_16_SLR_LD_BU_POSTINC:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 1);
+ break;
+ case OPC1_16_SLR_LD_H:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESW);
+ break;
+ case OPC1_16_SLR_LD_H_POSTINC:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESW);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 2);
+ break;
+ case OPC1_16_SLR_LD_W:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
+ break;
+ case OPC1_16_SLR_LD_W_POSTINC:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_sro_opc(DisasContext *ctx, int op1)
+{
+ int r2;
+ int32_t address;
+
+ r2 = MASK_OP_SRO_S2(ctx->opcode);
+ address = MASK_OP_SRO_OFF4(ctx->opcode);
+
+/* SRO-format */
+ switch (op1) {
+ case OPC1_16_SRO_LD_A:
+ gen_offset_ld(ctx, cpu_gpr_a[15], cpu_gpr_a[r2], address * 4, MO_LESL);
+ break;
+ case OPC1_16_SRO_LD_BU:
+ gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address, MO_UB);
+ break;
+ case OPC1_16_SRO_LD_H:
+ gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address * 2, MO_LESW);
+ break;
+ case OPC1_16_SRO_LD_W:
+ gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address * 4, MO_LESL);
+ break;
+ case OPC1_16_SRO_ST_A:
+ gen_offset_st(ctx, cpu_gpr_a[15], cpu_gpr_a[r2], address * 4, MO_LESL);
+ break;
+ case OPC1_16_SRO_ST_B:
+ gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address, MO_UB);
+ break;
+ case OPC1_16_SRO_ST_H:
+ gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address * 2, MO_LESW);
+ break;
+ case OPC1_16_SRO_ST_W:
+ gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[r2], address * 4, MO_LESL);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_sr_system(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+ op2 = MASK_OP_SR_OP2(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_16_SR_NOP:
+ break;
+ case OPC2_16_SR_RET:
+ gen_compute_branch(ctx, op2, 0, 0, 0, 0);
+ break;
+ case OPC2_16_SR_RFE:
+ gen_helper_rfe(cpu_env);
+ tcg_gen_exit_tb(0);
+ ctx->bstate = BS_BRANCH;
+ break;
+ case OPC2_16_SR_DEBUG:
+ /* raise EXCP_DEBUG */
+ break;
+ case OPC2_16_SR_FRET:
+ gen_fret(ctx);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_sr_accu(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+ uint32_t r1;
+ TCGv temp;
+
+ r1 = MASK_OP_SR_S1D(ctx->opcode);
+ op2 = MASK_OP_SR_OP2(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_16_SR_RSUB:
+ /* overflow only if r1 == -0x80000000 */
+ temp = tcg_const_i32(-0x80000000);
+ /* calc V bit */
+ tcg_gen_setcond_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r1], temp);
+ tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ /* calc SV bit */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* sub */
+ tcg_gen_neg_tl(cpu_gpr_d[r1], cpu_gpr_d[r1]);
+ /* calc av */
+ tcg_gen_add_tl(cpu_PSW_AV, cpu_gpr_d[r1], cpu_gpr_d[r1]);
+ tcg_gen_xor_tl(cpu_PSW_AV, cpu_gpr_d[r1], cpu_PSW_AV);
+ /* calc sav */
+ tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV);
+ tcg_temp_free(temp);
+ break;
+ case OPC2_16_SR_SAT_B:
+ gen_saturate(cpu_gpr_d[r1], cpu_gpr_d[r1], 0x7f, -0x80);
+ break;
+ case OPC2_16_SR_SAT_BU:
+ gen_saturate_u(cpu_gpr_d[r1], cpu_gpr_d[r1], 0xff);
+ break;
+ case OPC2_16_SR_SAT_H:
+ gen_saturate(cpu_gpr_d[r1], cpu_gpr_d[r1], 0x7fff, -0x8000);
+ break;
+ case OPC2_16_SR_SAT_HU:
+ gen_saturate_u(cpu_gpr_d[r1], cpu_gpr_d[r1], 0xffff);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_16Bit_opc(CPUTriCoreState *env, DisasContext *ctx)
+{
+ int op1;
+ int r1, r2;
+ int32_t const16;
+ int32_t address;
+ TCGv temp;
+
+ op1 = MASK_OP_MAJOR(ctx->opcode);
+
+ /* handle the ADDSC.A opcode, which is only 6 bits long */
+ if (unlikely((op1 & 0x3f) == OPC1_16_SRRS_ADDSC_A)) {
+ op1 = OPC1_16_SRRS_ADDSC_A;
+ }
+
+ switch (op1) {
+ case OPC1_16_SRC_ADD:
+ case OPC1_16_SRC_ADD_A15:
+ case OPC1_16_SRC_ADD_15A:
+ case OPC1_16_SRC_ADD_A:
+ case OPC1_16_SRC_CADD:
+ case OPC1_16_SRC_CADDN:
+ case OPC1_16_SRC_CMOV:
+ case OPC1_16_SRC_CMOVN:
+ case OPC1_16_SRC_EQ:
+ case OPC1_16_SRC_LT:
+ case OPC1_16_SRC_MOV:
+ case OPC1_16_SRC_MOV_A:
+ case OPC1_16_SRC_MOV_E:
+ case OPC1_16_SRC_SH:
+ case OPC1_16_SRC_SHA:
+ decode_src_opc(env, ctx, op1);
+ break;
+/* SRR-format */
+ case OPC1_16_SRR_ADD:
+ case OPC1_16_SRR_ADD_A15:
+ case OPC1_16_SRR_ADD_15A:
+ case OPC1_16_SRR_ADD_A:
+ case OPC1_16_SRR_ADDS:
+ case OPC1_16_SRR_AND:
+ case OPC1_16_SRR_CMOV:
+ case OPC1_16_SRR_CMOVN:
+ case OPC1_16_SRR_EQ:
+ case OPC1_16_SRR_LT:
+ case OPC1_16_SRR_MOV:
+ case OPC1_16_SRR_MOV_A:
+ case OPC1_16_SRR_MOV_AA:
+ case OPC1_16_SRR_MOV_D:
+ case OPC1_16_SRR_MUL:
+ case OPC1_16_SRR_OR:
+ case OPC1_16_SRR_SUB:
+ case OPC1_16_SRR_SUB_A15B:
+ case OPC1_16_SRR_SUB_15AB:
+ case OPC1_16_SRR_SUBS:
+ case OPC1_16_SRR_XOR:
+ decode_srr_opc(ctx, op1);
+ break;
+/* SSR-format */
+ case OPC1_16_SSR_ST_A:
+ case OPC1_16_SSR_ST_A_POSTINC:
+ case OPC1_16_SSR_ST_B:
+ case OPC1_16_SSR_ST_B_POSTINC:
+ case OPC1_16_SSR_ST_H:
+ case OPC1_16_SSR_ST_H_POSTINC:
+ case OPC1_16_SSR_ST_W:
+ case OPC1_16_SSR_ST_W_POSTINC:
+ decode_ssr_opc(ctx, op1);
+ break;
+/* SRRS-format */
+ case OPC1_16_SRRS_ADDSC_A:
+ r2 = MASK_OP_SRRS_S2(ctx->opcode);
+ r1 = MASK_OP_SRRS_S1D(ctx->opcode);
+ const16 = MASK_OP_SRRS_N(ctx->opcode);
+ temp = tcg_temp_new();
+ tcg_gen_shli_tl(temp, cpu_gpr_d[15], const16);
+ tcg_gen_add_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], temp);
+ tcg_temp_free(temp);
+ break;
+/* SLRO-format */
+ case OPC1_16_SLRO_LD_A:
+ r1 = MASK_OP_SLRO_D(ctx->opcode);
+ const16 = MASK_OP_SLRO_OFF4(ctx->opcode);
+ gen_offset_ld(ctx, cpu_gpr_a[r1], cpu_gpr_a[15], const16 * 4, MO_LESL);
+ break;
+ case OPC1_16_SLRO_LD_BU:
+ r1 = MASK_OP_SLRO_D(ctx->opcode);
+ const16 = MASK_OP_SLRO_OFF4(ctx->opcode);
+ gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16, MO_UB);
+ break;
+ case OPC1_16_SLRO_LD_H:
+ r1 = MASK_OP_SLRO_D(ctx->opcode);
+ const16 = MASK_OP_SLRO_OFF4(ctx->opcode);
+ gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16 * 2, MO_LESW);
+ break;
+ case OPC1_16_SLRO_LD_W:
+ r1 = MASK_OP_SLRO_D(ctx->opcode);
+ const16 = MASK_OP_SLRO_OFF4(ctx->opcode);
+ gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16 * 4, MO_LESL);
+ break;
+/* SB-format */
+ case OPC1_16_SB_CALL:
+ case OPC1_16_SB_J:
+ case OPC1_16_SB_JNZ:
+ case OPC1_16_SB_JZ:
+ address = MASK_OP_SB_DISP8_SEXT(ctx->opcode);
+ gen_compute_branch(ctx, op1, 0, 0, 0, address);
+ break;
+/* SBC-format */
+ case OPC1_16_SBC_JEQ:
+ case OPC1_16_SBC_JNE:
+ address = MASK_OP_SBC_DISP4(ctx->opcode);
+ const16 = MASK_OP_SBC_CONST4_SEXT(ctx->opcode);
+ gen_compute_branch(ctx, op1, 0, 0, const16, address);
+ break;
+/* SBRN-format */
+ case OPC1_16_SBRN_JNZ_T:
+ case OPC1_16_SBRN_JZ_T:
+ address = MASK_OP_SBRN_DISP4(ctx->opcode);
+ const16 = MASK_OP_SBRN_N(ctx->opcode);
+ gen_compute_branch(ctx, op1, 0, 0, const16, address);
+ break;
+/* SBR-format */
+ case OPC1_16_SBR_JEQ:
+ case OPC1_16_SBR_JGEZ:
+ case OPC1_16_SBR_JGTZ:
+ case OPC1_16_SBR_JLEZ:
+ case OPC1_16_SBR_JLTZ:
+ case OPC1_16_SBR_JNE:
+ case OPC1_16_SBR_JNZ:
+ case OPC1_16_SBR_JNZ_A:
+ case OPC1_16_SBR_JZ:
+ case OPC1_16_SBR_JZ_A:
+ case OPC1_16_SBR_LOOP:
+ r1 = MASK_OP_SBR_S2(ctx->opcode);
+ address = MASK_OP_SBR_DISP4(ctx->opcode);
+ gen_compute_branch(ctx, op1, r1, 0, 0, address);
+ break;
+/* SC-format */
+ case OPC1_16_SC_AND:
+ case OPC1_16_SC_BISR:
+ case OPC1_16_SC_LD_A:
+ case OPC1_16_SC_LD_W:
+ case OPC1_16_SC_MOV:
+ case OPC1_16_SC_OR:
+ case OPC1_16_SC_ST_A:
+ case OPC1_16_SC_ST_W:
+ case OPC1_16_SC_SUB_A:
+ decode_sc_opc(ctx, op1);
+ break;
+/* SLR-format */
+ case OPC1_16_SLR_LD_A:
+ case OPC1_16_SLR_LD_A_POSTINC:
+ case OPC1_16_SLR_LD_BU:
+ case OPC1_16_SLR_LD_BU_POSTINC:
+ case OPC1_16_SLR_LD_H:
+ case OPC1_16_SLR_LD_H_POSTINC:
+ case OPC1_16_SLR_LD_W:
+ case OPC1_16_SLR_LD_W_POSTINC:
+ decode_slr_opc(ctx, op1);
+ break;
+/* SRO-format */
+ case OPC1_16_SRO_LD_A:
+ case OPC1_16_SRO_LD_BU:
+ case OPC1_16_SRO_LD_H:
+ case OPC1_16_SRO_LD_W:
+ case OPC1_16_SRO_ST_A:
+ case OPC1_16_SRO_ST_B:
+ case OPC1_16_SRO_ST_H:
+ case OPC1_16_SRO_ST_W:
+ decode_sro_opc(ctx, op1);
+ break;
+/* SSRO-format */
+ case OPC1_16_SSRO_ST_A:
+ r1 = MASK_OP_SSRO_S1(ctx->opcode);
+ const16 = MASK_OP_SSRO_OFF4(ctx->opcode);
+ gen_offset_st(ctx, cpu_gpr_a[r1], cpu_gpr_a[15], const16 * 4, MO_LESL);
+ break;
+ case OPC1_16_SSRO_ST_B:
+ r1 = MASK_OP_SSRO_S1(ctx->opcode);
+ const16 = MASK_OP_SSRO_OFF4(ctx->opcode);
+ gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16, MO_UB);
+ break;
+ case OPC1_16_SSRO_ST_H:
+ r1 = MASK_OP_SSRO_S1(ctx->opcode);
+ const16 = MASK_OP_SSRO_OFF4(ctx->opcode);
+ gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16 * 2, MO_LESW);
+ break;
+ case OPC1_16_SSRO_ST_W:
+ r1 = MASK_OP_SSRO_S1(ctx->opcode);
+ const16 = MASK_OP_SSRO_OFF4(ctx->opcode);
+ gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[15], const16 * 4, MO_LESL);
+ break;
+/* SR-format */
+ case OPCM_16_SR_SYSTEM:
+ decode_sr_system(env, ctx);
+ break;
+ case OPCM_16_SR_ACCU:
+ decode_sr_accu(env, ctx);
+ break;
+ case OPC1_16_SR_JI:
+ r1 = MASK_OP_SR_S1D(ctx->opcode);
+ gen_compute_branch(ctx, op1, r1, 0, 0, 0);
+ break;
+ case OPC1_16_SR_NOT:
+ r1 = MASK_OP_SR_S1D(ctx->opcode);
+ tcg_gen_not_tl(cpu_gpr_d[r1], cpu_gpr_d[r1]);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+/*
+ * 32 bit instructions
+ */
+
+/* ABS-format */
+static void decode_abs_ldw(CPUTriCoreState *env, DisasContext *ctx)
+{
+ int32_t op2;
+ int32_t r1;
+ uint32_t address;
+ TCGv temp;
+
+ r1 = MASK_OP_ABS_S1D(ctx->opcode);
+ address = MASK_OP_ABS_OFF18(ctx->opcode);
+ op2 = MASK_OP_ABS_OP2(ctx->opcode);
+
+ temp = tcg_const_i32(EA_ABS_FORMAT(address));
+
+ switch (op2) {
+ case OPC2_32_ABS_LD_A:
+ tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LESL);
+ break;
+ case OPC2_32_ABS_LD_D:
+ CHECK_REG_PAIR(r1);
+ gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx);
+ break;
+ case OPC2_32_ABS_LD_DA:
+ CHECK_REG_PAIR(r1);
+ gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx);
+ break;
+ case OPC2_32_ABS_LD_W:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESL);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+
+ tcg_temp_free(temp);
+}
+
+static void decode_abs_ldb(CPUTriCoreState *env, DisasContext *ctx)
+{
+ int32_t op2;
+ int32_t r1;
+ uint32_t address;
+ TCGv temp;
+
+ r1 = MASK_OP_ABS_S1D(ctx->opcode);
+ address = MASK_OP_ABS_OFF18(ctx->opcode);
+ op2 = MASK_OP_ABS_OP2(ctx->opcode);
+
+ temp = tcg_const_i32(EA_ABS_FORMAT(address));
+
+ switch (op2) {
+ case OPC2_32_ABS_LD_B:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_SB);
+ break;
+ case OPC2_32_ABS_LD_BU:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_UB);
+ break;
+ case OPC2_32_ABS_LD_H:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESW);
+ break;
+ case OPC2_32_ABS_LD_HU:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+
+ tcg_temp_free(temp);
+}
+
+static void decode_abs_ldst_swap(CPUTriCoreState *env, DisasContext *ctx)
+{
+ int32_t op2;
+ int32_t r1;
+ uint32_t address;
+ TCGv temp;
+
+ r1 = MASK_OP_ABS_S1D(ctx->opcode);
+ address = MASK_OP_ABS_OFF18(ctx->opcode);
+ op2 = MASK_OP_ABS_OP2(ctx->opcode);
+
+ temp = tcg_const_i32(EA_ABS_FORMAT(address));
+
+ switch (op2) {
+ case OPC2_32_ABS_LDMST:
+ gen_ldmst(ctx, r1, temp);
+ break;
+ case OPC2_32_ABS_SWAP_W:
+ gen_swap(ctx, r1, temp);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+
+ tcg_temp_free(temp);
+}
+
+static void decode_abs_ldst_context(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+ int32_t off18;
+
+ off18 = MASK_OP_ABS_OFF18(ctx->opcode);
+ op2 = MASK_OP_ABS_OP2(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_ABS_LDLCX:
+ gen_helper_1arg(ldlcx, EA_ABS_FORMAT(off18));
+ break;
+ case OPC2_32_ABS_LDUCX:
+ gen_helper_1arg(lducx, EA_ABS_FORMAT(off18));
+ break;
+ case OPC2_32_ABS_STLCX:
+ gen_helper_1arg(stlcx, EA_ABS_FORMAT(off18));
+ break;
+ case OPC2_32_ABS_STUCX:
+ gen_helper_1arg(stucx, EA_ABS_FORMAT(off18));
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_abs_store(CPUTriCoreState *env, DisasContext *ctx)
+{
+ int32_t op2;
+ int32_t r1;
+ uint32_t address;
+ TCGv temp;
+
+ r1 = MASK_OP_ABS_S1D(ctx->opcode);
+ address = MASK_OP_ABS_OFF18(ctx->opcode);
+ op2 = MASK_OP_ABS_OP2(ctx->opcode);
+
+ temp = tcg_const_i32(EA_ABS_FORMAT(address));
+
+ switch (op2) {
+ case OPC2_32_ABS_ST_A:
+ tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LESL);
+ break;
+ case OPC2_32_ABS_ST_D:
+ CHECK_REG_PAIR(r1);
+ gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx);
+ break;
+ case OPC2_32_ABS_ST_DA:
+ CHECK_REG_PAIR(r1);
+ gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx);
+ break;
+ case OPC2_32_ABS_ST_W:
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESL);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ tcg_temp_free(temp);
+}
+
+static void decode_abs_storeb_h(CPUTriCoreState *env, DisasContext *ctx)
+{
+ int32_t op2;
+ int32_t r1;
+ uint32_t address;
+ TCGv temp;
+
+ r1 = MASK_OP_ABS_S1D(ctx->opcode);
+ address = MASK_OP_ABS_OFF18(ctx->opcode);
+ op2 = MASK_OP_ABS_OP2(ctx->opcode);
+
+ temp = tcg_const_i32(EA_ABS_FORMAT(address));
+
+ switch (op2) {
+ case OPC2_32_ABS_ST_B:
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_UB);
+ break;
+ case OPC2_32_ABS_ST_H:
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ tcg_temp_free(temp);
+}
+
+/* Bit-format */
+
+static void decode_bit_andacc(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r2, r3;
+ int pos1, pos2;
+
+ r1 = MASK_OP_BIT_S1(ctx->opcode);
+ r2 = MASK_OP_BIT_S2(ctx->opcode);
+ r3 = MASK_OP_BIT_D(ctx->opcode);
+ pos1 = MASK_OP_BIT_POS1(ctx->opcode);
+ pos2 = MASK_OP_BIT_POS2(ctx->opcode);
+ op2 = MASK_OP_BIT_OP2(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_BIT_AND_AND_T:
+ gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_and_tl, &tcg_gen_and_tl);
+ break;
+ case OPC2_32_BIT_AND_ANDN_T:
+ gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_andc_tl, &tcg_gen_and_tl);
+ break;
+ case OPC2_32_BIT_AND_NOR_T:
+ if (TCG_TARGET_HAS_andc_i32) {
+ gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_or_tl, &tcg_gen_andc_tl);
+ } else {
+ gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_nor_tl, &tcg_gen_and_tl);
+ }
+ break;
+ case OPC2_32_BIT_AND_OR_T:
+ gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_or_tl, &tcg_gen_and_tl);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_bit_logical_t(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r2, r3;
+ int pos1, pos2;
+ r1 = MASK_OP_BIT_S1(ctx->opcode);
+ r2 = MASK_OP_BIT_S2(ctx->opcode);
+ r3 = MASK_OP_BIT_D(ctx->opcode);
+ pos1 = MASK_OP_BIT_POS1(ctx->opcode);
+ pos2 = MASK_OP_BIT_POS2(ctx->opcode);
+ op2 = MASK_OP_BIT_OP2(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_BIT_AND_T:
+ gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_and_tl);
+ break;
+ case OPC2_32_BIT_ANDN_T:
+ gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_andc_tl);
+ break;
+ case OPC2_32_BIT_NOR_T:
+ gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_nor_tl);
+ break;
+ case OPC2_32_BIT_OR_T:
+ gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_or_tl);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
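+/* INS.T/INSN.T: insert bit pos2 of D[r2] (inverted for INSN.T) at bit pos1
+ of D[r1]; the result is written to D[r3] */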
+static void decode_bit_insert(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r2, r3;
+ int pos1, pos2;
+ TCGv temp;
+ op2 = MASK_OP_BIT_OP2(ctx->opcode);
+ r1 = MASK_OP_BIT_S1(ctx->opcode);
+ r2 = MASK_OP_BIT_S2(ctx->opcode);
+ r3 = MASK_OP_BIT_D(ctx->opcode);
+ pos1 = MASK_OP_BIT_POS1(ctx->opcode);
+ pos2 = MASK_OP_BIT_POS2(ctx->opcode);
+
+ temp = tcg_temp_new();
+
+ tcg_gen_shri_tl(temp, cpu_gpr_d[r2], pos2);
+ if (op2 == OPC2_32_BIT_INSN_T) {
+ tcg_gen_not_tl(temp, temp);
+ }
+ tcg_gen_deposit_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], temp, pos1, 1);
+ tcg_temp_free(temp);
+}
+
+static void decode_bit_logical_t2(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+
+ int r1, r2, r3;
+ int pos1, pos2;
+
+ op2 = MASK_OP_BIT_OP2(ctx->opcode);
+ r1 = MASK_OP_BIT_S1(ctx->opcode);
+ r2 = MASK_OP_BIT_S2(ctx->opcode);
+ r3 = MASK_OP_BIT_D(ctx->opcode);
+ pos1 = MASK_OP_BIT_POS1(ctx->opcode);
+ pos2 = MASK_OP_BIT_POS2(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_BIT_NAND_T:
+ gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_nand_tl);
+ break;
+ case OPC2_32_BIT_ORN_T:
+ gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_orc_tl);
+ break;
+ case OPC2_32_BIT_XNOR_T:
+ gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_eqv_tl);
+ break;
+ case OPC2_32_BIT_XOR_T:
+ gen_bit_1op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_xor_tl);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_bit_orand(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+
+ int r1, r2, r3;
+ int pos1, pos2;
+
+ op2 = MASK_OP_BIT_OP2(ctx->opcode);
+ r1 = MASK_OP_BIT_S1(ctx->opcode);
+ r2 = MASK_OP_BIT_S2(ctx->opcode);
+ r3 = MASK_OP_BIT_D(ctx->opcode);
+ pos1 = MASK_OP_BIT_POS1(ctx->opcode);
+ pos2 = MASK_OP_BIT_POS2(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_BIT_OR_AND_T:
+ gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_and_tl, &tcg_gen_or_tl);
+ break;
+ case OPC2_32_BIT_OR_ANDN_T:
+ gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_andc_tl, &tcg_gen_or_tl);
+ break;
+ case OPC2_32_BIT_OR_NOR_T:
+ if (TCG_TARGET_HAS_orc_i32) {
+ gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_or_tl, &tcg_gen_orc_tl);
+ } else {
+ gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_nor_tl, &tcg_gen_or_tl);
+ }
+ break;
+ case OPC2_32_BIT_OR_OR_T:
+ gen_bit_2op(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_or_tl, &tcg_gen_or_tl);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_bit_sh_logic1(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r2, r3;
+ int pos1, pos2;
+ TCGv temp;
+
+ op2 = MASK_OP_BIT_OP2(ctx->opcode);
+ r1 = MASK_OP_BIT_S1(ctx->opcode);
+ r2 = MASK_OP_BIT_S2(ctx->opcode);
+ r3 = MASK_OP_BIT_D(ctx->opcode);
+ pos1 = MASK_OP_BIT_POS1(ctx->opcode);
+ pos2 = MASK_OP_BIT_POS2(ctx->opcode);
+
+ temp = tcg_temp_new();
+
+ switch (op2) {
+ case OPC2_32_BIT_SH_AND_T:
+ gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_and_tl);
+ break;
+ case OPC2_32_BIT_SH_ANDN_T:
+ gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_andc_tl);
+ break;
+ case OPC2_32_BIT_SH_NOR_T:
+ gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_nor_tl);
+ break;
+ case OPC2_32_BIT_SH_OR_T:
+ gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_or_tl);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 1);
+ tcg_gen_add_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp);
+ tcg_temp_free(temp);
+}
+
+static void decode_bit_sh_logic2(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r2, r3;
+ int pos1, pos2;
+ TCGv temp;
+
+ op2 = MASK_OP_BIT_OP2(ctx->opcode);
+ r1 = MASK_OP_BIT_S1(ctx->opcode);
+ r2 = MASK_OP_BIT_S2(ctx->opcode);
+ r3 = MASK_OP_BIT_D(ctx->opcode);
+ pos1 = MASK_OP_BIT_POS1(ctx->opcode);
+ pos2 = MASK_OP_BIT_POS2(ctx->opcode);
+
+ temp = tcg_temp_new();
+
+ switch (op2) {
+ case OPC2_32_BIT_SH_NAND_T:
+ gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_nand_tl);
+ break;
+ case OPC2_32_BIT_SH_ORN_T:
+ gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_orc_tl);
+ break;
+ case OPC2_32_BIT_SH_XNOR_T:
+ gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_eqv_tl);
+ break;
+ case OPC2_32_BIT_SH_XOR_T:
+ gen_bit_1op(temp, cpu_gpr_d[r1], cpu_gpr_d[r2],
+ pos1, pos2, &tcg_gen_xor_tl);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 1);
+ tcg_gen_add_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp);
+ tcg_temp_free(temp);
+}
+
+/* BO-format */
+
+
+static void decode_bo_addrmode_post_pre_base(CPUTriCoreState *env,
+ DisasContext *ctx)
+{
+ uint32_t op2;
+ uint32_t off10;
+ int32_t r1, r2;
+ TCGv temp;
+
+ r1 = MASK_OP_BO_S1D(ctx->opcode);
+ r2 = MASK_OP_BO_S2(ctx->opcode);
+ off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
+ op2 = MASK_OP_BO_OP2(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_BO_CACHEA_WI_SHORTOFF:
+ case OPC2_32_BO_CACHEA_W_SHORTOFF:
+ case OPC2_32_BO_CACHEA_I_SHORTOFF:
+ /* instruction to access the cache */
+ break;
+ case OPC2_32_BO_CACHEA_WI_POSTINC:
+ case OPC2_32_BO_CACHEA_W_POSTINC:
+ case OPC2_32_BO_CACHEA_I_POSTINC:
+ /* instruction to access the cache, but we still need to handle
+ the addressing mode */
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_CACHEA_WI_PREINC:
+ case OPC2_32_BO_CACHEA_W_PREINC:
+ case OPC2_32_BO_CACHEA_I_PREINC:
+ /* instruction to access the cache, but we still need to handle
+ the addressing mode */
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_CACHEI_WI_SHORTOFF:
+ case OPC2_32_BO_CACHEI_W_SHORTOFF:
+ if (!tricore_feature(env, TRICORE_FEATURE_131)) {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
+ case OPC2_32_BO_CACHEI_W_POSTINC:
+ case OPC2_32_BO_CACHEI_WI_POSTINC:
+ if (tricore_feature(env, TRICORE_FEATURE_131)) {
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
+ case OPC2_32_BO_CACHEI_W_PREINC:
+ case OPC2_32_BO_CACHEI_WI_PREINC:
+ if (tricore_feature(env, TRICORE_FEATURE_131)) {
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
+ case OPC2_32_BO_ST_A_SHORTOFF:
+ gen_offset_st(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LESL);
+ break;
+ case OPC2_32_BO_ST_A_POSTINC:
+ tcg_gen_qemu_st_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ MO_LESL);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_ST_A_PREINC:
+ gen_st_preincr(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LESL);
+ break;
+ case OPC2_32_BO_ST_B_SHORTOFF:
+ gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
+ break;
+ case OPC2_32_BO_ST_B_POSTINC:
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ MO_UB);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_ST_B_PREINC:
+ gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
+ break;
+ case OPC2_32_BO_ST_D_SHORTOFF:
+ CHECK_REG_PAIR(r1);
+ gen_offset_st_2regs(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2],
+ off10, ctx);
+ break;
+ case OPC2_32_BO_ST_D_POSTINC:
+ CHECK_REG_PAIR(r1);
+ gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2], ctx);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_ST_D_PREINC:
+ CHECK_REG_PAIR(r1);
+ temp = tcg_temp_new();
+ tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx);
+ tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
+ tcg_temp_free(temp);
+ break;
+ case OPC2_32_BO_ST_DA_SHORTOFF:
+ CHECK_REG_PAIR(r1);
+ gen_offset_st_2regs(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2],
+ off10, ctx);
+ break;
+ case OPC2_32_BO_ST_DA_POSTINC:
+ CHECK_REG_PAIR(r1);
+ gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2], ctx);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_ST_DA_PREINC:
+ CHECK_REG_PAIR(r1);
+ temp = tcg_temp_new();
+ tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx);
+ tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
+ tcg_temp_free(temp);
+ break;
+ case OPC2_32_BO_ST_H_SHORTOFF:
+ gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
+ break;
+ case OPC2_32_BO_ST_H_POSTINC:
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ MO_LEUW);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_ST_H_PREINC:
+ gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
+ break;
+ case OPC2_32_BO_ST_Q_SHORTOFF:
+ temp = tcg_temp_new();
+ tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
+ gen_offset_st(ctx, temp, cpu_gpr_a[r2], off10, MO_LEUW);
+ tcg_temp_free(temp);
+ break;
+ case OPC2_32_BO_ST_Q_POSTINC:
+ temp = tcg_temp_new();
+ tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_qemu_st_tl(temp, cpu_gpr_a[r2], ctx->mem_idx,
+ MO_LEUW);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ tcg_temp_free(temp);
+ break;
+ case OPC2_32_BO_ST_Q_PREINC:
+ temp = tcg_temp_new();
+ tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
+ gen_st_preincr(ctx, temp, cpu_gpr_a[r2], off10, MO_LEUW);
+ tcg_temp_free(temp);
+ break;
+ case OPC2_32_BO_ST_W_SHORTOFF:
+ gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
+ break;
+ case OPC2_32_BO_ST_W_POSTINC:
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ MO_LEUL);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_ST_W_PREINC:
+ gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_bo_addrmode_bitreverse_circular(CPUTriCoreState *env,
+ DisasContext *ctx)
+{
+ uint32_t op2;
+ uint32_t off10;
+ int32_t r1, r2;
+ TCGv temp, temp2, temp3;
+
+ r1 = MASK_OP_BO_S1D(ctx->opcode);
+ r2 = MASK_OP_BO_S2(ctx->opcode);
+ off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
+ op2 = MASK_OP_BO_OP2(ctx->opcode);
+
+ temp = tcg_temp_new();
+ temp2 = tcg_temp_new();
+ temp3 = tcg_const_i32(off10);
+ CHECK_REG_PAIR(r2);
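+ /* EA = A[r2] + index, where the index is the low halfword of A[r2+1] */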
+ tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]);
+ tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
+
+ switch (op2) {
+ case OPC2_32_BO_CACHEA_WI_BR:
+ case OPC2_32_BO_CACHEA_W_BR:
+ case OPC2_32_BO_CACHEA_I_BR:
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_CACHEA_WI_CIRC:
+ case OPC2_32_BO_CACHEA_W_CIRC:
+ case OPC2_32_BO_CACHEA_I_CIRC:
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ case OPC2_32_BO_ST_A_BR:
+ tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_ST_A_CIRC:
+ tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ case OPC2_32_BO_ST_B_BR:
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_ST_B_CIRC:
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ case OPC2_32_BO_ST_D_BR:
+ CHECK_REG_PAIR(r1);
+ gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp2, ctx);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_ST_D_CIRC:
+ CHECK_REG_PAIR(r1);
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
+ tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
+ tcg_gen_addi_tl(temp, temp, 4);
+ tcg_gen_rem_tl(temp, temp, temp2);
+ tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1+1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ case OPC2_32_BO_ST_DA_BR:
+ CHECK_REG_PAIR(r1);
+ gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp2, ctx);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_ST_DA_CIRC:
+ CHECK_REG_PAIR(r1);
+ tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
+ tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
+ tcg_gen_addi_tl(temp, temp, 4);
+ tcg_gen_rem_tl(temp, temp, temp2);
+ tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
+ tcg_gen_qemu_st_tl(cpu_gpr_a[r1+1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ case OPC2_32_BO_ST_H_BR:
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_ST_H_CIRC:
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ case OPC2_32_BO_ST_Q_BR:
+ tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_qemu_st_tl(temp, temp2, ctx->mem_idx, MO_LEUW);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_ST_Q_CIRC:
+ tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_qemu_st_tl(temp, temp2, ctx->mem_idx, MO_LEUW);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ case OPC2_32_BO_ST_W_BR:
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_ST_W_CIRC:
+ tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free(temp3);
+}
+
+static void decode_bo_addrmode_ld_post_pre_base(CPUTriCoreState *env,
+ DisasContext *ctx)
+{
+ uint32_t op2;
+ uint32_t off10;
+ int32_t r1, r2;
+ TCGv temp;
+
+ r1 = MASK_OP_BO_S1D(ctx->opcode);
+ r2 = MASK_OP_BO_S2(ctx->opcode);
+ off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
+ op2 = MASK_OP_BO_OP2(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_BO_LD_A_SHORTOFF:
+ gen_offset_ld(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LEUL);
+ break;
+ case OPC2_32_BO_LD_A_POSTINC:
+ tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ MO_LEUL);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_LD_A_PREINC:
+ gen_ld_preincr(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LEUL);
+ break;
+ case OPC2_32_BO_LD_B_SHORTOFF:
+ gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_SB);
+ break;
+ case OPC2_32_BO_LD_B_POSTINC:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ MO_SB);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_LD_B_PREINC:
+ gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_SB);
+ break;
+ case OPC2_32_BO_LD_BU_SHORTOFF:
+ gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
+ break;
+ case OPC2_32_BO_LD_BU_POSTINC:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ MO_UB);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_LD_BU_PREINC:
+ gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB);
+ break;
+ case OPC2_32_BO_LD_D_SHORTOFF:
+ CHECK_REG_PAIR(r1);
+ gen_offset_ld_2regs(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2],
+ off10, ctx);
+ break;
+ case OPC2_32_BO_LD_D_POSTINC:
+ CHECK_REG_PAIR(r1);
+ gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2], ctx);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_LD_D_PREINC:
+ CHECK_REG_PAIR(r1);
+ temp = tcg_temp_new();
+ tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx);
+ tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
+ tcg_temp_free(temp);
+ break;
+ case OPC2_32_BO_LD_DA_SHORTOFF:
+ CHECK_REG_PAIR(r1);
+ gen_offset_ld_2regs(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2],
+ off10, ctx);
+ break;
+ case OPC2_32_BO_LD_DA_POSTINC:
+ CHECK_REG_PAIR(r1);
+ gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2], ctx);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_LD_DA_PREINC:
+ CHECK_REG_PAIR(r1);
+ temp = tcg_temp_new();
+ tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx);
+ tcg_gen_mov_tl(cpu_gpr_a[r2], temp);
+ tcg_temp_free(temp);
+ break;
+ case OPC2_32_BO_LD_H_SHORTOFF:
+ gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LESW);
+ break;
+ case OPC2_32_BO_LD_H_POSTINC:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ MO_LESW);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_LD_H_PREINC:
+ gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LESW);
+ break;
+ case OPC2_32_BO_LD_HU_SHORTOFF:
+ gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
+ break;
+ case OPC2_32_BO_LD_HU_POSTINC:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ MO_LEUW);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_LD_HU_PREINC:
+ gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
+ break;
+ case OPC2_32_BO_LD_Q_SHORTOFF:
+ gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
+ tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
+ break;
+ case OPC2_32_BO_LD_Q_POSTINC:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ MO_LEUW);
+ tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_LD_Q_PREINC:
+ gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW);
+ tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
+ break;
+ case OPC2_32_BO_LD_W_SHORTOFF:
+ gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
+ break;
+ case OPC2_32_BO_LD_W_POSTINC:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx,
+ MO_LEUL);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_LD_W_PREINC:
+ gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_bo_addrmode_ld_bitreverse_circular(CPUTriCoreState *env,
+ DisasContext *ctx)
+{
+ uint32_t op2;
+ uint32_t off10;
+ int r1, r2;
+
+ TCGv temp, temp2, temp3;
+
+ r1 = MASK_OP_BO_S1D(ctx->opcode);
+ r2 = MASK_OP_BO_S2(ctx->opcode);
+ off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
+ op2 = MASK_OP_BO_OP2(ctx->opcode);
+
+ temp = tcg_temp_new();
+ temp2 = tcg_temp_new();
+ temp3 = tcg_const_i32(off10);
+ CHECK_REG_PAIR(r2);
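+ /* BR/CIRC modes use an address register pair: A[b] holds the buffer
+ base and the low half of A[b+1] holds the current index, so the
+ effective address is A[b] + (A[b+1] & 0xffff) */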
+ tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]);
+ tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
+
+ switch (op2) {
+ case OPC2_32_BO_LD_A_BR:
+ tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_LD_A_CIRC:
+ tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ case OPC2_32_BO_LD_B_BR:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_LD_B_CIRC:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ case OPC2_32_BO_LD_BU_BR:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_LD_BU_CIRC:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ case OPC2_32_BO_LD_D_BR:
+ CHECK_REG_PAIR(r1);
+ gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp2, ctx);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_LD_D_CIRC:
+ CHECK_REG_PAIR(r1);
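+ /* load the low word at the current index, then advance the index by 4
+ modulo the buffer length (A[b+1][31:16]) to address the high word */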
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
+ tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
+ tcg_gen_addi_tl(temp, temp, 4);
+ tcg_gen_rem_tl(temp, temp, temp2);
+ tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1+1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ case OPC2_32_BO_LD_DA_BR:
+ CHECK_REG_PAIR(r1);
+ gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp2, ctx);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_LD_DA_CIRC:
+ CHECK_REG_PAIR(r1);
+ tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL);
+ tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16);
+ tcg_gen_addi_tl(temp, temp, 4);
+ tcg_gen_rem_tl(temp, temp, temp2);
+ tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
+ tcg_gen_qemu_ld_tl(cpu_gpr_a[r1+1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ case OPC2_32_BO_LD_H_BR:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_LD_H_CIRC:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ case OPC2_32_BO_LD_HU_BR:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_LD_HU_CIRC:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ case OPC2_32_BO_LD_Q_BR:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
+ tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_LD_Q_CIRC:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW);
+ tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ case OPC2_32_BO_LD_W_BR:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_LD_W_CIRC:
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free(temp3);
+}
+
+static void decode_bo_addrmode_stctx_post_pre_base(CPUTriCoreState *env,
+ DisasContext *ctx)
+{
+ uint32_t op2;
+ uint32_t off10;
+ int r1, r2;
+
+ TCGv temp, temp2;
+
+ r1 = MASK_OP_BO_S1D(ctx->opcode);
+ r2 = MASK_OP_BO_S2(ctx->opcode);
+ off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
+ op2 = MASK_OP_BO_OP2(ctx->opcode);
+
+ temp = tcg_temp_new();
+ temp2 = tcg_temp_new();
+
+ switch (op2) {
+ case OPC2_32_BO_LDLCX_SHORTOFF:
+ tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ gen_helper_ldlcx(cpu_env, temp);
+ break;
+ case OPC2_32_BO_LDMST_SHORTOFF:
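+ /* LDMST: read-modify-write of the word at the effective address,
+ storing E[a][31:0] under the mask in E[a][63:32] */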
+ tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ gen_ldmst(ctx, r1, temp);
+ break;
+ case OPC2_32_BO_LDMST_POSTINC:
+ gen_ldmst(ctx, r1, cpu_gpr_a[r2]);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_LDMST_PREINC:
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ gen_ldmst(ctx, r1, cpu_gpr_a[r2]);
+ break;
+ case OPC2_32_BO_LDUCX_SHORTOFF:
+ tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ gen_helper_lducx(cpu_env, temp);
+ break;
+ case OPC2_32_BO_LEA_SHORTOFF:
+ tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_STLCX_SHORTOFF:
+ tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ gen_helper_stlcx(cpu_env, temp);
+ break;
+ case OPC2_32_BO_STUCX_SHORTOFF:
+ tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ gen_helper_stucx(cpu_env, temp);
+ break;
+ case OPC2_32_BO_SWAP_W_SHORTOFF:
+ tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ gen_swap(ctx, r1, temp);
+ break;
+ case OPC2_32_BO_SWAP_W_POSTINC:
+ gen_swap(ctx, r1, cpu_gpr_a[r2]);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_SWAP_W_PREINC:
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ gen_swap(ctx, r1, cpu_gpr_a[r2]);
+ break;
+ case OPC2_32_BO_CMPSWAP_W_SHORTOFF:
+ tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ gen_cmpswap(ctx, r1, temp);
+ break;
+ case OPC2_32_BO_CMPSWAP_W_POSTINC:
+ gen_cmpswap(ctx, r1, cpu_gpr_a[r2]);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_CMPSWAP_W_PREINC:
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ gen_cmpswap(ctx, r1, cpu_gpr_a[r2]);
+ break;
+ case OPC2_32_BO_SWAPMSK_W_SHORTOFF:
+ tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10);
+ gen_swapmsk(ctx, r1, temp);
+ break;
+ case OPC2_32_BO_SWAPMSK_W_POSTINC:
+ gen_swapmsk(ctx, r1, cpu_gpr_a[r2]);
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ break;
+ case OPC2_32_BO_SWAPMSK_W_PREINC:
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10);
+ gen_swapmsk(ctx, r1, cpu_gpr_a[r2]);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
+static void decode_bo_addrmode_ldmst_bitreverse_circular(CPUTriCoreState *env,
+ DisasContext *ctx)
+{
+ uint32_t op2;
+ uint32_t off10;
+ int r1, r2;
+
+ TCGv temp, temp2, temp3;
+
+ r1 = MASK_OP_BO_S1D(ctx->opcode);
+ r2 = MASK_OP_BO_S2(ctx->opcode);
+ off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode);
+ op2 = MASK_OP_BO_OP2(ctx->opcode);
+
+ temp = tcg_temp_new();
+ temp2 = tcg_temp_new();
+ temp3 = tcg_const_i32(off10);
+ CHECK_REG_PAIR(r2);
+ tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]);
+ tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp);
+
+ switch (op2) {
+ case OPC2_32_BO_LDMST_BR:
+ gen_ldmst(ctx, r1, temp2);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_LDMST_CIRC:
+ gen_ldmst(ctx, r1, temp2);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ case OPC2_32_BO_SWAP_W_BR:
+ gen_swap(ctx, r1, temp2);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_SWAP_W_CIRC:
+ gen_swap(ctx, r1, temp2);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ case OPC2_32_BO_CMPSWAP_W_BR:
+ gen_cmpswap(ctx, r1, temp2);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_CMPSWAP_W_CIRC:
+ gen_cmpswap(ctx, r1, temp2);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ case OPC2_32_BO_SWAPMSK_W_BR:
+ gen_swapmsk(ctx, r1, temp2);
+ gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]);
+ break;
+ case OPC2_32_BO_SWAPMSK_W_CIRC:
+ gen_swapmsk(ctx, r1, temp2);
+ gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], temp3);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free(temp3);
+}
+
+static void decode_bol_opc(CPUTriCoreState *env, DisasContext *ctx, int32_t op1)
+{
+ int r1, r2;
+ int32_t address;
+ TCGv temp;
+
+ r1 = MASK_OP_BOL_S1D(ctx->opcode);
+ r2 = MASK_OP_BOL_S2(ctx->opcode);
+ address = MASK_OP_BOL_OFF16_SEXT(ctx->opcode);
+
+ switch (op1) {
+ case OPC1_32_BOL_LD_A_LONGOFF:
+ temp = tcg_temp_new();
+ tcg_gen_addi_tl(temp, cpu_gpr_a[r2], address);
+ tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LEUL);
+ tcg_temp_free(temp);
+ break;
+ case OPC1_32_BOL_LD_W_LONGOFF:
+ temp = tcg_temp_new();
+ tcg_gen_addi_tl(temp, cpu_gpr_a[r2], address);
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUL);
+ tcg_temp_free(temp);
+ break;
+ case OPC1_32_BOL_LEA_LONGOFF:
+ tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], address);
+ break;
+ case OPC1_32_BOL_ST_A_LONGOFF:
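+ /* ST.A and the byte/halfword BOL variants below were added with the
+ 1.6 ISA; older cores raise an illegal opcode trap */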
+ if (tricore_feature(env, TRICORE_FEATURE_16)) {
+ gen_offset_st(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], address, MO_LEUL);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
+ case OPC1_32_BOL_ST_W_LONGOFF:
+ gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_LEUL);
+ break;
+ case OPC1_32_BOL_LD_B_LONGOFF:
+ if (tricore_feature(env, TRICORE_FEATURE_16)) {
+ gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_SB);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
+ case OPC1_32_BOL_LD_BU_LONGOFF:
+ if (tricore_feature(env, TRICORE_FEATURE_16)) {
+ gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_UB);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
+ case OPC1_32_BOL_LD_H_LONGOFF:
+ if (tricore_feature(env, TRICORE_FEATURE_16)) {
+ gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_LESW);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
+ case OPC1_32_BOL_LD_HU_LONGOFF:
+ if (tricore_feature(env, TRICORE_FEATURE_16)) {
+ gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_LEUW);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
+ case OPC1_32_BOL_ST_B_LONGOFF:
+ if (tricore_feature(env, TRICORE_FEATURE_16)) {
+ gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_SB);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
+ case OPC1_32_BOL_ST_H_LONGOFF:
+ if (tricore_feature(env, TRICORE_FEATURE_16)) {
+ gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], address, MO_LESW);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+/* RC format */
+static void decode_rc_logical_shift(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r2;
+ int32_t const9;
+ TCGv temp;
+
+ r2 = MASK_OP_RC_D(ctx->opcode);
+ r1 = MASK_OP_RC_S1(ctx->opcode);
+ const9 = MASK_OP_RC_CONST9(ctx->opcode);
+ op2 = MASK_OP_RC_OP2(ctx->opcode);
+
+ temp = tcg_temp_new();
+
+ switch (op2) {
+ case OPC2_32_RC_AND:
+ tcg_gen_andi_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_ANDN:
+ tcg_gen_andi_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], ~const9);
+ break;
+ case OPC2_32_RC_NAND:
+ tcg_gen_movi_tl(temp, const9);
+ tcg_gen_nand_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp);
+ break;
+ case OPC2_32_RC_NOR:
+ tcg_gen_movi_tl(temp, const9);
+ tcg_gen_nor_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp);
+ break;
+ case OPC2_32_RC_OR:
+ tcg_gen_ori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_ORN:
+ tcg_gen_ori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], ~const9);
+ break;
+ case OPC2_32_RC_SH:
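+ /* the shift count is a signed 6-bit field (5-bit for the halfword
+ variants); negative counts shift right */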
+ const9 = sextract32(const9, 0, 6);
+ gen_shi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_SH_H:
+ const9 = sextract32(const9, 0, 5);
+ gen_sh_hi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_SHA:
+ const9 = sextract32(const9, 0, 6);
+ gen_shaci(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_SHA_H:
+ const9 = sextract32(const9, 0, 5);
+ gen_sha_hi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_SHAS:
+ gen_shasi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_XNOR:
+ tcg_gen_xori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ tcg_gen_not_tl(cpu_gpr_d[r2], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RC_XOR:
+ tcg_gen_xori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ tcg_temp_free(temp);
+}
+
+static void decode_rc_accumulator(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r2;
+ int16_t const9;
+
+ TCGv temp;
+
+ r2 = MASK_OP_RC_D(ctx->opcode);
+ r1 = MASK_OP_RC_S1(ctx->opcode);
+ const9 = MASK_OP_RC_CONST9_SEXT(ctx->opcode);
+
+ op2 = MASK_OP_RC_OP2(ctx->opcode);
+
+ temp = tcg_temp_new();
+
+ switch (op2) {
+ case OPC2_32_RC_ABSDIF:
+ gen_absdifi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_ABSDIFS:
+ gen_absdifsi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_ADD:
+ gen_addi_d(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_ADDC:
+ gen_addci_CC(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_ADDS:
+ gen_addsi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_ADDS_U:
+ gen_addsui(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_ADDX:
+ gen_addi_CC(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_AND_EQ:
+ gen_accumulating_condi(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1],
+ const9, &tcg_gen_and_tl);
+ break;
+ case OPC2_32_RC_AND_GE:
+ gen_accumulating_condi(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1],
+ const9, &tcg_gen_and_tl);
+ break;
+ case OPC2_32_RC_AND_GE_U:
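+ /* the unsigned compares re-read const9 zero-extended */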
+ const9 = MASK_OP_RC_CONST9(ctx->opcode);
+ gen_accumulating_condi(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1],
+ const9, &tcg_gen_and_tl);
+ break;
+ case OPC2_32_RC_AND_LT:
+ gen_accumulating_condi(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1],
+ const9, &tcg_gen_and_tl);
+ break;
+ case OPC2_32_RC_AND_LT_U:
+ const9 = MASK_OP_RC_CONST9(ctx->opcode);
+ gen_accumulating_condi(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1],
+ const9, &tcg_gen_and_tl);
+ break;
+ case OPC2_32_RC_AND_NE:
+ gen_accumulating_condi(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1],
+ const9, &tcg_gen_and_tl);
+ break;
+ case OPC2_32_RC_EQ:
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_EQANY_B:
+ gen_eqany_bi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_EQANY_H:
+ gen_eqany_hi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_GE:
+ tcg_gen_setcondi_tl(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_GE_U:
+ const9 = MASK_OP_RC_CONST9(ctx->opcode);
+ tcg_gen_setcondi_tl(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_LT:
+ tcg_gen_setcondi_tl(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_LT_U:
+ const9 = MASK_OP_RC_CONST9(ctx->opcode);
+ tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_MAX:
+ tcg_gen_movi_tl(temp, const9);
+ tcg_gen_movcond_tl(TCG_COND_GT, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
+ cpu_gpr_d[r1], temp);
+ break;
+ case OPC2_32_RC_MAX_U:
+ tcg_gen_movi_tl(temp, MASK_OP_RC_CONST9(ctx->opcode));
+ tcg_gen_movcond_tl(TCG_COND_GTU, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
+ cpu_gpr_d[r1], temp);
+ break;
+ case OPC2_32_RC_MIN:
+ tcg_gen_movi_tl(temp, const9);
+ tcg_gen_movcond_tl(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
+ cpu_gpr_d[r1], temp);
+ break;
+ case OPC2_32_RC_MIN_U:
+ tcg_gen_movi_tl(temp, MASK_OP_RC_CONST9(ctx->opcode));
+ tcg_gen_movcond_tl(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], temp,
+ cpu_gpr_d[r1], temp);
+ break;
+ case OPC2_32_RC_NE:
+ tcg_gen_setcondi_tl(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_OR_EQ:
+ gen_accumulating_condi(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1],
+ const9, &tcg_gen_or_tl);
+ break;
+ case OPC2_32_RC_OR_GE:
+ gen_accumulating_condi(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1],
+ const9, &tcg_gen_or_tl);
+ break;
+ case OPC2_32_RC_OR_GE_U:
+ const9 = MASK_OP_RC_CONST9(ctx->opcode);
+ gen_accumulating_condi(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1],
+ const9, &tcg_gen_or_tl);
+ break;
+ case OPC2_32_RC_OR_LT:
+ gen_accumulating_condi(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1],
+ const9, &tcg_gen_or_tl);
+ break;
+ case OPC2_32_RC_OR_LT_U:
+ const9 = MASK_OP_RC_CONST9(ctx->opcode);
+ gen_accumulating_condi(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1],
+ const9, &tcg_gen_or_tl);
+ break;
+ case OPC2_32_RC_OR_NE:
+ gen_accumulating_condi(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1],
+ const9, &tcg_gen_or_tl);
+ break;
+ case OPC2_32_RC_RSUB:
+ tcg_gen_movi_tl(temp, const9);
+ gen_sub_d(cpu_gpr_d[r2], temp, cpu_gpr_d[r1]);
+ break;
+ case OPC2_32_RC_RSUBS:
+ tcg_gen_movi_tl(temp, const9);
+ gen_subs(cpu_gpr_d[r2], temp, cpu_gpr_d[r1]);
+ break;
+ case OPC2_32_RC_RSUBS_U:
+ tcg_gen_movi_tl(temp, const9);
+ gen_subsu(cpu_gpr_d[r2], temp, cpu_gpr_d[r1]);
+ break;
+ case OPC2_32_RC_SH_EQ:
+ gen_sh_condi(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_SH_GE:
+ gen_sh_condi(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_SH_GE_U:
+ const9 = MASK_OP_RC_CONST9(ctx->opcode);
+ gen_sh_condi(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_SH_LT:
+ gen_sh_condi(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_SH_LT_U:
+ const9 = MASK_OP_RC_CONST9(ctx->opcode);
+ gen_sh_condi(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_SH_NE:
+ gen_sh_condi(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_XOR_EQ:
+ gen_accumulating_condi(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1],
+ const9, &tcg_gen_xor_tl);
+ break;
+ case OPC2_32_RC_XOR_GE:
+ gen_accumulating_condi(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1],
+ const9, &tcg_gen_xor_tl);
+ break;
+ case OPC2_32_RC_XOR_GE_U:
+ const9 = MASK_OP_RC_CONST9(ctx->opcode);
+ gen_accumulating_condi(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1],
+ const9, &tcg_gen_xor_tl);
+ break;
+ case OPC2_32_RC_XOR_LT:
+ gen_accumulating_condi(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1],
+ const9, &tcg_gen_xor_tl);
+ break;
+ case OPC2_32_RC_XOR_LT_U:
+ const9 = MASK_OP_RC_CONST9(ctx->opcode);
+ gen_accumulating_condi(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1],
+ const9, &tcg_gen_xor_tl);
+ break;
+ case OPC2_32_RC_XOR_NE:
+ gen_accumulating_condi(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1],
+ const9, &tcg_gen_xor_tl);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ tcg_temp_free(temp);
+}
+
+static void decode_rc_serviceroutine(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+ uint32_t const9;
+
+ op2 = MASK_OP_RC_OP2(ctx->opcode);
+ const9 = MASK_OP_RC_CONST9(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RC_BISR:
+ gen_helper_1arg(bisr, const9);
+ break;
+ case OPC2_32_RC_SYSCALL:
+ /* TODO: Add exception generation */
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_rc_mul(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r2;
+ int16_t const9;
+
+ r2 = MASK_OP_RC_D(ctx->opcode);
+ r1 = MASK_OP_RC_S1(ctx->opcode);
+ const9 = MASK_OP_RC_CONST9_SEXT(ctx->opcode);
+
+ op2 = MASK_OP_RC_OP2(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RC_MUL_32:
+ gen_muli_i32s(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_MUL_64:
+ CHECK_REG_PAIR(r2);
+ gen_muli_i64s(cpu_gpr_d[r2], cpu_gpr_d[r2+1], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_MULS_32:
+ gen_mulsi_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_MUL_U_64:
+ const9 = MASK_OP_RC_CONST9(ctx->opcode);
+ CHECK_REG_PAIR(r2);
+ gen_muli_i64u(cpu_gpr_d[r2], cpu_gpr_d[r2+1], cpu_gpr_d[r1], const9);
+ break;
+ case OPC2_32_RC_MULS_U_32:
+ const9 = MASK_OP_RC_CONST9(ctx->opcode);
+ gen_mulsui_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], const9);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+/* RCPW format */
+static void decode_rcpw_insert(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r2;
+ int32_t pos, width, const4;
+
+ TCGv temp;
+
+ op2 = MASK_OP_RCPW_OP2(ctx->opcode);
+ r1 = MASK_OP_RCPW_S1(ctx->opcode);
+ r2 = MASK_OP_RCPW_D(ctx->opcode);
+ const4 = MASK_OP_RCPW_CONST4(ctx->opcode);
+ width = MASK_OP_RCPW_WIDTH(ctx->opcode);
+ pos = MASK_OP_RCPW_POS(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RCPW_IMASK:
+ CHECK_REG_PAIR(r2);
+ /* if pos + width > 32 undefined result */
+ if (pos + width <= 32) {
+ tcg_gen_movi_tl(cpu_gpr_d[r2+1], ((1u << width) - 1) << pos);
+ tcg_gen_movi_tl(cpu_gpr_d[r2], (const4 << pos));
+ }
+ break;
+ case OPC2_32_RCPW_INSERT:
+ /* if pos + width > 32 undefined result */
+ if (pos + width <= 32) {
+ temp = tcg_const_i32(const4);
+ tcg_gen_deposit_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, pos, width);
+ tcg_temp_free(temp);
+ }
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+/* RCRW format */
+
+static void decode_rcrw_insert(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r3, r4;
+ int32_t width, const4;
+
+ TCGv temp, temp2, temp3;
+
+ op2 = MASK_OP_RCRW_OP2(ctx->opcode);
+ r1 = MASK_OP_RCRW_S1(ctx->opcode);
+ r3 = MASK_OP_RCRW_S3(ctx->opcode);
+ r4 = MASK_OP_RCRW_D(ctx->opcode);
+ width = MASK_OP_RCRW_WIDTH(ctx->opcode);
+ const4 = MASK_OP_RCRW_CONST4(ctx->opcode);
+
+ temp = tcg_temp_new();
+ temp2 = tcg_temp_new();
+
+ switch (op2) {
+ case OPC2_32_RCRW_IMASK:
+ CHECK_REG_PAIR(r3);
+ tcg_gen_andi_tl(temp, cpu_gpr_d[r4], 0x1f);
+ tcg_gen_movi_tl(temp2, (1u << width) - 1);
+ tcg_gen_shl_tl(cpu_gpr_d[r3 + 1], temp2, temp);
+ tcg_gen_movi_tl(temp2, const4);
+ tcg_gen_shl_tl(cpu_gpr_d[r3], temp2, temp);
+ break;
+ case OPC2_32_RCRW_INSERT:
+ temp3 = tcg_temp_new();
+
+ tcg_gen_movi_tl(temp, width);
+ tcg_gen_movi_tl(temp2, const4);
+ tcg_gen_andi_tl(temp3, cpu_gpr_d[r4], 0x1f);
+ gen_insert(cpu_gpr_d[r3], cpu_gpr_d[r1], temp2, temp, temp3);
+
+ tcg_temp_free(temp3);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
+/* RCR format */
+
+static void decode_rcr_cond_select(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r3, r4;
+ int32_t const9;
+
+ TCGv temp, temp2;
+
+ op2 = MASK_OP_RCR_OP2(ctx->opcode);
+ r1 = MASK_OP_RCR_S1(ctx->opcode);
+ const9 = MASK_OP_RCR_CONST9_SEXT(ctx->opcode);
+ r3 = MASK_OP_RCR_S3(ctx->opcode);
+ r4 = MASK_OP_RCR_D(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RCR_CADD:
+ gen_condi_add(TCG_COND_NE, cpu_gpr_d[r1], const9, cpu_gpr_d[r3],
+ cpu_gpr_d[r4]);
+ break;
+ case OPC2_32_RCR_CADDN:
+ gen_condi_add(TCG_COND_EQ, cpu_gpr_d[r1], const9, cpu_gpr_d[r3],
+ cpu_gpr_d[r4]);
+ break;
+ case OPC2_32_RCR_SEL:
+ temp = tcg_const_i32(0);
+ temp2 = tcg_const_i32(const9);
+ tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
+ cpu_gpr_d[r1], temp2);
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ break;
+ case OPC2_32_RCR_SELN:
+ temp = tcg_const_i32(0);
+ temp2 = tcg_const_i32(const9);
+ tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
+ cpu_gpr_d[r1], temp2);
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_rcr_madd(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r3, r4;
+ int32_t const9;
+
+ op2 = MASK_OP_RCR_OP2(ctx->opcode);
+ r1 = MASK_OP_RCR_S1(ctx->opcode);
+ const9 = MASK_OP_RCR_CONST9_SEXT(ctx->opcode);
+ r3 = MASK_OP_RCR_S3(ctx->opcode);
+ r4 = MASK_OP_RCR_D(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RCR_MADD_32:
+ gen_maddi32_d(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
+ break;
+ case OPC2_32_RCR_MADD_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddi64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+ break;
+ case OPC2_32_RCR_MADDS_32:
+ gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
+ break;
+ case OPC2_32_RCR_MADDS_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddsi_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+ break;
+ case OPC2_32_RCR_MADD_U_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ const9 = MASK_OP_RCR_CONST9(ctx->opcode);
+ gen_maddui64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+ break;
+ case OPC2_32_RCR_MADDS_U_32:
+ const9 = MASK_OP_RCR_CONST9(ctx->opcode);
+ gen_maddsui_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
+ break;
+ case OPC2_32_RCR_MADDS_U_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ const9 = MASK_OP_RCR_CONST9(ctx->opcode);
+ gen_maddsui_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_rcr_msub(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r3, r4;
+ int32_t const9;
+
+ op2 = MASK_OP_RCR_OP2(ctx->opcode);
+ r1 = MASK_OP_RCR_S1(ctx->opcode);
+ const9 = MASK_OP_RCR_CONST9_SEXT(ctx->opcode);
+ r3 = MASK_OP_RCR_S3(ctx->opcode);
+ r4 = MASK_OP_RCR_D(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RCR_MSUB_32:
+ gen_msubi32_d(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
+ break;
+ case OPC2_32_RCR_MSUB_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubi64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+ break;
+ case OPC2_32_RCR_MSUBS_32:
+ gen_msubsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
+ break;
+ case OPC2_32_RCR_MSUBS_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubsi_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+ break;
+ case OPC2_32_RCR_MSUB_U_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ const9 = MASK_OP_RCR_CONST9(ctx->opcode);
+ gen_msubui64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+ break;
+ case OPC2_32_RCR_MSUBS_U_32:
+ const9 = MASK_OP_RCR_CONST9(ctx->opcode);
+ gen_msubsui_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9);
+ break;
+ case OPC2_32_RCR_MSUBS_U_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ const9 = MASK_OP_RCR_CONST9(ctx->opcode);
+ gen_msubsui_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+/* RLC format */
+
+static void decode_rlc_opc(CPUTriCoreState *env, DisasContext *ctx,
+ uint32_t op1)
+{
+ int32_t const16;
+ int r1, r2;
+
+ const16 = MASK_OP_RLC_CONST16_SEXT(ctx->opcode);
+ r1 = MASK_OP_RLC_S1(ctx->opcode);
+ r2 = MASK_OP_RLC_D(ctx->opcode);
+
+ switch (op1) {
+ case OPC1_32_RLC_ADDI:
+ gen_addi_d(cpu_gpr_d[r2], cpu_gpr_d[r1], const16);
+ break;
+ case OPC1_32_RLC_ADDIH:
+ gen_addi_d(cpu_gpr_d[r2], cpu_gpr_d[r1], const16 << 16);
+ break;
+ case OPC1_32_RLC_ADDIH_A:
+ tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r1], const16 << 16);
+ break;
+ case OPC1_32_RLC_MFCR:
+ const16 = MASK_OP_RLC_CONST16(ctx->opcode);
+ gen_mfcr(env, cpu_gpr_d[r2], const16);
+ break;
+ case OPC1_32_RLC_MOV:
+ tcg_gen_movi_tl(cpu_gpr_d[r2], const16);
+ break;
+ case OPC1_32_RLC_MOV_64:
+ if (tricore_feature(env, TRICORE_FEATURE_16)) {
+ CHECK_REG_PAIR(r2);
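+ /* E[c] = sign_ext(const16); the arithmetic shift replicates the
+ sign bit into the high word */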
+ tcg_gen_movi_tl(cpu_gpr_d[r2], const16);
+ tcg_gen_movi_tl(cpu_gpr_d[r2+1], const16 >> 15);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
+ case OPC1_32_RLC_MOV_U:
+ const16 = MASK_OP_RLC_CONST16(ctx->opcode);
+ tcg_gen_movi_tl(cpu_gpr_d[r2], const16);
+ break;
+ case OPC1_32_RLC_MOV_H:
+ tcg_gen_movi_tl(cpu_gpr_d[r2], const16 << 16);
+ break;
+ case OPC1_32_RLC_MOVH_A:
+ tcg_gen_movi_tl(cpu_gpr_a[r2], const16 << 16);
+ break;
+ case OPC1_32_RLC_MTCR:
+ const16 = MASK_OP_RLC_CONST16(ctx->opcode);
+ gen_mtcr(env, ctx, cpu_gpr_d[r1], const16);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+/* RR format */
+static void decode_rr_accumulator(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+ int r3, r2, r1;
+
+ r3 = MASK_OP_RR_D(ctx->opcode);
+ r2 = MASK_OP_RR_S2(ctx->opcode);
+ r1 = MASK_OP_RR_S1(ctx->opcode);
+ op2 = MASK_OP_RR_OP2(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RR_ABS:
+ gen_abs(cpu_gpr_d[r3], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ABS_B:
+ gen_helper_abs_b(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ABS_H:
+ gen_helper_abs_h(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ABSDIF:
+ gen_absdif(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ABSDIF_B:
+ gen_helper_absdif_b(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ABSDIF_H:
+ gen_helper_absdif_h(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ABSDIFS:
+ gen_helper_absdif_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ABSDIFS_H:
+ gen_helper_absdif_h_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ABSS:
+ gen_helper_abs_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ABSS_H:
+ gen_helper_abs_h_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ADD:
+ gen_add_d(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ADD_B:
+ gen_helper_add_b(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ADD_H:
+ gen_helper_add_h(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ADDC:
+ gen_addc_CC(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ADDS:
+ gen_adds(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ADDS_H:
+ gen_helper_add_h_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ADDS_HU:
+ gen_helper_add_h_suov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ADDS_U:
+ gen_helper_add_suov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ADDX:
+ gen_add_CC(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
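+ /* the AND/OR/XOR.cond ops combine the compare result into bit 0 of
+ D[c] and leave the upper bits unchanged */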
+ case OPC2_32_RR_AND_EQ:
+ gen_accumulating_cond(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], &tcg_gen_and_tl);
+ break;
+ case OPC2_32_RR_AND_GE:
+ gen_accumulating_cond(TCG_COND_GE, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], &tcg_gen_and_tl);
+ break;
+ case OPC2_32_RR_AND_GE_U:
+ gen_accumulating_cond(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], &tcg_gen_and_tl);
+ break;
+ case OPC2_32_RR_AND_LT:
+ gen_accumulating_cond(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], &tcg_gen_and_tl);
+ break;
+ case OPC2_32_RR_AND_LT_U:
+ gen_accumulating_cond(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], &tcg_gen_and_tl);
+ break;
+ case OPC2_32_RR_AND_NE:
+ gen_accumulating_cond(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], &tcg_gen_and_tl);
+ break;
+ case OPC2_32_RR_EQ:
+ tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_EQ_B:
+ gen_helper_eq_b(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_EQ_H:
+ gen_helper_eq_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_EQ_W:
+ gen_cond_w(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_EQANY_B:
+ gen_helper_eqany_b(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_EQANY_H:
+ gen_helper_eqany_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_GE:
+ tcg_gen_setcond_tl(TCG_COND_GE, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_GE_U:
+ tcg_gen_setcond_tl(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_LT:
+ tcg_gen_setcond_tl(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_LT_U:
+ tcg_gen_setcond_tl(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_LT_B:
+ gen_helper_lt_b(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_LT_BU:
+ gen_helper_lt_bu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_LT_H:
+ gen_helper_lt_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_LT_HU:
+ gen_helper_lt_hu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_LT_W:
+ gen_cond_w(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_LT_WU:
+ gen_cond_w(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_MAX:
+ tcg_gen_movcond_tl(TCG_COND_GT, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_MAX_U:
+ tcg_gen_movcond_tl(TCG_COND_GTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_MAX_B:
+ gen_helper_max_b(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_MAX_BU:
+ gen_helper_max_bu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_MAX_H:
+ gen_helper_max_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_MAX_HU:
+ gen_helper_max_hu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_MIN:
+ tcg_gen_movcond_tl(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_MIN_U:
+ tcg_gen_movcond_tl(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_MIN_B:
+ gen_helper_min_b(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_MIN_BU:
+ gen_helper_min_bu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_MIN_H:
+ gen_helper_min_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_MIN_HU:
+ gen_helper_min_hu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_MOV:
+ tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_NE:
+ tcg_gen_setcond_tl(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_OR_EQ:
+ gen_accumulating_cond(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], &tcg_gen_or_tl);
+ break;
+ case OPC2_32_RR_OR_GE:
+ gen_accumulating_cond(TCG_COND_GE, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], &tcg_gen_or_tl);
+ break;
+ case OPC2_32_RR_OR_GE_U:
+ gen_accumulating_cond(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], &tcg_gen_or_tl);
+ break;
+ case OPC2_32_RR_OR_LT:
+ gen_accumulating_cond(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], &tcg_gen_or_tl);
+ break;
+ case OPC2_32_RR_OR_LT_U:
+ gen_accumulating_cond(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], &tcg_gen_or_tl);
+ break;
+ case OPC2_32_RR_OR_NE:
+ gen_accumulating_cond(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], &tcg_gen_or_tl);
+ break;
+ case OPC2_32_RR_SAT_B:
+ gen_saturate(cpu_gpr_d[r3], cpu_gpr_d[r1], 0x7f, -0x80);
+ break;
+ case OPC2_32_RR_SAT_BU:
+ gen_saturate_u(cpu_gpr_d[r3], cpu_gpr_d[r1], 0xff);
+ break;
+ case OPC2_32_RR_SAT_H:
+ gen_saturate(cpu_gpr_d[r3], cpu_gpr_d[r1], 0x7fff, -0x8000);
+ break;
+ case OPC2_32_RR_SAT_HU:
+ gen_saturate_u(cpu_gpr_d[r3], cpu_gpr_d[r1], 0xffff);
+ break;
+ case OPC2_32_RR_SH_EQ:
+ gen_sh_cond(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SH_GE:
+ gen_sh_cond(TCG_COND_GE, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SH_GE_U:
+ gen_sh_cond(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SH_LT:
+ gen_sh_cond(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SH_LT_U:
+ gen_sh_cond(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SH_NE:
+ gen_sh_cond(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SUB:
+ gen_sub_d(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SUB_B:
+ gen_helper_sub_b(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SUB_H:
+ gen_helper_sub_h(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SUBC:
+ gen_subc_CC(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SUBS:
+ gen_subs(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SUBS_U:
+ gen_subsu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SUBS_H:
+ gen_helper_sub_h_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SUBS_HU:
+ gen_helper_sub_h_suov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SUBX:
+ gen_sub_CC(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_XOR_EQ:
+ gen_accumulating_cond(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], &tcg_gen_xor_tl);
+ break;
+ case OPC2_32_RR_XOR_GE:
+ gen_accumulating_cond(TCG_COND_GE, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], &tcg_gen_xor_tl);
+ break;
+ case OPC2_32_RR_XOR_GE_U:
+ gen_accumulating_cond(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], &tcg_gen_xor_tl);
+ break;
+ case OPC2_32_RR_XOR_LT:
+ gen_accumulating_cond(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], &tcg_gen_xor_tl);
+ break;
+ case OPC2_32_RR_XOR_LT_U:
+ gen_accumulating_cond(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], &tcg_gen_xor_tl);
+ break;
+ case OPC2_32_RR_XOR_NE:
+ gen_accumulating_cond(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], &tcg_gen_xor_tl);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_rr_logical_shift(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+ int r3, r2, r1;
+ TCGv temp;
+
+ r3 = MASK_OP_RR_D(ctx->opcode);
+ r2 = MASK_OP_RR_S2(ctx->opcode);
+ r1 = MASK_OP_RR_S1(ctx->opcode);
+
+ temp = tcg_temp_new();
+ op2 = MASK_OP_RR_OP2(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RR_AND:
+ tcg_gen_and_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ANDN:
+ tcg_gen_andc_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_CLO:
+ gen_helper_clo(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+ break;
+ case OPC2_32_RR_CLO_H:
+ gen_helper_clo_h(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+ break;
+ case OPC2_32_RR_CLS:
+ gen_helper_cls(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+ break;
+ case OPC2_32_RR_CLS_H:
+ gen_helper_cls_h(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+ break;
+ case OPC2_32_RR_CLZ:
+ gen_helper_clz(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+ break;
+ case OPC2_32_RR_CLZ_H:
+ gen_helper_clz_h(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+ break;
+ case OPC2_32_RR_NAND:
+ tcg_gen_nand_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_NOR:
+ tcg_gen_nor_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_OR:
+ tcg_gen_or_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_ORN:
+ tcg_gen_orc_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SH:
+ gen_helper_sh(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SH_H:
+ gen_helper_sh_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SHA:
+ gen_helper_sha(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SHA_H:
+ gen_helper_sha_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_SHAS:
+ gen_shas(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_XNOR:
+ tcg_gen_eqv_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_XOR:
+ tcg_gen_xor_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ tcg_temp_free(temp);
+}
+
+static void decode_rr_address(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2, n;
+ int r1, r2, r3;
+ TCGv temp;
+
+ op2 = MASK_OP_RR_OP2(ctx->opcode);
+ r3 = MASK_OP_RR_D(ctx->opcode);
+ r2 = MASK_OP_RR_S2(ctx->opcode);
+ r1 = MASK_OP_RR_S1(ctx->opcode);
+ n = MASK_OP_RR_N(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RR_ADD_A:
+ tcg_gen_add_tl(cpu_gpr_a[r3], cpu_gpr_a[r1], cpu_gpr_a[r2]);
+ break;
+ case OPC2_32_RR_ADDSC_A:
+ temp = tcg_temp_new();
+ tcg_gen_shli_tl(temp, cpu_gpr_d[r1], n);
+ tcg_gen_add_tl(cpu_gpr_a[r3], cpu_gpr_a[r2], temp);
+ tcg_temp_free(temp);
+ break;
+ case OPC2_32_RR_ADDSC_AT:
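+ /* A[c] = (A[b] + (D[a] >> 3)) & ~0x3 */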
+ temp = tcg_temp_new();
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 3);
+ tcg_gen_add_tl(temp, cpu_gpr_a[r2], temp);
+ tcg_gen_andi_tl(cpu_gpr_a[r3], temp, 0xFFFFFFFC);
+ tcg_temp_free(temp);
+ break;
+ case OPC2_32_RR_EQ_A:
+ tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_a[r1],
+ cpu_gpr_a[r2]);
+ break;
+ case OPC2_32_RR_EQZ:
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_a[r1], 0);
+ break;
+ case OPC2_32_RR_GE_A:
+ tcg_gen_setcond_tl(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_a[r1],
+ cpu_gpr_a[r2]);
+ break;
+ case OPC2_32_RR_LT_A:
+ tcg_gen_setcond_tl(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_a[r1],
+ cpu_gpr_a[r2]);
+ break;
+ case OPC2_32_RR_MOV_A:
+ tcg_gen_mov_tl(cpu_gpr_a[r3], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_MOV_AA:
+ tcg_gen_mov_tl(cpu_gpr_a[r3], cpu_gpr_a[r2]);
+ break;
+ case OPC2_32_RR_MOV_D:
+ tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_a[r2]);
+ break;
+ case OPC2_32_RR_NE_A:
+ tcg_gen_setcond_tl(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_a[r1],
+ cpu_gpr_a[r2]);
+ break;
+ case OPC2_32_RR_NEZ_A:
+ tcg_gen_setcondi_tl(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_a[r1], 0);
+ break;
+ case OPC2_32_RR_SUB_A:
+ tcg_gen_sub_tl(cpu_gpr_a[r3], cpu_gpr_a[r1], cpu_gpr_a[r2]);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_rr_idirect(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1;
+
+ op2 = MASK_OP_RR_OP2(ctx->opcode);
+ r1 = MASK_OP_RR_S1(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RR_JI:
+ tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1);
+ break;
+ case OPC2_32_RR_JLI:
+ tcg_gen_movi_tl(cpu_gpr_a[11], ctx->next_pc);
+ tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1);
+ break;
+ case OPC2_32_RR_CALLI:
+ gen_helper_1arg(call, ctx->next_pc);
+ tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1);
+ break;
+ case OPC2_32_RR_FCALLI:
+ gen_fcall_save_ctx(ctx);
+ tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
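+ /* the jump target is only known at run time, so end the TB here */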
+ tcg_gen_exit_tb(0);
+ ctx->bstate = BS_BRANCH;
+}
+
+static void decode_rr_divide(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r2, r3;
+
+ TCGv temp, temp2, temp3;
+
+ op2 = MASK_OP_RR_OP2(ctx->opcode);
+ r3 = MASK_OP_RR_D(ctx->opcode);
+ r2 = MASK_OP_RR_S2(ctx->opcode);
+ r1 = MASK_OP_RR_S1(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RR_BMERGE:
+ gen_helper_bmerge(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_BSPLIT:
+ CHECK_REG_PAIR(r3);
+ gen_bsplit(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1]);
+ break;
+ case OPC2_32_RR_DVINIT_B:
+ CHECK_REG_PAIR(r3);
+ gen_dvinit_b(env, cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_DVINIT_BU:
+ temp = tcg_temp_new();
+ temp2 = tcg_temp_new();
+ temp3 = tcg_temp_new();
+ CHECK_REG_PAIR(r3);
+ tcg_gen_shri_tl(temp3, cpu_gpr_d[r1], 8);
+ /* reset av */
+ tcg_gen_movi_tl(cpu_PSW_AV, 0);
+ if (!tricore_feature(env, TRICORE_FEATURE_131)) {
+ /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
+ tcg_gen_neg_tl(temp, temp3);
+ /* use cpu_PSW_AV to compare against 0 */
+ tcg_gen_movcond_tl(TCG_COND_LT, temp, temp3, cpu_PSW_AV,
+ temp, temp3);
+ tcg_gen_neg_tl(temp2, cpu_gpr_d[r2]);
+ tcg_gen_movcond_tl(TCG_COND_LT, temp2, cpu_gpr_d[r2], cpu_PSW_AV,
+ temp2, cpu_gpr_d[r2]);
+ tcg_gen_setcond_tl(TCG_COND_GE, cpu_PSW_V, temp, temp2);
+ } else {
+ /* overflow = (D[b] == 0) */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0);
+ }
+ tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ /* sv */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* write result */
+ tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], 24);
+ tcg_gen_mov_tl(cpu_gpr_d[r3+1], temp3);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free(temp3);
+ break;
+ case OPC2_32_RR_DVINIT_H:
+ CHECK_REG_PAIR(r3);
+ gen_dvinit_h(env, cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_DVINIT_HU:
+ temp = tcg_temp_new();
+ temp2 = tcg_temp_new();
+ temp3 = tcg_temp_new();
+ CHECK_REG_PAIR(r3);
+ tcg_gen_shri_tl(temp3, cpu_gpr_d[r1], 16);
+ /* reset av */
+ tcg_gen_movi_tl(cpu_PSW_AV, 0);
+ if (!tricore_feature(env, TRICORE_FEATURE_131)) {
+ /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */
+ tcg_gen_neg_tl(temp, temp3);
+ /* use cpu_PSW_AV to compare against 0 */
+ tcg_gen_movcond_tl(TCG_COND_LT, temp, temp3, cpu_PSW_AV,
+ temp, temp3);
+ tcg_gen_neg_tl(temp2, cpu_gpr_d[r2]);
+ tcg_gen_movcond_tl(TCG_COND_LT, temp2, cpu_gpr_d[r2], cpu_PSW_AV,
+ temp2, cpu_gpr_d[r2]);
+ tcg_gen_setcond_tl(TCG_COND_GE, cpu_PSW_V, temp, temp2);
+ } else {
+ /* overflow = (D[b] == 0) */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0);
+ }
+ tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ /* sv */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* write result */
+ tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], 16);
+ tcg_gen_mov_tl(cpu_gpr_d[r3+1], temp3);
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free(temp3);
+ break;
+ case OPC2_32_RR_DVINIT:
+ temp = tcg_temp_new();
+ temp2 = tcg_temp_new();
+ CHECK_REG_PAIR(r3);
+ /* overflow = ((D[b] == 0) ||
+ ((D[b] == 0xFFFFFFFF) && (D[a] == 0x80000000))) */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, temp, cpu_gpr_d[r2], 0xffffffff);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, cpu_gpr_d[r1], 0x80000000);
+ tcg_gen_and_tl(temp, temp, temp2);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, cpu_gpr_d[r2], 0);
+ tcg_gen_or_tl(cpu_PSW_V, temp, temp2);
+ tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ /* sv */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* reset av */
+ tcg_gen_movi_tl(cpu_PSW_AV, 0);
+ /* write result */
+ tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+ /* sign extend to high reg */
+ tcg_gen_sari_tl(cpu_gpr_d[r3+1], cpu_gpr_d[r1], 31);
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ break;
+ case OPC2_32_RR_DVINIT_U:
+ CHECK_REG_PAIR(r3);
+ /* overflow = (D[b] == 0) */
+ tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0);
+ tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
+ /* sv */
+ tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
+ /* reset av */
+ tcg_gen_movi_tl(cpu_PSW_AV, 0);
+ /* write result */
+ tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+ /* zero extend to high reg */
+ tcg_gen_movi_tl(cpu_gpr_d[r3+1], 0);
+ break;
+ case OPC2_32_RR_PARITY:
+ gen_helper_parity(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+ break;
+ case OPC2_32_RR_UNPACK:
+ CHECK_REG_PAIR(r3);
+ gen_unpack(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1]);
+ break;
+ case OPC2_32_RR_CRC32:
+ if (tricore_feature(env, TRICORE_FEATURE_161)) {
+ gen_helper_crc32(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
+ case OPC2_32_RR_DIV:
+ if (tricore_feature(env, TRICORE_FEATURE_16)) {
+ GEN_HELPER_RR(divide, cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
+ case OPC2_32_RR_DIV_U:
+ if (tricore_feature(env, TRICORE_FEATURE_16)) {
+ GEN_HELPER_RR(divide_u, cpu_gpr_d[r3], cpu_gpr_d[r3+1],
+ cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
+ case OPC2_32_RR_MUL_F:
+ gen_helper_fmul(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_DIV_F:
+ gen_helper_fdiv(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_CMP_F:
+ gen_helper_fcmp(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR_FTOI:
+ gen_helper_ftoi(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1]);
+ break;
+ case OPC2_32_RR_ITOF:
+ gen_helper_itof(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1]);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+/* RR1 Format */
+static void decode_rr1_mul(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+
+ int r1, r2, r3;
+ TCGv n;
+ TCGv_i64 temp64;
+
+ r1 = MASK_OP_RR1_S1(ctx->opcode);
+ r2 = MASK_OP_RR1_S2(ctx->opcode);
+ r3 = MASK_OP_RR1_D(ctx->opcode);
+ n = tcg_const_i32(MASK_OP_RR1_N(ctx->opcode));
+ op2 = MASK_OP_RR1_OP2(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RR1_MUL_H_32_LL:
+ temp64 = tcg_temp_new_i64();
+ CHECK_REG_PAIR(r3);
+ GEN_HELPER_LL(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
+ tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
+ gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]);
+ tcg_temp_free_i64(temp64);
+ break;
+ case OPC2_32_RR1_MUL_H_32_LU:
+ temp64 = tcg_temp_new_i64();
+ CHECK_REG_PAIR(r3);
+ GEN_HELPER_LU(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
+ tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
+ gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]);
+ tcg_temp_free_i64(temp64);
+ break;
+ case OPC2_32_RR1_MUL_H_32_UL:
+ temp64 = tcg_temp_new_i64();
+ CHECK_REG_PAIR(r3);
+ GEN_HELPER_UL(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
+ tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
+ gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]);
+ tcg_temp_free_i64(temp64);
+ break;
+ case OPC2_32_RR1_MUL_H_32_UU:
+ temp64 = tcg_temp_new_i64();
+ CHECK_REG_PAIR(r3);
+ GEN_HELPER_UU(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
+ tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
+ gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]);
+ tcg_temp_free_i64(temp64);
+ break;
+ case OPC2_32_RR1_MULM_H_64_LL:
+ temp64 = tcg_temp_new_i64();
+ CHECK_REG_PAIR(r3);
+ GEN_HELPER_LL(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
+ tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
+ /* reset V bit */
+ tcg_gen_movi_tl(cpu_PSW_V, 0);
+ /* reset AV bit */
+ tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
+ tcg_temp_free_i64(temp64);
+ break;
+ case OPC2_32_RR1_MULM_H_64_LU:
+ temp64 = tcg_temp_new_i64();
+ CHECK_REG_PAIR(r3);
+ GEN_HELPER_LU(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
+ tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
+ /* reset V bit */
+ tcg_gen_movi_tl(cpu_PSW_V, 0);
+ /* reset AV bit */
+ tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
+ tcg_temp_free_i64(temp64);
+ break;
+ case OPC2_32_RR1_MULM_H_64_UL:
+ temp64 = tcg_temp_new_i64();
+ CHECK_REG_PAIR(r3);
+ GEN_HELPER_UL(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
+ tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
+ /* reset V bit */
+ tcg_gen_movi_tl(cpu_PSW_V, 0);
+ /* reset AV bit */
+ tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
+ tcg_temp_free_i64(temp64);
+ break;
+ case OPC2_32_RR1_MULM_H_64_UU:
+ temp64 = tcg_temp_new_i64();
+ CHECK_REG_PAIR(r3);
+ GEN_HELPER_UU(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n);
+ tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64);
+ /* reset V bit */
+ tcg_gen_movi_tl(cpu_PSW_V, 0);
+ /* reset AV bit */
+ tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
+ tcg_temp_free_i64(temp64);
+ break;
+ case OPC2_32_RR1_MULR_H_16_LL:
+ GEN_HELPER_LL(mulr_h, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], n);
+ gen_calc_usb_mulr_h(cpu_gpr_d[r3]);
+ break;
+ case OPC2_32_RR1_MULR_H_16_LU:
+ GEN_HELPER_LU(mulr_h, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], n);
+ gen_calc_usb_mulr_h(cpu_gpr_d[r3]);
+ break;
+ case OPC2_32_RR1_MULR_H_16_UL:
+ GEN_HELPER_UL(mulr_h, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], n);
+ gen_calc_usb_mulr_h(cpu_gpr_d[r3]);
+ break;
+ case OPC2_32_RR1_MULR_H_16_UU:
+ GEN_HELPER_UU(mulr_h, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], n);
+ gen_calc_usb_mulr_h(cpu_gpr_d[r3]);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ tcg_temp_free(n);
+}
+
+static void decode_rr1_mulq(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r2, r3;
+ uint32_t n;
+
+ TCGv temp, temp2;
+
+ r1 = MASK_OP_RR1_S1(ctx->opcode);
+ r2 = MASK_OP_RR1_S2(ctx->opcode);
+ r3 = MASK_OP_RR1_D(ctx->opcode);
+ n = MASK_OP_RR1_N(ctx->opcode);
+ op2 = MASK_OP_RR1_OP2(ctx->opcode);
+
+ temp = tcg_temp_new();
+ temp2 = tcg_temp_new();
+
+ switch (op2) {
+ case OPC2_32_RR1_MUL_Q_32:
+ gen_mul_q(cpu_gpr_d[r3], temp, cpu_gpr_d[r1], cpu_gpr_d[r2], n, 32);
+ break;
+ case OPC2_32_RR1_MUL_Q_64:
+ CHECK_REG_PAIR(r3);
+ gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, 0);
+ break;
+ case OPC2_32_RR1_MUL_Q_32_L:
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
+ gen_mul_q(cpu_gpr_d[r3], temp, cpu_gpr_d[r1], temp, n, 16);
+ break;
+ case OPC2_32_RR1_MUL_Q_64_L:
+ CHECK_REG_PAIR(r3);
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
+ gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp, n, 0);
+ break;
+ case OPC2_32_RR1_MUL_Q_32_U:
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
+ gen_mul_q(cpu_gpr_d[r3], temp, cpu_gpr_d[r1], temp, n, 16);
+ break;
+ case OPC2_32_RR1_MUL_Q_64_U:
+ CHECK_REG_PAIR(r3);
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
+ gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp, n, 0);
+ break;
+ case OPC2_32_RR1_MUL_Q_32_LL:
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ gen_mul_q_16(cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ case OPC2_32_RR1_MUL_Q_32_UU:
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ gen_mul_q_16(cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ case OPC2_32_RR1_MULR_Q_32_L:
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ gen_mulr_q(cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ case OPC2_32_RR1_MULR_Q_32_U:
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ gen_mulr_q(cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
+/* RR2 format */
+static void decode_rr2_mul(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r2, r3;
+
+ op2 = MASK_OP_RR2_OP2(ctx->opcode);
+ r1 = MASK_OP_RR2_S1(ctx->opcode);
+ r2 = MASK_OP_RR2_S2(ctx->opcode);
+ r3 = MASK_OP_RR2_D(ctx->opcode);
+ switch (op2) {
+ case OPC2_32_RR2_MUL_32:
+ gen_mul_i32s(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR2_MUL_64:
+ CHECK_REG_PAIR(r3);
+ gen_mul_i64s(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR2_MULS_32:
+ gen_helper_mul_ssov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR2_MUL_U_64:
+ CHECK_REG_PAIR(r3);
+ gen_mul_i64u(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RR2_MULS_U_32:
+ gen_helper_mul_suov(cpu_gpr_d[r3], cpu_env, cpu_gpr_d[r1],
+ cpu_gpr_d[r2]);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+/* RRPW format */
+static void decode_rrpw_extract_insert(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r2, r3;
+ int32_t pos, width;
+
+ op2 = MASK_OP_RRPW_OP2(ctx->opcode);
+ r1 = MASK_OP_RRPW_S1(ctx->opcode);
+ r2 = MASK_OP_RRPW_S2(ctx->opcode);
+ r3 = MASK_OP_RRPW_D(ctx->opcode);
+ pos = MASK_OP_RRPW_POS(ctx->opcode);
+ width = MASK_OP_RRPW_WIDTH(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RRPW_EXTR:
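+        /* leave the destination unchanged when pos + width exceeds 31 */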
+ if (pos + width <= 31) {
+ /* optimize special cases */
+ if ((pos == 0) && (width == 8)) {
+ tcg_gen_ext8s_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+ } else if ((pos == 0) && (width == 16)) {
+ tcg_gen_ext16s_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]);
+ } else {
+ tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], 32 - pos - width);
+ tcg_gen_sari_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 32 - width);
+ }
+ }
+ break;
+ case OPC2_32_RRPW_EXTR_U:
+ if (width == 0) {
+ tcg_gen_movi_tl(cpu_gpr_d[r3], 0);
+ } else {
+ tcg_gen_shri_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], pos);
+ tcg_gen_andi_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], ~0u >> (32-width));
+ }
+ break;
+ case OPC2_32_RRPW_IMASK:
+ CHECK_REG_PAIR(r3);
+ if (pos + width <= 31) {
+ tcg_gen_movi_tl(cpu_gpr_d[r3+1], ((1u << width) - 1) << pos);
+ tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r2], pos);
+ }
+ break;
+ case OPC2_32_RRPW_INSERT:
+ if (pos + width <= 31) {
+ tcg_gen_deposit_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ width, pos);
+ }
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+/* RRR format */
+static void decode_rrr_cond_select(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r2, r3, r4;
+ TCGv temp;
+
+ op2 = MASK_OP_RRR_OP2(ctx->opcode);
+ r1 = MASK_OP_RRR_S1(ctx->opcode);
+ r2 = MASK_OP_RRR_S2(ctx->opcode);
+ r3 = MASK_OP_RRR_S3(ctx->opcode);
+ r4 = MASK_OP_RRR_D(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RRR_CADD:
+ gen_cond_add(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[r2],
+ cpu_gpr_d[r4], cpu_gpr_d[r3]);
+ break;
+ case OPC2_32_RRR_CADDN:
+ gen_cond_add(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[r2], cpu_gpr_d[r4],
+ cpu_gpr_d[r3]);
+ break;
+ case OPC2_32_RRR_CSUB:
+ gen_cond_sub(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[r2], cpu_gpr_d[r4],
+ cpu_gpr_d[r3]);
+ break;
+ case OPC2_32_RRR_CSUBN:
+ gen_cond_sub(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[r2], cpu_gpr_d[r4],
+ cpu_gpr_d[r3]);
+ break;
+ case OPC2_32_RRR_SEL:
+ temp = tcg_const_i32(0);
+ tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
+ cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ tcg_temp_free(temp);
+ break;
+ case OPC2_32_RRR_SELN:
+ temp = tcg_const_i32(0);
+ tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r4], cpu_gpr_d[r3], temp,
+ cpu_gpr_d[r1], cpu_gpr_d[r2]);
+ tcg_temp_free(temp);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_rrr_divide(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+
+ int r1, r2, r3, r4;
+
+ op2 = MASK_OP_RRR_OP2(ctx->opcode);
+ r1 = MASK_OP_RRR_S1(ctx->opcode);
+ r2 = MASK_OP_RRR_S2(ctx->opcode);
+ r3 = MASK_OP_RRR_S3(ctx->opcode);
+ r4 = MASK_OP_RRR_D(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RRR_DVADJ:
+ CHECK_REG_PAIR(r3);
+ CHECK_REG_PAIR(r4);
+ GEN_HELPER_RRR(dvadj, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RRR_DVSTEP:
+ CHECK_REG_PAIR(r3);
+ CHECK_REG_PAIR(r4);
+ GEN_HELPER_RRR(dvstep, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RRR_DVSTEP_U:
+ CHECK_REG_PAIR(r3);
+ CHECK_REG_PAIR(r4);
+ GEN_HELPER_RRR(dvstep_u, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RRR_IXMAX:
+ CHECK_REG_PAIR(r3);
+ CHECK_REG_PAIR(r4);
+ GEN_HELPER_RRR(ixmax, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RRR_IXMAX_U:
+ CHECK_REG_PAIR(r3);
+ CHECK_REG_PAIR(r4);
+ GEN_HELPER_RRR(ixmax_u, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RRR_IXMIN:
+ CHECK_REG_PAIR(r3);
+ CHECK_REG_PAIR(r4);
+ GEN_HELPER_RRR(ixmin, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RRR_IXMIN_U:
+ CHECK_REG_PAIR(r3);
+ CHECK_REG_PAIR(r4);
+ GEN_HELPER_RRR(ixmin_u, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RRR_PACK:
+ CHECK_REG_PAIR(r3);
+ gen_helper_pack(cpu_gpr_d[r4], cpu_PSW_C, cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1]);
+ break;
+ case OPC2_32_RRR_ADD_F:
+ gen_helper_fadd(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r3]);
+ break;
+ case OPC2_32_RRR_SUB_F:
+ gen_helper_fsub(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1], cpu_gpr_d[r3]);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+/* RRR2 format */
+static void decode_rrr2_madd(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+ uint32_t r1, r2, r3, r4;
+
+ op2 = MASK_OP_RRR2_OP2(ctx->opcode);
+ r1 = MASK_OP_RRR2_S1(ctx->opcode);
+ r2 = MASK_OP_RRR2_S2(ctx->opcode);
+ r3 = MASK_OP_RRR2_S3(ctx->opcode);
+ r4 = MASK_OP_RRR2_D(ctx->opcode);
+ switch (op2) {
+ case OPC2_32_RRR2_MADD_32:
+ gen_madd32_d(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RRR2_MADD_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_madd64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RRR2_MADDS_32:
+ gen_helper_madd32_ssov(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RRR2_MADDS_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_madds_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RRR2_MADD_U_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddu64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RRR2_MADDS_U_32:
+ gen_helper_madd32_suov(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RRR2_MADDS_U_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddsu_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_rrr2_msub(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+ uint32_t r1, r2, r3, r4;
+
+ op2 = MASK_OP_RRR2_OP2(ctx->opcode);
+ r1 = MASK_OP_RRR2_S1(ctx->opcode);
+ r2 = MASK_OP_RRR2_S2(ctx->opcode);
+ r3 = MASK_OP_RRR2_S3(ctx->opcode);
+ r4 = MASK_OP_RRR2_D(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RRR2_MSUB_32:
+ gen_msub32_d(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3],
+ cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RRR2_MSUB_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msub64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RRR2_MSUBS_32:
+ gen_helper_msub32_ssov(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RRR2_MSUBS_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubs_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ break;
+    case OPC2_32_RRR2_MSUB_U_64:
+        CHECK_REG_PAIR(r4);
+        CHECK_REG_PAIR(r3);
+        gen_msubu64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+                      cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RRR2_MSUBS_U_32:
+ gen_helper_msub32_suov(cpu_gpr_d[r4], cpu_env, cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r2]);
+ break;
+ case OPC2_32_RRR2_MSUBS_U_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubsu_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1],
+ cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+/* RRR1 format */
+static void decode_rrr1_madd(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+ uint32_t r1, r2, r3, r4, n;
+
+ op2 = MASK_OP_RRR1_OP2(ctx->opcode);
+ r1 = MASK_OP_RRR1_S1(ctx->opcode);
+ r2 = MASK_OP_RRR1_S2(ctx->opcode);
+ r3 = MASK_OP_RRR1_S3(ctx->opcode);
+ r4 = MASK_OP_RRR1_D(ctx->opcode);
+ n = MASK_OP_RRR1_N(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RRR1_MADD_H_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MADD_H_LU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MADD_H_UL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MADD_H_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MADDS_H_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MADDS_H_LU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MADDS_H_UL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MADDS_H_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MADDM_H_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MADDM_H_LU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MADDM_H_UL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MADDM_H_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MADDMS_H_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MADDMS_H_LU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MADDMS_H_UL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MADDMS_H_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MADDR_H_LL:
+ gen_maddr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MADDR_H_LU:
+ gen_maddr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MADDR_H_UL:
+ gen_maddr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MADDR_H_UU:
+ gen_maddr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MADDRS_H_LL:
+ gen_maddr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MADDRS_H_LU:
+ gen_maddr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MADDRS_H_UL:
+ gen_maddr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MADDRS_H_UU:
+ gen_maddr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_UU);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_rrr1_maddq_h(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+ uint32_t r1, r2, r3, r4, n;
+ TCGv temp, temp2;
+
+ op2 = MASK_OP_RRR1_OP2(ctx->opcode);
+ r1 = MASK_OP_RRR1_S1(ctx->opcode);
+ r2 = MASK_OP_RRR1_S2(ctx->opcode);
+ r3 = MASK_OP_RRR1_S3(ctx->opcode);
+ r4 = MASK_OP_RRR1_D(ctx->opcode);
+ n = MASK_OP_RRR1_N(ctx->opcode);
+
+ temp = tcg_const_i32(n);
+ temp2 = tcg_temp_new();
+
+ switch (op2) {
+ case OPC2_32_RRR1_MADD_Q_32:
+ gen_madd32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, 32, env);
+ break;
+ case OPC2_32_RRR1_MADD_Q_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_madd64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, env);
+ break;
+ case OPC2_32_RRR1_MADD_Q_32_L:
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
+ gen_madd32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ temp, n, 16, env);
+ break;
+ case OPC2_32_RRR1_MADD_Q_64_L:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
+ gen_madd64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
+ n, env);
+ break;
+ case OPC2_32_RRR1_MADD_Q_32_U:
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
+ gen_madd32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ temp, n, 16, env);
+ break;
+ case OPC2_32_RRR1_MADD_Q_64_U:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
+ gen_madd64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
+ n, env);
+ break;
+ case OPC2_32_RRR1_MADD_Q_32_LL:
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ gen_m16add32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MADD_Q_64_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ gen_m16add64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MADD_Q_32_UU:
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ gen_m16add32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MADD_Q_64_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ gen_m16add64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MADDS_Q_32:
+ gen_madds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, 32);
+ break;
+ case OPC2_32_RRR1_MADDS_Q_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_madds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n);
+ break;
+ case OPC2_32_RRR1_MADDS_Q_32_L:
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
+ gen_madds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ temp, n, 16);
+ break;
+ case OPC2_32_RRR1_MADDS_Q_64_L:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
+ gen_madds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
+ n);
+ break;
+ case OPC2_32_RRR1_MADDS_Q_32_U:
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
+ gen_madds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ temp, n, 16);
+ break;
+ case OPC2_32_RRR1_MADDS_Q_64_U:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
+ gen_madds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
+ n);
+ break;
+ case OPC2_32_RRR1_MADDS_Q_32_LL:
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ gen_m16adds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MADDS_Q_64_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ gen_m16adds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MADDS_Q_32_UU:
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ gen_m16adds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MADDS_Q_64_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ gen_m16adds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MADDR_H_64_UL:
+ CHECK_REG_PAIR(r3);
+ gen_maddr64_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3+1],
+ cpu_gpr_d[r1], cpu_gpr_d[r2], n, 2);
+ break;
+ case OPC2_32_RRR1_MADDRS_H_64_UL:
+ CHECK_REG_PAIR(r3);
+ gen_maddr64s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3+1],
+ cpu_gpr_d[r1], cpu_gpr_d[r2], n, 2);
+ break;
+ case OPC2_32_RRR1_MADDR_Q_32_LL:
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ gen_maddr_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MADDR_Q_32_UU:
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ gen_maddr_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MADDRS_Q_32_LL:
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ gen_maddrs_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MADDRS_Q_32_UU:
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ gen_maddrs_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
+static void decode_rrr1_maddsu_h(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+ uint32_t r1, r2, r3, r4, n;
+
+ op2 = MASK_OP_RRR1_OP2(ctx->opcode);
+ r1 = MASK_OP_RRR1_S1(ctx->opcode);
+ r2 = MASK_OP_RRR1_S2(ctx->opcode);
+ r3 = MASK_OP_RRR1_S3(ctx->opcode);
+ r4 = MASK_OP_RRR1_D(ctx->opcode);
+ n = MASK_OP_RRR1_N(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RRR1_MADDSU_H_32_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MADDSU_H_32_LU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MADDSU_H_32_UL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MADDSU_H_32_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MADDSUS_H_32_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MADDSUS_H_32_LU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MADDSUS_H_32_UL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MADDSUS_H_32_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MADDSUM_H_64_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MADDSUM_H_64_LU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MADDSUM_H_64_UL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MADDSUM_H_64_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MADDSUMS_H_64_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MADDSUMS_H_64_LU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MADDSUMS_H_64_UL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MADDSUMS_H_64_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MADDSUR_H_16_LL:
+ gen_maddsur32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MADDSUR_H_16_LU:
+ gen_maddsur32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MADDSUR_H_16_UL:
+ gen_maddsur32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MADDSUR_H_16_UU:
+ gen_maddsur32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MADDSURS_H_16_LL:
+ gen_maddsur32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MADDSURS_H_16_LU:
+ gen_maddsur32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MADDSURS_H_16_UL:
+ gen_maddsur32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MADDSURS_H_16_UU:
+ gen_maddsur32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_UU);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_rrr1_msub(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+ uint32_t r1, r2, r3, r4, n;
+
+ op2 = MASK_OP_RRR1_OP2(ctx->opcode);
+ r1 = MASK_OP_RRR1_S1(ctx->opcode);
+ r2 = MASK_OP_RRR1_S2(ctx->opcode);
+ r3 = MASK_OP_RRR1_S3(ctx->opcode);
+ r4 = MASK_OP_RRR1_D(ctx->opcode);
+ n = MASK_OP_RRR1_N(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RRR1_MSUB_H_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MSUB_H_LU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MSUB_H_UL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MSUB_H_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MSUBS_H_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MSUBS_H_LU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MSUBS_H_UL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MSUBS_H_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MSUBM_H_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MSUBM_H_LU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MSUBM_H_UL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MSUBM_H_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MSUBMS_H_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MSUBMS_H_LU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MSUBMS_H_UL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MSUBMS_H_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MSUBR_H_LL:
+ gen_msubr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MSUBR_H_LU:
+ gen_msubr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MSUBR_H_UL:
+ gen_msubr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MSUBR_H_UU:
+ gen_msubr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MSUBRS_H_LL:
+ gen_msubr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MSUBRS_H_LU:
+ gen_msubr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MSUBRS_H_UL:
+ gen_msubr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MSUBRS_H_UU:
+ gen_msubr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_UU);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_rrr1_msubq_h(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+ uint32_t r1, r2, r3, r4, n;
+ TCGv temp, temp2;
+
+ op2 = MASK_OP_RRR1_OP2(ctx->opcode);
+ r1 = MASK_OP_RRR1_S1(ctx->opcode);
+ r2 = MASK_OP_RRR1_S2(ctx->opcode);
+ r3 = MASK_OP_RRR1_S3(ctx->opcode);
+ r4 = MASK_OP_RRR1_D(ctx->opcode);
+ n = MASK_OP_RRR1_N(ctx->opcode);
+
+ temp = tcg_const_i32(n);
+ temp2 = tcg_temp_new();
+
+ switch (op2) {
+ case OPC2_32_RRR1_MSUB_Q_32:
+ gen_msub32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, 32, env);
+ break;
+ case OPC2_32_RRR1_MSUB_Q_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, env);
+ break;
+ case OPC2_32_RRR1_MSUB_Q_32_L:
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
+ gen_msub32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ temp, n, 16, env);
+ break;
+ case OPC2_32_RRR1_MSUB_Q_64_L:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
+ gen_msub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
+ n, env);
+ break;
+ case OPC2_32_RRR1_MSUB_Q_32_U:
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
+ gen_msub32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ temp, n, 16, env);
+ break;
+ case OPC2_32_RRR1_MSUB_Q_64_U:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
+ gen_msub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
+ n, env);
+ break;
+ case OPC2_32_RRR1_MSUB_Q_32_LL:
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ gen_m16sub32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MSUB_Q_64_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ gen_m16sub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MSUB_Q_32_UU:
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ gen_m16sub32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MSUB_Q_64_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ gen_m16sub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MSUBS_Q_32:
+ gen_msubs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, 32);
+ break;
+ case OPC2_32_RRR1_MSUBS_Q_64:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n);
+ break;
+ case OPC2_32_RRR1_MSUBS_Q_32_L:
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
+ gen_msubs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ temp, n, 16);
+ break;
+ case OPC2_32_RRR1_MSUBS_Q_64_L:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]);
+ gen_msubs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
+ n);
+ break;
+ case OPC2_32_RRR1_MSUBS_Q_32_U:
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
+ gen_msubs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ temp, n, 16);
+ break;
+ case OPC2_32_RRR1_MSUBS_Q_64_U:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16);
+ gen_msubs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp,
+ n);
+ break;
+ case OPC2_32_RRR1_MSUBS_Q_32_LL:
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ gen_m16subs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MSUBS_Q_64_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ gen_m16subs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MSUBS_Q_32_UU:
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ gen_m16subs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MSUBS_Q_64_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ gen_m16subs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MSUBR_H_64_UL:
+ CHECK_REG_PAIR(r3);
+ gen_msubr64_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3+1],
+ cpu_gpr_d[r1], cpu_gpr_d[r2], n, 2);
+ break;
+ case OPC2_32_RRR1_MSUBRS_H_64_UL:
+ CHECK_REG_PAIR(r3);
+ gen_msubr64s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3+1],
+ cpu_gpr_d[r1], cpu_gpr_d[r2], n, 2);
+ break;
+ case OPC2_32_RRR1_MSUBR_Q_32_LL:
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ gen_msubr_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MSUBR_Q_32_UU:
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ gen_msubr_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MSUBRS_Q_32_LL:
+ tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]);
+ tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]);
+ gen_msubrs_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ case OPC2_32_RRR1_MSUBRS_Q_32_UU:
+ tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16);
+ tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16);
+ gen_msubrs_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+}
+
+static void decode_rrr1_msubad_h(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+ uint32_t r1, r2, r3, r4, n;
+
+ op2 = MASK_OP_RRR1_OP2(ctx->opcode);
+ r1 = MASK_OP_RRR1_S1(ctx->opcode);
+ r2 = MASK_OP_RRR1_S2(ctx->opcode);
+ r3 = MASK_OP_RRR1_S3(ctx->opcode);
+ r4 = MASK_OP_RRR1_D(ctx->opcode);
+ n = MASK_OP_RRR1_N(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_RRR1_MSUBAD_H_32_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MSUBAD_H_32_LU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MSUBAD_H_32_UL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MSUBAD_H_32_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MSUBADS_H_32_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MSUBADS_H_32_LU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MSUBADS_H_32_UL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MSUBADS_H_32_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MSUBADM_H_64_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MSUBADM_H_64_LU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MSUBADM_H_64_UL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MSUBADM_H_64_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MSUBADMS_H_64_LL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MSUBADMS_H_64_LU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MSUBADMS_H_64_UL:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MSUBADMS_H_64_UU:
+ CHECK_REG_PAIR(r4);
+ CHECK_REG_PAIR(r3);
+ gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3],
+ cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2],
+ n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MSUBADR_H_16_LL:
+ gen_msubadr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MSUBADR_H_16_LU:
+ gen_msubadr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MSUBADR_H_16_UL:
+ gen_msubadr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MSUBADR_H_16_UU:
+ gen_msubadr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_UU);
+ break;
+ case OPC2_32_RRR1_MSUBADRS_H_16_LL:
+ gen_msubadr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_LL);
+ break;
+ case OPC2_32_RRR1_MSUBADRS_H_16_LU:
+ gen_msubadr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_LU);
+ break;
+ case OPC2_32_RRR1_MSUBADRS_H_16_UL:
+ gen_msubadr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_UL);
+ break;
+ case OPC2_32_RRR1_MSUBADRS_H_16_UU:
+ gen_msubadr32s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1],
+ cpu_gpr_d[r2], n, MODE_UU);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+/* RRRR format */
+static void decode_rrrr_extract_insert(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r2, r3, r4;
+ TCGv tmp_width, tmp_pos;
+
+ r1 = MASK_OP_RRRR_S1(ctx->opcode);
+ r2 = MASK_OP_RRRR_S2(ctx->opcode);
+ r3 = MASK_OP_RRRR_S3(ctx->opcode);
+ r4 = MASK_OP_RRRR_D(ctx->opcode);
+ op2 = MASK_OP_RRRR_OP2(ctx->opcode);
+
+ tmp_pos = tcg_temp_new();
+ tmp_width = tcg_temp_new();
+
+ switch (op2) {
+ case OPC2_32_RRRR_DEXTR:
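+        /* take the upper 32 bits of {d[r1]:d[r2]} shifted left by
+           d[r3][4:0]; with r1 == r2 this reduces to a rotate left */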
+ tcg_gen_andi_tl(tmp_pos, cpu_gpr_d[r3], 0x1f);
+ if (r1 == r2) {
+ tcg_gen_rotl_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], tmp_pos);
+ } else {
+ tcg_gen_shl_tl(tmp_width, cpu_gpr_d[r1], tmp_pos);
+ tcg_gen_subfi_tl(tmp_pos, 32, tmp_pos);
+ tcg_gen_shr_tl(tmp_pos, cpu_gpr_d[r2], tmp_pos);
+ tcg_gen_or_tl(cpu_gpr_d[r4], tmp_width, tmp_pos);
+ }
+ break;
+ case OPC2_32_RRRR_EXTR:
+ case OPC2_32_RRRR_EXTR_U:
+ CHECK_REG_PAIR(r3);
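+        /* the dynamic width comes from d[r3+1][4:0], the position
+           from d[r3][4:0] */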
+ tcg_gen_andi_tl(tmp_width, cpu_gpr_d[r3+1], 0x1f);
+ tcg_gen_andi_tl(tmp_pos, cpu_gpr_d[r3], 0x1f);
+ tcg_gen_add_tl(tmp_pos, tmp_pos, tmp_width);
+ tcg_gen_subfi_tl(tmp_pos, 32, tmp_pos);
+ tcg_gen_shl_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], tmp_pos);
+ tcg_gen_subfi_tl(tmp_width, 32, tmp_width);
+ if (op2 == OPC2_32_RRRR_EXTR) {
+ tcg_gen_sar_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], tmp_width);
+ } else {
+ tcg_gen_shr_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], tmp_width);
+ }
+ break;
+ case OPC2_32_RRRR_INSERT:
+ CHECK_REG_PAIR(r3);
+ tcg_gen_andi_tl(tmp_width, cpu_gpr_d[r3+1], 0x1f);
+ tcg_gen_andi_tl(tmp_pos, cpu_gpr_d[r3], 0x1f);
+ gen_insert(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r2], tmp_width,
+ tmp_pos);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ tcg_temp_free(tmp_pos);
+ tcg_temp_free(tmp_width);
+}
+
+/* RRRW format */
+static void decode_rrrw_extract_insert(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+ int r1, r2, r3, r4;
+ int32_t width;
+
+ TCGv temp, temp2;
+
+ op2 = MASK_OP_RRRW_OP2(ctx->opcode);
+ r1 = MASK_OP_RRRW_S1(ctx->opcode);
+ r2 = MASK_OP_RRRW_S2(ctx->opcode);
+ r3 = MASK_OP_RRRW_S3(ctx->opcode);
+ r4 = MASK_OP_RRRW_D(ctx->opcode);
+ width = MASK_OP_RRRW_WIDTH(ctx->opcode);
+
+ temp = tcg_temp_new();
+
+ switch (op2) {
+ case OPC2_32_RRRW_EXTR:
+ tcg_gen_andi_tl(temp, cpu_gpr_d[r3], 0x1f);
+ tcg_gen_addi_tl(temp, temp, width);
+ tcg_gen_subfi_tl(temp, 32, temp);
+ tcg_gen_shl_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], temp);
+ tcg_gen_sari_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], 32 - width);
+ break;
+ case OPC2_32_RRRW_EXTR_U:
+ if (width == 0) {
+ tcg_gen_movi_tl(cpu_gpr_d[r4], 0);
+ } else {
+ tcg_gen_andi_tl(temp, cpu_gpr_d[r3], 0x1f);
+ tcg_gen_shr_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], temp);
+ tcg_gen_andi_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], ~0u >> (32-width));
+ }
+ break;
+    case OPC2_32_RRRW_IMASK:
+        CHECK_REG_PAIR(r4);
+        temp2 = tcg_temp_new();
+
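+        /* a mask of 'width' one bits at the dynamic position goes to the
+           upper register of the pair, the shifted d[r2] to the lower one */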
+ tcg_gen_andi_tl(temp, cpu_gpr_d[r3], 0x1f);
+ tcg_gen_movi_tl(temp2, (1 << width) - 1);
+ tcg_gen_shl_tl(temp2, temp2, temp);
+ tcg_gen_shl_tl(cpu_gpr_d[r4], cpu_gpr_d[r2], temp);
+ tcg_gen_mov_tl(cpu_gpr_d[r4+1], temp2);
+
+ tcg_temp_free(temp2);
+ break;
+ case OPC2_32_RRRW_INSERT:
+ temp2 = tcg_temp_new();
+
+ tcg_gen_movi_tl(temp, width);
+ tcg_gen_andi_tl(temp2, cpu_gpr_d[r3], 0x1f);
+ gen_insert(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r2], temp, temp2);
+
+ tcg_temp_free(temp2);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ tcg_temp_free(temp);
+}
+
+/* SYS Format */
+static void decode_sys_interrupts(CPUTriCoreState *env, DisasContext *ctx)
+{
+ uint32_t op2;
+ uint32_t r1;
+ TCGLabel *l1;
+ TCGv tmp;
+
+ op2 = MASK_OP_SYS_OP2(ctx->opcode);
+ r1 = MASK_OP_SYS_S1D(ctx->opcode);
+
+ switch (op2) {
+ case OPC2_32_SYS_DEBUG:
+ /* raise EXCP_DEBUG */
+ break;
+ case OPC2_32_SYS_DISABLE:
+ tcg_gen_andi_tl(cpu_ICR, cpu_ICR, ~MASK_ICR_IE);
+ break;
+ case OPC2_32_SYS_DSYNC:
+ break;
+ case OPC2_32_SYS_ENABLE:
+ tcg_gen_ori_tl(cpu_ICR, cpu_ICR, MASK_ICR_IE);
+ break;
+ case OPC2_32_SYS_ISYNC:
+ break;
+ case OPC2_32_SYS_NOP:
+ break;
+ case OPC2_32_SYS_RET:
+ gen_compute_branch(ctx, op2, 0, 0, 0, 0);
+ break;
+ case OPC2_32_SYS_FRET:
+ gen_fret(ctx);
+ break;
+ case OPC2_32_SYS_RFE:
+ gen_helper_rfe(cpu_env);
+ tcg_gen_exit_tb(0);
+ ctx->bstate = BS_BRANCH;
+ break;
+ case OPC2_32_SYS_RFM:
+ if ((ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_SM) {
+ tmp = tcg_temp_new();
+ l1 = gen_new_label();
+
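+            /* only perform RFM if the debug enable bit in DBGSR is set;
+               otherwise just end the TB */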
+ tcg_gen_ld32u_tl(tmp, cpu_env, offsetof(CPUTriCoreState, DBGSR));
+ tcg_gen_andi_tl(tmp, tmp, MASK_DBGSR_DE);
+ tcg_gen_brcondi_tl(TCG_COND_NE, tmp, 1, l1);
+ gen_helper_rfm(cpu_env);
+ gen_set_label(l1);
+ tcg_gen_exit_tb(0);
+ ctx->bstate = BS_BRANCH;
+ tcg_temp_free(tmp);
+ } else {
+ /* generate privilege trap */
+ }
+ break;
+ case OPC2_32_SYS_RSLCX:
+ gen_helper_rslcx(cpu_env);
+ break;
+ case OPC2_32_SYS_SVLCX:
+ gen_helper_svlcx(cpu_env);
+ break;
+ case OPC2_32_SYS_RESTORE:
+ if (tricore_feature(env, TRICORE_FEATURE_16)) {
+ if ((ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_SM ||
+ (ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_UM1) {
+ tcg_gen_deposit_tl(cpu_ICR, cpu_ICR, cpu_gpr_d[r1], 8, 1);
+ } /* else raise privilege trap */
+ } else {
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+ break;
+ case OPC2_32_SYS_TRAPSV:
+ l1 = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_GE, cpu_PSW_SV, 0, l1);
+ generate_trap(ctx, TRAPC_ASSERT, TIN5_SOVF);
+ gen_set_label(l1);
+ break;
+ case OPC2_32_SYS_TRAPV:
+ l1 = gen_new_label();
+ tcg_gen_brcondi_tl(TCG_COND_GE, cpu_PSW_V, 0, l1);
+ generate_trap(ctx, TRAPC_ASSERT, TIN5_OVF);
+ gen_set_label(l1);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_32Bit_opc(CPUTriCoreState *env, DisasContext *ctx)
+{
+ int op1;
+ int32_t r1, r2, r3;
+ int32_t address, const16;
+ int8_t b, const4;
+ int32_t bpos;
+ TCGv temp, temp2, temp3;
+
+ op1 = MASK_OP_MAJOR(ctx->opcode);
+
+ /* handle JNZ.T opcode only being 7 bit long */
+ if (unlikely((op1 & 0x7f) == OPCM_32_BRN_JTT)) {
+ op1 = OPCM_32_BRN_JTT;
+ }
+
+ switch (op1) {
+/* ABS-format */
+ case OPCM_32_ABS_LDW:
+ decode_abs_ldw(env, ctx);
+ break;
+ case OPCM_32_ABS_LDB:
+ decode_abs_ldb(env, ctx);
+ break;
+ case OPCM_32_ABS_LDMST_SWAP:
+ decode_abs_ldst_swap(env, ctx);
+ break;
+ case OPCM_32_ABS_LDST_CONTEXT:
+ decode_abs_ldst_context(env, ctx);
+ break;
+ case OPCM_32_ABS_STORE:
+ decode_abs_store(env, ctx);
+ break;
+ case OPCM_32_ABS_STOREB_H:
+ decode_abs_storeb_h(env, ctx);
+ break;
+ case OPC1_32_ABS_STOREQ:
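+        /* ST.Q: store the upper half-word of d[r1] to the absolute address */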
+ address = MASK_OP_ABS_OFF18(ctx->opcode);
+ r1 = MASK_OP_ABS_S1D(ctx->opcode);
+ temp = tcg_const_i32(EA_ABS_FORMAT(address));
+ temp2 = tcg_temp_new();
+
+ tcg_gen_shri_tl(temp2, cpu_gpr_d[r1], 16);
+ tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_LEUW);
+
+ tcg_temp_free(temp2);
+ tcg_temp_free(temp);
+ break;
+ case OPC1_32_ABS_LD_Q:
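+        /* LD.Q: load a half-word into the upper half of d[r1]; the lower
+           half is cleared */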
+ address = MASK_OP_ABS_OFF18(ctx->opcode);
+ r1 = MASK_OP_ABS_S1D(ctx->opcode);
+ temp = tcg_const_i32(EA_ABS_FORMAT(address));
+
+ tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW);
+ tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16);
+
+ tcg_temp_free(temp);
+ break;
+ case OPC1_32_ABS_LEA:
+ address = MASK_OP_ABS_OFF18(ctx->opcode);
+ r1 = MASK_OP_ABS_S1D(ctx->opcode);
+ tcg_gen_movi_tl(cpu_gpr_a[r1], EA_ABS_FORMAT(address));
+ break;
+/* ABSB-format */
+ case OPC1_32_ABSB_ST_T:
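+        /* ST.T: read-modify-write of a single bit at the absolute address */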
+ address = MASK_OP_ABS_OFF18(ctx->opcode);
+ b = MASK_OP_ABSB_B(ctx->opcode);
+ bpos = MASK_OP_ABSB_BPOS(ctx->opcode);
+
+ temp = tcg_const_i32(EA_ABS_FORMAT(address));
+ temp2 = tcg_temp_new();
+
+ tcg_gen_qemu_ld_tl(temp2, temp, ctx->mem_idx, MO_UB);
+ tcg_gen_andi_tl(temp2, temp2, ~(0x1u << bpos));
+ tcg_gen_ori_tl(temp2, temp2, (b << bpos));
+ tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_UB);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ break;
+/* B-format */
+ case OPC1_32_B_CALL:
+ case OPC1_32_B_CALLA:
+ case OPC1_32_B_FCALL:
+ case OPC1_32_B_FCALLA:
+ case OPC1_32_B_J:
+ case OPC1_32_B_JA:
+ case OPC1_32_B_JL:
+ case OPC1_32_B_JLA:
+ address = MASK_OP_B_DISP24_SEXT(ctx->opcode);
+ gen_compute_branch(ctx, op1, 0, 0, 0, address);
+ break;
+/* Bit-format */
+ case OPCM_32_BIT_ANDACC:
+ decode_bit_andacc(env, ctx);
+ break;
+ case OPCM_32_BIT_LOGICAL_T1:
+ decode_bit_logical_t(env, ctx);
+ break;
+ case OPCM_32_BIT_INSERT:
+ decode_bit_insert(env, ctx);
+ break;
+ case OPCM_32_BIT_LOGICAL_T2:
+ decode_bit_logical_t2(env, ctx);
+ break;
+ case OPCM_32_BIT_ORAND:
+ decode_bit_orand(env, ctx);
+ break;
+ case OPCM_32_BIT_SH_LOGIC1:
+ decode_bit_sh_logic1(env, ctx);
+ break;
+ case OPCM_32_BIT_SH_LOGIC2:
+ decode_bit_sh_logic2(env, ctx);
+ break;
+ /* BO Format */
+ case OPCM_32_BO_ADDRMODE_POST_PRE_BASE:
+ decode_bo_addrmode_post_pre_base(env, ctx);
+ break;
+ case OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR:
+ decode_bo_addrmode_bitreverse_circular(env, ctx);
+ break;
+ case OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE:
+ decode_bo_addrmode_ld_post_pre_base(env, ctx);
+ break;
+ case OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR:
+ decode_bo_addrmode_ld_bitreverse_circular(env, ctx);
+ break;
+ case OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE:
+ decode_bo_addrmode_stctx_post_pre_base(env, ctx);
+ break;
+ case OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR:
+ decode_bo_addrmode_ldmst_bitreverse_circular(env, ctx);
+ break;
+/* BOL-format */
+ case OPC1_32_BOL_LD_A_LONGOFF:
+ case OPC1_32_BOL_LD_W_LONGOFF:
+ case OPC1_32_BOL_LEA_LONGOFF:
+ case OPC1_32_BOL_ST_W_LONGOFF:
+ case OPC1_32_BOL_ST_A_LONGOFF:
+ case OPC1_32_BOL_LD_B_LONGOFF:
+ case OPC1_32_BOL_LD_BU_LONGOFF:
+ case OPC1_32_BOL_LD_H_LONGOFF:
+ case OPC1_32_BOL_LD_HU_LONGOFF:
+ case OPC1_32_BOL_ST_B_LONGOFF:
+ case OPC1_32_BOL_ST_H_LONGOFF:
+ decode_bol_opc(env, ctx, op1);
+ break;
+/* BRC Format */
+ case OPCM_32_BRC_EQ_NEQ:
+ case OPCM_32_BRC_GE:
+ case OPCM_32_BRC_JLT:
+ case OPCM_32_BRC_JNE:
+ const4 = MASK_OP_BRC_CONST4_SEXT(ctx->opcode);
+ address = MASK_OP_BRC_DISP15_SEXT(ctx->opcode);
+ r1 = MASK_OP_BRC_S1(ctx->opcode);
+ gen_compute_branch(ctx, op1, r1, 0, const4, address);
+ break;
+/* BRN Format */
+ case OPCM_32_BRN_JTT:
+ address = MASK_OP_BRN_DISP15_SEXT(ctx->opcode);
+ r1 = MASK_OP_BRN_S1(ctx->opcode);
+ gen_compute_branch(ctx, op1, r1, 0, 0, address);
+ break;
+/* BRR Format */
+ case OPCM_32_BRR_EQ_NEQ:
+ case OPCM_32_BRR_ADDR_EQ_NEQ:
+ case OPCM_32_BRR_GE:
+ case OPCM_32_BRR_JLT:
+ case OPCM_32_BRR_JNE:
+ case OPCM_32_BRR_JNZ:
+ case OPCM_32_BRR_LOOP:
+ address = MASK_OP_BRR_DISP15_SEXT(ctx->opcode);
+ r2 = MASK_OP_BRR_S2(ctx->opcode);
+ r1 = MASK_OP_BRR_S1(ctx->opcode);
+ gen_compute_branch(ctx, op1, r1, r2, 0, address);
+ break;
+/* RC Format */
+ case OPCM_32_RC_LOGICAL_SHIFT:
+ decode_rc_logical_shift(env, ctx);
+ break;
+ case OPCM_32_RC_ACCUMULATOR:
+ decode_rc_accumulator(env, ctx);
+ break;
+ case OPCM_32_RC_SERVICEROUTINE:
+ decode_rc_serviceroutine(env, ctx);
+ break;
+ case OPCM_32_RC_MUL:
+ decode_rc_mul(env, ctx);
+ break;
+/* RCPW Format */
+ case OPCM_32_RCPW_MASK_INSERT:
+ decode_rcpw_insert(env, ctx);
+ break;
+/* RCRR Format */
+ case OPC1_32_RCRR_INSERT:
+ r1 = MASK_OP_RCRR_S1(ctx->opcode);
+ r2 = MASK_OP_RCRR_S3(ctx->opcode);
+ r3 = MASK_OP_RCRR_D(ctx->opcode);
+ const16 = MASK_OP_RCRR_CONST4(ctx->opcode);
+ temp = tcg_const_i32(const16);
+            temp2 = tcg_temp_new(); /* width */
+ temp3 = tcg_temp_new(); /* pos */
+
+ CHECK_REG_PAIR(r3);
+
+ tcg_gen_andi_tl(temp2, cpu_gpr_d[r3+1], 0x1f);
+ tcg_gen_andi_tl(temp3, cpu_gpr_d[r3], 0x1f);
+
+ gen_insert(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, temp2, temp3);
+
+ tcg_temp_free(temp);
+ tcg_temp_free(temp2);
+ tcg_temp_free(temp3);
+ break;
+/* RCRW Format */
+ case OPCM_32_RCRW_MASK_INSERT:
+ decode_rcrw_insert(env, ctx);
+ break;
+/* RCR Format */
+ case OPCM_32_RCR_COND_SELECT:
+ decode_rcr_cond_select(env, ctx);
+ break;
+ case OPCM_32_RCR_MADD:
+ decode_rcr_madd(env, ctx);
+ break;
+ case OPCM_32_RCR_MSUB:
+ decode_rcr_msub(env, ctx);
+ break;
+/* RLC Format */
+ case OPC1_32_RLC_ADDI:
+ case OPC1_32_RLC_ADDIH:
+ case OPC1_32_RLC_ADDIH_A:
+ case OPC1_32_RLC_MFCR:
+ case OPC1_32_RLC_MOV:
+ case OPC1_32_RLC_MOV_64:
+ case OPC1_32_RLC_MOV_U:
+ case OPC1_32_RLC_MOV_H:
+ case OPC1_32_RLC_MOVH_A:
+ case OPC1_32_RLC_MTCR:
+ decode_rlc_opc(env, ctx, op1);
+ break;
+/* RR Format */
+ case OPCM_32_RR_ACCUMULATOR:
+ decode_rr_accumulator(env, ctx);
+ break;
+ case OPCM_32_RR_LOGICAL_SHIFT:
+ decode_rr_logical_shift(env, ctx);
+ break;
+ case OPCM_32_RR_ADDRESS:
+ decode_rr_address(env, ctx);
+ break;
+ case OPCM_32_RR_IDIRECT:
+ decode_rr_idirect(env, ctx);
+ break;
+ case OPCM_32_RR_DIVIDE:
+ decode_rr_divide(env, ctx);
+ break;
+/* RR1 Format */
+ case OPCM_32_RR1_MUL:
+ decode_rr1_mul(env, ctx);
+ break;
+ case OPCM_32_RR1_MULQ:
+ decode_rr1_mulq(env, ctx);
+ break;
+/* RR2 format */
+ case OPCM_32_RR2_MUL:
+ decode_rr2_mul(env, ctx);
+ break;
+/* RRPW format */
+ case OPCM_32_RRPW_EXTRACT_INSERT:
+ decode_rrpw_extract_insert(env, ctx);
+ break;
+ case OPC1_32_RRPW_DEXTR:
+ r1 = MASK_OP_RRPW_S1(ctx->opcode);
+ r2 = MASK_OP_RRPW_S2(ctx->opcode);
+ r3 = MASK_OP_RRPW_D(ctx->opcode);
+ const16 = MASK_OP_RRPW_POS(ctx->opcode);
+ if (r1 == r2) {
+ tcg_gen_rotli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], const16);
+ } else {
+ temp = tcg_temp_new();
+ tcg_gen_shli_tl(temp, cpu_gpr_d[r1], const16);
+ tcg_gen_shri_tl(cpu_gpr_d[r3], cpu_gpr_d[r2], 32 - const16);
+ tcg_gen_or_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp);
+ tcg_temp_free(temp);
+ }
+ break;
+/* RRR Format */
+ case OPCM_32_RRR_COND_SELECT:
+ decode_rrr_cond_select(env, ctx);
+ break;
+ case OPCM_32_RRR_DIVIDE:
+ decode_rrr_divide(env, ctx);
+ break;
+/* RRR2 Format */
+ case OPCM_32_RRR2_MADD:
+ decode_rrr2_madd(env, ctx);
+ break;
+ case OPCM_32_RRR2_MSUB:
+ decode_rrr2_msub(env, ctx);
+ break;
+/* RRR1 format */
+ case OPCM_32_RRR1_MADD:
+ decode_rrr1_madd(env, ctx);
+ break;
+ case OPCM_32_RRR1_MADDQ_H:
+ decode_rrr1_maddq_h(env, ctx);
+ break;
+ case OPCM_32_RRR1_MADDSU_H:
+ decode_rrr1_maddsu_h(env, ctx);
+ break;
+ case OPCM_32_RRR1_MSUB_H:
+ decode_rrr1_msub(env, ctx);
+ break;
+ case OPCM_32_RRR1_MSUB_Q:
+ decode_rrr1_msubq_h(env, ctx);
+ break;
+ case OPCM_32_RRR1_MSUBAD_H:
+ decode_rrr1_msubad_h(env, ctx);
+ break;
+/* RRRR format */
+ case OPCM_32_RRRR_EXTRACT_INSERT:
+ decode_rrrr_extract_insert(env, ctx);
+ break;
+/* RRRW format */
+ case OPCM_32_RRRW_EXTRACT_INSERT:
+ decode_rrrw_extract_insert(env, ctx);
+ break;
+/* SYS format */
+ case OPCM_32_SYS_INTERRUPTS:
+ decode_sys_interrupts(env, ctx);
+ break;
+ case OPC1_32_SYS_RSTV:
+ tcg_gen_movi_tl(cpu_PSW_V, 0);
+ tcg_gen_mov_tl(cpu_PSW_SV, cpu_PSW_V);
+ tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V);
+ tcg_gen_mov_tl(cpu_PSW_SAV, cpu_PSW_V);
+ break;
+ default:
+ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
+ }
+}
+
+static void decode_opc(CPUTriCoreState *env, DisasContext *ctx, int *is_branch)
+{
+ /* 16-Bit Instruction */
+ if ((ctx->opcode & 0x1) == 0) {
+ ctx->next_pc = ctx->pc + 2;
+ decode_16Bit_opc(env, ctx);
+ /* 32-Bit Instruction */
+ } else {
+ ctx->next_pc = ctx->pc + 4;
+ decode_32Bit_opc(env, ctx);
+ }
+}
+
+void gen_intermediate_code(CPUTriCoreState *env, struct TranslationBlock *tb)
+{
+ TriCoreCPU *cpu = tricore_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ DisasContext ctx;
+ target_ulong pc_start;
+ int num_insns, max_insns;
+
+ num_insns = 0;
+ max_insns = tb->cflags & CF_COUNT_MASK;
+ if (max_insns == 0) {
+ max_insns = CF_COUNT_MASK;
+ }
+ if (singlestep) {
+ max_insns = 1;
+ }
+ if (max_insns > TCG_MAX_INSNS) {
+ max_insns = TCG_MAX_INSNS;
+ }
+
+ pc_start = tb->pc;
+ ctx.pc = pc_start;
+ ctx.saved_pc = -1;
+ ctx.tb = tb;
+ ctx.singlestep_enabled = cs->singlestep_enabled;
+ ctx.bstate = BS_NONE;
+ ctx.mem_idx = cpu_mmu_index(env, false);
+
+ tcg_clear_temp_count();
+ gen_tb_start(tb);
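+    /* translate one instruction at a time until a branch ends the block
+       or the TB limits are reached */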
+ while (ctx.bstate == BS_NONE) {
+ tcg_gen_insn_start(ctx.pc);
+ num_insns++;
+
+ ctx.opcode = cpu_ldl_code(env, ctx.pc);
+ decode_opc(env, &ctx, 0);
+
+ if (num_insns >= max_insns || tcg_op_buf_full()) {
+ gen_save_pc(ctx.next_pc);
+ tcg_gen_exit_tb(0);
+ break;
+ }
+ ctx.pc = ctx.next_pc;
+ }
+
+ gen_tb_end(tb, num_insns);
+ tb->size = ctx.pc - pc_start;
+ tb->icount = num_insns;
+
+ if (tcg_check_temp_count()) {
+ printf("LEAK at %08x\n", env->PC);
+ }
+
+#ifdef DEBUG_DISAS
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
+ && qemu_log_in_addr_range(pc_start)) {
+ qemu_log_lock();
+ qemu_log("IN: %s\n", lookup_symbol(pc_start));
+ log_target_disas(cs, pc_start, ctx.pc - pc_start, 0);
+ qemu_log("\n");
+ qemu_log_unlock();
+ }
+#endif
+}
+
+void
+restore_state_to_opc(CPUTriCoreState *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ env->PC = data[0];
+}
+/*
+ *
+ * Initialization
+ *
+ */
+
+void cpu_state_reset(CPUTriCoreState *env)
+{
+ /* Reset Regs to Default Value */
+ env->PSW = 0xb80;
+ fpu_set_state(env);
+}
+
+static void tricore_tcg_init_csfr(void)
+{
+ cpu_PCXI = tcg_global_mem_new(cpu_env,
+ offsetof(CPUTriCoreState, PCXI), "PCXI");
+ cpu_PSW = tcg_global_mem_new(cpu_env,
+ offsetof(CPUTriCoreState, PSW), "PSW");
+ cpu_PC = tcg_global_mem_new(cpu_env,
+ offsetof(CPUTriCoreState, PC), "PC");
+ cpu_ICR = tcg_global_mem_new(cpu_env,
+ offsetof(CPUTriCoreState, ICR), "ICR");
+}
+
+void tricore_tcg_init(void)
+{
+ int i;
+ static int inited;
+ if (inited) {
+ return;
+ }
+ cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+ tcg_ctx.tcg_env = cpu_env;
+ /* reg init */
+ for (i = 0 ; i < 16 ; i++) {
+ cpu_gpr_a[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUTriCoreState, gpr_a[i]),
+ regnames_a[i]);
+ }
+ for (i = 0 ; i < 16 ; i++) {
+ cpu_gpr_d[i] = tcg_global_mem_new(cpu_env,
+ offsetof(CPUTriCoreState, gpr_d[i]),
+ regnames_d[i]);
+ }
+ tricore_tcg_init_csfr();
+ /* init PSW flag cache */
+ cpu_PSW_C = tcg_global_mem_new(cpu_env,
+ offsetof(CPUTriCoreState, PSW_USB_C),
+ "PSW_C");
+ cpu_PSW_V = tcg_global_mem_new(cpu_env,
+ offsetof(CPUTriCoreState, PSW_USB_V),
+ "PSW_V");
+ cpu_PSW_SV = tcg_global_mem_new(cpu_env,
+ offsetof(CPUTriCoreState, PSW_USB_SV),
+ "PSW_SV");
+ cpu_PSW_AV = tcg_global_mem_new(cpu_env,
+ offsetof(CPUTriCoreState, PSW_USB_AV),
+ "PSW_AV");
+ cpu_PSW_SAV = tcg_global_mem_new(cpu_env,
+ offsetof(CPUTriCoreState, PSW_USB_SAV),
+ "PSW_SAV");
+}
diff --git a/target/tricore/tricore-defs.h b/target/tricore/tricore-defs.h
new file mode 100644
index 0000000000..40abfaac14
--- /dev/null
+++ b/target/tricore/tricore-defs.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef QEMU_TRICORE_DEFS_H
+#define QEMU_TRICORE_DEFS_H
+
+#define TARGET_PAGE_BITS 14
+#define TARGET_LONG_BITS 32
+#define TARGET_PHYS_ADDR_SPACE_BITS 32
+#define TARGET_VIRT_ADDR_SPACE_BITS 32
+
+#define TRICORE_TLB_MAX 128
+
+#endif /* QEMU_TRICORE_DEFS_H */
diff --git a/target/tricore/tricore-opcodes.h b/target/tricore/tricore-opcodes.h
new file mode 100644
index 0000000000..df666b081f
--- /dev/null
+++ b/target/tricore/tricore-opcodes.h
@@ -0,0 +1,1463 @@
+/*
+ * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Opcode Masks for Tricore
+ * Format MASK_OP_InstrFormatName_Field
+ */
+
+/* Extract the bit field op[start..end] and shift it down to bit position 0 */
+#define MASK_BITS_SHIFT(op, start, end) (extract32(op, (start), \
+ (end) - (start) + 1))
+#define MASK_BITS_SHIFT_SEXT(op, start, end) (sextract32(op, (start),\
+ (end) - (start) + 1))
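+/* For example, MASK_OP_SRR_S2(op) below expands to extract32(op, 12, 4), i.e.
+   it yields bits 12..15 of the instruction word, shifted down to bit 0. */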
+
+/* new opcode masks */
+
+#define MASK_OP_MAJOR(op) MASK_BITS_SHIFT(op, 0, 7)
+
+/* 16-Bit Formats */
+#define MASK_OP_SB_DISP8(op) MASK_BITS_SHIFT(op, 8, 15)
+#define MASK_OP_SB_DISP8_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 8, 15)
+
+#define MASK_OP_SBC_CONST4(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_SBC_CONST4_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 12, 15)
+#define MASK_OP_SBC_DISP4(op) MASK_BITS_SHIFT(op, 8, 11)
+
+#define MASK_OP_SBR_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_SBR_DISP4(op) MASK_BITS_SHIFT(op, 8, 11)
+
+#define MASK_OP_SBRN_N(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_SBRN_DISP4(op) MASK_BITS_SHIFT(op, 8, 11)
+
+#define MASK_OP_SC_CONST8(op) MASK_BITS_SHIFT(op, 8, 15)
+
+#define MASK_OP_SLR_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_SLR_D(op) MASK_BITS_SHIFT(op, 8, 11)
+
+#define MASK_OP_SLRO_OFF4(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_SLRO_D(op) MASK_BITS_SHIFT(op, 8, 11)
+
+#define MASK_OP_SR_OP2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_SR_S1D(op) MASK_BITS_SHIFT(op, 8, 11)
+
+#define MASK_OP_SRC_CONST4(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_SRC_CONST4_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 12, 15)
+#define MASK_OP_SRC_S1D(op) MASK_BITS_SHIFT(op, 8, 11)
+
+#define MASK_OP_SRO_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_SRO_OFF4(op) MASK_BITS_SHIFT(op, 8, 11)
+
+#define MASK_OP_SRR_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_SRR_S1D(op) MASK_BITS_SHIFT(op, 8, 11)
+
+#define MASK_OP_SRRS_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_SRRS_S1D(op) MASK_BITS_SHIFT(op, 8, 11)
+#define MASK_OP_SRRS_N(op) MASK_BITS_SHIFT(op, 6, 7)
+
+#define MASK_OP_SSR_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_SSR_S1(op) MASK_BITS_SHIFT(op, 8, 11)
+
+#define MASK_OP_SSRO_OFF4(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_SSRO_S1(op) MASK_BITS_SHIFT(op, 8, 11)
+
+/* 32-Bit Formats */
+
+/* ABS Format */
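+/* The 18-bit absolute offset is scattered over four bit fields of the
+   instruction word; MASK_OP_ABS_OFF18() stitches it back together. */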
+#define MASK_OP_ABS_OFF18(op) (MASK_BITS_SHIFT(op, 16, 21) + \
+ (MASK_BITS_SHIFT(op, 28, 31) << 6) + \
+ (MASK_BITS_SHIFT(op, 22, 25) << 10) +\
+ (MASK_BITS_SHIFT(op, 12, 15) << 14))
+#define MASK_OP_ABS_OP2(op) MASK_BITS_SHIFT(op, 26, 27)
+#define MASK_OP_ABS_S1D(op) MASK_BITS_SHIFT(op, 8, 11)
+
+/* ABSB Format */
+#define MASK_OP_ABSB_OFF18(op) MASK_OP_ABS_OFF18(op)
+#define MASK_OP_ABSB_OP2(op) MASK_BITS_SHIFT(op, 26, 27)
+#define MASK_OP_ABSB_B(op) MASK_BITS_SHIFT(op, 11, 11)
+#define MASK_OP_ABSB_BPOS(op) MASK_BITS_SHIFT(op, 8, 10)
+
+/* B Format */
+#define MASK_OP_B_DISP24(op) (MASK_BITS_SHIFT(op, 16, 31) + \
+ (MASK_BITS_SHIFT(op, 8, 15) << 16))
+#define MASK_OP_B_DISP24_SEXT(op) (MASK_BITS_SHIFT(op, 16, 31) + \
+ (MASK_BITS_SHIFT_SEXT(op, 8, 15) << 16))
+/* BIT Format */
+#define MASK_OP_BIT_D(op) MASK_BITS_SHIFT(op, 28, 31)
+#define MASK_OP_BIT_POS2(op) MASK_BITS_SHIFT(op, 23, 27)
+#define MASK_OP_BIT_OP2(op) MASK_BITS_SHIFT(op, 21, 22)
+#define MASK_OP_BIT_POS1(op) MASK_BITS_SHIFT(op, 16, 20)
+#define MASK_OP_BIT_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_BIT_S1(op) MASK_BITS_SHIFT(op, 8, 11)
+
+/* BO Format */
+#define MASK_OP_BO_OFF10(op) (MASK_BITS_SHIFT(op, 16, 21) + \
+ (MASK_BITS_SHIFT(op, 28, 31) << 6))
+#define MASK_OP_BO_OFF10_SEXT(op) (MASK_BITS_SHIFT(op, 16, 21) + \
+ (MASK_BITS_SHIFT_SEXT(op, 28, 31) << 6))
+#define MASK_OP_BO_OP2(op) MASK_BITS_SHIFT(op, 22, 27)
+#define MASK_OP_BO_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_BO_S1D(op) MASK_BITS_SHIFT(op, 8, 11)
+
+/* BOL Format */
+#define MASK_OP_BOL_OFF16(op) ((MASK_BITS_SHIFT(op, 16, 21) + \
+ (MASK_BITS_SHIFT(op, 28, 31) << 6)) + \
+ (MASK_BITS_SHIFT(op, 22, 27) << 10))
+#define MASK_OP_BOL_OFF16_SEXT(op) ((MASK_BITS_SHIFT(op, 16, 21) + \
+ (MASK_BITS_SHIFT(op, 28, 31) << 6)) + \
+ (MASK_BITS_SHIFT_SEXT(op, 22, 27) << 10))
+#define MASK_OP_BOL_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_BOL_S1D(op) MASK_BITS_SHIFT(op, 8, 11)
+
+/* BRC Format */
+#define MASK_OP_BRC_OP2(op) MASK_BITS_SHIFT(op, 31, 31)
+#define MASK_OP_BRC_DISP15(op) MASK_BITS_SHIFT(op, 16, 30)
+#define MASK_OP_BRC_DISP15_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 16, 30)
+#define MASK_OP_BRC_CONST4(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_BRC_CONST4_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 12, 15)
+#define MASK_OP_BRC_S1(op) MASK_BITS_SHIFT(op, 8, 11)
+
+/* BRN Format */
+#define MASK_OP_BRN_OP2(op) MASK_BITS_SHIFT(op, 31, 31)
+#define MASK_OP_BRN_DISP15(op) MASK_BITS_SHIFT(op, 16, 30)
+#define MASK_OP_BRN_DISP15_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 16, 30)
+#define MASK_OP_BRN_N(op) (MASK_BITS_SHIFT(op, 12, 15) + \
+ (MASK_BITS_SHIFT(op, 7, 7) << 4))
+#define MASK_OP_BRN_S1(op) MASK_BITS_SHIFT(op, 8, 11)
+/* BRR Format */
+#define MASK_OP_BRR_OP2(op) MASK_BITS_SHIFT(op, 31, 31)
+#define MASK_OP_BRR_DISP15(op) MASK_BITS_SHIFT(op, 16, 30)
+#define MASK_OP_BRR_DISP15_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 16, 30)
+#define MASK_OP_BRR_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_BRR_S1(op) MASK_BITS_SHIFT(op, 8, 11)
+
+/* Meta masks for fields shared by several instruction formats */
+#define MASK_OP_META_D(op) MASK_BITS_SHIFT(op, 28, 31)
+#define MASK_OP_META_S1(op) MASK_BITS_SHIFT(op, 8, 11)
+
+/* RC Format */
+#define MASK_OP_RC_D(op) MASK_OP_META_D(op)
+#define MASK_OP_RC_OP2(op) MASK_BITS_SHIFT(op, 21, 27)
+#define MASK_OP_RC_CONST9(op) MASK_BITS_SHIFT(op, 12, 20)
+#define MASK_OP_RC_CONST9_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 12, 20)
+#define MASK_OP_RC_S1(op) MASK_OP_META_S1(op)
+
+/* RCPW Format */
+
+#define MASK_OP_RCPW_D(op) MASK_OP_META_D(op)
+#define MASK_OP_RCPW_POS(op) MASK_BITS_SHIFT(op, 23, 27)
+#define MASK_OP_RCPW_OP2(op) MASK_BITS_SHIFT(op, 21, 22)
+#define MASK_OP_RCPW_WIDTH(op) MASK_BITS_SHIFT(op, 16, 20)
+#define MASK_OP_RCPW_CONST4(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_RCPW_S1(op) MASK_OP_META_S1(op)
+
+/* RCR Format */
+
+#define MASK_OP_RCR_D(op) MASK_OP_META_D(op)
+#define MASK_OP_RCR_S3(op) MASK_BITS_SHIFT(op, 24, 27)
+#define MASK_OP_RCR_OP2(op) MASK_BITS_SHIFT(op, 21, 23)
+#define MASK_OP_RCR_CONST9(op) MASK_BITS_SHIFT(op, 12, 20)
+#define MASK_OP_RCR_CONST9_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 12, 20)
+#define MASK_OP_RCR_S1(op) MASK_OP_META_S1(op)
+
+/* RCRR Format */
+
+#define MASK_OP_RCRR_D(op) MASK_OP_META_D(op)
+#define MASK_OP_RCRR_S3(op) MASK_BITS_SHIFT(op, 24, 27)
+#define MASK_OP_RCRR_OP2(op) MASK_BITS_SHIFT(op, 21, 23)
+#define MASK_OP_RCRR_CONST4(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_RCRR_S1(op) MASK_OP_META_S1(op)
+
+/* RCRW Format */
+
+#define MASK_OP_RCRW_D(op) MASK_OP_META_D(op)
+#define MASK_OP_RCRW_S3(op) MASK_BITS_SHIFT(op, 24, 27)
+#define MASK_OP_RCRW_OP2(op) MASK_BITS_SHIFT(op, 21, 23)
+#define MASK_OP_RCRW_WIDTH(op) MASK_BITS_SHIFT(op, 16, 20)
+#define MASK_OP_RCRW_CONST4(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_RCRW_S1(op) MASK_OP_META_S1(op)
+
+/* RLC Format */
+
+#define MASK_OP_RLC_D(op) MASK_OP_META_D(op)
+#define MASK_OP_RLC_CONST16(op) MASK_BITS_SHIFT(op, 12, 27)
+#define MASK_OP_RLC_CONST16_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 12, 27)
+#define MASK_OP_RLC_S1(op) MASK_OP_META_S1(op)
+
+/* RR Format */
+#define MASK_OP_RR_D(op) MASK_OP_META_D(op)
+#define MASK_OP_RR_OP2(op) MASK_BITS_SHIFT(op, 20, 27)
+#define MASK_OP_RR_N(op) MASK_BITS_SHIFT(op, 16, 17)
+#define MASK_OP_RR_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_RR_S1(op) MASK_OP_META_S1(op)
+
+/* RR1 Format */
+#define MASK_OP_RR1_D(op) MASK_OP_META_D(op)
+#define MASK_OP_RR1_OP2(op) MASK_BITS_SHIFT(op, 18, 27)
+#define MASK_OP_RR1_N(op) MASK_BITS_SHIFT(op, 16, 17)
+#define MASK_OP_RR1_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_RR1_S1(op) MASK_OP_META_S1(op)
+
+/* RR2 Format */
+#define MASK_OP_RR2_D(op) MASK_OP_META_D(op)
+#define MASK_OP_RR2_OP2(op) MASK_BITS_SHIFT(op, 16, 27)
+#define MASK_OP_RR2_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_RR2_S1(op) MASK_OP_META_S1(op)
+
+/* RRPW Format */
+#define MASK_OP_RRPW_D(op) MASK_OP_META_D(op)
+#define MASK_OP_RRPW_POS(op) MASK_BITS_SHIFT(op, 23, 27)
+#define MASK_OP_RRPW_OP2(op) MASK_BITS_SHIFT(op, 21, 22)
+#define MASK_OP_RRPW_WIDTH(op) MASK_BITS_SHIFT(op, 16, 20)
+#define MASK_OP_RRPW_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_RRPW_S1(op) MASK_OP_META_S1(op)
+
+/* RRR Format */
+#define MASK_OP_RRR_D(op) MASK_OP_META_D(op)
+#define MASK_OP_RRR_S3(op) MASK_BITS_SHIFT(op, 24, 27)
+#define MASK_OP_RRR_OP2(op) MASK_BITS_SHIFT(op, 20, 23)
+#define MASK_OP_RRR_N(op) MASK_BITS_SHIFT(op, 16, 17)
+#define MASK_OP_RRR_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_RRR_S1(op) MASK_OP_META_S1(op)
+
+/* RRR1 Format */
+#define MASK_OP_RRR1_D(op) MASK_OP_META_D(op)
+#define MASK_OP_RRR1_S3(op) MASK_BITS_SHIFT(op, 24, 27)
+#define MASK_OP_RRR1_OP2(op) MASK_BITS_SHIFT(op, 18, 23)
+#define MASK_OP_RRR1_N(op) MASK_BITS_SHIFT(op, 16, 17)
+#define MASK_OP_RRR1_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_RRR1_S1(op) MASK_OP_META_S1(op)
+
+/* RRR2 Format */
+#define MASK_OP_RRR2_D(op) MASK_OP_META_D(op)
+#define MASK_OP_RRR2_S3(op) MASK_BITS_SHIFT(op, 24, 27)
+#define MASK_OP_RRR2_OP2(op) MASK_BITS_SHIFT(op, 16, 23)
+#define MASK_OP_RRR2_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_RRR2_S1(op) MASK_OP_META_S1(op)
+
+/* RRRR Format */
+#define MASK_OP_RRRR_D(op) MASK_OP_META_D(op)
+#define MASK_OP_RRRR_S3(op) MASK_BITS_SHIFT(op, 24, 27)
+#define MASK_OP_RRRR_OP2(op) MASK_BITS_SHIFT(op, 21, 23)
+#define MASK_OP_RRRR_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_RRRR_S1(op) MASK_OP_META_S1(op)
+
+/* RRRW Format */
+#define MASK_OP_RRRW_D(op) MASK_OP_META_D(op)
+#define MASK_OP_RRRW_S3(op) MASK_BITS_SHIFT(op, 24, 27)
+#define MASK_OP_RRRW_OP2(op) MASK_BITS_SHIFT(op, 21, 23)
+#define MASK_OP_RRRW_WIDTH(op) MASK_BITS_SHIFT(op, 16, 20)
+#define MASK_OP_RRRW_S2(op) MASK_BITS_SHIFT(op, 12, 15)
+#define MASK_OP_RRRW_S1(op) MASK_OP_META_S1(op)
+
+/* SYS Format */
+#define MASK_OP_SYS_OP2(op) MASK_BITS_SHIFT(op, 22, 27)
+#define MASK_OP_SYS_S1D(op) MASK_OP_META_S1(op)
+
+
+
+/*
+ * Tricore Opcodes Enums
+ *
+ * Format: OPC(1|2|M)_InstrLen_Name
+ * OPC1 = only op1 field is used
+ * OPC2 = op1 and op2 fields are used; the opcode belongs to an OPCM group
+ * OPCM = op1 field is used to group several instructions
+ * InstrLen = 16|32
+ * Name = name of the instruction
+ */
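+/* For example, OPC1_16_SRR_ADD is a 16-bit SRR-format ADD identified by op1
+   alone, while OPC2_32_RR_ADD is a 32-bit RR-format ADD selected by op2
+   within the OPCM_32_RR_ACCUMULATOR group. */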
+
+/* 16-Bit */
+enum {
+
+ OPCM_16_SR_SYSTEM = 0x00,
+ OPCM_16_SR_ACCU = 0x32,
+
+ OPC1_16_SRC_ADD = 0xc2,
+ OPC1_16_SRC_ADD_A15 = 0x92,
+ OPC1_16_SRC_ADD_15A = 0x9a,
+ OPC1_16_SRR_ADD = 0x42,
+ OPC1_16_SRR_ADD_A15 = 0x12,
+ OPC1_16_SRR_ADD_15A = 0x1a,
+ OPC1_16_SRC_ADD_A = 0xb0,
+ OPC1_16_SRR_ADD_A = 0x30,
+ OPC1_16_SRR_ADDS = 0x22,
+ OPC1_16_SRRS_ADDSC_A = 0x10,
+ OPC1_16_SC_AND = 0x16,
+ OPC1_16_SRR_AND = 0x26,
+ OPC1_16_SC_BISR = 0xe0,
+ OPC1_16_SRC_CADD = 0x8a,
+ OPC1_16_SRC_CADDN = 0xca,
+ OPC1_16_SB_CALL = 0x5c,
+ OPC1_16_SRC_CMOV = 0xaa,
+ OPC1_16_SRR_CMOV = 0x2a,
+ OPC1_16_SRC_CMOVN = 0xea,
+ OPC1_16_SRR_CMOVN = 0x6a,
+ OPC1_16_SRC_EQ = 0xba,
+ OPC1_16_SRR_EQ = 0x3a,
+ OPC1_16_SB_J = 0x3c,
+ OPC1_16_SBC_JEQ = 0x1e,
+ OPC1_16_SBR_JEQ = 0x3e,
+ OPC1_16_SBR_JGEZ = 0xce,
+ OPC1_16_SBR_JGTZ = 0x4e,
+ OPC1_16_SR_JI = 0xdc,
+ OPC1_16_SBR_JLEZ = 0x8e,
+ OPC1_16_SBR_JLTZ = 0x0e,
+ OPC1_16_SBC_JNE = 0x5e,
+ OPC1_16_SBR_JNE = 0x7e,
+ OPC1_16_SB_JNZ = 0xee,
+ OPC1_16_SBR_JNZ = 0xf6,
+ OPC1_16_SBR_JNZ_A = 0x7c,
+ OPC1_16_SBRN_JNZ_T = 0xae,
+ OPC1_16_SB_JZ = 0x6e,
+ OPC1_16_SBR_JZ = 0x76,
+ OPC1_16_SBR_JZ_A = 0xbc,
+ OPC1_16_SBRN_JZ_T = 0x2e,
+ OPC1_16_SC_LD_A = 0xd8,
+ OPC1_16_SLR_LD_A = 0xd4,
+ OPC1_16_SLR_LD_A_POSTINC = 0xc4,
+ OPC1_16_SLRO_LD_A = 0xc8,
+ OPC1_16_SRO_LD_A = 0xcc,
+ OPC1_16_SLR_LD_BU = 0x14,
+ OPC1_16_SLR_LD_BU_POSTINC = 0x04,
+ OPC1_16_SLRO_LD_BU = 0x08,
+ OPC1_16_SRO_LD_BU = 0x0c,
+ OPC1_16_SLR_LD_H = 0x94,
+ OPC1_16_SLR_LD_H_POSTINC = 0x84,
+ OPC1_16_SLRO_LD_H = 0x88,
+ OPC1_16_SRO_LD_H = 0x8c,
+ OPC1_16_SC_LD_W = 0x58,
+ OPC1_16_SLR_LD_W = 0x54,
+ OPC1_16_SLR_LD_W_POSTINC = 0x44,
+ OPC1_16_SLRO_LD_W = 0x48,
+ OPC1_16_SRO_LD_W = 0x4c,
+ OPC1_16_SBR_LOOP = 0xfc,
+ OPC1_16_SRC_LT = 0xfa,
+ OPC1_16_SRR_LT = 0x7a,
+ OPC1_16_SC_MOV = 0xda,
+ OPC1_16_SRC_MOV = 0x82,
+ OPC1_16_SRR_MOV = 0x02,
+ OPC1_16_SRC_MOV_E = 0xd2,/* 1.6 only */
+ OPC1_16_SRC_MOV_A = 0xa0,
+ OPC1_16_SRR_MOV_A = 0x60,
+ OPC1_16_SRR_MOV_AA = 0x40,
+ OPC1_16_SRR_MOV_D = 0x80,
+ OPC1_16_SRR_MUL = 0xe2,
+ OPC1_16_SR_NOT = 0x46,
+ OPC1_16_SC_OR = 0x96,
+ OPC1_16_SRR_OR = 0xa6,
+ OPC1_16_SRC_SH = 0x06,
+ OPC1_16_SRC_SHA = 0x86,
+ OPC1_16_SC_ST_A = 0xf8,
+ OPC1_16_SRO_ST_A = 0xec,
+ OPC1_16_SSR_ST_A = 0xf4,
+ OPC1_16_SSR_ST_A_POSTINC = 0xe4,
+ OPC1_16_SSRO_ST_A = 0xe8,
+ OPC1_16_SRO_ST_B = 0x2c,
+ OPC1_16_SSR_ST_B = 0x34,
+ OPC1_16_SSR_ST_B_POSTINC = 0x24,
+ OPC1_16_SSRO_ST_B = 0x28,
+ OPC1_16_SRO_ST_H = 0xac,
+ OPC1_16_SSR_ST_H = 0xb4,
+ OPC1_16_SSR_ST_H_POSTINC = 0xa4,
+ OPC1_16_SSRO_ST_H = 0xa8,
+ OPC1_16_SC_ST_W = 0x78,
+ OPC1_16_SRO_ST_W = 0x6c,
+ OPC1_16_SSR_ST_W = 0x74,
+ OPC1_16_SSR_ST_W_POSTINC = 0x64,
+ OPC1_16_SSRO_ST_W = 0x68,
+ OPC1_16_SRR_SUB = 0xa2,
+ OPC1_16_SRR_SUB_A15B = 0x52,
+ OPC1_16_SRR_SUB_15AB = 0x5a,
+ OPC1_16_SC_SUB_A = 0x20,
+ OPC1_16_SRR_SUBS = 0x62,
+ OPC1_16_SRR_XOR = 0xc6,
+
+};
+
+/*
+ * SR Format
+ */
+/* OPCM_16_SR_SYSTEM */
+enum {
+
+ OPC2_16_SR_NOP = 0x00,
+ OPC2_16_SR_RET = 0x09,
+ OPC2_16_SR_RFE = 0x08,
+ OPC2_16_SR_DEBUG = 0x0a,
+ OPC2_16_SR_FRET = 0x07,
+};
+/* OPCM_16_SR_ACCU */
+enum {
+ OPC2_16_SR_RSUB = 0x05,
+ OPC2_16_SR_SAT_B = 0x00,
+ OPC2_16_SR_SAT_BU = 0x01,
+ OPC2_16_SR_SAT_H = 0x02,
+ OPC2_16_SR_SAT_HU = 0x03,
+
+};
+
+/* 32-Bit */
+
+enum {
+/* ABS Format 1, M */
+ OPCM_32_ABS_LDW = 0x85,
+ OPCM_32_ABS_LDB = 0x05,
+ OPCM_32_ABS_LDMST_SWAP = 0xe5,
+ OPCM_32_ABS_LDST_CONTEXT = 0x15,
+ OPCM_32_ABS_STORE = 0xa5,
+ OPCM_32_ABS_STOREB_H = 0x25,
+ OPC1_32_ABS_STOREQ = 0x65,
+ OPC1_32_ABS_LD_Q = 0x45,
+ OPC1_32_ABS_LEA = 0xc5,
+/* ABSB Format */
+ OPC1_32_ABSB_ST_T = 0xd5,
+/* B Format */
+ OPC1_32_B_CALL = 0x6d,
+ OPC1_32_B_CALLA = 0xed,
+ OPC1_32_B_FCALL = 0x61,
+ OPC1_32_B_FCALLA = 0xe1,
+ OPC1_32_B_J = 0x1d,
+ OPC1_32_B_JA = 0x9d,
+ OPC1_32_B_JL = 0x5d,
+ OPC1_32_B_JLA = 0xdd,
+/* Bit Format */
+ OPCM_32_BIT_ANDACC = 0x47,
+ OPCM_32_BIT_LOGICAL_T1 = 0x87,
+ OPCM_32_BIT_INSERT = 0x67,
+ OPCM_32_BIT_LOGICAL_T2 = 0x07,
+ OPCM_32_BIT_ORAND = 0xc7,
+ OPCM_32_BIT_SH_LOGIC1 = 0x27,
+ OPCM_32_BIT_SH_LOGIC2 = 0xa7,
+/* BO Format */
+ OPCM_32_BO_ADDRMODE_POST_PRE_BASE = 0x89,
+ OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR = 0xa9,
+ OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE = 0x09,
+ OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR = 0x29,
+ OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE = 0x49,
+ OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR = 0x69,
+/* BOL Format */
+ OPC1_32_BOL_LD_A_LONGOFF = 0x99,
+ OPC1_32_BOL_LD_W_LONGOFF = 0x19,
+ OPC1_32_BOL_LEA_LONGOFF = 0xd9,
+ OPC1_32_BOL_ST_W_LONGOFF = 0x59,
+ OPC1_32_BOL_ST_A_LONGOFF = 0xb5, /* 1.6 only */
+ OPC1_32_BOL_LD_B_LONGOFF = 0x79, /* 1.6 only */
+ OPC1_32_BOL_LD_BU_LONGOFF = 0x39, /* 1.6 only */
+ OPC1_32_BOL_LD_H_LONGOFF = 0xc9, /* 1.6 only */
+ OPC1_32_BOL_LD_HU_LONGOFF = 0xb9, /* 1.6 only */
+ OPC1_32_BOL_ST_B_LONGOFF = 0xe9, /* 1.6 only */
+ OPC1_32_BOL_ST_H_LONGOFF = 0xf9, /* 1.6 only */
+/* BRC Format */
+ OPCM_32_BRC_EQ_NEQ = 0xdf,
+ OPCM_32_BRC_GE = 0xff,
+ OPCM_32_BRC_JLT = 0xbf,
+ OPCM_32_BRC_JNE = 0x9f,
+/* BRN Format */
+ OPCM_32_BRN_JTT = 0x6f,
+/* BRR Format */
+ OPCM_32_BRR_EQ_NEQ = 0x5f,
+ OPCM_32_BRR_ADDR_EQ_NEQ = 0x7d,
+ OPCM_32_BRR_GE = 0x7f,
+ OPCM_32_BRR_JLT = 0x3f,
+ OPCM_32_BRR_JNE = 0x1f,
+ OPCM_32_BRR_JNZ = 0xbd,
+ OPCM_32_BRR_LOOP = 0xfd,
+/* RC Format */
+ OPCM_32_RC_LOGICAL_SHIFT = 0x8f,
+ OPCM_32_RC_ACCUMULATOR = 0x8b,
+ OPCM_32_RC_SERVICEROUTINE = 0xad,
+ OPCM_32_RC_MUL = 0x53,
+/* RCPW Format */
+ OPCM_32_RCPW_MASK_INSERT = 0xb7,
+/* RCR Format */
+ OPCM_32_RCR_COND_SELECT = 0xab,
+ OPCM_32_RCR_MADD = 0x13,
+ OPCM_32_RCR_MSUB = 0x33,
+/* RCRR Format */
+ OPC1_32_RCRR_INSERT = 0x97,
+/* RCRW Format */
+ OPCM_32_RCRW_MASK_INSERT = 0xd7,
+/* RLC Format */
+ OPC1_32_RLC_ADDI = 0x1b,
+ OPC1_32_RLC_ADDIH = 0x9b,
+ OPC1_32_RLC_ADDIH_A = 0x11,
+ OPC1_32_RLC_MFCR = 0x4d,
+ OPC1_32_RLC_MOV = 0x3b,
+ OPC1_32_RLC_MOV_64 = 0xfb, /* 1.6 only */
+ OPC1_32_RLC_MOV_U = 0xbb,
+ OPC1_32_RLC_MOV_H = 0x7b,
+ OPC1_32_RLC_MOVH_A = 0x91,
+ OPC1_32_RLC_MTCR = 0xcd,
+/* RR Format */
+ OPCM_32_RR_LOGICAL_SHIFT = 0x0f,
+ OPCM_32_RR_ACCUMULATOR = 0x0b,
+ OPCM_32_RR_ADDRESS = 0x01,
+ OPCM_32_RR_DIVIDE = 0x4b,
+ OPCM_32_RR_IDIRECT = 0x2d,
+/* RR1 Format */
+ OPCM_32_RR1_MUL = 0xb3,
+ OPCM_32_RR1_MULQ = 0x93,
+/* RR2 Format */
+ OPCM_32_RR2_MUL = 0x73,
+/* RRPW Format */
+ OPCM_32_RRPW_EXTRACT_INSERT = 0x37,
+ OPC1_32_RRPW_DEXTR = 0x77,
+/* RRR Format */
+ OPCM_32_RRR_COND_SELECT = 0x2b,
+ OPCM_32_RRR_DIVIDE = 0x6b,
+/* RRR1 Format */
+ OPCM_32_RRR1_MADD = 0x83,
+ OPCM_32_RRR1_MADDQ_H = 0x43,
+ OPCM_32_RRR1_MADDSU_H = 0xc3,
+ OPCM_32_RRR1_MSUB_H = 0xa3,
+ OPCM_32_RRR1_MSUB_Q = 0x63,
+ OPCM_32_RRR1_MSUBAD_H = 0xe3,
+/* RRR2 Format */
+ OPCM_32_RRR2_MADD = 0x03,
+ OPCM_32_RRR2_MSUB = 0x23,
+/* RRRR Format */
+ OPCM_32_RRRR_EXTRACT_INSERT = 0x17,
+/* RRRW Format */
+ OPCM_32_RRRW_EXTRACT_INSERT = 0x57,
+/* SYS Format */
+ OPCM_32_SYS_INTERRUPTS = 0x0d,
+ OPC1_32_SYS_RSTV = 0x2f,
+};
+
+
+
+/*
+ * ABS Format
+ */
+
+/* OPCM_32_ABS_LDW */
+enum {
+
+ OPC2_32_ABS_LD_A = 0x02,
+ OPC2_32_ABS_LD_D = 0x01,
+ OPC2_32_ABS_LD_DA = 0x03,
+ OPC2_32_ABS_LD_W = 0x00,
+};
+
+/* OPCM_32_ABS_LDB */
+enum {
+ OPC2_32_ABS_LD_B = 0x00,
+ OPC2_32_ABS_LD_BU = 0x01,
+ OPC2_32_ABS_LD_H = 0x02,
+ OPC2_32_ABS_LD_HU = 0x03,
+};
+/* OPCM_32_ABS_LDMST_SWAP */
+enum {
+ OPC2_32_ABS_LDMST = 0x01,
+ OPC2_32_ABS_SWAP_W = 0x00,
+};
+/* OPCM_32_ABS_LDST_CONTEXT */
+enum {
+ OPC2_32_ABS_LDLCX = 0x02,
+ OPC2_32_ABS_LDUCX = 0x03,
+ OPC2_32_ABS_STLCX = 0x00,
+ OPC2_32_ABS_STUCX = 0x01,
+};
+/* OPCM_32_ABS_STORE */
+enum {
+ OPC2_32_ABS_ST_A = 0x02,
+ OPC2_32_ABS_ST_D = 0x01,
+ OPC2_32_ABS_ST_DA = 0x03,
+ OPC2_32_ABS_ST_W = 0x00,
+};
+/* OPCM_32_ABS_STOREB_H */
+enum {
+ OPC2_32_ABS_ST_B = 0x00,
+ OPC2_32_ABS_ST_H = 0x02,
+};
+/*
+ * Bit Format
+ */
+/* OPCM_32_BIT_ANDACC */
+enum {
+ OPC2_32_BIT_AND_AND_T = 0x00,
+ OPC2_32_BIT_AND_ANDN_T = 0x03,
+ OPC2_32_BIT_AND_NOR_T = 0x02,
+ OPC2_32_BIT_AND_OR_T = 0x01,
+};
+/* OPCM_32_BIT_LOGICAL_T1 */
+enum {
+ OPC2_32_BIT_AND_T = 0x00,
+ OPC2_32_BIT_ANDN_T = 0x03,
+ OPC2_32_BIT_NOR_T = 0x02,
+ OPC2_32_BIT_OR_T = 0x01,
+};
+/* OPCM_32_BIT_INSERT */
+enum {
+ OPC2_32_BIT_INS_T = 0x00,
+ OPC2_32_BIT_INSN_T = 0x01,
+};
+/* OPCM_32_BIT_LOGICAL_T2 */
+enum {
+ OPC2_32_BIT_NAND_T = 0x00,
+ OPC2_32_BIT_ORN_T = 0x01,
+ OPC2_32_BIT_XNOR_T = 0x02,
+ OPC2_32_BIT_XOR_T = 0x03,
+};
+/* OPCM_32_BIT_ORAND */
+enum {
+ OPC2_32_BIT_OR_AND_T = 0x00,
+ OPC2_32_BIT_OR_ANDN_T = 0x03,
+ OPC2_32_BIT_OR_NOR_T = 0x02,
+ OPC2_32_BIT_OR_OR_T = 0x01,
+};
+/* OPCM_32_BIT_SH_LOGIC1 */
+enum {
+ OPC2_32_BIT_SH_AND_T = 0x00,
+ OPC2_32_BIT_SH_ANDN_T = 0x03,
+ OPC2_32_BIT_SH_NOR_T = 0x02,
+ OPC2_32_BIT_SH_OR_T = 0x01,
+};
+/* OPCM_32_BIT_SH_LOGIC2 */
+enum {
+ OPC2_32_BIT_SH_NAND_T = 0x00,
+ OPC2_32_BIT_SH_ORN_T = 0x01,
+ OPC2_32_BIT_SH_XNOR_T = 0x02,
+ OPC2_32_BIT_SH_XOR_T = 0x03,
+};
+/*
+ * BO Format
+ */
+/* OPCM_32_BO_ADDRMODE_POST_PRE_BASE */
+enum {
+ OPC2_32_BO_CACHEA_I_SHORTOFF = 0x2e,
+ OPC2_32_BO_CACHEA_I_POSTINC = 0x0e,
+ OPC2_32_BO_CACHEA_I_PREINC = 0x1e,
+ OPC2_32_BO_CACHEA_W_SHORTOFF = 0x2c,
+ OPC2_32_BO_CACHEA_W_POSTINC = 0x0c,
+ OPC2_32_BO_CACHEA_W_PREINC = 0x1c,
+ OPC2_32_BO_CACHEA_WI_SHORTOFF = 0x2d,
+ OPC2_32_BO_CACHEA_WI_POSTINC = 0x0d,
+ OPC2_32_BO_CACHEA_WI_PREINC = 0x1d,
+ /* 1.3.1 only */
+ OPC2_32_BO_CACHEI_W_SHORTOFF = 0x2b,
+ OPC2_32_BO_CACHEI_W_POSTINC = 0x0b,
+ OPC2_32_BO_CACHEI_W_PREINC = 0x1b,
+ OPC2_32_BO_CACHEI_WI_SHORTOFF = 0x2f,
+ OPC2_32_BO_CACHEI_WI_POSTINC = 0x0f,
+ OPC2_32_BO_CACHEI_WI_PREINC = 0x1f,
+ /* end 1.3.1 only */
+ OPC2_32_BO_ST_A_SHORTOFF = 0x26,
+ OPC2_32_BO_ST_A_POSTINC = 0x06,
+ OPC2_32_BO_ST_A_PREINC = 0x16,
+ OPC2_32_BO_ST_B_SHORTOFF = 0x20,
+ OPC2_32_BO_ST_B_POSTINC = 0x00,
+ OPC2_32_BO_ST_B_PREINC = 0x10,
+ OPC2_32_BO_ST_D_SHORTOFF = 0x25,
+ OPC2_32_BO_ST_D_POSTINC = 0x05,
+ OPC2_32_BO_ST_D_PREINC = 0x15,
+ OPC2_32_BO_ST_DA_SHORTOFF = 0x27,
+ OPC2_32_BO_ST_DA_POSTINC = 0x07,
+ OPC2_32_BO_ST_DA_PREINC = 0x17,
+ OPC2_32_BO_ST_H_SHORTOFF = 0x22,
+ OPC2_32_BO_ST_H_POSTINC = 0x02,
+ OPC2_32_BO_ST_H_PREINC = 0x12,
+ OPC2_32_BO_ST_Q_SHORTOFF = 0x28,
+ OPC2_32_BO_ST_Q_POSTINC = 0x08,
+ OPC2_32_BO_ST_Q_PREINC = 0x18,
+ OPC2_32_BO_ST_W_SHORTOFF = 0x24,
+ OPC2_32_BO_ST_W_POSTINC = 0x04,
+ OPC2_32_BO_ST_W_PREINC = 0x14,
+};
+/* OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR */
+enum {
+ OPC2_32_BO_CACHEA_I_BR = 0x0e,
+ OPC2_32_BO_CACHEA_I_CIRC = 0x1e,
+ OPC2_32_BO_CACHEA_W_BR = 0x0c,
+ OPC2_32_BO_CACHEA_W_CIRC = 0x1c,
+ OPC2_32_BO_CACHEA_WI_BR = 0x0d,
+ OPC2_32_BO_CACHEA_WI_CIRC = 0x1d,
+ OPC2_32_BO_ST_A_BR = 0x06,
+ OPC2_32_BO_ST_A_CIRC = 0x16,
+ OPC2_32_BO_ST_B_BR = 0x00,
+ OPC2_32_BO_ST_B_CIRC = 0x10,
+ OPC2_32_BO_ST_D_BR = 0x05,
+ OPC2_32_BO_ST_D_CIRC = 0x15,
+ OPC2_32_BO_ST_DA_BR = 0x07,
+ OPC2_32_BO_ST_DA_CIRC = 0x17,
+ OPC2_32_BO_ST_H_BR = 0x02,
+ OPC2_32_BO_ST_H_CIRC = 0x12,
+ OPC2_32_BO_ST_Q_BR = 0x08,
+ OPC2_32_BO_ST_Q_CIRC = 0x18,
+ OPC2_32_BO_ST_W_BR = 0x04,
+ OPC2_32_BO_ST_W_CIRC = 0x14,
+};
+/* OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE */
+enum {
+ OPC2_32_BO_LD_A_SHORTOFF = 0x26,
+ OPC2_32_BO_LD_A_POSTINC = 0x06,
+ OPC2_32_BO_LD_A_PREINC = 0x16,
+ OPC2_32_BO_LD_B_SHORTOFF = 0x20,
+ OPC2_32_BO_LD_B_POSTINC = 0x00,
+ OPC2_32_BO_LD_B_PREINC = 0x10,
+ OPC2_32_BO_LD_BU_SHORTOFF = 0x21,
+ OPC2_32_BO_LD_BU_POSTINC = 0x01,
+ OPC2_32_BO_LD_BU_PREINC = 0x11,
+ OPC2_32_BO_LD_D_SHORTOFF = 0x25,
+ OPC2_32_BO_LD_D_POSTINC = 0x05,
+ OPC2_32_BO_LD_D_PREINC = 0x15,
+ OPC2_32_BO_LD_DA_SHORTOFF = 0x27,
+ OPC2_32_BO_LD_DA_POSTINC = 0x07,
+ OPC2_32_BO_LD_DA_PREINC = 0x17,
+ OPC2_32_BO_LD_H_SHORTOFF = 0x22,
+ OPC2_32_BO_LD_H_POSTINC = 0x02,
+ OPC2_32_BO_LD_H_PREINC = 0x12,
+ OPC2_32_BO_LD_HU_SHORTOFF = 0x23,
+ OPC2_32_BO_LD_HU_POSTINC = 0x03,
+ OPC2_32_BO_LD_HU_PREINC = 0x13,
+ OPC2_32_BO_LD_Q_SHORTOFF = 0x28,
+ OPC2_32_BO_LD_Q_POSTINC = 0x08,
+ OPC2_32_BO_LD_Q_PREINC = 0x18,
+ OPC2_32_BO_LD_W_SHORTOFF = 0x24,
+ OPC2_32_BO_LD_W_POSTINC = 0x04,
+ OPC2_32_BO_LD_W_PREINC = 0x14,
+};
+/* OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR */
+enum {
+ OPC2_32_BO_LD_A_BR = 0x06,
+ OPC2_32_BO_LD_A_CIRC = 0x16,
+ OPC2_32_BO_LD_B_BR = 0x00,
+ OPC2_32_BO_LD_B_CIRC = 0x10,
+ OPC2_32_BO_LD_BU_BR = 0x01,
+ OPC2_32_BO_LD_BU_CIRC = 0x11,
+ OPC2_32_BO_LD_D_BR = 0x05,
+ OPC2_32_BO_LD_D_CIRC = 0x15,
+ OPC2_32_BO_LD_DA_BR = 0x07,
+ OPC2_32_BO_LD_DA_CIRC = 0x17,
+ OPC2_32_BO_LD_H_BR = 0x02,
+ OPC2_32_BO_LD_H_CIRC = 0x12,
+ OPC2_32_BO_LD_HU_BR = 0x03,
+ OPC2_32_BO_LD_HU_CIRC = 0x13,
+ OPC2_32_BO_LD_Q_BR = 0x08,
+ OPC2_32_BO_LD_Q_CIRC = 0x18,
+ OPC2_32_BO_LD_W_BR = 0x04,
+ OPC2_32_BO_LD_W_CIRC = 0x14,
+};
+/* OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE */
+enum {
+ OPC2_32_BO_LDLCX_SHORTOFF = 0x24,
+ OPC2_32_BO_LDMST_SHORTOFF = 0x21,
+ OPC2_32_BO_LDMST_POSTINC = 0x01,
+ OPC2_32_BO_LDMST_PREINC = 0x11,
+ OPC2_32_BO_LDUCX_SHORTOFF = 0x25,
+ OPC2_32_BO_LEA_SHORTOFF = 0x28,
+ OPC2_32_BO_STLCX_SHORTOFF = 0x26,
+ OPC2_32_BO_STUCX_SHORTOFF = 0x27,
+ OPC2_32_BO_SWAP_W_SHORTOFF = 0x20,
+ OPC2_32_BO_SWAP_W_POSTINC = 0x00,
+ OPC2_32_BO_SWAP_W_PREINC = 0x10,
+ OPC2_32_BO_CMPSWAP_W_SHORTOFF = 0x23,
+ OPC2_32_BO_CMPSWAP_W_POSTINC = 0x03,
+ OPC2_32_BO_CMPSWAP_W_PREINC = 0x13,
+ OPC2_32_BO_SWAPMSK_W_SHORTOFF = 0x22,
+ OPC2_32_BO_SWAPMSK_W_POSTINC = 0x02,
+ OPC2_32_BO_SWAPMSK_W_PREINC = 0x12,
+};
+/* OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR */
+enum {
+ OPC2_32_BO_LDMST_BR = 0x01,
+ OPC2_32_BO_LDMST_CIRC = 0x11,
+ OPC2_32_BO_SWAP_W_BR = 0x00,
+ OPC2_32_BO_SWAP_W_CIRC = 0x10,
+ OPC2_32_BO_CMPSWAP_W_BR = 0x03,
+ OPC2_32_BO_CMPSWAP_W_CIRC = 0x13,
+ OPC2_32_BO_SWAPMSK_W_BR = 0x02,
+ OPC2_32_BO_SWAPMSK_W_CIRC = 0x12,
+};
+/*
+ * BRC Format
+ */
+/* OPCM_32_BRC_EQ_NEQ */
+enum {
+ OPC2_32_BRC_JEQ = 0x00,
+ OPC2_32_BRC_JNE = 0x01,
+};
+/* OPCM_32_BRC_GE */
+enum {
+ OP2_32_BRC_JGE = 0x00,
+ OPC_32_BRC_JGE_U = 0x01,
+};
+/* OPCM_32_BRC_JLT */
+enum {
+ OPC2_32_BRC_JLT = 0x00,
+ OPC2_32_BRC_JLT_U = 0x01,
+};
+/* OPCM_32_BRC_JNE */
+enum {
+ OPC2_32_BRC_JNED = 0x01,
+ OPC2_32_BRC_JNEI = 0x00,
+};
+/*
+ * BRN Format
+ */
+/* OPCM_32_BRN_JTT */
+enum {
+ OPC2_32_BRN_JNZ_T = 0x01,
+ OPC2_32_BRN_JZ_T = 0x00,
+};
+/*
+ * BRR Format
+ */
+/* OPCM_32_BRR_EQ_NEQ */
+enum {
+ OPC2_32_BRR_JEQ = 0x00,
+ OPC2_32_BRR_JNE = 0x01,
+};
+/* OPCM_32_BRR_ADDR_EQ_NEQ */
+enum {
+ OPC2_32_BRR_JEQ_A = 0x00,
+ OPC2_32_BRR_JNE_A = 0x01,
+};
+/* OPCM_32_BRR_GE */
+enum {
+ OPC2_32_BRR_JGE = 0x00,
+ OPC2_32_BRR_JGE_U = 0x01,
+};
+/* OPCM_32_BRR_JLT */
+enum {
+ OPC2_32_BRR_JLT = 0x00,
+ OPC2_32_BRR_JLT_U = 0x01,
+};
+/* OPCM_32_BRR_JNE */
+enum {
+ OPC2_32_BRR_JNED = 0x01,
+ OPC2_32_BRR_JNEI = 0x00,
+};
+/* OPCM_32_BRR_JNZ */
+enum {
+ OPC2_32_BRR_JNZ_A = 0x01,
+ OPC2_32_BRR_JZ_A = 0x00,
+};
+/* OPCM_32_BRR_LOOP */
+enum {
+ OPC2_32_BRR_LOOP = 0x00,
+ OPC2_32_BRR_LOOPU = 0x01,
+};
+/*
+ * RC Format
+ */
+/* OPCM_32_RC_LOGICAL_SHIFT */
+enum {
+ OPC2_32_RC_AND = 0x08,
+ OPC2_32_RC_ANDN = 0x0e,
+ OPC2_32_RC_NAND = 0x09,
+ OPC2_32_RC_NOR = 0x0b,
+ OPC2_32_RC_OR = 0x0a,
+ OPC2_32_RC_ORN = 0x0f,
+ OPC2_32_RC_SH = 0x00,
+ OPC2_32_RC_SH_H = 0x40,
+ OPC2_32_RC_SHA = 0x01,
+ OPC2_32_RC_SHA_H = 0x41,
+ OPC2_32_RC_SHAS = 0x02,
+ OPC2_32_RC_XNOR = 0x0d,
+ OPC2_32_RC_XOR = 0x0c,
+};
+/* OPCM_32_RC_ACCUMULATOR */
+enum {
+ OPC2_32_RC_ABSDIF = 0x0e,
+ OPC2_32_RC_ABSDIFS = 0x0f,
+ OPC2_32_RC_ADD = 0x00,
+ OPC2_32_RC_ADDC = 0x05,
+ OPC2_32_RC_ADDS = 0x02,
+ OPC2_32_RC_ADDS_U = 0x03,
+ OPC2_32_RC_ADDX = 0x04,
+ OPC2_32_RC_AND_EQ = 0x20,
+ OPC2_32_RC_AND_GE = 0x24,
+ OPC2_32_RC_AND_GE_U = 0x25,
+ OPC2_32_RC_AND_LT = 0x22,
+ OPC2_32_RC_AND_LT_U = 0x23,
+ OPC2_32_RC_AND_NE = 0x21,
+ OPC2_32_RC_EQ = 0x10,
+ OPC2_32_RC_EQANY_B = 0x56,
+ OPC2_32_RC_EQANY_H = 0x76,
+ OPC2_32_RC_GE = 0x14,
+ OPC2_32_RC_GE_U = 0x15,
+ OPC2_32_RC_LT = 0x12,
+ OPC2_32_RC_LT_U = 0x13,
+ OPC2_32_RC_MAX = 0x1a,
+ OPC2_32_RC_MAX_U = 0x1b,
+ OPC2_32_RC_MIN = 0x18,
+ OPC2_32_RC_MIN_U = 0x19,
+ OPC2_32_RC_NE = 0x11,
+ OPC2_32_RC_OR_EQ = 0x27,
+ OPC2_32_RC_OR_GE = 0x2b,
+ OPC2_32_RC_OR_GE_U = 0x2c,
+ OPC2_32_RC_OR_LT = 0x29,
+ OPC2_32_RC_OR_LT_U = 0x2a,
+ OPC2_32_RC_OR_NE = 0x28,
+ OPC2_32_RC_RSUB = 0x08,
+ OPC2_32_RC_RSUBS = 0x0a,
+ OPC2_32_RC_RSUBS_U = 0x0b,
+ OPC2_32_RC_SH_EQ = 0x37,
+ OPC2_32_RC_SH_GE = 0x3b,
+ OPC2_32_RC_SH_GE_U = 0x3c,
+ OPC2_32_RC_SH_LT = 0x39,
+ OPC2_32_RC_SH_LT_U = 0x3a,
+ OPC2_32_RC_SH_NE = 0x38,
+ OPC2_32_RC_XOR_EQ = 0x2f,
+ OPC2_32_RC_XOR_GE = 0x33,
+ OPC2_32_RC_XOR_GE_U = 0x34,
+ OPC2_32_RC_XOR_LT = 0x31,
+ OPC2_32_RC_XOR_LT_U = 0x32,
+ OPC2_32_RC_XOR_NE = 0x30,
+};
+/* OPCM_32_RC_SERVICEROUTINE */
+enum {
+ OPC2_32_RC_BISR = 0x00,
+ OPC2_32_RC_SYSCALL = 0x04,
+};
+/* OPCM_32_RC_MUL */
+enum {
+ OPC2_32_RC_MUL_32 = 0x01,
+ OPC2_32_RC_MUL_64 = 0x03,
+ OPC2_32_RC_MULS_32 = 0x05,
+ OPC2_32_RC_MUL_U_64 = 0x02,
+ OPC2_32_RC_MULS_U_32 = 0x04,
+};
+/*
+ * RCPW Format
+ */
+/* OPCM_32_RCPW_MASK_INSERT */
+enum {
+ OPC2_32_RCPW_IMASK = 0x01,
+ OPC2_32_RCPW_INSERT = 0x00,
+};
+/*
+ * RCR Format
+ */
+/* OPCM_32_RCR_COND_SELECT */
+enum {
+ OPC2_32_RCR_CADD = 0x00,
+ OPC2_32_RCR_CADDN = 0x01,
+ OPC2_32_RCR_SEL = 0x04,
+ OPC2_32_RCR_SELN = 0x05,
+};
+/* OPCM_32_RCR_MADD */
+enum {
+ OPC2_32_RCR_MADD_32 = 0x01,
+ OPC2_32_RCR_MADD_64 = 0x03,
+ OPC2_32_RCR_MADDS_32 = 0x05,
+ OPC2_32_RCR_MADDS_64 = 0x07,
+ OPC2_32_RCR_MADD_U_64 = 0x02,
+ OPC2_32_RCR_MADDS_U_32 = 0x04,
+ OPC2_32_RCR_MADDS_U_64 = 0x06,
+};
+/* OPCM_32_RCR_MSUB */
+enum {
+ OPC2_32_RCR_MSUB_32 = 0x01,
+ OPC2_32_RCR_MSUB_64 = 0x03,
+ OPC2_32_RCR_MSUBS_32 = 0x05,
+ OPC2_32_RCR_MSUBS_64 = 0x07,
+ OPC2_32_RCR_MSUB_U_64 = 0x02,
+ OPC2_32_RCR_MSUBS_U_32 = 0x04,
+ OPC2_32_RCR_MSUBS_U_64 = 0x06,
+};
+/*
+ * RCRW Format
+ */
+/* OPCM_32_RCRW_MASK_INSERT */
+enum {
+ OPC2_32_RCRW_IMASK = 0x01,
+ OPC2_32_RCRW_INSERT = 0x00,
+};
+
+/*
+ * RR Format
+ */
+/* OPCM_32_RR_LOGICAL_SHIFT */
+enum {
+ OPC2_32_RR_AND = 0x08,
+ OPC2_32_RR_ANDN = 0x0e,
+ OPC2_32_RR_CLO = 0x1c,
+ OPC2_32_RR_CLO_H = 0x7d,
+ OPC2_32_RR_CLS = 0x1d,
+ OPC2_32_RR_CLS_H = 0x7e,
+ OPC2_32_RR_CLZ = 0x1b,
+ OPC2_32_RR_CLZ_H = 0x7c,
+ OPC2_32_RR_NAND = 0x09,
+ OPC2_32_RR_NOR = 0x0b,
+ OPC2_32_RR_OR = 0x0a,
+ OPC2_32_RR_ORN = 0x0f,
+ OPC2_32_RR_SH = 0x00,
+ OPC2_32_RR_SH_H = 0x40,
+ OPC2_32_RR_SHA = 0x01,
+ OPC2_32_RR_SHA_H = 0x41,
+ OPC2_32_RR_SHAS = 0x02,
+ OPC2_32_RR_XNOR = 0x0d,
+ OPC2_32_RR_XOR = 0x0c,
+};
+/* OPCM_32_RR_ACCUMULATOR */
+enum {
+ OPC2_32_RR_ABS = 0x1c,
+ OPC2_32_RR_ABS_B = 0x5c,
+ OPC2_32_RR_ABS_H = 0x7c,
+ OPC2_32_RR_ABSDIF = 0x0e,
+ OPC2_32_RR_ABSDIF_B = 0x4e,
+ OPC2_32_RR_ABSDIF_H = 0x6e,
+ OPC2_32_RR_ABSDIFS = 0x0f,
+ OPC2_32_RR_ABSDIFS_H = 0x6f,
+ OPC2_32_RR_ABSS = 0x1d,
+ OPC2_32_RR_ABSS_H = 0x7d,
+ OPC2_32_RR_ADD = 0x00,
+ OPC2_32_RR_ADD_B = 0x40,
+ OPC2_32_RR_ADD_H = 0x60,
+ OPC2_32_RR_ADDC = 0x05,
+ OPC2_32_RR_ADDS = 0x02,
+ OPC2_32_RR_ADDS_H = 0x62,
+ OPC2_32_RR_ADDS_HU = 0x63,
+ OPC2_32_RR_ADDS_U = 0x03,
+ OPC2_32_RR_ADDX = 0x04,
+ OPC2_32_RR_AND_EQ = 0x20,
+ OPC2_32_RR_AND_GE = 0x24,
+ OPC2_32_RR_AND_GE_U = 0x25,
+ OPC2_32_RR_AND_LT = 0x22,
+ OPC2_32_RR_AND_LT_U = 0x23,
+ OPC2_32_RR_AND_NE = 0x21,
+ OPC2_32_RR_EQ = 0x10,
+ OPC2_32_RR_EQ_B = 0x50,
+ OPC2_32_RR_EQ_H = 0x70,
+ OPC2_32_RR_EQ_W = 0x90,
+ OPC2_32_RR_EQANY_B = 0x56,
+ OPC2_32_RR_EQANY_H = 0x76,
+ OPC2_32_RR_GE = 0x14,
+ OPC2_32_RR_GE_U = 0x15,
+ OPC2_32_RR_LT = 0x12,
+ OPC2_32_RR_LT_U = 0x13,
+ OPC2_32_RR_LT_B = 0x52,
+ OPC2_32_RR_LT_BU = 0x53,
+ OPC2_32_RR_LT_H = 0x72,
+ OPC2_32_RR_LT_HU = 0x73,
+ OPC2_32_RR_LT_W = 0x92,
+ OPC2_32_RR_LT_WU = 0x93,
+ OPC2_32_RR_MAX = 0x1a,
+ OPC2_32_RR_MAX_U = 0x1b,
+ OPC2_32_RR_MAX_B = 0x5a,
+ OPC2_32_RR_MAX_BU = 0x5b,
+ OPC2_32_RR_MAX_H = 0x7a,
+ OPC2_32_RR_MAX_HU = 0x7b,
+ OPC2_32_RR_MIN = 0x18,
+ OPC2_32_RR_MIN_U = 0x19,
+ OPC2_32_RR_MIN_B = 0x58,
+ OPC2_32_RR_MIN_BU = 0x59,
+ OPC2_32_RR_MIN_H = 0x78,
+ OPC2_32_RR_MIN_HU = 0x79,
+ OPC2_32_RR_MOV = 0x1f,
+ OPC2_32_RR_NE = 0x11,
+ OPC2_32_RR_OR_EQ = 0x27,
+ OPC2_32_RR_OR_GE = 0x2b,
+ OPC2_32_RR_OR_GE_U = 0x2c,
+ OPC2_32_RR_OR_LT = 0x29,
+ OPC2_32_RR_OR_LT_U = 0x2a,
+ OPC2_32_RR_OR_NE = 0x28,
+ OPC2_32_RR_SAT_B = 0x5e,
+ OPC2_32_RR_SAT_BU = 0x5f,
+ OPC2_32_RR_SAT_H = 0x7e,
+ OPC2_32_RR_SAT_HU = 0x7f,
+ OPC2_32_RR_SH_EQ = 0x37,
+ OPC2_32_RR_SH_GE = 0x3b,
+ OPC2_32_RR_SH_GE_U = 0x3c,
+ OPC2_32_RR_SH_LT = 0x39,
+ OPC2_32_RR_SH_LT_U = 0x3a,
+ OPC2_32_RR_SH_NE = 0x38,
+ OPC2_32_RR_SUB = 0x08,
+ OPC2_32_RR_SUB_B = 0x48,
+ OPC2_32_RR_SUB_H = 0x68,
+ OPC2_32_RR_SUBC = 0x0d,
+ OPC2_32_RR_SUBS = 0x0a,
+ OPC2_32_RR_SUBS_U = 0x0b,
+ OPC2_32_RR_SUBS_H = 0x6a,
+ OPC2_32_RR_SUBS_HU = 0x6b,
+ OPC2_32_RR_SUBX = 0x0c,
+ OPC2_32_RR_XOR_EQ = 0x2f,
+ OPC2_32_RR_XOR_GE = 0x33,
+ OPC2_32_RR_XOR_GE_U = 0x34,
+ OPC2_32_RR_XOR_LT = 0x31,
+ OPC2_32_RR_XOR_LT_U = 0x32,
+ OPC2_32_RR_XOR_NE = 0x30,
+};
+/* OPCM_32_RR_ADDRESS */
+enum {
+ OPC2_32_RR_ADD_A = 0x01,
+ OPC2_32_RR_ADDSC_A = 0x60,
+ OPC2_32_RR_ADDSC_AT = 0x62,
+ OPC2_32_RR_EQ_A = 0x40,
+ OPC2_32_RR_EQZ = 0x48,
+ OPC2_32_RR_GE_A = 0x43,
+ OPC2_32_RR_LT_A = 0x42,
+ OPC2_32_RR_MOV_A = 0x63,
+ OPC2_32_RR_MOV_AA = 0x00,
+ OPC2_32_RR_MOV_D = 0x4c,
+ OPC2_32_RR_NE_A = 0x41,
+ OPC2_32_RR_NEZ_A = 0x49,
+ OPC2_32_RR_SUB_A = 0x02,
+};
+/* OPCM_32_RR_DIVIDE */
+enum {
+ OPC2_32_RR_BMERGE = 0x01,
+ OPC2_32_RR_BSPLIT = 0x09,
+ OPC2_32_RR_DVINIT_B = 0x5a,
+ OPC2_32_RR_DVINIT_BU = 0x4a,
+ OPC2_32_RR_DVINIT_H = 0x3a,
+ OPC2_32_RR_DVINIT_HU = 0x2a,
+ OPC2_32_RR_DVINIT = 0x1a,
+ OPC2_32_RR_DVINIT_U = 0x0a,
+ OPC2_32_RR_PARITY = 0x02,
+ OPC2_32_RR_UNPACK = 0x08,
+ OPC2_32_RR_CRC32 = 0x03,
+ OPC2_32_RR_DIV = 0x20,
+ OPC2_32_RR_DIV_U = 0x21,
+ OPC2_32_RR_MUL_F = 0x04,
+ OPC2_32_RR_DIV_F = 0x05,
+ OPC2_32_RR_FTOI = 0x10,
+ OPC2_32_RR_ITOF = 0x14,
+ OPC2_32_RR_CMP_F = 0x00,
+ OPC2_32_RR_FTOIZ = 0x13,
+ OPC2_32_RR_FTOQ31 = 0x11,
+ OPC2_32_RR_FTOQ31Z = 0x18,
+ OPC2_32_RR_FTOU = 0x12,
+ OPC2_32_RR_FTOUZ = 0x17,
+ OPC2_32_RR_Q31TOF = 0x15,
+ OPC2_32_RR_QSEED_F = 0x19,
+ OPC2_32_RR_UPDFL = 0x0c,
+ OPC2_32_RR_UTOF = 0x16,
+};
+/* OPCM_32_RR_IDIRECT */
+enum {
+ OPC2_32_RR_JI = 0x03,
+ OPC2_32_RR_JLI = 0x02,
+ OPC2_32_RR_CALLI = 0x00,
+ OPC2_32_RR_FCALLI = 0x01,
+};
+/*
+ * RR1 Format
+ */
+/* OPCM_32_RR1_MUL */
+enum {
+ OPC2_32_RR1_MUL_H_32_LL = 0x1a,
+ OPC2_32_RR1_MUL_H_32_LU = 0x19,
+ OPC2_32_RR1_MUL_H_32_UL = 0x18,
+ OPC2_32_RR1_MUL_H_32_UU = 0x1b,
+ OPC2_32_RR1_MULM_H_64_LL = 0x1e,
+ OPC2_32_RR1_MULM_H_64_LU = 0x1d,
+ OPC2_32_RR1_MULM_H_64_UL = 0x1c,
+ OPC2_32_RR1_MULM_H_64_UU = 0x1f,
+ OPC2_32_RR1_MULR_H_16_LL = 0x0e,
+ OPC2_32_RR1_MULR_H_16_LU = 0x0d,
+ OPC2_32_RR1_MULR_H_16_UL = 0x0c,
+ OPC2_32_RR1_MULR_H_16_UU = 0x0f,
+};
+/* OPCM_32_RR1_MULQ */
+enum {
+ OPC2_32_RR1_MUL_Q_32 = 0x02,
+ OPC2_32_RR1_MUL_Q_64 = 0x1b,
+ OPC2_32_RR1_MUL_Q_32_L = 0x01,
+ OPC2_32_RR1_MUL_Q_64_L = 0x19,
+ OPC2_32_RR1_MUL_Q_32_U = 0x00,
+ OPC2_32_RR1_MUL_Q_64_U = 0x18,
+ OPC2_32_RR1_MUL_Q_32_LL = 0x05,
+ OPC2_32_RR1_MUL_Q_32_UU = 0x04,
+ OPC2_32_RR1_MULR_Q_32_L = 0x07,
+ OPC2_32_RR1_MULR_Q_32_U = 0x06,
+};
+/*
+ * RR2 Format
+ */
+/* OPCM_32_RR2_MUL */
+enum {
+ OPC2_32_RR2_MUL_32 = 0x0a,
+ OPC2_32_RR2_MUL_64 = 0x6a,
+ OPC2_32_RR2_MULS_32 = 0x8a,
+ OPC2_32_RR2_MUL_U_64 = 0x68,
+ OPC2_32_RR2_MULS_U_32 = 0x88,
+};
+/*
+ * RRPW Format
+ */
+/* OPCM_32_RRPW_EXTRACT_INSERT */
+enum {
+
+ OPC2_32_RRPW_EXTR = 0x02,
+ OPC2_32_RRPW_EXTR_U = 0x03,
+ OPC2_32_RRPW_IMASK = 0x01,
+ OPC2_32_RRPW_INSERT = 0x00,
+};
+/*
+ * RRR Format
+ */
+/* OPCM_32_RRR_COND_SELECT */
+enum {
+ OPC2_32_RRR_CADD = 0x00,
+ OPC2_32_RRR_CADDN = 0x01,
+ OPC2_32_RRR_CSUB = 0x02,
+ OPC2_32_RRR_CSUBN = 0x03,
+ OPC2_32_RRR_SEL = 0x04,
+ OPC2_32_RRR_SELN = 0x05,
+};
+/* OPCM_32_RRR_DIVIDE */
+enum {
+ OPC2_32_RRR_DVADJ = 0x0d,
+ OPC2_32_RRR_DVSTEP = 0x0f,
+ OPC2_32_RRR_DVSTEP_U = 0x0e,
+ OPC2_32_RRR_IXMAX = 0x0a,
+ OPC2_32_RRR_IXMAX_U = 0x0b,
+ OPC2_32_RRR_IXMIN = 0x08,
+ OPC2_32_RRR_IXMIN_U = 0x09,
+ OPC2_32_RRR_PACK = 0x00,
+ OPC2_32_RRR_ADD_F = 0x02,
+ OPC2_32_RRR_SUB_F = 0x03,
+ OPC2_32_RRR_MADD_F = 0x06,
+ OPC2_32_RRR_MSUB_F = 0x07,
+};
+/*
+ * RRR1 Format
+ */
+/* OPCM_32_RRR1_MADD */
+enum {
+ OPC2_32_RRR1_MADD_H_LL = 0x1a,
+ OPC2_32_RRR1_MADD_H_LU = 0x19,
+ OPC2_32_RRR1_MADD_H_UL = 0x18,
+ OPC2_32_RRR1_MADD_H_UU = 0x1b,
+ OPC2_32_RRR1_MADDS_H_LL = 0x3a,
+ OPC2_32_RRR1_MADDS_H_LU = 0x39,
+ OPC2_32_RRR1_MADDS_H_UL = 0x38,
+ OPC2_32_RRR1_MADDS_H_UU = 0x3b,
+ OPC2_32_RRR1_MADDM_H_LL = 0x1e,
+ OPC2_32_RRR1_MADDM_H_LU = 0x1d,
+ OPC2_32_RRR1_MADDM_H_UL = 0x1c,
+ OPC2_32_RRR1_MADDM_H_UU = 0x1f,
+ OPC2_32_RRR1_MADDMS_H_LL = 0x3e,
+ OPC2_32_RRR1_MADDMS_H_LU = 0x3d,
+ OPC2_32_RRR1_MADDMS_H_UL = 0x3c,
+ OPC2_32_RRR1_MADDMS_H_UU = 0x3f,
+ OPC2_32_RRR1_MADDR_H_LL = 0x0e,
+ OPC2_32_RRR1_MADDR_H_LU = 0x0d,
+ OPC2_32_RRR1_MADDR_H_UL = 0x0c,
+ OPC2_32_RRR1_MADDR_H_UU = 0x0f,
+ OPC2_32_RRR1_MADDRS_H_LL = 0x2e,
+ OPC2_32_RRR1_MADDRS_H_LU = 0x2d,
+ OPC2_32_RRR1_MADDRS_H_UL = 0x2c,
+ OPC2_32_RRR1_MADDRS_H_UU = 0x2f,
+};
+/* OPCM_32_RRR1_MADDQ_H */
+enum {
+ OPC2_32_RRR1_MADD_Q_32 = 0x02,
+ OPC2_32_RRR1_MADD_Q_64 = 0x1b,
+ OPC2_32_RRR1_MADD_Q_32_L = 0x01,
+ OPC2_32_RRR1_MADD_Q_64_L = 0x19,
+ OPC2_32_RRR1_MADD_Q_32_U = 0x00,
+ OPC2_32_RRR1_MADD_Q_64_U = 0x18,
+ OPC2_32_RRR1_MADD_Q_32_LL = 0x05,
+ OPC2_32_RRR1_MADD_Q_64_LL = 0x1d,
+ OPC2_32_RRR1_MADD_Q_32_UU = 0x04,
+ OPC2_32_RRR1_MADD_Q_64_UU = 0x1c,
+ OPC2_32_RRR1_MADDS_Q_32 = 0x22,
+ OPC2_32_RRR1_MADDS_Q_64 = 0x3b,
+ OPC2_32_RRR1_MADDS_Q_32_L = 0x21,
+ OPC2_32_RRR1_MADDS_Q_64_L = 0x39,
+ OPC2_32_RRR1_MADDS_Q_32_U = 0x20,
+ OPC2_32_RRR1_MADDS_Q_64_U = 0x38,
+ OPC2_32_RRR1_MADDS_Q_32_LL = 0x25,
+ OPC2_32_RRR1_MADDS_Q_64_LL = 0x3d,
+ OPC2_32_RRR1_MADDS_Q_32_UU = 0x24,
+ OPC2_32_RRR1_MADDS_Q_64_UU = 0x3c,
+ OPC2_32_RRR1_MADDR_H_64_UL = 0x1e,
+ OPC2_32_RRR1_MADDRS_H_64_UL = 0x3e,
+ OPC2_32_RRR1_MADDR_Q_32_LL = 0x07,
+ OPC2_32_RRR1_MADDR_Q_32_UU = 0x06,
+ OPC2_32_RRR1_MADDRS_Q_32_LL = 0x27,
+ OPC2_32_RRR1_MADDRS_Q_32_UU = 0x26,
+};
+/* OPCM_32_RRR1_MADDSU_H */
+enum {
+ OPC2_32_RRR1_MADDSU_H_32_LL = 0x1a,
+ OPC2_32_RRR1_MADDSU_H_32_LU = 0x19,
+ OPC2_32_RRR1_MADDSU_H_32_UL = 0x18,
+ OPC2_32_RRR1_MADDSU_H_32_UU = 0x1b,
+ OPC2_32_RRR1_MADDSUS_H_32_LL = 0x3a,
+ OPC2_32_RRR1_MADDSUS_H_32_LU = 0x39,
+ OPC2_32_RRR1_MADDSUS_H_32_UL = 0x38,
+ OPC2_32_RRR1_MADDSUS_H_32_UU = 0x3b,
+ OPC2_32_RRR1_MADDSUM_H_64_LL = 0x1e,
+ OPC2_32_RRR1_MADDSUM_H_64_LU = 0x1d,
+ OPC2_32_RRR1_MADDSUM_H_64_UL = 0x1c,
+ OPC2_32_RRR1_MADDSUM_H_64_UU = 0x1f,
+ OPC2_32_RRR1_MADDSUMS_H_64_LL = 0x3e,
+ OPC2_32_RRR1_MADDSUMS_H_64_LU = 0x3d,
+ OPC2_32_RRR1_MADDSUMS_H_64_UL = 0x3c,
+ OPC2_32_RRR1_MADDSUMS_H_64_UU = 0x3f,
+ OPC2_32_RRR1_MADDSUR_H_16_LL = 0x0e,
+ OPC2_32_RRR1_MADDSUR_H_16_LU = 0x0d,
+ OPC2_32_RRR1_MADDSUR_H_16_UL = 0x0c,
+ OPC2_32_RRR1_MADDSUR_H_16_UU = 0x0f,
+ OPC2_32_RRR1_MADDSURS_H_16_LL = 0x2e,
+ OPC2_32_RRR1_MADDSURS_H_16_LU = 0x2d,
+ OPC2_32_RRR1_MADDSURS_H_16_UL = 0x2c,
+ OPC2_32_RRR1_MADDSURS_H_16_UU = 0x2f,
+};
+/* OPCM_32_RRR1_MSUB_H */
+enum {
+ OPC2_32_RRR1_MSUB_H_LL = 0x1a,
+ OPC2_32_RRR1_MSUB_H_LU = 0x19,
+ OPC2_32_RRR1_MSUB_H_UL = 0x18,
+ OPC2_32_RRR1_MSUB_H_UU = 0x1b,
+ OPC2_32_RRR1_MSUBS_H_LL = 0x3a,
+ OPC2_32_RRR1_MSUBS_H_LU = 0x39,
+ OPC2_32_RRR1_MSUBS_H_UL = 0x38,
+ OPC2_32_RRR1_MSUBS_H_UU = 0x3b,
+ OPC2_32_RRR1_MSUBM_H_LL = 0x1e,
+ OPC2_32_RRR1_MSUBM_H_LU = 0x1d,
+ OPC2_32_RRR1_MSUBM_H_UL = 0x1c,
+ OPC2_32_RRR1_MSUBM_H_UU = 0x1f,
+ OPC2_32_RRR1_MSUBMS_H_LL = 0x3e,
+ OPC2_32_RRR1_MSUBMS_H_LU = 0x3d,
+ OPC2_32_RRR1_MSUBMS_H_UL = 0x3c,
+ OPC2_32_RRR1_MSUBMS_H_UU = 0x3f,
+ OPC2_32_RRR1_MSUBR_H_LL = 0x0e,
+ OPC2_32_RRR1_MSUBR_H_LU = 0x0d,
+ OPC2_32_RRR1_MSUBR_H_UL = 0x0c,
+ OPC2_32_RRR1_MSUBR_H_UU = 0x0f,
+ OPC2_32_RRR1_MSUBRS_H_LL = 0x2e,
+ OPC2_32_RRR1_MSUBRS_H_LU = 0x2d,
+ OPC2_32_RRR1_MSUBRS_H_UL = 0x2c,
+ OPC2_32_RRR1_MSUBRS_H_UU = 0x2f,
+};
+/* OPCM_32_RRR1_MSUB_Q */
+enum {
+ OPC2_32_RRR1_MSUB_Q_32 = 0x02,
+ OPC2_32_RRR1_MSUB_Q_64 = 0x1b,
+ OPC2_32_RRR1_MSUB_Q_32_L = 0x01,
+ OPC2_32_RRR1_MSUB_Q_64_L = 0x19,
+ OPC2_32_RRR1_MSUB_Q_32_U = 0x00,
+ OPC2_32_RRR1_MSUB_Q_64_U = 0x18,
+ OPC2_32_RRR1_MSUB_Q_32_LL = 0x05,
+ OPC2_32_RRR1_MSUB_Q_64_LL = 0x1d,
+ OPC2_32_RRR1_MSUB_Q_32_UU = 0x04,
+ OPC2_32_RRR1_MSUB_Q_64_UU = 0x1c,
+ OPC2_32_RRR1_MSUBS_Q_32 = 0x22,
+ OPC2_32_RRR1_MSUBS_Q_64 = 0x3b,
+ OPC2_32_RRR1_MSUBS_Q_32_L = 0x21,
+ OPC2_32_RRR1_MSUBS_Q_64_L = 0x39,
+ OPC2_32_RRR1_MSUBS_Q_32_U = 0x20,
+ OPC2_32_RRR1_MSUBS_Q_64_U = 0x38,
+ OPC2_32_RRR1_MSUBS_Q_32_LL = 0x25,
+ OPC2_32_RRR1_MSUBS_Q_64_LL = 0x3d,
+ OPC2_32_RRR1_MSUBS_Q_32_UU = 0x24,
+ OPC2_32_RRR1_MSUBS_Q_64_UU = 0x3c,
+ OPC2_32_RRR1_MSUBR_H_64_UL = 0x1e,
+ OPC2_32_RRR1_MSUBRS_H_64_UL = 0x3e,
+ OPC2_32_RRR1_MSUBR_Q_32_LL = 0x07,
+ OPC2_32_RRR1_MSUBR_Q_32_UU = 0x06,
+ OPC2_32_RRR1_MSUBRS_Q_32_LL = 0x27,
+ OPC2_32_RRR1_MSUBRS_Q_32_UU = 0x26,
+};
+/* OPCM_32_RRR1_MSUBAD_H */
+enum {
+ OPC2_32_RRR1_MSUBAD_H_32_LL = 0x1a,
+ OPC2_32_RRR1_MSUBAD_H_32_LU = 0x19,
+ OPC2_32_RRR1_MSUBAD_H_32_UL = 0x18,
+ OPC2_32_RRR1_MSUBAD_H_32_UU = 0x1b,
+ OPC2_32_RRR1_MSUBADS_H_32_LL = 0x3a,
+ OPC2_32_RRR1_MSUBADS_H_32_LU = 0x39,
+ OPC2_32_RRR1_MSUBADS_H_32_UL = 0x38,
+ OPC2_32_RRR1_MSUBADS_H_32_UU = 0x3b,
+ OPC2_32_RRR1_MSUBADM_H_64_LL = 0x1e,
+ OPC2_32_RRR1_MSUBADM_H_64_LU = 0x1d,
+ OPC2_32_RRR1_MSUBADM_H_64_UL = 0x1c,
+ OPC2_32_RRR1_MSUBADM_H_64_UU = 0x1f,
+ OPC2_32_RRR1_MSUBADMS_H_64_LL = 0x3e,
+ OPC2_32_RRR1_MSUBADMS_H_64_LU = 0x3d,
+ OPC2_32_RRR1_MSUBADMS_H_64_UL = 0x3c,
+ OPC2_32_RRR1_MSUBADMS_H_64_UU = 0x3f,
+ OPC2_32_RRR1_MSUBADR_H_16_LL = 0x0e,
+ OPC2_32_RRR1_MSUBADR_H_16_LU = 0x0d,
+ OPC2_32_RRR1_MSUBADR_H_16_UL = 0x0c,
+ OPC2_32_RRR1_MSUBADR_H_16_UU = 0x0f,
+ OPC2_32_RRR1_MSUBADRS_H_16_LL = 0x2e,
+ OPC2_32_RRR1_MSUBADRS_H_16_LU = 0x2d,
+ OPC2_32_RRR1_MSUBADRS_H_16_UL = 0x2c,
+ OPC2_32_RRR1_MSUBADRS_H_16_UU = 0x2f,
+};
+/*
+ * RRR2 Format
+ */
+/* OPCM_32_RRR2_MADD */
+enum {
+ OPC2_32_RRR2_MADD_32 = 0x0a,
+ OPC2_32_RRR2_MADD_64 = 0x6a,
+ OPC2_32_RRR2_MADDS_32 = 0x8a,
+ OPC2_32_RRR2_MADDS_64 = 0xea,
+ OPC2_32_RRR2_MADD_U_64 = 0x68,
+ OPC2_32_RRR2_MADDS_U_32 = 0x88,
+ OPC2_32_RRR2_MADDS_U_64 = 0xe8,
+};
+/* OPCM_32_RRR2_MSUB */
+enum {
+ OPC2_32_RRR2_MSUB_32 = 0x0a,
+ OPC2_32_RRR2_MSUB_64 = 0x6a,
+ OPC2_32_RRR2_MSUBS_32 = 0x8a,
+ OPC2_32_RRR2_MSUBS_64 = 0xea,
+ OPC2_32_RRR2_MSUB_U_64 = 0x68,
+ OPC2_32_RRR2_MSUBS_U_32 = 0x88,
+ OPC2_32_RRR2_MSUBS_U_64 = 0xe8,
+};
+/*
+ * RRRR Format
+ */
+/* OPCM_32_RRRR_EXTRACT_INSERT */
+enum {
+ OPC2_32_RRRR_DEXTR = 0x04,
+ OPC2_32_RRRR_EXTR = 0x02,
+ OPC2_32_RRRR_EXTR_U = 0x03,
+ OPC2_32_RRRR_INSERT = 0x00,
+};
+/*
+ * RRRW Format
+ */
+/* OPCM_32_RRRW_EXTRACT_INSERT */
+enum {
+ OPC2_32_RRRW_EXTR = 0x02,
+ OPC2_32_RRRW_EXTR_U = 0x03,
+ OPC2_32_RRRW_IMASK = 0x01,
+ OPC2_32_RRRW_INSERT = 0x00,
+};
+/*
+ * SYS Format
+ */
+/* OPCM_32_SYS_INTERRUPTS */
+enum {
+ OPC2_32_SYS_DEBUG = 0x04,
+ OPC2_32_SYS_DISABLE = 0x0d,
+ OPC2_32_SYS_DSYNC = 0x12,
+ OPC2_32_SYS_ENABLE = 0x0c,
+ OPC2_32_SYS_ISYNC = 0x13,
+ OPC2_32_SYS_NOP = 0x00,
+ OPC2_32_SYS_RET = 0x06,
+ OPC2_32_SYS_RFE = 0x07,
+ OPC2_32_SYS_RFM = 0x05,
+ OPC2_32_SYS_RSLCX = 0x09,
+ OPC2_32_SYS_SVLCX = 0x08,
+ OPC2_32_SYS_TRAPSV = 0x15,
+ OPC2_32_SYS_TRAPV = 0x14,
+ OPC2_32_SYS_RESTORE = 0x0e,
+ OPC2_32_SYS_FRET = 0x03,
+};
diff --git a/target/unicore32/Makefile.objs b/target/unicore32/Makefile.objs
new file mode 100644
index 0000000000..6b41b1e9ef
--- /dev/null
+++ b/target/unicore32/Makefile.objs
@@ -0,0 +1,4 @@
+obj-y += translate.o op_helper.o helper.o cpu.o
+obj-y += ucf64_helper.o
+
+obj-$(CONFIG_SOFTMMU) += softmmu.o
diff --git a/target/unicore32/cpu-qom.h b/target/unicore32/cpu-qom.h
new file mode 100644
index 0000000000..bc68e78045
--- /dev/null
+++ b/target/unicore32/cpu-qom.h
@@ -0,0 +1,41 @@
+/*
+ * QEMU UniCore32 CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation, or (at your option) any
+ * later version. See the COPYING file in the top-level directory.
+ */
+#ifndef QEMU_UC32_CPU_QOM_H
+#define QEMU_UC32_CPU_QOM_H
+
+#include "qom/cpu.h"
+
+#define TYPE_UNICORE32_CPU "unicore32-cpu"
+
+#define UNICORE32_CPU_CLASS(klass) \
+ OBJECT_CLASS_CHECK(UniCore32CPUClass, (klass), TYPE_UNICORE32_CPU)
+#define UNICORE32_CPU(obj) \
+ OBJECT_CHECK(UniCore32CPU, (obj), TYPE_UNICORE32_CPU)
+#define UNICORE32_CPU_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(UniCore32CPUClass, (obj), TYPE_UNICORE32_CPU)
+
+/**
+ * UniCore32CPUClass:
+ * @parent_realize: The parent class' realize handler.
+ *
+ * A UniCore32 CPU model.
+ */
+typedef struct UniCore32CPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+} UniCore32CPUClass;
+
+typedef struct UniCore32CPU UniCore32CPU;
+
+#endif
diff --git a/target/unicore32/cpu.c b/target/unicore32/cpu.c
new file mode 100644
index 0000000000..c169972b59
--- /dev/null
+++ b/target/unicore32/cpu.c
@@ -0,0 +1,204 @@
+/*
+ * QEMU UniCore32 CPU
+ *
+ * Copyright (c) 2010-2012 Guan Xuetao
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Contributions from 2012-04-01 on are considered under GPL version 2,
+ * or (at your option) any later version.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "qemu-common.h"
+#include "migration/vmstate.h"
+#include "exec/exec-all.h"
+
+static void uc32_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ UniCore32CPU *cpu = UNICORE32_CPU(cs);
+
+ cpu->env.regs[31] = value;
+}
+
+static bool uc32_cpu_has_work(CPUState *cs)
+{
+ return cs->interrupt_request &
+ (CPU_INTERRUPT_HARD | CPU_INTERRUPT_EXITTB);
+}
+
+static inline void set_feature(CPUUniCore32State *env, int feature)
+{
+ env->features |= feature;
+}
+
+/* CPU models */
+
+static ObjectClass *uc32_cpu_class_by_name(const char *cpu_model)
+{
+ ObjectClass *oc;
+ char *typename;
+
+ if (cpu_model == NULL) {
+ return NULL;
+ }
+
+ typename = g_strdup_printf("%s-" TYPE_UNICORE32_CPU, cpu_model);
+ oc = object_class_by_name(typename);
+ g_free(typename);
+ if (oc != NULL && (!object_class_dynamic_cast(oc, TYPE_UNICORE32_CPU) ||
+ object_class_is_abstract(oc))) {
+ oc = NULL;
+ }
+ return oc;
+}
+
+typedef struct UniCore32CPUInfo {
+ const char *name;
+ void (*instance_init)(Object *obj);
+} UniCore32CPUInfo;
+
+static void unicore_ii_cpu_initfn(Object *obj)
+{
+ UniCore32CPU *cpu = UNICORE32_CPU(obj);
+ CPUUniCore32State *env = &cpu->env;
+
+ env->cp0.c0_cpuid = 0x4d000863;
+ env->cp0.c0_cachetype = 0x0d152152;
+ env->cp0.c1_sys = 0x2000;
+ env->cp0.c2_base = 0x0;
+ env->cp0.c3_faultstatus = 0x0;
+ env->cp0.c4_faultaddr = 0x0;
+ env->ucf64.xregs[UC32_UCF64_FPSCR] = 0;
+
+ set_feature(env, UC32_HWCAP_CMOV);
+ set_feature(env, UC32_HWCAP_UCF64);
+ set_snan_bit_is_one(1, &env->ucf64.fp_status);
+}
+
+static void uc32_any_cpu_initfn(Object *obj)
+{
+ UniCore32CPU *cpu = UNICORE32_CPU(obj);
+ CPUUniCore32State *env = &cpu->env;
+
+ env->cp0.c0_cpuid = 0xffffffff;
+ env->ucf64.xregs[UC32_UCF64_FPSCR] = 0;
+
+ set_feature(env, UC32_HWCAP_CMOV);
+ set_feature(env, UC32_HWCAP_UCF64);
+ set_snan_bit_is_one(1, &env->ucf64.fp_status);
+}
+
+static const UniCore32CPUInfo uc32_cpus[] = {
+ { .name = "UniCore-II", .instance_init = unicore_ii_cpu_initfn },
+ { .name = "any", .instance_init = uc32_any_cpu_initfn },
+};
+
+static void uc32_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ UniCore32CPUClass *ucc = UNICORE32_CPU_GET_CLASS(dev);
+ Error *local_err = NULL;
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ qemu_init_vcpu(cs);
+
+ ucc->parent_realize(dev, errp);
+}
+
+static void uc32_cpu_initfn(Object *obj)
+{
+ CPUState *cs = CPU(obj);
+ UniCore32CPU *cpu = UNICORE32_CPU(obj);
+ CPUUniCore32State *env = &cpu->env;
+ static bool inited;
+
+ cs->env_ptr = env;
+
+#ifdef CONFIG_USER_ONLY
+ env->uncached_asr = ASR_MODE_USER;
+ env->regs[31] = 0;
+#else
+ env->uncached_asr = ASR_MODE_PRIV;
+ env->regs[31] = 0x03000000;
+#endif
+
+ tlb_flush(cs, 1);
+
+ if (tcg_enabled() && !inited) {
+ inited = true;
+ uc32_translate_init();
+ }
+}
+
+static const VMStateDescription vmstate_uc32_cpu = {
+ .name = "cpu",
+ .unmigratable = 1,
+};
+
+static void uc32_cpu_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(oc);
+ UniCore32CPUClass *ucc = UNICORE32_CPU_CLASS(oc);
+
+ ucc->parent_realize = dc->realize;
+ dc->realize = uc32_cpu_realizefn;
+
+ cc->class_by_name = uc32_cpu_class_by_name;
+ cc->has_work = uc32_cpu_has_work;
+ cc->do_interrupt = uc32_cpu_do_interrupt;
+ cc->cpu_exec_interrupt = uc32_cpu_exec_interrupt;
+ cc->dump_state = uc32_cpu_dump_state;
+ cc->set_pc = uc32_cpu_set_pc;
+#ifdef CONFIG_USER_ONLY
+ cc->handle_mmu_fault = uc32_cpu_handle_mmu_fault;
+#else
+ cc->get_phys_page_debug = uc32_cpu_get_phys_page_debug;
+#endif
+ dc->vmsd = &vmstate_uc32_cpu;
+}
+
+static void uc32_register_cpu_type(const UniCore32CPUInfo *info)
+{
+ TypeInfo type_info = {
+ .parent = TYPE_UNICORE32_CPU,
+ .instance_init = info->instance_init,
+ };
+
+ type_info.name = g_strdup_printf("%s-" TYPE_UNICORE32_CPU, info->name);
+ type_register(&type_info);
+ g_free((void *)type_info.name);
+}
+
+static const TypeInfo uc32_cpu_type_info = {
+ .name = TYPE_UNICORE32_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(UniCore32CPU),
+ .instance_init = uc32_cpu_initfn,
+ .abstract = true,
+ .class_size = sizeof(UniCore32CPUClass),
+ .class_init = uc32_cpu_class_init,
+};
+
+static void uc32_cpu_register_types(void)
+{
+ int i;
+
+ type_register_static(&uc32_cpu_type_info);
+ for (i = 0; i < ARRAY_SIZE(uc32_cpus); i++) {
+ uc32_register_cpu_type(&uc32_cpus[i]);
+ }
+}
+
+type_init(uc32_cpu_register_types)
diff --git a/target/unicore32/cpu.h b/target/unicore32/cpu.h
new file mode 100644
index 0000000000..7b5b405e79
--- /dev/null
+++ b/target/unicore32/cpu.h
@@ -0,0 +1,188 @@
+/*
+ * UniCore32 virtual CPU header
+ *
+ * Copyright (C) 2010-2012 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation, or (at your option) any
+ * later version. See the COPYING file in the top-level directory.
+ */
+
+#ifndef UNICORE32_CPU_H
+#define UNICORE32_CPU_H
+
+#define TARGET_LONG_BITS 32
+#define TARGET_PAGE_BITS 12
+
+#define TARGET_PHYS_ADDR_SPACE_BITS 32
+#define TARGET_VIRT_ADDR_SPACE_BITS 32
+
+#define CPUArchState struct CPUUniCore32State
+
+#include "qemu-common.h"
+#include "cpu-qom.h"
+#include "exec/cpu-defs.h"
+#include "fpu/softfloat.h"
+
+#define NB_MMU_MODES 2
+
+typedef struct CPUUniCore32State {
+ /* Regs for current mode. */
+ uint32_t regs[32];
+    /* Frequently accessed ASR bits are stored separately for efficiency.
+ This contains all the other bits. Use asr_{read,write} to access
+ the whole ASR. */
+ uint32_t uncached_asr;
+ uint32_t bsr;
+
+ /* Banked registers. */
+ uint32_t banked_bsr[6];
+ uint32_t banked_r29[6];
+ uint32_t banked_r30[6];
+
+ /* asr flag cache for faster execution */
+ uint32_t CF; /* 0 or 1 */
+    uint32_t VF; /* V is bit 31. All other bits are undefined. */
+ uint32_t NF; /* N is bit 31. All other bits are undefined. */
+ uint32_t ZF; /* Z set if zero. */
+
+ /* System control coprocessor (cp0) */
+ struct {
+ uint32_t c0_cpuid;
+ uint32_t c0_cachetype;
+ uint32_t c1_sys; /* System control register. */
+ uint32_t c2_base; /* MMU translation table base. */
+ uint32_t c3_faultstatus; /* Fault status registers. */
+ uint32_t c4_faultaddr; /* Fault address registers. */
+ uint32_t c5_cacheop; /* Cache operation registers. */
+ uint32_t c6_tlbop; /* TLB operation registers. */
+ } cp0;
+
+ /* UniCore-F64 coprocessor state. */
+ struct {
+ float64 regs[16];
+ uint32_t xregs[32];
+ float_status fp_status;
+ } ucf64;
+
+ CPU_COMMON
+
+ /* Internal CPU feature flags. */
+ uint32_t features;
+
+} CPUUniCore32State;
+
+/**
+ * UniCore32CPU:
+ * @env: #CPUUniCore32State
+ *
+ * A UniCore32 CPU.
+ */
+struct UniCore32CPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPUUniCore32State env;
+};
+
+static inline UniCore32CPU *uc32_env_get_cpu(CPUUniCore32State *env)
+{
+ return container_of(env, UniCore32CPU, env);
+}
+
+#define ENV_GET_CPU(e) CPU(uc32_env_get_cpu(e))
+
+#define ENV_OFFSET offsetof(UniCore32CPU, env)
+
+void uc32_cpu_do_interrupt(CPUState *cpu);
+bool uc32_cpu_exec_interrupt(CPUState *cpu, int int_req);
+void uc32_cpu_dump_state(CPUState *cpu, FILE *f,
+ fprintf_function cpu_fprintf, int flags);
+hwaddr uc32_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+
+#define ASR_M (0x1f)
+#define ASR_MODE_USER (0x10)
+#define ASR_MODE_INTR (0x12)
+#define ASR_MODE_PRIV (0x13)
+#define ASR_MODE_TRAP (0x17)
+#define ASR_MODE_EXTN (0x1b)
+#define ASR_MODE_SUSR (0x1f)
+#define ASR_I (1 << 7)
+#define ASR_V (1 << 28)
+#define ASR_C (1 << 29)
+#define ASR_Z (1 << 30)
+#define ASR_N (1 << 31)
+#define ASR_NZCV (ASR_N | ASR_Z | ASR_C | ASR_V)
+#define ASR_RESERVED (~(ASR_M | ASR_I | ASR_NZCV))
+
+#define UC32_EXCP_PRIV (1)
+#define UC32_EXCP_ITRAP (2)
+#define UC32_EXCP_DTRAP (3)
+#define UC32_EXCP_INTR (4)
+
+/* Return the current ASR value. */
+target_ulong cpu_asr_read(CPUUniCore32State *env1);
+/* Set the ASR. Note that some bits of mask must be all-set or all-clear. */
+void cpu_asr_write(CPUUniCore32State *env1, target_ulong val, target_ulong mask);
+
+/* UniCore-F64 system registers. */
+#define UC32_UCF64_FPSCR (31)
+#define UCF64_FPSCR_MASK (0x27ffffff)
+#define UCF64_FPSCR_RND_MASK (0x7)
+#define UCF64_FPSCR_RND(r) (((r) >> 0) & UCF64_FPSCR_RND_MASK)
+#define UCF64_FPSCR_TRAPEN_MASK (0x7f)
+#define UCF64_FPSCR_TRAPEN(r) (((r) >> 10) & UCF64_FPSCR_TRAPEN_MASK)
+#define UCF64_FPSCR_FLAG_MASK (0x3ff)
+#define UCF64_FPSCR_FLAG(r) (((r) >> 17) & UCF64_FPSCR_FLAG_MASK)
+#define UCF64_FPSCR_FLAG_ZERO (1 << 17)
+#define UCF64_FPSCR_FLAG_INFINITY (1 << 18)
+#define UCF64_FPSCR_FLAG_INVALID (1 << 19)
+#define UCF64_FPSCR_FLAG_UNDERFLOW (1 << 20)
+#define UCF64_FPSCR_FLAG_OVERFLOW (1 << 21)
+#define UCF64_FPSCR_FLAG_INEXACT (1 << 22)
+#define UCF64_FPSCR_FLAG_HUGEINT (1 << 23)
+#define UCF64_FPSCR_FLAG_DENORMAL (1 << 24)
+#define UCF64_FPSCR_FLAG_UNIMP (1 << 25)
+#define UCF64_FPSCR_FLAG_DIVZERO (1 << 26)
+
+#define UC32_HWCAP_CMOV 4 /* 1 << 2 */
+#define UC32_HWCAP_UCF64 8 /* 1 << 3 */
+
+#define cpu_signal_handler uc32_cpu_signal_handler
+
+int uc32_cpu_signal_handler(int host_signum, void *pinfo, void *puc);
+
+/* MMU modes definitions */
+#define MMU_MODE0_SUFFIX _kernel
+#define MMU_MODE1_SUFFIX _user
+#define MMU_USER_IDX 1
+static inline int cpu_mmu_index(CPUUniCore32State *env, bool ifetch)
+{
+ return (env->uncached_asr & ASR_M) == ASR_MODE_USER ? 1 : 0;
+}
+
+#include "exec/cpu-all.h"
+
+UniCore32CPU *uc32_cpu_init(const char *cpu_model);
+
+#define cpu_init(cpu_model) CPU(uc32_cpu_init(cpu_model))
+
+static inline void cpu_get_tb_cpu_state(CPUUniCore32State *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *flags)
+{
+ *pc = env->regs[31];
+ *cs_base = 0;
+ *flags = 0;
+ if ((env->uncached_asr & ASR_M) != ASR_MODE_USER) {
+ *flags |= (1 << 6);
+ }
+}
+
+int uc32_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
+ int mmu_idx);
+void uc32_translate_init(void);
+void switch_mode(CPUUniCore32State *, int);
+
+#endif /* UNICORE32_CPU_H */
diff --git a/target/unicore32/helper.c b/target/unicore32/helper.c
new file mode 100644
index 0000000000..d603bde237
--- /dev/null
+++ b/target/unicore32/helper.c
@@ -0,0 +1,263 @@
+/*
+ * Copyright (C) 2010-2012 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Contributions from 2012-04-01 on are considered under GPL version 2,
+ * or (at your option) any later version.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/gdbstub.h"
+#include "exec/helper-proto.h"
+#include "qemu/host-utils.h"
+#ifndef CONFIG_USER_ONLY
+#include "ui/console.h"
+#endif
+
+#undef DEBUG_UC32
+
+#ifdef DEBUG_UC32
+#define DPRINTF(fmt, ...) printf("%s: " fmt , __func__, ## __VA_ARGS__)
+#else
+#define DPRINTF(fmt, ...) do {} while (0)
+#endif
+
+UniCore32CPU *uc32_cpu_init(const char *cpu_model)
+{
+ return UNICORE32_CPU(cpu_generic_init(TYPE_UNICORE32_CPU, cpu_model));
+}
+
+uint32_t HELPER(clo)(uint32_t x)
+{
+ return clo32(x);
+}
+
+uint32_t HELPER(clz)(uint32_t x)
+{
+ return clz32(x);
+}
+
+#ifndef CONFIG_USER_ONLY
+void helper_cp0_set(CPUUniCore32State *env, uint32_t val, uint32_t creg,
+ uint32_t cop)
+{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
+
+ /*
+ * movc pp.nn, rn, #imm9
+ * rn: UCOP_REG_D
+ * nn: UCOP_REG_N
+ * 1: sys control reg.
+ * 2: page table base reg.
+ * 3: data fault status reg.
+ * 4: insn fault status reg.
+ * 5: cache op. reg.
+ * 6: tlb op. reg.
+     * imm9: split from UCOP_IMM10 (bit 5 is 0)
+ */
+ switch (creg) {
+ case 1:
+ if (cop != 0) {
+ goto unrecognized;
+ }
+ env->cp0.c1_sys = val;
+ break;
+ case 2:
+ if (cop != 0) {
+ goto unrecognized;
+ }
+ env->cp0.c2_base = val;
+ break;
+ case 3:
+ if (cop != 0) {
+ goto unrecognized;
+ }
+ env->cp0.c3_faultstatus = val;
+ break;
+ case 4:
+ if (cop != 0) {
+ goto unrecognized;
+ }
+ env->cp0.c4_faultaddr = val;
+ break;
+ case 5:
+ switch (cop) {
+ case 28:
+ DPRINTF("Invalidate Entire I&D cache\n");
+ return;
+ case 20:
+ DPRINTF("Invalidate Entire Icache\n");
+ return;
+ case 12:
+ DPRINTF("Invalidate Entire Dcache\n");
+ return;
+ case 10:
+ DPRINTF("Clean Entire Dcache\n");
+ return;
+ case 14:
+ DPRINTF("Flush Entire Dcache\n");
+ return;
+ case 13:
+ DPRINTF("Invalidate Dcache line\n");
+ return;
+ case 11:
+ DPRINTF("Clean Dcache line\n");
+ return;
+ case 15:
+ DPRINTF("Flush Dcache line\n");
+ return;
+ }
+ break;
+ case 6:
+ if ((cop <= 6) && (cop >= 2)) {
+            /* invalidate all TLB entries */
+ tlb_flush(CPU(cpu), 1);
+ return;
+ }
+ break;
+ default:
+ goto unrecognized;
+ }
+ return;
+unrecognized:
+ DPRINTF("Wrong register (%d) or wrong operation (%d) in cp0_set!\n",
+ creg, cop);
+}
+
+uint32_t helper_cp0_get(CPUUniCore32State *env, uint32_t creg, uint32_t cop)
+{
+ /*
+ * movc rd, pp.nn, #imm9
+ * rd: UCOP_REG_D
+ * nn: UCOP_REG_N
+ * 0: cpuid and cachetype
+ * 1: sys control reg.
+ * 2: page table base reg.
+ * 3: data fault status reg.
+ * 4: insn fault status reg.
+     * imm9: split from UCOP_IMM10 (bit 5 is 0)
+ */
+ switch (creg) {
+ case 0:
+ switch (cop) {
+ case 0:
+ return env->cp0.c0_cpuid;
+ case 1:
+ return env->cp0.c0_cachetype;
+ }
+ break;
+ case 1:
+ if (cop == 0) {
+ return env->cp0.c1_sys;
+ }
+ break;
+ case 2:
+ if (cop == 0) {
+ return env->cp0.c2_base;
+ }
+ break;
+ case 3:
+ if (cop == 0) {
+ return env->cp0.c3_faultstatus;
+ }
+ break;
+ case 4:
+ if (cop == 0) {
+ return env->cp0.c4_faultaddr;
+ }
+ break;
+ }
+    DPRINTF("Wrong register (%d) or wrong operation (%d) in cp0_get!\n",
+ creg, cop);
+ return 0;
+}
+
+#ifdef CONFIG_CURSES
+/*
+ * FIXME:
+ * 1. curses windows will be blank when switching back
+ * 1. the curses window will be blank when switching back
+ */
+static void putc_on_screen(unsigned char ch)
+{
+ static WINDOW *localwin;
+ static int init;
+
+ if (!init) {
+        /* Assume an 80 * 30 screen to keep the implementation minimal */
+ localwin = newwin(30, 80, 0, 0);
+ scrollok(localwin, TRUE);
+ init = TRUE;
+ }
+
+ if (isprint(ch)) {
+ wprintw(localwin, "%c", ch);
+ } else {
+ switch (ch) {
+ case '\n':
+ wprintw(localwin, "%c", ch);
+ break;
+ case '\r':
+            /* If '\r' is printed before '\n', the curses window destroys the
+             * last printed line; besides, '\n' already implies '\r' here. */
+ break;
+        default: /* Not handled, so just print its hex code */
+ wprintw(localwin, "-- 0x%x --", ch);
+ }
+ }
+
+ wrefresh(localwin);
+}
+#else
+#define putc_on_screen(c) do { } while (0)
+#endif
+
+void helper_cp1_putc(target_ulong x)
+{
+ putc_on_screen((unsigned char)x); /* Output to screen */
+ DPRINTF("%c", x); /* Output to stdout */
+}
+#endif
+
+#ifdef CONFIG_USER_ONLY
+void switch_mode(CPUUniCore32State *env, int mode)
+{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
+
+ if (mode != ASR_MODE_USER) {
+ cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
+ }
+}
+
+void uc32_cpu_do_interrupt(CPUState *cs)
+{
+ cpu_abort(cs, "NO interrupt in user mode\n");
+}
+
+int uc32_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
+ int access_type, int mmu_idx)
+{
+ cpu_abort(cs, "NO mmu fault in user mode\n");
+ return 1;
+}
+#endif
+
+bool uc32_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ if (interrupt_request & CPU_INTERRUPT_HARD) {
+ UniCore32CPU *cpu = UNICORE32_CPU(cs);
+ CPUUniCore32State *env = &cpu->env;
+
+ if (!(env->uncached_asr & ASR_I)) {
+ cs->exception_index = UC32_EXCP_INTR;
+ uc32_cpu_do_interrupt(cs);
+ return true;
+ }
+ }
+ return false;
+}
diff --git a/target/unicore32/helper.h b/target/unicore32/helper.h
new file mode 100644
index 0000000000..941813749d
--- /dev/null
+++ b/target/unicore32/helper.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2010-2012 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation, or (at your option) any
+ * later version. See the COPYING file in the top-level directory.
+ */
+
+#ifndef CONFIG_USER_ONLY
+DEF_HELPER_4(cp0_set, void, env, i32, i32, i32)
+DEF_HELPER_3(cp0_get, i32, env, i32, i32)
+DEF_HELPER_1(cp1_putc, void, i32)
+#endif
+
+DEF_HELPER_1(clz, i32, i32)
+DEF_HELPER_1(clo, i32, i32)
+
+DEF_HELPER_2(exception, void, env, i32)
+
+DEF_HELPER_3(asr_write, void, env, i32, i32)
+DEF_HELPER_1(asr_read, i32, env)
+
+DEF_HELPER_2(get_user_reg, i32, env, i32)
+DEF_HELPER_3(set_user_reg, void, env, i32, i32)
+
+DEF_HELPER_3(add_cc, i32, env, i32, i32)
+DEF_HELPER_3(adc_cc, i32, env, i32, i32)
+DEF_HELPER_3(sub_cc, i32, env, i32, i32)
+DEF_HELPER_3(sbc_cc, i32, env, i32, i32)
+
+DEF_HELPER_2(shl, i32, i32, i32)
+DEF_HELPER_2(shr, i32, i32, i32)
+DEF_HELPER_2(sar, i32, i32, i32)
+DEF_HELPER_3(shl_cc, i32, env, i32, i32)
+DEF_HELPER_3(shr_cc, i32, env, i32, i32)
+DEF_HELPER_3(sar_cc, i32, env, i32, i32)
+DEF_HELPER_3(ror_cc, i32, env, i32, i32)
+
+DEF_HELPER_1(ucf64_get_fpscr, i32, env)
+DEF_HELPER_2(ucf64_set_fpscr, void, env, i32)
+
+DEF_HELPER_3(ucf64_adds, f32, f32, f32, env)
+DEF_HELPER_3(ucf64_addd, f64, f64, f64, env)
+DEF_HELPER_3(ucf64_subs, f32, f32, f32, env)
+DEF_HELPER_3(ucf64_subd, f64, f64, f64, env)
+DEF_HELPER_3(ucf64_muls, f32, f32, f32, env)
+DEF_HELPER_3(ucf64_muld, f64, f64, f64, env)
+DEF_HELPER_3(ucf64_divs, f32, f32, f32, env)
+DEF_HELPER_3(ucf64_divd, f64, f64, f64, env)
+DEF_HELPER_1(ucf64_negs, f32, f32)
+DEF_HELPER_1(ucf64_negd, f64, f64)
+DEF_HELPER_1(ucf64_abss, f32, f32)
+DEF_HELPER_1(ucf64_absd, f64, f64)
+DEF_HELPER_4(ucf64_cmps, void, f32, f32, i32, env)
+DEF_HELPER_4(ucf64_cmpd, void, f64, f64, i32, env)
+
+DEF_HELPER_2(ucf64_sf2df, f64, f32, env)
+DEF_HELPER_2(ucf64_df2sf, f32, f64, env)
+
+DEF_HELPER_2(ucf64_si2sf, f32, f32, env)
+DEF_HELPER_2(ucf64_si2df, f64, f32, env)
+
+DEF_HELPER_2(ucf64_sf2si, f32, f32, env)
+DEF_HELPER_2(ucf64_df2si, f32, f64, env)
diff --git a/target/unicore32/op_helper.c b/target/unicore32/op_helper.c
new file mode 100644
index 0000000000..0872c29faa
--- /dev/null
+++ b/target/unicore32/op_helper.c
@@ -0,0 +1,261 @@
+/*
+ * UniCore32 helper routines
+ *
+ * Copyright (C) 2010-2012 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation, or (at your option) any
+ * later version. See the COPYING file in the top-level directory.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+
+#define SIGNBIT (uint32_t)0x80000000
+#define SIGNBIT64 ((uint64_t)1 << 63)
+
+void HELPER(exception)(CPUUniCore32State *env, uint32_t excp)
+{
+ CPUState *cs = CPU(uc32_env_get_cpu(env));
+
+ cs->exception_index = excp;
+ cpu_loop_exit(cs);
+}
+
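+/* asr_read() folds the split-out flag fields back into the ASR layout from
+ * cpu.h: NF bit 31 becomes N (bit 31), a zero ZF becomes Z (bit 30), CF
+ * becomes C (bit 29) and VF bit 31 becomes V (bit 28), on top of the mode
+ * and interrupt bits kept in uncached_asr. */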
+static target_ulong asr_read(CPUUniCore32State *env)
+{
+ int ZF;
+ ZF = (env->ZF == 0);
+ return env->uncached_asr | (env->NF & 0x80000000) | (ZF << 30) |
+ (env->CF << 29) | ((env->VF & 0x80000000) >> 3);
+}
+
+target_ulong cpu_asr_read(CPUUniCore32State *env)
+{
+ return asr_read(env);
+}
+
+target_ulong HELPER(asr_read)(CPUUniCore32State *env)
+{
+ return asr_read(env);
+}
+
+static void asr_write(CPUUniCore32State *env, target_ulong val,
+ target_ulong mask)
+{
+ if (mask & ASR_NZCV) {
+ env->ZF = (~val) & ASR_Z;
+ env->NF = val;
+ env->CF = (val >> 29) & 1;
+ env->VF = (val << 3) & 0x80000000;
+ }
+
+ if ((env->uncached_asr ^ val) & mask & ASR_M) {
+ switch_mode(env, val & ASR_M);
+ }
+ mask &= ~ASR_NZCV;
+ env->uncached_asr = (env->uncached_asr & ~mask) | (val & mask);
+}
+
+void cpu_asr_write(CPUUniCore32State *env, target_ulong val, target_ulong mask)
+{
+ asr_write(env, val, mask);
+}
+
+void HELPER(asr_write)(CPUUniCore32State *env, target_ulong val,
+ target_ulong mask)
+{
+ asr_write(env, val, mask);
+}
+
+/* Access to user mode registers from privileged modes. */
+uint32_t HELPER(get_user_reg)(CPUUniCore32State *env, uint32_t regno)
+{
+ uint32_t val;
+
+ if (regno == 29) {
+ val = env->banked_r29[0];
+ } else if (regno == 30) {
+ val = env->banked_r30[0];
+ } else {
+ val = env->regs[regno];
+ }
+ return val;
+}
+
+void HELPER(set_user_reg)(CPUUniCore32State *env, uint32_t regno, uint32_t val)
+{
+ if (regno == 29) {
+ env->banked_r29[0] = val;
+ } else if (regno == 30) {
+ env->banked_r30[0] = val;
+ } else {
+ env->regs[regno] = val;
+ }
+}
+
+/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
+ The only way to do that in TCG is a conditional branch, which clobbers
+ all our temporaries. For now implement these as helper functions. */
+
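+/* As a quick sanity check of the overflow rule used below: in add_cc with
+ * a = 0x7fffffff and b = 1, result = 0x80000000, so
+ * (a ^ b ^ -1) & (a ^ result) = 0x80000001; bit 31 is set and VF correctly
+ * reports signed overflow. */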
+uint32_t HELPER(add_cc)(CPUUniCore32State *env, uint32_t a, uint32_t b)
+{
+ uint32_t result;
+ result = a + b;
+ env->NF = env->ZF = result;
+ env->CF = result < a;
+ env->VF = (a ^ b ^ -1) & (a ^ result);
+ return result;
+}
+
+uint32_t HELPER(adc_cc)(CPUUniCore32State *env, uint32_t a, uint32_t b)
+{
+ uint32_t result;
+ if (!env->CF) {
+ result = a + b;
+ env->CF = result < a;
+ } else {
+ result = a + b + 1;
+ env->CF = result <= a;
+ }
+ env->VF = (a ^ b ^ -1) & (a ^ result);
+ env->NF = env->ZF = result;
+ return result;
+}
+
+uint32_t HELPER(sub_cc)(CPUUniCore32State *env, uint32_t a, uint32_t b)
+{
+ uint32_t result;
+ result = a - b;
+ env->NF = env->ZF = result;
+ env->CF = a >= b;
+ env->VF = (a ^ b) & (a ^ result);
+ return result;
+}
+
+uint32_t HELPER(sbc_cc)(CPUUniCore32State *env, uint32_t a, uint32_t b)
+{
+ uint32_t result;
+ if (!env->CF) {
+ result = a - b - 1;
+ env->CF = a > b;
+ } else {
+ result = a - b;
+ env->CF = a >= b;
+ }
+ env->VF = (a ^ b) & (a ^ result);
+ env->NF = env->ZF = result;
+ return result;
+}
+
+/* Similarly for variable shift instructions. */
+
+uint32_t HELPER(shl)(uint32_t x, uint32_t i)
+{
+ int shift = i & 0xff;
+ if (shift >= 32) {
+ return 0;
+ }
+ return x << shift;
+}
+
+uint32_t HELPER(shr)(uint32_t x, uint32_t i)
+{
+ int shift = i & 0xff;
+ if (shift >= 32) {
+ return 0;
+ }
+ return (uint32_t)x >> shift;
+}
+
+uint32_t HELPER(sar)(uint32_t x, uint32_t i)
+{
+ int shift = i & 0xff;
+ if (shift >= 32) {
+ shift = 31;
+ }
+ return (int32_t)x >> shift;
+}
+
+uint32_t HELPER(shl_cc)(CPUUniCore32State *env, uint32_t x, uint32_t i)
+{
+ int shift = i & 0xff;
+ if (shift >= 32) {
+ if (shift == 32) {
+ env->CF = x & 1;
+ } else {
+ env->CF = 0;
+ }
+ return 0;
+ } else if (shift != 0) {
+ env->CF = (x >> (32 - shift)) & 1;
+ return x << shift;
+ }
+ return x;
+}
+
+uint32_t HELPER(shr_cc)(CPUUniCore32State *env, uint32_t x, uint32_t i)
+{
+ int shift = i & 0xff;
+ if (shift >= 32) {
+ if (shift == 32) {
+ env->CF = (x >> 31) & 1;
+ } else {
+ env->CF = 0;
+ }
+ return 0;
+ } else if (shift != 0) {
+ env->CF = (x >> (shift - 1)) & 1;
+ return x >> shift;
+ }
+ return x;
+}
+
+uint32_t HELPER(sar_cc)(CPUUniCore32State *env, uint32_t x, uint32_t i)
+{
+ int shift = i & 0xff;
+ if (shift >= 32) {
+ env->CF = (x >> 31) & 1;
+ return (int32_t)x >> 31;
+ } else if (shift != 0) {
+ env->CF = (x >> (shift - 1)) & 1;
+ return (int32_t)x >> shift;
+ }
+ return x;
+}
+
+uint32_t HELPER(ror_cc)(CPUUniCore32State *env, uint32_t x, uint32_t i)
+{
+ int shift1, shift;
+ shift1 = i & 0xff;
+ shift = shift1 & 0x1f;
+ if (shift == 0) {
+ if (shift1 != 0) {
+ env->CF = (x >> 31) & 1;
+ }
+ return x;
+ } else {
+ env->CF = (x >> (shift - 1)) & 1;
+ return ((uint32_t)x >> shift) | (x << (32 - shift));
+ }
+}
+
+#ifndef CONFIG_USER_ONLY
+void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ int ret;
+
+ ret = uc32_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx);
+ if (unlikely(ret)) {
+ if (retaddr) {
+ /* now we have a real cpu fault */
+ cpu_restore_state(cs, retaddr);
+ }
+ cpu_loop_exit(cs);
+ }
+}
+#endif
diff --git a/target/unicore32/softmmu.c b/target/unicore32/softmmu.c
new file mode 100644
index 0000000000..e7152e72e0
--- /dev/null
+++ b/target/unicore32/softmmu.c
@@ -0,0 +1,278 @@
+/*
+ * Softmmu related functions
+ *
+ * Copyright (C) 2010-2012 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation, or any later version.
+ * See the COPYING file in the top-level directory.
+ */
+#ifdef CONFIG_USER_ONLY
+#error This file only exists under the softmmu configuration
+#endif
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+
+#undef DEBUG_UC32
+
+#ifdef DEBUG_UC32
+#define DPRINTF(fmt, ...) printf("%s: " fmt , __func__, ## __VA_ARGS__)
+#else
+#define DPRINTF(fmt, ...) do {} while (0)
+#endif
+
+#define SUPERPAGE_SIZE (1 << 22)
+#define UC32_PAGETABLE_READ (1 << 8)
+#define UC32_PAGETABLE_WRITE (1 << 7)
+#define UC32_PAGETABLE_EXEC (1 << 6)
+#define UC32_PAGETABLE_EXIST (1 << 2)
+#define PAGETABLE_TYPE(x) ((x) & 3)
+
+
+/* Map CPU modes onto saved register banks. */
+static inline int bank_number(CPUUniCore32State *env, int mode)
+{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
+
+ switch (mode) {
+ case ASR_MODE_USER:
+ case ASR_MODE_SUSR:
+ return 0;
+ case ASR_MODE_PRIV:
+ return 1;
+ case ASR_MODE_TRAP:
+ return 2;
+ case ASR_MODE_EXTN:
+ return 3;
+ case ASR_MODE_INTR:
+ return 4;
+ }
+ cpu_abort(CPU(cpu), "Bad mode %x\n", mode);
+ return -1;
+}
+
+void switch_mode(CPUUniCore32State *env, int mode)
+{
+ int old_mode;
+ int i;
+
+ old_mode = env->uncached_asr & ASR_M;
+ if (mode == old_mode) {
+ return;
+ }
+
+ i = bank_number(env, old_mode);
+ env->banked_r29[i] = env->regs[29];
+ env->banked_r30[i] = env->regs[30];
+ env->banked_bsr[i] = env->bsr;
+
+ i = bank_number(env, mode);
+ env->regs[29] = env->banked_r29[i];
+ env->regs[30] = env->banked_r30[i];
+ env->bsr = env->banked_bsr[i];
+}
+
+/* Handle a CPU exception. */
+void uc32_cpu_do_interrupt(CPUState *cs)
+{
+ UniCore32CPU *cpu = UNICORE32_CPU(cs);
+ CPUUniCore32State *env = &cpu->env;
+ uint32_t addr;
+ int new_mode;
+
+ switch (cs->exception_index) {
+ case UC32_EXCP_PRIV:
+ new_mode = ASR_MODE_PRIV;
+ addr = 0x08;
+ break;
+ case UC32_EXCP_ITRAP:
+ DPRINTF("itrap happened at %x\n", env->regs[31]);
+ new_mode = ASR_MODE_TRAP;
+ addr = 0x0c;
+ break;
+ case UC32_EXCP_DTRAP:
+ DPRINTF("dtrap happened at %x\n", env->regs[31]);
+ new_mode = ASR_MODE_TRAP;
+ addr = 0x10;
+ break;
+ case UC32_EXCP_INTR:
+ new_mode = ASR_MODE_INTR;
+ addr = 0x18;
+ break;
+ default:
+ cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
+ return;
+ }
+ /* High vectors. */
+ if (env->cp0.c1_sys & (1 << 13)) {
+ addr += 0xffff0000;
+ }
+
+ switch_mode(env, new_mode);
+ env->bsr = cpu_asr_read(env);
+ env->uncached_asr = (env->uncached_asr & ~ASR_M) | new_mode;
+ env->uncached_asr |= ASR_I;
+ /* The PC already points to the proper instruction. */
+ env->regs[30] = env->regs[31];
+ env->regs[31] = addr;
+ cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
+}
+
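+/* Two-level page table walk: address bits [31:22] index the level-1 table
+ * at c2_base; a type-3 level-1 descriptor maps a 4MB superpage directly,
+ * while a type-0 descriptor points to a level-2 table indexed by address
+ * bits [21:12], which yields 4KB pages. */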
+static int get_phys_addr_ucv2(CPUUniCore32State *env, uint32_t address,
+ int access_type, int is_user, uint32_t *phys_ptr, int *prot,
+ target_ulong *page_size)
+{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ int code;
+ uint32_t table;
+ uint32_t desc;
+ uint32_t phys_addr;
+
+ /* Pagetable walk. */
+ /* Lookup l1 descriptor. */
+ table = env->cp0.c2_base & 0xfffff000;
+ table |= (address >> 20) & 0xffc;
+ desc = ldl_phys(cs->as, table);
+ code = 0;
+ switch (PAGETABLE_TYPE(desc)) {
+ case 3:
+ /* Superpage */
+ if (!(desc & UC32_PAGETABLE_EXIST)) {
+ code = 0x0b; /* superpage miss */
+ goto do_fault;
+ }
+ phys_addr = (desc & 0xffc00000) | (address & 0x003fffff);
+ *page_size = SUPERPAGE_SIZE;
+ break;
+ case 0:
+ /* Lookup l2 entry. */
+ if (is_user) {
+ DPRINTF("PGD address %x, desc %x\n", table, desc);
+ }
+ if (!(desc & UC32_PAGETABLE_EXIST)) {
+ code = 0x05; /* second pagetable miss */
+ goto do_fault;
+ }
+ table = (desc & 0xfffff000) | ((address >> 10) & 0xffc);
+ desc = ldl_phys(cs->as, table);
+ /* 4k page. */
+ if (is_user) {
+ DPRINTF("PTE address %x, desc %x\n", table, desc);
+ }
+ if (!(desc & UC32_PAGETABLE_EXIST)) {
+ code = 0x08; /* page miss */
+ goto do_fault;
+ }
+ switch (PAGETABLE_TYPE(desc)) {
+ case 0:
+ phys_addr = (desc & 0xfffff000) | (address & 0xfff);
+ *page_size = TARGET_PAGE_SIZE;
+ break;
+ default:
+ cpu_abort(CPU(cpu), "wrong page type!");
+ }
+ break;
+ default:
+ cpu_abort(CPU(cpu), "wrong page type!");
+ }
+
+ *phys_ptr = phys_addr;
+ *prot = 0;
+ /* Check access permissions. */
+ if (desc & UC32_PAGETABLE_READ) {
+ *prot |= PAGE_READ;
+ } else {
+ if (is_user && (access_type == 0)) {
+ code = 0x11; /* access unreadable area */
+ goto do_fault;
+ }
+ }
+
+ if (desc & UC32_PAGETABLE_WRITE) {
+ *prot |= PAGE_WRITE;
+ } else {
+ if (is_user && (access_type == 1)) {
+ code = 0x12; /* access unwritable area */
+ goto do_fault;
+ }
+ }
+
+ if (desc & UC32_PAGETABLE_EXEC) {
+ *prot |= PAGE_EXEC;
+ } else {
+ if (is_user && (access_type == 2)) {
+ code = 0x13; /* access unexecutable area */
+ goto do_fault;
+ }
+ }
+
+do_fault:
+ return code;
+}
+
+int uc32_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
+ int access_type, int mmu_idx)
+{
+ UniCore32CPU *cpu = UNICORE32_CPU(cs);
+ CPUUniCore32State *env = &cpu->env;
+ uint32_t phys_addr;
+ target_ulong page_size;
+ int prot;
+ int ret, is_user;
+
+ ret = 1;
+ is_user = mmu_idx == MMU_USER_IDX;
+
+ if ((env->cp0.c1_sys & 1) == 0) {
+ /* MMU disabled. */
+ phys_addr = address;
+ prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ page_size = TARGET_PAGE_SIZE;
+ ret = 0;
+ } else {
+ if ((address & (1 << 31)) || (is_user)) {
+ ret = get_phys_addr_ucv2(env, address, access_type, is_user,
+ &phys_addr, &prot, &page_size);
+ if (is_user) {
+ DPRINTF("user space access: ret %x, address %" VADDR_PRIx ", "
+ "access_type %x, phys_addr %x, prot %x\n",
+ ret, address, access_type, phys_addr, prot);
+ }
+ } else {
+            /* IO memory */
+ phys_addr = address | (1 << 31);
+ prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ page_size = TARGET_PAGE_SIZE;
+ ret = 0;
+ }
+ }
+
+ if (ret == 0) {
+ /* Map a single page. */
+ phys_addr &= TARGET_PAGE_MASK;
+ address &= TARGET_PAGE_MASK;
+ tlb_set_page(cs, address, phys_addr, prot, mmu_idx, page_size);
+ return 0;
+ }
+
+ env->cp0.c3_faultstatus = ret;
+ env->cp0.c4_faultaddr = address;
+ if (access_type == 2) {
+ cs->exception_index = UC32_EXCP_ITRAP;
+ } else {
+ cs->exception_index = UC32_EXCP_DTRAP;
+ }
+ return ret;
+}
+
+hwaddr uc32_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+ UniCore32CPU *cpu = UNICORE32_CPU(cs);
+
+ cpu_abort(CPU(cpu), "%s not supported yet\n", __func__);
+ return addr;
+}
diff --git a/target/unicore32/translate.c b/target/unicore32/translate.c
new file mode 100644
index 0000000000..514d460408
--- /dev/null
+++ b/target/unicore32/translate.c
@@ -0,0 +1,2111 @@
+/*
+ * UniCore32 translation
+ *
+ * Copyright (C) 2010-2012 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation, or (at your option) any
+ * later version. See the COPYING file in the top-level directory.
+ */
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "disas/disas.h"
+#include "exec/exec-all.h"
+#include "tcg-op.h"
+#include "qemu/log.h"
+#include "exec/cpu_ldst.h"
+
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+
+#include "trace-tcg.h"
+#include "exec/log.h"
+
+
+/* internal defines */
+typedef struct DisasContext {
+ target_ulong pc;
+ int is_jmp;
+ /* Nonzero if this instruction has been conditionally skipped. */
+ int condjmp;
+ /* The label that will be jumped to when the instruction is skipped. */
+ TCGLabel *condlabel;
+ struct TranslationBlock *tb;
+ int singlestep_enabled;
+#ifndef CONFIG_USER_ONLY
+ int user;
+#endif
+} DisasContext;
+
+#ifndef CONFIG_USER_ONLY
+#define IS_USER(s) (s->user)
+#else
+#define IS_USER(s) 1
+#endif
+
+/* These instructions trap after executing, so defer them until after the
+   conditional execution state has been updated. */
+#define DISAS_SYSCALL 5
+
+static TCGv_env cpu_env;
+static TCGv_i32 cpu_R[32];
+
+/* FIXME: These should be removed. */
+static TCGv cpu_F0s, cpu_F1s;
+static TCGv_i64 cpu_F0d, cpu_F1d;
+
+#include "exec/gen-icount.h"
+
+static const char *regnames[] = {
+ "r00", "r01", "r02", "r03", "r04", "r05", "r06", "r07",
+ "r08", "r09", "r10", "r11", "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+ "r24", "r25", "r26", "r27", "r28", "r29", "r30", "pc" };
+
+/* initialize TCG globals. */
+void uc32_translate_init(void)
+{
+ int i;
+
+ cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+ tcg_ctx.tcg_env = cpu_env;
+
+ for (i = 0; i < 32; i++) {
+ cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUUniCore32State, regs[i]), regnames[i]);
+ }
+}
+
+static int num_temps;
+
+/* Allocate a temporary variable. */
+static TCGv_i32 new_tmp(void)
+{
+ num_temps++;
+ return tcg_temp_new_i32();
+}
+
+/* Release a temporary variable. */
+static void dead_tmp(TCGv tmp)
+{
+ tcg_temp_free(tmp);
+ num_temps--;
+}
+
+static inline TCGv load_cpu_offset(int offset)
+{
+ TCGv tmp = new_tmp();
+ tcg_gen_ld_i32(tmp, cpu_env, offset);
+ return tmp;
+}
+
+#define load_cpu_field(name) load_cpu_offset(offsetof(CPUUniCore32State, name))
+
+static inline void store_cpu_offset(TCGv var, int offset)
+{
+ tcg_gen_st_i32(var, cpu_env, offset);
+ dead_tmp(var);
+}
+
+#define store_cpu_field(var, name) \
+ store_cpu_offset(var, offsetof(CPUUniCore32State, name))
+
+/* Set a variable to the value of a CPU register. */
+static void load_reg_var(DisasContext *s, TCGv var, int reg)
+{
+ if (reg == 31) {
+ uint32_t addr;
+        /* normally, since we updated PC */
+ addr = (long)s->pc;
+ tcg_gen_movi_i32(var, addr);
+ } else {
+ tcg_gen_mov_i32(var, cpu_R[reg]);
+ }
+}
+
+/* Create a new temporary and set it to the value of a CPU register. */
+static inline TCGv load_reg(DisasContext *s, int reg)
+{
+ TCGv tmp = new_tmp();
+ load_reg_var(s, tmp, reg);
+ return tmp;
+}
+
+/* Set a CPU register. The source must be a temporary and will be
+ marked as dead. */
+static void store_reg(DisasContext *s, int reg, TCGv var)
+{
+ if (reg == 31) {
+ tcg_gen_andi_i32(var, var, ~3);
+ s->is_jmp = DISAS_JUMP;
+ }
+ tcg_gen_mov_i32(cpu_R[reg], var);
+ dead_tmp(var);
+}
+
+/* Value extensions. */
+#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
+#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
+#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
+#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
+
+#define UCOP_REG_M (((insn) >> 0) & 0x1f)
+#define UCOP_REG_N (((insn) >> 19) & 0x1f)
+#define UCOP_REG_D (((insn) >> 14) & 0x1f)
+#define UCOP_REG_S (((insn) >> 9) & 0x1f)
+#define UCOP_REG_LO (((insn) >> 14) & 0x1f)
+#define UCOP_REG_HI (((insn) >> 9) & 0x1f)
+#define UCOP_SH_OP (((insn) >> 6) & 0x03)
+#define UCOP_SH_IM (((insn) >> 9) & 0x1f)
+#define UCOP_OPCODES (((insn) >> 25) & 0x0f)
+#define UCOP_IMM_9 (((insn) >> 0) & 0x1ff)
+#define UCOP_IMM10 (((insn) >> 0) & 0x3ff)
+#define UCOP_IMM14 (((insn) >> 0) & 0x3fff)
+#define UCOP_COND (((insn) >> 25) & 0x0f)
+#define UCOP_CMOV_COND (((insn) >> 19) & 0x0f)
+#define UCOP_CPNUM (((insn) >> 10) & 0x0f)
+#define UCOP_UCF64_FMT (((insn) >> 24) & 0x03)
+#define UCOP_UCF64_FUNC (((insn) >> 6) & 0x0f)
+#define UCOP_UCF64_COND (((insn) >> 6) & 0x0f)
+
+#define UCOP_SET(i) ((insn) & (1 << (i)))
+#define UCOP_SET_P UCOP_SET(28)
+#define UCOP_SET_U UCOP_SET(27)
+#define UCOP_SET_B UCOP_SET(26)
+#define UCOP_SET_W UCOP_SET(25)
+#define UCOP_SET_L UCOP_SET(24)
+#define UCOP_SET_S UCOP_SET(24)
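+/* For example, a data-processing instruction keeps its destination register
+ * in bits [18:14] (UCOP_REG_D), its first source in bits [23:19]
+ * (UCOP_REG_N) and its shifted operand in bits [4:0] (UCOP_REG_M). */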
+
+#define ILLEGAL cpu_abort(CPU(cpu), \
+ "Illegal UniCore32 instruction %x at line %d!", \
+ insn, __LINE__)
+
+#ifndef CONFIG_USER_ONLY
+static void disas_cp0_insn(CPUUniCore32State *env, DisasContext *s,
+ uint32_t insn)
+{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
+ TCGv tmp, tmp2, tmp3;
+ if ((insn & 0xfe000000) == 0xe0000000) {
+ tmp2 = new_tmp();
+ tmp3 = new_tmp();
+ tcg_gen_movi_i32(tmp2, UCOP_REG_N);
+ tcg_gen_movi_i32(tmp3, UCOP_IMM10);
+ if (UCOP_SET_L) {
+ tmp = new_tmp();
+ gen_helper_cp0_get(tmp, cpu_env, tmp2, tmp3);
+ store_reg(s, UCOP_REG_D, tmp);
+ } else {
+ tmp = load_reg(s, UCOP_REG_D);
+ gen_helper_cp0_set(cpu_env, tmp, tmp2, tmp3);
+ dead_tmp(tmp);
+ }
+ dead_tmp(tmp2);
+ dead_tmp(tmp3);
+ return;
+ }
+ ILLEGAL;
+}
+
+static void disas_ocd_insn(CPUUniCore32State *env, DisasContext *s,
+ uint32_t insn)
+{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
+ TCGv tmp;
+
+ if ((insn & 0xff003fff) == 0xe1000400) {
+ /*
+ * movc rd, pp.nn, #imm9
+ * rd: UCOP_REG_D
+ * nn: UCOP_REG_N (must be 0)
+ * imm9: 0
+ */
+ if (UCOP_REG_N == 0) {
+ tmp = new_tmp();
+ tcg_gen_movi_i32(tmp, 0);
+ store_reg(s, UCOP_REG_D, tmp);
+ return;
+ } else {
+ ILLEGAL;
+ }
+ }
+ if ((insn & 0xff003fff) == 0xe0000401) {
+ /*
+ * movc pp.nn, rn, #imm9
+ * rn: UCOP_REG_D
+ * nn: UCOP_REG_N (must be 1)
+ * imm9: 1
+ */
+ if (UCOP_REG_N == 1) {
+ tmp = load_reg(s, UCOP_REG_D);
+ gen_helper_cp1_putc(tmp);
+ dead_tmp(tmp);
+ return;
+ } else {
+ ILLEGAL;
+ }
+ }
+ ILLEGAL;
+}
+#endif
+
+static inline void gen_set_asr(TCGv var, uint32_t mask)
+{
+ TCGv tmp_mask = tcg_const_i32(mask);
+ gen_helper_asr_write(cpu_env, var, tmp_mask);
+ tcg_temp_free_i32(tmp_mask);
+}
+/* Set NZCV flags from the high 4 bits of var. */
+#define gen_set_nzcv(var) gen_set_asr(var, ASR_NZCV)
+
+static void gen_exception(int excp)
+{
+ TCGv tmp = new_tmp();
+ tcg_gen_movi_i32(tmp, excp);
+ gen_helper_exception(cpu_env, tmp);
+ dead_tmp(tmp);
+}
+
+#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, CF))
+
+/* Set CF to the top bit of var. */
+static void gen_set_CF_bit31(TCGv var)
+{
+ TCGv tmp = new_tmp();
+ tcg_gen_shri_i32(tmp, var, 31);
+ gen_set_CF(tmp);
+ dead_tmp(tmp);
+}
+
+/* Set N and Z flags from var. */
+static inline void gen_logic_CC(TCGv var)
+{
+ tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, NF));
+ tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, ZF));
+}
+
+/* dest = T0 + T1 + CF. */
+static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
+{
+ TCGv tmp;
+ tcg_gen_add_i32(dest, t0, t1);
+ tmp = load_cpu_field(CF);
+ tcg_gen_add_i32(dest, dest, tmp);
+ dead_tmp(tmp);
+}
+
+/* dest = T0 - T1 + CF - 1. */
+static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
+{
+ TCGv tmp;
+ tcg_gen_sub_i32(dest, t0, t1);
+ tmp = load_cpu_field(CF);
+ tcg_gen_add_i32(dest, dest, tmp);
+ tcg_gen_subi_i32(dest, dest, 1);
+ dead_tmp(tmp);
+}
+
+static void shifter_out_im(TCGv var, int shift)
+{
+ TCGv tmp = new_tmp();
+ if (shift == 0) {
+ tcg_gen_andi_i32(tmp, var, 1);
+ } else {
+ tcg_gen_shri_i32(tmp, var, shift);
+ if (shift != 31) {
+ tcg_gen_andi_i32(tmp, tmp, 1);
+ }
+ }
+ gen_set_CF(tmp);
+ dead_tmp(tmp);
+}
+
+/* Shift by immediate. Includes special handling for shift == 0. */
+static inline void gen_uc32_shift_im(TCGv var, int shiftop, int shift,
+ int flags)
+{
+ switch (shiftop) {
+ case 0: /* LSL */
+ if (shift != 0) {
+ if (flags) {
+ shifter_out_im(var, 32 - shift);
+ }
+ tcg_gen_shli_i32(var, var, shift);
+ }
+ break;
+ case 1: /* LSR */
+ if (shift == 0) {
+ if (flags) {
+ tcg_gen_shri_i32(var, var, 31);
+ gen_set_CF(var);
+ }
+ tcg_gen_movi_i32(var, 0);
+ } else {
+ if (flags) {
+ shifter_out_im(var, shift - 1);
+ }
+ tcg_gen_shri_i32(var, var, shift);
+ }
+ break;
+ case 2: /* ASR */
+ if (shift == 0) {
+ shift = 32;
+ }
+ if (flags) {
+ shifter_out_im(var, shift - 1);
+ }
+ if (shift == 32) {
+ shift = 31;
+ }
+ tcg_gen_sari_i32(var, var, shift);
+ break;
+ case 3: /* ROR/RRX */
+ if (shift != 0) {
+ if (flags) {
+ shifter_out_im(var, shift - 1);
+ }
+            tcg_gen_rotri_i32(var, var, shift);
+            break;
+ } else {
+ TCGv tmp = load_cpu_field(CF);
+ if (flags) {
+ shifter_out_im(var, 0);
+ }
+ tcg_gen_shri_i32(var, var, 1);
+ tcg_gen_shli_i32(tmp, tmp, 31);
+ tcg_gen_or_i32(var, var, tmp);
+ dead_tmp(tmp);
+ }
+ }
+}
+
+static inline void gen_uc32_shift_reg(TCGv var, int shiftop,
+ TCGv shift, int flags)
+{
+ if (flags) {
+ switch (shiftop) {
+ case 0:
+ gen_helper_shl_cc(var, cpu_env, var, shift);
+ break;
+ case 1:
+ gen_helper_shr_cc(var, cpu_env, var, shift);
+ break;
+ case 2:
+ gen_helper_sar_cc(var, cpu_env, var, shift);
+ break;
+ case 3:
+ gen_helper_ror_cc(var, cpu_env, var, shift);
+ break;
+ }
+ } else {
+ switch (shiftop) {
+ case 0:
+ gen_helper_shl(var, var, shift);
+ break;
+ case 1:
+ gen_helper_shr(var, var, shift);
+ break;
+ case 2:
+ gen_helper_sar(var, var, shift);
+ break;
+ case 3:
+ tcg_gen_andi_i32(shift, shift, 0x1f);
+ tcg_gen_rotr_i32(var, var, shift);
+ break;
+ }
+ }
+ dead_tmp(shift);
+}
+
+static void gen_test_cc(int cc, TCGLabel *label)
+{
+ TCGv tmp;
+ TCGv tmp2;
+ TCGLabel *inv;
+
+ switch (cc) {
+ case 0: /* eq: Z */
+ tmp = load_cpu_field(ZF);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
+ break;
+ case 1: /* ne: !Z */
+ tmp = load_cpu_field(ZF);
+ tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
+ break;
+ case 2: /* cs: C */
+ tmp = load_cpu_field(CF);
+ tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
+ break;
+ case 3: /* cc: !C */
+ tmp = load_cpu_field(CF);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
+ break;
+ case 4: /* mi: N */
+ tmp = load_cpu_field(NF);
+ tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
+ break;
+ case 5: /* pl: !N */
+ tmp = load_cpu_field(NF);
+ tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
+ break;
+ case 6: /* vs: V */
+ tmp = load_cpu_field(VF);
+ tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
+ break;
+ case 7: /* vc: !V */
+ tmp = load_cpu_field(VF);
+ tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
+ break;
+ case 8: /* hi: C && !Z */
+ inv = gen_new_label();
+ tmp = load_cpu_field(CF);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
+ dead_tmp(tmp);
+ tmp = load_cpu_field(ZF);
+ tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
+ gen_set_label(inv);
+ break;
+ case 9: /* ls: !C || Z */
+ tmp = load_cpu_field(CF);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
+ dead_tmp(tmp);
+ tmp = load_cpu_field(ZF);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
+ break;
+ case 10: /* ge: N == V -> N ^ V == 0 */
+ tmp = load_cpu_field(VF);
+ tmp2 = load_cpu_field(NF);
+ tcg_gen_xor_i32(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
+ tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
+ break;
+ case 11: /* lt: N != V -> N ^ V != 0 */
+ tmp = load_cpu_field(VF);
+ tmp2 = load_cpu_field(NF);
+ tcg_gen_xor_i32(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
+ tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
+ break;
+ case 12: /* gt: !Z && N == V */
+ inv = gen_new_label();
+ tmp = load_cpu_field(ZF);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
+ dead_tmp(tmp);
+ tmp = load_cpu_field(VF);
+ tmp2 = load_cpu_field(NF);
+ tcg_gen_xor_i32(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
+ tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
+ gen_set_label(inv);
+ break;
+ case 13: /* le: Z || N != V */
+ tmp = load_cpu_field(ZF);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
+ dead_tmp(tmp);
+ tmp = load_cpu_field(VF);
+ tmp2 = load_cpu_field(NF);
+ tcg_gen_xor_i32(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
+ tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
+ break;
+ default:
+ fprintf(stderr, "Bad condition code 0x%x\n", cc);
+ abort();
+ }
+ dead_tmp(tmp);
+}
+
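+/* Indexed by UCOP_OPCODES: a 1 marks an opcode that sets N/Z through
+ * gen_logic_CC() when the S bit is set, rather than via a *_cc helper. */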
+static const uint8_t table_logic_cc[16] = {
+ 1, /* and */ 1, /* xor */ 0, /* sub */ 0, /* rsb */
+ 0, /* add */ 0, /* adc */ 0, /* sbc */ 0, /* rsc */
+ 1, /* andl */ 1, /* xorl */ 0, /* cmp */ 0, /* cmn */
+ 1, /* orr */ 1, /* mov */ 1, /* bic */ 1, /* mvn */
+};
+
+/* Set PC state from an immediate address. */
+static inline void gen_bx_im(DisasContext *s, uint32_t addr)
+{
+ s->is_jmp = DISAS_UPDATE;
+ tcg_gen_movi_i32(cpu_R[31], addr & ~3);
+}
+
+/* Set PC state from var. var is marked as dead. */
+static inline void gen_bx(DisasContext *s, TCGv var)
+{
+ s->is_jmp = DISAS_UPDATE;
+ tcg_gen_andi_i32(cpu_R[31], var, ~3);
+ dead_tmp(var);
+}
+
+static inline void store_reg_bx(DisasContext *s, int reg, TCGv var)
+{
+ store_reg(s, reg, var);
+}
+
+static inline TCGv gen_ld8s(TCGv addr, int index)
+{
+ TCGv tmp = new_tmp();
+ tcg_gen_qemu_ld8s(tmp, addr, index);
+ return tmp;
+}
+
+static inline TCGv gen_ld8u(TCGv addr, int index)
+{
+ TCGv tmp = new_tmp();
+ tcg_gen_qemu_ld8u(tmp, addr, index);
+ return tmp;
+}
+
+static inline TCGv gen_ld16s(TCGv addr, int index)
+{
+ TCGv tmp = new_tmp();
+ tcg_gen_qemu_ld16s(tmp, addr, index);
+ return tmp;
+}
+
+static inline TCGv gen_ld16u(TCGv addr, int index)
+{
+ TCGv tmp = new_tmp();
+ tcg_gen_qemu_ld16u(tmp, addr, index);
+ return tmp;
+}
+
+static inline TCGv gen_ld32(TCGv addr, int index)
+{
+ TCGv tmp = new_tmp();
+ tcg_gen_qemu_ld32u(tmp, addr, index);
+ return tmp;
+}
+
+static inline void gen_st8(TCGv val, TCGv addr, int index)
+{
+ tcg_gen_qemu_st8(val, addr, index);
+ dead_tmp(val);
+}
+
+static inline void gen_st16(TCGv val, TCGv addr, int index)
+{
+ tcg_gen_qemu_st16(val, addr, index);
+ dead_tmp(val);
+}
+
+static inline void gen_st32(TCGv val, TCGv addr, int index)
+{
+ tcg_gen_qemu_st32(val, addr, index);
+ dead_tmp(val);
+}
+
+static inline void gen_set_pc_im(uint32_t val)
+{
+ tcg_gen_movi_i32(cpu_R[31], val);
+}
+
+/* Force a TB lookup after an instruction that changes the CPU state. */
+static inline void gen_lookup_tb(DisasContext *s)
+{
+ tcg_gen_movi_i32(cpu_R[31], s->pc & ~1);
+ s->is_jmp = DISAS_UPDATE;
+}
+
+static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
+ TCGv var)
+{
+ int val;
+ TCGv offset;
+
+ if (UCOP_SET(29)) {
+ /* immediate */
+ val = UCOP_IMM14;
+ if (!UCOP_SET_U) {
+ val = -val;
+ }
+ if (val != 0) {
+ tcg_gen_addi_i32(var, var, val);
+ }
+ } else {
+ /* shift/register */
+ offset = load_reg(s, UCOP_REG_M);
+ gen_uc32_shift_im(offset, UCOP_SH_OP, UCOP_SH_IM, 0);
+ if (!UCOP_SET_U) {
+ tcg_gen_sub_i32(var, var, offset);
+ } else {
+ tcg_gen_add_i32(var, var, offset);
+ }
+ dead_tmp(offset);
+ }
+}
+
+static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
+ TCGv var)
+{
+ int val;
+ TCGv offset;
+
+ if (UCOP_SET(26)) {
+ /* immediate */
+ val = (insn & 0x1f) | ((insn >> 4) & 0x3e0);
+ if (!UCOP_SET_U) {
+ val = -val;
+ }
+ if (val != 0) {
+ tcg_gen_addi_i32(var, var, val);
+ }
+ } else {
+ /* register */
+ offset = load_reg(s, UCOP_REG_M);
+ if (!UCOP_SET_U) {
+ tcg_gen_sub_i32(var, var, offset);
+ } else {
+ tcg_gen_add_i32(var, var, offset);
+ }
+ dead_tmp(offset);
+ }
+}
+
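+/* Each 64-bit ucf64 register backs two 32-bit views: an even register
+ * number selects the lower word of the pair, an odd one the upper word. */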
+static inline long ucf64_reg_offset(int reg)
+{
+ if (reg & 1) {
+ return offsetof(CPUUniCore32State, ucf64.regs[reg >> 1])
+ + offsetof(CPU_DoubleU, l.upper);
+ } else {
+ return offsetof(CPUUniCore32State, ucf64.regs[reg >> 1])
+ + offsetof(CPU_DoubleU, l.lower);
+ }
+}
+
+#define ucf64_gen_ld32(reg) load_cpu_offset(ucf64_reg_offset(reg))
+#define ucf64_gen_st32(var, reg) store_cpu_offset(var, ucf64_reg_offset(reg))
+
+/* UniCore-F64 single load/store I_offset */
+static void do_ucf64_ldst_i(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
+{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
+ int offset;
+ TCGv tmp;
+ TCGv addr;
+
+ addr = load_reg(s, UCOP_REG_N);
+ if (!UCOP_SET_P && !UCOP_SET_W) {
+ ILLEGAL;
+ }
+
+ if (UCOP_SET_P) {
+ offset = UCOP_IMM10 << 2;
+ if (!UCOP_SET_U) {
+ offset = -offset;
+ }
+ if (offset != 0) {
+ tcg_gen_addi_i32(addr, addr, offset);
+ }
+ }
+
+ if (UCOP_SET_L) { /* load */
+ tmp = gen_ld32(addr, IS_USER(s));
+ ucf64_gen_st32(tmp, UCOP_REG_D);
+ } else { /* store */
+ tmp = ucf64_gen_ld32(UCOP_REG_D);
+ gen_st32(tmp, addr, IS_USER(s));
+ }
+
+ if (!UCOP_SET_P) {
+ offset = UCOP_IMM10 << 2;
+ if (!UCOP_SET_U) {
+ offset = -offset;
+ }
+ if (offset != 0) {
+ tcg_gen_addi_i32(addr, addr, offset);
+ }
+ }
+ if (UCOP_SET_W) {
+ store_reg(s, UCOP_REG_N, addr);
+ } else {
+ dead_tmp(addr);
+ }
+}
+
+/* UniCore-F64 load/store multiple words */
+static void do_ucf64_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
+{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
+ unsigned int i;
+ int j, n, freg;
+ TCGv tmp;
+ TCGv addr;
+
+ if (UCOP_REG_D != 0) {
+ ILLEGAL;
+ }
+ if (UCOP_REG_N == 31) {
+ ILLEGAL;
+ }
+ if ((insn << 24) == 0) {
+ ILLEGAL;
+ }
+
+ addr = load_reg(s, UCOP_REG_N);
+
+ n = 0;
+ for (i = 0; i < 8; i++) {
+ if (UCOP_SET(i)) {
+ n++;
+ }
+ }
+
+ if (UCOP_SET_U) {
+ if (UCOP_SET_P) { /* pre increment */
+ tcg_gen_addi_i32(addr, addr, 4);
+ } /* unnecessary to do anything when post increment */
+ } else {
+ if (UCOP_SET_P) { /* pre decrement */
+ tcg_gen_addi_i32(addr, addr, -(n * 4));
+ } else { /* post decrement */
+ if (n != 1) {
+ tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
+ }
+ }
+ }
+
+ freg = ((insn >> 8) & 3) << 3; /* freg should be 0, 8, 16, 24 */
+
+ for (i = 0, j = 0; i < 8; i++, freg++) {
+ if (!UCOP_SET(i)) {
+ continue;
+ }
+
+ if (UCOP_SET_L) { /* load */
+ tmp = gen_ld32(addr, IS_USER(s));
+ ucf64_gen_st32(tmp, freg);
+ } else { /* store */
+ tmp = ucf64_gen_ld32(freg);
+ gen_st32(tmp, addr, IS_USER(s));
+ }
+
+ j++;
+ /* unnecessary to add after the last transfer */
+ if (j != n) {
+ tcg_gen_addi_i32(addr, addr, 4);
+ }
+ }
+
+ if (UCOP_SET_W) { /* write back */
+ if (UCOP_SET_U) {
+ if (!UCOP_SET_P) { /* post increment */
+ tcg_gen_addi_i32(addr, addr, 4);
+ } /* unnecessary to do anything when pre increment */
+ } else {
+ if (UCOP_SET_P) {
+ /* pre decrement */
+ if (n != 1) {
+ tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
+ }
+ } else {
+ /* post decrement */
+ tcg_gen_addi_i32(addr, addr, -(n * 4));
+ }
+ }
+ store_reg(s, UCOP_REG_N, addr);
+ } else {
+ dead_tmp(addr);
+ }
+}
+
+/* UniCore-F64 mrc/mcr */
+static void do_ucf64_trans(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
+{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
+ TCGv tmp;
+
+ if ((insn & 0xfe0003ff) == 0xe2000000) {
+ /* control register */
+ if ((UCOP_REG_N != UC32_UCF64_FPSCR) || (UCOP_REG_D == 31)) {
+ ILLEGAL;
+ }
+ if (UCOP_SET(24)) {
+ /* CFF */
+ tmp = new_tmp();
+ gen_helper_ucf64_get_fpscr(tmp, cpu_env);
+ store_reg(s, UCOP_REG_D, tmp);
+ } else {
+ /* CTF */
+ tmp = load_reg(s, UCOP_REG_D);
+ gen_helper_ucf64_set_fpscr(cpu_env, tmp);
+ dead_tmp(tmp);
+ gen_lookup_tb(s);
+ }
+ return;
+ }
+ if ((insn & 0xfe0003ff) == 0xe0000000) {
+ /* general register */
+ if (UCOP_REG_D == 31) {
+ ILLEGAL;
+ }
+ if (UCOP_SET(24)) { /* MFF */
+ tmp = ucf64_gen_ld32(UCOP_REG_N);
+ store_reg(s, UCOP_REG_D, tmp);
+ } else { /* MTF */
+ tmp = load_reg(s, UCOP_REG_D);
+ ucf64_gen_st32(tmp, UCOP_REG_N);
+ }
+ return;
+ }
+ if ((insn & 0xfb000000) == 0xe9000000) {
+ /* MFFC */
+ if (UCOP_REG_D != 31) {
+ ILLEGAL;
+ }
+ if (UCOP_UCF64_COND & 0x8) {
+ ILLEGAL;
+ }
+
+ tmp = new_tmp();
+ tcg_gen_movi_i32(tmp, UCOP_UCF64_COND);
+ if (UCOP_SET(26)) {
+ tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N));
+ tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
+ gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, tmp, cpu_env);
+ } else {
+ tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N));
+ tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
+ gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, tmp, cpu_env);
+ }
+ dead_tmp(tmp);
+ return;
+ }
+ ILLEGAL;
+}
+
+/* UniCore-F64 convert instructions */
+static void do_ucf64_fcvt(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
+{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
+
+ if (UCOP_UCF64_FMT == 3) {
+ ILLEGAL;
+ }
+ if (UCOP_REG_N != 0) {
+ ILLEGAL;
+ }
+ switch (UCOP_UCF64_FUNC) {
+ case 0: /* cvt.s */
+ switch (UCOP_UCF64_FMT) {
+ case 1 /* d */:
+ tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
+ gen_helper_ucf64_df2sf(cpu_F0s, cpu_F0d, cpu_env);
+ tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
+ break;
+ case 2 /* w */:
+ tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
+ gen_helper_ucf64_si2sf(cpu_F0s, cpu_F0s, cpu_env);
+ tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
+ break;
+ default /* s */:
+ ILLEGAL;
+ break;
+ }
+ break;
+ case 1: /* cvt.d */
+ switch (UCOP_UCF64_FMT) {
+ case 0 /* s */:
+ tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
+ gen_helper_ucf64_sf2df(cpu_F0d, cpu_F0s, cpu_env);
+ tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D));
+ break;
+ case 2 /* w */:
+ tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
+ gen_helper_ucf64_si2df(cpu_F0d, cpu_F0s, cpu_env);
+ tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D));
+ break;
+ default /* d */:
+ ILLEGAL;
+ break;
+ }
+ break;
+ case 4: /* cvt.w */
+ switch (UCOP_UCF64_FMT) {
+ case 0 /* s */:
+ tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
+ gen_helper_ucf64_sf2si(cpu_F0s, cpu_F0s, cpu_env);
+ tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
+ break;
+ case 1 /* d */:
+ tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
+ gen_helper_ucf64_df2si(cpu_F0s, cpu_F0d, cpu_env);
+ tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
+ break;
+ default /* w */:
+ ILLEGAL;
+ break;
+ }
+ break;
+ default:
+ ILLEGAL;
+ }
+}
+
+/* UniCore-F64 compare instructions */
+static void do_ucf64_fcmp(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
+{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
+
+ if (UCOP_SET(25)) {
+ ILLEGAL;
+ }
+ if (UCOP_REG_D != 0) {
+ ILLEGAL;
+ }
+
+ ILLEGAL; /* TODO */
+ if (UCOP_SET(24)) {
+ tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N));
+ tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
+ /* gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, cpu_env); */
+ } else {
+ tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N));
+ tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
+ /* gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, cpu_env); */
+ }
+}
+
+#define gen_helper_ucf64_movs(x, y) do { } while (0)
+#define gen_helper_ucf64_movd(x, y) do { } while (0)
+
+#define UCF64_OP1(name) do { \
+ if (UCOP_REG_N != 0) { \
+ ILLEGAL; \
+ } \
+ switch (UCOP_UCF64_FMT) { \
+ case 0 /* s */: \
+ tcg_gen_ld_i32(cpu_F0s, cpu_env, \
+ ucf64_reg_offset(UCOP_REG_M)); \
+ gen_helper_ucf64_##name##s(cpu_F0s, cpu_F0s); \
+ tcg_gen_st_i32(cpu_F0s, cpu_env, \
+ ucf64_reg_offset(UCOP_REG_D)); \
+ break; \
+ case 1 /* d */: \
+ tcg_gen_ld_i64(cpu_F0d, cpu_env, \
+ ucf64_reg_offset(UCOP_REG_M)); \
+ gen_helper_ucf64_##name##d(cpu_F0d, cpu_F0d); \
+ tcg_gen_st_i64(cpu_F0d, cpu_env, \
+ ucf64_reg_offset(UCOP_REG_D)); \
+ break; \
+ case 2 /* w */: \
+ ILLEGAL; \
+ break; \
+ } \
+ } while (0)
+
+#define UCF64_OP2(name) do { \
+ switch (UCOP_UCF64_FMT) { \
+ case 0 /* s */: \
+ tcg_gen_ld_i32(cpu_F0s, cpu_env, \
+ ucf64_reg_offset(UCOP_REG_N)); \
+ tcg_gen_ld_i32(cpu_F1s, cpu_env, \
+ ucf64_reg_offset(UCOP_REG_M)); \
+ gen_helper_ucf64_##name##s(cpu_F0s, \
+ cpu_F0s, cpu_F1s, cpu_env); \
+ tcg_gen_st_i32(cpu_F0s, cpu_env, \
+ ucf64_reg_offset(UCOP_REG_D)); \
+ break; \
+ case 1 /* d */: \
+ tcg_gen_ld_i64(cpu_F0d, cpu_env, \
+ ucf64_reg_offset(UCOP_REG_N)); \
+ tcg_gen_ld_i64(cpu_F1d, cpu_env, \
+ ucf64_reg_offset(UCOP_REG_M)); \
+ gen_helper_ucf64_##name##d(cpu_F0d, \
+ cpu_F0d, cpu_F1d, cpu_env); \
+ tcg_gen_st_i64(cpu_F0d, cpu_env, \
+ ucf64_reg_offset(UCOP_REG_D)); \
+ break; \
+ case 2 /* w */: \
+ ILLEGAL; \
+ break; \
+ } \
+ } while (0)
+
+/* UniCore-F64 data processing */
+static void do_ucf64_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
+{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
+
+ if (UCOP_UCF64_FMT == 3) {
+ ILLEGAL;
+ }
+ switch (UCOP_UCF64_FUNC) {
+ case 0: /* add */
+ UCF64_OP2(add);
+ break;
+ case 1: /* sub */
+ UCF64_OP2(sub);
+ break;
+ case 2: /* mul */
+ UCF64_OP2(mul);
+ break;
+ case 4: /* div */
+ UCF64_OP2(div);
+ break;
+ case 5: /* abs */
+ UCF64_OP1(abs);
+ break;
+ case 6: /* mov */
+ UCF64_OP1(mov);
+ break;
+ case 7: /* neg */
+ UCF64_OP1(neg);
+ break;
+ default:
+ ILLEGAL;
+ }
+}
+
+/* Disassemble an F64 instruction */
+static void disas_ucf64_insn(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
+{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
+
+ if (!UCOP_SET(29)) {
+ if (UCOP_SET(26)) {
+ do_ucf64_ldst_m(env, s, insn);
+ } else {
+ do_ucf64_ldst_i(env, s, insn);
+ }
+ } else {
+ if (UCOP_SET(5)) {
+ switch ((insn >> 26) & 0x3) {
+ case 0:
+ do_ucf64_datap(env, s, insn);
+ break;
+ case 1:
+ ILLEGAL;
+ break;
+ case 2:
+ do_ucf64_fcvt(env, s, insn);
+ break;
+ case 3:
+ do_ucf64_fcmp(env, s, insn);
+ break;
+ }
+ } else {
+ do_ucf64_trans(env, s, insn);
+ }
+ }
+}
+
+static inline bool use_goto_tb(DisasContext *s, uint32_t dest)
+{
+#ifndef CONFIG_USER_ONLY
+ return (s->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
+#else
+ return true;
+#endif
+}
+
+static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
+{
+ if (use_goto_tb(s, dest)) {
+ tcg_gen_goto_tb(n);
+ gen_set_pc_im(dest);
+ tcg_gen_exit_tb((uintptr_t)s->tb + n);
+ } else {
+ gen_set_pc_im(dest);
+ tcg_gen_exit_tb(0);
+ }
+}
+
+static inline void gen_jmp(DisasContext *s, uint32_t dest)
+{
+ if (unlikely(s->singlestep_enabled)) {
+ /* An indirect jump so that we still trigger the debug exception. */
+ gen_bx_im(s, dest);
+ } else {
+ gen_goto_tb(s, 0, dest);
+ s->is_jmp = DISAS_TB_JUMP;
+ }
+}
+
+/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
+static int gen_set_psr(DisasContext *s, uint32_t mask, int bsr, TCGv t0)
+{
+ TCGv tmp;
+ if (bsr) {
+ /* ??? This is also undefined in system mode. */
+ if (IS_USER(s)) {
+ return 1;
+ }
+
+ tmp = load_cpu_field(bsr);
+ tcg_gen_andi_i32(tmp, tmp, ~mask);
+ tcg_gen_andi_i32(t0, t0, mask);
+ tcg_gen_or_i32(tmp, tmp, t0);
+ store_cpu_field(tmp, bsr);
+ } else {
+ gen_set_asr(t0, mask);
+ }
+ dead_tmp(t0);
+ gen_lookup_tb(s);
+ return 0;
+}
+
+/* Generate an old-style exception return. Marks pc as dead. */
+static void gen_exception_return(DisasContext *s, TCGv pc)
+{
+ TCGv tmp;
+ store_reg(s, 31, pc);
+ tmp = load_cpu_field(bsr);
+ gen_set_asr(tmp, 0xffffffff);
+ dead_tmp(tmp);
+ s->is_jmp = DISAS_UPDATE;
+}
+
+static void disas_coproc_insn(CPUUniCore32State *env, DisasContext *s,
+ uint32_t insn)
+{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
+
+ switch (UCOP_CPNUM) {
+#ifndef CONFIG_USER_ONLY
+ case 0:
+ disas_cp0_insn(env, s, insn);
+ break;
+ case 1:
+ disas_ocd_insn(env, s, insn);
+ break;
+#endif
+ case 2:
+ disas_ucf64_insn(env, s, insn);
+ break;
+ default:
+ /* Unknown coprocessor. */
+ cpu_abort(CPU(cpu), "Unknown coprocessor!");
+ }
+}
+
+/* data processing instructions */
+static void do_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
+{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
+ TCGv tmp;
+ TCGv tmp2;
+ int logic_cc;
+
+ if (UCOP_OPCODES == 0x0f || UCOP_OPCODES == 0x0d) {
+ if (UCOP_SET(23)) { /* CMOV instructions */
+ if ((UCOP_CMOV_COND == 0xe) || (UCOP_CMOV_COND == 0xf)) {
+ ILLEGAL;
+ }
+            /* if not always executed, generate a conditional jump to the
+               next instruction */
+ s->condlabel = gen_new_label();
+ gen_test_cc(UCOP_CMOV_COND ^ 1, s->condlabel);
+ s->condjmp = 1;
+ }
+ }
+
+ logic_cc = table_logic_cc[UCOP_OPCODES] & (UCOP_SET_S >> 24);
+
+ if (UCOP_SET(29)) {
+ unsigned int val;
+ /* immediate operand */
+ val = UCOP_IMM_9;
+ if (UCOP_SH_IM) {
+ val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM));
+ }
+ tmp2 = new_tmp();
+ tcg_gen_movi_i32(tmp2, val);
+ if (logic_cc && UCOP_SH_IM) {
+ gen_set_CF_bit31(tmp2);
+ }
+ } else {
+ /* register */
+ tmp2 = load_reg(s, UCOP_REG_M);
+ if (UCOP_SET(5)) {
+ tmp = load_reg(s, UCOP_REG_S);
+ gen_uc32_shift_reg(tmp2, UCOP_SH_OP, tmp, logic_cc);
+ } else {
+ gen_uc32_shift_im(tmp2, UCOP_SH_OP, UCOP_SH_IM, logic_cc);
+ }
+ }
+
+ if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) {
+ tmp = load_reg(s, UCOP_REG_N);
+ } else {
+ TCGV_UNUSED(tmp);
+ }
+
+ switch (UCOP_OPCODES) {
+ case 0x00:
+ tcg_gen_and_i32(tmp, tmp, tmp2);
+ if (logic_cc) {
+ gen_logic_CC(tmp);
+ }
+ store_reg_bx(s, UCOP_REG_D, tmp);
+ break;
+ case 0x01:
+ tcg_gen_xor_i32(tmp, tmp, tmp2);
+ if (logic_cc) {
+ gen_logic_CC(tmp);
+ }
+ store_reg_bx(s, UCOP_REG_D, tmp);
+ break;
+ case 0x02:
+ if (UCOP_SET_S && UCOP_REG_D == 31) {
+ /* SUBS r31, ... is used for exception return. */
+ if (IS_USER(s)) {
+ ILLEGAL;
+ }
+ gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
+ gen_exception_return(s, tmp);
+ } else {
+ if (UCOP_SET_S) {
+ gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
+ } else {
+ tcg_gen_sub_i32(tmp, tmp, tmp2);
+ }
+ store_reg_bx(s, UCOP_REG_D, tmp);
+ }
+ break;
+ case 0x03:
+ if (UCOP_SET_S) {
+ gen_helper_sub_cc(tmp, cpu_env, tmp2, tmp);
+ } else {
+ tcg_gen_sub_i32(tmp, tmp2, tmp);
+ }
+ store_reg_bx(s, UCOP_REG_D, tmp);
+ break;
+ case 0x04:
+ if (UCOP_SET_S) {
+ gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
+ } else {
+ tcg_gen_add_i32(tmp, tmp, tmp2);
+ }
+ store_reg_bx(s, UCOP_REG_D, tmp);
+ break;
+ case 0x05:
+ if (UCOP_SET_S) {
+ gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2);
+ } else {
+ gen_add_carry(tmp, tmp, tmp2);
+ }
+ store_reg_bx(s, UCOP_REG_D, tmp);
+ break;
+ case 0x06:
+ if (UCOP_SET_S) {
+ gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2);
+ } else {
+ gen_sub_carry(tmp, tmp, tmp2);
+ }
+ store_reg_bx(s, UCOP_REG_D, tmp);
+ break;
+ case 0x07:
+ if (UCOP_SET_S) {
+ gen_helper_sbc_cc(tmp, cpu_env, tmp2, tmp);
+ } else {
+ gen_sub_carry(tmp, tmp2, tmp);
+ }
+ store_reg_bx(s, UCOP_REG_D, tmp);
+ break;
+ case 0x08:
+ if (UCOP_SET_S) {
+ tcg_gen_and_i32(tmp, tmp, tmp2);
+ gen_logic_CC(tmp);
+ }
+ dead_tmp(tmp);
+ break;
+ case 0x09:
+ if (UCOP_SET_S) {
+ tcg_gen_xor_i32(tmp, tmp, tmp2);
+ gen_logic_CC(tmp);
+ }
+ dead_tmp(tmp);
+ break;
+ case 0x0a:
+ if (UCOP_SET_S) {
+ gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
+ }
+ dead_tmp(tmp);
+ break;
+ case 0x0b:
+ if (UCOP_SET_S) {
+ gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
+ }
+ dead_tmp(tmp);
+ break;
+ case 0x0c:
+ tcg_gen_or_i32(tmp, tmp, tmp2);
+ if (logic_cc) {
+ gen_logic_CC(tmp);
+ }
+ store_reg_bx(s, UCOP_REG_D, tmp);
+ break;
+ case 0x0d:
+ if (logic_cc && UCOP_REG_D == 31) {
+ /* MOVS r31, ... is used for exception return. */
+ if (IS_USER(s)) {
+ ILLEGAL;
+ }
+ gen_exception_return(s, tmp2);
+ } else {
+ if (logic_cc) {
+ gen_logic_CC(tmp2);
+ }
+ store_reg_bx(s, UCOP_REG_D, tmp2);
+ }
+ break;
+ case 0x0e:
+ tcg_gen_andc_i32(tmp, tmp, tmp2);
+ if (logic_cc) {
+ gen_logic_CC(tmp);
+ }
+ store_reg_bx(s, UCOP_REG_D, tmp);
+ break;
+ default:
+ case 0x0f:
+ tcg_gen_not_i32(tmp2, tmp2);
+ if (logic_cc) {
+ gen_logic_CC(tmp2);
+ }
+ store_reg_bx(s, UCOP_REG_D, tmp2);
+ break;
+ }
+ if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) {
+ dead_tmp(tmp2);
+ }
+}
+
+/* multiply */
+static void do_mult(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
+{
+ TCGv tmp, tmp2, tmp3, tmp4;
+
+ if (UCOP_SET(27)) {
+ /* 64 bit mul */
+ tmp = load_reg(s, UCOP_REG_M);
+ tmp2 = load_reg(s, UCOP_REG_N);
+ if (UCOP_SET(26)) {
+ tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
+ } else {
+ tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
+ }
+ if (UCOP_SET(25)) { /* mult accumulate */
+ tmp3 = load_reg(s, UCOP_REG_LO);
+ tmp4 = load_reg(s, UCOP_REG_HI);
+ tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, tmp3, tmp4);
+ dead_tmp(tmp3);
+ dead_tmp(tmp4);
+ }
+ store_reg(s, UCOP_REG_LO, tmp);
+ store_reg(s, UCOP_REG_HI, tmp2);
+ } else {
+ /* 32 bit mul */
+ tmp = load_reg(s, UCOP_REG_M);
+ tmp2 = load_reg(s, UCOP_REG_N);
+ tcg_gen_mul_i32(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
+ if (UCOP_SET(25)) {
+ /* Add */
+ tmp2 = load_reg(s, UCOP_REG_S);
+ tcg_gen_add_i32(tmp, tmp, tmp2);
+ dead_tmp(tmp2);
+ }
+ if (UCOP_SET_S) {
+ gen_logic_CC(tmp);
+ }
+ store_reg(s, UCOP_REG_D, tmp);
+ }
+}
+
+/* miscellaneous instructions */
+static void do_misc(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
+{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
+ unsigned int val;
+ TCGv tmp;
+
+ if ((insn & 0xffffffe0) == 0x10ffc120) {
+ /* Trivial implementation equivalent to bx. */
+ tmp = load_reg(s, UCOP_REG_M);
+ gen_bx(s, tmp);
+ return;
+ }
+
+ if ((insn & 0xfbffc000) == 0x30ffc000) {
+ /* PSR = immediate */
+ val = UCOP_IMM_9;
+ if (UCOP_SH_IM) {
+ val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM));
+ }
+ tmp = new_tmp();
+ tcg_gen_movi_i32(tmp, val);
+ if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) {
+ ILLEGAL;
+ }
+ return;
+ }
+
+ if ((insn & 0xfbffffe0) == 0x12ffc020) {
+ /* PSR.flag = reg */
+ tmp = load_reg(s, UCOP_REG_M);
+ if (gen_set_psr(s, ASR_NZCV, UCOP_SET_B, tmp)) {
+ ILLEGAL;
+ }
+ return;
+ }
+
+ if ((insn & 0xfbffffe0) == 0x10ffc020) {
+ /* PSR = reg */
+ tmp = load_reg(s, UCOP_REG_M);
+ if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) {
+ ILLEGAL;
+ }
+ return;
+ }
+
+ if ((insn & 0xfbf83fff) == 0x10f80000) {
+ /* reg = PSR */
+ if (UCOP_SET_B) {
+ if (IS_USER(s)) {
+ ILLEGAL;
+ }
+ tmp = load_cpu_field(bsr);
+ } else {
+ tmp = new_tmp();
+ gen_helper_asr_read(tmp, cpu_env);
+ }
+ store_reg(s, UCOP_REG_D, tmp);
+ return;
+ }
+
+ if ((insn & 0xfbf83fe0) == 0x12f80120) {
+ /* clz */
+ tmp = load_reg(s, UCOP_REG_M);
+ if (UCOP_SET(26)) {
+ gen_helper_clo(tmp, tmp);
+ } else {
+ gen_helper_clz(tmp, tmp);
+ }
+ store_reg(s, UCOP_REG_D, tmp);
+ return;
+ }
+
+ /* otherwise */
+ ILLEGAL;
+}
+
+/* load/store I_offset and R_offset */
+static void do_ldst_ir(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
+{
+ unsigned int mmu_idx;
+ TCGv tmp;
+ TCGv tmp2;
+
+ tmp2 = load_reg(s, UCOP_REG_N);
+ mmu_idx = (IS_USER(s) || (!UCOP_SET_P && UCOP_SET_W));
+
+ /* immediate */
+ if (UCOP_SET_P) {
+ gen_add_data_offset(s, insn, tmp2);
+ }
+
+ if (UCOP_SET_L) {
+ /* load */
+ if (UCOP_SET_B) {
+ tmp = gen_ld8u(tmp2, mmu_idx);
+ } else {
+ tmp = gen_ld32(tmp2, mmu_idx);
+ }
+ } else {
+ /* store */
+ tmp = load_reg(s, UCOP_REG_D);
+ if (UCOP_SET_B) {
+ gen_st8(tmp, tmp2, mmu_idx);
+ } else {
+ gen_st32(tmp, tmp2, mmu_idx);
+ }
+ }
+ if (!UCOP_SET_P) {
+ gen_add_data_offset(s, insn, tmp2);
+ store_reg(s, UCOP_REG_N, tmp2);
+ } else if (UCOP_SET_W) {
+ store_reg(s, UCOP_REG_N, tmp2);
+ } else {
+ dead_tmp(tmp2);
+ }
+ if (UCOP_SET_L) {
+ /* Complete the load. */
+ if (UCOP_REG_D == 31) {
+ gen_bx(s, tmp);
+ } else {
+ store_reg(s, UCOP_REG_D, tmp);
+ }
+ }
+}
+
+/* SWP instruction */
+static void do_swap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
+{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
+ TCGv addr;
+ TCGv tmp;
+ TCGv tmp2;
+
+ if ((insn & 0xff003fe0) != 0x40000120) {
+ ILLEGAL;
+ }
+
+ /* ??? This is not really atomic. However we know
+ we never have multiple CPUs running in parallel,
+ so it is good enough. */
+ addr = load_reg(s, UCOP_REG_N);
+ tmp = load_reg(s, UCOP_REG_M);
+ if (UCOP_SET_B) {
+ tmp2 = gen_ld8u(addr, IS_USER(s));
+ gen_st8(tmp, addr, IS_USER(s));
+ } else {
+ tmp2 = gen_ld32(addr, IS_USER(s));
+ gen_st32(tmp, addr, IS_USER(s));
+ }
+ dead_tmp(addr);
+ store_reg(s, UCOP_REG_D, tmp2);
+}
+
+/* load/store hw/sb */
+static void do_ldst_hwsb(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
+{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
+ TCGv addr;
+ TCGv tmp;
+
+ if (UCOP_SH_OP == 0) {
+ do_swap(env, s, insn);
+ return;
+ }
+
+ addr = load_reg(s, UCOP_REG_N);
+ if (UCOP_SET_P) {
+ gen_add_datah_offset(s, insn, addr);
+ }
+
+ if (UCOP_SET_L) { /* load */
+ switch (UCOP_SH_OP) {
+ case 1:
+ tmp = gen_ld16u(addr, IS_USER(s));
+ break;
+ case 2:
+ tmp = gen_ld8s(addr, IS_USER(s));
+ break;
+ default: /* see do_swap */
+ case 3:
+ tmp = gen_ld16s(addr, IS_USER(s));
+ break;
+ }
+ } else { /* store */
+ if (UCOP_SH_OP != 1) {
+ ILLEGAL;
+ }
+ tmp = load_reg(s, UCOP_REG_D);
+ gen_st16(tmp, addr, IS_USER(s));
+ }
+ /* Perform base writeback before the loaded value to
+ ensure correct behavior with overlapping index registers. */
+ if (!UCOP_SET_P) {
+ gen_add_datah_offset(s, insn, addr);
+ store_reg(s, UCOP_REG_N, addr);
+ } else if (UCOP_SET_W) {
+ store_reg(s, UCOP_REG_N, addr);
+ } else {
+ dead_tmp(addr);
+ }
+ if (UCOP_SET_L) {
+ /* Complete the load. */
+ store_reg(s, UCOP_REG_D, tmp);
+ }
+}
+
+/* load/store multiple words */
+static void do_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
+{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
+ unsigned int val, i, mmu_idx;
+ int j, n, reg, user, loaded_base;
+ TCGv tmp;
+ TCGv tmp2;
+ TCGv addr;
+ TCGv loaded_var;
+
+ if (UCOP_SET(7)) {
+ ILLEGAL;
+ }
+ /* XXX: store correct base if write back */
+ user = 0;
+ if (UCOP_SET_B) { /* S bit in instruction table */
+ if (IS_USER(s)) {
+ ILLEGAL; /* only usable in supervisor mode */
+ }
+ if (UCOP_SET(18) == 0) { /* pc reg */
+ user = 1;
+ }
+ }
+
+ mmu_idx = (IS_USER(s) || (!UCOP_SET_P && UCOP_SET_W));
+ addr = load_reg(s, UCOP_REG_N);
+
+ /* compute total size */
+ loaded_base = 0;
+ TCGV_UNUSED(loaded_var);
+ n = 0;
+ for (i = 0; i < 6; i++) {
+ if (UCOP_SET(i)) {
+ n++;
+ }
+ }
+ for (i = 9; i < 19; i++) {
+ if (UCOP_SET(i)) {
+ n++;
+ }
+ }
+ /* XXX: test invalid n == 0 case ? */
+ if (UCOP_SET_U) {
+ if (UCOP_SET_P) {
+ /* pre increment */
+ tcg_gen_addi_i32(addr, addr, 4);
+ } else {
+ /* post increment */
+ }
+ } else {
+ if (UCOP_SET_P) {
+ /* pre decrement */
+ tcg_gen_addi_i32(addr, addr, -(n * 4));
+ } else {
+ /* post decrement */
+ if (n != 1) {
+ tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
+ }
+ }
+ }
+
+ j = 0;
+ reg = UCOP_SET(6) ? 16 : 0;
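+    /* The transfer list appears to live in insn bits 0..5 and 9..18 (the
+     * i == 6 adjustment below skips bits 6..8), mapping to r0..r15, or to
+     * r16..r31 when bit 6 is set.
+     */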
+ for (i = 0; i < 19; i++, reg++) {
+ if (i == 6) {
+ i = i + 3;
+ }
+ if (UCOP_SET(i)) {
+ if (UCOP_SET_L) { /* load */
+ tmp = gen_ld32(addr, mmu_idx);
+ if (reg == 31) {
+ gen_bx(s, tmp);
+ } else if (user) {
+ tmp2 = tcg_const_i32(reg);
+ gen_helper_set_user_reg(cpu_env, tmp2, tmp);
+ tcg_temp_free_i32(tmp2);
+ dead_tmp(tmp);
+ } else if (reg == UCOP_REG_N) {
+ loaded_var = tmp;
+ loaded_base = 1;
+ } else {
+ store_reg(s, reg, tmp);
+ }
+ } else { /* store */
+ if (reg == 31) {
+ /* special case: r31 = PC + 4 */
+ val = (long)s->pc;
+ tmp = new_tmp();
+ tcg_gen_movi_i32(tmp, val);
+ } else if (user) {
+ tmp = new_tmp();
+ tmp2 = tcg_const_i32(reg);
+ gen_helper_get_user_reg(tmp, cpu_env, tmp2);
+ tcg_temp_free_i32(tmp2);
+ } else {
+ tmp = load_reg(s, reg);
+ }
+ gen_st32(tmp, addr, mmu_idx);
+ }
+ j++;
+ /* no need to add after the last transfer */
+ if (j != n) {
+ tcg_gen_addi_i32(addr, addr, 4);
+ }
+ }
+ }
+ if (UCOP_SET_W) { /* write back */
+ if (UCOP_SET_U) {
+ if (UCOP_SET_P) {
+ /* pre increment */
+ } else {
+ /* post increment */
+ tcg_gen_addi_i32(addr, addr, 4);
+ }
+ } else {
+ if (UCOP_SET_P) {
+ /* pre decrement */
+ if (n != 1) {
+ tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
+ }
+ } else {
+ /* post decrement */
+ tcg_gen_addi_i32(addr, addr, -(n * 4));
+ }
+ }
+ store_reg(s, UCOP_REG_N, addr);
+ } else {
+ dead_tmp(addr);
+ }
+ if (loaded_base) {
+ store_reg(s, UCOP_REG_N, loaded_var);
+ }
+ if (UCOP_SET_B && !user) {
+ /* Restore ASR from BSR. */
+ tmp = load_cpu_field(bsr);
+ gen_set_asr(tmp, 0xffffffff);
+ dead_tmp(tmp);
+ s->is_jmp = DISAS_UPDATE;
+ }
+}
+
+/* branch (and link) */
+static void do_branch(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
+{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
+ unsigned int val;
+ int32_t offset;
+ TCGv tmp;
+
+ if (UCOP_COND == 0xf) {
+ ILLEGAL;
+ }
+
+ if (UCOP_COND != 0xe) {
+        /* if not always executed, we generate a conditional jump to
+           the next instruction */
+ s->condlabel = gen_new_label();
+ gen_test_cc(UCOP_COND ^ 1, s->condlabel);
+ s->condjmp = 1;
+ }
+
+ val = (int32_t)s->pc;
+ if (UCOP_SET_L) {
+ tmp = new_tmp();
+ tcg_gen_movi_i32(tmp, val);
+ store_reg(s, 30, tmp);
+ }
+ offset = (((int32_t)insn << 8) >> 8);
+ val += (offset << 2); /* unicore is pc+4 */
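+    /* The shift pair above sign-extends the 24-bit offset field; e.g. an
+     * offset field of 0xfffffe gives offset = -2, so the target is 8 bytes
+     * before the following instruction.
+     */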
+ gen_jmp(s, val);
+}
+
+static void disas_uc32_insn(CPUUniCore32State *env, DisasContext *s)
+{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
+ unsigned int insn;
+
+ insn = cpu_ldl_code(env, s->pc);
+ s->pc += 4;
+
+ /* UniCore instructions class:
+ * AAAB BBBC xxxx xxxx xxxx xxxD xxEx xxxx
+ * AAA : see switch case
+ * BBBB : opcodes or cond or PUBW
+ * C : S OR L
+ * D : 8
+ * E : 5
+ */
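+    /* For example, an instruction whose top three bits (insn >> 29) equal
+     * 0x5 is dispatched to do_branch() below, while 0x6 selects the
+     * coprocessor decoder.
+     */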
+ switch (insn >> 29) {
+ case 0x0:
+ if (UCOP_SET(5) && UCOP_SET(8) && !UCOP_SET(28)) {
+ do_mult(env, s, insn);
+ break;
+ }
+
+ if (UCOP_SET(8)) {
+ do_misc(env, s, insn);
+ break;
+ }
+ case 0x1:
+ if (((UCOP_OPCODES >> 2) == 2) && !UCOP_SET_S) {
+ do_misc(env, s, insn);
+ break;
+ }
+ do_datap(env, s, insn);
+ break;
+
+ case 0x2:
+ if (UCOP_SET(8) && UCOP_SET(5)) {
+ do_ldst_hwsb(env, s, insn);
+ break;
+ }
+ if (UCOP_SET(8) || UCOP_SET(5)) {
+ ILLEGAL;
+ }
+ case 0x3:
+ do_ldst_ir(env, s, insn);
+ break;
+
+ case 0x4:
+ if (UCOP_SET(8)) {
+ ILLEGAL; /* extended instructions */
+ }
+ do_ldst_m(env, s, insn);
+ break;
+ case 0x5:
+ do_branch(env, s, insn);
+ break;
+ case 0x6:
+ /* Coprocessor. */
+ disas_coproc_insn(env, s, insn);
+ break;
+ case 0x7:
+ if (!UCOP_SET(28)) {
+ disas_coproc_insn(env, s, insn);
+ break;
+ }
+ if ((insn & 0xff000000) == 0xff000000) { /* syscall */
+ gen_set_pc_im(s->pc);
+ s->is_jmp = DISAS_SYSCALL;
+ break;
+ }
+ ILLEGAL;
+ }
+}
+
+/* generate intermediate code for basic block 'tb'. */
+void gen_intermediate_code(CPUUniCore32State *env, TranslationBlock *tb)
+{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ DisasContext dc1, *dc = &dc1;
+ target_ulong pc_start;
+ uint32_t next_page_start;
+ int num_insns;
+ int max_insns;
+
+ /* generate intermediate code */
+ num_temps = 0;
+
+ pc_start = tb->pc;
+
+ dc->tb = tb;
+
+ dc->is_jmp = DISAS_NEXT;
+ dc->pc = pc_start;
+ dc->singlestep_enabled = cs->singlestep_enabled;
+ dc->condjmp = 0;
+ cpu_F0s = tcg_temp_new_i32();
+ cpu_F1s = tcg_temp_new_i32();
+ cpu_F0d = tcg_temp_new_i64();
+ cpu_F1d = tcg_temp_new_i64();
+ next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
+ num_insns = 0;
+ max_insns = tb->cflags & CF_COUNT_MASK;
+ if (max_insns == 0) {
+ max_insns = CF_COUNT_MASK;
+ }
+ if (max_insns > TCG_MAX_INSNS) {
+ max_insns = TCG_MAX_INSNS;
+ }
+
+#ifndef CONFIG_USER_ONLY
+ if ((env->uncached_asr & ASR_M) == ASR_MODE_USER) {
+ dc->user = 1;
+ } else {
+ dc->user = 0;
+ }
+#endif
+
+ gen_tb_start(tb);
+ do {
+ tcg_gen_insn_start(dc->pc);
+ num_insns++;
+
+ if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
+ gen_set_pc_im(dc->pc);
+ gen_exception(EXCP_DEBUG);
+ dc->is_jmp = DISAS_JUMP;
+ /* The address covered by the breakpoint must be included in
+               [tb->pc, tb->pc + tb->size) in order for it to be
+ properly cleared -- thus we increment the PC here so that
+ the logic setting tb->size below does the right thing. */
+ dc->pc += 4;
+ goto done_generating;
+ }
+
+ if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
+ gen_io_start();
+ }
+
+ disas_uc32_insn(env, dc);
+
+ if (num_temps) {
+ fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
+ num_temps = 0;
+ }
+
+ if (dc->condjmp && !dc->is_jmp) {
+ gen_set_label(dc->condlabel);
+ dc->condjmp = 0;
+ }
+ /* Translation stops when a conditional branch is encountered.
+ * Otherwise the subsequent code could get translated several times.
+ * Also stop translation when a page boundary is reached. This
+ * ensures prefetch aborts occur at the right place. */
+ } while (!dc->is_jmp && !tcg_op_buf_full() &&
+ !cs->singlestep_enabled &&
+ !singlestep &&
+ dc->pc < next_page_start &&
+ num_insns < max_insns);
+
+ if (tb->cflags & CF_LAST_IO) {
+ if (dc->condjmp) {
+ /* FIXME: This can theoretically happen with self-modifying
+ code. */
+ cpu_abort(cs, "IO on conditional branch instruction");
+ }
+ gen_io_end();
+ }
+
+ /* At this stage dc->condjmp will only be set when the skipped
+ instruction was a conditional branch or trap, and the PC has
+ already been written. */
+ if (unlikely(cs->singlestep_enabled)) {
+ /* Make sure the pc is updated, and raise a debug exception. */
+ if (dc->condjmp) {
+ if (dc->is_jmp == DISAS_SYSCALL) {
+ gen_exception(UC32_EXCP_PRIV);
+ } else {
+ gen_exception(EXCP_DEBUG);
+ }
+ gen_set_label(dc->condlabel);
+ }
+ if (dc->condjmp || !dc->is_jmp) {
+ gen_set_pc_im(dc->pc);
+ dc->condjmp = 0;
+ }
+ if (dc->is_jmp == DISAS_SYSCALL && !dc->condjmp) {
+ gen_exception(UC32_EXCP_PRIV);
+ } else {
+ gen_exception(EXCP_DEBUG);
+ }
+ } else {
+ /* While branches must always occur at the end of an IT block,
+ there are a few other things that can cause us to terminate
+           the TB in the middle of an IT block:
+ - Exception generating instructions (bkpt, swi, undefined).
+ - Page boundaries.
+ - Hardware watchpoints.
+ Hardware breakpoints have already been handled and skip this code.
+ */
+ switch (dc->is_jmp) {
+ case DISAS_NEXT:
+ gen_goto_tb(dc, 1, dc->pc);
+ break;
+ default:
+ case DISAS_JUMP:
+ case DISAS_UPDATE:
+ /* indicate that the hash table must be used to find the next TB */
+ tcg_gen_exit_tb(0);
+ break;
+ case DISAS_TB_JUMP:
+ /* nothing more to generate */
+ break;
+ case DISAS_SYSCALL:
+ gen_exception(UC32_EXCP_PRIV);
+ break;
+ }
+ if (dc->condjmp) {
+ gen_set_label(dc->condlabel);
+ gen_goto_tb(dc, 1, dc->pc);
+ dc->condjmp = 0;
+ }
+ }
+
+done_generating:
+ gen_tb_end(tb, num_insns);
+
+#ifdef DEBUG_DISAS
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
+ && qemu_log_in_addr_range(pc_start)) {
+ qemu_log_lock();
+ qemu_log("----------------\n");
+ qemu_log("IN: %s\n", lookup_symbol(pc_start));
+ log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
+ qemu_log("\n");
+ qemu_log_unlock();
+ }
+#endif
+ tb->size = dc->pc - pc_start;
+ tb->icount = num_insns;
+}
+
+static const char *cpu_mode_names[16] = {
+ "USER", "REAL", "INTR", "PRIV", "UM14", "UM15", "UM16", "TRAP",
+ "UM18", "UM19", "UM1A", "EXTN", "UM1C", "UM1D", "UM1E", "SUSR"
+};
+
+#undef UCF64_DUMP_STATE
+#ifdef UCF64_DUMP_STATE
+static void cpu_dump_state_ucf64(CPUUniCore32State *env, FILE *f,
+ fprintf_function cpu_fprintf, int flags)
+{
+ int i;
+ union {
+ uint32_t i;
+ float s;
+ } s0, s1;
+ CPU_DoubleU d;
+ /* ??? This assumes float64 and double have the same layout.
+ Oh well, it's only debug dumps. */
+ union {
+ float64 f64;
+ double d;
+ } d0;
+
+ for (i = 0; i < 16; i++) {
+ d.d = env->ucf64.regs[i];
+ s0.i = d.l.lower;
+ s1.i = d.l.upper;
+ d0.f64 = d.d;
+ cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g)",
+ i * 2, (int)s0.i, s0.s,
+ i * 2 + 1, (int)s1.i, s1.s);
+ cpu_fprintf(f, " d%02d=%" PRIx64 "(%8g)\n",
+ i, (uint64_t)d0.f64, d0.d);
+ }
+ cpu_fprintf(f, "FPSCR: %08x\n", (int)env->ucf64.xregs[UC32_UCF64_FPSCR]);
+}
+#else
+#define cpu_dump_state_ucf64(env, file, pr, flags) do { } while (0)
+#endif
+
+void uc32_cpu_dump_state(CPUState *cs, FILE *f,
+ fprintf_function cpu_fprintf, int flags)
+{
+ UniCore32CPU *cpu = UNICORE32_CPU(cs);
+ CPUUniCore32State *env = &cpu->env;
+ int i;
+ uint32_t psr;
+
+ for (i = 0; i < 32; i++) {
+ cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
+ if ((i % 4) == 3) {
+ cpu_fprintf(f, "\n");
+ } else {
+ cpu_fprintf(f, " ");
+ }
+ }
+ psr = cpu_asr_read(env);
+ cpu_fprintf(f, "PSR=%08x %c%c%c%c %s\n",
+ psr,
+ psr & (1 << 31) ? 'N' : '-',
+ psr & (1 << 30) ? 'Z' : '-',
+ psr & (1 << 29) ? 'C' : '-',
+ psr & (1 << 28) ? 'V' : '-',
+ cpu_mode_names[psr & 0xf]);
+
+ cpu_dump_state_ucf64(env, f, cpu_fprintf, flags);
+}
+
+void restore_state_to_opc(CPUUniCore32State *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ env->regs[31] = data[0];
+}
diff --git a/target/unicore32/ucf64_helper.c b/target/unicore32/ucf64_helper.c
new file mode 100644
index 0000000000..6c919010c3
--- /dev/null
+++ b/target/unicore32/ucf64_helper.c
@@ -0,0 +1,325 @@
+/*
+ * UniCore-F64 simulation helpers for QEMU.
+ *
+ * Copyright (C) 2010-2012 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation, or any later version.
+ * See the COPYING file in the top-level directory.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+
+/*
+ * The convention used for UniCore-F64 instructions:
+ *  Single precision routines have an "s" suffix.
+ * Double precision routines have a "d" suffix.
+ */
+
+/* Convert host exception flags to ucf64 FPSCR form. */
+static inline int ucf64_exceptbits_from_host(int host_bits)
+{
+ int target_bits = 0;
+
+ if (host_bits & float_flag_invalid) {
+ target_bits |= UCF64_FPSCR_FLAG_INVALID;
+ }
+ if (host_bits & float_flag_divbyzero) {
+ target_bits |= UCF64_FPSCR_FLAG_DIVZERO;
+ }
+ if (host_bits & float_flag_overflow) {
+ target_bits |= UCF64_FPSCR_FLAG_OVERFLOW;
+ }
+ if (host_bits & float_flag_underflow) {
+ target_bits |= UCF64_FPSCR_FLAG_UNDERFLOW;
+ }
+ if (host_bits & float_flag_inexact) {
+ target_bits |= UCF64_FPSCR_FLAG_INEXACT;
+ }
+ return target_bits;
+}
+
+uint32_t HELPER(ucf64_get_fpscr)(CPUUniCore32State *env)
+{
+ int i;
+ uint32_t fpscr;
+
+ fpscr = (env->ucf64.xregs[UC32_UCF64_FPSCR] & UCF64_FPSCR_MASK);
+ i = get_float_exception_flags(&env->ucf64.fp_status);
+ fpscr |= ucf64_exceptbits_from_host(i);
+ return fpscr;
+}
+
+/* Convert ucf64 (target) exception flags to host form. */
+static inline int ucf64_exceptbits_to_host(int target_bits)
+{
+ int host_bits = 0;
+
+ if (target_bits & UCF64_FPSCR_FLAG_INVALID) {
+ host_bits |= float_flag_invalid;
+ }
+ if (target_bits & UCF64_FPSCR_FLAG_DIVZERO) {
+ host_bits |= float_flag_divbyzero;
+ }
+ if (target_bits & UCF64_FPSCR_FLAG_OVERFLOW) {
+ host_bits |= float_flag_overflow;
+ }
+ if (target_bits & UCF64_FPSCR_FLAG_UNDERFLOW) {
+ host_bits |= float_flag_underflow;
+ }
+ if (target_bits & UCF64_FPSCR_FLAG_INEXACT) {
+ host_bits |= float_flag_inexact;
+ }
+ return host_bits;
+}
+
+void HELPER(ucf64_set_fpscr)(CPUUniCore32State *env, uint32_t val)
+{
+ UniCore32CPU *cpu = uc32_env_get_cpu(env);
+ int i;
+ uint32_t changed;
+
+ changed = env->ucf64.xregs[UC32_UCF64_FPSCR];
+ env->ucf64.xregs[UC32_UCF64_FPSCR] = (val & UCF64_FPSCR_MASK);
+
+ changed ^= val;
+ if (changed & (UCF64_FPSCR_RND_MASK)) {
+ i = UCF64_FPSCR_RND(val);
+ switch (i) {
+ case 0:
+ i = float_round_nearest_even;
+ break;
+ case 1:
+ i = float_round_to_zero;
+ break;
+ case 2:
+ i = float_round_up;
+ break;
+ case 3:
+ i = float_round_down;
+ break;
+        default: /* rounding modes 100 and 101 are not implemented */
+ cpu_abort(CPU(cpu), "Unsupported UniCore-F64 round mode");
+ }
+ set_float_rounding_mode(i, &env->ucf64.fp_status);
+ }
+
+ i = ucf64_exceptbits_to_host(UCF64_FPSCR_TRAPEN(val));
+ set_float_exception_flags(i, &env->ucf64.fp_status);
+}
+
+float32 HELPER(ucf64_adds)(float32 a, float32 b, CPUUniCore32State *env)
+{
+ return float32_add(a, b, &env->ucf64.fp_status);
+}
+
+float64 HELPER(ucf64_addd)(float64 a, float64 b, CPUUniCore32State *env)
+{
+ return float64_add(a, b, &env->ucf64.fp_status);
+}
+
+float32 HELPER(ucf64_subs)(float32 a, float32 b, CPUUniCore32State *env)
+{
+ return float32_sub(a, b, &env->ucf64.fp_status);
+}
+
+float64 HELPER(ucf64_subd)(float64 a, float64 b, CPUUniCore32State *env)
+{
+ return float64_sub(a, b, &env->ucf64.fp_status);
+}
+
+float32 HELPER(ucf64_muls)(float32 a, float32 b, CPUUniCore32State *env)
+{
+ return float32_mul(a, b, &env->ucf64.fp_status);
+}
+
+float64 HELPER(ucf64_muld)(float64 a, float64 b, CPUUniCore32State *env)
+{
+ return float64_mul(a, b, &env->ucf64.fp_status);
+}
+
+float32 HELPER(ucf64_divs)(float32 a, float32 b, CPUUniCore32State *env)
+{
+ return float32_div(a, b, &env->ucf64.fp_status);
+}
+
+float64 HELPER(ucf64_divd)(float64 a, float64 b, CPUUniCore32State *env)
+{
+ return float64_div(a, b, &env->ucf64.fp_status);
+}
+
+float32 HELPER(ucf64_negs)(float32 a)
+{
+ return float32_chs(a);
+}
+
+float64 HELPER(ucf64_negd)(float64 a)
+{
+ return float64_chs(a);
+}
+
+float32 HELPER(ucf64_abss)(float32 a)
+{
+ return float32_abs(a);
+}
+
+float64 HELPER(ucf64_absd)(float64 a)
+{
+ return float64_abs(a);
+}
+
+void HELPER(ucf64_cmps)(float32 a, float32 b, uint32_t c,
+ CPUUniCore32State *env)
+{
+ int flag;
+ flag = float32_compare_quiet(a, b, &env->ucf64.fp_status);
+ env->CF = 0;
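+    /* float32_compare_quiet() returns -1, 0, 1 or 2 for less-than, equal,
+     * greater-than and unordered respectively (the softfloat float_relation_*
+     * values); the cases below test against these values directly.
+     */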
+ switch (c & 0x7) {
+ case 0: /* F */
+ break;
+ case 1: /* UN */
+ if (flag == 2) {
+ env->CF = 1;
+ }
+ break;
+ case 2: /* EQ */
+ if (flag == 0) {
+ env->CF = 1;
+ }
+ break;
+ case 3: /* UEQ */
+ if ((flag == 0) || (flag == 2)) {
+ env->CF = 1;
+ }
+ break;
+ case 4: /* OLT */
+ if (flag == -1) {
+ env->CF = 1;
+ }
+ break;
+ case 5: /* ULT */
+ if ((flag == -1) || (flag == 2)) {
+ env->CF = 1;
+ }
+ break;
+ case 6: /* OLE */
+ if ((flag == -1) || (flag == 0)) {
+ env->CF = 1;
+ }
+ break;
+ case 7: /* ULE */
+ if (flag != 1) {
+ env->CF = 1;
+ }
+ break;
+ }
+ env->ucf64.xregs[UC32_UCF64_FPSCR] = (env->CF << 29)
+ | (env->ucf64.xregs[UC32_UCF64_FPSCR] & 0x0fffffff);
+}
+
+void HELPER(ucf64_cmpd)(float64 a, float64 b, uint32_t c,
+ CPUUniCore32State *env)
+{
+ int flag;
+ flag = float64_compare_quiet(a, b, &env->ucf64.fp_status);
+ env->CF = 0;
+ switch (c & 0x7) {
+ case 0: /* F */
+ break;
+ case 1: /* UN */
+ if (flag == 2) {
+ env->CF = 1;
+ }
+ break;
+ case 2: /* EQ */
+ if (flag == 0) {
+ env->CF = 1;
+ }
+ break;
+ case 3: /* UEQ */
+ if ((flag == 0) || (flag == 2)) {
+ env->CF = 1;
+ }
+ break;
+ case 4: /* OLT */
+ if (flag == -1) {
+ env->CF = 1;
+ }
+ break;
+ case 5: /* ULT */
+ if ((flag == -1) || (flag == 2)) {
+ env->CF = 1;
+ }
+ break;
+ case 6: /* OLE */
+ if ((flag == -1) || (flag == 0)) {
+ env->CF = 1;
+ }
+ break;
+ case 7: /* ULE */
+ if (flag != 1) {
+ env->CF = 1;
+ }
+ break;
+ }
+ env->ucf64.xregs[UC32_UCF64_FPSCR] = (env->CF << 29)
+ | (env->ucf64.xregs[UC32_UCF64_FPSCR] & 0x0fffffff);
+}
+
+/* Helper routines to perform bitwise copies between float and int. */
+static inline float32 ucf64_itos(uint32_t i)
+{
+ union {
+ uint32_t i;
+ float32 s;
+ } v;
+
+ v.i = i;
+ return v.s;
+}
+
+static inline uint32_t ucf64_stoi(float32 s)
+{
+ union {
+ uint32_t i;
+ float32 s;
+ } v;
+
+ v.s = s;
+ return v.i;
+}
+
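+/* The conversion helpers below pass 32-bit integer values around in float32
+ * containers: ucf64_stoi()/ucf64_itos() just copy the raw bits, so e.g.
+ * ucf64_si2sf() reinterprets its float32 argument as an int32 before the
+ * real int32_to_float32() conversion.
+ */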
+/* Integer to float conversion. */
+float32 HELPER(ucf64_si2sf)(float32 x, CPUUniCore32State *env)
+{
+ return int32_to_float32(ucf64_stoi(x), &env->ucf64.fp_status);
+}
+
+float64 HELPER(ucf64_si2df)(float32 x, CPUUniCore32State *env)
+{
+ return int32_to_float64(ucf64_stoi(x), &env->ucf64.fp_status);
+}
+
+/* Float to integer conversion. */
+float32 HELPER(ucf64_sf2si)(float32 x, CPUUniCore32State *env)
+{
+ return ucf64_itos(float32_to_int32(x, &env->ucf64.fp_status));
+}
+
+float32 HELPER(ucf64_df2si)(float64 x, CPUUniCore32State *env)
+{
+ return ucf64_itos(float64_to_int32(x, &env->ucf64.fp_status));
+}
+
+/* floating point conversion */
+float64 HELPER(ucf64_sf2df)(float32 x, CPUUniCore32State *env)
+{
+ return float32_to_float64(x, &env->ucf64.fp_status);
+}
+
+float32 HELPER(ucf64_df2sf)(float64 x, CPUUniCore32State *env)
+{
+ return float64_to_float32(x, &env->ucf64.fp_status);
+}
diff --git a/target/xtensa/Makefile.objs b/target/xtensa/Makefile.objs
new file mode 100644
index 0000000000..481de91973
--- /dev/null
+++ b/target/xtensa/Makefile.objs
@@ -0,0 +1,7 @@
+obj-y += xtensa-semi.o
+obj-y += core-dc232b.o
+obj-y += core-dc233c.o
+obj-y += core-fsf.o
+obj-$(CONFIG_SOFTMMU) += monitor.o
+obj-y += translate.o op_helper.o helper.o cpu.o
+obj-y += gdbstub.o
diff --git a/target/xtensa/core-dc232b.c b/target/xtensa/core-dc232b.c
new file mode 100644
index 0000000000..bb8ed4197f
--- /dev/null
+++ b/target/xtensa/core-dc232b.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/gdbstub.h"
+#include "qemu/host-utils.h"
+
+#include "core-dc232b/core-isa.h"
+#include "overlay_tool.h"
+
+static XtensaConfig dc232b __attribute__((unused)) = {
+ .name = "dc232b",
+ .gdb_regmap = {
+ .num_regs = 120,
+ .num_core_regs = 52,
+ .reg = {
+#include "core-dc232b/gdb-config.c"
+ }
+ },
+ .clock_freq_khz = 10000,
+ DEFAULT_SECTIONS
+};
+
+REGISTER_CORE(dc232b)
diff --git a/target/xtensa/core-dc232b/core-isa.h b/target/xtensa/core-dc232b/core-isa.h
new file mode 100644
index 0000000000..a9935b87af
--- /dev/null
+++ b/target/xtensa/core-dc232b/core-isa.h
@@ -0,0 +1,422 @@
+/*
+ * Xtensa processor core configuration information.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1999-2007 Tensilica Inc.
+ */
+
+#ifndef XTENSA_DC232B_CORE_ISA_H
+#define XTENSA_DC232B_CORE_ISA_H
+
+/****************************************************************************
+ Parameters Useful for Any Code, USER or PRIVILEGED
+ ****************************************************************************/
+
+/*
+ * Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is
+ * configured, and a value of 0 otherwise. These macros are always defined.
+ */
+
+
+/*----------------------------------------------------------------------
+ ISA
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_BE 0 /* big-endian byte ordering */
+#define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */
+#define XCHAL_NUM_AREGS 32 /* num of physical addr regs */
+#define XCHAL_NUM_AREGS_LOG2 5 /* log2(XCHAL_NUM_AREGS) */
+#define XCHAL_MAX_INSTRUCTION_SIZE 3 /* max instr bytes (3..8) */
+#define XCHAL_HAVE_DEBUG 1 /* debug option */
+#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */
+#define XCHAL_HAVE_LOOPS 1 /* zero-overhead loops */
+#define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */
+#define XCHAL_HAVE_MINMAX 1 /* MIN/MAX instructions */
+#define XCHAL_HAVE_SEXT 1 /* SEXT instruction */
+#define XCHAL_HAVE_CLAMPS 1 /* CLAMPS instruction */
+#define XCHAL_HAVE_MUL16 1 /* MUL16S/MUL16U instructions */
+#define XCHAL_HAVE_MUL32 1 /* MULL instruction */
+#define XCHAL_HAVE_MUL32_HIGH 0 /* MULUH/MULSH instructions */
+#define XCHAL_HAVE_DIV32 1 /* QUOS/QUOU/REMS/REMU insns */
+#define XCHAL_HAVE_L32R 1 /* L32R instruction */
+#define XCHAL_HAVE_ABSOLUTE_LITERALS 1 /* non-PC-rel (extended) L32R */
+#define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */
+#define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */
+#define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */
+#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */
+#define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */
+#define XCHAL_HAVE_ABS 1 /* ABS instruction */
+/*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */
+/*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */
+#define XCHAL_HAVE_RELEASE_SYNC 1 /* L32AI/S32RI instructions */
+#define XCHAL_HAVE_S32C1I 1 /* S32C1I instruction */
+#define XCHAL_HAVE_SPECULATION 0 /* speculation */
+#define XCHAL_HAVE_FULL_RESET 1 /* all regs/state reset */
+#define XCHAL_NUM_CONTEXTS 1 /* */
+#define XCHAL_NUM_MISC_REGS 2 /* num of scratch regs (0..4) */
+#define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */
+#define XCHAL_HAVE_PRID 1 /* processor ID register */
+#define XCHAL_HAVE_THREADPTR 1 /* THREADPTR register */
+#define XCHAL_HAVE_BOOLEANS 0 /* boolean registers */
+#define XCHAL_HAVE_CP 1 /* CPENABLE reg (coprocessor) */
+#define XCHAL_CP_MAXCFG 8 /* max allowed cp id plus one */
+#define XCHAL_HAVE_MAC16 1 /* MAC16 package */
+#define XCHAL_HAVE_VECTORFPU2005 0 /* vector floating-point pkg */
+#define XCHAL_HAVE_FP 0 /* floating point pkg */
+#define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */
+#define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */
+#define XCHAL_HAVE_HIFI2 0 /* HiFi2 Audio Engine pkg */
+
+
+/*----------------------------------------------------------------------
+ MISC
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_WRITEBUFFER_ENTRIES 8 /* size of write buffer */
+#define XCHAL_INST_FETCH_WIDTH 4 /* instr-fetch width in bytes */
+#define XCHAL_DATA_WIDTH 4 /* data width in bytes */
+/* In T1050, applies to selected core load and store instructions (see ISA): */
+#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */
+#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc.*/
+
+#define XCHAL_SW_VERSION 701001 /* sw version of this header */
+
+#define XCHAL_CORE_ID "dc232b" /* alphanum core name
+ (CoreID) set in the Xtensa
+ Processor Generator */
+
+#define XCHAL_CORE_DESCRIPTION "Diamond 232L Standard Core Rev.B (LE)"
+#define XCHAL_BUILD_UNIQUE_ID 0x0000BEEF /* 22-bit sw build ID */
+
+/*
+ * These definitions describe the hardware targeted by this software.
+ */
+#define XCHAL_HW_CONFIGID0 0xC56307FE /* ConfigID hi 32 bits*/
+#define XCHAL_HW_CONFIGID1 0x0D40BEEF /* ConfigID lo 32 bits*/
+#define XCHAL_HW_VERSION_NAME "LX2.1.1" /* full version name */
+#define XCHAL_HW_VERSION_MAJOR 2210 /* major ver# of targeted hw */
+#define XCHAL_HW_VERSION_MINOR 1 /* minor ver# of targeted hw */
+#define XCHAL_HW_VERSION 221001 /* major*100+minor */
+#define XCHAL_HW_REL_LX2 1
+#define XCHAL_HW_REL_LX2_1 1
+#define XCHAL_HW_REL_LX2_1_1 1
+#define XCHAL_HW_CONFIGID_RELIABLE 1
+/* If software targets a *range* of hardware versions, these are the bounds: */
+#define XCHAL_HW_MIN_VERSION_MAJOR 2210 /* major v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION_MINOR 1 /* minor v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION 221001 /* earliest targeted hw */
+#define XCHAL_HW_MAX_VERSION_MAJOR 2210 /* major v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION_MINOR 1 /* minor v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION 221001 /* latest targeted hw */
+
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
+#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
+#define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
+#define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
+
+#define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
+#define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
+
+#define XCHAL_DCACHE_IS_WRITEBACK 1 /* writeback feature */
+
+
+
+
+/****************************************************************************
+ Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code
+ ****************************************************************************/
+
+
+#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_PIF 1 /* any outbound PIF present */
+
+/* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). */
+
+/* Number of cache sets in log2(lines per way): */
+#define XCHAL_ICACHE_SETWIDTH 7
+#define XCHAL_DCACHE_SETWIDTH 7
+
+/* Cache set associativity (number of ways): */
+#define XCHAL_ICACHE_WAYS 4
+#define XCHAL_DCACHE_WAYS 4
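+
+/* With the values above, the cache size formula gives
+   4 ways * 2^(5 + 7) = 16384 bytes, matching XCHAL_ICACHE_SIZE and
+   XCHAL_DCACHE_SIZE. */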
+
+/* Cache features: */
+#define XCHAL_ICACHE_LINE_LOCKABLE 1
+#define XCHAL_DCACHE_LINE_LOCKABLE 1
+#define XCHAL_ICACHE_ECC_PARITY 0
+#define XCHAL_DCACHE_ECC_PARITY 0
+
+/* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */
+#define XCHAL_CA_BITS 4
+
+
+/*----------------------------------------------------------------------
+ INTERNAL I/D RAM/ROMs and XLMI
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_INSTROM 0 /* number of core instr. ROMs */
+#define XCHAL_NUM_INSTRAM 0 /* number of core instr. RAMs */
+#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs */
+#define XCHAL_NUM_DATARAM 0 /* number of core data RAMs */
+#define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/
+#define XCHAL_NUM_XLMI 0 /* number of core XLMI ports */
+
+
+/*----------------------------------------------------------------------
+ INTERRUPTS and TIMERS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */
+#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */
+#define XCHAL_HAVE_NMI 1 /* non-maskable interrupt */
+#define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */
+#define XCHAL_NUM_TIMERS 3 /* number of CCOMPAREn regs */
+#define XCHAL_NUM_INTERRUPTS 22 /* number of interrupts */
+#define XCHAL_NUM_INTERRUPTS_LOG2 5 /* ceil(log2(NUM_INTERRUPTS)) */
+#define XCHAL_NUM_EXTINTERRUPTS 17 /* num of external interrupts */
+#define XCHAL_NUM_INTLEVELS 6 /* number of interrupt levels
+ (not including level zero) */
+#define XCHAL_EXCM_LEVEL 3 /* level masked by PS.EXCM */
+ /* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */
+
+/* Masks of interrupts at each interrupt level: */
+#define XCHAL_INTLEVEL1_MASK 0x001F80FF
+#define XCHAL_INTLEVEL2_MASK 0x00000100
+#define XCHAL_INTLEVEL3_MASK 0x00200E00
+#define XCHAL_INTLEVEL4_MASK 0x00001000
+#define XCHAL_INTLEVEL5_MASK 0x00002000
+#define XCHAL_INTLEVEL6_MASK 0x00000000
+#define XCHAL_INTLEVEL7_MASK 0x00004000
+
+/* Masks of interrupts at each range 1..n of interrupt levels: */
+#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x001F80FF
+#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x001F81FF
+#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x003F8FFF
+#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x003F9FFF
+#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x003FFFFF
+
+/* Level of each interrupt: */
+#define XCHAL_INT0_LEVEL 1
+#define XCHAL_INT1_LEVEL 1
+#define XCHAL_INT2_LEVEL 1
+#define XCHAL_INT3_LEVEL 1
+#define XCHAL_INT4_LEVEL 1
+#define XCHAL_INT5_LEVEL 1
+#define XCHAL_INT6_LEVEL 1
+#define XCHAL_INT7_LEVEL 1
+#define XCHAL_INT8_LEVEL 2
+#define XCHAL_INT9_LEVEL 3
+#define XCHAL_INT10_LEVEL 3
+#define XCHAL_INT11_LEVEL 3
+#define XCHAL_INT12_LEVEL 4
+#define XCHAL_INT13_LEVEL 5
+#define XCHAL_INT14_LEVEL 7
+#define XCHAL_INT15_LEVEL 1
+#define XCHAL_INT16_LEVEL 1
+#define XCHAL_INT17_LEVEL 1
+#define XCHAL_INT18_LEVEL 1
+#define XCHAL_INT19_LEVEL 1
+#define XCHAL_INT20_LEVEL 1
+#define XCHAL_INT21_LEVEL 3
+#define XCHAL_DEBUGLEVEL 6 /* debug interrupt level */
+#define XCHAL_HAVE_DEBUG_EXTERN_INT 1 /* OCD external db interrupt */
+#define XCHAL_NMILEVEL 7 /* NMI "level" (for use with
+ EXCSAVE/EPS/EPC_n, RFI n) */
+
+/* Type of each interrupt: */
+#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT6_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT7_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT10_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT11_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT12_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT13_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT14_TYPE XTHAL_INTTYPE_NMI
+#define XCHAL_INT15_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT16_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT17_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT18_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT19_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT20_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT21_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+
+/* Masks of interrupts for each type of interrupt: */
+#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFC00000
+#define XCHAL_INTTYPE_MASK_SOFTWARE 0x00000880
+#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x003F8000
+#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0000133F
+#define XCHAL_INTTYPE_MASK_TIMER 0x00002440
+#define XCHAL_INTTYPE_MASK_NMI 0x00004000
+#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000
+
+/* Interrupt numbers assigned to specific interrupt sources: */
+#define XCHAL_TIMER0_INTERRUPT 6 /* CCOMPARE0 */
+#define XCHAL_TIMER1_INTERRUPT 10 /* CCOMPARE1 */
+#define XCHAL_TIMER2_INTERRUPT 13 /* CCOMPARE2 */
+#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED
+#define XCHAL_NMI_INTERRUPT 14 /* non-maskable interrupt */
+
+/* Interrupt numbers for levels at which only one interrupt is configured: */
+#define XCHAL_INTLEVEL2_NUM 8
+#define XCHAL_INTLEVEL4_NUM 12
+#define XCHAL_INTLEVEL5_NUM 13
+#define XCHAL_INTLEVEL7_NUM 14
+/* (There are many interrupts each at level(s) 1, 3.) */
+
+
+/*
+ * External interrupt vectors/levels.
+ * These macros describe how Xtensa processor interrupt numbers
+ * (as numbered internally, eg. in INTERRUPT and INTENABLE registers)
+ * map to external BInterrupt<n> pins, for those interrupts
+ * configured as external (level-triggered, edge-triggered, or NMI).
+ * See the Xtensa processor databook for more details.
+ */
+
+/* Core interrupt numbers mapped to each EXTERNAL interrupt number: */
+#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */
+#define XCHAL_EXTINT1_NUM 1 /* (intlevel 1) */
+#define XCHAL_EXTINT2_NUM 2 /* (intlevel 1) */
+#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */
+#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */
+#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */
+#define XCHAL_EXTINT6_NUM 8 /* (intlevel 2) */
+#define XCHAL_EXTINT7_NUM 9 /* (intlevel 3) */
+#define XCHAL_EXTINT8_NUM 12 /* (intlevel 4) */
+#define XCHAL_EXTINT9_NUM 14 /* (intlevel 7) */
+#define XCHAL_EXTINT10_NUM 15 /* (intlevel 1) */
+#define XCHAL_EXTINT11_NUM 16 /* (intlevel 1) */
+#define XCHAL_EXTINT12_NUM 17 /* (intlevel 1) */
+#define XCHAL_EXTINT13_NUM 18 /* (intlevel 1) */
+#define XCHAL_EXTINT14_NUM 19 /* (intlevel 1) */
+#define XCHAL_EXTINT15_NUM 20 /* (intlevel 1) */
+#define XCHAL_EXTINT16_NUM 21 /* (intlevel 3) */
+
+
+/*----------------------------------------------------------------------
+ EXCEPTIONS and VECTORS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture
+ number: 1 == XEA1 (old)
+ 2 == XEA2 (new)
+ 0 == XEAX (extern) */
+#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */
+#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */
+#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */
+#define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */
+#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */
+#define XCHAL_HAVE_VECTOR_SELECT 1 /* relocatable vectors */
+#define XCHAL_HAVE_VECBASE 1 /* relocatable vectors */
+#define XCHAL_VECBASE_RESET_VADDR 0xD0000000 /* VECBASE reset value */
+#define XCHAL_VECBASE_RESET_PADDR 0x00000000
+#define XCHAL_RESET_VECBASE_OVERLAP 0
+
+#define XCHAL_RESET_VECTOR0_VADDR 0xFE000000
+#define XCHAL_RESET_VECTOR0_PADDR 0xFE000000
+#define XCHAL_RESET_VECTOR1_VADDR 0xD8000500
+#define XCHAL_RESET_VECTOR1_PADDR 0x00000500
+#define XCHAL_RESET_VECTOR_VADDR 0xFE000000
+#define XCHAL_RESET_VECTOR_PADDR 0xFE000000
+#define XCHAL_USER_VECOFS 0x00000340
+#define XCHAL_USER_VECTOR_VADDR 0xD0000340
+#define XCHAL_USER_VECTOR_PADDR 0x00000340
+#define XCHAL_KERNEL_VECOFS 0x00000300
+#define XCHAL_KERNEL_VECTOR_VADDR 0xD0000300
+#define XCHAL_KERNEL_VECTOR_PADDR 0x00000300
+#define XCHAL_DOUBLEEXC_VECOFS 0x000003C0
+#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0xD00003C0
+#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x000003C0
+#define XCHAL_WINDOW_OF4_VECOFS 0x00000000
+#define XCHAL_WINDOW_UF4_VECOFS 0x00000040
+#define XCHAL_WINDOW_OF8_VECOFS 0x00000080
+#define XCHAL_WINDOW_UF8_VECOFS 0x000000C0
+#define XCHAL_WINDOW_OF12_VECOFS 0x00000100
+#define XCHAL_WINDOW_UF12_VECOFS 0x00000140
+#define XCHAL_WINDOW_VECTORS_VADDR 0xD0000000
+#define XCHAL_WINDOW_VECTORS_PADDR 0x00000000
+#define XCHAL_INTLEVEL2_VECOFS 0x00000180
+#define XCHAL_INTLEVEL2_VECTOR_VADDR 0xD0000180
+#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x00000180
+#define XCHAL_INTLEVEL3_VECOFS 0x000001C0
+#define XCHAL_INTLEVEL3_VECTOR_VADDR 0xD00001C0
+#define XCHAL_INTLEVEL3_VECTOR_PADDR 0x000001C0
+#define XCHAL_INTLEVEL4_VECOFS 0x00000200
+#define XCHAL_INTLEVEL4_VECTOR_VADDR 0xD0000200
+#define XCHAL_INTLEVEL4_VECTOR_PADDR 0x00000200
+#define XCHAL_INTLEVEL5_VECOFS 0x00000240
+#define XCHAL_INTLEVEL5_VECTOR_VADDR 0xD0000240
+#define XCHAL_INTLEVEL5_VECTOR_PADDR 0x00000240
+#define XCHAL_INTLEVEL6_VECOFS 0x00000280
+#define XCHAL_INTLEVEL6_VECTOR_VADDR 0xD0000280
+#define XCHAL_INTLEVEL6_VECTOR_PADDR 0x00000280
+#define XCHAL_DEBUG_VECOFS XCHAL_INTLEVEL6_VECOFS
+#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
+#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL6_VECTOR_PADDR
+#define XCHAL_NMI_VECOFS 0x000002C0
+#define XCHAL_NMI_VECTOR_VADDR 0xD00002C0
+#define XCHAL_NMI_VECTOR_PADDR 0x000002C0
+#define XCHAL_INTLEVEL7_VECOFS XCHAL_NMI_VECOFS
+#define XCHAL_INTLEVEL7_VECTOR_VADDR XCHAL_NMI_VECTOR_VADDR
+#define XCHAL_INTLEVEL7_VECTOR_PADDR XCHAL_NMI_VECTOR_PADDR
+
+
+/*----------------------------------------------------------------------
+ DEBUG
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_OCD 1 /* OnChipDebug option */
+#define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */
+#define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */
+#define XCHAL_HAVE_OCD_DIR_ARRAY 1 /* faster OCD option */
+
+
+/*----------------------------------------------------------------------
+ MMU
+ ----------------------------------------------------------------------*/
+
+/* See core-matmap.h header file for more details. */
+
+#define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */
+#define XCHAL_HAVE_SPANNING_WAY 0 /* one way maps I+D 4GB vaddr */
+#define XCHAL_HAVE_IDENTITY_MAP 0 /* vaddr == paddr always */
+#define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */
+#define XCHAL_HAVE_MIMIC_CACHEATTR 0 /* region protection */
+#define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */
+#define XCHAL_HAVE_PTP_MMU 1 /* full MMU (with page table
+ [autorefill] and protection)
+ usable for an MMU-based OS */
+/* If none of the above last 4 are set, it's a custom TLB configuration. */
+#define XCHAL_ITLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+#define XCHAL_DTLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+
+#define XCHAL_MMU_ASID_BITS 8 /* number of bits in ASIDs */
+#define XCHAL_MMU_RINGS 4 /* number of rings (1..4) */
+#define XCHAL_MMU_RING_BITS 2 /* num of bits in RING field */
+
+#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
+
+
+#endif /* XTENSA_DC232B_CORE_ISA_H */
diff --git a/target/xtensa/core-dc232b/gdb-config.c b/target/xtensa/core-dc232b/gdb-config.c
new file mode 100644
index 0000000000..13aba5edec
--- /dev/null
+++ b/target/xtensa/core-dc232b/gdb-config.c
@@ -0,0 +1,261 @@
+/* Configuration for the Xtensa architecture for GDB, the GNU debugger.
+
+ Copyright (C) 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
+
+ This file is part of GDB.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+ XTREG(0, 0, 32, 4, 4, 0x0020, 0x0006, -2, 9, 0x0100, pc,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(1, 4, 32, 4, 4, 0x0100, 0x0006, -2, 1, 0x0002, ar0,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(2, 8, 32, 4, 4, 0x0101, 0x0006, -2, 1, 0x0002, ar1,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(3, 12, 32, 4, 4, 0x0102, 0x0006, -2, 1, 0x0002, ar2,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(4, 16, 32, 4, 4, 0x0103, 0x0006, -2, 1, 0x0002, ar3,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(5, 20, 32, 4, 4, 0x0104, 0x0006, -2, 1, 0x0002, ar4,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(6, 24, 32, 4, 4, 0x0105, 0x0006, -2, 1, 0x0002, ar5,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(7, 28, 32, 4, 4, 0x0106, 0x0006, -2, 1, 0x0002, ar6,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(8, 32, 32, 4, 4, 0x0107, 0x0006, -2, 1, 0x0002, ar7,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(9, 36, 32, 4, 4, 0x0108, 0x0006, -2, 1, 0x0002, ar8,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(10, 40, 32, 4, 4, 0x0109, 0x0006, -2, 1, 0x0002, ar9,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(11, 44, 32, 4, 4, 0x010a, 0x0006, -2, 1, 0x0002, ar10,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(12, 48, 32, 4, 4, 0x010b, 0x0006, -2, 1, 0x0002, ar11,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(13, 52, 32, 4, 4, 0x010c, 0x0006, -2, 1, 0x0002, ar12,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(14, 56, 32, 4, 4, 0x010d, 0x0006, -2, 1, 0x0002, ar13,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(15, 60, 32, 4, 4, 0x010e, 0x0006, -2, 1, 0x0002, ar14,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(16, 64, 32, 4, 4, 0x010f, 0x0006, -2, 1, 0x0002, ar15,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(17, 68, 32, 4, 4, 0x0110, 0x0006, -2, 1, 0x0002, ar16,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(18, 72, 32, 4, 4, 0x0111, 0x0006, -2, 1, 0x0002, ar17,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(19, 76, 32, 4, 4, 0x0112, 0x0006, -2, 1, 0x0002, ar18,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(20, 80, 32, 4, 4, 0x0113, 0x0006, -2, 1, 0x0002, ar19,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(21, 84, 32, 4, 4, 0x0114, 0x0006, -2, 1, 0x0002, ar20,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(22, 88, 32, 4, 4, 0x0115, 0x0006, -2, 1, 0x0002, ar21,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(23, 92, 32, 4, 4, 0x0116, 0x0006, -2, 1, 0x0002, ar22,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(24, 96, 32, 4, 4, 0x0117, 0x0006, -2, 1, 0x0002, ar23,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(25, 100, 32, 4, 4, 0x0118, 0x0006, -2, 1, 0x0002, ar24,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(26, 104, 32, 4, 4, 0x0119, 0x0006, -2, 1, 0x0002, ar25,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(27, 108, 32, 4, 4, 0x011a, 0x0006, -2, 1, 0x0002, ar26,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(28, 112, 32, 4, 4, 0x011b, 0x0006, -2, 1, 0x0002, ar27,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(29, 116, 32, 4, 4, 0x011c, 0x0006, -2, 1, 0x0002, ar28,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(30, 120, 32, 4, 4, 0x011d, 0x0006, -2, 1, 0x0002, ar29,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(31, 124, 32, 4, 4, 0x011e, 0x0006, -2, 1, 0x0002, ar30,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(32, 128, 32, 4, 4, 0x011f, 0x0006, -2, 1, 0x0002, ar31,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(33, 132, 32, 4, 4, 0x0200, 0x0006, -2, 2, 0x1100, lbeg,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(34, 136, 32, 4, 4, 0x0201, 0x0006, -2, 2, 0x1100, lend,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(35, 140, 32, 4, 4, 0x0202, 0x0006, -2, 2, 0x1100, lcount,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(36, 144, 6, 4, 4, 0x0203, 0x0006, -2, 2, 0x1100, sar,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(37, 148, 32, 4, 4, 0x0205, 0x0006, -2, 2, 0x1100, litbase,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(38, 152, 3, 4, 4, 0x0248, 0x0006, -2, 2, 0x1002, windowbase,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(39, 156, 8, 4, 4, 0x0249, 0x0006, -2, 2, 0x1002, windowstart,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(40, 160, 32, 4, 4, 0x02b0, 0x0002, -2, 2, 0x1000, sr176,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(41, 164, 32, 4, 4, 0x02d0, 0x0002, -2, 2, 0x1000, sr208,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(42, 168, 19, 4, 4, 0x02e6, 0x0006, -2, 2, 0x1100, ps,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(43, 172, 32, 4, 4, 0x03e7, 0x0006, -2, 3, 0x0110, threadptr,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(44, 176, 32, 4, 4, 0x020c, 0x0006, -1, 2, 0x1100, scompare1,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(45, 180, 32, 4, 4, 0x0210, 0x0006, -1, 2, 0x1100, acclo,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(46, 184, 8, 4, 4, 0x0211, 0x0006, -1, 2, 0x1100, acchi,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(47, 188, 32, 4, 4, 0x0220, 0x0006, -1, 2, 0x1100, m0,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(48, 192, 32, 4, 4, 0x0221, 0x0006, -1, 2, 0x1100, m1,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(49, 196, 32, 4, 4, 0x0222, 0x0006, -1, 2, 0x1100, m2,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(50, 200, 32, 4, 4, 0x0223, 0x0006, -1, 2, 0x1100, m3,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(51, 204, 32, 4, 4, 0x03e6, 0x000e, -1, 3, 0x0110, expstate,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(52, 208, 32, 4, 4, 0x0253, 0x0007, -2, 2, 0x1000, ptevaddr,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(53, 212, 32, 4, 4, 0x0259, 0x000d, -2, 2, 0x1000, mmid,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(54, 216, 32, 4, 4, 0x025a, 0x0007, -2, 2, 0x1000, rasid,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(55, 220, 18, 4, 4, 0x025b, 0x0007, -2, 2, 0x1000, itlbcfg,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(56, 224, 18, 4, 4, 0x025c, 0x0007, -2, 2, 0x1000, dtlbcfg,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(57, 228, 2, 4, 4, 0x0260, 0x0007, -2, 2, 0x1000, ibreakenable,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(58, 232, 32, 4, 4, 0x0268, 0x0007, -2, 2, 0x1000, ddr,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(59, 236, 32, 4, 4, 0x0280, 0x0007, -2, 2, 0x1000, ibreaka0,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(60, 240, 32, 4, 4, 0x0281, 0x0007, -2, 2, 0x1000, ibreaka1,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(61, 244, 32, 4, 4, 0x0290, 0x0007, -2, 2, 0x1000, dbreaka0,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(62, 248, 32, 4, 4, 0x0291, 0x0007, -2, 2, 0x1000, dbreaka1,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(63, 252, 32, 4, 4, 0x02a0, 0x0007, -2, 2, 0x1000, dbreakc0,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(64, 256, 32, 4, 4, 0x02a1, 0x0007, -2, 2, 0x1000, dbreakc1,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(65, 260, 32, 4, 4, 0x02b1, 0x0007, -2, 2, 0x1000, epc1,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(66, 264, 32, 4, 4, 0x02b2, 0x0007, -2, 2, 0x1000, epc2,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(67, 268, 32, 4, 4, 0x02b3, 0x0007, -2, 2, 0x1000, epc3,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(68, 272, 32, 4, 4, 0x02b4, 0x0007, -2, 2, 0x1000, epc4,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(69, 276, 32, 4, 4, 0x02b5, 0x0007, -2, 2, 0x1000, epc5,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(70, 280, 32, 4, 4, 0x02b6, 0x0007, -2, 2, 0x1000, epc6,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(71, 284, 32, 4, 4, 0x02b7, 0x0007, -2, 2, 0x1000, epc7,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(72, 288, 32, 4, 4, 0x02c0, 0x0007, -2, 2, 0x1000, depc,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(73, 292, 19, 4, 4, 0x02c2, 0x0007, -2, 2, 0x1000, eps2,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(74, 296, 19, 4, 4, 0x02c3, 0x0007, -2, 2, 0x1000, eps3,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(75, 300, 19, 4, 4, 0x02c4, 0x0007, -2, 2, 0x1000, eps4,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(76, 304, 19, 4, 4, 0x02c5, 0x0007, -2, 2, 0x1000, eps5,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(77, 308, 19, 4, 4, 0x02c6, 0x0007, -2, 2, 0x1000, eps6,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(78, 312, 19, 4, 4, 0x02c7, 0x0007, -2, 2, 0x1000, eps7,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(79, 316, 32, 4, 4, 0x02d1, 0x0007, -2, 2, 0x1000, excsave1,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(80, 320, 32, 4, 4, 0x02d2, 0x0007, -2, 2, 0x1000, excsave2,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(81, 324, 32, 4, 4, 0x02d3, 0x0007, -2, 2, 0x1000, excsave3,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(82, 328, 32, 4, 4, 0x02d4, 0x0007, -2, 2, 0x1000, excsave4,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(83, 332, 32, 4, 4, 0x02d5, 0x0007, -2, 2, 0x1000, excsave5,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(84, 336, 32, 4, 4, 0x02d6, 0x0007, -2, 2, 0x1000, excsave6,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(85, 340, 32, 4, 4, 0x02d7, 0x0007, -2, 2, 0x1000, excsave7,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(86, 344, 8, 4, 4, 0x02e0, 0x0007, -2, 2, 0x1000, cpenable,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(87, 348, 22, 4, 4, 0x02e2, 0x000b, -2, 2, 0x1000, interrupt,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(88, 352, 22, 4, 4, 0x02e2, 0x000d, -2, 2, 0x1000, intset,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(89, 356, 22, 4, 4, 0x02e3, 0x000d, -2, 2, 0x1000, intclear,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(90, 360, 22, 4, 4, 0x02e4, 0x0007, -2, 2, 0x1000, intenable,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(91, 364, 32, 4, 4, 0x02e7, 0x0007, -2, 2, 0x1000, vecbase,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(92, 368, 6, 4, 4, 0x02e8, 0x0007, -2, 2, 0x1000, exccause,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(93, 372, 12, 4, 4, 0x02e9, 0x0003, -2, 2, 0x1000, debugcause,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(94, 376, 32, 4, 4, 0x02ea, 0x000f, -2, 2, 0x1000, ccount,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(95, 380, 32, 4, 4, 0x02eb, 0x0003, -2, 2, 0x1000, prid,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(96, 384, 32, 4, 4, 0x02ec, 0x000f, -2, 2, 0x1000, icount,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(97, 388, 4, 4, 4, 0x02ed, 0x0007, -2, 2, 0x1000, icountlevel,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(98, 392, 32, 4, 4, 0x02ee, 0x0007, -2, 2, 0x1000, excvaddr,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(99, 396, 32, 4, 4, 0x02f0, 0x000f, -2, 2, 0x1000, ccompare0,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(100, 400, 32, 4, 4, 0x02f1, 0x000f, -2, 2, 0x1000, ccompare1,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(101, 404, 32, 4, 4, 0x02f2, 0x000f, -2, 2, 0x1000, ccompare2,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(102, 408, 32, 4, 4, 0x02f4, 0x0007, -2, 2, 0x1000, misc0,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(103, 412, 32, 4, 4, 0x02f5, 0x0007, -2, 2, 0x1000, misc1,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(104, 416, 32, 4, 4, 0x0000, 0x0006, -2, 8, 0x0100, a0,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(105, 420, 32, 4, 4, 0x0001, 0x0006, -2, 8, 0x0100, a1,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(106, 424, 32, 4, 4, 0x0002, 0x0006, -2, 8, 0x0100, a2,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(107, 428, 32, 4, 4, 0x0003, 0x0006, -2, 8, 0x0100, a3,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(108, 432, 32, 4, 4, 0x0004, 0x0006, -2, 8, 0x0100, a4,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(109, 436, 32, 4, 4, 0x0005, 0x0006, -2, 8, 0x0100, a5,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(110, 440, 32, 4, 4, 0x0006, 0x0006, -2, 8, 0x0100, a6,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(111, 444, 32, 4, 4, 0x0007, 0x0006, -2, 8, 0x0100, a7,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(112, 448, 32, 4, 4, 0x0008, 0x0006, -2, 8, 0x0100, a8,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(113, 452, 32, 4, 4, 0x0009, 0x0006, -2, 8, 0x0100, a9,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(114, 456, 32, 4, 4, 0x000a, 0x0006, -2, 8, 0x0100, a10,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(115, 460, 32, 4, 4, 0x000b, 0x0006, -2, 8, 0x0100, a11,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(116, 464, 32, 4, 4, 0x000c, 0x0006, -2, 8, 0x0100, a12,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(117, 468, 32, 4, 4, 0x000d, 0x0006, -2, 8, 0x0100, a13,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(118, 472, 32, 4, 4, 0x000e, 0x0006, -2, 8, 0x0100, a14,
+ 0, 0, 0, 0, 0, 0)
+ XTREG(119, 476, 32, 4, 4, 0x000f, 0x0006, -2, 8, 0x0100, a15,
+ 0, 0, 0, 0, 0, 0)
diff --git a/target/xtensa/core-dc233c.c b/target/xtensa/core-dc233c.c
new file mode 100644
index 0000000000..40475e5205
--- /dev/null
+++ b/target/xtensa/core-dc233c.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2012, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/gdbstub.h"
+#include "qemu-common.h"
+#include "qemu/host-utils.h"
+
+#include "core-dc233c/core-isa.h"
+#include "overlay_tool.h"
+
+static XtensaConfig dc233c __attribute__((unused)) = {
+ .name = "dc233c",
+ .gdb_regmap = {
+ .num_regs = 121,
+ .num_core_regs = 52,
+ .reg = {
+#include "core-dc233c/gdb-config.c"
+ }
+ },
+ .clock_freq_khz = 10000,
+ DEFAULT_SECTIONS
+};
+
+REGISTER_CORE(dc233c)
diff --git a/target/xtensa/core-dc233c/core-isa.h b/target/xtensa/core-dc233c/core-isa.h
new file mode 100644
index 0000000000..ff92b7f3ed
--- /dev/null
+++ b/target/xtensa/core-dc233c/core-isa.h
@@ -0,0 +1,473 @@
+/*
+ * xtensa/config/core-isa.h -- HAL definitions that are dependent on Xtensa
+ * processor CORE configuration
+ *
+ * See <xtensa/config/core.h>, which includes this file, for more details.
+ */
+
+/* Xtensa processor core configuration information.
+
+ Copyright (c) 1999-2010 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef XTENSA_DC233C_CORE_ISA_H
+#define XTENSA_DC233C_CORE_ISA_H
+
+/****************************************************************************
+ Parameters Useful for Any Code, USER or PRIVILEGED
+ ****************************************************************************/
+
+/*
+ * Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is
+ * configured, and a value of 0 otherwise. These macros are always defined.
+ */
+
+
+/*----------------------------------------------------------------------
+ ISA
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_BE 0 /* big-endian byte ordering */
+#define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */
+#define XCHAL_NUM_AREGS 32 /* num of physical addr regs */
+#define XCHAL_NUM_AREGS_LOG2 5 /* log2(XCHAL_NUM_AREGS) */
+#define XCHAL_MAX_INSTRUCTION_SIZE 3 /* max instr bytes (3..8) */
+#define XCHAL_HAVE_DEBUG 1 /* debug option */
+#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */
+#define XCHAL_HAVE_LOOPS 1 /* zero-overhead loops */
+#define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */
+#define XCHAL_HAVE_MINMAX 1 /* MIN/MAX instructions */
+#define XCHAL_HAVE_SEXT 1 /* SEXT instruction */
+#define XCHAL_HAVE_CLAMPS 1 /* CLAMPS instruction */
+#define XCHAL_HAVE_MUL16 1 /* MUL16S/MUL16U instructions */
+#define XCHAL_HAVE_MUL32 1 /* MULL instruction */
+#define XCHAL_HAVE_MUL32_HIGH 0 /* MULUH/MULSH instructions */
+#define XCHAL_HAVE_DIV32 1 /* QUOS/QUOU/REMS/REMU instructions */
+#define XCHAL_HAVE_L32R 1 /* L32R instruction */
+#define XCHAL_HAVE_ABSOLUTE_LITERALS 1 /* non-PC-rel (extended) L32R */
+#define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */
+#define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */
+#define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */
+#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */
+#define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */
+#define XCHAL_HAVE_ABS 1 /* ABS instruction */
+/*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */
+/*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */
+#define XCHAL_HAVE_RELEASE_SYNC 1 /* L32AI/S32RI instructions */
+#define XCHAL_HAVE_S32C1I 1 /* S32C1I instruction */
+#define XCHAL_HAVE_SPECULATION 0 /* speculation */
+#define XCHAL_HAVE_FULL_RESET 1 /* all regs/state reset */
+#define XCHAL_NUM_CONTEXTS 1 /* */
+#define XCHAL_NUM_MISC_REGS 2 /* num of scratch regs (0..4) */
+#define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */
+#define XCHAL_HAVE_PRID 1 /* processor ID register */
+#define XCHAL_HAVE_EXTERN_REGS 1 /* WER/RER instructions */
+#define XCHAL_HAVE_MP_INTERRUPTS 0 /* interrupt distributor port */
+#define XCHAL_HAVE_MP_RUNSTALL 0 /* core RunStall control port */
+#define XCHAL_HAVE_THREADPTR 1 /* THREADPTR register */
+#define XCHAL_HAVE_BOOLEANS 0 /* boolean registers */
+#define XCHAL_HAVE_CP 1 /* CPENABLE reg (coprocessor) */
+#define XCHAL_CP_MAXCFG 8 /* max allowed cp id plus one */
+#define XCHAL_HAVE_MAC16 1 /* MAC16 package */
+#define XCHAL_HAVE_VECTORFPU2005 0 /* vector floating-point pkg */
+#define XCHAL_HAVE_FP 0 /* floating point pkg */
+#define XCHAL_HAVE_DFP 0 /* double precision FP pkg */
+#define XCHAL_HAVE_DFP_accel 0 /* double precision FP acceleration pkg */
+#define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */
+#define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */
+#define XCHAL_HAVE_HIFIPRO 0 /* HiFiPro Audio Engine pkg */
+#define XCHAL_HAVE_HIFI2 0 /* HiFi2 Audio Engine pkg */
+#define XCHAL_HAVE_HIFI2EP 0 /* HiFi2EP */
+#define XCHAL_HAVE_CONNXD2 0 /* ConnX D2 pkg */
+#define XCHAL_HAVE_BBE16 0 /* ConnX BBE16 pkg */
+#define XCHAL_HAVE_BBE16_RSQRT 0 /* BBE16 & vector recip sqrt */
+#define XCHAL_HAVE_BBE16_VECDIV 0 /* BBE16 & vector divide */
+#define XCHAL_HAVE_BBE16_DESPREAD 0 /* BBE16 & despread */
+#define XCHAL_HAVE_BSP3 0 /* ConnX BSP3 pkg */
+#define XCHAL_HAVE_SSP16 0 /* ConnX SSP16 pkg */
+#define XCHAL_HAVE_SSP16_VITERBI 0 /* SSP16 & viterbi */
+#define XCHAL_HAVE_TURBO16 0 /* ConnX Turbo16 pkg */
+#define XCHAL_HAVE_BBP16 0 /* ConnX BBP16 pkg */
+
+
+/*----------------------------------------------------------------------
+ MISC
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_WRITEBUFFER_ENTRIES 8 /* size of write buffer */
+#define XCHAL_INST_FETCH_WIDTH 4 /* instr-fetch width in bytes */
+#define XCHAL_DATA_WIDTH 4 /* data width in bytes */
+/* In T1050, applies to selected core load and store instructions (see ISA): */
+#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */
+#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc.*/
+#define XCHAL_UNALIGNED_LOAD_HW 0 /* unaligned loads work in hw */
+#define XCHAL_UNALIGNED_STORE_HW 0 /* unaligned stores work in hw*/
+
+#define XCHAL_SW_VERSION 900001 /* sw version of this header */
+
+#define XCHAL_CORE_ID "dc233c" /* alphanum core name
+ (CoreID) set in the Xtensa
+ Processor Generator */
+
+#define XCHAL_CORE_DESCRIPTION "dc233c"
+#define XCHAL_BUILD_UNIQUE_ID 0x00004B21 /* 22-bit sw build ID */
+
+/*
+ * These definitions describe the hardware targeted by this software.
+ */
+#define XCHAL_HW_CONFIGID0 0xC56707FE /* ConfigID hi 32 bits*/
+#define XCHAL_HW_CONFIGID1 0x14404B21 /* ConfigID lo 32 bits*/
+#define XCHAL_HW_VERSION_NAME "LX4.0.1" /* full version name */
+#define XCHAL_HW_VERSION_MAJOR 2400 /* major ver# of targeted hw */
+#define XCHAL_HW_VERSION_MINOR 1 /* minor ver# of targeted hw */
+#define XCHAL_HW_VERSION 240001 /* major*100+minor */
+#define XCHAL_HW_REL_LX4 1
+#define XCHAL_HW_REL_LX4_0 1
+#define XCHAL_HW_REL_LX4_0_1 1
+#define XCHAL_HW_CONFIGID_RELIABLE 1
+/* If software targets a *range* of hardware versions, these are the bounds: */
+#define XCHAL_HW_MIN_VERSION_MAJOR 2400 /* major v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION_MINOR 1 /* minor v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION 240001 /* earliest targeted hw */
+#define XCHAL_HW_MAX_VERSION_MAJOR 2400 /* major v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION_MINOR 1 /* minor v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION 240001 /* latest targeted hw */
+
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
+#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
+#define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
+#define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
+
+#define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
+#define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
+
+#define XCHAL_DCACHE_IS_WRITEBACK 1 /* writeback feature */
+#define XCHAL_DCACHE_IS_COHERENT 0 /* MP coherence feature */
+
+#define XCHAL_HAVE_PREFETCH 0 /* PREFCTL register */
+
+
+
+
+/****************************************************************************
+ Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code
+ ****************************************************************************/
+
+
+#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_PIF 1 /* any outbound PIF present */
+
+/* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). */
+
+/* Number of cache sets in log2(lines per way): */
+#define XCHAL_ICACHE_SETWIDTH 7
+#define XCHAL_DCACHE_SETWIDTH 7
+
+/* Cache set associativity (number of ways): */
+#define XCHAL_ICACHE_WAYS 4
+#define XCHAL_DCACHE_WAYS 4
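+/* Worked check of the cache-size formula above for this core:
+   4 ways * 2^(5 + 7) = 16384 bytes for both caches, matching the
+   XCHAL_ICACHE_SIZE and XCHAL_DCACHE_SIZE values defined earlier. */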
+
+/* Cache features: */
+#define XCHAL_ICACHE_LINE_LOCKABLE 1
+#define XCHAL_DCACHE_LINE_LOCKABLE 1
+#define XCHAL_ICACHE_ECC_PARITY 0
+#define XCHAL_DCACHE_ECC_PARITY 0
+
+/* Cache access size in bytes (affects operation of SICW instruction): */
+#define XCHAL_ICACHE_ACCESS_SIZE 4
+#define XCHAL_DCACHE_ACCESS_SIZE 4
+
+/* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */
+#define XCHAL_CA_BITS 4
+
+
+/*----------------------------------------------------------------------
+ INTERNAL I/D RAM/ROMs and XLMI
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_INSTROM 0 /* number of core instr. ROMs */
+#define XCHAL_NUM_INSTRAM 0 /* number of core instr. RAMs */
+#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs */
+#define XCHAL_NUM_DATARAM 0 /* number of core data RAMs */
+#define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/
+#define XCHAL_NUM_XLMI 0 /* number of core XLMI ports */
+
+#define XCHAL_HAVE_IMEM_LOADSTORE 1 /* can load/store to IROM/IRAM*/
+
+
+/*----------------------------------------------------------------------
+ INTERRUPTS and TIMERS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */
+#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */
+#define XCHAL_HAVE_NMI 1 /* non-maskable interrupt */
+#define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */
+#define XCHAL_NUM_TIMERS 3 /* number of CCOMPAREn regs */
+#define XCHAL_NUM_INTERRUPTS 22 /* number of interrupts */
+#define XCHAL_NUM_INTERRUPTS_LOG2 5 /* ceil(log2(NUM_INTERRUPTS)) */
+#define XCHAL_NUM_EXTINTERRUPTS 17 /* num of external interrupts */
+#define XCHAL_NUM_INTLEVELS 6 /* number of interrupt levels
+ (not including level zero) */
+#define XCHAL_EXCM_LEVEL 3 /* level masked by PS.EXCM */
+/* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */
+
+/* Masks of interrupts at each interrupt level: */
+#define XCHAL_INTLEVEL1_MASK 0x001F80FF
+#define XCHAL_INTLEVEL2_MASK 0x00000100
+#define XCHAL_INTLEVEL3_MASK 0x00200E00
+#define XCHAL_INTLEVEL4_MASK 0x00001000
+#define XCHAL_INTLEVEL5_MASK 0x00002000
+#define XCHAL_INTLEVEL6_MASK 0x00000000
+#define XCHAL_INTLEVEL7_MASK 0x00004000
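+/* For example, XCHAL_INTLEVEL1_MASK covers bits 0..7 and 15..20
+   (0x001F80FF), i.e. exactly the interrupts assigned level 1 in the
+   per-interrupt level list below. */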
+
+/* Masks of interrupts at each range 1..n of interrupt levels: */
+#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x001F80FF
+#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x001F81FF
+#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x003F8FFF
+#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x003F9FFF
+#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x003FBFFF
+#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x003FFFFF
+
+/* Level of each interrupt: */
+#define XCHAL_INT0_LEVEL 1
+#define XCHAL_INT1_LEVEL 1
+#define XCHAL_INT2_LEVEL 1
+#define XCHAL_INT3_LEVEL 1
+#define XCHAL_INT4_LEVEL 1
+#define XCHAL_INT5_LEVEL 1
+#define XCHAL_INT6_LEVEL 1
+#define XCHAL_INT7_LEVEL 1
+#define XCHAL_INT8_LEVEL 2
+#define XCHAL_INT9_LEVEL 3
+#define XCHAL_INT10_LEVEL 3
+#define XCHAL_INT11_LEVEL 3
+#define XCHAL_INT12_LEVEL 4
+#define XCHAL_INT13_LEVEL 5
+#define XCHAL_INT14_LEVEL 7
+#define XCHAL_INT15_LEVEL 1
+#define XCHAL_INT16_LEVEL 1
+#define XCHAL_INT17_LEVEL 1
+#define XCHAL_INT18_LEVEL 1
+#define XCHAL_INT19_LEVEL 1
+#define XCHAL_INT20_LEVEL 1
+#define XCHAL_INT21_LEVEL 3
+#define XCHAL_DEBUGLEVEL 6 /* debug interrupt level */
+#define XCHAL_HAVE_DEBUG_EXTERN_INT 1 /* OCD external db interrupt */
+#define XCHAL_NMILEVEL 7 /* NMI "level" (for use with
+ EXCSAVE/EPS/EPC_n, RFI n) */
+
+/* Type of each interrupt: */
+#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT6_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT7_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT10_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT11_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT12_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT13_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT14_TYPE XTHAL_INTTYPE_NMI
+#define XCHAL_INT15_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT16_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT17_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT18_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT19_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT20_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT21_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+
+/* Masks of interrupts for each type of interrupt: */
+#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFC00000
+#define XCHAL_INTTYPE_MASK_SOFTWARE 0x00000880
+#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x003F8000
+#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0000133F
+#define XCHAL_INTTYPE_MASK_TIMER 0x00002440
+#define XCHAL_INTTYPE_MASK_NMI 0x00004000
+#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000
+
+/* Interrupt numbers assigned to specific interrupt sources: */
+#define XCHAL_TIMER0_INTERRUPT 6 /* CCOMPARE0 */
+#define XCHAL_TIMER1_INTERRUPT 10 /* CCOMPARE1 */
+#define XCHAL_TIMER2_INTERRUPT 13 /* CCOMPARE2 */
+#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED
+#define XCHAL_NMI_INTERRUPT 14 /* non-maskable interrupt */
+
+/* Interrupt numbers for levels at which only one interrupt is configured: */
+#define XCHAL_INTLEVEL2_NUM 8
+#define XCHAL_INTLEVEL4_NUM 12
+#define XCHAL_INTLEVEL5_NUM 13
+#define XCHAL_INTLEVEL7_NUM 14
+/* (There are many interrupts each at level(s) 1, 3.) */
+
+
+/*
+ * External interrupt vectors/levels.
+ * These macros describe how Xtensa processor interrupt numbers
+ * (as numbered internally, eg. in INTERRUPT and INTENABLE registers)
+ * map to external BInterrupt<n> pins, for those interrupts
+ * configured as external (level-triggered, edge-triggered, or NMI).
+ * See the Xtensa processor databook for more details.
+ */
+
+/* Core interrupt numbers mapped to each EXTERNAL interrupt number: */
+#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */
+#define XCHAL_EXTINT1_NUM 1 /* (intlevel 1) */
+#define XCHAL_EXTINT2_NUM 2 /* (intlevel 1) */
+#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */
+#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */
+#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */
+#define XCHAL_EXTINT6_NUM 8 /* (intlevel 2) */
+#define XCHAL_EXTINT7_NUM 9 /* (intlevel 3) */
+#define XCHAL_EXTINT8_NUM 12 /* (intlevel 4) */
+#define XCHAL_EXTINT9_NUM 14 /* (intlevel 7) */
+#define XCHAL_EXTINT10_NUM 15 /* (intlevel 1) */
+#define XCHAL_EXTINT11_NUM 16 /* (intlevel 1) */
+#define XCHAL_EXTINT12_NUM 17 /* (intlevel 1) */
+#define XCHAL_EXTINT13_NUM 18 /* (intlevel 1) */
+#define XCHAL_EXTINT14_NUM 19 /* (intlevel 1) */
+#define XCHAL_EXTINT15_NUM 20 /* (intlevel 1) */
+#define XCHAL_EXTINT16_NUM 21 /* (intlevel 3) */
+
+
+/*----------------------------------------------------------------------
+ EXCEPTIONS and VECTORS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture
+ number: 1 == XEA1 (old)
+ 2 == XEA2 (new)
+ 0 == XEAX (extern) or TX */
+#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */
+#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */
+#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */
+#define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */
+#define XCHAL_HAVE_HALT 0 /* halt architecture option */
+#define XCHAL_HAVE_BOOTLOADER 0 /* boot loader (for TX) */
+#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */
+#define XCHAL_HAVE_VECTOR_SELECT 1 /* relocatable vectors */
+#define XCHAL_HAVE_VECBASE 1 /* relocatable vectors */
+#define XCHAL_VECBASE_RESET_VADDR 0x00002000 /* VECBASE reset value */
+#define XCHAL_VECBASE_RESET_PADDR 0x00002000
+#define XCHAL_RESET_VECBASE_OVERLAP 0
+
+#define XCHAL_RESET_VECTOR0_VADDR 0xFE000000
+#define XCHAL_RESET_VECTOR0_PADDR 0xFE000000
+#define XCHAL_RESET_VECTOR1_VADDR 0x00001000
+#define XCHAL_RESET_VECTOR1_PADDR 0x00001000
+#define XCHAL_RESET_VECTOR_VADDR 0xFE000000
+#define XCHAL_RESET_VECTOR_PADDR 0xFE000000
+#define XCHAL_USER_VECOFS 0x00000340
+#define XCHAL_USER_VECTOR_VADDR 0x00002340
+#define XCHAL_USER_VECTOR_PADDR 0x00002340
+#define XCHAL_KERNEL_VECOFS 0x00000300
+#define XCHAL_KERNEL_VECTOR_VADDR 0x00002300
+#define XCHAL_KERNEL_VECTOR_PADDR 0x00002300
+#define XCHAL_DOUBLEEXC_VECOFS 0x000003C0
+#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0x000023C0
+#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x000023C0
+#define XCHAL_WINDOW_OF4_VECOFS 0x00000000
+#define XCHAL_WINDOW_UF4_VECOFS 0x00000040
+#define XCHAL_WINDOW_OF8_VECOFS 0x00000080
+#define XCHAL_WINDOW_UF8_VECOFS 0x000000C0
+#define XCHAL_WINDOW_OF12_VECOFS 0x00000100
+#define XCHAL_WINDOW_UF12_VECOFS 0x00000140
+#define XCHAL_WINDOW_VECTORS_VADDR 0x00002000
+#define XCHAL_WINDOW_VECTORS_PADDR 0x00002000
+#define XCHAL_INTLEVEL2_VECOFS 0x00000180
+#define XCHAL_INTLEVEL2_VECTOR_VADDR 0x00002180
+#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x00002180
+#define XCHAL_INTLEVEL3_VECOFS 0x000001C0
+#define XCHAL_INTLEVEL3_VECTOR_VADDR 0x000021C0
+#define XCHAL_INTLEVEL3_VECTOR_PADDR 0x000021C0
+#define XCHAL_INTLEVEL4_VECOFS 0x00000200
+#define XCHAL_INTLEVEL4_VECTOR_VADDR 0x00002200
+#define XCHAL_INTLEVEL4_VECTOR_PADDR 0x00002200
+#define XCHAL_INTLEVEL5_VECOFS 0x00000240
+#define XCHAL_INTLEVEL5_VECTOR_VADDR 0x00002240
+#define XCHAL_INTLEVEL5_VECTOR_PADDR 0x00002240
+#define XCHAL_INTLEVEL6_VECOFS 0x00000280
+#define XCHAL_INTLEVEL6_VECTOR_VADDR 0x00002280
+#define XCHAL_INTLEVEL6_VECTOR_PADDR 0x00002280
+#define XCHAL_DEBUG_VECOFS XCHAL_INTLEVEL6_VECOFS
+#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR
+#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL6_VECTOR_PADDR
+#define XCHAL_NMI_VECOFS 0x000002C0
+#define XCHAL_NMI_VECTOR_VADDR 0x000022C0
+#define XCHAL_NMI_VECTOR_PADDR 0x000022C0
+#define XCHAL_INTLEVEL7_VECOFS XCHAL_NMI_VECOFS
+#define XCHAL_INTLEVEL7_VECTOR_VADDR XCHAL_NMI_VECTOR_VADDR
+#define XCHAL_INTLEVEL7_VECTOR_PADDR XCHAL_NMI_VECTOR_PADDR
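+/* For the relocatable vectors above, each *_VECTOR_VADDR is the VECBASE
+   reset value (0x00002000) plus the corresponding *_VECOFS, e.g. the user
+   vector is 0x00002000 + 0x00000340 = 0x00002340. */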
+
+
+/*----------------------------------------------------------------------
+ DEBUG
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_OCD 1 /* OnChipDebug option */
+#define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */
+#define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */
+#define XCHAL_HAVE_OCD_DIR_ARRAY 1 /* faster OCD option */
+
+
+/*----------------------------------------------------------------------
+ MMU
+ ----------------------------------------------------------------------*/
+
+/* See core-matmap.h header file for more details. */
+
+#define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */
+#define XCHAL_HAVE_SPANNING_WAY 1 /* one way maps I+D 4GB vaddr */
+#define XCHAL_SPANNING_WAY 6 /* TLB spanning way number */
+#define XCHAL_HAVE_IDENTITY_MAP 0 /* vaddr == paddr always */
+#define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */
+#define XCHAL_HAVE_MIMIC_CACHEATTR 0 /* region protection */
+#define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */
+#define XCHAL_HAVE_PTP_MMU 1 /* full MMU (with page table
+ [autorefill] and protection)
+ usable for an MMU-based OS */
+/* If none of the above last 4 are set, it's a custom TLB configuration. */
+#define XCHAL_ITLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+#define XCHAL_DTLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+
+#define XCHAL_MMU_ASID_BITS 8 /* number of bits in ASIDs */
+#define XCHAL_MMU_RINGS 4 /* number of rings (1..4) */
+#define XCHAL_MMU_RING_BITS 2 /* num of bits in RING field */
+
+#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
+
+
+#endif /* XTENSA_DC233C_CORE_ISA_H */
diff --git a/target/xtensa/core-dc233c/gdb-config.c b/target/xtensa/core-dc233c/gdb-config.c
new file mode 100644
index 0000000000..b632341b28
--- /dev/null
+++ b/target/xtensa/core-dc233c/gdb-config.c
@@ -0,0 +1,145 @@
+/* Configuration for the Xtensa architecture for GDB, the GNU debugger.
+
+ Copyright (c) 2003-2010 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+/* idx ofs bi sz al targno flags cp typ group name */
+XTREG(0, 0, 32, 4, 4, 0x0020, 0x0006, -2, 9, 0x0100, pc, 0, 0, 0, 0, 0, 0)
+XTREG(1, 4, 32, 4, 4, 0x0100, 0x0006, -2, 1, 0x0002, ar0, 0, 0, 0, 0, 0, 0)
+XTREG(2, 8, 32, 4, 4, 0x0101, 0x0006, -2, 1, 0x0002, ar1, 0, 0, 0, 0, 0, 0)
+XTREG(3, 12, 32, 4, 4, 0x0102, 0x0006, -2, 1, 0x0002, ar2, 0, 0, 0, 0, 0, 0)
+XTREG(4, 16, 32, 4, 4, 0x0103, 0x0006, -2, 1, 0x0002, ar3, 0, 0, 0, 0, 0, 0)
+XTREG(5, 20, 32, 4, 4, 0x0104, 0x0006, -2, 1, 0x0002, ar4, 0, 0, 0, 0, 0, 0)
+XTREG(6, 24, 32, 4, 4, 0x0105, 0x0006, -2, 1, 0x0002, ar5, 0, 0, 0, 0, 0, 0)
+XTREG(7, 28, 32, 4, 4, 0x0106, 0x0006, -2, 1, 0x0002, ar6, 0, 0, 0, 0, 0, 0)
+XTREG(8, 32, 32, 4, 4, 0x0107, 0x0006, -2, 1, 0x0002, ar7, 0, 0, 0, 0, 0, 0)
+XTREG(9, 36, 32, 4, 4, 0x0108, 0x0006, -2, 1, 0x0002, ar8, 0, 0, 0, 0, 0, 0)
+XTREG(10, 40, 32, 4, 4, 0x0109, 0x0006, -2, 1, 0x0002, ar9, 0, 0, 0, 0, 0, 0)
+XTREG(11, 44, 32, 4, 4, 0x010a, 0x0006, -2, 1, 0x0002, ar10, 0, 0, 0, 0, 0, 0)
+XTREG(12, 48, 32, 4, 4, 0x010b, 0x0006, -2, 1, 0x0002, ar11, 0, 0, 0, 0, 0, 0)
+XTREG(13, 52, 32, 4, 4, 0x010c, 0x0006, -2, 1, 0x0002, ar12, 0, 0, 0, 0, 0, 0)
+XTREG(14, 56, 32, 4, 4, 0x010d, 0x0006, -2, 1, 0x0002, ar13, 0, 0, 0, 0, 0, 0)
+XTREG(15, 60, 32, 4, 4, 0x010e, 0x0006, -2, 1, 0x0002, ar14, 0, 0, 0, 0, 0, 0)
+XTREG(16, 64, 32, 4, 4, 0x010f, 0x0006, -2, 1, 0x0002, ar15, 0, 0, 0, 0, 0, 0)
+XTREG(17, 68, 32, 4, 4, 0x0110, 0x0006, -2, 1, 0x0002, ar16, 0, 0, 0, 0, 0, 0)
+XTREG(18, 72, 32, 4, 4, 0x0111, 0x0006, -2, 1, 0x0002, ar17, 0, 0, 0, 0, 0, 0)
+XTREG(19, 76, 32, 4, 4, 0x0112, 0x0006, -2, 1, 0x0002, ar18, 0, 0, 0, 0, 0, 0)
+XTREG(20, 80, 32, 4, 4, 0x0113, 0x0006, -2, 1, 0x0002, ar19, 0, 0, 0, 0, 0, 0)
+XTREG(21, 84, 32, 4, 4, 0x0114, 0x0006, -2, 1, 0x0002, ar20, 0, 0, 0, 0, 0, 0)
+XTREG(22, 88, 32, 4, 4, 0x0115, 0x0006, -2, 1, 0x0002, ar21, 0, 0, 0, 0, 0, 0)
+XTREG(23, 92, 32, 4, 4, 0x0116, 0x0006, -2, 1, 0x0002, ar22, 0, 0, 0, 0, 0, 0)
+XTREG(24, 96, 32, 4, 4, 0x0117, 0x0006, -2, 1, 0x0002, ar23, 0, 0, 0, 0, 0, 0)
+XTREG(25, 100, 32, 4, 4, 0x0118, 0x0006, -2, 1, 0x0002, ar24, 0, 0, 0, 0, 0, 0)
+XTREG(26, 104, 32, 4, 4, 0x0119, 0x0006, -2, 1, 0x0002, ar25, 0, 0, 0, 0, 0, 0)
+XTREG(27, 108, 32, 4, 4, 0x011a, 0x0006, -2, 1, 0x0002, ar26, 0, 0, 0, 0, 0, 0)
+XTREG(28, 112, 32, 4, 4, 0x011b, 0x0006, -2, 1, 0x0002, ar27, 0, 0, 0, 0, 0, 0)
+XTREG(29, 116, 32, 4, 4, 0x011c, 0x0006, -2, 1, 0x0002, ar28, 0, 0, 0, 0, 0, 0)
+XTREG(30, 120, 32, 4, 4, 0x011d, 0x0006, -2, 1, 0x0002, ar29, 0, 0, 0, 0, 0, 0)
+XTREG(31, 124, 32, 4, 4, 0x011e, 0x0006, -2, 1, 0x0002, ar30, 0, 0, 0, 0, 0, 0)
+XTREG(32, 128, 32, 4, 4, 0x011f, 0x0006, -2, 1, 0x0002, ar31, 0, 0, 0, 0, 0, 0)
+XTREG(33, 132, 32, 4, 4, 0x0200, 0x0006, -2, 2, 0x1100, lbeg, 0, 0, 0, 0, 0, 0)
+XTREG(34, 136, 32, 4, 4, 0x0201, 0x0006, -2, 2, 0x1100, lend, 0, 0, 0, 0, 0, 0)
+XTREG(35, 140, 32, 4, 4, 0x0202, 0x0006, -2, 2, 0x1100, lcount, 0, 0, 0, 0, 0, 0)
+XTREG(36, 144, 6, 4, 4, 0x0203, 0x0006, -2, 2, 0x1100, sar, 0, 0, 0, 0, 0, 0)
+XTREG(37, 148, 32, 4, 4, 0x0205, 0x0006, -2, 2, 0x1100, litbase, 0, 0, 0, 0, 0, 0)
+XTREG(38, 152, 3, 4, 4, 0x0248, 0x0006, -2, 2, 0x1002, windowbase, 0, 0, 0, 0, 0, 0)
+XTREG(39, 156, 8, 4, 4, 0x0249, 0x0006, -2, 2, 0x1002, windowstart, 0, 0, 0, 0, 0, 0)
+XTREG(40, 160, 32, 4, 4, 0x02b0, 0x0002, -2, 2, 0x1000, sr176, 0, 0, 0, 0, 0, 0)
+XTREG(41, 164, 32, 4, 4, 0x02d0, 0x0002, -2, 2, 0x1000, sr208, 0, 0, 0, 0, 0, 0)
+XTREG(42, 168, 19, 4, 4, 0x02e6, 0x0006, -2, 2, 0x1100, ps, 0, 0, 0, 0, 0, 0)
+XTREG(43, 172, 32, 4, 4, 0x03e7, 0x0006, -2, 3, 0x0110, threadptr, 0, 0, 0, 0, 0, 0)
+XTREG(44, 176, 32, 4, 4, 0x020c, 0x0006, -1, 2, 0x1100, scompare1, 0, 0, 0, 0, 0, 0)
+XTREG(45, 180, 32, 4, 4, 0x0210, 0x0006, -1, 2, 0x1100, acclo, 0, 0, 0, 0, 0, 0)
+XTREG(46, 184, 8, 4, 4, 0x0211, 0x0006, -1, 2, 0x1100, acchi, 0, 0, 0, 0, 0, 0)
+XTREG(47, 188, 32, 4, 4, 0x0220, 0x0006, -1, 2, 0x1100, m0, 0, 0, 0, 0, 0, 0)
+XTREG(48, 192, 32, 4, 4, 0x0221, 0x0006, -1, 2, 0x1100, m1, 0, 0, 0, 0, 0, 0)
+XTREG(49, 196, 32, 4, 4, 0x0222, 0x0006, -1, 2, 0x1100, m2, 0, 0, 0, 0, 0, 0)
+XTREG(50, 200, 32, 4, 4, 0x0223, 0x0006, -1, 2, 0x1100, m3, 0, 0, 0, 0, 0, 0)
+XTREG(51, 204, 32, 4, 4, 0x03e6, 0x000e, -1, 3, 0x0110, expstate, 0, 0, 0, 0, 0, 0)
+XTREG(52, 208, 32, 4, 4, 0x0253, 0x0007, -2, 2, 0x1000, ptevaddr, 0, 0, 0, 0, 0, 0)
+XTREG(53, 212, 32, 4, 4, 0x0259, 0x000d, -2, 2, 0x1000, mmid, 0, 0, 0, 0, 0, 0)
+XTREG(54, 216, 32, 4, 4, 0x025a, 0x0007, -2, 2, 0x1000, rasid, 0, 0, 0, 0, 0, 0)
+XTREG(55, 220, 25, 4, 4, 0x025b, 0x0007, -2, 2, 0x1000, itlbcfg, 0, 0, 0, 0, 0, 0)
+XTREG(56, 224, 25, 4, 4, 0x025c, 0x0007, -2, 2, 0x1000, dtlbcfg, 0, 0, 0, 0, 0, 0)
+XTREG(57, 228, 2, 4, 4, 0x0260, 0x0007, -2, 2, 0x1000, ibreakenable, 0, 0, 0, 0, 0, 0)
+XTREG(58, 232, 6, 4, 4, 0x0263, 0x0007, -2, 2, 0x1000, atomctl, 0, 0, 0, 0, 0, 0)
+XTREG(59, 236, 32, 4, 4, 0x0268, 0x0007, -2, 2, 0x1000, ddr, 0, 0, 0, 0, 0, 0)
+XTREG(60, 240, 32, 4, 4, 0x0280, 0x0007, -2, 2, 0x1000, ibreaka0, 0, 0, 0, 0, 0, 0)
+XTREG(61, 244, 32, 4, 4, 0x0281, 0x0007, -2, 2, 0x1000, ibreaka1, 0, 0, 0, 0, 0, 0)
+XTREG(62, 248, 32, 4, 4, 0x0290, 0x0007, -2, 2, 0x1000, dbreaka0, 0, 0, 0, 0, 0, 0)
+XTREG(63, 252, 32, 4, 4, 0x0291, 0x0007, -2, 2, 0x1000, dbreaka1, 0, 0, 0, 0, 0, 0)
+XTREG(64, 256, 32, 4, 4, 0x02a0, 0x0007, -2, 2, 0x1000, dbreakc0, 0, 0, 0, 0, 0, 0)
+XTREG(65, 260, 32, 4, 4, 0x02a1, 0x0007, -2, 2, 0x1000, dbreakc1, 0, 0, 0, 0, 0, 0)
+XTREG(66, 264, 32, 4, 4, 0x02b1, 0x0007, -2, 2, 0x1000, epc1, 0, 0, 0, 0, 0, 0)
+XTREG(67, 268, 32, 4, 4, 0x02b2, 0x0007, -2, 2, 0x1000, epc2, 0, 0, 0, 0, 0, 0)
+XTREG(68, 272, 32, 4, 4, 0x02b3, 0x0007, -2, 2, 0x1000, epc3, 0, 0, 0, 0, 0, 0)
+XTREG(69, 276, 32, 4, 4, 0x02b4, 0x0007, -2, 2, 0x1000, epc4, 0, 0, 0, 0, 0, 0)
+XTREG(70, 280, 32, 4, 4, 0x02b5, 0x0007, -2, 2, 0x1000, epc5, 0, 0, 0, 0, 0, 0)
+XTREG(71, 284, 32, 4, 4, 0x02b6, 0x0007, -2, 2, 0x1000, epc6, 0, 0, 0, 0, 0, 0)
+XTREG(72, 288, 32, 4, 4, 0x02b7, 0x0007, -2, 2, 0x1000, epc7, 0, 0, 0, 0, 0, 0)
+XTREG(73, 292, 32, 4, 4, 0x02c0, 0x0007, -2, 2, 0x1000, depc, 0, 0, 0, 0, 0, 0)
+XTREG(74, 296, 19, 4, 4, 0x02c2, 0x0007, -2, 2, 0x1000, eps2, 0, 0, 0, 0, 0, 0)
+XTREG(75, 300, 19, 4, 4, 0x02c3, 0x0007, -2, 2, 0x1000, eps3, 0, 0, 0, 0, 0, 0)
+XTREG(76, 304, 19, 4, 4, 0x02c4, 0x0007, -2, 2, 0x1000, eps4, 0, 0, 0, 0, 0, 0)
+XTREG(77, 308, 19, 4, 4, 0x02c5, 0x0007, -2, 2, 0x1000, eps5, 0, 0, 0, 0, 0, 0)
+XTREG(78, 312, 19, 4, 4, 0x02c6, 0x0007, -2, 2, 0x1000, eps6, 0, 0, 0, 0, 0, 0)
+XTREG(79, 316, 19, 4, 4, 0x02c7, 0x0007, -2, 2, 0x1000, eps7, 0, 0, 0, 0, 0, 0)
+XTREG(80, 320, 32, 4, 4, 0x02d1, 0x0007, -2, 2, 0x1000, excsave1, 0, 0, 0, 0, 0, 0)
+XTREG(81, 324, 32, 4, 4, 0x02d2, 0x0007, -2, 2, 0x1000, excsave2, 0, 0, 0, 0, 0, 0)
+XTREG(82, 328, 32, 4, 4, 0x02d3, 0x0007, -2, 2, 0x1000, excsave3, 0, 0, 0, 0, 0, 0)
+XTREG(83, 332, 32, 4, 4, 0x02d4, 0x0007, -2, 2, 0x1000, excsave4, 0, 0, 0, 0, 0, 0)
+XTREG(84, 336, 32, 4, 4, 0x02d5, 0x0007, -2, 2, 0x1000, excsave5, 0, 0, 0, 0, 0, 0)
+XTREG(85, 340, 32, 4, 4, 0x02d6, 0x0007, -2, 2, 0x1000, excsave6, 0, 0, 0, 0, 0, 0)
+XTREG(86, 344, 32, 4, 4, 0x02d7, 0x0007, -2, 2, 0x1000, excsave7, 0, 0, 0, 0, 0, 0)
+XTREG(87, 348, 8, 4, 4, 0x02e0, 0x0007, -2, 2, 0x1000, cpenable, 0, 0, 0, 0, 0, 0)
+XTREG(88, 352, 22, 4, 4, 0x02e2, 0x000b, -2, 2, 0x1000, interrupt, 0, 0, 0, 0, 0, 0)
+XTREG(89, 356, 22, 4, 4, 0x02e2, 0x000d, -2, 2, 0x1000, intset, 0, 0, 0, 0, 0, 0)
+XTREG(90, 360, 22, 4, 4, 0x02e3, 0x000d, -2, 2, 0x1000, intclear, 0, 0, 0, 0, 0, 0)
+XTREG(91, 364, 22, 4, 4, 0x02e4, 0x0007, -2, 2, 0x1000, intenable, 0, 0, 0, 0, 0, 0)
+XTREG(92, 368, 32, 4, 4, 0x02e7, 0x0007, -2, 2, 0x1000, vecbase, 0, 0, 0, 0, 0, 0)
+XTREG(93, 372, 6, 4, 4, 0x02e8, 0x0007, -2, 2, 0x1000, exccause, 0, 0, 0, 0, 0, 0)
+XTREG(94, 376, 12, 4, 4, 0x02e9, 0x0003, -2, 2, 0x1000, debugcause, 0, 0, 0, 0, 0, 0)
+XTREG(95, 380, 32, 4, 4, 0x02ea, 0x000f, -2, 2, 0x1000, ccount, 0, 0, 0, 0, 0, 0)
+XTREG(96, 384, 32, 4, 4, 0x02eb, 0x0003, -2, 2, 0x1000, prid, 0, 0, 0, 0, 0, 0)
+XTREG(97, 388, 32, 4, 4, 0x02ec, 0x000f, -2, 2, 0x1000, icount, 0, 0, 0, 0, 0, 0)
+XTREG(98, 392, 4, 4, 4, 0x02ed, 0x0007, -2, 2, 0x1000, icountlevel, 0, 0, 0, 0, 0, 0)
+XTREG(99, 396, 32, 4, 4, 0x02ee, 0x0007, -2, 2, 0x1000, excvaddr, 0, 0, 0, 0, 0, 0)
+XTREG(100, 400, 32, 4, 4, 0x02f0, 0x000f, -2, 2, 0x1000, ccompare0, 0, 0, 0, 0, 0, 0)
+XTREG(101, 404, 32, 4, 4, 0x02f1, 0x000f, -2, 2, 0x1000, ccompare1, 0, 0, 0, 0, 0, 0)
+XTREG(102, 408, 32, 4, 4, 0x02f2, 0x000f, -2, 2, 0x1000, ccompare2, 0, 0, 0, 0, 0, 0)
+XTREG(103, 412, 32, 4, 4, 0x02f4, 0x0007, -2, 2, 0x1000, misc0, 0, 0, 0, 0, 0, 0)
+XTREG(104, 416, 32, 4, 4, 0x02f5, 0x0007, -2, 2, 0x1000, misc1, 0, 0, 0, 0, 0, 0)
+XTREG(105, 420, 32, 4, 4, 0x0000, 0x0006, -2, 8, 0x0100, a0, 0, 0, 0, 0, 0, 0)
+XTREG(106, 424, 32, 4, 4, 0x0001, 0x0006, -2, 8, 0x0100, a1, 0, 0, 0, 0, 0, 0)
+XTREG(107, 428, 32, 4, 4, 0x0002, 0x0006, -2, 8, 0x0100, a2, 0, 0, 0, 0, 0, 0)
+XTREG(108, 432, 32, 4, 4, 0x0003, 0x0006, -2, 8, 0x0100, a3, 0, 0, 0, 0, 0, 0)
+XTREG(109, 436, 32, 4, 4, 0x0004, 0x0006, -2, 8, 0x0100, a4, 0, 0, 0, 0, 0, 0)
+XTREG(110, 440, 32, 4, 4, 0x0005, 0x0006, -2, 8, 0x0100, a5, 0, 0, 0, 0, 0, 0)
+XTREG(111, 444, 32, 4, 4, 0x0006, 0x0006, -2, 8, 0x0100, a6, 0, 0, 0, 0, 0, 0)
+XTREG(112, 448, 32, 4, 4, 0x0007, 0x0006, -2, 8, 0x0100, a7, 0, 0, 0, 0, 0, 0)
+XTREG(113, 452, 32, 4, 4, 0x0008, 0x0006, -2, 8, 0x0100, a8, 0, 0, 0, 0, 0, 0)
+XTREG(114, 456, 32, 4, 4, 0x0009, 0x0006, -2, 8, 0x0100, a9, 0, 0, 0, 0, 0, 0)
+XTREG(115, 460, 32, 4, 4, 0x000a, 0x0006, -2, 8, 0x0100, a10, 0, 0, 0, 0, 0, 0)
+XTREG(116, 464, 32, 4, 4, 0x000b, 0x0006, -2, 8, 0x0100, a11, 0, 0, 0, 0, 0, 0)
+XTREG(117, 468, 32, 4, 4, 0x000c, 0x0006, -2, 8, 0x0100, a12, 0, 0, 0, 0, 0, 0)
+XTREG(118, 472, 32, 4, 4, 0x000d, 0x0006, -2, 8, 0x0100, a13, 0, 0, 0, 0, 0, 0)
+XTREG(119, 476, 32, 4, 4, 0x000e, 0x0006, -2, 8, 0x0100, a14, 0, 0, 0, 0, 0, 0)
+XTREG(120, 480, 32, 4, 4, 0x000f, 0x0006, -2, 8, 0x0100, a15, 0, 0, 0, 0, 0, 0)
diff --git a/target/xtensa/core-fsf.c b/target/xtensa/core-fsf.c
new file mode 100644
index 0000000000..15ef470e8b
--- /dev/null
+++ b/target/xtensa/core-fsf.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/gdbstub.h"
+#include "qemu/host-utils.h"
+
+#include "core-fsf/core-isa.h"
+#include "overlay_tool.h"
+
+static XtensaConfig fsf __attribute__((unused)) = {
+ .name = "fsf",
+ .gdb_regmap = {
+ /* GDB for this core is not currently supported */
+ .reg = {
+ XTREG_END
+ },
+ },
+ .clock_freq_khz = 10000,
+ DEFAULT_SECTIONS
+};
+
+REGISTER_CORE(fsf)
diff --git a/target/xtensa/core-fsf/core-isa.h b/target/xtensa/core-fsf/core-isa.h
new file mode 100644
index 0000000000..fb2bb8f2cb
--- /dev/null
+++ b/target/xtensa/core-fsf/core-isa.h
@@ -0,0 +1,360 @@
+/*
+ * Xtensa processor core configuration information.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999-2006 Tensilica Inc.
+ */
+
+#ifndef XTENSA_FSF_CORE_ISA_H
+#define XTENSA_FSF_CORE_ISA_H
+
+/****************************************************************************
+ Parameters Useful for Any Code, USER or PRIVILEGED
+ ****************************************************************************/
+
+/*
+ * Note: Macros of the form XCHAL_HAVE_*** have a value of 1 if the option is
+ * configured, and a value of 0 otherwise. These macros are always defined.
+ */
+
+
+/*----------------------------------------------------------------------
+ ISA
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_BE 1 /* big-endian byte ordering */
+#define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */
+#define XCHAL_NUM_AREGS 64 /* num of physical addr regs */
+#define XCHAL_NUM_AREGS_LOG2 6 /* log2(XCHAL_NUM_AREGS) */
+#define XCHAL_MAX_INSTRUCTION_SIZE 3 /* max instr bytes (3..8) */
+#define XCHAL_HAVE_DEBUG 1 /* debug option */
+#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */
+#define XCHAL_HAVE_LOOPS 1 /* zero-overhead loops */
+#define XCHAL_HAVE_NSA 1 /* NSA/NSAU instructions */
+#define XCHAL_HAVE_MINMAX 0 /* MIN/MAX instructions */
+#define XCHAL_HAVE_SEXT 0 /* SEXT instruction */
+#define XCHAL_HAVE_CLAMPS 0 /* CLAMPS instruction */
+#define XCHAL_HAVE_MUL16 0 /* MUL16S/MUL16U instructions */
+#define XCHAL_HAVE_MUL32 0 /* MULL instruction */
+#define XCHAL_HAVE_MUL32_HIGH 0 /* MULUH/MULSH instructions */
+#define XCHAL_HAVE_L32R 1 /* L32R instruction */
+#define XCHAL_HAVE_ABSOLUTE_LITERALS 1 /* non-PC-rel (extended) L32R */
+#define XCHAL_HAVE_CONST16 0 /* CONST16 instruction */
+#define XCHAL_HAVE_ADDX 1 /* ADDX#/SUBX# instructions */
+#define XCHAL_HAVE_WIDE_BRANCHES 0 /* B*.W18 or B*.W15 instr's */
+#define XCHAL_HAVE_PREDICTED_BRANCHES 0 /* B[EQ/EQZ/NE/NEZ]T instr's */
+#define XCHAL_HAVE_CALL4AND12 1 /* (obsolete option) */
+#define XCHAL_HAVE_ABS 1 /* ABS instruction */
+/*#define XCHAL_HAVE_POPC 0*/ /* POPC instruction */
+/*#define XCHAL_HAVE_CRC 0*/ /* CRC instruction */
+#define XCHAL_HAVE_RELEASE_SYNC 0 /* L32AI/S32RI instructions */
+#define XCHAL_HAVE_S32C1I 0 /* S32C1I instruction */
+#define XCHAL_HAVE_SPECULATION 0 /* speculation */
+#define XCHAL_HAVE_FULL_RESET 1 /* all regs/state reset */
+#define XCHAL_NUM_CONTEXTS 1 /* */
+#define XCHAL_NUM_MISC_REGS 2 /* num of scratch regs (0..4) */
+#define XCHAL_HAVE_TAP_MASTER 0 /* JTAG TAP control instr's */
+#define XCHAL_HAVE_PRID 1 /* processor ID register */
+#define XCHAL_HAVE_THREADPTR 1 /* THREADPTR register */
+#define XCHAL_HAVE_BOOLEANS 0 /* boolean registers */
+#define XCHAL_HAVE_CP 0 /* CPENABLE reg (coprocessor) */
+#define XCHAL_CP_MAXCFG 0 /* max allowed cp id plus one */
+#define XCHAL_HAVE_MAC16 0 /* MAC16 package */
+#define XCHAL_HAVE_VECTORFPU2005 0 /* vector floating-point pkg */
+#define XCHAL_HAVE_FP 0 /* floating point pkg */
+#define XCHAL_HAVE_VECTRA1 0 /* Vectra I pkg */
+#define XCHAL_HAVE_VECTRALX 0 /* Vectra LX pkg */
+#define XCHAL_HAVE_HIFI2 0 /* HiFi2 Audio Engine pkg */
+
+
+/*----------------------------------------------------------------------
+ MISC
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_WRITEBUFFER_ENTRIES 4 /* size of write buffer */
+#define XCHAL_INST_FETCH_WIDTH 4 /* instr-fetch width in bytes */
+#define XCHAL_DATA_WIDTH 4 /* data width in bytes */
+/* In T1050, applies to selected core load and store instructions (see ISA): */
+#define XCHAL_UNALIGNED_LOAD_EXCEPTION 1 /* unaligned loads cause exc. */
+#define XCHAL_UNALIGNED_STORE_EXCEPTION 1 /* unaligned stores cause exc.*/
+
+#define XCHAL_SW_VERSION 800002 /* sw version of this header */
+
+#define XCHAL_CORE_ID "fsf" /* alphanum core name
+ (CoreID) set in the Xtensa
+ Processor Generator */
+
+#define XCHAL_CORE_DESCRIPTION "fsf standard core"
+#define XCHAL_BUILD_UNIQUE_ID 0x00006700 /* 22-bit sw build ID */
+
+/*
+ * These definitions describe the hardware targeted by this software.
+ */
+#define XCHAL_HW_CONFIGID0 0xC103C3FF /* ConfigID hi 32 bits*/
+#define XCHAL_HW_CONFIGID1 0x0C006700 /* ConfigID lo 32 bits*/
+#define XCHAL_HW_VERSION_NAME "LX2.0.0" /* full version name */
+#define XCHAL_HW_VERSION_MAJOR 2200 /* major ver# of targeted hw */
+#define XCHAL_HW_VERSION_MINOR 0 /* minor ver# of targeted hw */
+#define XTHAL_HW_REL_LX2 1
+#define XTHAL_HW_REL_LX2_0 1
+#define XTHAL_HW_REL_LX2_0_0 1
+#define XCHAL_HW_CONFIGID_RELIABLE 1
+/* If software targets a *range* of hardware versions, these are the bounds: */
+#define XCHAL_HW_MIN_VERSION_MAJOR 2200 /* major v of earliest tgt hw */
+#define XCHAL_HW_MIN_VERSION_MINOR 0 /* minor v of earliest tgt hw */
+#define XCHAL_HW_MAX_VERSION_MAJOR 2200 /* major v of latest tgt hw */
+#define XCHAL_HW_MAX_VERSION_MINOR 0 /* minor v of latest tgt hw */
+
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
+#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
+#define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
+#define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
+
+#define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
+#define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
+
+#define XCHAL_DCACHE_IS_WRITEBACK 0 /* writeback feature */
+
+
+
+
+/****************************************************************************
+ Parameters Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code
+ ****************************************************************************/
+
+
+#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY
+
+/*----------------------------------------------------------------------
+ CACHE
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_PIF 1 /* any outbound PIF present */
+
+/* If present, cache size in bytes == (ways * 2^(linewidth + setwidth)). */
+
+/* Number of cache sets in log2(lines per way): */
+#define XCHAL_ICACHE_SETWIDTH 8
+#define XCHAL_DCACHE_SETWIDTH 8
+
+/* Cache set associativity (number of ways): */
+#define XCHAL_ICACHE_WAYS 2
+#define XCHAL_DCACHE_WAYS 2
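+/* Here the cache-size formula above gives 2 ways * 2^(4 + 8) = 8192 bytes,
+   matching the XCHAL_ICACHE_SIZE and XCHAL_DCACHE_SIZE values defined
+   earlier. */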
+
+/* Cache features: */
+#define XCHAL_ICACHE_LINE_LOCKABLE 0
+#define XCHAL_DCACHE_LINE_LOCKABLE 0
+#define XCHAL_ICACHE_ECC_PARITY 0
+#define XCHAL_DCACHE_ECC_PARITY 0
+
+/* Number of encoded cache attr bits (see <xtensa/hal.h> for decoded bits): */
+#define XCHAL_CA_BITS 4
+
+
+/*----------------------------------------------------------------------
+ INTERNAL I/D RAM/ROMs and XLMI
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_NUM_INSTROM 0 /* number of core instr. ROMs */
+#define XCHAL_NUM_INSTRAM 0 /* number of core instr. RAMs */
+#define XCHAL_NUM_DATAROM 0 /* number of core data ROMs */
+#define XCHAL_NUM_DATARAM 0 /* number of core data RAMs */
+#define XCHAL_NUM_URAM 0 /* number of core unified RAMs*/
+#define XCHAL_NUM_XLMI 0 /* number of core XLMI ports */
+
+
+/*----------------------------------------------------------------------
+ INTERRUPTS and TIMERS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_INTERRUPTS 1 /* interrupt option */
+#define XCHAL_HAVE_HIGHPRI_INTERRUPTS 1 /* med/high-pri. interrupts */
+#define XCHAL_HAVE_NMI 0 /* non-maskable interrupt */
+#define XCHAL_HAVE_CCOUNT 1 /* CCOUNT reg. (timer option) */
+#define XCHAL_NUM_TIMERS 3 /* number of CCOMPAREn regs */
+#define XCHAL_NUM_INTERRUPTS 17 /* number of interrupts */
+#define XCHAL_NUM_INTERRUPTS_LOG2 5 /* ceil(log2(NUM_INTERRUPTS)) */
+#define XCHAL_NUM_EXTINTERRUPTS 10 /* num of external interrupts */
+#define XCHAL_NUM_INTLEVELS 4 /* number of interrupt levels
+ (not including level zero) */
+#define XCHAL_EXCM_LEVEL 1 /* level masked by PS.EXCM */
+ /* (always 1 in XEA1; levels 2 .. EXCM_LEVEL are "medium priority") */
+
+/* Masks of interrupts at each interrupt level: */
+#define XCHAL_INTLEVEL1_MASK 0x000064F9
+#define XCHAL_INTLEVEL2_MASK 0x00008902
+#define XCHAL_INTLEVEL3_MASK 0x00011204
+#define XCHAL_INTLEVEL4_MASK 0x00000000
+#define XCHAL_INTLEVEL5_MASK 0x00000000
+#define XCHAL_INTLEVEL6_MASK 0x00000000
+#define XCHAL_INTLEVEL7_MASK 0x00000000
+
+/* Masks of interrupts at each range 1..n of interrupt levels: */
+#define XCHAL_INTLEVEL1_ANDBELOW_MASK 0x000064F9
+#define XCHAL_INTLEVEL2_ANDBELOW_MASK 0x0000EDFB
+#define XCHAL_INTLEVEL3_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL4_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL5_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL6_ANDBELOW_MASK 0x0001FFFF
+#define XCHAL_INTLEVEL7_ANDBELOW_MASK 0x0001FFFF
+
+/* Level of each interrupt: */
+#define XCHAL_INT0_LEVEL 1
+#define XCHAL_INT1_LEVEL 2
+#define XCHAL_INT2_LEVEL 3
+#define XCHAL_INT3_LEVEL 1
+#define XCHAL_INT4_LEVEL 1
+#define XCHAL_INT5_LEVEL 1
+#define XCHAL_INT6_LEVEL 1
+#define XCHAL_INT7_LEVEL 1
+#define XCHAL_INT8_LEVEL 2
+#define XCHAL_INT9_LEVEL 3
+#define XCHAL_INT10_LEVEL 1
+#define XCHAL_INT11_LEVEL 2
+#define XCHAL_INT12_LEVEL 3
+#define XCHAL_INT13_LEVEL 1
+#define XCHAL_INT14_LEVEL 1
+#define XCHAL_INT15_LEVEL 2
+#define XCHAL_INT16_LEVEL 3
+#define XCHAL_DEBUGLEVEL 4 /* debug interrupt level */
+#define XCHAL_HAVE_DEBUG_EXTERN_INT 0 /* OCD external db interrupt */
+
+/* Type of each interrupt: */
+#define XCHAL_INT0_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT1_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT2_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT3_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT4_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT5_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT6_TYPE XTHAL_INTTYPE_EXTERN_LEVEL
+#define XCHAL_INT7_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT8_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT9_TYPE XTHAL_INTTYPE_EXTERN_EDGE
+#define XCHAL_INT10_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT11_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT12_TYPE XTHAL_INTTYPE_TIMER
+#define XCHAL_INT13_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT14_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT15_TYPE XTHAL_INTTYPE_SOFTWARE
+#define XCHAL_INT16_TYPE XTHAL_INTTYPE_SOFTWARE
+
+/* Masks of interrupts for each type of interrupt: */
+#define XCHAL_INTTYPE_MASK_UNCONFIGURED 0xFFFE0000
+#define XCHAL_INTTYPE_MASK_SOFTWARE 0x0001E000
+#define XCHAL_INTTYPE_MASK_EXTERN_EDGE 0x00000380
+#define XCHAL_INTTYPE_MASK_EXTERN_LEVEL 0x0000007F
+#define XCHAL_INTTYPE_MASK_TIMER 0x00001C00
+#define XCHAL_INTTYPE_MASK_NMI 0x00000000
+#define XCHAL_INTTYPE_MASK_WRITE_ERROR 0x00000000
+
+/* Interrupt numbers assigned to specific interrupt sources: */
+#define XCHAL_TIMER0_INTERRUPT 10 /* CCOMPARE0 */
+#define XCHAL_TIMER1_INTERRUPT 11 /* CCOMPARE1 */
+#define XCHAL_TIMER2_INTERRUPT 12 /* CCOMPARE2 */
+#define XCHAL_TIMER3_INTERRUPT XTHAL_TIMER_UNCONFIGURED
+
+/* Interrupt numbers for levels at which only one interrupt is configured: */
+/* (There are many interrupts each at level(s) 1, 2, 3.) */
+
+
+/*
+ * External interrupt vectors/levels.
+ * These macros describe how Xtensa processor interrupt numbers
+ * (as numbered internally, eg. in INTERRUPT and INTENABLE registers)
+ * map to external BInterrupt<n> pins, for those interrupts
+ * configured as external (level-triggered, edge-triggered, or NMI).
+ * See the Xtensa processor databook for more details.
+ */
+
+/* Core interrupt numbers mapped to each EXTERNAL interrupt number: */
+#define XCHAL_EXTINT0_NUM 0 /* (intlevel 1) */
+#define XCHAL_EXTINT1_NUM 1 /* (intlevel 2) */
+#define XCHAL_EXTINT2_NUM 2 /* (intlevel 3) */
+#define XCHAL_EXTINT3_NUM 3 /* (intlevel 1) */
+#define XCHAL_EXTINT4_NUM 4 /* (intlevel 1) */
+#define XCHAL_EXTINT5_NUM 5 /* (intlevel 1) */
+#define XCHAL_EXTINT6_NUM 6 /* (intlevel 1) */
+#define XCHAL_EXTINT7_NUM 7 /* (intlevel 1) */
+#define XCHAL_EXTINT8_NUM 8 /* (intlevel 2) */
+#define XCHAL_EXTINT9_NUM 9 /* (intlevel 3) */
+
+
+/*----------------------------------------------------------------------
+ EXCEPTIONS and VECTORS
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_XEA_VERSION 2 /* Xtensa Exception Architecture
+ number: 1 == XEA1 (old)
+ 2 == XEA2 (new)
+ 0 == XEAX (extern) */
+#define XCHAL_HAVE_XEA1 0 /* Exception Architecture 1 */
+#define XCHAL_HAVE_XEA2 1 /* Exception Architecture 2 */
+#define XCHAL_HAVE_XEAX 0 /* External Exception Arch. */
+#define XCHAL_HAVE_EXCEPTIONS 1 /* exception option */
+#define XCHAL_HAVE_MEM_ECC_PARITY 0 /* local memory ECC/parity */
+
+#define XCHAL_RESET_VECTOR_VADDR 0xFE000020
+#define XCHAL_RESET_VECTOR_PADDR 0xFE000020
+#define XCHAL_USER_VECTOR_VADDR 0xD0000220
+#define XCHAL_USER_VECTOR_PADDR 0x00000220
+#define XCHAL_KERNEL_VECTOR_VADDR 0xD0000200
+#define XCHAL_KERNEL_VECTOR_PADDR 0x00000200
+#define XCHAL_DOUBLEEXC_VECTOR_VADDR 0xD0000290
+#define XCHAL_DOUBLEEXC_VECTOR_PADDR 0x00000290
+#define XCHAL_WINDOW_VECTORS_VADDR 0xD0000000
+#define XCHAL_WINDOW_VECTORS_PADDR 0x00000000
+#define XCHAL_INTLEVEL2_VECTOR_VADDR 0xD0000240
+#define XCHAL_INTLEVEL2_VECTOR_PADDR 0x00000240
+#define XCHAL_INTLEVEL3_VECTOR_VADDR 0xD0000250
+#define XCHAL_INTLEVEL3_VECTOR_PADDR 0x00000250
+#define XCHAL_INTLEVEL4_VECTOR_VADDR 0xFE000520
+#define XCHAL_INTLEVEL4_VECTOR_PADDR 0xFE000520
+#define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL4_VECTOR_VADDR
+#define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL4_VECTOR_PADDR
+
+
+/*----------------------------------------------------------------------
+ DEBUG
+ ----------------------------------------------------------------------*/
+
+#define XCHAL_HAVE_OCD 1 /* OnChipDebug option */
+#define XCHAL_NUM_IBREAK 2 /* number of IBREAKn regs */
+#define XCHAL_NUM_DBREAK 2 /* number of DBREAKn regs */
+#define XCHAL_HAVE_OCD_DIR_ARRAY 1 /* faster OCD option */
+
+
+/*----------------------------------------------------------------------
+ MMU
+ ----------------------------------------------------------------------*/
+
+/* See <xtensa/config/core-matmap.h> header file for more details. */
+
+#define XCHAL_HAVE_TLBS 1 /* inverse of HAVE_CACHEATTR */
+#define XCHAL_HAVE_SPANNING_WAY 0 /* one way maps I+D 4GB vaddr */
+#define XCHAL_HAVE_IDENTITY_MAP 0 /* vaddr == paddr always */
+#define XCHAL_HAVE_CACHEATTR 0 /* CACHEATTR register present */
+#define XCHAL_HAVE_MIMIC_CACHEATTR 0 /* region protection */
+#define XCHAL_HAVE_XLT_CACHEATTR 0 /* region prot. w/translation */
+#define XCHAL_HAVE_PTP_MMU 1 /* full MMU (with page table
+ [autorefill] and protection)
+ usable for an MMU-based OS */
+/* If none of the above last 4 are set, it's a custom TLB configuration. */
+#define XCHAL_ITLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+#define XCHAL_DTLB_ARF_ENTRIES_LOG2 2 /* log2(autorefill way size) */
+
+#define XCHAL_MMU_ASID_BITS 8 /* number of bits in ASIDs */
+#define XCHAL_MMU_RINGS 4 /* number of rings (1..4) */
+#define XCHAL_MMU_RING_BITS 2 /* num of bits in RING field */
+
+#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */
+
+
+#endif /* XTENSA_FSF_CORE_ISA_H */
diff --git a/target/xtensa/cpu-qom.h b/target/xtensa/cpu-qom.h
new file mode 100644
index 0000000000..403bd95721
--- /dev/null
+++ b/target/xtensa/cpu-qom.h
@@ -0,0 +1,66 @@
+/*
+ * QEMU Xtensa CPU
+ *
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef QEMU_XTENSA_CPU_QOM_H
+#define QEMU_XTENSA_CPU_QOM_H
+
+#include "qom/cpu.h"
+
+#define TYPE_XTENSA_CPU "xtensa-cpu"
+
+#define XTENSA_CPU_CLASS(class) \
+ OBJECT_CLASS_CHECK(XtensaCPUClass, (class), TYPE_XTENSA_CPU)
+#define XTENSA_CPU(obj) \
+ OBJECT_CHECK(XtensaCPU, (obj), TYPE_XTENSA_CPU)
+#define XTENSA_CPU_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(XtensaCPUClass, (obj), TYPE_XTENSA_CPU)
+
+typedef struct XtensaConfig XtensaConfig;
+
+/**
+ * XtensaCPUClass:
+ * @parent_realize: The parent class' realize handler.
+ * @parent_reset: The parent class' reset handler.
+ * @config: The CPU core configuration.
+ *
+ * An Xtensa CPU model.
+ */
+typedef struct XtensaCPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ DeviceRealize parent_realize;
+ void (*parent_reset)(CPUState *cpu);
+
+ const XtensaConfig *config;
+} XtensaCPUClass;
+
+typedef struct XtensaCPU XtensaCPU;
+
+#endif
diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c
new file mode 100644
index 0000000000..e8e9f9175b
--- /dev/null
+++ b/target/xtensa/cpu.c
@@ -0,0 +1,184 @@
+/*
+ * QEMU Xtensa CPU
+ *
+ * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
+ * Copyright (c) 2012 SUSE LINUX Products GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "qemu-common.h"
+#include "migration/vmstate.h"
+#include "exec/exec-all.h"
+
+
+static void xtensa_cpu_set_pc(CPUState *cs, vaddr value)
+{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+
+ cpu->env.pc = value;
+}
+
+static bool xtensa_cpu_has_work(CPUState *cs)
+{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+
+ return cpu->env.pending_irq_level;
+}
+
+/* CPUClass::reset() */
+static void xtensa_cpu_reset(CPUState *s)
+{
+ XtensaCPU *cpu = XTENSA_CPU(s);
+ XtensaCPUClass *xcc = XTENSA_CPU_GET_CLASS(cpu);
+ CPUXtensaState *env = &cpu->env;
+
+ xcc->parent_reset(s);
+
+ env->exception_taken = 0;
+ env->pc = env->config->exception_vector[EXC_RESET];
+ env->sregs[LITBASE] &= ~1;
+ env->sregs[PS] = xtensa_option_enabled(env->config,
+ XTENSA_OPTION_INTERRUPT) ? 0x1f : 0x10;
+ env->sregs[VECBASE] = env->config->vecbase;
+ env->sregs[IBREAKENABLE] = 0;
+ env->sregs[CACHEATTR] = 0x22222222;
+ env->sregs[ATOMCTL] = xtensa_option_enabled(env->config,
+ XTENSA_OPTION_ATOMCTL) ? 0x28 : 0x15;
+ env->sregs[CONFIGID0] = env->config->configid[0];
+ env->sregs[CONFIGID1] = env->config->configid[1];
+
+ env->pending_irq_level = 0;
+ reset_mmu(env);
+}
+
+static ObjectClass *xtensa_cpu_class_by_name(const char *cpu_model)
+{
+ ObjectClass *oc;
+ char *typename;
+
+ if (cpu_model == NULL) {
+ return NULL;
+ }
+
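+    /* A cpu_model such as "dc233c" is looked up as the "dc233c-xtensa-cpu"
+     * QOM type, which each REGISTER_CORE()'d configuration is expected to
+     * provide.
+     */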
+ typename = g_strdup_printf("%s-" TYPE_XTENSA_CPU, cpu_model);
+ oc = object_class_by_name(typename);
+ g_free(typename);
+ if (oc == NULL || !object_class_dynamic_cast(oc, TYPE_XTENSA_CPU) ||
+ object_class_is_abstract(oc)) {
+ return NULL;
+ }
+ return oc;
+}
+
+static void xtensa_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ XtensaCPUClass *xcc = XTENSA_CPU_GET_CLASS(dev);
+ Error *local_err = NULL;
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ cs->gdb_num_regs = xcc->config->gdb_regmap.num_regs;
+
+ qemu_init_vcpu(cs);
+
+ xcc->parent_realize(dev, errp);
+}
+
+static void xtensa_cpu_initfn(Object *obj)
+{
+ CPUState *cs = CPU(obj);
+ XtensaCPU *cpu = XTENSA_CPU(obj);
+ XtensaCPUClass *xcc = XTENSA_CPU_GET_CLASS(obj);
+ CPUXtensaState *env = &cpu->env;
+ static bool tcg_inited;
+
+ cs->env_ptr = env;
+ env->config = xcc->config;
+
+ if (tcg_enabled() && !tcg_inited) {
+ tcg_inited = true;
+ xtensa_translate_init();
+ }
+}
+
+static const VMStateDescription vmstate_xtensa_cpu = {
+ .name = "cpu",
+ .unmigratable = 1,
+};
+
+static void xtensa_cpu_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(oc);
+ XtensaCPUClass *xcc = XTENSA_CPU_CLASS(cc);
+
+ xcc->parent_realize = dc->realize;
+ dc->realize = xtensa_cpu_realizefn;
+
+ xcc->parent_reset = cc->reset;
+ cc->reset = xtensa_cpu_reset;
+
+ cc->class_by_name = xtensa_cpu_class_by_name;
+ cc->has_work = xtensa_cpu_has_work;
+ cc->do_interrupt = xtensa_cpu_do_interrupt;
+ cc->cpu_exec_interrupt = xtensa_cpu_exec_interrupt;
+ cc->dump_state = xtensa_cpu_dump_state;
+ cc->set_pc = xtensa_cpu_set_pc;
+ cc->gdb_read_register = xtensa_cpu_gdb_read_register;
+ cc->gdb_write_register = xtensa_cpu_gdb_write_register;
+ cc->gdb_stop_before_watchpoint = true;
+#ifndef CONFIG_USER_ONLY
+ cc->do_unaligned_access = xtensa_cpu_do_unaligned_access;
+ cc->get_phys_page_debug = xtensa_cpu_get_phys_page_debug;
+ cc->do_unassigned_access = xtensa_cpu_do_unassigned_access;
+#endif
+ cc->debug_excp_handler = xtensa_breakpoint_handler;
+ dc->vmsd = &vmstate_xtensa_cpu;
+}
+
+static const TypeInfo xtensa_cpu_type_info = {
+ .name = TYPE_XTENSA_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(XtensaCPU),
+ .instance_init = xtensa_cpu_initfn,
+ .abstract = true,
+ .class_size = sizeof(XtensaCPUClass),
+ .class_init = xtensa_cpu_class_init,
+};
+
+static void xtensa_cpu_register_types(void)
+{
+ type_register_static(&xtensa_cpu_type_info);
+}
+
+type_init(xtensa_cpu_register_types)
diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h
new file mode 100644
index 0000000000..7fe82a37af
--- /dev/null
+++ b/target/xtensa/cpu.h
@@ -0,0 +1,587 @@
+/*
+ * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef XTENSA_CPU_H
+#define XTENSA_CPU_H
+
+#define ALIGNED_ONLY
+#define TARGET_LONG_BITS 32
+
+#define CPUArchState struct CPUXtensaState
+
+#include "qemu-common.h"
+#include "cpu-qom.h"
+#include "exec/cpu-defs.h"
+#include "fpu/softfloat.h"
+
+#define NB_MMU_MODES 4
+
+#define TARGET_PHYS_ADDR_SPACE_BITS 32
+#define TARGET_VIRT_ADDR_SPACE_BITS 32
+#define TARGET_PAGE_BITS 12
+
+enum {
+ /* Additional instructions */
+ XTENSA_OPTION_CODE_DENSITY,
+ XTENSA_OPTION_LOOP,
+ XTENSA_OPTION_EXTENDED_L32R,
+ XTENSA_OPTION_16_BIT_IMUL,
+ XTENSA_OPTION_32_BIT_IMUL,
+ XTENSA_OPTION_32_BIT_IMUL_HIGH,
+ XTENSA_OPTION_32_BIT_IDIV,
+ XTENSA_OPTION_MAC16,
+ XTENSA_OPTION_MISC_OP_NSA,
+ XTENSA_OPTION_MISC_OP_MINMAX,
+ XTENSA_OPTION_MISC_OP_SEXT,
+ XTENSA_OPTION_MISC_OP_CLAMPS,
+ XTENSA_OPTION_COPROCESSOR,
+ XTENSA_OPTION_BOOLEAN,
+ XTENSA_OPTION_FP_COPROCESSOR,
+ XTENSA_OPTION_MP_SYNCHRO,
+ XTENSA_OPTION_CONDITIONAL_STORE,
+ XTENSA_OPTION_ATOMCTL,
+ XTENSA_OPTION_DEPBITS,
+
+ /* Interrupts and exceptions */
+ XTENSA_OPTION_EXCEPTION,
+ XTENSA_OPTION_RELOCATABLE_VECTOR,
+ XTENSA_OPTION_UNALIGNED_EXCEPTION,
+ XTENSA_OPTION_INTERRUPT,
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT,
+ XTENSA_OPTION_TIMER_INTERRUPT,
+
+ /* Local memory */
+ XTENSA_OPTION_ICACHE,
+ XTENSA_OPTION_ICACHE_TEST,
+ XTENSA_OPTION_ICACHE_INDEX_LOCK,
+ XTENSA_OPTION_DCACHE,
+ XTENSA_OPTION_DCACHE_TEST,
+ XTENSA_OPTION_DCACHE_INDEX_LOCK,
+ XTENSA_OPTION_IRAM,
+ XTENSA_OPTION_IROM,
+ XTENSA_OPTION_DRAM,
+ XTENSA_OPTION_DROM,
+ XTENSA_OPTION_XLMI,
+ XTENSA_OPTION_HW_ALIGNMENT,
+ XTENSA_OPTION_MEMORY_ECC_PARITY,
+
+ /* Memory protection and translation */
+ XTENSA_OPTION_REGION_PROTECTION,
+ XTENSA_OPTION_REGION_TRANSLATION,
+ XTENSA_OPTION_MMU,
+ XTENSA_OPTION_CACHEATTR,
+
+ /* Other */
+ XTENSA_OPTION_WINDOWED_REGISTER,
+ XTENSA_OPTION_PROCESSOR_INTERFACE,
+ XTENSA_OPTION_MISC_SR,
+ XTENSA_OPTION_THREAD_POINTER,
+ XTENSA_OPTION_PROCESSOR_ID,
+ XTENSA_OPTION_DEBUG,
+ XTENSA_OPTION_TRACE_PORT,
+};
+
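+/* User register (RUR/WUR) numbers, used to index env->uregs[]. */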
+enum {
+ THREADPTR = 231,
+ FCR = 232,
+ FSR = 233,
+};
+
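+/* Special register numbers (as used by RSR/WSR/XSR), used to index env->sregs[]. */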
+enum {
+ LBEG = 0,
+ LEND = 1,
+ LCOUNT = 2,
+ SAR = 3,
+ BR = 4,
+ LITBASE = 5,
+ SCOMPARE1 = 12,
+ ACCLO = 16,
+ ACCHI = 17,
+ MR = 32,
+ WINDOW_BASE = 72,
+ WINDOW_START = 73,
+ PTEVADDR = 83,
+ RASID = 90,
+ ITLBCFG = 91,
+ DTLBCFG = 92,
+ IBREAKENABLE = 96,
+ CACHEATTR = 98,
+ ATOMCTL = 99,
+ IBREAKA = 128,
+ DBREAKA = 144,
+ DBREAKC = 160,
+ CONFIGID0 = 176,
+ EPC1 = 177,
+ DEPC = 192,
+ EPS2 = 194,
+ CONFIGID1 = 208,
+ EXCSAVE1 = 209,
+ CPENABLE = 224,
+ INTSET = 226,
+ INTCLEAR = 227,
+ INTENABLE = 228,
+ PS = 230,
+ VECBASE = 231,
+ EXCCAUSE = 232,
+ DEBUGCAUSE = 233,
+ CCOUNT = 234,
+ PRID = 235,
+ ICOUNT = 236,
+ ICOUNTLEVEL = 237,
+ EXCVADDR = 238,
+ CCOMPARE = 240,
+ MISC = 244,
+};
+
+#define PS_INTLEVEL 0xf
+#define PS_INTLEVEL_SHIFT 0
+
+#define PS_EXCM 0x10
+#define PS_UM 0x20
+
+#define PS_RING 0xc0
+#define PS_RING_SHIFT 6
+
+#define PS_OWB 0xf00
+#define PS_OWB_SHIFT 8
+
+#define PS_CALLINC 0x30000
+#define PS_CALLINC_SHIFT 16
+#define PS_CALLINC_LEN 2
+
+#define PS_WOE 0x40000
+
+#define DEBUGCAUSE_IC 0x1
+#define DEBUGCAUSE_IB 0x2
+#define DEBUGCAUSE_DB 0x4
+#define DEBUGCAUSE_BI 0x8
+#define DEBUGCAUSE_BN 0x10
+#define DEBUGCAUSE_DI 0x20
+#define DEBUGCAUSE_DBNUM 0xf00
+#define DEBUGCAUSE_DBNUM_SHIFT 8
+
+#define DBREAKC_SB 0x80000000
+#define DBREAKC_LB 0x40000000
+#define DBREAKC_SB_LB (DBREAKC_SB | DBREAKC_LB)
+#define DBREAKC_MASK 0x3f
+
+#define MAX_NAREG 64
+#define MAX_NINTERRUPT 32
+#define MAX_NLEVEL 6
+#define MAX_NNMI 1
+#define MAX_NCCOMPARE 3
+#define MAX_TLB_WAY_SIZE 8
+#define MAX_NDBREAK 2
+
+#define REGION_PAGE_MASK 0xe0000000
+
+#define PAGE_CACHE_MASK 0x700
+#define PAGE_CACHE_SHIFT 8
+#define PAGE_CACHE_INVALID 0x000
+#define PAGE_CACHE_BYPASS 0x100
+#define PAGE_CACHE_WT 0x200
+#define PAGE_CACHE_WB 0x400
+#define PAGE_CACHE_ISOLATE 0x600
+
+enum {
+ /* Static vectors */
+ EXC_RESET,
+ EXC_MEMORY_ERROR,
+
+ /* Dynamic vectors */
+ EXC_WINDOW_OVERFLOW4,
+ EXC_WINDOW_UNDERFLOW4,
+ EXC_WINDOW_OVERFLOW8,
+ EXC_WINDOW_UNDERFLOW8,
+ EXC_WINDOW_OVERFLOW12,
+ EXC_WINDOW_UNDERFLOW12,
+ EXC_IRQ,
+ EXC_KERNEL,
+ EXC_USER,
+ EXC_DOUBLE,
+ EXC_DEBUG,
+ EXC_MAX
+};
+
+enum {
+ ILLEGAL_INSTRUCTION_CAUSE = 0,
+ SYSCALL_CAUSE,
+ INSTRUCTION_FETCH_ERROR_CAUSE,
+ LOAD_STORE_ERROR_CAUSE,
+ LEVEL1_INTERRUPT_CAUSE,
+ ALLOCA_CAUSE,
+ INTEGER_DIVIDE_BY_ZERO_CAUSE,
+ PRIVILEGED_CAUSE = 8,
+ LOAD_STORE_ALIGNMENT_CAUSE,
+
+ INSTR_PIF_DATA_ERROR_CAUSE = 12,
+ LOAD_STORE_PIF_DATA_ERROR_CAUSE,
+ INSTR_PIF_ADDR_ERROR_CAUSE,
+ LOAD_STORE_PIF_ADDR_ERROR_CAUSE,
+
+ INST_TLB_MISS_CAUSE,
+ INST_TLB_MULTI_HIT_CAUSE,
+ INST_FETCH_PRIVILEGE_CAUSE,
+ INST_FETCH_PROHIBITED_CAUSE = 20,
+ LOAD_STORE_TLB_MISS_CAUSE = 24,
+ LOAD_STORE_TLB_MULTI_HIT_CAUSE,
+ LOAD_STORE_PRIVILEGE_CAUSE,
+ LOAD_PROHIBITED_CAUSE = 28,
+ STORE_PROHIBITED_CAUSE,
+
+ COPROCESSOR0_DISABLED = 32,
+};
+
+typedef enum {
+ INTTYPE_LEVEL,
+ INTTYPE_EDGE,
+ INTTYPE_NMI,
+ INTTYPE_SOFTWARE,
+ INTTYPE_TIMER,
+ INTTYPE_DEBUG,
+ INTTYPE_WRITE_ERR,
+ INTTYPE_PROFILING,
+ INTTYPE_MAX
+} interrupt_type;
+
+typedef struct xtensa_tlb_entry {
+ uint32_t vaddr;
+ uint32_t paddr;
+ uint8_t asid;
+ uint8_t attr;
+ bool variable;
+} xtensa_tlb_entry;
+
+typedef struct xtensa_tlb {
+ unsigned nways;
+ const unsigned way_size[10];
+ bool varway56;
+ unsigned nrefillentries;
+} xtensa_tlb;
+
+typedef struct XtensaGdbReg {
+ int targno;
+ int type;
+ int group;
+ unsigned size;
+} XtensaGdbReg;
+
+typedef struct XtensaGdbRegmap {
+ int num_regs;
+ int num_core_regs;
+ /* PC + a + ar + sr + ur */
+ XtensaGdbReg reg[1 + 16 + 64 + 256 + 256];
+} XtensaGdbRegmap;
+
+struct XtensaConfig {
+ const char *name;
+ uint64_t options;
+ XtensaGdbRegmap gdb_regmap;
+ unsigned nareg;
+ int excm_level;
+ int ndepc;
+ uint32_t vecbase;
+ uint32_t exception_vector[EXC_MAX];
+ unsigned ninterrupt;
+ unsigned nlevel;
+ uint32_t interrupt_vector[MAX_NLEVEL + MAX_NNMI + 1];
+ uint32_t level_mask[MAX_NLEVEL + MAX_NNMI + 1];
+ uint32_t inttype_mask[INTTYPE_MAX];
+ struct {
+ uint32_t level;
+ interrupt_type inttype;
+ } interrupt[MAX_NINTERRUPT];
+ unsigned nccompare;
+ uint32_t timerint[MAX_NCCOMPARE];
+ unsigned nextint;
+ unsigned extint[MAX_NINTERRUPT];
+
+ unsigned debug_level;
+ unsigned nibreak;
+ unsigned ndbreak;
+
+ uint32_t configid[2];
+
+ uint32_t clock_freq_khz;
+
+ xtensa_tlb itlb;
+ xtensa_tlb dtlb;
+};
+
+typedef struct XtensaConfigList {
+ const XtensaConfig *config;
+ struct XtensaConfigList *next;
+} XtensaConfigList;
+
+#ifdef HOST_WORDS_BIGENDIAN
+enum {
+ FP_F32_HIGH,
+ FP_F32_LOW,
+};
+#else
+enum {
+ FP_F32_LOW,
+ FP_F32_HIGH,
+};
+#endif
+
+typedef struct CPUXtensaState {
+ const XtensaConfig *config;
+ uint32_t regs[16];
+ uint32_t pc;
+ uint32_t sregs[256];
+ uint32_t uregs[256];
+ uint32_t phys_regs[MAX_NAREG];
+ union {
+ float32 f32[2];
+ float64 f64;
+ } fregs[16];
+ float_status fp_status;
+
+ xtensa_tlb_entry itlb[7][MAX_TLB_WAY_SIZE];
+ xtensa_tlb_entry dtlb[10][MAX_TLB_WAY_SIZE];
+ unsigned autorefill_idx;
+
+ int pending_irq_level; /* level of last raised IRQ */
+ void **irq_inputs;
+ QEMUTimer *ccompare_timer;
+ uint32_t wake_ccount;
+ int64_t halt_clock;
+
+ int exception_taken;
+
+ /* Watchpoints for DBREAK registers */
+ struct CPUWatchpoint *cpu_watchpoint[MAX_NDBREAK];
+
+ CPU_COMMON
+} CPUXtensaState;
+
+/**
+ * XtensaCPU:
+ * @env: #CPUXtensaState
+ *
+ * An Xtensa CPU.
+ */
+struct XtensaCPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPUXtensaState env;
+};
+
+static inline XtensaCPU *xtensa_env_get_cpu(const CPUXtensaState *env)
+{
+ return container_of(env, XtensaCPU, env);
+}
+
+#define ENV_GET_CPU(e) CPU(xtensa_env_get_cpu(e))
+
+#define ENV_OFFSET offsetof(XtensaCPU, env)
+
+void xtensa_cpu_do_interrupt(CPUState *cpu);
+bool xtensa_cpu_exec_interrupt(CPUState *cpu, int interrupt_request);
+void xtensa_cpu_do_unassigned_access(CPUState *cpu, hwaddr addr,
+ bool is_write, bool is_exec, int opaque,
+ unsigned size);
+void xtensa_cpu_dump_state(CPUState *cpu, FILE *f,
+ fprintf_function cpu_fprintf, int flags);
+hwaddr xtensa_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+int xtensa_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
+int xtensa_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+void xtensa_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
+ MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr);
+
+#define cpu_signal_handler cpu_xtensa_signal_handler
+#define cpu_list xtensa_cpu_list
+
+#ifdef TARGET_WORDS_BIGENDIAN
+#define XTENSA_DEFAULT_CPU_MODEL "fsf"
+#else
+#define XTENSA_DEFAULT_CPU_MODEL "dc232b"
+#endif
+
+XtensaCPU *cpu_xtensa_init(const char *cpu_model);
+
+#define cpu_init(cpu_model) CPU(cpu_xtensa_init(cpu_model))
+
+void xtensa_translate_init(void);
+void xtensa_breakpoint_handler(CPUState *cs);
+void xtensa_finalize_config(XtensaConfig *config);
+void xtensa_register_core(XtensaConfigList *node);
+void check_interrupts(CPUXtensaState *s);
+void xtensa_irq_init(CPUXtensaState *env);
+void *xtensa_get_extint(CPUXtensaState *env, unsigned extint);
+void xtensa_advance_ccount(CPUXtensaState *env, uint32_t d);
+void xtensa_timer_irq(CPUXtensaState *env, uint32_t id, uint32_t active);
+void xtensa_rearm_ccompare_timer(CPUXtensaState *env);
+int cpu_xtensa_signal_handler(int host_signum, void *pinfo, void *puc);
+void xtensa_cpu_list(FILE *f, fprintf_function cpu_fprintf);
+void xtensa_sync_window_from_phys(CPUXtensaState *env);
+void xtensa_sync_phys_from_window(CPUXtensaState *env);
+uint32_t xtensa_tlb_get_addr_mask(const CPUXtensaState *env, bool dtlb, uint32_t way);
+void split_tlb_entry_spec_way(const CPUXtensaState *env, uint32_t v, bool dtlb,
+ uint32_t *vpn, uint32_t wi, uint32_t *ei);
+int xtensa_tlb_lookup(const CPUXtensaState *env, uint32_t addr, bool dtlb,
+ uint32_t *pwi, uint32_t *pei, uint8_t *pring);
+void xtensa_tlb_set_entry_mmu(const CPUXtensaState *env,
+ xtensa_tlb_entry *entry, bool dtlb,
+ unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte);
+void xtensa_tlb_set_entry(CPUXtensaState *env, bool dtlb,
+ unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte);
+int xtensa_get_physical_addr(CPUXtensaState *env, bool update_tlb,
+ uint32_t vaddr, int is_write, int mmu_idx,
+ uint32_t *paddr, uint32_t *page_size, unsigned *access);
+void reset_mmu(CPUXtensaState *env);
+void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUXtensaState *env);
+void debug_exception_env(CPUXtensaState *new_env, uint32_t cause);
+
+
+#define XTENSA_OPTION_BIT(opt) (((uint64_t)1) << (opt))
+#define XTENSA_OPTION_ALL (~(uint64_t)0)
+
+static inline bool xtensa_option_bits_enabled(const XtensaConfig *config,
+ uint64_t opt)
+{
+ return (config->options & opt) != 0;
+}
+
+static inline bool xtensa_option_enabled(const XtensaConfig *config, int opt)
+{
+ return xtensa_option_bits_enabled(config, XTENSA_OPTION_BIT(opt));
+}
+
+static inline int xtensa_get_cintlevel(const CPUXtensaState *env)
+{
+ int level = (env->sregs[PS] & PS_INTLEVEL) >> PS_INTLEVEL_SHIFT;
+ if ((env->sregs[PS] & PS_EXCM) && env->config->excm_level > level) {
+ level = env->config->excm_level;
+ }
+ return level;
+}
+
+static inline int xtensa_get_ring(const CPUXtensaState *env)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ return (env->sregs[PS] & PS_RING) >> PS_RING_SHIFT;
+ } else {
+ return 0;
+ }
+}
+
+static inline int xtensa_get_cring(const CPUXtensaState *env)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU) &&
+ (env->sregs[PS] & PS_EXCM) == 0) {
+ return (env->sregs[PS] & PS_RING) >> PS_RING_SHIFT;
+ } else {
+ return 0;
+ }
+}
+
+static inline xtensa_tlb_entry *xtensa_tlb_get_entry(CPUXtensaState *env,
+ bool dtlb, unsigned wi, unsigned ei)
+{
+ return dtlb ?
+ env->dtlb[wi] + ei :
+ env->itlb[wi] + ei;
+}
+
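+/*
+ * WINDOW_START holds nareg/4 bits.  ORing in a copy shifted left by
+ * nareg/4 duplicates the bit pattern, so callers can rotate it by
+ * WINDOW_BASE with a plain right shift of the replicated value.
+ */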
+static inline uint32_t xtensa_replicate_windowstart(CPUXtensaState *env)
+{
+ return env->sregs[WINDOW_START] |
+ (env->sregs[WINDOW_START] << env->config->nareg / 4);
+}
+
+/* MMU modes definitions */
+#define MMU_MODE0_SUFFIX _ring0
+#define MMU_MODE1_SUFFIX _ring1
+#define MMU_MODE2_SUFFIX _ring2
+#define MMU_MODE3_SUFFIX _ring3
+
+static inline int cpu_mmu_index(CPUXtensaState *env, bool ifetch)
+{
+ return xtensa_get_cring(env);
+}
+
+#define XTENSA_TBFLAG_RING_MASK 0x3
+#define XTENSA_TBFLAG_EXCM 0x4
+#define XTENSA_TBFLAG_LITBASE 0x8
+#define XTENSA_TBFLAG_DEBUG 0x10
+#define XTENSA_TBFLAG_ICOUNT 0x20
+#define XTENSA_TBFLAG_CPENABLE_MASK 0x3fc0
+#define XTENSA_TBFLAG_CPENABLE_SHIFT 6
+#define XTENSA_TBFLAG_EXCEPTION 0x4000
+#define XTENSA_TBFLAG_WINDOW_MASK 0x18000
+#define XTENSA_TBFLAG_WINDOW_SHIFT 15
+
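+/*
+ * Gather the PC and the flags that affect code generation (current ring,
+ * EXCM, extended LITBASE, debug/icount state, CPENABLE and the number of
+ * free register window frames), so translated blocks are only reused when
+ * this state matches.
+ */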
+static inline void cpu_get_tb_cpu_state(CPUXtensaState *env, target_ulong *pc,
+ target_ulong *cs_base, uint32_t *flags)
+{
+ CPUState *cs = CPU(xtensa_env_get_cpu(env));
+
+ *pc = env->pc;
+ *cs_base = 0;
+ *flags = 0;
+ *flags |= xtensa_get_ring(env);
+ if (env->sregs[PS] & PS_EXCM) {
+ *flags |= XTENSA_TBFLAG_EXCM;
+ }
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_EXTENDED_L32R) &&
+ (env->sregs[LITBASE] & 1)) {
+ *flags |= XTENSA_TBFLAG_LITBASE;
+ }
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_DEBUG)) {
+ if (xtensa_get_cintlevel(env) < env->config->debug_level) {
+ *flags |= XTENSA_TBFLAG_DEBUG;
+ }
+ if (xtensa_get_cintlevel(env) < env->sregs[ICOUNTLEVEL]) {
+ *flags |= XTENSA_TBFLAG_ICOUNT;
+ }
+ }
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_COPROCESSOR)) {
+ *flags |= env->sregs[CPENABLE] << XTENSA_TBFLAG_CPENABLE_SHIFT;
+ }
+ if (cs->singlestep_enabled && env->exception_taken) {
+ *flags |= XTENSA_TBFLAG_EXCEPTION;
+ }
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_WINDOWED_REGISTER) &&
+ (env->sregs[PS] & (PS_WOE | PS_EXCM)) == PS_WOE) {
+ uint32_t windowstart = xtensa_replicate_windowstart(env) >>
+ (env->sregs[WINDOW_BASE] + 1);
+ uint32_t w = ctz32(windowstart | 0x8);
+
+ *flags |= w << XTENSA_TBFLAG_WINDOW_SHIFT;
+ } else {
+ *flags |= 3 << XTENSA_TBFLAG_WINDOW_SHIFT;
+ }
+}
+
+#include "exec/cpu-all.h"
+
+#endif
diff --git a/target/xtensa/gdbstub.c b/target/xtensa/gdbstub.c
new file mode 100644
index 0000000000..fa5469a4ef
--- /dev/null
+++ b/target/xtensa/gdbstub.c
@@ -0,0 +1,128 @@
+/*
+ * Xtensa gdb server stub
+ *
+ * Copyright (c) 2003-2005 Fabrice Bellard
+ * Copyright (c) 2013 SUSE LINUX Products GmbH
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "cpu.h"
+#include "exec/gdbstub.h"
+#include "qemu/log.h"
+
+int xtensa_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+ CPUXtensaState *env = &cpu->env;
+ const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;
+ unsigned i;
+
+ if (n < 0 || n >= env->config->gdb_regmap.num_regs) {
+ return 0;
+ }
+
+ switch (reg->type) {
+ case 9: /*pc*/
+ return gdb_get_reg32(mem_buf, env->pc);
+
+ case 1: /*ar*/
+ xtensa_sync_phys_from_window(env);
+ return gdb_get_reg32(mem_buf, env->phys_regs[(reg->targno & 0xff)
+ % env->config->nareg]);
+
+ case 2: /*SR*/
+ return gdb_get_reg32(mem_buf, env->sregs[reg->targno & 0xff]);
+
+ case 3: /*UR*/
+ return gdb_get_reg32(mem_buf, env->uregs[reg->targno & 0xff]);
+
+ case 4: /*f*/
+ i = reg->targno & 0x0f;
+ switch (reg->size) {
+ case 4:
+ return gdb_get_reg32(mem_buf,
+ float32_val(env->fregs[i].f32[FP_F32_LOW]));
+ case 8:
+ return gdb_get_reg64(mem_buf, float64_val(env->fregs[i].f64));
+ default:
+ return 0;
+ }
+
+ case 8: /*a*/
+ return gdb_get_reg32(mem_buf, env->regs[reg->targno & 0x0f]);
+
+ default:
+ qemu_log_mask(LOG_UNIMP, "%s from reg %d of unsupported type %d\n",
+ __func__, n, reg->type);
+ return 0;
+ }
+}
+
+int xtensa_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
+{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+ CPUXtensaState *env = &cpu->env;
+ uint32_t tmp;
+ const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;
+
+ if (n < 0 || n >= env->config->gdb_regmap.num_regs) {
+ return 0;
+ }
+
+ tmp = ldl_p(mem_buf);
+
+ switch (reg->type) {
+ case 9: /*pc*/
+ env->pc = tmp;
+ break;
+
+ case 1: /*ar*/
+ env->phys_regs[(reg->targno & 0xff) % env->config->nareg] = tmp;
+ xtensa_sync_window_from_phys(env);
+ break;
+
+ case 2: /*SR*/
+ env->sregs[reg->targno & 0xff] = tmp;
+ break;
+
+ case 3: /*UR*/
+ env->uregs[reg->targno & 0xff] = tmp;
+ break;
+
+ case 4: /*f*/
+ switch (reg->size) {
+ case 4:
+ env->fregs[reg->targno & 0x0f].f32[FP_F32_LOW] = make_float32(tmp);
+ return 4;
+ case 8:
+ env->fregs[reg->targno & 0x0f].f64 = make_float64(tmp);
+ return 8;
+ default:
+ return 0;
+ }
+
+ case 8: /*a*/
+ env->regs[reg->targno & 0x0f] = tmp;
+ break;
+
+ default:
+ qemu_log_mask(LOG_UNIMP, "%s to reg %d of unsupported type %d\n",
+ __func__, n, reg->type);
+ return 0;
+ }
+
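+ /* The remaining register types are 32 bits wide: report 4 bytes consumed. */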
+ return 4;
+}
diff --git a/target/xtensa/helper.c b/target/xtensa/helper.c
new file mode 100644
index 0000000000..768b32c417
--- /dev/null
+++ b/target/xtensa/helper.c
@@ -0,0 +1,730 @@
+/*
+ * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/gdbstub.h"
+#include "qemu/host-utils.h"
+#if !defined(CONFIG_USER_ONLY)
+#include "hw/loader.h"
+#endif
+
+static struct XtensaConfigList *xtensa_cores;
+
+static void xtensa_core_class_init(ObjectClass *oc, void *data)
+{
+ CPUClass *cc = CPU_CLASS(oc);
+ XtensaCPUClass *xcc = XTENSA_CPU_CLASS(oc);
+ const XtensaConfig *config = data;
+
+ xcc->config = config;
+
+ /* Use num_core_regs to expose only the non-privileged registers to an
+ * unmodified gdb. Use num_regs to expose all registers; that requires a
+ * modified gdb: reset bit 0 in the 'flags' field of the register
+ * definitions in gdb/xtensa-config.c, in the gdb source tree or the overlay.
+ */
+ cc->gdb_num_core_regs = config->gdb_regmap.num_regs;
+}
+
+void xtensa_finalize_config(XtensaConfig *config)
+{
+ unsigned i, n = 0;
+
+ if (config->gdb_regmap.num_regs) {
+ return;
+ }
+
+ for (i = 0; config->gdb_regmap.reg[i].targno >= 0; ++i) {
+ n += (config->gdb_regmap.reg[i].type != 6);
+ }
+ config->gdb_regmap.num_regs = n;
+}
+
+void xtensa_register_core(XtensaConfigList *node)
+{
+ TypeInfo type = {
+ .parent = TYPE_XTENSA_CPU,
+ .class_init = xtensa_core_class_init,
+ .class_data = (void *)node->config,
+ };
+
+ node->next = xtensa_cores;
+ xtensa_cores = node;
+ type.name = g_strdup_printf("%s-" TYPE_XTENSA_CPU, node->config->name);
+ type_register(&type);
+ g_free((gpointer)type.name);
+}
+
+static uint32_t check_hw_breakpoints(CPUXtensaState *env)
+{
+ unsigned i;
+
+ for (i = 0; i < env->config->ndbreak; ++i) {
+ if (env->cpu_watchpoint[i] &&
+ env->cpu_watchpoint[i]->flags & BP_WATCHPOINT_HIT) {
+ return DEBUGCAUSE_DB | (i << DEBUGCAUSE_DBNUM_SHIFT);
+ }
+ }
+ return 0;
+}
+
+void xtensa_breakpoint_handler(CPUState *cs)
+{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+ CPUXtensaState *env = &cpu->env;
+
+ if (cs->watchpoint_hit) {
+ if (cs->watchpoint_hit->flags & BP_CPU) {
+ uint32_t cause;
+
+ cs->watchpoint_hit = NULL;
+ cause = check_hw_breakpoints(env);
+ if (cause) {
+ debug_exception_env(env, cause);
+ }
+ cpu_loop_exit_noexc(cs);
+ }
+ }
+}
+
+XtensaCPU *cpu_xtensa_init(const char *cpu_model)
+{
+ ObjectClass *oc;
+ XtensaCPU *cpu;
+ CPUXtensaState *env;
+
+ oc = cpu_class_by_name(TYPE_XTENSA_CPU, cpu_model);
+ if (oc == NULL) {
+ return NULL;
+ }
+
+ cpu = XTENSA_CPU(object_new(object_class_get_name(oc)));
+ env = &cpu->env;
+
+ xtensa_irq_init(env);
+
+ object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
+
+ return cpu;
+}
+
+
+void xtensa_cpu_list(FILE *f, fprintf_function cpu_fprintf)
+{
+ XtensaConfigList *core = xtensa_cores;
+ cpu_fprintf(f, "Available CPUs:\n");
+ for (; core; core = core->next) {
+ cpu_fprintf(f, " %s\n", core->config->name);
+ }
+}
+
+hwaddr xtensa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+ uint32_t paddr;
+ uint32_t page_size;
+ unsigned access;
+
+ if (xtensa_get_physical_addr(&cpu->env, false, addr, 0, 0,
+ &paddr, &page_size, &access) == 0) {
+ return paddr;
+ }
+ if (xtensa_get_physical_addr(&cpu->env, false, addr, 2, 0,
+ &paddr, &page_size, &access) == 0) {
+ return paddr;
+ }
+ return ~0;
+}
+
+static uint32_t relocated_vector(CPUXtensaState *env, uint32_t vector)
+{
+ if (xtensa_option_enabled(env->config,
+ XTENSA_OPTION_RELOCATABLE_VECTOR)) {
+ return vector - env->config->vecbase + env->sregs[VECBASE];
+ } else {
+ return vector;
+ }
+}
+
+/*!
+ * Handle a pending IRQ.
+ * For a high-priority interrupt, jump to the corresponding interrupt vector.
+ * For a level-1 interrupt, convert it to either a user, kernel or double
+ * exception with the 'level-1 interrupt' exception cause.
+ */
+static void handle_interrupt(CPUXtensaState *env)
+{
+ int level = env->pending_irq_level;
+
+ if (level > xtensa_get_cintlevel(env) &&
+ level <= env->config->nlevel &&
+ (env->config->level_mask[level] &
+ env->sregs[INTSET] &
+ env->sregs[INTENABLE])) {
+ CPUState *cs = CPU(xtensa_env_get_cpu(env));
+
+ if (level > 1) {
+ env->sregs[EPC1 + level - 1] = env->pc;
+ env->sregs[EPS2 + level - 2] = env->sregs[PS];
+ env->sregs[PS] =
+ (env->sregs[PS] & ~PS_INTLEVEL) | level | PS_EXCM;
+ env->pc = relocated_vector(env,
+ env->config->interrupt_vector[level]);
+ } else {
+ env->sregs[EXCCAUSE] = LEVEL1_INTERRUPT_CAUSE;
+
+ if (env->sregs[PS] & PS_EXCM) {
+ if (env->config->ndepc) {
+ env->sregs[DEPC] = env->pc;
+ } else {
+ env->sregs[EPC1] = env->pc;
+ }
+ cs->exception_index = EXC_DOUBLE;
+ } else {
+ env->sregs[EPC1] = env->pc;
+ cs->exception_index =
+ (env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
+ }
+ env->sregs[PS] |= PS_EXCM;
+ }
+ env->exception_taken = 1;
+ }
+}
+
+void xtensa_cpu_do_interrupt(CPUState *cs)
+{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+ CPUXtensaState *env = &cpu->env;
+
+ if (cs->exception_index == EXC_IRQ) {
+ qemu_log_mask(CPU_LOG_INT,
+ "%s(EXC_IRQ) level = %d, cintlevel = %d, "
+ "pc = %08x, a0 = %08x, ps = %08x, "
+ "intset = %08x, intenable = %08x, "
+ "ccount = %08x\n",
+ __func__, env->pending_irq_level, xtensa_get_cintlevel(env),
+ env->pc, env->regs[0], env->sregs[PS],
+ env->sregs[INTSET], env->sregs[INTENABLE],
+ env->sregs[CCOUNT]);
+ handle_interrupt(env);
+ }
+
+ switch (cs->exception_index) {
+ case EXC_WINDOW_OVERFLOW4:
+ case EXC_WINDOW_UNDERFLOW4:
+ case EXC_WINDOW_OVERFLOW8:
+ case EXC_WINDOW_UNDERFLOW8:
+ case EXC_WINDOW_OVERFLOW12:
+ case EXC_WINDOW_UNDERFLOW12:
+ case EXC_KERNEL:
+ case EXC_USER:
+ case EXC_DOUBLE:
+ case EXC_DEBUG:
+ qemu_log_mask(CPU_LOG_INT, "%s(%d) "
+ "pc = %08x, a0 = %08x, ps = %08x, ccount = %08x\n",
+ __func__, cs->exception_index,
+ env->pc, env->regs[0], env->sregs[PS], env->sregs[CCOUNT]);
+ if (env->config->exception_vector[cs->exception_index]) {
+ env->pc = relocated_vector(env,
+ env->config->exception_vector[cs->exception_index]);
+ env->exception_taken = 1;
+ } else {
+ qemu_log_mask(CPU_LOG_INT, "%s(pc = %08x) bad exception_index: %d\n",
+ __func__, env->pc, cs->exception_index);
+ }
+ break;
+
+ case EXC_IRQ:
+ break;
+
+ default:
+ qemu_log("%s(pc = %08x) unknown exception_index: %d\n",
+ __func__, env->pc, cs->exception_index);
+ break;
+ }
+ check_interrupts(env);
+}
+
+bool xtensa_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+ if (interrupt_request & CPU_INTERRUPT_HARD) {
+ cs->exception_index = EXC_IRQ;
+ xtensa_cpu_do_interrupt(cs);
+ return true;
+ }
+ return false;
+}
+
+static void reset_tlb_mmu_all_ways(CPUXtensaState *env,
+ const xtensa_tlb *tlb, xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
+{
+ unsigned wi, ei;
+
+ for (wi = 0; wi < tlb->nways; ++wi) {
+ for (ei = 0; ei < tlb->way_size[wi]; ++ei) {
+ entry[wi][ei].asid = 0;
+ entry[wi][ei].variable = true;
+ }
+ }
+}
+
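+/*
+ * Ways 5 and 6 hold the fixed mappings set up at reset: way 5 maps
+ * 0xd0000000/0xd8000000 to physical 0x00000000 (cached and bypass), way 6
+ * maps 0xe0000000/0xf0000000 to 0xf0000000.  Cores with variable ways 5/6
+ * get an identity mapping in way 6 instead.
+ */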
+static void reset_tlb_mmu_ways56(CPUXtensaState *env,
+ const xtensa_tlb *tlb, xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
+{
+ if (!tlb->varway56) {
+ static const xtensa_tlb_entry way5[] = {
+ {
+ .vaddr = 0xd0000000,
+ .paddr = 0,
+ .asid = 1,
+ .attr = 7,
+ .variable = false,
+ }, {
+ .vaddr = 0xd8000000,
+ .paddr = 0,
+ .asid = 1,
+ .attr = 3,
+ .variable = false,
+ }
+ };
+ static const xtensa_tlb_entry way6[] = {
+ {
+ .vaddr = 0xe0000000,
+ .paddr = 0xf0000000,
+ .asid = 1,
+ .attr = 7,
+ .variable = false,
+ }, {
+ .vaddr = 0xf0000000,
+ .paddr = 0xf0000000,
+ .asid = 1,
+ .attr = 3,
+ .variable = false,
+ }
+ };
+ memcpy(entry[5], way5, sizeof(way5));
+ memcpy(entry[6], way6, sizeof(way6));
+ } else {
+ uint32_t ei;
+ for (ei = 0; ei < 8; ++ei) {
+ entry[6][ei].vaddr = ei << 29;
+ entry[6][ei].paddr = ei << 29;
+ entry[6][ei].asid = 1;
+ entry[6][ei].attr = 3;
+ }
+ }
+}
+
+static void reset_tlb_region_way0(CPUXtensaState *env,
+ xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE])
+{
+ unsigned ei;
+
+ for (ei = 0; ei < 8; ++ei) {
+ entry[0][ei].vaddr = ei << 29;
+ entry[0][ei].paddr = ei << 29;
+ entry[0][ei].asid = 1;
+ entry[0][ei].attr = 2;
+ entry[0][ei].variable = true;
+ }
+}
+
+void reset_mmu(CPUXtensaState *env)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ env->sregs[RASID] = 0x04030201;
+ env->sregs[ITLBCFG] = 0;
+ env->sregs[DTLBCFG] = 0;
+ env->autorefill_idx = 0;
+ reset_tlb_mmu_all_ways(env, &env->config->itlb, env->itlb);
+ reset_tlb_mmu_all_ways(env, &env->config->dtlb, env->dtlb);
+ reset_tlb_mmu_ways56(env, &env->config->itlb, env->itlb);
+ reset_tlb_mmu_ways56(env, &env->config->dtlb, env->dtlb);
+ } else {
+ reset_tlb_region_way0(env, env->itlb);
+ reset_tlb_region_way0(env, env->dtlb);
+ }
+}
+
+static unsigned get_ring(const CPUXtensaState *env, uint8_t asid)
+{
+ unsigned i;
+ for (i = 0; i < 4; ++i) {
+ if (((env->sregs[RASID] >> i * 8) & 0xff) == asid) {
+ return i;
+ }
+ }
+ return 0xff;
+}
+
+/*!
+ * Look up the Xtensa TLB for the given virtual address.
+ * See ISA, 4.6.2.2
+ *
+ * \param pwi: [out] way index
+ * \param pei: [out] entry index
+ * \param pring: [out] access ring
+ * \return 0 if ok, exception cause code otherwise
+ */
+int xtensa_tlb_lookup(const CPUXtensaState *env, uint32_t addr, bool dtlb,
+ uint32_t *pwi, uint32_t *pei, uint8_t *pring)
+{
+ const xtensa_tlb *tlb = dtlb ?
+ &env->config->dtlb : &env->config->itlb;
+ const xtensa_tlb_entry (*entry)[MAX_TLB_WAY_SIZE] = dtlb ?
+ env->dtlb : env->itlb;
+
+ int nhits = 0;
+ unsigned wi;
+
+ for (wi = 0; wi < tlb->nways; ++wi) {
+ uint32_t vpn;
+ uint32_t ei;
+ split_tlb_entry_spec_way(env, addr, dtlb, &vpn, wi, &ei);
+ if (entry[wi][ei].vaddr == vpn && entry[wi][ei].asid) {
+ unsigned ring = get_ring(env, entry[wi][ei].asid);
+ if (ring < 4) {
+ if (++nhits > 1) {
+ return dtlb ?
+ LOAD_STORE_TLB_MULTI_HIT_CAUSE :
+ INST_TLB_MULTI_HIT_CAUSE;
+ }
+ *pwi = wi;
+ *pei = ei;
+ *pring = ring;
+ }
+ }
+ }
+ return nhits ? 0 :
+ (dtlb ? LOAD_STORE_TLB_MISS_CAUSE : INST_TLB_MISS_CAUSE);
+}
+
+/*!
+ * Convert MMU ATTR to PAGE_{READ,WRITE,EXEC} mask.
+ * See ISA, 4.6.5.10
+ */
+static unsigned mmu_attr_to_access(uint32_t attr)
+{
+ unsigned access = 0;
+
+ if (attr < 12) {
+ access |= PAGE_READ;
+ if (attr & 0x1) {
+ access |= PAGE_EXEC;
+ }
+ if (attr & 0x2) {
+ access |= PAGE_WRITE;
+ }
+
+ switch (attr & 0xc) {
+ case 0:
+ access |= PAGE_CACHE_BYPASS;
+ break;
+
+ case 4:
+ access |= PAGE_CACHE_WB;
+ break;
+
+ case 8:
+ access |= PAGE_CACHE_WT;
+ break;
+ }
+ } else if (attr == 13) {
+ access |= PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE;
+ }
+ return access;
+}
+
+/*!
+ * Convert region protection ATTR to PAGE_{READ,WRITE,EXEC} mask.
+ * See ISA, 4.6.3.3
+ */
+static unsigned region_attr_to_access(uint32_t attr)
+{
+ static const unsigned access[16] = {
+ [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT,
+ [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
+ [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
+ [3] = PAGE_EXEC | PAGE_CACHE_WB,
+ [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
+ [5] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
+ [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE,
+ };
+
+ return access[attr & 0xf];
+}
+
+/*!
+ * Convert cacheattr to PAGE_{READ,WRITE,EXEC} mask.
+ * See ISA, A.2.14 The Cache Attribute Register
+ */
+static unsigned cacheattr_attr_to_access(uint32_t attr)
+{
+ static const unsigned access[16] = {
+ [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT,
+ [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT,
+ [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS,
+ [3] = PAGE_EXEC | PAGE_CACHE_WB,
+ [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB,
+ [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE,
+ };
+
+ return access[attr & 0xf];
+}
+
+static bool is_access_granted(unsigned access, int is_write)
+{
+ switch (is_write) {
+ case 0:
+ return access & PAGE_READ;
+
+ case 1:
+ return access & PAGE_WRITE;
+
+ case 2:
+ return access & PAGE_EXEC;
+
+ default:
+ return 0;
+ }
+}
+
+static int get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte);
+
+static int get_physical_addr_mmu(CPUXtensaState *env, bool update_tlb,
+ uint32_t vaddr, int is_write, int mmu_idx,
+ uint32_t *paddr, uint32_t *page_size, unsigned *access,
+ bool may_lookup_pt)
+{
+ bool dtlb = is_write != 2;
+ uint32_t wi;
+ uint32_t ei;
+ uint8_t ring;
+ uint32_t vpn;
+ uint32_t pte;
+ const xtensa_tlb_entry *entry = NULL;
+ xtensa_tlb_entry tmp_entry;
+ int ret = xtensa_tlb_lookup(env, vaddr, dtlb, &wi, &ei, &ring);
+
+ if ((ret == INST_TLB_MISS_CAUSE || ret == LOAD_STORE_TLB_MISS_CAUSE) &&
+ may_lookup_pt && get_pte(env, vaddr, &pte) == 0) {
+ ring = (pte >> 4) & 0x3;
+ wi = 0;
+ split_tlb_entry_spec_way(env, vaddr, dtlb, &vpn, wi, &ei);
+
+ if (update_tlb) {
+ wi = ++env->autorefill_idx & 0x3;
+ xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, pte);
+ env->sregs[EXCVADDR] = vaddr;
+ qemu_log_mask(CPU_LOG_MMU, "%s: autorefill(%08x): %08x -> %08x\n",
+ __func__, vaddr, vpn, pte);
+ } else {
+ xtensa_tlb_set_entry_mmu(env, &tmp_entry, dtlb, wi, ei, vpn, pte);
+ entry = &tmp_entry;
+ }
+ ret = 0;
+ }
+ if (ret != 0) {
+ return ret;
+ }
+
+ if (entry == NULL) {
+ entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);
+ }
+
+ if (ring < mmu_idx) {
+ return dtlb ?
+ LOAD_STORE_PRIVILEGE_CAUSE :
+ INST_FETCH_PRIVILEGE_CAUSE;
+ }
+
+ *access = mmu_attr_to_access(entry->attr) &
+ ~(dtlb ? PAGE_EXEC : PAGE_READ | PAGE_WRITE);
+ if (!is_access_granted(*access, is_write)) {
+ return dtlb ?
+ (is_write ?
+ STORE_PROHIBITED_CAUSE :
+ LOAD_PROHIBITED_CAUSE) :
+ INST_FETCH_PROHIBITED_CAUSE;
+ }
+
+ *paddr = entry->paddr | (vaddr & ~xtensa_tlb_get_addr_mask(env, dtlb, wi));
+ *page_size = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;
+
+ return 0;
+}
+
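+/*
+ * Fetch the PTE for vaddr from the linear page table mapped at PTEVADDR:
+ * the entry address is PTEVADDR plus 4 bytes per virtual page
+ * ((vaddr >> 12) * 4 == vaddr >> 10), translated here without updating
+ * the TLB or recursing into another page table lookup.
+ */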
+static int get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte)
+{
+ CPUState *cs = CPU(xtensa_env_get_cpu(env));
+ uint32_t paddr;
+ uint32_t page_size;
+ unsigned access;
+ uint32_t pt_vaddr =
+ (env->sregs[PTEVADDR] | (vaddr >> 10)) & 0xfffffffc;
+ int ret = get_physical_addr_mmu(env, false, pt_vaddr, 0, 0,
+ &paddr, &page_size, &access, false);
+
+ qemu_log_mask(CPU_LOG_MMU, "%s: trying autorefill(%08x) -> %08x\n",
+ __func__, vaddr, ret ? ~0 : paddr);
+
+ if (ret == 0) {
+ *pte = ldl_phys(cs->as, paddr);
+ }
+ return ret;
+}
+
+static int get_physical_addr_region(CPUXtensaState *env,
+ uint32_t vaddr, int is_write, int mmu_idx,
+ uint32_t *paddr, uint32_t *page_size, unsigned *access)
+{
+ bool dtlb = is_write != 2;
+ uint32_t wi = 0;
+ uint32_t ei = (vaddr >> 29) & 0x7;
+ const xtensa_tlb_entry *entry =
+ xtensa_tlb_get_entry(env, dtlb, wi, ei);
+
+ *access = region_attr_to_access(entry->attr);
+ if (!is_access_granted(*access, is_write)) {
+ return dtlb ?
+ (is_write ?
+ STORE_PROHIBITED_CAUSE :
+ LOAD_PROHIBITED_CAUSE) :
+ INST_FETCH_PROHIBITED_CAUSE;
+ }
+
+ *paddr = entry->paddr | (vaddr & ~REGION_PAGE_MASK);
+ *page_size = ~REGION_PAGE_MASK + 1;
+
+ return 0;
+}
+
+/*!
+ * Convert a virtual address to a physical address.
+ * The MMU may perform a page table walk and update an autorefill TLB way entry.
+ *
+ * \return 0 if ok, exception cause code otherwise
+ */
+int xtensa_get_physical_addr(CPUXtensaState *env, bool update_tlb,
+ uint32_t vaddr, int is_write, int mmu_idx,
+ uint32_t *paddr, uint32_t *page_size, unsigned *access)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ return get_physical_addr_mmu(env, update_tlb,
+ vaddr, is_write, mmu_idx, paddr, page_size, access, true);
+ } else if (xtensa_option_bits_enabled(env->config,
+ XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
+ XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION))) {
+ return get_physical_addr_region(env, vaddr, is_write, mmu_idx,
+ paddr, page_size, access);
+ } else {
+ *paddr = vaddr;
+ *page_size = TARGET_PAGE_SIZE;
+ *access = cacheattr_attr_to_access(
+ env->sregs[CACHEATTR] >> ((vaddr & 0xe0000000) >> 27));
+ return 0;
+ }
+}
+
+static void dump_tlb(FILE *f, fprintf_function cpu_fprintf,
+ CPUXtensaState *env, bool dtlb)
+{
+ unsigned wi, ei;
+ const xtensa_tlb *conf =
+ dtlb ? &env->config->dtlb : &env->config->itlb;
+ unsigned (*attr_to_access)(uint32_t) =
+ xtensa_option_enabled(env->config, XTENSA_OPTION_MMU) ?
+ mmu_attr_to_access : region_attr_to_access;
+
+ for (wi = 0; wi < conf->nways; ++wi) {
+ uint32_t sz = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1;
+ const char *sz_text;
+ bool print_header = true;
+
+ if (sz >= 0x100000) {
+ sz >>= 20;
+ sz_text = "MB";
+ } else {
+ sz >>= 10;
+ sz_text = "KB";
+ }
+
+ for (ei = 0; ei < conf->way_size[wi]; ++ei) {
+ const xtensa_tlb_entry *entry =
+ xtensa_tlb_get_entry(env, dtlb, wi, ei);
+
+ if (entry->asid) {
+ static const char * const cache_text[8] = {
+ [PAGE_CACHE_BYPASS >> PAGE_CACHE_SHIFT] = "Bypass",
+ [PAGE_CACHE_WT >> PAGE_CACHE_SHIFT] = "WT",
+ [PAGE_CACHE_WB >> PAGE_CACHE_SHIFT] = "WB",
+ [PAGE_CACHE_ISOLATE >> PAGE_CACHE_SHIFT] = "Isolate",
+ };
+ unsigned access = attr_to_access(entry->attr);
+ unsigned cache_idx = (access & PAGE_CACHE_MASK) >>
+ PAGE_CACHE_SHIFT;
+
+ if (print_header) {
+ print_header = false;
+ cpu_fprintf(f, "Way %u (%d %s)\n", wi, sz, sz_text);
+ cpu_fprintf(f,
+ "\tVaddr Paddr ASID Attr RWX Cache\n"
+ "\t---------- ---------- ---- ---- --- -------\n");
+ }
+ cpu_fprintf(f,
+ "\t0x%08x 0x%08x 0x%02x 0x%02x %c%c%c %-7s\n",
+ entry->vaddr,
+ entry->paddr,
+ entry->asid,
+ entry->attr,
+ (access & PAGE_READ) ? 'R' : '-',
+ (access & PAGE_WRITE) ? 'W' : '-',
+ (access & PAGE_EXEC) ? 'X' : '-',
+ cache_text[cache_idx] ? cache_text[cache_idx] :
+ "Invalid");
+ }
+ }
+ }
+}
+
+void dump_mmu(FILE *f, fprintf_function cpu_fprintf, CPUXtensaState *env)
+{
+ if (xtensa_option_bits_enabled(env->config,
+ XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
+ XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION) |
+ XTENSA_OPTION_BIT(XTENSA_OPTION_MMU))) {
+
+ cpu_fprintf(f, "ITLB:\n");
+ dump_tlb(f, cpu_fprintf, env, false);
+ cpu_fprintf(f, "\nDTLB:\n");
+ dump_tlb(f, cpu_fprintf, env, true);
+ } else {
+ cpu_fprintf(f, "No TLB for this CPU core\n");
+ }
+}
diff --git a/target/xtensa/helper.h b/target/xtensa/helper.h
new file mode 100644
index 0000000000..5ea9c5beec
--- /dev/null
+++ b/target/xtensa/helper.h
@@ -0,0 +1,58 @@
+DEF_HELPER_2(exception, noreturn, env, i32)
+DEF_HELPER_3(exception_cause, noreturn, env, i32, i32)
+DEF_HELPER_4(exception_cause_vaddr, noreturn, env, i32, i32, i32)
+DEF_HELPER_3(debug_exception, noreturn, env, i32, i32)
+
+DEF_HELPER_FLAGS_1(nsa, TCG_CALL_NO_RWG_SE, i32, i32)
+DEF_HELPER_FLAGS_1(nsau, TCG_CALL_NO_RWG_SE, i32, i32)
+DEF_HELPER_2(wsr_windowbase, void, env, i32)
+DEF_HELPER_4(entry, void, env, i32, i32, i32)
+DEF_HELPER_2(retw, i32, env, i32)
+DEF_HELPER_2(rotw, void, env, i32)
+DEF_HELPER_3(window_check, noreturn, env, i32, i32)
+DEF_HELPER_1(restore_owb, void, env)
+DEF_HELPER_2(movsp, void, env, i32)
+DEF_HELPER_2(wsr_lbeg, void, env, i32)
+DEF_HELPER_2(wsr_lend, void, env, i32)
+DEF_HELPER_1(simcall, void, env)
+DEF_HELPER_1(dump_state, void, env)
+
+DEF_HELPER_3(waiti, void, env, i32, i32)
+DEF_HELPER_3(timer_irq, void, env, i32, i32)
+DEF_HELPER_2(advance_ccount, void, env, i32)
+DEF_HELPER_1(check_interrupts, void, env)
+DEF_HELPER_3(check_atomctl, void, env, i32, i32)
+
+DEF_HELPER_2(itlb_hit_test, void, env, i32)
+DEF_HELPER_2(wsr_rasid, void, env, i32)
+DEF_HELPER_FLAGS_3(rtlb0, TCG_CALL_NO_RWG_SE, i32, env, i32, i32)
+DEF_HELPER_FLAGS_3(rtlb1, TCG_CALL_NO_RWG_SE, i32, env, i32, i32)
+DEF_HELPER_3(itlb, void, env, i32, i32)
+DEF_HELPER_3(ptlb, i32, env, i32, i32)
+DEF_HELPER_4(wtlb, void, env, i32, i32, i32)
+
+DEF_HELPER_2(wsr_ibreakenable, void, env, i32)
+DEF_HELPER_3(wsr_ibreaka, void, env, i32, i32)
+DEF_HELPER_3(wsr_dbreaka, void, env, i32, i32)
+DEF_HELPER_3(wsr_dbreakc, void, env, i32, i32)
+
+DEF_HELPER_2(wur_fcr, void, env, i32)
+DEF_HELPER_FLAGS_1(abs_s, TCG_CALL_NO_RWG_SE, f32, f32)
+DEF_HELPER_FLAGS_1(neg_s, TCG_CALL_NO_RWG_SE, f32, f32)
+DEF_HELPER_3(add_s, f32, env, f32, f32)
+DEF_HELPER_3(sub_s, f32, env, f32, f32)
+DEF_HELPER_3(mul_s, f32, env, f32, f32)
+DEF_HELPER_4(madd_s, f32, env, f32, f32, f32)
+DEF_HELPER_4(msub_s, f32, env, f32, f32, f32)
+DEF_HELPER_FLAGS_3(ftoi, TCG_CALL_NO_RWG_SE, i32, f32, i32, i32)
+DEF_HELPER_FLAGS_3(ftoui, TCG_CALL_NO_RWG_SE, i32, f32, i32, i32)
+DEF_HELPER_3(itof, f32, env, i32, i32)
+DEF_HELPER_3(uitof, f32, env, i32, i32)
+
+DEF_HELPER_4(un_s, void, env, i32, f32, f32)
+DEF_HELPER_4(oeq_s, void, env, i32, f32, f32)
+DEF_HELPER_4(ueq_s, void, env, i32, f32, f32)
+DEF_HELPER_4(olt_s, void, env, i32, f32, f32)
+DEF_HELPER_4(ult_s, void, env, i32, f32, f32)
+DEF_HELPER_4(ole_s, void, env, i32, f32, f32)
+DEF_HELPER_4(ule_s, void, env, i32, f32, f32)
diff --git a/target/xtensa/import_core.sh b/target/xtensa/import_core.sh
new file mode 100755
index 0000000000..351bee41c2
--- /dev/null
+++ b/target/xtensa/import_core.sh
@@ -0,0 +1,51 @@
+#! /bin/bash -e
+
+OVERLAY="$1"
+NAME="$2"
+FREQ=40000
+BASE=$(dirname "$0")
+TARGET="$BASE"/core-$NAME
+
+[ $# -ge 2 -a -f "$OVERLAY" ] || { cat <<EOF
+Usage: $0 overlay-archive-to-import core-name [frequency-in-KHz]
+ overlay-archive-to-import: file name of xtensa-config-overlay.tar.gz
+ to import configuration from.
+ core-name: QEMU name of the imported core. Must be a valid
+ C identifier.
+ frequency-in-KHz: core frequency (40MHz if not specified).
+EOF
+exit
+}
+
+[ $# -ge 3 ] && FREQ="$3"
+mkdir -p "$TARGET"
+tar -xf "$OVERLAY" -C "$TARGET" --strip-components=1 \
+ --xform='s/core/core-isa/' config/core.h
+tar -xf "$OVERLAY" -O gdb/xtensa-config.c | \
+ sed -n '1,/*\//p;/XTREG/,/XTREG_END/p' > "$TARGET"/gdb-config.c
+
+cat <<EOF > "${TARGET}.c"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/gdbstub.h"
+#include "qemu/host-utils.h"
+
+#include "core-$NAME/core-isa.h"
+#include "overlay_tool.h"
+
+static XtensaConfig $NAME __attribute__((unused)) = {
+ .name = "$NAME",
+ .gdb_regmap = {
+ .reg = {
+#include "core-$NAME/gdb-config.c"
+ }
+ },
+ .clock_freq_khz = $FREQ,
+ DEFAULT_SECTIONS
+};
+
+REGISTER_CORE($NAME)
+EOF
+
+grep -q core-${NAME}.o "$BASE"/Makefile.objs || \
+ echo "obj-y += core-${NAME}.o" >> "$BASE"/Makefile.objs
diff --git a/target/xtensa/monitor.c b/target/xtensa/monitor.c
new file mode 100644
index 0000000000..f3fa4cd278
--- /dev/null
+++ b/target/xtensa/monitor.c
@@ -0,0 +1,35 @@
+/*
+ * QEMU monitor
+ *
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "monitor/monitor.h"
+#include "monitor/hmp-target.h"
+#include "hmp.h"
+
+void hmp_info_tlb(Monitor *mon, const QDict *qdict)
+{
+ CPUArchState *env1 = mon_get_cpu_env();
+
+ dump_mmu((FILE*)mon, (fprintf_function)monitor_printf, env1);
+}
diff --git a/target/xtensa/op_helper.c b/target/xtensa/op_helper.c
new file mode 100644
index 0000000000..0a4b2147bc
--- /dev/null
+++ b/target/xtensa/op_helper.c
@@ -0,0 +1,984 @@
+/*
+ * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "qemu/host-utils.h"
+#include "exec/exec-all.h"
+#include "exec/cpu_ldst.h"
+#include "exec/address-spaces.h"
+#include "qemu/timer.h"
+
+void xtensa_cpu_do_unaligned_access(CPUState *cs,
+ vaddr addr, MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+ CPUXtensaState *env = &cpu->env;
+
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_UNALIGNED_EXCEPTION) &&
+ !xtensa_option_enabled(env->config, XTENSA_OPTION_HW_ALIGNMENT)) {
+ cpu_restore_state(CPU(cpu), retaddr);
+ HELPER(exception_cause_vaddr)(env,
+ env->pc, LOAD_STORE_ALIGNMENT_CAUSE, addr);
+ }
+}
+
+void tlb_fill(CPUState *cs, target_ulong vaddr, MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr)
+{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+ CPUXtensaState *env = &cpu->env;
+ uint32_t paddr;
+ uint32_t page_size;
+ unsigned access;
+ int ret = xtensa_get_physical_addr(env, true, vaddr, access_type, mmu_idx,
+ &paddr, &page_size, &access);
+
+ qemu_log_mask(CPU_LOG_MMU, "%s(%08x, %d, %d) -> %08x, ret = %d\n",
+ __func__, vaddr, access_type, mmu_idx, paddr, ret);
+
+ if (ret == 0) {
+ tlb_set_page(cs,
+ vaddr & TARGET_PAGE_MASK,
+ paddr & TARGET_PAGE_MASK,
+ access, mmu_idx, page_size);
+ } else {
+ cpu_restore_state(cs, retaddr);
+ HELPER(exception_cause_vaddr)(env, env->pc, ret, vaddr);
+ }
+}
+
+void xtensa_cpu_do_unassigned_access(CPUState *cs, hwaddr addr,
+ bool is_write, bool is_exec, int opaque,
+ unsigned size)
+{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+ CPUXtensaState *env = &cpu->env;
+
+ HELPER(exception_cause_vaddr)(env, env->pc,
+ is_exec ?
+ INSTR_PIF_ADDR_ERROR_CAUSE :
+ LOAD_STORE_PIF_ADDR_ERROR_CAUSE,
+ is_exec ? addr : cs->mem_io_vaddr);
+}
+
+static void tb_invalidate_virtual_addr(CPUXtensaState *env, uint32_t vaddr)
+{
+ uint32_t paddr;
+ uint32_t page_size;
+ unsigned access;
+ int ret = xtensa_get_physical_addr(env, false, vaddr, 2, 0,
+ &paddr, &page_size, &access);
+ if (ret == 0) {
+ tb_invalidate_phys_addr(&address_space_memory, paddr);
+ }
+}
+
+void HELPER(exception)(CPUXtensaState *env, uint32_t excp)
+{
+ CPUState *cs = CPU(xtensa_env_get_cpu(env));
+
+ cs->exception_index = excp;
+ if (excp == EXCP_DEBUG) {
+ env->exception_taken = 0;
+ }
+ cpu_loop_exit(cs);
+}
+
+void HELPER(exception_cause)(CPUXtensaState *env, uint32_t pc, uint32_t cause)
+{
+ uint32_t vector;
+
+ env->pc = pc;
+ if (env->sregs[PS] & PS_EXCM) {
+ if (env->config->ndepc) {
+ env->sregs[DEPC] = pc;
+ } else {
+ env->sregs[EPC1] = pc;
+ }
+ vector = EXC_DOUBLE;
+ } else {
+ env->sregs[EPC1] = pc;
+ vector = (env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
+ }
+
+ env->sregs[EXCCAUSE] = cause;
+ env->sregs[PS] |= PS_EXCM;
+
+ HELPER(exception)(env, vector);
+}
+
+void HELPER(exception_cause_vaddr)(CPUXtensaState *env,
+ uint32_t pc, uint32_t cause, uint32_t vaddr)
+{
+ env->sregs[EXCVADDR] = vaddr;
+ HELPER(exception_cause)(env, pc, cause);
+}
+
+void debug_exception_env(CPUXtensaState *env, uint32_t cause)
+{
+ if (xtensa_get_cintlevel(env) < env->config->debug_level) {
+ HELPER(debug_exception)(env, env->pc, cause);
+ }
+}
+
+void HELPER(debug_exception)(CPUXtensaState *env, uint32_t pc, uint32_t cause)
+{
+ unsigned level = env->config->debug_level;
+
+ env->pc = pc;
+ env->sregs[DEBUGCAUSE] = cause;
+ env->sregs[EPC1 + level - 1] = pc;
+ env->sregs[EPS2 + level - 2] = env->sregs[PS];
+ env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) | PS_EXCM |
+ (level << PS_INTLEVEL_SHIFT);
+ HELPER(exception)(env, EXC_DEBUG);
+}
+
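+/*
+ * NSA: normalization shift amount for a signed value -- how far it can be
+ * shifted left before losing significance (31 for 0 and -1).  NSAU below
+ * is the unsigned variant and returns the count of leading zeros (32 for 0).
+ */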
+uint32_t HELPER(nsa)(uint32_t v)
+{
+ if (v & 0x80000000) {
+ v = ~v;
+ }
+ return v ? clz32(v) - 1 : 31;
+}
+
+uint32_t HELPER(nsau)(uint32_t v)
+{
+ return v ? clz32(v) : 32;
+}
+
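+/*
+ * The physical AR file (env->phys_regs, nareg entries) is treated as a
+ * circular buffer; the visible 16-register window (env->regs) is copied
+ * into and out of it, wrapping around the end when needed.
+ */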
+static void copy_window_from_phys(CPUXtensaState *env,
+ uint32_t window, uint32_t phys, uint32_t n)
+{
+ assert(phys < env->config->nareg);
+ if (phys + n <= env->config->nareg) {
+ memcpy(env->regs + window, env->phys_regs + phys,
+ n * sizeof(uint32_t));
+ } else {
+ uint32_t n1 = env->config->nareg - phys;
+ memcpy(env->regs + window, env->phys_regs + phys,
+ n1 * sizeof(uint32_t));
+ memcpy(env->regs + window + n1, env->phys_regs,
+ (n - n1) * sizeof(uint32_t));
+ }
+}
+
+static void copy_phys_from_window(CPUXtensaState *env,
+ uint32_t phys, uint32_t window, uint32_t n)
+{
+ assert(phys < env->config->nareg);
+ if (phys + n <= env->config->nareg) {
+ memcpy(env->phys_regs + phys, env->regs + window,
+ n * sizeof(uint32_t));
+ } else {
+ uint32_t n1 = env->config->nareg - phys;
+ memcpy(env->phys_regs + phys, env->regs + window,
+ n1 * sizeof(uint32_t));
+ memcpy(env->phys_regs, env->regs + window + n1,
+ (n - n1) * sizeof(uint32_t));
+ }
+}
+
+
+static inline unsigned windowbase_bound(unsigned a, const CPUXtensaState *env)
+{
+ return a & (env->config->nareg / 4 - 1);
+}
+
+static inline unsigned windowstart_bit(unsigned a, const CPUXtensaState *env)
+{
+ return 1 << windowbase_bound(a, env);
+}
+
+void xtensa_sync_window_from_phys(CPUXtensaState *env)
+{
+ copy_window_from_phys(env, 0, env->sregs[WINDOW_BASE] * 4, 16);
+}
+
+void xtensa_sync_phys_from_window(CPUXtensaState *env)
+{
+ copy_phys_from_window(env, env->sregs[WINDOW_BASE] * 4, 0, 16);
+}
+
+static void rotate_window_abs(CPUXtensaState *env, uint32_t position)
+{
+ xtensa_sync_phys_from_window(env);
+ env->sregs[WINDOW_BASE] = windowbase_bound(position, env);
+ xtensa_sync_window_from_phys(env);
+}
+
+static void rotate_window(CPUXtensaState *env, uint32_t delta)
+{
+ rotate_window_abs(env, env->sregs[WINDOW_BASE] + delta);
+}
+
+void HELPER(wsr_windowbase)(CPUXtensaState *env, uint32_t v)
+{
+ rotate_window_abs(env, v);
+}
+
+void HELPER(entry)(CPUXtensaState *env, uint32_t pc, uint32_t s, uint32_t imm)
+{
+ int callinc = (env->sregs[PS] & PS_CALLINC) >> PS_CALLINC_SHIFT;
+ if (s > 3 || ((env->sregs[PS] & (PS_WOE | PS_EXCM)) ^ PS_WOE) != 0) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Illegal entry instruction(pc = %08x), PS = %08x\n",
+ pc, env->sregs[PS]);
+ HELPER(exception_cause)(env, pc, ILLEGAL_INSTRUCTION_CAUSE);
+ } else {
+ uint32_t windowstart = xtensa_replicate_windowstart(env) >>
+ (env->sregs[WINDOW_BASE] + 1);
+
+ if (windowstart & ((1 << callinc) - 1)) {
+ HELPER(window_check)(env, pc, callinc);
+ }
+ env->regs[(callinc << 2) | (s & 3)] = env->regs[s] - (imm << 3);
+ rotate_window(env, callinc);
+ env->sregs[WINDOW_START] |=
+ windowstart_bit(env->sregs[WINDOW_BASE], env);
+ }
+}
+
+void HELPER(window_check)(CPUXtensaState *env, uint32_t pc, uint32_t w)
+{
+ uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env);
+ uint32_t windowstart = xtensa_replicate_windowstart(env) >>
+ (env->sregs[WINDOW_BASE] + 1);
+ uint32_t n = ctz32(windowstart) + 1;
+
+ assert(n <= w);
+
+ rotate_window(env, n);
+ env->sregs[PS] = (env->sregs[PS] & ~PS_OWB) |
+ (windowbase << PS_OWB_SHIFT) | PS_EXCM;
+ env->sregs[EPC1] = env->pc = pc;
+
+ switch (ctz32(windowstart >> n)) {
+ case 0:
+ HELPER(exception)(env, EXC_WINDOW_OVERFLOW4);
+ break;
+ case 1:
+ HELPER(exception)(env, EXC_WINDOW_OVERFLOW8);
+ break;
+ default:
+ HELPER(exception)(env, EXC_WINDOW_OVERFLOW12);
+ break;
+ }
+}
+
+uint32_t HELPER(retw)(CPUXtensaState *env, uint32_t pc)
+{
+ int n = (env->regs[0] >> 30) & 0x3;
+ int m = 0;
+ uint32_t windowbase = windowbase_bound(env->sregs[WINDOW_BASE], env);
+ uint32_t windowstart = env->sregs[WINDOW_START];
+ uint32_t ret_pc = 0;
+
+ if (windowstart & windowstart_bit(windowbase - 1, env)) {
+ m = 1;
+ } else if (windowstart & windowstart_bit(windowbase - 2, env)) {
+ m = 2;
+ } else if (windowstart & windowstart_bit(windowbase - 3, env)) {
+ m = 3;
+ }
+
+ if (n == 0 || (m != 0 && m != n) ||
+ ((env->sregs[PS] & (PS_WOE | PS_EXCM)) ^ PS_WOE) != 0) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Illegal retw instruction(pc = %08x), "
+ "PS = %08x, m = %d, n = %d\n",
+ pc, env->sregs[PS], m, n);
+ HELPER(exception_cause)(env, pc, ILLEGAL_INSTRUCTION_CAUSE);
+ } else {
+ int owb = windowbase;
+
+ ret_pc = (pc & 0xc0000000) | (env->regs[0] & 0x3fffffff);
+
+ rotate_window(env, -n);
+ if (windowstart & windowstart_bit(env->sregs[WINDOW_BASE], env)) {
+ env->sregs[WINDOW_START] &= ~windowstart_bit(owb, env);
+ } else {
+ /* window underflow */
+ env->sregs[PS] = (env->sregs[PS] & ~PS_OWB) |
+ (windowbase << PS_OWB_SHIFT) | PS_EXCM;
+ env->sregs[EPC1] = env->pc = pc;
+
+ if (n == 1) {
+ HELPER(exception)(env, EXC_WINDOW_UNDERFLOW4);
+ } else if (n == 2) {
+ HELPER(exception)(env, EXC_WINDOW_UNDERFLOW8);
+ } else if (n == 3) {
+ HELPER(exception)(env, EXC_WINDOW_UNDERFLOW12);
+ }
+ }
+ }
+ return ret_pc;
+}
+
+void HELPER(rotw)(CPUXtensaState *env, uint32_t imm4)
+{
+ rotate_window(env, imm4);
+}
+
+void HELPER(restore_owb)(CPUXtensaState *env)
+{
+ rotate_window_abs(env, (env->sregs[PS] & PS_OWB) >> PS_OWB_SHIFT);
+}
+
+void HELPER(movsp)(CPUXtensaState *env, uint32_t pc)
+{
+ if ((env->sregs[WINDOW_START] &
+ (windowstart_bit(env->sregs[WINDOW_BASE] - 3, env) |
+ windowstart_bit(env->sregs[WINDOW_BASE] - 2, env) |
+ windowstart_bit(env->sregs[WINDOW_BASE] - 1, env))) == 0) {
+ HELPER(exception_cause)(env, pc, ALLOCA_CAUSE);
+ }
+}
+
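+/*
+ * The loop-back branch taken at LEND is baked into the translated code,
+ * so a TB containing the last loop instruction has to be invalidated
+ * whenever LBEG or LEND changes.
+ */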
+void HELPER(wsr_lbeg)(CPUXtensaState *env, uint32_t v)
+{
+ if (env->sregs[LBEG] != v) {
+ tb_invalidate_virtual_addr(env, env->sregs[LEND] - 1);
+ env->sregs[LBEG] = v;
+ }
+}
+
+void HELPER(wsr_lend)(CPUXtensaState *env, uint32_t v)
+{
+ if (env->sregs[LEND] != v) {
+ tb_invalidate_virtual_addr(env, env->sregs[LEND] - 1);
+ env->sregs[LEND] = v;
+ tb_invalidate_virtual_addr(env, env->sregs[LEND] - 1);
+ }
+}
+
+void HELPER(dump_state)(CPUXtensaState *env)
+{
+ XtensaCPU *cpu = xtensa_env_get_cpu(env);
+
+ cpu_dump_state(CPU(cpu), stderr, fprintf, 0);
+}
+
+void HELPER(waiti)(CPUXtensaState *env, uint32_t pc, uint32_t intlevel)
+{
+ CPUState *cpu;
+
+ env->pc = pc;
+ env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) |
+ (intlevel << PS_INTLEVEL_SHIFT);
+ check_interrupts(env);
+ if (env->pending_irq_level) {
+ cpu_loop_exit(CPU(xtensa_env_get_cpu(env)));
+ return;
+ }
+
+ cpu = CPU(xtensa_env_get_cpu(env));
+ env->halt_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ cpu->halted = 1;
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_TIMER_INTERRUPT)) {
+ xtensa_rearm_ccompare_timer(env);
+ }
+ HELPER(exception)(env, EXCP_HLT);
+}
+
+void HELPER(timer_irq)(CPUXtensaState *env, uint32_t id, uint32_t active)
+{
+ xtensa_timer_irq(env, id, active);
+}
+
+void HELPER(advance_ccount)(CPUXtensaState *env, uint32_t d)
+{
+ xtensa_advance_ccount(env, d);
+}
+
+void HELPER(check_interrupts)(CPUXtensaState *env)
+{
+ check_interrupts(env);
+}
+
+void HELPER(itlb_hit_test)(CPUXtensaState *env, uint32_t vaddr)
+{
+ get_page_addr_code(env, vaddr);
+}
+
+/*!
+ * Check the vaddr accessibility and cache attributes, and raise an
+ * exception if the ATOMCTL SR requires one.
+ *
+ * Note: local memory exclusion is not implemented
+ */
+void HELPER(check_atomctl)(CPUXtensaState *env, uint32_t pc, uint32_t vaddr)
+{
+ uint32_t paddr, page_size, access;
+ uint32_t atomctl = env->sregs[ATOMCTL];
+ int rc = xtensa_get_physical_addr(env, true, vaddr, 1,
+ xtensa_get_cring(env), &paddr, &page_size, &access);
+
+ /*
+ * s32c1i never causes LOAD_PROHIBITED_CAUSE exceptions,
+ * see opcode description in the ISA
+ */
+ if (rc == 0 &&
+ (access & (PAGE_READ | PAGE_WRITE)) != (PAGE_READ | PAGE_WRITE)) {
+ rc = STORE_PROHIBITED_CAUSE;
+ }
+
+ if (rc) {
+ HELPER(exception_cause_vaddr)(env, pc, rc, vaddr);
+ }
+
+ /*
+ * When data cache is not configured use ATOMCTL bypass field.
+ * See ISA, 4.3.12.4 The Atomic Operation Control Register (ATOMCTL)
+ * under the Conditional Store Option.
+ */
+ if (!xtensa_option_enabled(env->config, XTENSA_OPTION_DCACHE)) {
+ access = PAGE_CACHE_BYPASS;
+ }
+
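+ /*
+  * ATOMCTL keeps a 2-bit field per cache attribute: bits 1:0 for bypass,
+  * 3:2 for writethrough, 5:4 for writeback.  The fall-through shifts below
+  * select the field for this page; a field value of 0 means the access is
+  * not permitted and raises a load/store error.
+  */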
+ switch (access & PAGE_CACHE_MASK) {
+ case PAGE_CACHE_WB:
+ atomctl >>= 2;
+ /* fall through */
+ case PAGE_CACHE_WT:
+ atomctl >>= 2;
+ /* fall through */
+ case PAGE_CACHE_BYPASS:
+ if ((atomctl & 0x3) == 0) {
+ HELPER(exception_cause_vaddr)(env, pc,
+ LOAD_STORE_ERROR_CAUSE, vaddr);
+ }
+ break;
+
+ case PAGE_CACHE_ISOLATE:
+ HELPER(exception_cause_vaddr)(env, pc,
+ LOAD_STORE_ERROR_CAUSE, vaddr);
+ break;
+
+ default:
+ break;
+ }
+}
+
+void HELPER(wsr_rasid)(CPUXtensaState *env, uint32_t v)
+{
+ XtensaCPU *cpu = xtensa_env_get_cpu(env);
+
+ v = (v & 0xffffff00) | 0x1;
+ if (v != env->sregs[RASID]) {
+ env->sregs[RASID] = v;
+ tlb_flush(CPU(cpu), 1);
+ }
+}
+
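+/* Page size selection field for TLB way 4, 5 or 6, from ITLBCFG/DTLBCFG. */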
+static uint32_t get_page_size(const CPUXtensaState *env, bool dtlb, uint32_t way)
+{
+ uint32_t tlbcfg = env->sregs[dtlb ? DTLBCFG : ITLBCFG];
+
+ switch (way) {
+ case 4:
+ return (tlbcfg >> 16) & 0x3;
+
+ case 5:
+ return (tlbcfg >> 20) & 0x1;
+
+ case 6:
+ return (tlbcfg >> 24) & 0x1;
+
+ default:
+ return 0;
+ }
+}
+
+/*!
+ * Get bit mask for the virtual address bits translated by the TLB way
+ */
+uint32_t xtensa_tlb_get_addr_mask(const CPUXtensaState *env, bool dtlb, uint32_t way)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ bool varway56 = dtlb ?
+ env->config->dtlb.varway56 :
+ env->config->itlb.varway56;
+
+ switch (way) {
+ case 4:
+ return 0xfff00000 << get_page_size(env, dtlb, way) * 2;
+
+ case 5:
+ if (varway56) {
+ return 0xf8000000 << get_page_size(env, dtlb, way);
+ } else {
+ return 0xf8000000;
+ }
+
+ case 6:
+ if (varway56) {
+ return 0xf0000000 << (1 - get_page_size(env, dtlb, way));
+ } else {
+ return 0xf0000000;
+ }
+
+ default:
+ return 0xfffff000;
+ }
+ } else {
+ return REGION_PAGE_MASK;
+ }
+}
+
+/*!
+ * Get bit mask for the 'VPN without index' field.
+ * See ISA, 4.6.5.6, data format for RxTLB0
+ */
+static uint32_t get_vpn_mask(const CPUXtensaState *env, bool dtlb, uint32_t way)
+{
+ if (way < 4) {
+ bool is32 = (dtlb ?
+ env->config->dtlb.nrefillentries :
+ env->config->itlb.nrefillentries) == 32;
+ return is32 ? 0xffff8000 : 0xffffc000;
+ } else if (way == 4) {
+ return xtensa_tlb_get_addr_mask(env, dtlb, way) << 2;
+ } else if (way <= 6) {
+ uint32_t mask = xtensa_tlb_get_addr_mask(env, dtlb, way);
+ bool varway56 = dtlb ?
+ env->config->dtlb.varway56 :
+ env->config->itlb.varway56;
+
+ if (varway56) {
+ return mask << (way == 5 ? 2 : 3);
+ } else {
+ return mask << 1;
+ }
+ } else {
+ return 0xfffff000;
+ }
+}
+
+/*!
+ * Split virtual address into VPN (with index) and entry index
+ * for the given TLB way
+ */
+void split_tlb_entry_spec_way(const CPUXtensaState *env, uint32_t v, bool dtlb,
+ uint32_t *vpn, uint32_t wi, uint32_t *ei)
+{
+ bool varway56 = dtlb ?
+ env->config->dtlb.varway56 :
+ env->config->itlb.varway56;
+
+ if (!dtlb) {
+ wi &= 7;
+ }
+
+ if (wi < 4) {
+ bool is32 = (dtlb ?
+ env->config->dtlb.nrefillentries :
+ env->config->itlb.nrefillentries) == 32;
+ *ei = (v >> 12) & (is32 ? 0x7 : 0x3);
+ } else {
+ switch (wi) {
+ case 4:
+ {
+ uint32_t eibase = 20 + get_page_size(env, dtlb, wi) * 2;
+ *ei = (v >> eibase) & 0x3;
+ }
+ break;
+
+ case 5:
+ if (varway56) {
+ uint32_t eibase = 27 + get_page_size(env, dtlb, wi);
+ *ei = (v >> eibase) & 0x3;
+ } else {
+ *ei = (v >> 27) & 0x1;
+ }
+ break;
+
+ case 6:
+ if (varway56) {
+ uint32_t eibase = 29 - get_page_size(env, dtlb, wi);
+ *ei = (v >> eibase) & 0x7;
+ } else {
+ *ei = (v >> 28) & 0x1;
+ }
+ break;
+
+ default:
+ *ei = 0;
+ break;
+ }
+ }
+ *vpn = v & xtensa_tlb_get_addr_mask(env, dtlb, wi);
+}
+
+/*!
+ * Split TLB address into TLB way, entry index and VPN (with index).
+ * See ISA, 4.6.5.5 - 4.6.5.8 for the TLB addressing format
+ */
+static void split_tlb_entry_spec(CPUXtensaState *env, uint32_t v, bool dtlb,
+ uint32_t *vpn, uint32_t *wi, uint32_t *ei)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ *wi = v & (dtlb ? 0xf : 0x7);
+ split_tlb_entry_spec_way(env, v, dtlb, vpn, *wi, ei);
+ } else {
+ *vpn = v & REGION_PAGE_MASK;
+ *wi = 0;
+ *ei = (v >> 29) & 0x7;
+ }
+}
+
+static xtensa_tlb_entry *get_tlb_entry(CPUXtensaState *env,
+ uint32_t v, bool dtlb, uint32_t *pwi)
+{
+ uint32_t vpn;
+ uint32_t wi;
+ uint32_t ei;
+
+ split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei);
+ if (pwi) {
+ *pwi = wi;
+ }
+ return xtensa_tlb_get_entry(env, dtlb, wi, ei);
+}
+
+uint32_t HELPER(rtlb0)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ uint32_t wi;
+ const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
+ return (entry->vaddr & get_vpn_mask(env, dtlb, wi)) | entry->asid;
+ } else {
+ return v & REGION_PAGE_MASK;
+ }
+}
+
+uint32_t HELPER(rtlb1)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
+{
+ const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, NULL);
+ return entry->paddr | entry->attr;
+}
+
+void HELPER(itlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ uint32_t wi;
+ xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi);
+ if (entry->variable && entry->asid) {
+ tlb_flush_page(CPU(xtensa_env_get_cpu(env)), entry->vaddr);
+ entry->asid = 0;
+ }
+ }
+}
+
+uint32_t HELPER(ptlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ uint32_t wi;
+ uint32_t ei;
+ uint8_t ring;
+ int res = xtensa_tlb_lookup(env, v, dtlb, &wi, &ei, &ring);
+
+ switch (res) {
+ case 0:
+ if (ring >= xtensa_get_ring(env)) {
+ return (v & 0xfffff000) | wi | (dtlb ? 0x10 : 0x8);
+ }
+ break;
+
+ case INST_TLB_MULTI_HIT_CAUSE:
+ case LOAD_STORE_TLB_MULTI_HIT_CAUSE:
+ HELPER(exception_cause_vaddr)(env, env->pc, res, v);
+ break;
+ }
+ return 0;
+ } else {
+ return (v & REGION_PAGE_MASK) | 0x1;
+ }
+}
+
+void xtensa_tlb_set_entry_mmu(const CPUXtensaState *env,
+ xtensa_tlb_entry *entry, bool dtlb,
+ unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte)
+{
+ entry->vaddr = vpn;
+ entry->paddr = pte & xtensa_tlb_get_addr_mask(env, dtlb, wi);
+ entry->asid = (env->sregs[RASID] >> ((pte >> 1) & 0x18)) & 0xff;
+ entry->attr = pte & 0xf;
+}
+
+void xtensa_tlb_set_entry(CPUXtensaState *env, bool dtlb,
+ unsigned wi, unsigned ei, uint32_t vpn, uint32_t pte)
+{
+ XtensaCPU *cpu = xtensa_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ xtensa_tlb_entry *entry = xtensa_tlb_get_entry(env, dtlb, wi, ei);
+
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ if (entry->variable) {
+ if (entry->asid) {
+ tlb_flush_page(cs, entry->vaddr);
+ }
+ xtensa_tlb_set_entry_mmu(env, entry, dtlb, wi, ei, vpn, pte);
+ tlb_flush_page(cs, entry->vaddr);
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "%s %d, %d, %d trying to set immutable entry\n",
+ __func__, dtlb, wi, ei);
+ }
+ } else {
+ tlb_flush_page(cs, entry->vaddr);
+ if (xtensa_option_enabled(env->config,
+ XTENSA_OPTION_REGION_TRANSLATION)) {
+ entry->paddr = pte & REGION_PAGE_MASK;
+ }
+ entry->attr = pte & 0xf;
+ }
+}
+
+void HELPER(wtlb)(CPUXtensaState *env, uint32_t p, uint32_t v, uint32_t dtlb)
+{
+ uint32_t vpn;
+ uint32_t wi;
+ uint32_t ei;
+ split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei);
+ xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, p);
+}
+
+
+void HELPER(wsr_ibreakenable)(CPUXtensaState *env, uint32_t v)
+{
+ uint32_t change = v ^ env->sregs[IBREAKENABLE];
+ unsigned i;
+
+ for (i = 0; i < env->config->nibreak; ++i) {
+ if (change & (1 << i)) {
+ tb_invalidate_virtual_addr(env, env->sregs[IBREAKA + i]);
+ }
+ }
+ env->sregs[IBREAKENABLE] = v & ((1 << env->config->nibreak) - 1);
+}
+
+void HELPER(wsr_ibreaka)(CPUXtensaState *env, uint32_t i, uint32_t v)
+{
+ if (env->sregs[IBREAKENABLE] & (1 << i) && env->sregs[IBREAKA + i] != v) {
+ tb_invalidate_virtual_addr(env, env->sregs[IBREAKA + i]);
+ tb_invalidate_virtual_addr(env, v);
+ }
+ env->sregs[IBREAKA + i] = v;
+}
+
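+/*
+ * Back a DBREAKA/DBREAKC pair with a QEMU watchpoint.  The low DBREAKC bits
+ * form an address mask (its trailing zero bits give the watched region size)
+ * and the SB/LB bits select store/load matches.
+ */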
+static void set_dbreak(CPUXtensaState *env, unsigned i, uint32_t dbreaka,
+ uint32_t dbreakc)
+{
+ CPUState *cs = CPU(xtensa_env_get_cpu(env));
+ int flags = BP_CPU | BP_STOP_BEFORE_ACCESS;
+ uint32_t mask = dbreakc | ~DBREAKC_MASK;
+
+ if (env->cpu_watchpoint[i]) {
+ cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[i]);
+ }
+ if (dbreakc & DBREAKC_SB) {
+ flags |= BP_MEM_WRITE;
+ }
+ if (dbreakc & DBREAKC_LB) {
+ flags |= BP_MEM_READ;
+ }
+ /* contiguous mask after inversion is one less than some power of 2 */
+ if ((~mask + 1) & ~mask) {
+ qemu_log_mask(LOG_GUEST_ERROR, "DBREAKC mask is not contiguous: 0x%08x\n", dbreakc);
+ /* cut mask after the first zero bit */
+ mask = 0xffffffff << (32 - clo32(mask));
+ }
+ if (cpu_watchpoint_insert(cs, dbreaka & mask, ~mask + 1,
+ flags, &env->cpu_watchpoint[i])) {
+ env->cpu_watchpoint[i] = NULL;
+ qemu_log_mask(LOG_GUEST_ERROR, "Failed to set data breakpoint at 0x%08x/%d\n",
+ dbreaka & mask, ~mask + 1);
+ }
+}
+
+void HELPER(wsr_dbreaka)(CPUXtensaState *env, uint32_t i, uint32_t v)
+{
+ uint32_t dbreakc = env->sregs[DBREAKC + i];
+
+ if ((dbreakc & DBREAKC_SB_LB) &&
+ env->sregs[DBREAKA + i] != v) {
+ set_dbreak(env, i, v, dbreakc);
+ }
+ env->sregs[DBREAKA + i] = v;
+}
+
+void HELPER(wsr_dbreakc)(CPUXtensaState *env, uint32_t i, uint32_t v)
+{
+ if ((env->sregs[DBREAKC + i] ^ v) & (DBREAKC_SB_LB | DBREAKC_MASK)) {
+ if (v & DBREAKC_SB_LB) {
+ set_dbreak(env, i, env->sregs[DBREAKA + i], v);
+ } else {
+ if (env->cpu_watchpoint[i]) {
+ CPUState *cs = CPU(xtensa_env_get_cpu(env));
+
+ cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[i]);
+ env->cpu_watchpoint[i] = NULL;
+ }
+ }
+ }
+ env->sregs[DBREAKC + i] = v;
+}
+
+void HELPER(wur_fcr)(CPUXtensaState *env, uint32_t v)
+{
+ static const int rounding_mode[] = {
+ float_round_nearest_even,
+ float_round_to_zero,
+ float_round_up,
+ float_round_down,
+ };
+
+ env->uregs[FCR] = v & 0xfffff07f;
+ set_float_rounding_mode(rounding_mode[v & 3], &env->fp_status);
+}
+
+float32 HELPER(abs_s)(float32 v)
+{
+ return float32_abs(v);
+}
+
+float32 HELPER(neg_s)(float32 v)
+{
+ return float32_chs(v);
+}
+
+float32 HELPER(add_s)(CPUXtensaState *env, float32 a, float32 b)
+{
+ return float32_add(a, b, &env->fp_status);
+}
+
+float32 HELPER(sub_s)(CPUXtensaState *env, float32 a, float32 b)
+{
+ return float32_sub(a, b, &env->fp_status);
+}
+
+float32 HELPER(mul_s)(CPUXtensaState *env, float32 a, float32 b)
+{
+ return float32_mul(a, b, &env->fp_status);
+}
+
+float32 HELPER(madd_s)(CPUXtensaState *env, float32 a, float32 b, float32 c)
+{
+ return float32_muladd(b, c, a, 0,
+ &env->fp_status);
+}
+
+float32 HELPER(msub_s)(CPUXtensaState *env, float32 a, float32 b, float32 c)
+{
+ return float32_muladd(b, c, a, float_muladd_negate_product,
+ &env->fp_status);
+}
+
+uint32_t HELPER(ftoi)(float32 v, uint32_t rounding_mode, uint32_t scale)
+{
+ float_status fp_status = {0};
+
+ set_float_rounding_mode(rounding_mode, &fp_status);
+ return float32_to_int32(
+ float32_scalbn(v, scale, &fp_status), &fp_status);
+}
+
+uint32_t HELPER(ftoui)(float32 v, uint32_t rounding_mode, uint32_t scale)
+{
+ float_status fp_status = {0};
+ float32 res;
+
+ set_float_rounding_mode(rounding_mode, &fp_status);
+
+ res = float32_scalbn(v, scale, &fp_status);
+
+ if (float32_is_neg(v) && !float32_is_any_nan(v)) {
+ return float32_to_int32(res, &fp_status);
+ } else {
+ return float32_to_uint32(res, &fp_status);
+ }
+}
+
+float32 HELPER(itof)(CPUXtensaState *env, uint32_t v, uint32_t scale)
+{
+ return float32_scalbn(int32_to_float32(v, &env->fp_status),
+ (int32_t)scale, &env->fp_status);
+}
+
+float32 HELPER(uitof)(CPUXtensaState *env, uint32_t v, uint32_t scale)
+{
+ return float32_scalbn(uint32_to_float32(v, &env->fp_status),
+ (int32_t)scale, &env->fp_status);
+}
+
+static inline void set_br(CPUXtensaState *env, bool v, uint32_t br)
+{
+ if (v) {
+ env->sregs[BR] |= br;
+ } else {
+ env->sregs[BR] &= ~br;
+ }
+}
+
+void HELPER(un_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
+{
+ set_br(env, float32_unordered_quiet(a, b, &env->fp_status), br);
+}
+
+void HELPER(oeq_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
+{
+ set_br(env, float32_eq_quiet(a, b, &env->fp_status), br);
+}
+
+void HELPER(ueq_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
+{
+ int v = float32_compare_quiet(a, b, &env->fp_status);
+ set_br(env, v == float_relation_equal || v == float_relation_unordered, br);
+}
+
+void HELPER(olt_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
+{
+ set_br(env, float32_lt_quiet(a, b, &env->fp_status), br);
+}
+
+void HELPER(ult_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
+{
+ int v = float32_compare_quiet(a, b, &env->fp_status);
+ set_br(env, v == float_relation_less || v == float_relation_unordered, br);
+}
+
+void HELPER(ole_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
+{
+ set_br(env, float32_le_quiet(a, b, &env->fp_status), br);
+}
+
+void HELPER(ule_s)(CPUXtensaState *env, uint32_t br, float32 a, float32 b)
+{
+ int v = float32_compare_quiet(a, b, &env->fp_status);
+ set_br(env, v != float_relation_greater, br);
+}
diff --git a/target/xtensa/overlay_tool.h b/target/xtensa/overlay_tool.h
new file mode 100644
index 0000000000..e8a7fda3d8
--- /dev/null
+++ b/target/xtensa/overlay_tool.h
@@ -0,0 +1,602 @@
+/*
+ * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define XTREG(idx, ofs, bi, sz, al, no, flags, cp, typ, grp, name, \
+ a1, a2, a3, a4, a5, a6) \
+ { .targno = (no), .type = (typ), .group = (grp), .size = (sz) },
+#define XTREG_END { .targno = -1 },
+
+#ifndef XCHAL_HAVE_DEPBITS
+#define XCHAL_HAVE_DEPBITS 0
+#endif
+
+#ifndef XCHAL_HAVE_DIV32
+#define XCHAL_HAVE_DIV32 0
+#endif
+
+#ifndef XCHAL_UNALIGNED_LOAD_HW
+#define XCHAL_UNALIGNED_LOAD_HW 0
+#endif
+
+#ifndef XCHAL_HAVE_VECBASE
+#define XCHAL_HAVE_VECBASE 0
+#define XCHAL_VECBASE_RESET_VADDR 0
+#endif
+
+#ifndef XCHAL_HW_MIN_VERSION
+#define XCHAL_HW_MIN_VERSION 0
+#endif
+
+#define XCHAL_OPTION(xchal, qemu) ((xchal) ? XTENSA_OPTION_BIT(qemu) : 0)
+
+#define XTENSA_OPTIONS ( \
+ XCHAL_OPTION(XCHAL_HAVE_DENSITY, XTENSA_OPTION_CODE_DENSITY) | \
+ XCHAL_OPTION(XCHAL_HAVE_LOOPS, XTENSA_OPTION_LOOP) | \
+ XCHAL_OPTION(XCHAL_HAVE_ABSOLUTE_LITERALS, XTENSA_OPTION_EXTENDED_L32R) | \
+ XCHAL_OPTION(XCHAL_HAVE_MUL16, XTENSA_OPTION_16_BIT_IMUL) | \
+ XCHAL_OPTION(XCHAL_HAVE_MUL32, XTENSA_OPTION_32_BIT_IMUL) | \
+ XCHAL_OPTION(XCHAL_HAVE_MUL32_HIGH, XTENSA_OPTION_32_BIT_IMUL_HIGH) | \
+ XCHAL_OPTION(XCHAL_HAVE_DIV32, XTENSA_OPTION_32_BIT_IDIV) | \
+ XCHAL_OPTION(XCHAL_HAVE_MAC16, XTENSA_OPTION_MAC16) | \
+ XCHAL_OPTION(XCHAL_HAVE_NSA, XTENSA_OPTION_MISC_OP_NSA) | \
+ XCHAL_OPTION(XCHAL_HAVE_MINMAX, XTENSA_OPTION_MISC_OP_MINMAX) | \
+ XCHAL_OPTION(XCHAL_HAVE_SEXT, XTENSA_OPTION_MISC_OP_SEXT) | \
+ XCHAL_OPTION(XCHAL_HAVE_CLAMPS, XTENSA_OPTION_MISC_OP_CLAMPS) | \
+ XCHAL_OPTION(XCHAL_HAVE_CP, XTENSA_OPTION_COPROCESSOR) | \
+ XCHAL_OPTION(XCHAL_HAVE_BOOLEANS, XTENSA_OPTION_BOOLEAN) | \
+ XCHAL_OPTION(XCHAL_HAVE_FP, XTENSA_OPTION_FP_COPROCESSOR) | \
+ XCHAL_OPTION(XCHAL_HAVE_RELEASE_SYNC, XTENSA_OPTION_MP_SYNCHRO) | \
+ XCHAL_OPTION(XCHAL_HAVE_S32C1I, XTENSA_OPTION_CONDITIONAL_STORE) | \
+ XCHAL_OPTION(XCHAL_HAVE_S32C1I && XCHAL_HW_MIN_VERSION >= 230000, \
+ XTENSA_OPTION_ATOMCTL) | \
+ XCHAL_OPTION(XCHAL_HAVE_DEPBITS, XTENSA_OPTION_DEPBITS) | \
+ /* Interrupts and exceptions */ \
+ XCHAL_OPTION(XCHAL_HAVE_EXCEPTIONS, XTENSA_OPTION_EXCEPTION) | \
+ XCHAL_OPTION(XCHAL_HAVE_VECBASE, XTENSA_OPTION_RELOCATABLE_VECTOR) | \
+ XCHAL_OPTION(XCHAL_UNALIGNED_LOAD_EXCEPTION, \
+ XTENSA_OPTION_UNALIGNED_EXCEPTION) | \
+ XCHAL_OPTION(XCHAL_HAVE_INTERRUPTS, XTENSA_OPTION_INTERRUPT) | \
+ XCHAL_OPTION(XCHAL_HAVE_HIGHPRI_INTERRUPTS, \
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT) | \
+ XCHAL_OPTION(XCHAL_HAVE_CCOUNT, XTENSA_OPTION_TIMER_INTERRUPT) | \
+ /* Local memory, TODO */ \
+ XCHAL_OPTION(XCHAL_ICACHE_WAYS, XTENSA_OPTION_ICACHE) | \
+ XCHAL_OPTION(XCHAL_ICACHE_LINE_LOCKABLE, \
+ XTENSA_OPTION_ICACHE_INDEX_LOCK) | \
+ XCHAL_OPTION(XCHAL_DCACHE_WAYS, XTENSA_OPTION_DCACHE) | \
+ XCHAL_OPTION(XCHAL_DCACHE_LINE_LOCKABLE, \
+ XTENSA_OPTION_DCACHE_INDEX_LOCK) | \
+ XCHAL_OPTION(XCHAL_UNALIGNED_LOAD_HW, XTENSA_OPTION_HW_ALIGNMENT) | \
+ /* Memory protection and translation */ \
+ XCHAL_OPTION(XCHAL_HAVE_MIMIC_CACHEATTR, \
+ XTENSA_OPTION_REGION_PROTECTION) | \
+ XCHAL_OPTION(XCHAL_HAVE_XLT_CACHEATTR, \
+ XTENSA_OPTION_REGION_TRANSLATION) | \
+ XCHAL_OPTION(XCHAL_HAVE_PTP_MMU, XTENSA_OPTION_MMU) | \
+ XCHAL_OPTION(XCHAL_HAVE_CACHEATTR, XTENSA_OPTION_CACHEATTR) | \
+ /* Other, TODO */ \
+ XCHAL_OPTION(XCHAL_HAVE_WINDOWED, XTENSA_OPTION_WINDOWED_REGISTER) | \
+ XCHAL_OPTION(XCHAL_HAVE_DEBUG, XTENSA_OPTION_DEBUG) |\
+ XCHAL_OPTION(XCHAL_NUM_MISC_REGS > 0, XTENSA_OPTION_MISC_SR) | \
+ XCHAL_OPTION(XCHAL_HAVE_THREADPTR, XTENSA_OPTION_THREAD_POINTER) | \
+ XCHAL_OPTION(XCHAL_HAVE_PRID, XTENSA_OPTION_PROCESSOR_ID))
+
+#ifndef XCHAL_WINDOW_OF4_VECOFS
+#define XCHAL_WINDOW_OF4_VECOFS 0x00000000
+#define XCHAL_WINDOW_UF4_VECOFS 0x00000040
+#define XCHAL_WINDOW_OF8_VECOFS 0x00000080
+#define XCHAL_WINDOW_UF8_VECOFS 0x000000C0
+#define XCHAL_WINDOW_OF12_VECOFS 0x00000100
+#define XCHAL_WINDOW_UF12_VECOFS 0x00000140
+#endif
+
+#if XCHAL_HAVE_WINDOWED
+#define WINDOW_VECTORS \
+ [EXC_WINDOW_OVERFLOW4] = XCHAL_WINDOW_OF4_VECOFS + \
+ XCHAL_WINDOW_VECTORS_VADDR, \
+ [EXC_WINDOW_UNDERFLOW4] = XCHAL_WINDOW_UF4_VECOFS + \
+ XCHAL_WINDOW_VECTORS_VADDR, \
+ [EXC_WINDOW_OVERFLOW8] = XCHAL_WINDOW_OF8_VECOFS + \
+ XCHAL_WINDOW_VECTORS_VADDR, \
+ [EXC_WINDOW_UNDERFLOW8] = XCHAL_WINDOW_UF8_VECOFS + \
+ XCHAL_WINDOW_VECTORS_VADDR, \
+ [EXC_WINDOW_OVERFLOW12] = XCHAL_WINDOW_OF12_VECOFS + \
+ XCHAL_WINDOW_VECTORS_VADDR, \
+ [EXC_WINDOW_UNDERFLOW12] = XCHAL_WINDOW_UF12_VECOFS + \
+ XCHAL_WINDOW_VECTORS_VADDR,
+#else
+#define WINDOW_VECTORS
+#endif
+
+#define EXCEPTION_VECTORS { \
+ [EXC_RESET] = XCHAL_RESET_VECTOR_VADDR, \
+ WINDOW_VECTORS \
+ [EXC_KERNEL] = XCHAL_KERNEL_VECTOR_VADDR, \
+ [EXC_USER] = XCHAL_USER_VECTOR_VADDR, \
+ [EXC_DOUBLE] = XCHAL_DOUBLEEXC_VECTOR_VADDR, \
+ [EXC_DEBUG] = XCHAL_DEBUG_VECTOR_VADDR, \
+ }
+
+#define INTERRUPT_VECTORS { \
+ 0, \
+ 0, \
+ XCHAL_INTLEVEL2_VECTOR_VADDR, \
+ XCHAL_INTLEVEL3_VECTOR_VADDR, \
+ XCHAL_INTLEVEL4_VECTOR_VADDR, \
+ XCHAL_INTLEVEL5_VECTOR_VADDR, \
+ XCHAL_INTLEVEL6_VECTOR_VADDR, \
+ XCHAL_INTLEVEL7_VECTOR_VADDR, \
+ }
+
+#define LEVEL_MASKS { \
+ [1] = XCHAL_INTLEVEL1_MASK, \
+ [2] = XCHAL_INTLEVEL2_MASK, \
+ [3] = XCHAL_INTLEVEL3_MASK, \
+ [4] = XCHAL_INTLEVEL4_MASK, \
+ [5] = XCHAL_INTLEVEL5_MASK, \
+ [6] = XCHAL_INTLEVEL6_MASK, \
+ [7] = XCHAL_INTLEVEL7_MASK, \
+ }
+
+#define INTTYPE_MASKS { \
+ [INTTYPE_EDGE] = XCHAL_INTTYPE_MASK_EXTERN_EDGE, \
+ [INTTYPE_NMI] = XCHAL_INTTYPE_MASK_NMI, \
+ [INTTYPE_SOFTWARE] = XCHAL_INTTYPE_MASK_SOFTWARE, \
+ }
+
+#define XTHAL_INTTYPE_EXTERN_LEVEL INTTYPE_LEVEL
+#define XTHAL_INTTYPE_EXTERN_EDGE INTTYPE_EDGE
+#define XTHAL_INTTYPE_NMI INTTYPE_NMI
+#define XTHAL_INTTYPE_SOFTWARE INTTYPE_SOFTWARE
+#define XTHAL_INTTYPE_TIMER INTTYPE_TIMER
+#define XTHAL_INTTYPE_TBD1 INTTYPE_DEBUG
+#define XTHAL_INTTYPE_TBD2 INTTYPE_WRITE_ERR
+#define XTHAL_INTTYPE_WRITE_ERROR INTTYPE_WRITE_ERR
+#define XTHAL_INTTYPE_PROFILING INTTYPE_PROFILING
+
+
+#define INTERRUPT(i) { \
+ .level = XCHAL_INT ## i ## _LEVEL, \
+ .inttype = XCHAL_INT ## i ## _TYPE, \
+ }
+
+#define INTERRUPTS { \
+ [0] = INTERRUPT(0), \
+ [1] = INTERRUPT(1), \
+ [2] = INTERRUPT(2), \
+ [3] = INTERRUPT(3), \
+ [4] = INTERRUPT(4), \
+ [5] = INTERRUPT(5), \
+ [6] = INTERRUPT(6), \
+ [7] = INTERRUPT(7), \
+ [8] = INTERRUPT(8), \
+ [9] = INTERRUPT(9), \
+ [10] = INTERRUPT(10), \
+ [11] = INTERRUPT(11), \
+ [12] = INTERRUPT(12), \
+ [13] = INTERRUPT(13), \
+ [14] = INTERRUPT(14), \
+ [15] = INTERRUPT(15), \
+ [16] = INTERRUPT(16), \
+ [17] = INTERRUPT(17), \
+ [18] = INTERRUPT(18), \
+ [19] = INTERRUPT(19), \
+ [20] = INTERRUPT(20), \
+ [21] = INTERRUPT(21), \
+ [22] = INTERRUPT(22), \
+ [23] = INTERRUPT(23), \
+ [24] = INTERRUPT(24), \
+ [25] = INTERRUPT(25), \
+ [26] = INTERRUPT(26), \
+ [27] = INTERRUPT(27), \
+ [28] = INTERRUPT(28), \
+ [29] = INTERRUPT(29), \
+ [30] = INTERRUPT(30), \
+ [31] = INTERRUPT(31), \
+ }
+
+#define TIMERINTS { \
+ [0] = XCHAL_TIMER0_INTERRUPT, \
+ [1] = XCHAL_TIMER1_INTERRUPT, \
+ [2] = XCHAL_TIMER2_INTERRUPT, \
+ }
+
+#define EXTINTS { \
+ [0] = XCHAL_EXTINT0_NUM, \
+ [1] = XCHAL_EXTINT1_NUM, \
+ [2] = XCHAL_EXTINT2_NUM, \
+ [3] = XCHAL_EXTINT3_NUM, \
+ [4] = XCHAL_EXTINT4_NUM, \
+ [5] = XCHAL_EXTINT5_NUM, \
+ [6] = XCHAL_EXTINT6_NUM, \
+ [7] = XCHAL_EXTINT7_NUM, \
+ [8] = XCHAL_EXTINT8_NUM, \
+ [9] = XCHAL_EXTINT9_NUM, \
+ [10] = XCHAL_EXTINT10_NUM, \
+ [11] = XCHAL_EXTINT11_NUM, \
+ [12] = XCHAL_EXTINT12_NUM, \
+ [13] = XCHAL_EXTINT13_NUM, \
+ [14] = XCHAL_EXTINT14_NUM, \
+ [15] = XCHAL_EXTINT15_NUM, \
+ [16] = XCHAL_EXTINT16_NUM, \
+ [17] = XCHAL_EXTINT17_NUM, \
+ [18] = XCHAL_EXTINT18_NUM, \
+ [19] = XCHAL_EXTINT19_NUM, \
+ [20] = XCHAL_EXTINT20_NUM, \
+ [21] = XCHAL_EXTINT21_NUM, \
+ [22] = XCHAL_EXTINT22_NUM, \
+ [23] = XCHAL_EXTINT23_NUM, \
+ [24] = XCHAL_EXTINT24_NUM, \
+ [25] = XCHAL_EXTINT25_NUM, \
+ [26] = XCHAL_EXTINT26_NUM, \
+ [27] = XCHAL_EXTINT27_NUM, \
+ [28] = XCHAL_EXTINT28_NUM, \
+ [29] = XCHAL_EXTINT29_NUM, \
+ [30] = XCHAL_EXTINT30_NUM, \
+ [31] = XCHAL_EXTINT31_NUM, \
+ }
+
+#define EXCEPTIONS_SECTION \
+ .excm_level = XCHAL_EXCM_LEVEL, \
+ .vecbase = XCHAL_VECBASE_RESET_VADDR, \
+ .exception_vector = EXCEPTION_VECTORS
+
+#define INTERRUPTS_SECTION \
+ .ninterrupt = XCHAL_NUM_INTERRUPTS, \
+ .nlevel = XCHAL_NUM_INTLEVELS, \
+ .interrupt_vector = INTERRUPT_VECTORS, \
+ .level_mask = LEVEL_MASKS, \
+ .inttype_mask = INTTYPE_MASKS, \
+ .interrupt = INTERRUPTS, \
+ .nccompare = XCHAL_NUM_TIMERS, \
+ .timerint = TIMERINTS, \
+ .nextint = XCHAL_NUM_EXTINTERRUPTS, \
+ .extint = EXTINTS
+
+#if XCHAL_HAVE_PTP_MMU
+
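+/*
+ * Ways 0-3 are the auto-refill ways; way 4 has 4 entries, ways 5 and 6 have
+ * 4 and 8 entries with the variable way 5/6 option (2 otherwise), and ways
+ * 7-9 have a single entry each.
+ */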
+#define TLB_TEMPLATE(ways, refill_way_size, way56) { \
+ .nways = ways, \
+ .way_size = { \
+ (refill_way_size), (refill_way_size), \
+ (refill_way_size), (refill_way_size), \
+ 4, (way56) ? 4 : 2, (way56) ? 8 : 2, 1, 1, 1, \
+ }, \
+ .varway56 = (way56), \
+ .nrefillentries = (refill_way_size) * 4, \
+ }
+
+#define ITLB(varway56) \
+ TLB_TEMPLATE(7, 1 << XCHAL_ITLB_ARF_ENTRIES_LOG2, varway56)
+
+#define DTLB(varway56) \
+ TLB_TEMPLATE(10, 1 << XCHAL_DTLB_ARF_ENTRIES_LOG2, varway56)
+
+#define TLB_SECTION \
+ .itlb = ITLB(XCHAL_HAVE_SPANNING_WAY), \
+ .dtlb = DTLB(XCHAL_HAVE_SPANNING_WAY)
+
+#elif XCHAL_HAVE_XLT_CACHEATTR || XCHAL_HAVE_MIMIC_CACHEATTR
+
+#define TLB_TEMPLATE { \
+ .nways = 1, \
+ .way_size = { \
+ 8, \
+ } \
+ }
+
+#define TLB_SECTION \
+ .itlb = TLB_TEMPLATE, \
+ .dtlb = TLB_TEMPLATE
+
+#endif
+
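+/* Register the core only when its endianness matches the target binary. */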
+#if (defined(TARGET_WORDS_BIGENDIAN) != 0) == (XCHAL_HAVE_BE != 0)
+#define REGISTER_CORE(core) \
+ static void __attribute__((constructor)) register_core(void) \
+ { \
+ static XtensaConfigList node = { \
+ .config = &core, \
+ }; \
+ xtensa_finalize_config(&core); \
+ xtensa_register_core(&node); \
+ }
+#else
+#define REGISTER_CORE(core)
+#endif
+
+#define DEBUG_SECTION \
+ .debug_level = XCHAL_DEBUGLEVEL, \
+ .nibreak = XCHAL_NUM_IBREAK, \
+ .ndbreak = XCHAL_NUM_DBREAK
+
+#define CONFIG_SECTION \
+ .configid = { \
+ XCHAL_HW_CONFIGID0, \
+ XCHAL_HW_CONFIGID1, \
+ }
+
+#define DEFAULT_SECTIONS \
+ .options = XTENSA_OPTIONS, \
+ .nareg = XCHAL_NUM_AREGS, \
+ .ndepc = (XCHAL_XEA_VERSION >= 2), \
+ EXCEPTIONS_SECTION, \
+ INTERRUPTS_SECTION, \
+ TLB_SECTION, \
+ DEBUG_SECTION, \
+ CONFIG_SECTION
+
+
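+/*
+ * Zero fallbacks for vectors and interrupts that a given core does not
+ * configure, so that the tables defined above always compile.
+ */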
+#if XCHAL_NUM_INTLEVELS + XCHAL_HAVE_NMI + 1 <= 2
+#define XCHAL_INTLEVEL2_VECTOR_VADDR 0
+#endif
+#if XCHAL_NUM_INTLEVELS + XCHAL_HAVE_NMI + 1 <= 3
+#define XCHAL_INTLEVEL3_VECTOR_VADDR 0
+#endif
+#if XCHAL_NUM_INTLEVELS + XCHAL_HAVE_NMI + 1 <= 4
+#define XCHAL_INTLEVEL4_VECTOR_VADDR 0
+#endif
+#if XCHAL_NUM_INTLEVELS + XCHAL_HAVE_NMI + 1 <= 5
+#define XCHAL_INTLEVEL5_VECTOR_VADDR 0
+#endif
+#if XCHAL_NUM_INTLEVELS + XCHAL_HAVE_NMI + 1 <= 6
+#define XCHAL_INTLEVEL6_VECTOR_VADDR 0
+#endif
+#if XCHAL_NUM_INTLEVELS + XCHAL_HAVE_NMI + 1 <= 7
+#define XCHAL_INTLEVEL7_VECTOR_VADDR 0
+#endif
+
+
+#if XCHAL_NUM_INTERRUPTS <= 0
+#define XCHAL_INT0_LEVEL 0
+#define XCHAL_INT0_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 1
+#define XCHAL_INT1_LEVEL 0
+#define XCHAL_INT1_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 2
+#define XCHAL_INT2_LEVEL 0
+#define XCHAL_INT2_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 3
+#define XCHAL_INT3_LEVEL 0
+#define XCHAL_INT3_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 4
+#define XCHAL_INT4_LEVEL 0
+#define XCHAL_INT4_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 5
+#define XCHAL_INT5_LEVEL 0
+#define XCHAL_INT5_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 6
+#define XCHAL_INT6_LEVEL 0
+#define XCHAL_INT6_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 7
+#define XCHAL_INT7_LEVEL 0
+#define XCHAL_INT7_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 8
+#define XCHAL_INT8_LEVEL 0
+#define XCHAL_INT8_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 9
+#define XCHAL_INT9_LEVEL 0
+#define XCHAL_INT9_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 10
+#define XCHAL_INT10_LEVEL 0
+#define XCHAL_INT10_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 11
+#define XCHAL_INT11_LEVEL 0
+#define XCHAL_INT11_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 12
+#define XCHAL_INT12_LEVEL 0
+#define XCHAL_INT12_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 13
+#define XCHAL_INT13_LEVEL 0
+#define XCHAL_INT13_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 14
+#define XCHAL_INT14_LEVEL 0
+#define XCHAL_INT14_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 15
+#define XCHAL_INT15_LEVEL 0
+#define XCHAL_INT15_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 16
+#define XCHAL_INT16_LEVEL 0
+#define XCHAL_INT16_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 17
+#define XCHAL_INT17_LEVEL 0
+#define XCHAL_INT17_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 18
+#define XCHAL_INT18_LEVEL 0
+#define XCHAL_INT18_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 19
+#define XCHAL_INT19_LEVEL 0
+#define XCHAL_INT19_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 20
+#define XCHAL_INT20_LEVEL 0
+#define XCHAL_INT20_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 21
+#define XCHAL_INT21_LEVEL 0
+#define XCHAL_INT21_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 22
+#define XCHAL_INT22_LEVEL 0
+#define XCHAL_INT22_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 23
+#define XCHAL_INT23_LEVEL 0
+#define XCHAL_INT23_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 24
+#define XCHAL_INT24_LEVEL 0
+#define XCHAL_INT24_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 25
+#define XCHAL_INT25_LEVEL 0
+#define XCHAL_INT25_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 26
+#define XCHAL_INT26_LEVEL 0
+#define XCHAL_INT26_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 27
+#define XCHAL_INT27_LEVEL 0
+#define XCHAL_INT27_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 28
+#define XCHAL_INT28_LEVEL 0
+#define XCHAL_INT28_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 29
+#define XCHAL_INT29_LEVEL 0
+#define XCHAL_INT29_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 30
+#define XCHAL_INT30_LEVEL 0
+#define XCHAL_INT30_TYPE 0
+#endif
+#if XCHAL_NUM_INTERRUPTS <= 31
+#define XCHAL_INT31_LEVEL 0
+#define XCHAL_INT31_TYPE 0
+#endif
+
+
+#if XCHAL_NUM_EXTINTERRUPTS <= 0
+#define XCHAL_EXTINT0_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 1
+#define XCHAL_EXTINT1_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 2
+#define XCHAL_EXTINT2_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 3
+#define XCHAL_EXTINT3_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 4
+#define XCHAL_EXTINT4_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 5
+#define XCHAL_EXTINT5_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 6
+#define XCHAL_EXTINT6_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 7
+#define XCHAL_EXTINT7_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 8
+#define XCHAL_EXTINT8_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 9
+#define XCHAL_EXTINT9_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 10
+#define XCHAL_EXTINT10_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 11
+#define XCHAL_EXTINT11_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 12
+#define XCHAL_EXTINT12_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 13
+#define XCHAL_EXTINT13_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 14
+#define XCHAL_EXTINT14_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 15
+#define XCHAL_EXTINT15_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 16
+#define XCHAL_EXTINT16_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 17
+#define XCHAL_EXTINT17_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 18
+#define XCHAL_EXTINT18_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 19
+#define XCHAL_EXTINT19_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 20
+#define XCHAL_EXTINT20_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 21
+#define XCHAL_EXTINT21_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 22
+#define XCHAL_EXTINT22_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 23
+#define XCHAL_EXTINT23_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 24
+#define XCHAL_EXTINT24_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 25
+#define XCHAL_EXTINT25_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 26
+#define XCHAL_EXTINT26_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 27
+#define XCHAL_EXTINT27_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 28
+#define XCHAL_EXTINT28_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 29
+#define XCHAL_EXTINT29_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 30
+#define XCHAL_EXTINT30_NUM 0
+#endif
+#if XCHAL_NUM_EXTINTERRUPTS <= 31
+#define XCHAL_EXTINT31_NUM 0
+#endif
+
+
+#define XTHAL_TIMER_UNCONFIGURED 0
diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c
new file mode 100644
index 0000000000..0858c296ea
--- /dev/null
+++ b/target/xtensa/translate.c
@@ -0,0 +1,3225 @@
+/*
+ * Xtensa ISA:
+ * http://www.tensilica.com/products/literature-docs/documentation/xtensa-isa-databook.htm
+ *
+ * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "disas/disas.h"
+#include "tcg-op.h"
+#include "qemu/log.h"
+#include "sysemu/sysemu.h"
+#include "exec/cpu_ldst.h"
+#include "exec/semihost.h"
+
+#include "exec/helper-proto.h"
+#include "exec/helper-gen.h"
+
+#include "trace-tcg.h"
+#include "exec/log.h"
+
+
+typedef struct DisasContext {
+ const XtensaConfig *config;
+ TranslationBlock *tb;
+ uint32_t pc;
+ uint32_t next_pc;
+ int cring;
+ int ring;
+ uint32_t lbeg;
+ uint32_t lend;
+ TCGv_i32 litbase;
+ int is_jmp;
+ int singlestep_enabled;
+
+ bool sar_5bit;
+ bool sar_m32_5bit;
+ bool sar_m32_allocated;
+ TCGv_i32 sar_m32;
+
+ uint32_t ccount_delta;
+ unsigned window;
+
+ bool debug;
+ bool icount;
+ TCGv_i32 next_icount;
+
+ unsigned cpenable;
+} DisasContext;
+
+static TCGv_env cpu_env;
+static TCGv_i32 cpu_pc;
+static TCGv_i32 cpu_R[16];
+static TCGv_i32 cpu_FR[16];
+static TCGv_i32 cpu_SR[256];
+static TCGv_i32 cpu_UR[256];
+
+#include "exec/gen-icount.h"
+
+typedef struct XtensaReg {
+ const char *name;
+ uint64_t opt_bits;
+ enum {
+ SR_R = 1,
+ SR_W = 2,
+ SR_X = 4,
+ SR_RW = 3,
+ SR_RWX = 7,
+ } access;
+} XtensaReg;
+
+#define XTENSA_REG_ACCESS(regname, opt, acc) { \
+ .name = (regname), \
+ .opt_bits = XTENSA_OPTION_BIT(opt), \
+ .access = (acc), \
+ }
+
+#define XTENSA_REG(regname, opt) XTENSA_REG_ACCESS(regname, opt, SR_RWX)
+
+#define XTENSA_REG_BITS_ACCESS(regname, opt, acc) { \
+ .name = (regname), \
+ .opt_bits = (opt), \
+ .access = (acc), \
+ }
+
+#define XTENSA_REG_BITS(regname, opt) \
+ XTENSA_REG_BITS_ACCESS(regname, opt, SR_RWX)
+
+static const XtensaReg sregnames[256] = {
+ [LBEG] = XTENSA_REG("LBEG", XTENSA_OPTION_LOOP),
+ [LEND] = XTENSA_REG("LEND", XTENSA_OPTION_LOOP),
+ [LCOUNT] = XTENSA_REG("LCOUNT", XTENSA_OPTION_LOOP),
+ [SAR] = XTENSA_REG_BITS("SAR", XTENSA_OPTION_ALL),
+ [BR] = XTENSA_REG("BR", XTENSA_OPTION_BOOLEAN),
+ [LITBASE] = XTENSA_REG("LITBASE", XTENSA_OPTION_EXTENDED_L32R),
+ [SCOMPARE1] = XTENSA_REG("SCOMPARE1", XTENSA_OPTION_CONDITIONAL_STORE),
+ [ACCLO] = XTENSA_REG("ACCLO", XTENSA_OPTION_MAC16),
+ [ACCHI] = XTENSA_REG("ACCHI", XTENSA_OPTION_MAC16),
+ [MR] = XTENSA_REG("MR0", XTENSA_OPTION_MAC16),
+ [MR + 1] = XTENSA_REG("MR1", XTENSA_OPTION_MAC16),
+ [MR + 2] = XTENSA_REG("MR2", XTENSA_OPTION_MAC16),
+ [MR + 3] = XTENSA_REG("MR3", XTENSA_OPTION_MAC16),
+ [WINDOW_BASE] = XTENSA_REG("WINDOW_BASE", XTENSA_OPTION_WINDOWED_REGISTER),
+ [WINDOW_START] = XTENSA_REG("WINDOW_START",
+ XTENSA_OPTION_WINDOWED_REGISTER),
+ [PTEVADDR] = XTENSA_REG("PTEVADDR", XTENSA_OPTION_MMU),
+ [RASID] = XTENSA_REG("RASID", XTENSA_OPTION_MMU),
+ [ITLBCFG] = XTENSA_REG("ITLBCFG", XTENSA_OPTION_MMU),
+ [DTLBCFG] = XTENSA_REG("DTLBCFG", XTENSA_OPTION_MMU),
+ [IBREAKENABLE] = XTENSA_REG("IBREAKENABLE", XTENSA_OPTION_DEBUG),
+ [CACHEATTR] = XTENSA_REG("CACHEATTR", XTENSA_OPTION_CACHEATTR),
+ [ATOMCTL] = XTENSA_REG("ATOMCTL", XTENSA_OPTION_ATOMCTL),
+ [IBREAKA] = XTENSA_REG("IBREAKA0", XTENSA_OPTION_DEBUG),
+ [IBREAKA + 1] = XTENSA_REG("IBREAKA1", XTENSA_OPTION_DEBUG),
+ [DBREAKA] = XTENSA_REG("DBREAKA0", XTENSA_OPTION_DEBUG),
+ [DBREAKA + 1] = XTENSA_REG("DBREAKA1", XTENSA_OPTION_DEBUG),
+ [DBREAKC] = XTENSA_REG("DBREAKC0", XTENSA_OPTION_DEBUG),
+ [DBREAKC + 1] = XTENSA_REG("DBREAKC1", XTENSA_OPTION_DEBUG),
+ [CONFIGID0] = XTENSA_REG_BITS_ACCESS("CONFIGID0", XTENSA_OPTION_ALL, SR_R),
+ [EPC1] = XTENSA_REG("EPC1", XTENSA_OPTION_EXCEPTION),
+ [EPC1 + 1] = XTENSA_REG("EPC2", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
+ [EPC1 + 2] = XTENSA_REG("EPC3", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
+ [EPC1 + 3] = XTENSA_REG("EPC4", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
+ [EPC1 + 4] = XTENSA_REG("EPC5", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
+ [EPC1 + 5] = XTENSA_REG("EPC6", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
+ [EPC1 + 6] = XTENSA_REG("EPC7", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
+ [DEPC] = XTENSA_REG("DEPC", XTENSA_OPTION_EXCEPTION),
+ [EPS2] = XTENSA_REG("EPS2", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
+ [EPS2 + 1] = XTENSA_REG("EPS3", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
+ [EPS2 + 2] = XTENSA_REG("EPS4", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
+ [EPS2 + 3] = XTENSA_REG("EPS5", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
+ [EPS2 + 4] = XTENSA_REG("EPS6", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
+ [EPS2 + 5] = XTENSA_REG("EPS7", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
+ [CONFIGID1] = XTENSA_REG_BITS_ACCESS("CONFIGID1", XTENSA_OPTION_ALL, SR_R),
+ [EXCSAVE1] = XTENSA_REG("EXCSAVE1", XTENSA_OPTION_EXCEPTION),
+ [EXCSAVE1 + 1] = XTENSA_REG("EXCSAVE2",
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
+ [EXCSAVE1 + 2] = XTENSA_REG("EXCSAVE3",
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
+ [EXCSAVE1 + 3] = XTENSA_REG("EXCSAVE4",
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
+ [EXCSAVE1 + 4] = XTENSA_REG("EXCSAVE5",
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
+ [EXCSAVE1 + 5] = XTENSA_REG("EXCSAVE6",
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
+ [EXCSAVE1 + 6] = XTENSA_REG("EXCSAVE7",
+ XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT),
+ [CPENABLE] = XTENSA_REG("CPENABLE", XTENSA_OPTION_COPROCESSOR),
+ [INTSET] = XTENSA_REG_ACCESS("INTSET", XTENSA_OPTION_INTERRUPT, SR_RW),
+ [INTCLEAR] = XTENSA_REG_ACCESS("INTCLEAR", XTENSA_OPTION_INTERRUPT, SR_W),
+ [INTENABLE] = XTENSA_REG("INTENABLE", XTENSA_OPTION_INTERRUPT),
+ [PS] = XTENSA_REG_BITS("PS", XTENSA_OPTION_ALL),
+ [VECBASE] = XTENSA_REG("VECBASE", XTENSA_OPTION_RELOCATABLE_VECTOR),
+ [EXCCAUSE] = XTENSA_REG("EXCCAUSE", XTENSA_OPTION_EXCEPTION),
+ [DEBUGCAUSE] = XTENSA_REG_ACCESS("DEBUGCAUSE", XTENSA_OPTION_DEBUG, SR_R),
+ [CCOUNT] = XTENSA_REG("CCOUNT", XTENSA_OPTION_TIMER_INTERRUPT),
+ [PRID] = XTENSA_REG_ACCESS("PRID", XTENSA_OPTION_PROCESSOR_ID, SR_R),
+ [ICOUNT] = XTENSA_REG("ICOUNT", XTENSA_OPTION_DEBUG),
+ [ICOUNTLEVEL] = XTENSA_REG("ICOUNTLEVEL", XTENSA_OPTION_DEBUG),
+ [EXCVADDR] = XTENSA_REG("EXCVADDR", XTENSA_OPTION_EXCEPTION),
+ [CCOMPARE] = XTENSA_REG("CCOMPARE0", XTENSA_OPTION_TIMER_INTERRUPT),
+ [CCOMPARE + 1] = XTENSA_REG("CCOMPARE1",
+ XTENSA_OPTION_TIMER_INTERRUPT),
+ [CCOMPARE + 2] = XTENSA_REG("CCOMPARE2",
+ XTENSA_OPTION_TIMER_INTERRUPT),
+ [MISC] = XTENSA_REG("MISC0", XTENSA_OPTION_MISC_SR),
+ [MISC + 1] = XTENSA_REG("MISC1", XTENSA_OPTION_MISC_SR),
+ [MISC + 2] = XTENSA_REG("MISC2", XTENSA_OPTION_MISC_SR),
+ [MISC + 3] = XTENSA_REG("MISC3", XTENSA_OPTION_MISC_SR),
+};
+
+static const XtensaReg uregnames[256] = {
+ [THREADPTR] = XTENSA_REG("THREADPTR", XTENSA_OPTION_THREAD_POINTER),
+ [FCR] = XTENSA_REG("FCR", XTENSA_OPTION_FP_COPROCESSOR),
+ [FSR] = XTENSA_REG("FSR", XTENSA_OPTION_FP_COPROCESSOR),
+};
+
+void xtensa_translate_init(void)
+{
+ static const char * const regnames[] = {
+ "ar0", "ar1", "ar2", "ar3",
+ "ar4", "ar5", "ar6", "ar7",
+ "ar8", "ar9", "ar10", "ar11",
+ "ar12", "ar13", "ar14", "ar15",
+ };
+ static const char * const fregnames[] = {
+ "f0", "f1", "f2", "f3",
+ "f4", "f5", "f6", "f7",
+ "f8", "f9", "f10", "f11",
+ "f12", "f13", "f14", "f15",
+ };
+ int i;
+
+ cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+ tcg_ctx.tcg_env = cpu_env;
+ cpu_pc = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUXtensaState, pc), "pc");
+
+ for (i = 0; i < 16; i++) {
+ cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUXtensaState, regs[i]),
+ regnames[i]);
+ }
+
+ for (i = 0; i < 16; i++) {
+ cpu_FR[i] = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUXtensaState, fregs[i].f32[FP_F32_LOW]),
+ fregnames[i]);
+ }
+
+ for (i = 0; i < 256; ++i) {
+ if (sregnames[i].name) {
+ cpu_SR[i] = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUXtensaState, sregs[i]),
+ sregnames[i].name);
+ }
+ }
+
+ for (i = 0; i < 256; ++i) {
+ if (uregnames[i].name) {
+ cpu_UR[i] = tcg_global_mem_new_i32(cpu_env,
+ offsetof(CPUXtensaState, uregs[i]),
+ uregnames[i].name);
+ }
+ }
+}
+
+static inline bool option_bits_enabled(DisasContext *dc, uint64_t opt)
+{
+ return xtensa_option_bits_enabled(dc->config, opt);
+}
+
+static inline bool option_enabled(DisasContext *dc, int opt)
+{
+ return xtensa_option_enabled(dc->config, opt);
+}
+
+static void init_litbase(DisasContext *dc)
+{
+ if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) {
+ dc->litbase = tcg_temp_local_new_i32();
+ tcg_gen_andi_i32(dc->litbase, cpu_SR[LITBASE], 0xfffff000);
+ }
+}
+
+static void reset_litbase(DisasContext *dc)
+{
+ if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) {
+ tcg_temp_free(dc->litbase);
+ }
+}
+
+static void init_sar_tracker(DisasContext *dc)
+{
+ dc->sar_5bit = false;
+ dc->sar_m32_5bit = false;
+ dc->sar_m32_allocated = false;
+}
+
+static void reset_sar_tracker(DisasContext *dc)
+{
+ if (dc->sar_m32_allocated) {
+ tcg_temp_free(dc->sar_m32);
+ }
+}
+
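+/*
+ * For right shifts SAR holds the 5-bit shift amount; for left shifts it
+ * holds 32 minus that amount.  Track which form is current (keeping the raw
+ * amount in sar_m32) so dependent shift ops can reuse it without recomputing.
+ */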
+static void gen_right_shift_sar(DisasContext *dc, TCGv_i32 sa)
+{
+ tcg_gen_andi_i32(cpu_SR[SAR], sa, 0x1f);
+ if (dc->sar_m32_5bit) {
+ tcg_gen_discard_i32(dc->sar_m32);
+ }
+ dc->sar_5bit = true;
+ dc->sar_m32_5bit = false;
+}
+
+static void gen_left_shift_sar(DisasContext *dc, TCGv_i32 sa)
+{
+ TCGv_i32 tmp = tcg_const_i32(32);
+ if (!dc->sar_m32_allocated) {
+ dc->sar_m32 = tcg_temp_local_new_i32();
+ dc->sar_m32_allocated = true;
+ }
+ tcg_gen_andi_i32(dc->sar_m32, sa, 0x1f);
+ tcg_gen_sub_i32(cpu_SR[SAR], tmp, dc->sar_m32);
+ dc->sar_5bit = false;
+ dc->sar_m32_5bit = true;
+ tcg_temp_free(tmp);
+}
+
+static void gen_advance_ccount(DisasContext *dc)
+{
+ if (dc->ccount_delta > 0) {
+ TCGv_i32 tmp = tcg_const_i32(dc->ccount_delta);
+ gen_helper_advance_ccount(cpu_env, tmp);
+ tcg_temp_free(tmp);
+ }
+ dc->ccount_delta = 0;
+}
+
+static void gen_exception(DisasContext *dc, int excp)
+{
+ TCGv_i32 tmp = tcg_const_i32(excp);
+ gen_advance_ccount(dc);
+ gen_helper_exception(cpu_env, tmp);
+ tcg_temp_free(tmp);
+}
+
+static void gen_exception_cause(DisasContext *dc, uint32_t cause)
+{
+ TCGv_i32 tpc = tcg_const_i32(dc->pc);
+ TCGv_i32 tcause = tcg_const_i32(cause);
+ gen_advance_ccount(dc);
+ gen_helper_exception_cause(cpu_env, tpc, tcause);
+ tcg_temp_free(tpc);
+ tcg_temp_free(tcause);
+ if (cause == ILLEGAL_INSTRUCTION_CAUSE ||
+ cause == SYSCALL_CAUSE) {
+ dc->is_jmp = DISAS_UPDATE;
+ }
+}
+
+static void gen_exception_cause_vaddr(DisasContext *dc, uint32_t cause,
+ TCGv_i32 vaddr)
+{
+ TCGv_i32 tpc = tcg_const_i32(dc->pc);
+ TCGv_i32 tcause = tcg_const_i32(cause);
+ gen_advance_ccount(dc);
+ gen_helper_exception_cause_vaddr(cpu_env, tpc, tcause, vaddr);
+ tcg_temp_free(tpc);
+ tcg_temp_free(tcause);
+}
+
+static void gen_debug_exception(DisasContext *dc, uint32_t cause)
+{
+ TCGv_i32 tpc = tcg_const_i32(dc->pc);
+ TCGv_i32 tcause = tcg_const_i32(cause);
+ gen_advance_ccount(dc);
+ gen_helper_debug_exception(cpu_env, tpc, tcause);
+ tcg_temp_free(tpc);
+ tcg_temp_free(tcause);
+ if (cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BI | DEBUGCAUSE_BN)) {
+ dc->is_jmp = DISAS_UPDATE;
+ }
+}
+
+static bool gen_check_privilege(DisasContext *dc)
+{
+ if (dc->cring) {
+ gen_exception_cause(dc, PRIVILEGED_CAUSE);
+ dc->is_jmp = DISAS_UPDATE;
+ return false;
+ }
+ return true;
+}
+
+static bool gen_check_cpenable(DisasContext *dc, unsigned cp)
+{
+ if (option_enabled(dc, XTENSA_OPTION_COPROCESSOR) &&
+ !(dc->cpenable & (1 << cp))) {
+ gen_exception_cause(dc, COPROCESSOR0_DISABLED + cp);
+ dc->is_jmp = DISAS_UPDATE;
+ return false;
+ }
+ return true;
+}
+
+static void gen_jump_slot(DisasContext *dc, TCGv dest, int slot)
+{
+ tcg_gen_mov_i32(cpu_pc, dest);
+ gen_advance_ccount(dc);
+ if (dc->icount) {
+ tcg_gen_mov_i32(cpu_SR[ICOUNT], dc->next_icount);
+ }
+ if (dc->singlestep_enabled) {
+ gen_exception(dc, EXCP_DEBUG);
+ } else {
+ if (slot >= 0) {
+ tcg_gen_goto_tb(slot);
+ tcg_gen_exit_tb((uintptr_t)dc->tb + slot);
+ } else {
+ tcg_gen_exit_tb(0);
+ }
+ }
+ dc->is_jmp = DISAS_UPDATE;
+}
+
+static void gen_jump(DisasContext *dc, TCGv dest)
+{
+ gen_jump_slot(dc, dest, -1);
+}
+
+static void gen_jumpi(DisasContext *dc, uint32_t dest, int slot)
+{
+ TCGv_i32 tmp = tcg_const_i32(dest);
+#ifndef CONFIG_USER_ONLY
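+ /* Direct TB chaining is only safe while the target stays on the same page. */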
+ if (((dc->tb->pc ^ dest) & TARGET_PAGE_MASK) != 0) {
+ slot = -1;
+ }
+#endif
+ gen_jump_slot(dc, tmp, slot);
+ tcg_temp_free(tmp);
+}
+
+static void gen_callw_slot(DisasContext *dc, int callinc, TCGv_i32 dest,
+ int slot)
+{
+ TCGv_i32 tcallinc = tcg_const_i32(callinc);
+
+ tcg_gen_deposit_i32(cpu_SR[PS], cpu_SR[PS],
+ tcallinc, PS_CALLINC_SHIFT, PS_CALLINC_LEN);
+ tcg_temp_free(tcallinc);
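+ /* Return address, with the window increment in bits 31:30, goes to a4/a8/a12. */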
+ tcg_gen_movi_i32(cpu_R[callinc << 2],
+ (callinc << 30) | (dc->next_pc & 0x3fffffff));
+ gen_jump_slot(dc, dest, slot);
+}
+
+static void gen_callw(DisasContext *dc, int callinc, TCGv_i32 dest)
+{
+ gen_callw_slot(dc, callinc, dest, -1);
+}
+
+static void gen_callwi(DisasContext *dc, int callinc, uint32_t dest, int slot)
+{
+ TCGv_i32 tmp = tcg_const_i32(dest);
+#ifndef CONFIG_USER_ONLY
+ if (((dc->tb->pc ^ dest) & TARGET_PAGE_MASK) != 0) {
+ slot = -1;
+ }
+#endif
+ gen_callw_slot(dc, callinc, tmp, slot);
+ tcg_temp_free(tmp);
+}
+
+static bool gen_check_loop_end(DisasContext *dc, int slot)
+{
+ if (option_enabled(dc, XTENSA_OPTION_LOOP) &&
+ !(dc->tb->flags & XTENSA_TBFLAG_EXCM) &&
+ dc->next_pc == dc->lend) {
+ TCGLabel *label = gen_new_label();
+
+ gen_advance_ccount(dc);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_SR[LCOUNT], 0, label);
+ tcg_gen_subi_i32(cpu_SR[LCOUNT], cpu_SR[LCOUNT], 1);
+ gen_jumpi(dc, dc->lbeg, slot);
+ gen_set_label(label);
+ gen_jumpi(dc, dc->next_pc, -1);
+ return true;
+ }
+ return false;
+}
+
+static void gen_jumpi_check_loop_end(DisasContext *dc, int slot)
+{
+ if (!gen_check_loop_end(dc, slot)) {
+ gen_jumpi(dc, dc->next_pc, slot);
+ }
+}
+
+static void gen_brcond(DisasContext *dc, TCGCond cond,
+ TCGv_i32 t0, TCGv_i32 t1, uint32_t offset)
+{
+ TCGLabel *label = gen_new_label();
+
+ gen_advance_ccount(dc);
+ tcg_gen_brcond_i32(cond, t0, t1, label);
+ gen_jumpi_check_loop_end(dc, 0);
+ gen_set_label(label);
+ gen_jumpi(dc, dc->pc + offset, 1);
+}
+
+static void gen_brcondi(DisasContext *dc, TCGCond cond,
+ TCGv_i32 t0, uint32_t t1, uint32_t offset)
+{
+ TCGv_i32 tmp = tcg_const_i32(t1);
+ gen_brcond(dc, cond, t0, tmp, offset);
+ tcg_temp_free(tmp);
+}
+
+static bool gen_check_sr(DisasContext *dc, uint32_t sr, unsigned access)
+{
+ if (!xtensa_option_bits_enabled(dc->config, sregnames[sr].opt_bits)) {
+ if (sregnames[sr].name) {
+ qemu_log_mask(LOG_GUEST_ERROR, "SR %s is not configured\n", sregnames[sr].name);
+ } else {
+ qemu_log_mask(LOG_UNIMP, "SR %d is not implemented\n", sr);
+ }
+ gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
+ return false;
+ } else if (!(sregnames[sr].access & access)) {
+ static const char * const access_text[] = {
+ [SR_R] = "rsr",
+ [SR_W] = "wsr",
+ [SR_X] = "xsr",
+ };
+ assert(access < ARRAY_SIZE(access_text) && access_text[access]);
+ qemu_log_mask(LOG_GUEST_ERROR, "SR %s is not available for %s\n", sregnames[sr].name,
+ access_text[access]);
+ gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
+ return false;
+ }
+ return true;
+}
+
+static void gen_rsr_ccount(DisasContext *dc, TCGv_i32 d, uint32_t sr)
+{
+ gen_advance_ccount(dc);
+ tcg_gen_mov_i32(d, cpu_SR[sr]);
+}
+
+static void gen_rsr_ptevaddr(DisasContext *dc, TCGv_i32 d, uint32_t sr)
+{
+ tcg_gen_shri_i32(d, cpu_SR[EXCVADDR], 10);
+ tcg_gen_or_i32(d, d, cpu_SR[sr]);
+ tcg_gen_andi_i32(d, d, 0xfffffffc);
+}
+
+static void gen_rsr(DisasContext *dc, TCGv_i32 d, uint32_t sr)
+{
+ static void (* const rsr_handler[256])(DisasContext *dc,
+ TCGv_i32 d, uint32_t sr) = {
+ [CCOUNT] = gen_rsr_ccount,
+ [PTEVADDR] = gen_rsr_ptevaddr,
+ };
+
+ if (rsr_handler[sr]) {
+ rsr_handler[sr](dc, d, sr);
+ } else {
+ tcg_gen_mov_i32(d, cpu_SR[sr]);
+ }
+}
+
+static void gen_wsr_lbeg(DisasContext *dc, uint32_t sr, TCGv_i32 s)
+{
+ gen_helper_wsr_lbeg(cpu_env, s);
+ gen_jumpi_check_loop_end(dc, 0);
+}
+
+static void gen_wsr_lend(DisasContext *dc, uint32_t sr, TCGv_i32 s)
+{
+ gen_helper_wsr_lend(cpu_env, s);
+ gen_jumpi_check_loop_end(dc, 0);
+}
+
+static void gen_wsr_sar(DisasContext *dc, uint32_t sr, TCGv_i32 s)
+{
+ tcg_gen_andi_i32(cpu_SR[sr], s, 0x3f);
+ if (dc->sar_m32_5bit) {
+ tcg_gen_discard_i32(dc->sar_m32);
+ }
+ dc->sar_5bit = false;
+ dc->sar_m32_5bit = false;
+}
+
+static void gen_wsr_br(DisasContext *dc, uint32_t sr, TCGv_i32 s)
+{
+ tcg_gen_andi_i32(cpu_SR[sr], s, 0xffff);
+}
+
+static void gen_wsr_litbase(DisasContext *dc, uint32_t sr, TCGv_i32 s)
+{
+ tcg_gen_andi_i32(cpu_SR[sr], s, 0xfffff001);
+ /* This can change tb->flags, so exit tb */
+ gen_jumpi_check_loop_end(dc, -1);
+}
+
+static void gen_wsr_acchi(DisasContext *dc, uint32_t sr, TCGv_i32 s)
+{
+ tcg_gen_ext8s_i32(cpu_SR[sr], s);
+}
+
+static void gen_wsr_windowbase(DisasContext *dc, uint32_t sr, TCGv_i32 v)
+{
+ gen_helper_wsr_windowbase(cpu_env, v);
+ /* This can change tb->flags, so exit tb */
+ gen_jumpi_check_loop_end(dc, -1);
+}
+
+static void gen_wsr_windowstart(DisasContext *dc, uint32_t sr, TCGv_i32 v)
+{
+ tcg_gen_andi_i32(cpu_SR[sr], v, (1 << dc->config->nareg / 4) - 1);
+ /* This can change tb->flags, so exit tb */
+ gen_jumpi_check_loop_end(dc, -1);
+}
+
+static void gen_wsr_ptevaddr(DisasContext *dc, uint32_t sr, TCGv_i32 v)
+{
+ tcg_gen_andi_i32(cpu_SR[sr], v, 0xffc00000);
+}
+
+static void gen_wsr_rasid(DisasContext *dc, uint32_t sr, TCGv_i32 v)
+{
+ gen_helper_wsr_rasid(cpu_env, v);
+ /* This can change tb->flags, so exit tb */
+ gen_jumpi_check_loop_end(dc, -1);
+}
+
+static void gen_wsr_tlbcfg(DisasContext *dc, uint32_t sr, TCGv_i32 v)
+{
+ tcg_gen_andi_i32(cpu_SR[sr], v, 0x01130000);
+}
+
+static void gen_wsr_ibreakenable(DisasContext *dc, uint32_t sr, TCGv_i32 v)
+{
+ gen_helper_wsr_ibreakenable(cpu_env, v);
+ gen_jumpi_check_loop_end(dc, 0);
+}
+
+static void gen_wsr_atomctl(DisasContext *dc, uint32_t sr, TCGv_i32 v)
+{
+ tcg_gen_andi_i32(cpu_SR[sr], v, 0x3f);
+}
+
+static void gen_wsr_ibreaka(DisasContext *dc, uint32_t sr, TCGv_i32 v)
+{
+ unsigned id = sr - IBREAKA;
+
+ if (id < dc->config->nibreak) {
+ TCGv_i32 tmp = tcg_const_i32(id);
+ gen_helper_wsr_ibreaka(cpu_env, tmp, v);
+ tcg_temp_free(tmp);
+ gen_jumpi_check_loop_end(dc, 0);
+ }
+}
+
+static void gen_wsr_dbreaka(DisasContext *dc, uint32_t sr, TCGv_i32 v)
+{
+ unsigned id = sr - DBREAKA;
+
+ if (id < dc->config->ndbreak) {
+ TCGv_i32 tmp = tcg_const_i32(id);
+ gen_helper_wsr_dbreaka(cpu_env, tmp, v);
+ tcg_temp_free(tmp);
+ }
+}
+
+static void gen_wsr_dbreakc(DisasContext *dc, uint32_t sr, TCGv_i32 v)
+{
+ unsigned id = sr - DBREAKC;
+
+ if (id < dc->config->ndbreak) {
+ TCGv_i32 tmp = tcg_const_i32(id);
+ gen_helper_wsr_dbreakc(cpu_env, tmp, v);
+ tcg_temp_free(tmp);
+ }
+}
+
+static void gen_wsr_cpenable(DisasContext *dc, uint32_t sr, TCGv_i32 v)
+{
+ tcg_gen_andi_i32(cpu_SR[sr], v, 0xff);
+ /* This can change tb->flags, so exit tb */
+ gen_jumpi_check_loop_end(dc, -1);
+}
+
+static void gen_wsr_intset(DisasContext *dc, uint32_t sr, TCGv_i32 v)
+{
+ tcg_gen_andi_i32(cpu_SR[sr], v,
+ dc->config->inttype_mask[INTTYPE_SOFTWARE]);
+ gen_helper_check_interrupts(cpu_env);
+ gen_jumpi_check_loop_end(dc, 0);
+}
+
+static void gen_wsr_intclear(DisasContext *dc, uint32_t sr, TCGv_i32 v)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ tcg_gen_andi_i32(tmp, v,
+ dc->config->inttype_mask[INTTYPE_EDGE] |
+ dc->config->inttype_mask[INTTYPE_NMI] |
+ dc->config->inttype_mask[INTTYPE_SOFTWARE]);
+ tcg_gen_andc_i32(cpu_SR[INTSET], cpu_SR[INTSET], tmp);
+ tcg_temp_free(tmp);
+ gen_helper_check_interrupts(cpu_env);
+}
+
+static void gen_wsr_intenable(DisasContext *dc, uint32_t sr, TCGv_i32 v)
+{
+ tcg_gen_mov_i32(cpu_SR[sr], v);
+ gen_helper_check_interrupts(cpu_env);
+ gen_jumpi_check_loop_end(dc, 0);
+}
+
+static void gen_wsr_ps(DisasContext *dc, uint32_t sr, TCGv_i32 v)
+{
+ uint32_t mask = PS_WOE | PS_CALLINC | PS_OWB |
+ PS_UM | PS_EXCM | PS_INTLEVEL;
+
+ if (option_enabled(dc, XTENSA_OPTION_MMU)) {
+ mask |= PS_RING;
+ }
+ tcg_gen_andi_i32(cpu_SR[sr], v, mask);
+ gen_helper_check_interrupts(cpu_env);
+ /* This can change mmu index and tb->flags, so exit tb */
+ gen_jumpi_check_loop_end(dc, -1);
+}
+
+static void gen_wsr_icount(DisasContext *dc, uint32_t sr, TCGv_i32 v)
+{
+ if (dc->icount) {
+ tcg_gen_mov_i32(dc->next_icount, v);
+ } else {
+ tcg_gen_mov_i32(cpu_SR[sr], v);
+ }
+}
+
+static void gen_wsr_icountlevel(DisasContext *dc, uint32_t sr, TCGv_i32 v)
+{
+ tcg_gen_andi_i32(cpu_SR[sr], v, 0xf);
+ /* This can change tb->flags, so exit tb */
+ gen_jumpi_check_loop_end(dc, -1);
+}
+
+static void gen_wsr_ccompare(DisasContext *dc, uint32_t sr, TCGv_i32 v)
+{
+ uint32_t id = sr - CCOMPARE;
+ if (id < dc->config->nccompare) {
+ uint32_t int_bit = 1 << dc->config->timerint[id];
+ gen_advance_ccount(dc);
+ tcg_gen_mov_i32(cpu_SR[sr], v);
+ tcg_gen_andi_i32(cpu_SR[INTSET], cpu_SR[INTSET], ~int_bit);
+ gen_helper_check_interrupts(cpu_env);
+ }
+}
+
+static void gen_wsr(DisasContext *dc, uint32_t sr, TCGv_i32 s)
+{
+ static void (* const wsr_handler[256])(DisasContext *dc,
+ uint32_t sr, TCGv_i32 v) = {
+ [LBEG] = gen_wsr_lbeg,
+ [LEND] = gen_wsr_lend,
+ [SAR] = gen_wsr_sar,
+ [BR] = gen_wsr_br,
+ [LITBASE] = gen_wsr_litbase,
+ [ACCHI] = gen_wsr_acchi,
+ [WINDOW_BASE] = gen_wsr_windowbase,
+ [WINDOW_START] = gen_wsr_windowstart,
+ [PTEVADDR] = gen_wsr_ptevaddr,
+ [RASID] = gen_wsr_rasid,
+ [ITLBCFG] = gen_wsr_tlbcfg,
+ [DTLBCFG] = gen_wsr_tlbcfg,
+ [IBREAKENABLE] = gen_wsr_ibreakenable,
+ [ATOMCTL] = gen_wsr_atomctl,
+ [IBREAKA] = gen_wsr_ibreaka,
+ [IBREAKA + 1] = gen_wsr_ibreaka,
+ [DBREAKA] = gen_wsr_dbreaka,
+ [DBREAKA + 1] = gen_wsr_dbreaka,
+ [DBREAKC] = gen_wsr_dbreakc,
+ [DBREAKC + 1] = gen_wsr_dbreakc,
+ [CPENABLE] = gen_wsr_cpenable,
+ [INTSET] = gen_wsr_intset,
+ [INTCLEAR] = gen_wsr_intclear,
+ [INTENABLE] = gen_wsr_intenable,
+ [PS] = gen_wsr_ps,
+ [ICOUNT] = gen_wsr_icount,
+ [ICOUNTLEVEL] = gen_wsr_icountlevel,
+ [CCOMPARE] = gen_wsr_ccompare,
+ [CCOMPARE + 1] = gen_wsr_ccompare,
+ [CCOMPARE + 2] = gen_wsr_ccompare,
+ };
+
+ if (wsr_handler[sr]) {
+ wsr_handler[sr](dc, sr, s);
+ } else {
+ tcg_gen_mov_i32(cpu_SR[sr], s);
+ }
+}
+
+static void gen_wur(uint32_t ur, TCGv_i32 s)
+{
+ switch (ur) {
+ case FCR:
+ gen_helper_wur_fcr(cpu_env, s);
+ break;
+
+ case FSR:
+ tcg_gen_andi_i32(cpu_UR[ur], s, 0xffffff80);
+ break;
+
+ default:
+ tcg_gen_mov_i32(cpu_UR[ur], s);
+ break;
+ }
+}
+
+static void gen_load_store_alignment(DisasContext *dc, int shift,
+ TCGv_i32 addr, bool no_hw_alignment)
+{
+ if (!option_enabled(dc, XTENSA_OPTION_UNALIGNED_EXCEPTION)) {
+ tcg_gen_andi_i32(addr, addr, ~0 << shift);
+ } else if (option_enabled(dc, XTENSA_OPTION_HW_ALIGNMENT) &&
+ no_hw_alignment) {
+ TCGLabel *label = gen_new_label();
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_andi_i32(tmp, addr, ~(~0 << shift));
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
+ gen_exception_cause_vaddr(dc, LOAD_STORE_ALIGNMENT_CAUSE, addr);
+ gen_set_label(label);
+ tcg_temp_free(tmp);
+ }
+}
+
+static void gen_waiti(DisasContext *dc, uint32_t imm4)
+{
+ TCGv_i32 pc = tcg_const_i32(dc->next_pc);
+ TCGv_i32 intlevel = tcg_const_i32(imm4);
+ gen_advance_ccount(dc);
+ gen_helper_waiti(cpu_env, pc, intlevel);
+ tcg_temp_free(pc);
+ tcg_temp_free(intlevel);
+}
+
+static bool gen_window_check1(DisasContext *dc, unsigned r1)
+{
+ if (r1 / 4 > dc->window) {
+ TCGv_i32 pc = tcg_const_i32(dc->pc);
+ TCGv_i32 w = tcg_const_i32(r1 / 4);
+
+ gen_advance_ccount(dc);
+ gen_helper_window_check(cpu_env, pc, w);
+ dc->is_jmp = DISAS_UPDATE;
+ return false;
+ }
+ return true;
+}
+
+static bool gen_window_check2(DisasContext *dc, unsigned r1, unsigned r2)
+{
+ return gen_window_check1(dc, r1 > r2 ? r1 : r2);
+}
+
+static bool gen_window_check3(DisasContext *dc, unsigned r1, unsigned r2,
+ unsigned r3)
+{
+ return gen_window_check2(dc, r1, r2 > r3 ? r2 : r3);
+}
+
+static TCGv_i32 gen_mac16_m(TCGv_i32 v, bool hi, bool is_unsigned)
+{
+ TCGv_i32 m = tcg_temp_new_i32();
+
+ if (hi) {
+ (is_unsigned ? tcg_gen_shri_i32 : tcg_gen_sari_i32)(m, v, 16);
+ } else {
+ (is_unsigned ? tcg_gen_ext16u_i32 : tcg_gen_ext16s_i32)(m, v);
+ }
+ return m;
+}
+
+static inline unsigned xtensa_op0_insn_len(unsigned op0)
+{
+ return op0 >= 8 ? 2 : 3;
+}
+
+static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc)
+{
+#define HAS_OPTION_BITS(opt) do { \
+ if (!option_bits_enabled(dc, opt)) { \
+ qemu_log_mask(LOG_GUEST_ERROR, "Option is not enabled %s:%d\n", \
+ __FILE__, __LINE__); \
+ goto invalid_opcode; \
+ } \
+ } while (0)
+
+#define HAS_OPTION(opt) HAS_OPTION_BITS(XTENSA_OPTION_BIT(opt))
+
+#define TBD() qemu_log_mask(LOG_UNIMP, "TBD(pc = %08x): %s:%d\n", dc->pc, __FILE__, __LINE__)
+#define RESERVED() do { \
+ qemu_log_mask(LOG_GUEST_ERROR, "RESERVED(pc = %08x, %02x%02x%02x): %s:%d\n", \
+ dc->pc, b0, b1, b2, __FILE__, __LINE__); \
+ goto invalid_opcode; \
+ } while (0)
+
+
+#ifdef TARGET_WORDS_BIGENDIAN
+#define OP0 (((b0) & 0xf0) >> 4)
+#define OP1 (((b2) & 0xf0) >> 4)
+#define OP2 ((b2) & 0xf)
+#define RRR_R ((b1) & 0xf)
+#define RRR_S (((b1) & 0xf0) >> 4)
+#define RRR_T ((b0) & 0xf)
+#else
+#define OP0 (((b0) & 0xf))
+#define OP1 (((b2) & 0xf))
+#define OP2 (((b2) & 0xf0) >> 4)
+#define RRR_R (((b1) & 0xf0) >> 4)
+#define RRR_S (((b1) & 0xf))
+#define RRR_T (((b0) & 0xf0) >> 4)
+#endif
+#define RRR_X ((RRR_R & 0x4) >> 2)
+#define RRR_Y ((RRR_T & 0x4) >> 2)
+#define RRR_W (RRR_R & 0x3)
+
+#define RRRN_R RRR_R
+#define RRRN_S RRR_S
+#define RRRN_T RRR_T
+
+#define RRI4_R RRR_R
+#define RRI4_S RRR_S
+#define RRI4_T RRR_T
+#ifdef TARGET_WORDS_BIGENDIAN
+#define RRI4_IMM4 ((b2) & 0xf)
+#else
+#define RRI4_IMM4 (((b2) & 0xf0) >> 4)
+#endif
+
+#define RRI8_R RRR_R
+#define RRI8_S RRR_S
+#define RRI8_T RRR_T
+#define RRI8_IMM8 (b2)
+#define RRI8_IMM8_SE ((((b2) & 0x80) ? 0xffffff00 : 0) | RRI8_IMM8)
+
+#ifdef TARGET_WORDS_BIGENDIAN
+#define RI16_IMM16 (((b1) << 8) | (b2))
+#else
+#define RI16_IMM16 (((b2) << 8) | (b1))
+#endif
+
+#ifdef TARGET_WORDS_BIGENDIAN
+#define CALL_N (((b0) & 0xc) >> 2)
+#define CALL_OFFSET ((((b0) & 0x3) << 16) | ((b1) << 8) | (b2))
+#else
+#define CALL_N (((b0) & 0x30) >> 4)
+#define CALL_OFFSET ((((b0) & 0xc0) >> 6) | ((b1) << 2) | ((b2) << 10))
+#endif
+#define CALL_OFFSET_SE \
+ (((CALL_OFFSET & 0x20000) ? 0xfffc0000 : 0) | CALL_OFFSET)
+
+#define CALLX_N CALL_N
+#ifdef TARGET_WORDS_BIGENDIAN
+#define CALLX_M ((b0) & 0x3)
+#else
+#define CALLX_M (((b0) & 0xc0) >> 6)
+#endif
+#define CALLX_S RRR_S
+
+#define BRI12_M CALLX_M
+#define BRI12_S RRR_S
+#ifdef TARGET_WORDS_BIGENDIAN
+#define BRI12_IMM12 ((((b1) & 0xf) << 8) | (b2))
+#else
+#define BRI12_IMM12 ((((b1) & 0xf0) >> 4) | ((b2) << 4))
+#endif
+#define BRI12_IMM12_SE (((BRI12_IMM12 & 0x800) ? 0xfffff000 : 0) | BRI12_IMM12)
+
+#define BRI8_M BRI12_M
+#define BRI8_R RRI8_R
+#define BRI8_S RRI8_S
+#define BRI8_IMM8 RRI8_IMM8
+#define BRI8_IMM8_SE RRI8_IMM8_SE
+
+#define RSR_SR (b1)
+
+ uint8_t b0 = cpu_ldub_code(env, dc->pc);
+ uint8_t b1 = cpu_ldub_code(env, dc->pc + 1);
+ uint8_t b2 = 0;
+ unsigned len = xtensa_op0_insn_len(OP0);
+
+ static const uint32_t B4CONST[] = {
+ 0xffffffff, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256
+ };
+
+ static const uint32_t B4CONSTU[] = {
+ 32768, 65536, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256
+ };
+
+ switch (len) {
+ case 2:
+ HAS_OPTION(XTENSA_OPTION_CODE_DENSITY);
+ break;
+
+ case 3:
+ b2 = cpu_ldub_code(env, dc->pc + 2);
+ break;
+
+ default:
+ RESERVED();
+ }
+ dc->next_pc = dc->pc + len;
+
+ switch (OP0) {
+ case 0: /*QRST*/
+ switch (OP1) {
+ case 0: /*RST0*/
+ switch (OP2) {
+ case 0: /*ST0*/
+ if ((RRR_R & 0xc) == 0x8) {
+ HAS_OPTION(XTENSA_OPTION_BOOLEAN);
+ }
+
+ switch (RRR_R) {
+ case 0: /*SNM0*/
+ switch (CALLX_M) {
+ case 0: /*ILL*/
+ gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
+ break;
+
+ case 1: /*reserved*/
+ RESERVED();
+ break;
+
+ case 2: /*JR*/
+ switch (CALLX_N) {
+ case 0: /*RET*/
+ case 2: /*JX*/
+ if (gen_window_check1(dc, CALLX_S)) {
+ gen_jump(dc, cpu_R[CALLX_S]);
+ }
+ break;
+
+ case 1: /*RETWw*/
+ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
+ {
+ TCGv_i32 tmp = tcg_const_i32(dc->pc);
+ gen_advance_ccount(dc);
+ gen_helper_retw(tmp, cpu_env, tmp);
+ gen_jump(dc, tmp);
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ case 3: /*reserved*/
+ RESERVED();
+ break;
+ }
+ break;
+
+ case 3: /*CALLX*/
+ if (!gen_window_check2(dc, CALLX_S, CALLX_N << 2)) {
+ break;
+ }
+ switch (CALLX_N) {
+ case 0: /*CALLX0*/
+ {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_mov_i32(tmp, cpu_R[CALLX_S]);
+ tcg_gen_movi_i32(cpu_R[0], dc->next_pc);
+ gen_jump(dc, tmp);
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ case 1: /*CALLX4w*/
+ case 2: /*CALLX8w*/
+ case 3: /*CALLX12w*/
+ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
+ {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ tcg_gen_mov_i32(tmp, cpu_R[CALLX_S]);
+ gen_callw(dc, CALLX_N, tmp);
+ tcg_temp_free(tmp);
+ }
+ break;
+ }
+ break;
+ }
+ break;
+
+ case 1: /*MOVSPw*/
+ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
+ if (gen_window_check2(dc, RRR_T, RRR_S)) {
+ TCGv_i32 pc = tcg_const_i32(dc->pc);
+ gen_advance_ccount(dc);
+ gen_helper_movsp(cpu_env, pc);
+ tcg_gen_mov_i32(cpu_R[RRR_T], cpu_R[RRR_S]);
+ tcg_temp_free(pc);
+ }
+ break;
+
+ case 2: /*SYNC*/
+ switch (RRR_T) {
+ case 0: /*ISYNC*/
+ break;
+
+ case 1: /*RSYNC*/
+ break;
+
+ case 2: /*ESYNC*/
+ break;
+
+ case 3: /*DSYNC*/
+ break;
+
+ case 8: /*EXCW*/
+ HAS_OPTION(XTENSA_OPTION_EXCEPTION);
+ break;
+
+ case 12: /*MEMW*/
+ break;
+
+ case 13: /*EXTW*/
+ break;
+
+ case 15: /*NOP*/
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+ }
+ break;
+
+ case 3: /*RFEIx*/
+ switch (RRR_T) {
+ case 0: /*RFETx*/
+ HAS_OPTION(XTENSA_OPTION_EXCEPTION);
+ switch (RRR_S) {
+ case 0: /*RFEx*/
+ if (gen_check_privilege(dc)) {
+ tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_EXCM);
+ gen_helper_check_interrupts(cpu_env);
+ gen_jump(dc, cpu_SR[EPC1]);
+ }
+ break;
+
+ case 1: /*RFUEx*/
+ RESERVED();
+ break;
+
+ case 2: /*RFDEx*/
+ if (gen_check_privilege(dc)) {
+ gen_jump(dc, cpu_SR[
+ dc->config->ndepc ? DEPC : EPC1]);
+ }
+ break;
+
+ case 4: /*RFWOw*/
+ case 5: /*RFWUw*/
+ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
+ if (gen_check_privilege(dc)) {
+ TCGv_i32 tmp = tcg_const_i32(1);
+
+ tcg_gen_andi_i32(
+ cpu_SR[PS], cpu_SR[PS], ~PS_EXCM);
+ tcg_gen_shl_i32(tmp, tmp, cpu_SR[WINDOW_BASE]);
+
+ if (RRR_S == 4) {
+ tcg_gen_andc_i32(cpu_SR[WINDOW_START],
+ cpu_SR[WINDOW_START], tmp);
+ } else {
+ tcg_gen_or_i32(cpu_SR[WINDOW_START],
+ cpu_SR[WINDOW_START], tmp);
+ }
+
+ gen_helper_restore_owb(cpu_env);
+ gen_helper_check_interrupts(cpu_env);
+ gen_jump(dc, cpu_SR[EPC1]);
+
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+ }
+ break;
+
+ case 1: /*RFIx*/
+ HAS_OPTION(XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT);
+ if (RRR_S >= 2 && RRR_S <= dc->config->nlevel) {
+ if (gen_check_privilege(dc)) {
+ tcg_gen_mov_i32(cpu_SR[PS],
+ cpu_SR[EPS2 + RRR_S - 2]);
+ gen_helper_check_interrupts(cpu_env);
+ gen_jump(dc, cpu_SR[EPC1 + RRR_S - 1]);
+ }
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "RFI %d is illegal\n", RRR_S);
+ gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
+ }
+ break;
+
+ case 2: /*RFME*/
+ TBD();
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+
+ }
+ break;
+
+ case 4: /*BREAKx*/
+ HAS_OPTION(XTENSA_OPTION_DEBUG);
+ if (dc->debug) {
+ gen_debug_exception(dc, DEBUGCAUSE_BI);
+ }
+ break;
+
+ case 5: /*SYSCALLx*/
+ HAS_OPTION(XTENSA_OPTION_EXCEPTION);
+ switch (RRR_S) {
+ case 0: /*SYSCALLx*/
+ gen_exception_cause(dc, SYSCALL_CAUSE);
+ break;
+
+ case 1: /*SIMCALL*/
+ if (semihosting_enabled()) {
+ if (gen_check_privilege(dc)) {
+ gen_helper_simcall(cpu_env);
+ }
+ } else {
+ qemu_log_mask(LOG_GUEST_ERROR, "SIMCALL but semihosting is disabled\n");
+ gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
+ }
+ break;
+
+ default:
+ RESERVED();
+ break;
+ }
+ break;
+
+ case 6: /*RSILx*/
+ HAS_OPTION(XTENSA_OPTION_INTERRUPT);
+ if (gen_check_privilege(dc) &&
+ gen_window_check1(dc, RRR_T)) {
+ tcg_gen_mov_i32(cpu_R[RRR_T], cpu_SR[PS]);
+ tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_INTLEVEL);
+ tcg_gen_ori_i32(cpu_SR[PS], cpu_SR[PS], RRR_S);
+ gen_helper_check_interrupts(cpu_env);
+ gen_jumpi_check_loop_end(dc, 0);
+ }
+ break;
+
+ case 7: /*WAITIx*/
+ HAS_OPTION(XTENSA_OPTION_INTERRUPT);
+ if (gen_check_privilege(dc)) {
+ gen_waiti(dc, RRR_S);
+ }
+ break;
+
+ case 8: /*ANY4p*/
+ case 9: /*ALL4p*/
+ case 10: /*ANY8p*/
+ case 11: /*ALL8p*/
+ HAS_OPTION(XTENSA_OPTION_BOOLEAN);
+ {
+ const unsigned shift = (RRR_R & 2) ? 8 : 4;
+ TCGv_i32 mask = tcg_const_i32(
+ ((1 << shift) - 1) << RRR_S);
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ tcg_gen_and_i32(tmp, cpu_SR[BR], mask);
+ if (RRR_R & 1) { /*ALL*/
+ tcg_gen_addi_i32(tmp, tmp, 1 << RRR_S);
+ } else { /*ANY*/
+ tcg_gen_add_i32(tmp, tmp, mask);
+ }
+ tcg_gen_shri_i32(tmp, tmp, RRR_S + shift);
+ tcg_gen_deposit_i32(cpu_SR[BR], cpu_SR[BR],
+ tmp, RRR_T, 1);
+ tcg_temp_free(mask);
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+
+ }
+ break;
+
+ case 1: /*AND*/
+ if (gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) {
+ tcg_gen_and_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
+ }
+ break;
+
+ case 2: /*OR*/
+ if (gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) {
+ tcg_gen_or_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
+ }
+ break;
+
+ case 3: /*XOR*/
+ if (gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) {
+ tcg_gen_xor_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
+ }
+ break;
+
+ case 4: /*ST1*/
+ switch (RRR_R) {
+ case 0: /*SSR*/
+ if (gen_window_check1(dc, RRR_S)) {
+ gen_right_shift_sar(dc, cpu_R[RRR_S]);
+ }
+ break;
+
+ case 1: /*SSL*/
+ if (gen_window_check1(dc, RRR_S)) {
+ gen_left_shift_sar(dc, cpu_R[RRR_S]);
+ }
+ break;
+
+ case 2: /*SSA8L*/
+ if (gen_window_check1(dc, RRR_S)) {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_shli_i32(tmp, cpu_R[RRR_S], 3);
+ gen_right_shift_sar(dc, tmp);
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ case 3: /*SSA8B*/
+ if (gen_window_check1(dc, RRR_S)) {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_shli_i32(tmp, cpu_R[RRR_S], 3);
+ gen_left_shift_sar(dc, tmp);
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ case 4: /*SSAI*/
+ {
+ TCGv_i32 tmp = tcg_const_i32(
+ RRR_S | ((RRR_T & 1) << 4));
+ gen_right_shift_sar(dc, tmp);
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ case 6: /*RER*/
+ TBD();
+ break;
+
+ case 7: /*WER*/
+ TBD();
+ break;
+
+ case 8: /*ROTWw*/
+ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
+ if (gen_check_privilege(dc)) {
+ TCGv_i32 tmp = tcg_const_i32(
+ RRR_T | ((RRR_T & 8) ? 0xfffffff0 : 0));
+ gen_helper_rotw(cpu_env, tmp);
+ tcg_temp_free(tmp);
+ /* This can change tb->flags, so exit tb */
+ gen_jumpi_check_loop_end(dc, -1);
+ }
+ break;
+
+ case 14: /*NSAu*/
+ HAS_OPTION(XTENSA_OPTION_MISC_OP_NSA);
+ if (gen_window_check2(dc, RRR_S, RRR_T)) {
+ gen_helper_nsa(cpu_R[RRR_T], cpu_R[RRR_S]);
+ }
+ break;
+
+ case 15: /*NSAUu*/
+ HAS_OPTION(XTENSA_OPTION_MISC_OP_NSA);
+ if (gen_window_check2(dc, RRR_S, RRR_T)) {
+ gen_helper_nsau(cpu_R[RRR_T], cpu_R[RRR_S]);
+ }
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+ }
+ break;
+
+ case 5: /*TLB*/
+ HAS_OPTION_BITS(
+ XTENSA_OPTION_BIT(XTENSA_OPTION_MMU) |
+ XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) |
+ XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION));
+ if (gen_check_privilege(dc) &&
+ gen_window_check2(dc, RRR_S, RRR_T)) {
+ TCGv_i32 dtlb = tcg_const_i32((RRR_R & 8) != 0);
+
+ switch (RRR_R & 7) {
+ case 3: /*RITLB0*/ /*RDTLB0*/
+ gen_helper_rtlb0(cpu_R[RRR_T],
+ cpu_env, cpu_R[RRR_S], dtlb);
+ break;
+
+ case 4: /*IITLB*/ /*IDTLB*/
+ gen_helper_itlb(cpu_env, cpu_R[RRR_S], dtlb);
+ /* This could change memory mapping, so exit tb */
+ gen_jumpi_check_loop_end(dc, -1);
+ break;
+
+ case 5: /*PITLB*/ /*PDTLB*/
+ tcg_gen_movi_i32(cpu_pc, dc->pc);
+ gen_helper_ptlb(cpu_R[RRR_T],
+ cpu_env, cpu_R[RRR_S], dtlb);
+ break;
+
+ case 6: /*WITLB*/ /*WDTLB*/
+ gen_helper_wtlb(
+ cpu_env, cpu_R[RRR_T], cpu_R[RRR_S], dtlb);
+ /* This could change memory mapping, so exit tb */
+ gen_jumpi_check_loop_end(dc, -1);
+ break;
+
+ case 7: /*RITLB1*/ /*RDTLB1*/
+ gen_helper_rtlb1(cpu_R[RRR_T],
+ cpu_env, cpu_R[RRR_S], dtlb);
+ break;
+
+ default:
+ tcg_temp_free(dtlb);
+ RESERVED();
+ break;
+ }
+ tcg_temp_free(dtlb);
+ }
+ break;
+
+ case 6: /*RT0*/
+ if (!gen_window_check2(dc, RRR_R, RRR_T)) {
+ break;
+ }
+ switch (RRR_S) {
+ case 0: /*NEG*/
+ tcg_gen_neg_i32(cpu_R[RRR_R], cpu_R[RRR_T]);
+ break;
+
+ case 1: /*ABS*/
+ {
+ TCGv_i32 zero = tcg_const_i32(0);
+ TCGv_i32 neg = tcg_temp_new_i32();
+
+ tcg_gen_neg_i32(neg, cpu_R[RRR_T]);
+ tcg_gen_movcond_i32(TCG_COND_GE, cpu_R[RRR_R],
+ cpu_R[RRR_T], zero, cpu_R[RRR_T], neg);
+ tcg_temp_free(neg);
+ tcg_temp_free(zero);
+ }
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+ }
+ break;
+
+ case 7: /*reserved*/
+ RESERVED();
+ break;
+
+ case 8: /*ADD*/
+ if (gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) {
+ tcg_gen_add_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
+ }
+ break;
+
+ case 9: /*ADD**/
+ case 10:
+ case 11:
+ if (gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_shli_i32(tmp, cpu_R[RRR_S], OP2 - 8);
+ tcg_gen_add_i32(cpu_R[RRR_R], tmp, cpu_R[RRR_T]);
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ case 12: /*SUB*/
+ if (gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) {
+ tcg_gen_sub_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
+ }
+ break;
+
+ case 13: /*SUB**/
+ case 14:
+ case 15:
+ if (gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_shli_i32(tmp, cpu_R[RRR_S], OP2 - 12);
+ tcg_gen_sub_i32(cpu_R[RRR_R], tmp, cpu_R[RRR_T]);
+ tcg_temp_free(tmp);
+ }
+ break;
+ }
+ break;
+
+ case 1: /*RST1*/
+ switch (OP2) {
+ case 0: /*SLLI*/
+ case 1:
+ if (gen_window_check2(dc, RRR_R, RRR_S)) {
+ tcg_gen_shli_i32(cpu_R[RRR_R], cpu_R[RRR_S],
+ 32 - (RRR_T | ((OP2 & 1) << 4)));
+ }
+ break;
+
+ case 2: /*SRAI*/
+ case 3:
+ if (gen_window_check2(dc, RRR_R, RRR_T)) {
+ tcg_gen_sari_i32(cpu_R[RRR_R], cpu_R[RRR_T],
+ RRR_S | ((OP2 & 1) << 4));
+ }
+ break;
+
+ case 4: /*SRLI*/
+ if (gen_window_check2(dc, RRR_R, RRR_T)) {
+ tcg_gen_shri_i32(cpu_R[RRR_R], cpu_R[RRR_T], RRR_S);
+ }
+ break;
+
+ case 6: /*XSR*/
+ if (gen_check_sr(dc, RSR_SR, SR_X) &&
+ (RSR_SR < 64 || gen_check_privilege(dc)) &&
+ gen_window_check1(dc, RRR_T)) {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ tcg_gen_mov_i32(tmp, cpu_R[RRR_T]);
+ gen_rsr(dc, cpu_R[RRR_T], RSR_SR);
+ gen_wsr(dc, RSR_SR, tmp);
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ /*
+ * Note: 64 bit ops are used here solely because SAR values
+ * have range 0..63
+ */
+#define gen_shift_reg(cmd, reg) do { \
+ TCGv_i64 tmp = tcg_temp_new_i64(); \
+ tcg_gen_extu_i32_i64(tmp, reg); \
+ tcg_gen_##cmd##_i64(v, v, tmp); \
+ tcg_gen_extrl_i64_i32(cpu_R[RRR_R], v); \
+ tcg_temp_free_i64(v); \
+ tcg_temp_free_i64(tmp); \
+ } while (0)
+
+#define gen_shift(cmd) gen_shift_reg(cmd, cpu_SR[SAR])
+
+ case 8: /*SRC*/
+ if (gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) {
+ TCGv_i64 v = tcg_temp_new_i64();
+ tcg_gen_concat_i32_i64(v, cpu_R[RRR_T], cpu_R[RRR_S]);
+ gen_shift(shr);
+ }
+ break;
+
+ case 9: /*SRL*/
+ if (!gen_window_check2(dc, RRR_R, RRR_T)) {
+ break;
+ }
+ if (dc->sar_5bit) {
+ tcg_gen_shr_i32(cpu_R[RRR_R], cpu_R[RRR_T], cpu_SR[SAR]);
+ } else {
+ TCGv_i64 v = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(v, cpu_R[RRR_T]);
+ gen_shift(shr);
+ }
+ break;
+
+ case 10: /*SLL*/
+ if (!gen_window_check2(dc, RRR_R, RRR_S)) {
+ break;
+ }
+ if (dc->sar_m32_5bit) {
+ tcg_gen_shl_i32(cpu_R[RRR_R], cpu_R[RRR_S], dc->sar_m32);
+ } else {
+ TCGv_i64 v = tcg_temp_new_i64();
+ TCGv_i32 s = tcg_const_i32(32);
+ tcg_gen_sub_i32(s, s, cpu_SR[SAR]);
+ tcg_gen_andi_i32(s, s, 0x3f);
+ tcg_gen_extu_i32_i64(v, cpu_R[RRR_S]);
+ gen_shift_reg(shl, s);
+ tcg_temp_free(s);
+ }
+ break;
+
+ case 11: /*SRA*/
+ if (!gen_window_check2(dc, RRR_R, RRR_T)) {
+ break;
+ }
+ if (dc->sar_5bit) {
+ tcg_gen_sar_i32(cpu_R[RRR_R], cpu_R[RRR_T], cpu_SR[SAR]);
+ } else {
+ TCGv_i64 v = tcg_temp_new_i64();
+ tcg_gen_ext_i32_i64(v, cpu_R[RRR_T]);
+ gen_shift(sar);
+ }
+ break;
+#undef gen_shift
+#undef gen_shift_reg
+
+ case 12: /*MUL16U*/
+ HAS_OPTION(XTENSA_OPTION_16_BIT_IMUL);
+ if (gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) {
+ TCGv_i32 v1 = tcg_temp_new_i32();
+ TCGv_i32 v2 = tcg_temp_new_i32();
+ tcg_gen_ext16u_i32(v1, cpu_R[RRR_S]);
+ tcg_gen_ext16u_i32(v2, cpu_R[RRR_T]);
+ tcg_gen_mul_i32(cpu_R[RRR_R], v1, v2);
+ tcg_temp_free(v2);
+ tcg_temp_free(v1);
+ }
+ break;
+
+ case 13: /*MUL16S*/
+ HAS_OPTION(XTENSA_OPTION_16_BIT_IMUL);
+ if (gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) {
+ TCGv_i32 v1 = tcg_temp_new_i32();
+ TCGv_i32 v2 = tcg_temp_new_i32();
+ tcg_gen_ext16s_i32(v1, cpu_R[RRR_S]);
+ tcg_gen_ext16s_i32(v2, cpu_R[RRR_T]);
+ tcg_gen_mul_i32(cpu_R[RRR_R], v1, v2);
+ tcg_temp_free(v2);
+ tcg_temp_free(v1);
+ }
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+ }
+ break;
+
+ case 2: /*RST2*/
+ if (OP2 >= 8 && !gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) {
+ break;
+ }
+
+ if (OP2 >= 12) {
+ HAS_OPTION(XTENSA_OPTION_32_BIT_IDIV);
+ TCGLabel *label = gen_new_label();
+ tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_T], 0, label);
+ gen_exception_cause(dc, INTEGER_DIVIDE_BY_ZERO_CAUSE);
+ gen_set_label(label);
+ }
+
+ switch (OP2) {
+#define BOOLEAN_LOGIC(fn, r, s, t) \
+ do { \
+ HAS_OPTION(XTENSA_OPTION_BOOLEAN); \
+ TCGv_i32 tmp1 = tcg_temp_new_i32(); \
+ TCGv_i32 tmp2 = tcg_temp_new_i32(); \
+ \
+ tcg_gen_shri_i32(tmp1, cpu_SR[BR], s); \
+ tcg_gen_shri_i32(tmp2, cpu_SR[BR], t); \
+ tcg_gen_##fn##_i32(tmp1, tmp1, tmp2); \
+ tcg_gen_deposit_i32(cpu_SR[BR], cpu_SR[BR], tmp1, r, 1); \
+ tcg_temp_free(tmp1); \
+ tcg_temp_free(tmp2); \
+ } while (0)
+
+ case 0: /*ANDBp*/
+ BOOLEAN_LOGIC(and, RRR_R, RRR_S, RRR_T);
+ break;
+
+ case 1: /*ANDBCp*/
+ BOOLEAN_LOGIC(andc, RRR_R, RRR_S, RRR_T);
+ break;
+
+ case 2: /*ORBp*/
+ BOOLEAN_LOGIC(or, RRR_R, RRR_S, RRR_T);
+ break;
+
+ case 3: /*ORBCp*/
+ BOOLEAN_LOGIC(orc, RRR_R, RRR_S, RRR_T);
+ break;
+
+ case 4: /*XORBp*/
+ BOOLEAN_LOGIC(xor, RRR_R, RRR_S, RRR_T);
+ break;
+
+#undef BOOLEAN_LOGIC
+
+ case 8: /*MULLi*/
+ HAS_OPTION(XTENSA_OPTION_32_BIT_IMUL);
+ tcg_gen_mul_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
+ break;
+
+ case 10: /*MULUHi*/
+ case 11: /*MULSHi*/
+ HAS_OPTION(XTENSA_OPTION_32_BIT_IMUL_HIGH);
+ {
+ TCGv lo = tcg_temp_new();
+
+ if (OP2 == 10) {
+ tcg_gen_mulu2_i32(lo, cpu_R[RRR_R],
+ cpu_R[RRR_S], cpu_R[RRR_T]);
+ } else {
+ tcg_gen_muls2_i32(lo, cpu_R[RRR_R],
+ cpu_R[RRR_S], cpu_R[RRR_T]);
+ }
+ tcg_temp_free(lo);
+ }
+ break;
+
+ case 12: /*QUOUi*/
+ tcg_gen_divu_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
+ break;
+
+ case 13: /*QUOSi*/
+ case 15: /*REMSi*/
+ {
+ TCGLabel *label1 = gen_new_label();
+ TCGLabel *label2 = gen_new_label();
+
+ tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_S], 0x80000000,
+ label1);
+ tcg_gen_brcondi_i32(TCG_COND_NE, cpu_R[RRR_T], 0xffffffff,
+ label1);
+ tcg_gen_movi_i32(cpu_R[RRR_R],
+ OP2 == 13 ? 0x80000000 : 0);
+ tcg_gen_br(label2);
+ gen_set_label(label1);
+ if (OP2 == 13) {
+ tcg_gen_div_i32(cpu_R[RRR_R],
+ cpu_R[RRR_S], cpu_R[RRR_T]);
+ } else {
+ tcg_gen_rem_i32(cpu_R[RRR_R],
+ cpu_R[RRR_S], cpu_R[RRR_T]);
+ }
+ gen_set_label(label2);
+ }
+ break;
+
+ case 14: /*REMUi*/
+ tcg_gen_remu_i32(cpu_R[RRR_R], cpu_R[RRR_S], cpu_R[RRR_T]);
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+ }
+ break;
+
+ case 3: /*RST3*/
+ switch (OP2) {
+ case 0: /*RSR*/
+ if (gen_check_sr(dc, RSR_SR, SR_R) &&
+ (RSR_SR < 64 || gen_check_privilege(dc)) &&
+ gen_window_check1(dc, RRR_T)) {
+ gen_rsr(dc, cpu_R[RRR_T], RSR_SR);
+ }
+ break;
+
+ case 1: /*WSR*/
+ if (gen_check_sr(dc, RSR_SR, SR_W) &&
+ (RSR_SR < 64 || gen_check_privilege(dc)) &&
+ gen_window_check1(dc, RRR_T)) {
+ gen_wsr(dc, RSR_SR, cpu_R[RRR_T]);
+ }
+ break;
+
+ case 2: /*SEXTu*/
+ HAS_OPTION(XTENSA_OPTION_MISC_OP_SEXT);
+ if (gen_window_check2(dc, RRR_R, RRR_S)) {
+ int shift = 24 - RRR_T;
+
+ if (shift == 24) {
+ tcg_gen_ext8s_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
+ } else if (shift == 16) {
+ tcg_gen_ext16s_i32(cpu_R[RRR_R], cpu_R[RRR_S]);
+ } else {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_shli_i32(tmp, cpu_R[RRR_S], shift);
+ tcg_gen_sari_i32(cpu_R[RRR_R], tmp, shift);
+ tcg_temp_free(tmp);
+ }
+ }
+ break;
+
+ case 3: /*CLAMPSu*/
+ HAS_OPTION(XTENSA_OPTION_MISC_OP_CLAMPS);
+ if (gen_window_check2(dc, RRR_R, RRR_S)) {
+ TCGv_i32 tmp1 = tcg_temp_new_i32();
+ TCGv_i32 tmp2 = tcg_temp_new_i32();
+ TCGv_i32 zero = tcg_const_i32(0);
+
+ tcg_gen_sari_i32(tmp1, cpu_R[RRR_S], 24 - RRR_T);
+ tcg_gen_xor_i32(tmp2, tmp1, cpu_R[RRR_S]);
+ tcg_gen_andi_i32(tmp2, tmp2, 0xffffffff << (RRR_T + 7));
+
+ tcg_gen_sari_i32(tmp1, cpu_R[RRR_S], 31);
+ tcg_gen_xori_i32(tmp1, tmp1, 0xffffffff >> (25 - RRR_T));
+
+ tcg_gen_movcond_i32(TCG_COND_EQ, cpu_R[RRR_R], tmp2, zero,
+ cpu_R[RRR_S], tmp1);
+ tcg_temp_free(tmp1);
+ tcg_temp_free(tmp2);
+ tcg_temp_free(zero);
+ }
+ break;
+
+ case 4: /*MINu*/
+ case 5: /*MAXu*/
+ case 6: /*MINUu*/
+ case 7: /*MAXUu*/
+ HAS_OPTION(XTENSA_OPTION_MISC_OP_MINMAX);
+ if (gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) {
+ static const TCGCond cond[] = {
+ TCG_COND_LE,
+ TCG_COND_GE,
+ TCG_COND_LEU,
+ TCG_COND_GEU
+ };
+ tcg_gen_movcond_i32(cond[OP2 - 4], cpu_R[RRR_R],
+ cpu_R[RRR_S], cpu_R[RRR_T],
+ cpu_R[RRR_S], cpu_R[RRR_T]);
+ }
+ break;
+
+ case 8: /*MOVEQZ*/
+ case 9: /*MOVNEZ*/
+ case 10: /*MOVLTZ*/
+ case 11: /*MOVGEZ*/
+ if (gen_window_check3(dc, RRR_R, RRR_S, RRR_T)) {
+ static const TCGCond cond[] = {
+ TCG_COND_EQ,
+ TCG_COND_NE,
+ TCG_COND_LT,
+ TCG_COND_GE,
+ };
+ TCGv_i32 zero = tcg_const_i32(0);
+
+ tcg_gen_movcond_i32(cond[OP2 - 8], cpu_R[RRR_R],
+ cpu_R[RRR_T], zero, cpu_R[RRR_S], cpu_R[RRR_R]);
+ tcg_temp_free(zero);
+ }
+ break;
+
+ case 12: /*MOVFp*/
+ case 13: /*MOVTp*/
+ HAS_OPTION(XTENSA_OPTION_BOOLEAN);
+ if (gen_window_check2(dc, RRR_R, RRR_S)) {
+ TCGv_i32 zero = tcg_const_i32(0);
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRR_T);
+ tcg_gen_movcond_i32(OP2 & 1 ? TCG_COND_NE : TCG_COND_EQ,
+ cpu_R[RRR_R], tmp, zero,
+ cpu_R[RRR_S], cpu_R[RRR_R]);
+
+ tcg_temp_free(tmp);
+ tcg_temp_free(zero);
+ }
+ break;
+
+ case 14: /*RUR*/
+ if (gen_window_check1(dc, RRR_R)) {
+ int st = (RRR_S << 4) + RRR_T;
+ if (uregnames[st].name) {
+ tcg_gen_mov_i32(cpu_R[RRR_R], cpu_UR[st]);
+ } else {
+ qemu_log_mask(LOG_UNIMP, "RUR %d not implemented, ", st);
+ TBD();
+ }
+ }
+ break;
+
+ case 15: /*WUR*/
+ if (gen_window_check1(dc, RRR_T)) {
+ if (uregnames[RSR_SR].name) {
+ gen_wur(RSR_SR, cpu_R[RRR_T]);
+ } else {
+ qemu_log_mask(LOG_UNIMP, "WUR %d not implemented, ", RSR_SR);
+ TBD();
+ }
+ }
+ break;
+
+ }
+ break;
+
+ case 4: /*EXTUI*/
+ case 5:
+ if (gen_window_check2(dc, RRR_R, RRR_T)) {
+ int shiftimm = RRR_S | ((OP1 & 1) << 4);
+ int maskimm = (1 << (OP2 + 1)) - 1;
+
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_shri_i32(tmp, cpu_R[RRR_T], shiftimm);
+ tcg_gen_andi_i32(cpu_R[RRR_R], tmp, maskimm);
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ case 6: /*CUST0*/
+ RESERVED();
+ break;
+
+ case 7: /*CUST1*/
+ RESERVED();
+ break;
+
+ case 8: /*LSCXp*/
+ switch (OP2) {
+ case 0: /*LSXf*/
+ case 1: /*LSXUf*/
+ case 4: /*SSXf*/
+ case 5: /*SSXUf*/
+ HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR);
+ if (gen_window_check2(dc, RRR_S, RRR_T) &&
+ gen_check_cpenable(dc, 0)) {
+ TCGv_i32 addr = tcg_temp_new_i32();
+ tcg_gen_add_i32(addr, cpu_R[RRR_S], cpu_R[RRR_T]);
+ gen_load_store_alignment(dc, 2, addr, false);
+ if (OP2 & 0x4) {
+ tcg_gen_qemu_st32(cpu_FR[RRR_R], addr, dc->cring);
+ } else {
+ tcg_gen_qemu_ld32u(cpu_FR[RRR_R], addr, dc->cring);
+ }
+ if (OP2 & 0x1) {
+ tcg_gen_mov_i32(cpu_R[RRR_S], addr);
+ }
+ tcg_temp_free(addr);
+ }
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+ }
+ break;
+
+ case 9: /*LSC4*/
+ if (!gen_window_check2(dc, RRR_S, RRR_T)) {
+ break;
+ }
+ switch (OP2) {
+ case 0: /*L32E*/
+ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
+ if (gen_check_privilege(dc) &&
+ gen_window_check2(dc, RRR_S, RRR_T)) {
+ TCGv_i32 addr = tcg_temp_new_i32();
+ tcg_gen_addi_i32(addr, cpu_R[RRR_S],
+ (0xffffffc0 | (RRR_R << 2)));
+ tcg_gen_qemu_ld32u(cpu_R[RRR_T], addr, dc->ring);
+ tcg_temp_free(addr);
+ }
+ break;
+
+ case 4: /*S32E*/
+ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
+ if (gen_check_privilege(dc) &&
+ gen_window_check2(dc, RRR_S, RRR_T)) {
+ TCGv_i32 addr = tcg_temp_new_i32();
+ tcg_gen_addi_i32(addr, cpu_R[RRR_S],
+ (0xffffffc0 | (RRR_R << 2)));
+ tcg_gen_qemu_st32(cpu_R[RRR_T], addr, dc->ring);
+ tcg_temp_free(addr);
+ }
+ break;
+
+ case 5: /*S32N*/
+ if (gen_window_check2(dc, RRI4_S, RRI4_T)) {
+ TCGv_i32 addr = tcg_temp_new_i32();
+
+ tcg_gen_addi_i32(addr, cpu_R[RRI4_S], RRI4_IMM4 << 2);
+ gen_load_store_alignment(dc, 2, addr, false);
+ tcg_gen_qemu_st32(cpu_R[RRI4_T], addr, dc->cring);
+ tcg_temp_free(addr);
+ }
+ break;
+
+ default:
+ RESERVED();
+ break;
+ }
+ break;
+
+ case 10: /*FP0*/
+ /*DEPBITS*/
+ if (option_enabled(dc, XTENSA_OPTION_DEPBITS)) {
+ if (!gen_window_check2(dc, RRR_S, RRR_T)) {
+ break;
+ }
+ tcg_gen_deposit_i32(cpu_R[RRR_T], cpu_R[RRR_T], cpu_R[RRR_S],
+ OP2, RRR_R + 1);
+ break;
+ }
+
+ HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR);
+ switch (OP2) {
+ case 0: /*ADD.Sf*/
+ if (gen_check_cpenable(dc, 0)) {
+ gen_helper_add_s(cpu_FR[RRR_R], cpu_env,
+ cpu_FR[RRR_S], cpu_FR[RRR_T]);
+ }
+ break;
+
+ case 1: /*SUB.Sf*/
+ if (gen_check_cpenable(dc, 0)) {
+ gen_helper_sub_s(cpu_FR[RRR_R], cpu_env,
+ cpu_FR[RRR_S], cpu_FR[RRR_T]);
+ }
+ break;
+
+ case 2: /*MUL.Sf*/
+ if (gen_check_cpenable(dc, 0)) {
+ gen_helper_mul_s(cpu_FR[RRR_R], cpu_env,
+ cpu_FR[RRR_S], cpu_FR[RRR_T]);
+ }
+ break;
+
+ case 4: /*MADD.Sf*/
+ if (gen_check_cpenable(dc, 0)) {
+ gen_helper_madd_s(cpu_FR[RRR_R], cpu_env,
+ cpu_FR[RRR_R], cpu_FR[RRR_S],
+ cpu_FR[RRR_T]);
+ }
+ break;
+
+ case 5: /*MSUB.Sf*/
+ if (gen_check_cpenable(dc, 0)) {
+ gen_helper_msub_s(cpu_FR[RRR_R], cpu_env,
+ cpu_FR[RRR_R], cpu_FR[RRR_S],
+ cpu_FR[RRR_T]);
+ }
+ break;
+
+ case 8: /*ROUND.Sf*/
+ case 9: /*TRUNC.Sf*/
+ case 10: /*FLOOR.Sf*/
+ case 11: /*CEIL.Sf*/
+ case 14: /*UTRUNC.Sf*/
+ if (gen_window_check1(dc, RRR_R) &&
+ gen_check_cpenable(dc, 0)) {
+ static const unsigned rounding_mode_const[] = {
+ float_round_nearest_even,
+ float_round_to_zero,
+ float_round_down,
+ float_round_up,
+ [6] = float_round_to_zero,
+ };
+ TCGv_i32 rounding_mode = tcg_const_i32(
+ rounding_mode_const[OP2 & 7]);
+ TCGv_i32 scale = tcg_const_i32(RRR_T);
+
+ if (OP2 == 14) {
+ gen_helper_ftoui(cpu_R[RRR_R], cpu_FR[RRR_S],
+ rounding_mode, scale);
+ } else {
+ gen_helper_ftoi(cpu_R[RRR_R], cpu_FR[RRR_S],
+ rounding_mode, scale);
+ }
+
+ tcg_temp_free(rounding_mode);
+ tcg_temp_free(scale);
+ }
+ break;
+
+ case 12: /*FLOAT.Sf*/
+ case 13: /*UFLOAT.Sf*/
+ if (gen_window_check1(dc, RRR_S) &&
+ gen_check_cpenable(dc, 0)) {
+ TCGv_i32 scale = tcg_const_i32(-RRR_T);
+
+ if (OP2 == 13) {
+ gen_helper_uitof(cpu_FR[RRR_R], cpu_env,
+ cpu_R[RRR_S], scale);
+ } else {
+ gen_helper_itof(cpu_FR[RRR_R], cpu_env,
+ cpu_R[RRR_S], scale);
+ }
+ tcg_temp_free(scale);
+ }
+ break;
+
+ case 15: /*FP1OP*/
+ switch (RRR_T) {
+ case 0: /*MOV.Sf*/
+ if (gen_check_cpenable(dc, 0)) {
+ tcg_gen_mov_i32(cpu_FR[RRR_R], cpu_FR[RRR_S]);
+ }
+ break;
+
+ case 1: /*ABS.Sf*/
+ if (gen_check_cpenable(dc, 0)) {
+ gen_helper_abs_s(cpu_FR[RRR_R], cpu_FR[RRR_S]);
+ }
+ break;
+
+ case 4: /*RFRf*/
+ if (gen_window_check1(dc, RRR_R) &&
+ gen_check_cpenable(dc, 0)) {
+ tcg_gen_mov_i32(cpu_R[RRR_R], cpu_FR[RRR_S]);
+ }
+ break;
+
+ case 5: /*WFRf*/
+ if (gen_window_check1(dc, RRR_S) &&
+ gen_check_cpenable(dc, 0)) {
+ tcg_gen_mov_i32(cpu_FR[RRR_R], cpu_R[RRR_S]);
+ }
+ break;
+
+ case 6: /*NEG.Sf*/
+ if (gen_check_cpenable(dc, 0)) {
+ gen_helper_neg_s(cpu_FR[RRR_R], cpu_FR[RRR_S]);
+ }
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+ }
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+ }
+ break;
+
+ case 11: /*FP1*/
+ /*DEPBITS*/
+ if (option_enabled(dc, XTENSA_OPTION_DEPBITS)) {
+ if (!gen_window_check2(dc, RRR_S, RRR_T)) {
+ break;
+ }
+ tcg_gen_deposit_i32(cpu_R[RRR_T], cpu_R[RRR_T], cpu_R[RRR_S],
+ OP2 + 16, RRR_R + 1);
+ break;
+ }
+
+ HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR);
+
+#define gen_compare(rel, br, a, b) \
+ do { \
+ if (gen_check_cpenable(dc, 0)) { \
+ TCGv_i32 bit = tcg_const_i32(1 << br); \
+ \
+ gen_helper_##rel(cpu_env, bit, cpu_FR[a], cpu_FR[b]); \
+ tcg_temp_free(bit); \
+ } \
+ } while (0)
+
+ switch (OP2) {
+ case 1: /*UN.Sf*/
+ gen_compare(un_s, RRR_R, RRR_S, RRR_T);
+ break;
+
+ case 2: /*OEQ.Sf*/
+ gen_compare(oeq_s, RRR_R, RRR_S, RRR_T);
+ break;
+
+ case 3: /*UEQ.Sf*/
+ gen_compare(ueq_s, RRR_R, RRR_S, RRR_T);
+ break;
+
+ case 4: /*OLT.Sf*/
+ gen_compare(olt_s, RRR_R, RRR_S, RRR_T);
+ break;
+
+ case 5: /*ULT.Sf*/
+ gen_compare(ult_s, RRR_R, RRR_S, RRR_T);
+ break;
+
+ case 6: /*OLE.Sf*/
+ gen_compare(ole_s, RRR_R, RRR_S, RRR_T);
+ break;
+
+ case 7: /*ULE.Sf*/
+ gen_compare(ule_s, RRR_R, RRR_S, RRR_T);
+ break;
+
+#undef gen_compare
+
+ case 8: /*MOVEQZ.Sf*/
+ case 9: /*MOVNEZ.Sf*/
+ case 10: /*MOVLTZ.Sf*/
+ case 11: /*MOVGEZ.Sf*/
+ if (gen_window_check1(dc, RRR_T) &&
+ gen_check_cpenable(dc, 0)) {
+ static const TCGCond cond[] = {
+ TCG_COND_EQ,
+ TCG_COND_NE,
+ TCG_COND_LT,
+ TCG_COND_GE,
+ };
+ TCGv_i32 zero = tcg_const_i32(0);
+
+ tcg_gen_movcond_i32(cond[OP2 - 8], cpu_FR[RRR_R],
+ cpu_R[RRR_T], zero, cpu_FR[RRR_S], cpu_FR[RRR_R]);
+ tcg_temp_free(zero);
+ }
+ break;
+
+ case 12: /*MOVF.Sf*/
+ case 13: /*MOVT.Sf*/
+ HAS_OPTION(XTENSA_OPTION_BOOLEAN);
+ if (gen_check_cpenable(dc, 0)) {
+ TCGv_i32 zero = tcg_const_i32(0);
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRR_T);
+ tcg_gen_movcond_i32(OP2 & 1 ? TCG_COND_NE : TCG_COND_EQ,
+ cpu_FR[RRR_R], tmp, zero,
+ cpu_FR[RRR_S], cpu_FR[RRR_R]);
+
+ tcg_temp_free(tmp);
+ tcg_temp_free(zero);
+ }
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+ }
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+ }
+ break;
+
+ case 1: /*L32R*/
+ if (gen_window_check1(dc, RRR_T)) {
+ TCGv_i32 tmp = tcg_const_i32(
+ ((dc->tb->flags & XTENSA_TBFLAG_LITBASE) ?
+ 0 : ((dc->pc + 3) & ~3)) +
+ (0xfffc0000 | (RI16_IMM16 << 2)));
+
+ if (dc->tb->flags & XTENSA_TBFLAG_LITBASE) {
+ tcg_gen_add_i32(tmp, tmp, dc->litbase);
+ }
+ tcg_gen_qemu_ld32u(cpu_R[RRR_T], tmp, dc->cring);
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ case 2: /*LSAI*/
+#define gen_load_store(type, shift) do { \
+ if (gen_window_check2(dc, RRI8_S, RRI8_T)) { \
+ TCGv_i32 addr = tcg_temp_new_i32(); \
+ \
+ tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << shift); \
+ if (shift) { \
+ gen_load_store_alignment(dc, shift, addr, false); \
+ } \
+ tcg_gen_qemu_##type(cpu_R[RRI8_T], addr, dc->cring); \
+ tcg_temp_free(addr); \
+ } \
+ } while (0)
+
+ switch (RRI8_R) {
+ case 0: /*L8UI*/
+ gen_load_store(ld8u, 0);
+ break;
+
+ case 1: /*L16UI*/
+ gen_load_store(ld16u, 1);
+ break;
+
+ case 2: /*L32I*/
+ gen_load_store(ld32u, 2);
+ break;
+
+ case 4: /*S8I*/
+ gen_load_store(st8, 0);
+ break;
+
+ case 5: /*S16I*/
+ gen_load_store(st16, 1);
+ break;
+
+ case 6: /*S32I*/
+ gen_load_store(st32, 2);
+ break;
+
+#define gen_dcache_hit_test(w, shift) do { \
+ if (gen_window_check1(dc, RRI##w##_S)) { \
+ TCGv_i32 addr = tcg_temp_new_i32(); \
+ TCGv_i32 res = tcg_temp_new_i32(); \
+ tcg_gen_addi_i32(addr, cpu_R[RRI##w##_S], \
+ RRI##w##_IMM##w << shift); \
+ tcg_gen_qemu_ld8u(res, addr, dc->cring); \
+ tcg_temp_free(addr); \
+ tcg_temp_free(res); \
+ } \
+ } while (0)
+
+#define gen_dcache_hit_test4() gen_dcache_hit_test(4, 4)
+#define gen_dcache_hit_test8() gen_dcache_hit_test(8, 2)
+
+ case 7: /*CACHEc*/
+ if (RRI8_T < 8) {
+ HAS_OPTION(XTENSA_OPTION_DCACHE);
+ }
+
+ switch (RRI8_T) {
+ case 0: /*DPFRc*/
+ gen_window_check1(dc, RRI8_S);
+ break;
+
+ case 1: /*DPFWc*/
+ gen_window_check1(dc, RRI8_S);
+ break;
+
+ case 2: /*DPFROc*/
+ gen_window_check1(dc, RRI8_S);
+ break;
+
+ case 3: /*DPFWOc*/
+ gen_window_check1(dc, RRI8_S);
+ break;
+
+ case 4: /*DHWBc*/
+ gen_dcache_hit_test8();
+ break;
+
+ case 5: /*DHWBIc*/
+ gen_dcache_hit_test8();
+ break;
+
+ case 6: /*DHIc*/
+ if (gen_check_privilege(dc)) {
+ gen_dcache_hit_test8();
+ }
+ break;
+
+ case 7: /*DIIc*/
+ if (gen_check_privilege(dc)) {
+ gen_window_check1(dc, RRI8_S);
+ }
+ break;
+
+ case 8: /*DCEc*/
+ switch (OP1) {
+ case 0: /*DPFLl*/
+ HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK);
+ if (gen_check_privilege(dc)) {
+ gen_dcache_hit_test4();
+ }
+ break;
+
+ case 2: /*DHUl*/
+ HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK);
+ if (gen_check_privilege(dc)) {
+ gen_dcache_hit_test4();
+ }
+ break;
+
+ case 3: /*DIUl*/
+ HAS_OPTION(XTENSA_OPTION_DCACHE_INDEX_LOCK);
+ if (gen_check_privilege(dc)) {
+ gen_window_check1(dc, RRI4_S);
+ }
+ break;
+
+ case 4: /*DIWBc*/
+ HAS_OPTION(XTENSA_OPTION_DCACHE);
+ if (gen_check_privilege(dc)) {
+ gen_window_check1(dc, RRI4_S);
+ }
+ break;
+
+ case 5: /*DIWBIc*/
+ HAS_OPTION(XTENSA_OPTION_DCACHE);
+ if (gen_check_privilege(dc)) {
+ gen_window_check1(dc, RRI4_S);
+ }
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+
+ }
+ break;
+
+#undef gen_dcache_hit_test
+#undef gen_dcache_hit_test4
+#undef gen_dcache_hit_test8
+
+#define gen_icache_hit_test(w, shift) do { \
+ if (gen_window_check1(dc, RRI##w##_S)) { \
+ TCGv_i32 addr = tcg_temp_new_i32(); \
+ tcg_gen_movi_i32(cpu_pc, dc->pc); \
+ tcg_gen_addi_i32(addr, cpu_R[RRI##w##_S], \
+ RRI##w##_IMM##w << shift); \
+ gen_helper_itlb_hit_test(cpu_env, addr); \
+ tcg_temp_free(addr); \
+ }\
+ } while (0)
+
+#define gen_icache_hit_test4() gen_icache_hit_test(4, 4)
+#define gen_icache_hit_test8() gen_icache_hit_test(8, 2)
+
+ case 12: /*IPFc*/
+ HAS_OPTION(XTENSA_OPTION_ICACHE);
+ gen_window_check1(dc, RRI8_S);
+ break;
+
+ case 13: /*ICEc*/
+ switch (OP1) {
+ case 0: /*IPFLl*/
+ HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK);
+ if (gen_check_privilege(dc)) {
+ gen_icache_hit_test4();
+ }
+ break;
+
+ case 2: /*IHUl*/
+ HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK);
+ if (gen_check_privilege(dc)) {
+ gen_icache_hit_test4();
+ }
+ break;
+
+ case 3: /*IIUl*/
+ HAS_OPTION(XTENSA_OPTION_ICACHE_INDEX_LOCK);
+ if (gen_check_privilege(dc)) {
+ gen_window_check1(dc, RRI4_S);
+ }
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+ }
+ break;
+
+ case 14: /*IHIc*/
+ HAS_OPTION(XTENSA_OPTION_ICACHE);
+ gen_icache_hit_test8();
+ break;
+
+ case 15: /*IIIc*/
+ HAS_OPTION(XTENSA_OPTION_ICACHE);
+ if (gen_check_privilege(dc)) {
+ gen_window_check1(dc, RRI8_S);
+ }
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+ }
+ break;
+
+#undef gen_icache_hit_test
+#undef gen_icache_hit_test4
+#undef gen_icache_hit_test8
+
+ case 9: /*L16SI*/
+ gen_load_store(ld16s, 1);
+ break;
+#undef gen_load_store
+
+ case 10: /*MOVI*/
+ if (gen_window_check1(dc, RRI8_T)) {
+ tcg_gen_movi_i32(cpu_R[RRI8_T],
+ RRI8_IMM8 | (RRI8_S << 8) |
+ ((RRI8_S & 0x8) ? 0xfffff000 : 0));
+ }
+ break;
+
+#define gen_load_store_no_hw_align(type) do { \
+ if (gen_window_check2(dc, RRI8_S, RRI8_T)) { \
+ TCGv_i32 addr = tcg_temp_local_new_i32(); \
+ tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2); \
+ gen_load_store_alignment(dc, 2, addr, true); \
+ tcg_gen_qemu_##type(cpu_R[RRI8_T], addr, dc->cring); \
+ tcg_temp_free(addr); \
+ } \
+ } while (0)
+
+ case 11: /*L32AIy*/
+ HAS_OPTION(XTENSA_OPTION_MP_SYNCHRO);
+ gen_load_store_no_hw_align(ld32u); /*TODO acquire?*/
+ break;
+
+ case 12: /*ADDI*/
+ if (gen_window_check2(dc, RRI8_S, RRI8_T)) {
+ tcg_gen_addi_i32(cpu_R[RRI8_T], cpu_R[RRI8_S], RRI8_IMM8_SE);
+ }
+ break;
+
+ case 13: /*ADDMI*/
+ if (gen_window_check2(dc, RRI8_S, RRI8_T)) {
+ tcg_gen_addi_i32(cpu_R[RRI8_T], cpu_R[RRI8_S],
+ RRI8_IMM8_SE << 8);
+ }
+ break;
+
+ case 14: /*S32C1Iy*/
+ HAS_OPTION(XTENSA_OPTION_CONDITIONAL_STORE);
+ if (gen_window_check2(dc, RRI8_S, RRI8_T)) {
+ TCGLabel *label = gen_new_label();
+ TCGv_i32 tmp = tcg_temp_local_new_i32();
+ TCGv_i32 addr = tcg_temp_local_new_i32();
+ TCGv_i32 tpc;
+
+ tcg_gen_mov_i32(tmp, cpu_R[RRI8_T]);
+ tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2);
+ gen_load_store_alignment(dc, 2, addr, true);
+
+ gen_advance_ccount(dc);
+ tpc = tcg_const_i32(dc->pc);
+ gen_helper_check_atomctl(cpu_env, tpc, addr);
+ tcg_gen_qemu_ld32u(cpu_R[RRI8_T], addr, dc->cring);
+ tcg_gen_brcond_i32(TCG_COND_NE, cpu_R[RRI8_T],
+ cpu_SR[SCOMPARE1], label);
+
+ tcg_gen_qemu_st32(tmp, addr, dc->cring);
+
+ gen_set_label(label);
+ tcg_temp_free(tpc);
+ tcg_temp_free(addr);
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ case 15: /*S32RIy*/
+ HAS_OPTION(XTENSA_OPTION_MP_SYNCHRO);
+ gen_load_store_no_hw_align(st32); /*TODO release?*/
+ break;
+#undef gen_load_store_no_hw_align
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+ }
+ break;
+
+ case 3: /*LSCIp*/
+ switch (RRI8_R) {
+ case 0: /*LSIf*/
+ case 4: /*SSIf*/
+ case 8: /*LSIUf*/
+ case 12: /*SSIUf*/
+ HAS_OPTION(XTENSA_OPTION_FP_COPROCESSOR);
+ if (gen_window_check1(dc, RRI8_S) &&
+ gen_check_cpenable(dc, 0)) {
+ TCGv_i32 addr = tcg_temp_new_i32();
+ tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2);
+ gen_load_store_alignment(dc, 2, addr, false);
+ if (RRI8_R & 0x4) {
+ tcg_gen_qemu_st32(cpu_FR[RRI8_T], addr, dc->cring);
+ } else {
+ tcg_gen_qemu_ld32u(cpu_FR[RRI8_T], addr, dc->cring);
+ }
+ if (RRI8_R & 0x8) {
+ tcg_gen_mov_i32(cpu_R[RRI8_S], addr);
+ }
+ tcg_temp_free(addr);
+ }
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+ }
+ break;
+
+ case 4: /*MAC16d*/
+ HAS_OPTION(XTENSA_OPTION_MAC16);
+ {
+ enum {
+ MAC16_UMUL = 0x0,
+ MAC16_MUL = 0x4,
+ MAC16_MULA = 0x8,
+ MAC16_MULS = 0xc,
+ MAC16_NONE = 0xf,
+ } op = OP1 & 0xc;
+ bool is_m1_sr = (OP2 & 0x3) == 2;
+ bool is_m2_sr = (OP2 & 0xc) == 0;
+ uint32_t ld_offset = 0;
+
+ if (OP2 > 9) {
+ RESERVED();
+ }
+
+ switch (OP2 & 2) {
+ case 0: /*MACI?/MACC?*/
+ is_m1_sr = true;
+ ld_offset = (OP2 & 1) ? -4 : 4;
+
+ if (OP2 >= 8) { /*MACI/MACC*/
+ if (OP1 == 0) { /*LDINC/LDDEC*/
+ op = MAC16_NONE;
+ } else {
+ RESERVED();
+ }
+ } else if (op != MAC16_MULA) { /*MULA.*.*.LDINC/LDDEC*/
+ RESERVED();
+ }
+ break;
+
+ case 2: /*MACD?/MACA?*/
+ if (op == MAC16_UMUL && OP2 != 7) { /*UMUL only in MACAA*/
+ RESERVED();
+ }
+ break;
+ }
+
+ if (op != MAC16_NONE) {
+ if (!is_m1_sr && !gen_window_check1(dc, RRR_S)) {
+ break;
+ }
+ if (!is_m2_sr && !gen_window_check1(dc, RRR_T)) {
+ break;
+ }
+ }
+
+ if (ld_offset && !gen_window_check1(dc, RRR_S)) {
+ break;
+ }
+
+ {
+ TCGv_i32 vaddr = tcg_temp_new_i32();
+ TCGv_i32 mem32 = tcg_temp_new_i32();
+
+ if (ld_offset) {
+ tcg_gen_addi_i32(vaddr, cpu_R[RRR_S], ld_offset);
+ gen_load_store_alignment(dc, 2, vaddr, false);
+ tcg_gen_qemu_ld32u(mem32, vaddr, dc->cring);
+ }
+ if (op != MAC16_NONE) {
+ TCGv_i32 m1 = gen_mac16_m(
+ is_m1_sr ? cpu_SR[MR + RRR_X] : cpu_R[RRR_S],
+ OP1 & 1, op == MAC16_UMUL);
+ TCGv_i32 m2 = gen_mac16_m(
+ is_m2_sr ? cpu_SR[MR + 2 + RRR_Y] : cpu_R[RRR_T],
+ OP1 & 2, op == MAC16_UMUL);
+
+ if (op == MAC16_MUL || op == MAC16_UMUL) {
+ tcg_gen_mul_i32(cpu_SR[ACCLO], m1, m2);
+ if (op == MAC16_UMUL) {
+ tcg_gen_movi_i32(cpu_SR[ACCHI], 0);
+ } else {
+ tcg_gen_sari_i32(cpu_SR[ACCHI], cpu_SR[ACCLO], 31);
+ }
+ } else {
+ TCGv_i32 lo = tcg_temp_new_i32();
+ TCGv_i32 hi = tcg_temp_new_i32();
+
+ tcg_gen_mul_i32(lo, m1, m2);
+ tcg_gen_sari_i32(hi, lo, 31);
+ if (op == MAC16_MULA) {
+ tcg_gen_add2_i32(cpu_SR[ACCLO], cpu_SR[ACCHI],
+ cpu_SR[ACCLO], cpu_SR[ACCHI],
+ lo, hi);
+ } else {
+ tcg_gen_sub2_i32(cpu_SR[ACCLO], cpu_SR[ACCHI],
+ cpu_SR[ACCLO], cpu_SR[ACCHI],
+ lo, hi);
+ }
+ tcg_gen_ext8s_i32(cpu_SR[ACCHI], cpu_SR[ACCHI]);
+
+ tcg_temp_free_i32(lo);
+ tcg_temp_free_i32(hi);
+ }
+ tcg_temp_free(m1);
+ tcg_temp_free(m2);
+ }
+ if (ld_offset) {
+ tcg_gen_mov_i32(cpu_R[RRR_S], vaddr);
+ tcg_gen_mov_i32(cpu_SR[MR + RRR_W], mem32);
+ }
+ tcg_temp_free(vaddr);
+ tcg_temp_free(mem32);
+ }
+ }
+ break;
+
+ case 5: /*CALLN*/
+ switch (CALL_N) {
+ case 0: /*CALL0*/
+ tcg_gen_movi_i32(cpu_R[0], dc->next_pc);
+ gen_jumpi(dc, (dc->pc & ~3) + (CALL_OFFSET_SE << 2) + 4, 0);
+ break;
+
+ case 1: /*CALL4w*/
+ case 2: /*CALL8w*/
+ case 3: /*CALL12w*/
+ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
+ if (gen_window_check1(dc, CALL_N << 2)) {
+ gen_callwi(dc, CALL_N,
+ (dc->pc & ~3) + (CALL_OFFSET_SE << 2) + 4, 0);
+ }
+ break;
+ }
+ break;
+
+ case 6: /*SI*/
+ switch (CALL_N) {
+ case 0: /*J*/
+ gen_jumpi(dc, dc->pc + 4 + CALL_OFFSET_SE, 0);
+ break;
+
+ case 1: /*BZ*/
+ if (gen_window_check1(dc, BRI12_S)) {
+ static const TCGCond cond[] = {
+ TCG_COND_EQ, /*BEQZ*/
+ TCG_COND_NE, /*BNEZ*/
+ TCG_COND_LT, /*BLTZ*/
+ TCG_COND_GE, /*BGEZ*/
+ };
+
+ gen_brcondi(dc, cond[BRI12_M & 3], cpu_R[BRI12_S], 0,
+ 4 + BRI12_IMM12_SE);
+ }
+ break;
+
+ case 2: /*BI0*/
+ if (gen_window_check1(dc, BRI8_S)) {
+ static const TCGCond cond[] = {
+ TCG_COND_EQ, /*BEQI*/
+ TCG_COND_NE, /*BNEI*/
+ TCG_COND_LT, /*BLTI*/
+ TCG_COND_GE, /*BGEI*/
+ };
+
+ gen_brcondi(dc, cond[BRI8_M & 3],
+ cpu_R[BRI8_S], B4CONST[BRI8_R], 4 + BRI8_IMM8_SE);
+ }
+ break;
+
+ case 3: /*BI1*/
+ switch (BRI8_M) {
+ case 0: /*ENTRYw*/
+ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
+ {
+ TCGv_i32 pc = tcg_const_i32(dc->pc);
+ TCGv_i32 s = tcg_const_i32(BRI12_S);
+ TCGv_i32 imm = tcg_const_i32(BRI12_IMM12);
+ gen_advance_ccount(dc);
+ gen_helper_entry(cpu_env, pc, s, imm);
+ tcg_temp_free(imm);
+ tcg_temp_free(s);
+ tcg_temp_free(pc);
+ /* This can change tb->flags, so exit tb */
+ gen_jumpi_check_loop_end(dc, -1);
+ }
+ break;
+
+ case 1: /*B1*/
+ switch (BRI8_R) {
+ case 0: /*BFp*/
+ case 1: /*BTp*/
+ HAS_OPTION(XTENSA_OPTION_BOOLEAN);
+ {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_andi_i32(tmp, cpu_SR[BR], 1 << RRI8_S);
+ gen_brcondi(dc,
+ BRI8_R == 1 ? TCG_COND_NE : TCG_COND_EQ,
+ tmp, 0, 4 + RRI8_IMM8_SE);
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ case 8: /*LOOP*/
+ case 9: /*LOOPNEZ*/
+ case 10: /*LOOPGTZ*/
+ HAS_OPTION(XTENSA_OPTION_LOOP);
+ if (gen_window_check1(dc, RRI8_S)) {
+ uint32_t lend = dc->pc + RRI8_IMM8 + 4;
+ TCGv_i32 tmp = tcg_const_i32(lend);
+
+ tcg_gen_subi_i32(cpu_SR[LCOUNT], cpu_R[RRI8_S], 1);
+ tcg_gen_movi_i32(cpu_SR[LBEG], dc->next_pc);
+ gen_helper_wsr_lend(cpu_env, tmp);
+ tcg_temp_free(tmp);
+
+ if (BRI8_R > 8) {
+ TCGLabel *label = gen_new_label();
+ tcg_gen_brcondi_i32(
+ BRI8_R == 9 ? TCG_COND_NE : TCG_COND_GT,
+ cpu_R[RRI8_S], 0, label);
+ gen_jumpi(dc, lend, 1);
+ gen_set_label(label);
+ }
+
+ gen_jumpi(dc, dc->next_pc, 0);
+ }
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+
+ }
+ break;
+
+ case 2: /*BLTUI*/
+ case 3: /*BGEUI*/
+ if (gen_window_check1(dc, BRI8_S)) {
+ gen_brcondi(dc, BRI8_M == 2 ? TCG_COND_LTU : TCG_COND_GEU,
+ cpu_R[BRI8_S], B4CONSTU[BRI8_R],
+ 4 + BRI8_IMM8_SE);
+ }
+ break;
+ }
+ break;
+
+ }
+ break;
+
+ case 7: /*B*/
+ {
+ TCGCond eq_ne = (RRI8_R & 8) ? TCG_COND_NE : TCG_COND_EQ;
+
+ switch (RRI8_R & 7) {
+ case 0: /*BNONE*/ /*BANY*/
+ if (gen_window_check2(dc, RRI8_S, RRI8_T)) {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_and_i32(tmp, cpu_R[RRI8_S], cpu_R[RRI8_T]);
+ gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE);
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ case 1: /*BEQ*/ /*BNE*/
+ case 2: /*BLT*/ /*BGE*/
+ case 3: /*BLTU*/ /*BGEU*/
+ if (gen_window_check2(dc, RRI8_S, RRI8_T)) {
+ static const TCGCond cond[] = {
+ [1] = TCG_COND_EQ,
+ [2] = TCG_COND_LT,
+ [3] = TCG_COND_LTU,
+ [9] = TCG_COND_NE,
+ [10] = TCG_COND_GE,
+ [11] = TCG_COND_GEU,
+ };
+ gen_brcond(dc, cond[RRI8_R], cpu_R[RRI8_S], cpu_R[RRI8_T],
+ 4 + RRI8_IMM8_SE);
+ }
+ break;
+
+ case 4: /*BALL*/ /*BNALL*/
+ if (gen_window_check2(dc, RRI8_S, RRI8_T)) {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_and_i32(tmp, cpu_R[RRI8_S], cpu_R[RRI8_T]);
+ gen_brcond(dc, eq_ne, tmp, cpu_R[RRI8_T],
+ 4 + RRI8_IMM8_SE);
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ case 5: /*BBC*/ /*BBS*/
+ if (gen_window_check2(dc, RRI8_S, RRI8_T)) {
+#ifdef TARGET_WORDS_BIGENDIAN
+ TCGv_i32 bit = tcg_const_i32(0x80000000);
+#else
+ TCGv_i32 bit = tcg_const_i32(0x00000001);
+#endif
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_andi_i32(tmp, cpu_R[RRI8_T], 0x1f);
+#ifdef TARGET_WORDS_BIGENDIAN
+ tcg_gen_shr_i32(bit, bit, tmp);
+#else
+ tcg_gen_shl_i32(bit, bit, tmp);
+#endif
+ tcg_gen_and_i32(tmp, cpu_R[RRI8_S], bit);
+ gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE);
+ tcg_temp_free(tmp);
+ tcg_temp_free(bit);
+ }
+ break;
+
+ case 6: /*BBCI*/ /*BBSI*/
+ case 7:
+ if (gen_window_check1(dc, RRI8_S)) {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_andi_i32(tmp, cpu_R[RRI8_S],
+#ifdef TARGET_WORDS_BIGENDIAN
+ 0x80000000 >> (((RRI8_R & 1) << 4) | RRI8_T));
+#else
+ 0x00000001 << (((RRI8_R & 1) << 4) | RRI8_T));
+#endif
+ gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE);
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ }
+ }
+ break;
+
+#define gen_narrow_load_store(type) do { \
+ if (gen_window_check2(dc, RRRN_S, RRRN_T)) { \
+ TCGv_i32 addr = tcg_temp_new_i32(); \
+ tcg_gen_addi_i32(addr, cpu_R[RRRN_S], RRRN_R << 2); \
+ gen_load_store_alignment(dc, 2, addr, false); \
+ tcg_gen_qemu_##type(cpu_R[RRRN_T], addr, dc->cring); \
+ tcg_temp_free(addr); \
+ } \
+ } while (0)
+
+ case 8: /*L32I.Nn*/
+ gen_narrow_load_store(ld32u);
+ break;
+
+ case 9: /*S32I.Nn*/
+ gen_narrow_load_store(st32);
+ break;
+#undef gen_narrow_load_store
+
+ case 10: /*ADD.Nn*/
+ if (gen_window_check3(dc, RRRN_R, RRRN_S, RRRN_T)) {
+ tcg_gen_add_i32(cpu_R[RRRN_R], cpu_R[RRRN_S], cpu_R[RRRN_T]);
+ }
+ break;
+
+ case 11: /*ADDI.Nn*/
+ if (gen_window_check2(dc, RRRN_R, RRRN_S)) {
+ tcg_gen_addi_i32(cpu_R[RRRN_R], cpu_R[RRRN_S],
+ RRRN_T ? RRRN_T : -1);
+ }
+ break;
+
+ case 12: /*ST2n*/
+ if (!gen_window_check1(dc, RRRN_S)) {
+ break;
+ }
+ if (RRRN_T < 8) { /*MOVI.Nn*/
+ tcg_gen_movi_i32(cpu_R[RRRN_S],
+ RRRN_R | (RRRN_T << 4) |
+ ((RRRN_T & 6) == 6 ? 0xffffff80 : 0));
+ } else { /*BEQZ.Nn*/ /*BNEZ.Nn*/
+ TCGCond eq_ne = (RRRN_T & 4) ? TCG_COND_NE : TCG_COND_EQ;
+
+ gen_brcondi(dc, eq_ne, cpu_R[RRRN_S], 0,
+ 4 + (RRRN_R | ((RRRN_T & 3) << 4)));
+ }
+ break;
+
+ case 13: /*ST3n*/
+ switch (RRRN_R) {
+ case 0: /*MOV.Nn*/
+ if (gen_window_check2(dc, RRRN_S, RRRN_T)) {
+ tcg_gen_mov_i32(cpu_R[RRRN_T], cpu_R[RRRN_S]);
+ }
+ break;
+
+ case 15: /*S3*/
+ switch (RRRN_T) {
+ case 0: /*RET.Nn*/
+ gen_jump(dc, cpu_R[0]);
+ break;
+
+ case 1: /*RETW.Nn*/
+ HAS_OPTION(XTENSA_OPTION_WINDOWED_REGISTER);
+ {
+ TCGv_i32 tmp = tcg_const_i32(dc->pc);
+ gen_advance_ccount(dc);
+ gen_helper_retw(tmp, cpu_env, tmp);
+ gen_jump(dc, tmp);
+ tcg_temp_free(tmp);
+ }
+ break;
+
+ case 2: /*BREAK.Nn*/
+ HAS_OPTION(XTENSA_OPTION_DEBUG);
+ if (dc->debug) {
+ gen_debug_exception(dc, DEBUGCAUSE_BN);
+ }
+ break;
+
+ case 3: /*NOP.Nn*/
+ break;
+
+ case 6: /*ILL.Nn*/
+ gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+ }
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+ }
+ break;
+
+ default: /*reserved*/
+ RESERVED();
+ break;
+ }
+
+ if (dc->is_jmp == DISAS_NEXT) {
+ gen_check_loop_end(dc, 0);
+ }
+ dc->pc = dc->next_pc;
+
+ return;
+
+invalid_opcode:
+ qemu_log_mask(LOG_GUEST_ERROR, "INVALID(pc = %08x)\n", dc->pc);
+ gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
+#undef HAS_OPTION
+}
+
+static inline unsigned xtensa_insn_len(CPUXtensaState *env, DisasContext *dc)
+{
+ uint8_t b0 = cpu_ldub_code(env, dc->pc);
+ return xtensa_op0_insn_len(OP0);
+}
+
+static void gen_ibreak_check(CPUXtensaState *env, DisasContext *dc)
+{
+ unsigned i;
+
+ for (i = 0; i < dc->config->nibreak; ++i) {
+ if ((env->sregs[IBREAKENABLE] & (1 << i)) &&
+ env->sregs[IBREAKA + i] == dc->pc) {
+ gen_debug_exception(dc, DEBUGCAUSE_IB);
+ break;
+ }
+ }
+}
+
+void gen_intermediate_code(CPUXtensaState *env, TranslationBlock *tb)
+{
+ XtensaCPU *cpu = xtensa_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+ DisasContext dc;
+ int insn_count = 0;
+ int max_insns = tb->cflags & CF_COUNT_MASK;
+ uint32_t pc_start = tb->pc;
+ uint32_t next_page_start =
+ (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
+
+ if (max_insns == 0) {
+ max_insns = CF_COUNT_MASK;
+ }
+ if (max_insns > TCG_MAX_INSNS) {
+ max_insns = TCG_MAX_INSNS;
+ }
+
+ dc.config = env->config;
+ dc.singlestep_enabled = cs->singlestep_enabled;
+ dc.tb = tb;
+ dc.pc = pc_start;
+ dc.ring = tb->flags & XTENSA_TBFLAG_RING_MASK;
+ dc.cring = (tb->flags & XTENSA_TBFLAG_EXCM) ? 0 : dc.ring;
+ dc.lbeg = env->sregs[LBEG];
+ dc.lend = env->sregs[LEND];
+ dc.is_jmp = DISAS_NEXT;
+ dc.ccount_delta = 0;
+ dc.debug = tb->flags & XTENSA_TBFLAG_DEBUG;
+ dc.icount = tb->flags & XTENSA_TBFLAG_ICOUNT;
+ dc.cpenable = (tb->flags & XTENSA_TBFLAG_CPENABLE_MASK) >>
+ XTENSA_TBFLAG_CPENABLE_SHIFT;
+ dc.window = ((tb->flags & XTENSA_TBFLAG_WINDOW_MASK) >>
+ XTENSA_TBFLAG_WINDOW_SHIFT);
+
+ init_litbase(&dc);
+ init_sar_tracker(&dc);
+ if (dc.icount) {
+ dc.next_icount = tcg_temp_local_new_i32();
+ }
+
+ gen_tb_start(tb);
+
+ if (tb->flags & XTENSA_TBFLAG_EXCEPTION) {
+ tcg_gen_movi_i32(cpu_pc, dc.pc);
+ gen_exception(&dc, EXCP_DEBUG);
+ }
+
+ do {
+ tcg_gen_insn_start(dc.pc);
+ ++insn_count;
+
+ ++dc.ccount_delta;
+
+ if (unlikely(cpu_breakpoint_test(cs, dc.pc, BP_ANY))) {
+ tcg_gen_movi_i32(cpu_pc, dc.pc);
+ gen_exception(&dc, EXCP_DEBUG);
+ dc.is_jmp = DISAS_UPDATE;
+ /* The address covered by the breakpoint must be included in
+ [tb->pc, tb->pc + tb->size) in order for it to be
+ properly cleared -- thus we increment the PC here so that
+ the logic setting tb->size below does the right thing. */
+ dc.pc += 2;
+ break;
+ }
+
+ if (insn_count == max_insns && (tb->cflags & CF_LAST_IO)) {
+ gen_io_start();
+ }
+
+ if (dc.icount) {
+ TCGLabel *label = gen_new_label();
+
+ tcg_gen_addi_i32(dc.next_icount, cpu_SR[ICOUNT], 1);
+ tcg_gen_brcondi_i32(TCG_COND_NE, dc.next_icount, 0, label);
+ tcg_gen_mov_i32(dc.next_icount, cpu_SR[ICOUNT]);
+ if (dc.debug) {
+ gen_debug_exception(&dc, DEBUGCAUSE_IC);
+ }
+ gen_set_label(label);
+ }
+
+ if (dc.debug) {
+ gen_ibreak_check(env, &dc);
+ }
+
+ disas_xtensa_insn(env, &dc);
+ if (dc.icount) {
+ tcg_gen_mov_i32(cpu_SR[ICOUNT], dc.next_icount);
+ }
+ if (cs->singlestep_enabled) {
+ tcg_gen_movi_i32(cpu_pc, dc.pc);
+ gen_exception(&dc, EXCP_DEBUG);
+ break;
+ }
+ } while (dc.is_jmp == DISAS_NEXT &&
+ insn_count < max_insns &&
+ dc.pc < next_page_start &&
+ dc.pc + xtensa_insn_len(env, &dc) <= next_page_start &&
+ !tcg_op_buf_full());
+
+ reset_litbase(&dc);
+ reset_sar_tracker(&dc);
+ if (dc.icount) {
+ tcg_temp_free(dc.next_icount);
+ }
+
+ if (tb->cflags & CF_LAST_IO) {
+ gen_io_end();
+ }
+
+ if (dc.is_jmp == DISAS_NEXT) {
+ gen_jumpi(&dc, dc.pc, 0);
+ }
+ gen_tb_end(tb, insn_count);
+
+#ifdef DEBUG_DISAS
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
+ && qemu_log_in_addr_range(pc_start)) {
+ qemu_log_lock();
+ qemu_log("----------------\n");
+ qemu_log("IN: %s\n", lookup_symbol(pc_start));
+ log_target_disas(cs, pc_start, dc.pc - pc_start, 0);
+ qemu_log("\n");
+ qemu_log_unlock();
+ }
+#endif
+ tb->size = dc.pc - pc_start;
+ tb->icount = insn_count;
+}
+
+void xtensa_cpu_dump_state(CPUState *cs, FILE *f,
+ fprintf_function cpu_fprintf, int flags)
+{
+ XtensaCPU *cpu = XTENSA_CPU(cs);
+ CPUXtensaState *env = &cpu->env;
+ int i, j;
+
+ cpu_fprintf(f, "PC=%08x\n\n", env->pc);
+
+ for (i = j = 0; i < 256; ++i) {
+ if (xtensa_option_bits_enabled(env->config, sregnames[i].opt_bits)) {
+ cpu_fprintf(f, "%12s=%08x%c", sregnames[i].name, env->sregs[i],
+ (j++ % 4) == 3 ? '\n' : ' ');
+ }
+ }
+
+ cpu_fprintf(f, (j % 4) == 0 ? "\n" : "\n\n");
+
+ for (i = j = 0; i < 256; ++i) {
+ if (xtensa_option_bits_enabled(env->config, uregnames[i].opt_bits)) {
+ cpu_fprintf(f, "%s=%08x%c", uregnames[i].name, env->uregs[i],
+ (j++ % 4) == 3 ? '\n' : ' ');
+ }
+ }
+
+ cpu_fprintf(f, (j % 4) == 0 ? "\n" : "\n\n");
+
+ for (i = 0; i < 16; ++i) {
+ cpu_fprintf(f, " A%02d=%08x%c", i, env->regs[i],
+ (i % 4) == 3 ? '\n' : ' ');
+ }
+
+ cpu_fprintf(f, "\n");
+
+ for (i = 0; i < env->config->nareg; ++i) {
+ cpu_fprintf(f, "AR%02d=%08x%c", i, env->phys_regs[i],
+ (i % 4) == 3 ? '\n' : ' ');
+ }
+
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_FP_COPROCESSOR)) {
+ cpu_fprintf(f, "\n");
+
+ for (i = 0; i < 16; ++i) {
+ cpu_fprintf(f, "F%02d=%08x (%+10.8e)%c", i,
+ float32_val(env->fregs[i].f32[FP_F32_LOW]),
+ *(float *)(env->fregs[i].f32 + FP_F32_LOW),
+ (i % 2) == 1 ? '\n' : ' ');
+ }
+ }
+}
+
+void restore_state_to_opc(CPUXtensaState *env, TranslationBlock *tb,
+ target_ulong *data)
+{
+ env->pc = data[0];
+}
diff --git a/target/xtensa/xtensa-semi.c b/target/xtensa/xtensa-semi.c
new file mode 100644
index 0000000000..370e365c65
--- /dev/null
+++ b/target/xtensa/xtensa-semi.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Open Source and Linux Lab nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "qemu/log.h"
+
+enum {
+ TARGET_SYS_exit = 1,
+ TARGET_SYS_read = 3,
+ TARGET_SYS_write = 4,
+ TARGET_SYS_open = 5,
+ TARGET_SYS_close = 6,
+ TARGET_SYS_lseek = 19,
+ TARGET_SYS_select_one = 29,
+
+ TARGET_SYS_argc = 1000,
+ TARGET_SYS_argv_sz = 1001,
+ TARGET_SYS_argv = 1002,
+ TARGET_SYS_memset = 1004,
+};
+
+enum {
+ SELECT_ONE_READ = 1,
+ SELECT_ONE_WRITE = 2,
+ SELECT_ONE_EXCEPT = 3,
+};
+
+enum {
+ TARGET_EPERM = 1,
+ TARGET_ENOENT = 2,
+ TARGET_ESRCH = 3,
+ TARGET_EINTR = 4,
+ TARGET_EIO = 5,
+ TARGET_ENXIO = 6,
+ TARGET_E2BIG = 7,
+ TARGET_ENOEXEC = 8,
+ TARGET_EBADF = 9,
+ TARGET_ECHILD = 10,
+ TARGET_EAGAIN = 11,
+ TARGET_ENOMEM = 12,
+ TARGET_EACCES = 13,
+ TARGET_EFAULT = 14,
+ TARGET_ENOTBLK = 15,
+ TARGET_EBUSY = 16,
+ TARGET_EEXIST = 17,
+ TARGET_EXDEV = 18,
+ TARGET_ENODEV = 19,
+ TARGET_ENOTDIR = 20,
+ TARGET_EISDIR = 21,
+ TARGET_EINVAL = 22,
+ TARGET_ENFILE = 23,
+ TARGET_EMFILE = 24,
+ TARGET_ENOTTY = 25,
+ TARGET_ETXTBSY = 26,
+ TARGET_EFBIG = 27,
+ TARGET_ENOSPC = 28,
+ TARGET_ESPIPE = 29,
+ TARGET_EROFS = 30,
+ TARGET_EMLINK = 31,
+ TARGET_EPIPE = 32,
+ TARGET_EDOM = 33,
+ TARGET_ERANGE = 34,
+ TARGET_ENOSYS = 88,
+ TARGET_ELOOP = 92,
+};
+
+static uint32_t errno_h2g(int host_errno)
+{
+    static const uint32_t guest_errno[] = {
+        [EPERM] = TARGET_EPERM,
+        [ENOENT] = TARGET_ENOENT,
+        [ESRCH] = TARGET_ESRCH,
+        [EINTR] = TARGET_EINTR,
+        [EIO] = TARGET_EIO,
+        [ENXIO] = TARGET_ENXIO,
+        [E2BIG] = TARGET_E2BIG,
+        [ENOEXEC] = TARGET_ENOEXEC,
+        [EBADF] = TARGET_EBADF,
+        [ECHILD] = TARGET_ECHILD,
+        [EAGAIN] = TARGET_EAGAIN,
+        [ENOMEM] = TARGET_ENOMEM,
+        [EACCES] = TARGET_EACCES,
+        [EFAULT] = TARGET_EFAULT,
+#ifdef ENOTBLK
+        [ENOTBLK] = TARGET_ENOTBLK,
+#endif
+        [EBUSY] = TARGET_EBUSY,
+        [EEXIST] = TARGET_EEXIST,
+        [EXDEV] = TARGET_EXDEV,
+        [ENODEV] = TARGET_ENODEV,
+        [ENOTDIR] = TARGET_ENOTDIR,
+        [EISDIR] = TARGET_EISDIR,
+        [EINVAL] = TARGET_EINVAL,
+        [ENFILE] = TARGET_ENFILE,
+        [EMFILE] = TARGET_EMFILE,
+        [ENOTTY] = TARGET_ENOTTY,
+#ifdef ETXTBSY
+        [ETXTBSY] = TARGET_ETXTBSY,
+#endif
+        [EFBIG] = TARGET_EFBIG,
+        [ENOSPC] = TARGET_ENOSPC,
+        [ESPIPE] = TARGET_ESPIPE,
+        [EROFS] = TARGET_EROFS,
+        [EMLINK] = TARGET_EMLINK,
+        [EPIPE] = TARGET_EPIPE,
+        [EDOM] = TARGET_EDOM,
+        [ERANGE] = TARGET_ERANGE,
+        [ENOSYS] = TARGET_ENOSYS,
+#ifdef ELOOP
+        [ELOOP] = TARGET_ELOOP,
+#endif
+    };
+
+    if (host_errno == 0) {
+        return 0;
+    } else if (host_errno > 0 && host_errno < ARRAY_SIZE(guest_errno) &&
+               guest_errno[host_errno]) {
+        return guest_errno[host_errno];
+    } else {
+        return TARGET_EINVAL;
+    }
+}
+
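+/*
+ * Handle a guest 'simcall' instruction: the call number is taken from
+ * regs[2] (a2) and its arguments from regs[3]..regs[5]; the result is
+ * returned in a2 and a guest errno in a3.
+ */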
+void HELPER(simcall)(CPUXtensaState *env)
+{
+    CPUState *cs = CPU(xtensa_env_get_cpu(env));
+    uint32_t *regs = env->regs;
+
+    switch (regs[2]) {
+    case TARGET_SYS_exit:
+        qemu_log("exit(%d) simcall\n", regs[3]);
+        exit(regs[3]);
+        break;
+
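+    /*
+     * Read/write guest memory one page at a time: map each guest
+     * physical page, do the host I/O, and stop on error or on an
+     * unmapped page.
+     */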
+    case TARGET_SYS_read:
+    case TARGET_SYS_write:
+        {
+            bool is_write = regs[2] == TARGET_SYS_write;
+            uint32_t fd = regs[3];
+            uint32_t vaddr = regs[4];
+            uint32_t len = regs[5];
+
+            while (len > 0) {
+                hwaddr paddr = cpu_get_phys_page_debug(cs, vaddr);
+                uint32_t page_left =
+                    TARGET_PAGE_SIZE - (vaddr & (TARGET_PAGE_SIZE - 1));
+                uint32_t io_sz = page_left < len ? page_left : len;
+                hwaddr sz = io_sz;
+                void *buf = cpu_physical_memory_map(paddr, &sz, is_write);
+
+                if (buf) {
+                    vaddr += io_sz;
+                    len -= io_sz;
+                    regs[2] = is_write ?
+                        write(fd, buf, io_sz) :
+                        read(fd, buf, io_sz);
+                    regs[3] = errno_h2g(errno);
+                    cpu_physical_memory_unmap(buf, sz, is_write, sz);
+                    if (regs[2] == -1) {
+                        break;
+                    }
+                } else {
+                    regs[2] = -1;
+                    regs[3] = TARGET_EINVAL;
+                    break;
+                }
+            }
+        }
+        break;
+
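+    /* Copy the NUL-terminated path out of guest memory before calling open(). */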
+    case TARGET_SYS_open:
+        {
+            char name[1024];
+            int rc;
+            int i;
+
+            for (i = 0; i < ARRAY_SIZE(name); ++i) {
+                rc = cpu_memory_rw_debug(cs, regs[3] + i,
+                                         (uint8_t *)name + i, 1, 0);
+                if (rc != 0 || name[i] == 0) {
+                    break;
+                }
+            }
+
+            if (rc == 0 && i < ARRAY_SIZE(name)) {
+                regs[2] = open(name, regs[4], regs[5]);
+                regs[3] = errno_h2g(errno);
+            } else {
+                regs[2] = -1;
+                regs[3] = TARGET_EINVAL;
+            }
+        }
+        break;
+
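+    /* Leave the host's stdio descriptors (0..2) alone, but report success. */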
+    case TARGET_SYS_close:
+        if (regs[3] < 3) {
+            regs[2] = regs[3] = 0;
+        } else {
+            regs[2] = close(regs[3]);
+            regs[3] = errno_h2g(errno);
+        }
+        break;
+
+    case TARGET_SYS_lseek:
+        regs[2] = lseek(regs[3], (off_t)(int32_t)regs[4], regs[5]);
+        regs[3] = errno_h2g(errno);
+        break;
+
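+    /*
+     * Poll a single descriptor for the requested condition, with an
+     * optional timeout read from a guest structure of two 32-bit words
+     * (seconds, microseconds).
+     */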
+    case TARGET_SYS_select_one:
+        {
+            uint32_t fd = regs[3];
+            uint32_t rq = regs[4];
+            uint32_t target_tv = regs[5];
+            uint32_t target_tvv[2];
+
+            struct timeval tv = {0};
+            fd_set fdset;
+
+            FD_ZERO(&fdset);
+            FD_SET(fd, &fdset);
+
+            if (target_tv) {
+                cpu_memory_rw_debug(cs, target_tv,
+                        (uint8_t *)target_tvv, sizeof(target_tvv), 0);
+                tv.tv_sec = (int32_t)tswap32(target_tvv[0]);
+                tv.tv_usec = (int32_t)tswap32(target_tvv[1]);
+            }
+            regs[2] = select(fd + 1,
+                             rq == SELECT_ONE_READ ? &fdset : NULL,
+                             rq == SELECT_ONE_WRITE ? &fdset : NULL,
+                             rq == SELECT_ONE_EXCEPT ? &fdset : NULL,
+                             target_tv ? &tv : NULL);
+            regs[3] = errno_h2g(errno);
+        }
+        break;
+
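+    /* Present a fixed single-entry command line ("test") to the guest. */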
+    case TARGET_SYS_argc:
+        regs[2] = 1;
+        regs[3] = 0;
+        break;
+
+    case TARGET_SYS_argv_sz:
+        regs[2] = 128;
+        regs[3] = 0;
+        break;
+
+    case TARGET_SYS_argv:
+        {
+            struct Argv {
+                uint32_t argptr[2];
+                char text[120];
+            } argv = {
+                {0, 0},
+                "test"
+            };
+
+            argv.argptr[0] = tswap32(regs[3] + offsetof(struct Argv, text));
+            cpu_memory_rw_debug(cs,
+                                regs[3], (uint8_t *)&argv, sizeof(argv), 1);
+        }
+        break;
+
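+    /*
+     * memset on guest physical memory; pages that cannot be mapped are
+     * skipped one byte at a time.
+     */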
+    case TARGET_SYS_memset:
+        {
+            uint32_t base = regs[3];
+            uint32_t sz = regs[5];
+
+            while (sz) {
+                hwaddr len = sz;
+                void *buf = cpu_physical_memory_map(base, &len, 1);
+
+                if (buf && len) {
+                    memset(buf, regs[4], len);
+                    cpu_physical_memory_unmap(buf, len, 1, len);
+                } else {
+                    len = 1;
+                }
+                base += len;
+                sz -= len;
+            }
+            regs[2] = regs[3];
+            regs[3] = 0;
+        }
+        break;
+
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR, "%s(%d): not implemented\n",
+                      __func__, regs[2]);
+        regs[2] = -1;
+        regs[3] = TARGET_ENOSYS;
+        break;
+    }
+}